test/grsecurity-2.9.1-3.7.4-201301230048.patch
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index 74c25c8..deadba2 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -48,14 +51,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -69,6 +75,7 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52@@ -80,6 +87,7 @@ aic7*seq.h*
53 aicasm
54 aicdb.h*
55 altivec*.c
56+ashldi3.S
57 asm-offsets.h
58 asm_offsets.h
59 autoconf.h*
60@@ -92,19 +100,24 @@ bounds.h
61 bsetup
62 btfixupprep
63 build
64+builtin-policy.h
65 bvmlinux
66 bzImage*
67 capability_names.h
68 capflags.c
69 classlist.h*
70+clut_vga16.c
71+common-cmds.h
72 comp*.log
73 compile.h*
74 conf
75 config
76 config-*
77 config_data.h*
78+config.c
79 config.mak
80 config.mak.autogen
81+config.tmp
82 conmakehash
83 consolemap_deftbl.c*
84 cpustr.h
85@@ -115,9 +128,11 @@ devlist.h*
86 dnotify_test
87 docproc
88 dslm
89+dtc-lexer.lex.c
90 elf2ecoff
91 elfconfig.h*
92 evergreen_reg_safe.h
93+exception_policy.conf
94 fixdep
95 flask.h
96 fore200e_mkfirm
97@@ -125,12 +140,15 @@ fore200e_pca_fw.c*
98 gconf
99 gconf.glade.h
100 gen-devlist
101+gen-kdb_cmds.c
102 gen_crc32table
103 gen_init_cpio
104 generated
105 genheaders
106 genksyms
107 *_gray256.c
108+hash
109+hid-example
110 hpet_example
111 hugepage-mmap
112 hugepage-shm
113@@ -145,14 +163,14 @@ int32.c
114 int4.c
115 int8.c
116 kallsyms
117-kconfig
118+kern_constants.h
119 keywords.c
120 ksym.c*
121 ksym.h*
122 kxgettext
123 lex.c
124 lex.*.c
125-linux
126+lib1funcs.S
127 logo_*.c
128 logo_*_clut224.c
129 logo_*_mono.c
130@@ -162,14 +180,15 @@ mach-types.h
131 machtypes.h
132 map
133 map_hugetlb
134-media
135 mconf
136+mdp
137 miboot*
138 mk_elfconfig
139 mkboot
140 mkbugboot
141 mkcpustr
142 mkdep
143+mkpiggy
144 mkprep
145 mkregtable
146 mktables
147@@ -186,6 +205,8 @@ oui.c*
148 page-types
149 parse.c
150 parse.h
151+parse-events*
152+pasyms.h
153 patches*
154 pca200e.bin
155 pca200e_ecd.bin2
156@@ -195,6 +216,7 @@ perf-archive
157 piggyback
158 piggy.gzip
159 piggy.S
160+pmu-*
161 pnmtologo
162 ppc_defs.h*
163 pss_boot.h
164@@ -204,7 +226,10 @@ r200_reg_safe.h
165 r300_reg_safe.h
166 r420_reg_safe.h
167 r600_reg_safe.h
168+realmode.lds
169+realmode.relocs
170 recordmcount
171+regdb.c
172 relocs
173 rlim_names.h
174 rn50_reg_safe.h
175@@ -214,8 +239,11 @@ series
176 setup
177 setup.bin
178 setup.elf
179+size_overflow_hash.h
180 sImage
181+slabinfo
182 sm_tbl*
183+sortextable
184 split-include
185 syscalltab.h
186 tables.c
187@@ -225,6 +253,7 @@ tftpboot.img
188 timeconst.h
189 times.h*
190 trix_boot.h
191+user_constants.h
192 utsrelease.h*
193 vdso-syms.lds
194 vdso.lds
195@@ -236,13 +265,17 @@ vdso32.lds
196 vdso32.so.dbg
197 vdso64.lds
198 vdso64.so.dbg
199+vdsox32.lds
200+vdsox32-syms.lds
201 version.h*
202 vmImage
203 vmlinux
204 vmlinux-*
205 vmlinux.aout
206 vmlinux.bin.all
207+vmlinux.bin.bz2
208 vmlinux.lds
209+vmlinux.relocs
210 vmlinuz
211 voffset.h
212 vsyscall.lds
213@@ -250,9 +283,11 @@ vsyscall_32.lds
214 wanxlfw.inc
215 uImage
216 unifdef
217+utsrelease.h
218 wakeup.bin
219 wakeup.elf
220 wakeup.lds
221 zImage*
222 zconf.hash.c
223+zconf.lex.c
224 zoffset.h
225diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
226index 9776f06..18b1856 100644
227--- a/Documentation/kernel-parameters.txt
228+++ b/Documentation/kernel-parameters.txt
229@@ -905,6 +905,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
230 gpt [EFI] Forces disk with valid GPT signature but
231 invalid Protective MBR to be treated as GPT.
232
233+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses the GID that is
234+ exempt from grsecurity's /proc restrictions
235+
236 hashdist= [KNL,NUMA] Large hashes allocated during boot
237 are distributed across NUMA nodes. Defaults on
238 for 64-bit NUMA, off otherwise.
239@@ -2082,6 +2085,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
240 the specified number of seconds. This is to be used if
241 your oopses keep scrolling off the screen.
242
243+ pax_nouderef [X86] Disables UDEREF. Most likely needed under certain
244+ virtualization environments that don't cope well with the
245+ expand-down segment used by UDEREF on X86-32 or the frequent
246+ page table updates on X86-64.
247+
248+ pax_softmode= 0/1 to disable/enable PaX softmode at boot time.
249+
250 pcbit= [HW,ISDN]
251
252 pcd. [PARIDE]
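
Usage note: the grsecurity/PaX parameters documented above are ordinary kernel command-line options. A hypothetical bootloader entry enabling a dedicated /proc group and PaX softmode might look like this (the GID value is illustrative only):

    vmlinuz-3.7.4-grsec root=/dev/sda1 ro grsec_proc_gid=1001 pax_softmode=1
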
253diff --git a/Makefile b/Makefile
254index f9196bc..63b33e4 100644
255--- a/Makefile
256+++ b/Makefile
257@@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
258
259 HOSTCC = gcc
260 HOSTCXX = g++
261-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
262-HOSTCXXFLAGS = -O2
263+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
264+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
265+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
266
267 # Decide whether to build built-in, modular, or both.
268 # Normally, just do built-in.
269@@ -414,8 +415,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
270 # Rules shared between *config targets and build targets
271
272 # Basic helpers built in scripts/
273-PHONY += scripts_basic
274-scripts_basic:
275+PHONY += scripts_basic gcc-plugins
276+scripts_basic: gcc-plugins
277 $(Q)$(MAKE) $(build)=scripts/basic
278 $(Q)rm -f .tmp_quiet_recordmcount
279
280@@ -575,6 +576,60 @@ else
281 KBUILD_CFLAGS += -O2
282 endif
283
284+ifndef DISABLE_PAX_PLUGINS
285+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
286+ifneq ($(PLUGINCC),)
287+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
288+ifndef CONFIG_UML
289+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
290+endif
291+endif
292+ifdef CONFIG_PAX_MEMORY_STACKLEAK
293+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
294+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
295+endif
296+ifdef CONFIG_KALLOCSTAT_PLUGIN
297+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
298+endif
299+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
300+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
301+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
302+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
303+endif
304+ifdef CONFIG_CHECKER_PLUGIN
305+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
306+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
307+endif
308+endif
309+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
310+ifdef CONFIG_PAX_SIZE_OVERFLOW
311+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
312+endif
313+ifdef CONFIG_PAX_LATENT_ENTROPY
314+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
315+endif
316+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
317+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
318+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS)
319+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
320+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN
321+ifeq ($(KBUILD_EXTMOD),)
322+gcc-plugins:
323+ $(Q)$(MAKE) $(build)=tools/gcc
324+else
325+gcc-plugins: ;
326+endif
327+else
328+gcc-plugins:
329+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
330+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.)
331+else
332+ $(Q)echo "warning: your gcc version does not support plugins; you should upgrade to gcc 4.5 or newer"
333+endif
334+ $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
335+endif
336+endif
337+
338 include $(srctree)/arch/$(SRCARCH)/Makefile
339
340 ifdef CONFIG_READABLE_ASM
341@@ -731,7 +786,7 @@ export mod_sign_cmd
342
343
344 ifeq ($(KBUILD_EXTMOD),)
345-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
346+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
347
348 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
349 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
350@@ -778,6 +833,8 @@ endif
351
352 # The actual objects are generated when descending,
353 # make sure no implicit rule kicks in
354+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
355+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
356 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
357
358 # Handle descending into subdirectories listed in $(vmlinux-dirs)
359@@ -787,7 +844,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
360 # Error messages still appears in the original language
361
362 PHONY += $(vmlinux-dirs)
363-$(vmlinux-dirs): prepare scripts
364+$(vmlinux-dirs): gcc-plugins prepare scripts
365 $(Q)$(MAKE) $(build)=$@
366
367 # Store (new) KERNELRELASE string in include/config/kernel.release
368@@ -831,6 +888,7 @@ prepare0: archprepare FORCE
369 $(Q)$(MAKE) $(build)=.
370
371 # All the preparing..
372+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
373 prepare: prepare0
374
375 # Generate some files
376@@ -938,6 +996,8 @@ all: modules
377 # using awk while concatenating to the final file.
378
379 PHONY += modules
380+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
381+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
382 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
383 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
384 @$(kecho) ' Building modules, stage 2.';
385@@ -953,7 +1013,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
386
387 # Target to prepare building external modules
388 PHONY += modules_prepare
389-modules_prepare: prepare scripts
390+modules_prepare: gcc-plugins prepare scripts
391
392 # Target to install modules
393 PHONY += modules_install
394@@ -1013,7 +1073,7 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
395 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
396 signing_key.priv signing_key.x509 x509.genkey \
397 extra_certificates signing_key.x509.keyid \
398- signing_key.x509.signer
399+ signing_key.x509.signer tools/gcc/size_overflow_hash.h
400
401 # clean - Delete most, but leave enough to build external modules
402 #
403@@ -1053,6 +1113,7 @@ distclean: mrproper
404 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
405 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
406 -o -name '.*.rej' \
407+ -o -name '.*.rej' -o -name '*.so' \
408 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
409 -type f -print | xargs rm -f
410
411@@ -1213,6 +1274,8 @@ PHONY += $(module-dirs) modules
412 $(module-dirs): crmodverdir $(objtree)/Module.symvers
413 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
414
415+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
416+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
417 modules: $(module-dirs)
418 @$(kecho) ' Building modules, stage 2.';
419 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
420@@ -1349,17 +1412,21 @@ else
421 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
422 endif
423
424-%.s: %.c prepare scripts FORCE
425+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
426+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
427+%.s: %.c gcc-plugins prepare scripts FORCE
428 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
429 %.i: %.c prepare scripts FORCE
430 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
431-%.o: %.c prepare scripts FORCE
432+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
433+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
434+%.o: %.c gcc-plugins prepare scripts FORCE
435 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
436 %.lst: %.c prepare scripts FORCE
437 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
438-%.s: %.S prepare scripts FORCE
439+%.s: %.S gcc-plugins prepare scripts FORCE
440 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
441-%.o: %.S prepare scripts FORCE
442+%.o: %.S gcc-plugins prepare scripts FORCE
443 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
444 %.symtypes: %.c prepare scripts FORCE
445 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
446@@ -1369,11 +1436,15 @@ endif
447 $(cmd_crmodverdir)
448 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
449 $(build)=$(build-dir)
450-%/: prepare scripts FORCE
451+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
452+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
453+%/: gcc-plugins prepare scripts FORCE
454 $(cmd_crmodverdir)
455 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
456 $(build)=$(build-dir)
457-%.ko: prepare scripts FORCE
458+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
459+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
460+%.ko: gcc-plugins prepare scripts FORCE
461 $(cmd_crmodverdir)
462 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
463 $(build)=$(build-dir) $(@:.ko=.o)
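
To recap what the Makefile changes above do: scripts/gcc-plugin.sh probes whether the host compiler can build gcc plugins against $(CC), each enabled CONFIG_PAX_* feature contributes its -fplugin=.../tools/gcc/*.so flags to GCC_PLUGINS_CFLAGS, and those flags are then forced onto every object, module, and single-target rule so nothing is built uninstrumented. When the probe fails on gcc >= 4.5, the $(error ...) above aborts the build; per its own message, the opt-out is:

    make DISABLE_PAX_PLUGINS=y
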
464diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
465index c2cbe4f..f7264b4 100644
466--- a/arch/alpha/include/asm/atomic.h
467+++ b/arch/alpha/include/asm/atomic.h
468@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
469 #define atomic_dec(v) atomic_sub(1,(v))
470 #define atomic64_dec(v) atomic64_sub(1,(v))
471
472+#define atomic64_read_unchecked(v) atomic64_read(v)
473+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
474+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
475+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
476+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
477+#define atomic64_inc_unchecked(v) atomic64_inc(v)
478+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
479+#define atomic64_dec_unchecked(v) atomic64_dec(v)
480+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
481+
482 #define smp_mb__before_atomic_dec() smp_mb()
483 #define smp_mb__after_atomic_dec() smp_mb()
484 #define smp_mb__before_atomic_inc() smp_mb()
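
Alpha carries no PAX_REFCOUNT instrumentation, so the *_unchecked macros above are plain aliases; their purpose is to give every architecture a uniform API so generic code can mark counters where wraparound is deliberate. A minimal sketch of the intended call-site split (the counters and helpers are hypothetical, not from this patch):

    /* reference count: overflow must trap on REFCOUNT-capable arches */
    static atomic64_t obj_refs = ATOMIC64_INIT(1);
    /* statistics: wraparound is harmless, so opt out of the check */
    static atomic64_unchecked_t rx_bytes = ATOMIC64_INIT(0);

    static inline void obj_get(void)      { atomic64_inc(&obj_refs); }
    static inline void account_rx(long n) { atomic64_add_unchecked(n, &rx_bytes); }
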
485diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
486index ad368a9..fbe0f25 100644
487--- a/arch/alpha/include/asm/cache.h
488+++ b/arch/alpha/include/asm/cache.h
489@@ -4,19 +4,19 @@
490 #ifndef __ARCH_ALPHA_CACHE_H
491 #define __ARCH_ALPHA_CACHE_H
492
493+#include <linux/const.h>
494
495 /* Bytes per L1 (data) cache line. */
496 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
497-# define L1_CACHE_BYTES 64
498 # define L1_CACHE_SHIFT 6
499 #else
500 /* Both EV4 and EV5 are write-through, read-allocate,
501 direct-mapped, physical.
502 */
503-# define L1_CACHE_BYTES 32
504 # define L1_CACHE_SHIFT 5
505 #endif
506
507+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
508 #define SMP_CACHE_BYTES L1_CACHE_BYTES
509
510 #endif
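
The point of expressing L1_CACHE_BYTES through _AC() instead of keeping two literal values: linux/const.h drops the type suffix when __ASSEMBLY__ is defined, so the one definition now works in both C and .S files while remaining an unsigned long in C, and it can no longer drift out of sync with L1_CACHE_SHIFT. Roughly how _AC() behaves (a simplified paraphrase of linux/const.h, not part of this hunk):

    #ifdef __ASSEMBLY__
    #define _AC(X,Y)  X             /* assembler sees: (1 << 6)   */
    #else
    #define __AC(X,Y) (X##Y)
    #define _AC(X,Y)  __AC(X,Y)     /* C sees:         (1UL << 6) */
    #endif
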
511diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
512index 968d999..d36b2df 100644
513--- a/arch/alpha/include/asm/elf.h
514+++ b/arch/alpha/include/asm/elf.h
515@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
516
517 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
518
519+#ifdef CONFIG_PAX_ASLR
520+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
521+
522+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
523+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
524+#endif
525+
526 /* $0 is set by ld.so to a pointer to a function which might be
527 registered using atexit. This provides a mean for the dynamic
528 linker to call DT_FINI functions for shared libraries that have
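
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN are bit counts, not byte offsets: PaX draws that many random bits and shifts them left by PAGE_SHIFT when sliding the mmap and stack bases. A sketch of the consuming side, modelled on PaX's load_elf_binary() changes (simplified, not part of this hunk):

    #ifdef CONFIG_PAX_ASLR
    mm->delta_mmap  = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)  - 1)) << PAGE_SHIFT;
    mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN) - 1)) << PAGE_SHIFT;
    #endif

With alpha's 8 KB pages (PAGE_SHIFT = 13), the 28-bit mmap delta for a 64-bit personality spans a 2^41-byte randomization range.
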
529diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
530index bc2a0da..8ad11ee 100644
531--- a/arch/alpha/include/asm/pgalloc.h
532+++ b/arch/alpha/include/asm/pgalloc.h
533@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
534 pgd_set(pgd, pmd);
535 }
536
537+static inline void
538+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
539+{
540+ pgd_populate(mm, pgd, pmd);
541+}
542+
543 extern pgd_t *pgd_alloc(struct mm_struct *mm);
544
545 static inline void
546diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
547index 81a4342..348b927 100644
548--- a/arch/alpha/include/asm/pgtable.h
549+++ b/arch/alpha/include/asm/pgtable.h
550@@ -102,6 +102,17 @@ struct vm_area_struct;
551 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
552 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
553 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
554+
555+#ifdef CONFIG_PAX_PAGEEXEC
556+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
557+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
558+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
559+#else
560+# define PAGE_SHARED_NOEXEC PAGE_SHARED
561+# define PAGE_COPY_NOEXEC PAGE_COPY
562+# define PAGE_READONLY_NOEXEC PAGE_READONLY
563+#endif
564+
565 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
566
567 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
568diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
569index 2fd00b7..cfd5069 100644
570--- a/arch/alpha/kernel/module.c
571+++ b/arch/alpha/kernel/module.c
572@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
573
574 /* The small sections were sorted to the end of the segment.
575 The following should definitely cover them. */
576- gp = (u64)me->module_core + me->core_size - 0x8000;
577+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
578 got = sechdrs[me->arch.gotsecindex].sh_addr;
579
580 for (i = 0; i < n; i++) {
581diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
582index 14db93e..47bed62 100644
583--- a/arch/alpha/kernel/osf_sys.c
584+++ b/arch/alpha/kernel/osf_sys.c
585@@ -1295,16 +1295,16 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
586 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
587
588 static unsigned long
589-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
590- unsigned long limit)
591+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
592+ unsigned long limit, unsigned long flags)
593 {
594 struct vm_area_struct *vma = find_vma(current->mm, addr);
595-
596+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
597 while (1) {
598 /* At this point: (!vma || addr < vma->vm_end). */
599 if (limit - len < addr)
600 return -ENOMEM;
601- if (!vma || addr + len <= vma->vm_start)
602+ if (check_heap_stack_gap(vma, addr, len, offset))
603 return addr;
604 addr = vma->vm_end;
605 vma = vma->vm_next;
606@@ -1340,20 +1340,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
607 merely specific addresses, but regions of memory -- perhaps
608 this feature should be incorporated into all ports? */
609
610+#ifdef CONFIG_PAX_RANDMMAP
611+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
612+#endif
613+
614 if (addr) {
615- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
616+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
617 if (addr != (unsigned long) -ENOMEM)
618 return addr;
619 }
620
621 /* Next, try allocating at TASK_UNMAPPED_BASE. */
622- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
623- len, limit);
624+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
625+
626 if (addr != (unsigned long) -ENOMEM)
627 return addr;
628
629 /* Finally, try allocating in low memory. */
630- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
631+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
632
633 return addr;
634 }
635diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
636index 0c4132d..88f0d53 100644
637--- a/arch/alpha/mm/fault.c
638+++ b/arch/alpha/mm/fault.c
639@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
640 __reload_thread(pcb);
641 }
642
643+#ifdef CONFIG_PAX_PAGEEXEC
644+/*
645+ * PaX: decide what to do with offenders (regs->pc = fault address)
646+ *
647+ * returns 1 when task should be killed
648+ * 2 when patched PLT trampoline was detected
649+ * 3 when unpatched PLT trampoline was detected
650+ */
651+static int pax_handle_fetch_fault(struct pt_regs *regs)
652+{
653+
654+#ifdef CONFIG_PAX_EMUPLT
655+ int err;
656+
657+ do { /* PaX: patched PLT emulation #1 */
658+ unsigned int ldah, ldq, jmp;
659+
660+ err = get_user(ldah, (unsigned int *)regs->pc);
661+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
662+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
663+
664+ if (err)
665+ break;
666+
667+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
668+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
669+ jmp == 0x6BFB0000U)
670+ {
671+ unsigned long r27, addr;
672+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
673+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
674+
675+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
676+ err = get_user(r27, (unsigned long *)addr);
677+ if (err)
678+ break;
679+
680+ regs->r27 = r27;
681+ regs->pc = r27;
682+ return 2;
683+ }
684+ } while (0);
685+
686+ do { /* PaX: patched PLT emulation #2 */
687+ unsigned int ldah, lda, br;
688+
689+ err = get_user(ldah, (unsigned int *)regs->pc);
690+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
691+ err |= get_user(br, (unsigned int *)(regs->pc+8));
692+
693+ if (err)
694+ break;
695+
696+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
697+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
698+ (br & 0xFFE00000U) == 0xC3E00000U)
699+ {
700+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
701+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
702+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
703+
704+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
705+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
706+ return 2;
707+ }
708+ } while (0);
709+
710+ do { /* PaX: unpatched PLT emulation */
711+ unsigned int br;
712+
713+ err = get_user(br, (unsigned int *)regs->pc);
714+
715+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
716+ unsigned int br2, ldq, nop, jmp;
717+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
718+
719+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
720+ err = get_user(br2, (unsigned int *)addr);
721+ err |= get_user(ldq, (unsigned int *)(addr+4));
722+ err |= get_user(nop, (unsigned int *)(addr+8));
723+ err |= get_user(jmp, (unsigned int *)(addr+12));
724+ err |= get_user(resolver, (unsigned long *)(addr+16));
725+
726+ if (err)
727+ break;
728+
729+ if (br2 == 0xC3600000U &&
730+ ldq == 0xA77B000CU &&
731+ nop == 0x47FF041FU &&
732+ jmp == 0x6B7B0000U)
733+ {
734+ regs->r28 = regs->pc+4;
735+ regs->r27 = addr+16;
736+ regs->pc = resolver;
737+ return 3;
738+ }
739+ }
740+ } while (0);
741+#endif
742+
743+ return 1;
744+}
745+
746+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
747+{
748+ unsigned long i;
749+
750+ printk(KERN_ERR "PAX: bytes at PC: ");
751+ for (i = 0; i < 5; i++) {
752+ unsigned int c;
753+ if (get_user(c, (unsigned int *)pc+i))
754+ printk(KERN_CONT "???????? ");
755+ else
756+ printk(KERN_CONT "%08x ", c);
757+ }
758+ printk("\n");
759+}
760+#endif
761
762 /*
763 * This routine handles page faults. It determines the address,
764@@ -133,8 +251,29 @@ retry:
765 good_area:
766 si_code = SEGV_ACCERR;
767 if (cause < 0) {
768- if (!(vma->vm_flags & VM_EXEC))
769+ if (!(vma->vm_flags & VM_EXEC)) {
770+
771+#ifdef CONFIG_PAX_PAGEEXEC
772+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
773+ goto bad_area;
774+
775+ up_read(&mm->mmap_sem);
776+ switch (pax_handle_fetch_fault(regs)) {
777+
778+#ifdef CONFIG_PAX_EMUPLT
779+ case 2:
780+ case 3:
781+ return;
782+#endif
783+
784+ }
785+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
786+ do_group_exit(SIGKILL);
787+#else
788 goto bad_area;
789+#endif
790+
791+ }
792 } else if (!cause) {
793 /* Allow reads even for write-only mappings */
794 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
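
For orientation, the instruction-encoding matchers in pax_handle_fetch_fault() above correspond to the canonical alpha PLT stub shapes. The form recognized by the 0x277B..../0xA77B..../0x6BFB0000 tests is, written out as assembler (illustrative decoding, not taken from the patch):

    ldah $27, hi(fptr)($27)   /* 0x277Bxxxx */
    ldq  $27, lo(fptr)($27)   /* 0xA77Bxxxx */
    jmp  $31, ($27)           /* 0x6BFB0000 */

Since $27 is the procedure-value register in the alpha calling convention, emulating the stub amounts to loading the target into r27 and redirecting the PC, which is what the return-value-2 paths do.
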
795diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
796index c79f61f..9ac0642 100644
797--- a/arch/arm/include/asm/atomic.h
798+++ b/arch/arm/include/asm/atomic.h
799@@ -17,17 +17,35 @@
800 #include <asm/barrier.h>
801 #include <asm/cmpxchg.h>
802
803+#ifdef CONFIG_GENERIC_ATOMIC64
804+#include <asm-generic/atomic64.h>
805+#endif
806+
807 #define ATOMIC_INIT(i) { (i) }
808
809 #ifdef __KERNEL__
810
811+#define _ASM_EXTABLE(from, to) \
812+" .pushsection __ex_table,\"a\"\n"\
813+" .align 3\n" \
814+" .long " #from ", " #to"\n" \
815+" .popsection"
816+
817 /*
818 * On ARM, ordinary assignment (str instruction) doesn't clear the local
819 * strex/ldrex monitor on some implementations. The reason we can use it for
820 * atomic_set() is the clrex or dummy strex done on every exception return.
821 */
822 #define atomic_read(v) (*(volatile int *)&(v)->counter)
823+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
824+{
825+ return v->counter;
826+}
827 #define atomic_set(v,i) (((v)->counter) = (i))
828+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
829+{
830+ v->counter = i;
831+}
832
833 #if __LINUX_ARM_ARCH__ >= 6
834
835@@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
836 int result;
837
838 __asm__ __volatile__("@ atomic_add\n"
839+"1: ldrex %1, [%3]\n"
840+" adds %0, %1, %4\n"
841+
842+#ifdef CONFIG_PAX_REFCOUNT
843+" bvc 3f\n"
844+"2: bkpt 0xf103\n"
845+"3:\n"
846+#endif
847+
848+" strex %1, %0, [%3]\n"
849+" teq %1, #0\n"
850+" bne 1b"
851+
852+#ifdef CONFIG_PAX_REFCOUNT
853+"\n4:\n"
854+ _ASM_EXTABLE(2b, 4b)
855+#endif
856+
857+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
858+ : "r" (&v->counter), "Ir" (i)
859+ : "cc");
860+}
861+
862+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
863+{
864+ unsigned long tmp;
865+ int result;
866+
867+ __asm__ __volatile__("@ atomic_add_unchecked\n"
868 "1: ldrex %0, [%3]\n"
869 " add %0, %0, %4\n"
870 " strex %1, %0, [%3]\n"
871@@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
872 smp_mb();
873
874 __asm__ __volatile__("@ atomic_add_return\n"
875+"1: ldrex %1, [%3]\n"
876+" adds %0, %1, %4\n"
877+
878+#ifdef CONFIG_PAX_REFCOUNT
879+" bvc 3f\n"
880+" mov %0, %1\n"
881+"2: bkpt 0xf103\n"
882+"3:\n"
883+#endif
884+
885+" strex %1, %0, [%3]\n"
886+" teq %1, #0\n"
887+" bne 1b"
888+
889+#ifdef CONFIG_PAX_REFCOUNT
890+"\n4:\n"
891+ _ASM_EXTABLE(2b, 4b)
892+#endif
893+
894+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
895+ : "r" (&v->counter), "Ir" (i)
896+ : "cc");
897+
898+ smp_mb();
899+
900+ return result;
901+}
902+
903+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
904+{
905+ unsigned long tmp;
906+ int result;
907+
908+ smp_mb();
909+
910+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
911 "1: ldrex %0, [%3]\n"
912 " add %0, %0, %4\n"
913 " strex %1, %0, [%3]\n"
914@@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
915 int result;
916
917 __asm__ __volatile__("@ atomic_sub\n"
918+"1: ldrex %1, [%3]\n"
919+" subs %0, %1, %4\n"
920+
921+#ifdef CONFIG_PAX_REFCOUNT
922+" bvc 3f\n"
923+"2: bkpt 0xf103\n"
924+"3:\n"
925+#endif
926+
927+" strex %1, %0, [%3]\n"
928+" teq %1, #0\n"
929+" bne 1b"
930+
931+#ifdef CONFIG_PAX_REFCOUNT
932+"\n4:\n"
933+ _ASM_EXTABLE(2b, 4b)
934+#endif
935+
936+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
937+ : "r" (&v->counter), "Ir" (i)
938+ : "cc");
939+}
940+
941+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
942+{
943+ unsigned long tmp;
944+ int result;
945+
946+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
947 "1: ldrex %0, [%3]\n"
948 " sub %0, %0, %4\n"
949 " strex %1, %0, [%3]\n"
950@@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
951 smp_mb();
952
953 __asm__ __volatile__("@ atomic_sub_return\n"
954-"1: ldrex %0, [%3]\n"
955-" sub %0, %0, %4\n"
956+"1: ldrex %1, [%3]\n"
957+" subs %0, %1, %4\n"
958+
959+#ifdef CONFIG_PAX_REFCOUNT
960+" bvc 3f\n"
961+" mov %0, %1\n"
962+"2: bkpt 0xf103\n"
963+"3:\n"
964+#endif
965+
966 " strex %1, %0, [%3]\n"
967 " teq %1, #0\n"
968 " bne 1b"
969+
970+#ifdef CONFIG_PAX_REFCOUNT
971+"\n4:\n"
972+ _ASM_EXTABLE(2b, 4b)
973+#endif
974+
975 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
976 : "r" (&v->counter), "Ir" (i)
977 : "cc");
978@@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
979 return oldval;
980 }
981
982+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
983+{
984+ unsigned long oldval, res;
985+
986+ smp_mb();
987+
988+ do {
989+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
990+ "ldrex %1, [%3]\n"
991+ "mov %0, #0\n"
992+ "teq %1, %4\n"
993+ "strexeq %0, %5, [%3]\n"
994+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
995+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
996+ : "cc");
997+ } while (res);
998+
999+ smp_mb();
1000+
1001+ return oldval;
1002+}
1003+
1004 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1005 {
1006 unsigned long tmp, tmp2;
1007@@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
1008
1009 return val;
1010 }
1011+
1012+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1013+{
1014+ return atomic_add_return(i, v);
1015+}
1016+
1017 #define atomic_add(i, v) (void) atomic_add_return(i, v)
1018+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1019+{
1020+ (void) atomic_add_return(i, v);
1021+}
1022
1023 static inline int atomic_sub_return(int i, atomic_t *v)
1024 {
1025@@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1026 return val;
1027 }
1028 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
1029+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1030+{
1031+ (void) atomic_sub_return(i, v);
1032+}
1033
1034 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1035 {
1036@@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1037 return ret;
1038 }
1039
1040+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1041+{
1042+ return atomic_cmpxchg(v, old, new);
1043+}
1044+
1045 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1046 {
1047 unsigned long flags;
1048@@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1049 #endif /* __LINUX_ARM_ARCH__ */
1050
1051 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1052+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1053+{
1054+ return xchg(&v->counter, new);
1055+}
1056
1057 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1058 {
1059@@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1060 }
1061
1062 #define atomic_inc(v) atomic_add(1, v)
1063+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1064+{
1065+ atomic_add_unchecked(1, v);
1066+}
1067 #define atomic_dec(v) atomic_sub(1, v)
1068+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1069+{
1070+ atomic_sub_unchecked(1, v);
1071+}
1072
1073 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1074+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1075+{
1076+ return atomic_add_return_unchecked(1, v) == 0;
1077+}
1078 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1079 #define atomic_inc_return(v) (atomic_add_return(1, v))
1080+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1081+{
1082+ return atomic_add_return_unchecked(1, v);
1083+}
1084 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1085 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1086
1087@@ -241,6 +428,14 @@ typedef struct {
1088 u64 __aligned(8) counter;
1089 } atomic64_t;
1090
1091+#ifdef CONFIG_PAX_REFCOUNT
1092+typedef struct {
1093+ u64 __aligned(8) counter;
1094+} atomic64_unchecked_t;
1095+#else
1096+typedef atomic64_t atomic64_unchecked_t;
1097+#endif
1098+
1099 #define ATOMIC64_INIT(i) { (i) }
1100
1101 static inline u64 atomic64_read(const atomic64_t *v)
1102@@ -256,6 +451,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
1103 return result;
1104 }
1105
1106+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1107+{
1108+ u64 result;
1109+
1110+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1111+" ldrexd %0, %H0, [%1]"
1112+ : "=&r" (result)
1113+ : "r" (&v->counter), "Qo" (v->counter)
1114+ );
1115+
1116+ return result;
1117+}
1118+
1119 static inline void atomic64_set(atomic64_t *v, u64 i)
1120 {
1121 u64 tmp;
1122@@ -270,6 +478,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1123 : "cc");
1124 }
1125
1126+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1127+{
1128+ u64 tmp;
1129+
1130+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1131+"1: ldrexd %0, %H0, [%2]\n"
1132+" strexd %0, %3, %H3, [%2]\n"
1133+" teq %0, #0\n"
1134+" bne 1b"
1135+ : "=&r" (tmp), "=Qo" (v->counter)
1136+ : "r" (&v->counter), "r" (i)
1137+ : "cc");
1138+}
1139+
1140 static inline void atomic64_add(u64 i, atomic64_t *v)
1141 {
1142 u64 result;
1143@@ -278,6 +500,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1144 __asm__ __volatile__("@ atomic64_add\n"
1145 "1: ldrexd %0, %H0, [%3]\n"
1146 " adds %0, %0, %4\n"
1147+" adcs %H0, %H0, %H4\n"
1148+
1149+#ifdef CONFIG_PAX_REFCOUNT
1150+" bvc 3f\n"
1151+"2: bkpt 0xf103\n"
1152+"3:\n"
1153+#endif
1154+
1155+" strexd %1, %0, %H0, [%3]\n"
1156+" teq %1, #0\n"
1157+" bne 1b"
1158+
1159+#ifdef CONFIG_PAX_REFCOUNT
1160+"\n4:\n"
1161+ _ASM_EXTABLE(2b, 4b)
1162+#endif
1163+
1164+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1165+ : "r" (&v->counter), "r" (i)
1166+ : "cc");
1167+}
1168+
1169+static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1170+{
1171+ u64 result;
1172+ unsigned long tmp;
1173+
1174+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1175+"1: ldrexd %0, %H0, [%3]\n"
1176+" adds %0, %0, %4\n"
1177 " adc %H0, %H0, %H4\n"
1178 " strexd %1, %0, %H0, [%3]\n"
1179 " teq %1, #0\n"
1180@@ -289,12 +541,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1181
1182 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1183 {
1184- u64 result;
1185- unsigned long tmp;
1186+ u64 result, tmp;
1187
1188 smp_mb();
1189
1190 __asm__ __volatile__("@ atomic64_add_return\n"
1191+"1: ldrexd %1, %H1, [%3]\n"
1192+" adds %0, %1, %4\n"
1193+" adcs %H0, %H1, %H4\n"
1194+
1195+#ifdef CONFIG_PAX_REFCOUNT
1196+" bvc 3f\n"
1197+" mov %0, %1\n"
1198+" mov %H0, %H1\n"
1199+"2: bkpt 0xf103\n"
1200+"3:\n"
1201+#endif
1202+
1203+" strexd %1, %0, %H0, [%3]\n"
1204+" teq %1, #0\n"
1205+" bne 1b"
1206+
1207+#ifdef CONFIG_PAX_REFCOUNT
1208+"\n4:\n"
1209+ _ASM_EXTABLE(2b, 4b)
1210+#endif
1211+
1212+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1213+ : "r" (&v->counter), "r" (i)
1214+ : "cc");
1215+
1216+ smp_mb();
1217+
1218+ return result;
1219+}
1220+
1221+static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1222+{
1223+ u64 result;
1224+ unsigned long tmp;
1225+
1226+ smp_mb();
1227+
1228+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1229 "1: ldrexd %0, %H0, [%3]\n"
1230 " adds %0, %0, %4\n"
1231 " adc %H0, %H0, %H4\n"
1232@@ -318,6 +607,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1233 __asm__ __volatile__("@ atomic64_sub\n"
1234 "1: ldrexd %0, %H0, [%3]\n"
1235 " subs %0, %0, %4\n"
1236+" sbcs %H0, %H0, %H4\n"
1237+
1238+#ifdef CONFIG_PAX_REFCOUNT
1239+" bvc 3f\n"
1240+"2: bkpt 0xf103\n"
1241+"3:\n"
1242+#endif
1243+
1244+" strexd %1, %0, %H0, [%3]\n"
1245+" teq %1, #0\n"
1246+" bne 1b"
1247+
1248+#ifdef CONFIG_PAX_REFCOUNT
1249+"\n4:\n"
1250+ _ASM_EXTABLE(2b, 4b)
1251+#endif
1252+
1253+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1254+ : "r" (&v->counter), "r" (i)
1255+ : "cc");
1256+}
1257+
1258+static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1259+{
1260+ u64 result;
1261+ unsigned long tmp;
1262+
1263+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1264+"1: ldrexd %0, %H0, [%3]\n"
1265+" subs %0, %0, %4\n"
1266 " sbc %H0, %H0, %H4\n"
1267 " strexd %1, %0, %H0, [%3]\n"
1268 " teq %1, #0\n"
1269@@ -329,18 +648,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1270
1271 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1272 {
1273- u64 result;
1274- unsigned long tmp;
1275+ u64 result, tmp;
1276
1277 smp_mb();
1278
1279 __asm__ __volatile__("@ atomic64_sub_return\n"
1280-"1: ldrexd %0, %H0, [%3]\n"
1281-" subs %0, %0, %4\n"
1282-" sbc %H0, %H0, %H4\n"
1283+"1: ldrexd %1, %H1, [%3]\n"
1284+" subs %0, %1, %4\n"
1285+" sbcs %H0, %H1, %H4\n"
1286+
1287+#ifdef CONFIG_PAX_REFCOUNT
1288+" bvc 3f\n"
1289+" mov %0, %1\n"
1290+" mov %H0, %H1\n"
1291+"2: bkpt 0xf103\n"
1292+"3:\n"
1293+#endif
1294+
1295 " strexd %1, %0, %H0, [%3]\n"
1296 " teq %1, #0\n"
1297 " bne 1b"
1298+
1299+#ifdef CONFIG_PAX_REFCOUNT
1300+"\n4:\n"
1301+ _ASM_EXTABLE(2b, 4b)
1302+#endif
1303+
1304 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1305 : "r" (&v->counter), "r" (i)
1306 : "cc");
1307@@ -374,6 +707,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1308 return oldval;
1309 }
1310
1311+static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1312+{
1313+ u64 oldval;
1314+ unsigned long res;
1315+
1316+ smp_mb();
1317+
1318+ do {
1319+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1320+ "ldrexd %1, %H1, [%3]\n"
1321+ "mov %0, #0\n"
1322+ "teq %1, %4\n"
1323+ "teqeq %H1, %H4\n"
1324+ "strexdeq %0, %5, %H5, [%3]"
1325+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1326+ : "r" (&ptr->counter), "r" (old), "r" (new)
1327+ : "cc");
1328+ } while (res);
1329+
1330+ smp_mb();
1331+
1332+ return oldval;
1333+}
1334+
1335 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1336 {
1337 u64 result;
1338@@ -397,21 +754,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1339
1340 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1341 {
1342- u64 result;
1343- unsigned long tmp;
1344+ u64 result, tmp;
1345
1346 smp_mb();
1347
1348 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1349-"1: ldrexd %0, %H0, [%3]\n"
1350-" subs %0, %0, #1\n"
1351-" sbc %H0, %H0, #0\n"
1352+"1: ldrexd %1, %H1, [%3]\n"
1353+" subs %0, %1, #1\n"
1354+" sbcs %H0, %H1, #0\n"
1355+
1356+#ifdef CONFIG_PAX_REFCOUNT
1357+" bvc 3f\n"
1358+" mov %0, %1\n"
1359+" mov %H0, %H1\n"
1360+"2: bkpt 0xf103\n"
1361+"3:\n"
1362+#endif
1363+
1364 " teq %H0, #0\n"
1365-" bmi 2f\n"
1366+" bmi 4f\n"
1367 " strexd %1, %0, %H0, [%3]\n"
1368 " teq %1, #0\n"
1369 " bne 1b\n"
1370-"2:"
1371+"4:\n"
1372+
1373+#ifdef CONFIG_PAX_REFCOUNT
1374+ _ASM_EXTABLE(2b, 4b)
1375+#endif
1376+
1377 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1378 : "r" (&v->counter)
1379 : "cc");
1380@@ -434,13 +804,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1381 " teq %0, %5\n"
1382 " teqeq %H0, %H5\n"
1383 " moveq %1, #0\n"
1384-" beq 2f\n"
1385+" beq 4f\n"
1386 " adds %0, %0, %6\n"
1387-" adc %H0, %H0, %H6\n"
1388+" adcs %H0, %H0, %H6\n"
1389+
1390+#ifdef CONFIG_PAX_REFCOUNT
1391+" bvc 3f\n"
1392+"2: bkpt 0xf103\n"
1393+"3:\n"
1394+#endif
1395+
1396 " strexd %2, %0, %H0, [%4]\n"
1397 " teq %2, #0\n"
1398 " bne 1b\n"
1399-"2:"
1400+"4:\n"
1401+
1402+#ifdef CONFIG_PAX_REFCOUNT
1403+ _ASM_EXTABLE(2b, 4b)
1404+#endif
1405+
1406 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1407 : "r" (&v->counter), "r" (u), "r" (a)
1408 : "cc");
1409@@ -453,10 +835,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1410
1411 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1412 #define atomic64_inc(v) atomic64_add(1LL, (v))
1413+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1414 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1415+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1416 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1417 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1418 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1419+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1420 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1421 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1422 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
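
The shape of every instrumented ARM atomic above is the same: the flag-setting arithmetic (adds/adcs, subs/sbcs) is followed by bvc, so a signed overflow falls through to bkpt 0xf103, and the _ASM_EXTABLE entry (2b, 4b) lets the trap handler resume past the ldrex/strex loop after flagging the task, with the mov %0, %1 lines restoring the pre-overflow value in the *_return variants. Behaviorally, on a kernel built with CONFIG_PAX_REFCOUNT (illustrative values, hypothetical counters):

    atomic_t refs = ATOMIC_INIT(INT_MAX);
    atomic_inc(&refs);            /* adds sets V, bvc not taken: bkpt 0xf103 traps */

    atomic_unchecked_t hits = ATOMIC_INIT(INT_MAX);
    atomic_inc_unchecked(&hits);  /* plain add: wraps silently to INT_MIN */
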
1423diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1424index 75fe66b..ba3dee4 100644
1425--- a/arch/arm/include/asm/cache.h
1426+++ b/arch/arm/include/asm/cache.h
1427@@ -4,8 +4,10 @@
1428 #ifndef __ASMARM_CACHE_H
1429 #define __ASMARM_CACHE_H
1430
1431+#include <linux/const.h>
1432+
1433 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1434-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1435+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1436
1437 /*
1438 * Memory returned by kmalloc() may be used for DMA, so we must make
1439@@ -24,5 +26,6 @@
1440 #endif
1441
1442 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1443+#define __read_only __attribute__ ((__section__(".data..read_only")))
1444
1445 #endif
1446diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1447index e1489c5..d418304 100644
1448--- a/arch/arm/include/asm/cacheflush.h
1449+++ b/arch/arm/include/asm/cacheflush.h
1450@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1451 void (*dma_unmap_area)(const void *, size_t, int);
1452
1453 void (*dma_flush_range)(const void *, const void *);
1454-};
1455+} __no_const;
1456
1457 /*
1458 * Select the calling method
1459diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1460index 7eb18c1..e38b6d2 100644
1461--- a/arch/arm/include/asm/cmpxchg.h
1462+++ b/arch/arm/include/asm/cmpxchg.h
1463@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1464
1465 #define xchg(ptr,x) \
1466 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1467+#define xchg_unchecked(ptr,x) \
1468+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1469
1470 #include <asm-generic/cmpxchg-local.h>
1471
1472diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
1473index ab98fdd..6b19938 100644
1474--- a/arch/arm/include/asm/delay.h
1475+++ b/arch/arm/include/asm/delay.h
1476@@ -24,9 +24,9 @@ extern struct arm_delay_ops {
1477 void (*delay)(unsigned long);
1478 void (*const_udelay)(unsigned long);
1479 void (*udelay)(unsigned long);
1480-} arm_delay_ops;
1481+} *arm_delay_ops;
1482
1483-#define __delay(n) arm_delay_ops.delay(n)
1484+#define __delay(n) arm_delay_ops->delay(n)
1485
1486 /*
1487 * This function intentionally does not exist; if you see references to
1488@@ -47,8 +47,8 @@ extern void __bad_udelay(void);
1489 * first constant multiplications gets optimized away if the delay is
1490 * a constant)
1491 */
1492-#define __udelay(n) arm_delay_ops.udelay(n)
1493-#define __const_udelay(n) arm_delay_ops.const_udelay(n)
1494+#define __udelay(n) arm_delay_ops->udelay(n)
1495+#define __const_udelay(n) arm_delay_ops->const_udelay(n)
1496
1497 #define udelay(n) \
1498 (__builtin_constant_p(n) ? \
1499diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1500index 38050b1..9d90e8b 100644
1501--- a/arch/arm/include/asm/elf.h
1502+++ b/arch/arm/include/asm/elf.h
1503@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1504 the loader. We need to make sure that it is out of the way of the program
1505 that it will "exec", and that there is sufficient room for the brk. */
1506
1507-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1508+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1509+
1510+#ifdef CONFIG_PAX_ASLR
1511+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1512+
1513+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1514+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1515+#endif
1516
1517 /* When the program starts, a1 contains a pointer to a function to be
1518 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1519@@ -126,8 +133,4 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1520 extern void elf_set_personality(const struct elf32_hdr *);
1521 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1522
1523-struct mm_struct;
1524-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1525-#define arch_randomize_brk arch_randomize_brk
1526-
1527 #endif
1528diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1529index 83eb2f7..ed77159 100644
1530--- a/arch/arm/include/asm/kmap_types.h
1531+++ b/arch/arm/include/asm/kmap_types.h
1532@@ -4,6 +4,6 @@
1533 /*
1534 * This is the "bare minimum". AIO seems to require this.
1535 */
1536-#define KM_TYPE_NR 16
1537+#define KM_TYPE_NR 17
1538
1539 #endif
1540diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1541index 195ac2f..2272f0d 100644
1542--- a/arch/arm/include/asm/mach/map.h
1543+++ b/arch/arm/include/asm/mach/map.h
1544@@ -34,6 +34,9 @@ struct map_desc {
1545 #define MT_MEMORY_ITCM 13
1546 #define MT_MEMORY_SO 14
1547 #define MT_MEMORY_DMA_READY 15
1548+#define MT_MEMORY_R 16
1549+#define MT_MEMORY_RW 17
1550+#define MT_MEMORY_RX 18
1551
1552 #ifdef CONFIG_MMU
1553 extern void iotable_init(struct map_desc *, int);
1554diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1555index 53426c6..c7baff3 100644
1556--- a/arch/arm/include/asm/outercache.h
1557+++ b/arch/arm/include/asm/outercache.h
1558@@ -35,7 +35,7 @@ struct outer_cache_fns {
1559 #endif
1560 void (*set_debug)(unsigned long);
1561 void (*resume)(void);
1562-};
1563+} __no_const;
1564
1565 #ifdef CONFIG_OUTER_CACHE
1566
1567diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1568index 812a494..71fc0b6 100644
1569--- a/arch/arm/include/asm/page.h
1570+++ b/arch/arm/include/asm/page.h
1571@@ -114,7 +114,7 @@ struct cpu_user_fns {
1572 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1573 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1574 unsigned long vaddr, struct vm_area_struct *vma);
1575-};
1576+} __no_const;
1577
1578 #ifdef MULTI_USER
1579 extern struct cpu_user_fns cpu_user;
1580diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1581index 943504f..1ad2de8 100644
1582--- a/arch/arm/include/asm/pgalloc.h
1583+++ b/arch/arm/include/asm/pgalloc.h
1584@@ -22,7 +22,7 @@
1585
1586 #ifdef CONFIG_MMU
1587
1588-#define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
1589+#define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_PXNTABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
1590 #define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))
1591
1592 #ifdef CONFIG_ARM_LPAE
1593@@ -43,6 +43,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1594 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1595 }
1596
1597+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1598+{
1599+ pud_populate(mm, pud, pmd);
1600+}
1601+
1602 #else /* !CONFIG_ARM_LPAE */
1603
1604 /*
1605@@ -51,6 +56,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1606 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1607 #define pmd_free(mm, pmd) do { } while (0)
1608 #define pud_populate(mm,pmd,pte) BUG()
1609+#define pud_populate_kernel(mm,pmd,pte) BUG()
1610
1611 #endif /* CONFIG_ARM_LPAE */
1612
1613@@ -126,6 +132,16 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1614 __free_page(pte);
1615 }
1616
1617+static inline void __pmd_update(pmd_t *pmdp, pmdval_t prot)
1618+{
1619+ pmdval_t pmdval = pmd_val(*pmdp) | prot;
1620+ pmdp[0] = __pmd(pmdval);
1621+#ifndef CONFIG_ARM_LPAE
1622+ pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
1623+#endif
1624+ flush_pmd_entry(pmdp);
1625+}
1626+
1627 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1628 pmdval_t prot)
1629 {
1630diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1631index 5cfba15..d437dc2 100644
1632--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1633+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1634@@ -20,12 +20,15 @@
1635 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
1636 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
1637 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
1638+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* PXN */
1639 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
1640 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
1641 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
1642+
1643 /*
1644 * - section
1645 */
1646+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0)
1647 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1648 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1649 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1650@@ -37,6 +40,7 @@
1651 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1652 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1653 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1654+#define PMD_SECT_AP_RDONLY (_AT(pmdval_t, 0))
1655
1656 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1657 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1658diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
1659index d795282..d82ff13 100644
1660--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
1661+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
1662@@ -32,6 +32,7 @@
1663 #define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0)
1664 #define PMD_BIT4 (_AT(pmdval_t, 0))
1665 #define PMD_DOMAIN(x) (_AT(pmdval_t, 0))
1666+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 59) /* PXNTable */
1667
1668 /*
1669 * - section
1670@@ -41,9 +42,11 @@
1671 #define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
1672 #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
1673 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
1674+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53)
1675 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 54)
1676 #define PMD_SECT_AP_WRITE (_AT(pmdval_t, 0))
1677 #define PMD_SECT_AP_READ (_AT(pmdval_t, 0))
1678+#define PMD_SECT_AP_RDONLY (_AT(pmdval_t, 1) << 7)
1679 #define PMD_SECT_TEX(x) (_AT(pmdval_t, 0))
1680
1681 /*
1682@@ -66,6 +69,7 @@
1683 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1684 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
1685 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
1686+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1687 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
1688
1689 /*
1690diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1691index b249035..4ab204b 100644
1692--- a/arch/arm/include/asm/pgtable-3level.h
1693+++ b/arch/arm/include/asm/pgtable-3level.h
1694@@ -73,6 +73,7 @@
1695 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
1696 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1697 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1698+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1699 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1700 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
1701 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
1702@@ -80,6 +81,7 @@
1703 /*
1704 * To be used in assembly code with the upper page attributes.
1705 */
1706+#define L_PTE_PXN_HIGH (1 << (53 - 32))
1707 #define L_PTE_XN_HIGH (1 << (54 - 32))
1708 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1709
1710diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1711index 08c1231..6a2d6b0 100644
1712--- a/arch/arm/include/asm/pgtable.h
1713+++ b/arch/arm/include/asm/pgtable.h
1714@@ -30,6 +30,9 @@
1715 #include <asm/pgtable-2level.h>
1716 #endif
1717
1718+#define ktla_ktva(addr) (addr)
1719+#define ktva_ktla(addr) (addr)
1720+
1721 /*
1722 * Just any arbitrary offset to the start of the vmalloc VM area: the
1723 * current 8MB value just means that there will be a 8MB "hole" after the
1724@@ -53,6 +56,17 @@ extern void __pgd_error(const char *file, int line, pgd_t);
1725 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
1726 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
1727
1728+#define __HAVE_ARCH_PAX_OPEN_KERNEL
1729+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
1730+
1731+#ifdef CONFIG_PAX_KERNEXEC
1732+static inline unsigned long pax_open_kernel(void) { return 0; /* TODO */ }
1733+static inline unsigned long pax_close_kernel(void) { return 0; /* TODO */ }
1734+#else
1735+static inline unsigned long pax_open_kernel(void) { return 0; }
1736+static inline unsigned long pax_close_kernel(void) { return 0; }
1737+#endif
1738+
1739 /*
1740 * This is the lowest virtual address we can permit any user space
1741 * mapping to be mapped at. This is particularly important for
1742@@ -73,23 +87,23 @@ extern pgprot_t pgprot_kernel;
1743
1744 #define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))
1745
1746-#define PAGE_NONE _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY)
1747-#define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
1748-#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER)
1749-#define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
1750-#define PAGE_COPY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
1751-#define PAGE_READONLY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
1752-#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
1753+#define PAGE_NONE _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_PXN | L_PTE_RDONLY)
1754+#define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN | L_PTE_PXN)
1755+#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_PXN)
1756+#define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN | L_PTE_PXN)
1757+#define PAGE_COPY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_PXN)
1758+#define PAGE_READONLY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN | L_PTE_PXN)
1759+#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_PXN)
1760 #define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN)
1761 #define PAGE_KERNEL_EXEC pgprot_kernel
1762
1763-#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN)
1764-#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
1765-#define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER)
1766-#define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
1767-#define __PAGE_COPY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
1768-#define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
1769-#define __PAGE_READONLY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
1770+#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_PXN)
1771+#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN | L_PTE_PXN)
1772+#define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_PXN)
1773+#define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN | L_PTE_PXN)
1774+#define __PAGE_COPY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_PXN)
1775+#define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN | L_PTE_PXN)
1776+#define __PAGE_READONLY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_PXN)
1777
1778 #define __pgprot_modify(prot,mask,bits) \
1779 __pgprot((pgprot_val(prot) & ~(mask)) | (bits))
1780@@ -242,7 +256,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
1781
1782 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
1783 {
1784- const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER;
1785+ const pteval_t mask = L_PTE_XN | L_PTE_PXN | L_PTE_RDONLY | L_PTE_USER;
1786 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
1787 return pte;
1788 }
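
The L_PTE_PXN bit folded into every user protection above is the LPAE Privileged eXecute-Never flag (bit 53, matching the L_PTE_PXN_HIGH definition in the earlier pgtable-3level hunk): user pages that stay executable from userspace become non-executable for the kernel, closing off ret2usr-style control flow. A minimal userspace sketch of the resulting policy, using only the bit positions visible in the _HIGH definitions above; everything else is invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define L_PTE_PXN (UINT64_C(1) << 53)	/* privileged execute-never (LPAE) */
#define L_PTE_XN  (UINT64_C(1) << 54)	/* user execute-never (LPAE) */

static void describe(const char *name, uint64_t pte)
{
	printf("%-16s user-exec:%s kernel-exec:%s\n", name,
	       (pte & L_PTE_XN)  ? "no " : "yes",
	       (pte & L_PTE_PXN) ? "no " : "yes");
}

int main(void)
{
	describe("SHARED_EXEC-like", L_PTE_PXN);		/* user yes, kernel no */
	describe("SHARED-like", L_PTE_PXN | L_PTE_XN);	/* nobody executes */
	return 0;
}
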
1789diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
1790index f3628fb..a0672dd 100644
1791--- a/arch/arm/include/asm/proc-fns.h
1792+++ b/arch/arm/include/asm/proc-fns.h
1793@@ -75,7 +75,7 @@ extern struct processor {
1794 unsigned int suspend_size;
1795 void (*do_suspend)(void *);
1796 void (*do_resume)(void *);
1797-} processor;
1798+} __do_const processor;
1799
1800 #ifndef MULTI_CPU
1801 extern void cpu_proc_init(void);
1802diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
1803index 2e3be16..4dc90fc 100644
1804--- a/arch/arm/include/asm/smp.h
1805+++ b/arch/arm/include/asm/smp.h
1806@@ -106,7 +106,7 @@ struct smp_operations {
1807 int (*cpu_disable)(unsigned int cpu);
1808 #endif
1809 #endif
1810-};
1811+} __no_const;
1812
1813 /*
1814 * set platform specific SMP operations
1815diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
1816index 8477b4c..801a6a9 100644
1817--- a/arch/arm/include/asm/thread_info.h
1818+++ b/arch/arm/include/asm/thread_info.h
1819@@ -151,6 +151,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
1820 #define TIF_SYSCALL_TRACE 8
1821 #define TIF_SYSCALL_AUDIT 9
1822 #define TIF_SYSCALL_TRACEPOINT 10
1823+
1824+/* within 8 bits of TIF_SYSCALL_TRACE
1825+ * to meet flexible second operand requirements
1826+ */
1827+#define TIF_GRSEC_SETXID 11
1828+
1829 #define TIF_USING_IWMMXT 17
1830 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
1831 #define TIF_RESTORE_SIGMASK 20
1832@@ -165,9 +171,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
1833 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
1834 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
1835 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
1836+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
1837
1838 /* Checks for any syscall work in entry-common.S */
1839-#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
1840+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | \
1841+ _TIF_GRSEC_SETXID)
1842
1843 /*
1844 * Change these and you break ASM code in entry-common.S
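
The placement comment above refers to ARM immediate encoding: a data-processing instruction's flexible second operand is an 8-bit value rotated right by an even amount, so the combined _TIF_SYSCALL_WORK mask can be tested with a single tst-immediate only if every set bit fits inside one such 8-bit window. That is why the new flag sits at bit 11 next to TIF_SYSCALL_TRACE (bit 8) rather than up near TIF_USING_IWMMXT (bit 17). A small host-side sketch of the encodability rule, for illustration only:

#include <assert.h>
#include <stdint.h>

/* 1 if mask fits an ARM "modified immediate": 8 bits, even rotation */
static int arm_imm_encodable(uint32_t mask)
{
	unsigned int rot;

	for (rot = 0; rot < 32; rot += 2) {
		uint32_t v = rot ? (mask << rot) | (mask >> (32 - rot)) : mask;
		if (v <= 0xff)
			return 1;
	}
	return 0;
}

int main(void)
{
	uint32_t work = (1 << 8) | (1 << 9) | (1 << 10) | (1 << 11);

	assert(arm_imm_encodable(work));		/* bits 8..11: one byte */
	assert(!arm_imm_encodable((1 << 8) | (1 << 17)));	/* spans too far */
	return 0;
}
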
1845diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
1846index 7e1f760..1af891c 100644
1847--- a/arch/arm/include/asm/uaccess.h
1848+++ b/arch/arm/include/asm/uaccess.h
1849@@ -22,6 +22,8 @@
1850 #define VERIFY_READ 0
1851 #define VERIFY_WRITE 1
1852
1853+extern void check_object_size(const void *ptr, unsigned long n, bool to);
1854+
1855 /*
1856 * The exception table consists of pairs of addresses: the first is the
1857 * address of an instruction that is allowed to fault, and the second is
1858@@ -418,8 +420,23 @@ do { \
1859
1860
1861 #ifdef CONFIG_MMU
1862-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
1863-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
1864+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
1865+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
1866+
1867+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
1868+{
1869+ if (!__builtin_constant_p(n))
1870+ check_object_size(to, n, false);
1871+ return ___copy_from_user(to, from, n);
1872+}
1873+
1874+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
1875+{
1876+ if (!__builtin_constant_p(n))
1877+ check_object_size(from, n, true);
1878+ return ___copy_to_user(to, from, n);
1879+}
1880+
1881 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
1882 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1883 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
1884@@ -431,6 +448,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
1885
1886 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1887 {
1888+ if ((long)n < 0)
1889+ return n;
1890+
1891 if (access_ok(VERIFY_READ, from, n))
1892 n = __copy_from_user(to, from, n);
1893 else /* security hole - plug it */
1894@@ -440,6 +460,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
1895
1896 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1897 {
1898+ if ((long)n < 0)
1899+ return n;
1900+
1901 if (access_ok(VERIFY_WRITE, to, n))
1902 n = __copy_to_user(to, from, n);
1903 return n;
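
The new (long)n < 0 guard in copy_{from,to}_user() catches length underflow before any range check runs: when a signed size computation goes negative and is passed as the unsigned count, it becomes an enormous value, and returning it unmodified makes the call report total failure instead of starting a multi-gigabyte copy. A userspace illustration of the failure mode (not kernel code):

#include <stdio.h>

/* mirrors the guard: a count with the sign bit set is rejected outright */
static unsigned long guarded_copy(unsigned long n)
{
	if ((long)n < 0)
		return n;	/* "n bytes left uncopied" == total failure */
	/* access_ok() and the real copy would run here */
	return 0;
}

int main(void)
{
	long computed = 16 - 32;			/* underflowed size math */
	unsigned long n = (unsigned long)computed;	/* wraps to a huge count */

	printf("copy of %lu bytes -> %lu uncopied\n", n, guarded_copy(n));
	return 0;
}
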
1904diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
1905index 96ee092..37f1844 100644
1906--- a/arch/arm/include/uapi/asm/ptrace.h
1907+++ b/arch/arm/include/uapi/asm/ptrace.h
1908@@ -73,7 +73,7 @@
1909 * ARMv7 groups of PSR bits
1910 */
1911 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
1912-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
1913+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
1914 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
1915 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
1916
1917diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
1918index 60d3b73..9168db0 100644
1919--- a/arch/arm/kernel/armksyms.c
1920+++ b/arch/arm/kernel/armksyms.c
1921@@ -89,8 +89,8 @@ EXPORT_SYMBOL(__memzero);
1922 #ifdef CONFIG_MMU
1923 EXPORT_SYMBOL(copy_page);
1924
1925-EXPORT_SYMBOL(__copy_from_user);
1926-EXPORT_SYMBOL(__copy_to_user);
1927+EXPORT_SYMBOL(___copy_from_user);
1928+EXPORT_SYMBOL(___copy_to_user);
1929 EXPORT_SYMBOL(__clear_user);
1930
1931 EXPORT_SYMBOL(__get_user_1);
1932diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
1933index 4eee351..e247728 100644
1934--- a/arch/arm/kernel/head.S
1935+++ b/arch/arm/kernel/head.S
1936@@ -52,7 +52,9 @@
1937 .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
1938
1939 .macro pgtbl, rd, phys
1940- add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
1941+ mov \rd, #TEXT_OFFSET
1942+ sub \rd, #PG_DIR_SIZE
1943+ add \rd, \rd, \phys
1944 .endm
1945
1946 /*
1947diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
1948index 1e9be5d..b9a75e1 100644
1949--- a/arch/arm/kernel/module.c
1950+++ b/arch/arm/kernel/module.c
1951@@ -37,12 +37,35 @@
1952 #endif
1953
1954 #ifdef CONFIG_MMU
1955-void *module_alloc(unsigned long size)
1956+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
1957 {
1958 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
1959- GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
1960+ GFP_KERNEL, prot, -1,
1961 __builtin_return_address(0));
1962 }
1963+
1964+void *module_alloc(unsigned long size)
1965+{
1966+
1967+#ifdef CONFIG_PAX_KERNEXEC
1968+ return __module_alloc(size, PAGE_KERNEL);
1969+#else
1970+ return __module_alloc(size, PAGE_KERNEL_EXEC);
1971+#endif
1972+
1973+}
1974+
1975+#ifdef CONFIG_PAX_KERNEXEC
1976+void module_free_exec(struct module *mod, void *module_region)
1977+{
1978+ module_free(mod, module_region);
1979+}
1980+
1981+void *module_alloc_exec(unsigned long size)
1982+{
1983+ return __module_alloc(size, PAGE_KERNEL_EXEC);
1984+}
1985+#endif
1986 #endif
1987
1988 int
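
With CONFIG_PAX_KERNEXEC, module_alloc() above now hands out non-executable PAGE_KERNEL memory, and executable mappings come only from the separate module_alloc_exec() path, so no module memory is ever writable and executable at once. A userspace analogue of that W^X split (a sketch of the idea, not the kernel interface):

#include <stddef.h>
#include <stdio.h>
#include <sys/mman.h>

static void *alloc_region(size_t len, int prot)
{
	void *p = mmap(NULL, len, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	return p == MAP_FAILED ? NULL : p;
}

int main(void)
{
	/* "module_alloc": data may be written, never executed */
	void *data = alloc_region(4096, PROT_READ | PROT_WRITE);
	/* "module_alloc_exec": text may be executed, never written */
	void *text = alloc_region(4096, PROT_READ | PROT_EXEC);

	printf("data=%p text=%p\n", data, text);
	return (data && text) ? 0 : 1;
}
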
1989diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
1990index 90084a6..bf4bcfb 100644
1991--- a/arch/arm/kernel/process.c
1992+++ b/arch/arm/kernel/process.c
1993@@ -28,7 +28,6 @@
1994 #include <linux/tick.h>
1995 #include <linux/utsname.h>
1996 #include <linux/uaccess.h>
1997-#include <linux/random.h>
1998 #include <linux/hw_breakpoint.h>
1999 #include <linux/cpuidle.h>
2000 #include <linux/leds.h>
2001@@ -256,9 +255,10 @@ void machine_power_off(void)
2002 machine_shutdown();
2003 if (pm_power_off)
2004 pm_power_off();
2005+ BUG();
2006 }
2007
2008-void machine_restart(char *cmd)
2009+__noreturn void machine_restart(char *cmd)
2010 {
2011 machine_shutdown();
2012
2013@@ -451,12 +451,6 @@ unsigned long get_wchan(struct task_struct *p)
2014 return 0;
2015 }
2016
2017-unsigned long arch_randomize_brk(struct mm_struct *mm)
2018-{
2019- unsigned long range_end = mm->brk + 0x02000000;
2020- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2021-}
2022-
2023 #ifdef CONFIG_MMU
2024 /*
2025 * The vectors page is always readable from user space for the
2026diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
2027index 739db3a..7f4a272 100644
2028--- a/arch/arm/kernel/ptrace.c
2029+++ b/arch/arm/kernel/ptrace.c
2030@@ -916,6 +916,10 @@ enum ptrace_syscall_dir {
2031 PTRACE_SYSCALL_EXIT,
2032 };
2033
2034+#ifdef CONFIG_GRKERNSEC_SETXID
2035+extern void gr_delayed_cred_worker(void);
2036+#endif
2037+
2038 static int ptrace_syscall_trace(struct pt_regs *regs, int scno,
2039 enum ptrace_syscall_dir dir)
2040 {
2041@@ -923,6 +927,11 @@ static int ptrace_syscall_trace(struct pt_regs *regs, int scno,
2042
2043 current_thread_info()->syscall = scno;
2044
2045+#ifdef CONFIG_GRKERNSEC_SETXID
2046+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
2047+ gr_delayed_cred_worker();
2048+#endif
2049+
2050 if (!test_thread_flag(TIF_SYSCALL_TRACE))
2051 return scno;
2052
2053diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
2054index da1d1aa..5aaf182 100644
2055--- a/arch/arm/kernel/setup.c
2056+++ b/arch/arm/kernel/setup.c
2057@@ -99,19 +99,19 @@ EXPORT_SYMBOL(elf_hwcap);
2058
2059
2060 #ifdef MULTI_CPU
2061-struct processor processor __read_mostly;
2062+struct processor processor;
2063 #endif
2064 #ifdef MULTI_TLB
2065-struct cpu_tlb_fns cpu_tlb __read_mostly;
2066+struct cpu_tlb_fns cpu_tlb __read_only;
2067 #endif
2068 #ifdef MULTI_USER
2069-struct cpu_user_fns cpu_user __read_mostly;
2070+struct cpu_user_fns cpu_user __read_only;
2071 #endif
2072 #ifdef MULTI_CACHE
2073-struct cpu_cache_fns cpu_cache __read_mostly;
2074+struct cpu_cache_fns cpu_cache __read_only;
2075 #endif
2076 #ifdef CONFIG_OUTER_CACHE
2077-struct outer_cache_fns outer_cache __read_mostly;
2078+struct outer_cache_fns outer_cache __read_only;
2079 EXPORT_SYMBOL(outer_cache);
2080 #endif
2081
2082@@ -455,7 +455,7 @@ static void __init setup_processor(void)
2083 __cpu_architecture = __get_cpu_architecture();
2084
2085 #ifdef MULTI_CPU
2086- processor = *list->proc;
2087+ memcpy((void *)&processor, list->proc, sizeof processor);
2088 #endif
2089 #ifdef MULTI_TLB
2090 cpu_tlb = *list->tlb;
2091diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
2092index fbc8b26..000ded0 100644
2093--- a/arch/arm/kernel/smp.c
2094+++ b/arch/arm/kernel/smp.c
2095@@ -70,7 +70,7 @@ enum ipi_msg_type {
2096
2097 static DECLARE_COMPLETION(cpu_running);
2098
2099-static struct smp_operations smp_ops;
2100+static struct smp_operations smp_ops __read_only;
2101
2102 void __init smp_set_ops(struct smp_operations *ops)
2103 {
2104diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
2105index b0179b8..b54c6c1 100644
2106--- a/arch/arm/kernel/traps.c
2107+++ b/arch/arm/kernel/traps.c
2108@@ -266,6 +266,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2109 static int die_owner = -1;
2110 static unsigned int die_nest_count;
2111
2112+extern void gr_handle_kernel_exploit(void);
2113+
2114 static unsigned long oops_begin(void)
2115 {
2116 int cpu;
2117@@ -308,6 +310,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
2118 panic("Fatal exception in interrupt");
2119 if (panic_on_oops)
2120 panic("Fatal exception");
2121+
2122+ gr_handle_kernel_exploit();
2123+
2124 if (signr)
2125 do_exit(signr);
2126 }
2127diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
2128index 36ff15b..75d9e9d 100644
2129--- a/arch/arm/kernel/vmlinux.lds.S
2130+++ b/arch/arm/kernel/vmlinux.lds.S
2131@@ -8,7 +8,11 @@
2132 #include <asm/thread_info.h>
2133 #include <asm/memory.h>
2134 #include <asm/page.h>
2135-
2136+
2137+#ifdef CONFIG_PAX_KERNEXEC
2138+#include <asm/pgtable.h>
2139+#endif
2140+
2141 #define PROC_INFO \
2142 . = ALIGN(4); \
2143 VMLINUX_SYMBOL(__proc_info_begin) = .; \
2144@@ -90,6 +94,11 @@ SECTIONS
2145 _text = .;
2146 HEAD_TEXT
2147 }
2148+
2149+#ifdef CONFIG_PAX_KERNEXEC
2150+ . = ALIGN(1<<SECTION_SHIFT);
2151+#endif
2152+
2153 .text : { /* Real text segment */
2154 _stext = .; /* Text and read-only data */
2155 __exception_text_start = .;
2156@@ -133,6 +142,10 @@ SECTIONS
2157
2158 _etext = .; /* End of text and rodata section */
2159
2160+#ifdef CONFIG_PAX_KERNEXEC
2161+ . = ALIGN(1<<SECTION_SHIFT);
2162+#endif
2163+
2164 #ifndef CONFIG_XIP_KERNEL
2165 . = ALIGN(PAGE_SIZE);
2166 __init_begin = .;
2167@@ -192,6 +205,11 @@ SECTIONS
2168 . = PAGE_OFFSET + TEXT_OFFSET;
2169 #else
2170 __init_end = .;
2171+
2172+#ifdef CONFIG_PAX_KERNEXEC
2173+ . = ALIGN(1<<SECTION_SHIFT);
2174+#endif
2175+
2176 . = ALIGN(THREAD_SIZE);
2177 __data_loc = .;
2178 #endif
2179diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
2180index 66a477a..bee61d3 100644
2181--- a/arch/arm/lib/copy_from_user.S
2182+++ b/arch/arm/lib/copy_from_user.S
2183@@ -16,7 +16,7 @@
2184 /*
2185 * Prototype:
2186 *
2187- * size_t __copy_from_user(void *to, const void *from, size_t n)
2188+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
2189 *
2190 * Purpose:
2191 *
2192@@ -84,11 +84,11 @@
2193
2194 .text
2195
2196-ENTRY(__copy_from_user)
2197+ENTRY(___copy_from_user)
2198
2199 #include "copy_template.S"
2200
2201-ENDPROC(__copy_from_user)
2202+ENDPROC(___copy_from_user)
2203
2204 .pushsection .fixup,"ax"
2205 .align 0
2206diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
2207index 6ee2f67..d1cce76 100644
2208--- a/arch/arm/lib/copy_page.S
2209+++ b/arch/arm/lib/copy_page.S
2210@@ -10,6 +10,7 @@
2211 * ASM optimised string functions
2212 */
2213 #include <linux/linkage.h>
2214+#include <linux/const.h>
2215 #include <asm/assembler.h>
2216 #include <asm/asm-offsets.h>
2217 #include <asm/cache.h>
2218diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
2219index d066df6..df28194 100644
2220--- a/arch/arm/lib/copy_to_user.S
2221+++ b/arch/arm/lib/copy_to_user.S
2222@@ -16,7 +16,7 @@
2223 /*
2224 * Prototype:
2225 *
2226- * size_t __copy_to_user(void *to, const void *from, size_t n)
2227+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
2228 *
2229 * Purpose:
2230 *
2231@@ -88,11 +88,11 @@
2232 .text
2233
2234 ENTRY(__copy_to_user_std)
2235-WEAK(__copy_to_user)
2236+WEAK(___copy_to_user)
2237
2238 #include "copy_template.S"
2239
2240-ENDPROC(__copy_to_user)
2241+ENDPROC(___copy_to_user)
2242 ENDPROC(__copy_to_user_std)
2243
2244 .pushsection .fixup,"ax"
2245diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
2246index 0dc5385..45833ef 100644
2247--- a/arch/arm/lib/delay.c
2248+++ b/arch/arm/lib/delay.c
2249@@ -28,12 +28,14 @@
2250 /*
2251 * Default to the loop-based delay implementation.
2252 */
2253-struct arm_delay_ops arm_delay_ops = {
2254+static struct arm_delay_ops arm_loop_delay_ops = {
2255 .delay = __loop_delay,
2256 .const_udelay = __loop_const_udelay,
2257 .udelay = __loop_udelay,
2258 };
2259
2260+struct arm_delay_ops *arm_delay_ops __read_only = &arm_loop_delay_ops;
2261+
2262 static const struct delay_timer *delay_timer;
2263 static bool delay_calibrated;
2264
2265@@ -67,6 +69,12 @@ static void __timer_udelay(unsigned long usecs)
2266 __timer_const_udelay(usecs * UDELAY_MULT);
2267 }
2268
2269+static struct arm_delay_ops arm_timer_delay_ops = {
2270+ .delay = __timer_delay,
2271+ .const_udelay = __timer_const_udelay,
2272+ .udelay = __timer_udelay,
2273+};
2274+
2275 void __init register_current_timer_delay(const struct delay_timer *timer)
2276 {
2277 if (!delay_calibrated) {
2278@@ -74,9 +82,7 @@ void __init register_current_timer_delay(const struct delay_timer *timer)
2279 delay_timer = timer;
2280 lpj_fine = timer->freq / HZ;
2281 loops_per_jiffy = lpj_fine;
2282- arm_delay_ops.delay = __timer_delay;
2283- arm_delay_ops.const_udelay = __timer_const_udelay;
2284- arm_delay_ops.udelay = __timer_udelay;
2285+ arm_delay_ops = &arm_timer_delay_ops;
2286 delay_calibrated = true;
2287 } else {
2288 pr_info("Ignoring duplicate/late registration of read_current_timer delay\n");
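
The delay.c change is a recurring pattern in this patch: rather than patching fields of one writable ops structure at runtime, both implementations are fully initialized as static structures and a single __read_only pointer is retargeted once during boot, shrinking the writable function-pointer surface to one word. A freestanding sketch of the pattern, with all names invented for illustration:

#include <stdio.h>

struct delay_ops {
	void (*udelay)(unsigned long us);
};

static void loop_udelay(unsigned long us)  { printf("loop: %lu us\n", us); }
static void timer_udelay(unsigned long us) { printf("timer: %lu us\n", us); }

static const struct delay_ops loop_ops  = { .udelay = loop_udelay };
static const struct delay_ops timer_ops = { .udelay = timer_udelay };

/* one pointer to protect instead of a struct full of code pointers */
static const struct delay_ops *cur_ops = &loop_ops;

int main(void)
{
	cur_ops->udelay(10);
	cur_ops = &timer_ops;	/* the single early-boot retarget */
	cur_ops->udelay(10);
	return 0;
}
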
2289diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
2290index 025f742..8432b08 100644
2291--- a/arch/arm/lib/uaccess_with_memcpy.c
2292+++ b/arch/arm/lib/uaccess_with_memcpy.c
2293@@ -104,7 +104,7 @@ out:
2294 }
2295
2296 unsigned long
2297-__copy_to_user(void __user *to, const void *from, unsigned long n)
2298+___copy_to_user(void __user *to, const void *from, unsigned long n)
2299 {
2300 /*
2301 * This test is stubbed out of the main function above to keep
2302diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
2303index 2c6c218..2b87c2d 100644
2304--- a/arch/arm/mach-kirkwood/common.c
2305+++ b/arch/arm/mach-kirkwood/common.c
2306@@ -150,7 +150,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
2307 clk_gate_ops.disable(hw);
2308 }
2309
2310-static struct clk_ops clk_gate_fn_ops;
2311+static int clk_gate_fn_is_enabled(struct clk_hw *hw)
2312+{
2313+ return clk_gate_ops.is_enabled(hw);
2314+}
2315+
2316+static struct clk_ops clk_gate_fn_ops = {
2317+ .enable = clk_gate_fn_enable,
2318+ .disable = clk_gate_fn_disable,
2319+ .is_enabled = clk_gate_fn_is_enabled,
2320+};
2321
2322 static struct clk __init *clk_register_gate_fn(struct device *dev,
2323 const char *name,
2324@@ -184,14 +193,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
2325 gate_fn->fn_en = fn_en;
2326 gate_fn->fn_dis = fn_dis;
2327
2328- /* ops is the gate ops, but with our enable/disable functions */
2329- if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
2330- clk_gate_fn_ops.disable != clk_gate_fn_disable) {
2331- clk_gate_fn_ops = clk_gate_ops;
2332- clk_gate_fn_ops.enable = clk_gate_fn_enable;
2333- clk_gate_fn_ops.disable = clk_gate_fn_disable;
2334- }
2335-
2336 clk = clk_register(dev, &gate_fn->gate.hw);
2337
2338 if (IS_ERR(clk))
2339diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
2340index d95f727..12f10dd 100644
2341--- a/arch/arm/mach-omap2/board-n8x0.c
2342+++ b/arch/arm/mach-omap2/board-n8x0.c
2343@@ -589,7 +589,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
2344 }
2345 #endif
2346
2347-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
2348+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
2349 .late_init = n8x0_menelaus_late_init,
2350 };
2351
2352diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
2353index 87cc6d0..fd4f248 100644
2354--- a/arch/arm/mach-omap2/omap_hwmod.c
2355+++ b/arch/arm/mach-omap2/omap_hwmod.c
2356@@ -189,10 +189,10 @@ struct omap_hwmod_soc_ops {
2357 int (*is_hardreset_asserted)(struct omap_hwmod *oh,
2358 struct omap_hwmod_rst_info *ohri);
2359 int (*init_clkdm)(struct omap_hwmod *oh);
2360-};
2361+} __no_const;
2362
2363 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
2364-static struct omap_hwmod_soc_ops soc_ops;
2365+static struct omap_hwmod_soc_ops soc_ops __read_only;
2366
2367 /* omap_hwmod_list contains all registered struct omap_hwmods */
2368 static LIST_HEAD(omap_hwmod_list);
2369diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
2370index 5dbf13f..9be36fd 100644
2371--- a/arch/arm/mm/fault.c
2372+++ b/arch/arm/mm/fault.c
2373@@ -25,6 +25,7 @@
2374 #include <asm/system_misc.h>
2375 #include <asm/system_info.h>
2376 #include <asm/tlbflush.h>
2377+#include <asm/sections.h>
2378
2379 #include "fault.h"
2380
2381@@ -138,6 +139,19 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
2382 if (fixup_exception(regs))
2383 return;
2384
2385+#ifdef CONFIG_PAX_KERNEXEC
2386+ if (fsr & FSR_WRITE) {
2387+ if (((unsigned long)_stext <= addr && addr < init_mm.end_code) || (MODULES_VADDR <= addr && addr < MODULES_END)) {
2388+ if (current->signal->curr_ip)
2389+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
2390+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
2391+ else
2392+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
2393+ current->comm, task_pid_nr(current), current_uid(), current_euid());
2394+ }
2395+ }
2396+#endif
2397+
2398 /*
2399 * No handler, we'll have to terminate things with extreme prejudice.
2400 */
2401@@ -174,6 +188,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
2402 }
2403 #endif
2404
2405+#ifdef CONFIG_PAX_PAGEEXEC
2406+ if (fsr & FSR_LNX_PF) {
2407+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
2408+ do_group_exit(SIGKILL);
2409+ }
2410+#endif
2411+
2412 tsk->thread.address = addr;
2413 tsk->thread.error_code = fsr;
2414 tsk->thread.trap_no = 14;
2415@@ -398,6 +419,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
2416 }
2417 #endif /* CONFIG_MMU */
2418
2419+#ifdef CONFIG_PAX_PAGEEXEC
2420+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2421+{
2422+ long i;
2423+
2424+ printk(KERN_ERR "PAX: bytes at PC: ");
2425+ for (i = 0; i < 20; i++) {
2426+ unsigned char c;
2427+ if (get_user(c, (__force unsigned char __user *)pc+i))
2428+ printk(KERN_CONT "?? ");
2429+ else
2430+ printk(KERN_CONT "%02x ", c);
2431+ }
2432+ printk("\n");
2433+
2434+ printk(KERN_ERR "PAX: bytes at SP-4: ");
2435+ for (i = -1; i < 20; i++) {
2436+ unsigned long c;
2437+ if (get_user(c, (__force unsigned long __user *)sp+i))
2438+ printk(KERN_CONT "???????? ");
2439+ else
2440+ printk(KERN_CONT "%08lx ", c);
2441+ }
2442+ printk("\n");
2443+}
2444+#endif
2445+
2446 /*
2447 * First Level Translation Fault Handler
2448 *
2449@@ -575,12 +623,41 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
2450 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
2451 struct siginfo info;
2452
2453+#ifdef CONFIG_PAX_KERNEXEC
2454+ if (!user_mode(regs) && is_xn_fault(ifsr)) {
2455+ if (current->signal->curr_ip)
2456+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n",
2457+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid(),
2458+ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
2459+ else
2460+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n",
2461+ current->comm, task_pid_nr(current), current_uid(), current_euid(),
2462+ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
2463+ goto die;
2464+ }
2465+#endif
2466+
2467+#ifdef CONFIG_PAX_REFCOUNT
2468+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
2469+ unsigned int bkpt;
2470+
2471+ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
2472+ current->thread.error_code = ifsr;
2473+ current->thread.trap_no = 0;
2474+ pax_report_refcount_overflow(regs);
2475+ fixup_exception(regs);
2476+ return;
2477+ }
2478+ }
2479+#endif
2480+
2481 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
2482 return;
2483
2484 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
2485 inf->name, ifsr, addr);
2486
2487+die:
2488 info.si_signo = inf->sig;
2489 info.si_errno = 0;
2490 info.si_code = inf->code;
2491diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
2492index cf08bdf..f1a0383 100644
2493--- a/arch/arm/mm/fault.h
2494+++ b/arch/arm/mm/fault.h
2495@@ -3,6 +3,7 @@
2496
2497 /*
2498 * Fault status register encodings. We steal bit 31 for our own purposes.
2499+ * Set when the FSR value is from an instruction fault.
2500 */
2501 #define FSR_LNX_PF (1 << 31)
2502 #define FSR_WRITE (1 << 11)
2503@@ -22,6 +23,12 @@ static inline int fsr_fs(unsigned int fsr)
2504 }
2505 #endif
2506
2507+/* valid for LPAE and !LPAE */
2508+static inline int is_xn_fault(unsigned int fsr)
2509+{
2510+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
2511+}
2512+
2513 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
2514 unsigned long search_exception_table(unsigned long addr);
2515
2516diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
2517index ad722f1..46b670e 100644
2518--- a/arch/arm/mm/init.c
2519+++ b/arch/arm/mm/init.c
2520@@ -734,9 +734,43 @@ void __init mem_init(void)
2521
2522 void free_initmem(void)
2523 {
2524+
2525+#ifdef CONFIG_PAX_KERNEXEC
2526+ unsigned long addr;
2527+ pgd_t *pgd;
2528+ pud_t *pud;
2529+ pmd_t *pmd;
2530+#endif
2531+
2532 #ifdef CONFIG_HAVE_TCM
2533 extern char __tcm_start, __tcm_end;
2534+#endif
2535
2536+#ifdef CONFIG_PAX_KERNEXEC
2537+	/* make page tables, etc. before .text NX */
2538+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += PMD_SIZE) {
2539+ pgd = pgd_offset_k(addr);
2540+ pud = pud_offset(pgd, addr);
2541+ pmd = pmd_offset(pud, addr);
2542+ __pmd_update(pmd, PMD_SECT_XN);
2543+ }
2544+ /* make init NX */
2545+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += PMD_SIZE) {
2546+ pgd = pgd_offset_k(addr);
2547+ pud = pud_offset(pgd, addr);
2548+ pmd = pmd_offset(pud, addr);
2549+ __pmd_update(pmd, PMD_SECT_XN);
2550+ }
2551+ /* make kernel code/rodata read-only */
2552+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += PMD_SIZE) {
2553+ pgd = pgd_offset_k(addr);
2554+ pud = pud_offset(pgd, addr);
2555+ pmd = pmd_offset(pud, addr);
2556+ __pmd_update(pmd, PMD_SECT_AP_RDONLY);
2557+ }
2558+#endif
2559+
2560+#ifdef CONFIG_HAVE_TCM
2561 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
2562 totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
2563 __phys_to_pfn(__pa(&__tcm_end)),
2564diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
2565index ce8cb19..b9fe4d7 100644
2566--- a/arch/arm/mm/mmap.c
2567+++ b/arch/arm/mm/mmap.c
2568@@ -72,6 +72,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
2569 unsigned long start_addr;
2570 int do_align = 0;
2571 int aliasing = cache_is_vipt_aliasing();
2572+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
2573
2574 /*
2575 * We only need to do colour alignment if either the I or D
2576@@ -93,6 +94,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
2577 if (len > TASK_SIZE)
2578 return -ENOMEM;
2579
2580+#ifdef CONFIG_PAX_RANDMMAP
2581+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
2582+#endif
2583+
2584 if (addr) {
2585 if (do_align)
2586 addr = COLOUR_ALIGN(addr, pgoff);
2587@@ -100,15 +105,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
2588 addr = PAGE_ALIGN(addr);
2589
2590 vma = find_vma(mm, addr);
2591- if (TASK_SIZE - len >= addr &&
2592- (!vma || addr + len <= vma->vm_start))
2593+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
2594 return addr;
2595 }
2596 if (len > mm->cached_hole_size) {
2597- start_addr = addr = mm->free_area_cache;
2598+ start_addr = addr = mm->free_area_cache;
2599 } else {
2600- start_addr = addr = mm->mmap_base;
2601- mm->cached_hole_size = 0;
2602+ start_addr = addr = mm->mmap_base;
2603+ mm->cached_hole_size = 0;
2604 }
2605
2606 full_search:
2607@@ -124,14 +128,14 @@ full_search:
2608 * Start a new search - just in case we missed
2609 * some holes.
2610 */
2611- if (start_addr != TASK_UNMAPPED_BASE) {
2612- start_addr = addr = TASK_UNMAPPED_BASE;
2613+ if (start_addr != mm->mmap_base) {
2614+ start_addr = addr = mm->mmap_base;
2615 mm->cached_hole_size = 0;
2616 goto full_search;
2617 }
2618 return -ENOMEM;
2619 }
2620- if (!vma || addr + len <= vma->vm_start) {
2621+ if (check_heap_stack_gap(vma, addr, len, offset)) {
2622 /*
2623 * Remember the place where we stopped the search:
2624 */
2625@@ -156,6 +160,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
2626 unsigned long addr = addr0;
2627 int do_align = 0;
2628 int aliasing = cache_is_vipt_aliasing();
2629+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
2630
2631 /*
2632 * We only need to do colour alignment if either the I or D
2633@@ -175,6 +180,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
2634 return addr;
2635 }
2636
2637+#ifdef CONFIG_PAX_RANDMMAP
2638+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
2639+#endif
2640+
2641 /* requesting a specific address */
2642 if (addr) {
2643 if (do_align)
2644@@ -182,8 +191,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
2645 else
2646 addr = PAGE_ALIGN(addr);
2647 vma = find_vma(mm, addr);
2648- if (TASK_SIZE - len >= addr &&
2649- (!vma || addr + len <= vma->vm_start))
2650+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
2651 return addr;
2652 }
2653
2654@@ -203,7 +211,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
2655 /* make sure it can fit in the remaining address space */
2656 if (addr > len) {
2657 vma = find_vma(mm, addr-len);
2658- if (!vma || addr <= vma->vm_start)
2659+ if (check_heap_stack_gap(vma, addr - len, len, offset))
2660 /* remember the address as a hint for next time */
2661 return (mm->free_area_cache = addr-len);
2662 }
2663@@ -212,17 +220,17 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
2664 goto bottomup;
2665
2666 addr = mm->mmap_base - len;
2667- if (do_align)
2668- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2669
2670 do {
2671+ if (do_align)
2672+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2673 /*
2674 * Lookup failure means no vma is above this address,
2675 * else if new region fits below vma->vm_start,
2676 * return with success:
2677 */
2678 vma = find_vma(mm, addr);
2679- if (!vma || addr+len <= vma->vm_start)
2680+ if (check_heap_stack_gap(vma, addr, len, offset))
2681 /* remember the address as a hint for next time */
2682 return (mm->free_area_cache = addr);
2683
2684@@ -231,10 +239,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
2685 mm->cached_hole_size = vma->vm_start - addr;
2686
2687 /* try just below the current vma->vm_start */
2688- addr = vma->vm_start - len;
2689- if (do_align)
2690- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2691- } while (len < vma->vm_start);
2692+ addr = skip_heap_stack_gap(vma, len, offset);
2693+ } while (!IS_ERR_VALUE(addr));
2694
2695 bottomup:
2696 /*
2697@@ -266,10 +272,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2698
2699 if (mmap_is_legacy()) {
2700 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
2701+
2702+#ifdef CONFIG_PAX_RANDMMAP
2703+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2704+ mm->mmap_base += mm->delta_mmap;
2705+#endif
2706+
2707 mm->get_unmapped_area = arch_get_unmapped_area;
2708 mm->unmap_area = arch_unmap_area;
2709 } else {
2710 mm->mmap_base = mmap_base(random_factor);
2711+
2712+#ifdef CONFIG_PAX_RANDMMAP
2713+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2714+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2715+#endif
2716+
2717 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2718 mm->unmap_area = arch_unmap_area_topdown;
2719 }
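
check_heap_stack_gap(), substituted throughout these allocators for the bare addr + len <= vma->vm_start tests, additionally keeps a per-mapping random cushion (the gr_rand_threadstack_offset() value threaded in above) between the candidate range and the following vma. A guess at its shape, for illustration only; the real helper lives elsewhere in this patch and handles more cases:

#include <stdbool.h>

struct vma_like { unsigned long vm_start; };

static bool gap_ok(const struct vma_like *vma, unsigned long addr,
		   unsigned long len, unsigned long offset)
{
	if (!vma)
		return true;			/* nothing above the range */
	/* require the cushion on top of the usual no-overlap test */
	return addr + len + offset <= vma->vm_start;
}

int main(void)
{
	struct vma_like v = { .vm_start = 0x40000000UL };

	return gap_ok(&v, 0x3fff0000UL, 0x8000, 0x4000) ? 0 : 1;	/* fits */
}
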
2720diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
2721index 99b47b9..ede7824 100644
2722--- a/arch/arm/mm/mmu.c
2723+++ b/arch/arm/mm/mmu.c
2724@@ -227,16 +227,16 @@ static struct mem_type mem_types[] = {
2725 [MT_UNCACHED] = {
2726 .prot_pte = PROT_PTE_DEVICE,
2727 .prot_l1 = PMD_TYPE_TABLE,
2728- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
2729+ .prot_sect = PROT_SECT_DEVICE | PMD_SECT_XN,
2730 .domain = DOMAIN_IO,
2731 },
2732 [MT_CACHECLEAN] = {
2733- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
2734+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_AP_RDONLY,
2735 .domain = DOMAIN_KERNEL,
2736 },
2737 #ifndef CONFIG_ARM_LPAE
2738 [MT_MINICLEAN] = {
2739- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
2740+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE | PMD_SECT_AP_RDONLY,
2741 .domain = DOMAIN_KERNEL,
2742 },
2743 #endif
2744@@ -258,8 +258,26 @@ static struct mem_type mem_types[] = {
2745 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
2746 .domain = DOMAIN_KERNEL,
2747 },
2748+ [MT_MEMORY_R] = {
2749+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_RDONLY | L_PTE_XN,
2750+ .prot_l1 = PMD_TYPE_TABLE,
2751+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_RDONLY | PMD_SECT_XN,
2752+ .domain = DOMAIN_KERNEL,
2753+ },
2754+ [MT_MEMORY_RW] = {
2755+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN,
2756+ .prot_l1 = PMD_TYPE_TABLE,
2757+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_XN,
2758+ .domain = DOMAIN_KERNEL,
2759+ },
2760+ [MT_MEMORY_RX] = {
2761+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_RDONLY,
2762+ .prot_l1 = PMD_TYPE_TABLE,
2763+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_RDONLY,
2764+ .domain = DOMAIN_KERNEL,
2765+ },
2766 [MT_ROM] = {
2767- .prot_sect = PMD_TYPE_SECT,
2768+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_RDONLY,
2769 .domain = DOMAIN_KERNEL,
2770 },
2771 [MT_MEMORY_NONCACHED] = {
2772@@ -273,7 +291,7 @@ static struct mem_type mem_types[] = {
2773 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
2774 L_PTE_XN,
2775 .prot_l1 = PMD_TYPE_TABLE,
2776- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
2777+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_AP_RDONLY,
2778 .domain = DOMAIN_KERNEL,
2779 },
2780 [MT_MEMORY_ITCM] = {
2781@@ -432,6 +450,8 @@ static void __init build_mem_type_table(void)
2782 * from SVC mode and no access from userspace.
2783 */
2784 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
2785+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
2786+ mem_types[MT_MEMORY_R].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
2787 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
2788 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
2789 #endif
2790@@ -450,6 +470,12 @@ static void __init build_mem_type_table(void)
2791 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
2792 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
2793 mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
2794+ mem_types[MT_MEMORY_R].prot_sect |= PMD_SECT_S;
2795+ mem_types[MT_MEMORY_R].prot_pte |= L_PTE_SHARED;
2796+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
2797+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
2798+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
2799+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
2800 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
2801 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
2802 mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
2803@@ -503,6 +529,12 @@ static void __init build_mem_type_table(void)
2804 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
2805 mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
2806 mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
2807+ mem_types[MT_MEMORY_R].prot_sect |= ecc_mask | cp->pmd;
2808+ mem_types[MT_MEMORY_R].prot_pte |= kern_pgprot;
2809+ mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
2810+ mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
2811+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
2812+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
2813 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
2814 mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
2815 mem_types[MT_ROM].prot_sect |= cp->pmd;
2816@@ -1198,8 +1230,37 @@ static void __init map_lowmem(void)
2817 map.pfn = __phys_to_pfn(start);
2818 map.virtual = __phys_to_virt(start);
2819 map.length = end - start;
2820- map.type = MT_MEMORY;
2821
2822+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
2823+ struct map_desc kernel;
2824+ struct map_desc initmap;
2825+
2826+ /* when freeing initmem we will make this RW */
2827+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
2828+ initmap.virtual = (unsigned long)__init_begin;
2829+ initmap.length = _sdata - __init_begin;
2830+ initmap.type = MT_MEMORY;
2831+ create_mapping(&initmap);
2832+
2833+ /* when freeing initmem we will make this RX */
2834+ kernel.pfn = __phys_to_pfn(__pa(_stext));
2835+ kernel.virtual = (unsigned long)_stext;
2836+ kernel.length = __init_begin - _stext;
2837+ kernel.type = MT_MEMORY;
2838+ create_mapping(&kernel);
2839+
2840+ if (map.virtual < (unsigned long)_stext) {
2841+ map.length = (unsigned long)_stext - map.virtual;
2842+ map.type = MT_MEMORY;
2843+ create_mapping(&map);
2844+ }
2845+
2846+ map.pfn = __phys_to_pfn(__pa(_sdata));
2847+ map.virtual = (unsigned long)_sdata;
2848+ map.length = end - __pa(_sdata);
2849+ }
2850+
2851+ map.type = MT_MEMORY_RW;
2852 create_mapping(&map);
2853 }
2854 }
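
The map_lowmem() rework above is the other half of the free_initmem() changes earlier: when the kernel image sits inside a bank, lowmem is no longer mapped as one writable, executable MT_MEMORY block. A rough sketch of the resulting layout, with boundaries taken from the hunk and the post-boot permissions following the free_initmem() loops (a reading aid, not a specification):

	[PAGE_OFFSET  .. _stext)        MT_MEMORY     made XN once boot finishes
	[_stext       .. __init_begin)  MT_MEMORY     made read-only (so RX) once boot finishes
	[__init_begin .. _sdata)        MT_MEMORY     made XN, then freed
	[_sdata       .. end of bank)   MT_MEMORY_RW  non-executable from the start
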
2855diff --git a/arch/arm/plat-orion/include/plat/addr-map.h b/arch/arm/plat-orion/include/plat/addr-map.h
2856index ec63e4a..62aa5f1d 100644
2857--- a/arch/arm/plat-orion/include/plat/addr-map.h
2858+++ b/arch/arm/plat-orion/include/plat/addr-map.h
2859@@ -26,7 +26,7 @@ struct orion_addr_map_cfg {
2860 value in bridge_virt_base */
2861 void __iomem *(*win_cfg_base) (const struct orion_addr_map_cfg *cfg,
2862 const int win);
2863-};
2864+} __no_const;
2865
2866 /*
2867 * Information needed to setup one address mapping.
2868diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
2869index f5144cd..71f6d1f 100644
2870--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
2871+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
2872@@ -47,7 +47,7 @@ struct samsung_dma_ops {
2873 int (*started)(unsigned ch);
2874 int (*flush)(unsigned ch);
2875 int (*stop)(unsigned ch);
2876-};
2877+} __no_const;
2878
2879 extern void *samsung_dmadev_get_ops(void);
2880 extern void *s3c_dma_get_ops(void);
2881diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
2882index c3a58a1..78fbf54 100644
2883--- a/arch/avr32/include/asm/cache.h
2884+++ b/arch/avr32/include/asm/cache.h
2885@@ -1,8 +1,10 @@
2886 #ifndef __ASM_AVR32_CACHE_H
2887 #define __ASM_AVR32_CACHE_H
2888
2889+#include <linux/const.h>
2890+
2891 #define L1_CACHE_SHIFT 5
2892-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2893+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2894
2895 /*
2896 * Memory returned by kmalloc() may be used for DMA, so we must make
2897diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
2898index e2c3287..6c4f98c 100644
2899--- a/arch/avr32/include/asm/elf.h
2900+++ b/arch/avr32/include/asm/elf.h
2901@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
2902 the loader. We need to make sure that it is out of the way of the program
2903 that it will "exec", and that there is sufficient room for the brk. */
2904
2905-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
2906+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2907
2908+#ifdef CONFIG_PAX_ASLR
2909+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
2910+
2911+#define PAX_DELTA_MMAP_LEN 15
2912+#define PAX_DELTA_STACK_LEN 15
2913+#endif
2914
2915 /* This yields a mask that user programs can use to figure out what
2916 instruction set this CPU supports. This could be done in user space,
2917diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
2918index 479330b..53717a8 100644
2919--- a/arch/avr32/include/asm/kmap_types.h
2920+++ b/arch/avr32/include/asm/kmap_types.h
2921@@ -2,9 +2,9 @@
2922 #define __ASM_AVR32_KMAP_TYPES_H
2923
2924 #ifdef CONFIG_DEBUG_HIGHMEM
2925-# define KM_TYPE_NR 29
2926+# define KM_TYPE_NR 30
2927 #else
2928-# define KM_TYPE_NR 14
2929+# define KM_TYPE_NR 15
2930 #endif
2931
2932 #endif /* __ASM_AVR32_KMAP_TYPES_H */
2933diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
2934index b2f2d2d..d1c85cb 100644
2935--- a/arch/avr32/mm/fault.c
2936+++ b/arch/avr32/mm/fault.c
2937@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
2938
2939 int exception_trace = 1;
2940
2941+#ifdef CONFIG_PAX_PAGEEXEC
2942+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2943+{
2944+ unsigned long i;
2945+
2946+ printk(KERN_ERR "PAX: bytes at PC: ");
2947+ for (i = 0; i < 20; i++) {
2948+ unsigned char c;
2949+ if (get_user(c, (unsigned char *)pc+i))
2950+			printk(KERN_CONT "?? ");
2951+ else
2952+ printk(KERN_CONT "%02x ", c);
2953+ }
2954+ printk("\n");
2955+}
2956+#endif
2957+
2958 /*
2959 * This routine handles page faults. It determines the address and the
2960 * problem, and then passes it off to one of the appropriate routines.
2961@@ -174,6 +191,16 @@ bad_area:
2962 up_read(&mm->mmap_sem);
2963
2964 if (user_mode(regs)) {
2965+
2966+#ifdef CONFIG_PAX_PAGEEXEC
2967+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2968+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
2969+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
2970+ do_group_exit(SIGKILL);
2971+ }
2972+ }
2973+#endif
2974+
2975 if (exception_trace && printk_ratelimit())
2976 printk("%s%s[%d]: segfault at %08lx pc %08lx "
2977 "sp %08lx ecr %lu\n",
2978diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
2979index 568885a..f8008df 100644
2980--- a/arch/blackfin/include/asm/cache.h
2981+++ b/arch/blackfin/include/asm/cache.h
2982@@ -7,6 +7,7 @@
2983 #ifndef __ARCH_BLACKFIN_CACHE_H
2984 #define __ARCH_BLACKFIN_CACHE_H
2985
2986+#include <linux/const.h>
2987 #include <linux/linkage.h> /* for asmlinkage */
2988
2989 /*
2990@@ -14,7 +15,7 @@
2991 * Blackfin loads 32 bytes for cache
2992 */
2993 #define L1_CACHE_SHIFT 5
2994-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2995+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2996 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2997
2998 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2999diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
3000index aea2718..3639a60 100644
3001--- a/arch/cris/include/arch-v10/arch/cache.h
3002+++ b/arch/cris/include/arch-v10/arch/cache.h
3003@@ -1,8 +1,9 @@
3004 #ifndef _ASM_ARCH_CACHE_H
3005 #define _ASM_ARCH_CACHE_H
3006
3007+#include <linux/const.h>
3008 /* Etrax 100LX have 32-byte cache-lines. */
3009-#define L1_CACHE_BYTES 32
3010 #define L1_CACHE_SHIFT 5
3011+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3012
3013 #endif /* _ASM_ARCH_CACHE_H */
3014diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
3015index 7caf25d..ee65ac5 100644
3016--- a/arch/cris/include/arch-v32/arch/cache.h
3017+++ b/arch/cris/include/arch-v32/arch/cache.h
3018@@ -1,11 +1,12 @@
3019 #ifndef _ASM_CRIS_ARCH_CACHE_H
3020 #define _ASM_CRIS_ARCH_CACHE_H
3021
3022+#include <linux/const.h>
3023 #include <arch/hwregs/dma.h>
3024
3025 /* A cache-line is 32 bytes. */
3026-#define L1_CACHE_BYTES 32
3027 #define L1_CACHE_SHIFT 5
3028+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3029
3030 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
3031
3032diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
3033index b86329d..6709906 100644
3034--- a/arch/frv/include/asm/atomic.h
3035+++ b/arch/frv/include/asm/atomic.h
3036@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
3037 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
3038 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
3039
3040+#define atomic64_read_unchecked(v) atomic64_read(v)
3041+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3042+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3043+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3044+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3045+#define atomic64_inc_unchecked(v) atomic64_inc(v)
3046+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3047+#define atomic64_dec_unchecked(v) atomic64_dec(v)
3048+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3049+
3050 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
3051 {
3052 int c, old;
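
The *_unchecked aliases appear on every architecture that lacks PAX_REFCOUNT instrumentation: where the feature is implemented, the plain atomic ops detect signed overflow and the _unchecked variants are reserved for counters that may legitimately wrap, while on frv (and ia64 below) both spellings map to the same operation. A userspace sketch of the distinction the naming encodes (illustrative only; it uses the GCC/clang overflow builtins, not the kernel's mechanism, and saturates where the kernel would take stronger action):

#include <limits.h>
#include <stdio.h>

static int add_checked(int v, int a)
{
	int r;

	if (__builtin_add_overflow(v, a, &r)) {
		fprintf(stderr, "refcount overflow detected\n");
		return v;		/* refuse to wrap */
	}
	return r;
}

static int add_unchecked(int v, int a)
{
	return (int)((unsigned int)v + (unsigned int)a);	/* wrapping OK */
}

int main(void)
{
	printf("checked:   %d\n", add_checked(INT_MAX, 1));	/* stays INT_MAX */
	printf("unchecked: %d\n", add_unchecked(INT_MAX, 1));	/* wraps to INT_MIN */
	return 0;
}
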
3053diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
3054index 2797163..c2a401d 100644
3055--- a/arch/frv/include/asm/cache.h
3056+++ b/arch/frv/include/asm/cache.h
3057@@ -12,10 +12,11 @@
3058 #ifndef __ASM_CACHE_H
3059 #define __ASM_CACHE_H
3060
3061+#include <linux/const.h>
3062
3063 /* bytes per L1 cache line */
3064 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
3065-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3066+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3067
3068 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
3069 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
3070diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
3071index 43901f2..0d8b865 100644
3072--- a/arch/frv/include/asm/kmap_types.h
3073+++ b/arch/frv/include/asm/kmap_types.h
3074@@ -2,6 +2,6 @@
3075 #ifndef _ASM_KMAP_TYPES_H
3076 #define _ASM_KMAP_TYPES_H
3077
3078-#define KM_TYPE_NR 17
3079+#define KM_TYPE_NR 18
3080
3081 #endif
3082diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
3083index 385fd30..3aaf4fe 100644
3084--- a/arch/frv/mm/elf-fdpic.c
3085+++ b/arch/frv/mm/elf-fdpic.c
3086@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3087 {
3088 struct vm_area_struct *vma;
3089 unsigned long limit;
3090+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
3091
3092 if (len > TASK_SIZE)
3093 return -ENOMEM;
3094@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3095 if (addr) {
3096 addr = PAGE_ALIGN(addr);
3097 vma = find_vma(current->mm, addr);
3098- if (TASK_SIZE - len >= addr &&
3099- (!vma || addr + len <= vma->vm_start))
3100+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
3101 goto success;
3102 }
3103
3104@@ -89,7 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3105 for (; vma; vma = vma->vm_next) {
3106 if (addr > limit)
3107 break;
3108- if (addr + len <= vma->vm_start)
3109+ if (check_heap_stack_gap(vma, addr, len, offset))
3110 goto success;
3111 addr = vma->vm_end;
3112 }
3113@@ -104,7 +104,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3114 for (; vma; vma = vma->vm_next) {
3115 if (addr > limit)
3116 break;
3117- if (addr + len <= vma->vm_start)
3118+ if (check_heap_stack_gap(vma, addr, len, offset))
3119 goto success;
3120 addr = vma->vm_end;
3121 }
3122diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
3123index f4ca594..adc72fd6 100644
3124--- a/arch/hexagon/include/asm/cache.h
3125+++ b/arch/hexagon/include/asm/cache.h
3126@@ -21,9 +21,11 @@
3127 #ifndef __ASM_CACHE_H
3128 #define __ASM_CACHE_H
3129
3130+#include <linux/const.h>
3131+
3132 /* Bytes per L1 cache line */
3133-#define L1_CACHE_SHIFT (5)
3134-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3135+#define L1_CACHE_SHIFT 5
3136+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3137
3138 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
3139 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
3140diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
3141index 6e6fe18..a6ae668 100644
3142--- a/arch/ia64/include/asm/atomic.h
3143+++ b/arch/ia64/include/asm/atomic.h
3144@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
3145 #define atomic64_inc(v) atomic64_add(1, (v))
3146 #define atomic64_dec(v) atomic64_sub(1, (v))
3147
3148+#define atomic64_read_unchecked(v) atomic64_read(v)
3149+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3150+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3151+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3152+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3153+#define atomic64_inc_unchecked(v) atomic64_inc(v)
3154+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3155+#define atomic64_dec_unchecked(v) atomic64_dec(v)
3156+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3157+
3158 /* Atomic operations are already serializing */
3159 #define smp_mb__before_atomic_dec() barrier()
3160 #define smp_mb__after_atomic_dec() barrier()
3161diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
3162index 988254a..e1ee885 100644
3163--- a/arch/ia64/include/asm/cache.h
3164+++ b/arch/ia64/include/asm/cache.h
3165@@ -1,6 +1,7 @@
3166 #ifndef _ASM_IA64_CACHE_H
3167 #define _ASM_IA64_CACHE_H
3168
3169+#include <linux/const.h>
3170
3171 /*
3172 * Copyright (C) 1998-2000 Hewlett-Packard Co
3173@@ -9,7 +10,7 @@
3174
3175 /* Bytes per L1 (data) cache line. */
3176 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
3177-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3178+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3179
3180 #ifdef CONFIG_SMP
3181 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
3182diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
3183index b5298eb..67c6e62 100644
3184--- a/arch/ia64/include/asm/elf.h
3185+++ b/arch/ia64/include/asm/elf.h
3186@@ -42,6 +42,13 @@
3187 */
3188 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
3189
3190+#ifdef CONFIG_PAX_ASLR
3191+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
3192+
3193+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
3194+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
3195+#endif
3196+
3197 #define PT_IA_64_UNWIND 0x70000001
3198
3199 /* IA-64 relocations: */
3200diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
3201index 96a8d92..617a1cf 100644
3202--- a/arch/ia64/include/asm/pgalloc.h
3203+++ b/arch/ia64/include/asm/pgalloc.h
3204@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
3205 pgd_val(*pgd_entry) = __pa(pud);
3206 }
3207
3208+static inline void
3209+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
3210+{
3211+ pgd_populate(mm, pgd_entry, pud);
3212+}
3213+
3214 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
3215 {
3216 return quicklist_alloc(0, GFP_KERNEL, NULL);
3217@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
3218 pud_val(*pud_entry) = __pa(pmd);
3219 }
3220
3221+static inline void
3222+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
3223+{
3224+ pud_populate(mm, pud_entry, pmd);
3225+}
3226+
3227 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
3228 {
3229 return quicklist_alloc(0, GFP_KERNEL, NULL);
3230diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
3231index 815810c..d60bd4c 100644
3232--- a/arch/ia64/include/asm/pgtable.h
3233+++ b/arch/ia64/include/asm/pgtable.h
3234@@ -12,7 +12,7 @@
3235 * David Mosberger-Tang <davidm@hpl.hp.com>
3236 */
3237
3238-
3239+#include <linux/const.h>
3240 #include <asm/mman.h>
3241 #include <asm/page.h>
3242 #include <asm/processor.h>
3243@@ -142,6 +142,17 @@
3244 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
3245 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
3246 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
3247+
3248+#ifdef CONFIG_PAX_PAGEEXEC
3249+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
3250+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
3251+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
3252+#else
3253+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3254+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3255+# define PAGE_COPY_NOEXEC PAGE_COPY
3256+#endif
3257+
3258 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
3259 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
3260 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
3261diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
3262index 54ff557..70c88b7 100644
3263--- a/arch/ia64/include/asm/spinlock.h
3264+++ b/arch/ia64/include/asm/spinlock.h
3265@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
3266 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
3267
3268 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
3269- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
3270+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
3271 }
3272
3273 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
3274diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
3275index 449c8c0..50cdf87 100644
3276--- a/arch/ia64/include/asm/uaccess.h
3277+++ b/arch/ia64/include/asm/uaccess.h
3278@@ -42,6 +42,8 @@
3279 #include <asm/pgtable.h>
3280 #include <asm/io.h>
3281
3282+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3283+
3284 /*
3285 * For historical reasons, the following macros are grossly misnamed:
3286 */
3287@@ -240,12 +242,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
3288 static inline unsigned long
3289 __copy_to_user (void __user *to, const void *from, unsigned long count)
3290 {
3291+ if (count > INT_MAX)
3292+ return count;
3293+
3294+ if (!__builtin_constant_p(count))
3295+ check_object_size(from, count, true);
3296+
3297 return __copy_user(to, (__force void __user *) from, count);
3298 }
3299
3300 static inline unsigned long
3301 __copy_from_user (void *to, const void __user *from, unsigned long count)
3302 {
3303+ if (count > INT_MAX)
3304+ return count;
3305+
3306+ if (!__builtin_constant_p(count))
3307+ check_object_size(to, count, false);
3308+
3309 return __copy_user((__force void __user *) to, from, count);
3310 }
3311
3312@@ -255,10 +269,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
3313 ({ \
3314 void __user *__cu_to = (to); \
3315 const void *__cu_from = (from); \
3316- long __cu_len = (n); \
3317+ unsigned long __cu_len = (n); \
3318 \
3319- if (__access_ok(__cu_to, __cu_len, get_fs())) \
3320+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
3321+ if (!__builtin_constant_p(n)) \
3322+ check_object_size(__cu_from, __cu_len, true); \
3323 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
3324+ } \
3325 __cu_len; \
3326 })
3327
3328@@ -266,11 +283,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
3329 ({ \
3330 void *__cu_to = (to); \
3331 const void __user *__cu_from = (from); \
3332- long __cu_len = (n); \
3333+ unsigned long __cu_len = (n); \
3334 \
3335 __chk_user_ptr(__cu_from); \
3336- if (__access_ok(__cu_from, __cu_len, get_fs())) \
3337+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
3338+ if (!__builtin_constant_p(n)) \
3339+ check_object_size(__cu_to, __cu_len, false); \
3340 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
3341+ } \
3342 __cu_len; \
3343 })
3344
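
Both ia64 copy helpers gate check_object_size() on !__builtin_constant_p(count), just like the ARM versions earlier: compile-time-constant sizes are assumed to be validated statically, so only runtime-computed lengths pay for the object bounds walk (and the INT_MAX cap rejects underflowed sizes, mirroring the ARM (long)n < 0 check). A host-side sketch of the gate, with a stub standing in for check_object_size(); note the result can vary with optimization level, since the compiler may prove more values constant:

#include <stdio.h>

static unsigned long runtime_checks;

static void check_object_size_stub(const void *p, unsigned long n, int to)
{
	(void)p; (void)n; (void)to;
	runtime_checks++;
}

#define maybe_check(p, n, to)					\
	do {							\
		if (!__builtin_constant_p(n))			\
			check_object_size_stub(p, n, to);	\
	} while (0)

int main(void)
{
	char buf[32];
	unsigned long n = 16;

	maybe_check(buf, sizeof(buf), 0);	/* constant size: skipped */
	maybe_check(buf, n, 0);			/* runtime size: checked */
	printf("runtime checks: %lu\n", runtime_checks);
	return 0;
}
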
3345diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
3346index 24603be..948052d 100644
3347--- a/arch/ia64/kernel/module.c
3348+++ b/arch/ia64/kernel/module.c
3349@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
3350 void
3351 module_free (struct module *mod, void *module_region)
3352 {
3353- if (mod && mod->arch.init_unw_table &&
3354- module_region == mod->module_init) {
3355+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
3356 unw_remove_unwind_table(mod->arch.init_unw_table);
3357 mod->arch.init_unw_table = NULL;
3358 }
3359@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
3360 }
3361
3362 static inline int
3363+in_init_rx (const struct module *mod, uint64_t addr)
3364+{
3365+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
3366+}
3367+
3368+static inline int
3369+in_init_rw (const struct module *mod, uint64_t addr)
3370+{
3371+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
3372+}
3373+
3374+static inline int
3375 in_init (const struct module *mod, uint64_t addr)
3376 {
3377- return addr - (uint64_t) mod->module_init < mod->init_size;
3378+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
3379+}
3380+
3381+static inline int
3382+in_core_rx (const struct module *mod, uint64_t addr)
3383+{
3384+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
3385+}
3386+
3387+static inline int
3388+in_core_rw (const struct module *mod, uint64_t addr)
3389+{
3390+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
3391 }
3392
3393 static inline int
3394 in_core (const struct module *mod, uint64_t addr)
3395 {
3396- return addr - (uint64_t) mod->module_core < mod->core_size;
3397+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
3398 }
3399
3400 static inline int
3401@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
3402 break;
3403
3404 case RV_BDREL:
3405- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
3406+ if (in_init_rx(mod, val))
3407+ val -= (uint64_t) mod->module_init_rx;
3408+ else if (in_init_rw(mod, val))
3409+ val -= (uint64_t) mod->module_init_rw;
3410+ else if (in_core_rx(mod, val))
3411+ val -= (uint64_t) mod->module_core_rx;
3412+ else if (in_core_rw(mod, val))
3413+ val -= (uint64_t) mod->module_core_rw;
3414 break;
3415
3416 case RV_LTV:
3417@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
3418 * addresses have been selected...
3419 */
3420 uint64_t gp;
3421- if (mod->core_size > MAX_LTOFF)
3422+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
3423 /*
3424 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
3425 * at the end of the module.
3426 */
3427- gp = mod->core_size - MAX_LTOFF / 2;
3428+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
3429 else
3430- gp = mod->core_size / 2;
3431- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
3432+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
3433+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
3434 mod->arch.gp = gp;
3435 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
3436 }
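
The in_init_rx()/in_core_rw() helpers above test base <= addr < base + size with a single unsigned comparison: if addr is below base, the subtraction wraps to a huge value and fails the < size test. A minimal demonstration with made-up addresses:

#include <stdint.h>
#include <stdio.h>

/* true exactly when base <= addr < base + size */
static int in_range(uint64_t addr, uint64_t base, uint64_t size)
{
    return addr - base < size;
}

int main(void)
{
    uint64_t rx_base = 0xa000000000000000ULL, rx_size = 0x4000;

    printf("%d\n", in_range(rx_base + 0x100, rx_base, rx_size));   /* 1: inside */
    printf("%d\n", in_range(rx_base - 8, rx_base, rx_size));       /* 0: wraps huge */
    printf("%d\n", in_range(rx_base + rx_size, rx_base, rx_size)); /* 0: one past end */
    return 0;
}
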
3437diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
3438index d9439ef..d0cac6b 100644
3439--- a/arch/ia64/kernel/sys_ia64.c
3440+++ b/arch/ia64/kernel/sys_ia64.c
3441@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
3442 unsigned long start_addr, align_mask = PAGE_SIZE - 1;
3443 struct mm_struct *mm = current->mm;
3444 struct vm_area_struct *vma;
3445+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
3446
3447 if (len > RGN_MAP_LIMIT)
3448 return -ENOMEM;
3449@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
3450 if (REGION_NUMBER(addr) == RGN_HPAGE)
3451 addr = 0;
3452 #endif
3453+
3454+#ifdef CONFIG_PAX_RANDMMAP
3455+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3456+ addr = mm->free_area_cache;
3457+ else
3458+#endif
3459+
3460 if (!addr)
3461 addr = mm->free_area_cache;
3462
3463@@ -61,14 +69,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
3464 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
3465 /* At this point: (!vma || addr < vma->vm_end). */
3466 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
3467- if (start_addr != TASK_UNMAPPED_BASE) {
3468+ if (start_addr != mm->mmap_base) {
3469 /* Start a new search --- just in case we missed some holes. */
3470- addr = TASK_UNMAPPED_BASE;
3471+ addr = mm->mmap_base;
3472 goto full_search;
3473 }
3474 return -ENOMEM;
3475 }
3476- if (!vma || addr + len <= vma->vm_start) {
3477+ if (check_heap_stack_gap(vma, addr, len, offset)) {
3478 /* Remember the address where we stopped this search: */
3479 mm->free_area_cache = addr + len;
3480 return addr;
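
check_heap_stack_gap() replaces the open-coded '!vma || addr + len <= vma->vm_start' test here and in the later arch hunks. The helper itself is added elsewhere in this patch; the model below is only a guess shaped by the call sites (NULL means the region fits, a downward-growing stack keeps extra slack plus the per-thread random offset), and the struct, flag value and gap size are placeholders:

#include <stdio.h>

struct vma_model {
    unsigned long vm_start, vm_end, vm_flags;  /* names mirror vm_area_struct */
};

#define VM_GROWSDOWN_BIT 0x00000100UL
#define GUARD_GAP        (4UL << 12)           /* assumed: a few pages of slack */

static int check_gap(const struct vma_model *vma, unsigned long addr,
                     unsigned long len, unsigned long rand_offset)
{
    if (!vma)
        return 1;                              /* no mapping above: fits */
    if (vma->vm_flags & VM_GROWSDOWN_BIT)
        /* leave room for stack growth plus the random offset */
        return addr + len + GUARD_GAP + rand_offset <= vma->vm_start;
    return addr + len <= vma->vm_start;        /* the old open-coded test */
}

int main(void)
{
    struct vma_model stack = { 0x7f0000000000UL, 0x7f0000021000UL, VM_GROWSDOWN_BIT };

    printf("%d\n", check_gap(NULL, 0x1000, 0x1000, 0));                    /* 1 */
    printf("%d\n", check_gap(&stack, 0x7effffff0000UL, 0x1000, 0));        /* 1 */
    printf("%d\n", check_gap(&stack, stack.vm_start - 0x2000, 0x1000, 0)); /* 0 */
    return 0;
}
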
3481diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
3482index 0ccb28f..8992469 100644
3483--- a/arch/ia64/kernel/vmlinux.lds.S
3484+++ b/arch/ia64/kernel/vmlinux.lds.S
3485@@ -198,7 +198,7 @@ SECTIONS {
3486 /* Per-cpu data: */
3487 . = ALIGN(PERCPU_PAGE_SIZE);
3488 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
3489- __phys_per_cpu_start = __per_cpu_load;
3490+ __phys_per_cpu_start = per_cpu_load;
3491 /*
3492 * ensure percpu data fits
3493 * into percpu page size
3494diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
3495index 6cf0341..d352594 100644
3496--- a/arch/ia64/mm/fault.c
3497+++ b/arch/ia64/mm/fault.c
3498@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
3499 return pte_present(pte);
3500 }
3501
3502+#ifdef CONFIG_PAX_PAGEEXEC
3503+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3504+{
3505+ unsigned long i;
3506+
3507+ printk(KERN_ERR "PAX: bytes at PC: ");
3508+ for (i = 0; i < 8; i++) {
3509+ unsigned int c;
3510+ if (get_user(c, (unsigned int *)pc+i))
3511+ printk(KERN_CONT "???????? ");
3512+ else
3513+ printk(KERN_CONT "%08x ", c);
3514+ }
3515+ printk("\n");
3516+}
3517+#endif
3518+
3519 # define VM_READ_BIT 0
3520 # define VM_WRITE_BIT 1
3521 # define VM_EXEC_BIT 2
3522@@ -149,8 +166,21 @@ retry:
3523 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
3524 goto bad_area;
3525
3526- if ((vma->vm_flags & mask) != mask)
3527+ if ((vma->vm_flags & mask) != mask) {
3528+
3529+#ifdef CONFIG_PAX_PAGEEXEC
3530+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
3531+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
3532+ goto bad_area;
3533+
3534+ up_read(&mm->mmap_sem);
3535+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
3536+ do_group_exit(SIGKILL);
3537+ }
3538+#endif
3539+
3540 goto bad_area;
3541+ }
3542
3543 /*
3544 * If for any reason at all we couldn't handle the fault, make
3545diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
3546index 5ca674b..127c3cb 100644
3547--- a/arch/ia64/mm/hugetlbpage.c
3548+++ b/arch/ia64/mm/hugetlbpage.c
3549@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
3550 unsigned long pgoff, unsigned long flags)
3551 {
3552 struct vm_area_struct *vmm;
3553+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
3554
3555 if (len > RGN_MAP_LIMIT)
3556 return -ENOMEM;
3557@@ -171,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
3558 /* At this point: (!vmm || addr < vmm->vm_end). */
3559 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
3560 return -ENOMEM;
3561- if (!vmm || (addr + len) <= vmm->vm_start)
3562+ if (check_heap_stack_gap(vmm, addr, len, offset))
3563 return addr;
3564 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
3565 }
3566diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
3567index 082e383..fb7be80 100644
3568--- a/arch/ia64/mm/init.c
3569+++ b/arch/ia64/mm/init.c
3570@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
3571 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
3572 vma->vm_end = vma->vm_start + PAGE_SIZE;
3573 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
3574+
3575+#ifdef CONFIG_PAX_PAGEEXEC
3576+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
3577+ vma->vm_flags &= ~VM_EXEC;
3578+
3579+#ifdef CONFIG_PAX_MPROTECT
3580+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
3581+ vma->vm_flags &= ~VM_MAYEXEC;
3582+#endif
3583+
3584+ }
3585+#endif
3586+
3587 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
3588 down_write(&current->mm->mmap_sem);
3589 if (insert_vm_struct(current->mm, vma)) {
3590diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
3591index 40b3ee9..8c2c112 100644
3592--- a/arch/m32r/include/asm/cache.h
3593+++ b/arch/m32r/include/asm/cache.h
3594@@ -1,8 +1,10 @@
3595 #ifndef _ASM_M32R_CACHE_H
3596 #define _ASM_M32R_CACHE_H
3597
3598+#include <linux/const.h>
3599+
3600 /* L1 cache line size */
3601 #define L1_CACHE_SHIFT 4
3602-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3603+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3604
3605 #endif /* _ASM_M32R_CACHE_H */
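
This L1_CACHE_BYTES change recurs across most of the arch headers below. A bare '1 << shift' is a plain int, which sign-extends or overflows in mask arithmetic, while writing '1UL' directly breaks assembler sources that include the header. _AC() from <linux/const.h> resolves that by pasting the suffix only when compiling C:

#include <stdio.h>

#ifdef __ASSEMBLY__
#define _AC(X, Y)  X            /* assembler sees a bare constant */
#else
#define __AC(X, Y) (X##Y)
#define _AC(X, Y)  __AC(X, Y)   /* C sees the suffixed constant, e.g. 1UL */
#endif

#define L1_CACHE_SHIFT 4
#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)

int main(void)
{
    /* full unsigned long width: safe in mask arithmetic like ~(bytes - 1) */
    printf("%zu %lu\n", sizeof(L1_CACHE_BYTES),
           (unsigned long)L1_CACHE_BYTES);     /* 8 16 on LP64 */
    return 0;
}
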
3606diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
3607index 82abd15..d95ae5d 100644
3608--- a/arch/m32r/lib/usercopy.c
3609+++ b/arch/m32r/lib/usercopy.c
3610@@ -14,6 +14,9 @@
3611 unsigned long
3612 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
3613 {
3614+ if ((long)n < 0)
3615+ return n;
3616+
3617 prefetch(from);
3618 if (access_ok(VERIFY_WRITE, to, n))
3619 __copy_user(to,from,n);
3620@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
3621 unsigned long
3622 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
3623 {
3624+ if ((long)n < 0)
3625+ return n;
3626+
3627 prefetchw(to);
3628 if (access_ok(VERIFY_READ, from, n))
3629 __copy_user_zeroing(to,from,n);
3630diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
3631index 0395c51..5f26031 100644
3632--- a/arch/m68k/include/asm/cache.h
3633+++ b/arch/m68k/include/asm/cache.h
3634@@ -4,9 +4,11 @@
3635 #ifndef __ARCH_M68K_CACHE_H
3636 #define __ARCH_M68K_CACHE_H
3637
3638+#include <linux/const.h>
3639+
3640 /* bytes per L1 cache line */
3641 #define L1_CACHE_SHIFT 4
3642-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
3643+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3644
3645 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
3646
3647diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
3648index 4efe96a..60e8699 100644
3649--- a/arch/microblaze/include/asm/cache.h
3650+++ b/arch/microblaze/include/asm/cache.h
3651@@ -13,11 +13,12 @@
3652 #ifndef _ASM_MICROBLAZE_CACHE_H
3653 #define _ASM_MICROBLAZE_CACHE_H
3654
3655+#include <linux/const.h>
3656 #include <asm/registers.h>
3657
3658 #define L1_CACHE_SHIFT 5
3659 /* word-granular cache in microblaze */
3660-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3661+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3662
3663 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3664
3665diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
3666index 01cc6ba..bcb7a5d 100644
3667--- a/arch/mips/include/asm/atomic.h
3668+++ b/arch/mips/include/asm/atomic.h
3669@@ -21,6 +21,10 @@
3670 #include <asm/cmpxchg.h>
3671 #include <asm/war.h>
3672
3673+#ifdef CONFIG_GENERIC_ATOMIC64
3674+#include <asm-generic/atomic64.h>
3675+#endif
3676+
3677 #define ATOMIC_INIT(i) { (i) }
3678
3679 /*
3680@@ -759,6 +763,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3681 */
3682 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
3683
3684+#define atomic64_read_unchecked(v) atomic64_read(v)
3685+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3686+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3687+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3688+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3689+#define atomic64_inc_unchecked(v) atomic64_inc(v)
3690+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3691+#define atomic64_dec_unchecked(v) atomic64_dec(v)
3692+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3693+
3694 #endif /* CONFIG_64BIT */
3695
3696 /*
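
The atomic64_*_unchecked() aliases above exist for PaX's overflow-checking atomics: on instrumented architectures the regular ops trap on overflow to catch reference-count abuse, while *_unchecked keeps wrapping semantics for counters that may legitimately wrap. MIPS carries no instrumentation, so both names map to the same op, as the #defines show. The two semantics side by side, sketched in plain C:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* instrumented flavour: refuse to wrap, as PaX-checked atomics do */
static long long add_checked(long long v, long long a)
{
    long long r;
    if (__builtin_add_overflow(v, a, &r)) {
        fputs("overflow detected: a checked atomic would trap here\n", stderr);
        abort();
    }
    return r;
}

/* _unchecked flavour: two's-complement wrap is the intended behaviour */
static long long add_unchecked(long long v, long long a)
{
    return (long long)((unsigned long long)v + (unsigned long long)a);
}

int main(void)
{
    printf("%lld\n", add_checked(40, 2));           /* 42 */
    printf("%lld\n", add_unchecked(LLONG_MAX, 1));  /* wraps to LLONG_MIN */
    /* add_checked(LLONG_MAX, 1) would abort */
    return 0;
}
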
3697diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
3698index b4db69f..8f3b093 100644
3699--- a/arch/mips/include/asm/cache.h
3700+++ b/arch/mips/include/asm/cache.h
3701@@ -9,10 +9,11 @@
3702 #ifndef _ASM_CACHE_H
3703 #define _ASM_CACHE_H
3704
3705+#include <linux/const.h>
3706 #include <kmalloc.h>
3707
3708 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
3709-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3710+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3711
3712 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
3713 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3714diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
3715index 455c0ac..ad65fbe 100644
3716--- a/arch/mips/include/asm/elf.h
3717+++ b/arch/mips/include/asm/elf.h
3718@@ -372,13 +372,16 @@ extern const char *__elf_platform;
3719 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
3720 #endif
3721
3722+#ifdef CONFIG_PAX_ASLR
3723+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
3724+
3725+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
3726+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
3727+#endif
3728+
3729 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
3730 struct linux_binprm;
3731 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
3732 int uses_interp);
3733
3734-struct mm_struct;
3735-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3736-#define arch_randomize_brk arch_randomize_brk
3737-
3738 #endif /* _ASM_ELF_H */
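
The PAX_DELTA_* values above are counts of random bits applied at page granularity. Assuming 4K pages (PAGE_SHIFT == 12), the arithmetic works out as follows:

#include <stdio.h>

int main(void)
{
    int page_shift = 12;                 /* assumed 4K pages */
    int delta32 = 27 - page_shift;       /* 15 random bits, 32-bit tasks */
    int delta64 = 36 - page_shift;       /* 24 random bits, 64-bit tasks */

    printf("32-bit: %d bits of jitter -> base varies over %llu MB\n",
           delta32, (1ULL << (delta32 + page_shift)) >> 20);   /* 128 MB */
    printf("64-bit: %d bits of jitter -> base varies over %llu GB\n",
           delta64, (1ULL << (delta64 + page_shift)) >> 30);   /* 64 GB */
    return 0;
}
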
3739diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
3740index c1f6afa..38cc6e9 100644
3741--- a/arch/mips/include/asm/exec.h
3742+++ b/arch/mips/include/asm/exec.h
3743@@ -12,6 +12,6 @@
3744 #ifndef _ASM_EXEC_H
3745 #define _ASM_EXEC_H
3746
3747-extern unsigned long arch_align_stack(unsigned long sp);
3748+#define arch_align_stack(x) ((x) & ~0xfUL)
3749
3750 #endif /* _ASM_EXEC_H */
3751diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
3752index da9bd7d..91aa7ab 100644
3753--- a/arch/mips/include/asm/page.h
3754+++ b/arch/mips/include/asm/page.h
3755@@ -98,7 +98,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
3756 #ifdef CONFIG_CPU_MIPS32
3757 typedef struct { unsigned long pte_low, pte_high; } pte_t;
3758 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
3759- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
3760+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
3761 #else
3762 typedef struct { unsigned long long pte; } pte_t;
3763 #define pte_val(x) ((x).pte)
3764diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
3765index 881d18b..cea38bc 100644
3766--- a/arch/mips/include/asm/pgalloc.h
3767+++ b/arch/mips/include/asm/pgalloc.h
3768@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3769 {
3770 set_pud(pud, __pud((unsigned long)pmd));
3771 }
3772+
3773+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3774+{
3775+ pud_populate(mm, pud, pmd);
3776+}
3777 #endif
3778
3779 /*
3780diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
3781index 18806a5..141ffcf 100644
3782--- a/arch/mips/include/asm/thread_info.h
3783+++ b/arch/mips/include/asm/thread_info.h
3784@@ -110,6 +110,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
3785 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
3786 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
3787 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
3788+/* li takes a 32bit immediate */
3789+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
3790 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
3791
3792 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
3793@@ -125,15 +127,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
3794 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
3795 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
3796 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
3797+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
3798+
3799+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
3800
3801 /* work to do in syscall_trace_leave() */
3802-#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
3803+#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
3804
3805 /* work to do on interrupt/exception return */
3806 #define _TIF_WORK_MASK \
3807 (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)
3808 /* work to do on any return to u-space */
3809-#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT)
3810+#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT | _TIF_GRSEC_SETXID)
3811
3812 #endif /* __KERNEL__ */
3813
3814diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
3815index 9fdd8bc..4bd7f1a 100644
3816--- a/arch/mips/kernel/binfmt_elfn32.c
3817+++ b/arch/mips/kernel/binfmt_elfn32.c
3818@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
3819 #undef ELF_ET_DYN_BASE
3820 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
3821
3822+#ifdef CONFIG_PAX_ASLR
3823+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
3824+
3825+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
3826+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
3827+#endif
3828+
3829 #include <asm/processor.h>
3830 #include <linux/module.h>
3831 #include <linux/elfcore.h>
3832diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
3833index ff44823..97f8906 100644
3834--- a/arch/mips/kernel/binfmt_elfo32.c
3835+++ b/arch/mips/kernel/binfmt_elfo32.c
3836@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
3837 #undef ELF_ET_DYN_BASE
3838 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
3839
3840+#ifdef CONFIG_PAX_ASLR
3841+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
3842+
3843+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
3844+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
3845+#endif
3846+
3847 #include <asm/processor.h>
3848
3849 /*
3850diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
3851index 69b17a9..9db82f9 100644
3852--- a/arch/mips/kernel/process.c
3853+++ b/arch/mips/kernel/process.c
3854@@ -478,15 +478,3 @@ unsigned long get_wchan(struct task_struct *task)
3855 out:
3856 return pc;
3857 }
3858-
3859-/*
3860- * Don't forget that the stack pointer must be aligned on a 8 bytes
3861- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
3862- */
3863-unsigned long arch_align_stack(unsigned long sp)
3864-{
3865- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3866- sp -= get_random_int() & ~PAGE_MASK;
3867-
3868- return sp & ALMASK;
3869-}
3870diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
3871index 4812c6d..2069554 100644
3872--- a/arch/mips/kernel/ptrace.c
3873+++ b/arch/mips/kernel/ptrace.c
3874@@ -528,6 +528,10 @@ static inline int audit_arch(void)
3875 return arch;
3876 }
3877
3878+#ifdef CONFIG_GRKERNSEC_SETXID
3879+extern void gr_delayed_cred_worker(void);
3880+#endif
3881+
3882 /*
3883 * Notification of system call entry/exit
3884 * - triggered by current->work.syscall_trace
3885@@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
3886 /* do the secure computing check first */
3887 secure_computing_strict(regs->regs[2]);
3888
3889+#ifdef CONFIG_GRKERNSEC_SETXID
3890+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3891+ gr_delayed_cred_worker();
3892+#endif
3893+
3894 if (!(current->ptrace & PT_PTRACED))
3895 goto out;
3896
3897diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
3898index 374f66e..1c882a0 100644
3899--- a/arch/mips/kernel/scall32-o32.S
3900+++ b/arch/mips/kernel/scall32-o32.S
3901@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
3902
3903 stack_done:
3904 lw t0, TI_FLAGS($28) # syscall tracing enabled?
3905- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3906+ li t1, _TIF_SYSCALL_WORK
3907 and t0, t1
3908 bnez t0, syscall_trace_entry # -> yes
3909
3910diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
3911index 169de6a..f594a89 100644
3912--- a/arch/mips/kernel/scall64-64.S
3913+++ b/arch/mips/kernel/scall64-64.S
3914@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
3915
3916 sd a3, PT_R26(sp) # save a3 for syscall restarting
3917
3918- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3919+ li t1, _TIF_SYSCALL_WORK
3920 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3921 and t0, t1, t0
3922 bnez t0, syscall_trace_entry
3923diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
3924index 86ec03f..1235baf 100644
3925--- a/arch/mips/kernel/scall64-n32.S
3926+++ b/arch/mips/kernel/scall64-n32.S
3927@@ -53,7 +53,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
3928
3929 sd a3, PT_R26(sp) # save a3 for syscall restarting
3930
3931- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3932+ li t1, _TIF_SYSCALL_WORK
3933 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3934 and t0, t1, t0
3935 bnez t0, n32_syscall_trace_entry
3936diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
3937index 53c2d72..3734584 100644
3938--- a/arch/mips/kernel/scall64-o32.S
3939+++ b/arch/mips/kernel/scall64-o32.S
3940@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
3941 PTR 4b, bad_stack
3942 .previous
3943
3944- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3945+ li t1, _TIF_SYSCALL_WORK
3946 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3947 and t0, t1, t0
3948 bnez t0, trace_a_syscall
3949diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
3950index ddcec1e..c7f983e 100644
3951--- a/arch/mips/mm/fault.c
3952+++ b/arch/mips/mm/fault.c
3953@@ -27,6 +27,23 @@
3954 #include <asm/highmem.h> /* For VMALLOC_END */
3955 #include <linux/kdebug.h>
3956
3957+#ifdef CONFIG_PAX_PAGEEXEC
3958+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3959+{
3960+ unsigned long i;
3961+
3962+ printk(KERN_ERR "PAX: bytes at PC: ");
3963+ for (i = 0; i < 5; i++) {
3964+ unsigned int c;
3965+ if (get_user(c, (unsigned int *)pc+i))
3966+ printk(KERN_CONT "???????? ");
3967+ else
3968+ printk(KERN_CONT "%08x ", c);
3969+ }
3970+ printk("\n");
3971+}
3972+#endif
3973+
3974 /*
3975 * This routine handles page faults. It determines the address,
3976 * and the problem, and then passes it off to one of the appropriate
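
pax_report_insns() above (and its twins in the other fault.c hunks) dumps the instruction words at the faulting PC into the kernel log, printing '????????' for words get_user() cannot fetch, so a violation can be triaged from dmesg. A userspace analogue of the loop, with a trivial stand-in for get_user() and arbitrary sample words:

#include <stdio.h>
#include <string.h>

/* stand-in for get_user(): nonzero when the word is unreadable (modelled
 * here by a NULL source; the kernel version survives the page fault) */
static int safe_read(unsigned int *out, const unsigned int *p)
{
    if (!p)
        return -1;
    memcpy(out, p, sizeof(*out));
    return 0;
}

static void report_insns(const unsigned int *pc, int n)
{
    int i;

    fputs("PAX: bytes at PC: ", stdout);
    for (i = 0; i < n; i++) {
        unsigned int c;
        if (safe_read(&c, pc ? pc + i : NULL))
            fputs("???????? ", stdout);
        else
            printf("%08x ", c);
    }
    putchar('\n');
}

int main(void)
{
    /* arbitrary sample words, not a real capture */
    unsigned int words[5] = { 0x3c1c0002, 0x279c8a90, 0x0399e021,
                              0x8f998060, 0x0320f809 };

    report_insns(words, 5);   /* mirrors the 5-word MIPS dump */
    report_insns(NULL, 5);    /* unreadable PC: all question marks */
    return 0;
}
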
3977diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
3978index 302d779..ad1772c 100644
3979--- a/arch/mips/mm/mmap.c
3980+++ b/arch/mips/mm/mmap.c
3981@@ -71,6 +71,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3982 struct vm_area_struct *vma;
3983 unsigned long addr = addr0;
3984 int do_color_align;
3985+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
3986
3987 if (unlikely(len > TASK_SIZE))
3988 return -ENOMEM;
3989@@ -95,6 +96,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3990 do_color_align = 1;
3991
3992 /* requesting a specific address */
3993+
3994+#ifdef CONFIG_PAX_RANDMMAP
3995+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
3996+#endif
3997+
3998 if (addr) {
3999 if (do_color_align)
4000 addr = COLOUR_ALIGN(addr, pgoff);
4001@@ -102,8 +108,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
4002 addr = PAGE_ALIGN(addr);
4003
4004 vma = find_vma(mm, addr);
4005- if (TASK_SIZE - len >= addr &&
4006- (!vma || addr + len <= vma->vm_start))
4007+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4008 return addr;
4009 }
4010
4011@@ -118,7 +123,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
4012 /* At this point: (!vma || addr < vma->vm_end). */
4013 if (TASK_SIZE - len < addr)
4014 return -ENOMEM;
4015- if (!vma || addr + len <= vma->vm_start)
4016+ if (check_heap_stack_gap(vma, addr, len, offset))
4017 return addr;
4018 addr = vma->vm_end;
4019 if (do_color_align)
4020@@ -145,7 +150,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
4021 /* make sure it can fit in the remaining address space */
4022 if (likely(addr > len)) {
4023 vma = find_vma(mm, addr - len);
4024- if (!vma || addr <= vma->vm_start) {
4025+ if (check_heap_stack_gap(vma, addr - len, len, offset)) {
4026 /* cache the address as a hint for next time */
4027 return mm->free_area_cache = addr - len;
4028 }
4029@@ -165,7 +170,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
4030 * return with success:
4031 */
4032 vma = find_vma(mm, addr);
4033- if (likely(!vma || addr + len <= vma->vm_start)) {
4034+ if (check_heap_stack_gap(vma, addr, len, offset)) {
4035 /* cache the address as a hint for next time */
4036 return mm->free_area_cache = addr;
4037 }
4038@@ -242,30 +247,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4039 mm->unmap_area = arch_unmap_area_topdown;
4040 }
4041 }
4042-
4043-static inline unsigned long brk_rnd(void)
4044-{
4045- unsigned long rnd = get_random_int();
4046-
4047- rnd = rnd << PAGE_SHIFT;
4048- /* 8MB for 32bit, 256MB for 64bit */
4049- if (TASK_IS_32BIT_ADDR)
4050- rnd = rnd & 0x7ffffful;
4051- else
4052- rnd = rnd & 0xffffffful;
4053-
4054- return rnd;
4055-}
4056-
4057-unsigned long arch_randomize_brk(struct mm_struct *mm)
4058-{
4059- unsigned long base = mm->brk;
4060- unsigned long ret;
4061-
4062- ret = PAGE_ALIGN(base + brk_rnd());
4063-
4064- if (ret < mm->brk)
4065- return mm->brk;
4066-
4067- return ret;
4068-}
4069diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
4070index 967d144..db12197 100644
4071--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
4072+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
4073@@ -11,12 +11,14 @@
4074 #ifndef _ASM_PROC_CACHE_H
4075 #define _ASM_PROC_CACHE_H
4076
4077+#include <linux/const.h>
4078+
4079 /* L1 cache */
4080
4081 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
4082 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
4083-#define L1_CACHE_BYTES 16 /* bytes per entry */
4084 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
4085+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
4086 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
4087
4088 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
4089diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
4090index bcb5df2..84fabd2 100644
4091--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
4092+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
4093@@ -16,13 +16,15 @@
4094 #ifndef _ASM_PROC_CACHE_H
4095 #define _ASM_PROC_CACHE_H
4096
4097+#include <linux/const.h>
4098+
4099 /*
4100 * L1 cache
4101 */
4102 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
4103 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
4104-#define L1_CACHE_BYTES 32 /* bytes per entry */
4105 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
4106+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
4107 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
4108
4109 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
4110diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
4111index 4ce7a01..449202a 100644
4112--- a/arch/openrisc/include/asm/cache.h
4113+++ b/arch/openrisc/include/asm/cache.h
4114@@ -19,11 +19,13 @@
4115 #ifndef __ASM_OPENRISC_CACHE_H
4116 #define __ASM_OPENRISC_CACHE_H
4117
4118+#include <linux/const.h>
4119+
4120 /* FIXME: How can we replace these with values from the CPU...
4121 * they shouldn't be hard-coded!
4122 */
4123
4124-#define L1_CACHE_BYTES 16
4125 #define L1_CACHE_SHIFT 4
4126+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4127
4128 #endif /* __ASM_OPENRISC_CACHE_H */
4129diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
4130index af9cf30..2aae9b2 100644
4131--- a/arch/parisc/include/asm/atomic.h
4132+++ b/arch/parisc/include/asm/atomic.h
4133@@ -229,6 +229,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
4134
4135 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4136
4137+#define atomic64_read_unchecked(v) atomic64_read(v)
4138+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4139+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4140+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4141+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4142+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4143+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4144+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4145+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4146+
4147 #endif /* !CONFIG_64BIT */
4148
4149
4150diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
4151index 47f11c7..3420df2 100644
4152--- a/arch/parisc/include/asm/cache.h
4153+++ b/arch/parisc/include/asm/cache.h
4154@@ -5,6 +5,7 @@
4155 #ifndef __ARCH_PARISC_CACHE_H
4156 #define __ARCH_PARISC_CACHE_H
4157
4158+#include <linux/const.h>
4159
4160 /*
4161 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
4162@@ -15,13 +16,13 @@
4163 * just ruin performance.
4164 */
4165 #ifdef CONFIG_PA20
4166-#define L1_CACHE_BYTES 64
4167 #define L1_CACHE_SHIFT 6
4168 #else
4169-#define L1_CACHE_BYTES 32
4170 #define L1_CACHE_SHIFT 5
4171 #endif
4172
4173+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4174+
4175 #ifndef __ASSEMBLY__
4176
4177 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4178diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
4179index 19f6cb1..6c78cf2 100644
4180--- a/arch/parisc/include/asm/elf.h
4181+++ b/arch/parisc/include/asm/elf.h
4182@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
4183
4184 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
4185
4186+#ifdef CONFIG_PAX_ASLR
4187+#define PAX_ELF_ET_DYN_BASE 0x10000UL
4188+
4189+#define PAX_DELTA_MMAP_LEN 16
4190+#define PAX_DELTA_STACK_LEN 16
4191+#endif
4192+
4193 /* This yields a mask that user programs can use to figure out what
4194 instruction set this CPU supports. This could be done in user space,
4195 but it's not easy, and we've already done it here. */
4196diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
4197index fc987a1..6e068ef 100644
4198--- a/arch/parisc/include/asm/pgalloc.h
4199+++ b/arch/parisc/include/asm/pgalloc.h
4200@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
4201 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
4202 }
4203
4204+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
4205+{
4206+ pgd_populate(mm, pgd, pmd);
4207+}
4208+
4209 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
4210 {
4211 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
4212@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
4213 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
4214 #define pmd_free(mm, x) do { } while (0)
4215 #define pgd_populate(mm, pmd, pte) BUG()
4216+#define pgd_populate_kernel(mm, pmd, pte) BUG()
4217
4218 #endif
4219
4220diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
4221index ee99f23..802b0a1 100644
4222--- a/arch/parisc/include/asm/pgtable.h
4223+++ b/arch/parisc/include/asm/pgtable.h
4224@@ -212,6 +212,17 @@ struct vm_area_struct;
4225 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
4226 #define PAGE_COPY PAGE_EXECREAD
4227 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
4228+
4229+#ifdef CONFIG_PAX_PAGEEXEC
4230+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
4231+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
4232+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
4233+#else
4234+# define PAGE_SHARED_NOEXEC PAGE_SHARED
4235+# define PAGE_COPY_NOEXEC PAGE_COPY
4236+# define PAGE_READONLY_NOEXEC PAGE_READONLY
4237+#endif
4238+
4239 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
4240 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
4241 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
4242diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
4243index 4ba2c93..f5e3974 100644
4244--- a/arch/parisc/include/asm/uaccess.h
4245+++ b/arch/parisc/include/asm/uaccess.h
4246@@ -251,10 +251,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
4247 const void __user *from,
4248 unsigned long n)
4249 {
4250- int sz = __compiletime_object_size(to);
4251+ size_t sz = __compiletime_object_size(to);
4252 int ret = -EFAULT;
4253
4254- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
4255+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
4256 ret = __copy_from_user(to, from, n);
4257 else
4258 copy_from_user_overflow();
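
The copy_from_user() fix above widens sz from int to size_t: __compiletime_object_size() reports "unknown" as (size_t)-1, and that sentinel has to be compared at its real width once objects larger than INT_MAX are representable. The underlying builtin behaves like this:

#include <stdio.h>
#include <stddef.h>

/* opaque pointer: the allocation is invisible here, so the builtin
 * reports "unknown" as (size_t)-1 */
static size_t __attribute__((noinline)) objsize(const void *p)
{
    return __builtin_object_size(p, 0);
}

int main(void)
{
    char buf[32];
    size_t known   = __builtin_object_size(buf, 0);   /* 32: object visible */
    size_t unknown = objsize(buf);

    printf("known=%zu unknown=%zu\n", known, unknown);
    if (unknown == (size_t)-1)      /* the sentinel, compared at full width */
        puts("size unknown -> fall through to the runtime copy");
    return 0;
}
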
4259diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
4260index 5e34ccf..672bc9c 100644
4261--- a/arch/parisc/kernel/module.c
4262+++ b/arch/parisc/kernel/module.c
4263@@ -98,16 +98,38 @@
4264
4265 /* three functions to determine where in the module core
4266 * or init pieces the location is */
4267+static inline int in_init_rx(struct module *me, void *loc)
4268+{
4269+ return (loc >= me->module_init_rx &&
4270+ loc < (me->module_init_rx + me->init_size_rx));
4271+}
4272+
4273+static inline int in_init_rw(struct module *me, void *loc)
4274+{
4275+ return (loc >= me->module_init_rw &&
4276+ loc < (me->module_init_rw + me->init_size_rw));
4277+}
4278+
4279 static inline int in_init(struct module *me, void *loc)
4280 {
4281- return (loc >= me->module_init &&
4282- loc <= (me->module_init + me->init_size));
4283+ return in_init_rx(me, loc) || in_init_rw(me, loc);
4284+}
4285+
4286+static inline int in_core_rx(struct module *me, void *loc)
4287+{
4288+ return (loc >= me->module_core_rx &&
4289+ loc < (me->module_core_rx + me->core_size_rx));
4290+}
4291+
4292+static inline int in_core_rw(struct module *me, void *loc)
4293+{
4294+ return (loc >= me->module_core_rw &&
4295+ loc < (me->module_core_rw + me->core_size_rw));
4296 }
4297
4298 static inline int in_core(struct module *me, void *loc)
4299 {
4300- return (loc >= me->module_core &&
4301- loc <= (me->module_core + me->core_size));
4302+ return in_core_rx(me, loc) || in_core_rw(me, loc);
4303 }
4304
4305 static inline int in_local(struct module *me, void *loc)
4306@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
4307 }
4308
4309 /* align things a bit */
4310- me->core_size = ALIGN(me->core_size, 16);
4311- me->arch.got_offset = me->core_size;
4312- me->core_size += gots * sizeof(struct got_entry);
4313+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
4314+ me->arch.got_offset = me->core_size_rw;
4315+ me->core_size_rw += gots * sizeof(struct got_entry);
4316
4317- me->core_size = ALIGN(me->core_size, 16);
4318- me->arch.fdesc_offset = me->core_size;
4319- me->core_size += fdescs * sizeof(Elf_Fdesc);
4320+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
4321+ me->arch.fdesc_offset = me->core_size_rw;
4322+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
4323
4324 me->arch.got_max = gots;
4325 me->arch.fdesc_max = fdescs;
4326@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
4327
4328 BUG_ON(value == 0);
4329
4330- got = me->module_core + me->arch.got_offset;
4331+ got = me->module_core_rw + me->arch.got_offset;
4332 for (i = 0; got[i].addr; i++)
4333 if (got[i].addr == value)
4334 goto out;
4335@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
4336 #ifdef CONFIG_64BIT
4337 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
4338 {
4339- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
4340+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
4341
4342 if (!value) {
4343 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
4344@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
4345
4346 /* Create new one */
4347 fdesc->addr = value;
4348- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
4349+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
4350 return (Elf_Addr)fdesc;
4351 }
4352 #endif /* CONFIG_64BIT */
4353@@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
4354
4355 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
4356 end = table + sechdrs[me->arch.unwind_section].sh_size;
4357- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
4358+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
4359
4360 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
4361 me->arch.unwind_section, table, end, gp);
4362diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
4363index f76c108..92bad82 100644
4364--- a/arch/parisc/kernel/sys_parisc.c
4365+++ b/arch/parisc/kernel/sys_parisc.c
4366@@ -33,9 +33,11 @@
4367 #include <linux/utsname.h>
4368 #include <linux/personality.h>
4369
4370-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
4371+static unsigned long get_unshared_area(struct file *filp, unsigned long addr, unsigned long len,
4372+ unsigned long flags)
4373 {
4374 struct vm_area_struct *vma;
4375+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
4376
4377 addr = PAGE_ALIGN(addr);
4378
4379@@ -43,7 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
4380 /* At this point: (!vma || addr < vma->vm_end). */
4381 if (TASK_SIZE - len < addr)
4382 return -ENOMEM;
4383- if (!vma || addr + len <= vma->vm_start)
4384+ if (check_heap_stack_gap(vma, addr, len, offset))
4385 return addr;
4386 addr = vma->vm_end;
4387 }
4388@@ -67,11 +69,12 @@ static int get_offset(struct address_space *mapping)
4389 return offset & 0x3FF000;
4390 }
4391
4392-static unsigned long get_shared_area(struct address_space *mapping,
4393- unsigned long addr, unsigned long len, unsigned long pgoff)
4394+static unsigned long get_shared_area(struct file *filp, struct address_space *mapping,
4395+ unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
4396 {
4397 struct vm_area_struct *vma;
4398 int offset = mapping ? get_offset(mapping) : 0;
4399+ unsigned long rand_offset = gr_rand_threadstack_offset(current->mm, filp, flags);
4400
4401 offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;
4402
4403@@ -81,7 +84,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
4404 /* At this point: (!vma || addr < vma->vm_end). */
4405 if (TASK_SIZE - len < addr)
4406 return -ENOMEM;
4407- if (!vma || addr + len <= vma->vm_start)
4408+ if (check_heap_stack_gap(vma, addr, len, rand_offset))
4409 return addr;
4410 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
4411 if (addr < vma->vm_end) /* handle wraparound */
4412@@ -100,14 +103,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4413 if (flags & MAP_FIXED)
4414 return addr;
4415 if (!addr)
4416- addr = TASK_UNMAPPED_BASE;
4417+ addr = current->mm->mmap_base;
4418
4419 if (filp) {
4420- addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
4421+ addr = get_shared_area(filp, filp->f_mapping, addr, len, pgoff, flags);
4422 } else if(flags & MAP_SHARED) {
4423- addr = get_shared_area(NULL, addr, len, pgoff);
4424+ addr = get_shared_area(filp, NULL, addr, len, pgoff, flags);
4425 } else {
4426- addr = get_unshared_area(addr, len);
4427+ addr = get_unshared_area(filp, addr, len, flags);
4428 }
4429 return addr;
4430 }
4431diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
4432index 45ba99f..8e22c33 100644
4433--- a/arch/parisc/kernel/traps.c
4434+++ b/arch/parisc/kernel/traps.c
4435@@ -732,9 +732,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
4436
4437 down_read(&current->mm->mmap_sem);
4438 vma = find_vma(current->mm,regs->iaoq[0]);
4439- if (vma && (regs->iaoq[0] >= vma->vm_start)
4440- && (vma->vm_flags & VM_EXEC)) {
4441-
4442+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
4443 fault_address = regs->iaoq[0];
4444 fault_space = regs->iasq[0];
4445
4446diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
4447index 18162ce..94de376 100644
4448--- a/arch/parisc/mm/fault.c
4449+++ b/arch/parisc/mm/fault.c
4450@@ -15,6 +15,7 @@
4451 #include <linux/sched.h>
4452 #include <linux/interrupt.h>
4453 #include <linux/module.h>
4454+#include <linux/unistd.h>
4455
4456 #include <asm/uaccess.h>
4457 #include <asm/traps.h>
4458@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
4459 static unsigned long
4460 parisc_acctyp(unsigned long code, unsigned int inst)
4461 {
4462- if (code == 6 || code == 16)
4463+ if (code == 6 || code == 7 || code == 16)
4464 return VM_EXEC;
4465
4466 switch (inst & 0xf0000000) {
4467@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
4468 }
4469 #endif
4470
4471+#ifdef CONFIG_PAX_PAGEEXEC
4472+/*
4473+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
4474+ *
4475+ * returns 1 when task should be killed
4476+ * 2 when rt_sigreturn trampoline was detected
4477+ * 3 when unpatched PLT trampoline was detected
4478+ */
4479+static int pax_handle_fetch_fault(struct pt_regs *regs)
4480+{
4481+
4482+#ifdef CONFIG_PAX_EMUPLT
4483+ int err;
4484+
4485+ do { /* PaX: unpatched PLT emulation */
4486+ unsigned int bl, depwi;
4487+
4488+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
4489+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
4490+
4491+ if (err)
4492+ break;
4493+
4494+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
4495+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
4496+
4497+ err = get_user(ldw, (unsigned int *)addr);
4498+ err |= get_user(bv, (unsigned int *)(addr+4));
4499+ err |= get_user(ldw2, (unsigned int *)(addr+8));
4500+
4501+ if (err)
4502+ break;
4503+
4504+ if (ldw == 0x0E801096U &&
4505+ bv == 0xEAC0C000U &&
4506+ ldw2 == 0x0E881095U)
4507+ {
4508+ unsigned int resolver, map;
4509+
4510+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
4511+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
4512+ if (err)
4513+ break;
4514+
4515+ regs->gr[20] = instruction_pointer(regs)+8;
4516+ regs->gr[21] = map;
4517+ regs->gr[22] = resolver;
4518+ regs->iaoq[0] = resolver | 3UL;
4519+ regs->iaoq[1] = regs->iaoq[0] + 4;
4520+ return 3;
4521+ }
4522+ }
4523+ } while (0);
4524+#endif
4525+
4526+#ifdef CONFIG_PAX_EMUTRAMP
4527+
4528+#ifndef CONFIG_PAX_EMUSIGRT
4529+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
4530+ return 1;
4531+#endif
4532+
4533+ do { /* PaX: rt_sigreturn emulation */
4534+ unsigned int ldi1, ldi2, bel, nop;
4535+
4536+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
4537+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
4538+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
4539+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
4540+
4541+ if (err)
4542+ break;
4543+
4544+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
4545+ ldi2 == 0x3414015AU &&
4546+ bel == 0xE4008200U &&
4547+ nop == 0x08000240U)
4548+ {
4549+ regs->gr[25] = (ldi1 & 2) >> 1;
4550+ regs->gr[20] = __NR_rt_sigreturn;
4551+ regs->gr[31] = regs->iaoq[1] + 16;
4552+ regs->sr[0] = regs->iasq[1];
4553+ regs->iaoq[0] = 0x100UL;
4554+ regs->iaoq[1] = regs->iaoq[0] + 4;
4555+ regs->iasq[0] = regs->sr[2];
4556+ regs->iasq[1] = regs->sr[2];
4557+ return 2;
4558+ }
4559+ } while (0);
4560+#endif
4561+
4562+ return 1;
4563+}
4564+
4565+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4566+{
4567+ unsigned long i;
4568+
4569+ printk(KERN_ERR "PAX: bytes at PC: ");
4570+ for (i = 0; i < 5; i++) {
4571+ unsigned int c;
4572+ if (get_user(c, (unsigned int *)pc+i))
4573+ printk(KERN_CONT "???????? ");
4574+ else
4575+ printk(KERN_CONT "%08x ", c);
4576+ }
4577+ printk("\n");
4578+}
4579+#endif
4580+
4581 int fixup_exception(struct pt_regs *regs)
4582 {
4583 const struct exception_table_entry *fix;
4584@@ -192,8 +303,33 @@ good_area:
4585
4586 acc_type = parisc_acctyp(code,regs->iir);
4587
4588- if ((vma->vm_flags & acc_type) != acc_type)
4589+ if ((vma->vm_flags & acc_type) != acc_type) {
4590+
4591+#ifdef CONFIG_PAX_PAGEEXEC
4592+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
4593+ (address & ~3UL) == instruction_pointer(regs))
4594+ {
4595+ up_read(&mm->mmap_sem);
4596+ switch (pax_handle_fetch_fault(regs)) {
4597+
4598+#ifdef CONFIG_PAX_EMUPLT
4599+ case 3:
4600+ return;
4601+#endif
4602+
4603+#ifdef CONFIG_PAX_EMUTRAMP
4604+ case 2:
4605+ return;
4606+#endif
4607+
4608+ }
4609+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
4610+ do_group_exit(SIGKILL);
4611+ }
4612+#endif
4613+
4614 goto bad_area;
4615+ }
4616
4617 /*
4618 * If for any reason at all we couldn't handle the fault, make
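
pax_handle_fetch_fault() above recognizes two fixed instruction sequences at a non-executable fault address, the unpatched PLT stub and the rt_sigreturn trampoline, and emulates their effect in the kernel instead of allowing the fetch. A compilable skeleton of the recognition step (the register rewriting, the extra words the real PLT check reads at pc-12, and the second ldi variant are elided; the encodings are copied from the hunk):

#include <stdio.h>
#include <string.h>

enum ff_action { FF_KILL = 1, FF_EMUL_SIGRETURN = 2, FF_EMUL_PLT = 3 };

static const unsigned int PLT_SIG[2]    = { 0xEA9F1FDDu, 0xD6801C1Eu };
static const unsigned int SIGRET_SIG[4] = { 0x34190000u, 0x3414015Au,
                                            0xE4008200u, 0x08000240u };

/* get_user() stand-in: "user" code is just a readable buffer here */
static int fetch_insn(unsigned int *insn, const unsigned int *pc)
{
    if (!pc)
        return -1;
    *insn = *pc;
    return 0;
}

static enum ff_action handle_fetch_fault(const unsigned int *pc)
{
    unsigned int w[4];
    int i;

    for (i = 0; i < 4; i++)
        if (fetch_insn(&w[i], pc ? pc + i : NULL))
            return FF_KILL;                       /* unreadable PC */
    if (!memcmp(w, PLT_SIG, sizeof(PLT_SIG)))
        return FF_EMUL_PLT;                       /* hand off to the resolver */
    if (!memcmp(w, SIGRET_SIG, sizeof(SIGRET_SIG)))
        return FF_EMUL_SIGRETURN;                 /* enter rt_sigreturn directly */
    return FF_KILL;                               /* genuine W^X violation */
}

int main(void)
{
    unsigned int plt[4]    = { 0xEA9F1FDDu, 0xD6801C1Eu, 0, 0 };
    unsigned int sigret[4] = { 0x34190000u, 0x3414015Au, 0xE4008200u, 0x08000240u };
    unsigned int other[4]  = { 0, 0, 0, 0 };

    printf("%d %d %d %d\n",
           handle_fetch_fault(plt),       /* 3 */
           handle_fetch_fault(sigret),    /* 2 */
           handle_fetch_fault(other),     /* 1 */
           handle_fetch_fault(NULL));     /* 1 */
    return 0;
}
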
4619diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
4620index e3b1d41..8e81edf 100644
4621--- a/arch/powerpc/include/asm/atomic.h
4622+++ b/arch/powerpc/include/asm/atomic.h
4623@@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
4624 return t1;
4625 }
4626
4627+#define atomic64_read_unchecked(v) atomic64_read(v)
4628+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4629+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4630+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4631+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4632+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4633+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4634+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4635+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4636+
4637 #endif /* __powerpc64__ */
4638
4639 #endif /* __KERNEL__ */
4640diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
4641index 9e495c9..b6878e5 100644
4642--- a/arch/powerpc/include/asm/cache.h
4643+++ b/arch/powerpc/include/asm/cache.h
4644@@ -3,6 +3,7 @@
4645
4646 #ifdef __KERNEL__
4647
4648+#include <linux/const.h>
4649
4650 /* bytes per L1 cache line */
4651 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
4652@@ -22,7 +23,7 @@
4653 #define L1_CACHE_SHIFT 7
4654 #endif
4655
4656-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4657+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4658
4659 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4660
4661diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
4662index 6abf0a1..459d0f1 100644
4663--- a/arch/powerpc/include/asm/elf.h
4664+++ b/arch/powerpc/include/asm/elf.h
4665@@ -28,8 +28,19 @@
4666 the loader. We need to make sure that it is out of the way of the program
4667 that it will "exec", and that there is sufficient room for the brk. */
4668
4669-extern unsigned long randomize_et_dyn(unsigned long base);
4670-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
4671+#define ELF_ET_DYN_BASE (0x20000000)
4672+
4673+#ifdef CONFIG_PAX_ASLR
4674+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
4675+
4676+#ifdef __powerpc64__
4677+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
4678+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
4679+#else
4680+#define PAX_DELTA_MMAP_LEN 15
4681+#define PAX_DELTA_STACK_LEN 15
4682+#endif
4683+#endif
4684
4685 /*
4686 * Our registers are always unsigned longs, whether we're a 32 bit
4687@@ -124,10 +135,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
4688 (0x7ff >> (PAGE_SHIFT - 12)) : \
4689 (0x3ffff >> (PAGE_SHIFT - 12)))
4690
4691-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
4692-#define arch_randomize_brk arch_randomize_brk
4693-
4694-
4695 #ifdef CONFIG_SPU_BASE
4696 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
4697 #define NT_SPU 1
4698diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
4699index 8196e9c..d83a9f3 100644
4700--- a/arch/powerpc/include/asm/exec.h
4701+++ b/arch/powerpc/include/asm/exec.h
4702@@ -4,6 +4,6 @@
4703 #ifndef _ASM_POWERPC_EXEC_H
4704 #define _ASM_POWERPC_EXEC_H
4705
4706-extern unsigned long arch_align_stack(unsigned long sp);
4707+#define arch_align_stack(x) ((x) & ~0xfUL)
4708
4709 #endif /* _ASM_POWERPC_EXEC_H */
4710diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
4711index 5acabbd..7ea14fa 100644
4712--- a/arch/powerpc/include/asm/kmap_types.h
4713+++ b/arch/powerpc/include/asm/kmap_types.h
4714@@ -10,7 +10,7 @@
4715 * 2 of the License, or (at your option) any later version.
4716 */
4717
4718-#define KM_TYPE_NR 16
4719+#define KM_TYPE_NR 17
4720
4721 #endif /* __KERNEL__ */
4722 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
4723diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
4724index 8565c25..2865190 100644
4725--- a/arch/powerpc/include/asm/mman.h
4726+++ b/arch/powerpc/include/asm/mman.h
4727@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
4728 }
4729 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
4730
4731-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
4732+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
4733 {
4734 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
4735 }
4736diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
4737index f072e97..b436dee 100644
4738--- a/arch/powerpc/include/asm/page.h
4739+++ b/arch/powerpc/include/asm/page.h
4740@@ -220,8 +220,9 @@ extern long long virt_phys_offset;
4741 * and needs to be executable. This means the whole heap ends
4742 * up being executable.
4743 */
4744-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
4745- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
4746+#define VM_DATA_DEFAULT_FLAGS32 \
4747+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
4748+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
4749
4750 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
4751 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
4752@@ -249,6 +250,9 @@ extern long long virt_phys_offset;
4753 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
4754 #endif
4755
4756+#define ktla_ktva(addr) (addr)
4757+#define ktva_ktla(addr) (addr)
4758+
4759 /*
4760 * Use the top bit of the higher-level page table entries to indicate whether
4761 * the entries we point to contain hugepages. This works because we know that
4762diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
4763index cd915d6..c10cee8 100644
4764--- a/arch/powerpc/include/asm/page_64.h
4765+++ b/arch/powerpc/include/asm/page_64.h
4766@@ -154,15 +154,18 @@ do { \
4767 * stack by default, so in the absence of a PT_GNU_STACK program header
4768 * we turn execute permission off.
4769 */
4770-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
4771- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
4772+#define VM_STACK_DEFAULT_FLAGS32 \
4773+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
4774+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
4775
4776 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
4777 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
4778
4779+#ifndef CONFIG_PAX_PAGEEXEC
4780 #define VM_STACK_DEFAULT_FLAGS \
4781 (is_32bit_task() ? \
4782 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
4783+#endif
4784
4785 #include <asm-generic/getorder.h>
4786
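
The VM_DATA_DEFAULT_FLAGS32 and VM_STACK_DEFAULT_FLAGS32 changes above trade an unconditional VM_EXEC for one gated on the READ_IMPLIES_EXEC personality bit, which the ELF loader sets only for binaries lacking a PT_GNU_STACK header. The pattern in isolation:

#include <stdio.h>

#define VM_READ   0x1UL
#define VM_WRITE  0x2UL
#define VM_EXEC   0x4UL
#define VM_MAY    0x70UL               /* VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC */
#define READ_IMPLIES_EXEC 0x0400000U   /* from <linux/personality.h> */

static unsigned long vm_data_default_flags32(unsigned int personality)
{
    return ((personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
           | VM_READ | VM_WRITE | VM_MAY;
}

int main(void)
{
    printf("modern binary: %#lx\n", vm_data_default_flags32(0));                 /* no VM_EXEC */
    printf("legacy binary: %#lx\n", vm_data_default_flags32(READ_IMPLIES_EXEC)); /* VM_EXEC set */
    return 0;
}
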
4787diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
4788index 292725c..f87ae14 100644
4789--- a/arch/powerpc/include/asm/pgalloc-64.h
4790+++ b/arch/powerpc/include/asm/pgalloc-64.h
4791@@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
4792 #ifndef CONFIG_PPC_64K_PAGES
4793
4794 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
4795+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
4796
4797 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
4798 {
4799@@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4800 pud_set(pud, (unsigned long)pmd);
4801 }
4802
4803+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4804+{
4805+ pud_populate(mm, pud, pmd);
4806+}
4807+
4808 #define pmd_populate(mm, pmd, pte_page) \
4809 pmd_populate_kernel(mm, pmd, page_address(pte_page))
4810 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
4811@@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4812 #else /* CONFIG_PPC_64K_PAGES */
4813
4814 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
4815+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
4816
4817 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
4818 pte_t *pte)
4819diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
4820index a9cbd3b..3b67efa 100644
4821--- a/arch/powerpc/include/asm/pgtable.h
4822+++ b/arch/powerpc/include/asm/pgtable.h
4823@@ -2,6 +2,7 @@
4824 #define _ASM_POWERPC_PGTABLE_H
4825 #ifdef __KERNEL__
4826
4827+#include <linux/const.h>
4828 #ifndef __ASSEMBLY__
4829 #include <asm/processor.h> /* For TASK_SIZE */
4830 #include <asm/mmu.h>
4831diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
4832index 4aad413..85d86bf 100644
4833--- a/arch/powerpc/include/asm/pte-hash32.h
4834+++ b/arch/powerpc/include/asm/pte-hash32.h
4835@@ -21,6 +21,7 @@
4836 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
4837 #define _PAGE_USER 0x004 /* usermode access allowed */
4838 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
4839+#define _PAGE_EXEC _PAGE_GUARDED
4840 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
4841 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
4842 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
4843diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
4844index d24c141..b60696e 100644
4845--- a/arch/powerpc/include/asm/reg.h
4846+++ b/arch/powerpc/include/asm/reg.h
4847@@ -215,6 +215,7 @@
4848 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
4849 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
4850 #define DSISR_NOHPTE 0x40000000 /* no translation found */
4851+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
4852 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
4853 #define DSISR_ISSTORE 0x02000000 /* access was a store */
4854 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
4855diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
4856index 406b7b9..af63426 100644
4857--- a/arch/powerpc/include/asm/thread_info.h
4858+++ b/arch/powerpc/include/asm/thread_info.h
4859@@ -97,7 +97,6 @@ static inline struct thread_info *current_thread_info(void)
4860 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
4861 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
4862 #define TIF_SINGLESTEP 8 /* singlestepping active */
4863-#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
4864 #define TIF_SECCOMP 10 /* secure computing */
4865 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
4866 #define TIF_NOERROR 12 /* Force successful syscall return */
4867@@ -106,6 +105,9 @@ static inline struct thread_info *current_thread_info(void)
4868 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
4869 #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
4870 for stack store? */
4871+#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
4872+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
4873+#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
4874
4875 /* as above, but as bit values */
4876 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
4877@@ -124,8 +126,10 @@ static inline struct thread_info *current_thread_info(void)
4878 #define _TIF_UPROBE (1<<TIF_UPROBE)
4879 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
4880 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
4881+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
4882 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
4883- _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
4884+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
4885+ _TIF_GRSEC_SETXID)
4886
4887 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
4888 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
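
The hunk above moves TIF_MEMDIE up to bit 17 so that bit 9 can hold TIF_GRSEC_SETXID while every flag tested on the syscall path stays within the low 16 bits, the limit of the immediate field of the PPC `andi.` instruction. A compile-time sketch of that constraint, using the flag values shown in the hunk (TIF_SYSCALL_TRACE is assumed to be 0, as it is not shown):

#include <assert.h>

#define TIF_SYSCALL_TRACE	0	/* assumed; not shown in the hunk */
#define TIF_SYSCALL_AUDIT	7
#define TIF_GRSEC_SETXID	9
#define TIF_SECCOMP		10
#define TIF_SYSCALL_TRACEPOINT	15

#define _TIF_SYSCALL_T_OR_A	((1UL << TIF_SYSCALL_TRACE)  | \
				 (1UL << TIF_SYSCALL_AUDIT)  | \
				 (1UL << TIF_SECCOMP)        | \
				 (1UL << TIF_SYSCALL_TRACEPOINT) | \
				 (1UL << TIF_GRSEC_SETXID))

/* the `andi.` immediate is 16 bits, so the combined mask must stay below 1 << 16 */
static_assert(_TIF_SYSCALL_T_OR_A < (1UL << 16),
	      "syscall-entry mask must fit a 16-bit `andi.` immediate");

int main(void) { return 0; }
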
4889diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
4890index 4db4959..335e00c 100644
4891--- a/arch/powerpc/include/asm/uaccess.h
4892+++ b/arch/powerpc/include/asm/uaccess.h
4893@@ -13,6 +13,8 @@
4894 #define VERIFY_READ 0
4895 #define VERIFY_WRITE 1
4896
4897+extern void check_object_size(const void *ptr, unsigned long n, bool to);
4898+
4899 /*
4900 * The fs value determines whether argument validity checking should be
4901 * performed or not. If get_fs() == USER_DS, checking is performed, with
4902@@ -318,52 +320,6 @@ do { \
4903 extern unsigned long __copy_tofrom_user(void __user *to,
4904 const void __user *from, unsigned long size);
4905
4906-#ifndef __powerpc64__
4907-
4908-static inline unsigned long copy_from_user(void *to,
4909- const void __user *from, unsigned long n)
4910-{
4911- unsigned long over;
4912-
4913- if (access_ok(VERIFY_READ, from, n))
4914- return __copy_tofrom_user((__force void __user *)to, from, n);
4915- if ((unsigned long)from < TASK_SIZE) {
4916- over = (unsigned long)from + n - TASK_SIZE;
4917- return __copy_tofrom_user((__force void __user *)to, from,
4918- n - over) + over;
4919- }
4920- return n;
4921-}
4922-
4923-static inline unsigned long copy_to_user(void __user *to,
4924- const void *from, unsigned long n)
4925-{
4926- unsigned long over;
4927-
4928- if (access_ok(VERIFY_WRITE, to, n))
4929- return __copy_tofrom_user(to, (__force void __user *)from, n);
4930- if ((unsigned long)to < TASK_SIZE) {
4931- over = (unsigned long)to + n - TASK_SIZE;
4932- return __copy_tofrom_user(to, (__force void __user *)from,
4933- n - over) + over;
4934- }
4935- return n;
4936-}
4937-
4938-#else /* __powerpc64__ */
4939-
4940-#define __copy_in_user(to, from, size) \
4941- __copy_tofrom_user((to), (from), (size))
4942-
4943-extern unsigned long copy_from_user(void *to, const void __user *from,
4944- unsigned long n);
4945-extern unsigned long copy_to_user(void __user *to, const void *from,
4946- unsigned long n);
4947-extern unsigned long copy_in_user(void __user *to, const void __user *from,
4948- unsigned long n);
4949-
4950-#endif /* __powerpc64__ */
4951-
4952 static inline unsigned long __copy_from_user_inatomic(void *to,
4953 const void __user *from, unsigned long n)
4954 {
4955@@ -387,6 +343,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
4956 if (ret == 0)
4957 return 0;
4958 }
4959+
4960+ if (!__builtin_constant_p(n))
4961+ check_object_size(to, n, false);
4962+
4963 return __copy_tofrom_user((__force void __user *)to, from, n);
4964 }
4965
4966@@ -413,6 +373,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
4967 if (ret == 0)
4968 return 0;
4969 }
4970+
4971+ if (!__builtin_constant_p(n))
4972+ check_object_size(from, n, true);
4973+
4974 return __copy_tofrom_user(to, (__force const void __user *)from, n);
4975 }
4976
4977@@ -430,6 +394,92 @@ static inline unsigned long __copy_to_user(void __user *to,
4978 return __copy_to_user_inatomic(to, from, size);
4979 }
4980
4981+#ifndef __powerpc64__
4982+
4983+static inline unsigned long __must_check copy_from_user(void *to,
4984+ const void __user *from, unsigned long n)
4985+{
4986+ unsigned long over;
4987+
4988+ if ((long)n < 0)
4989+ return n;
4990+
4991+ if (access_ok(VERIFY_READ, from, n)) {
4992+ if (!__builtin_constant_p(n))
4993+ check_object_size(to, n, false);
4994+ return __copy_tofrom_user((__force void __user *)to, from, n);
4995+ }
4996+ if ((unsigned long)from < TASK_SIZE) {
4997+ over = (unsigned long)from + n - TASK_SIZE;
4998+ if (!__builtin_constant_p(n - over))
4999+ check_object_size(to, n - over, false);
5000+ return __copy_tofrom_user((__force void __user *)to, from,
5001+ n - over) + over;
5002+ }
5003+ return n;
5004+}
5005+
5006+static inline unsigned long __must_check copy_to_user(void __user *to,
5007+ const void *from, unsigned long n)
5008+{
5009+ unsigned long over;
5010+
5011+ if ((long)n < 0)
5012+ return n;
5013+
5014+ if (access_ok(VERIFY_WRITE, to, n)) {
5015+ if (!__builtin_constant_p(n))
5016+ check_object_size(from, n, true);
5017+ return __copy_tofrom_user(to, (__force void __user *)from, n);
5018+ }
5019+ if ((unsigned long)to < TASK_SIZE) {
5020+ over = (unsigned long)to + n - TASK_SIZE;
5021+ if (!__builtin_constant_p(n - over))
5022+ check_object_size(from, n - over, true);
5023+ return __copy_tofrom_user(to, (__force void __user *)from,
5024+ n - over) + over;
5025+ }
5026+ return n;
5027+}
5028+
5029+#else /* __powerpc64__ */
5030+
5031+#define __copy_in_user(to, from, size) \
5032+ __copy_tofrom_user((to), (from), (size))
5033+
5034+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
5035+{
5036+ if ((long)n < 0 || n > INT_MAX)
5037+ return n;
5038+
5039+ if (!__builtin_constant_p(n))
5040+ check_object_size(to, n, false);
5041+
5042+ if (likely(access_ok(VERIFY_READ, from, n)))
5043+ n = __copy_from_user(to, from, n);
5044+ else
5045+ memset(to, 0, n);
5046+ return n;
5047+}
5048+
5049+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
5050+{
5051+ if ((long)n < 0 || n > INT_MAX)
5052+ return n;
5053+
5054+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
5055+ if (!__builtin_constant_p(n))
5056+ check_object_size(from, n, true);
5057+ n = __copy_to_user(to, from, n);
5058+ }
5059+ return n;
5060+}
5061+
5062+extern unsigned long copy_in_user(void __user *to, const void __user *from,
5063+ unsigned long n);
5064+
5065+#endif /* __powerpc64__ */
5066+
5067 extern unsigned long __clear_user(void __user *addr, unsigned long size);
5068
5069 static inline unsigned long clear_user(void __user *addr, unsigned long size)
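
The rewritten copy_{from,to}_user above add two defenses: sizes whose signed interpretation is negative, the typical result of a length underflow, are rejected before any copying, and non-constant sizes are handed to check_object_size() so the copy length can be compared against the kernel object it touches. A minimal userspace sketch of the sign-check idea (bounded_copy() is a hypothetical stand-in, not the kernel routine):

#include <stdio.h>
#include <string.h>

/* hypothetical stand-in for the hardened copy helpers */
static unsigned long bounded_copy(void *to, const void *from, unsigned long n)
{
	if ((long)n < 0)	/* length underflowed somewhere upstream */
		return n;	/* refuse: report everything as uncopied */
	memcpy(to, from, n);
	return 0;
}

int main(void)
{
	char src[16] = "payload", dst[16];
	long claimed = 4, header = 8;
	unsigned long n = (unsigned long)(claimed - header);	/* huge */

	printf("uncopied: %lu\n", bounded_copy(dst, src, n));
	return 0;
}
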
5070diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
5071index 4684e33..acc4d19e 100644
5072--- a/arch/powerpc/kernel/exceptions-64e.S
5073+++ b/arch/powerpc/kernel/exceptions-64e.S
5074@@ -715,6 +715,7 @@ storage_fault_common:
5075 std r14,_DAR(r1)
5076 std r15,_DSISR(r1)
5077 addi r3,r1,STACK_FRAME_OVERHEAD
5078+ bl .save_nvgprs
5079 mr r4,r14
5080 mr r5,r15
5081 ld r14,PACA_EXGEN+EX_R14(r13)
5082@@ -723,8 +724,7 @@ storage_fault_common:
5083 cmpdi r3,0
5084 bne- 1f
5085 b .ret_from_except_lite
5086-1: bl .save_nvgprs
5087- mr r5,r3
5088+1: mr r5,r3
5089 addi r3,r1,STACK_FRAME_OVERHEAD
5090 ld r4,_DAR(r1)
5091 bl .bad_page_fault
5092diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
5093index 10b658a..e542888 100644
5094--- a/arch/powerpc/kernel/exceptions-64s.S
5095+++ b/arch/powerpc/kernel/exceptions-64s.S
5096@@ -1013,10 +1013,10 @@ handle_page_fault:
5097 11: ld r4,_DAR(r1)
5098 ld r5,_DSISR(r1)
5099 addi r3,r1,STACK_FRAME_OVERHEAD
5100+ bl .save_nvgprs
5101 bl .do_page_fault
5102 cmpdi r3,0
5103 beq+ 12f
5104- bl .save_nvgprs
5105 mr r5,r3
5106 addi r3,r1,STACK_FRAME_OVERHEAD
5107 lwz r4,_DAR(r1)
5108diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
5109index 2e3200c..72095ce 100644
5110--- a/arch/powerpc/kernel/module_32.c
5111+++ b/arch/powerpc/kernel/module_32.c
5112@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
5113 me->arch.core_plt_section = i;
5114 }
5115 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
5116- printk("Module doesn't contain .plt or .init.plt sections.\n");
5117+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
5118 return -ENOEXEC;
5119 }
5120
5121@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
5122
5123 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
5124 /* Init, or core PLT? */
5125- if (location >= mod->module_core
5126- && location < mod->module_core + mod->core_size)
5127+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
5128+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
5129 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
5130- else
5131+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
5132+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
5133 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
5134+ else {
5135+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
5136+ return ~0UL;
5137+ }
5138
5139 /* Find this entry, or if that fails, the next avail. entry */
5140 while (entry->jump[0]) {
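
With the module image split into RX (code) and RW (data) halves, the single [module_core, module_core + core_size) containment test no longer works; do_plt_call() must test each region separately, and a relocation that targets neither is now reported instead of silently misfiled. A hypothetical userspace reduction of the per-region test:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct region { const char *base; size_t size; };

static bool in_region(const struct region *r, const char *p)
{
	return p >= r->base && p < r->base + r->size;
}

int main(void)
{
	static char core_rx[64], core_rw[64];	/* stand-ins for the module halves */
	struct region rx = { core_rx, sizeof(core_rx) };
	struct region rw = { core_rw, sizeof(core_rw) };
	const char *loc = core_rw + 8;

	if (in_region(&rx, loc) || in_region(&rw, loc))
		puts("location is in the module core: use the core PLT");
	else
		puts("invalid relocation target");	/* the new error path */
	return 0;
}
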
5141diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
5142index ba48233..16ac31d 100644
5143--- a/arch/powerpc/kernel/process.c
5144+++ b/arch/powerpc/kernel/process.c
5145@@ -680,8 +680,8 @@ void show_regs(struct pt_regs * regs)
5146 * Lookup NIP late so we have the best chance of getting the
5147 * above info out without failing
5148 */
5149- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
5150- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
5151+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
5152+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
5153 #endif
5154 show_stack(current, (unsigned long *) regs->gpr[1]);
5155 if (!user_mode(regs))
5156@@ -1175,10 +1175,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
5157 newsp = stack[0];
5158 ip = stack[STACK_FRAME_LR_SAVE];
5159 if (!firstframe || ip != lr) {
5160- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
5161+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
5162 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5163 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
5164- printk(" (%pS)",
5165+ printk(" (%pA)",
5166 (void *)current->ret_stack[curr_frame].ret);
5167 curr_frame--;
5168 }
5169@@ -1198,7 +1198,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
5170 struct pt_regs *regs = (struct pt_regs *)
5171 (sp + STACK_FRAME_OVERHEAD);
5172 lr = regs->link;
5173- printk("--- Exception: %lx at %pS\n LR = %pS\n",
5174+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
5175 regs->trap, (void *)regs->nip, (void *)lr);
5176 firstframe = 1;
5177 }
5178@@ -1240,58 +1240,3 @@ void __ppc64_runlatch_off(void)
5179 mtspr(SPRN_CTRLT, ctrl);
5180 }
5181 #endif /* CONFIG_PPC64 */
5182-
5183-unsigned long arch_align_stack(unsigned long sp)
5184-{
5185- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5186- sp -= get_random_int() & ~PAGE_MASK;
5187- return sp & ~0xf;
5188-}
5189-
5190-static inline unsigned long brk_rnd(void)
5191-{
5192- unsigned long rnd = 0;
5193-
5194- /* 8MB for 32bit, 1GB for 64bit */
5195- if (is_32bit_task())
5196- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
5197- else
5198- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
5199-
5200- return rnd << PAGE_SHIFT;
5201-}
5202-
5203-unsigned long arch_randomize_brk(struct mm_struct *mm)
5204-{
5205- unsigned long base = mm->brk;
5206- unsigned long ret;
5207-
5208-#ifdef CONFIG_PPC_STD_MMU_64
5209- /*
5210- * If we are using 1TB segments and we are allowed to randomise
5211- * the heap, we can put it above 1TB so it is backed by a 1TB
5212- * segment. Otherwise the heap will be in the bottom 1TB
5213- * which always uses 256MB segments and this may result in a
5214- * performance penalty.
5215- */
5216- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
5217- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
5218-#endif
5219-
5220- ret = PAGE_ALIGN(base + brk_rnd());
5221-
5222- if (ret < mm->brk)
5223- return mm->brk;
5224-
5225- return ret;
5226-}
5227-
5228-unsigned long randomize_et_dyn(unsigned long base)
5229-{
5230- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
5231-
5232- if (ret < base)
5233- return base;
5234-
5235- return ret;
5236-}
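
The deleted helpers are the stock PPC stack/brk randomization, superseded here by PaX's own ASLR. For reference, brk_rnd() drew up to 1<<(23-PAGE_SHIFT) pages of slide for 32-bit tasks and 1<<(30-PAGE_SHIFT) for 64-bit ones; with the usual PAGE_SHIFT of 12 that works out to 8 MiB and 1 GiB respectively, as this arithmetic sketch shows:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

int main(void)
{
	unsigned long pages32 = 1UL << (23 - PAGE_SHIFT);
	unsigned long pages64 = 1UL << (30 - PAGE_SHIFT);

	printf("32-bit: up to %lu pages = %lu MiB of brk slide\n",
	       pages32, (pages32 << PAGE_SHIFT) >> 20);
	printf("64-bit: up to %lu pages = %lu MiB of brk slide\n",
	       pages64, (pages64 << PAGE_SHIFT) >> 20);
	return 0;
}
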
5237diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
5238index 79d8e56..38ffcbb 100644
5239--- a/arch/powerpc/kernel/ptrace.c
5240+++ b/arch/powerpc/kernel/ptrace.c
5241@@ -1663,6 +1663,10 @@ long arch_ptrace(struct task_struct *child, long request,
5242 return ret;
5243 }
5244
5245+#ifdef CONFIG_GRKERNSEC_SETXID
5246+extern void gr_delayed_cred_worker(void);
5247+#endif
5248+
5249 /*
5250 * We must return the syscall number to actually look up in the table.
5251 * This can be -1L to skip running any syscall at all.
5252@@ -1673,6 +1677,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
5253
5254 secure_computing_strict(regs->gpr[0]);
5255
5256+#ifdef CONFIG_GRKERNSEC_SETXID
5257+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5258+ gr_delayed_cred_worker();
5259+#endif
5260+
5261 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
5262 tracehook_report_syscall_entry(regs))
5263 /*
5264@@ -1707,6 +1716,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
5265 {
5266 int step;
5267
5268+#ifdef CONFIG_GRKERNSEC_SETXID
5269+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5270+ gr_delayed_cred_worker();
5271+#endif
5272+
5273 audit_syscall_exit(regs);
5274
5275 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
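
TIF_GRSEC_SETXID implements deferred credential updates: whichever context changes the IDs sets the flag, and the target task applies them at its next syscall boundary via test_and_clear_thread_flag(). A minimal userspace analog of the test-and-clear pattern with C11 atomics (delayed_cred_worker() here is hypothetical):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic _Bool setxid_pending;	/* analog of TIF_GRSEC_SETXID */

static void delayed_cred_worker(void)	/* hypothetical worker */
{
	puts("applying deferred credential change");
}

static void mark_setxid(void)		/* done by the id-changing context */
{
	atomic_store(&setxid_pending, true);
}

static void syscall_boundary(void)	/* done by the target task */
{
	/* atomically read-and-clear, like test_and_clear_thread_flag() */
	if (atomic_exchange(&setxid_pending, false))
		delayed_cred_worker();
}

int main(void)
{
	mark_setxid();
	syscall_boundary();	/* runs the worker once */
	syscall_boundary();	/* flag already clear: nothing to do */
	return 0;
}
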
5276diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
5277index 804e323..79181c1 100644
5278--- a/arch/powerpc/kernel/signal_32.c
5279+++ b/arch/powerpc/kernel/signal_32.c
5280@@ -851,7 +851,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
5281 /* Save user registers on the stack */
5282 frame = &rt_sf->uc.uc_mcontext;
5283 addr = frame;
5284- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
5285+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
5286 if (save_user_regs(regs, frame, 0, 1))
5287 goto badframe;
5288 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
5289diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
5290index d183f87..1867f1a 100644
5291--- a/arch/powerpc/kernel/signal_64.c
5292+++ b/arch/powerpc/kernel/signal_64.c
5293@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
5294 current->thread.fpscr.val = 0;
5295
5296 /* Set up to return from userspace. */
5297- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
5298+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
5299 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
5300 } else {
5301 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
5302diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
5303index 3251840..3f7c77a 100644
5304--- a/arch/powerpc/kernel/traps.c
5305+++ b/arch/powerpc/kernel/traps.c
5306@@ -133,6 +133,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
5307 return flags;
5308 }
5309
5310+extern void gr_handle_kernel_exploit(void);
5311+
5312 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
5313 int signr)
5314 {
5315@@ -182,6 +184,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
5316 panic("Fatal exception in interrupt");
5317 if (panic_on_oops)
5318 panic("Fatal exception");
5319+
5320+ gr_handle_kernel_exploit();
5321+
5322 do_exit(signr);
5323 }
5324
5325diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
5326index 1b2076f..835e4be 100644
5327--- a/arch/powerpc/kernel/vdso.c
5328+++ b/arch/powerpc/kernel/vdso.c
5329@@ -34,6 +34,7 @@
5330 #include <asm/firmware.h>
5331 #include <asm/vdso.h>
5332 #include <asm/vdso_datapage.h>
5333+#include <asm/mman.h>
5334
5335 #include "setup.h"
5336
5337@@ -218,7 +219,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
5338 vdso_base = VDSO32_MBASE;
5339 #endif
5340
5341- current->mm->context.vdso_base = 0;
5342+ current->mm->context.vdso_base = ~0UL;
5343
5344 /* vDSO has a problem and was disabled, just don't "enable" it for the
5345 * process
5346@@ -238,7 +239,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
5347 vdso_base = get_unmapped_area(NULL, vdso_base,
5348 (vdso_pages << PAGE_SHIFT) +
5349 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
5350- 0, 0);
5351+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
5352 if (IS_ERR_VALUE(vdso_base)) {
5353 rc = vdso_base;
5354 goto fail_mmapsem;
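
vdso_base switches from 0 to ~0UL as its "not mapped" sentinel: with mmap randomization in play a mapping at address 0 is at least conceivable, so 0 is ambiguous, while ~0UL can never be a page-aligned base. The signal_32.c/signal_64.c hunks above test against the same sentinel. A sketch of the distinction:

#include <stdio.h>

#define VDSO_UNMAPPED	(~0UL)	/* never a valid page-aligned base */

static unsigned long vdso_base = VDSO_UNMAPPED;

static int have_vdso(void)
{
	return vdso_base != VDSO_UNMAPPED;
}

int main(void)
{
	printf("unmapped: %d\n", have_vdso());	/* 0 */
	vdso_base = 0;		/* hypothetically mapped at address 0 */
	printf("mapped:   %d\n", have_vdso());	/* 1, unlike a == 0 test */
	return 0;
}
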
5355diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
5356index 5eea6f3..5d10396 100644
5357--- a/arch/powerpc/lib/usercopy_64.c
5358+++ b/arch/powerpc/lib/usercopy_64.c
5359@@ -9,22 +9,6 @@
5360 #include <linux/module.h>
5361 #include <asm/uaccess.h>
5362
5363-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
5364-{
5365- if (likely(access_ok(VERIFY_READ, from, n)))
5366- n = __copy_from_user(to, from, n);
5367- else
5368- memset(to, 0, n);
5369- return n;
5370-}
5371-
5372-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5373-{
5374- if (likely(access_ok(VERIFY_WRITE, to, n)))
5375- n = __copy_to_user(to, from, n);
5376- return n;
5377-}
5378-
5379 unsigned long copy_in_user(void __user *to, const void __user *from,
5380 unsigned long n)
5381 {
5382@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
5383 return n;
5384 }
5385
5386-EXPORT_SYMBOL(copy_from_user);
5387-EXPORT_SYMBOL(copy_to_user);
5388 EXPORT_SYMBOL(copy_in_user);
5389
5390diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
5391index 0a6b283..7674925 100644
5392--- a/arch/powerpc/mm/fault.c
5393+++ b/arch/powerpc/mm/fault.c
5394@@ -32,6 +32,10 @@
5395 #include <linux/perf_event.h>
5396 #include <linux/magic.h>
5397 #include <linux/ratelimit.h>
5398+#include <linux/slab.h>
5399+#include <linux/pagemap.h>
5400+#include <linux/compiler.h>
5401+#include <linux/unistd.h>
5402
5403 #include <asm/firmware.h>
5404 #include <asm/page.h>
5405@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
5406 }
5407 #endif
5408
5409+#ifdef CONFIG_PAX_PAGEEXEC
5410+/*
5411+ * PaX: decide what to do with offenders (regs->nip = fault address)
5412+ *
5413+ * returns 1 when task should be killed
5414+ */
5415+static int pax_handle_fetch_fault(struct pt_regs *regs)
5416+{
5417+ return 1;
5418+}
5419+
5420+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5421+{
5422+ unsigned long i;
5423+
5424+ printk(KERN_ERR "PAX: bytes at PC: ");
5425+ for (i = 0; i < 5; i++) {
5426+ unsigned int c;
5427+ if (get_user(c, (unsigned int __user *)pc+i))
5428+ printk(KERN_CONT "???????? ");
5429+ else
5430+ printk(KERN_CONT "%08x ", c);
5431+ }
5432+ printk("\n");
5433+}
5434+#endif
5435+
5436 /*
5437 * Check whether the instruction at regs->nip is a store using
5438 * an update addressing form which will update r1.
5439@@ -216,7 +247,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
5440 * indicate errors in DSISR but can validly be set in SRR1.
5441 */
5442 if (trap == 0x400)
5443- error_code &= 0x48200000;
5444+ error_code &= 0x58200000;
5445 else
5446 is_write = error_code & DSISR_ISSTORE;
5447 #else
5448@@ -367,7 +398,7 @@ good_area:
5449 * "undefined". Of those that can be set, this is the only
5450 * one which seems bad.
5451 */
5452- if (error_code & 0x10000000)
5453+ if (error_code & DSISR_GUARDED)
5454 /* Guarded storage error. */
5455 goto bad_area;
5456 #endif /* CONFIG_8xx */
5457@@ -382,7 +413,7 @@ good_area:
5458 * processors use the same I/D cache coherency mechanism
5459 * as embedded.
5460 */
5461- if (error_code & DSISR_PROTFAULT)
5462+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
5463 goto bad_area;
5464 #endif /* CONFIG_PPC_STD_MMU */
5465
5466@@ -465,6 +496,23 @@ bad_area:
5467 bad_area_nosemaphore:
5468 /* User mode accesses cause a SIGSEGV */
5469 if (user_mode(regs)) {
5470+
5471+#ifdef CONFIG_PAX_PAGEEXEC
5472+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
5473+#ifdef CONFIG_PPC_STD_MMU
5474+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
5475+#else
5476+ if (is_exec && regs->nip == address) {
5477+#endif
5478+ switch (pax_handle_fetch_fault(regs)) {
5479+ }
5480+
5481+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
5482+ do_group_exit(SIGKILL);
5483+ }
5484+ }
5485+#endif
5486+
5487 _exception(SIGSEGV, regs, code, address);
5488 return 0;
5489 }
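
pax_report_insns() above logs the five 32-bit words at the faulting PC via get_user(), printing "????????" for unreadable words. The same hexdump can be reproduced in userspace against one of our own functions; the function-pointer-to-data-pointer conversion below relies on a common extension (flat address space), and the output is of course architecture-specific:

#include <stdio.h>
#include <string.h>

static int probe(void) { return 42; }

int main(void)
{
	int (*fp)(void) = probe;
	const unsigned int *pc;

	memcpy(&pc, &fp, sizeof(pc));	/* function pointer -> data pointer */

	printf("bytes at PC: ");
	for (int i = 0; i < 5; i++)
		printf("%08x ", pc[i]);	/* first five words of our own text */
	printf("\n");
	return 0;
}
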
5490diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
5491index 67a42ed..1c7210c 100644
5492--- a/arch/powerpc/mm/mmap_64.c
5493+++ b/arch/powerpc/mm/mmap_64.c
5494@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5495 */
5496 if (mmap_is_legacy()) {
5497 mm->mmap_base = TASK_UNMAPPED_BASE;
5498+
5499+#ifdef CONFIG_PAX_RANDMMAP
5500+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5501+ mm->mmap_base += mm->delta_mmap;
5502+#endif
5503+
5504 mm->get_unmapped_area = arch_get_unmapped_area;
5505 mm->unmap_area = arch_unmap_area;
5506 } else {
5507 mm->mmap_base = mmap_base();
5508+
5509+#ifdef CONFIG_PAX_RANDMMAP
5510+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5511+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5512+#endif
5513+
5514 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5515 mm->unmap_area = arch_unmap_area_topdown;
5516 }
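
Under PAX_RANDMMAP the bottom-up (legacy) layout shifts its base up by delta_mmap, while the top-down layout shifts down by delta_mmap + delta_stack, so the randomization always moves the base away from fixed regions in the direction the allocator searches. Illustrative arithmetic with made-up bases and deltas:

#include <stdio.h>

int main(void)
{
	unsigned long task_unmapped_base = 0x40000000UL;	/* bottom-up start */
	unsigned long mmap_base_topdown  = 0xb0000000UL;	/* top-down start */
	unsigned long delta_mmap  = 0x01230000UL;		/* randomized */
	unsigned long delta_stack = 0x00045000UL;		/* randomized */

	printf("legacy base:  %#lx\n", task_unmapped_base + delta_mmap);
	printf("topdown base: %#lx\n",
	       mmap_base_topdown - (delta_mmap + delta_stack));
	return 0;
}
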
5517diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
5518index 5829d2a..af84242 100644
5519--- a/arch/powerpc/mm/slice.c
5520+++ b/arch/powerpc/mm/slice.c
5521@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
5522 if ((mm->task_size - len) < addr)
5523 return 0;
5524 vma = find_vma(mm, addr);
5525- return (!vma || (addr + len) <= vma->vm_start);
5526+ return check_heap_stack_gap(vma, addr, len, 0);
5527 }
5528
5529 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
5530@@ -272,7 +272,7 @@ full_search:
5531 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
5532 continue;
5533 }
5534- if (!vma || addr + len <= vma->vm_start) {
5535+ if (check_heap_stack_gap(vma, addr, len, 0)) {
5536 /*
5537 * Remember the place where we stopped the search:
5538 */
5539@@ -329,10 +329,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
5540 }
5541 }
5542
5543- addr = mm->mmap_base;
5544- while (addr > len) {
5545+ if (mm->mmap_base < len)
5546+ addr = -ENOMEM;
5547+ else
5548+ addr = mm->mmap_base - len;
5549+
5550+ while (!IS_ERR_VALUE(addr)) {
5551 /* Go down by chunk size */
5552- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
5553+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
5554
5555 /* Check for hit with different page size */
5556 mask = slice_range_to_mask(addr, len);
5557@@ -352,7 +356,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
5558 * return with success:
5559 */
5560 vma = find_vma(mm, addr);
5561- if (!vma || (addr + len) <= vma->vm_start) {
5562+ if (check_heap_stack_gap(vma, addr, len, 0)) {
5563 /* remember the address as a hint for next time */
5564 if (use_cache)
5565 mm->free_area_cache = addr;
5566@@ -364,7 +368,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
5567 mm->cached_hole_size = vma->vm_start - addr;
5568
5569 /* try just below the current vma->vm_start */
5570- addr = vma->vm_start;
5571+ addr = skip_heap_stack_gap(vma, len, 0);
5572 }
5573
5574 /*
5575@@ -442,6 +446,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
5576 if (fixed && addr > (mm->task_size - len))
5577 return -EINVAL;
5578
5579+#ifdef CONFIG_PAX_RANDMMAP
5580+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
5581+ addr = 0;
5582+#endif
5583+
5584 /* If hint, make sure it matches our alignment restrictions */
5585 if (!fixed && addr) {
5586 addr = _ALIGN_UP(addr, 1ul << pshift);
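
check_heap_stack_gap() replaces the open-coded "!vma || addr + len <= vma->vm_start" test and takes an extra gap argument so a guard area can be enforced before the next VMA (the slice code passes 0). A simplified stand-in with minimal types; the real helper also special-cases growing stacks:

#include <stdbool.h>
#include <stdio.h>

struct vma { unsigned long vm_start, vm_end; };	/* minimal stand-in */

static bool check_heap_stack_gap(const struct vma *vma, unsigned long addr,
				 unsigned long len, unsigned long gap)
{
	if (!vma)
		return true;		/* nothing above the candidate range */
	return addr + len + gap <= vma->vm_start;
}

int main(void)
{
	struct vma stack = { 0x7f0000UL, 0x800000UL };

	/* fits exactly below the vma */
	printf("%d\n", check_heap_stack_gap(&stack, 0x7e0000UL, 0x10000UL, 0));
	/* same range rejected once a one-page guard gap is required */
	printf("%d\n", check_heap_stack_gap(&stack, 0x7e0000UL, 0x10000UL, 0x1000UL));
	return 0;
}
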
5587diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
5588index c797832..ce575c8 100644
5589--- a/arch/s390/include/asm/atomic.h
5590+++ b/arch/s390/include/asm/atomic.h
5591@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
5592 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
5593 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5594
5595+#define atomic64_read_unchecked(v) atomic64_read(v)
5596+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5597+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5598+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5599+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5600+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5601+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5602+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5603+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5604+
5605 #define smp_mb__before_atomic_dec() smp_mb()
5606 #define smp_mb__after_atomic_dec() smp_mb()
5607 #define smp_mb__before_atomic_inc() smp_mb()
5608diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
5609index 4d7ccac..d03d0ad 100644
5610--- a/arch/s390/include/asm/cache.h
5611+++ b/arch/s390/include/asm/cache.h
5612@@ -9,8 +9,10 @@
5613 #ifndef __ARCH_S390_CACHE_H
5614 #define __ARCH_S390_CACHE_H
5615
5616-#define L1_CACHE_BYTES 256
5617+#include <linux/const.h>
5618+
5619 #define L1_CACHE_SHIFT 8
5620+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5621 #define NET_SKB_PAD 32
5622
5623 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
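
Deriving L1_CACHE_BYTES from _AC(1,UL) << L1_CACHE_SHIFT makes the constant usable from both C (where it carries the UL suffix) and assembly (where a suffix would be a syntax error). A reduced version of the linux/const.h mechanism:

#include <stdio.h>

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* bare constant for the assembler */
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)	/* suffixed constant for C */
#endif

#define L1_CACHE_SHIFT	8
#define L1_CACHE_BYTES	(_AC(1, UL) << L1_CACHE_SHIFT)

int main(void)
{
	/* 1UL << 8: an unsigned long, safe in wider expressions too */
	printf("%lu\n", L1_CACHE_BYTES);	/* 256 */
	return 0;
}
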
5624diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
5625index 178ff96..8c93bd1 100644
5626--- a/arch/s390/include/asm/elf.h
5627+++ b/arch/s390/include/asm/elf.h
5628@@ -160,8 +160,14 @@ extern unsigned int vdso_enabled;
5629 the loader. We need to make sure that it is out of the way of the program
5630 that it will "exec", and that there is sufficient room for the brk. */
5631
5632-extern unsigned long randomize_et_dyn(unsigned long base);
5633-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
5634+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
5635+
5636+#ifdef CONFIG_PAX_ASLR
5637+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
5638+
5639+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
5640+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
5641+#endif
5642
5643 /* This yields a mask that user programs can use to figure out what
5644 instruction set this CPU supports. */
5645@@ -210,9 +216,6 @@ struct linux_binprm;
5646 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
5647 int arch_setup_additional_pages(struct linux_binprm *, int);
5648
5649-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
5650-#define arch_randomize_brk arch_randomize_brk
5651-
5652 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
5653
5654 #endif
5655diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
5656index c4a93d6..4d2a9b4 100644
5657--- a/arch/s390/include/asm/exec.h
5658+++ b/arch/s390/include/asm/exec.h
5659@@ -7,6 +7,6 @@
5660 #ifndef __ASM_EXEC_H
5661 #define __ASM_EXEC_H
5662
5663-extern unsigned long arch_align_stack(unsigned long sp);
5664+#define arch_align_stack(x) ((x) & ~0xfUL)
5665
5666 #endif /* __ASM_EXEC_H */
5667diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
5668index 34268df..ea97318 100644
5669--- a/arch/s390/include/asm/uaccess.h
5670+++ b/arch/s390/include/asm/uaccess.h
5671@@ -252,6 +252,10 @@ static inline unsigned long __must_check
5672 copy_to_user(void __user *to, const void *from, unsigned long n)
5673 {
5674 might_fault();
5675+
5676+ if ((long)n < 0)
5677+ return n;
5678+
5679 if (access_ok(VERIFY_WRITE, to, n))
5680 n = __copy_to_user(to, from, n);
5681 return n;
5682@@ -277,6 +281,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
5683 static inline unsigned long __must_check
5684 __copy_from_user(void *to, const void __user *from, unsigned long n)
5685 {
5686+ if ((long)n < 0)
5687+ return n;
5688+
5689 if (__builtin_constant_p(n) && (n <= 256))
5690 return uaccess.copy_from_user_small(n, from, to);
5691 else
5692@@ -308,10 +315,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
5693 static inline unsigned long __must_check
5694 copy_from_user(void *to, const void __user *from, unsigned long n)
5695 {
5696- unsigned int sz = __compiletime_object_size(to);
5697+ size_t sz = __compiletime_object_size(to);
5698
5699 might_fault();
5700- if (unlikely(sz != -1 && sz < n)) {
5701+
5702+ if ((long)n < 0)
5703+ return n;
5704+
5705+ if (unlikely(sz != (size_t)-1 && sz < n)) {
5706 copy_from_user_overflow();
5707 return n;
5708 }
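
__compiletime_object_size() resolves to __builtin_object_size(), whose "unknown" result is (size_t)-1; storing it in an unsigned int, as the old code did, truncates that sentinel on 64-bit, hence the change to size_t above. A small demonstration of the builtin's two outcomes:

#include <stdio.h>

static char buf[16];

__attribute__((noinline)) static unsigned long probe(const void *p)
{
	/* size not derivable from an opaque pointer: returns (size_t)-1 */
	return __builtin_object_size(p, 0);
}

int main(void)
{
	printf("known:   %lu\n",
	       (unsigned long)__builtin_object_size(buf, 0));	/* 16 */
	printf("unknown: %ld\n", (long)probe(buf));		/* -1 */
	return 0;
}
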
5709diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
5710index 4610dea..cf0af21 100644
5711--- a/arch/s390/kernel/module.c
5712+++ b/arch/s390/kernel/module.c
5713@@ -171,11 +171,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
5714
5715 /* Increase core size by size of got & plt and set start
5716 offsets for got and plt. */
5717- me->core_size = ALIGN(me->core_size, 4);
5718- me->arch.got_offset = me->core_size;
5719- me->core_size += me->arch.got_size;
5720- me->arch.plt_offset = me->core_size;
5721- me->core_size += me->arch.plt_size;
5722+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
5723+ me->arch.got_offset = me->core_size_rw;
5724+ me->core_size_rw += me->arch.got_size;
5725+ me->arch.plt_offset = me->core_size_rx;
5726+ me->core_size_rx += me->arch.plt_size;
5727 return 0;
5728 }
5729
5730@@ -252,7 +252,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
5731 if (info->got_initialized == 0) {
5732 Elf_Addr *gotent;
5733
5734- gotent = me->module_core + me->arch.got_offset +
5735+ gotent = me->module_core_rw + me->arch.got_offset +
5736 info->got_offset;
5737 *gotent = val;
5738 info->got_initialized = 1;
5739@@ -276,7 +276,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
5740 else if (r_type == R_390_GOTENT ||
5741 r_type == R_390_GOTPLTENT)
5742 *(unsigned int *) loc =
5743- (val + (Elf_Addr) me->module_core - loc) >> 1;
5744+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
5745 else if (r_type == R_390_GOT64 ||
5746 r_type == R_390_GOTPLT64)
5747 *(unsigned long *) loc = val;
5748@@ -290,7 +290,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
5749 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
5750 if (info->plt_initialized == 0) {
5751 unsigned int *ip;
5752- ip = me->module_core + me->arch.plt_offset +
5753+ ip = me->module_core_rx + me->arch.plt_offset +
5754 info->plt_offset;
5755 #ifndef CONFIG_64BIT
5756 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
5757@@ -315,7 +315,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
5758 val - loc + 0xffffUL < 0x1ffffeUL) ||
5759 (r_type == R_390_PLT32DBL &&
5760 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
5761- val = (Elf_Addr) me->module_core +
5762+ val = (Elf_Addr) me->module_core_rx +
5763 me->arch.plt_offset +
5764 info->plt_offset;
5765 val += rela->r_addend - loc;
5766@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
5767 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
5768 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
5769 val = val + rela->r_addend -
5770- ((Elf_Addr) me->module_core + me->arch.got_offset);
5771+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
5772 if (r_type == R_390_GOTOFF16)
5773 *(unsigned short *) loc = val;
5774 else if (r_type == R_390_GOTOFF32)
5775@@ -347,7 +347,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
5776 break;
5777 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
5778 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
5779- val = (Elf_Addr) me->module_core + me->arch.got_offset +
5780+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
5781 rela->r_addend - loc;
5782 if (r_type == R_390_GOTPC)
5783 *(unsigned int *) loc = val;
5784diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
5785index cd31ad4..201c5a3 100644
5786--- a/arch/s390/kernel/process.c
5787+++ b/arch/s390/kernel/process.c
5788@@ -283,39 +283,3 @@ unsigned long get_wchan(struct task_struct *p)
5789 }
5790 return 0;
5791 }
5792-
5793-unsigned long arch_align_stack(unsigned long sp)
5794-{
5795- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5796- sp -= get_random_int() & ~PAGE_MASK;
5797- return sp & ~0xf;
5798-}
5799-
5800-static inline unsigned long brk_rnd(void)
5801-{
5802- /* 8MB for 32bit, 1GB for 64bit */
5803- if (is_32bit_task())
5804- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
5805- else
5806- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
5807-}
5808-
5809-unsigned long arch_randomize_brk(struct mm_struct *mm)
5810-{
5811- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
5812-
5813- if (ret < mm->brk)
5814- return mm->brk;
5815- return ret;
5816-}
5817-
5818-unsigned long randomize_et_dyn(unsigned long base)
5819-{
5820- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
5821-
5822- if (!(current->flags & PF_RANDOMIZE))
5823- return base;
5824- if (ret < base)
5825- return base;
5826- return ret;
5827-}
5828diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
5829index c59a5ef..3fae59c 100644
5830--- a/arch/s390/mm/mmap.c
5831+++ b/arch/s390/mm/mmap.c
5832@@ -90,10 +90,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5833 */
5834 if (mmap_is_legacy()) {
5835 mm->mmap_base = TASK_UNMAPPED_BASE;
5836+
5837+#ifdef CONFIG_PAX_RANDMMAP
5838+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5839+ mm->mmap_base += mm->delta_mmap;
5840+#endif
5841+
5842 mm->get_unmapped_area = arch_get_unmapped_area;
5843 mm->unmap_area = arch_unmap_area;
5844 } else {
5845 mm->mmap_base = mmap_base();
5846+
5847+#ifdef CONFIG_PAX_RANDMMAP
5848+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5849+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5850+#endif
5851+
5852 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5853 mm->unmap_area = arch_unmap_area_topdown;
5854 }
5855@@ -172,10 +184,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5856 */
5857 if (mmap_is_legacy()) {
5858 mm->mmap_base = TASK_UNMAPPED_BASE;
5859+
5860+#ifdef CONFIG_PAX_RANDMMAP
5861+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5862+ mm->mmap_base += mm->delta_mmap;
5863+#endif
5864+
5865 mm->get_unmapped_area = s390_get_unmapped_area;
5866 mm->unmap_area = arch_unmap_area;
5867 } else {
5868 mm->mmap_base = mmap_base();
5869+
5870+#ifdef CONFIG_PAX_RANDMMAP
5871+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5872+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5873+#endif
5874+
5875 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
5876 mm->unmap_area = arch_unmap_area_topdown;
5877 }
5878diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
5879index ae3d59f..f65f075 100644
5880--- a/arch/score/include/asm/cache.h
5881+++ b/arch/score/include/asm/cache.h
5882@@ -1,7 +1,9 @@
5883 #ifndef _ASM_SCORE_CACHE_H
5884 #define _ASM_SCORE_CACHE_H
5885
5886+#include <linux/const.h>
5887+
5888 #define L1_CACHE_SHIFT 4
5889-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5890+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5891
5892 #endif /* _ASM_SCORE_CACHE_H */
5893diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
5894index f9f3cd5..58ff438 100644
5895--- a/arch/score/include/asm/exec.h
5896+++ b/arch/score/include/asm/exec.h
5897@@ -1,6 +1,6 @@
5898 #ifndef _ASM_SCORE_EXEC_H
5899 #define _ASM_SCORE_EXEC_H
5900
5901-extern unsigned long arch_align_stack(unsigned long sp);
5902+#define arch_align_stack(x) (x)
5903
5904 #endif /* _ASM_SCORE_EXEC_H */
5905diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
5906index 637970c..0b6556b 100644
5907--- a/arch/score/kernel/process.c
5908+++ b/arch/score/kernel/process.c
5909@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
5910
5911 return task_pt_regs(task)->cp0_epc;
5912 }
5913-
5914-unsigned long arch_align_stack(unsigned long sp)
5915-{
5916- return sp;
5917-}
5918diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
5919index ef9e555..331bd29 100644
5920--- a/arch/sh/include/asm/cache.h
5921+++ b/arch/sh/include/asm/cache.h
5922@@ -9,10 +9,11 @@
5923 #define __ASM_SH_CACHE_H
5924 #ifdef __KERNEL__
5925
5926+#include <linux/const.h>
5927 #include <linux/init.h>
5928 #include <cpu/cache.h>
5929
5930-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5931+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5932
5933 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
5934
5935diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
5936index afeb710..e8366ef 100644
5937--- a/arch/sh/mm/mmap.c
5938+++ b/arch/sh/mm/mmap.c
5939@@ -49,6 +49,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
5940 struct vm_area_struct *vma;
5941 unsigned long start_addr;
5942 int do_colour_align;
5943+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5944
5945 if (flags & MAP_FIXED) {
5946 /* We do not accept a shared mapping if it would violate
5947@@ -74,8 +75,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
5948 addr = PAGE_ALIGN(addr);
5949
5950 vma = find_vma(mm, addr);
5951- if (TASK_SIZE - len >= addr &&
5952- (!vma || addr + len <= vma->vm_start))
5953+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5954 return addr;
5955 }
5956
5957@@ -106,7 +106,7 @@ full_search:
5958 }
5959 return -ENOMEM;
5960 }
5961- if (likely(!vma || addr + len <= vma->vm_start)) {
5962+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
5963 /*
5964 * Remember the place where we stopped the search:
5965 */
5966@@ -131,6 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5967 struct mm_struct *mm = current->mm;
5968 unsigned long addr = addr0;
5969 int do_colour_align;
5970+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5971
5972 if (flags & MAP_FIXED) {
5973 /* We do not accept a shared mapping if it would violate
5974@@ -157,8 +158,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5975 addr = PAGE_ALIGN(addr);
5976
5977 vma = find_vma(mm, addr);
5978- if (TASK_SIZE - len >= addr &&
5979- (!vma || addr + len <= vma->vm_start))
5980+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5981 return addr;
5982 }
5983
5984@@ -179,7 +179,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5985 /* make sure it can fit in the remaining address space */
5986 if (likely(addr > len)) {
5987 vma = find_vma(mm, addr-len);
5988- if (!vma || addr <= vma->vm_start) {
5989+ if (check_heap_stack_gap(vma, addr - len, len, offset)) {
5990 /* remember the address as a hint for next time */
5991 return (mm->free_area_cache = addr-len);
5992 }
5993@@ -188,18 +188,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5994 if (unlikely(mm->mmap_base < len))
5995 goto bottomup;
5996
5997- addr = mm->mmap_base-len;
5998- if (do_colour_align)
5999- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6000+ addr = mm->mmap_base - len;
6001
6002 do {
6003+ if (do_colour_align)
6004+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6005 /*
6006 * Lookup failure means no vma is above this address,
6007 * else if new region fits below vma->vm_start,
6008 * return with success:
6009 */
6010 vma = find_vma(mm, addr);
6011- if (likely(!vma || addr+len <= vma->vm_start)) {
6012+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
6013 /* remember the address as a hint for next time */
6014 return (mm->free_area_cache = addr);
6015 }
6016@@ -209,10 +209,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6017 mm->cached_hole_size = vma->vm_start - addr;
6018
6019 /* try just below the current vma->vm_start */
6020- addr = vma->vm_start-len;
6021- if (do_colour_align)
6022- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6023- } while (likely(len < vma->vm_start));
6024+ addr = skip_heap_stack_gap(vma, len, offset);
6025+ } while (!IS_ERR_VALUE(addr));
6026
6027 bottomup:
6028 /*
6029diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
6030index be56a24..443328f 100644
6031--- a/arch/sparc/include/asm/atomic_64.h
6032+++ b/arch/sparc/include/asm/atomic_64.h
6033@@ -14,18 +14,40 @@
6034 #define ATOMIC64_INIT(i) { (i) }
6035
6036 #define atomic_read(v) (*(volatile int *)&(v)->counter)
6037+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6038+{
6039+ return v->counter;
6040+}
6041 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
6042+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6043+{
6044+ return v->counter;
6045+}
6046
6047 #define atomic_set(v, i) (((v)->counter) = i)
6048+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6049+{
6050+ v->counter = i;
6051+}
6052 #define atomic64_set(v, i) (((v)->counter) = i)
6053+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6054+{
6055+ v->counter = i;
6056+}
6057
6058 extern void atomic_add(int, atomic_t *);
6059+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
6060 extern void atomic64_add(long, atomic64_t *);
6061+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
6062 extern void atomic_sub(int, atomic_t *);
6063+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
6064 extern void atomic64_sub(long, atomic64_t *);
6065+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
6066
6067 extern int atomic_add_ret(int, atomic_t *);
6068+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
6069 extern long atomic64_add_ret(long, atomic64_t *);
6070+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
6071 extern int atomic_sub_ret(int, atomic_t *);
6072 extern long atomic64_sub_ret(long, atomic64_t *);
6073
6074@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
6075 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
6076
6077 #define atomic_inc_return(v) atomic_add_ret(1, v)
6078+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6079+{
6080+ return atomic_add_ret_unchecked(1, v);
6081+}
6082 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
6083+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6084+{
6085+ return atomic64_add_ret_unchecked(1, v);
6086+}
6087
6088 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
6089 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
6090
6091 #define atomic_add_return(i, v) atomic_add_ret(i, v)
6092+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6093+{
6094+ return atomic_add_ret_unchecked(i, v);
6095+}
6096 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
6097+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6098+{
6099+ return atomic64_add_ret_unchecked(i, v);
6100+}
6101
6102 /*
6103 * atomic_inc_and_test - increment and test
6104@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
6105 * other cases.
6106 */
6107 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
6108+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6109+{
6110+ return atomic_inc_return_unchecked(v) == 0;
6111+}
6112 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6113
6114 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
6115@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
6116 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
6117
6118 #define atomic_inc(v) atomic_add(1, v)
6119+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6120+{
6121+ atomic_add_unchecked(1, v);
6122+}
6123 #define atomic64_inc(v) atomic64_add(1, v)
6124+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6125+{
6126+ atomic64_add_unchecked(1, v);
6127+}
6128
6129 #define atomic_dec(v) atomic_sub(1, v)
6130+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6131+{
6132+ atomic_sub_unchecked(1, v);
6133+}
6134 #define atomic64_dec(v) atomic64_sub(1, v)
6135+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6136+{
6137+ atomic64_sub_unchecked(1, v);
6138+}
6139
6140 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
6141 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
6142
6143 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
6144+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6145+{
6146+ return cmpxchg(&v->counter, old, new);
6147+}
6148 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
6149+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6150+{
6151+ return xchg(&v->counter, new);
6152+}
6153
6154 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
6155 {
6156- int c, old;
6157+ int c, old, new;
6158 c = atomic_read(v);
6159 for (;;) {
6160- if (unlikely(c == (u)))
6161+ if (unlikely(c == u))
6162 break;
6163- old = atomic_cmpxchg((v), c, c + (a));
6164+
6165+ asm volatile("addcc %2, %0, %0\n"
6166+
6167+#ifdef CONFIG_PAX_REFCOUNT
6168+ "tvs %%icc, 6\n"
6169+#endif
6170+
6171+ : "=r" (new)
6172+ : "0" (c), "ir" (a)
6173+ : "cc");
6174+
6175+ old = atomic_cmpxchg(v, c, new);
6176 if (likely(old == c))
6177 break;
6178 c = old;
6179@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
6180 #define atomic64_cmpxchg(v, o, n) \
6181 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6182 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
6183+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6184+{
6185+ return xchg(&v->counter, new);
6186+}
6187
6188 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
6189 {
6190- long c, old;
6191+ long c, old, new;
6192 c = atomic64_read(v);
6193 for (;;) {
6194- if (unlikely(c == (u)))
6195+ if (unlikely(c == u))
6196 break;
6197- old = atomic64_cmpxchg((v), c, c + (a));
6198+
6199+ asm volatile("addcc %2, %0, %0\n"
6200+
6201+#ifdef CONFIG_PAX_REFCOUNT
6202+ "tvs %%xcc, 6\n"
6203+#endif
6204+
6205+ : "=r" (new)
6206+ : "0" (c), "ir" (a)
6207+ : "cc");
6208+
6209+ old = atomic64_cmpxchg(v, c, new);
6210 if (likely(old == c))
6211 break;
6212 c = old;
6213 }
6214- return c != (u);
6215+ return c != u;
6216 }
6217
6218 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
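
The addcc/tvs pairs above implement PAX_REFCOUNT: the add sets the condition codes and "tvs %icc, 6" traps the task if the signed result overflowed, so a reference count can never silently wrap around. A portable sketch of the same checked-add idea using a compiler builtin; the kernel traps rather than aborting, and refcount_add() here is only an illustration:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int refcount_add(int a, int *counter)
{
	int new;

	/* GCC/Clang builtin: true if the signed add wrapped */
	if (__builtin_add_overflow(*counter, a, &new))
		abort();	/* analog of the `tvs %icc, 6` trap */
	*counter = new;
	return new;
}

int main(void)
{
	int refs = INT_MAX - 1;

	printf("%d\n", refcount_add(1, &refs));	/* INT_MAX: still fine */
	refcount_add(1, &refs);			/* would wrap: aborts */
	return 0;
}
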
6219diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
6220index 5bb6991..5c2132e 100644
6221--- a/arch/sparc/include/asm/cache.h
6222+++ b/arch/sparc/include/asm/cache.h
6223@@ -7,10 +7,12 @@
6224 #ifndef _SPARC_CACHE_H
6225 #define _SPARC_CACHE_H
6226
6227+#include <linux/const.h>
6228+
6229 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
6230
6231 #define L1_CACHE_SHIFT 5
6232-#define L1_CACHE_BYTES 32
6233+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6234
6235 #ifdef CONFIG_SPARC32
6236 #define SMP_CACHE_BYTES_SHIFT 5
6237diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
6238index ac74a2c..a9e58af 100644
6239--- a/arch/sparc/include/asm/elf_32.h
6240+++ b/arch/sparc/include/asm/elf_32.h
6241@@ -114,6 +114,13 @@ typedef struct {
6242
6243 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
6244
6245+#ifdef CONFIG_PAX_ASLR
6246+#define PAX_ELF_ET_DYN_BASE 0x10000UL
6247+
6248+#define PAX_DELTA_MMAP_LEN 16
6249+#define PAX_DELTA_STACK_LEN 16
6250+#endif
6251+
6252 /* This yields a mask that user programs can use to figure out what
6253 instruction set this cpu supports. This can NOT be done in userspace
6254 on Sparc. */
6255diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
6256index 370ca1e..d4f4a98 100644
6257--- a/arch/sparc/include/asm/elf_64.h
6258+++ b/arch/sparc/include/asm/elf_64.h
6259@@ -189,6 +189,13 @@ typedef struct {
6260 #define ELF_ET_DYN_BASE 0x0000010000000000UL
6261 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
6262
6263+#ifdef CONFIG_PAX_ASLR
6264+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
6265+
6266+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
6267+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
6268+#endif
6269+
6270 extern unsigned long sparc64_elf_hwcap;
6271 #define ELF_HWCAP sparc64_elf_hwcap
6272
6273diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
6274index 9b1c36d..209298b 100644
6275--- a/arch/sparc/include/asm/pgalloc_32.h
6276+++ b/arch/sparc/include/asm/pgalloc_32.h
6277@@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
6278 }
6279
6280 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
6281+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
6282
6283 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
6284 unsigned long address)
6285diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
6286index bcfe063..b333142 100644
6287--- a/arch/sparc/include/asm/pgalloc_64.h
6288+++ b/arch/sparc/include/asm/pgalloc_64.h
6289@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
6290 }
6291
6292 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
6293+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
6294
6295 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
6296 {
6297diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
6298index 6fc1348..390c50a 100644
6299--- a/arch/sparc/include/asm/pgtable_32.h
6300+++ b/arch/sparc/include/asm/pgtable_32.h
6301@@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
6302 #define PAGE_SHARED SRMMU_PAGE_SHARED
6303 #define PAGE_COPY SRMMU_PAGE_COPY
6304 #define PAGE_READONLY SRMMU_PAGE_RDONLY
6305+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
6306+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
6307+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
6308 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
6309
6310 /* Top-level page directory - dummy used by init-mm.
6311@@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd;
6312
6313 /* xwr */
6314 #define __P000 PAGE_NONE
6315-#define __P001 PAGE_READONLY
6316-#define __P010 PAGE_COPY
6317-#define __P011 PAGE_COPY
6318+#define __P001 PAGE_READONLY_NOEXEC
6319+#define __P010 PAGE_COPY_NOEXEC
6320+#define __P011 PAGE_COPY_NOEXEC
6321 #define __P100 PAGE_READONLY
6322 #define __P101 PAGE_READONLY
6323 #define __P110 PAGE_COPY
6324 #define __P111 PAGE_COPY
6325
6326 #define __S000 PAGE_NONE
6327-#define __S001 PAGE_READONLY
6328-#define __S010 PAGE_SHARED
6329-#define __S011 PAGE_SHARED
6330+#define __S001 PAGE_READONLY_NOEXEC
6331+#define __S010 PAGE_SHARED_NOEXEC
6332+#define __S011 PAGE_SHARED_NOEXEC
6333 #define __S100 PAGE_READONLY
6334 #define __S101 PAGE_READONLY
6335 #define __S110 PAGE_SHARED
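
The __P/__S tables are indexed by the mmap protection bits in x-w-r order (__Pxwr for private mappings, __Sxwr for shared); the patch redirects every row with the x bit clear to a *_NOEXEC protection, which drops SRMMU_EXEC per the pgtsrmmu.h hunk below. Decoding the index, for illustration:

#include <stdio.h>

int main(void)
{
	/* one name per index, mirroring the patched __P table */
	static const char *prot[8] = {
		"PAGE_NONE",		/* __P000: --- */
		"PAGE_READONLY_NOEXEC",	/* __P001: r-- */
		"PAGE_COPY_NOEXEC",	/* __P010: -w- */
		"PAGE_COPY_NOEXEC",	/* __P011: rw- */
		"PAGE_READONLY",	/* __P100: --x */
		"PAGE_READONLY",	/* __P101: r-x */
		"PAGE_COPY",		/* __P110: -wx */
		"PAGE_COPY",		/* __P111: rwx */
	};

	for (int xwr = 0; xwr < 8; xwr++)
		printf("__P%d%d%d (%c%c%c) -> %s\n",
		       (xwr >> 2) & 1, (xwr >> 1) & 1, xwr & 1,
		       xwr & 1 ? 'r' : '-', xwr & 2 ? 'w' : '-',
		       xwr & 4 ? 'x' : '-', prot[xwr]);
	return 0;
}
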
6336diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
6337index 79da178..c2eede8 100644
6338--- a/arch/sparc/include/asm/pgtsrmmu.h
6339+++ b/arch/sparc/include/asm/pgtsrmmu.h
6340@@ -115,6 +115,11 @@
6341 SRMMU_EXEC | SRMMU_REF)
6342 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
6343 SRMMU_EXEC | SRMMU_REF)
6344+
6345+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
6346+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
6347+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
6348+
6349 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
6350 SRMMU_DIRTY | SRMMU_REF)
6351
6352diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
6353index 9689176..63c18ea 100644
6354--- a/arch/sparc/include/asm/spinlock_64.h
6355+++ b/arch/sparc/include/asm/spinlock_64.h
6356@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
6357
6358 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
6359
6360-static void inline arch_read_lock(arch_rwlock_t *lock)
6361+static inline void arch_read_lock(arch_rwlock_t *lock)
6362 {
6363 unsigned long tmp1, tmp2;
6364
6365 __asm__ __volatile__ (
6366 "1: ldsw [%2], %0\n"
6367 " brlz,pn %0, 2f\n"
6368-"4: add %0, 1, %1\n"
6369+"4: addcc %0, 1, %1\n"
6370+
6371+#ifdef CONFIG_PAX_REFCOUNT
6372+" tvs %%icc, 6\n"
6373+#endif
6374+
6375 " cas [%2], %0, %1\n"
6376 " cmp %0, %1\n"
6377 " bne,pn %%icc, 1b\n"
6378@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
6379 " .previous"
6380 : "=&r" (tmp1), "=&r" (tmp2)
6381 : "r" (lock)
6382- : "memory");
6383+ : "memory", "cc");
6384 }
6385
6386-static int inline arch_read_trylock(arch_rwlock_t *lock)
6387+static inline int arch_read_trylock(arch_rwlock_t *lock)
6388 {
6389 int tmp1, tmp2;
6390
6391@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
6392 "1: ldsw [%2], %0\n"
6393 " brlz,a,pn %0, 2f\n"
6394 " mov 0, %0\n"
6395-" add %0, 1, %1\n"
6396+" addcc %0, 1, %1\n"
6397+
6398+#ifdef CONFIG_PAX_REFCOUNT
6399+" tvs %%icc, 6\n"
6400+#endif
6401+
6402 " cas [%2], %0, %1\n"
6403 " cmp %0, %1\n"
6404 " bne,pn %%icc, 1b\n"
6405@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
6406 return tmp1;
6407 }
6408
6409-static void inline arch_read_unlock(arch_rwlock_t *lock)
6410+static inline void arch_read_unlock(arch_rwlock_t *lock)
6411 {
6412 unsigned long tmp1, tmp2;
6413
6414 __asm__ __volatile__(
6415 "1: lduw [%2], %0\n"
6416-" sub %0, 1, %1\n"
6417+" subcc %0, 1, %1\n"
6418+
6419+#ifdef CONFIG_PAX_REFCOUNT
6420+" tvs %%icc, 6\n"
6421+#endif
6422+
6423 " cas [%2], %0, %1\n"
6424 " cmp %0, %1\n"
6425 " bne,pn %%xcc, 1b\n"
6426@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
6427 : "memory");
6428 }
6429
6430-static void inline arch_write_lock(arch_rwlock_t *lock)
6431+static inline void arch_write_lock(arch_rwlock_t *lock)
6432 {
6433 unsigned long mask, tmp1, tmp2;
6434
6435@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
6436 : "memory");
6437 }
6438
6439-static void inline arch_write_unlock(arch_rwlock_t *lock)
6440+static inline void arch_write_unlock(arch_rwlock_t *lock)
6441 {
6442 __asm__ __volatile__(
6443 " stw %%g0, [%0]"
6444@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
6445 : "memory");
6446 }
6447
6448-static int inline arch_write_trylock(arch_rwlock_t *lock)
6449+static inline int arch_write_trylock(arch_rwlock_t *lock)
6450 {
6451 unsigned long mask, tmp1, tmp2, result;
6452
6453diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
6454index 25849ae..924c54b 100644
6455--- a/arch/sparc/include/asm/thread_info_32.h
6456+++ b/arch/sparc/include/asm/thread_info_32.h
6457@@ -49,6 +49,8 @@ struct thread_info {
6458 unsigned long w_saved;
6459
6460 struct restart_block restart_block;
6461+
6462+ unsigned long lowest_stack;
6463 };
6464
6465 /*
6466diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
6467index a3fe4dc..cae132a 100644
6468--- a/arch/sparc/include/asm/thread_info_64.h
6469+++ b/arch/sparc/include/asm/thread_info_64.h
6470@@ -63,6 +63,8 @@ struct thread_info {
6471 struct pt_regs *kern_una_regs;
6472 unsigned int kern_una_insn;
6473
6474+ unsigned long lowest_stack;
6475+
6476 unsigned long fpregs[0] __attribute__ ((aligned(64)));
6477 };
6478
6479@@ -193,10 +195,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
6480 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
6481 /* flag bit 6 is available */
6482 #define TIF_32BIT 7 /* 32-bit binary */
6483-/* flag bit 8 is available */
6484+#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
6485 #define TIF_SECCOMP 9 /* secure computing */
6486 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
6487 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
6488+
6489 /* NOTE: Thread flags >= 12 should be ones we have no interest
6490 * in using in assembly, else we can't use the mask as
6491 * an immediate value in instructions such as andcc.
6492@@ -215,12 +218,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
6493 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
6494 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6495 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
6496+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6497
6498 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
6499 _TIF_DO_NOTIFY_RESUME_MASK | \
6500 _TIF_NEED_RESCHED)
6501 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
6502
6503+#define _TIF_WORK_SYSCALL \
6504+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
6505+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6506+
6507+
6508 /*
6509 * Thread-synchronous status.
6510 *
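TIF_GRSEC_SETXID takes the free flag bit 8 so pending credential updates can be noticed on the syscall path, and _TIF_WORK_SYSCALL folds it together with the existing trace/seccomp/audit/tracepoint bits. Grouping them matters on sparc64 because the syscall entry assembly (see the syscalls.S hunks below) tests all of this work with a single andcc against an immediate -- which is also why the adjacent comment insists that flags >= 12 stay out of assembly use. The C-level equivalent of that one-instruction test, as a sketch:

    static inline bool syscall_needs_slow_path(void)
    {
    	/* one mask covers tracing, seccomp, audit, tracepoints
    	 * and the new GRSEC_SETXID work bit
    	 */
    	return current_thread_info()->flags & _TIF_WORK_SYSCALL;
    }
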
6511diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
6512index 0167d26..9acd8ed 100644
6513--- a/arch/sparc/include/asm/uaccess.h
6514+++ b/arch/sparc/include/asm/uaccess.h
6515@@ -1,5 +1,13 @@
6516 #ifndef ___ASM_SPARC_UACCESS_H
6517 #define ___ASM_SPARC_UACCESS_H
6518+
6519+#ifdef __KERNEL__
6520+#ifndef __ASSEMBLY__
6521+#include <linux/types.h>
6522+extern void check_object_size(const void *ptr, unsigned long n, bool to);
6523+#endif
6524+#endif
6525+
6526 #if defined(__sparc__) && defined(__arch64__)
6527 #include <asm/uaccess_64.h>
6528 #else
6529diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
6530index 53a28dd..50c38c3 100644
6531--- a/arch/sparc/include/asm/uaccess_32.h
6532+++ b/arch/sparc/include/asm/uaccess_32.h
6533@@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
6534
6535 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
6536 {
6537- if (n && __access_ok((unsigned long) to, n))
6538+ if ((long)n < 0)
6539+ return n;
6540+
6541+ if (n && __access_ok((unsigned long) to, n)) {
6542+ if (!__builtin_constant_p(n))
6543+ check_object_size(from, n, true);
6544 return __copy_user(to, (__force void __user *) from, n);
6545- else
6546+ } else
6547 return n;
6548 }
6549
6550 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
6551 {
6552+ if ((long)n < 0)
6553+ return n;
6554+
6555+ if (!__builtin_constant_p(n))
6556+ check_object_size(from, n, true);
6557+
6558 return __copy_user(to, (__force void __user *) from, n);
6559 }
6560
6561 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
6562 {
6563- if (n && __access_ok((unsigned long) from, n))
6564+ if ((long)n < 0)
6565+ return n;
6566+
6567+ if (n && __access_ok((unsigned long) from, n)) {
6568+ if (!__builtin_constant_p(n))
6569+ check_object_size(to, n, false);
6570 return __copy_user((__force void __user *) to, from, n);
6571- else
6572+ } else
6573 return n;
6574 }
6575
6576 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
6577 {
6578+ if ((long)n < 0)
6579+ return n;
6580+
6581 return __copy_user((__force void __user *) to, from, n);
6582 }
6583
6584diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
6585index 73083e1..2bc62a6 100644
6586--- a/arch/sparc/include/asm/uaccess_64.h
6587+++ b/arch/sparc/include/asm/uaccess_64.h
6588@@ -10,6 +10,7 @@
6589 #include <linux/compiler.h>
6590 #include <linux/string.h>
6591 #include <linux/thread_info.h>
6592+#include <linux/kernel.h>
6593 #include <asm/asi.h>
6594 #include <asm/spitfire.h>
6595 #include <asm-generic/uaccess-unaligned.h>
6596@@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
6597 static inline unsigned long __must_check
6598 copy_from_user(void *to, const void __user *from, unsigned long size)
6599 {
6600- unsigned long ret = ___copy_from_user(to, from, size);
6601+ unsigned long ret;
6602
6603+ if ((long)size < 0 || size > INT_MAX)
6604+ return size;
6605+
6606+ if (!__builtin_constant_p(size))
6607+ check_object_size(to, size, false);
6608+
6609+ ret = ___copy_from_user(to, from, size);
6610 if (unlikely(ret))
6611 ret = copy_from_user_fixup(to, from, size);
6612
6613@@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
6614 static inline unsigned long __must_check
6615 copy_to_user(void __user *to, const void *from, unsigned long size)
6616 {
6617- unsigned long ret = ___copy_to_user(to, from, size);
6618+ unsigned long ret;
6619
6620+ if ((long)size < 0 || size > INT_MAX)
6621+ return size;
6622+
6623+ if (!__builtin_constant_p(size))
6624+ check_object_size(from, size, true);
6625+
6626+ ret = ___copy_to_user(to, from, size);
6627 if (unlikely(ret))
6628 ret = copy_to_user_fixup(to, from, size);
6629 return ret;
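Both uaccess variants gain the same two defenses. First, a signedness guard rejects sizes that went "negative" through an integer bug before they ever reach the copy routine (the 64-bit side also caps at INT_MAX); returning n mimics "nothing could be copied". Second, PAX_USERCOPY's check_object_size() verifies, for run-time sizes only, that the kernel buffer really lies within a single heap or stack object -- compile-time-constant sizes are left to static checking. The pattern, restated once as a sketch around the sparc64 primitive used above:

    static inline unsigned long
    copy_from_user_hardened(void *to, const void __user *from, unsigned long n)
    {
    	if ((long)n < 0 || n > INT_MAX)	/* size produced by an integer bug */
    		return n;		/* report the whole copy as failed */

    	if (!__builtin_constant_p(n))	/* run-time size: bounds-check the object */
    		check_object_size(to, n, false);

    	return ___copy_from_user(to, from, n);
    }
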
6630diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
6631index 6cf591b..b49e65a 100644
6632--- a/arch/sparc/kernel/Makefile
6633+++ b/arch/sparc/kernel/Makefile
6634@@ -3,7 +3,7 @@
6635 #
6636
6637 asflags-y := -ansi
6638-ccflags-y := -Werror
6639+#ccflags-y := -Werror
6640
6641 extra-y := head_$(BITS).o
6642
6643diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
6644index 487bffb..955a925 100644
6645--- a/arch/sparc/kernel/process_32.c
6646+++ b/arch/sparc/kernel/process_32.c
6647@@ -126,14 +126,14 @@ void show_regs(struct pt_regs *r)
6648
6649 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
6650 r->psr, r->pc, r->npc, r->y, print_tainted());
6651- printk("PC: <%pS>\n", (void *) r->pc);
6652+ printk("PC: <%pA>\n", (void *) r->pc);
6653 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
6654 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
6655 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
6656 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
6657 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
6658 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
6659- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
6660+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
6661
6662 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
6663 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
6664@@ -168,7 +168,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
6665 rw = (struct reg_window32 *) fp;
6666 pc = rw->ins[7];
6667 printk("[%08lx : ", pc);
6668- printk("%pS ] ", (void *) pc);
6669+ printk("%pA ] ", (void *) pc);
6670 fp = rw->ins[6];
6671 } while (++count < 16);
6672 printk("\n");
6673diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
6674index c6e0c29..052832b 100644
6675--- a/arch/sparc/kernel/process_64.c
6676+++ b/arch/sparc/kernel/process_64.c
6677@@ -181,14 +181,14 @@ static void show_regwindow(struct pt_regs *regs)
6678 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
6679 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
6680 if (regs->tstate & TSTATE_PRIV)
6681- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
6682+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
6683 }
6684
6685 void show_regs(struct pt_regs *regs)
6686 {
6687 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
6688 regs->tpc, regs->tnpc, regs->y, print_tainted());
6689- printk("TPC: <%pS>\n", (void *) regs->tpc);
6690+ printk("TPC: <%pA>\n", (void *) regs->tpc);
6691 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
6692 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
6693 regs->u_regs[3]);
6694@@ -201,7 +201,7 @@ void show_regs(struct pt_regs *regs)
6695 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
6696 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
6697 regs->u_regs[15]);
6698- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
6699+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
6700 show_regwindow(regs);
6701 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
6702 }
6703@@ -290,7 +290,7 @@ void arch_trigger_all_cpu_backtrace(void)
6704 ((tp && tp->task) ? tp->task->pid : -1));
6705
6706 if (gp->tstate & TSTATE_PRIV) {
6707- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
6708+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
6709 (void *) gp->tpc,
6710 (void *) gp->o7,
6711 (void *) gp->i7,
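All of these register and backtrace dumps move from %pS to %pA, a printk extension this patch introduces in lib/vsprintf.c (the hunk appears much later in the file). Conceptually it symbolizes kernel addresses like %pS while leaving grsecurity in control of how much gets exposed, e.g. under GRKERNSEC_HIDESYM. A hypothetical sketch of how such a specifier could slot into vsprintf's pointer() dispatch -- string() and symbol_string() follow mainline's internals, while the 'A' case body and grsec_hide_symbols() are this sketch's assumptions, not the patch's code:

    	switch (*fmt) {
    	case 'A':	/* assumed: like %pS unless symbols are hidden */
    		if (grsec_hide_symbols())	/* hypothetical policy hook */
    			return string(buf, end, "<hidden>", spec);
    		return symbol_string(buf, end, ptr, spec, fmt);
    	default:
    		break;
    	}
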
6712diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
6713index 7ff45e4..a58f271 100644
6714--- a/arch/sparc/kernel/ptrace_64.c
6715+++ b/arch/sparc/kernel/ptrace_64.c
6716@@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
6717 return ret;
6718 }
6719
6720+#ifdef CONFIG_GRKERNSEC_SETXID
6721+extern void gr_delayed_cred_worker(void);
6722+#endif
6723+
6724 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
6725 {
6726 int ret = 0;
6727@@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
6728 /* do the secure computing check first */
6729 secure_computing_strict(regs->u_regs[UREG_G1]);
6730
6731+#ifdef CONFIG_GRKERNSEC_SETXID
6732+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6733+ gr_delayed_cred_worker();
6734+#endif
6735+
6736 if (test_thread_flag(TIF_SYSCALL_TRACE))
6737 ret = tracehook_report_syscall_entry(regs);
6738
6739@@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
6740
6741 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
6742 {
6743+#ifdef CONFIG_GRKERNSEC_SETXID
6744+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6745+ gr_delayed_cred_worker();
6746+#endif
6747+
6748 audit_syscall_exit(regs);
6749
6750 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
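These hooks close a thread-group race: setuid() changes the credentials of the calling thread, and with GRKERNSEC_SETXID every sibling thread gets TIF_GRSEC_SETXID raised so it applies the same change at its next syscall boundary -- entry and exit are both checked, and test_and_clear ensures the work runs once per flagging. gr_delayed_cred_worker() itself is defined in the grsecurity core elsewhere in this patch. The setter side, sketched with the thread-walk idiom of this kernel generation (the function name is this sketch's, not the patch's):

    static void mark_group_for_cred_update(struct task_struct *leader)
    {
    	struct task_struct *t = leader;

    	rcu_read_lock();
    	do {
    		set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
    	} while_each_thread(leader, t);
    	rcu_read_unlock();
    }
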
6751diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
6752index 0c9b31b..55a8ba6 100644
6753--- a/arch/sparc/kernel/sys_sparc_32.c
6754+++ b/arch/sparc/kernel/sys_sparc_32.c
6755@@ -39,6 +39,7 @@ asmlinkage unsigned long sys_getpagesize(void)
6756 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
6757 {
6758 struct vm_area_struct * vmm;
6759+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
6760
6761 if (flags & MAP_FIXED) {
6762 /* We do not accept a shared mapping if it would violate
6763@@ -54,7 +55,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6764 if (len > TASK_SIZE - PAGE_SIZE)
6765 return -ENOMEM;
6766 if (!addr)
6767- addr = TASK_UNMAPPED_BASE;
6768+ addr = current->mm->mmap_base;
6769
6770 if (flags & MAP_SHARED)
6771 addr = COLOUR_ALIGN(addr);
6772@@ -65,7 +66,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6773 /* At this point: (!vmm || addr < vmm->vm_end). */
6774 if (TASK_SIZE - PAGE_SIZE - len < addr)
6775 return -ENOMEM;
6776- if (!vmm || addr + len <= vmm->vm_start)
6777+ if (check_heap_stack_gap(vmm, addr, len, offset))
6778 return addr;
6779 addr = vmm->vm_end;
6780 if (flags & MAP_SHARED)
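Two changes recur through all the arch_get_unmapped_area variants in this patch. Searches start at mm->mmap_base (which PAX_RANDMMAP randomizes) instead of the fixed TASK_UNMAPPED_BASE, and the bare overlap test "!vma || addr + len <= vma->vm_start" becomes check_heap_stack_gap(), which additionally keeps an enforced gap -- plus the per-thread random offset from gr_rand_threadstack_offset() -- between new mappings and a stack vma. A simplified sketch of what the helper decides; the real one, added to include/linux/mm.h by this patch, also handles grows-up stacks, and sysctl_heap_stack_gap names the knob as this sketch's assumption:

    static bool gap_ok(const struct vm_area_struct *vma, unsigned long addr,
    		   unsigned long len, unsigned long offset)
    {
    	unsigned long gap = 0;

    	if (!vma)
    		return true;
    	if (vma->vm_flags & VM_GROWSDOWN)	/* a stack sits above the hole */
    		gap = sysctl_heap_stack_gap + offset;
    	return addr + len + gap <= vma->vm_start;	/* sketch: ignores wraparound */
    }
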
6781diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
6782index 878ef3d..609e41f 100644
6783--- a/arch/sparc/kernel/sys_sparc_64.c
6784+++ b/arch/sparc/kernel/sys_sparc_64.c
6785@@ -102,12 +102,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6786 unsigned long task_size = TASK_SIZE;
6787 unsigned long start_addr;
6788 int do_color_align;
6789+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
6790
6791 if (flags & MAP_FIXED) {
6792 /* We do not accept a shared mapping if it would violate
6793 * cache aliasing constraints.
6794 */
6795- if ((flags & MAP_SHARED) &&
6796+ if ((filp || (flags & MAP_SHARED)) &&
6797 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
6798 return -EINVAL;
6799 return addr;
6800@@ -122,6 +123,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6801 if (filp || (flags & MAP_SHARED))
6802 do_color_align = 1;
6803
6804+#ifdef CONFIG_PAX_RANDMMAP
6805+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
6806+#endif
6807+
6808 if (addr) {
6809 if (do_color_align)
6810 addr = COLOUR_ALIGN(addr, pgoff);
6811@@ -129,15 +134,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6812 addr = PAGE_ALIGN(addr);
6813
6814 vma = find_vma(mm, addr);
6815- if (task_size - len >= addr &&
6816- (!vma || addr + len <= vma->vm_start))
6817+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
6818 return addr;
6819 }
6820
6821 if (len > mm->cached_hole_size) {
6822- start_addr = addr = mm->free_area_cache;
6823+ start_addr = addr = mm->free_area_cache;
6824 } else {
6825- start_addr = addr = TASK_UNMAPPED_BASE;
6826+ start_addr = addr = mm->mmap_base;
6827 mm->cached_hole_size = 0;
6828 }
6829
6830@@ -157,14 +161,14 @@ full_search:
6831 vma = find_vma(mm, VA_EXCLUDE_END);
6832 }
6833 if (unlikely(task_size < addr)) {
6834- if (start_addr != TASK_UNMAPPED_BASE) {
6835- start_addr = addr = TASK_UNMAPPED_BASE;
6836+ if (start_addr != mm->mmap_base) {
6837+ start_addr = addr = mm->mmap_base;
6838 mm->cached_hole_size = 0;
6839 goto full_search;
6840 }
6841 return -ENOMEM;
6842 }
6843- if (likely(!vma || addr + len <= vma->vm_start)) {
6844+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
6845 /*
6846 * Remember the place where we stopped the search:
6847 */
6848@@ -190,6 +194,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6849 unsigned long task_size = STACK_TOP32;
6850 unsigned long addr = addr0;
6851 int do_color_align;
6852+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
6853
6854 /* This should only ever run for 32-bit processes. */
6855 BUG_ON(!test_thread_flag(TIF_32BIT));
6856@@ -198,7 +203,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6857 /* We do not accept a shared mapping if it would violate
6858 * cache aliasing constraints.
6859 */
6860- if ((flags & MAP_SHARED) &&
6861+ if ((filp || (flags & MAP_SHARED)) &&
6862 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
6863 return -EINVAL;
6864 return addr;
6865@@ -219,8 +224,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6866 addr = PAGE_ALIGN(addr);
6867
6868 vma = find_vma(mm, addr);
6869- if (task_size - len >= addr &&
6870- (!vma || addr + len <= vma->vm_start))
6871+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
6872 return addr;
6873 }
6874
6875@@ -241,7 +245,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6876 /* make sure it can fit in the remaining address space */
6877 if (likely(addr > len)) {
6878 vma = find_vma(mm, addr-len);
6879- if (!vma || addr <= vma->vm_start) {
6880+ if (check_heap_stack_gap(vma, addr - len, len, offset)) {
6881 /* remember the address as a hint for next time */
6882 return (mm->free_area_cache = addr-len);
6883 }
6884@@ -250,18 +254,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6885 if (unlikely(mm->mmap_base < len))
6886 goto bottomup;
6887
6888- addr = mm->mmap_base-len;
6889- if (do_color_align)
6890- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6891+ addr = mm->mmap_base - len;
6892
6893 do {
6894+ if (do_color_align)
6895+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6896 /*
6897 * Lookup failure means no vma is above this address,
6898 * else if new region fits below vma->vm_start,
6899 * return with success:
6900 */
6901 vma = find_vma(mm, addr);
6902- if (likely(!vma || addr+len <= vma->vm_start)) {
6903+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
6904 /* remember the address as a hint for next time */
6905 return (mm->free_area_cache = addr);
6906 }
6907@@ -271,10 +275,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6908 mm->cached_hole_size = vma->vm_start - addr;
6909
6910 /* try just below the current vma->vm_start */
6911- addr = vma->vm_start-len;
6912- if (do_color_align)
6913- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6914- } while (likely(len < vma->vm_start));
6915+ addr = skip_heap_stack_gap(vma, len, offset);
6916+ } while (!IS_ERR_VALUE(addr));
6917
6918 bottomup:
6919 /*
6920@@ -373,6 +375,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6921 gap == RLIM_INFINITY ||
6922 sysctl_legacy_va_layout) {
6923 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
6924+
6925+#ifdef CONFIG_PAX_RANDMMAP
6926+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6927+ mm->mmap_base += mm->delta_mmap;
6928+#endif
6929+
6930 mm->get_unmapped_area = arch_get_unmapped_area;
6931 mm->unmap_area = arch_unmap_area;
6932 } else {
6933@@ -385,6 +393,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6934 gap = (task_size / 6 * 5);
6935
6936 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
6937+
6938+#ifdef CONFIG_PAX_RANDMMAP
6939+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6940+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
6941+#endif
6942+
6943 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
6944 mm->unmap_area = arch_unmap_area_topdown;
6945 }
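On the 64-bit side the same hardening picks up three extras: the SHMLBA colour check for MAP_FIXED now also fires for file mappings (filp ||), user-supplied address hints are ignored entirely under PAX_RANDMMAP (the #ifdef guard in front of "if (addr)"), and the top-down loop steps past insufficient holes with skip_heap_stack_gap() instead of retrying just below vm_start. arch_pick_mmap_layout() then shifts both layouts by per-exec random deltas; the net effect, restated (delta_mmap and delta_stack are per-mm random values chosen at exec time elsewhere in the patch):

    	/* bottom-up (legacy) layout */
    	mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
    	if (mm->pax_flags & MF_PAX_RANDMMAP)
    		mm->mmap_base += mm->delta_mmap;

    	/* top-down layout */
    	mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
    	if (mm->pax_flags & MF_PAX_RANDMMAP)
    		mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
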
6946diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
6947index bf23477..b7425a6 100644
6948--- a/arch/sparc/kernel/syscalls.S
6949+++ b/arch/sparc/kernel/syscalls.S
6950@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
6951 #endif
6952 .align 32
6953 1: ldx [%g6 + TI_FLAGS], %l5
6954- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6955+ andcc %l5, _TIF_WORK_SYSCALL, %g0
6956 be,pt %icc, rtrap
6957 nop
6958 call syscall_trace_leave
6959@@ -189,7 +189,7 @@ linux_sparc_syscall32:
6960
6961 srl %i5, 0, %o5 ! IEU1
6962 srl %i2, 0, %o2 ! IEU0 Group
6963- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6964+ andcc %l0, _TIF_WORK_SYSCALL, %g0
6965 bne,pn %icc, linux_syscall_trace32 ! CTI
6966 mov %i0, %l5 ! IEU1
6967 call %l7 ! CTI Group brk forced
6968@@ -212,7 +212,7 @@ linux_sparc_syscall:
6969
6970 mov %i3, %o3 ! IEU1
6971 mov %i4, %o4 ! IEU0 Group
6972- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6973+ andcc %l0, _TIF_WORK_SYSCALL, %g0
6974 bne,pn %icc, linux_syscall_trace ! CTI Group
6975 mov %i0, %l5 ! IEU0
6976 2: call %l7 ! CTI Group brk forced
6977@@ -228,7 +228,7 @@ ret_sys_call:
6978
6979 cmp %o0, -ERESTART_RESTARTBLOCK
6980 bgeu,pn %xcc, 1f
6981- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6982+ andcc %l0, _TIF_WORK_SYSCALL, %g0
6983 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
6984
6985 2:
6986diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
6987index a5785ea..405c5f7 100644
6988--- a/arch/sparc/kernel/traps_32.c
6989+++ b/arch/sparc/kernel/traps_32.c
6990@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
6991 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
6992 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
6993
6994+extern void gr_handle_kernel_exploit(void);
6995+
6996 void die_if_kernel(char *str, struct pt_regs *regs)
6997 {
6998 static int die_counter;
6999@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
7000 count++ < 30 &&
7001 (((unsigned long) rw) >= PAGE_OFFSET) &&
7002 !(((unsigned long) rw) & 0x7)) {
7003- printk("Caller[%08lx]: %pS\n", rw->ins[7],
7004+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
7005 (void *) rw->ins[7]);
7006 rw = (struct reg_window32 *)rw->ins[6];
7007 }
7008 }
7009 printk("Instruction DUMP:");
7010 instruction_dump ((unsigned long *) regs->pc);
7011- if(regs->psr & PSR_PS)
7012+ if(regs->psr & PSR_PS) {
7013+ gr_handle_kernel_exploit();
7014 do_exit(SIGKILL);
7015+ }
7016 do_exit(SIGSEGV);
7017 }
7018
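die_if_kernel() now distinguishes oopses raised while the CPU was in supervisor mode (PSR_PS set) and calls gr_handle_kernel_exploit() before exiting -- the added braces ensure the handler only runs on that path. The helper is implemented in grsecurity/ later in this patch; conceptually it punishes the user whose actions oopsed the kernel. A rough sketch of the policy, with the caveat that the real version also bans the offending uid for a configurable period:

    void gr_handle_kernel_exploit_sketch(void)
    {
    	kuid_t uid = current_uid();

    	if (uid_eq(uid, GLOBAL_ROOT_UID))	/* root gets a pass */
    		return;

    	printk(KERN_ALERT "grsec: kernel exploit attempt by uid %u\n",
    	       from_kuid_munged(&init_user_ns, uid));
    	/* the caller follows up with do_exit(SIGKILL), see the hunk above */
    }
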
7019diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
7020index b66a779..8e8d66c 100644
7021--- a/arch/sparc/kernel/traps_64.c
7022+++ b/arch/sparc/kernel/traps_64.c
7023@@ -76,7 +76,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
7024 i + 1,
7025 p->trapstack[i].tstate, p->trapstack[i].tpc,
7026 p->trapstack[i].tnpc, p->trapstack[i].tt);
7027- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
7028+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
7029 }
7030 }
7031
7032@@ -96,6 +96,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
7033
7034 lvl -= 0x100;
7035 if (regs->tstate & TSTATE_PRIV) {
7036+
7037+#ifdef CONFIG_PAX_REFCOUNT
7038+ if (lvl == 6)
7039+ pax_report_refcount_overflow(regs);
7040+#endif
7041+
7042 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
7043 die_if_kernel(buffer, regs);
7044 }
7045@@ -114,11 +120,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
7046 void bad_trap_tl1(struct pt_regs *regs, long lvl)
7047 {
7048 char buffer[32];
7049-
7050+
7051 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
7052 0, lvl, SIGTRAP) == NOTIFY_STOP)
7053 return;
7054
7055+#ifdef CONFIG_PAX_REFCOUNT
7056+ if (lvl == 6)
7057+ pax_report_refcount_overflow(regs);
7058+#endif
7059+
7060 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
7061
7062 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
7063@@ -1142,7 +1153,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
7064 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
7065 printk("%s" "ERROR(%d): ",
7066 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
7067- printk("TPC<%pS>\n", (void *) regs->tpc);
7068+ printk("TPC<%pA>\n", (void *) regs->tpc);
7069 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
7070 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
7071 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
7072@@ -1749,7 +1760,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
7073 smp_processor_id(),
7074 (type & 0x1) ? 'I' : 'D',
7075 regs->tpc);
7076- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
7077+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
7078 panic("Irrecoverable Cheetah+ parity error.");
7079 }
7080
7081@@ -1757,7 +1768,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
7082 smp_processor_id(),
7083 (type & 0x1) ? 'I' : 'D',
7084 regs->tpc);
7085- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
7086+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
7087 }
7088
7089 struct sun4v_error_entry {
7090@@ -2104,9 +2115,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
7091
7092 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
7093 regs->tpc, tl);
7094- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
7095+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
7096 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
7097- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
7098+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
7099 (void *) regs->u_regs[UREG_I7]);
7100 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
7101 "pte[%lx] error[%lx]\n",
7102@@ -2128,9 +2139,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
7103
7104 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
7105 regs->tpc, tl);
7106- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
7107+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
7108 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
7109- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
7110+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
7111 (void *) regs->u_regs[UREG_I7]);
7112 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
7113 "pte[%lx] error[%lx]\n",
7114@@ -2336,13 +2347,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
7115 fp = (unsigned long)sf->fp + STACK_BIAS;
7116 }
7117
7118- printk(" [%016lx] %pS\n", pc, (void *) pc);
7119+ printk(" [%016lx] %pA\n", pc, (void *) pc);
7120 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
7121 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
7122 int index = tsk->curr_ret_stack;
7123 if (tsk->ret_stack && index >= graph) {
7124 pc = tsk->ret_stack[index - graph].ret;
7125- printk(" [%016lx] %pS\n", pc, (void *) pc);
7126+ printk(" [%016lx] %pA\n", pc, (void *) pc);
7127 graph++;
7128 }
7129 }
7130@@ -2367,6 +2378,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
7131 return (struct reg_window *) (fp + STACK_BIAS);
7132 }
7133
7134+extern void gr_handle_kernel_exploit(void);
7135+
7136 void die_if_kernel(char *str, struct pt_regs *regs)
7137 {
7138 static int die_counter;
7139@@ -2395,7 +2408,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
7140 while (rw &&
7141 count++ < 30 &&
7142 kstack_valid(tp, (unsigned long) rw)) {
7143- printk("Caller[%016lx]: %pS\n", rw->ins[7],
7144+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
7145 (void *) rw->ins[7]);
7146
7147 rw = kernel_stack_up(rw);
7148@@ -2408,8 +2421,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
7149 }
7150 user_instruction_dump ((unsigned int __user *) regs->tpc);
7151 }
7152- if (regs->tstate & TSTATE_PRIV)
7153+ if (regs->tstate & TSTATE_PRIV) {
7154+ gr_handle_kernel_exploit();
7155 do_exit(SIGKILL);
7156+ }
7157 do_exit(SIGSEGV);
7158 }
7159 EXPORT_SYMBOL(die_if_kernel);
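This completes the PAX_REFCOUNT plumbing begun in the lock and atomic code: "tvs %icc, 6" raises software trap 6, which enters the kernel as trap type 0x106; bad_trap() strips the 0x100 bias, so lvl == 6 -- in privileged state only -- identifies a reference-count overflow and gets reported instead of being treated as an ordinary bad software trap. The chain, restated in one place:

    	lvl -= 0x100;			/* sw trap 6 arrives as type 0x106 */
    	if ((regs->tstate & TSTATE_PRIV) && lvl == 6)
    		pax_report_refcount_overflow(regs);	/* log, then kill */
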
7160diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
7161index 8201c25e..072a2a7 100644
7162--- a/arch/sparc/kernel/unaligned_64.c
7163+++ b/arch/sparc/kernel/unaligned_64.c
7164@@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
7165 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
7166
7167 if (__ratelimit(&ratelimit)) {
7168- printk("Kernel unaligned access at TPC[%lx] %pS\n",
7169+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
7170 regs->tpc, (void *) regs->tpc);
7171 }
7172 }
7173diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
7174index 8410065f2..4fd4ca22 100644
7175--- a/arch/sparc/lib/Makefile
7176+++ b/arch/sparc/lib/Makefile
7177@@ -2,7 +2,7 @@
7178 #
7179
7180 asflags-y := -ansi -DST_DIV0=0x02
7181-ccflags-y := -Werror
7182+#ccflags-y := -Werror
7183
7184 lib-$(CONFIG_SPARC32) += ashrdi3.o
7185 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
7186diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
7187index 85c233d..68500e0 100644
7188--- a/arch/sparc/lib/atomic_64.S
7189+++ b/arch/sparc/lib/atomic_64.S
7190@@ -17,7 +17,12 @@
7191 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
7192 BACKOFF_SETUP(%o2)
7193 1: lduw [%o1], %g1
7194- add %g1, %o0, %g7
7195+ addcc %g1, %o0, %g7
7196+
7197+#ifdef CONFIG_PAX_REFCOUNT
7198+ tvs %icc, 6
7199+#endif
7200+
7201 cas [%o1], %g1, %g7
7202 cmp %g1, %g7
7203 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
7204@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
7205 2: BACKOFF_SPIN(%o2, %o3, 1b)
7206 ENDPROC(atomic_add)
7207
7208+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
7209+ BACKOFF_SETUP(%o2)
7210+1: lduw [%o1], %g1
7211+ add %g1, %o0, %g7
7212+ cas [%o1], %g1, %g7
7213+ cmp %g1, %g7
7214+ bne,pn %icc, 2f
7215+ nop
7216+ retl
7217+ nop
7218+2: BACKOFF_SPIN(%o2, %o3, 1b)
7219+ENDPROC(atomic_add_unchecked)
7220+
7221 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
7222 BACKOFF_SETUP(%o2)
7223 1: lduw [%o1], %g1
7224- sub %g1, %o0, %g7
7225+ subcc %g1, %o0, %g7
7226+
7227+#ifdef CONFIG_PAX_REFCOUNT
7228+ tvs %icc, 6
7229+#endif
7230+
7231 cas [%o1], %g1, %g7
7232 cmp %g1, %g7
7233 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
7234@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
7235 2: BACKOFF_SPIN(%o2, %o3, 1b)
7236 ENDPROC(atomic_sub)
7237
7238+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
7239+ BACKOFF_SETUP(%o2)
7240+1: lduw [%o1], %g1
7241+ sub %g1, %o0, %g7
7242+ cas [%o1], %g1, %g7
7243+ cmp %g1, %g7
7244+ bne,pn %icc, 2f
7245+ nop
7246+ retl
7247+ nop
7248+2: BACKOFF_SPIN(%o2, %o3, 1b)
7249+ENDPROC(atomic_sub_unchecked)
7250+
7251 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
7252 BACKOFF_SETUP(%o2)
7253 1: lduw [%o1], %g1
7254- add %g1, %o0, %g7
7255+ addcc %g1, %o0, %g7
7256+
7257+#ifdef CONFIG_PAX_REFCOUNT
7258+ tvs %icc, 6
7259+#endif
7260+
7261 cas [%o1], %g1, %g7
7262 cmp %g1, %g7
7263 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
7264@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
7265 2: BACKOFF_SPIN(%o2, %o3, 1b)
7266 ENDPROC(atomic_add_ret)
7267
7268+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
7269+ BACKOFF_SETUP(%o2)
7270+1: lduw [%o1], %g1
7271+ addcc %g1, %o0, %g7
7272+ cas [%o1], %g1, %g7
7273+ cmp %g1, %g7
7274+ bne,pn %icc, 2f
7275+ add %g7, %o0, %g7
7276+ sra %g7, 0, %o0
7277+ retl
7278+ nop
7279+2: BACKOFF_SPIN(%o2, %o3, 1b)
7280+ENDPROC(atomic_add_ret_unchecked)
7281+
7282 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
7283 BACKOFF_SETUP(%o2)
7284 1: lduw [%o1], %g1
7285- sub %g1, %o0, %g7
7286+ subcc %g1, %o0, %g7
7287+
7288+#ifdef CONFIG_PAX_REFCOUNT
7289+ tvs %icc, 6
7290+#endif
7291+
7292 cas [%o1], %g1, %g7
7293 cmp %g1, %g7
7294 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
7295@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
7296 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
7297 BACKOFF_SETUP(%o2)
7298 1: ldx [%o1], %g1
7299- add %g1, %o0, %g7
7300+ addcc %g1, %o0, %g7
7301+
7302+#ifdef CONFIG_PAX_REFCOUNT
7303+ tvs %xcc, 6
7304+#endif
7305+
7306 casx [%o1], %g1, %g7
7307 cmp %g1, %g7
7308 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
7309@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
7310 2: BACKOFF_SPIN(%o2, %o3, 1b)
7311 ENDPROC(atomic64_add)
7312
7313+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
7314+ BACKOFF_SETUP(%o2)
7315+1: ldx [%o1], %g1
7316+ addcc %g1, %o0, %g7
7317+ casx [%o1], %g1, %g7
7318+ cmp %g1, %g7
7319+ bne,pn %xcc, 2f
7320+ nop
7321+ retl
7322+ nop
7323+2: BACKOFF_SPIN(%o2, %o3, 1b)
7324+ENDPROC(atomic64_add_unchecked)
7325+
7326 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
7327 BACKOFF_SETUP(%o2)
7328 1: ldx [%o1], %g1
7329- sub %g1, %o0, %g7
7330+ subcc %g1, %o0, %g7
7331+
7332+#ifdef CONFIG_PAX_REFCOUNT
7333+ tvs %xcc, 6
7334+#endif
7335+
7336 casx [%o1], %g1, %g7
7337 cmp %g1, %g7
7338 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
7339@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
7340 2: BACKOFF_SPIN(%o2, %o3, 1b)
7341 ENDPROC(atomic64_sub)
7342
7343+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
7344+ BACKOFF_SETUP(%o2)
7345+1: ldx [%o1], %g1
7346+ subcc %g1, %o0, %g7
7347+ casx [%o1], %g1, %g7
7348+ cmp %g1, %g7
7349+ bne,pn %xcc, 2f
7350+ nop
7351+ retl
7352+ nop
7353+2: BACKOFF_SPIN(%o2, %o3, 1b)
7354+ENDPROC(atomic64_sub_unchecked)
7355+
7356 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
7357 BACKOFF_SETUP(%o2)
7358 1: ldx [%o1], %g1
7359- add %g1, %o0, %g7
7360+ addcc %g1, %o0, %g7
7361+
7362+#ifdef CONFIG_PAX_REFCOUNT
7363+ tvs %xcc, 6
7364+#endif
7365+
7366 casx [%o1], %g1, %g7
7367 cmp %g1, %g7
7368 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
7369@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
7370 2: BACKOFF_SPIN(%o2, %o3, 1b)
7371 ENDPROC(atomic64_add_ret)
7372
7373+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
7374+ BACKOFF_SETUP(%o2)
7375+1: ldx [%o1], %g1
7376+ addcc %g1, %o0, %g7
7377+ casx [%o1], %g1, %g7
7378+ cmp %g1, %g7
7379+ bne,pn %xcc, 2f
7380+ add %g7, %o0, %g7
7381+ mov %g7, %o0
7382+ retl
7383+ nop
7384+2: BACKOFF_SPIN(%o2, %o3, 1b)
7385+ENDPROC(atomic64_add_ret_unchecked)
7386+
7387 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
7388 BACKOFF_SETUP(%o2)
7389 1: ldx [%o1], %g1
7390- sub %g1, %o0, %g7
7391+ subcc %g1, %o0, %g7
7392+
7393+#ifdef CONFIG_PAX_REFCOUNT
7394+ tvs %xcc, 6
7395+#endif
7396+
7397 casx [%o1], %g1, %g7
7398 cmp %g1, %g7
7399 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
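Every checked primitive gains an *_unchecked twin that keeps the plain add/sub and therefore never traps. The split exists because some counters -- statistics, sequence numbers -- are expected to wrap; the rest of this patch converts those call sites to atomic_unchecked_t while refcount-like counters keep the trapping variants. Typical usage, sketched (the counter name is illustrative):

    static atomic_unchecked_t rx_packets = ATOMIC_INIT(0);

    static void count_rx_packet(void)
    {
    	/* wraps silently: no tvs trap, no refcount report */
    	atomic_inc_unchecked(&rx_packets);
    }
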
7400diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
7401index 0c4e35e..745d3e4 100644
7402--- a/arch/sparc/lib/ksyms.c
7403+++ b/arch/sparc/lib/ksyms.c
7404@@ -109,12 +109,18 @@ EXPORT_SYMBOL(__downgrade_write);
7405
7406 /* Atomic counter implementation. */
7407 EXPORT_SYMBOL(atomic_add);
7408+EXPORT_SYMBOL(atomic_add_unchecked);
7409 EXPORT_SYMBOL(atomic_add_ret);
7410+EXPORT_SYMBOL(atomic_add_ret_unchecked);
7411 EXPORT_SYMBOL(atomic_sub);
7412+EXPORT_SYMBOL(atomic_sub_unchecked);
7413 EXPORT_SYMBOL(atomic_sub_ret);
7414 EXPORT_SYMBOL(atomic64_add);
7415+EXPORT_SYMBOL(atomic64_add_unchecked);
7416 EXPORT_SYMBOL(atomic64_add_ret);
7417+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
7418 EXPORT_SYMBOL(atomic64_sub);
7419+EXPORT_SYMBOL(atomic64_sub_unchecked);
7420 EXPORT_SYMBOL(atomic64_sub_ret);
7421 EXPORT_SYMBOL(atomic64_dec_if_positive);
7422
7423diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
7424index 30c3ecc..736f015 100644
7425--- a/arch/sparc/mm/Makefile
7426+++ b/arch/sparc/mm/Makefile
7427@@ -2,7 +2,7 @@
7428 #
7429
7430 asflags-y := -ansi
7431-ccflags-y := -Werror
7432+#ccflags-y := -Werror
7433
7434 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
7435 obj-y += fault_$(BITS).o
7436diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
7437index e98bfda..ea8d221 100644
7438--- a/arch/sparc/mm/fault_32.c
7439+++ b/arch/sparc/mm/fault_32.c
7440@@ -21,6 +21,9 @@
7441 #include <linux/perf_event.h>
7442 #include <linux/interrupt.h>
7443 #include <linux/kdebug.h>
7444+#include <linux/slab.h>
7445+#include <linux/pagemap.h>
7446+#include <linux/compiler.h>
7447
7448 #include <asm/page.h>
7449 #include <asm/pgtable.h>
7450@@ -159,6 +162,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
7451 return safe_compute_effective_address(regs, insn);
7452 }
7453
7454+#ifdef CONFIG_PAX_PAGEEXEC
7455+#ifdef CONFIG_PAX_DLRESOLVE
7456+static void pax_emuplt_close(struct vm_area_struct *vma)
7457+{
7458+ vma->vm_mm->call_dl_resolve = 0UL;
7459+}
7460+
7461+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
7462+{
7463+ unsigned int *kaddr;
7464+
7465+ vmf->page = alloc_page(GFP_HIGHUSER);
7466+ if (!vmf->page)
7467+ return VM_FAULT_OOM;
7468+
7469+ kaddr = kmap(vmf->page);
7470+ memset(kaddr, 0, PAGE_SIZE);
7471+ kaddr[0] = 0x9DE3BFA8U; /* save */
7472+ flush_dcache_page(vmf->page);
7473+ kunmap(vmf->page);
7474+ return VM_FAULT_MAJOR;
7475+}
7476+
7477+static const struct vm_operations_struct pax_vm_ops = {
7478+ .close = pax_emuplt_close,
7479+ .fault = pax_emuplt_fault
7480+};
7481+
7482+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
7483+{
7484+ int ret;
7485+
7486+ INIT_LIST_HEAD(&vma->anon_vma_chain);
7487+ vma->vm_mm = current->mm;
7488+ vma->vm_start = addr;
7489+ vma->vm_end = addr + PAGE_SIZE;
7490+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
7491+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
7492+ vma->vm_ops = &pax_vm_ops;
7493+
7494+ ret = insert_vm_struct(current->mm, vma);
7495+ if (ret)
7496+ return ret;
7497+
7498+ ++current->mm->total_vm;
7499+ return 0;
7500+}
7501+#endif
7502+
7503+/*
7504+ * PaX: decide what to do with offenders (regs->pc = fault address)
7505+ *
7506+ * returns 1 when task should be killed
7507+ * 2 when patched PLT trampoline was detected
7508+ * 3 when unpatched PLT trampoline was detected
7509+ */
7510+static int pax_handle_fetch_fault(struct pt_regs *regs)
7511+{
7512+
7513+#ifdef CONFIG_PAX_EMUPLT
7514+ int err;
7515+
7516+ do { /* PaX: patched PLT emulation #1 */
7517+ unsigned int sethi1, sethi2, jmpl;
7518+
7519+ err = get_user(sethi1, (unsigned int *)regs->pc);
7520+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
7521+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
7522+
7523+ if (err)
7524+ break;
7525+
7526+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
7527+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
7528+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
7529+ {
7530+ unsigned int addr;
7531+
7532+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
7533+ addr = regs->u_regs[UREG_G1];
7534+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
7535+ regs->pc = addr;
7536+ regs->npc = addr+4;
7537+ return 2;
7538+ }
7539+ } while (0);
7540+
7541+ do { /* PaX: patched PLT emulation #2 */
7542+ unsigned int ba;
7543+
7544+ err = get_user(ba, (unsigned int *)regs->pc);
7545+
7546+ if (err)
7547+ break;
7548+
7549+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
7550+ unsigned int addr;
7551+
7552+ if ((ba & 0xFFC00000U) == 0x30800000U)
7553+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
7554+ else
7555+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
7556+ regs->pc = addr;
7557+ regs->npc = addr+4;
7558+ return 2;
7559+ }
7560+ } while (0);
7561+
7562+ do { /* PaX: patched PLT emulation #3 */
7563+ unsigned int sethi, bajmpl, nop;
7564+
7565+ err = get_user(sethi, (unsigned int *)regs->pc);
7566+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
7567+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
7568+
7569+ if (err)
7570+ break;
7571+
7572+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7573+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
7574+ nop == 0x01000000U)
7575+ {
7576+ unsigned int addr;
7577+
7578+ addr = (sethi & 0x003FFFFFU) << 10;
7579+ regs->u_regs[UREG_G1] = addr;
7580+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
7581+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
7582+ else
7583+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
7584+ regs->pc = addr;
7585+ regs->npc = addr+4;
7586+ return 2;
7587+ }
7588+ } while (0);
7589+
7590+ do { /* PaX: unpatched PLT emulation step 1 */
7591+ unsigned int sethi, ba, nop;
7592+
7593+ err = get_user(sethi, (unsigned int *)regs->pc);
7594+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
7595+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
7596+
7597+ if (err)
7598+ break;
7599+
7600+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7601+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
7602+ nop == 0x01000000U)
7603+ {
7604+ unsigned int addr, save, call;
7605+
7606+ if ((ba & 0xFFC00000U) == 0x30800000U)
7607+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
7608+ else
7609+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
7610+
7611+ err = get_user(save, (unsigned int *)addr);
7612+ err |= get_user(call, (unsigned int *)(addr+4));
7613+ err |= get_user(nop, (unsigned int *)(addr+8));
7614+ if (err)
7615+ break;
7616+
7617+#ifdef CONFIG_PAX_DLRESOLVE
7618+ if (save == 0x9DE3BFA8U &&
7619+ (call & 0xC0000000U) == 0x40000000U &&
7620+ nop == 0x01000000U)
7621+ {
7622+ struct vm_area_struct *vma;
7623+ unsigned long call_dl_resolve;
7624+
7625+ down_read(&current->mm->mmap_sem);
7626+ call_dl_resolve = current->mm->call_dl_resolve;
7627+ up_read(&current->mm->mmap_sem);
7628+ if (likely(call_dl_resolve))
7629+ goto emulate;
7630+
7631+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
7632+
7633+ down_write(&current->mm->mmap_sem);
7634+ if (current->mm->call_dl_resolve) {
7635+ call_dl_resolve = current->mm->call_dl_resolve;
7636+ up_write(&current->mm->mmap_sem);
7637+ if (vma)
7638+ kmem_cache_free(vm_area_cachep, vma);
7639+ goto emulate;
7640+ }
7641+
7642+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
7643+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
7644+ up_write(&current->mm->mmap_sem);
7645+ if (vma)
7646+ kmem_cache_free(vm_area_cachep, vma);
7647+ return 1;
7648+ }
7649+
7650+ if (pax_insert_vma(vma, call_dl_resolve)) {
7651+ up_write(&current->mm->mmap_sem);
7652+ kmem_cache_free(vm_area_cachep, vma);
7653+ return 1;
7654+ }
7655+
7656+ current->mm->call_dl_resolve = call_dl_resolve;
7657+ up_write(&current->mm->mmap_sem);
7658+
7659+emulate:
7660+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7661+ regs->pc = call_dl_resolve;
7662+ regs->npc = addr+4;
7663+ return 3;
7664+ }
7665+#endif
7666+
7667+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
7668+ if ((save & 0xFFC00000U) == 0x05000000U &&
7669+ (call & 0xFFFFE000U) == 0x85C0A000U &&
7670+ nop == 0x01000000U)
7671+ {
7672+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7673+ regs->u_regs[UREG_G2] = addr + 4;
7674+ addr = (save & 0x003FFFFFU) << 10;
7675+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
7676+ regs->pc = addr;
7677+ regs->npc = addr+4;
7678+ return 3;
7679+ }
7680+ }
7681+ } while (0);
7682+
7683+ do { /* PaX: unpatched PLT emulation step 2 */
7684+ unsigned int save, call, nop;
7685+
7686+ err = get_user(save, (unsigned int *)(regs->pc-4));
7687+ err |= get_user(call, (unsigned int *)regs->pc);
7688+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
7689+ if (err)
7690+ break;
7691+
7692+ if (save == 0x9DE3BFA8U &&
7693+ (call & 0xC0000000U) == 0x40000000U &&
7694+ nop == 0x01000000U)
7695+ {
7696+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
7697+
7698+ regs->u_regs[UREG_RETPC] = regs->pc;
7699+ regs->pc = dl_resolve;
7700+ regs->npc = dl_resolve+4;
7701+ return 3;
7702+ }
7703+ } while (0);
7704+#endif
7705+
7706+ return 1;
7707+}
7708+
7709+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7710+{
7711+ unsigned long i;
7712+
7713+ printk(KERN_ERR "PAX: bytes at PC: ");
7714+ for (i = 0; i < 8; i++) {
7715+ unsigned int c;
7716+ if (get_user(c, (unsigned int *)pc+i))
7717+ printk(KERN_CONT "???????? ");
7718+ else
7719+ printk(KERN_CONT "%08x ", c);
7720+ }
7721+ printk("\n");
7722+}
7723+#endif
7724+
7725 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
7726 int text_fault)
7727 {
7728@@ -230,6 +504,24 @@ good_area:
7729 if (!(vma->vm_flags & VM_WRITE))
7730 goto bad_area;
7731 } else {
7732+
7733+#ifdef CONFIG_PAX_PAGEEXEC
7734+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
7735+ up_read(&mm->mmap_sem);
7736+ switch (pax_handle_fetch_fault(regs)) {
7737+
7738+#ifdef CONFIG_PAX_EMUPLT
7739+ case 2:
7740+ case 3:
7741+ return;
7742+#endif
7743+
7744+ }
7745+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
7746+ do_group_exit(SIGKILL);
7747+ }
7748+#endif
7749+
7750 /* Allow reads even for write-only mappings */
7751 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
7752 goto bad_area;
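Under PAX_PAGEEXEC a text fault in a non-executable vma is not immediately fatal: pax_handle_fetch_fault() first checks whether the fetch matches one of the known PLT trampoline shapes and, if so, emulates it (return codes 2 and 3); only unrecognized fetches get the fault dump and SIGKILL. The recognition is pure mask/compare on sparc instruction encodings -- every "sethi %hi(x), %g1" matcher above uses the 0xFFC00000/0x03000000 pair and rebuilds the 22-bit immediate with << 10. That one decode step, isolated as a sketch:

    static bool is_sethi_g1(unsigned int insn, unsigned long *imm)
    {
    	/* op=0, rd=%g1, op2=0b100: sethi %hi(imm), %g1 */
    	if ((insn & 0xFFC00000U) != 0x03000000U)
    		return false;
    	*imm = (insn & 0x003FFFFFU) << 10;	/* imm22 fills bits 31..10 */
    	return true;
    }
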
7753diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
7754index 097aee7..5ca6697 100644
7755--- a/arch/sparc/mm/fault_64.c
7756+++ b/arch/sparc/mm/fault_64.c
7757@@ -21,6 +21,9 @@
7758 #include <linux/kprobes.h>
7759 #include <linux/kdebug.h>
7760 #include <linux/percpu.h>
7761+#include <linux/slab.h>
7762+#include <linux/pagemap.h>
7763+#include <linux/compiler.h>
7764
7765 #include <asm/page.h>
7766 #include <asm/pgtable.h>
7767@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
7768 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
7769 regs->tpc);
7770 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
7771- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
7772+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
7773 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
7774 dump_stack();
7775 unhandled_fault(regs->tpc, current, regs);
7776@@ -270,6 +273,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
7777 show_regs(regs);
7778 }
7779
7780+#ifdef CONFIG_PAX_PAGEEXEC
7781+#ifdef CONFIG_PAX_DLRESOLVE
7782+static void pax_emuplt_close(struct vm_area_struct *vma)
7783+{
7784+ vma->vm_mm->call_dl_resolve = 0UL;
7785+}
7786+
7787+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
7788+{
7789+ unsigned int *kaddr;
7790+
7791+ vmf->page = alloc_page(GFP_HIGHUSER);
7792+ if (!vmf->page)
7793+ return VM_FAULT_OOM;
7794+
7795+ kaddr = kmap(vmf->page);
7796+ memset(kaddr, 0, PAGE_SIZE);
7797+ kaddr[0] = 0x9DE3BFA8U; /* save */
7798+ flush_dcache_page(vmf->page);
7799+ kunmap(vmf->page);
7800+ return VM_FAULT_MAJOR;
7801+}
7802+
7803+static const struct vm_operations_struct pax_vm_ops = {
7804+ .close = pax_emuplt_close,
7805+ .fault = pax_emuplt_fault
7806+};
7807+
7808+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
7809+{
7810+ int ret;
7811+
7812+ INIT_LIST_HEAD(&vma->anon_vma_chain);
7813+ vma->vm_mm = current->mm;
7814+ vma->vm_start = addr;
7815+ vma->vm_end = addr + PAGE_SIZE;
7816+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
7817+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
7818+ vma->vm_ops = &pax_vm_ops;
7819+
7820+ ret = insert_vm_struct(current->mm, vma);
7821+ if (ret)
7822+ return ret;
7823+
7824+ ++current->mm->total_vm;
7825+ return 0;
7826+}
7827+#endif
7828+
7829+/*
7830+ * PaX: decide what to do with offenders (regs->tpc = fault address)
7831+ *
7832+ * returns 1 when task should be killed
7833+ * 2 when patched PLT trampoline was detected
7834+ * 3 when unpatched PLT trampoline was detected
7835+ */
7836+static int pax_handle_fetch_fault(struct pt_regs *regs)
7837+{
7838+
7839+#ifdef CONFIG_PAX_EMUPLT
7840+ int err;
7841+
7842+ do { /* PaX: patched PLT emulation #1 */
7843+ unsigned int sethi1, sethi2, jmpl;
7844+
7845+ err = get_user(sethi1, (unsigned int *)regs->tpc);
7846+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
7847+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
7848+
7849+ if (err)
7850+ break;
7851+
7852+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
7853+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
7854+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
7855+ {
7856+ unsigned long addr;
7857+
7858+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
7859+ addr = regs->u_regs[UREG_G1];
7860+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7861+
7862+ if (test_thread_flag(TIF_32BIT))
7863+ addr &= 0xFFFFFFFFUL;
7864+
7865+ regs->tpc = addr;
7866+ regs->tnpc = addr+4;
7867+ return 2;
7868+ }
7869+ } while (0);
7870+
7871+ do { /* PaX: patched PLT emulation #2 */
7872+ unsigned int ba;
7873+
7874+ err = get_user(ba, (unsigned int *)regs->tpc);
7875+
7876+ if (err)
7877+ break;
7878+
7879+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
7880+ unsigned long addr;
7881+
7882+ if ((ba & 0xFFC00000U) == 0x30800000U)
7883+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
7884+ else
7885+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7886+
7887+ if (test_thread_flag(TIF_32BIT))
7888+ addr &= 0xFFFFFFFFUL;
7889+
7890+ regs->tpc = addr;
7891+ regs->tnpc = addr+4;
7892+ return 2;
7893+ }
7894+ } while (0);
7895+
7896+ do { /* PaX: patched PLT emulation #3 */
7897+ unsigned int sethi, bajmpl, nop;
7898+
7899+ err = get_user(sethi, (unsigned int *)regs->tpc);
7900+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
7901+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7902+
7903+ if (err)
7904+ break;
7905+
7906+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7907+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
7908+ nop == 0x01000000U)
7909+ {
7910+ unsigned long addr;
7911+
7912+ addr = (sethi & 0x003FFFFFU) << 10;
7913+ regs->u_regs[UREG_G1] = addr;
7914+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
7915+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7916+ else
7917+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7918+
7919+ if (test_thread_flag(TIF_32BIT))
7920+ addr &= 0xFFFFFFFFUL;
7921+
7922+ regs->tpc = addr;
7923+ regs->tnpc = addr+4;
7924+ return 2;
7925+ }
7926+ } while (0);
7927+
7928+ do { /* PaX: patched PLT emulation #4 */
7929+ unsigned int sethi, mov1, call, mov2;
7930+
7931+ err = get_user(sethi, (unsigned int *)regs->tpc);
7932+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
7933+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
7934+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
7935+
7936+ if (err)
7937+ break;
7938+
7939+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7940+ mov1 == 0x8210000FU &&
7941+ (call & 0xC0000000U) == 0x40000000U &&
7942+ mov2 == 0x9E100001U)
7943+ {
7944+ unsigned long addr;
7945+
7946+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
7947+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7948+
7949+ if (test_thread_flag(TIF_32BIT))
7950+ addr &= 0xFFFFFFFFUL;
7951+
7952+ regs->tpc = addr;
7953+ regs->tnpc = addr+4;
7954+ return 2;
7955+ }
7956+ } while (0);
7957+
7958+ do { /* PaX: patched PLT emulation #5 */
7959+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
7960+
7961+ err = get_user(sethi, (unsigned int *)regs->tpc);
7962+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7963+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7964+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
7965+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
7966+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
7967+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
7968+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
7969+
7970+ if (err)
7971+ break;
7972+
7973+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7974+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
7975+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7976+ (or1 & 0xFFFFE000U) == 0x82106000U &&
7977+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
7978+ sllx == 0x83287020U &&
7979+ jmpl == 0x81C04005U &&
7980+ nop == 0x01000000U)
7981+ {
7982+ unsigned long addr;
7983+
7984+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7985+ regs->u_regs[UREG_G1] <<= 32;
7986+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7987+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7988+ regs->tpc = addr;
7989+ regs->tnpc = addr+4;
7990+ return 2;
7991+ }
7992+ } while (0);
7993+
7994+ do { /* PaX: patched PLT emulation #6 */
7995+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
7996+
7997+ err = get_user(sethi, (unsigned int *)regs->tpc);
7998+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7999+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
8000+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
8001+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
8002+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
8003+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
8004+
8005+ if (err)
8006+ break;
8007+
8008+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
8009+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
8010+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
8011+ sllx == 0x83287020U &&
8012+ (or & 0xFFFFE000U) == 0x8A116000U &&
8013+ jmpl == 0x81C04005U &&
8014+ nop == 0x01000000U)
8015+ {
8016+ unsigned long addr;
8017+
8018+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
8019+ regs->u_regs[UREG_G1] <<= 32;
8020+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
8021+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
8022+ regs->tpc = addr;
8023+ regs->tnpc = addr+4;
8024+ return 2;
8025+ }
8026+ } while (0);
8027+
8028+ do { /* PaX: unpatched PLT emulation step 1 */
8029+ unsigned int sethi, ba, nop;
8030+
8031+ err = get_user(sethi, (unsigned int *)regs->tpc);
8032+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
8033+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
8034+
8035+ if (err)
8036+ break;
8037+
8038+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
8039+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
8040+ nop == 0x01000000U)
8041+ {
8042+ unsigned long addr;
8043+ unsigned int save, call;
8044+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
8045+
8046+ if ((ba & 0xFFC00000U) == 0x30800000U)
8047+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
8048+ else
8049+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
8050+
8051+ if (test_thread_flag(TIF_32BIT))
8052+ addr &= 0xFFFFFFFFUL;
8053+
8054+ err = get_user(save, (unsigned int *)addr);
8055+ err |= get_user(call, (unsigned int *)(addr+4));
8056+ err |= get_user(nop, (unsigned int *)(addr+8));
8057+ if (err)
8058+ break;
8059+
8060+#ifdef CONFIG_PAX_DLRESOLVE
8061+ if (save == 0x9DE3BFA8U &&
8062+ (call & 0xC0000000U) == 0x40000000U &&
8063+ nop == 0x01000000U)
8064+ {
8065+ struct vm_area_struct *vma;
8066+ unsigned long call_dl_resolve;
8067+
8068+ down_read(&current->mm->mmap_sem);
8069+ call_dl_resolve = current->mm->call_dl_resolve;
8070+ up_read(&current->mm->mmap_sem);
8071+ if (likely(call_dl_resolve))
8072+ goto emulate;
8073+
8074+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
8075+
8076+ down_write(&current->mm->mmap_sem);
8077+ if (current->mm->call_dl_resolve) {
8078+ call_dl_resolve = current->mm->call_dl_resolve;
8079+ up_write(&current->mm->mmap_sem);
8080+ if (vma)
8081+ kmem_cache_free(vm_area_cachep, vma);
8082+ goto emulate;
8083+ }
8084+
8085+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
8086+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
8087+ up_write(&current->mm->mmap_sem);
8088+ if (vma)
8089+ kmem_cache_free(vm_area_cachep, vma);
8090+ return 1;
8091+ }
8092+
8093+ if (pax_insert_vma(vma, call_dl_resolve)) {
8094+ up_write(&current->mm->mmap_sem);
8095+ kmem_cache_free(vm_area_cachep, vma);
8096+ return 1;
8097+ }
8098+
8099+ current->mm->call_dl_resolve = call_dl_resolve;
8100+ up_write(&current->mm->mmap_sem);
8101+
8102+emulate:
8103+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
8104+ regs->tpc = call_dl_resolve;
8105+ regs->tnpc = addr+4;
8106+ return 3;
8107+ }
8108+#endif
8109+
8110+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
8111+ if ((save & 0xFFC00000U) == 0x05000000U &&
8112+ (call & 0xFFFFE000U) == 0x85C0A000U &&
8113+ nop == 0x01000000U)
8114+ {
8115+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
8116+ regs->u_regs[UREG_G2] = addr + 4;
8117+ addr = (save & 0x003FFFFFU) << 10;
8118+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
8119+
8120+ if (test_thread_flag(TIF_32BIT))
8121+ addr &= 0xFFFFFFFFUL;
8122+
8123+ regs->tpc = addr;
8124+ regs->tnpc = addr+4;
8125+ return 3;
8126+ }
8127+
8128+ /* PaX: 64-bit PLT stub */
8129+ err = get_user(sethi1, (unsigned int *)addr);
8130+ err |= get_user(sethi2, (unsigned int *)(addr+4));
8131+ err |= get_user(or1, (unsigned int *)(addr+8));
8132+ err |= get_user(or2, (unsigned int *)(addr+12));
8133+ err |= get_user(sllx, (unsigned int *)(addr+16));
8134+ err |= get_user(add, (unsigned int *)(addr+20));
8135+ err |= get_user(jmpl, (unsigned int *)(addr+24));
8136+ err |= get_user(nop, (unsigned int *)(addr+28));
8137+ if (err)
8138+ break;
8139+
8140+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
8141+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
8142+ (or1 & 0xFFFFE000U) == 0x88112000U &&
8143+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
8144+ sllx == 0x89293020U &&
8145+ add == 0x8A010005U &&
8146+ jmpl == 0x89C14000U &&
8147+ nop == 0x01000000U)
8148+ {
8149+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
8150+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
8151+ regs->u_regs[UREG_G4] <<= 32;
8152+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
8153+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
8154+ regs->u_regs[UREG_G4] = addr + 24;
8155+ addr = regs->u_regs[UREG_G5];
8156+ regs->tpc = addr;
8157+ regs->tnpc = addr+4;
8158+ return 3;
8159+ }
8160+ }
8161+ } while (0);
8162+
8163+#ifdef CONFIG_PAX_DLRESOLVE
8164+ do { /* PaX: unpatched PLT emulation step 2 */
8165+ unsigned int save, call, nop;
8166+
8167+ err = get_user(save, (unsigned int *)(regs->tpc-4));
8168+ err |= get_user(call, (unsigned int *)regs->tpc);
8169+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
8170+ if (err)
8171+ break;
8172+
8173+ if (save == 0x9DE3BFA8U &&
8174+ (call & 0xC0000000U) == 0x40000000U &&
8175+ nop == 0x01000000U)
8176+ {
8177+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
8178+
8179+ if (test_thread_flag(TIF_32BIT))
8180+ dl_resolve &= 0xFFFFFFFFUL;
8181+
8182+ regs->u_regs[UREG_RETPC] = regs->tpc;
8183+ regs->tpc = dl_resolve;
8184+ regs->tnpc = dl_resolve+4;
8185+ return 3;
8186+ }
8187+ } while (0);
8188+#endif
8189+
8190+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
8191+ unsigned int sethi, ba, nop;
8192+
8193+ err = get_user(sethi, (unsigned int *)regs->tpc);
8194+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
8195+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
8196+
8197+ if (err)
8198+ break;
8199+
8200+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
8201+ (ba & 0xFFF00000U) == 0x30600000U &&
8202+ nop == 0x01000000U)
8203+ {
8204+ unsigned long addr;
8205+
8206+ addr = (sethi & 0x003FFFFFU) << 10;
8207+ regs->u_regs[UREG_G1] = addr;
8208+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
8209+
8210+ if (test_thread_flag(TIF_32BIT))
8211+ addr &= 0xFFFFFFFFUL;
8212+
8213+ regs->tpc = addr;
8214+ regs->tnpc = addr+4;
8215+ return 2;
8216+ }
8217+ } while (0);
8218+
8219+#endif
8220+
8221+ return 1;
8222+}
8223+
8224+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
8225+{
8226+ unsigned long i;
8227+
8228+ printk(KERN_ERR "PAX: bytes at PC: ");
8229+ for (i = 0; i < 8; i++) {
8230+ unsigned int c;
8231+ if (get_user(c, (unsigned int *)pc+i))
8232+ printk(KERN_CONT "???????? ");
8233+ else
8234+ printk(KERN_CONT "%08x ", c);
8235+ }
8236+ printk("\n");
8237+}
8238+#endif
8239+
8240 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
8241 {
8242 struct mm_struct *mm = current->mm;
8243@@ -341,6 +804,29 @@ retry:
8244 if (!vma)
8245 goto bad_area;
8246
8247+#ifdef CONFIG_PAX_PAGEEXEC
8248+ /* PaX: detect ITLB misses on non-exec pages */
8249+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
8250+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
8251+ {
8252+ if (address != regs->tpc)
8253+ goto good_area;
8254+
8255+ up_read(&mm->mmap_sem);
8256+ switch (pax_handle_fetch_fault(regs)) {
8257+
8258+#ifdef CONFIG_PAX_EMUPLT
8259+ case 2:
8260+ case 3:
8261+ return;
8262+#endif
8263+
8264+ }
8265+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
8266+ do_group_exit(SIGKILL);
8267+ }
8268+#endif
8269+
8270 /* Pure DTLB misses do not tell us whether the fault causing
8271 * load/store/atomic was a write or not, it only says that there
8272 * was no match. So in such a case we (carefully) read the
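
The PLT-emulation cases above keep recovering signed displacements from SPARC instruction words with the pattern (((insn | ones-above-field) ^ sign-bit) + sign-bit), then shifting left by 2 to turn a word displacement into a byte offset. A minimal userspace sketch of that identity for the 19-bit "ba" displacement, checked against a plain shift-based sign extension (the reference function and test values are illustrative, not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* the exact expression from the "patched PLT emulation #7" hunk */
static int64_t sext19_pax(uint32_t insn)
{
	return (int64_t)((((uint64_t)insn | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL);
}

/* reference: move the 19-bit field to the top, arithmetic-shift back
 * (relies on the usual two's-complement right shift) */
static int64_t sext19_ref(uint32_t insn)
{
	return (int64_t)(int32_t)(insn << 13) >> 13;
}

int main(void)
{
	const uint32_t tests[] = { 0x00000001, 0x0003FFFF, 0x00040000, 0x0007FFFF };
	for (unsigned i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("%05x -> pax=%lld ref=%lld\n", tests[i],
		       (long long)sext19_pax(tests[i]),
		       (long long)sext19_ref(tests[i]));
	return 0;
}

The OR pre-sets every bit above the field, the XOR clears a set sign bit (or sets a clear one), and the ADD either carries out of the register for positive values or restores the sign bit for negative ones, leaving a correctly sign-extended 64-bit value.
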
8273diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
8274index f76f83d..ee0d859 100644
8275--- a/arch/sparc/mm/hugetlbpage.c
8276+++ b/arch/sparc/mm/hugetlbpage.c
8277@@ -34,6 +34,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
8278 struct vm_area_struct * vma;
8279 unsigned long task_size = TASK_SIZE;
8280 unsigned long start_addr;
8281+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8282
8283 if (test_thread_flag(TIF_32BIT))
8284 task_size = STACK_TOP32;
8285@@ -67,7 +68,7 @@ full_search:
8286 }
8287 return -ENOMEM;
8288 }
8289- if (likely(!vma || addr + len <= vma->vm_start)) {
8290+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
8291 /*
8292 * Remember the place where we stopped the search:
8293 */
8294@@ -90,6 +91,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8295 struct vm_area_struct *vma;
8296 struct mm_struct *mm = current->mm;
8297 unsigned long addr = addr0;
8298+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8299
8300 /* This should only ever run for 32-bit processes. */
8301 BUG_ON(!test_thread_flag(TIF_32BIT));
8302@@ -106,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8303 /* make sure it can fit in the remaining address space */
8304 if (likely(addr > len)) {
8305 vma = find_vma(mm, addr-len);
8306- if (!vma || addr <= vma->vm_start) {
8307+ if (check_heap_stack_gap(vma, addr - len, len, offset)) {
8308 /* remember the address as a hint for next time */
8309 return (mm->free_area_cache = addr-len);
8310 }
8311@@ -115,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8312 if (unlikely(mm->mmap_base < len))
8313 goto bottomup;
8314
8315- addr = (mm->mmap_base-len) & HPAGE_MASK;
8316+ addr = mm->mmap_base - len;
8317
8318 do {
8319+ addr &= HPAGE_MASK;
8320 /*
8321 * Lookup failure means no vma is above this address,
8322 * else if new region fits below vma->vm_start,
8323 * return with success:
8324 */
8325 vma = find_vma(mm, addr);
8326- if (likely(!vma || addr+len <= vma->vm_start)) {
8327+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
8328 /* remember the address as a hint for next time */
8329 return (mm->free_area_cache = addr);
8330 }
8331@@ -134,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8332 mm->cached_hole_size = vma->vm_start - addr;
8333
8334 /* try just below the current vma->vm_start */
8335- addr = (vma->vm_start-len) & HPAGE_MASK;
8336- } while (likely(len < vma->vm_start));
8337+ addr = skip_heap_stack_gap(vma, len, offset);
8338+ } while (!IS_ERR_VALUE(addr));
8339
8340 bottomup:
8341 /*
8342@@ -163,6 +166,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
8343 struct mm_struct *mm = current->mm;
8344 struct vm_area_struct *vma;
8345 unsigned long task_size = TASK_SIZE;
8346+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8347
8348 if (test_thread_flag(TIF_32BIT))
8349 task_size = STACK_TOP32;
8350@@ -181,8 +185,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
8351 if (addr) {
8352 addr = ALIGN(addr, HPAGE_SIZE);
8353 vma = find_vma(mm, addr);
8354- if (task_size - len >= addr &&
8355- (!vma || addr + len <= vma->vm_start))
8356+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8357 return addr;
8358 }
8359 if (mm->get_unmapped_area == arch_get_unmapped_area)
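
check_heap_stack_gap() and skip_heap_stack_gap(), which these hunks substitute for the bare "!vma || addr + len <= vma->vm_start" test, are defined elsewhere in this patch. A simplified model of the idea only — the struct, the constant, and the function below are illustrative stand-ins, not the patch's definitions: besides requiring the candidate range to fit below the next vma, the check keeps a guard gap, plus a per-mm random offset for thread stacks, between the new mapping and any stack vma growing down toward it.

#include <stdio.h>
#include <stdbool.h>

struct vma_model {
	unsigned long vm_start;
	unsigned long vm_end;
	bool grows_down;		/* stands in for VM_GROWSDOWN */
};

#define GUARD_GAP (64UL * 1024)		/* illustrative; the real gap is tunable */

static bool model_heap_stack_gap(const struct vma_model *vma,
				 unsigned long addr, unsigned long len,
				 unsigned long rand_offset)
{
	if (!vma)
		return true;			/* nothing above: fits */
	if (addr + len > vma->vm_start)
		return false;			/* overlaps the next vma */
	if (vma->grows_down)			/* keep a gap below a stack */
		return addr + len + GUARD_GAP + rand_offset <= vma->vm_start;
	return true;
}

int main(void)
{
	struct vma_model stack = { 0x7fff0000UL, 0x80000000UL, true };
	/* flush against the stack: rejected; one gap lower: accepted */
	printf("%d\n", model_heap_stack_gap(&stack, 0x7ffe0000UL, 0x10000, 0));
	printf("%d\n", model_heap_stack_gap(&stack, 0x7ffd0000UL, 0x10000, 0));
	return 0;
}

gr_rand_threadstack_offset(), threaded through all three hugetlb allocators above, supplies the randomized part of that gap so a thread stack's distance from neighboring mappings is not predictable.
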
8360diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
8361index f4500c6..889656c 100644
8362--- a/arch/tile/include/asm/atomic_64.h
8363+++ b/arch/tile/include/asm/atomic_64.h
8364@@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
8365
8366 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
8367
8368+#define atomic64_read_unchecked(v) atomic64_read(v)
8369+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
8370+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
8371+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
8372+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
8373+#define atomic64_inc_unchecked(v) atomic64_inc(v)
8374+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
8375+#define atomic64_dec_unchecked(v) atomic64_dec(v)
8376+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
8377+
8378 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
8379 #define smp_mb__before_atomic_dec() smp_mb()
8380 #define smp_mb__after_atomic_dec() smp_mb()
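
The *_unchecked aliases exist because, under the PaX REFCOUNT side of this patch, the plain atomic operations on instrumented architectures trap on signed overflow to stop reference-count overflows; counters that are allowed to wrap (statistics and the like) call the _unchecked variants instead. tile has no such instrumentation, so both names map to the same operation. A userspace analog of the split, assuming only that the checked form should trap where the unchecked form wraps (the names are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

static long checked_add(long *v, long a)
{
	long r;
	if (__builtin_add_overflow(*v, a, &r)) {
		fprintf(stderr, "refcount overflow detected\n");
		abort();	/* the kernel path raises a trap instead */
	}
	return *v = r;
}

static long unchecked_add(long *v, long a)
{
	/* deliberate wrap-around, computed in unsigned arithmetic
	 * (conversion back is implementation-defined, but two's
	 * complement everywhere Linux runs) */
	return *v = (long)((unsigned long)*v + (unsigned long)a);
}

int main(void)
{
	long stat = LONG_MAX, ref = LONG_MAX;
	printf("unchecked wraps to %ld\n", unchecked_add(&stat, 1));
	checked_add(&ref, 1);	/* aborts */
	return 0;
}
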
8381diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
8382index a9a5299..0fce79e 100644
8383--- a/arch/tile/include/asm/cache.h
8384+++ b/arch/tile/include/asm/cache.h
8385@@ -15,11 +15,12 @@
8386 #ifndef _ASM_TILE_CACHE_H
8387 #define _ASM_TILE_CACHE_H
8388
8389+#include <linux/const.h>
8390 #include <arch/chip.h>
8391
8392 /* bytes per L1 data cache line */
8393 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
8394-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8395+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8396
8397 /* bytes per L2 cache line */
8398 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
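
The (1 << SHIFT) → (_AC(1,UL) << SHIFT) rewrites, repeated for tile, um and unicore32 below, do two things: _AC(X,Y) from the newly included linux/const.h expands to X##Y in C but plain X in assembly, so the header stays usable from .S files, and the UL suffix pins the constant to unsigned long so later mask arithmetic can neither overflow a 32-bit int (shifts near bit 31) nor zero-extend a truncated 32-bit mask. The original int constant happens to sign-extend correctly in a simple mask; a sketch of the failure mode once such a constant turns unsigned 32-bit (shift value and address are arbitrary):

#include <stdio.h>

#define BYTES_INT (1U << 12)	/* unsigned int constant */
#define BYTES_UL  (1UL << 12)	/* what _AC(1,UL) expands to in C */

int main(void)
{
	unsigned long addr = 0x123456789000UL;
	/* ~(BYTES_INT - 1) is an unsigned int, so it zero-extends to
	 * 0x00000000FFFFF000 and silently truncates the mask: */
	printf("int mask:  %#lx\n", addr & ~(BYTES_INT - 1));
	printf("long mask: %#lx\n", addr & ~(BYTES_UL - 1));
	return 0;
}
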
8399diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
8400index 9ab078a..d6635c2 100644
8401--- a/arch/tile/include/asm/uaccess.h
8402+++ b/arch/tile/include/asm/uaccess.h
8403@@ -403,9 +403,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
8404 const void __user *from,
8405 unsigned long n)
8406 {
8407- int sz = __compiletime_object_size(to);
8408+ size_t sz = __compiletime_object_size(to);
8409
8410- if (likely(sz == -1 || sz >= n))
8411+ if (likely(sz == (size_t)-1 || sz >= n))
8412 n = _copy_from_user(to, from, n);
8413 else
8414 copy_from_user_overflow();
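
Changing sz from int to size_t (and the sentinel test to sz == (size_t)-1) matters because __compiletime_object_size() reports object sizes that need not fit in an int, and because mixing int with the size_t n converts silently through unsigned arithmetic. A sketch of the misfire the typed version avoids (sizes are arbitrary; in the kernel, -1 means "size unknown"):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t objsize = (size_t)1 << 32;	/* a 4 GiB object, 64-bit build */
	size_t n = 4096;			/* a copy that clearly fits */

	int sz_int = (int)objsize;	/* truncates to 0 on two's complement */
	size_t sz = objsize;

	printf("int    sz: copy %s\n",
	       (sz_int == -1 || (size_t)sz_int >= n) ? "allowed" : "rejected (false positive)");
	printf("size_t sz: copy %s\n",
	       (sz == (size_t)-1 || sz >= n) ? "allowed" : "rejected");
	return 0;
}

The same theme recurs in the boot code further down (detect_memory_e820's loop counter, get_entry's index), where signedness rather than width is the concern.
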
8415diff --git a/arch/um/Makefile b/arch/um/Makefile
8416index 133f7de..1d6f2f1 100644
8417--- a/arch/um/Makefile
8418+++ b/arch/um/Makefile
8419@@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
8420 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
8421 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
8422
8423+ifdef CONSTIFY_PLUGIN
8424+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
8425+endif
8426+
8427 #This will adjust *FLAGS accordingly to the platform.
8428 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
8429
8430diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
8431index 19e1bdd..3665b77 100644
8432--- a/arch/um/include/asm/cache.h
8433+++ b/arch/um/include/asm/cache.h
8434@@ -1,6 +1,7 @@
8435 #ifndef __UM_CACHE_H
8436 #define __UM_CACHE_H
8437
8438+#include <linux/const.h>
8439
8440 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
8441 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8442@@ -12,6 +13,6 @@
8443 # define L1_CACHE_SHIFT 5
8444 #endif
8445
8446-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8447+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8448
8449 #endif
8450diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
8451index 2e0a6b1..a64d0f5 100644
8452--- a/arch/um/include/asm/kmap_types.h
8453+++ b/arch/um/include/asm/kmap_types.h
8454@@ -8,6 +8,6 @@
8455
8456 /* No more #include "asm/arch/kmap_types.h" ! */
8457
8458-#define KM_TYPE_NR 14
8459+#define KM_TYPE_NR 15
8460
8461 #endif
8462diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
8463index 5ff53d9..5850cdf 100644
8464--- a/arch/um/include/asm/page.h
8465+++ b/arch/um/include/asm/page.h
8466@@ -14,6 +14,9 @@
8467 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
8468 #define PAGE_MASK (~(PAGE_SIZE-1))
8469
8470+#define ktla_ktva(addr) (addr)
8471+#define ktva_ktla(addr) (addr)
8472+
8473 #ifndef __ASSEMBLY__
8474
8475 struct page;
8476diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
8477index 0032f92..cd151e0 100644
8478--- a/arch/um/include/asm/pgtable-3level.h
8479+++ b/arch/um/include/asm/pgtable-3level.h
8480@@ -58,6 +58,7 @@
8481 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
8482 #define pud_populate(mm, pud, pmd) \
8483 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
8484+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
8485
8486 #ifdef CONFIG_64BIT
8487 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
8488diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
8489index b6d699c..df7ac1d 100644
8490--- a/arch/um/kernel/process.c
8491+++ b/arch/um/kernel/process.c
8492@@ -387,22 +387,6 @@ int singlestepping(void * t)
8493 return 2;
8494 }
8495
8496-/*
8497- * Only x86 and x86_64 have an arch_align_stack().
8498- * All other arches have "#define arch_align_stack(x) (x)"
8499- * in their asm/system.h
8500- * As this is included in UML from asm-um/system-generic.h,
8501- * we can use it to behave as the subarch does.
8502- */
8503-#ifndef arch_align_stack
8504-unsigned long arch_align_stack(unsigned long sp)
8505-{
8506- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
8507- sp -= get_random_int() % 8192;
8508- return sp & ~0xf;
8509-}
8510-#endif
8511-
8512 unsigned long get_wchan(struct task_struct *p)
8513 {
8514 unsigned long stack_page, sp, ip;
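
The block removed above was UML's private copy of arch_align_stack(); this patch drops it, presumably in favor of PaX's own stack randomization. What the removed helper computed is visible in the deleted lines: knock up to 8 KiB off the stack pointer at random, then round down to the 16-byte ABI alignment. The same arithmetic as a standalone sketch, with rand() standing in for the kernel's get_random_int():

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static unsigned long align_stack(unsigned long sp)
{
	sp -= (unsigned long)rand() % 8192;	/* up to 8 KiB of jitter */
	return sp & ~0xfUL;			/* 16-byte alignment */
}

int main(void)
{
	srand((unsigned)time(NULL));
	const unsigned long sp = 0x7ffffffff000UL;
	for (int i = 0; i < 4; i++)
		printf("sp = %#lx\n", align_stack(sp));
	return 0;
}
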
8515diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
8516index ad8f795..2c7eec6 100644
8517--- a/arch/unicore32/include/asm/cache.h
8518+++ b/arch/unicore32/include/asm/cache.h
8519@@ -12,8 +12,10 @@
8520 #ifndef __UNICORE_CACHE_H__
8521 #define __UNICORE_CACHE_H__
8522
8523-#define L1_CACHE_SHIFT (5)
8524-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8525+#include <linux/const.h>
8526+
8527+#define L1_CACHE_SHIFT 5
8528+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8529
8530 /*
8531 * Memory returned by kmalloc() may be used for DMA, so we must make
8532diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
8533index 46c3bff..c2286e7 100644
8534--- a/arch/x86/Kconfig
8535+++ b/arch/x86/Kconfig
8536@@ -241,7 +241,7 @@ config X86_HT
8537
8538 config X86_32_LAZY_GS
8539 def_bool y
8540- depends on X86_32 && !CC_STACKPROTECTOR
8541+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
8542
8543 config ARCH_HWEIGHT_CFLAGS
8544 string
8545@@ -1056,7 +1056,7 @@ choice
8546
8547 config NOHIGHMEM
8548 bool "off"
8549- depends on !X86_NUMAQ
8550+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
8551 ---help---
8552 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
8553 However, the address space of 32-bit x86 processors is only 4
8554@@ -1093,7 +1093,7 @@ config NOHIGHMEM
8555
8556 config HIGHMEM4G
8557 bool "4GB"
8558- depends on !X86_NUMAQ
8559+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
8560 ---help---
8561 Select this if you have a 32-bit processor and between 1 and 4
8562 gigabytes of physical RAM.
8563@@ -1147,7 +1147,7 @@ config PAGE_OFFSET
8564 hex
8565 default 0xB0000000 if VMSPLIT_3G_OPT
8566 default 0x80000000 if VMSPLIT_2G
8567- default 0x78000000 if VMSPLIT_2G_OPT
8568+ default 0x70000000 if VMSPLIT_2G_OPT
8569 default 0x40000000 if VMSPLIT_1G
8570 default 0xC0000000
8571 depends on X86_32
8572@@ -1548,6 +1548,7 @@ config SECCOMP
8573
8574 config CC_STACKPROTECTOR
8575 bool "Enable -fstack-protector buffer overflow detection"
8576+ depends on X86_64 || !PAX_MEMORY_UDEREF
8577 ---help---
8578 This option turns on the -fstack-protector GCC feature. This
8579 feature puts, at the beginning of functions, a canary value on
8580@@ -1605,6 +1606,7 @@ config KEXEC_JUMP
8581 config PHYSICAL_START
8582 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
8583 default "0x1000000"
8584+ range 0x400000 0x40000000
8585 ---help---
8586 This gives the physical address where the kernel is loaded.
8587
8588@@ -1668,6 +1670,7 @@ config X86_NEED_RELOCS
8589 config PHYSICAL_ALIGN
8590 hex "Alignment value to which kernel should be aligned" if X86_32
8591 default "0x1000000"
8592+ range 0x400000 0x1000000 if PAX_KERNEXEC
8593 range 0x2000 0x1000000
8594 ---help---
8595 This value puts the alignment restrictions on physical address
8596@@ -1699,9 +1702,10 @@ config HOTPLUG_CPU
8597 Say N if you want to disable CPU hotplug.
8598
8599 config COMPAT_VDSO
8600- def_bool y
8601+ def_bool n
8602 prompt "Compat VDSO support"
8603 depends on X86_32 || IA32_EMULATION
8604+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
8605 ---help---
8606 Map the 32-bit VDSO to the predictable old-style address too.
8607
8608diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
8609index f3b86d0..17fd30f 100644
8610--- a/arch/x86/Kconfig.cpu
8611+++ b/arch/x86/Kconfig.cpu
8612@@ -335,7 +335,7 @@ config X86_PPRO_FENCE
8613
8614 config X86_F00F_BUG
8615 def_bool y
8616- depends on M586MMX || M586TSC || M586 || M486 || M386
8617+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
8618
8619 config X86_INVD_BUG
8620 def_bool y
8621@@ -359,7 +359,7 @@ config X86_POPAD_OK
8622
8623 config X86_ALIGNMENT_16
8624 def_bool y
8625- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
8626+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
8627
8628 config X86_INTEL_USERCOPY
8629 def_bool y
8630@@ -405,7 +405,7 @@ config X86_CMPXCHG64
8631 # generates cmov.
8632 config X86_CMOV
8633 def_bool y
8634- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
8635+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
8636
8637 config X86_MINIMUM_CPU_FAMILY
8638 int
8639diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
8640index b322f12..652d0d9 100644
8641--- a/arch/x86/Kconfig.debug
8642+++ b/arch/x86/Kconfig.debug
8643@@ -84,7 +84,7 @@ config X86_PTDUMP
8644 config DEBUG_RODATA
8645 bool "Write protect kernel read-only data structures"
8646 default y
8647- depends on DEBUG_KERNEL
8648+ depends on DEBUG_KERNEL && BROKEN
8649 ---help---
8650 Mark the kernel read-only data as write-protected in the pagetables,
8651 in order to catch accidental (and incorrect) writes to such const
8652@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
8653
8654 config DEBUG_SET_MODULE_RONX
8655 bool "Set loadable kernel module data as NX and text as RO"
8656- depends on MODULES
8657+ depends on MODULES && BROKEN
8658 ---help---
8659 This option helps catch unintended modifications to loadable
8660 kernel module's text and read-only data. It also prevents execution
8661@@ -294,7 +294,7 @@ config OPTIMIZE_INLINING
8662
8663 config DEBUG_STRICT_USER_COPY_CHECKS
8664 bool "Strict copy size checks"
8665- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
8666+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
8667 ---help---
8668 Enabling this option turns a certain set of sanity checks for user
8669 copy operations into compile time failures.
8670diff --git a/arch/x86/Makefile b/arch/x86/Makefile
8671index 05afcca..b6ecb51 100644
8672--- a/arch/x86/Makefile
8673+++ b/arch/x86/Makefile
8674@@ -50,6 +50,7 @@ else
8675 UTS_MACHINE := x86_64
8676 CHECKFLAGS += -D__x86_64__ -m64
8677
8678+ biarch := $(call cc-option,-m64)
8679 KBUILD_AFLAGS += -m64
8680 KBUILD_CFLAGS += -m64
8681
8682@@ -229,3 +230,12 @@ define archhelp
8683 echo ' FDARGS="..." arguments for the booted kernel'
8684 echo ' FDINITRD=file initrd for the booted kernel'
8685 endef
8686+
8687+define OLD_LD
8688+
8689+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
8690+*** Please upgrade your binutils to 2.18 or newer
8691+endef
8692+
8693+archprepare:
8694+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
8695diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
8696index ccce0ed..fd9da25 100644
8697--- a/arch/x86/boot/Makefile
8698+++ b/arch/x86/boot/Makefile
8699@@ -65,6 +65,9 @@ KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
8700 $(call cc-option, -fno-stack-protector) \
8701 $(call cc-option, -mpreferred-stack-boundary=2)
8702 KBUILD_CFLAGS += $(call cc-option, -m32)
8703+ifdef CONSTIFY_PLUGIN
8704+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
8705+endif
8706 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
8707 GCOV_PROFILE := n
8708
8709diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
8710index 878e4b9..20537ab 100644
8711--- a/arch/x86/boot/bitops.h
8712+++ b/arch/x86/boot/bitops.h
8713@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
8714 u8 v;
8715 const u32 *p = (const u32 *)addr;
8716
8717- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
8718+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
8719 return v;
8720 }
8721
8722@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
8723
8724 static inline void set_bit(int nr, void *addr)
8725 {
8726- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
8727+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
8728 }
8729
8730 #endif /* BOOT_BITOPS_H */
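
Every asm-to-asm-volatile change in the boot code (bitops.h here, boot.h and cpucheck.c below) has the same rationale: GCC treats a non-volatile asm with outputs as a pure function of its inputs, so it may merge two identical instances or delete one whose result looks unused — wrong for cpuid/rdmsr probes and flag toggling that must execute every time. A sketch of the difference (x86-64 only; whether the merge actually happens depends on the optimizer, but only "volatile" forbids it):

#include <stdio.h>

static inline unsigned long rdtsc_plain(void)
{
	unsigned int lo, hi;
	__asm__("rdtsc" : "=a"(lo), "=d"(hi));	/* may legally be CSE'd */
	return ((unsigned long)hi << 32) | lo;
}

static inline unsigned long rdtsc_volatile(void)
{
	unsigned int lo, hi;
	__asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
	return ((unsigned long)hi << 32) | lo;
}

int main(void)
{
	unsigned long a = rdtsc_plain(), b = rdtsc_plain();
	unsigned long c = rdtsc_volatile(), d = rdtsc_volatile();
	/* with -O2 the first delta may come out 0 (one read reused);
	 * the volatile pair must issue two reads */
	printf("plain delta:    %lu\n", b - a);
	printf("volatile delta: %lu\n", d - c);
	return 0;
}
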
8731diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
8732index 18997e5..83d9c67 100644
8733--- a/arch/x86/boot/boot.h
8734+++ b/arch/x86/boot/boot.h
8735@@ -85,7 +85,7 @@ static inline void io_delay(void)
8736 static inline u16 ds(void)
8737 {
8738 u16 seg;
8739- asm("movw %%ds,%0" : "=rm" (seg));
8740+ asm volatile("movw %%ds,%0" : "=rm" (seg));
8741 return seg;
8742 }
8743
8744@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
8745 static inline int memcmp(const void *s1, const void *s2, size_t len)
8746 {
8747 u8 diff;
8748- asm("repe; cmpsb; setnz %0"
8749+ asm volatile("repe; cmpsb; setnz %0"
8750 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
8751 return diff;
8752 }
8753diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
8754index 8a84501..b2d165f 100644
8755--- a/arch/x86/boot/compressed/Makefile
8756+++ b/arch/x86/boot/compressed/Makefile
8757@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
8758 KBUILD_CFLAGS += $(cflags-y)
8759 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
8760 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
8761+ifdef CONSTIFY_PLUGIN
8762+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
8763+endif
8764
8765 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
8766 GCOV_PROFILE := n
8767diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
8768index e87b0ca..2bce457 100644
8769--- a/arch/x86/boot/compressed/eboot.c
8770+++ b/arch/x86/boot/compressed/eboot.c
8771@@ -144,7 +144,6 @@ again:
8772 *addr = max_addr;
8773 }
8774
8775-free_pool:
8776 efi_call_phys1(sys_table->boottime->free_pool, map);
8777
8778 fail:
8779@@ -208,7 +207,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
8780 if (i == map_size / desc_size)
8781 status = EFI_NOT_FOUND;
8782
8783-free_pool:
8784 efi_call_phys1(sys_table->boottime->free_pool, map);
8785 fail:
8786 return status;
8787diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
8788index aa4aaf1..6656f2f 100644
8789--- a/arch/x86/boot/compressed/head_32.S
8790+++ b/arch/x86/boot/compressed/head_32.S
8791@@ -116,7 +116,7 @@ preferred_addr:
8792 notl %eax
8793 andl %eax, %ebx
8794 #else
8795- movl $LOAD_PHYSICAL_ADDR, %ebx
8796+ movl $____LOAD_PHYSICAL_ADDR, %ebx
8797 #endif
8798
8799 /* Target address to relocate to for decompression */
8800@@ -202,7 +202,7 @@ relocated:
8801 * and where it was actually loaded.
8802 */
8803 movl %ebp, %ebx
8804- subl $LOAD_PHYSICAL_ADDR, %ebx
8805+ subl $____LOAD_PHYSICAL_ADDR, %ebx
8806 jz 2f /* Nothing to be done if loaded at compiled addr. */
8807 /*
8808 * Process relocations.
8809@@ -210,8 +210,7 @@ relocated:
8810
8811 1: subl $4, %edi
8812 movl (%edi), %ecx
8813- testl %ecx, %ecx
8814- jz 2f
8815+ jecxz 2f
8816 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
8817 jmp 1b
8818 2:
8819diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
8820index 2c4b171..e1fa5b1 100644
8821--- a/arch/x86/boot/compressed/head_64.S
8822+++ b/arch/x86/boot/compressed/head_64.S
8823@@ -91,7 +91,7 @@ ENTRY(startup_32)
8824 notl %eax
8825 andl %eax, %ebx
8826 #else
8827- movl $LOAD_PHYSICAL_ADDR, %ebx
8828+ movl $____LOAD_PHYSICAL_ADDR, %ebx
8829 #endif
8830
8831 /* Target address to relocate to for decompression */
8832@@ -273,7 +273,7 @@ preferred_addr:
8833 notq %rax
8834 andq %rax, %rbp
8835 #else
8836- movq $LOAD_PHYSICAL_ADDR, %rbp
8837+ movq $____LOAD_PHYSICAL_ADDR, %rbp
8838 #endif
8839
8840 /* Target address to relocate to for decompression */
8841diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
8842index 88f7ff6..ed695dd 100644
8843--- a/arch/x86/boot/compressed/misc.c
8844+++ b/arch/x86/boot/compressed/misc.c
8845@@ -303,7 +303,7 @@ static void parse_elf(void *output)
8846 case PT_LOAD:
8847 #ifdef CONFIG_RELOCATABLE
8848 dest = output;
8849- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
8850+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
8851 #else
8852 dest = (void *)(phdr->p_paddr);
8853 #endif
8854@@ -352,7 +352,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
8855 error("Destination address too large");
8856 #endif
8857 #ifndef CONFIG_RELOCATABLE
8858- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
8859+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
8860 error("Wrong destination address");
8861 #endif
8862
8863diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
8864index 4d3ff03..e4972ff 100644
8865--- a/arch/x86/boot/cpucheck.c
8866+++ b/arch/x86/boot/cpucheck.c
8867@@ -74,7 +74,7 @@ static int has_fpu(void)
8868 u16 fcw = -1, fsw = -1;
8869 u32 cr0;
8870
8871- asm("movl %%cr0,%0" : "=r" (cr0));
8872+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
8873 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
8874 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
8875 asm volatile("movl %0,%%cr0" : : "r" (cr0));
8876@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
8877 {
8878 u32 f0, f1;
8879
8880- asm("pushfl ; "
8881+ asm volatile("pushfl ; "
8882 "pushfl ; "
8883 "popl %0 ; "
8884 "movl %0,%1 ; "
8885@@ -115,7 +115,7 @@ static void get_flags(void)
8886 set_bit(X86_FEATURE_FPU, cpu.flags);
8887
8888 if (has_eflag(X86_EFLAGS_ID)) {
8889- asm("cpuid"
8890+ asm volatile("cpuid"
8891 : "=a" (max_intel_level),
8892 "=b" (cpu_vendor[0]),
8893 "=d" (cpu_vendor[1]),
8894@@ -124,7 +124,7 @@ static void get_flags(void)
8895
8896 if (max_intel_level >= 0x00000001 &&
8897 max_intel_level <= 0x0000ffff) {
8898- asm("cpuid"
8899+ asm volatile("cpuid"
8900 : "=a" (tfms),
8901 "=c" (cpu.flags[4]),
8902 "=d" (cpu.flags[0])
8903@@ -136,7 +136,7 @@ static void get_flags(void)
8904 cpu.model += ((tfms >> 16) & 0xf) << 4;
8905 }
8906
8907- asm("cpuid"
8908+ asm volatile("cpuid"
8909 : "=a" (max_amd_level)
8910 : "a" (0x80000000)
8911 : "ebx", "ecx", "edx");
8912@@ -144,7 +144,7 @@ static void get_flags(void)
8913 if (max_amd_level >= 0x80000001 &&
8914 max_amd_level <= 0x8000ffff) {
8915 u32 eax = 0x80000001;
8916- asm("cpuid"
8917+ asm volatile("cpuid"
8918 : "+a" (eax),
8919 "=c" (cpu.flags[6]),
8920 "=d" (cpu.flags[1])
8921@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8922 u32 ecx = MSR_K7_HWCR;
8923 u32 eax, edx;
8924
8925- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8926+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8927 eax &= ~(1 << 15);
8928- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8929+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8930
8931 get_flags(); /* Make sure it really did something */
8932 err = check_flags();
8933@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8934 u32 ecx = MSR_VIA_FCR;
8935 u32 eax, edx;
8936
8937- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8938+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8939 eax |= (1<<1)|(1<<7);
8940- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8941+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8942
8943 set_bit(X86_FEATURE_CX8, cpu.flags);
8944 err = check_flags();
8945@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8946 u32 eax, edx;
8947 u32 level = 1;
8948
8949- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8950- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8951- asm("cpuid"
8952+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8953+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8954+ asm volatile("cpuid"
8955 : "+a" (level), "=d" (cpu.flags[0])
8956 : : "ecx", "ebx");
8957- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8958+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8959
8960 err = check_flags();
8961 }
8962diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
8963index 8c132a6..13e5c96 100644
8964--- a/arch/x86/boot/header.S
8965+++ b/arch/x86/boot/header.S
8966@@ -387,10 +387,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
8967 # single linked list of
8968 # struct setup_data
8969
8970-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
8971+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
8972
8973 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
8974+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
8975+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
8976+#else
8977 #define VO_INIT_SIZE (VO__end - VO__text)
8978+#endif
8979 #if ZO_INIT_SIZE > VO_INIT_SIZE
8980 #define INIT_SIZE ZO_INIT_SIZE
8981 #else
8982diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
8983index db75d07..8e6d0af 100644
8984--- a/arch/x86/boot/memory.c
8985+++ b/arch/x86/boot/memory.c
8986@@ -19,7 +19,7 @@
8987
8988 static int detect_memory_e820(void)
8989 {
8990- int count = 0;
8991+ unsigned int count = 0;
8992 struct biosregs ireg, oreg;
8993 struct e820entry *desc = boot_params.e820_map;
8994 static struct e820entry buf; /* static so it is zeroed */
8995diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
8996index 11e8c6e..fdbb1ed 100644
8997--- a/arch/x86/boot/video-vesa.c
8998+++ b/arch/x86/boot/video-vesa.c
8999@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
9000
9001 boot_params.screen_info.vesapm_seg = oreg.es;
9002 boot_params.screen_info.vesapm_off = oreg.di;
9003+ boot_params.screen_info.vesapm_size = oreg.cx;
9004 }
9005
9006 /*
9007diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
9008index 43eda28..5ab5fdb 100644
9009--- a/arch/x86/boot/video.c
9010+++ b/arch/x86/boot/video.c
9011@@ -96,7 +96,7 @@ static void store_mode_params(void)
9012 static unsigned int get_entry(void)
9013 {
9014 char entry_buf[4];
9015- int i, len = 0;
9016+ unsigned int i, len = 0;
9017 int key;
9018 unsigned int v;
9019
9020diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
9021index 5b577d5..3c1fed4 100644
9022--- a/arch/x86/crypto/aes-x86_64-asm_64.S
9023+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
9024@@ -8,6 +8,8 @@
9025 * including this sentence is retained in full.
9026 */
9027
9028+#include <asm/alternative-asm.h>
9029+
9030 .extern crypto_ft_tab
9031 .extern crypto_it_tab
9032 .extern crypto_fl_tab
9033@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
9034 je B192; \
9035 leaq 32(r9),r9;
9036
9037+#define ret pax_force_retaddr 0, 1; ret
9038+
9039 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
9040 movq r1,r2; \
9041 movq r3,r4; \
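
pax_force_retaddr, inserted before nearly every ret in the crypto assembly from here on (and via the "#define ret" trick above for macro-generated epilogues), is part of PaX KERNEXEC's return-address hardening: just before a function returns, the saved return address on the stack is forced back into the kernel half of the address space, so a return address overwritten with a userland pointer can no longer be followed. The macro itself lives in asm/alternative-asm.h, which these hunks include; one of its variants reportedly does this with "bts $63,(%rsp)". A conceptual model of the effect only — the mask and addresses below are illustrative, not the macro's implementation:

#include <stdio.h>
#include <stdint.h>

static uint64_t force_retaddr(uint64_t ret)
{
	return ret | ((uint64_t)1 << 63);	/* pin into the kernel half */
}

int main(void)
{
	uint64_t good = 0xffffffff81234567ULL;	/* typical kernel text */
	uint64_t evil = 0x0000000000401000ULL;	/* smashed: userland code */
	printf("kernel ret: %#llx -> %#llx\n",
	       (unsigned long long)good, (unsigned long long)force_retaddr(good));
	printf("forged ret: %#llx -> %#llx (non-canonical: ret faults)\n",
	       (unsigned long long)evil, (unsigned long long)force_retaddr(evil));
	return 0;
}

A kernel-text address already has the top bit set, so the OR is a no-op on legitimate returns; a forged userland address becomes non-canonical on x86-64 and the ret traps instead of transferring control.
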
9042diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
9043index 3470624..201259d 100644
9044--- a/arch/x86/crypto/aesni-intel_asm.S
9045+++ b/arch/x86/crypto/aesni-intel_asm.S
9046@@ -31,6 +31,7 @@
9047
9048 #include <linux/linkage.h>
9049 #include <asm/inst.h>
9050+#include <asm/alternative-asm.h>
9051
9052 #ifdef __x86_64__
9053 .data
9054@@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
9055 pop %r14
9056 pop %r13
9057 pop %r12
9058+ pax_force_retaddr 0, 1
9059 ret
9060+ENDPROC(aesni_gcm_dec)
9061
9062
9063 /*****************************************************************************
9064@@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
9065 pop %r14
9066 pop %r13
9067 pop %r12
9068+ pax_force_retaddr 0, 1
9069 ret
9070+ENDPROC(aesni_gcm_enc)
9071
9072 #endif
9073
9074@@ -1714,6 +1719,7 @@ _key_expansion_256a:
9075 pxor %xmm1, %xmm0
9076 movaps %xmm0, (TKEYP)
9077 add $0x10, TKEYP
9078+ pax_force_retaddr_bts
9079 ret
9080
9081 .align 4
9082@@ -1738,6 +1744,7 @@ _key_expansion_192a:
9083 shufps $0b01001110, %xmm2, %xmm1
9084 movaps %xmm1, 0x10(TKEYP)
9085 add $0x20, TKEYP
9086+ pax_force_retaddr_bts
9087 ret
9088
9089 .align 4
9090@@ -1757,6 +1764,7 @@ _key_expansion_192b:
9091
9092 movaps %xmm0, (TKEYP)
9093 add $0x10, TKEYP
9094+ pax_force_retaddr_bts
9095 ret
9096
9097 .align 4
9098@@ -1769,6 +1777,7 @@ _key_expansion_256b:
9099 pxor %xmm1, %xmm2
9100 movaps %xmm2, (TKEYP)
9101 add $0x10, TKEYP
9102+ pax_force_retaddr_bts
9103 ret
9104
9105 /*
9106@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
9107 #ifndef __x86_64__
9108 popl KEYP
9109 #endif
9110+ pax_force_retaddr 0, 1
9111 ret
9112+ENDPROC(aesni_set_key)
9113
9114 /*
9115 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
9116@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
9117 popl KLEN
9118 popl KEYP
9119 #endif
9120+ pax_force_retaddr 0, 1
9121 ret
9122+ENDPROC(aesni_enc)
9123
9124 /*
9125 * _aesni_enc1: internal ABI
9126@@ -1959,6 +1972,7 @@ _aesni_enc1:
9127 AESENC KEY STATE
9128 movaps 0x70(TKEYP), KEY
9129 AESENCLAST KEY STATE
9130+ pax_force_retaddr_bts
9131 ret
9132
9133 /*
9134@@ -2067,6 +2081,7 @@ _aesni_enc4:
9135 AESENCLAST KEY STATE2
9136 AESENCLAST KEY STATE3
9137 AESENCLAST KEY STATE4
9138+ pax_force_retaddr_bts
9139 ret
9140
9141 /*
9142@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
9143 popl KLEN
9144 popl KEYP
9145 #endif
9146+ pax_force_retaddr 0, 1
9147 ret
9148+ENDPROC(aesni_dec)
9149
9150 /*
9151 * _aesni_dec1: internal ABI
9152@@ -2146,6 +2163,7 @@ _aesni_dec1:
9153 AESDEC KEY STATE
9154 movaps 0x70(TKEYP), KEY
9155 AESDECLAST KEY STATE
9156+ pax_force_retaddr_bts
9157 ret
9158
9159 /*
9160@@ -2254,6 +2272,7 @@ _aesni_dec4:
9161 AESDECLAST KEY STATE2
9162 AESDECLAST KEY STATE3
9163 AESDECLAST KEY STATE4
9164+ pax_force_retaddr_bts
9165 ret
9166
9167 /*
9168@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
9169 popl KEYP
9170 popl LEN
9171 #endif
9172+ pax_force_retaddr 0, 1
9173 ret
9174+ENDPROC(aesni_ecb_enc)
9175
9176 /*
9177 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
9178@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
9179 popl KEYP
9180 popl LEN
9181 #endif
9182+ pax_force_retaddr 0, 1
9183 ret
9184+ENDPROC(aesni_ecb_dec)
9185
9186 /*
9187 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
9188@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
9189 popl LEN
9190 popl IVP
9191 #endif
9192+ pax_force_retaddr 0, 1
9193 ret
9194+ENDPROC(aesni_cbc_enc)
9195
9196 /*
9197 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
9198@@ -2500,7 +2525,9 @@ ENTRY(aesni_cbc_dec)
9199 popl LEN
9200 popl IVP
9201 #endif
9202+ pax_force_retaddr 0, 1
9203 ret
9204+ENDPROC(aesni_cbc_dec)
9205
9206 #ifdef __x86_64__
9207 .align 16
9208@@ -2526,6 +2553,7 @@ _aesni_inc_init:
9209 mov $1, TCTR_LOW
9210 MOVQ_R64_XMM TCTR_LOW INC
9211 MOVQ_R64_XMM CTR TCTR_LOW
9212+ pax_force_retaddr_bts
9213 ret
9214
9215 /*
9216@@ -2554,6 +2582,7 @@ _aesni_inc:
9217 .Linc_low:
9218 movaps CTR, IV
9219 PSHUFB_XMM BSWAP_MASK IV
9220+ pax_force_retaddr_bts
9221 ret
9222
9223 /*
9224@@ -2614,5 +2643,7 @@ ENTRY(aesni_ctr_enc)
9225 .Lctr_enc_ret:
9226 movups IV, (IVP)
9227 .Lctr_enc_just_ret:
9228+ pax_force_retaddr 0, 1
9229 ret
9230+ENDPROC(aesni_ctr_enc)
9231 #endif
9232diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
9233index 391d245..67f35c2 100644
9234--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
9235+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
9236@@ -20,6 +20,8 @@
9237 *
9238 */
9239
9240+#include <asm/alternative-asm.h>
9241+
9242 .file "blowfish-x86_64-asm.S"
9243 .text
9244
9245@@ -151,9 +153,11 @@ __blowfish_enc_blk:
9246 jnz __enc_xor;
9247
9248 write_block();
9249+ pax_force_retaddr 0, 1
9250 ret;
9251 __enc_xor:
9252 xor_block();
9253+ pax_force_retaddr 0, 1
9254 ret;
9255
9256 .align 8
9257@@ -188,6 +192,7 @@ blowfish_dec_blk:
9258
9259 movq %r11, %rbp;
9260
9261+ pax_force_retaddr 0, 1
9262 ret;
9263
9264 /**********************************************************************
9265@@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
9266
9267 popq %rbx;
9268 popq %rbp;
9269+ pax_force_retaddr 0, 1
9270 ret;
9271
9272 __enc_xor4:
9273@@ -349,6 +355,7 @@ __enc_xor4:
9274
9275 popq %rbx;
9276 popq %rbp;
9277+ pax_force_retaddr 0, 1
9278 ret;
9279
9280 .align 8
9281@@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
9282 popq %rbx;
9283 popq %rbp;
9284
9285+ pax_force_retaddr 0, 1
9286 ret;
9287
9288diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
9289index 0b33743..7a56206 100644
9290--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
9291+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
9292@@ -20,6 +20,8 @@
9293 *
9294 */
9295
9296+#include <asm/alternative-asm.h>
9297+
9298 .file "camellia-x86_64-asm_64.S"
9299 .text
9300
9301@@ -229,12 +231,14 @@ __enc_done:
9302 enc_outunpack(mov, RT1);
9303
9304 movq RRBP, %rbp;
9305+ pax_force_retaddr 0, 1
9306 ret;
9307
9308 __enc_xor:
9309 enc_outunpack(xor, RT1);
9310
9311 movq RRBP, %rbp;
9312+ pax_force_retaddr 0, 1
9313 ret;
9314
9315 .global camellia_dec_blk;
9316@@ -275,6 +279,7 @@ __dec_rounds16:
9317 dec_outunpack();
9318
9319 movq RRBP, %rbp;
9320+ pax_force_retaddr 0, 1
9321 ret;
9322
9323 /**********************************************************************
9324@@ -468,6 +473,7 @@ __enc2_done:
9325
9326 movq RRBP, %rbp;
9327 popq %rbx;
9328+ pax_force_retaddr 0, 1
9329 ret;
9330
9331 __enc2_xor:
9332@@ -475,6 +481,7 @@ __enc2_xor:
9333
9334 movq RRBP, %rbp;
9335 popq %rbx;
9336+ pax_force_retaddr 0, 1
9337 ret;
9338
9339 .global camellia_dec_blk_2way;
9340@@ -517,4 +524,5 @@ __dec2_rounds16:
9341
9342 movq RRBP, %rbp;
9343 movq RXOR, %rbx;
9344+ pax_force_retaddr 0, 1
9345 ret;
9346diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
9347index a41a3aa..bdf5753 100644
9348--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
9349+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
9350@@ -23,6 +23,8 @@
9351 *
9352 */
9353
9354+#include <asm/alternative-asm.h>
9355+
9356 .file "cast5-avx-x86_64-asm_64.S"
9357
9358 .extern cast5_s1
9359@@ -293,6 +295,7 @@ __skip_enc:
9360 leaq 3*(2*4*4)(%r11), %rax;
9361 outunpack_blocks(%rax, RR4, RL4, RTMP, RX, RKM);
9362
9363+ pax_force_retaddr 0, 1
9364 ret;
9365
9366 __enc_xor16:
9367@@ -303,6 +306,7 @@ __enc_xor16:
9368 leaq 3*(2*4*4)(%r11), %rax;
9369 outunpack_xor_blocks(%rax, RR4, RL4, RTMP, RX, RKM);
9370
9371+ pax_force_retaddr 0, 1
9372 ret;
9373
9374 .align 16
9375@@ -369,6 +373,7 @@ __dec_tail:
9376 leaq 3*(2*4*4)(%r11), %rax;
9377 outunpack_blocks(%rax, RR4, RL4, RTMP, RX, RKM);
9378
9379+ pax_force_retaddr 0, 1
9380 ret;
9381
9382 __skip_dec:
9383diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
9384index 218d283..819e6da 100644
9385--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
9386+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
9387@@ -23,6 +23,8 @@
9388 *
9389 */
9390
9391+#include <asm/alternative-asm.h>
9392+
9393 .file "cast6-avx-x86_64-asm_64.S"
9394
9395 .extern cast6_s1
9396@@ -324,12 +326,14 @@ __cast6_enc_blk_8way:
9397 outunpack_blocks(%r11, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
9398 outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
9399
9400+ pax_force_retaddr 0, 1
9401 ret;
9402
9403 __enc_xor8:
9404 outunpack_xor_blocks(%r11, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
9405 outunpack_xor_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
9406
9407+ pax_force_retaddr 0, 1
9408 ret;
9409
9410 .align 16
9411@@ -380,4 +384,5 @@ cast6_dec_blk_8way:
9412 outunpack_blocks(%r11, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
9413 outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
9414
9415+ pax_force_retaddr 0, 1
9416 ret;
9417diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
9418index 6214a9b..1f4fc9a 100644
9419--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
9420+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
9421@@ -1,3 +1,5 @@
9422+#include <asm/alternative-asm.h>
9423+
9424 # enter ECRYPT_encrypt_bytes
9425 .text
9426 .p2align 5
9427@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
9428 add %r11,%rsp
9429 mov %rdi,%rax
9430 mov %rsi,%rdx
9431+ pax_force_retaddr 0, 1
9432 ret
9433 # bytesatleast65:
9434 ._bytesatleast65:
9435@@ -891,6 +894,7 @@ ECRYPT_keysetup:
9436 add %r11,%rsp
9437 mov %rdi,%rax
9438 mov %rsi,%rdx
9439+ pax_force_retaddr
9440 ret
9441 # enter ECRYPT_ivsetup
9442 .text
9443@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
9444 add %r11,%rsp
9445 mov %rdi,%rax
9446 mov %rsi,%rdx
9447+ pax_force_retaddr
9448 ret
9449diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
9450index 504106b..4e50951 100644
9451--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
9452+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
9453@@ -24,6 +24,8 @@
9454 *
9455 */
9456
9457+#include <asm/alternative-asm.h>
9458+
9459 .file "serpent-avx-x86_64-asm_64.S"
9460 .text
9461
9462@@ -638,12 +640,14 @@ __serpent_enc_blk_8way_avx:
9463 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
9464 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
9465
9466+ pax_force_retaddr
9467 ret;
9468
9469 __enc_xor8:
9470 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
9471 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
9472
9473+ pax_force_retaddr
9474 ret;
9475
9476 .align 8
9477@@ -701,4 +705,5 @@ serpent_dec_blk_8way_avx:
9478 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
9479 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
9480
9481+ pax_force_retaddr
9482 ret;
9483diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
9484index 3ee1ff0..cbc568b 100644
9485--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
9486+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
9487@@ -24,6 +24,8 @@
9488 *
9489 */
9490
9491+#include <asm/alternative-asm.h>
9492+
9493 .file "serpent-sse2-x86_64-asm_64.S"
9494 .text
9495
9496@@ -692,12 +694,14 @@ __serpent_enc_blk_8way:
9497 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
9498 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
9499
9500+ pax_force_retaddr
9501 ret;
9502
9503 __enc_xor8:
9504 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
9505 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
9506
9507+ pax_force_retaddr
9508 ret;
9509
9510 .align 8
9511@@ -755,4 +759,5 @@ serpent_dec_blk_8way:
9512 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
9513 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
9514
9515+ pax_force_retaddr
9516 ret;
9517diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
9518index 49d6987..df66bd4 100644
9519--- a/arch/x86/crypto/sha1_ssse3_asm.S
9520+++ b/arch/x86/crypto/sha1_ssse3_asm.S
9521@@ -28,6 +28,8 @@
9522 * (at your option) any later version.
9523 */
9524
9525+#include <asm/alternative-asm.h>
9526+
9527 #define CTX %rdi // arg1
9528 #define BUF %rsi // arg2
9529 #define CNT %rdx // arg3
9530@@ -104,6 +106,7 @@
9531 pop %r12
9532 pop %rbp
9533 pop %rbx
9534+ pax_force_retaddr 0, 1
9535 ret
9536
9537 .size \name, .-\name
9538diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
9539index 1585abb..4a9af16 100644
9540--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
9541+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
9542@@ -23,6 +23,8 @@
9543 *
9544 */
9545
9546+#include <asm/alternative-asm.h>
9547+
9548 .file "twofish-avx-x86_64-asm_64.S"
9549 .text
9550
9551@@ -303,12 +305,14 @@ __twofish_enc_blk_8way:
9552 outunpack_blocks(%r11, RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
9553 outunpack_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
9554
9555+ pax_force_retaddr
9556 ret;
9557
9558 __enc_xor8:
9559 outunpack_xor_blocks(%r11, RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
9560 outunpack_xor_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
9561
9562+ pax_force_retaddr
9563 ret;
9564
9565 .align 8
9566@@ -354,4 +358,5 @@ twofish_dec_blk_8way:
9567 outunpack_blocks(%r11, RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
9568 outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
9569
9570+ pax_force_retaddr
9571 ret;
9572diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
9573index 5b012a2..36d5364 100644
9574--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
9575+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
9576@@ -20,6 +20,8 @@
9577 *
9578 */
9579
9580+#include <asm/alternative-asm.h>
9581+
9582 .file "twofish-x86_64-asm-3way.S"
9583 .text
9584
9585@@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
9586 popq %r13;
9587 popq %r14;
9588 popq %r15;
9589+ pax_force_retaddr 0, 1
9590 ret;
9591
9592 __enc_xor3:
9593@@ -271,6 +274,7 @@ __enc_xor3:
9594 popq %r13;
9595 popq %r14;
9596 popq %r15;
9597+ pax_force_retaddr 0, 1
9598 ret;
9599
9600 .global twofish_dec_blk_3way
9601@@ -312,5 +316,6 @@ twofish_dec_blk_3way:
9602 popq %r13;
9603 popq %r14;
9604 popq %r15;
9605+ pax_force_retaddr 0, 1
9606 ret;
9607
9608diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
9609index 7bcf3fc..f53832f 100644
9610--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
9611+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
9612@@ -21,6 +21,7 @@
9613 .text
9614
9615 #include <asm/asm-offsets.h>
9616+#include <asm/alternative-asm.h>
9617
9618 #define a_offset 0
9619 #define b_offset 4
9620@@ -268,6 +269,7 @@ twofish_enc_blk:
9621
9622 popq R1
9623 movq $1,%rax
9624+ pax_force_retaddr 0, 1
9625 ret
9626
9627 twofish_dec_blk:
9628@@ -319,4 +321,5 @@ twofish_dec_blk:
9629
9630 popq R1
9631 movq $1,%rax
9632+ pax_force_retaddr 0, 1
9633 ret
9634diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
9635index 07b3a68..bd2a388 100644
9636--- a/arch/x86/ia32/ia32_aout.c
9637+++ b/arch/x86/ia32/ia32_aout.c
9638@@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
9639 unsigned long dump_start, dump_size;
9640 struct user32 dump;
9641
9642+ memset(&dump, 0, sizeof(dump));
9643+
9644 fs = get_fs();
9645 set_fs(KERNEL_DS);
9646 has_dumped = 1;
9647diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
9648index efc6a95..95abfe2 100644
9649--- a/arch/x86/ia32/ia32_signal.c
9650+++ b/arch/x86/ia32/ia32_signal.c
9651@@ -163,8 +163,8 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
9652 }
9653 seg = get_fs();
9654 set_fs(KERNEL_DS);
9655- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
9656- (stack_t __force __user *) &uoss, regs->sp);
9657+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
9658+ (stack_t __force_user *) &uoss, regs->sp);
9659 set_fs(seg);
9660 if (ret >= 0 && uoss_ptr) {
9661 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
9662@@ -396,7 +396,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
9663 sp -= frame_size;
9664 /* Align the stack pointer according to the i386 ABI,
9665 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
9666- sp = ((sp + 4) & -16ul) - 4;
9667+ sp = ((sp - 12) & -16ul) - 4;
9668 return (void __user *) sp;
9669 }
9670
9671@@ -454,7 +454,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
9672 * These are actually not used anymore, but left because some
9673 * gdb versions depend on them as a marker.
9674 */
9675- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
9676+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
9677 } put_user_catch(err);
9678
9679 if (err)
9680@@ -496,7 +496,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
9681 0xb8,
9682 __NR_ia32_rt_sigreturn,
9683 0x80cd,
9684- 0,
9685+ 0
9686 };
9687
9688 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
9689@@ -522,16 +522,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
9690
9691 if (ka->sa.sa_flags & SA_RESTORER)
9692 restorer = ka->sa.sa_restorer;
9693+ else if (current->mm->context.vdso)
9694+ /* Return stub is in 32bit vsyscall page */
9695+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
9696 else
9697- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
9698- rt_sigreturn);
9699+ restorer = &frame->retcode;
9700 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
9701
9702 /*
9703 * Not actually used anymore, but left because some gdb
9704 * versions need it.
9705 */
9706- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
9707+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
9708 } put_user_catch(err);
9709
9710 err |= copy_siginfo_to_user32(&frame->info, info);
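
Both versions of the get_sigframe() expression satisfy the ABI comment above them — ((sp' + 4) & 15) == 0 at handler entry. The difference is where they land relative to the precomputed sp: since ((sp - 12) & -16ul) equals ((sp + 4) & -16ul) - 16, the new value is exactly the old value minus 16, guaranteeing the frame pointer lands strictly below sp instead of possibly coinciding with it. The arithmetic is easy to check exhaustively over sp mod 16 (the base address is arbitrary):

#include <stdio.h>

int main(void)
{
	const unsigned long base = 0xbffff000UL;
	for (unsigned long d = 0; d < 16; d++) {
		unsigned long sp = base + d;
		unsigned long old_sp = ((sp + 4) & -16UL) - 4;
		unsigned long new_sp = ((sp - 12) & -16UL) - 4;
		printf("sp%%16=%2lu old=%#lx (+4 mod 16 = %lu) new=%#lx (+4 mod 16 = %lu)\n",
		       d, old_sp, (old_sp + 4) & 15, new_sp, (new_sp + 4) & 15);
	}
	return 0;
}
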
9711diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
9712index 076745f..ae8f6cb 100644
9713--- a/arch/x86/ia32/ia32entry.S
9714+++ b/arch/x86/ia32/ia32entry.S
9715@@ -15,8 +15,10 @@
9716 #include <asm/irqflags.h>
9717 #include <asm/asm.h>
9718 #include <asm/smap.h>
9719+#include <asm/pgtable.h>
9720 #include <linux/linkage.h>
9721 #include <linux/err.h>
9722+#include <asm/alternative-asm.h>
9723
9724 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
9725 #include <linux/elf-em.h>
9726@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
9727 ENDPROC(native_irq_enable_sysexit)
9728 #endif
9729
9730+ .macro pax_enter_kernel_user
9731+ pax_set_fptr_mask
9732+#ifdef CONFIG_PAX_MEMORY_UDEREF
9733+ call pax_enter_kernel_user
9734+#endif
9735+ .endm
9736+
9737+ .macro pax_exit_kernel_user
9738+#ifdef CONFIG_PAX_MEMORY_UDEREF
9739+ call pax_exit_kernel_user
9740+#endif
9741+#ifdef CONFIG_PAX_RANDKSTACK
9742+ pushq %rax
9743+ pushq %r11
9744+ call pax_randomize_kstack
9745+ popq %r11
9746+ popq %rax
9747+#endif
9748+ .endm
9749+
9750+.macro pax_erase_kstack
9751+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
9752+ call pax_erase_kstack
9753+#endif
9754+.endm
9755+
9756 /*
9757 * 32bit SYSENTER instruction entry.
9758 *
9759@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
9760 CFI_REGISTER rsp,rbp
9761 SWAPGS_UNSAFE_STACK
9762 movq PER_CPU_VAR(kernel_stack), %rsp
9763- addq $(KERNEL_STACK_OFFSET),%rsp
9764- /*
9765- * No need to follow this irqs on/off section: the syscall
9766- * disabled irqs, here we enable it straight after entry:
9767- */
9768- ENABLE_INTERRUPTS(CLBR_NONE)
9769 movl %ebp,%ebp /* zero extension */
9770 pushq_cfi $__USER32_DS
9771 /*CFI_REL_OFFSET ss,0*/
9772@@ -135,24 +157,44 @@ ENTRY(ia32_sysenter_target)
9773 CFI_REL_OFFSET rsp,0
9774 pushfq_cfi
9775 /*CFI_REL_OFFSET rflags,0*/
9776- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
9777- CFI_REGISTER rip,r10
9778+ orl $X86_EFLAGS_IF,(%rsp)
9779+ GET_THREAD_INFO(%r11)
9780+ movl TI_sysenter_return(%r11), %r11d
9781+ CFI_REGISTER rip,r11
9782 pushq_cfi $__USER32_CS
9783 /*CFI_REL_OFFSET cs,0*/
9784 movl %eax, %eax
9785- pushq_cfi %r10
9786+ pushq_cfi %r11
9787 CFI_REL_OFFSET rip,0
9788 pushq_cfi %rax
9789 cld
9790 SAVE_ARGS 0,1,0
9791+ pax_enter_kernel_user
9792+
9793+#ifdef CONFIG_PAX_RANDKSTACK
9794+ pax_erase_kstack
9795+#endif
9796+
9797+ /*
9798+ * No need to follow this irqs on/off section: the syscall
9799+ * disabled irqs, here we enable it straight after entry:
9800+ */
9801+ ENABLE_INTERRUPTS(CLBR_NONE)
9802 /* no need to do an access_ok check here because rbp has been
9803 32bit zero extended */
9804+
9805+#ifdef CONFIG_PAX_MEMORY_UDEREF
9806+ mov $PAX_USER_SHADOW_BASE,%r11
9807+ add %r11,%rbp
9808+#endif
9809+
9810 ASM_STAC
9811 1: movl (%rbp),%ebp
9812 _ASM_EXTABLE(1b,ia32_badarg)
9813 ASM_CLAC
9814- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9815- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9816+ GET_THREAD_INFO(%r11)
9817+ orl $TS_COMPAT,TI_status(%r11)
9818+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
9819 CFI_REMEMBER_STATE
9820 jnz sysenter_tracesys
9821 cmpq $(IA32_NR_syscalls-1),%rax
9822@@ -162,12 +204,15 @@ sysenter_do_call:
9823 sysenter_dispatch:
9824 call *ia32_sys_call_table(,%rax,8)
9825 movq %rax,RAX-ARGOFFSET(%rsp)
9826+ GET_THREAD_INFO(%r11)
9827 DISABLE_INTERRUPTS(CLBR_NONE)
9828 TRACE_IRQS_OFF
9829- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9830+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
9831 jnz sysexit_audit
9832 sysexit_from_sys_call:
9833- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9834+ pax_exit_kernel_user
9835+ pax_erase_kstack
9836+ andl $~TS_COMPAT,TI_status(%r11)
9837 /* clear IF, that popfq doesn't enable interrupts early */
9838 andl $~0x200,EFLAGS-R11(%rsp)
9839 movl RIP-R11(%rsp),%edx /* User %eip */
9840@@ -193,6 +238,9 @@ sysexit_from_sys_call:
9841 movl %eax,%esi /* 2nd arg: syscall number */
9842 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
9843 call __audit_syscall_entry
9844+
9845+ pax_erase_kstack
9846+
9847 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
9848 cmpq $(IA32_NR_syscalls-1),%rax
9849 ja ia32_badsys
9850@@ -204,7 +252,7 @@ sysexit_from_sys_call:
9851 .endm
9852
9853 .macro auditsys_exit exit
9854- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9855+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
9856 jnz ia32_ret_from_sys_call
9857 TRACE_IRQS_ON
9858 sti
9859@@ -215,11 +263,12 @@ sysexit_from_sys_call:
9860 1: setbe %al /* 1 if error, 0 if not */
9861 movzbl %al,%edi /* zero-extend that into %edi */
9862 call __audit_syscall_exit
9863+ GET_THREAD_INFO(%r11)
9864 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
9865 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
9866 cli
9867 TRACE_IRQS_OFF
9868- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9869+ testl %edi,TI_flags(%r11)
9870 jz \exit
9871 CLEAR_RREGS -ARGOFFSET
9872 jmp int_with_check
9873@@ -237,7 +286,7 @@ sysexit_audit:
9874
9875 sysenter_tracesys:
9876 #ifdef CONFIG_AUDITSYSCALL
9877- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9878+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
9879 jz sysenter_auditsys
9880 #endif
9881 SAVE_REST
9882@@ -249,6 +298,9 @@ sysenter_tracesys:
9883 RESTORE_REST
9884 cmpq $(IA32_NR_syscalls-1),%rax
9885 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
9886+
9887+ pax_erase_kstack
9888+
9889 jmp sysenter_do_call
9890 CFI_ENDPROC
9891 ENDPROC(ia32_sysenter_target)
9892@@ -276,19 +328,25 @@ ENDPROC(ia32_sysenter_target)
9893 ENTRY(ia32_cstar_target)
9894 CFI_STARTPROC32 simple
9895 CFI_SIGNAL_FRAME
9896- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
9897+ CFI_DEF_CFA rsp,0
9898 CFI_REGISTER rip,rcx
9899 /*CFI_REGISTER rflags,r11*/
9900 SWAPGS_UNSAFE_STACK
9901 movl %esp,%r8d
9902 CFI_REGISTER rsp,r8
9903 movq PER_CPU_VAR(kernel_stack),%rsp
9904+ SAVE_ARGS 8*6,0,0
9905+ pax_enter_kernel_user
9906+
9907+#ifdef CONFIG_PAX_RANDKSTACK
9908+ pax_erase_kstack
9909+#endif
9910+
9911 /*
9912 * No need to follow this irqs on/off section: the syscall
9913 * disabled irqs and here we enable it straight after entry:
9914 */
9915 ENABLE_INTERRUPTS(CLBR_NONE)
9916- SAVE_ARGS 8,0,0
9917 movl %eax,%eax /* zero extension */
9918 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
9919 movq %rcx,RIP-ARGOFFSET(%rsp)
9920@@ -304,12 +362,19 @@ ENTRY(ia32_cstar_target)
9921 /* no need to do an access_ok check here because r8 has been
9922 32bit zero extended */
9923 /* hardware stack frame is complete now */
9924+
9925+#ifdef CONFIG_PAX_MEMORY_UDEREF
9926+ mov $PAX_USER_SHADOW_BASE,%r11
9927+ add %r11,%r8
9928+#endif
9929+
9930 ASM_STAC
9931 1: movl (%r8),%r9d
9932 _ASM_EXTABLE(1b,ia32_badarg)
9933 ASM_CLAC
9934- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9935- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9936+ GET_THREAD_INFO(%r11)
9937+ orl $TS_COMPAT,TI_status(%r11)
9938+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
9939 CFI_REMEMBER_STATE
9940 jnz cstar_tracesys
9941 cmpq $IA32_NR_syscalls-1,%rax
9942@@ -319,12 +384,15 @@ cstar_do_call:
9943 cstar_dispatch:
9944 call *ia32_sys_call_table(,%rax,8)
9945 movq %rax,RAX-ARGOFFSET(%rsp)
9946+ GET_THREAD_INFO(%r11)
9947 DISABLE_INTERRUPTS(CLBR_NONE)
9948 TRACE_IRQS_OFF
9949- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9950+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
9951 jnz sysretl_audit
9952 sysretl_from_sys_call:
9953- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9954+ pax_exit_kernel_user
9955+ pax_erase_kstack
9956+ andl $~TS_COMPAT,TI_status(%r11)
9957 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
9958 movl RIP-ARGOFFSET(%rsp),%ecx
9959 CFI_REGISTER rip,rcx
9960@@ -352,7 +420,7 @@ sysretl_audit:
9961
9962 cstar_tracesys:
9963 #ifdef CONFIG_AUDITSYSCALL
9964- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9965+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
9966 jz cstar_auditsys
9967 #endif
9968 xchgl %r9d,%ebp
9969@@ -366,6 +434,9 @@ cstar_tracesys:
9970 xchgl %ebp,%r9d
9971 cmpq $(IA32_NR_syscalls-1),%rax
9972 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
9973+
9974+ pax_erase_kstack
9975+
9976 jmp cstar_do_call
9977 END(ia32_cstar_target)
9978
9979@@ -407,19 +478,26 @@ ENTRY(ia32_syscall)
9980 CFI_REL_OFFSET rip,RIP-RIP
9981 PARAVIRT_ADJUST_EXCEPTION_FRAME
9982 SWAPGS
9983- /*
9984- * No need to follow this irqs on/off section: the syscall
9985- * disabled irqs and here we enable it straight after entry:
9986- */
9987- ENABLE_INTERRUPTS(CLBR_NONE)
9988 movl %eax,%eax
9989 pushq_cfi %rax
9990 cld
9991 /* note the registers are not zero extended to the sf.
9992 this could be a problem. */
9993 SAVE_ARGS 0,1,0
9994- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9995- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9996+ pax_enter_kernel_user
9997+
9998+#ifdef CONFIG_PAX_RANDKSTACK
9999+ pax_erase_kstack
10000+#endif
10001+
10002+ /*
10003+ * No need to follow this irqs on/off section: the syscall
10004+ * disabled irqs and here we enable it straight after entry:
10005+ */
10006+ ENABLE_INTERRUPTS(CLBR_NONE)
10007+ GET_THREAD_INFO(%r11)
10008+ orl $TS_COMPAT,TI_status(%r11)
10009+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
10010 jnz ia32_tracesys
10011 cmpq $(IA32_NR_syscalls-1),%rax
10012 ja ia32_badsys
10013@@ -442,6 +520,9 @@ ia32_tracesys:
10014 RESTORE_REST
10015 cmpq $(IA32_NR_syscalls-1),%rax
10016 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
10017+
10018+ pax_erase_kstack
10019+
10020 jmp ia32_do_call
10021 END(ia32_syscall)
10022
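
The ia32entry.S hunks above stop deriving thread_info from %rsp at a fixed offset and instead cache the pointer in a register via GET_THREAD_INFO(%r11), since PAX_RANDKSTACK makes the stack offset unpredictable. On a fixed-size, power-of-two-aligned kernel stack, one classical derivation simply masks the stack pointer down to the stack base; a minimal userspace sketch of that arithmetic only, with made-up addresses and an illustrative 8 KiB stack:

    #include <stdio.h>
    #include <stdint.h>

    #define THREAD_SIZE 8192UL   /* illustrative: 8 KiB, power of two */

    int main(void)
    {
        uintptr_t stack_base = 0x7f0000010000UL;  /* made-up, aligned base */
        uintptr_t sp = stack_base + 0x1a30;       /* somewhere on the stack */
        uintptr_t ti = sp & ~(THREAD_SIZE - 1);   /* mask back to the base */
        printf("%d\n", ti == stack_base);         /* prints 1 */
        return 0;
    }

Once the distance between %rsp and the stack base is randomized, no such fixed arithmetic works, which is why the hunks keep the pointer in %r11 across the syscall path instead.
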
10023diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
10024index 86d68d1..f9960fe 100644
10025--- a/arch/x86/ia32/sys_ia32.c
10026+++ b/arch/x86/ia32/sys_ia32.c
10027@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
10028 */
10029 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
10030 {
10031- typeof(ubuf->st_uid) uid = 0;
10032- typeof(ubuf->st_gid) gid = 0;
10033+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
10034+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
10035 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
10036 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
10037 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
10038@@ -303,7 +303,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
10039 mm_segment_t old_fs = get_fs();
10040
10041 set_fs(KERNEL_DS);
10042- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
10043+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
10044 set_fs(old_fs);
10045 if (put_compat_timespec(&t, interval))
10046 return -EFAULT;
10047@@ -319,7 +319,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
10048 mm_segment_t old_fs = get_fs();
10049
10050 set_fs(KERNEL_DS);
10051- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
10052+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
10053 set_fs(old_fs);
10054 if (!ret) {
10055 switch (_NSIG_WORDS) {
10056@@ -344,7 +344,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
10057 if (copy_siginfo_from_user32(&info, uinfo))
10058 return -EFAULT;
10059 set_fs(KERNEL_DS);
10060- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
10061+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
10062 set_fs(old_fs);
10063 return ret;
10064 }
10065@@ -376,7 +376,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
10066 return -EFAULT;
10067
10068 set_fs(KERNEL_DS);
10069- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
10070+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
10071 count);
10072 set_fs(old_fs);
10073
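
The sys32_* wrappers above share one shape: build a native-layout temporary under set_fs(KERNEL_DS), call the native syscall on it (here with a __force_user cast instead of the plain __user one), then convert the result back to the compat layout. A hedged userspace sketch of that conversion shape only; the struct and function names below are invented for illustration:

    #include <stdio.h>
    #include <stdint.h>

    struct timespec64 { int64_t tv_sec; int64_t tv_nsec; };  /* "native" */
    struct timespec32 { int32_t tv_sec; int32_t tv_nsec; };  /* "compat" */

    static int native_get_interval(struct timespec64 *t)
    {
        t->tv_sec = 0;
        t->tv_nsec = 10 * 1000 * 1000;       /* pretend: 10 ms timeslice */
        return 0;
    }

    static int compat_get_interval(struct timespec32 *out)
    {
        struct timespec64 t;                 /* kernel-side temporary */
        int ret = native_get_interval(&t);
        if (ret)
            return ret;
        out->tv_sec = (int32_t)t.tv_sec;     /* narrow to the compat layout */
        out->tv_nsec = (int32_t)t.tv_nsec;
        return 0;
    }

    int main(void)
    {
        struct timespec32 ts;
        compat_get_interval(&ts);
        printf("%d.%09d\n", (int)ts.tv_sec, (int)ts.tv_nsec);
        return 0;
    }
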
10074diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
10075index 372231c..a5aa1a1 100644
10076--- a/arch/x86/include/asm/alternative-asm.h
10077+++ b/arch/x86/include/asm/alternative-asm.h
10078@@ -18,6 +18,45 @@
10079 .endm
10080 #endif
10081
10082+#ifdef KERNEXEC_PLUGIN
10083+ .macro pax_force_retaddr_bts rip=0
10084+ btsq $63,\rip(%rsp)
10085+ .endm
10086+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
10087+ .macro pax_force_retaddr rip=0, reload=0
10088+ btsq $63,\rip(%rsp)
10089+ .endm
10090+ .macro pax_force_fptr ptr
10091+ btsq $63,\ptr
10092+ .endm
10093+ .macro pax_set_fptr_mask
10094+ .endm
10095+#endif
10096+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
10097+ .macro pax_force_retaddr rip=0, reload=0
10098+ .if \reload
10099+ pax_set_fptr_mask
10100+ .endif
10101+ orq %r10,\rip(%rsp)
10102+ .endm
10103+ .macro pax_force_fptr ptr
10104+ orq %r10,\ptr
10105+ .endm
10106+ .macro pax_set_fptr_mask
10107+ movabs $0x8000000000000000,%r10
10108+ .endm
10109+#endif
10110+#else
10111+ .macro pax_force_retaddr rip=0, reload=0
10112+ .endm
10113+ .macro pax_force_fptr ptr
10114+ .endm
10115+ .macro pax_force_retaddr_bts rip=0
10116+ .endm
10117+ .macro pax_set_fptr_mask
10118+ .endm
10119+#endif
10120+
10121 .macro altinstruction_entry orig alt feature orig_len alt_len
10122 .long \orig - .
10123 .long \alt - .
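
Both KERNEXEC methods defined above force bit 63 of a saved return address so a hijacked return lands on a non-canonical kernel address: METHOD_BTS sets the bit directly, METHOD_OR applies a mask preloaded into %r10 by pax_set_fptr_mask. A small sketch showing the two bit operations produce the same value (the address is made up):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t retaddr = 0x00000000004005d0ULL;  /* made-up address */
        uint64_t mask    = 0x8000000000000000ULL;  /* the %r10 mask */

        uint64_t bts = retaddr | (1ULL << 63);     /* BTS $63 method */
        uint64_t orq = retaddr | mask;             /* ORQ %r10 method */

        printf("%#llx %#llx same=%d\n", (unsigned long long)bts,
               (unsigned long long)orq, bts == orq);
        return 0;
    }
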
10124diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
10125index 58ed6d9..f1cbe58 100644
10126--- a/arch/x86/include/asm/alternative.h
10127+++ b/arch/x86/include/asm/alternative.h
10128@@ -105,7 +105,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
10129 ".pushsection .discard,\"aw\",@progbits\n" \
10130 DISCARD_ENTRY(1) \
10131 ".popsection\n" \
10132- ".pushsection .altinstr_replacement, \"ax\"\n" \
10133+ ".pushsection .altinstr_replacement, \"a\"\n" \
10134 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
10135 ".popsection"
10136
10137@@ -119,7 +119,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
10138 DISCARD_ENTRY(1) \
10139 DISCARD_ENTRY(2) \
10140 ".popsection\n" \
10141- ".pushsection .altinstr_replacement, \"ax\"\n" \
10142+ ".pushsection .altinstr_replacement, \"a\"\n" \
10143 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
10144 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
10145 ".popsection"
10146diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
10147index 3388034..ba52312 100644
10148--- a/arch/x86/include/asm/apic.h
10149+++ b/arch/x86/include/asm/apic.h
10150@@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
10151
10152 #ifdef CONFIG_X86_LOCAL_APIC
10153
10154-extern unsigned int apic_verbosity;
10155+extern int apic_verbosity;
10156 extern int local_apic_timer_c2_ok;
10157
10158 extern int disable_apic;
10159@@ -391,7 +391,7 @@ struct apic {
10160 */
10161 int (*x86_32_numa_cpu_node)(int cpu);
10162 #endif
10163-};
10164+} __do_const;
10165
10166 /*
10167 * Pointer to the local APIC driver in use on this system (there's
10168diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
10169index 20370c6..a2eb9b0 100644
10170--- a/arch/x86/include/asm/apm.h
10171+++ b/arch/x86/include/asm/apm.h
10172@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
10173 __asm__ __volatile__(APM_DO_ZERO_SEGS
10174 "pushl %%edi\n\t"
10175 "pushl %%ebp\n\t"
10176- "lcall *%%cs:apm_bios_entry\n\t"
10177+ "lcall *%%ss:apm_bios_entry\n\t"
10178 "setc %%al\n\t"
10179 "popl %%ebp\n\t"
10180 "popl %%edi\n\t"
10181@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
10182 __asm__ __volatile__(APM_DO_ZERO_SEGS
10183 "pushl %%edi\n\t"
10184 "pushl %%ebp\n\t"
10185- "lcall *%%cs:apm_bios_entry\n\t"
10186+ "lcall *%%ss:apm_bios_entry\n\t"
10187 "setc %%bl\n\t"
10188 "popl %%ebp\n\t"
10189 "popl %%edi\n\t"
10190diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
10191index b6c3b82..b4c077a 100644
10192--- a/arch/x86/include/asm/atomic.h
10193+++ b/arch/x86/include/asm/atomic.h
10194@@ -22,7 +22,18 @@
10195 */
10196 static inline int atomic_read(const atomic_t *v)
10197 {
10198- return (*(volatile int *)&(v)->counter);
10199+ return (*(volatile const int *)&(v)->counter);
10200+}
10201+
10202+/**
10203+ * atomic_read_unchecked - read atomic variable
10204+ * @v: pointer of type atomic_unchecked_t
10205+ *
10206+ * Atomically reads the value of @v.
10207+ */
10208+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
10209+{
10210+ return (*(volatile const int *)&(v)->counter);
10211 }
10212
10213 /**
10214@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
10215 }
10216
10217 /**
10218+ * atomic_set_unchecked - set atomic variable
10219+ * @v: pointer of type atomic_unchecked_t
10220+ * @i: required value
10221+ *
10222+ * Atomically sets the value of @v to @i.
10223+ */
10224+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
10225+{
10226+ v->counter = i;
10227+}
10228+
10229+/**
10230 * atomic_add - add integer to atomic variable
10231 * @i: integer value to add
10232 * @v: pointer of type atomic_t
10233@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
10234 */
10235 static inline void atomic_add(int i, atomic_t *v)
10236 {
10237- asm volatile(LOCK_PREFIX "addl %1,%0"
10238+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
10239+
10240+#ifdef CONFIG_PAX_REFCOUNT
10241+ "jno 0f\n"
10242+ LOCK_PREFIX "subl %1,%0\n"
10243+ "int $4\n0:\n"
10244+ _ASM_EXTABLE(0b, 0b)
10245+#endif
10246+
10247+ : "+m" (v->counter)
10248+ : "ir" (i));
10249+}
10250+
10251+/**
10252+ * atomic_add_unchecked - add integer to atomic variable
10253+ * @i: integer value to add
10254+ * @v: pointer of type atomic_unchecked_t
10255+ *
10256+ * Atomically adds @i to @v.
10257+ */
10258+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
10259+{
10260+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
10261 : "+m" (v->counter)
10262 : "ir" (i));
10263 }
10264@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
10265 */
10266 static inline void atomic_sub(int i, atomic_t *v)
10267 {
10268- asm volatile(LOCK_PREFIX "subl %1,%0"
10269+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
10270+
10271+#ifdef CONFIG_PAX_REFCOUNT
10272+ "jno 0f\n"
10273+ LOCK_PREFIX "addl %1,%0\n"
10274+ "int $4\n0:\n"
10275+ _ASM_EXTABLE(0b, 0b)
10276+#endif
10277+
10278+ : "+m" (v->counter)
10279+ : "ir" (i));
10280+}
10281+
10282+/**
10283+ * atomic_sub_unchecked - subtract integer from atomic variable
10284+ * @i: integer value to subtract
10285+ * @v: pointer of type atomic_unchecked_t
10286+ *
10287+ * Atomically subtracts @i from @v.
10288+ */
10289+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
10290+{
10291+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
10292 : "+m" (v->counter)
10293 : "ir" (i));
10294 }
10295@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
10296 {
10297 unsigned char c;
10298
10299- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
10300+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
10301+
10302+#ifdef CONFIG_PAX_REFCOUNT
10303+ "jno 0f\n"
10304+ LOCK_PREFIX "addl %2,%0\n"
10305+ "int $4\n0:\n"
10306+ _ASM_EXTABLE(0b, 0b)
10307+#endif
10308+
10309+ "sete %1\n"
10310 : "+m" (v->counter), "=qm" (c)
10311 : "ir" (i) : "memory");
10312 return c;
10313@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
10314 */
10315 static inline void atomic_inc(atomic_t *v)
10316 {
10317- asm volatile(LOCK_PREFIX "incl %0"
10318+ asm volatile(LOCK_PREFIX "incl %0\n"
10319+
10320+#ifdef CONFIG_PAX_REFCOUNT
10321+ "jno 0f\n"
10322+ LOCK_PREFIX "decl %0\n"
10323+ "int $4\n0:\n"
10324+ _ASM_EXTABLE(0b, 0b)
10325+#endif
10326+
10327+ : "+m" (v->counter));
10328+}
10329+
10330+/**
10331+ * atomic_inc_unchecked - increment atomic variable
10332+ * @v: pointer of type atomic_unchecked_t
10333+ *
10334+ * Atomically increments @v by 1.
10335+ */
10336+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
10337+{
10338+ asm volatile(LOCK_PREFIX "incl %0\n"
10339 : "+m" (v->counter));
10340 }
10341
10342@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
10343 */
10344 static inline void atomic_dec(atomic_t *v)
10345 {
10346- asm volatile(LOCK_PREFIX "decl %0"
10347+ asm volatile(LOCK_PREFIX "decl %0\n"
10348+
10349+#ifdef CONFIG_PAX_REFCOUNT
10350+ "jno 0f\n"
10351+ LOCK_PREFIX "incl %0\n"
10352+ "int $4\n0:\n"
10353+ _ASM_EXTABLE(0b, 0b)
10354+#endif
10355+
10356+ : "+m" (v->counter));
10357+}
10358+
10359+/**
10360+ * atomic_dec_unchecked - decrement atomic variable
10361+ * @v: pointer of type atomic_unchecked_t
10362+ *
10363+ * Atomically decrements @v by 1.
10364+ */
10365+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
10366+{
10367+ asm volatile(LOCK_PREFIX "decl %0\n"
10368 : "+m" (v->counter));
10369 }
10370
10371@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
10372 {
10373 unsigned char c;
10374
10375- asm volatile(LOCK_PREFIX "decl %0; sete %1"
10376+ asm volatile(LOCK_PREFIX "decl %0\n"
10377+
10378+#ifdef CONFIG_PAX_REFCOUNT
10379+ "jno 0f\n"
10380+ LOCK_PREFIX "incl %0\n"
10381+ "int $4\n0:\n"
10382+ _ASM_EXTABLE(0b, 0b)
10383+#endif
10384+
10385+ "sete %1\n"
10386 : "+m" (v->counter), "=qm" (c)
10387 : : "memory");
10388 return c != 0;
10389@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
10390 {
10391 unsigned char c;
10392
10393- asm volatile(LOCK_PREFIX "incl %0; sete %1"
10394+ asm volatile(LOCK_PREFIX "incl %0\n"
10395+
10396+#ifdef CONFIG_PAX_REFCOUNT
10397+ "jno 0f\n"
10398+ LOCK_PREFIX "decl %0\n"
10399+ "int $4\n0:\n"
10400+ _ASM_EXTABLE(0b, 0b)
10401+#endif
10402+
10403+ "sete %1\n"
10404+ : "+m" (v->counter), "=qm" (c)
10405+ : : "memory");
10406+ return c != 0;
10407+}
10408+
10409+/**
10410+ * atomic_inc_and_test_unchecked - increment and test
10411+ * @v: pointer of type atomic_unchecked_t
10412+ *
10413+ * Atomically increments @v by 1
10414+ * and returns true if the result is zero, or false for all
10415+ * other cases.
10416+ */
10417+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
10418+{
10419+ unsigned char c;
10420+
10421+ asm volatile(LOCK_PREFIX "incl %0\n"
10422+ "sete %1\n"
10423 : "+m" (v->counter), "=qm" (c)
10424 : : "memory");
10425 return c != 0;
10426@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
10427 {
10428 unsigned char c;
10429
10430- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
10431+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
10432+
10433+#ifdef CONFIG_PAX_REFCOUNT
10434+ "jno 0f\n"
10435+ LOCK_PREFIX "subl %2,%0\n"
10436+ "int $4\n0:\n"
10437+ _ASM_EXTABLE(0b, 0b)
10438+#endif
10439+
10440+ "sets %1\n"
10441 : "+m" (v->counter), "=qm" (c)
10442 : "ir" (i) : "memory");
10443 return c;
10444@@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
10445 goto no_xadd;
10446 #endif
10447 /* Modern 486+ processor */
10448- return i + xadd(&v->counter, i);
10449+ return i + xadd_check_overflow(&v->counter, i);
10450
10451 #ifdef CONFIG_M386
10452 no_xadd: /* Legacy 386 processor */
10453@@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
10454 }
10455
10456 /**
10457+ * atomic_add_return_unchecked - add integer and return
10458+ * @i: integer value to add
10459+ * @v: pointer of type atomic_unchecked_t
10460+ *
10461+ * Atomically adds @i to @v and returns @i + @v
10462+ */
10463+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
10464+{
10465+#ifdef CONFIG_M386
10466+ int __i;
10467+ unsigned long flags;
10468+ if (unlikely(boot_cpu_data.x86 <= 3))
10469+ goto no_xadd;
10470+#endif
10471+ /* Modern 486+ processor */
10472+ return i + xadd(&v->counter, i);
10473+
10474+#ifdef CONFIG_M386
10475+no_xadd: /* Legacy 386 processor */
10476+ raw_local_irq_save(flags);
10477+ __i = atomic_read_unchecked(v);
10478+ atomic_set_unchecked(v, i + __i);
10479+ raw_local_irq_restore(flags);
10480+ return i + __i;
10481+#endif
10482+}
10483+
10484+/**
10485 * atomic_sub_return - subtract integer and return
10486 * @v: pointer of type atomic_t
10487 * @i: integer value to subtract
10488@@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
10489 }
10490
10491 #define atomic_inc_return(v) (atomic_add_return(1, v))
10492+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
10493+{
10494+ return atomic_add_return_unchecked(1, v);
10495+}
10496 #define atomic_dec_return(v) (atomic_sub_return(1, v))
10497
10498 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
10499@@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
10500 return cmpxchg(&v->counter, old, new);
10501 }
10502
10503+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
10504+{
10505+ return cmpxchg(&v->counter, old, new);
10506+}
10507+
10508 static inline int atomic_xchg(atomic_t *v, int new)
10509 {
10510 return xchg(&v->counter, new);
10511 }
10512
10513+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
10514+{
10515+ return xchg(&v->counter, new);
10516+}
10517+
10518 /**
10519 * __atomic_add_unless - add unless the number is already a given value
10520 * @v: pointer of type atomic_t
10521@@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
10522 */
10523 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10524 {
10525- int c, old;
10526+ int c, old, new;
10527 c = atomic_read(v);
10528 for (;;) {
10529- if (unlikely(c == (u)))
10530+ if (unlikely(c == u))
10531 break;
10532- old = atomic_cmpxchg((v), c, c + (a));
10533+
10534+ asm volatile("addl %2,%0\n"
10535+
10536+#ifdef CONFIG_PAX_REFCOUNT
10537+ "jno 0f\n"
10538+ "subl %2,%0\n"
10539+ "int $4\n0:\n"
10540+ _ASM_EXTABLE(0b, 0b)
10541+#endif
10542+
10543+ : "=r" (new)
10544+ : "0" (c), "ir" (a));
10545+
10546+ old = atomic_cmpxchg(v, c, new);
10547 if (likely(old == c))
10548 break;
10549 c = old;
10550@@ -241,6 +458,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10551 }
10552
10553 /**
10554+ * atomic_inc_not_zero_hint - increment if not null
10555+ * @v: pointer of type atomic_t
10556+ * @hint: probable value of the atomic before the increment
10557+ *
10558+ * This version of atomic_inc_not_zero() gives a hint of the probable
10559+ * value of the atomic. This helps the processor avoid reading the memory
10560+ * before doing the atomic read/modify/write cycle, lowering the
10561+ * number of bus transactions on some arches.
10562+ *
10563+ * Returns: 0 if increment was not done, 1 otherwise.
10564+ */
10565+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
10566+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
10567+{
10568+ int val, c = hint, new;
10569+
10570+ /* sanity test, should be removed by compiler if hint is a constant */
10571+ if (!hint)
10572+ return __atomic_add_unless(v, 1, 0);
10573+
10574+ do {
10575+ asm volatile("incl %0\n"
10576+
10577+#ifdef CONFIG_PAX_REFCOUNT
10578+ "jno 0f\n"
10579+ "decl %0\n"
10580+ "int $4\n0:\n"
10581+ _ASM_EXTABLE(0b, 0b)
10582+#endif
10583+
10584+ : "=r" (new)
10585+ : "0" (c));
10586+
10587+ val = atomic_cmpxchg(v, c, new);
10588+ if (val == c)
10589+ return 1;
10590+ c = val;
10591+ } while (c);
10592+
10593+ return 0;
10594+}
10595+
10596+/**
10597 * atomic_inc_short - increment of a short integer
10598 * @v: pointer to type int
10599 *
10600@@ -269,14 +529,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
10601 #endif
10602
10603 /* These are x86-specific, used by some header files */
10604-#define atomic_clear_mask(mask, addr) \
10605- asm volatile(LOCK_PREFIX "andl %0,%1" \
10606- : : "r" (~(mask)), "m" (*(addr)) : "memory")
10607+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
10608+{
10609+ asm volatile(LOCK_PREFIX "andl %1,%0"
10610+ : "+m" (v->counter)
10611+ : "r" (~(mask))
10612+ : "memory");
10613+}
10614
10615-#define atomic_set_mask(mask, addr) \
10616- asm volatile(LOCK_PREFIX "orl %0,%1" \
10617- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
10618- : "memory")
10619+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
10620+{
10621+ asm volatile(LOCK_PREFIX "andl %1,%0"
10622+ : "+m" (v->counter)
10623+ : "r" (~(mask))
10624+ : "memory");
10625+}
10626+
10627+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
10628+{
10629+ asm volatile(LOCK_PREFIX "orl %1,%0"
10630+ : "+m" (v->counter)
10631+ : "r" (mask)
10632+ : "memory");
10633+}
10634+
10635+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
10636+{
10637+ asm volatile(LOCK_PREFIX "orl %1,%0"
10638+ : "+m" (v->counter)
10639+ : "r" (mask)
10640+ : "memory");
10641+}
10642
10643 /* Atomic operations are already serializing on x86 */
10644 #define smp_mb__before_atomic_dec() barrier()
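
The PAX_REFCOUNT pattern repeated throughout this header is: do the arithmetic, branch over the fixup with "jno" when no signed overflow occurred, otherwise undo the operation and raise "int $4" (the overflow exception). A minimal userspace sketch of the same detect-and-refuse idea, assuming a GCC/Clang compiler for __builtin_add_overflow in place of the inline asm:

    #include <stdio.h>
    #include <limits.h>

    /* add i to *counter, refusing (and leaving *counter intact) on overflow */
    static int checked_add(int *counter, int i)
    {
        int result;
        if (__builtin_add_overflow(*counter, i, &result))
            return -1;                 /* the kernel raises int $4 here */
        *counter = result;
        return 0;
    }

    int main(void)
    {
        int refs = INT_MAX;
        if (checked_add(&refs, 1) < 0)
            printf("overflow refused, counter still %d\n", refs);
        return 0;
    }

The _unchecked variants added alongside keep the plain, wrapping semantics for counters that are allowed to overflow (statistics and the like).
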
10645diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
10646index b154de7..aadebd8 100644
10647--- a/arch/x86/include/asm/atomic64_32.h
10648+++ b/arch/x86/include/asm/atomic64_32.h
10649@@ -12,6 +12,14 @@ typedef struct {
10650 u64 __aligned(8) counter;
10651 } atomic64_t;
10652
10653+#ifdef CONFIG_PAX_REFCOUNT
10654+typedef struct {
10655+ u64 __aligned(8) counter;
10656+} atomic64_unchecked_t;
10657+#else
10658+typedef atomic64_t atomic64_unchecked_t;
10659+#endif
10660+
10661 #define ATOMIC64_INIT(val) { (val) }
10662
10663 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
10664@@ -37,21 +45,31 @@ typedef struct {
10665 ATOMIC64_DECL_ONE(sym##_386)
10666
10667 ATOMIC64_DECL_ONE(add_386);
10668+ATOMIC64_DECL_ONE(add_unchecked_386);
10669 ATOMIC64_DECL_ONE(sub_386);
10670+ATOMIC64_DECL_ONE(sub_unchecked_386);
10671 ATOMIC64_DECL_ONE(inc_386);
10672+ATOMIC64_DECL_ONE(inc_unchecked_386);
10673 ATOMIC64_DECL_ONE(dec_386);
10674+ATOMIC64_DECL_ONE(dec_unchecked_386);
10675 #endif
10676
10677 #define alternative_atomic64(f, out, in...) \
10678 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
10679
10680 ATOMIC64_DECL(read);
10681+ATOMIC64_DECL(read_unchecked);
10682 ATOMIC64_DECL(set);
10683+ATOMIC64_DECL(set_unchecked);
10684 ATOMIC64_DECL(xchg);
10685 ATOMIC64_DECL(add_return);
10686+ATOMIC64_DECL(add_return_unchecked);
10687 ATOMIC64_DECL(sub_return);
10688+ATOMIC64_DECL(sub_return_unchecked);
10689 ATOMIC64_DECL(inc_return);
10690+ATOMIC64_DECL(inc_return_unchecked);
10691 ATOMIC64_DECL(dec_return);
10692+ATOMIC64_DECL(dec_return_unchecked);
10693 ATOMIC64_DECL(dec_if_positive);
10694 ATOMIC64_DECL(inc_not_zero);
10695 ATOMIC64_DECL(add_unless);
10696@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
10697 }
10698
10699 /**
10700+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
10701+ * @v: pointer to type atomic64_unchecked_t
10702+ * @o: expected value
10703+ * @n: new value
10704+ *
10705+ * Atomically sets @v to @n if it was equal to @o and returns
10706+ * the old value.
10707+ */
10708+
10709+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
10710+{
10711+ return cmpxchg64(&v->counter, o, n);
10712+}
10713+
10714+/**
10715 * atomic64_xchg - xchg atomic64 variable
10716 * @v: pointer to type atomic64_t
10717 * @n: value to assign
10718@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
10719 }
10720
10721 /**
10722+ * atomic64_set_unchecked - set atomic64 variable
10723+ * @v: pointer to type atomic64_unchecked_t
10724+ * @n: value to assign
10725+ *
10726+ * Atomically sets the value of @v to @n.
10727+ */
10728+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
10729+{
10730+ unsigned high = (unsigned)(i >> 32);
10731+ unsigned low = (unsigned)i;
10732+ alternative_atomic64(set, /* no output */,
10733+ "S" (v), "b" (low), "c" (high)
10734+ : "eax", "edx", "memory");
10735+}
10736+
10737+/**
10738 * atomic64_read - read atomic64 variable
10739 * @v: pointer to type atomic64_t
10740 *
10741@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
10742 }
10743
10744 /**
10745+ * atomic64_read_unchecked - read atomic64 variable
10746+ * @v: pointer to type atomic64_unchecked_t
10747+ *
10748+ * Atomically reads the value of @v and returns it.
10749+ */
10750+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
10751+{
10752+ long long r;
10753+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
10754+ return r;
10755+}
10756+
10757+/**
10758 * atomic64_add_return - add and return
10759 * @i: integer value to add
10760 * @v: pointer to type atomic64_t
10761@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
10762 return i;
10763 }
10764
10765+/**
10766+ * atomic64_add_return_unchecked - add and return
10767+ * @i: integer value to add
10768+ * @v: pointer to type atomic64_unchecked_t
10769+ *
10770+ * Atomically adds @i to @v and returns @i + *@v
10771+ */
10772+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
10773+{
10774+ alternative_atomic64(add_return_unchecked,
10775+ ASM_OUTPUT2("+A" (i), "+c" (v)),
10776+ ASM_NO_INPUT_CLOBBER("memory"));
10777+ return i;
10778+}
10779+
10780 /*
10781 * Other variants with different arithmetic operators:
10782 */
10783@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
10784 return a;
10785 }
10786
10787+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
10788+{
10789+ long long a;
10790+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
10791+ "S" (v) : "memory", "ecx");
10792+ return a;
10793+}
10794+
10795 static inline long long atomic64_dec_return(atomic64_t *v)
10796 {
10797 long long a;
10798@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
10799 }
10800
10801 /**
10802+ * atomic64_add_unchecked - add integer to atomic64 variable
10803+ * @i: integer value to add
10804+ * @v: pointer to type atomic64_unchecked_t
10805+ *
10806+ * Atomically adds @i to @v.
10807+ */
10808+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
10809+{
10810+ __alternative_atomic64(add_unchecked, add_return_unchecked,
10811+ ASM_OUTPUT2("+A" (i), "+c" (v)),
10812+ ASM_NO_INPUT_CLOBBER("memory"));
10813+ return i;
10814+}
10815+
10816+/**
10817 * atomic64_sub - subtract the atomic64 variable
10818 * @i: integer value to subtract
10819 * @v: pointer to type atomic64_t
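
On 32-bit x86 the atomic64 helpers above hand 64-bit values to the out-of-line routines as two 32-bit halves (e.g. "b" (low), "c" (high) in atomic64_set_unchecked). A tiny sketch of that split-and-rejoin arithmetic:

    #include <stdio.h>

    int main(void)
    {
        long long i = 0x0123456789abcdefLL;
        unsigned high = (unsigned)(i >> 32);       /* the %ecx half */
        unsigned low  = (unsigned)i;               /* the %ebx half */
        long long joined = ((long long)high << 32) | low;
        printf("%d\n", joined == i);               /* prints 1 */
        return 0;
    }
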
10820diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
10821index 0e1cbfc..5623683 100644
10822--- a/arch/x86/include/asm/atomic64_64.h
10823+++ b/arch/x86/include/asm/atomic64_64.h
10824@@ -18,7 +18,19 @@
10825 */
10826 static inline long atomic64_read(const atomic64_t *v)
10827 {
10828- return (*(volatile long *)&(v)->counter);
10829+ return (*(volatile const long *)&(v)->counter);
10830+}
10831+
10832+/**
10833+ * atomic64_read_unchecked - read atomic64 variable
10834+ * @v: pointer of type atomic64_unchecked_t
10835+ *
10836+ * Atomically reads the value of @v.
10837+ * Doesn't imply a read memory barrier.
10838+ */
10839+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
10840+{
10841+ return (*(volatile const long *)&(v)->counter);
10842 }
10843
10844 /**
10845@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
10846 }
10847
10848 /**
10849+ * atomic64_set_unchecked - set atomic64 variable
10850+ * @v: pointer to type atomic64_unchecked_t
10851+ * @i: required value
10852+ *
10853+ * Atomically sets the value of @v to @i.
10854+ */
10855+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
10856+{
10857+ v->counter = i;
10858+}
10859+
10860+/**
10861 * atomic64_add - add integer to atomic64 variable
10862 * @i: integer value to add
10863 * @v: pointer to type atomic64_t
10864@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
10865 */
10866 static inline void atomic64_add(long i, atomic64_t *v)
10867 {
10868+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
10869+
10870+#ifdef CONFIG_PAX_REFCOUNT
10871+ "jno 0f\n"
10872+ LOCK_PREFIX "subq %1,%0\n"
10873+ "int $4\n0:\n"
10874+ _ASM_EXTABLE(0b, 0b)
10875+#endif
10876+
10877+ : "=m" (v->counter)
10878+ : "er" (i), "m" (v->counter));
10879+}
10880+
10881+/**
10882+ * atomic64_add_unchecked - add integer to atomic64 variable
10883+ * @i: integer value to add
10884+ * @v: pointer to type atomic64_unchecked_t
10885+ *
10886+ * Atomically adds @i to @v.
10887+ */
10888+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
10889+{
10890 asm volatile(LOCK_PREFIX "addq %1,%0"
10891 : "=m" (v->counter)
10892 : "er" (i), "m" (v->counter));
10893@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
10894 */
10895 static inline void atomic64_sub(long i, atomic64_t *v)
10896 {
10897- asm volatile(LOCK_PREFIX "subq %1,%0"
10898+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
10899+
10900+#ifdef CONFIG_PAX_REFCOUNT
10901+ "jno 0f\n"
10902+ LOCK_PREFIX "addq %1,%0\n"
10903+ "int $4\n0:\n"
10904+ _ASM_EXTABLE(0b, 0b)
10905+#endif
10906+
10907+ : "=m" (v->counter)
10908+ : "er" (i), "m" (v->counter));
10909+}
10910+
10911+/**
10912+ * atomic64_sub_unchecked - subtract the atomic64 variable
10913+ * @i: integer value to subtract
10914+ * @v: pointer to type atomic64_unchecked_t
10915+ *
10916+ * Atomically subtracts @i from @v.
10917+ */
10918+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
10919+{
10920+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
10921 : "=m" (v->counter)
10922 : "er" (i), "m" (v->counter));
10923 }
10924@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
10925 {
10926 unsigned char c;
10927
10928- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
10929+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
10930+
10931+#ifdef CONFIG_PAX_REFCOUNT
10932+ "jno 0f\n"
10933+ LOCK_PREFIX "addq %2,%0\n"
10934+ "int $4\n0:\n"
10935+ _ASM_EXTABLE(0b, 0b)
10936+#endif
10937+
10938+ "sete %1\n"
10939 : "=m" (v->counter), "=qm" (c)
10940 : "er" (i), "m" (v->counter) : "memory");
10941 return c;
10942@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
10943 */
10944 static inline void atomic64_inc(atomic64_t *v)
10945 {
10946+ asm volatile(LOCK_PREFIX "incq %0\n"
10947+
10948+#ifdef CONFIG_PAX_REFCOUNT
10949+ "jno 0f\n"
10950+ LOCK_PREFIX "decq %0\n"
10951+ "int $4\n0:\n"
10952+ _ASM_EXTABLE(0b, 0b)
10953+#endif
10954+
10955+ : "=m" (v->counter)
10956+ : "m" (v->counter));
10957+}
10958+
10959+/**
10960+ * atomic64_inc_unchecked - increment atomic64 variable
10961+ * @v: pointer to type atomic64_unchecked_t
10962+ *
10963+ * Atomically increments @v by 1.
10964+ */
10965+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
10966+{
10967 asm volatile(LOCK_PREFIX "incq %0"
10968 : "=m" (v->counter)
10969 : "m" (v->counter));
10970@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
10971 */
10972 static inline void atomic64_dec(atomic64_t *v)
10973 {
10974- asm volatile(LOCK_PREFIX "decq %0"
10975+ asm volatile(LOCK_PREFIX "decq %0\n"
10976+
10977+#ifdef CONFIG_PAX_REFCOUNT
10978+ "jno 0f\n"
10979+ LOCK_PREFIX "incq %0\n"
10980+ "int $4\n0:\n"
10981+ _ASM_EXTABLE(0b, 0b)
10982+#endif
10983+
10984+ : "=m" (v->counter)
10985+ : "m" (v->counter));
10986+}
10987+
10988+/**
10989+ * atomic64_dec_unchecked - decrement atomic64 variable
10990+ * @v: pointer to type atomic64_unchecked_t
10991+ *
10992+ * Atomically decrements @v by 1.
10993+ */
10994+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
10995+{
10996+ asm volatile(LOCK_PREFIX "decq %0\n"
10997 : "=m" (v->counter)
10998 : "m" (v->counter));
10999 }
11000@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
11001 {
11002 unsigned char c;
11003
11004- asm volatile(LOCK_PREFIX "decq %0; sete %1"
11005+ asm volatile(LOCK_PREFIX "decq %0\n"
11006+
11007+#ifdef CONFIG_PAX_REFCOUNT
11008+ "jno 0f\n"
11009+ LOCK_PREFIX "incq %0\n"
11010+ "int $4\n0:\n"
11011+ _ASM_EXTABLE(0b, 0b)
11012+#endif
11013+
11014+ "sete %1\n"
11015 : "=m" (v->counter), "=qm" (c)
11016 : "m" (v->counter) : "memory");
11017 return c != 0;
11018@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
11019 {
11020 unsigned char c;
11021
11022- asm volatile(LOCK_PREFIX "incq %0; sete %1"
11023+ asm volatile(LOCK_PREFIX "incq %0\n"
11024+
11025+#ifdef CONFIG_PAX_REFCOUNT
11026+ "jno 0f\n"
11027+ LOCK_PREFIX "decq %0\n"
11028+ "int $4\n0:\n"
11029+ _ASM_EXTABLE(0b, 0b)
11030+#endif
11031+
11032+ "sete %1\n"
11033 : "=m" (v->counter), "=qm" (c)
11034 : "m" (v->counter) : "memory");
11035 return c != 0;
11036@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
11037 {
11038 unsigned char c;
11039
11040- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
11041+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
11042+
11043+#ifdef CONFIG_PAX_REFCOUNT
11044+ "jno 0f\n"
11045+ LOCK_PREFIX "subq %2,%0\n"
11046+ "int $4\n0:\n"
11047+ _ASM_EXTABLE(0b, 0b)
11048+#endif
11049+
11050+ "sets %1\n"
11051 : "=m" (v->counter), "=qm" (c)
11052 : "er" (i), "m" (v->counter) : "memory");
11053 return c;
11054@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
11055 */
11056 static inline long atomic64_add_return(long i, atomic64_t *v)
11057 {
11058+ return i + xadd_check_overflow(&v->counter, i);
11059+}
11060+
11061+/**
11062+ * atomic64_add_return_unchecked - add and return
11063+ * @i: integer value to add
11064+ * @v: pointer to type atomic64_unchecked_t
11065+ *
11066+ * Atomically adds @i to @v and returns @i + @v
11067+ */
11068+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
11069+{
11070 return i + xadd(&v->counter, i);
11071 }
11072
11073@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
11074 }
11075
11076 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
11077+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
11078+{
11079+ return atomic64_add_return_unchecked(1, v);
11080+}
11081 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
11082
11083 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
11084@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
11085 return cmpxchg(&v->counter, old, new);
11086 }
11087
11088+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
11089+{
11090+ return cmpxchg(&v->counter, old, new);
11091+}
11092+
11093 static inline long atomic64_xchg(atomic64_t *v, long new)
11094 {
11095 return xchg(&v->counter, new);
11096@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
11097 */
11098 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
11099 {
11100- long c, old;
11101+ long c, old, new;
11102 c = atomic64_read(v);
11103 for (;;) {
11104- if (unlikely(c == (u)))
11105+ if (unlikely(c == u))
11106 break;
11107- old = atomic64_cmpxchg((v), c, c + (a));
11108+
11109+ asm volatile("add %2,%0\n"
11110+
11111+#ifdef CONFIG_PAX_REFCOUNT
11112+ "jno 0f\n"
11113+ "sub %2,%0\n"
11114+ "int $4\n0:\n"
11115+ _ASM_EXTABLE(0b, 0b)
11116+#endif
11117+
11118+ : "=r" (new)
11119+ : "0" (c), "ir" (a));
11120+
11121+ old = atomic64_cmpxchg(v, c, new);
11122 if (likely(old == c))
11123 break;
11124 c = old;
11125 }
11126- return c != (u);
11127+ return c != u;
11128 }
11129
11130 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
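
atomic64_add_unless() above is a classic cmpxchg retry loop: read the counter, compute the new value (now with the overflow check folded into the add), and try to install it; if another CPU changed the counter meanwhile, retry with the fresh value. A userspace analogue using C11 atomics in place of the kernel's cmpxchg (names here are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* add a to *v unless *v == u; returns true iff the add happened */
    static bool add_unless(atomic_long *v, long a, long u)
    {
        long c = atomic_load(v);
        while (c != u) {
            /* on failure, c is refreshed with the current value */
            if (atomic_compare_exchange_weak(v, &c, c + a))
                return true;
        }
        return false;
    }

    int main(void)
    {
        atomic_long v = 5;
        printf("%d %ld\n", add_unless(&v, 1, 0),
               (long)atomic_load(&v));              /* prints "1 6" */
        return 0;
    }
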
11131diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
11132index 6dfd019..0c6699f 100644
11133--- a/arch/x86/include/asm/bitops.h
11134+++ b/arch/x86/include/asm/bitops.h
11135@@ -40,7 +40,7 @@
11136 * a mask operation on a byte.
11137 */
11138 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
11139-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
11140+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
11141 #define CONST_MASK(nr) (1 << ((nr) & 7))
11142
11143 /**
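
CONST_MASK_ADDR/CONST_MASK above address a single bit as byte "nr >> 3" with mask "1 << (nr & 7)". A tiny non-atomic sketch of that addressing on a plain byte array:

    #include <stdio.h>

    static void set_bit_sketch(unsigned nr, unsigned char *addr)
    {
        addr[nr >> 3] |= (unsigned char)(1 << (nr & 7));
    }

    int main(void)
    {
        unsigned char bits[4] = { 0 };
        set_bit_sketch(10, bits);       /* byte 1, bit 2 */
        printf("%#x\n", bits[1]);       /* prints 0x4 */
        return 0;
    }
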
11144diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
11145index b13fe63..0dab13a 100644
11146--- a/arch/x86/include/asm/boot.h
11147+++ b/arch/x86/include/asm/boot.h
11148@@ -11,10 +11,15 @@
11149 #include <asm/pgtable_types.h>
11150
11151 /* Physical address where kernel should be loaded. */
11152-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
11153+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
11154 + (CONFIG_PHYSICAL_ALIGN - 1)) \
11155 & ~(CONFIG_PHYSICAL_ALIGN - 1))
11156
11157+#ifndef __ASSEMBLY__
11158+extern unsigned char __LOAD_PHYSICAL_ADDR[];
11159+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
11160+#endif
11161+
11162 /* Minimum kernel alignment, as a power of two */
11163 #ifdef CONFIG_X86_64
11164 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
11165diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
11166index 48f99f1..d78ebf9 100644
11167--- a/arch/x86/include/asm/cache.h
11168+++ b/arch/x86/include/asm/cache.h
11169@@ -5,12 +5,13 @@
11170
11171 /* L1 cache line size */
11172 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
11173-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
11174+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
11175
11176 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
11177+#define __read_only __attribute__((__section__(".data..read_only")))
11178
11179 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
11180-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
11181+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
11182
11183 #ifdef CONFIG_X86_VSMP
11184 #ifdef CONFIG_SMP
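
_AC(1,UL) exists so the same constant compiles both in C, where the UL suffix is pasted on and the shifted result becomes an unsigned long, and in assembler, where the suffix would be a syntax error and is dropped. A minimal re-creation of the macro pair, mirroring the kernel's <linux/const.h>:

    #include <stdio.h>

    #ifdef __ASSEMBLY__
    # define _AC(X, Y) X                 /* assembler: drop the suffix */
    #else
    # define __AC(X, Y) (X##Y)
    # define _AC(X, Y)  __AC(X, Y)       /* C: paste the suffix on */
    #endif

    #define L1_CACHE_SHIFT 6
    #define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)

    int main(void)
    {
        printf("%lu\n", L1_CACHE_BYTES);  /* 64, as an unsigned long */
        return 0;
    }
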
11185diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
11186index 9863ee3..4a1f8e1 100644
11187--- a/arch/x86/include/asm/cacheflush.h
11188+++ b/arch/x86/include/asm/cacheflush.h
11189@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
11190 unsigned long pg_flags = pg->flags & _PGMT_MASK;
11191
11192 if (pg_flags == _PGMT_DEFAULT)
11193- return -1;
11194+ return ~0UL;
11195 else if (pg_flags == _PGMT_WC)
11196 return _PAGE_CACHE_WC;
11197 else if (pg_flags == _PGMT_UC_MINUS)
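
get_page_memtype() returns unsigned long, so the old "return -1" already yielded all-ones after the implicit conversion; spelling it ~0UL just makes that explicit. A two-line demonstration:

    #include <stdio.h>

    static unsigned long f(void) { return -1; }   /* the old spelling */

    int main(void)
    {
        printf("%d\n", f() == ~0UL);   /* prints 1: same bit pattern */
        return 0;
    }
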
11198diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
11199index 46fc474..b02b0f9 100644
11200--- a/arch/x86/include/asm/checksum_32.h
11201+++ b/arch/x86/include/asm/checksum_32.h
11202@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
11203 int len, __wsum sum,
11204 int *src_err_ptr, int *dst_err_ptr);
11205
11206+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
11207+ int len, __wsum sum,
11208+ int *src_err_ptr, int *dst_err_ptr);
11209+
11210+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
11211+ int len, __wsum sum,
11212+ int *src_err_ptr, int *dst_err_ptr);
11213+
11214 /*
11215 * Note: when you get a NULL pointer exception here this means someone
11216 * passed in an incorrect kernel address to one of these functions.
11217@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
11218 int *err_ptr)
11219 {
11220 might_sleep();
11221- return csum_partial_copy_generic((__force void *)src, dst,
11222+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
11223 len, sum, err_ptr, NULL);
11224 }
11225
11226@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
11227 {
11228 might_sleep();
11229 if (access_ok(VERIFY_WRITE, dst, len))
11230- return csum_partial_copy_generic(src, (__force void *)dst,
11231+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
11232 len, sum, NULL, err_ptr);
11233
11234 if (len)
11235diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
11236index 8d871ea..c1a0dc9 100644
11237--- a/arch/x86/include/asm/cmpxchg.h
11238+++ b/arch/x86/include/asm/cmpxchg.h
11239@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
11240 __compiletime_error("Bad argument size for cmpxchg");
11241 extern void __xadd_wrong_size(void)
11242 __compiletime_error("Bad argument size for xadd");
11243+extern void __xadd_check_overflow_wrong_size(void)
11244+ __compiletime_error("Bad argument size for xadd_check_overflow");
11245 extern void __add_wrong_size(void)
11246 __compiletime_error("Bad argument size for add");
11247+extern void __add_check_overflow_wrong_size(void)
11248+ __compiletime_error("Bad argument size for add_check_overflow");
11249
11250 /*
11251 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
11252@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
11253 __ret; \
11254 })
11255
11256+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
11257+ ({ \
11258+ __typeof__ (*(ptr)) __ret = (arg); \
11259+ switch (sizeof(*(ptr))) { \
11260+ case __X86_CASE_L: \
11261+ asm volatile (lock #op "l %0, %1\n" \
11262+ "jno 0f\n" \
11263+ "mov %0,%1\n" \
11264+ "int $4\n0:\n" \
11265+ _ASM_EXTABLE(0b, 0b) \
11266+ : "+r" (__ret), "+m" (*(ptr)) \
11267+ : : "memory", "cc"); \
11268+ break; \
11269+ case __X86_CASE_Q: \
11270+ asm volatile (lock #op "q %q0, %1\n" \
11271+ "jno 0f\n" \
11272+ "mov %0,%1\n" \
11273+ "int $4\n0:\n" \
11274+ _ASM_EXTABLE(0b, 0b) \
11275+ : "+r" (__ret), "+m" (*(ptr)) \
11276+ : : "memory", "cc"); \
11277+ break; \
11278+ default: \
11279+ __ ## op ## _check_overflow_wrong_size(); \
11280+ } \
11281+ __ret; \
11282+ })
11283+
11284 /*
11285 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
11286 * Since this is generally used to protect other memory information, we
11287@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
11288 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
11289 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
11290
11291+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
11292+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
11293+
11294 #define __add(ptr, inc, lock) \
11295 ({ \
11296 __typeof__ (*(ptr)) __ret = (inc); \
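
xadd (and __atomic_fetch_add, its portable spelling) returns the value the counter held before the addition, which is why atomic_add_return() above is written as "i + xadd_check_overflow(...)". A sketch of that return-value arithmetic, minus the overflow check:

    #include <stdio.h>

    /* fetch_add returns the old value, exactly like x86 LOCK XADD */
    static int add_return(int *counter, int i)
    {
        return i + __atomic_fetch_add(counter, i, __ATOMIC_SEQ_CST);
    }

    int main(void)
    {
        int c = 10;
        printf("%d\n", add_return(&c, 5));   /* prints 15 */
        return 0;
    }
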
11297diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
11298index 8c297aa..7a90f03 100644
11299--- a/arch/x86/include/asm/cpufeature.h
11300+++ b/arch/x86/include/asm/cpufeature.h
11301@@ -205,7 +205,7 @@
11302 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
11303 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
11304 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
11305-#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
11306+#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
11307 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
11308 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
11309 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
11310@@ -379,7 +379,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
11311 ".section .discard,\"aw\",@progbits\n"
11312 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
11313 ".previous\n"
11314- ".section .altinstr_replacement,\"ax\"\n"
11315+ ".section .altinstr_replacement,\"a\"\n"
11316 "3: movb $1,%0\n"
11317 "4:\n"
11318 ".previous\n"
11319diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
11320index 8bf1c06..f723dfd 100644
11321--- a/arch/x86/include/asm/desc.h
11322+++ b/arch/x86/include/asm/desc.h
11323@@ -4,6 +4,7 @@
11324 #include <asm/desc_defs.h>
11325 #include <asm/ldt.h>
11326 #include <asm/mmu.h>
11327+#include <asm/pgtable.h>
11328
11329 #include <linux/smp.h>
11330 #include <linux/percpu.h>
11331@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
11332
11333 desc->type = (info->read_exec_only ^ 1) << 1;
11334 desc->type |= info->contents << 2;
11335+ desc->type |= info->seg_not_present ^ 1;
11336
11337 desc->s = 1;
11338 desc->dpl = 0x3;
11339@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
11340 }
11341
11342 extern struct desc_ptr idt_descr;
11343-extern gate_desc idt_table[];
11344 extern struct desc_ptr nmi_idt_descr;
11345-extern gate_desc nmi_idt_table[];
11346-
11347-struct gdt_page {
11348- struct desc_struct gdt[GDT_ENTRIES];
11349-} __attribute__((aligned(PAGE_SIZE)));
11350-
11351-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
11352+extern gate_desc idt_table[256];
11353+extern gate_desc nmi_idt_table[256];
11354
11355+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
11356 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
11357 {
11358- return per_cpu(gdt_page, cpu).gdt;
11359+ return cpu_gdt_table[cpu];
11360 }
11361
11362 #ifdef CONFIG_X86_64
11363@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
11364 unsigned long base, unsigned dpl, unsigned flags,
11365 unsigned short seg)
11366 {
11367- gate->a = (seg << 16) | (base & 0xffff);
11368- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
11369+ gate->gate.offset_low = base;
11370+ gate->gate.seg = seg;
11371+ gate->gate.reserved = 0;
11372+ gate->gate.type = type;
11373+ gate->gate.s = 0;
11374+ gate->gate.dpl = dpl;
11375+ gate->gate.p = 1;
11376+ gate->gate.offset_high = base >> 16;
11377 }
11378
11379 #endif
11380@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
11381
11382 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
11383 {
11384+ pax_open_kernel();
11385 memcpy(&idt[entry], gate, sizeof(*gate));
11386+ pax_close_kernel();
11387 }
11388
11389 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
11390 {
11391+ pax_open_kernel();
11392 memcpy(&ldt[entry], desc, 8);
11393+ pax_close_kernel();
11394 }
11395
11396 static inline void
11397@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
11398 default: size = sizeof(*gdt); break;
11399 }
11400
11401+ pax_open_kernel();
11402 memcpy(&gdt[entry], desc, size);
11403+ pax_close_kernel();
11404 }
11405
11406 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
11407@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
11408
11409 static inline void native_load_tr_desc(void)
11410 {
11411+ pax_open_kernel();
11412 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
11413+ pax_close_kernel();
11414 }
11415
11416 static inline void native_load_gdt(const struct desc_ptr *dtr)
11417@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
11418 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
11419 unsigned int i;
11420
11421+ pax_open_kernel();
11422 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
11423 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
11424+ pax_close_kernel();
11425 }
11426
11427 #define _LDT_empty(info) \
11428@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
11429 }
11430
11431 #ifdef CONFIG_X86_64
11432-static inline void set_nmi_gate(int gate, void *addr)
11433+static inline void set_nmi_gate(int gate, const void *addr)
11434 {
11435 gate_desc s;
11436
11437@@ -320,7 +333,7 @@ static inline void set_nmi_gate(int gate, void *addr)
11438 }
11439 #endif
11440
11441-static inline void _set_gate(int gate, unsigned type, void *addr,
11442+static inline void _set_gate(int gate, unsigned type, const void *addr,
11443 unsigned dpl, unsigned ist, unsigned seg)
11444 {
11445 gate_desc s;
11446@@ -339,7 +352,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
11447 * Pentium F0 0F bugfix can have resulted in the mapped
11448 * IDT being write-protected.
11449 */
11450-static inline void set_intr_gate(unsigned int n, void *addr)
11451+static inline void set_intr_gate(unsigned int n, const void *addr)
11452 {
11453 BUG_ON((unsigned)n > 0xFF);
11454 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
11455@@ -369,19 +382,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
11456 /*
11457 * This routine sets up an interrupt gate at directory privilege level 3.
11458 */
11459-static inline void set_system_intr_gate(unsigned int n, void *addr)
11460+static inline void set_system_intr_gate(unsigned int n, const void *addr)
11461 {
11462 BUG_ON((unsigned)n > 0xFF);
11463 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
11464 }
11465
11466-static inline void set_system_trap_gate(unsigned int n, void *addr)
11467+static inline void set_system_trap_gate(unsigned int n, const void *addr)
11468 {
11469 BUG_ON((unsigned)n > 0xFF);
11470 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
11471 }
11472
11473-static inline void set_trap_gate(unsigned int n, void *addr)
11474+static inline void set_trap_gate(unsigned int n, const void *addr)
11475 {
11476 BUG_ON((unsigned)n > 0xFF);
11477 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
11478@@ -390,19 +403,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
11479 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
11480 {
11481 BUG_ON((unsigned)n > 0xFF);
11482- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
11483+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
11484 }
11485
11486-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
11487+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
11488 {
11489 BUG_ON((unsigned)n > 0xFF);
11490 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
11491 }
11492
11493-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
11494+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
11495 {
11496 BUG_ON((unsigned)n > 0xFF);
11497 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
11498 }
11499
11500+#ifdef CONFIG_X86_32
11501+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
11502+{
11503+ struct desc_struct d;
11504+
11505+ if (likely(limit))
11506+ limit = (limit - 1UL) >> PAGE_SHIFT;
11507+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
11508+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
11509+}
11510+#endif
11511+
11512 #endif /* _ASM_X86_DESC_H */
11513diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
11514index 278441f..b95a174 100644
11515--- a/arch/x86/include/asm/desc_defs.h
11516+++ b/arch/x86/include/asm/desc_defs.h
11517@@ -31,6 +31,12 @@ struct desc_struct {
11518 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
11519 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
11520 };
11521+ struct {
11522+ u16 offset_low;
11523+ u16 seg;
11524+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
11525+ unsigned offset_high: 16;
11526+ } gate;
11527 };
11528 } __attribute__((packed));
11529
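
The pack_gate() rewrite in desc.h and the "gate" view added above are two encodings of the same 8-byte descriptor: the removed lines assembled the two 32-bit words by hand, the added bitfields name each field. A userspace check, assuming GCC/Clang bitfield layout on little-endian x86, that the two encodings agree:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    struct gate {
        uint16_t offset_low;
        uint16_t seg;
        unsigned reserved : 8, type : 4, s : 1, dpl : 2, p : 1;
        unsigned offset_high : 16;
    } __attribute__((packed));

    int main(void)
    {
        uint32_t base = 0xc0102030, seg = 0x10, type = 0xE, dpl = 0;

        /* the old open-coded packing from the removed lines */
        uint32_t a = (seg << 16) | (base & 0xffff);
        uint32_t b = (base & 0xffff0000) |
                     (((0x80 | type | (dpl << 5)) & 0xff) << 8);

        /* the new named-field packing from the added lines */
        struct gate g = {
            .offset_low = (uint16_t)base, .seg = (uint16_t)seg,
            .reserved = 0, .type = type, .s = 0, .dpl = dpl, .p = 1,
            .offset_high = base >> 16,
        };
        uint32_t w[2];
        memcpy(w, &g, sizeof(w));
        printf("%s\n", (w[0] == a && w[1] == b) ? "match" : "differ");
        return 0;
    }
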
11530diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
11531index 3778256..c5d4fce 100644
11532--- a/arch/x86/include/asm/e820.h
11533+++ b/arch/x86/include/asm/e820.h
11534@@ -69,7 +69,7 @@ struct e820map {
11535 #define ISA_START_ADDRESS 0xa0000
11536 #define ISA_END_ADDRESS 0x100000
11537
11538-#define BIOS_BEGIN 0x000a0000
11539+#define BIOS_BEGIN 0x000c0000
11540 #define BIOS_END 0x00100000
11541
11542 #define BIOS_ROM_BASE 0xffe00000
11543diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
11544index 5939f44..f8845f6 100644
11545--- a/arch/x86/include/asm/elf.h
11546+++ b/arch/x86/include/asm/elf.h
11547@@ -243,7 +243,25 @@ extern int force_personality32;
11548 the loader. We need to make sure that it is out of the way of the program
11549 that it will "exec", and that there is sufficient room for the brk. */
11550
11551+#ifdef CONFIG_PAX_SEGMEXEC
11552+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
11553+#else
11554 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
11555+#endif
11556+
11557+#ifdef CONFIG_PAX_ASLR
11558+#ifdef CONFIG_X86_32
11559+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
11560+
11561+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
11562+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
11563+#else
11564+#define PAX_ELF_ET_DYN_BASE 0x400000UL
11565+
11566+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
11567+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
11568+#endif
11569+#endif
11570
11571 /* This yields a mask that user programs can use to figure out what
11572 instruction set this CPU supports. This could be done in user space,
11573@@ -296,16 +314,12 @@ do { \
11574
11575 #define ARCH_DLINFO \
11576 do { \
11577- if (vdso_enabled) \
11578- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
11579- (unsigned long)current->mm->context.vdso); \
11580+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
11581 } while (0)
11582
11583 #define ARCH_DLINFO_X32 \
11584 do { \
11585- if (vdso_enabled) \
11586- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
11587- (unsigned long)current->mm->context.vdso); \
11588+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
11589 } while (0)
11590
11591 #define AT_SYSINFO 32
11592@@ -320,7 +334,7 @@ else \
11593
11594 #endif /* !CONFIG_X86_32 */
11595
11596-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
11597+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
11598
11599 #define VDSO_ENTRY \
11600 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
11601@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
11602 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
11603 #define compat_arch_setup_additional_pages syscall32_setup_pages
11604
11605-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
11606-#define arch_randomize_brk arch_randomize_brk
11607-
11608 /*
11609 * True on X86_32 or when emulating IA32 on X86_64
11610 */
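
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN above are counts of randomized bits. Assuming PaX applies such a delta as "random & ((1 << len) - 1)" pages added to the base (an assumption; this hunk only defines the widths), the 16-bit value used in the 32-bit case spans:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned delta_len = 16;   /* PAX_DELTA_MMAP_LEN, 32-bit case above */
        unsigned long span = (1UL << delta_len) << PAGE_SHIFT;
        printf("randomized span: %lu MiB\n", span >> 20);   /* 256 MiB */
        return 0;
    }
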
11611diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
11612index 75ce3f4..882e801 100644
11613--- a/arch/x86/include/asm/emergency-restart.h
11614+++ b/arch/x86/include/asm/emergency-restart.h
11615@@ -13,6 +13,6 @@ enum reboot_type {
11616
11617 extern enum reboot_type reboot_type;
11618
11619-extern void machine_emergency_restart(void);
11620+extern void machine_emergency_restart(void) __noreturn;
11621
11622 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
11623diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
11624index 41ab26e..a88c9e6 100644
11625--- a/arch/x86/include/asm/fpu-internal.h
11626+++ b/arch/x86/include/asm/fpu-internal.h
11627@@ -126,7 +126,9 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
11628 ({ \
11629 int err; \
11630 asm volatile(ASM_STAC "\n" \
11631- "1:" #insn "\n\t" \
11632+ "1:" \
11633+ __copyuser_seg \
11634+ #insn "\n\t" \
11635 "2: " ASM_CLAC "\n" \
11636 ".section .fixup,\"ax\"\n" \
11637 "3: movl $-1,%[err]\n" \
11638@@ -299,7 +301,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
11639 "emms\n\t" /* clear stack tags */
11640 "fildl %P[addr]", /* set F?P to defined value */
11641 X86_FEATURE_FXSAVE_LEAK,
11642- [addr] "m" (tsk->thread.fpu.has_fpu));
11643+ [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
11644
11645 return fpu_restore_checking(&tsk->thread.fpu);
11646 }
11647diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
11648index f373046..02653e2 100644
11649--- a/arch/x86/include/asm/futex.h
11650+++ b/arch/x86/include/asm/futex.h
11651@@ -12,6 +12,7 @@
11652 #include <asm/smap.h>
11653
11654 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
11655+ typecheck(u32 __user *, uaddr); \
11656 asm volatile("\t" ASM_STAC "\n" \
11657 "1:\t" insn "\n" \
11658 "2:\t" ASM_CLAC "\n" \
11659@@ -20,15 +21,16 @@
11660 "\tjmp\t2b\n" \
11661 "\t.previous\n" \
11662 _ASM_EXTABLE(1b, 3b) \
11663- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
11664+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
11665 : "i" (-EFAULT), "0" (oparg), "1" (0))
11666
11667 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
11668+ typecheck(u32 __user *, uaddr); \
11669 asm volatile("\t" ASM_STAC "\n" \
11670 "1:\tmovl %2, %0\n" \
11671 "\tmovl\t%0, %3\n" \
11672 "\t" insn "\n" \
11673- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
11674+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
11675 "\tjnz\t1b\n" \
11676 "3:\t" ASM_CLAC "\n" \
11677 "\t.section .fixup,\"ax\"\n" \
11678@@ -38,7 +40,7 @@
11679 _ASM_EXTABLE(1b, 4b) \
11680 _ASM_EXTABLE(2b, 4b) \
11681 : "=&a" (oldval), "=&r" (ret), \
11682- "+m" (*uaddr), "=&r" (tem) \
11683+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
11684 : "r" (oparg), "i" (-EFAULT), "1" (0))
11685
11686 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
11687@@ -65,10 +67,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
11688
11689 switch (op) {
11690 case FUTEX_OP_SET:
11691- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
11692+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
11693 break;
11694 case FUTEX_OP_ADD:
11695- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
11696+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
11697 uaddr, oparg);
11698 break;
11699 case FUTEX_OP_OR:
11700@@ -128,14 +130,14 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
11701 return -EFAULT;
11702
11703 asm volatile("\t" ASM_STAC "\n"
11704- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
11705+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
11706 "2:\t" ASM_CLAC "\n"
11707 "\t.section .fixup, \"ax\"\n"
11708 "3:\tmov %3, %0\n"
11709 "\tjmp 2b\n"
11710 "\t.previous\n"
11711 _ASM_EXTABLE(1b, 3b)
11712- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
11713+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
11714 : "i" (-EFAULT), "r" (newval), "1" (oldval)
11715 : "memory"
11716 );
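
The futex hunks above wrap uaddr in typecheck(u32 __user *, uaddr) so a caller passing the wrong pointer type breaks the build instead of compiling silently. A standalone sketch of the same macro, assuming GCC extensions (statement expressions, typeof); the kernel's own version in include/linux/typecheck.h is essentially identical.

    #include <stdio.h>

    /* Compile-time type check: comparing the two pointers warns (or errors
     * with -Werror) whenever 'x' is not exactly of type 'type'. */
    #define typecheck(type, x)                 \
    ({                                         \
        type __dummy;                          \
        typeof(x) __dummy2;                    \
        (void)(&__dummy == &__dummy2);         \
        1;                                     \
    })

    int main(void)
    {
        unsigned int *p = NULL;
        typecheck(unsigned int *, p);   /* fine */
        /* typecheck(int *, p); */      /* would warn: distinct pointer types */
        printf("type checks passed\n");
        return 0;
    }
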
11717diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
11718index eb92a6e..b98b2f4 100644
11719--- a/arch/x86/include/asm/hw_irq.h
11720+++ b/arch/x86/include/asm/hw_irq.h
11721@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
11722 extern void enable_IO_APIC(void);
11723
11724 /* Statistics */
11725-extern atomic_t irq_err_count;
11726-extern atomic_t irq_mis_count;
11727+extern atomic_unchecked_t irq_err_count;
11728+extern atomic_unchecked_t irq_mis_count;
11729
11730 /* EISA */
11731 extern void eisa_set_level_irq(unsigned int irq);
11732diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
11733index d8e8eef..15b1179 100644
11734--- a/arch/x86/include/asm/io.h
11735+++ b/arch/x86/include/asm/io.h
11736@@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
11737 return ioremap_nocache(offset, size);
11738 }
11739
11740-extern void iounmap(volatile void __iomem *addr);
11741+extern void iounmap(const volatile void __iomem *addr);
11742
11743 extern void set_iounmap_nonlazy(void);
11744
11745@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
11746
11747 #include <linux/vmalloc.h>
11748
11749+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
11750+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
11751+{
11752+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
11753+}
11754+
11755+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
11756+{
11757+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
11758+}
11759+
11760 /*
11761 * Convert a virtual cached pointer to an uncached pointer
11762 */
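
The io.h hunk above rejects physical-memory accesses beyond what the CPU can address: the last page of the requested range must fall below 2^(x86_phys_bits - PAGE_SHIFT) page frames. A userspace sketch of the same arithmetic, with phys_bits passed in explicitly instead of read from boot_cpu_data:

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Mirror of the patch's check: round the end of the range up to a
     * whole page, then compare page frame numbers. */
    static int valid_phys_addr_range(unsigned long addr, size_t count,
                                     unsigned int phys_bits)
    {
        unsigned long long max_pfn = 1ULL << (phys_bits - PAGE_SHIFT);
        return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < max_pfn;
    }

    int main(void)
    {
        /* 36 physical address bits -> 64 GiB addressable */
        printf("%d\n", valid_phys_addr_range(0xffff0000UL, 4096, 36));   /* 1: in range */
        printf("%d\n", valid_phys_addr_range(0xfffffffffUL, 4096, 36));  /* 0: past 2^36 */
        return 0;
    }
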
11763diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
11764index bba3cf8..06bc8da 100644
11765--- a/arch/x86/include/asm/irqflags.h
11766+++ b/arch/x86/include/asm/irqflags.h
11767@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
11768 sti; \
11769 sysexit
11770
11771+#define GET_CR0_INTO_RDI mov %cr0, %rdi
11772+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
11773+#define GET_CR3_INTO_RDI mov %cr3, %rdi
11774+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
11775+
11776 #else
11777 #define INTERRUPT_RETURN iret
11778 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
11779diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
11780index d3ddd17..c9fb0cc 100644
11781--- a/arch/x86/include/asm/kprobes.h
11782+++ b/arch/x86/include/asm/kprobes.h
11783@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
11784 #define RELATIVEJUMP_SIZE 5
11785 #define RELATIVECALL_OPCODE 0xe8
11786 #define RELATIVE_ADDR_SIZE 4
11787-#define MAX_STACK_SIZE 64
11788-#define MIN_STACK_SIZE(ADDR) \
11789- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
11790- THREAD_SIZE - (unsigned long)(ADDR))) \
11791- ? (MAX_STACK_SIZE) \
11792- : (((unsigned long)current_thread_info()) + \
11793- THREAD_SIZE - (unsigned long)(ADDR)))
11794+#define MAX_STACK_SIZE 64UL
11795+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
11796
11797 #define flush_insn_slot(p) do { } while (0)
11798
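
The kprobes hunk above collapses the old conditional into min(MAX_STACK_SIZE, stack_top - addr): snapshot at most 64 bytes, but never read past the top of the kernel stack (the patch uses current->thread.sp0 as the top; the stack_top parameter below stands in for it). A small sketch of the clamp:

    #include <stdio.h>

    #define MAX_STACK_SIZE 64UL

    /* How many bytes may safely be copied from 'addr' when the stack
     * ends at 'stack_top' -- exactly the patched MIN_STACK_SIZE(). */
    static unsigned long min_stack_size(unsigned long stack_top, unsigned long addr)
    {
        unsigned long room = stack_top - addr;   /* bytes left above addr */
        return room < MAX_STACK_SIZE ? room : MAX_STACK_SIZE;
    }

    int main(void)
    {
        unsigned long top = 0x1000;
        printf("%lu\n", min_stack_size(top, 0x0f00)); /* 64: plenty of headroom */
        printf("%lu\n", min_stack_size(top, 0x0ff0)); /* 16: clamped at the top */
        return 0;
    }
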
11799diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
11800index b2e11f4..f293e2e 100644
11801--- a/arch/x86/include/asm/kvm_host.h
11802+++ b/arch/x86/include/asm/kvm_host.h
11803@@ -707,7 +707,7 @@ struct kvm_x86_ops {
11804 int (*check_intercept)(struct kvm_vcpu *vcpu,
11805 struct x86_instruction_info *info,
11806 enum x86_intercept_stage stage);
11807-};
11808+} __do_const;
11809
11810 struct kvm_arch_async_pf {
11811 u32 token;
11812diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
11813index c8bed0d..85c03fd 100644
11814--- a/arch/x86/include/asm/local.h
11815+++ b/arch/x86/include/asm/local.h
11816@@ -10,33 +10,97 @@ typedef struct {
11817 atomic_long_t a;
11818 } local_t;
11819
11820+typedef struct {
11821+ atomic_long_unchecked_t a;
11822+} local_unchecked_t;
11823+
11824 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
11825
11826 #define local_read(l) atomic_long_read(&(l)->a)
11827+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
11828 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
11829+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
11830
11831 static inline void local_inc(local_t *l)
11832 {
11833- asm volatile(_ASM_INC "%0"
11834+ asm volatile(_ASM_INC "%0\n"
11835+
11836+#ifdef CONFIG_PAX_REFCOUNT
11837+ "jno 0f\n"
11838+ _ASM_DEC "%0\n"
11839+ "int $4\n0:\n"
11840+ _ASM_EXTABLE(0b, 0b)
11841+#endif
11842+
11843+ : "+m" (l->a.counter));
11844+}
11845+
11846+static inline void local_inc_unchecked(local_unchecked_t *l)
11847+{
11848+ asm volatile(_ASM_INC "%0\n"
11849 : "+m" (l->a.counter));
11850 }
11851
11852 static inline void local_dec(local_t *l)
11853 {
11854- asm volatile(_ASM_DEC "%0"
11855+ asm volatile(_ASM_DEC "%0\n"
11856+
11857+#ifdef CONFIG_PAX_REFCOUNT
11858+ "jno 0f\n"
11859+ _ASM_INC "%0\n"
11860+ "int $4\n0:\n"
11861+ _ASM_EXTABLE(0b, 0b)
11862+#endif
11863+
11864+ : "+m" (l->a.counter));
11865+}
11866+
11867+static inline void local_dec_unchecked(local_unchecked_t *l)
11868+{
11869+ asm volatile(_ASM_DEC "%0\n"
11870 : "+m" (l->a.counter));
11871 }
11872
11873 static inline void local_add(long i, local_t *l)
11874 {
11875- asm volatile(_ASM_ADD "%1,%0"
11876+ asm volatile(_ASM_ADD "%1,%0\n"
11877+
11878+#ifdef CONFIG_PAX_REFCOUNT
11879+ "jno 0f\n"
11880+ _ASM_SUB "%1,%0\n"
11881+ "int $4\n0:\n"
11882+ _ASM_EXTABLE(0b, 0b)
11883+#endif
11884+
11885+ : "+m" (l->a.counter)
11886+ : "ir" (i));
11887+}
11888+
11889+static inline void local_add_unchecked(long i, local_unchecked_t *l)
11890+{
11891+ asm volatile(_ASM_ADD "%1,%0\n"
11892 : "+m" (l->a.counter)
11893 : "ir" (i));
11894 }
11895
11896 static inline void local_sub(long i, local_t *l)
11897 {
11898- asm volatile(_ASM_SUB "%1,%0"
11899+ asm volatile(_ASM_SUB "%1,%0\n"
11900+
11901+#ifdef CONFIG_PAX_REFCOUNT
11902+ "jno 0f\n"
11903+ _ASM_ADD "%1,%0\n"
11904+ "int $4\n0:\n"
11905+ _ASM_EXTABLE(0b, 0b)
11906+#endif
11907+
11908+ : "+m" (l->a.counter)
11909+ : "ir" (i));
11910+}
11911+
11912+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
11913+{
11914+ asm volatile(_ASM_SUB "%1,%0\n"
11915 : "+m" (l->a.counter)
11916 : "ir" (i));
11917 }
11918@@ -54,7 +118,16 @@ static inline int local_sub_and_test(long i, local_t *l)
11919 {
11920 unsigned char c;
11921
11922- asm volatile(_ASM_SUB "%2,%0; sete %1"
11923+ asm volatile(_ASM_SUB "%2,%0\n"
11924+
11925+#ifdef CONFIG_PAX_REFCOUNT
11926+ "jno 0f\n"
11927+ _ASM_ADD "%2,%0\n"
11928+ "int $4\n0:\n"
11929+ _ASM_EXTABLE(0b, 0b)
11930+#endif
11931+
11932+ "sete %1\n"
11933 : "+m" (l->a.counter), "=qm" (c)
11934 : "ir" (i) : "memory");
11935 return c;
11936@@ -72,7 +145,16 @@ static inline int local_dec_and_test(local_t *l)
11937 {
11938 unsigned char c;
11939
11940- asm volatile(_ASM_DEC "%0; sete %1"
11941+ asm volatile(_ASM_DEC "%0\n"
11942+
11943+#ifdef CONFIG_PAX_REFCOUNT
11944+ "jno 0f\n"
11945+ _ASM_INC "%0\n"
11946+ "int $4\n0:\n"
11947+ _ASM_EXTABLE(0b, 0b)
11948+#endif
11949+
11950+ "sete %1\n"
11951 : "+m" (l->a.counter), "=qm" (c)
11952 : : "memory");
11953 return c != 0;
11954@@ -90,7 +172,16 @@ static inline int local_inc_and_test(local_t *l)
11955 {
11956 unsigned char c;
11957
11958- asm volatile(_ASM_INC "%0; sete %1"
11959+ asm volatile(_ASM_INC "%0\n"
11960+
11961+#ifdef CONFIG_PAX_REFCOUNT
11962+ "jno 0f\n"
11963+ _ASM_DEC "%0\n"
11964+ "int $4\n0:\n"
11965+ _ASM_EXTABLE(0b, 0b)
11966+#endif
11967+
11968+ "sete %1\n"
11969 : "+m" (l->a.counter), "=qm" (c)
11970 : : "memory");
11971 return c != 0;
11972@@ -109,7 +200,16 @@ static inline int local_add_negative(long i, local_t *l)
11973 {
11974 unsigned char c;
11975
11976- asm volatile(_ASM_ADD "%2,%0; sets %1"
11977+ asm volatile(_ASM_ADD "%2,%0\n"
11978+
11979+#ifdef CONFIG_PAX_REFCOUNT
11980+ "jno 0f\n"
11981+ _ASM_SUB "%2,%0\n"
11982+ "int $4\n0:\n"
11983+ _ASM_EXTABLE(0b, 0b)
11984+#endif
11985+
11986+ "sets %1\n"
11987 : "+m" (l->a.counter), "=qm" (c)
11988 : "ir" (i) : "memory");
11989 return c;
11990@@ -132,7 +232,15 @@ static inline long local_add_return(long i, local_t *l)
11991 #endif
11992 /* Modern 486+ processor */
11993 __i = i;
11994- asm volatile(_ASM_XADD "%0, %1;"
11995+ asm volatile(_ASM_XADD "%0, %1\n"
11996+
11997+#ifdef CONFIG_PAX_REFCOUNT
11998+ "jno 0f\n"
11999+ _ASM_MOV "%0,%1\n"
12000+ "int $4\n0:\n"
12001+ _ASM_EXTABLE(0b, 0b)
12002+#endif
12003+
12004 : "+r" (i), "+m" (l->a.counter)
12005 : : "memory");
12006 return i + __i;
12007@@ -147,6 +255,38 @@ no_xadd: /* Legacy 386 processor */
12008 #endif
12009 }
12010
12011+/**
12012+ * local_add_return_unchecked - add and return
12013+ * @i: integer value to add
12014+ * @l: pointer to type local_unchecked_t
12015+ *
12016+ * Atomically adds @i to @l and returns @i + @l
12017+ */
12018+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
12019+{
12020+ long __i;
12021+#ifdef CONFIG_M386
12022+ unsigned long flags;
12023+ if (unlikely(boot_cpu_data.x86 <= 3))
12024+ goto no_xadd;
12025+#endif
12026+ /* Modern 486+ processor */
12027+ __i = i;
12028+ asm volatile(_ASM_XADD "%0, %1\n"
12029+ : "+r" (i), "+m" (l->a.counter)
12030+ : : "memory");
12031+ return i + __i;
12032+
12033+#ifdef CONFIG_M386
12034+no_xadd: /* Legacy 386 processor */
12035+ local_irq_save(flags);
12036+ __i = local_read_unchecked(l);
12037+ local_set_unchecked(l, i + __i);
12038+ local_irq_restore(flags);
12039+ return i + __i;
12040+#endif
12041+}
12042+
12043 static inline long local_sub_return(long i, local_t *l)
12044 {
12045 return local_add_return(-i, l);
12046@@ -157,6 +297,8 @@ static inline long local_sub_return(long i, local_t *l)
12047
12048 #define local_cmpxchg(l, o, n) \
12049 (cmpxchg_local(&((l)->a.counter), (o), (n)))
12050+#define local_cmpxchg_unchecked(l, o, n) \
12051+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
12052 /* Always has a lock prefix */
12053 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
12054
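
Every PAX_REFCOUNT hunk in local.h follows the same shape: perform the arithmetic, "jno" past the fixup when the overflow flag is clear, otherwise undo the operation and raise a trap ("int $4", the overflow vector, caught via the exception table). A userspace sketch of the pattern for a plain increment, assuming x86 GCC inline asm; it sets a flag and aborts instead of relying on kernel exception fixups.

    #include <stdio.h>
    #include <stdlib.h>

    /* Signed counter with an overflow-checked increment, modelled on the
     * patched local_inc(): increment, then undo and flag if OF was set. */
    static void checked_inc(int *counter)
    {
        int overflow = 0;

        asm volatile("incl %0\n\t"
                     "jno 1f\n\t"        /* no overflow: skip the fixup */
                     "decl %0\n\t"       /* undo the wrap, keep the old value */
                     "movl $1, %1\n"     /* remember that we overflowed */
                     "1:"
                     : "+m" (*counter), "+r" (overflow));

        if (overflow) {
            fprintf(stderr, "refcount overflow caught at %d\n", *counter);
            abort();   /* the kernel raises int $4 and kills the task instead */
        }
    }

    int main(void)
    {
        int c = 0x7ffffffe;
        checked_inc(&c);    /* 0x7fffffff: fine */
        checked_inc(&c);    /* would wrap to INT_MIN: caught and undone */
        return 0;
    }
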
12055diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
12056index 593e51d..fa69c9a 100644
12057--- a/arch/x86/include/asm/mman.h
12058+++ b/arch/x86/include/asm/mman.h
12059@@ -5,4 +5,14 @@
12060
12061 #include <asm-generic/mman.h>
12062
12063+#ifdef __KERNEL__
12064+#ifndef __ASSEMBLY__
12065+#ifdef CONFIG_X86_32
12066+#define arch_mmap_check i386_mmap_check
12067+int i386_mmap_check(unsigned long addr, unsigned long len,
12068+ unsigned long flags);
12069+#endif
12070+#endif
12071+#endif
12072+
12073 #endif /* _ASM_X86_MMAN_H */
12074diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
12075index 5f55e69..e20bfb1 100644
12076--- a/arch/x86/include/asm/mmu.h
12077+++ b/arch/x86/include/asm/mmu.h
12078@@ -9,7 +9,7 @@
12079 * we put the segment information here.
12080 */
12081 typedef struct {
12082- void *ldt;
12083+ struct desc_struct *ldt;
12084 int size;
12085
12086 #ifdef CONFIG_X86_64
12087@@ -18,7 +18,19 @@ typedef struct {
12088 #endif
12089
12090 struct mutex lock;
12091- void *vdso;
12092+ unsigned long vdso;
12093+
12094+#ifdef CONFIG_X86_32
12095+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
12096+ unsigned long user_cs_base;
12097+ unsigned long user_cs_limit;
12098+
12099+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
12100+ cpumask_t cpu_user_cs_mask;
12101+#endif
12102+
12103+#endif
12104+#endif
12105 } mm_context_t;
12106
12107 #ifdef CONFIG_SMP
12108diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
12109index cdbf367..adb37ac 100644
12110--- a/arch/x86/include/asm/mmu_context.h
12111+++ b/arch/x86/include/asm/mmu_context.h
12112@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
12113
12114 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
12115 {
12116+
12117+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12118+ unsigned int i;
12119+ pgd_t *pgd;
12120+
12121+ pax_open_kernel();
12122+ pgd = get_cpu_pgd(smp_processor_id());
12123+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
12124+ set_pgd_batched(pgd+i, native_make_pgd(0));
12125+ pax_close_kernel();
12126+#endif
12127+
12128 #ifdef CONFIG_SMP
12129 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
12130 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
12131@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
12132 struct task_struct *tsk)
12133 {
12134 unsigned cpu = smp_processor_id();
12135+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
12136+ int tlbstate = TLBSTATE_OK;
12137+#endif
12138
12139 if (likely(prev != next)) {
12140 #ifdef CONFIG_SMP
12141+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
12142+ tlbstate = this_cpu_read(cpu_tlbstate.state);
12143+#endif
12144 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
12145 this_cpu_write(cpu_tlbstate.active_mm, next);
12146 #endif
12147 cpumask_set_cpu(cpu, mm_cpumask(next));
12148
12149 /* Re-load page tables */
12150+#ifdef CONFIG_PAX_PER_CPU_PGD
12151+ pax_open_kernel();
12152+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
12153+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
12154+ pax_close_kernel();
12155+ load_cr3(get_cpu_pgd(cpu));
12156+#else
12157 load_cr3(next->pgd);
12158+#endif
12159
12160 /* stop flush ipis for the previous mm */
12161 cpumask_clear_cpu(cpu, mm_cpumask(prev));
12162@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
12163 */
12164 if (unlikely(prev->context.ldt != next->context.ldt))
12165 load_LDT_nolock(&next->context);
12166- }
12167+
12168+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
12169+ if (!(__supported_pte_mask & _PAGE_NX)) {
12170+ smp_mb__before_clear_bit();
12171+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
12172+ smp_mb__after_clear_bit();
12173+ cpu_set(cpu, next->context.cpu_user_cs_mask);
12174+ }
12175+#endif
12176+
12177+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
12178+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
12179+ prev->context.user_cs_limit != next->context.user_cs_limit))
12180+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
12181 #ifdef CONFIG_SMP
12182+ else if (unlikely(tlbstate != TLBSTATE_OK))
12183+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
12184+#endif
12185+#endif
12186+
12187+ }
12188 else {
12189+
12190+#ifdef CONFIG_PAX_PER_CPU_PGD
12191+ pax_open_kernel();
12192+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
12193+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
12194+ pax_close_kernel();
12195+ load_cr3(get_cpu_pgd(cpu));
12196+#endif
12197+
12198+#ifdef CONFIG_SMP
12199 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
12200 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
12201
12202@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
12203 * tlb flush IPI delivery. We must reload CR3
12204 * to make sure to use no freed page tables.
12205 */
12206+
12207+#ifndef CONFIG_PAX_PER_CPU_PGD
12208 load_cr3(next->pgd);
12209+#endif
12210+
12211 load_LDT_nolock(&next->context);
12212+
12213+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
12214+ if (!(__supported_pte_mask & _PAGE_NX))
12215+ cpu_set(cpu, next->context.cpu_user_cs_mask);
12216+#endif
12217+
12218+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
12219+#ifdef CONFIG_PAX_PAGEEXEC
12220+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
12221+#endif
12222+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
12223+#endif
12224+
12225 }
12226+#endif
12227 }
12228-#endif
12229 }
12230
12231 #define activate_mm(prev, next) \
12232diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
12233index 9eae775..c914fea 100644
12234--- a/arch/x86/include/asm/module.h
12235+++ b/arch/x86/include/asm/module.h
12236@@ -5,6 +5,7 @@
12237
12238 #ifdef CONFIG_X86_64
12239 /* X86_64 does not define MODULE_PROC_FAMILY */
12240+#define MODULE_PROC_FAMILY ""
12241 #elif defined CONFIG_M386
12242 #define MODULE_PROC_FAMILY "386 "
12243 #elif defined CONFIG_M486
12244@@ -59,8 +60,20 @@
12245 #error unknown processor family
12246 #endif
12247
12248-#ifdef CONFIG_X86_32
12249-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
12250+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
12251+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
12252+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
12253+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
12254+#else
12255+#define MODULE_PAX_KERNEXEC ""
12256 #endif
12257
12258+#ifdef CONFIG_PAX_MEMORY_UDEREF
12259+#define MODULE_PAX_UDEREF "UDEREF "
12260+#else
12261+#define MODULE_PAX_UDEREF ""
12262+#endif
12263+
12264+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
12265+
12266 #endif /* _ASM_X86_MODULE_H */
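
The module.h hunk above builds MODULE_ARCH_VERMAGIC by pasting the processor-family string together with the PAX feature strings, so a module built with KERNEXEC or UDEREF refuses to load into a kernel without them. The mechanism is plain adjacent-string-literal concatenation, sketched below with stand-in config macros (flip them to see the vermagic change):

    #include <stdio.h>

    #define CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS 1
    /* #define CONFIG_PAX_MEMORY_UDEREF 1 */

    #define MODULE_PROC_FAMILY ""      /* x86_64 defines it empty */

    #ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
    #define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
    #else
    #define MODULE_PAX_KERNEXEC ""
    #endif

    #ifdef CONFIG_PAX_MEMORY_UDEREF
    #define MODULE_PAX_UDEREF "UDEREF "
    #else
    #define MODULE_PAX_UDEREF ""
    #endif

    /* adjacent string literals concatenate at compile time */
    #define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF

    int main(void)
    {
        printf("vermagic suffix: \"%s\"\n", MODULE_ARCH_VERMAGIC);
        return 0;
    }
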
12267diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
12268index 320f7bb..e89f8f8 100644
12269--- a/arch/x86/include/asm/page_64_types.h
12270+++ b/arch/x86/include/asm/page_64_types.h
12271@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
12272
12273 /* duplicated to the one in bootmem.h */
12274 extern unsigned long max_pfn;
12275-extern unsigned long phys_base;
12276+extern const unsigned long phys_base;
12277
12278 extern unsigned long __phys_addr(unsigned long);
12279 #define __phys_reloc_hide(x) (x)
12280diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
12281index a0facf3..c017b15 100644
12282--- a/arch/x86/include/asm/paravirt.h
12283+++ b/arch/x86/include/asm/paravirt.h
12284@@ -632,6 +632,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
12285 val);
12286 }
12287
12288+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
12289+{
12290+ pgdval_t val = native_pgd_val(pgd);
12291+
12292+ if (sizeof(pgdval_t) > sizeof(long))
12293+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
12294+ val, (u64)val >> 32);
12295+ else
12296+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
12297+ val);
12298+}
12299+
12300 static inline void pgd_clear(pgd_t *pgdp)
12301 {
12302 set_pgd(pgdp, __pgd(0));
12303@@ -713,6 +725,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
12304 pv_mmu_ops.set_fixmap(idx, phys, flags);
12305 }
12306
12307+#ifdef CONFIG_PAX_KERNEXEC
12308+static inline unsigned long pax_open_kernel(void)
12309+{
12310+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
12311+}
12312+
12313+static inline unsigned long pax_close_kernel(void)
12314+{
12315+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
12316+}
12317+#else
12318+static inline unsigned long pax_open_kernel(void) { return 0; }
12319+static inline unsigned long pax_close_kernel(void) { return 0; }
12320+#endif
12321+
12322 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
12323
12324 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
12325@@ -929,7 +956,7 @@ extern void default_banner(void);
12326
12327 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
12328 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
12329-#define PARA_INDIRECT(addr) *%cs:addr
12330+#define PARA_INDIRECT(addr) *%ss:addr
12331 #endif
12332
12333 #define INTERRUPT_RETURN \
12334@@ -1004,6 +1031,21 @@ extern void default_banner(void);
12335 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
12336 CLBR_NONE, \
12337 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
12338+
12339+#define GET_CR0_INTO_RDI \
12340+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
12341+ mov %rax,%rdi
12342+
12343+#define SET_RDI_INTO_CR0 \
12344+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
12345+
12346+#define GET_CR3_INTO_RDI \
12347+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
12348+ mov %rax,%rdi
12349+
12350+#define SET_RDI_INTO_CR3 \
12351+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
12352+
12353 #endif /* CONFIG_X86_32 */
12354
12355 #endif /* __ASSEMBLY__ */
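
The paravirt hunks route pax_open_kernel/pax_close_kernel through pv_mmu_ops, the same indirection already used for set_pgd: callers hit a function pointer that native code or the hypervisor installs at boot. A cut-down sketch of that dispatch shape, with hypothetical native_* implementations standing in for the real CR0.WP toggles:

    #include <stdio.h>

    /* Minimal analogue of pv_mmu_ops: a table of overridable callbacks. */
    struct mmu_ops {
        unsigned long (*open_kernel)(void);
        unsigned long (*close_kernel)(void);
    };

    static unsigned long native_open_kernel(void)
    {
        puts("native: clear CR0.WP (kernel text writable)");
        return 0;
    }

    static unsigned long native_close_kernel(void)
    {
        puts("native: set CR0.WP (kernel text read-only)");
        return 0;
    }

    /* Populated at "boot"; a hypervisor would install its own pointers. */
    static struct mmu_ops pv_mmu_ops = {
        .open_kernel  = native_open_kernel,
        .close_kernel = native_close_kernel,
    };

    static unsigned long pax_open_kernel(void)  { return pv_mmu_ops.open_kernel(); }
    static unsigned long pax_close_kernel(void) { return pv_mmu_ops.close_kernel(); }

    int main(void)
    {
        pax_open_kernel();
        /* ... patch a normally read-only structure here ... */
        pax_close_kernel();
        return 0;
    }
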
12356diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
12357index 142236e..57cf5ea 100644
12358--- a/arch/x86/include/asm/paravirt_types.h
12359+++ b/arch/x86/include/asm/paravirt_types.h
12360@@ -312,6 +312,7 @@ struct pv_mmu_ops {
12361 struct paravirt_callee_save make_pud;
12362
12363 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
12364+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
12365 #endif /* PAGETABLE_LEVELS == 4 */
12366 #endif /* PAGETABLE_LEVELS >= 3 */
12367
12368@@ -323,6 +324,12 @@ struct pv_mmu_ops {
12369 an mfn. We can tell which is which from the index. */
12370 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
12371 phys_addr_t phys, pgprot_t flags);
12372+
12373+#ifdef CONFIG_PAX_KERNEXEC
12374+ unsigned long (*pax_open_kernel)(void);
12375+ unsigned long (*pax_close_kernel)(void);
12376+#endif
12377+
12378 };
12379
12380 struct arch_spinlock;
12381diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
12382index b4389a4..7024269 100644
12383--- a/arch/x86/include/asm/pgalloc.h
12384+++ b/arch/x86/include/asm/pgalloc.h
12385@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
12386 pmd_t *pmd, pte_t *pte)
12387 {
12388 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
12389+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
12390+}
12391+
12392+static inline void pmd_populate_user(struct mm_struct *mm,
12393+ pmd_t *pmd, pte_t *pte)
12394+{
12395+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
12396 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
12397 }
12398
12399@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
12400
12401 #ifdef CONFIG_X86_PAE
12402 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
12403+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
12404+{
12405+ pud_populate(mm, pudp, pmd);
12406+}
12407 #else /* !CONFIG_X86_PAE */
12408 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
12409 {
12410 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
12411 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
12412 }
12413+
12414+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
12415+{
12416+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
12417+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
12418+}
12419 #endif /* CONFIG_X86_PAE */
12420
12421 #if PAGETABLE_LEVELS > 3
12422@@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
12423 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
12424 }
12425
12426+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
12427+{
12428+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
12429+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
12430+}
12431+
12432 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
12433 {
12434 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
12435diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
12436index f2b489c..4f7e2e5 100644
12437--- a/arch/x86/include/asm/pgtable-2level.h
12438+++ b/arch/x86/include/asm/pgtable-2level.h
12439@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
12440
12441 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
12442 {
12443+ pax_open_kernel();
12444 *pmdp = pmd;
12445+ pax_close_kernel();
12446 }
12447
12448 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
12449diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
12450index 4cc9f2b..5fd9226 100644
12451--- a/arch/x86/include/asm/pgtable-3level.h
12452+++ b/arch/x86/include/asm/pgtable-3level.h
12453@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
12454
12455 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
12456 {
12457+ pax_open_kernel();
12458 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
12459+ pax_close_kernel();
12460 }
12461
12462 static inline void native_set_pud(pud_t *pudp, pud_t pud)
12463 {
12464+ pax_open_kernel();
12465 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
12466+ pax_close_kernel();
12467 }
12468
12469 /*
12470diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
12471index a1f780d..5f38ced4 100644
12472--- a/arch/x86/include/asm/pgtable.h
12473+++ b/arch/x86/include/asm/pgtable.h
12474@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
12475
12476 #ifndef __PAGETABLE_PUD_FOLDED
12477 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
12478+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
12479 #define pgd_clear(pgd) native_pgd_clear(pgd)
12480 #endif
12481
12482@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
12483
12484 #define arch_end_context_switch(prev) do {} while(0)
12485
12486+#define pax_open_kernel() native_pax_open_kernel()
12487+#define pax_close_kernel() native_pax_close_kernel()
12488 #endif /* CONFIG_PARAVIRT */
12489
12490+#define __HAVE_ARCH_PAX_OPEN_KERNEL
12491+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
12492+
12493+#ifdef CONFIG_PAX_KERNEXEC
12494+static inline unsigned long native_pax_open_kernel(void)
12495+{
12496+ unsigned long cr0;
12497+
12498+ preempt_disable();
12499+ barrier();
12500+ cr0 = read_cr0() ^ X86_CR0_WP;
12501+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
12502+ write_cr0(cr0);
12503+ return cr0 ^ X86_CR0_WP;
12504+}
12505+
12506+static inline unsigned long native_pax_close_kernel(void)
12507+{
12508+ unsigned long cr0;
12509+
12510+ cr0 = read_cr0() ^ X86_CR0_WP;
12511+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
12512+ write_cr0(cr0);
12513+ barrier();
12514+ preempt_enable_no_resched();
12515+ return cr0 ^ X86_CR0_WP;
12516+}
12517+#else
12518+static inline unsigned long native_pax_open_kernel(void) { return 0; }
12519+static inline unsigned long native_pax_close_kernel(void) { return 0; }
12520+#endif
12521+
12522 /*
12523 * The following only work if pte_present() is true.
12524 * Undefined behaviour if not..
12525 */
12526+static inline int pte_user(pte_t pte)
12527+{
12528+ return pte_val(pte) & _PAGE_USER;
12529+}
12530+
12531 static inline int pte_dirty(pte_t pte)
12532 {
12533 return pte_flags(pte) & _PAGE_DIRTY;
12534@@ -195,9 +235,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
12535 return pte_clear_flags(pte, _PAGE_RW);
12536 }
12537
12538+static inline pte_t pte_mkread(pte_t pte)
12539+{
12540+ return __pte(pte_val(pte) | _PAGE_USER);
12541+}
12542+
12543 static inline pte_t pte_mkexec(pte_t pte)
12544 {
12545- return pte_clear_flags(pte, _PAGE_NX);
12546+#ifdef CONFIG_X86_PAE
12547+ if (__supported_pte_mask & _PAGE_NX)
12548+ return pte_clear_flags(pte, _PAGE_NX);
12549+ else
12550+#endif
12551+ return pte_set_flags(pte, _PAGE_USER);
12552+}
12553+
12554+static inline pte_t pte_exprotect(pte_t pte)
12555+{
12556+#ifdef CONFIG_X86_PAE
12557+ if (__supported_pte_mask & _PAGE_NX)
12558+ return pte_set_flags(pte, _PAGE_NX);
12559+ else
12560+#endif
12561+ return pte_clear_flags(pte, _PAGE_USER);
12562 }
12563
12564 static inline pte_t pte_mkdirty(pte_t pte)
12565@@ -389,6 +449,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
12566 #endif
12567
12568 #ifndef __ASSEMBLY__
12569+
12570+#ifdef CONFIG_PAX_PER_CPU_PGD
12571+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
12572+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
12573+{
12574+ return cpu_pgd[cpu];
12575+}
12576+#endif
12577+
12578 #include <linux/mm_types.h>
12579
12580 static inline int pte_none(pte_t pte)
12581@@ -565,7 +634,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
12582
12583 static inline int pgd_bad(pgd_t pgd)
12584 {
12585- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
12586+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
12587 }
12588
12589 static inline int pgd_none(pgd_t pgd)
12590@@ -588,7 +657,12 @@ static inline int pgd_none(pgd_t pgd)
12591 * pgd_offset() returns a (pgd_t *)
12592 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
12593 */
12594-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
12595+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
12596+
12597+#ifdef CONFIG_PAX_PER_CPU_PGD
12598+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
12599+#endif
12600+
12601 /*
12602 * a shortcut which implies the use of the kernel's pgd, instead
12603 * of a process's
12604@@ -599,6 +673,20 @@ static inline int pgd_none(pgd_t pgd)
12605 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
12606 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
12607
12608+#ifdef CONFIG_X86_32
12609+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
12610+#else
12611+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
12612+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
12613+
12614+#ifdef CONFIG_PAX_MEMORY_UDEREF
12615+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
12616+#else
12617+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
12618+#endif
12619+
12620+#endif
12621+
12622 #ifndef __ASSEMBLY__
12623
12624 extern int direct_gbpages;
12625@@ -763,11 +851,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
12626 * dst and src can be on the same page, but the range must not overlap,
12627 * and must not cross a page boundary.
12628 */
12629-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
12630+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
12631 {
12632- memcpy(dst, src, count * sizeof(pgd_t));
12633+ pax_open_kernel();
12634+ while (count--)
12635+ *dst++ = *src++;
12636+ pax_close_kernel();
12637 }
12638
12639+#ifdef CONFIG_PAX_PER_CPU_PGD
12640+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
12641+#endif
12642+
12643+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12644+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
12645+#else
12646+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
12647+#endif
12648
12649 #include <asm-generic/pgtable.h>
12650 #endif /* __ASSEMBLY__ */
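
native_pax_open_kernel() above flips CR0.WP with preemption disabled so the kernel can briefly write through read-only mappings, and native_pax_close_kernel() restores it. That requires ring 0; as a userspace analogue of the same "open, write, close" discipline, mprotect() can stand in for the WP bit:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Userspace stand-ins for pax_open_kernel()/pax_close_kernel():
     * temporarily make a normally read-only page writable. */
    static void open_page(void *page, size_t len)  { mprotect(page, len, PROT_READ | PROT_WRITE); }
    static void close_page(void *page, size_t len) { mprotect(page, len, PROT_READ); }

    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;

        strcpy(p, "initial");
        close_page(p, pagesz);    /* now read-only, like kernel text */

        open_page(p, pagesz);     /* pax_open_kernel() analogue */
        strcpy(p, "patched");     /* the one sanctioned write */
        close_page(p, pagesz);    /* pax_close_kernel() analogue */

        printf("%s\n", p);
        return 0;
    }
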
12651diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
12652index 8faa215..a8a17ea 100644
12653--- a/arch/x86/include/asm/pgtable_32.h
12654+++ b/arch/x86/include/asm/pgtable_32.h
12655@@ -25,9 +25,6 @@
12656 struct mm_struct;
12657 struct vm_area_struct;
12658
12659-extern pgd_t swapper_pg_dir[1024];
12660-extern pgd_t initial_page_table[1024];
12661-
12662 static inline void pgtable_cache_init(void) { }
12663 static inline void check_pgt_cache(void) { }
12664 void paging_init(void);
12665@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
12666 # include <asm/pgtable-2level.h>
12667 #endif
12668
12669+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
12670+extern pgd_t initial_page_table[PTRS_PER_PGD];
12671+#ifdef CONFIG_X86_PAE
12672+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
12673+#endif
12674+
12675 #if defined(CONFIG_HIGHPTE)
12676 #define pte_offset_map(dir, address) \
12677 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
12678@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
12679 /* Clear a kernel PTE and flush it from the TLB */
12680 #define kpte_clear_flush(ptep, vaddr) \
12681 do { \
12682+ pax_open_kernel(); \
12683 pte_clear(&init_mm, (vaddr), (ptep)); \
12684+ pax_close_kernel(); \
12685 __flush_tlb_one((vaddr)); \
12686 } while (0)
12687
12688@@ -75,6 +80,9 @@ do { \
12689
12690 #endif /* !__ASSEMBLY__ */
12691
12692+#define HAVE_ARCH_UNMAPPED_AREA
12693+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
12694+
12695 /*
12696 * kern_addr_valid() is (1) for FLATMEM and (0) for
12697 * SPARSEMEM and DISCONTIGMEM
12698diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
12699index ed5903b..c7fe163 100644
12700--- a/arch/x86/include/asm/pgtable_32_types.h
12701+++ b/arch/x86/include/asm/pgtable_32_types.h
12702@@ -8,7 +8,7 @@
12703 */
12704 #ifdef CONFIG_X86_PAE
12705 # include <asm/pgtable-3level_types.h>
12706-# define PMD_SIZE (1UL << PMD_SHIFT)
12707+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
12708 # define PMD_MASK (~(PMD_SIZE - 1))
12709 #else
12710 # include <asm/pgtable-2level_types.h>
12711@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
12712 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
12713 #endif
12714
12715+#ifdef CONFIG_PAX_KERNEXEC
12716+#ifndef __ASSEMBLY__
12717+extern unsigned char MODULES_EXEC_VADDR[];
12718+extern unsigned char MODULES_EXEC_END[];
12719+#endif
12720+#include <asm/boot.h>
12721+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
12722+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
12723+#else
12724+#define ktla_ktva(addr) (addr)
12725+#define ktva_ktla(addr) (addr)
12726+#endif
12727+
12728 #define MODULES_VADDR VMALLOC_START
12729 #define MODULES_END VMALLOC_END
12730 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
12731diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
12732index 47356f9..deb94a2 100644
12733--- a/arch/x86/include/asm/pgtable_64.h
12734+++ b/arch/x86/include/asm/pgtable_64.h
12735@@ -16,10 +16,14 @@
12736
12737 extern pud_t level3_kernel_pgt[512];
12738 extern pud_t level3_ident_pgt[512];
12739+extern pud_t level3_vmalloc_start_pgt[512];
12740+extern pud_t level3_vmalloc_end_pgt[512];
12741+extern pud_t level3_vmemmap_pgt[512];
12742+extern pud_t level2_vmemmap_pgt[512];
12743 extern pmd_t level2_kernel_pgt[512];
12744 extern pmd_t level2_fixmap_pgt[512];
12745-extern pmd_t level2_ident_pgt[512];
12746-extern pgd_t init_level4_pgt[];
12747+extern pmd_t level2_ident_pgt[512*2];
12748+extern pgd_t init_level4_pgt[512];
12749
12750 #define swapper_pg_dir init_level4_pgt
12751
12752@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
12753
12754 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
12755 {
12756+ pax_open_kernel();
12757 *pmdp = pmd;
12758+ pax_close_kernel();
12759 }
12760
12761 static inline void native_pmd_clear(pmd_t *pmd)
12762@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
12763
12764 static inline void native_set_pud(pud_t *pudp, pud_t pud)
12765 {
12766+ pax_open_kernel();
12767 *pudp = pud;
12768+ pax_close_kernel();
12769 }
12770
12771 static inline void native_pud_clear(pud_t *pud)
12772@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
12773
12774 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
12775 {
12776+ pax_open_kernel();
12777+ *pgdp = pgd;
12778+ pax_close_kernel();
12779+}
12780+
12781+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
12782+{
12783 *pgdp = pgd;
12784 }
12785
12786diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
12787index 766ea16..5b96cb3 100644
12788--- a/arch/x86/include/asm/pgtable_64_types.h
12789+++ b/arch/x86/include/asm/pgtable_64_types.h
12790@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
12791 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
12792 #define MODULES_END _AC(0xffffffffff000000, UL)
12793 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
12794+#define MODULES_EXEC_VADDR MODULES_VADDR
12795+#define MODULES_EXEC_END MODULES_END
12796+
12797+#define ktla_ktva(addr) (addr)
12798+#define ktva_ktla(addr) (addr)
12799
12800 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
12801diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
12802index ec8a1fc..7ccb593 100644
12803--- a/arch/x86/include/asm/pgtable_types.h
12804+++ b/arch/x86/include/asm/pgtable_types.h
12805@@ -16,13 +16,12 @@
12806 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
12807 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
12808 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
12809-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
12810+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
12811 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
12812 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
12813 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
12814-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
12815-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
12816-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
12817+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
12818+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
12819 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
12820
12821 /* If _PAGE_BIT_PRESENT is clear, we use these: */
12822@@ -40,7 +39,6 @@
12823 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
12824 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
12825 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
12826-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
12827 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
12828 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
12829 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
12830@@ -57,8 +55,10 @@
12831
12832 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
12833 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
12834-#else
12835+#elif defined(CONFIG_KMEMCHECK)
12836 #define _PAGE_NX (_AT(pteval_t, 0))
12837+#else
12838+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
12839 #endif
12840
12841 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
12842@@ -96,6 +96,9 @@
12843 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
12844 _PAGE_ACCESSED)
12845
12846+#define PAGE_READONLY_NOEXEC PAGE_READONLY
12847+#define PAGE_SHARED_NOEXEC PAGE_SHARED
12848+
12849 #define __PAGE_KERNEL_EXEC \
12850 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
12851 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
12852@@ -106,7 +109,7 @@
12853 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
12854 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
12855 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
12856-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
12857+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
12858 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
12859 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
12860 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
12861@@ -168,8 +171,8 @@
11862 * bits are combined, this will allow user to access the high address mapped
12863 * VDSO in the presence of CONFIG_COMPAT_VDSO
12864 */
12865-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
12866-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
12867+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
12868+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
12869 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
12870 #endif
12871
12872@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
12873 {
12874 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
12875 }
12876+#endif
12877
12878+#if PAGETABLE_LEVELS == 3
12879+#include <asm-generic/pgtable-nopud.h>
12880+#endif
12881+
12882+#if PAGETABLE_LEVELS == 2
12883+#include <asm-generic/pgtable-nopmd.h>
12884+#endif
12885+
12886+#ifndef __ASSEMBLY__
12887 #if PAGETABLE_LEVELS > 3
12888 typedef struct { pudval_t pud; } pud_t;
12889
12890@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
12891 return pud.pud;
12892 }
12893 #else
12894-#include <asm-generic/pgtable-nopud.h>
12895-
12896 static inline pudval_t native_pud_val(pud_t pud)
12897 {
12898 return native_pgd_val(pud.pgd);
12899@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
12900 return pmd.pmd;
12901 }
12902 #else
12903-#include <asm-generic/pgtable-nopmd.h>
12904-
12905 static inline pmdval_t native_pmd_val(pmd_t pmd)
12906 {
12907 return native_pgd_val(pmd.pud.pgd);
12908@@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
12909
12910 extern pteval_t __supported_pte_mask;
12911 extern void set_nx(void);
12912-extern int nx_enabled;
12913
12914 #define pgprot_writecombine pgprot_writecombine
12915 extern pgprot_t pgprot_writecombine(pgprot_t prot);
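
The pgtable_types.h and pgtable.h hunks juggle individual PTE bits: pte_mkexec() clears _PAGE_NX where the hardware has it, and otherwise falls back to the _PAGE_USER bit that SEGMEXEC keys off. A sketch of that logic on a bare pteval, with a flag standing in for (__supported_pte_mask & _PAGE_NX):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t pteval_t;

    #define _PAGE_BIT_USER 2
    #define _PAGE_BIT_NX   63
    #define _PAGE_USER ((pteval_t)1 << _PAGE_BIT_USER)
    #define _PAGE_NX   ((pteval_t)1 << _PAGE_BIT_NX)

    static int nx_supported = 1;   /* stand-in for the __supported_pte_mask test */

    /* Mirrors the patched pte_mkexec(): clear NX when the CPU has it,
     * otherwise mark the page user-accessible instead. */
    static pteval_t pte_mkexec(pteval_t pte)
    {
        if (nx_supported)
            return pte & ~_PAGE_NX;
        return pte | _PAGE_USER;
    }

    static pteval_t pte_exprotect(pteval_t pte)
    {
        if (nx_supported)
            return pte | _PAGE_NX;
        return pte & ~_PAGE_USER;
    }

    int main(void)
    {
        pteval_t pte = _PAGE_NX;                 /* non-executable mapping */
        pte = pte_mkexec(pte);
        printf("NX after mkexec:    %d\n", (int)((pte >> _PAGE_BIT_NX) & 1));
        pte = pte_exprotect(pte);
        printf("NX after exprotect: %d\n", (int)((pte >> _PAGE_BIT_NX) & 1));
        return 0;
    }
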
12916diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
12917index ad1fc85..0b15fe1 100644
12918--- a/arch/x86/include/asm/processor.h
12919+++ b/arch/x86/include/asm/processor.h
12920@@ -289,7 +289,7 @@ struct tss_struct {
12921
12922 } ____cacheline_aligned;
12923
12924-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
12925+extern struct tss_struct init_tss[NR_CPUS];
12926
12927 /*
12928 * Save the original ist values for checking stack pointers during debugging
12929@@ -818,11 +818,18 @@ static inline void spin_lock_prefetch(const void *x)
12930 */
12931 #define TASK_SIZE PAGE_OFFSET
12932 #define TASK_SIZE_MAX TASK_SIZE
12933+
12934+#ifdef CONFIG_PAX_SEGMEXEC
12935+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
12936+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
12937+#else
12938 #define STACK_TOP TASK_SIZE
12939-#define STACK_TOP_MAX STACK_TOP
12940+#endif
12941+
12942+#define STACK_TOP_MAX TASK_SIZE
12943
12944 #define INIT_THREAD { \
12945- .sp0 = sizeof(init_stack) + (long)&init_stack, \
12946+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
12947 .vm86_info = NULL, \
12948 .sysenter_cs = __KERNEL_CS, \
12949 .io_bitmap_ptr = NULL, \
12950@@ -836,7 +843,7 @@ static inline void spin_lock_prefetch(const void *x)
12951 */
12952 #define INIT_TSS { \
12953 .x86_tss = { \
12954- .sp0 = sizeof(init_stack) + (long)&init_stack, \
12955+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
12956 .ss0 = __KERNEL_DS, \
12957 .ss1 = __KERNEL_CS, \
12958 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
12959@@ -847,11 +854,7 @@ static inline void spin_lock_prefetch(const void *x)
12960 extern unsigned long thread_saved_pc(struct task_struct *tsk);
12961
12962 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
12963-#define KSTK_TOP(info) \
12964-({ \
12965- unsigned long *__ptr = (unsigned long *)(info); \
12966- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
12967-})
12968+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
12969
12970 /*
12971 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
12972@@ -866,7 +869,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
12973 #define task_pt_regs(task) \
12974 ({ \
12975 struct pt_regs *__regs__; \
12976- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
12977+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
12978 __regs__ - 1; \
12979 })
12980
12981@@ -876,13 +879,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
12982 /*
12983 * User space process size. 47bits minus one guard page.
12984 */
12985-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
12986+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
12987
12988 /* This decides where the kernel will search for a free chunk of vm
12989 * space during mmap's.
12990 */
12991 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
12992- 0xc0000000 : 0xFFFFe000)
12993+ 0xc0000000 : 0xFFFFf000)
12994
12995 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
12996 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
12997@@ -893,11 +896,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
12998 #define STACK_TOP_MAX TASK_SIZE_MAX
12999
13000 #define INIT_THREAD { \
13001- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
13002+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
13003 }
13004
13005 #define INIT_TSS { \
13006- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
13007+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
13008 }
13009
13010 /*
13011@@ -925,6 +928,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
13012 */
13013 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
13014
13015+#ifdef CONFIG_PAX_SEGMEXEC
13016+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
13017+#endif
13018+
13019 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
13020
13021 /* Get/set a process' ability to use the timestamp counter instruction */
13022@@ -985,12 +992,12 @@ extern bool cpu_has_amd_erratum(const int *);
13023 #define cpu_has_amd_erratum(x) (false)
13024 #endif /* CONFIG_CPU_SUP_AMD */
13025
13026-extern unsigned long arch_align_stack(unsigned long sp);
13027+#define arch_align_stack(x) ((x) & ~0xfUL)
13028 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
13029
13030 void default_idle(void);
13031 bool set_pm_idle_to_default(void);
13032
13033-void stop_this_cpu(void *dummy);
13034+void stop_this_cpu(void *dummy) __noreturn;
13035
13036 #endif /* _ASM_X86_PROCESSOR_H */
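
The processor.h hunk above replaces the randomizing arch_align_stack() with a pure mask: the stack top is no longer perturbed here, only rounded down to a 16-byte boundary. The arithmetic in isolation:

    #include <stdio.h>

    /* The patched definition: round sp down to 16 bytes, no randomization. */
    #define arch_align_stack(x) ((x) & ~0xfUL)

    int main(void)
    {
        unsigned long sp = 0x7fffdeadbeefUL;
        printf("%#lx -> %#lx\n", sp, arch_align_stack(sp));  /* ...beef -> ...bee0 */
        return 0;
    }
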
13037diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
13038index 19f16eb..b50624b 100644
13039--- a/arch/x86/include/asm/ptrace.h
13040+++ b/arch/x86/include/asm/ptrace.h
13041@@ -155,28 +155,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
13042 }
13043
13044 /*
13045- * user_mode_vm(regs) determines whether a register set came from user mode.
13046+ * user_mode(regs) determines whether a register set came from user mode.
13047 * This is true if V8086 mode was enabled OR if the register set was from
13048 * protected mode with RPL-3 CS value. This tricky test checks that with
13049 * one comparison. Many places in the kernel can bypass this full check
13050- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
13051+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
13052+ * be used.
13053 */
13054-static inline int user_mode(struct pt_regs *regs)
13055+static inline int user_mode_novm(struct pt_regs *regs)
13056 {
13057 #ifdef CONFIG_X86_32
13058 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
13059 #else
13060- return !!(regs->cs & 3);
13061+ return !!(regs->cs & SEGMENT_RPL_MASK);
13062 #endif
13063 }
13064
13065-static inline int user_mode_vm(struct pt_regs *regs)
13066+static inline int user_mode(struct pt_regs *regs)
13067 {
13068 #ifdef CONFIG_X86_32
13069 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
13070 USER_RPL;
13071 #else
13072- return user_mode(regs);
13073+ return user_mode_novm(regs);
13074 #endif
13075 }
13076
13077@@ -192,15 +193,16 @@ static inline int v8086_mode(struct pt_regs *regs)
13078 #ifdef CONFIG_X86_64
13079 static inline bool user_64bit_mode(struct pt_regs *regs)
13080 {
13081+ unsigned long cs = regs->cs & 0xffff;
13082 #ifndef CONFIG_PARAVIRT
13083 /*
13084 * On non-paravirt systems, this is the only long mode CPL 3
13085 * selector. We do not allow long mode selectors in the LDT.
13086 */
13087- return regs->cs == __USER_CS;
13088+ return cs == __USER_CS;
13089 #else
13090 /* Headers are too twisted for this to go in paravirt.h. */
13091- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
13092+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
13093 #endif
13094 }
13095 #endif
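
The ptrace.h hunk above swaps the naming: the full check (user RPL or V8086 mode) becomes user_mode(), and the cheaper RPL-only test becomes user_mode_novm(). The test itself is selector arithmetic, since the low two bits of CS are the requested privilege level. A sketch on raw selector/flag values:

    #include <stdio.h>

    #define SEGMENT_RPL_MASK 0x3           /* low two selector bits = RPL */
    #define USER_RPL         0x3           /* ring 3 */
    #define X86_VM_MASK      0x00020000UL  /* EFLAGS.VM: virtual-8086 mode */

    /* RPL-only check (the patch's user_mode_novm on 32-bit) */
    static int user_mode_novm(unsigned long cs)
    {
        return (cs & SEGMENT_RPL_MASK) == USER_RPL;
    }

    /* Full check: user RPL *or* V8086 mode (the patch's user_mode) */
    static int user_mode(unsigned long cs, unsigned long eflags)
    {
        return ((cs & SEGMENT_RPL_MASK) | (eflags & X86_VM_MASK)) >= USER_RPL;
    }

    int main(void)
    {
        printf("kernel cs 0x10:    %d\n", user_mode(0x10, 0));           /* 0 */
        printf("user cs 0x73:      %d\n", user_mode(0x73, 0));           /* 1 */
        printf("novm user cs 0x73: %d\n", user_mode_novm(0x73));         /* 1 */
        printf("v8086:             %d\n", user_mode(0x08, X86_VM_MASK)); /* 1 */
        return 0;
    }
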
13096diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
13097index fe1ec5b..dc5c3fe 100644
13098--- a/arch/x86/include/asm/realmode.h
13099+++ b/arch/x86/include/asm/realmode.h
13100@@ -22,16 +22,14 @@ struct real_mode_header {
13101 #endif
13102 /* APM/BIOS reboot */
13103 u32 machine_real_restart_asm;
13104-#ifdef CONFIG_X86_64
13105 u32 machine_real_restart_seg;
13106-#endif
13107 };
13108
13109 /* This must match data at trampoline_32/64.S */
13110 struct trampoline_header {
13111 #ifdef CONFIG_X86_32
13112 u32 start;
13113- u16 gdt_pad;
13114+ u16 boot_cs;
13115 u16 gdt_limit;
13116 u32 gdt_base;
13117 #else
13118diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
13119index a82c4f1..f9c9696 100644
13120--- a/arch/x86/include/asm/reboot.h
13121+++ b/arch/x86/include/asm/reboot.h
13122@@ -6,12 +6,12 @@
13123 struct pt_regs;
13124
13125 struct machine_ops {
13126- void (*restart)(char *cmd);
13127- void (*halt)(void);
13128- void (*power_off)(void);
13129+ void (* __noreturn restart)(char *cmd);
13130+ void (* __noreturn halt)(void);
13131+ void (* __noreturn power_off)(void);
13132 void (*shutdown)(void);
13133 void (*crash_shutdown)(struct pt_regs *);
13134- void (*emergency_restart)(void);
13135+ void (* __noreturn emergency_restart)(void);
13136 };
13137
13138 extern struct machine_ops machine_ops;
13139diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
13140index 2dbe4a7..ce1db00 100644
13141--- a/arch/x86/include/asm/rwsem.h
13142+++ b/arch/x86/include/asm/rwsem.h
13143@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
13144 {
13145 asm volatile("# beginning down_read\n\t"
13146 LOCK_PREFIX _ASM_INC "(%1)\n\t"
13147+
13148+#ifdef CONFIG_PAX_REFCOUNT
13149+ "jno 0f\n"
13150+ LOCK_PREFIX _ASM_DEC "(%1)\n"
13151+ "int $4\n0:\n"
13152+ _ASM_EXTABLE(0b, 0b)
13153+#endif
13154+
13155 /* adds 0x00000001 */
13156 " jns 1f\n"
13157 " call call_rwsem_down_read_failed\n"
13158@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
13159 "1:\n\t"
13160 " mov %1,%2\n\t"
13161 " add %3,%2\n\t"
13162+
13163+#ifdef CONFIG_PAX_REFCOUNT
13164+ "jno 0f\n"
13165+ "sub %3,%2\n"
13166+ "int $4\n0:\n"
13167+ _ASM_EXTABLE(0b, 0b)
13168+#endif
13169+
13170 " jle 2f\n\t"
13171 LOCK_PREFIX " cmpxchg %2,%0\n\t"
13172 " jnz 1b\n\t"
13173@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
13174 long tmp;
13175 asm volatile("# beginning down_write\n\t"
13176 LOCK_PREFIX " xadd %1,(%2)\n\t"
13177+
13178+#ifdef CONFIG_PAX_REFCOUNT
13179+ "jno 0f\n"
13180+ "mov %1,(%2)\n"
13181+ "int $4\n0:\n"
13182+ _ASM_EXTABLE(0b, 0b)
13183+#endif
13184+
13185 /* adds 0xffff0001, returns the old value */
13186 " test %1,%1\n\t"
13187 /* was the count 0 before? */
13188@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
13189 long tmp;
13190 asm volatile("# beginning __up_read\n\t"
13191 LOCK_PREFIX " xadd %1,(%2)\n\t"
13192+
13193+#ifdef CONFIG_PAX_REFCOUNT
13194+ "jno 0f\n"
13195+ "mov %1,(%2)\n"
13196+ "int $4\n0:\n"
13197+ _ASM_EXTABLE(0b, 0b)
13198+#endif
13199+
13200 /* subtracts 1, returns the old value */
13201 " jns 1f\n\t"
13202 " call call_rwsem_wake\n" /* expects old value in %edx */
13203@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
13204 long tmp;
13205 asm volatile("# beginning __up_write\n\t"
13206 LOCK_PREFIX " xadd %1,(%2)\n\t"
13207+
13208+#ifdef CONFIG_PAX_REFCOUNT
13209+ "jno 0f\n"
13210+ "mov %1,(%2)\n"
13211+ "int $4\n0:\n"
13212+ _ASM_EXTABLE(0b, 0b)
13213+#endif
13214+
13215 /* subtracts 0xffff0001, returns the old value */
13216 " jns 1f\n\t"
13217 " call call_rwsem_wake\n" /* expects old value in %edx */
13218@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
13219 {
13220 asm volatile("# beginning __downgrade_write\n\t"
13221 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
13222+
13223+#ifdef CONFIG_PAX_REFCOUNT
13224+ "jno 0f\n"
13225+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
13226+ "int $4\n0:\n"
13227+ _ASM_EXTABLE(0b, 0b)
13228+#endif
13229+
13230 /*
13231 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
13232 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
13233@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
13234 */
13235 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
13236 {
13237- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
13238+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
13239+
13240+#ifdef CONFIG_PAX_REFCOUNT
13241+ "jno 0f\n"
13242+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
13243+ "int $4\n0:\n"
13244+ _ASM_EXTABLE(0b, 0b)
13245+#endif
13246+
13247 : "+m" (sem->count)
13248 : "er" (delta));
13249 }
13250@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
13251 */
13252 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
13253 {
13254- return delta + xadd(&sem->count, delta);
13255+ return delta + xadd_check_overflow(&sem->count, delta);
13256 }
13257
13258 #endif /* __KERNEL__ */
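
rwsem gets the same jno/undo/int $4 treatment as local.h, this time wrapped around xadd, which both adds and returns the previous count (rwsem_atomic_update then becomes xadd_check_overflow). A sketch of the checked fetch-and-add, under the same userspace assumptions as the local.h example above (abort in place of the kernel's trap-and-fixup):

    #include <stdio.h>
    #include <stdlib.h>

    /* Overflow-checked fetch-and-add modelled on the patch's "xadd ... jno":
     * add delta, return the old value, and back the addition out (flagging
     * it) when the signed add overflowed. */
    static long checked_xadd(long *count, long delta)
    {
        long old = delta;
        int overflow = 0;

        asm volatile("xaddq %0, %1\n\t"   /* *count += delta; old value -> %0 */
                     "jno 1f\n\t"
                     "movq %0, %1\n\t"    /* overflow: restore the old value */
                     "movl $1, %2\n"
                     "1:"
                     : "+r" (old), "+m" (*count), "+r" (overflow));

        if (overflow) {
            fprintf(stderr, "rwsem-style count overflow\n");
            abort();                      /* kernel: int $4 -> exception fixup */
        }
        return old;
    }

    int main(void)
    {
        long sem = 0;
        long old = checked_xadd(&sem, 1);
        printf("old = %ld, new = %ld\n", old, sem);
        return 0;
    }
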
13259diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
13260index c48a950..c6d7468 100644
13261--- a/arch/x86/include/asm/segment.h
13262+++ b/arch/x86/include/asm/segment.h
13263@@ -64,10 +64,15 @@
13264 * 26 - ESPFIX small SS
13265 * 27 - per-cpu [ offset to per-cpu data area ]
13266 * 28 - stack_canary-20 [ for stack protector ]
13267- * 29 - unused
13268- * 30 - unused
13269+ * 29 - PCI BIOS CS
13270+ * 30 - PCI BIOS DS
13271 * 31 - TSS for double fault handler
13272 */
13273+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
13274+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
13275+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
13276+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
13277+
13278 #define GDT_ENTRY_TLS_MIN 6
13279 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
13280
13281@@ -79,6 +84,8 @@
13282
13283 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
13284
13285+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
13286+
13287 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
13288
13289 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
13290@@ -104,6 +111,12 @@
13291 #define __KERNEL_STACK_CANARY 0
13292 #endif
13293
13294+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
13295+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
13296+
13297+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
13298+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
13299+
13300 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
13301
13302 /*
13303@@ -141,7 +154,7 @@
13304 */
13305
13306 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
13307-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
13308+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
13309
13310
13311 #else
13312@@ -165,6 +178,8 @@
13313 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
13314 #define __USER32_DS __USER_DS
13315
13316+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
13317+
13318 #define GDT_ENTRY_TSS 8 /* needs two entries */
13319 #define GDT_ENTRY_LDT 10 /* needs two entries */
13320 #define GDT_ENTRY_TLS_MIN 12
13321@@ -185,6 +200,7 @@
13322 #endif
13323
13324 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
13325+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
13326 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
13327 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
13328 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
13329@@ -265,7 +281,7 @@ static inline unsigned long get_limit(unsigned long segment)
13330 {
13331 unsigned long __limit;
13332 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
13333- return __limit + 1;
13334+ return __limit;
13335 }
13336
13337 #endif /* !__ASSEMBLY__ */
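
Two details in this segment.h hunk are easy to miss. SEGMENT_IS_PNP_CODE() used to mask the selector with 0xf4 and compare against the GDT base, a bit trick that also matched unrelated selectors sharing those bits; the replacement masks off only the two RPL bits (0xFFFC is ~0x3) and compares against PNP_CS32 and PNP_CS16 explicitly. And get_limit() stops adding 1 to the lsl result: lsl reports the last valid byte offset, so a flat 4 GiB segment yields 0xFFFFFFFF and the +1 wrapped to 0. That wrap is a plausible motivation (the patch itself does not say), illustrated below:

    /* How "__limit + 1" wraps for a flat segment (illustration only). */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t lsl_result = 0xFFFFFFFFu;    /* flat 4 GiB segment */
        printf("old get_limit(): %u\n", lsl_result + 1);   /* 0 */
        printf("new get_limit(): %u\n", lsl_result);
        return 0;
    }
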
13338diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
13339index 4f19a15..e04d86f 100644
13340--- a/arch/x86/include/asm/smp.h
13341+++ b/arch/x86/include/asm/smp.h
13342@@ -36,7 +36,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
13343 /* cpus sharing the last level cache: */
13344 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
13345 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
13346-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
13347+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
13348
13349 static inline struct cpumask *cpu_sibling_mask(int cpu)
13350 {
13351@@ -190,14 +190,8 @@ extern unsigned disabled_cpus __cpuinitdata;
13352 extern int safe_smp_processor_id(void);
13353
13354 #elif defined(CONFIG_X86_64_SMP)
13355-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
13356-
13357-#define stack_smp_processor_id() \
13358-({ \
13359- struct thread_info *ti; \
13360- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
13361- ti->cpu; \
13362-})
13363+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
13364+#define stack_smp_processor_id() raw_smp_processor_id()
13365 #define safe_smp_processor_id() smp_processor_id()
13366
13367 #endif
13368diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
13369index 33692ea..350a534 100644
13370--- a/arch/x86/include/asm/spinlock.h
13371+++ b/arch/x86/include/asm/spinlock.h
13372@@ -172,6 +172,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
13373 static inline void arch_read_lock(arch_rwlock_t *rw)
13374 {
13375 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
13376+
13377+#ifdef CONFIG_PAX_REFCOUNT
13378+ "jno 0f\n"
13379+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
13380+ "int $4\n0:\n"
13381+ _ASM_EXTABLE(0b, 0b)
13382+#endif
13383+
13384 "jns 1f\n"
13385 "call __read_lock_failed\n\t"
13386 "1:\n"
13387@@ -181,6 +189,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
13388 static inline void arch_write_lock(arch_rwlock_t *rw)
13389 {
13390 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
13391+
13392+#ifdef CONFIG_PAX_REFCOUNT
13393+ "jno 0f\n"
13394+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
13395+ "int $4\n0:\n"
13396+ _ASM_EXTABLE(0b, 0b)
13397+#endif
13398+
13399 "jz 1f\n"
13400 "call __write_lock_failed\n\t"
13401 "1:\n"
13402@@ -210,13 +226,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
13403
13404 static inline void arch_read_unlock(arch_rwlock_t *rw)
13405 {
13406- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
13407+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
13408+
13409+#ifdef CONFIG_PAX_REFCOUNT
13410+ "jno 0f\n"
13411+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
13412+ "int $4\n0:\n"
13413+ _ASM_EXTABLE(0b, 0b)
13414+#endif
13415+
13416 :"+m" (rw->lock) : : "memory");
13417 }
13418
13419 static inline void arch_write_unlock(arch_rwlock_t *rw)
13420 {
13421- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
13422+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
13423+
13424+#ifdef CONFIG_PAX_REFCOUNT
13425+ "jno 0f\n"
13426+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
13427+ "int $4\n0:\n"
13428+ _ASM_EXTABLE(0b, 0b)
13429+#endif
13430+
13431 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
13432 }
13433
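
The rwlock paths repeat the rwsem pattern: the reader count and writer bias are overflow-checked on every lock and unlock, with the inverse instruction undoing the update before int $4 fires. On current compilers the same property can be expressed without inline asm; this is not what the patch uses, merely the portable equivalent:

    /* __builtin_add_overflow (GCC 5+/Clang): stores the wrapped result
     * and returns true when signed overflow occurred. */
    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
        int count = INT_MAX, out;
        if (__builtin_add_overflow(count, 1, &out))
            printf("overflow detected, count left at %d\n", count);
        return 0;
    }
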
13434diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
13435index 6a99859..03cb807 100644
13436--- a/arch/x86/include/asm/stackprotector.h
13437+++ b/arch/x86/include/asm/stackprotector.h
13438@@ -47,7 +47,7 @@
13439 * head_32 for boot CPU and setup_per_cpu_areas() for others.
13440 */
13441 #define GDT_STACK_CANARY_INIT \
13442- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
13443+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
13444
13445 /*
13446 * Initialize the stackprotector canary value.
13447@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
13448
13449 static inline void load_stack_canary_segment(void)
13450 {
13451-#ifdef CONFIG_X86_32
13452+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
13453 asm volatile ("mov %0, %%gs" : : "r" (0));
13454 #endif
13455 }
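
The GDT_STACK_CANARY_INIT change is a limit fix: descriptor limits are inclusive, naming the last addressable byte, so a segment meant to cover the 24-byte (0x18) canary area carries limit 0x17. The load_stack_canary_segment() change skips zeroing %gs when UDEREF is enabled, presumably because UDEREF repurposes %gs for userland accesses on i386 and clearing it here would break that. The limit arithmetic in isolation:

    /* Inclusive segment limits: limit = size_in_bytes - 1 (sketch). */
    #include <stdio.h>

    int main(void)
    {
        unsigned canary_bytes = 0x18;          /* 24-byte canary area */
        unsigned limit = canary_bytes - 1;     /* 0x17 */
        printf("limit=0x%x covers offsets 0..0x%x\n", limit, limit);
        return 0;
    }
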
13456diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
13457index 70bbe39..4ae2bd4 100644
13458--- a/arch/x86/include/asm/stacktrace.h
13459+++ b/arch/x86/include/asm/stacktrace.h
13460@@ -11,28 +11,20 @@
13461
13462 extern int kstack_depth_to_print;
13463
13464-struct thread_info;
13465+struct task_struct;
13466 struct stacktrace_ops;
13467
13468-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
13469- unsigned long *stack,
13470- unsigned long bp,
13471- const struct stacktrace_ops *ops,
13472- void *data,
13473- unsigned long *end,
13474- int *graph);
13475+typedef unsigned long walk_stack_t(struct task_struct *task,
13476+ void *stack_start,
13477+ unsigned long *stack,
13478+ unsigned long bp,
13479+ const struct stacktrace_ops *ops,
13480+ void *data,
13481+ unsigned long *end,
13482+ int *graph);
13483
13484-extern unsigned long
13485-print_context_stack(struct thread_info *tinfo,
13486- unsigned long *stack, unsigned long bp,
13487- const struct stacktrace_ops *ops, void *data,
13488- unsigned long *end, int *graph);
13489-
13490-extern unsigned long
13491-print_context_stack_bp(struct thread_info *tinfo,
13492- unsigned long *stack, unsigned long bp,
13493- const struct stacktrace_ops *ops, void *data,
13494- unsigned long *end, int *graph);
13495+extern walk_stack_t print_context_stack;
13496+extern walk_stack_t print_context_stack_bp;
13497
13498 /* Generic stack tracer with callbacks */
13499
13500@@ -40,7 +32,7 @@ struct stacktrace_ops {
13501 void (*address)(void *data, unsigned long address, int reliable);
13502 /* On negative return stop dumping */
13503 int (*stack)(void *data, char *name);
13504- walk_stack_t walk_stack;
13505+ walk_stack_t *walk_stack;
13506 };
13507
13508 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
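
Besides swapping thread_info for task_struct in the callback arguments, this hunk turns walk_stack_t from a pointer-to-function typedef into a plain function typedef. The header can then declare the two implementations as "extern walk_stack_t print_context_stack;", making the compiler verify each definition against the shared prototype, while the struct member becomes an explicit pointer (walk_stack_t *walk_stack). A standalone sketch of the idiom:

    /* Function typedef vs. function-pointer typedef (sketch). */
    typedef int handler_t(int arg);     /* a function type, not a pointer */

    static handler_t my_handler;        /* prototype-checked declaration */
    static int my_handler(int arg) { return arg * 2; }

    struct ops {
        handler_t *fn;                  /* the pointer is now explicit */
    };

    int main(void)
    {
        struct ops o = { .fn = my_handler };
        return o.fn(21) == 42 ? 0 : 1;
    }
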
13509diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
13510index 4ec45b3..a4f0a8a 100644
13511--- a/arch/x86/include/asm/switch_to.h
13512+++ b/arch/x86/include/asm/switch_to.h
13513@@ -108,7 +108,7 @@ do { \
13514 "call __switch_to\n\t" \
13515 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
13516 __switch_canary \
13517- "movq %P[thread_info](%%rsi),%%r8\n\t" \
13518+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
13519 "movq %%rax,%%rdi\n\t" \
13520 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
13521 "jnz ret_from_fork\n\t" \
13522@@ -119,7 +119,7 @@ do { \
13523 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
13524 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
13525 [_tif_fork] "i" (_TIF_FORK), \
13526- [thread_info] "i" (offsetof(struct task_struct, stack)), \
13527+ [thread_info] "m" (current_tinfo), \
13528 [current_task] "m" (current_task) \
13529 __switch_canary_iparam \
13530 : "memory", "cc" __EXTRA_CLOBBER)
13531diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
13532index 2d946e6..e453ec4 100644
13533--- a/arch/x86/include/asm/thread_info.h
13534+++ b/arch/x86/include/asm/thread_info.h
13535@@ -10,6 +10,7 @@
13536 #include <linux/compiler.h>
13537 #include <asm/page.h>
13538 #include <asm/types.h>
13539+#include <asm/percpu.h>
13540
13541 /*
13542 * low level task data that entry.S needs immediate access to
13543@@ -24,7 +25,6 @@ struct exec_domain;
13544 #include <linux/atomic.h>
13545
13546 struct thread_info {
13547- struct task_struct *task; /* main task structure */
13548 struct exec_domain *exec_domain; /* execution domain */
13549 __u32 flags; /* low level flags */
13550 __u32 status; /* thread synchronous flags */
13551@@ -34,19 +34,13 @@ struct thread_info {
13552 mm_segment_t addr_limit;
13553 struct restart_block restart_block;
13554 void __user *sysenter_return;
13555-#ifdef CONFIG_X86_32
13556- unsigned long previous_esp; /* ESP of the previous stack in
13557- case of nested (IRQ) stacks
13558- */
13559- __u8 supervisor_stack[0];
13560-#endif
13561+ unsigned long lowest_stack;
13562 unsigned int sig_on_uaccess_error:1;
13563 unsigned int uaccess_err:1; /* uaccess failed */
13564 };
13565
13566-#define INIT_THREAD_INFO(tsk) \
13567+#define INIT_THREAD_INFO \
13568 { \
13569- .task = &tsk, \
13570 .exec_domain = &default_exec_domain, \
13571 .flags = 0, \
13572 .cpu = 0, \
13573@@ -57,7 +51,7 @@ struct thread_info {
13574 }, \
13575 }
13576
13577-#define init_thread_info (init_thread_union.thread_info)
13578+#define init_thread_info (init_thread_union.stack)
13579 #define init_stack (init_thread_union.stack)
13580
13581 #else /* !__ASSEMBLY__ */
13582@@ -98,6 +92,7 @@ struct thread_info {
13583 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
13584 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
13585 #define TIF_X32 30 /* 32-bit native x86-64 binary */
13586+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
13587
13588 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
13589 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
13590@@ -122,17 +117,18 @@ struct thread_info {
13591 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
13592 #define _TIF_ADDR32 (1 << TIF_ADDR32)
13593 #define _TIF_X32 (1 << TIF_X32)
13594+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
13595
13596 /* work to do in syscall_trace_enter() */
13597 #define _TIF_WORK_SYSCALL_ENTRY \
13598 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
13599 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
13600- _TIF_NOHZ)
13601+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
13602
13603 /* work to do in syscall_trace_leave() */
13604 #define _TIF_WORK_SYSCALL_EXIT \
13605 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
13606- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
13607+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
13608
13609 /* work to do on interrupt/exception return */
13610 #define _TIF_WORK_MASK \
13611@@ -143,7 +139,7 @@ struct thread_info {
13612 /* work to do on any return to user space */
13613 #define _TIF_ALLWORK_MASK \
13614 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
13615- _TIF_NOHZ)
13616+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
13617
13618 /* Only used for 64 bit */
13619 #define _TIF_DO_NOTIFY_MASK \
13620@@ -159,45 +155,40 @@ struct thread_info {
13621
13622 #define PREEMPT_ACTIVE 0x10000000
13623
13624-#ifdef CONFIG_X86_32
13625-
13626-#define STACK_WARN (THREAD_SIZE/8)
13627-/*
13628- * macros/functions for gaining access to the thread information structure
13629- *
13630- * preempt_count needs to be 1 initially, until the scheduler is functional.
13631- */
13632-#ifndef __ASSEMBLY__
13633-
13634-
13635-/* how to get the current stack pointer from C */
13636-register unsigned long current_stack_pointer asm("esp") __used;
13637-
13638-/* how to get the thread information struct from C */
13639-static inline struct thread_info *current_thread_info(void)
13640-{
13641- return (struct thread_info *)
13642- (current_stack_pointer & ~(THREAD_SIZE - 1));
13643-}
13644-
13645-#else /* !__ASSEMBLY__ */
13646-
13647+#ifdef __ASSEMBLY__
13648 /* how to get the thread information struct from ASM */
13649 #define GET_THREAD_INFO(reg) \
13650- movl $-THREAD_SIZE, reg; \
13651- andl %esp, reg
13652+ mov PER_CPU_VAR(current_tinfo), reg
13653
13654 /* use this one if reg already contains %esp */
13655-#define GET_THREAD_INFO_WITH_ESP(reg) \
13656- andl $-THREAD_SIZE, reg
13657+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
13658+#else
13659+/* how to get the thread information struct from C */
13660+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
13661+
13662+static __always_inline struct thread_info *current_thread_info(void)
13663+{
13664+ return this_cpu_read_stable(current_tinfo);
13665+}
13666+#endif
13667+
13668+#ifdef CONFIG_X86_32
13669+
13670+#define STACK_WARN (THREAD_SIZE/8)
13671+/*
13672+ * macros/functions for gaining access to the thread information structure
13673+ *
13674+ * preempt_count needs to be 1 initially, until the scheduler is functional.
13675+ */
13676+#ifndef __ASSEMBLY__
13677+
13678+/* how to get the current stack pointer from C */
13679+register unsigned long current_stack_pointer asm("esp") __used;
13680
13681 #endif
13682
13683 #else /* X86_32 */
13684
13685-#include <asm/percpu.h>
13686-#define KERNEL_STACK_OFFSET (5*8)
13687-
13688 /*
13689 * macros/functions for gaining access to the thread information structure
13690 * preempt_count needs to be 1 initially, until the scheduler is functional.
13691@@ -205,27 +196,8 @@ static inline struct thread_info *current_thread_info(void)
13692 #ifndef __ASSEMBLY__
13693 DECLARE_PER_CPU(unsigned long, kernel_stack);
13694
13695-static inline struct thread_info *current_thread_info(void)
13696-{
13697- struct thread_info *ti;
13698- ti = (void *)(this_cpu_read_stable(kernel_stack) +
13699- KERNEL_STACK_OFFSET - THREAD_SIZE);
13700- return ti;
13701-}
13702-
13703-#else /* !__ASSEMBLY__ */
13704-
13705-/* how to get the thread information struct from ASM */
13706-#define GET_THREAD_INFO(reg) \
13707- movq PER_CPU_VAR(kernel_stack),reg ; \
13708- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
13709-
13710-/*
13711- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
13712- * a certain register (to be used in assembler memory operands).
13713- */
13714-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
13715-
13716+/* how to get the current stack pointer from C */
13717+register unsigned long current_stack_pointer asm("rsp") __used;
13718 #endif
13719
13720 #endif /* !X86_32 */
13721@@ -286,5 +258,12 @@ static inline bool is_ia32_task(void)
13722 extern void arch_task_cache_init(void);
13723 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
13724 extern void arch_release_task_struct(struct task_struct *tsk);
13725+
13726+#define __HAVE_THREAD_FUNCTIONS
13727+#define task_thread_info(task) (&(task)->tinfo)
13728+#define task_stack_page(task) ((task)->stack)
13729+#define setup_thread_stack(p, org) do {} while (0)
13730+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
13731+
13732 #endif
13733 #endif /* _ASM_X86_THREAD_INFO_H */
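
This is the structural heart of the patch's stack hardening: thread_info no longer sits at the base of the kernel stack, where a stack overflow could silently overwrite it. current_thread_info() becomes a per-CPU pointer read (this_cpu_read_stable(current_tinfo)) on both 32- and 64-bit, and __HAVE_THREAD_FUNCTIONS redirects task_thread_info() into a tinfo field that the patch adds to task_struct elsewhere. For contrast, a user-space sketch of the mainline lookup being deleted, which derived thread_info by masking the stack pointer:

    /* The removed mask-the-stack-pointer lookup (illustrative names). */
    #include <stdint.h>

    #define THREAD_SIZE 8192u           /* two pages on 32-bit x86 */

    static void *stack_thread_info(uintptr_t sp)
    {
        return (void *)(sp & ~(uintptr_t)(THREAD_SIZE - 1));
    }

    int main(void)
    {
        /* Any address inside a stack maps to that stack's base. */
        return stack_thread_info(0x12345678) == (void *)0x12344000 ? 0 : 1;
    }
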
13734diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
13735index 7ccf8d1..7cdca12 100644
13736--- a/arch/x86/include/asm/uaccess.h
13737+++ b/arch/x86/include/asm/uaccess.h
13738@@ -7,6 +7,7 @@
13739 #include <linux/compiler.h>
13740 #include <linux/thread_info.h>
13741 #include <linux/string.h>
13742+#include <linux/sched.h>
13743 #include <asm/asm.h>
13744 #include <asm/page.h>
13745 #include <asm/smap.h>
13746@@ -14,6 +15,8 @@
13747 #define VERIFY_READ 0
13748 #define VERIFY_WRITE 1
13749
13750+extern void check_object_size(const void *ptr, unsigned long n, bool to);
13751+
13752 /*
13753 * The fs value determines whether argument validity checking should be
13754 * performed or not. If get_fs() == USER_DS, checking is performed, with
13755@@ -29,7 +32,12 @@
13756
13757 #define get_ds() (KERNEL_DS)
13758 #define get_fs() (current_thread_info()->addr_limit)
13759+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
13760+void __set_fs(mm_segment_t x);
13761+void set_fs(mm_segment_t x);
13762+#else
13763 #define set_fs(x) (current_thread_info()->addr_limit = (x))
13764+#endif
13765
13766 #define segment_eq(a, b) ((a).seg == (b).seg)
13767
13768@@ -77,8 +85,33 @@
13769 * checks that the pointer is in the user space range - after calling
13770 * this function, memory access functions may still return -EFAULT.
13771 */
13772-#define access_ok(type, addr, size) \
13773- (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
13774+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
13775+#define access_ok(type, addr, size) \
13776+({ \
13777+ long __size = size; \
13778+ unsigned long __addr = (unsigned long)addr; \
13779+ unsigned long __addr_ao = __addr & PAGE_MASK; \
13780+ unsigned long __end_ao = __addr + __size - 1; \
13781+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
13782+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
13783+ while(__addr_ao <= __end_ao) { \
13784+ char __c_ao; \
13785+ __addr_ao += PAGE_SIZE; \
13786+ if (__size > PAGE_SIZE) \
13787+ cond_resched(); \
13788+ if (__get_user(__c_ao, (char __user *)__addr)) \
13789+ break; \
13790+ if (type != VERIFY_WRITE) { \
13791+ __addr = __addr_ao; \
13792+ continue; \
13793+ } \
13794+ if (__put_user(__c_ao, (char __user *)__addr)) \
13795+ break; \
13796+ __addr = __addr_ao; \
13797+ } \
13798+ } \
13799+ __ret_ao; \
13800+})
13801
13802 /*
13803 * The exception table consists of pairs of addresses relative to the
13804@@ -189,13 +222,21 @@ extern int __get_user_bad(void);
13805 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
13806 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
13807
13808-
13809+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
13810+#define __copyuser_seg "gs;"
13811+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
13812+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
13813+#else
13814+#define __copyuser_seg
13815+#define __COPYUSER_SET_ES
13816+#define __COPYUSER_RESTORE_ES
13817+#endif
13818
13819 #ifdef CONFIG_X86_32
13820 #define __put_user_asm_u64(x, addr, err, errret) \
13821 asm volatile(ASM_STAC "\n" \
13822- "1: movl %%eax,0(%2)\n" \
13823- "2: movl %%edx,4(%2)\n" \
13824+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
13825+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
13826 "3: " ASM_CLAC "\n" \
13827 ".section .fixup,\"ax\"\n" \
13828 "4: movl %3,%0\n" \
13829@@ -208,8 +249,8 @@ extern int __get_user_bad(void);
13830
13831 #define __put_user_asm_ex_u64(x, addr) \
13832 asm volatile(ASM_STAC "\n" \
13833- "1: movl %%eax,0(%1)\n" \
13834- "2: movl %%edx,4(%1)\n" \
13835+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
13836+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
13837 "3: " ASM_CLAC "\n" \
13838 _ASM_EXTABLE_EX(1b, 2b) \
13839 _ASM_EXTABLE_EX(2b, 3b) \
13840@@ -261,7 +302,7 @@ extern void __put_user_8(void);
13841 __typeof__(*(ptr)) __pu_val; \
13842 __chk_user_ptr(ptr); \
13843 might_fault(); \
13844- __pu_val = x; \
13845+ __pu_val = (x); \
13846 switch (sizeof(*(ptr))) { \
13847 case 1: \
13848 __put_user_x(1, __pu_val, ptr, __ret_pu); \
13849@@ -383,7 +424,7 @@ do { \
13850
13851 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
13852 asm volatile(ASM_STAC "\n" \
13853- "1: mov"itype" %2,%"rtype"1\n" \
13854+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
13855 "2: " ASM_CLAC "\n" \
13856 ".section .fixup,\"ax\"\n" \
13857 "3: mov %3,%0\n" \
13858@@ -391,7 +432,7 @@ do { \
13859 " jmp 2b\n" \
13860 ".previous\n" \
13861 _ASM_EXTABLE(1b, 3b) \
13862- : "=r" (err), ltype(x) \
13863+ : "=r" (err), ltype (x) \
13864 : "m" (__m(addr)), "i" (errret), "0" (err))
13865
13866 #define __get_user_size_ex(x, ptr, size) \
13867@@ -416,7 +457,7 @@ do { \
13868 } while (0)
13869
13870 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
13871- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
13872+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
13873 "2:\n" \
13874 _ASM_EXTABLE_EX(1b, 2b) \
13875 : ltype(x) : "m" (__m(addr)))
13876@@ -433,13 +474,24 @@ do { \
13877 int __gu_err; \
13878 unsigned long __gu_val; \
13879 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
13880- (x) = (__force __typeof__(*(ptr)))__gu_val; \
13881+ (x) = (__typeof__(*(ptr)))__gu_val; \
13882 __gu_err; \
13883 })
13884
13885 /* FIXME: this hack is definitely wrong -AK */
13886 struct __large_struct { unsigned long buf[100]; };
13887-#define __m(x) (*(struct __large_struct __user *)(x))
13888+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13889+#define ____m(x) \
13890+({ \
13891+ unsigned long ____x = (unsigned long)(x); \
13892+ if (____x < PAX_USER_SHADOW_BASE) \
13893+ ____x += PAX_USER_SHADOW_BASE; \
13894+ (void __user *)____x; \
13895+})
13896+#else
13897+#define ____m(x) (x)
13898+#endif
13899+#define __m(x) (*(struct __large_struct __user *)____m(x))
13900
13901 /*
13902 * Tell gcc we read from memory instead of writing: this is because
13903@@ -448,7 +500,7 @@ struct __large_struct { unsigned long buf[100]; };
13904 */
13905 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
13906 asm volatile(ASM_STAC "\n" \
13907- "1: mov"itype" %"rtype"1,%2\n" \
13908+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
13909 "2: " ASM_CLAC "\n" \
13910 ".section .fixup,\"ax\"\n" \
13911 "3: mov %3,%0\n" \
13912@@ -456,10 +508,10 @@ struct __large_struct { unsigned long buf[100]; };
13913 ".previous\n" \
13914 _ASM_EXTABLE(1b, 3b) \
13915 : "=r"(err) \
13916- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
13917+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
13918
13919 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
13920- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
13921+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
13922 "2:\n" \
13923 _ASM_EXTABLE_EX(1b, 2b) \
13924 : : ltype(x), "m" (__m(addr)))
13925@@ -498,8 +550,12 @@ struct __large_struct { unsigned long buf[100]; };
13926 * On error, the variable @x is set to zero.
13927 */
13928
13929+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13930+#define __get_user(x, ptr) get_user((x), (ptr))
13931+#else
13932 #define __get_user(x, ptr) \
13933 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
13934+#endif
13935
13936 /**
13937 * __put_user: - Write a simple value into user space, with less checking.
13938@@ -521,8 +577,12 @@ struct __large_struct { unsigned long buf[100]; };
13939 * Returns zero on success, or -EFAULT on error.
13940 */
13941
13942+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13943+#define __put_user(x, ptr) put_user((x), (ptr))
13944+#else
13945 #define __put_user(x, ptr) \
13946 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
13947+#endif
13948
13949 #define __get_user_unaligned __get_user
13950 #define __put_user_unaligned __put_user
13951@@ -540,7 +600,7 @@ struct __large_struct { unsigned long buf[100]; };
13952 #define get_user_ex(x, ptr) do { \
13953 unsigned long __gue_val; \
13954 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
13955- (x) = (__force __typeof__(*(ptr)))__gue_val; \
13956+ (x) = (__typeof__(*(ptr)))__gue_val; \
13957 } while (0)
13958
13959 #ifdef CONFIG_X86_WP_WORKS_OK
13960@@ -574,8 +634,8 @@ strncpy_from_user(char *dst, const char __user *src, long count);
13961 extern __must_check long strlen_user(const char __user *str);
13962 extern __must_check long strnlen_user(const char __user *str, long n);
13963
13964-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
13965-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
13966+unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13967+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13968
13969 /*
13970 * movsl can be slow when source and dest are not both 8-byte aligned
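
The access_ok() replacement above is the UDEREF prefault walk: when a range crosses a page boundary, it touches one byte per page with __get_user() (and rewrites it with __put_user() for VERIFY_WRITE, breaking copy-on-write) so every page is resident before the segment-based copy runs; __copyuser_seg then prefixes the actual accesses with gs: on i386, and ____m() rebases amd64 user pointers above PAX_USER_SHADOW_BASE. A user-space sketch of just the per-page walk (hypothetical helper, PAGE_SIZE assumed 4096):

    #include <stdlib.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096u
    #define PAGE_MASK (~(uintptr_t)(PAGE_SIZE - 1))

    /* Touch one byte per page so the whole range is faulted in. */
    static void prefault_range(const char *addr, size_t size)
    {
        uintptr_t p   = (uintptr_t)addr & PAGE_MASK;
        uintptr_t end = (uintptr_t)addr + size - 1;

        for (; p <= end; p += PAGE_SIZE)
            (void)*(volatile const char *)p;
    }

    int main(void)
    {
        char *buf = malloc(3 * PAGE_SIZE);
        if (buf)
            prefault_range(buf + 100, 2 * PAGE_SIZE);
        free(buf);
        return 0;
    }
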
13971diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
13972index 7f760a9..00f93c0 100644
13973--- a/arch/x86/include/asm/uaccess_32.h
13974+++ b/arch/x86/include/asm/uaccess_32.h
13975@@ -11,15 +11,15 @@
13976 #include <asm/page.h>
13977
13978 unsigned long __must_check __copy_to_user_ll
13979- (void __user *to, const void *from, unsigned long n);
13980+ (void __user *to, const void *from, unsigned long n) __size_overflow(3);
13981 unsigned long __must_check __copy_from_user_ll
13982- (void *to, const void __user *from, unsigned long n);
13983+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
13984 unsigned long __must_check __copy_from_user_ll_nozero
13985- (void *to, const void __user *from, unsigned long n);
13986+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
13987 unsigned long __must_check __copy_from_user_ll_nocache
13988- (void *to, const void __user *from, unsigned long n);
13989+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
13990 unsigned long __must_check __copy_from_user_ll_nocache_nozero
13991- (void *to, const void __user *from, unsigned long n);
13992+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
13993
13994 /**
13995 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
13996@@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
13997 static __always_inline unsigned long __must_check
13998 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
13999 {
14000+ if ((long)n < 0)
14001+ return n;
14002+
14003 if (__builtin_constant_p(n)) {
14004 unsigned long ret;
14005
14006@@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
14007 return ret;
14008 }
14009 }
14010+ if (!__builtin_constant_p(n))
14011+ check_object_size(from, n, true);
14012 return __copy_to_user_ll(to, from, n);
14013 }
14014
14015@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
14016 __copy_to_user(void __user *to, const void *from, unsigned long n)
14017 {
14018 might_fault();
14019+
14020 return __copy_to_user_inatomic(to, from, n);
14021 }
14022
14023 static __always_inline unsigned long
14024 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
14025 {
14026+ if ((long)n < 0)
14027+ return n;
14028+
14029 /* Avoid zeroing the tail if the copy fails..
14030 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
14031 * but as the zeroing behaviour is only significant when n is not
14032@@ -137,6 +146,10 @@ static __always_inline unsigned long
14033 __copy_from_user(void *to, const void __user *from, unsigned long n)
14034 {
14035 might_fault();
14036+
14037+ if ((long)n < 0)
14038+ return n;
14039+
14040 if (__builtin_constant_p(n)) {
14041 unsigned long ret;
14042
14043@@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
14044 return ret;
14045 }
14046 }
14047+ if (!__builtin_constant_p(n))
14048+ check_object_size(to, n, false);
14049 return __copy_from_user_ll(to, from, n);
14050 }
14051
14052@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
14053 const void __user *from, unsigned long n)
14054 {
14055 might_fault();
14056+
14057+ if ((long)n < 0)
14058+ return n;
14059+
14060 if (__builtin_constant_p(n)) {
14061 unsigned long ret;
14062
14063@@ -181,15 +200,19 @@ static __always_inline unsigned long
14064 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
14065 unsigned long n)
14066 {
14067- return __copy_from_user_ll_nocache_nozero(to, from, n);
14068+ if ((long)n < 0)
14069+ return n;
14070+
14071+ return __copy_from_user_ll_nocache_nozero(to, from, n);
14072 }
14073
14074-unsigned long __must_check copy_to_user(void __user *to,
14075- const void *from, unsigned long n);
14076-unsigned long __must_check _copy_from_user(void *to,
14077- const void __user *from,
14078- unsigned long n);
14079-
14080+extern void copy_to_user_overflow(void)
14081+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
14082+ __compiletime_error("copy_to_user() buffer size is not provably correct")
14083+#else
14084+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
14085+#endif
14086+;
14087
14088 extern void copy_from_user_overflow(void)
14089 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
14090@@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
14091 #endif
14092 ;
14093
14094-static inline unsigned long __must_check copy_from_user(void *to,
14095- const void __user *from,
14096- unsigned long n)
14097+/**
14098+ * copy_to_user: - Copy a block of data into user space.
14099+ * @to: Destination address, in user space.
14100+ * @from: Source address, in kernel space.
14101+ * @n: Number of bytes to copy.
14102+ *
14103+ * Context: User context only. This function may sleep.
14104+ *
14105+ * Copy data from kernel space to user space.
14106+ *
14107+ * Returns number of bytes that could not be copied.
14108+ * On success, this will be zero.
14109+ */
14110+static inline unsigned long __must_check
14111+copy_to_user(void __user *to, const void *from, unsigned long n)
14112 {
14113- int sz = __compiletime_object_size(to);
14114+ size_t sz = __compiletime_object_size(from);
14115
14116- if (likely(sz == -1 || sz >= n))
14117- n = _copy_from_user(to, from, n);
14118- else
14119+ if (unlikely(sz != (size_t)-1 && sz < n))
14120+ copy_to_user_overflow();
14121+ else if (access_ok(VERIFY_WRITE, to, n))
14122+ n = __copy_to_user(to, from, n);
14123+ return n;
14124+}
14125+
14126+/**
14127+ * copy_from_user: - Copy a block of data from user space.
14128+ * @to: Destination address, in kernel space.
14129+ * @from: Source address, in user space.
14130+ * @n: Number of bytes to copy.
14131+ *
14132+ * Context: User context only. This function may sleep.
14133+ *
14134+ * Copy data from user space to kernel space.
14135+ *
14136+ * Returns number of bytes that could not be copied.
14137+ * On success, this will be zero.
14138+ *
14139+ * If some data could not be copied, this function will pad the copied
14140+ * data to the requested size using zero bytes.
14141+ */
14142+static inline unsigned long __must_check
14143+copy_from_user(void *to, const void __user *from, unsigned long n)
14144+{
14145+ size_t sz = __compiletime_object_size(to);
14146+
14147+ if (unlikely(sz != (size_t)-1 && sz < n))
14148 copy_from_user_overflow();
14149-
14150+ else if (access_ok(VERIFY_READ, from, n))
14151+ n = __copy_from_user(to, from, n);
14152+ else if ((long)n > 0) {
14153+ if (!__builtin_constant_p(n))
14154+ check_object_size(to, n, false);
14155+ memset(to, 0, n);
14156+ }
14157 return n;
14158 }
14159
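
copy_to_user() and copy_from_user() become inline here so that __compiletime_object_size() can see the caller's buffer: when the requested size provably exceeds the object, the build emits copy_to_user_overflow()/copy_from_user_overflow() as a hard error or a warning depending on CONFIG_DEBUG_STRICT_USER_COPY_CHECKS. copy_to_user(), which mainline declared out of line with no such check, now sizes "from", the kernel source buffer. The builtin underneath behaves like this (sketch; compile with optimization for exact results):

    #include <stdio.h>

    static __attribute__((noinline)) unsigned long objsize(const void *p)
    {
        /* Through an opaque pointer the size is unknown: (size_t)-1. */
        return __builtin_object_size(p, 0);
    }

    int main(void)
    {
        char buf[16];
        printf("known:   %lu\n",
               (unsigned long)__builtin_object_size(buf, 0));   /* 16 */
        printf("unknown: %lu\n", objsize(buf));          /* (size_t)-1 */
        return 0;
    }
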
14160diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
14161index 142810c..4389299 100644
14162--- a/arch/x86/include/asm/uaccess_64.h
14163+++ b/arch/x86/include/asm/uaccess_64.h
14164@@ -10,6 +10,9 @@
14165 #include <asm/alternative.h>
14166 #include <asm/cpufeature.h>
14167 #include <asm/page.h>
14168+#include <asm/pgtable.h>
14169+
14170+#define set_fs(x) (current_thread_info()->addr_limit = (x))
14171
14172 /*
14173 * Copy To/From Userspace
14174@@ -17,13 +20,13 @@
14175
14176 /* Handles exceptions in both to and from, but doesn't do access_ok */
14177 __must_check unsigned long
14178-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
14179+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
14180 __must_check unsigned long
14181-copy_user_generic_string(void *to, const void *from, unsigned len);
14182+copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
14183 __must_check unsigned long
14184-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
14185+copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
14186
14187-static __always_inline __must_check unsigned long
14188+static __always_inline __must_check __size_overflow(3) unsigned long
14189 copy_user_generic(void *to, const void *from, unsigned len)
14190 {
14191 unsigned ret;
14192@@ -41,142 +44,205 @@ copy_user_generic(void *to, const void *from, unsigned len)
14193 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
14194 "=d" (len)),
14195 "1" (to), "2" (from), "3" (len)
14196- : "memory", "rcx", "r8", "r9", "r10", "r11");
14197+ : "memory", "rcx", "r8", "r9", "r11");
14198 return ret;
14199 }
14200
14201+static __always_inline __must_check unsigned long
14202+__copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);
14203+static __always_inline __must_check unsigned long
14204+__copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3);
14205 __must_check unsigned long
14206-_copy_to_user(void __user *to, const void *from, unsigned len);
14207-__must_check unsigned long
14208-_copy_from_user(void *to, const void __user *from, unsigned len);
14209-__must_check unsigned long
14210-copy_in_user(void __user *to, const void __user *from, unsigned len);
14211+copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
14212+
14213+extern void copy_to_user_overflow(void)
14214+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
14215+ __compiletime_error("copy_to_user() buffer size is not provably correct")
14216+#else
14217+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
14218+#endif
14219+;
14220+
14221+extern void copy_from_user_overflow(void)
14222+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
14223+ __compiletime_error("copy_from_user() buffer size is not provably correct")
14224+#else
14225+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
14226+#endif
14227+;
14228
14229 static inline unsigned long __must_check copy_from_user(void *to,
14230 const void __user *from,
14231 unsigned long n)
14232 {
14233- int sz = __compiletime_object_size(to);
14234-
14235 might_fault();
14236- if (likely(sz == -1 || sz >= n))
14237- n = _copy_from_user(to, from, n);
14238-#ifdef CONFIG_DEBUG_VM
14239- else
14240- WARN(1, "Buffer overflow detected!\n");
14241-#endif
14242+
14243+ if (access_ok(VERIFY_READ, from, n))
14244+ n = __copy_from_user(to, from, n);
14245+ else if (n < INT_MAX) {
14246+ if (!__builtin_constant_p(n))
14247+ check_object_size(to, n, false);
14248+ memset(to, 0, n);
14249+ }
14250 return n;
14251 }
14252
14253 static __always_inline __must_check
14254-int copy_to_user(void __user *dst, const void *src, unsigned size)
14255+int copy_to_user(void __user *dst, const void *src, unsigned long size)
14256 {
14257 might_fault();
14258
14259- return _copy_to_user(dst, src, size);
14260+ if (access_ok(VERIFY_WRITE, dst, size))
14261+ size = __copy_to_user(dst, src, size);
14262+ return size;
14263 }
14264
14265 static __always_inline __must_check
14266-int __copy_from_user(void *dst, const void __user *src, unsigned size)
14267+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
14268 {
14269- int ret = 0;
14270+ size_t sz = __compiletime_object_size(dst);
14271+ unsigned ret = 0;
14272
14273 might_fault();
14274- if (!__builtin_constant_p(size))
14275- return copy_user_generic(dst, (__force void *)src, size);
14276+
14277+ if (size > INT_MAX)
14278+ return size;
14279+
14280+#ifdef CONFIG_PAX_MEMORY_UDEREF
14281+ if (!__access_ok(VERIFY_READ, src, size))
14282+ return size;
14283+#endif
14284+
14285+ if (unlikely(sz != (size_t)-1 && sz < size)) {
14286+ copy_from_user_overflow();
14287+ return size;
14288+ }
14289+
14290+ if (!__builtin_constant_p(size)) {
14291+ check_object_size(dst, size, false);
14292+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
14293+ }
14294 switch (size) {
14295- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
14296+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
14297 ret, "b", "b", "=q", 1);
14298 return ret;
14299- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
14300+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
14301 ret, "w", "w", "=r", 2);
14302 return ret;
14303- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
14304+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
14305 ret, "l", "k", "=r", 4);
14306 return ret;
14307- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
14308+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
14309 ret, "q", "", "=r", 8);
14310 return ret;
14311 case 10:
14312- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
14313+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
14314 ret, "q", "", "=r", 10);
14315 if (unlikely(ret))
14316 return ret;
14317 __get_user_asm(*(u16 *)(8 + (char *)dst),
14318- (u16 __user *)(8 + (char __user *)src),
14319+ (const u16 __user *)(8 + (const char __user *)src),
14320 ret, "w", "w", "=r", 2);
14321 return ret;
14322 case 16:
14323- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
14324+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
14325 ret, "q", "", "=r", 16);
14326 if (unlikely(ret))
14327 return ret;
14328 __get_user_asm(*(u64 *)(8 + (char *)dst),
14329- (u64 __user *)(8 + (char __user *)src),
14330+ (const u64 __user *)(8 + (const char __user *)src),
14331 ret, "q", "", "=r", 8);
14332 return ret;
14333 default:
14334- return copy_user_generic(dst, (__force void *)src, size);
14335+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
14336 }
14337 }
14338
14339 static __always_inline __must_check
14340-int __copy_to_user(void __user *dst, const void *src, unsigned size)
14341+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
14342 {
14343- int ret = 0;
14344+ size_t sz = __compiletime_object_size(src);
14345+ unsigned ret = 0;
14346
14347 might_fault();
14348- if (!__builtin_constant_p(size))
14349- return copy_user_generic((__force void *)dst, src, size);
14350+
14351+ if (size > INT_MAX)
14352+ return size;
14353+
14354+#ifdef CONFIG_PAX_MEMORY_UDEREF
14355+ if (!__access_ok(VERIFY_WRITE, dst, size))
14356+ return size;
14357+#endif
14358+
14359+ if (unlikely(sz != (size_t)-1 && sz < size)) {
14360+ copy_to_user_overflow();
14361+ return size;
14362+ }
14363+
14364+ if (!__builtin_constant_p(size)) {
14365+ check_object_size(src, size, true);
14366+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
14367+ }
14368 switch (size) {
14369- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
14370+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
14371 ret, "b", "b", "iq", 1);
14372 return ret;
14373- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
14374+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
14375 ret, "w", "w", "ir", 2);
14376 return ret;
14377- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
14378+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
14379 ret, "l", "k", "ir", 4);
14380 return ret;
14381- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
14382+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
14383 ret, "q", "", "er", 8);
14384 return ret;
14385 case 10:
14386- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
14387+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
14388 ret, "q", "", "er", 10);
14389 if (unlikely(ret))
14390 return ret;
14391 asm("":::"memory");
14392- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
14393+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
14394 ret, "w", "w", "ir", 2);
14395 return ret;
14396 case 16:
14397- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
14398+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
14399 ret, "q", "", "er", 16);
14400 if (unlikely(ret))
14401 return ret;
14402 asm("":::"memory");
14403- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
14404+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
14405 ret, "q", "", "er", 8);
14406 return ret;
14407 default:
14408- return copy_user_generic((__force void *)dst, src, size);
14409+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
14410 }
14411 }
14412
14413 static __always_inline __must_check
14414-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14415+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
14416 {
14417- int ret = 0;
14418+ unsigned ret = 0;
14419
14420 might_fault();
14421+
14422+ if (size > INT_MAX)
14423+ return size;
14424+
14425+#ifdef CONFIG_PAX_MEMORY_UDEREF
14426+ if (!__access_ok(VERIFY_READ, src, size))
14427+ return size;
14428+ if (!__access_ok(VERIFY_WRITE, dst, size))
14429+ return size;
14430+#endif
14431+
14432 if (!__builtin_constant_p(size))
14433- return copy_user_generic((__force void *)dst,
14434- (__force void *)src, size);
14435+ return copy_user_generic((__force_kernel void *)____m(dst),
14436+ (__force_kernel const void *)____m(src), size);
14437 switch (size) {
14438 case 1: {
14439 u8 tmp;
14440- __get_user_asm(tmp, (u8 __user *)src,
14441+ __get_user_asm(tmp, (const u8 __user *)src,
14442 ret, "b", "b", "=q", 1);
14443 if (likely(!ret))
14444 __put_user_asm(tmp, (u8 __user *)dst,
14445@@ -185,7 +251,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14446 }
14447 case 2: {
14448 u16 tmp;
14449- __get_user_asm(tmp, (u16 __user *)src,
14450+ __get_user_asm(tmp, (const u16 __user *)src,
14451 ret, "w", "w", "=r", 2);
14452 if (likely(!ret))
14453 __put_user_asm(tmp, (u16 __user *)dst,
14454@@ -195,7 +261,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14455
14456 case 4: {
14457 u32 tmp;
14458- __get_user_asm(tmp, (u32 __user *)src,
14459+ __get_user_asm(tmp, (const u32 __user *)src,
14460 ret, "l", "k", "=r", 4);
14461 if (likely(!ret))
14462 __put_user_asm(tmp, (u32 __user *)dst,
14463@@ -204,7 +270,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14464 }
14465 case 8: {
14466 u64 tmp;
14467- __get_user_asm(tmp, (u64 __user *)src,
14468+ __get_user_asm(tmp, (const u64 __user *)src,
14469 ret, "q", "", "=r", 8);
14470 if (likely(!ret))
14471 __put_user_asm(tmp, (u64 __user *)dst,
14472@@ -212,41 +278,72 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14473 return ret;
14474 }
14475 default:
14476- return copy_user_generic((__force void *)dst,
14477- (__force void *)src, size);
14478+ return copy_user_generic((__force_kernel void *)____m(dst),
14479+ (__force_kernel const void *)____m(src), size);
14480 }
14481 }
14482
14483 static __must_check __always_inline int
14484-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
14485+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
14486 {
14487- return copy_user_generic(dst, (__force const void *)src, size);
14488+ if (size > INT_MAX)
14489+ return size;
14490+
14491+#ifdef CONFIG_PAX_MEMORY_UDEREF
14492+ if (!__access_ok(VERIFY_READ, src, size))
14493+ return size;
14494+#endif
14495+
14496+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
14497 }
14498
14499-static __must_check __always_inline int
14500-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
14501+static __must_check __always_inline unsigned long
14502+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
14503 {
14504- return copy_user_generic((__force void *)dst, src, size);
14505+ if (size > INT_MAX)
14506+ return size;
14507+
14508+#ifdef CONFIG_PAX_MEMORY_UDEREF
14509+ if (!__access_ok(VERIFY_WRITE, dst, size))
14510+ return size;
14511+#endif
14512+
14513+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
14514 }
14515
14516-extern long __copy_user_nocache(void *dst, const void __user *src,
14517- unsigned size, int zerorest);
14518+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
14519+ unsigned long size, int zerorest) __size_overflow(3);
14520
14521-static inline int
14522-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
14523+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
14524 {
14525 might_sleep();
14526+
14527+ if (size > INT_MAX)
14528+ return size;
14529+
14530+#ifdef CONFIG_PAX_MEMORY_UDEREF
14531+ if (!__access_ok(VERIFY_READ, src, size))
14532+ return size;
14533+#endif
14534+
14535 return __copy_user_nocache(dst, src, size, 1);
14536 }
14537
14538-static inline int
14539-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
14540- unsigned size)
14541+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
14542+ unsigned long size)
14543 {
14544+ if (size > INT_MAX)
14545+ return size;
14546+
14547+#ifdef CONFIG_PAX_MEMORY_UDEREF
14548+ if (!__access_ok(VERIFY_READ, src, size))
14549+ return size;
14550+#endif
14551+
14552 return __copy_user_nocache(dst, src, size, 0);
14553 }
14554
14555-unsigned long
14556-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
14557+extern unsigned long
14558+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
14559
14560 #endif /* _ASM_X86_UACCESS_64_H */
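
The 64-bit copy routines gain three layered guards: lengths widen to unsigned long with anything above INT_MAX rejected up front, so a signed length that went negative in a caller can no longer become a near-unbounded copy; under UDEREF, __access_ok() revalidates the range even in the double-underscore variants; and the __force_kernel/____m() casts route the generic copier through the shadowed user mapping. The first guard in isolation (sketch):

    #include <limits.h>
    #include <stdio.h>

    /* Returning the full size means "nothing was copied", matching the
     * uaccess convention. */
    static unsigned long guarded_copy(unsigned long size)
    {
        if (size > INT_MAX)
            return size;
        /* ... bounded copy would run here ... */
        return 0;
    }

    int main(void)
    {
        long broken = -1;              /* a length that underflowed */
        printf("left uncopied: %lu\n", guarded_copy((unsigned long)broken));
        return 0;
    }
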
14561diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
14562index 5b238981..77fdd78 100644
14563--- a/arch/x86/include/asm/word-at-a-time.h
14564+++ b/arch/x86/include/asm/word-at-a-time.h
14565@@ -11,7 +11,7 @@
14566 * and shift, for example.
14567 */
14568 struct word_at_a_time {
14569- const unsigned long one_bits, high_bits;
14570+ unsigned long one_bits, high_bits;
14571 };
14572
14573 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
14574diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
14575index 0415cda..b43d877 100644
14576--- a/arch/x86/include/asm/xsave.h
14577+++ b/arch/x86/include/asm/xsave.h
14578@@ -71,7 +71,9 @@ static inline int xsave_user(struct xsave_struct __user *buf)
14579 return -EFAULT;
14580
14581 __asm__ __volatile__(ASM_STAC "\n"
14582- "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
14583+ "1:"
14584+ __copyuser_seg
14585+ ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
14586 "2: " ASM_CLAC "\n"
14587 ".section .fixup,\"ax\"\n"
14588 "3: movl $-1,%[err]\n"
14589@@ -87,12 +89,14 @@ static inline int xsave_user(struct xsave_struct __user *buf)
14590 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
14591 {
14592 int err;
14593- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
14594+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
14595 u32 lmask = mask;
14596 u32 hmask = mask >> 32;
14597
14598 __asm__ __volatile__(ASM_STAC "\n"
14599- "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
14600+ "1:"
14601+ __copyuser_seg
14602+ ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
14603 "2: " ASM_CLAC "\n"
14604 ".section .fixup,\"ax\"\n"
14605 "3: movl $-1,%[err]\n"
14606diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
14607index 91ce48f..a48ea05 100644
14608--- a/arch/x86/kernel/Makefile
14609+++ b/arch/x86/kernel/Makefile
14610@@ -23,7 +23,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
14611 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
14612 obj-$(CONFIG_IRQ_WORK) += irq_work.o
14613 obj-y += probe_roms.o
14614-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
14615+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
14616 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
14617 obj-y += syscall_$(BITS).o
14618 obj-$(CONFIG_X86_64) += vsyscall_64.o
14619diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
14620index e651f7a..c995dc4 100644
14621--- a/arch/x86/kernel/acpi/boot.c
14622+++ b/arch/x86/kernel/acpi/boot.c
14623@@ -1576,7 +1576,7 @@ int __init acpi_boot_init(void)
14624 acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet);
14625
14626 if (!acpi_noirq)
14627- x86_init.pci.init = pci_acpi_init;
14628+ *(void **)&x86_init.pci.init = pci_acpi_init;
14629
14630 return 0;
14631 }
14632diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
14633index 11676cf..a8cf3ec 100644
14634--- a/arch/x86/kernel/acpi/sleep.c
14635+++ b/arch/x86/kernel/acpi/sleep.c
14636@@ -74,8 +74,12 @@ int acpi_suspend_lowlevel(void)
14637 #else /* CONFIG_64BIT */
14638 #ifdef CONFIG_SMP
14639 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
14640+
14641+ pax_open_kernel();
14642 early_gdt_descr.address =
14643 (unsigned long)get_cpu_gdt_table(smp_processor_id());
14644+ pax_close_kernel();
14645+
14646 initial_gs = per_cpu_offset(smp_processor_id());
14647 #endif
14648 initial_code = (unsigned long)wakeup_long64;
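
The *(void **)&x86_init.pci.init = ... stores that begin in acpi/boot.c and recur through the rest of the patch are the constify idiom: with the PaX constify plugin, ops structures such as x86_init are const and live in read-only memory, so the few legitimate boot-time assignments cast the const away per slot, and where they run after write protection is up they are bracketed by pax_open_kernel()/pax_close_kernel(), as in the sleep.c hunk just above. A user-space sketch of the same discipline, with mprotect() standing in for the open/close pair (mechanism emulated, names mirror the patch):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    struct ops { int (*init)(void); };

    static int my_init(void) { return 42; }

    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        struct ops *o = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (o == MAP_FAILED)
            return 1;
        memset(o, 0, sizeof(*o));
        mprotect(o, pagesz, PROT_READ);               /* "constified" */

        mprotect(o, pagesz, PROT_READ | PROT_WRITE);  /* pax_open_kernel() */
        *(void **)&o->init = (void *)my_init;         /* the patch's idiom */
        mprotect(o, pagesz, PROT_READ);               /* pax_close_kernel() */

        printf("%d\n", o->init());
        return 0;
    }
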
14649diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
14650index 13ab720..95d5442 100644
14651--- a/arch/x86/kernel/acpi/wakeup_32.S
14652+++ b/arch/x86/kernel/acpi/wakeup_32.S
14653@@ -30,13 +30,11 @@ wakeup_pmode_return:
14654 # and restore the stack ... but you need gdt for this to work
14655 movl saved_context_esp, %esp
14656
14657- movl %cs:saved_magic, %eax
14658- cmpl $0x12345678, %eax
14659+ cmpl $0x12345678, saved_magic
14660 jne bogus_magic
14661
14662 # jump to place where we left off
14663- movl saved_eip, %eax
14664- jmp *%eax
14665+ jmp *(saved_eip)
14666
14667 bogus_magic:
14668 jmp bogus_magic
14669diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
14670index ef5ccca..bd83949 100644
14671--- a/arch/x86/kernel/alternative.c
14672+++ b/arch/x86/kernel/alternative.c
14673@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
14674 */
14675 for (a = start; a < end; a++) {
14676 instr = (u8 *)&a->instr_offset + a->instr_offset;
14677+
14678+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14679+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
14680+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
14681+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
14682+#endif
14683+
14684 replacement = (u8 *)&a->repl_offset + a->repl_offset;
14685 BUG_ON(a->replacementlen > a->instrlen);
14686 BUG_ON(a->instrlen > sizeof(insnbuf));
14687@@ -299,10 +306,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
14688 for (poff = start; poff < end; poff++) {
14689 u8 *ptr = (u8 *)poff + *poff;
14690
14691+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14692+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
14693+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
14694+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
14695+#endif
14696+
14697 if (!*poff || ptr < text || ptr >= text_end)
14698 continue;
14699 /* turn DS segment override prefix into lock prefix */
14700- if (*ptr == 0x3e)
14701+ if (*ktla_ktva(ptr) == 0x3e)
14702 text_poke(ptr, ((unsigned char []){0xf0}), 1);
14703 }
14704 mutex_unlock(&text_mutex);
14705@@ -317,10 +330,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
14706 for (poff = start; poff < end; poff++) {
14707 u8 *ptr = (u8 *)poff + *poff;
14708
14709+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14710+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
14711+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
14712+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
14713+#endif
14714+
14715 if (!*poff || ptr < text || ptr >= text_end)
14716 continue;
14717 /* turn lock prefix into DS segment override prefix */
14718- if (*ptr == 0xf0)
14719+ if (*ktla_ktva(ptr) == 0xf0)
14720 text_poke(ptr, ((unsigned char []){0x3E}), 1);
14721 }
14722 mutex_unlock(&text_mutex);
14723@@ -468,7 +487,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
14724
14725 BUG_ON(p->len > MAX_PATCH_LEN);
14726 /* prep the buffer with the original instructions */
14727- memcpy(insnbuf, p->instr, p->len);
14728+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
14729 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
14730 (unsigned long)p->instr, p->len);
14731
14732@@ -515,7 +534,7 @@ void __init alternative_instructions(void)
14733 if (!uniproc_patched || num_possible_cpus() == 1)
14734 free_init_pages("SMP alternatives",
14735 (unsigned long)__smp_locks,
14736- (unsigned long)__smp_locks_end);
14737+ PAGE_ALIGN((unsigned long)__smp_locks_end));
14738 #endif
14739
14740 apply_paravirt(__parainstructions, __parainstructions_end);
14741@@ -535,13 +554,17 @@ void __init alternative_instructions(void)
14742 * instructions. And on the local CPU you need to be protected again NMI or MCE
14743 * handlers seeing an inconsistent instruction while you patch.
14744 */
14745-void *__init_or_module text_poke_early(void *addr, const void *opcode,
14746+void *__kprobes text_poke_early(void *addr, const void *opcode,
14747 size_t len)
14748 {
14749 unsigned long flags;
14750 local_irq_save(flags);
14751- memcpy(addr, opcode, len);
14752+
14753+ pax_open_kernel();
14754+ memcpy(ktla_ktva(addr), opcode, len);
14755 sync_core();
14756+ pax_close_kernel();
14757+
14758 local_irq_restore(flags);
14759 /* Could also do a CLFLUSH here to speed up CPU recovery; but
14760 that causes hangs on some VIA CPUs. */
14761@@ -563,36 +586,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
14762 */
14763 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
14764 {
14765- unsigned long flags;
14766- char *vaddr;
14767+ unsigned char *vaddr = ktla_ktva(addr);
14768 struct page *pages[2];
14769- int i;
14770+ size_t i;
14771
14772 if (!core_kernel_text((unsigned long)addr)) {
14773- pages[0] = vmalloc_to_page(addr);
14774- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
14775+ pages[0] = vmalloc_to_page(vaddr);
14776+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
14777 } else {
14778- pages[0] = virt_to_page(addr);
14779+ pages[0] = virt_to_page(vaddr);
14780 WARN_ON(!PageReserved(pages[0]));
14781- pages[1] = virt_to_page(addr + PAGE_SIZE);
14782+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
14783 }
14784 BUG_ON(!pages[0]);
14785- local_irq_save(flags);
14786- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
14787- if (pages[1])
14788- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
14789- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
14790- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
14791- clear_fixmap(FIX_TEXT_POKE0);
14792- if (pages[1])
14793- clear_fixmap(FIX_TEXT_POKE1);
14794- local_flush_tlb();
14795- sync_core();
14796- /* Could also do a CLFLUSH here to speed up CPU recovery; but
14797- that causes hangs on some VIA CPUs. */
14798+ text_poke_early(addr, opcode, len);
14799 for (i = 0; i < len; i++)
14800- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
14801- local_irq_restore(flags);
14802+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
14803 return addr;
14804 }
14805
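
The ktla_ktva() calls threaded through alternative.c reflect KERNEXEC's split view of kernel text on i386: the same bytes are reachable at the executable, read-only address and at a writable alias a fixed offset away, and all patching goes through the alias, which is also why text_poke() collapses into text_poke_early() under pax_open_kernel() instead of the mainline fixmap dance. A user-space sketch of one buffer with two views, assuming Linux memfd_create() (glibc 2.27+):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long sz = sysconf(_SC_PAGESIZE);
        int fd = memfd_create("text", 0);
        if (fd < 0 || ftruncate(fd, sz) < 0)
            return 1;

        char *text  = mmap(NULL, sz, PROT_READ, MAP_SHARED, fd, 0);
        char *alias = mmap(NULL, sz, PROT_READ | PROT_WRITE,
                           MAP_SHARED, fd, 0);
        if (text == MAP_FAILED || alias == MAP_FAILED)
            return 1;

        /* ktla_ktva() amounts to adding this constant delta. */
        long delta = alias - text;
        strcpy(text + delta, "patched");    /* write via the alias */
        printf("%s\n", text);               /* visible at the RO view */
        return 0;
    }
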
14806diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
14807index e663112..21938a3 100644
14808--- a/arch/x86/kernel/amd_gart_64.c
14809+++ b/arch/x86/kernel/amd_gart_64.c
14810@@ -851,7 +851,7 @@ int __init gart_iommu_init(void)
14811
14812 flush_gart();
14813 dma_ops = &gart_dma_ops;
14814- x86_platform.iommu_shutdown = gart_iommu_shutdown;
14815+ *(void **)&x86_platform.iommu_shutdown = gart_iommu_shutdown;
14816 swiotlb = 0;
14817
14818 return 0;
14819diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
14820index d5fd66f..6119b16 100644
14821--- a/arch/x86/kernel/aperture_64.c
14822+++ b/arch/x86/kernel/aperture_64.c
14823@@ -390,7 +390,7 @@ int __init gart_iommu_hole_init(void)
14824
14825 iommu_detected = 1;
14826 gart_iommu_aperture = 1;
14827- x86_init.iommu.iommu_init = gart_iommu_init;
14828+ *(void **)&x86_init.iommu.iommu_init = gart_iommu_init;
14829
14830 ctl = read_pci_config(bus, slot, 3,
14831 AMD64_GARTAPERTURECTL);
14832diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
14833index b17416e..be6e5dc 100644
14834--- a/arch/x86/kernel/apic/apic.c
14835+++ b/arch/x86/kernel/apic/apic.c
14836@@ -185,7 +185,7 @@ int first_system_vector = 0xfe;
14837 /*
14838 * Debug level, exported for io_apic.c
14839 */
14840-unsigned int apic_verbosity;
14841+int apic_verbosity;
14842
14843 int pic_mode;
14844
14845@@ -1923,7 +1923,7 @@ void smp_error_interrupt(struct pt_regs *regs)
14846 apic_write(APIC_ESR, 0);
14847 v1 = apic_read(APIC_ESR);
14848 ack_APIC_irq();
14849- atomic_inc(&irq_err_count);
14850+ atomic_inc_unchecked(&irq_err_count);
14851
14852 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
14853 smp_processor_id(), v0 , v1);
14854@@ -2155,7 +2155,9 @@ void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v))
14855 for (drv = __apicdrivers; drv < __apicdrivers_end; drv++) {
14856 /* Should happen once for each apic */
14857 WARN_ON((*drv)->eoi_write == eoi_write);
14858- (*drv)->eoi_write = eoi_write;
14859+ pax_open_kernel();
14860+ *(void **)&(*drv)->eoi_write = eoi_write;
14861+ pax_close_kernel();
14862 }
14863 }
14864
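
irq_err_count becomes atomic_unchecked_t because, under the PaX REFCOUNT feature, plain atomic_t increments trap on signed overflow to catch reference-count bugs; pure statistics counters that may legitimately wrap have to opt out. A conceptual model of the two types — the real detection happens in the inc path via the CPU overflow flag, not an explicit compare:

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { volatile int counter; } atomic_t;
    typedef struct { volatile int counter; } atomic_unchecked_t;

    static void atomic_inc(atomic_t *v)
    {
        int old = __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
        if (old == INT_MAX) {             /* overflow: likely refcount bug */
            fprintf(stderr, "refcount overflow detected\n");
            abort();                      /* the kernel would kill the task */
        }
    }

    static void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED); /* may wrap */
    }

    int main(void)
    {
        atomic_unchecked_t irq_err_count = { INT_MAX };
        atomic_inc_unchecked(&irq_err_count);  /* fine: just a statistic */

        atomic_t refs = { INT_MAX };
        atomic_inc(&refs);                     /* aborts */
        return 0;
    }
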
14865diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
14866index a65829a..6ddc249 100644
14867--- a/arch/x86/kernel/apic/apic_numachip.c
14868+++ b/arch/x86/kernel/apic/apic_numachip.c
14869@@ -178,7 +178,7 @@ static int __init numachip_system_init(void)
14870 if (!numachip_system)
14871 return 0;
14872
14873- x86_cpuinit.fixup_cpu_id = fixup_cpu_id;
14874+ *(void **)&x86_cpuinit.fixup_cpu_id = fixup_cpu_id;
14875
14876 map_csrs();
14877
14878diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
14879index 1817fa9..7bff097 100644
14880--- a/arch/x86/kernel/apic/io_apic.c
14881+++ b/arch/x86/kernel/apic/io_apic.c
14882@@ -1084,7 +1084,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
14883 }
14884 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
14885
14886-void lock_vector_lock(void)
14887+void lock_vector_lock(void) __acquires(vector_lock)
14888 {
14889 /* Used to the online set of cpus does not change
14890 * during assign_irq_vector.
14891@@ -1092,7 +1092,7 @@ void lock_vector_lock(void)
14892 raw_spin_lock(&vector_lock);
14893 }
14894
14895-void unlock_vector_lock(void)
14896+void unlock_vector_lock(void) __releases(vector_lock)
14897 {
14898 raw_spin_unlock(&vector_lock);
14899 }
14900@@ -2411,7 +2411,7 @@ static void ack_apic_edge(struct irq_data *data)
14901 ack_APIC_irq();
14902 }
14903
14904-atomic_t irq_mis_count;
14905+atomic_unchecked_t irq_mis_count;
14906
14907 #ifdef CONFIG_GENERIC_PENDING_IRQ
14908 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
14909@@ -2552,7 +2552,7 @@ static void ack_apic_level(struct irq_data *data)
14910 * at the cpu.
14911 */
14912 if (!(v & (1 << (i & 0x1f)))) {
14913- atomic_inc(&irq_mis_count);
14914+ atomic_inc_unchecked(&irq_mis_count);
14915
14916 eoi_ioapic_irq(irq, cfg);
14917 }
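
The __acquires() and __releases() markers added to lock_vector_lock()/unlock_vector_lock() are sparse annotations: they let "make C=1" verify that every path through the function changes the lock context as declared, and they compile to nothing otherwise. Their kernel definitions are effectively:

    #ifdef __CHECKER__
    # define __acquires(x)  __attribute__((context(x, 0, 1)))
    # define __releases(x)  __attribute__((context(x, 1, 0)))
    #else
    # define __acquires(x)
    # define __releases(x)
    #endif
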
14918diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
14919index d661ee9..512c0a1 100644
14920--- a/arch/x86/kernel/apic/numaq_32.c
14921+++ b/arch/x86/kernel/apic/numaq_32.c
14922@@ -257,14 +257,14 @@ static __init void early_check_numaq(void)
14923 early_get_smp_config();
14924
14925 if (found_numaq) {
14926- x86_init.mpparse.mpc_record = numaq_mpc_record;
14927- x86_init.mpparse.setup_ioapic_ids = x86_init_noop;
14928- x86_init.mpparse.mpc_apic_id = mpc_apic_id;
14929- x86_init.mpparse.smp_read_mpc_oem = smp_read_mpc_oem;
14930- x86_init.mpparse.mpc_oem_pci_bus = mpc_oem_pci_bus;
14931- x86_init.mpparse.mpc_oem_bus_info = mpc_oem_bus_info;
14932- x86_init.timers.tsc_pre_init = numaq_tsc_init;
14933- x86_init.pci.init = pci_numaq_init;
14934+ *(void **)&x86_init.mpparse.mpc_record = numaq_mpc_record;
14935+ *(void **)&x86_init.mpparse.setup_ioapic_ids = x86_init_noop;
14936+ *(void **)&x86_init.mpparse.mpc_apic_id = mpc_apic_id;
14937+ *(void **)&x86_init.mpparse.smp_read_mpc_oem = smp_read_mpc_oem;
14938+ *(void **)&x86_init.mpparse.mpc_oem_pci_bus = mpc_oem_pci_bus;
14939+ *(void **)&x86_init.mpparse.mpc_oem_bus_info = mpc_oem_bus_info;
14940+ *(void **)&x86_init.timers.tsc_pre_init = numaq_tsc_init;
14941+ *(void **)&x86_init.pci.init = pci_numaq_init;
14942 }
14943 }
14944
14945diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
14946index 8cfade9..8ea7b51 100644
14947--- a/arch/x86/kernel/apic/x2apic_uv_x.c
14948+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
14949@@ -139,8 +139,8 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
14950 is_uv1 ? UV1_HUB_REVISION_BASE : UV2_HUB_REVISION_BASE;
14951 pnodeid = early_get_pnodeid();
14952 early_get_apic_pnode_shift();
14953- x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
14954- x86_platform.nmi_init = uv_nmi_init;
14955+ *(void **)&x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
14956+ *(void **)&x86_platform.nmi_init = uv_nmi_init;
14957 if (!strcmp(oem_table_id, "UVL"))
14958 uv_system_type = UV_LEGACY_APIC;
14959 else if (!strcmp(oem_table_id, "UVX"))
14960diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
14961index d65464e..1035d31 100644
14962--- a/arch/x86/kernel/apm_32.c
14963+++ b/arch/x86/kernel/apm_32.c
14964@@ -412,7 +412,7 @@ static DEFINE_MUTEX(apm_mutex);
14965 * This is for buggy BIOS's that refer to (real mode) segment 0x40
14966 * even though they are called in protected mode.
14967 */
14968-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
14969+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
14970 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
14971
14972 static const char driver_version[] = "1.16ac"; /* no spaces */
14973@@ -590,7 +590,10 @@ static long __apm_bios_call(void *_call)
14974 BUG_ON(cpu != 0);
14975 gdt = get_cpu_gdt_table(cpu);
14976 save_desc_40 = gdt[0x40 / 8];
14977+
14978+ pax_open_kernel();
14979 gdt[0x40 / 8] = bad_bios_desc;
14980+ pax_close_kernel();
14981
14982 apm_irq_save(flags);
14983 APM_DO_SAVE_SEGS;
14984@@ -599,7 +602,11 @@ static long __apm_bios_call(void *_call)
14985 &call->esi);
14986 APM_DO_RESTORE_SEGS;
14987 apm_irq_restore(flags);
14988+
14989+ pax_open_kernel();
14990 gdt[0x40 / 8] = save_desc_40;
14991+ pax_close_kernel();
14992+
14993 put_cpu();
14994
14995 return call->eax & 0xff;
14996@@ -666,7 +673,10 @@ static long __apm_bios_call_simple(void *_call)
14997 BUG_ON(cpu != 0);
14998 gdt = get_cpu_gdt_table(cpu);
14999 save_desc_40 = gdt[0x40 / 8];
15000+
15001+ pax_open_kernel();
15002 gdt[0x40 / 8] = bad_bios_desc;
15003+ pax_close_kernel();
15004
15005 apm_irq_save(flags);
15006 APM_DO_SAVE_SEGS;
15007@@ -674,7 +684,11 @@ static long __apm_bios_call_simple(void *_call)
15008 &call->eax);
15009 APM_DO_RESTORE_SEGS;
15010 apm_irq_restore(flags);
15011+
15012+ pax_open_kernel();
15013 gdt[0x40 / 8] = save_desc_40;
15014+ pax_close_kernel();
15015+
15016 put_cpu();
15017 return error;
15018 }
15019@@ -2345,12 +2359,15 @@ static int __init apm_init(void)
15020 * code to that CPU.
15021 */
15022 gdt = get_cpu_gdt_table(0);
15023+
15024+ pax_open_kernel();
15025 set_desc_base(&gdt[APM_CS >> 3],
15026 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
15027 set_desc_base(&gdt[APM_CS_16 >> 3],
15028 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
15029 set_desc_base(&gdt[APM_DS >> 3],
15030 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
15031+ pax_close_kernel();
15032
15033 proc_create("apm", 0, NULL, &apm_file_ops);
15034
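
Every gdt[0x40 / 8] store above is now bracketed by pax_open_kernel()/pax_close_kernel() because KERNEXEC keeps the GDT on write-protected pages; the pair briefly disables CR0.WP (with preemption off) around the write. A user-space analogy with mprotect() standing in for the CR0.WP toggle, assuming a 4 KB page and a placeholder descriptor value:

    #include <assert.h>
    #include <stdint.h>
    #include <sys/mman.h>

    static uint64_t *gdt;                /* stands in for get_cpu_gdt_table() */

    static void pax_open_kernel(void)  { mprotect(gdt, 4096, PROT_READ | PROT_WRITE); }
    static void pax_close_kernel(void) { mprotect(gdt, 4096, PROT_READ); }

    int main(void)
    {
        gdt = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        assert(gdt != MAP_FAILED);
        mprotect(gdt, 4096, PROT_READ);  /* GDT is normally read-only */

        uint64_t save_desc_40  = gdt[0x40 / 8];
        uint64_t bad_bios_desc = 0x4093; /* placeholder, not a real descriptor */

        pax_open_kernel();
        gdt[0x40 / 8] = bad_bios_desc;   /* the write the patch brackets */
        pax_close_kernel();

        pax_open_kernel();
        gdt[0x40 / 8] = save_desc_40;
        pax_close_kernel();
        return 0;
    }
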
15035diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
15036index 2861082..6d4718e 100644
15037--- a/arch/x86/kernel/asm-offsets.c
15038+++ b/arch/x86/kernel/asm-offsets.c
15039@@ -33,6 +33,8 @@ void common(void) {
15040 OFFSET(TI_status, thread_info, status);
15041 OFFSET(TI_addr_limit, thread_info, addr_limit);
15042 OFFSET(TI_preempt_count, thread_info, preempt_count);
15043+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
15044+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
15045
15046 BLANK();
15047 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
15048@@ -53,8 +55,26 @@ void common(void) {
15049 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
15050 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
15051 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
15052+
15053+#ifdef CONFIG_PAX_KERNEXEC
15054+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
15055 #endif
15056
15057+#ifdef CONFIG_PAX_MEMORY_UDEREF
15058+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
15059+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
15060+#ifdef CONFIG_X86_64
15061+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
15062+#endif
15063+#endif
15064+
15065+#endif
15066+
15067+ BLANK();
15068+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
15069+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
15070+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
15071+
15072 #ifdef CONFIG_XEN
15073 BLANK();
15074 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
15075diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
15076index 1b4754f..fbb4227 100644
15077--- a/arch/x86/kernel/asm-offsets_64.c
15078+++ b/arch/x86/kernel/asm-offsets_64.c
15079@@ -76,6 +76,7 @@ int main(void)
15080 BLANK();
15081 #undef ENTRY
15082
15083+ DEFINE(TSS_size, sizeof(struct tss_struct));
15084 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
15085 BLANK();
15086
15087diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
15088index a0e067d..9c7db16 100644
15089--- a/arch/x86/kernel/cpu/Makefile
15090+++ b/arch/x86/kernel/cpu/Makefile
15091@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
15092 CFLAGS_REMOVE_perf_event.o = -pg
15093 endif
15094
15095-# Make sure load_percpu_segment has no stackprotector
15096-nostackp := $(call cc-option, -fno-stack-protector)
15097-CFLAGS_common.o := $(nostackp)
15098-
15099 obj-y := intel_cacheinfo.o scattered.o topology.o
15100 obj-y += proc.o capflags.o powerflags.o common.o
15101 obj-y += vmware.o hypervisor.o mshyperv.o
15102diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
15103index 1b7d165..b9e2627 100644
15104--- a/arch/x86/kernel/cpu/amd.c
15105+++ b/arch/x86/kernel/cpu/amd.c
15106@@ -738,7 +738,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
15107 unsigned int size)
15108 {
15109 /* AMD errata T13 (order #21922) */
15110- if ((c->x86 == 6)) {
15111+ if (c->x86 == 6) {
15112 /* Duron Rev A0 */
15113 if (c->x86_model == 3 && c->x86_mask == 0)
15114 size = 64;
15115diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
15116index 7505f7b..d59dac0 100644
15117--- a/arch/x86/kernel/cpu/common.c
15118+++ b/arch/x86/kernel/cpu/common.c
15119@@ -86,60 +86,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
15120
15121 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
15122
15123-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
15124-#ifdef CONFIG_X86_64
15125- /*
15126- * We need valid kernel segments for data and code in long mode too
15127- * IRET will check the segment types kkeil 2000/10/28
15128- * Also sysret mandates a special GDT layout
15129- *
15130- * TLS descriptors are currently at a different place compared to i386.
15131- * Hopefully nobody expects them at a fixed place (Wine?)
15132- */
15133- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
15134- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
15135- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
15136- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
15137- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
15138- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
15139-#else
15140- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
15141- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
15142- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
15143- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
15144- /*
15145- * Segments used for calling PnP BIOS have byte granularity.
15146- * The code segments and data segments have fixed 64k limits,
15147- * the transfer segment sizes are set at run time.
15148- */
15149- /* 32-bit code */
15150- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
15151- /* 16-bit code */
15152- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
15153- /* 16-bit data */
15154- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
15155- /* 16-bit data */
15156- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
15157- /* 16-bit data */
15158- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
15159- /*
15160- * The APM segments have byte granularity and their bases
15161- * are set at run time. All have 64k limits.
15162- */
15163- /* 32-bit code */
15164- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
15165- /* 16-bit code */
15166- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
15167- /* data */
15168- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
15169-
15170- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
15171- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
15172- GDT_STACK_CANARY_INIT
15173-#endif
15174-} };
15175-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
15176-
15177 static int __init x86_xsave_setup(char *s)
15178 {
15179 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
15180@@ -389,7 +335,7 @@ void switch_to_new_gdt(int cpu)
15181 {
15182 struct desc_ptr gdt_descr;
15183
15184- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
15185+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
15186 gdt_descr.size = GDT_SIZE - 1;
15187 load_gdt(&gdt_descr);
15188 /* Reload the per-cpu base */
15189@@ -885,6 +831,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
15190 /* Filter out anything that depends on CPUID levels we don't have */
15191 filter_cpuid_features(c, true);
15192
15193+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
15194+ setup_clear_cpu_cap(X86_FEATURE_SEP);
15195+#endif
15196+
15197 /* If the model name is still unset, do table lookup. */
15198 if (!c->x86_model_id[0]) {
15199 const char *p;
15200@@ -1068,10 +1018,12 @@ static __init int setup_disablecpuid(char *arg)
15201 }
15202 __setup("clearcpuid=", setup_disablecpuid);
15203
15204+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
15205+EXPORT_PER_CPU_SYMBOL(current_tinfo);
15206+
15207 #ifdef CONFIG_X86_64
15208 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
15209-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
15210- (unsigned long) nmi_idt_table };
15211+struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
15212
15213 DEFINE_PER_CPU_FIRST(union irq_stack_union,
15214 irq_stack_union) __aligned(PAGE_SIZE);
15215@@ -1085,7 +1037,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
15216 EXPORT_PER_CPU_SYMBOL(current_task);
15217
15218 DEFINE_PER_CPU(unsigned long, kernel_stack) =
15219- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
15220+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
15221 EXPORT_PER_CPU_SYMBOL(kernel_stack);
15222
15223 DEFINE_PER_CPU(char *, irq_stack_ptr) =
15224@@ -1178,7 +1130,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
15225 {
15226 memset(regs, 0, sizeof(struct pt_regs));
15227 regs->fs = __KERNEL_PERCPU;
15228- regs->gs = __KERNEL_STACK_CANARY;
15229+ savesegment(gs, regs->gs);
15230
15231 return regs;
15232 }
15233@@ -1233,7 +1185,7 @@ void __cpuinit cpu_init(void)
15234 int i;
15235
15236 cpu = stack_smp_processor_id();
15237- t = &per_cpu(init_tss, cpu);
15238+ t = init_tss + cpu;
15239 oist = &per_cpu(orig_ist, cpu);
15240
15241 #ifdef CONFIG_NUMA
15242@@ -1259,7 +1211,7 @@ void __cpuinit cpu_init(void)
15243 switch_to_new_gdt(cpu);
15244 loadsegment(fs, 0);
15245
15246- load_idt((const struct desc_ptr *)&idt_descr);
15247+ load_idt(&idt_descr);
15248
15249 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
15250 syscall_init();
15251@@ -1268,7 +1220,6 @@ void __cpuinit cpu_init(void)
15252 wrmsrl(MSR_KERNEL_GS_BASE, 0);
15253 barrier();
15254
15255- x86_configure_nx();
15256 if (cpu != 0)
15257 enable_x2apic();
15258
15259@@ -1321,7 +1272,7 @@ void __cpuinit cpu_init(void)
15260 {
15261 int cpu = smp_processor_id();
15262 struct task_struct *curr = current;
15263- struct tss_struct *t = &per_cpu(init_tss, cpu);
15264+ struct tss_struct *t = init_tss + cpu;
15265 struct thread_struct *thread = &curr->thread;
15266
15267 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
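
Deleting the per-CPU gdt_page here goes hand in hand with the rest of the patch replacing it by a fixed, page-aligned cpu_gdt_table that can live in read-only memory; the entry_32.S hunks further down compute a CPU's GDT as cpu_gdt_table + (cpu << PAGE_SHIFT). A sketch of that layout, with sizes assumed and one full page reserved per CPU:

    #include <stdint.h>

    #define PAGE_SIZE 4096
    #define NR_CPUS   64

    struct desc_struct { uint64_t raw; };

    /* one full page per CPU, so asm can form the base with a shift:
     * cpu_gdt_table + (cpu << PAGE_SHIFT) */
    static struct desc_struct
    cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)]
            __attribute__((aligned(PAGE_SIZE)));

    static struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
    {
        return cpu_gdt_table[cpu];
    }
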
15268diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
15269index 198e019..867575e 100644
15270--- a/arch/x86/kernel/cpu/intel.c
15271+++ b/arch/x86/kernel/cpu/intel.c
15272@@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
15273 * Update the IDT descriptor and reload the IDT so that
15274 * it uses the read-only mapped virtual address.
15275 */
15276- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
15277+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
15278 load_idt(&idt_descr);
15279 }
15280 #endif
15281diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
15282index 93c5451..3887433 100644
15283--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
15284+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
15285@@ -983,6 +983,22 @@ static struct attribute *default_attrs[] = {
15286 };
15287
15288 #ifdef CONFIG_AMD_NB
15289+static struct attribute *default_attrs_amd_nb[] = {
15290+ &type.attr,
15291+ &level.attr,
15292+ &coherency_line_size.attr,
15293+ &physical_line_partition.attr,
15294+ &ways_of_associativity.attr,
15295+ &number_of_sets.attr,
15296+ &size.attr,
15297+ &shared_cpu_map.attr,
15298+ &shared_cpu_list.attr,
15299+ NULL,
15300+ NULL,
15301+ NULL,
15302+ NULL
15303+};
15304+
15305 static struct attribute ** __cpuinit amd_l3_attrs(void)
15306 {
15307 static struct attribute **attrs;
15308@@ -993,18 +1009,7 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
15309
15310 n = ARRAY_SIZE(default_attrs);
15311
15312- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
15313- n += 2;
15314-
15315- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
15316- n += 1;
15317-
15318- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
15319- if (attrs == NULL)
15320- return attrs = default_attrs;
15321-
15322- for (n = 0; default_attrs[n]; n++)
15323- attrs[n] = default_attrs[n];
15324+ attrs = default_attrs_amd_nb;
15325
15326 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
15327 attrs[n++] = &cache_disable_0.attr;
15328@@ -1055,6 +1060,13 @@ static struct kobj_type ktype_cache = {
15329 .default_attrs = default_attrs,
15330 };
15331
15332+#ifdef CONFIG_AMD_NB
15333+static struct kobj_type ktype_cache_amd_nb = {
15334+ .sysfs_ops = &sysfs_ops,
15335+ .default_attrs = default_attrs_amd_nb,
15336+};
15337+#endif
15338+
15339 static struct kobj_type ktype_percpu_entry = {
15340 .sysfs_ops = &sysfs_ops,
15341 };
15342@@ -1120,20 +1132,26 @@ static int __cpuinit cache_add_dev(struct device *dev)
15343 return retval;
15344 }
15345
15346+#ifdef CONFIG_AMD_NB
15347+ amd_l3_attrs();
15348+#endif
15349+
15350 for (i = 0; i < num_cache_leaves; i++) {
15351+ struct kobj_type *ktype;
15352+
15353 this_object = INDEX_KOBJECT_PTR(cpu, i);
15354 this_object->cpu = cpu;
15355 this_object->index = i;
15356
15357 this_leaf = CPUID4_INFO_IDX(cpu, i);
15358
15359- ktype_cache.default_attrs = default_attrs;
15360+ ktype = &ktype_cache;
15361 #ifdef CONFIG_AMD_NB
15362 if (this_leaf->base.nb)
15363- ktype_cache.default_attrs = amd_l3_attrs();
15364+ ktype = &ktype_cache_amd_nb;
15365 #endif
15366 retval = kobject_init_and_add(&(this_object->kobj),
15367- &ktype_cache,
15368+ ktype,
15369 per_cpu(ici_cache_kobject, cpu),
15370 "index%1lu", i);
15371 if (unlikely(retval)) {
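
Rather than kzalloc()ing an attribute array sized to the detected features, the rewrite uses default_attrs_amd_nb, a static table with trailing NULL slots: optional L3 attributes are patched into the spare slots, and whatever remains NULL terminates the list. The pattern in isolation, with invented names:

    #include <stddef.h>

    struct attribute { const char *name; };

    static struct attribute type_attr = { "type" }, level_attr = { "level" };
    static struct attribute cache_disable_0 = { "cache_disable_0" };
    static struct attribute subcaches = { "subcaches" };

    /* two real entries, then spare NULL slots; unused slots keep the
     * array NULL-terminated whichever optional features are present */
    static struct attribute *amd_attrs[] =
            { &type_attr, &level_attr, NULL, NULL, NULL };

    static void amd_fill_attrs(int have_l3_disable, int have_partitioning)
    {
        size_t n = 2;                      /* index of the first spare slot */
        if (have_l3_disable)
            amd_attrs[n++] = &cache_disable_0;
        if (have_partitioning)
            amd_attrs[n++] = &subcaches;
    }
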
15372diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
15373index 46cbf86..55c7292 100644
15374--- a/arch/x86/kernel/cpu/mcheck/mce.c
15375+++ b/arch/x86/kernel/cpu/mcheck/mce.c
15376@@ -45,6 +45,7 @@
15377 #include <asm/processor.h>
15378 #include <asm/mce.h>
15379 #include <asm/msr.h>
15380+#include <asm/local.h>
15381
15382 #include "mce-internal.h"
15383
15384@@ -254,7 +255,7 @@ static void print_mce(struct mce *m)
15385 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
15386 m->cs, m->ip);
15387
15388- if (m->cs == __KERNEL_CS)
15389+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
15390 print_symbol("{%s}", m->ip);
15391 pr_cont("\n");
15392 }
15393@@ -287,10 +288,10 @@ static void print_mce(struct mce *m)
15394
15395 #define PANIC_TIMEOUT 5 /* 5 seconds */
15396
15397-static atomic_t mce_paniced;
15398+static atomic_unchecked_t mce_paniced;
15399
15400 static int fake_panic;
15401-static atomic_t mce_fake_paniced;
15402+static atomic_unchecked_t mce_fake_paniced;
15403
15404 /* Panic in progress. Enable interrupts and wait for final IPI */
15405 static void wait_for_panic(void)
15406@@ -314,7 +315,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
15407 /*
15408 * Make sure only one CPU runs in machine check panic
15409 */
15410- if (atomic_inc_return(&mce_paniced) > 1)
15411+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
15412 wait_for_panic();
15413 barrier();
15414
15415@@ -322,7 +323,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
15416 console_verbose();
15417 } else {
15418 /* Don't log too much for fake panic */
15419- if (atomic_inc_return(&mce_fake_paniced) > 1)
15420+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
15421 return;
15422 }
15423 /* First print corrected ones that are still unlogged */
15424@@ -694,7 +695,7 @@ static int mce_timed_out(u64 *t)
15425 * might have been modified by someone else.
15426 */
15427 rmb();
15428- if (atomic_read(&mce_paniced))
15429+ if (atomic_read_unchecked(&mce_paniced))
15430 wait_for_panic();
15431 if (!monarch_timeout)
15432 goto out;
15433@@ -1659,7 +1660,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
15434 }
15435
15436 /* Call the installed machine check handler for this CPU setup. */
15437-void (*machine_check_vector)(struct pt_regs *, long error_code) =
15438+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
15439 unexpected_machine_check;
15440
15441 /*
15442@@ -1682,7 +1683,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
15443 return;
15444 }
15445
15446+ pax_open_kernel();
15447 machine_check_vector = do_machine_check;
15448+ pax_close_kernel();
15449
15450 __mcheck_cpu_init_generic();
15451 __mcheck_cpu_init_vendor(c);
15452@@ -1696,7 +1699,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
15453 */
15454
15455 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
15456-static int mce_chrdev_open_count; /* #times opened */
15457+static local_t mce_chrdev_open_count; /* #times opened */
15458 static int mce_chrdev_open_exclu; /* already open exclusive? */
15459
15460 static int mce_chrdev_open(struct inode *inode, struct file *file)
15461@@ -1704,7 +1707,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
15462 spin_lock(&mce_chrdev_state_lock);
15463
15464 if (mce_chrdev_open_exclu ||
15465- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
15466+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
15467 spin_unlock(&mce_chrdev_state_lock);
15468
15469 return -EBUSY;
15470@@ -1712,7 +1715,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
15471
15472 if (file->f_flags & O_EXCL)
15473 mce_chrdev_open_exclu = 1;
15474- mce_chrdev_open_count++;
15475+ local_inc(&mce_chrdev_open_count);
15476
15477 spin_unlock(&mce_chrdev_state_lock);
15478
15479@@ -1723,7 +1726,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
15480 {
15481 spin_lock(&mce_chrdev_state_lock);
15482
15483- mce_chrdev_open_count--;
15484+ local_dec(&mce_chrdev_open_count);
15485 mce_chrdev_open_exclu = 0;
15486
15487 spin_unlock(&mce_chrdev_state_lock);
15488@@ -2367,7 +2370,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
15489 return NOTIFY_OK;
15490 }
15491
15492-static struct notifier_block mce_cpu_notifier __cpuinitdata = {
15493+static struct notifier_block mce_cpu_notifier __cpuinitconst = {
15494 .notifier_call = mce_cpu_callback,
15495 };
15496
15497@@ -2445,7 +2448,7 @@ struct dentry *mce_get_debugfs_dir(void)
15498 static void mce_reset(void)
15499 {
15500 cpu_missing = 0;
15501- atomic_set(&mce_fake_paniced, 0);
15502+ atomic_set_unchecked(&mce_fake_paniced, 0);
15503 atomic_set(&mce_executing, 0);
15504 atomic_set(&mce_callin, 0);
15505 atomic_set(&global_nwo, 0);
15506diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
15507index 2d5454c..51987eb 100644
15508--- a/arch/x86/kernel/cpu/mcheck/p5.c
15509+++ b/arch/x86/kernel/cpu/mcheck/p5.c
15510@@ -11,6 +11,7 @@
15511 #include <asm/processor.h>
15512 #include <asm/mce.h>
15513 #include <asm/msr.h>
15514+#include <asm/pgtable.h>
15515
15516 /* By default disabled */
15517 int mce_p5_enabled __read_mostly;
15518@@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
15519 if (!cpu_has(c, X86_FEATURE_MCE))
15520 return;
15521
15522+ pax_open_kernel();
15523 machine_check_vector = pentium_machine_check;
15524+ pax_close_kernel();
15525 /* Make sure the vector pointer is visible before we enable MCEs: */
15526 wmb();
15527
15528diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
15529index 2d7998f..17c9de1 100644
15530--- a/arch/x86/kernel/cpu/mcheck/winchip.c
15531+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
15532@@ -10,6 +10,7 @@
15533 #include <asm/processor.h>
15534 #include <asm/mce.h>
15535 #include <asm/msr.h>
15536+#include <asm/pgtable.h>
15537
15538 /* Machine check handler for WinChip C6: */
15539 static void winchip_machine_check(struct pt_regs *regs, long error_code)
15540@@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
15541 {
15542 u32 lo, hi;
15543
15544+ pax_open_kernel();
15545 machine_check_vector = winchip_machine_check;
15546+ pax_close_kernel();
15547 /* Make sure the vector pointer is visible before we enable MCEs: */
15548 wmb();
15549
15550diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
15551index 6b96110..0da73eb 100644
15552--- a/arch/x86/kernel/cpu/mtrr/main.c
15553+++ b/arch/x86/kernel/cpu/mtrr/main.c
15554@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
15555 u64 size_or_mask, size_and_mask;
15556 static bool mtrr_aps_delayed_init;
15557
15558-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
15559+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
15560
15561 const struct mtrr_ops *mtrr_if;
15562
15563diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
15564index df5e41f..816c719 100644
15565--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
15566+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
15567@@ -25,7 +25,7 @@ struct mtrr_ops {
15568 int (*validate_add_page)(unsigned long base, unsigned long size,
15569 unsigned int type);
15570 int (*have_wrcomb)(void);
15571-};
15572+} __do_const;
15573
15574 extern int generic_get_free_region(unsigned long base, unsigned long size,
15575 int replace_reg);
15576diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
15577index 4a3374e..1ca3ecb 100644
15578--- a/arch/x86/kernel/cpu/perf_event.c
15579+++ b/arch/x86/kernel/cpu/perf_event.c
15580@@ -1765,7 +1765,7 @@ static unsigned long get_segment_base(unsigned int segment)
15581 if (idx > GDT_ENTRIES)
15582 return 0;
15583
15584- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
15585+ desc = get_cpu_gdt_table(smp_processor_id());
15586 }
15587
15588 return get_desc_base(desc + idx);
15589@@ -1855,7 +1855,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
15590 break;
15591
15592 perf_callchain_store(entry, frame.return_address);
15593- fp = frame.next_frame;
15594+ fp = (const void __force_user *)frame.next_frame;
15595 }
15596 }
15597
15598diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
15599index 324bb52..1a93d85 100644
15600--- a/arch/x86/kernel/cpu/perf_event_intel.c
15601+++ b/arch/x86/kernel/cpu/perf_event_intel.c
15602@@ -1949,10 +1949,10 @@ __init int intel_pmu_init(void)
15603 * v2 and above have a perf capabilities MSR
15604 */
15605 if (version > 1) {
15606- u64 capabilities;
15607+ u64 capabilities = x86_pmu.intel_cap.capabilities;
15608
15609- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
15610- x86_pmu.intel_cap.capabilities = capabilities;
15611+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
15612+ x86_pmu.intel_cap.capabilities = capabilities;
15613 }
15614
15615 intel_ds_init();
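
The capabilities read is switched from rdmsrl() to rdmsrl_safe(), which catches a faulting RDMSR through the exception table and reports it as a nonzero return instead of an oops; seeding the destination with the old value first means a fault leaves the field untouched. The same fallback shape in plain C, with try_read_msr() standing in for the safe accessor (here it always simulates the fault):

    #include <stdint.h>
    #include <stdio.h>

    /* stand-in for rdmsrl_safe(): 0 on success, nonzero on a fault */
    static int try_read_msr(uint32_t msr, uint64_t *val)
    {
        (void)msr; (void)val;
        return -1;                       /* pretend the RDMSR faulted */
    }

    static uint64_t capabilities;        /* stands in for x86_pmu.intel_cap */

    int main(void)
    {
        uint64_t old = capabilities;                   /* keep prior value */
        if (try_read_msr(0x345 /* IA32_PERF_CAPABILITIES */, &capabilities))
            capabilities = old;                        /* faulted: restore */
        printf("capabilities=%llu\n", (unsigned long long)capabilities);
        return 0;
    }
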
15616diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
15617index d22d0c4..088eb6f 100644
15618--- a/arch/x86/kernel/cpu/vmware.c
15619+++ b/arch/x86/kernel/cpu/vmware.c
15620@@ -79,7 +79,7 @@ static void __init vmware_platform_setup(void)
15621 VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
15622
15623 if (ebx != UINT_MAX)
15624- x86_platform.calibrate_tsc = vmware_get_tsc_khz;
15625+ *(void **)&x86_platform.calibrate_tsc = vmware_get_tsc_khz;
15626 else
15627 printk(KERN_WARNING
15628 "Failed to get TSC freq from the hypervisor\n");
15629diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
15630index 13ad899..f642b9a 100644
15631--- a/arch/x86/kernel/crash.c
15632+++ b/arch/x86/kernel/crash.c
15633@@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
15634 {
15635 #ifdef CONFIG_X86_32
15636 struct pt_regs fixed_regs;
15637-#endif
15638
15639-#ifdef CONFIG_X86_32
15640- if (!user_mode_vm(regs)) {
15641+ if (!user_mode(regs)) {
15642 crash_fixup_ss_esp(&fixed_regs, regs);
15643 regs = &fixed_regs;
15644 }
15645diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
15646index 37250fe..bf2ec74 100644
15647--- a/arch/x86/kernel/doublefault_32.c
15648+++ b/arch/x86/kernel/doublefault_32.c
15649@@ -11,7 +11,7 @@
15650
15651 #define DOUBLEFAULT_STACKSIZE (1024)
15652 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
15653-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
15654+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
15655
15656 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
15657
15658@@ -21,7 +21,7 @@ static void doublefault_fn(void)
15659 unsigned long gdt, tss;
15660
15661 store_gdt(&gdt_desc);
15662- gdt = gdt_desc.address;
15663+ gdt = (unsigned long)gdt_desc.address;
15664
15665 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
15666
15667@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
15668 /* 0x2 bit is always set */
15669 .flags = X86_EFLAGS_SF | 0x2,
15670 .sp = STACK_START,
15671- .es = __USER_DS,
15672+ .es = __KERNEL_DS,
15673 .cs = __KERNEL_CS,
15674 .ss = __KERNEL_DS,
15675- .ds = __USER_DS,
15676+ .ds = __KERNEL_DS,
15677 .fs = __KERNEL_PERCPU,
15678
15679 .__cr3 = __pa_nodebug(swapper_pg_dir),
15680diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
15681index ae42418b..787c16b 100644
15682--- a/arch/x86/kernel/dumpstack.c
15683+++ b/arch/x86/kernel/dumpstack.c
15684@@ -2,6 +2,9 @@
15685 * Copyright (C) 1991, 1992 Linus Torvalds
15686 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
15687 */
15688+#ifdef CONFIG_GRKERNSEC_HIDESYM
15689+#define __INCLUDED_BY_HIDESYM 1
15690+#endif
15691 #include <linux/kallsyms.h>
15692 #include <linux/kprobes.h>
15693 #include <linux/uaccess.h>
15694@@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
15695 static void
15696 print_ftrace_graph_addr(unsigned long addr, void *data,
15697 const struct stacktrace_ops *ops,
15698- struct thread_info *tinfo, int *graph)
15699+ struct task_struct *task, int *graph)
15700 {
15701- struct task_struct *task;
15702 unsigned long ret_addr;
15703 int index;
15704
15705 if (addr != (unsigned long)return_to_handler)
15706 return;
15707
15708- task = tinfo->task;
15709 index = task->curr_ret_stack;
15710
15711 if (!task->ret_stack || index < *graph)
15712@@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
15713 static inline void
15714 print_ftrace_graph_addr(unsigned long addr, void *data,
15715 const struct stacktrace_ops *ops,
15716- struct thread_info *tinfo, int *graph)
15717+ struct task_struct *task, int *graph)
15718 { }
15719 #endif
15720
15721@@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
15722 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
15723 */
15724
15725-static inline int valid_stack_ptr(struct thread_info *tinfo,
15726- void *p, unsigned int size, void *end)
15727+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
15728 {
15729- void *t = tinfo;
15730 if (end) {
15731 if (p < end && p >= (end-THREAD_SIZE))
15732 return 1;
15733@@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
15734 }
15735
15736 unsigned long
15737-print_context_stack(struct thread_info *tinfo,
15738+print_context_stack(struct task_struct *task, void *stack_start,
15739 unsigned long *stack, unsigned long bp,
15740 const struct stacktrace_ops *ops, void *data,
15741 unsigned long *end, int *graph)
15742 {
15743 struct stack_frame *frame = (struct stack_frame *)bp;
15744
15745- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
15746+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
15747 unsigned long addr;
15748
15749 addr = *stack;
15750@@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
15751 } else {
15752 ops->address(data, addr, 0);
15753 }
15754- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
15755+ print_ftrace_graph_addr(addr, data, ops, task, graph);
15756 }
15757 stack++;
15758 }
15759@@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
15760 EXPORT_SYMBOL_GPL(print_context_stack);
15761
15762 unsigned long
15763-print_context_stack_bp(struct thread_info *tinfo,
15764+print_context_stack_bp(struct task_struct *task, void *stack_start,
15765 unsigned long *stack, unsigned long bp,
15766 const struct stacktrace_ops *ops, void *data,
15767 unsigned long *end, int *graph)
15768@@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
15769 struct stack_frame *frame = (struct stack_frame *)bp;
15770 unsigned long *ret_addr = &frame->return_address;
15771
15772- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
15773+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
15774 unsigned long addr = *ret_addr;
15775
15776 if (!__kernel_text_address(addr))
15777@@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
15778 ops->address(data, addr, 1);
15779 frame = frame->next_frame;
15780 ret_addr = &frame->return_address;
15781- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
15782+ print_ftrace_graph_addr(addr, data, ops, task, graph);
15783 }
15784
15785 return (unsigned long)frame;
15786@@ -189,7 +188,7 @@ void dump_stack(void)
15787
15788 bp = stack_frame(current, NULL);
15789 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
15790- current->pid, current->comm, print_tainted(),
15791+ task_pid_nr(current), current->comm, print_tainted(),
15792 init_utsname()->release,
15793 (int)strcspn(init_utsname()->version, " "),
15794 init_utsname()->version);
15795@@ -225,6 +224,8 @@ unsigned __kprobes long oops_begin(void)
15796 }
15797 EXPORT_SYMBOL_GPL(oops_begin);
15798
15799+extern void gr_handle_kernel_exploit(void);
15800+
15801 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
15802 {
15803 if (regs && kexec_should_crash(current))
15804@@ -246,7 +247,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
15805 panic("Fatal exception in interrupt");
15806 if (panic_on_oops)
15807 panic("Fatal exception");
15808- do_exit(signr);
15809+
15810+ gr_handle_kernel_exploit();
15811+
15812+ do_group_exit(signr);
15813 }
15814
15815 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
15816@@ -274,7 +278,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
15817 print_modules();
15818 show_regs(regs);
15819 #ifdef CONFIG_X86_32
15820- if (user_mode_vm(regs)) {
15821+ if (user_mode(regs)) {
15822 sp = regs->sp;
15823 ss = regs->ss & 0xffff;
15824 } else {
15825@@ -302,7 +306,7 @@ void die(const char *str, struct pt_regs *regs, long err)
15826 unsigned long flags = oops_begin();
15827 int sig = SIGSEGV;
15828
15829- if (!user_mode_vm(regs))
15830+ if (!user_mode(regs))
15831 report_bug(regs->ip, regs);
15832
15833 if (__die(str, regs, err))
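
dump_trace() and the print_context_stack*() walkers now carry the task and the raw stack base instead of a thread_info pointer, so valid_stack_ptr() bounds-checks candidate pointers against the stack region directly. A stand-alone rendering of the check, assuming the 8 KB i386 THREAD_SIZE:

    #include <stdbool.h>
    #include <stdint.h>

    #define THREAD_SIZE 8192UL   /* i386 kernel stacks; x86-64 uses 16 KB */

    /* p must lie inside [stack_start, stack_start + THREAD_SIZE) and
     * leave room to read `size` bytes without running off the stack */
    static bool valid_stack_ptr(const void *stack_start,
                                const void *p, unsigned int size)
    {
        uintptr_t start = (uintptr_t)stack_start;
        uintptr_t addr  = (uintptr_t)p;

        return addr >= start && addr <= start + THREAD_SIZE - size;
    }
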
15834diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
15835index 1038a41..db2c12b 100644
15836--- a/arch/x86/kernel/dumpstack_32.c
15837+++ b/arch/x86/kernel/dumpstack_32.c
15838@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15839 bp = stack_frame(task, regs);
15840
15841 for (;;) {
15842- struct thread_info *context;
15843+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
15844
15845- context = (struct thread_info *)
15846- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
15847- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
15848+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
15849
15850- stack = (unsigned long *)context->previous_esp;
15851- if (!stack)
15852+ if (stack_start == task_stack_page(task))
15853 break;
15854+ stack = *(unsigned long **)stack_start;
15855 if (ops->stack(data, "IRQ") < 0)
15856 break;
15857 touch_nmi_watchdog();
15858@@ -86,7 +84,7 @@ void show_regs(struct pt_regs *regs)
15859 {
15860 int i;
15861
15862- __show_regs(regs, !user_mode_vm(regs));
15863+ __show_regs(regs, !user_mode(regs));
15864
15865 pr_emerg("Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
15866 TASK_COMM_LEN, current->comm, task_pid_nr(current),
15867@@ -95,21 +93,22 @@ void show_regs(struct pt_regs *regs)
15868 * When in-kernel, we also print out the stack and code at the
15869 * time of the fault..
15870 */
15871- if (!user_mode_vm(regs)) {
15872+ if (!user_mode(regs)) {
15873 unsigned int code_prologue = code_bytes * 43 / 64;
15874 unsigned int code_len = code_bytes;
15875 unsigned char c;
15876 u8 *ip;
15877+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
15878
15879 pr_emerg("Stack:\n");
15880 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
15881
15882 pr_emerg("Code:");
15883
15884- ip = (u8 *)regs->ip - code_prologue;
15885+ ip = (u8 *)regs->ip - code_prologue + cs_base;
15886 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
15887 /* try starting at IP */
15888- ip = (u8 *)regs->ip;
15889+ ip = (u8 *)regs->ip + cs_base;
15890 code_len = code_len - code_prologue + 1;
15891 }
15892 for (i = 0; i < code_len; i++, ip++) {
15893@@ -118,7 +117,7 @@ void show_regs(struct pt_regs *regs)
15894 pr_cont(" Bad EIP value.");
15895 break;
15896 }
15897- if (ip == (u8 *)regs->ip)
15898+ if (ip == (u8 *)regs->ip + cs_base)
15899 pr_cont(" <%02x>", c);
15900 else
15901 pr_cont(" %02x", c);
15902@@ -131,6 +130,7 @@ int is_valid_bugaddr(unsigned long ip)
15903 {
15904 unsigned short ud2;
15905
15906+ ip = ktla_ktva(ip);
15907 if (ip < PAGE_OFFSET)
15908 return 0;
15909 if (probe_kernel_address((unsigned short *)ip, ud2))
15910@@ -138,3 +138,15 @@ int is_valid_bugaddr(unsigned long ip)
15911
15912 return ud2 == 0x0b0f;
15913 }
15914+
15915+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15916+void pax_check_alloca(unsigned long size)
15917+{
15918+ unsigned long sp = (unsigned long)&sp, stack_left;
15919+
15920+ /* all kernel stacks are of the same size */
15921+ stack_left = sp & (THREAD_SIZE - 1);
15922+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
15923+}
15924+EXPORT_SYMBOL(pax_check_alloca);
15925+#endif
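
pax_check_alloca() above is not called by hand; under CONFIG_PAX_MEMORY_STACKLEAK the gcc plugin instruments every alloca() and variable-length array with a guard call. Roughly, as a sketch of the generated sequence rather than literal plugin output:

    /* what the STACKLEAK plugin conceptually emits for `char buf[len];` */
    pax_check_alloca(len);                 /* BUG() if it cannot fit */
    char *buf = __builtin_alloca(len);
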
15926diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
15927index b653675..51cc8c0 100644
15928--- a/arch/x86/kernel/dumpstack_64.c
15929+++ b/arch/x86/kernel/dumpstack_64.c
15930@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15931 unsigned long *irq_stack_end =
15932 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
15933 unsigned used = 0;
15934- struct thread_info *tinfo;
15935 int graph = 0;
15936 unsigned long dummy;
15937+ void *stack_start;
15938
15939 if (!task)
15940 task = current;
15941@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15942 * current stack address. If the stacks consist of nested
15943 * exceptions
15944 */
15945- tinfo = task_thread_info(task);
15946 for (;;) {
15947 char *id;
15948 unsigned long *estack_end;
15949+
15950 estack_end = in_exception_stack(cpu, (unsigned long)stack,
15951 &used, &id);
15952
15953@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15954 if (ops->stack(data, id) < 0)
15955 break;
15956
15957- bp = ops->walk_stack(tinfo, stack, bp, ops,
15958+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
15959 data, estack_end, &graph);
15960 ops->stack(data, "<EOE>");
15961 /*
15962@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15963 * second-to-last pointer (index -2 to end) in the
15964 * exception stack:
15965 */
15966+ if ((u16)estack_end[-1] != __KERNEL_DS)
15967+ goto out;
15968 stack = (unsigned long *) estack_end[-2];
15969 continue;
15970 }
15971@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15972 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
15973 if (ops->stack(data, "IRQ") < 0)
15974 break;
15975- bp = ops->walk_stack(tinfo, stack, bp,
15976+ bp = ops->walk_stack(task, irq_stack, stack, bp,
15977 ops, data, irq_stack_end, &graph);
15978 /*
15979 * We link to the next stack (which would be
15980@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15981 /*
15982 * This handles the process stack:
15983 */
15984- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
15985+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
15986+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
15987+out:
15988 put_cpu();
15989 }
15990 EXPORT_SYMBOL(dump_trace);
15991@@ -249,7 +253,7 @@ void show_regs(struct pt_regs *regs)
15992 {
15993 int i;
15994 unsigned long sp;
15995- const int cpu = smp_processor_id();
15996+ const int cpu = raw_smp_processor_id();
15997 struct task_struct *cur = current;
15998
15999 sp = regs->sp;
16000@@ -304,3 +308,50 @@ int is_valid_bugaddr(unsigned long ip)
16001
16002 return ud2 == 0x0b0f;
16003 }
16004+
16005+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16006+void pax_check_alloca(unsigned long size)
16007+{
16008+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
16009+ unsigned cpu, used;
16010+ char *id;
16011+
16012+ /* check the process stack first */
16013+ stack_start = (unsigned long)task_stack_page(current);
16014+ stack_end = stack_start + THREAD_SIZE;
16015+ if (likely(stack_start <= sp && sp < stack_end)) {
16016+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
16017+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
16018+ return;
16019+ }
16020+
16021+ cpu = get_cpu();
16022+
16023+ /* check the irq stacks */
16024+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
16025+ stack_start = stack_end - IRQ_STACK_SIZE;
16026+ if (stack_start <= sp && sp < stack_end) {
16027+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
16028+ put_cpu();
16029+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
16030+ return;
16031+ }
16032+
16033+ /* check the exception stacks */
16034+ used = 0;
16035+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
16036+ stack_start = stack_end - EXCEPTION_STKSZ;
16037+ if (stack_end && stack_start <= sp && sp < stack_end) {
16038+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
16039+ put_cpu();
16040+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
16041+ return;
16042+ }
16043+
16044+ put_cpu();
16045+
16046+ /* unknown stack */
16047+ BUG();
16048+}
16049+EXPORT_SYMBOL(pax_check_alloca);
16050+#endif
16051diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
16052index 9b9f18b..9fcaa04 100644
16053--- a/arch/x86/kernel/early_printk.c
16054+++ b/arch/x86/kernel/early_printk.c
16055@@ -7,6 +7,7 @@
16056 #include <linux/pci_regs.h>
16057 #include <linux/pci_ids.h>
16058 #include <linux/errno.h>
16059+#include <linux/sched.h>
16060 #include <asm/io.h>
16061 #include <asm/processor.h>
16062 #include <asm/fcntl.h>
16063diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
16064index cf8639b..98fcee6 100644
16065--- a/arch/x86/kernel/entry_32.S
16066+++ b/arch/x86/kernel/entry_32.S
16067@@ -177,13 +177,153 @@
16068 /*CFI_REL_OFFSET gs, PT_GS*/
16069 .endm
16070 .macro SET_KERNEL_GS reg
16071+
16072+#ifdef CONFIG_CC_STACKPROTECTOR
16073 movl $(__KERNEL_STACK_CANARY), \reg
16074+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
16075+ movl $(__USER_DS), \reg
16076+#else
16077+ xorl \reg, \reg
16078+#endif
16079+
16080 movl \reg, %gs
16081 .endm
16082
16083 #endif /* CONFIG_X86_32_LAZY_GS */
16084
16085-.macro SAVE_ALL
16086+.macro pax_enter_kernel
16087+#ifdef CONFIG_PAX_KERNEXEC
16088+ call pax_enter_kernel
16089+#endif
16090+.endm
16091+
16092+.macro pax_exit_kernel
16093+#ifdef CONFIG_PAX_KERNEXEC
16094+ call pax_exit_kernel
16095+#endif
16096+.endm
16097+
16098+#ifdef CONFIG_PAX_KERNEXEC
16099+ENTRY(pax_enter_kernel)
16100+#ifdef CONFIG_PARAVIRT
16101+ pushl %eax
16102+ pushl %ecx
16103+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
16104+ mov %eax, %esi
16105+#else
16106+ mov %cr0, %esi
16107+#endif
16108+ bts $16, %esi
16109+ jnc 1f
16110+ mov %cs, %esi
16111+ cmp $__KERNEL_CS, %esi
16112+ jz 3f
16113+ ljmp $__KERNEL_CS, $3f
16114+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
16115+2:
16116+#ifdef CONFIG_PARAVIRT
16117+ mov %esi, %eax
16118+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
16119+#else
16120+ mov %esi, %cr0
16121+#endif
16122+3:
16123+#ifdef CONFIG_PARAVIRT
16124+ popl %ecx
16125+ popl %eax
16126+#endif
16127+ ret
16128+ENDPROC(pax_enter_kernel)
16129+
16130+ENTRY(pax_exit_kernel)
16131+#ifdef CONFIG_PARAVIRT
16132+ pushl %eax
16133+ pushl %ecx
16134+#endif
16135+ mov %cs, %esi
16136+ cmp $__KERNEXEC_KERNEL_CS, %esi
16137+ jnz 2f
16138+#ifdef CONFIG_PARAVIRT
16139+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
16140+ mov %eax, %esi
16141+#else
16142+ mov %cr0, %esi
16143+#endif
16144+ btr $16, %esi
16145+ ljmp $__KERNEL_CS, $1f
16146+1:
16147+#ifdef CONFIG_PARAVIRT
16148+ mov %esi, %eax
16149+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
16150+#else
16151+ mov %esi, %cr0
16152+#endif
16153+2:
16154+#ifdef CONFIG_PARAVIRT
16155+ popl %ecx
16156+ popl %eax
16157+#endif
16158+ ret
16159+ENDPROC(pax_exit_kernel)
16160+#endif
16161+
16162+.macro pax_erase_kstack
16163+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16164+ call pax_erase_kstack
16165+#endif
16166+.endm
16167+
16168+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16169+/*
16170+ * ebp: thread_info
16171+ */
16172+ENTRY(pax_erase_kstack)
16173+ pushl %edi
16174+ pushl %ecx
16175+ pushl %eax
16176+
16177+ mov TI_lowest_stack(%ebp), %edi
16178+ mov $-0xBEEF, %eax
16179+ std
16180+
16181+1: mov %edi, %ecx
16182+ and $THREAD_SIZE_asm - 1, %ecx
16183+ shr $2, %ecx
16184+ repne scasl
16185+ jecxz 2f
16186+
16187+ cmp $2*16, %ecx
16188+ jc 2f
16189+
16190+ mov $2*16, %ecx
16191+ repe scasl
16192+ jecxz 2f
16193+ jne 1b
16194+
16195+2: cld
16196+ mov %esp, %ecx
16197+ sub %edi, %ecx
16198+
16199+ cmp $THREAD_SIZE_asm, %ecx
16200+ jb 3f
16201+ ud2
16202+3:
16203+
16204+ shr $2, %ecx
16205+ rep stosl
16206+
16207+ mov TI_task_thread_sp0(%ebp), %edi
16208+ sub $128, %edi
16209+ mov %edi, TI_lowest_stack(%ebp)
16210+
16211+ popl %eax
16212+ popl %ecx
16213+ popl %edi
16214+ ret
16215+ENDPROC(pax_erase_kstack)
16216+#endif
16217+
16218+.macro __SAVE_ALL _DS
16219 cld
16220 PUSH_GS
16221 pushl_cfi %fs
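
The pax_erase_kstack routine above is easier to follow as C. A rough rendering of the algorithm — the asm additionally re-arms TI_lowest_stack to thread.sp0 - 128 for the next syscall and BUGs if the fill would exceed THREAD_SIZE, both omitted here:

    #define POISON      0xffff4111UL      /* the asm's $-0xBEEF */
    #define RUN_LENGTH  (2 * 16)          /* poison words that must match */

    static void erase_kstack(unsigned long *stack_bottom,  /* lowest addr */
                             unsigned long *lowest,        /* deepest sp seen */
                             unsigned long *sp)            /* current sp */
    {
        unsigned long *p = lowest;
        unsigned int run = 0;

        /* scan down from the deepest point reached: a run of 32
         * consecutive poison words marks the still-clean region,
         * guarding against live data that happens to equal POISON */
        while (p > stack_bottom && run < RUN_LENGTH) {
            if (*--p == POISON)
                run++;
            else
                run = 0;
        }

        /* re-poison everything from that boundary up to the live sp,
         * so stale syscall data cannot leak into later stack reads */
        while (p < sp)
            *p++ = POISON;
    }
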
16222@@ -206,7 +346,7 @@
16223 CFI_REL_OFFSET ecx, 0
16224 pushl_cfi %ebx
16225 CFI_REL_OFFSET ebx, 0
16226- movl $(__USER_DS), %edx
16227+ movl $\_DS, %edx
16228 movl %edx, %ds
16229 movl %edx, %es
16230 movl $(__KERNEL_PERCPU), %edx
16231@@ -214,6 +354,15 @@
16232 SET_KERNEL_GS %edx
16233 .endm
16234
16235+.macro SAVE_ALL
16236+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
16237+ __SAVE_ALL __KERNEL_DS
16238+ pax_enter_kernel
16239+#else
16240+ __SAVE_ALL __USER_DS
16241+#endif
16242+.endm
16243+
16244 .macro RESTORE_INT_REGS
16245 popl_cfi %ebx
16246 CFI_RESTORE ebx
16247@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
16248 popfl_cfi
16249 jmp syscall_exit
16250 CFI_ENDPROC
16251-END(ret_from_fork)
16252+ENDPROC(ret_from_fork)
16253
16254 ENTRY(ret_from_kernel_thread)
16255 CFI_STARTPROC
16256@@ -344,7 +493,15 @@ ret_from_intr:
16257 andl $SEGMENT_RPL_MASK, %eax
16258 #endif
16259 cmpl $USER_RPL, %eax
16260+
16261+#ifdef CONFIG_PAX_KERNEXEC
16262+ jae resume_userspace
16263+
16264+ pax_exit_kernel
16265+ jmp resume_kernel
16266+#else
16267 jb resume_kernel # not returning to v8086 or userspace
16268+#endif
16269
16270 ENTRY(resume_userspace)
16271 LOCKDEP_SYS_EXIT
16272@@ -356,8 +513,8 @@ ENTRY(resume_userspace)
16273 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
16274 # int/exception return?
16275 jne work_pending
16276- jmp restore_all
16277-END(ret_from_exception)
16278+ jmp restore_all_pax
16279+ENDPROC(ret_from_exception)
16280
16281 #ifdef CONFIG_PREEMPT
16282 ENTRY(resume_kernel)
16283@@ -372,7 +529,7 @@ need_resched:
16284 jz restore_all
16285 call preempt_schedule_irq
16286 jmp need_resched
16287-END(resume_kernel)
16288+ENDPROC(resume_kernel)
16289 #endif
16290 CFI_ENDPROC
16291 /*
16292@@ -406,30 +563,45 @@ sysenter_past_esp:
16293 /*CFI_REL_OFFSET cs, 0*/
16294 /*
16295 * Push current_thread_info()->sysenter_return to the stack.
16296- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
16297- * pushed above; +8 corresponds to copy_thread's esp0 setting.
16298 */
16299- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
16300+ pushl_cfi $0
16301 CFI_REL_OFFSET eip, 0
16302
16303 pushl_cfi %eax
16304 SAVE_ALL
16305+ GET_THREAD_INFO(%ebp)
16306+ movl TI_sysenter_return(%ebp),%ebp
16307+ movl %ebp,PT_EIP(%esp)
16308 ENABLE_INTERRUPTS(CLBR_NONE)
16309
16310 /*
16311 * Load the potential sixth argument from user stack.
16312 * Careful about security.
16313 */
16314+ movl PT_OLDESP(%esp),%ebp
16315+
16316+#ifdef CONFIG_PAX_MEMORY_UDEREF
16317+ mov PT_OLDSS(%esp),%ds
16318+1: movl %ds:(%ebp),%ebp
16319+ push %ss
16320+ pop %ds
16321+#else
16322 cmpl $__PAGE_OFFSET-3,%ebp
16323 jae syscall_fault
16324 ASM_STAC
16325 1: movl (%ebp),%ebp
16326 ASM_CLAC
16327+#endif
16328+
16329 movl %ebp,PT_EBP(%esp)
16330 _ASM_EXTABLE(1b,syscall_fault)
16331
16332 GET_THREAD_INFO(%ebp)
16333
16334+#ifdef CONFIG_PAX_RANDKSTACK
16335+ pax_erase_kstack
16336+#endif
16337+
16338 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
16339 jnz sysenter_audit
16340 sysenter_do_call:
16341@@ -444,12 +616,24 @@ sysenter_do_call:
16342 testl $_TIF_ALLWORK_MASK, %ecx
16343 jne sysexit_audit
16344 sysenter_exit:
16345+
16346+#ifdef CONFIG_PAX_RANDKSTACK
16347+ pushl_cfi %eax
16348+ movl %esp, %eax
16349+ call pax_randomize_kstack
16350+ popl_cfi %eax
16351+#endif
16352+
16353+ pax_erase_kstack
16354+
16355 /* if something modifies registers it must also disable sysexit */
16356 movl PT_EIP(%esp), %edx
16357 movl PT_OLDESP(%esp), %ecx
16358 xorl %ebp,%ebp
16359 TRACE_IRQS_ON
16360 1: mov PT_FS(%esp), %fs
16361+2: mov PT_DS(%esp), %ds
16362+3: mov PT_ES(%esp), %es
16363 PTGS_TO_GS
16364 ENABLE_INTERRUPTS_SYSEXIT
16365
16366@@ -466,6 +650,9 @@ sysenter_audit:
16367 movl %eax,%edx /* 2nd arg: syscall number */
16368 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
16369 call __audit_syscall_entry
16370+
16371+ pax_erase_kstack
16372+
16373 pushl_cfi %ebx
16374 movl PT_EAX(%esp),%eax /* reload syscall number */
16375 jmp sysenter_do_call
16376@@ -491,10 +678,16 @@ sysexit_audit:
16377
16378 CFI_ENDPROC
16379 .pushsection .fixup,"ax"
16380-2: movl $0,PT_FS(%esp)
16381+4: movl $0,PT_FS(%esp)
16382+ jmp 1b
16383+5: movl $0,PT_DS(%esp)
16384+ jmp 1b
16385+6: movl $0,PT_ES(%esp)
16386 jmp 1b
16387 .popsection
16388- _ASM_EXTABLE(1b,2b)
16389+ _ASM_EXTABLE(1b,4b)
16390+ _ASM_EXTABLE(2b,5b)
16391+ _ASM_EXTABLE(3b,6b)
16392 PTGS_TO_GS_EX
16393 ENDPROC(ia32_sysenter_target)
16394
16395@@ -509,6 +702,11 @@ ENTRY(system_call)
16396 pushl_cfi %eax # save orig_eax
16397 SAVE_ALL
16398 GET_THREAD_INFO(%ebp)
16399+
16400+#ifdef CONFIG_PAX_RANDKSTACK
16401+ pax_erase_kstack
16402+#endif
16403+
16404 # system call tracing in operation / emulation
16405 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
16406 jnz syscall_trace_entry
16407@@ -527,6 +725,15 @@ syscall_exit:
16408 testl $_TIF_ALLWORK_MASK, %ecx # current->work
16409 jne syscall_exit_work
16410
16411+restore_all_pax:
16412+
16413+#ifdef CONFIG_PAX_RANDKSTACK
16414+ movl %esp, %eax
16415+ call pax_randomize_kstack
16416+#endif
16417+
16418+ pax_erase_kstack
16419+
16420 restore_all:
16421 TRACE_IRQS_IRET
16422 restore_all_notrace:
16423@@ -583,14 +790,34 @@ ldt_ss:
16424 * compensating for the offset by changing to the ESPFIX segment with
16425 * a base address that matches for the difference.
16426 */
16427-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
16428+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
16429 mov %esp, %edx /* load kernel esp */
16430 mov PT_OLDESP(%esp), %eax /* load userspace esp */
16431 mov %dx, %ax /* eax: new kernel esp */
16432 sub %eax, %edx /* offset (low word is 0) */
16433+#ifdef CONFIG_SMP
16434+ movl PER_CPU_VAR(cpu_number), %ebx
16435+ shll $PAGE_SHIFT_asm, %ebx
16436+ addl $cpu_gdt_table, %ebx
16437+#else
16438+ movl $cpu_gdt_table, %ebx
16439+#endif
16440 shr $16, %edx
16441- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
16442- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
16443+
16444+#ifdef CONFIG_PAX_KERNEXEC
16445+ mov %cr0, %esi
16446+ btr $16, %esi
16447+ mov %esi, %cr0
16448+#endif
16449+
16450+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
16451+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
16452+
16453+#ifdef CONFIG_PAX_KERNEXEC
16454+ bts $16, %esi
16455+ mov %esi, %cr0
16456+#endif
16457+
16458 pushl_cfi $__ESPFIX_SS
16459 pushl_cfi %eax /* new kernel esp */
16460 /* Disable interrupts, but do not irqtrace this section: we
16461@@ -619,20 +846,18 @@ work_resched:
16462 movl TI_flags(%ebp), %ecx
16463 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
16464 # than syscall tracing?
16465- jz restore_all
16466+ jz restore_all_pax
16467 testb $_TIF_NEED_RESCHED, %cl
16468 jnz work_resched
16469
16470 work_notifysig: # deal with pending signals and
16471 # notify-resume requests
16472+ movl %esp, %eax
16473 #ifdef CONFIG_VM86
16474 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
16475- movl %esp, %eax
16476 jne work_notifysig_v86 # returning to kernel-space or
16477 # vm86-space
16478 1:
16479-#else
16480- movl %esp, %eax
16481 #endif
16482 TRACE_IRQS_ON
16483 ENABLE_INTERRUPTS(CLBR_NONE)
16484@@ -653,7 +878,7 @@ work_notifysig_v86:
16485 movl %eax, %esp
16486 jmp 1b
16487 #endif
16488-END(work_pending)
16489+ENDPROC(work_pending)
16490
16491 # perform syscall exit tracing
16492 ALIGN
16493@@ -661,11 +886,14 @@ syscall_trace_entry:
16494 movl $-ENOSYS,PT_EAX(%esp)
16495 movl %esp, %eax
16496 call syscall_trace_enter
16497+
16498+ pax_erase_kstack
16499+
16500 /* What it returned is what we'll actually use. */
16501 cmpl $(NR_syscalls), %eax
16502 jnae syscall_call
16503 jmp syscall_exit
16504-END(syscall_trace_entry)
16505+ENDPROC(syscall_trace_entry)
16506
16507 # perform syscall exit tracing
16508 ALIGN
16509@@ -678,21 +906,25 @@ syscall_exit_work:
16510 movl %esp, %eax
16511 call syscall_trace_leave
16512 jmp resume_userspace
16513-END(syscall_exit_work)
16514+ENDPROC(syscall_exit_work)
16515 CFI_ENDPROC
16516
16517 RING0_INT_FRAME # can't unwind into user space anyway
16518 syscall_fault:
16519+#ifdef CONFIG_PAX_MEMORY_UDEREF
16520+ push %ss
16521+ pop %ds
16522+#endif
16523 ASM_CLAC
16524 GET_THREAD_INFO(%ebp)
16525 movl $-EFAULT,PT_EAX(%esp)
16526 jmp resume_userspace
16527-END(syscall_fault)
16528+ENDPROC(syscall_fault)
16529
16530 syscall_badsys:
16531 movl $-ENOSYS,PT_EAX(%esp)
16532 jmp resume_userspace
16533-END(syscall_badsys)
16534+ENDPROC(syscall_badsys)
16535 CFI_ENDPROC
16536 /*
16537 * End of kprobes section
16538@@ -763,6 +995,36 @@ ENTRY(ptregs_clone)
16539 CFI_ENDPROC
16540 ENDPROC(ptregs_clone)
16541
16542+ ALIGN;
16543+ENTRY(kernel_execve)
16544+ CFI_STARTPROC
16545+ pushl_cfi %ebp
16546+ sub $PT_OLDSS+4,%esp
16547+ pushl_cfi %edi
16548+ pushl_cfi %ecx
16549+ pushl_cfi %eax
16550+ lea 3*4(%esp),%edi
16551+ mov $PT_OLDSS/4+1,%ecx
16552+ xorl %eax,%eax
16553+ rep stosl
16554+ popl_cfi %eax
16555+ popl_cfi %ecx
16556+ popl_cfi %edi
16557+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
16558+ pushl_cfi %esp
16559+ call sys_execve
16560+ add $4,%esp
16561+ CFI_ADJUST_CFA_OFFSET -4
16562+ GET_THREAD_INFO(%ebp)
16563+ test %eax,%eax
16564+ jz syscall_exit
16565+ add $PT_OLDSS+4,%esp
16566+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
16567+ popl_cfi %ebp
16568+ ret
16569+ CFI_ENDPROC
16570+ENDPROC(kernel_execve)
16571+
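The kernel_execve stub added above hand-builds a zeroed pt_regs frame on the stack (the rep stosl over PT_OLDSS/4+1 dwords), sets only IF in the saved flags, passes the frame's address to sys_execve, and on success (zero in %eax) jumps to syscall_exit so the fresh image is entered through the normal syscall return path. Roughly, in C (sys_execve_ptregs is a hypothetical name standing in for this tree's pt_regs-taking execve entry):

	/* illustrative only; the real frame lives on the assembly stack */
	static int kernel_execve_sketch(struct pt_regs *regs)
	{
		memset(regs, 0, sizeof(*regs));	/* fake, fully zeroed user frame */
		regs->flags = X86_EFLAGS_IF;	/* interrupts on in the new task */
		return sys_execve_ptregs(regs);	/* hypothetical helper name */
	}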
16572 .macro FIXUP_ESPFIX_STACK
16573 /*
16574  * Switch back from the ESPFIX stack to the normal zero-based stack
16575@@ -772,8 +1034,15 @@ ENDPROC(ptregs_clone)
16576 * normal stack and adjusts ESP with the matching offset.
16577 */
16578 /* fixup the stack */
16579- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
16580- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
16581+#ifdef CONFIG_SMP
16582+ movl PER_CPU_VAR(cpu_number), %ebx
16583+ shll $PAGE_SHIFT_asm, %ebx
16584+ addl $cpu_gdt_table, %ebx
16585+#else
16586+ movl $cpu_gdt_table, %ebx
16587+#endif
16588+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
16589+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
16590 shl $16, %eax
16591 addl %esp, %eax /* the adjusted stack pointer */
16592 pushl_cfi $__KERNEL_DS
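This is the inverse of the descriptor write shown earlier: the macro reads base bits 16..31 back out of the ESPFIX entry and adds them to the 16-bit stack pointer to recover the flat kernel esp. As a sketch, under the same byte view of the GDT entry:

	/* sketch: rebuild the flat kernel esp from the ESPFIX segment base */
	static unsigned int espfix_unfix_sketch(const unsigned char *desc,
						unsigned int esp)
	{
		unsigned int base_hi = (desc[7] << 24) | (desc[4] << 16);

		return base_hi + esp;	/* low word of the base is always zero */
	}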
16593@@ -826,7 +1095,7 @@ vector=vector+1
16594 .endr
16595 2: jmp common_interrupt
16596 .endr
16597-END(irq_entries_start)
16598+ENDPROC(irq_entries_start)
16599
16600 .previous
16601 END(interrupt)
16602@@ -877,7 +1146,7 @@ ENTRY(coprocessor_error)
16603 pushl_cfi $do_coprocessor_error
16604 jmp error_code
16605 CFI_ENDPROC
16606-END(coprocessor_error)
16607+ENDPROC(coprocessor_error)
16608
16609 ENTRY(simd_coprocessor_error)
16610 RING0_INT_FRAME
16611@@ -899,7 +1168,7 @@ ENTRY(simd_coprocessor_error)
16612 #endif
16613 jmp error_code
16614 CFI_ENDPROC
16615-END(simd_coprocessor_error)
16616+ENDPROC(simd_coprocessor_error)
16617
16618 ENTRY(device_not_available)
16619 RING0_INT_FRAME
16620@@ -908,18 +1177,18 @@ ENTRY(device_not_available)
16621 pushl_cfi $do_device_not_available
16622 jmp error_code
16623 CFI_ENDPROC
16624-END(device_not_available)
16625+ENDPROC(device_not_available)
16626
16627 #ifdef CONFIG_PARAVIRT
16628 ENTRY(native_iret)
16629 iret
16630 _ASM_EXTABLE(native_iret, iret_exc)
16631-END(native_iret)
16632+ENDPROC(native_iret)
16633
16634 ENTRY(native_irq_enable_sysexit)
16635 sti
16636 sysexit
16637-END(native_irq_enable_sysexit)
16638+ENDPROC(native_irq_enable_sysexit)
16639 #endif
16640
16641 ENTRY(overflow)
16642@@ -929,7 +1198,7 @@ ENTRY(overflow)
16643 pushl_cfi $do_overflow
16644 jmp error_code
16645 CFI_ENDPROC
16646-END(overflow)
16647+ENDPROC(overflow)
16648
16649 ENTRY(bounds)
16650 RING0_INT_FRAME
16651@@ -938,7 +1207,7 @@ ENTRY(bounds)
16652 pushl_cfi $do_bounds
16653 jmp error_code
16654 CFI_ENDPROC
16655-END(bounds)
16656+ENDPROC(bounds)
16657
16658 ENTRY(invalid_op)
16659 RING0_INT_FRAME
16660@@ -947,7 +1216,7 @@ ENTRY(invalid_op)
16661 pushl_cfi $do_invalid_op
16662 jmp error_code
16663 CFI_ENDPROC
16664-END(invalid_op)
16665+ENDPROC(invalid_op)
16666
16667 ENTRY(coprocessor_segment_overrun)
16668 RING0_INT_FRAME
16669@@ -956,7 +1225,7 @@ ENTRY(coprocessor_segment_overrun)
16670 pushl_cfi $do_coprocessor_segment_overrun
16671 jmp error_code
16672 CFI_ENDPROC
16673-END(coprocessor_segment_overrun)
16674+ENDPROC(coprocessor_segment_overrun)
16675
16676 ENTRY(invalid_TSS)
16677 RING0_EC_FRAME
16678@@ -964,7 +1233,7 @@ ENTRY(invalid_TSS)
16679 pushl_cfi $do_invalid_TSS
16680 jmp error_code
16681 CFI_ENDPROC
16682-END(invalid_TSS)
16683+ENDPROC(invalid_TSS)
16684
16685 ENTRY(segment_not_present)
16686 RING0_EC_FRAME
16687@@ -972,7 +1241,7 @@ ENTRY(segment_not_present)
16688 pushl_cfi $do_segment_not_present
16689 jmp error_code
16690 CFI_ENDPROC
16691-END(segment_not_present)
16692+ENDPROC(segment_not_present)
16693
16694 ENTRY(stack_segment)
16695 RING0_EC_FRAME
16696@@ -980,7 +1249,7 @@ ENTRY(stack_segment)
16697 pushl_cfi $do_stack_segment
16698 jmp error_code
16699 CFI_ENDPROC
16700-END(stack_segment)
16701+ENDPROC(stack_segment)
16702
16703 ENTRY(alignment_check)
16704 RING0_EC_FRAME
16705@@ -988,7 +1257,7 @@ ENTRY(alignment_check)
16706 pushl_cfi $do_alignment_check
16707 jmp error_code
16708 CFI_ENDPROC
16709-END(alignment_check)
16710+ENDPROC(alignment_check)
16711
16712 ENTRY(divide_error)
16713 RING0_INT_FRAME
16714@@ -997,7 +1266,7 @@ ENTRY(divide_error)
16715 pushl_cfi $do_divide_error
16716 jmp error_code
16717 CFI_ENDPROC
16718-END(divide_error)
16719+ENDPROC(divide_error)
16720
16721 #ifdef CONFIG_X86_MCE
16722 ENTRY(machine_check)
16723@@ -1007,7 +1276,7 @@ ENTRY(machine_check)
16724 pushl_cfi machine_check_vector
16725 jmp error_code
16726 CFI_ENDPROC
16727-END(machine_check)
16728+ENDPROC(machine_check)
16729 #endif
16730
16731 ENTRY(spurious_interrupt_bug)
16732@@ -1017,7 +1286,7 @@ ENTRY(spurious_interrupt_bug)
16733 pushl_cfi $do_spurious_interrupt_bug
16734 jmp error_code
16735 CFI_ENDPROC
16736-END(spurious_interrupt_bug)
16737+ENDPROC(spurious_interrupt_bug)
16738 /*
16739 * End of kprobes section
16740 */
16741@@ -1120,7 +1389,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
16742
16743 ENTRY(mcount)
16744 ret
16745-END(mcount)
16746+ENDPROC(mcount)
16747
16748 ENTRY(ftrace_caller)
16749 cmpl $0, function_trace_stop
16750@@ -1153,7 +1422,7 @@ ftrace_graph_call:
16751 .globl ftrace_stub
16752 ftrace_stub:
16753 ret
16754-END(ftrace_caller)
16755+ENDPROC(ftrace_caller)
16756
16757 ENTRY(ftrace_regs_caller)
16758 pushf /* push flags before compare (in cs location) */
16759@@ -1254,7 +1523,7 @@ trace:
16760 popl %ecx
16761 popl %eax
16762 jmp ftrace_stub
16763-END(mcount)
16764+ENDPROC(mcount)
16765 #endif /* CONFIG_DYNAMIC_FTRACE */
16766 #endif /* CONFIG_FUNCTION_TRACER */
16767
16768@@ -1272,7 +1541,7 @@ ENTRY(ftrace_graph_caller)
16769 popl %ecx
16770 popl %eax
16771 ret
16772-END(ftrace_graph_caller)
16773+ENDPROC(ftrace_graph_caller)
16774
16775 .globl return_to_handler
16776 return_to_handler:
16777@@ -1328,15 +1597,18 @@ error_code:
16778 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
16779 REG_TO_PTGS %ecx
16780 SET_KERNEL_GS %ecx
16781- movl $(__USER_DS), %ecx
16782+ movl $(__KERNEL_DS), %ecx
16783 movl %ecx, %ds
16784 movl %ecx, %es
16785+
16786+ pax_enter_kernel
16787+
16788 TRACE_IRQS_OFF
16789 movl %esp,%eax # pt_regs pointer
16790 call *%edi
16791 jmp ret_from_exception
16792 CFI_ENDPROC
16793-END(page_fault)
16794+ENDPROC(page_fault)
16795
16796 /*
16797 * Debug traps and NMI can happen at the one SYSENTER instruction
16798@@ -1379,7 +1651,7 @@ debug_stack_correct:
16799 call do_debug
16800 jmp ret_from_exception
16801 CFI_ENDPROC
16802-END(debug)
16803+ENDPROC(debug)
16804
16805 /*
16806 * NMI is doubly nasty. It can happen _while_ we're handling
16807@@ -1417,6 +1689,9 @@ nmi_stack_correct:
16808 xorl %edx,%edx # zero error code
16809 movl %esp,%eax # pt_regs pointer
16810 call do_nmi
16811+
16812+ pax_exit_kernel
16813+
16814 jmp restore_all_notrace
16815 CFI_ENDPROC
16816
16817@@ -1453,12 +1728,15 @@ nmi_espfix_stack:
16818 FIXUP_ESPFIX_STACK # %eax == %esp
16819 xorl %edx,%edx # zero error code
16820 call do_nmi
16821+
16822+ pax_exit_kernel
16823+
16824 RESTORE_REGS
16825 lss 12+4(%esp), %esp # back to espfix stack
16826 CFI_ADJUST_CFA_OFFSET -24
16827 jmp irq_return
16828 CFI_ENDPROC
16829-END(nmi)
16830+ENDPROC(nmi)
16831
16832 ENTRY(int3)
16833 RING0_INT_FRAME
16834@@ -1471,14 +1749,14 @@ ENTRY(int3)
16835 call do_int3
16836 jmp ret_from_exception
16837 CFI_ENDPROC
16838-END(int3)
16839+ENDPROC(int3)
16840
16841 ENTRY(general_protection)
16842 RING0_EC_FRAME
16843 pushl_cfi $do_general_protection
16844 jmp error_code
16845 CFI_ENDPROC
16846-END(general_protection)
16847+ENDPROC(general_protection)
16848
16849 #ifdef CONFIG_KVM_GUEST
16850 ENTRY(async_page_fault)
16851@@ -1487,7 +1765,7 @@ ENTRY(async_page_fault)
16852 pushl_cfi $do_async_page_fault
16853 jmp error_code
16854 CFI_ENDPROC
16855-END(async_page_fault)
16856+ENDPROC(async_page_fault)
16857 #endif
16858
16859 /*
16860diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
16861index 1328fe4..cb03298 100644
16862--- a/arch/x86/kernel/entry_64.S
16863+++ b/arch/x86/kernel/entry_64.S
16864@@ -59,6 +59,8 @@
16865 #include <asm/rcu.h>
16866 #include <asm/smap.h>
16867 #include <linux/err.h>
16868+#include <asm/pgtable.h>
16869+#include <asm/alternative-asm.h>
16870
16871 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
16872 #include <linux/elf-em.h>
16873@@ -80,8 +82,9 @@
16874 #ifdef CONFIG_DYNAMIC_FTRACE
16875
16876 ENTRY(function_hook)
16877+ pax_force_retaddr
16878 retq
16879-END(function_hook)
16880+ENDPROC(function_hook)
16881
16882 /* skip is set if stack has been adjusted */
16883 .macro ftrace_caller_setup skip=0
16884@@ -122,8 +125,9 @@ GLOBAL(ftrace_graph_call)
16885 #endif
16886
16887 GLOBAL(ftrace_stub)
16888+ pax_force_retaddr
16889 retq
16890-END(ftrace_caller)
16891+ENDPROC(ftrace_caller)
16892
16893 ENTRY(ftrace_regs_caller)
16894 /* Save the current flags before compare (in SS location)*/
16895@@ -191,7 +195,7 @@ ftrace_restore_flags:
16896 popfq
16897 jmp ftrace_stub
16898
16899-END(ftrace_regs_caller)
16900+ENDPROC(ftrace_regs_caller)
16901
16902
16903 #else /* ! CONFIG_DYNAMIC_FTRACE */
16904@@ -212,6 +216,7 @@ ENTRY(function_hook)
16905 #endif
16906
16907 GLOBAL(ftrace_stub)
16908+ pax_force_retaddr
16909 retq
16910
16911 trace:
16912@@ -225,12 +230,13 @@ trace:
16913 #endif
16914 subq $MCOUNT_INSN_SIZE, %rdi
16915
16916+ pax_force_fptr ftrace_trace_function
16917 call *ftrace_trace_function
16918
16919 MCOUNT_RESTORE_FRAME
16920
16921 jmp ftrace_stub
16922-END(function_hook)
16923+ENDPROC(function_hook)
16924 #endif /* CONFIG_DYNAMIC_FTRACE */
16925 #endif /* CONFIG_FUNCTION_TRACER */
16926
16927@@ -252,8 +258,9 @@ ENTRY(ftrace_graph_caller)
16928
16929 MCOUNT_RESTORE_FRAME
16930
16931+ pax_force_retaddr
16932 retq
16933-END(ftrace_graph_caller)
16934+ENDPROC(ftrace_graph_caller)
16935
16936 GLOBAL(return_to_handler)
16937 subq $24, %rsp
16938@@ -269,7 +276,9 @@ GLOBAL(return_to_handler)
16939 movq 8(%rsp), %rdx
16940 movq (%rsp), %rax
16941 addq $24, %rsp
16942+ pax_force_fptr %rdi
16943 jmp *%rdi
16944+ENDPROC(return_to_handler)
16945 #endif
16946
16947
16948@@ -284,6 +293,273 @@ ENTRY(native_usergs_sysret64)
16949 ENDPROC(native_usergs_sysret64)
16950 #endif /* CONFIG_PARAVIRT */
16951
16952+ .macro ljmpq sel, off
16953+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined(CONFIG_MATOM)
16954+ .byte 0x48; ljmp *1234f(%rip)
16955+ .pushsection .rodata
16956+ .align 16
16957+ 1234: .quad \off; .word \sel
16958+ .popsection
16959+#else
16960+ pushq $\sel
16961+ pushq $\off
16962+ lretq
16963+#endif
16964+ .endm
16965+
16966+ .macro pax_enter_kernel
16967+ pax_set_fptr_mask
16968+#ifdef CONFIG_PAX_KERNEXEC
16969+ call pax_enter_kernel
16970+#endif
16971+ .endm
16972+
16973+ .macro pax_exit_kernel
16974+#ifdef CONFIG_PAX_KERNEXEC
16975+ call pax_exit_kernel
16976+#endif
16977+ .endm
16978+
16979+#ifdef CONFIG_PAX_KERNEXEC
16980+ENTRY(pax_enter_kernel)
16981+ pushq %rdi
16982+
16983+#ifdef CONFIG_PARAVIRT
16984+ PV_SAVE_REGS(CLBR_RDI)
16985+#endif
16986+
16987+ GET_CR0_INTO_RDI
16988+ bts $16,%rdi
16989+ jnc 3f
16990+ mov %cs,%edi
16991+ cmp $__KERNEL_CS,%edi
16992+ jnz 2f
16993+1:
16994+
16995+#ifdef CONFIG_PARAVIRT
16996+ PV_RESTORE_REGS(CLBR_RDI)
16997+#endif
16998+
16999+ popq %rdi
17000+ pax_force_retaddr
17001+ retq
17002+
17003+2: ljmpq __KERNEL_CS,1f
17004+3: ljmpq __KERNEXEC_KERNEL_CS,4f
17005+4: SET_RDI_INTO_CR0
17006+ jmp 1b
17007+ENDPROC(pax_enter_kernel)
17008+
17009+ENTRY(pax_exit_kernel)
17010+ pushq %rdi
17011+
17012+#ifdef CONFIG_PARAVIRT
17013+ PV_SAVE_REGS(CLBR_RDI)
17014+#endif
17015+
17016+ mov %cs,%rdi
17017+ cmp $__KERNEXEC_KERNEL_CS,%edi
17018+ jz 2f
17019+1:
17020+
17021+#ifdef CONFIG_PARAVIRT
17022+	PV_RESTORE_REGS(CLBR_RDI)
17023+#endif
17024+
17025+ popq %rdi
17026+ pax_force_retaddr
17027+ retq
17028+
17029+2: GET_CR0_INTO_RDI
17030+ btr $16,%rdi
17031+ ljmpq __KERNEL_CS,3f
17032+3: SET_RDI_INTO_CR0
17033+ jmp 1b
17034+ENDPROC(pax_exit_kernel)
17035+#endif
17036+
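pax_enter_kernel/pax_exit_kernel keep CR0.WP (bit 16) set while the kernel runs and use the alternate __KERNEXEC_KERNEL_CS segment to flip it safely. Stripped of the paravirt and ljmpq plumbing, the entry fast path is a conditional CR0 read-modify-write; a sketch for the native case:

	/* sketch: ensure write protection is enabled while in the kernel */
	static inline void kernexec_enter_sketch(void)
	{
		unsigned long cr0 = native_read_cr0();

		if (!(cr0 & X86_CR0_WP))
			native_write_cr0(cr0 | X86_CR0_WP);
	}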
17037+ .macro pax_enter_kernel_user
17038+ pax_set_fptr_mask
17039+#ifdef CONFIG_PAX_MEMORY_UDEREF
17040+ call pax_enter_kernel_user
17041+#endif
17042+ .endm
17043+
17044+ .macro pax_exit_kernel_user
17045+#ifdef CONFIG_PAX_MEMORY_UDEREF
17046+ call pax_exit_kernel_user
17047+#endif
17048+#ifdef CONFIG_PAX_RANDKSTACK
17049+ pushq %rax
17050+ call pax_randomize_kstack
17051+ popq %rax
17052+#endif
17053+ .endm
17054+
17055+#ifdef CONFIG_PAX_MEMORY_UDEREF
17056+ENTRY(pax_enter_kernel_user)
17057+ pushq %rdi
17058+ pushq %rbx
17059+
17060+#ifdef CONFIG_PARAVIRT
17061+ PV_SAVE_REGS(CLBR_RDI)
17062+#endif
17063+
17064+ GET_CR3_INTO_RDI
17065+ mov %rdi,%rbx
17066+ add $__START_KERNEL_map,%rbx
17067+ sub phys_base(%rip),%rbx
17068+
17069+#ifdef CONFIG_PARAVIRT
17070+ pushq %rdi
17071+ cmpl $0, pv_info+PARAVIRT_enabled
17072+ jz 1f
17073+ i = 0
17074+ .rept USER_PGD_PTRS
17075+ mov i*8(%rbx),%rsi
17076+ mov $0,%sil
17077+ lea i*8(%rbx),%rdi
17078+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
17079+ i = i + 1
17080+ .endr
17081+ jmp 2f
17082+1:
17083+#endif
17084+
17085+ i = 0
17086+ .rept USER_PGD_PTRS
17087+ movb $0,i*8(%rbx)
17088+ i = i + 1
17089+ .endr
17090+
17091+#ifdef CONFIG_PARAVIRT
17092+2: popq %rdi
17093+#endif
17094+ SET_RDI_INTO_CR3
17095+
17096+#ifdef CONFIG_PAX_KERNEXEC
17097+ GET_CR0_INTO_RDI
17098+ bts $16,%rdi
17099+ SET_RDI_INTO_CR0
17100+#endif
17101+
17102+#ifdef CONFIG_PARAVIRT
17103+ PV_RESTORE_REGS(CLBR_RDI)
17104+#endif
17105+
17106+ popq %rbx
17107+ popq %rdi
17108+ pax_force_retaddr
17109+ retq
17110+ENDPROC(pax_enter_kernel_user)
17111+
17112+ENTRY(pax_exit_kernel_user)
17113+ push %rdi
17114+
17115+#ifdef CONFIG_PARAVIRT
17116+ pushq %rbx
17117+ PV_SAVE_REGS(CLBR_RDI)
17118+#endif
17119+
17120+#ifdef CONFIG_PAX_KERNEXEC
17121+ GET_CR0_INTO_RDI
17122+ btr $16,%rdi
17123+ SET_RDI_INTO_CR0
17124+#endif
17125+
17126+ GET_CR3_INTO_RDI
17127+ add $__START_KERNEL_map,%rdi
17128+ sub phys_base(%rip),%rdi
17129+
17130+#ifdef CONFIG_PARAVIRT
17131+ cmpl $0, pv_info+PARAVIRT_enabled
17132+ jz 1f
17133+ mov %rdi,%rbx
17134+ i = 0
17135+ .rept USER_PGD_PTRS
17136+ mov i*8(%rbx),%rsi
17137+ mov $0x67,%sil
17138+ lea i*8(%rbx),%rdi
17139+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
17140+ i = i + 1
17141+ .endr
17142+ jmp 2f
17143+1:
17144+#endif
17145+
17146+ i = 0
17147+ .rept USER_PGD_PTRS
17148+ movb $0x67,i*8(%rdi)
17149+ i = i + 1
17150+ .endr
17151+
17152+#ifdef CONFIG_PARAVIRT
17153+2: PV_RESTORE_REGS(CLBR_RDI)
17154+ popq %rbx
17155+#endif
17156+
17157+ popq %rdi
17158+ pax_force_retaddr
17159+ retq
17160+ENDPROC(pax_exit_kernel_user)
17161+#endif
17162+
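pax_enter_kernel_user/pax_exit_kernel_user implement the UDEREF address-space flip: on kernel entry the user half of the per-CPU PGD has its low flag byte cleared, so user mappings are non-present while kernel code runs; on the way back to user mode the byte is restored to 0x67 (_PAGE_TABLE: present, rw, user, accessed, dirty). Ignoring the paravirt branch, both .rept loops reduce to the following sketch, with the PGD treated as raw 64-bit entries:

	/* flags = 0 on kernel entry, 0x67 on return to user mode */
	static void uderef_flip_sketch(unsigned long *pgd, unsigned char flags)
	{
		unsigned int i;

		for (i = 0; i < USER_PGD_PTRS; i++)	/* user half of the PGD */
			*(unsigned char *)&pgd[i] = flags;
	}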
17163+.macro pax_erase_kstack
17164+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
17165+ call pax_erase_kstack
17166+#endif
17167+.endm
17168+
17169+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
17170+ENTRY(pax_erase_kstack)
17171+ pushq %rdi
17172+ pushq %rcx
17173+ pushq %rax
17174+ pushq %r11
17175+
17176+ GET_THREAD_INFO(%r11)
17177+ mov TI_lowest_stack(%r11), %rdi
17178+ mov $-0xBEEF, %rax
17179+ std
17180+
17181+1: mov %edi, %ecx
17182+ and $THREAD_SIZE_asm - 1, %ecx
17183+ shr $3, %ecx
17184+ repne scasq
17185+ jecxz 2f
17186+
17187+ cmp $2*8, %ecx
17188+ jc 2f
17189+
17190+ mov $2*8, %ecx
17191+ repe scasq
17192+ jecxz 2f
17193+ jne 1b
17194+
17195+2: cld
17196+ mov %esp, %ecx
17197+ sub %edi, %ecx
17198+
17199+ cmp $THREAD_SIZE_asm, %rcx
17200+ jb 3f
17201+ ud2
17202+3:
17203+
17204+ shr $3, %ecx
17205+ rep stosq
17206+
17207+ mov TI_task_thread_sp0(%r11), %rdi
17208+ sub $256, %rdi
17209+ mov %rdi, TI_lowest_stack(%r11)
17210+
17211+ popq %r11
17212+ popq %rax
17213+ popq %rcx
17214+ popq %rdi
17215+ pax_force_retaddr
17216+ ret
17217+ENDPROC(pax_erase_kstack)
17218+#endif
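pax_erase_kstack scans downward from the recorded low-water mark until it finds a run of sixteen consecutive poison words (marking the untouched part of the stack), overwrites everything between that point and the live frame with the poison value, and resets the watermark just below the top of the stack. A C rendering of the same algorithm (a sketch that assumes the thread_info fields this patch uses and elides the register save/restore):

	static void erase_kstack_sketch(struct thread_info *ti, unsigned long *sp)
	{
		const unsigned long poison = (unsigned long)-0xBEEFL;
		unsigned long *p = (unsigned long *)ti->lowest_stack;
		unsigned int run = 0;

		/* walk down until 16 poison words in a row mark virgin stack */
		while (((unsigned long)p & (THREAD_SIZE - 1)) && run < 16) {
			run = (*p == poison) ? run + 1 : 0;
			p--;
		}

		/* wipe everything between there and the current stack pointer */
		while (p < sp)
			*p++ = poison;

		ti->lowest_stack = ti->task->thread.sp0 - 256;	/* reset mark */
	}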
17219
17220 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
17221 #ifdef CONFIG_TRACE_IRQFLAGS
17222@@ -375,8 +651,8 @@ ENDPROC(native_usergs_sysret64)
17223 .endm
17224
17225 .macro UNFAKE_STACK_FRAME
17226- addq $8*6, %rsp
17227- CFI_ADJUST_CFA_OFFSET -(6*8)
17228+ addq $8*6 + ARG_SKIP, %rsp
17229+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
17230 .endm
17231
17232 /*
17233@@ -463,7 +739,7 @@ ENDPROC(native_usergs_sysret64)
17234 movq %rsp, %rsi
17235
17236 leaq -RBP(%rsp),%rdi /* arg1 for handler */
17237- testl $3, CS-RBP(%rsi)
17238+ testb $3, CS-RBP(%rsi)
17239 je 1f
17240 SWAPGS
17241 /*
17242@@ -498,9 +774,10 @@ ENTRY(save_rest)
17243 movq_cfi r15, R15+16
17244 movq %r11, 8(%rsp) /* return address */
17245 FIXUP_TOP_OF_STACK %r11, 16
17246+ pax_force_retaddr
17247 ret
17248 CFI_ENDPROC
17249-END(save_rest)
17250+ENDPROC(save_rest)
17251
17252 /* save complete stack frame */
17253 .pushsection .kprobes.text, "ax"
17254@@ -529,9 +806,10 @@ ENTRY(save_paranoid)
17255 js 1f /* negative -> in kernel */
17256 SWAPGS
17257 xorl %ebx,%ebx
17258-1: ret
17259+1: pax_force_retaddr_bts
17260+ ret
17261 CFI_ENDPROC
17262-END(save_paranoid)
17263+ENDPROC(save_paranoid)
17264 .popsection
17265
17266 /*
17267@@ -553,7 +831,7 @@ ENTRY(ret_from_fork)
17268
17269 RESTORE_REST
17270
17271- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
17272+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
17273 jz 1f
17274
17275 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
17276@@ -571,7 +849,7 @@ ENTRY(ret_from_fork)
17277 RESTORE_REST
17278 jmp int_ret_from_sys_call
17279 CFI_ENDPROC
17280-END(ret_from_fork)
17281+ENDPROC(ret_from_fork)
17282
17283 /*
17284 * System call entry. Up to 6 arguments in registers are supported.
17285@@ -608,7 +886,7 @@ END(ret_from_fork)
17286 ENTRY(system_call)
17287 CFI_STARTPROC simple
17288 CFI_SIGNAL_FRAME
17289- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
17290+ CFI_DEF_CFA rsp,0
17291 CFI_REGISTER rip,rcx
17292 /*CFI_REGISTER rflags,r11*/
17293 SWAPGS_UNSAFE_STACK
17294@@ -621,16 +899,23 @@ GLOBAL(system_call_after_swapgs)
17295
17296 movq %rsp,PER_CPU_VAR(old_rsp)
17297 movq PER_CPU_VAR(kernel_stack),%rsp
17298+ SAVE_ARGS 8*6,0
17299+ pax_enter_kernel_user
17300+
17301+#ifdef CONFIG_PAX_RANDKSTACK
17302+ pax_erase_kstack
17303+#endif
17304+
17305 /*
17306 * No need to follow this irqs off/on section - it's straight
17307 * and short:
17308 */
17309 ENABLE_INTERRUPTS(CLBR_NONE)
17310- SAVE_ARGS 8,0
17311 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
17312 movq %rcx,RIP-ARGOFFSET(%rsp)
17313 CFI_REL_OFFSET rip,RIP-ARGOFFSET
17314- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
17315+ GET_THREAD_INFO(%rcx)
17316+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
17317 jnz tracesys
17318 system_call_fastpath:
17319 #if __SYSCALL_MASK == ~0
17320@@ -640,7 +925,7 @@ system_call_fastpath:
17321 cmpl $__NR_syscall_max,%eax
17322 #endif
17323 ja badsys
17324- movq %r10,%rcx
17325+ movq R10-ARGOFFSET(%rsp),%rcx
17326 call *sys_call_table(,%rax,8) # XXX: rip relative
17327 movq %rax,RAX-ARGOFFSET(%rsp)
17328 /*
17329@@ -654,10 +939,13 @@ sysret_check:
17330 LOCKDEP_SYS_EXIT
17331 DISABLE_INTERRUPTS(CLBR_NONE)
17332 TRACE_IRQS_OFF
17333- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
17334+ GET_THREAD_INFO(%rcx)
17335+ movl TI_flags(%rcx),%edx
17336 andl %edi,%edx
17337 jnz sysret_careful
17338 CFI_REMEMBER_STATE
17339+ pax_exit_kernel_user
17340+ pax_erase_kstack
17341 /*
17342 * sysretq will re-enable interrupts:
17343 */
17344@@ -709,14 +997,18 @@ badsys:
17345 * jump back to the normal fast path.
17346 */
17347 auditsys:
17348- movq %r10,%r9 /* 6th arg: 4th syscall arg */
17349+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
17350 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
17351 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
17352 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
17353 movq %rax,%rsi /* 2nd arg: syscall number */
17354 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
17355 call __audit_syscall_entry
17356+
17357+ pax_erase_kstack
17358+
17359 LOAD_ARGS 0 /* reload call-clobbered registers */
17360+ pax_set_fptr_mask
17361 jmp system_call_fastpath
17362
17363 /*
17364@@ -737,7 +1029,7 @@ sysret_audit:
17365 /* Do syscall tracing */
17366 tracesys:
17367 #ifdef CONFIG_AUDITSYSCALL
17368- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
17369+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
17370 jz auditsys
17371 #endif
17372 SAVE_REST
17373@@ -745,12 +1037,16 @@ tracesys:
17374 FIXUP_TOP_OF_STACK %rdi
17375 movq %rsp,%rdi
17376 call syscall_trace_enter
17377+
17378+ pax_erase_kstack
17379+
17380 /*
17381 * Reload arg registers from stack in case ptrace changed them.
17382 * We don't reload %rax because syscall_trace_enter() returned
17383 * the value it wants us to use in the table lookup.
17384 */
17385 LOAD_ARGS ARGOFFSET, 1
17386+ pax_set_fptr_mask
17387 RESTORE_REST
17388 #if __SYSCALL_MASK == ~0
17389 cmpq $__NR_syscall_max,%rax
17390@@ -759,7 +1055,7 @@ tracesys:
17391 cmpl $__NR_syscall_max,%eax
17392 #endif
17393 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
17394- movq %r10,%rcx /* fixup for C */
17395+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
17396 call *sys_call_table(,%rax,8)
17397 movq %rax,RAX-ARGOFFSET(%rsp)
17398 /* Use IRET because user could have changed frame */
17399@@ -780,7 +1076,9 @@ GLOBAL(int_with_check)
17400 andl %edi,%edx
17401 jnz int_careful
17402 andl $~TS_COMPAT,TI_status(%rcx)
17403- jmp retint_swapgs
17404+ pax_exit_kernel_user
17405+ pax_erase_kstack
17406+ jmp retint_swapgs_pax
17407
17408 /* Either reschedule or signal or syscall exit tracking needed. */
17409 /* First do a reschedule test. */
17410@@ -826,7 +1124,7 @@ int_restore_rest:
17411 TRACE_IRQS_OFF
17412 jmp int_with_check
17413 CFI_ENDPROC
17414-END(system_call)
17415+ENDPROC(system_call)
17416
17417 /*
17418 * Certain special system calls that need to save a complete full stack frame.
17419@@ -842,7 +1140,7 @@ ENTRY(\label)
17420 call \func
17421 jmp ptregscall_common
17422 CFI_ENDPROC
17423-END(\label)
17424+ENDPROC(\label)
17425 .endm
17426
17427 PTREGSCALL stub_clone, sys_clone, %r8
17428@@ -860,9 +1158,10 @@ ENTRY(ptregscall_common)
17429 movq_cfi_restore R12+8, r12
17430 movq_cfi_restore RBP+8, rbp
17431 movq_cfi_restore RBX+8, rbx
17432+ pax_force_retaddr
17433 ret $REST_SKIP /* pop extended registers */
17434 CFI_ENDPROC
17435-END(ptregscall_common)
17436+ENDPROC(ptregscall_common)
17437
17438 ENTRY(stub_execve)
17439 CFI_STARTPROC
17440@@ -876,7 +1175,7 @@ ENTRY(stub_execve)
17441 RESTORE_REST
17442 jmp int_ret_from_sys_call
17443 CFI_ENDPROC
17444-END(stub_execve)
17445+ENDPROC(stub_execve)
17446
17447 /*
17448 * sigreturn is special because it needs to restore all registers on return.
17449@@ -894,7 +1193,7 @@ ENTRY(stub_rt_sigreturn)
17450 RESTORE_REST
17451 jmp int_ret_from_sys_call
17452 CFI_ENDPROC
17453-END(stub_rt_sigreturn)
17454+ENDPROC(stub_rt_sigreturn)
17455
17456 #ifdef CONFIG_X86_X32_ABI
17457 PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx
17458@@ -962,7 +1261,7 @@ vector=vector+1
17459 2: jmp common_interrupt
17460 .endr
17461 CFI_ENDPROC
17462-END(irq_entries_start)
17463+ENDPROC(irq_entries_start)
17464
17465 .previous
17466 END(interrupt)
17467@@ -982,6 +1281,16 @@ END(interrupt)
17468 subq $ORIG_RAX-RBP, %rsp
17469 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
17470 SAVE_ARGS_IRQ
17471+#ifdef CONFIG_PAX_MEMORY_UDEREF
17472+ testb $3, CS(%rdi)
17473+ jnz 1f
17474+ pax_enter_kernel
17475+ jmp 2f
17476+1: pax_enter_kernel_user
17477+2:
17478+#else
17479+ pax_enter_kernel
17480+#endif
17481 call \func
17482 .endm
17483
17484@@ -1014,7 +1323,7 @@ ret_from_intr:
17485
17486 exit_intr:
17487 GET_THREAD_INFO(%rcx)
17488- testl $3,CS-ARGOFFSET(%rsp)
17489+ testb $3,CS-ARGOFFSET(%rsp)
17490 je retint_kernel
17491
17492 /* Interrupt came from user space */
17493@@ -1036,12 +1345,16 @@ retint_swapgs: /* return to user-space */
17494 * The iretq could re-enable interrupts:
17495 */
17496 DISABLE_INTERRUPTS(CLBR_ANY)
17497+ pax_exit_kernel_user
17498+retint_swapgs_pax:
17499 TRACE_IRQS_IRETQ
17500 SWAPGS
17501 jmp restore_args
17502
17503 retint_restore_args: /* return to kernel space */
17504 DISABLE_INTERRUPTS(CLBR_ANY)
17505+ pax_exit_kernel
17506+ pax_force_retaddr (RIP-ARGOFFSET)
17507 /*
17508 * The iretq could re-enable interrupts:
17509 */
17510@@ -1124,7 +1437,7 @@ ENTRY(retint_kernel)
17511 #endif
17512
17513 CFI_ENDPROC
17514-END(common_interrupt)
17515+ENDPROC(common_interrupt)
17516 /*
17517 * End of kprobes section
17518 */
17519@@ -1142,7 +1455,7 @@ ENTRY(\sym)
17520 interrupt \do_sym
17521 jmp ret_from_intr
17522 CFI_ENDPROC
17523-END(\sym)
17524+ENDPROC(\sym)
17525 .endm
17526
17527 #ifdef CONFIG_SMP
17528@@ -1198,12 +1511,22 @@ ENTRY(\sym)
17529 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
17530 call error_entry
17531 DEFAULT_FRAME 0
17532+#ifdef CONFIG_PAX_MEMORY_UDEREF
17533+ testb $3, CS(%rsp)
17534+ jnz 1f
17535+ pax_enter_kernel
17536+ jmp 2f
17537+1: pax_enter_kernel_user
17538+2:
17539+#else
17540+ pax_enter_kernel
17541+#endif
17542 movq %rsp,%rdi /* pt_regs pointer */
17543 xorl %esi,%esi /* no error code */
17544 call \do_sym
17545 jmp error_exit /* %ebx: no swapgs flag */
17546 CFI_ENDPROC
17547-END(\sym)
17548+ENDPROC(\sym)
17549 .endm
17550
17551 .macro paranoidzeroentry sym do_sym
17552@@ -1216,15 +1539,25 @@ ENTRY(\sym)
17553 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
17554 call save_paranoid
17555 TRACE_IRQS_OFF
17556+#ifdef CONFIG_PAX_MEMORY_UDEREF
17557+ testb $3, CS(%rsp)
17558+ jnz 1f
17559+ pax_enter_kernel
17560+ jmp 2f
17561+1: pax_enter_kernel_user
17562+2:
17563+#else
17564+ pax_enter_kernel
17565+#endif
17566 movq %rsp,%rdi /* pt_regs pointer */
17567 xorl %esi,%esi /* no error code */
17568 call \do_sym
17569 jmp paranoid_exit /* %ebx: no swapgs flag */
17570 CFI_ENDPROC
17571-END(\sym)
17572+ENDPROC(\sym)
17573 .endm
17574
17575-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
17576+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
17577 .macro paranoidzeroentry_ist sym do_sym ist
17578 ENTRY(\sym)
17579 INTR_FRAME
17580@@ -1235,14 +1568,30 @@ ENTRY(\sym)
17581 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
17582 call save_paranoid
17583 TRACE_IRQS_OFF_DEBUG
17584+#ifdef CONFIG_PAX_MEMORY_UDEREF
17585+ testb $3, CS(%rsp)
17586+ jnz 1f
17587+ pax_enter_kernel
17588+ jmp 2f
17589+1: pax_enter_kernel_user
17590+2:
17591+#else
17592+ pax_enter_kernel
17593+#endif
17594 movq %rsp,%rdi /* pt_regs pointer */
17595 xorl %esi,%esi /* no error code */
17596+#ifdef CONFIG_SMP
17597+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
17598+ lea init_tss(%r12), %r12
17599+#else
17600+ lea init_tss(%rip), %r12
17601+#endif
17602 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
17603 call \do_sym
17604 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
17605 jmp paranoid_exit /* %ebx: no swapgs flag */
17606 CFI_ENDPROC
17607-END(\sym)
17608+ENDPROC(\sym)
17609 .endm
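With the PER_CPU_VAR addressing gone, INIT_TSS_IST expects the current CPU's TSS base in %r12, which the macro computes by scaling cpu_number by the TSS size; the subq/addq pair then temporarily retargets the chosen IST slot past the exception stack for the duration of the handler. In C terms (TSS_size is the asm-offsets name for the size of struct tss_struct; a sketch):

	/* sketch: &init_tss[cpu], then borrow its IST slot for the handler */
	static void borrow_ist_sketch(unsigned int cpu, unsigned int ist)
	{
		struct tss_struct *tss = (struct tss_struct *)
					 ((char *)init_tss + cpu * TSS_size);

		tss->x86_tss.ist[ist - 1] -= EXCEPTION_STKSZ;	/* before */
		/* ... exception handler runs here ... */
		tss->x86_tss.ist[ist - 1] += EXCEPTION_STKSZ;	/* after  */
	}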
17610
17611 .macro errorentry sym do_sym
17612@@ -1254,13 +1603,23 @@ ENTRY(\sym)
17613 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
17614 call error_entry
17615 DEFAULT_FRAME 0
17616+#ifdef CONFIG_PAX_MEMORY_UDEREF
17617+ testb $3, CS(%rsp)
17618+ jnz 1f
17619+ pax_enter_kernel
17620+ jmp 2f
17621+1: pax_enter_kernel_user
17622+2:
17623+#else
17624+ pax_enter_kernel
17625+#endif
17626 movq %rsp,%rdi /* pt_regs pointer */
17627 movq ORIG_RAX(%rsp),%rsi /* get error code */
17628 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
17629 call \do_sym
17630 jmp error_exit /* %ebx: no swapgs flag */
17631 CFI_ENDPROC
17632-END(\sym)
17633+ENDPROC(\sym)
17634 .endm
17635
17636 /* error code is on the stack already */
17637@@ -1274,13 +1633,23 @@ ENTRY(\sym)
17638 call save_paranoid
17639 DEFAULT_FRAME 0
17640 TRACE_IRQS_OFF
17641+#ifdef CONFIG_PAX_MEMORY_UDEREF
17642+ testb $3, CS(%rsp)
17643+ jnz 1f
17644+ pax_enter_kernel
17645+ jmp 2f
17646+1: pax_enter_kernel_user
17647+2:
17648+#else
17649+ pax_enter_kernel
17650+#endif
17651 movq %rsp,%rdi /* pt_regs pointer */
17652 movq ORIG_RAX(%rsp),%rsi /* get error code */
17653 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
17654 call \do_sym
17655 jmp paranoid_exit /* %ebx: no swapgs flag */
17656 CFI_ENDPROC
17657-END(\sym)
17658+ENDPROC(\sym)
17659 .endm
17660
17661 zeroentry divide_error do_divide_error
17662@@ -1310,9 +1679,10 @@ gs_change:
17663 2: mfence /* workaround */
17664 SWAPGS
17665 popfq_cfi
17666+ pax_force_retaddr
17667 ret
17668 CFI_ENDPROC
17669-END(native_load_gs_index)
17670+ENDPROC(native_load_gs_index)
17671
17672 _ASM_EXTABLE(gs_change,bad_gs)
17673 .section .fixup,"ax"
17674@@ -1340,9 +1710,10 @@ ENTRY(call_softirq)
17675 CFI_DEF_CFA_REGISTER rsp
17676 CFI_ADJUST_CFA_OFFSET -8
17677 decl PER_CPU_VAR(irq_count)
17678+ pax_force_retaddr
17679 ret
17680 CFI_ENDPROC
17681-END(call_softirq)
17682+ENDPROC(call_softirq)
17683
17684 #ifdef CONFIG_XEN
17685 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
17686@@ -1380,7 +1751,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
17687 decl PER_CPU_VAR(irq_count)
17688 jmp error_exit
17689 CFI_ENDPROC
17690-END(xen_do_hypervisor_callback)
17691+ENDPROC(xen_do_hypervisor_callback)
17692
17693 /*
17694 * Hypervisor uses this for application faults while it executes.
17695@@ -1439,7 +1810,7 @@ ENTRY(xen_failsafe_callback)
17696 SAVE_ALL
17697 jmp error_exit
17698 CFI_ENDPROC
17699-END(xen_failsafe_callback)
17700+ENDPROC(xen_failsafe_callback)
17701
17702 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
17703 xen_hvm_callback_vector xen_evtchn_do_upcall
17704@@ -1488,16 +1859,31 @@ ENTRY(paranoid_exit)
17705 TRACE_IRQS_OFF_DEBUG
17706 testl %ebx,%ebx /* swapgs needed? */
17707 jnz paranoid_restore
17708- testl $3,CS(%rsp)
17709+ testb $3,CS(%rsp)
17710 jnz paranoid_userspace
17711+#ifdef CONFIG_PAX_MEMORY_UDEREF
17712+ pax_exit_kernel
17713+ TRACE_IRQS_IRETQ 0
17714+ SWAPGS_UNSAFE_STACK
17715+ RESTORE_ALL 8
17716+ pax_force_retaddr_bts
17717+ jmp irq_return
17718+#endif
17719 paranoid_swapgs:
17720+#ifdef CONFIG_PAX_MEMORY_UDEREF
17721+ pax_exit_kernel_user
17722+#else
17723+ pax_exit_kernel
17724+#endif
17725 TRACE_IRQS_IRETQ 0
17726 SWAPGS_UNSAFE_STACK
17727 RESTORE_ALL 8
17728 jmp irq_return
17729 paranoid_restore:
17730+ pax_exit_kernel
17731 TRACE_IRQS_IRETQ_DEBUG 0
17732 RESTORE_ALL 8
17733+ pax_force_retaddr_bts
17734 jmp irq_return
17735 paranoid_userspace:
17736 GET_THREAD_INFO(%rcx)
17737@@ -1526,7 +1912,7 @@ paranoid_schedule:
17738 TRACE_IRQS_OFF
17739 jmp paranoid_userspace
17740 CFI_ENDPROC
17741-END(paranoid_exit)
17742+ENDPROC(paranoid_exit)
17743
17744 /*
17745 * Exception entry point. This expects an error code/orig_rax on the stack.
17746@@ -1553,12 +1939,13 @@ ENTRY(error_entry)
17747 movq_cfi r14, R14+8
17748 movq_cfi r15, R15+8
17749 xorl %ebx,%ebx
17750- testl $3,CS+8(%rsp)
17751+ testb $3,CS+8(%rsp)
17752 je error_kernelspace
17753 error_swapgs:
17754 SWAPGS
17755 error_sti:
17756 TRACE_IRQS_OFF
17757+ pax_force_retaddr_bts
17758 ret
17759
17760 /*
17761@@ -1585,7 +1972,7 @@ bstep_iret:
17762 movq %rcx,RIP+8(%rsp)
17763 jmp error_swapgs
17764 CFI_ENDPROC
17765-END(error_entry)
17766+ENDPROC(error_entry)
17767
17768
17769 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
17770@@ -1605,7 +1992,7 @@ ENTRY(error_exit)
17771 jnz retint_careful
17772 jmp retint_swapgs
17773 CFI_ENDPROC
17774-END(error_exit)
17775+ENDPROC(error_exit)
17776
17777 /*
17778 * Test if a given stack is an NMI stack or not.
17779@@ -1663,9 +2050,11 @@ ENTRY(nmi)
17780 * If %cs was not the kernel segment, then the NMI triggered in user
17781 * space, which means it is definitely not nested.
17782 */
17783+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
17784+ je 1f
17785 cmpl $__KERNEL_CS, 16(%rsp)
17786 jne first_nmi
17787-
17788+1:
17789 /*
17790 * Check the special variable on the stack to see if NMIs are
17791 * executing.
17792@@ -1824,6 +2213,17 @@ end_repeat_nmi:
17793 */
17794 movq %cr2, %r12
17795
17796+#ifdef CONFIG_PAX_MEMORY_UDEREF
17797+ testb $3, CS(%rsp)
17798+ jnz 1f
17799+ pax_enter_kernel
17800+ jmp 2f
17801+1: pax_enter_kernel_user
17802+2:
17803+#else
17804+ pax_enter_kernel
17805+#endif
17806+
17807 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
17808 movq %rsp,%rdi
17809 movq $-1,%rsi
17810@@ -1839,21 +2239,32 @@ end_repeat_nmi:
17811 testl %ebx,%ebx /* swapgs needed? */
17812 jnz nmi_restore
17813 nmi_swapgs:
17814+#ifdef CONFIG_PAX_MEMORY_UDEREF
17815+ pax_exit_kernel_user
17816+#else
17817+ pax_exit_kernel
17818+#endif
17819 SWAPGS_UNSAFE_STACK
17820+ RESTORE_ALL 8
17821+ /* Clear the NMI executing stack variable */
17822+ movq $0, 10*8(%rsp)
17823+ jmp irq_return
17824 nmi_restore:
17825+ pax_exit_kernel
17826 RESTORE_ALL 8
17827+ pax_force_retaddr_bts
17828 /* Clear the NMI executing stack variable */
17829 movq $0, 10*8(%rsp)
17830 jmp irq_return
17831 CFI_ENDPROC
17832-END(nmi)
17833+ENDPROC(nmi)
17834
17835 ENTRY(ignore_sysret)
17836 CFI_STARTPROC
17837 mov $-ENOSYS,%eax
17838 sysret
17839 CFI_ENDPROC
17840-END(ignore_sysret)
17841+ENDPROC(ignore_sysret)
17842
17843 /*
17844 * End of kprobes section
17845diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
17846index 1d41402..af9a46a 100644
17847--- a/arch/x86/kernel/ftrace.c
17848+++ b/arch/x86/kernel/ftrace.c
17849@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
17850 {
17851 unsigned char replaced[MCOUNT_INSN_SIZE];
17852
17853+ ip = ktla_ktva(ip);
17854+
17855 /*
17856  * disappear and change, so we need to protect against faulting
17857 * disappear and change, we need to protect against faulting
17858@@ -227,7 +229,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
17859 unsigned char old[MCOUNT_INSN_SIZE], *new;
17860 int ret;
17861
17862- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
17863+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
17864 new = ftrace_call_replace(ip, (unsigned long)func);
17865
17866 /* See comment above by declaration of modifying_ftrace_code */
17867@@ -238,7 +240,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
17868 /* Also update the regs callback function */
17869 if (!ret) {
17870 ip = (unsigned long)(&ftrace_regs_call);
17871- memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
17872+ memcpy(old, ktla_ktva((void *)&ftrace_regs_call), MCOUNT_INSN_SIZE);
17873 new = ftrace_call_replace(ip, (unsigned long)func);
17874 ret = ftrace_modify_code(ip, old, new);
17875 }
17876@@ -279,7 +281,7 @@ static int ftrace_write(unsigned long ip, const char *val, int size)
17877 * kernel identity mapping to modify code.
17878 */
17879 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
17880- ip = (unsigned long)__va(__pa(ip));
17881+ ip = (unsigned long)__va(__pa(ktla_ktva(ip)));
17882
17883 return probe_kernel_write((void *)ip, val, size);
17884 }
17885@@ -289,7 +291,7 @@ static int add_break(unsigned long ip, const char *old)
17886 unsigned char replaced[MCOUNT_INSN_SIZE];
17887 unsigned char brk = BREAKPOINT_INSTRUCTION;
17888
17889- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
17890+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
17891 return -EFAULT;
17892
17893 /* Make sure it is what we expect it to be */
17894@@ -637,7 +639,7 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
17895 return ret;
17896
17897 fail_update:
17898- probe_kernel_write((void *)ip, &old_code[0], 1);
17899+ probe_kernel_write((void *)ktla_ktva(ip), &old_code[0], 1);
17900 goto out;
17901 }
17902
17903@@ -670,6 +672,8 @@ static int ftrace_mod_jmp(unsigned long ip,
17904 {
17905 unsigned char code[MCOUNT_INSN_SIZE];
17906
17907+ ip = ktla_ktva(ip);
17908+
17909 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
17910 return -EFAULT;
17911
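Every hunk in this file applies the same pattern: translate the instruction address through ktla_ktva() (a PaX macro that becomes the identity mapping when KERNEXEC is off) before reading or patching kernel text, because KERNEXEC separates the executable mapping from the writable one. The pattern, as a sketch:

	/* sketch: patch kernel text through its writable alias */
	static int patch_text_sketch(unsigned long ip, const void *code,
				     size_t len)
	{
		ip = ktla_ktva(ip);		/* writable alias of the text */
		return probe_kernel_write((void *)ip, code, len);
	}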
17912diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
17913index c18f59d..69ddbc4 100644
17914--- a/arch/x86/kernel/head32.c
17915+++ b/arch/x86/kernel/head32.c
17916@@ -18,20 +18,20 @@
17917 #include <asm/io_apic.h>
17918 #include <asm/bios_ebda.h>
17919 #include <asm/tlbflush.h>
17920+#include <asm/boot.h>
17921
17922 static void __init i386_default_early_setup(void)
17923 {
17924 /* Initialize 32bit specific setup functions */
17925- x86_init.resources.reserve_resources = i386_reserve_resources;
17926- x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc;
17927+ *(void **)&x86_init.resources.reserve_resources = i386_reserve_resources;
17928+ *(void **)&x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc;
17929
17930 reserve_ebda_region();
17931 }
17932
17933 void __init i386_start_kernel(void)
17934 {
17935- memblock_reserve(__pa_symbol(&_text),
17936- __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
17937+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
17938
17939 #ifdef CONFIG_BLK_DEV_INITRD
17940 /* Reserve INITRD */
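The *(void **)& casts above are needed because this patch constifies x86_init elsewhere; writing through a cast sidesteps the compiler's const checking (the backing page must of course still be writable at this point in boot). The shape of the trick, with stand-in types:

	/* sketch: assigning into a const ops table through a cast */
	struct ops_sketch { void (*reserve)(void); };	/* stand-in type */
	static const struct ops_sketch ops;	/* read-only by declaration */

	static void hook_sketch(void (*fn)(void))
	{
		/* defeats only the compiler's const check */
		*(void **)&ops.reserve = fn;
	}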
17941diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
17942index 4dac2f6..bc6a335 100644
17943--- a/arch/x86/kernel/head_32.S
17944+++ b/arch/x86/kernel/head_32.S
17945@@ -26,6 +26,12 @@
17946 /* Physical address */
17947 #define pa(X) ((X) - __PAGE_OFFSET)
17948
17949+#ifdef CONFIG_PAX_KERNEXEC
17950+#define ta(X) (X)
17951+#else
17952+#define ta(X) ((X) - __PAGE_OFFSET)
17953+#endif
17954+
17955 /*
17956 * References to members of the new_cpu_data structure.
17957 */
17958@@ -55,11 +61,7 @@
17959  * and smaller than max_low_pfn, otherwise it will waste some page table entries
17960 */
17961
17962-#if PTRS_PER_PMD > 1
17963-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
17964-#else
17965-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
17966-#endif
17967+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
17968
17969 /* Number of possible pages in the lowmem region */
17970 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
17971@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
17972 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
17973
17974 /*
17975+ * Real beginning of normal "text" segment
17976+ */
17977+ENTRY(stext)
17978+ENTRY(_stext)
17979+
17980+/*
17981 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
17982 * %esi points to the real-mode code as a 32-bit pointer.
17983 * CS and DS must be 4 GB flat segments, but we don't depend on
17984@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
17985 * can.
17986 */
17987 __HEAD
17988+
17989+#ifdef CONFIG_PAX_KERNEXEC
17990+ jmp startup_32
17991+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
17992+.fill PAGE_SIZE-5,1,0xcc
17993+#endif
17994+
17995 ENTRY(startup_32)
17996 movl pa(stack_start),%ecx
17997
17998@@ -106,6 +121,59 @@ ENTRY(startup_32)
17999 2:
18000 leal -__PAGE_OFFSET(%ecx),%esp
18001
18002+#ifdef CONFIG_SMP
18003+ movl $pa(cpu_gdt_table),%edi
18004+ movl $__per_cpu_load,%eax
18005+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
18006+ rorl $16,%eax
18007+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
18008+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
18009+ movl $__per_cpu_end - 1,%eax
18010+ subl $__per_cpu_start,%eax
18011+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
18012+#endif
18013+
18014+#ifdef CONFIG_PAX_MEMORY_UDEREF
18015+ movl $NR_CPUS,%ecx
18016+ movl $pa(cpu_gdt_table),%edi
18017+1:
18018+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
18019+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
18020+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
18021+ addl $PAGE_SIZE_asm,%edi
18022+ loop 1b
18023+#endif
18024+
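The constants stored by that UDEREF loop are the high dwords of segment descriptors whose limit ends at __PAGE_OFFSET: limit bits 19:16 sit at bits 19:16 of the high dword, so with __PAGE_OFFSET = 0xc0000000 the expression produces a 0xbffff-page limit. The user segments (access bytes 0xfb/0xf3) are ordinary expand-up segments capped just below the kernel, while the kernel data segment (access byte 0x97) is expand-down, covering only the addresses above that limit. Worked out in C (a sketch of the constant, not a kernel API):

	/* (((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700, unpacked */
	#define UDEREF_DESC_HI(access) \
		((((0xc0000000u - 1) & 0xf0000000) >> 12) /* limit 19:16 = 0xb */ \
		 | 0x00c00000				  /* G=1, D/B=1       */ \
		 | ((access) << 8))
	/* UDEREF_DESC_HI(0x97): kernel DS, expand-down, 0xc0000000 and up   */
	/* UDEREF_DESC_HI(0xfb): user CS, expand-up, capped below the kernel */
	/* UDEREF_DESC_HI(0xf3): user DS, expand-up, capped below the kernel */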
18025+#ifdef CONFIG_PAX_KERNEXEC
18026+ movl $pa(boot_gdt),%edi
18027+ movl $__LOAD_PHYSICAL_ADDR,%eax
18028+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
18029+ rorl $16,%eax
18030+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
18031+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
18032+ rorl $16,%eax
18033+
18034+ ljmp $(__BOOT_CS),$1f
18035+1:
18036+
18037+ movl $NR_CPUS,%ecx
18038+ movl $pa(cpu_gdt_table),%edi
18039+ addl $__PAGE_OFFSET,%eax
18040+1:
18041+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
18042+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
18043+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
18044+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
18045+ rorl $16,%eax
18046+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
18047+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
18048+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
18049+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
18050+ rorl $16,%eax
18051+ addl $PAGE_SIZE_asm,%edi
18052+ loop 1b
18053+#endif
18054+
18055 /*
18056 * Clear BSS first so that there are no surprises...
18057 */
18058@@ -196,8 +264,11 @@ ENTRY(startup_32)
18059 movl %eax, pa(max_pfn_mapped)
18060
18061 /* Do early initialization of the fixmap area */
18062- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
18063- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
18064+#ifdef CONFIG_COMPAT_VDSO
18065+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
18066+#else
18067+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
18068+#endif
18069 #else /* Not PAE */
18070
18071 page_pde_offset = (__PAGE_OFFSET >> 20);
18072@@ -227,8 +298,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
18073 movl %eax, pa(max_pfn_mapped)
18074
18075 /* Do early initialization of the fixmap area */
18076- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
18077- movl %eax,pa(initial_page_table+0xffc)
18078+#ifdef CONFIG_COMPAT_VDSO
18079+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
18080+#else
18081+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
18082+#endif
18083 #endif
18084
18085 #ifdef CONFIG_PARAVIRT
18086@@ -242,9 +316,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
18087 cmpl $num_subarch_entries, %eax
18088 jae bad_subarch
18089
18090- movl pa(subarch_entries)(,%eax,4), %eax
18091- subl $__PAGE_OFFSET, %eax
18092- jmp *%eax
18093+ jmp *pa(subarch_entries)(,%eax,4)
18094
18095 bad_subarch:
18096 WEAK(lguest_entry)
18097@@ -256,10 +328,10 @@ WEAK(xen_entry)
18098 __INITDATA
18099
18100 subarch_entries:
18101- .long default_entry /* normal x86/PC */
18102- .long lguest_entry /* lguest hypervisor */
18103- .long xen_entry /* Xen hypervisor */
18104- .long default_entry /* Moorestown MID */
18105+ .long ta(default_entry) /* normal x86/PC */
18106+ .long ta(lguest_entry) /* lguest hypervisor */
18107+ .long ta(xen_entry) /* Xen hypervisor */
18108+ .long ta(default_entry) /* Moorestown MID */
18109 num_subarch_entries = (. - subarch_entries) / 4
18110 .previous
18111 #else
18112@@ -316,6 +388,7 @@ default_entry:
18113 movl pa(mmu_cr4_features),%eax
18114 movl %eax,%cr4
18115
18116+#ifdef CONFIG_X86_PAE
18117 testb $X86_CR4_PAE, %al # check if PAE is enabled
18118 jz 6f
18119
18120@@ -344,6 +417,9 @@ default_entry:
18121 /* Make changes effective */
18122 wrmsr
18123
18124+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
18125+#endif
18126+
18127 6:
18128
18129 /*
18130@@ -442,14 +518,20 @@ is386: movl $2,%ecx # set MP
18131 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
18132 movl %eax,%ss # after changing gdt.
18133
18134- movl $(__USER_DS),%eax # DS/ES contains default USER segment
18135+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
18136 movl %eax,%ds
18137 movl %eax,%es
18138
18139 movl $(__KERNEL_PERCPU), %eax
18140 movl %eax,%fs # set this cpu's percpu
18141
18142+#ifdef CONFIG_CC_STACKPROTECTOR
18143 movl $(__KERNEL_STACK_CANARY),%eax
18144+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
18145+ movl $(__USER_DS),%eax
18146+#else
18147+ xorl %eax,%eax
18148+#endif
18149 movl %eax,%gs
18150
18151 xorl %eax,%eax # Clear LDT
18152@@ -526,8 +608,11 @@ setup_once:
18153 * relocation. Manually set base address in stack canary
18154 * segment descriptor.
18155 */
18156- movl $gdt_page,%eax
18157+ movl $cpu_gdt_table,%eax
18158 movl $stack_canary,%ecx
18159+#ifdef CONFIG_SMP
18160+ addl $__per_cpu_load,%ecx
18161+#endif
18162 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
18163 shrl $16, %ecx
18164 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
18165@@ -558,7 +643,7 @@ ENDPROC(early_idt_handlers)
18166 /* This is global to keep gas from relaxing the jumps */
18167 ENTRY(early_idt_handler)
18168 cld
18169- cmpl $2,%ss:early_recursion_flag
18170+ cmpl $1,%ss:early_recursion_flag
18171 je hlt_loop
18172 incl %ss:early_recursion_flag
18173
18174@@ -596,8 +681,8 @@ ENTRY(early_idt_handler)
18175 pushl (20+6*4)(%esp) /* trapno */
18176 pushl $fault_msg
18177 call printk
18178-#endif
18179 call dump_stack
18180+#endif
18181 hlt_loop:
18182 hlt
18183 jmp hlt_loop
18184@@ -616,8 +701,11 @@ ENDPROC(early_idt_handler)
18185 /* This is the default interrupt "handler" :-) */
18186 ALIGN
18187 ignore_int:
18188- cld
18189 #ifdef CONFIG_PRINTK
18190+ cmpl $2,%ss:early_recursion_flag
18191+ je hlt_loop
18192+ incl %ss:early_recursion_flag
18193+ cld
18194 pushl %eax
18195 pushl %ecx
18196 pushl %edx
18197@@ -626,9 +714,6 @@ ignore_int:
18198 movl $(__KERNEL_DS),%eax
18199 movl %eax,%ds
18200 movl %eax,%es
18201- cmpl $2,early_recursion_flag
18202- je hlt_loop
18203- incl early_recursion_flag
18204 pushl 16(%esp)
18205 pushl 24(%esp)
18206 pushl 32(%esp)
18207@@ -662,29 +747,43 @@ ENTRY(setup_once_ref)
18208 /*
18209 * BSS section
18210 */
18211-__PAGE_ALIGNED_BSS
18212- .align PAGE_SIZE
18213 #ifdef CONFIG_X86_PAE
18214+.section .initial_pg_pmd,"a",@progbits
18215 initial_pg_pmd:
18216 .fill 1024*KPMDS,4,0
18217 #else
18218+.section .initial_page_table,"a",@progbits
18219 ENTRY(initial_page_table)
18220 .fill 1024,4,0
18221 #endif
18222+.section .initial_pg_fixmap,"a",@progbits
18223 initial_pg_fixmap:
18224 .fill 1024,4,0
18225+.section .empty_zero_page,"a",@progbits
18226 ENTRY(empty_zero_page)
18227 .fill 4096,1,0
18228+.section .swapper_pg_dir,"a",@progbits
18229 ENTRY(swapper_pg_dir)
18230+#ifdef CONFIG_X86_PAE
18231+ .fill 4,8,0
18232+#else
18233 .fill 1024,4,0
18234+#endif
18235+
18236+/*
18237+ * The IDT has to be page-aligned to simplify the Pentium
18238+ * F0 0F bug workaround. We have a special link segment
18239+ * for this.
18240+ */
18241+.section .idt,"a",@progbits
18242+ENTRY(idt_table)
18243+ .fill 256,8,0
18244
18245 /*
18246 * This starts the data section.
18247 */
18248 #ifdef CONFIG_X86_PAE
18249-__PAGE_ALIGNED_DATA
18250- /* Page-aligned for the benefit of paravirt? */
18251- .align PAGE_SIZE
18252+.section .initial_page_table,"a",@progbits
18253 ENTRY(initial_page_table)
18254 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
18255 # if KPMDS == 3
18256@@ -703,12 +802,20 @@ ENTRY(initial_page_table)
18257 # error "Kernel PMDs should be 1, 2 or 3"
18258 # endif
18259 .align PAGE_SIZE /* needs to be page-sized too */
18260+
18261+#ifdef CONFIG_PAX_PER_CPU_PGD
18262+ENTRY(cpu_pgd)
18263+ .rept NR_CPUS
18264+ .fill 4,8,0
18265+ .endr
18266+#endif
18267+
18268 #endif
18269
18270 .data
18271 .balign 4
18272 ENTRY(stack_start)
18273- .long init_thread_union+THREAD_SIZE
18274+ .long init_thread_union+THREAD_SIZE-8
18275
18276 __INITRODATA
18277 int_msg:
18278@@ -736,7 +843,7 @@ fault_msg:
18279 * segment size, and 32-bit linear address value:
18280 */
18281
18282- .data
18283+.section .rodata,"a",@progbits
18284 .globl boot_gdt_descr
18285 .globl idt_descr
18286
18287@@ -745,7 +852,7 @@ fault_msg:
18288 .word 0 # 32 bit align gdt_desc.address
18289 boot_gdt_descr:
18290 .word __BOOT_DS+7
18291- .long boot_gdt - __PAGE_OFFSET
18292+ .long pa(boot_gdt)
18293
18294 .word 0 # 32-bit align idt_desc.address
18295 idt_descr:
18296@@ -756,7 +863,7 @@ idt_descr:
18297 .word 0 # 32 bit align gdt_desc.address
18298 ENTRY(early_gdt_descr)
18299 .word GDT_ENTRIES*8-1
18300- .long gdt_page /* Overwritten for secondary CPUs */
18301+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
18302
18303 /*
18304 * The boot_gdt must mirror the equivalent in setup.S and is
18305@@ -765,5 +872,65 @@ ENTRY(early_gdt_descr)
18306 .align L1_CACHE_BYTES
18307 ENTRY(boot_gdt)
18308 .fill GDT_ENTRY_BOOT_CS,8,0
18309- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
18310- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
18311+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
18312+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
18313+
18314+ .align PAGE_SIZE_asm
18315+ENTRY(cpu_gdt_table)
18316+ .rept NR_CPUS
18317+ .quad 0x0000000000000000 /* NULL descriptor */
18318+ .quad 0x0000000000000000 /* 0x0b reserved */
18319+ .quad 0x0000000000000000 /* 0x13 reserved */
18320+ .quad 0x0000000000000000 /* 0x1b reserved */
18321+
18322+#ifdef CONFIG_PAX_KERNEXEC
18323+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
18324+#else
18325+ .quad 0x0000000000000000 /* 0x20 unused */
18326+#endif
18327+
18328+ .quad 0x0000000000000000 /* 0x28 unused */
18329+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
18330+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
18331+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
18332+ .quad 0x0000000000000000 /* 0x4b reserved */
18333+ .quad 0x0000000000000000 /* 0x53 reserved */
18334+ .quad 0x0000000000000000 /* 0x5b reserved */
18335+
18336+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
18337+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
18338+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
18339+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
18340+
18341+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
18342+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
18343+
18344+ /*
18345+ * Segments used for calling PnP BIOS have byte granularity.
18346+ * The code segments and data segments have fixed 64k limits,
18347+ * the transfer segment sizes are set at run time.
18348+ */
18349+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
18350+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
18351+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
18352+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
18353+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
18354+
18355+ /*
18356+ * The APM segments have byte granularity and their bases
18357+ * are set at run time. All have 64k limits.
18358+ */
18359+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
18360+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
18361+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
18362+
18363+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
18364+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
18365+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
18366+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
18367+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
18368+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
18369+
18370+ /* Be sure this is zeroed to avoid false validations in Xen */
18371+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
18372+ .endr
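Each .quad in cpu_gdt_table packs a complete segment descriptor; its scattered fields can be pulled apart mechanically. A small self-contained decoder (a sketch) plus the reading for the kernel code entry:

	#include <stdio.h>

	/* sketch: split a GDT .quad into its architectural fields */
	static void decode_desc(unsigned long long d)
	{
		unsigned int base   = ((d >> 16) & 0xffffff) | ((d >> 32) & 0xff000000);
		unsigned int limit  = (d & 0xffff) | ((d >> 32) & 0xf0000);
		unsigned int access = (d >> 40) & 0xff;
		unsigned int flags  = (d >> 52) & 0xf;

		printf("base=%#x limit=%#x access=%#x flags=%#x\n",
		       base, limit, access, flags);
	}

	/* decode_desc(0x00cf9b000000ffffULL) prints base=0, limit=0xfffff
	 * (4 GiB with G=1), access=0x9b (present, DPL 0, code, readable,
	 * accessed), flags=0xc -- the 0x60 kernel code segment above. */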
18373diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
18374index 94bf9cc..400455a 100644
18375--- a/arch/x86/kernel/head_64.S
18376+++ b/arch/x86/kernel/head_64.S
18377@@ -20,6 +20,8 @@
18378 #include <asm/processor-flags.h>
18379 #include <asm/percpu.h>
18380 #include <asm/nops.h>
18381+#include <asm/cpufeature.h>
18382+#include <asm/alternative-asm.h>
18383
18384 #ifdef CONFIG_PARAVIRT
18385 #include <asm/asm-offsets.h>
18386@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
18387 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
18388 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
18389 L3_START_KERNEL = pud_index(__START_KERNEL_map)
18390+L4_VMALLOC_START = pgd_index(VMALLOC_START)
18391+L3_VMALLOC_START = pud_index(VMALLOC_START)
18392+L4_VMALLOC_END = pgd_index(VMALLOC_END)
18393+L3_VMALLOC_END = pud_index(VMALLOC_END)
18394+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
18395+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
18396
18397 .text
18398 __HEAD
18399@@ -88,35 +96,23 @@ startup_64:
18400 */
18401 addq %rbp, init_level4_pgt + 0(%rip)
18402 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
18403+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
18404+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
18405+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
18406 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
18407
18408 addq %rbp, level3_ident_pgt + 0(%rip)
18409+#ifndef CONFIG_XEN
18410+ addq %rbp, level3_ident_pgt + 8(%rip)
18411+#endif
18412
18413- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
18414- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
18415+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
18416+
18417+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
18418+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
18419
18420 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
18421-
18422- /* Add an Identity mapping if I am above 1G */
18423- leaq _text(%rip), %rdi
18424- andq $PMD_PAGE_MASK, %rdi
18425-
18426- movq %rdi, %rax
18427- shrq $PUD_SHIFT, %rax
18428- andq $(PTRS_PER_PUD - 1), %rax
18429- jz ident_complete
18430-
18431- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
18432- leaq level3_ident_pgt(%rip), %rbx
18433- movq %rdx, 0(%rbx, %rax, 8)
18434-
18435- movq %rdi, %rax
18436- shrq $PMD_SHIFT, %rax
18437- andq $(PTRS_PER_PMD - 1), %rax
18438- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
18439- leaq level2_spare_pgt(%rip), %rbx
18440- movq %rdx, 0(%rbx, %rax, 8)
18441-ident_complete:
18442+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
18443
18444 /*
18445 * Fixup the kernel text+data virtual addresses. Note that
18446@@ -159,8 +155,8 @@ ENTRY(secondary_startup_64)
18447 * after the boot processor executes this code.
18448 */
18449
18450- /* Enable PAE mode and PGE */
18451- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
18452+ /* Enable PAE mode and PSE/PGE */
18453+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
18454 movq %rax, %cr4
18455
18456 /* Setup early boot stage 4 level pagetables. */
18457@@ -182,9 +178,17 @@ ENTRY(secondary_startup_64)
18458 movl $MSR_EFER, %ecx
18459 rdmsr
18460 btsl $_EFER_SCE, %eax /* Enable System Call */
18461- btl $20,%edi /* No Execute supported? */
18462+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
18463 jnc 1f
18464 btsl $_EFER_NX, %eax
18465+ leaq init_level4_pgt(%rip), %rdi
18466+#ifndef CONFIG_EFI
18467+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
18468+#endif
18469+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
18470+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
18471+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
18472+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
18473 1: wrmsr /* Make changes effective */
18474
18475 /* Setup cr0 */
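Once the NX feature bit is confirmed and EFER.NX enabled, the added btsq instructions set bit 63 (_PAGE_BIT_NX) directly in the top-level entries, so the vmalloc and vmemmap regions are non-executable from the very first page tables, and __supported_pte_mask lets later PTEs carry NX as well. Equivalent C, treating the pgd as a raw array (the L4_* indices are the symbols defined earlier in this file; a sketch):

	/* sketch: mark whole pgd-level regions no-execute at early boot */
	static void enable_nx_regions_sketch(unsigned long long *pgd)
	{
		pgd[L4_VMALLOC_START] |= 1ULL << 63;	/* _PAGE_BIT_NX */
		pgd[L4_VMALLOC_END]   |= 1ULL << 63;
		pgd[L4_VMEMMAP_START] |= 1ULL << 63;
	}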
18476@@ -246,6 +250,7 @@ ENTRY(secondary_startup_64)
18477 * jump. In addition we need to ensure %cs is set so we make this
18478 * a far return.
18479 */
18480+ pax_set_fptr_mask
18481 movq initial_code(%rip),%rax
18482 pushq $0 # fake return address to stop unwinder
18483 pushq $__KERNEL_CS # set correct cs
18484@@ -268,7 +273,7 @@ ENTRY(secondary_startup_64)
18485 bad_address:
18486 jmp bad_address
18487
18488- .section ".init.text","ax"
18489+ __INIT
18490 .globl early_idt_handlers
18491 early_idt_handlers:
18492 # 104(%rsp) %rflags
18493@@ -347,11 +352,15 @@ ENTRY(early_idt_handler)
18494 addq $16,%rsp # drop vector number and error code
18495 decl early_recursion_flag(%rip)
18496 INTERRUPT_RETURN
18497+ .previous
18498
18499+ __INITDATA
18500 .balign 4
18501 early_recursion_flag:
18502 .long 0
18503+ .previous
18504
18505+ .section .rodata,"a",@progbits
18506 #ifdef CONFIG_EARLY_PRINTK
18507 early_idt_msg:
18508 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
18509@@ -360,6 +369,7 @@ early_idt_ripmsg:
18510 #endif /* CONFIG_EARLY_PRINTK */
18511 .previous
18512
18513+ .section .rodata,"a",@progbits
18514 #define NEXT_PAGE(name) \
18515 .balign PAGE_SIZE; \
18516 ENTRY(name)
18517@@ -372,7 +382,6 @@ ENTRY(name)
18518 i = i + 1 ; \
18519 .endr
18520
18521- .data
18522 /*
18523 * This default setting generates an ident mapping at address 0x100000
18524 * and a mapping for the kernel that precisely maps virtual address
18525@@ -383,13 +392,41 @@ NEXT_PAGE(init_level4_pgt)
18526 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
18527 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
18528 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
18529+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
18530+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
18531+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
18532+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
18533+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
18534+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
18535 .org init_level4_pgt + L4_START_KERNEL*8, 0
18536 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
18537 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
18538
18539+#ifdef CONFIG_PAX_PER_CPU_PGD
18540+NEXT_PAGE(cpu_pgd)
18541+ .rept NR_CPUS
18542+ .fill 512,8,0
18543+ .endr
18544+#endif
18545+
18546 NEXT_PAGE(level3_ident_pgt)
18547 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
18548+#ifdef CONFIG_XEN
18549 .fill 511,8,0
18550+#else
18551+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
18552+ .fill 510,8,0
18553+#endif
18554+
18555+NEXT_PAGE(level3_vmalloc_start_pgt)
18556+ .fill 512,8,0
18557+
18558+NEXT_PAGE(level3_vmalloc_end_pgt)
18559+ .fill 512,8,0
18560+
18561+NEXT_PAGE(level3_vmemmap_pgt)
18562+ .fill L3_VMEMMAP_START,8,0
18563+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
18564
18565 NEXT_PAGE(level3_kernel_pgt)
18566 .fill L3_START_KERNEL,8,0
18567@@ -397,20 +434,23 @@ NEXT_PAGE(level3_kernel_pgt)
18568 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
18569 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
18570
18571+NEXT_PAGE(level2_vmemmap_pgt)
18572+ .fill 512,8,0
18573+
18574 NEXT_PAGE(level2_fixmap_pgt)
18575- .fill 506,8,0
18576- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
18577- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
18578- .fill 5,8,0
18579+ .fill 507,8,0
18580+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
18581+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
18582+ .fill 4,8,0
18583
18584-NEXT_PAGE(level1_fixmap_pgt)
18585+NEXT_PAGE(level1_vsyscall_pgt)
18586 .fill 512,8,0
18587
18588-NEXT_PAGE(level2_ident_pgt)
18589- /* Since I easily can, map the first 1G.
18590+ /* Since I easily can, map the first 2G.
18591 * Don't set NX because code runs from these pages.
18592 */
18593- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
18594+NEXT_PAGE(level2_ident_pgt)
18595+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
18596
18597 NEXT_PAGE(level2_kernel_pgt)
18598 /*
18599@@ -423,37 +463,59 @@ NEXT_PAGE(level2_kernel_pgt)
18600 * If you want to increase this then increase MODULES_VADDR
18601 * too.)
18602 */
18603- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
18604- KERNEL_IMAGE_SIZE/PMD_SIZE)
18605-
18606-NEXT_PAGE(level2_spare_pgt)
18607- .fill 512, 8, 0
18608+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
18609
18610 #undef PMDS
18611 #undef NEXT_PAGE
18612
18613- .data
18614+ .align PAGE_SIZE
18615+ENTRY(cpu_gdt_table)
18616+ .rept NR_CPUS
18617+ .quad 0x0000000000000000 /* NULL descriptor */
18618+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
18619+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
18620+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
18621+ .quad 0x00cffb000000ffff /* __USER32_CS */
18622+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
18623+ .quad 0x00affb000000ffff /* __USER_CS */
18624+
18625+#ifdef CONFIG_PAX_KERNEXEC
18626+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
18627+#else
18628+ .quad 0x0 /* unused */
18629+#endif
18630+
18631+ .quad 0,0 /* TSS */
18632+ .quad 0,0 /* LDT */
18633+ .quad 0,0,0 /* three TLS descriptors */
18634+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
18635+ /* asm/segment.h:GDT_ENTRIES must match this */
18636+
18637+ /* zero the remaining page */
18638+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
18639+ .endr
18640+
18641 .align 16
18642 .globl early_gdt_descr
18643 early_gdt_descr:
18644 .word GDT_ENTRIES*8-1
18645 early_gdt_descr_base:
18646- .quad INIT_PER_CPU_VAR(gdt_page)
18647+ .quad cpu_gdt_table
18648
18649 ENTRY(phys_base)
18650 /* This must match the first entry in level2_kernel_pgt */
18651 .quad 0x0000000000000000
18652
18653 #include "../../x86/xen/xen-head.S"
18654-
18655- .section .bss, "aw", @nobits
18656+
18657+ .section .rodata,"a",@progbits
18658 .align L1_CACHE_BYTES
18659 ENTRY(idt_table)
18660- .skip IDT_ENTRIES * 16
18661+ .fill 512,8,0
18662
18663 .align L1_CACHE_BYTES
18664 ENTRY(nmi_idt_table)
18665- .skip IDT_ENTRIES * 16
18666+ .fill 512,8,0
18667
18668 __PAGE_ALIGNED_BSS
18669 .align PAGE_SIZE
18670diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
18671index 9c3bd4a..e1d9b35 100644
18672--- a/arch/x86/kernel/i386_ksyms_32.c
18673+++ b/arch/x86/kernel/i386_ksyms_32.c
18674@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
18675 EXPORT_SYMBOL(cmpxchg8b_emu);
18676 #endif
18677
18678+EXPORT_SYMBOL_GPL(cpu_gdt_table);
18679+
18680 /* Networking helper routines. */
18681 EXPORT_SYMBOL(csum_partial_copy_generic);
18682+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
18683+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
18684
18685 EXPORT_SYMBOL(__get_user_1);
18686 EXPORT_SYMBOL(__get_user_2);
18687@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
18688
18689 EXPORT_SYMBOL(csum_partial);
18690 EXPORT_SYMBOL(empty_zero_page);
18691+
18692+#ifdef CONFIG_PAX_KERNEXEC
18693+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
18694+#endif
18695diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
18696index 675a050..95febfd 100644
18697--- a/arch/x86/kernel/i387.c
18698+++ b/arch/x86/kernel/i387.c
18699@@ -55,7 +55,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
18700 static inline bool interrupted_user_mode(void)
18701 {
18702 struct pt_regs *regs = get_irq_regs();
18703- return regs && user_mode_vm(regs);
18704+ return regs && user_mode(regs);
18705 }
18706
18707 /*
18708diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
18709index 9a5c460..dc4374d 100644
18710--- a/arch/x86/kernel/i8259.c
18711+++ b/arch/x86/kernel/i8259.c
18712@@ -209,7 +209,7 @@ spurious_8259A_irq:
18713 "spurious 8259A interrupt: IRQ%d.\n", irq);
18714 spurious_irq_mask |= irqmask;
18715 }
18716- atomic_inc(&irq_err_count);
18717+ atomic_inc_unchecked(&irq_err_count);
18718 /*
18719 * Theoretically we do not have to handle this IRQ,
18720 * but in Linux this does not cause problems and is
18721diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
18722index 8c96897..be66bfa 100644
18723--- a/arch/x86/kernel/ioport.c
18724+++ b/arch/x86/kernel/ioport.c
18725@@ -6,6 +6,7 @@
18726 #include <linux/sched.h>
18727 #include <linux/kernel.h>
18728 #include <linux/capability.h>
18729+#include <linux/security.h>
18730 #include <linux/errno.h>
18731 #include <linux/types.h>
18732 #include <linux/ioport.h>
18733@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
18734
18735 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
18736 return -EINVAL;
18737+#ifdef CONFIG_GRKERNSEC_IO
18738+ if (turn_on && grsec_disable_privio) {
18739+ gr_handle_ioperm();
18740+ return -EPERM;
18741+ }
18742+#endif
18743 if (turn_on && !capable(CAP_SYS_RAWIO))
18744 return -EPERM;
18745
18746@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
18747 * because the ->io_bitmap_max value must match the bitmap
18748 * contents:
18749 */
18750- tss = &per_cpu(init_tss, get_cpu());
18751+ tss = init_tss + get_cpu();
18752
18753 if (turn_on)
18754 bitmap_clear(t->io_bitmap_ptr, from, num);
18755@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
18756 return -EINVAL;
18757 /* Trying to gain more privileges? */
18758 if (level > old) {
18759+#ifdef CONFIG_GRKERNSEC_IO
18760+ if (grsec_disable_privio) {
18761+ gr_handle_iopl();
18762+ return -EPERM;
18763+ }
18764+#endif
18765 if (!capable(CAP_SYS_RAWIO))
18766 return -EPERM;
18767 }
18768diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
18769index e4595f1..ee3bfb8 100644
18770--- a/arch/x86/kernel/irq.c
18771+++ b/arch/x86/kernel/irq.c
18772@@ -18,7 +18,7 @@
18773 #include <asm/mce.h>
18774 #include <asm/hw_irq.h>
18775
18776-atomic_t irq_err_count;
18777+atomic_unchecked_t irq_err_count;
18778
18779 /* Function pointer for generic interrupt vector handling */
18780 void (*x86_platform_ipi_callback)(void) = NULL;
18781@@ -122,9 +122,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
18782 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
18783 seq_printf(p, " Machine check polls\n");
18784 #endif
18785- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
18786+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
18787 #if defined(CONFIG_X86_IO_APIC)
18788- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
18789+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
18790 #endif
18791 return 0;
18792 }
18793@@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
18794
18795 u64 arch_irq_stat(void)
18796 {
18797- u64 sum = atomic_read(&irq_err_count);
18798+ u64 sum = atomic_read_unchecked(&irq_err_count);
18799
18800 #ifdef CONFIG_X86_IO_APIC
18801- sum += atomic_read(&irq_mis_count);
18802+ sum += atomic_read_unchecked(&irq_mis_count);
18803 #endif
18804 return sum;
18805 }
18806diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
18807index 344faf8..355f60d 100644
18808--- a/arch/x86/kernel/irq_32.c
18809+++ b/arch/x86/kernel/irq_32.c
18810@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
18811 __asm__ __volatile__("andl %%esp,%0" :
18812 "=r" (sp) : "0" (THREAD_SIZE - 1));
18813
18814- return sp < (sizeof(struct thread_info) + STACK_WARN);
18815+ return sp < STACK_WARN;
18816 }
18817
18818 static void print_stack_overflow(void)
18819@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
18820 * per-CPU IRQ handling contexts (thread information and stack)
18821 */
18822 union irq_ctx {
18823- struct thread_info tinfo;
18824- u32 stack[THREAD_SIZE/sizeof(u32)];
18825+ unsigned long previous_esp;
18826+ u32 stack[THREAD_SIZE/sizeof(u32)];
18827 } __attribute__((aligned(THREAD_SIZE)));
18828
18829 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
18830@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
18831 static inline int
18832 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18833 {
18834- union irq_ctx *curctx, *irqctx;
18835+ union irq_ctx *irqctx;
18836 u32 *isp, arg1, arg2;
18837
18838- curctx = (union irq_ctx *) current_thread_info();
18839 irqctx = __this_cpu_read(hardirq_ctx);
18840
18841 /*
18842@@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18843 * handler) we can't do that and just have to keep using the
18844 * current stack (which is the irq stack already after all)
18845 */
18846- if (unlikely(curctx == irqctx))
18847+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
18848 return 0;
18849
18850 /* build the stack frame on the IRQ stack */
18851- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
18852- irqctx->tinfo.task = curctx->tinfo.task;
18853- irqctx->tinfo.previous_esp = current_stack_pointer;
18854+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
18855+ irqctx->previous_esp = current_stack_pointer;
18856
18857- /* Copy the preempt_count so that the [soft]irq checks work. */
18858- irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
18859+#ifdef CONFIG_PAX_MEMORY_UDEREF
18860+ __set_fs(MAKE_MM_SEG(0));
18861+#endif
18862
18863 if (unlikely(overflow))
18864 call_on_stack(print_stack_overflow, isp);
18865@@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18866 : "0" (irq), "1" (desc), "2" (isp),
18867 "D" (desc->handle_irq)
18868 : "memory", "cc", "ecx");
18869+
18870+#ifdef CONFIG_PAX_MEMORY_UDEREF
18871+ __set_fs(current_thread_info()->addr_limit);
18872+#endif
18873+
18874 return 1;
18875 }
18876
18877@@ -121,29 +125,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18878 */
18879 void __cpuinit irq_ctx_init(int cpu)
18880 {
18881- union irq_ctx *irqctx;
18882-
18883 if (per_cpu(hardirq_ctx, cpu))
18884 return;
18885
18886- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
18887- THREADINFO_GFP,
18888- THREAD_SIZE_ORDER));
18889- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
18890- irqctx->tinfo.cpu = cpu;
18891- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
18892- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
18893-
18894- per_cpu(hardirq_ctx, cpu) = irqctx;
18895-
18896- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
18897- THREADINFO_GFP,
18898- THREAD_SIZE_ORDER));
18899- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
18900- irqctx->tinfo.cpu = cpu;
18901- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
18902-
18903- per_cpu(softirq_ctx, cpu) = irqctx;
18904+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
18905+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
18909
18910 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
18911 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
18912@@ -152,7 +141,6 @@ void __cpuinit irq_ctx_init(int cpu)
18913 asmlinkage void do_softirq(void)
18914 {
18915 unsigned long flags;
18916- struct thread_info *curctx;
18917 union irq_ctx *irqctx;
18918 u32 *isp;
18919
18920@@ -162,15 +150,22 @@ asmlinkage void do_softirq(void)
18921 local_irq_save(flags);
18922
18923 if (local_softirq_pending()) {
18924- curctx = current_thread_info();
18925 irqctx = __this_cpu_read(softirq_ctx);
18926- irqctx->tinfo.task = curctx->task;
18927- irqctx->tinfo.previous_esp = current_stack_pointer;
18928+ irqctx->previous_esp = current_stack_pointer;
18929
18930 /* build the stack frame on the softirq stack */
18931- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
18932+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
18933+
18934+#ifdef CONFIG_PAX_MEMORY_UDEREF
18935+ __set_fs(MAKE_MM_SEG(0));
18936+#endif
18937
18938 call_on_stack(__do_softirq, isp);
18939+
18940+#ifdef CONFIG_PAX_MEMORY_UDEREF
18941+ __set_fs(current_thread_info()->addr_limit);
18942+#endif
18943+
18944 /*
18945 * Shouldn't happen, we returned above if in_interrupt():
18946 */
18947@@ -191,7 +186,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
18948 if (unlikely(!desc))
18949 return false;
18950
18951- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
18952+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
18953 if (unlikely(overflow))
18954 print_stack_overflow();
18955 desc->handle_irq(irq, desc);
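With the union irq_ctx change above, the IRQ stack no longer carries a full struct thread_info; only previous_esp is kept at the base of the THREAD_SIZE-aligned stack, and the "already on the IRQ stack?" test becomes an unsigned distance check instead of a thread_info pointer comparison. The equivalent check in plain C; the THREAD_SIZE value is an assumption for the sketch:

#include <stdbool.h>
#include <stdint.h>

#define THREAD_SIZE 8192UL   /* assumed: two pages on this i386 config */

static bool on_irq_stack(uintptr_t sp, uintptr_t irqctx_base)
{
        /* unsigned wraparound turns the single compare into a range
         * check: true iff sp is in [irqctx_base, irqctx_base+THREAD_SIZE) */
        return sp - irqctx_base < THREAD_SIZE;
}
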
18956diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
18957index d04d3ec..ea4b374 100644
18958--- a/arch/x86/kernel/irq_64.c
18959+++ b/arch/x86/kernel/irq_64.c
18960@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
18961 u64 estack_top, estack_bottom;
18962 u64 curbase = (u64)task_stack_page(current);
18963
18964- if (user_mode_vm(regs))
18965+ if (user_mode(regs))
18966 return;
18967
18968 if (regs->sp >= curbase + sizeof(struct thread_info) +
18969diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
18970index dc1404b..bbc43e7 100644
18971--- a/arch/x86/kernel/kdebugfs.c
18972+++ b/arch/x86/kernel/kdebugfs.c
18973@@ -27,7 +27,7 @@ struct setup_data_node {
18974 u32 len;
18975 };
18976
18977-static ssize_t setup_data_read(struct file *file, char __user *user_buf,
18978+static ssize_t __size_overflow(3) setup_data_read(struct file *file, char __user *user_buf,
18979 size_t count, loff_t *ppos)
18980 {
18981 struct setup_data_node *node = file->private_data;
18982diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
18983index 836f832..a8bda67 100644
18984--- a/arch/x86/kernel/kgdb.c
18985+++ b/arch/x86/kernel/kgdb.c
18986@@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
18987 #ifdef CONFIG_X86_32
18988 switch (regno) {
18989 case GDB_SS:
18990- if (!user_mode_vm(regs))
18991+ if (!user_mode(regs))
18992 *(unsigned long *)mem = __KERNEL_DS;
18993 break;
18994 case GDB_SP:
18995- if (!user_mode_vm(regs))
18996+ if (!user_mode(regs))
18997 *(unsigned long *)mem = kernel_stack_pointer(regs);
18998 break;
18999 case GDB_GS:
19000@@ -229,7 +229,10 @@ static void kgdb_correct_hw_break(void)
19001 bp->attr.bp_addr = breakinfo[breakno].addr;
19002 bp->attr.bp_len = breakinfo[breakno].len;
19003 bp->attr.bp_type = breakinfo[breakno].type;
19004- info->address = breakinfo[breakno].addr;
19005+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
19006+ info->address = ktla_ktva(breakinfo[breakno].addr);
19007+ else
19008+ info->address = breakinfo[breakno].addr;
19009 info->len = breakinfo[breakno].len;
19010 info->type = breakinfo[breakno].type;
19011 val = arch_install_hw_breakpoint(bp);
19012@@ -476,12 +479,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
19013 case 'k':
19014 /* clear the trace bit */
19015 linux_regs->flags &= ~X86_EFLAGS_TF;
19016- atomic_set(&kgdb_cpu_doing_single_step, -1);
19017+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
19018
19019 /* set the trace bit if we're stepping */
19020 if (remcomInBuffer[0] == 's') {
19021 linux_regs->flags |= X86_EFLAGS_TF;
19022- atomic_set(&kgdb_cpu_doing_single_step,
19023+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
19024 raw_smp_processor_id());
19025 }
19026
19027@@ -546,7 +549,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
19028
19029 switch (cmd) {
19030 case DIE_DEBUG:
19031- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
19032+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
19033 if (user_mode(regs))
19034 return single_step_cont(regs, args);
19035 break;
19036@@ -751,11 +754,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
19037 #endif /* CONFIG_DEBUG_RODATA */
19038
19039 bpt->type = BP_BREAKPOINT;
19040- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
19041+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
19042 BREAK_INSTR_SIZE);
19043 if (err)
19044 return err;
19045- err = probe_kernel_write((char *)bpt->bpt_addr,
19046+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
19047 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
19048 #ifdef CONFIG_DEBUG_RODATA
19049 if (!err)
19050@@ -768,7 +771,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
19051 return -EBUSY;
19052 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
19053 BREAK_INSTR_SIZE);
19054- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
19055+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
19056 if (err)
19057 return err;
19058 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
19059@@ -793,13 +796,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
19060 if (mutex_is_locked(&text_mutex))
19061 goto knl_write;
19062 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
19063- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
19064+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
19065 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
19066 goto knl_write;
19067 return err;
19068 knl_write:
19069 #endif /* CONFIG_DEBUG_RODATA */
19070- return probe_kernel_write((char *)bpt->bpt_addr,
19071+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
19072 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
19073 }
19074
19075diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes-opt.c
19076index c5e410e..ed5a7f0 100644
19077--- a/arch/x86/kernel/kprobes-opt.c
19078+++ b/arch/x86/kernel/kprobes-opt.c
19079@@ -338,7 +338,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
19080 * Verify if the address gap is in 2GB range, because this uses
19081 * a relative jump.
19082 */
19083- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
19084+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
19085 if (abs(rel) > 0x7fffffff)
19086 return -ERANGE;
19087
19088@@ -353,16 +353,16 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
19089 op->optinsn.size = ret;
19090
19091 /* Copy arch-dep-instance from template */
19092- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
19093+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
19094
19095 /* Set probe information */
19096 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
19097
19098 /* Set probe function call */
19099- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
19100+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
19101
19102 /* Set returning jmp instruction at the tail of out-of-line buffer */
19103- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
19104+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
19105 (u8 *)op->kp.addr + op->optinsn.size);
19106
19107 flush_icache_range((unsigned long) buf,
19108@@ -385,7 +385,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
19109 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
19110
19111 /* Backup instructions which will be replaced by jump address */
19112- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
19113+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
19114 RELATIVE_ADDR_SIZE);
19115
19116 insn_buf[0] = RELATIVEJUMP_OPCODE;
19117@@ -483,7 +483,7 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
19118 /* This kprobe is really able to run optimized path. */
19119 op = container_of(p, struct optimized_kprobe, kp);
19120 /* Detour through copied instructions */
19121- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
19122+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
19123 if (!reenter)
19124 reset_current_kprobe();
19125 preempt_enable_no_resched();
19126diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
19127index 57916c0..9e0b9d0 100644
19128--- a/arch/x86/kernel/kprobes.c
19129+++ b/arch/x86/kernel/kprobes.c
19130@@ -119,9 +119,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
19131 s32 raddr;
19132 } __attribute__((packed)) *insn;
19133
19134- insn = (struct __arch_relative_insn *)from;
19135+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
19136+
19137+ pax_open_kernel();
19138 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
19139 insn->op = op;
19140+ pax_close_kernel();
19141 }
19142
19143 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
19144@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
19145 kprobe_opcode_t opcode;
19146 kprobe_opcode_t *orig_opcodes = opcodes;
19147
19148- if (search_exception_tables((unsigned long)opcodes))
19149+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
19150 return 0; /* Page fault may occur on this address. */
19151
19152 retry:
19153@@ -238,9 +241,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
19154 * for the first byte, we can recover the original instruction
19155 * from it and kp->opcode.
19156 */
19157- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
19158+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
19159 buf[0] = kp->opcode;
19160- return (unsigned long)buf;
19161+ return ktva_ktla((unsigned long)buf);
19162 }
19163
19164 /*
19165@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
19166 /* Another subsystem puts a breakpoint, failed to recover */
19167 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
19168 return 0;
19169+ pax_open_kernel();
19170 memcpy(dest, insn.kaddr, insn.length);
19171+ pax_close_kernel();
19172
19173 #ifdef CONFIG_X86_64
19174 if (insn_rip_relative(&insn)) {
19175@@ -355,7 +360,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
19176 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
19177 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
19178 disp = (u8 *) dest + insn_offset_displacement(&insn);
19179+ pax_open_kernel();
19180 *(s32 *) disp = (s32) newdisp;
19181+ pax_close_kernel();
19182 }
19183 #endif
19184 return insn.length;
19185@@ -485,7 +492,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
19186 * nor set current_kprobe, because it doesn't use single
19187 * stepping.
19188 */
19189- regs->ip = (unsigned long)p->ainsn.insn;
19190+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
19191 preempt_enable_no_resched();
19192 return;
19193 }
19194@@ -502,9 +509,9 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
19195 regs->flags &= ~X86_EFLAGS_IF;
19196 /* single step inline if the instruction is an int3 */
19197 if (p->opcode == BREAKPOINT_INSTRUCTION)
19198- regs->ip = (unsigned long)p->addr;
19199+ regs->ip = ktla_ktva((unsigned long)p->addr);
19200 else
19201- regs->ip = (unsigned long)p->ainsn.insn;
19202+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
19203 }
19204
19205 /*
19206@@ -600,7 +607,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
19207 setup_singlestep(p, regs, kcb, 0);
19208 return 1;
19209 }
19210- } else if (*addr != BREAKPOINT_INSTRUCTION) {
19211+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
19212 /*
19213 * The breakpoint instruction was removed right
19214 * after we hit it. Another cpu has removed
19215@@ -651,6 +658,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
19216 " movq %rax, 152(%rsp)\n"
19217 RESTORE_REGS_STRING
19218 " popfq\n"
19219+#ifdef KERNEXEC_PLUGIN
19220+ " btsq $63,(%rsp)\n"
19221+#endif
19222 #else
19223 " pushf\n"
19224 SAVE_REGS_STRING
19225@@ -788,7 +798,7 @@ static void __kprobes
19226 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
19227 {
19228 unsigned long *tos = stack_addr(regs);
19229- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
19230+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
19231 unsigned long orig_ip = (unsigned long)p->addr;
19232 kprobe_opcode_t *insn = p->ainsn.insn;
19233
19234@@ -970,7 +980,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
19235 struct die_args *args = data;
19236 int ret = NOTIFY_DONE;
19237
19238- if (args->regs && user_mode_vm(args->regs))
19239+ if (args->regs && user_mode(args->regs))
19240 return ret;
19241
19242 switch (val) {
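The kprobes hunks route every read or write of instruction bytes through ktla_ktva()/ktva_ktla(). Under PaX KERNEXEC on i386 the kernel image is reachable at two addresses a constant apart, the executable mapping and a data alias that stays readable, so probes must patch code through the alias. The pair reduces to constant-offset arithmetic along these lines; the offset value below is a placeholder, and on configurations without KERNEXEC both macros are identity:

/* placeholder value; the real offset comes from the KERNEXEC layout */
#define __KERNEL_TEXT_OFFSET 0x10000000UL

#define ktla_ktva(addr) ((addr) + __KERNEL_TEXT_OFFSET) /* linear -> alias */
#define ktva_ktla(addr) ((addr) - __KERNEL_TEXT_OFFSET) /* alias -> linear */
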
19243diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
19244index 4180a87..4678e4f 100644
19245--- a/arch/x86/kernel/kvm.c
19246+++ b/arch/x86/kernel/kvm.c
19247@@ -267,7 +267,7 @@ static void __init paravirt_ops_setup(void)
19248 pv_info.paravirt_enabled = 1;
19249
19250 if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
19251- pv_cpu_ops.io_delay = kvm_io_delay;
19252+ *(void **)&pv_cpu_ops.io_delay = kvm_io_delay;
19253
19254 #ifdef CONFIG_X86_IO_APIC
19255 no_timer_check = 1;
19256@@ -461,18 +461,18 @@ void __init kvm_guest_init(void)
19257 for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
19258 spin_lock_init(&async_pf_sleepers[i].lock);
19259 if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
19260- x86_init.irqs.trap_init = kvm_apf_trap_init;
19261+ *(void **)&x86_init.irqs.trap_init = kvm_apf_trap_init;
19262
19263 if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
19264 has_steal_clock = 1;
19265- pv_time_ops.steal_clock = kvm_steal_clock;
19266+ *(void **)&pv_time_ops.steal_clock = kvm_steal_clock;
19267 }
19268
19269 if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
19270 apic_set_eoi_write(kvm_guest_apic_eoi_write);
19271
19272 #ifdef CONFIG_SMP
19273- smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
19274+ *(void **)&smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
19275 register_cpu_notifier(&kvm_cpu_notifier);
19276 #else
19277 kvm_guest_cpu_init();
19278diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
19279index f1b42b3..27ac4e7 100644
19280--- a/arch/x86/kernel/kvmclock.c
19281+++ b/arch/x86/kernel/kvmclock.c
19282@@ -211,19 +211,19 @@ void __init kvmclock_init(void)
19283
19284 if (kvm_register_clock("boot clock"))
19285 return;
19286- pv_time_ops.sched_clock = kvm_clock_read;
19287- x86_platform.calibrate_tsc = kvm_get_tsc_khz;
19288- x86_platform.get_wallclock = kvm_get_wallclock;
19289- x86_platform.set_wallclock = kvm_set_wallclock;
19290+ *(void **)&pv_time_ops.sched_clock = kvm_clock_read;
19291+ *(void **)&x86_platform.calibrate_tsc = kvm_get_tsc_khz;
19292+ *(void **)&x86_platform.get_wallclock = kvm_get_wallclock;
19293+ *(void **)&x86_platform.set_wallclock = kvm_set_wallclock;
19294 #ifdef CONFIG_X86_LOCAL_APIC
19295- x86_cpuinit.early_percpu_clock_init =
19296+ *(void **)&x86_cpuinit.early_percpu_clock_init =
19297 kvm_setup_secondary_clock;
19298 #endif
19299- x86_platform.save_sched_clock_state = kvm_save_sched_clock_state;
19300- x86_platform.restore_sched_clock_state = kvm_restore_sched_clock_state;
19301- machine_ops.shutdown = kvm_shutdown;
19302+ *(void **)&x86_platform.save_sched_clock_state = kvm_save_sched_clock_state;
19303+ *(void **)&x86_platform.restore_sched_clock_state = kvm_restore_sched_clock_state;
19304+ *(void **)&machine_ops.shutdown = kvm_shutdown;
19305 #ifdef CONFIG_KEXEC
19306- machine_ops.crash_shutdown = kvm_crash_shutdown;
19307+ *(void **)&machine_ops.crash_shutdown = kvm_crash_shutdown;
19308 #endif
19309 kvm_get_preset_lpj();
19310 clocksource_register_hz(&kvm_clock, NSEC_PER_SEC);
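The kvm.c and kvmclock.c hunks replace direct assignments such as pv_time_ops.sched_clock = ... with writes through *(void **)& casts. That follows from the constification done elsewhere in this patch (pv_*_ops and the x86 init structures become __read_only), so ordinary member assignment no longer type-checks and boot-time hook installation strips the qualifier explicitly. A minimal illustration of the idiom; it is undefined behaviour for a genuinely read-only object and relies on the memory still being writable this early in boot:

struct ops { void (*hook)(void); };

static void new_hook(void) { }

/* stand-in for a constified ops structure such as pv_time_ops */
static const struct ops platform_ops;

static void install_hook(void)
{
        *(void **)&platform_ops.hook = new_hook;  /* cast away const */
}
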
19311diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
19312index ebc9873..1b9724b 100644
19313--- a/arch/x86/kernel/ldt.c
19314+++ b/arch/x86/kernel/ldt.c
19315@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
19316 if (reload) {
19317 #ifdef CONFIG_SMP
19318 preempt_disable();
19319- load_LDT(pc);
19320+ load_LDT_nolock(pc);
19321 if (!cpumask_equal(mm_cpumask(current->mm),
19322 cpumask_of(smp_processor_id())))
19323 smp_call_function(flush_ldt, current->mm, 1);
19324 preempt_enable();
19325 #else
19326- load_LDT(pc);
19327+ load_LDT_nolock(pc);
19328 #endif
19329 }
19330 if (oldsize) {
19331@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
19332 return err;
19333
19334 for (i = 0; i < old->size; i++)
19335- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
19336+ write_ldt_entry(new->ldt, i, old->ldt + i);
19337 return 0;
19338 }
19339
19340@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
19341 retval = copy_ldt(&mm->context, &old_mm->context);
19342 mutex_unlock(&old_mm->context.lock);
19343 }
19344+
19345+ if (tsk == current) {
19346+ mm->context.vdso = 0;
19347+
19348+#ifdef CONFIG_X86_32
19349+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19350+ mm->context.user_cs_base = 0UL;
19351+ mm->context.user_cs_limit = ~0UL;
19352+
19353+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
19354+ cpus_clear(mm->context.cpu_user_cs_mask);
19355+#endif
19356+
19357+#endif
19358+#endif
19359+
19360+ }
19361+
19362 return retval;
19363 }
19364
19365@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
19366 }
19367 }
19368
19369+#ifdef CONFIG_PAX_SEGMEXEC
19370+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
19371+ error = -EINVAL;
19372+ goto out_unlock;
19373+ }
19374+#endif
19375+
19376 fill_ldt(&ldt, &ldt_info);
19377 if (oldmode)
19378 ldt.avl = 0;
19379diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
19380index 5b19e4d..6476a76 100644
19381--- a/arch/x86/kernel/machine_kexec_32.c
19382+++ b/arch/x86/kernel/machine_kexec_32.c
19383@@ -26,7 +26,7 @@
19384 #include <asm/cacheflush.h>
19385 #include <asm/debugreg.h>
19386
19387-static void set_idt(void *newidt, __u16 limit)
19388+static void set_idt(struct desc_struct *newidt, __u16 limit)
19389 {
19390 struct desc_ptr curidt;
19391
19392@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
19393 }
19394
19395
19396-static void set_gdt(void *newgdt, __u16 limit)
19397+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
19398 {
19399 struct desc_ptr curgdt;
19400
19401@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
19402 }
19403
19404 control_page = page_address(image->control_code_page);
19405- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
19406+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
19407
19408 relocate_kernel_ptr = control_page;
19409 page_list[PA_CONTROL_PAGE] = __pa(control_page);
19410diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
19411index 3544aed..01ddc1c 100644
19412--- a/arch/x86/kernel/microcode_intel.c
19413+++ b/arch/x86/kernel/microcode_intel.c
19414@@ -431,13 +431,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
19415
19416 static int get_ucode_user(void *to, const void *from, size_t n)
19417 {
19418- return copy_from_user(to, from, n);
19419+ return copy_from_user(to, (const void __force_user *)from, n);
19420 }
19421
19422 static enum ucode_state
19423 request_microcode_user(int cpu, const void __user *buf, size_t size)
19424 {
19425- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
19426+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
19427 }
19428
19429 static void microcode_fini_cpu(int cpu)
19430diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
19431index 216a4d7..b328f09 100644
19432--- a/arch/x86/kernel/module.c
19433+++ b/arch/x86/kernel/module.c
19434@@ -43,15 +43,60 @@ do { \
19435 } while (0)
19436 #endif
19437
19438-void *module_alloc(unsigned long size)
19439+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
19440 {
19441- if (PAGE_ALIGN(size) > MODULES_LEN)
19442+ if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
19443 return NULL;
19444 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
19445- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
19446+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
19447 -1, __builtin_return_address(0));
19448 }
19449
19450+void *module_alloc(unsigned long size)
19451+{
19452+
19453+#ifdef CONFIG_PAX_KERNEXEC
19454+ return __module_alloc(size, PAGE_KERNEL);
19455+#else
19456+ return __module_alloc(size, PAGE_KERNEL_EXEC);
19457+#endif
19458+
19459+}
19460+
19461+#ifdef CONFIG_PAX_KERNEXEC
19462+#ifdef CONFIG_X86_32
19463+void *module_alloc_exec(unsigned long size)
19464+{
19465+ struct vm_struct *area;
19466+
19467+ if (size == 0)
19468+ return NULL;
19469+
19470+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
19471+ return area ? area->addr : NULL;
19472+}
19473+EXPORT_SYMBOL(module_alloc_exec);
19474+
19475+void module_free_exec(struct module *mod, void *module_region)
19476+{
19477+ vunmap(module_region);
19478+}
19479+EXPORT_SYMBOL(module_free_exec);
19480+#else
19481+void module_free_exec(struct module *mod, void *module_region)
19482+{
19483+ module_free(mod, module_region);
19484+}
19485+EXPORT_SYMBOL(module_free_exec);
19486+
19487+void *module_alloc_exec(unsigned long size)
19488+{
19489+ return __module_alloc(size, PAGE_KERNEL_RX);
19490+}
19491+EXPORT_SYMBOL(module_alloc_exec);
19492+#endif
19493+#endif
19494+
19495 #ifdef CONFIG_X86_32
19496 int apply_relocate(Elf32_Shdr *sechdrs,
19497 const char *strtab,
19498@@ -62,14 +107,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
19499 unsigned int i;
19500 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
19501 Elf32_Sym *sym;
19502- uint32_t *location;
19503+ uint32_t *plocation, location;
19504
19505 DEBUGP("Applying relocate section %u to %u\n",
19506 relsec, sechdrs[relsec].sh_info);
19507 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
19508 /* This is where to make the change */
19509- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
19510- + rel[i].r_offset;
19511+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
19512+ location = (uint32_t)plocation;
19513+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
19514+ plocation = ktla_ktva((void *)plocation);
19515 /* This is the symbol it is referring to. Note that all
19516 undefined symbols have been resolved. */
19517 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
19518@@ -78,11 +125,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
19519 switch (ELF32_R_TYPE(rel[i].r_info)) {
19520 case R_386_32:
19521 /* We add the value into the location given */
19522- *location += sym->st_value;
19523+ pax_open_kernel();
19524+ *plocation += sym->st_value;
19525+ pax_close_kernel();
19526 break;
19527 case R_386_PC32:
19528 /* Add the value, subtract its position */
19529- *location += sym->st_value - (uint32_t)location;
19530+ pax_open_kernel();
19531+ *plocation += sym->st_value - location;
19532+ pax_close_kernel();
19533 break;
19534 default:
19535 pr_err("%s: Unknown relocation: %u\n",
19536@@ -127,21 +178,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
19537 case R_X86_64_NONE:
19538 break;
19539 case R_X86_64_64:
19540+ pax_open_kernel();
19541 *(u64 *)loc = val;
19542+ pax_close_kernel();
19543 break;
19544 case R_X86_64_32:
19545+ pax_open_kernel();
19546 *(u32 *)loc = val;
19547+ pax_close_kernel();
19548 if (val != *(u32 *)loc)
19549 goto overflow;
19550 break;
19551 case R_X86_64_32S:
19552+ pax_open_kernel();
19553 *(s32 *)loc = val;
19554+ pax_close_kernel();
19555 if ((s64)val != *(s32 *)loc)
19556 goto overflow;
19557 break;
19558 case R_X86_64_PC32:
19559 val -= (u64)loc;
19560+ pax_open_kernel();
19561 *(u32 *)loc = val;
19562+ pax_close_kernel();
19563+
19564 #if 0
19565 if ((s64)val != *(s32 *)loc)
19566 goto overflow;
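module_alloc above loses exec permission under CONFIG_PAX_KERNEXEC, and a separate module_alloc_exec hands out read-only executable memory for module code, so no module mapping is ever writable and executable at once. A userspace analogue of that W^X split, using mmap protections in place of PAGE_KERNEL and PAGE_KERNEL_RX:

#include <stddef.h>
#include <sys/mman.h>

/* code gets RX, everything else RW; never RWX */
static void *mod_alloc(int is_text, size_t size)
{
        int prot = is_text ? (PROT_READ | PROT_EXEC)
                           : (PROT_READ | PROT_WRITE);
        void *p = mmap(NULL, size, prot,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        return p == MAP_FAILED ? NULL : p;
}
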
19567diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
19568index f84f5c5..e27e54b 100644
19569--- a/arch/x86/kernel/nmi.c
19570+++ b/arch/x86/kernel/nmi.c
19571@@ -479,6 +479,17 @@ static inline void nmi_nesting_postprocess(void)
19572 dotraplinkage notrace __kprobes void
19573 do_nmi(struct pt_regs *regs, long error_code)
19574 {
19575+
19576+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19577+ if (!user_mode(regs)) {
19578+ unsigned long cs = regs->cs & 0xFFFF;
19579+ unsigned long ip = ktva_ktla(regs->ip);
19580+
19581+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
19582+ regs->ip = ip;
19583+ }
19584+#endif
19585+
19586 nmi_nesting_preprocess(regs);
19587
19588 nmi_enter();
19589diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
19590index 676b8c7..870ba04 100644
19591--- a/arch/x86/kernel/paravirt-spinlocks.c
19592+++ b/arch/x86/kernel/paravirt-spinlocks.c
19593@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
19594 arch_spin_lock(lock);
19595 }
19596
19597-struct pv_lock_ops pv_lock_ops = {
19598+struct pv_lock_ops pv_lock_ops __read_only = {
19599 #ifdef CONFIG_SMP
19600 .spin_is_locked = __ticket_spin_is_locked,
19601 .spin_is_contended = __ticket_spin_is_contended,
19602diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
19603index 17fff18..0f5f957 100644
19604--- a/arch/x86/kernel/paravirt.c
19605+++ b/arch/x86/kernel/paravirt.c
19606@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
19607 {
19608 return x;
19609 }
19610+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
19611+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
19612+#endif
19613
19614 void __init default_banner(void)
19615 {
19616@@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
19617 if (opfunc == NULL)
19618 /* If there's no function, patch it with a ud2a (BUG) */
19619 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
19620- else if (opfunc == _paravirt_nop)
19621+ else if (opfunc == (void *)_paravirt_nop)
19622 /* If the operation is a nop, then nop the callsite */
19623 ret = paravirt_patch_nop();
19624
19625 /* identity functions just return their single argument */
19626- else if (opfunc == _paravirt_ident_32)
19627+ else if (opfunc == (void *)_paravirt_ident_32)
19628 ret = paravirt_patch_ident_32(insnbuf, len);
19629- else if (opfunc == _paravirt_ident_64)
19630+ else if (opfunc == (void *)_paravirt_ident_64)
19631 ret = paravirt_patch_ident_64(insnbuf, len);
19632+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
19633+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
19634+ ret = paravirt_patch_ident_64(insnbuf, len);
19635+#endif
19636
19637 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
19638 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
19639@@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
19640 if (insn_len > len || start == NULL)
19641 insn_len = len;
19642 else
19643- memcpy(insnbuf, start, insn_len);
19644+ memcpy(insnbuf, ktla_ktva(start), insn_len);
19645
19646 return insn_len;
19647 }
19648@@ -304,7 +311,7 @@ void arch_flush_lazy_mmu_mode(void)
19649 preempt_enable();
19650 }
19651
19652-struct pv_info pv_info = {
19653+struct pv_info pv_info __read_only = {
19654 .name = "bare hardware",
19655 .paravirt_enabled = 0,
19656 .kernel_rpl = 0,
19657@@ -324,7 +331,7 @@ struct pv_time_ops pv_time_ops = {
19658 .steal_clock = native_steal_clock,
19659 };
19660
19661-struct pv_irq_ops pv_irq_ops = {
19662+struct pv_irq_ops pv_irq_ops __read_only = {
19663 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
19664 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
19665 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
19666@@ -401,15 +408,20 @@ struct pv_apic_ops pv_apic_ops = {
19667 #endif
19668 };
19669
19670-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
19671+#ifdef CONFIG_X86_32
19672+#ifdef CONFIG_X86_PAE
19673+/* 64-bit pagetable entries */
19674+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
19675+#else
19676 /* 32-bit pagetable entries */
19677 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
19678+#endif
19679 #else
19680 /* 64-bit pagetable entries */
19681 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
19682 #endif
19683
19684-struct pv_mmu_ops pv_mmu_ops = {
19685+struct pv_mmu_ops pv_mmu_ops __read_only = {
19686
19687 .read_cr2 = native_read_cr2,
19688 .write_cr2 = native_write_cr2,
19689@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
19690 .make_pud = PTE_IDENT,
19691
19692 .set_pgd = native_set_pgd,
19693+ .set_pgd_batched = native_set_pgd_batched,
19694 #endif
19695 #endif /* PAGETABLE_LEVELS >= 3 */
19696
19697@@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
19698 },
19699
19700 .set_fixmap = native_set_fixmap,
19701+
19702+#ifdef CONFIG_PAX_KERNEXEC
19703+ .pax_open_kernel = native_pax_open_kernel,
19704+ .pax_close_kernel = native_pax_close_kernel,
19705+#endif
19706+
19707 };
19708
19709 EXPORT_SYMBOL_GPL(pv_time_ops);
19710diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
19711index 299d493..79c13dd 100644
19712--- a/arch/x86/kernel/pci-calgary_64.c
19713+++ b/arch/x86/kernel/pci-calgary_64.c
19714@@ -1461,7 +1461,7 @@ int __init detect_calgary(void)
19715 printk(KERN_INFO "PCI-DMA: Calgary TCE table spec is %d\n",
19716 specified_table_size);
19717
19718- x86_init.iommu.iommu_init = calgary_iommu_init;
19719+ *(void **)&x86_init.iommu.iommu_init = calgary_iommu_init;
19720 }
19721 return calgary_found;
19722
19723diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
19724index 35ccf75..7a15747 100644
19725--- a/arch/x86/kernel/pci-iommu_table.c
19726+++ b/arch/x86/kernel/pci-iommu_table.c
19727@@ -2,7 +2,7 @@
19728 #include <asm/iommu_table.h>
19729 #include <linux/string.h>
19730 #include <linux/kallsyms.h>
19731-
19732+#include <linux/sched.h>
19733
19734 #define DEBUG 1
19735
19736diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
19737index b644e1c..4a6d379 100644
19738--- a/arch/x86/kernel/process.c
19739+++ b/arch/x86/kernel/process.c
19740@@ -36,7 +36,8 @@
19741 * section. Since TSS's are completely CPU-local, we want them
19742 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
19743 */
19744-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
19745+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
19746+EXPORT_SYMBOL(init_tss);
19747
19748 #ifdef CONFIG_X86_64
19749 static DEFINE_PER_CPU(unsigned char, is_idle);
19750@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
19751 task_xstate_cachep =
19752 kmem_cache_create("task_xstate", xstate_size,
19753 __alignof__(union thread_xstate),
19754- SLAB_PANIC | SLAB_NOTRACK, NULL);
19755+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
19756 }
19757
19758 /*
19759@@ -105,7 +106,7 @@ void exit_thread(void)
19760 unsigned long *bp = t->io_bitmap_ptr;
19761
19762 if (bp) {
19763- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
19764+ struct tss_struct *tss = init_tss + get_cpu();
19765
19766 t->io_bitmap_ptr = NULL;
19767 clear_thread_flag(TIF_IO_BITMAP);
19768@@ -136,7 +137,7 @@ void show_regs_common(void)
19769 board = dmi_get_system_info(DMI_BOARD_NAME);
19770
19771 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s %s%s%s\n",
19772- current->pid, current->comm, print_tainted(),
19773+ task_pid_nr(current), current->comm, print_tainted(),
19774 init_utsname()->release,
19775 (int)strcspn(init_utsname()->version, " "),
19776 init_utsname()->version,
19777@@ -149,6 +150,9 @@ void flush_thread(void)
19778 {
19779 struct task_struct *tsk = current;
19780
19781+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
19782+ loadsegment(gs, 0);
19783+#endif
19784 flush_ptrace_hw_breakpoint(tsk);
19785 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
19786 drop_init_fpu(tsk);
19787@@ -336,7 +340,7 @@ static void __exit_idle(void)
19788 void exit_idle(void)
19789 {
19790 /* idle loop has pid 0 */
19791- if (current->pid)
19792+ if (task_pid_nr(current))
19793 return;
19794 __exit_idle();
19795 }
19796@@ -445,7 +449,7 @@ bool set_pm_idle_to_default(void)
19797
19798 return ret;
19799 }
19800-void stop_this_cpu(void *dummy)
19801+__noreturn void stop_this_cpu(void *dummy)
19802 {
19803 local_irq_disable();
19804 /*
19805@@ -673,16 +677,37 @@ static int __init idle_setup(char *str)
19806 }
19807 early_param("idle", idle_setup);
19808
19809-unsigned long arch_align_stack(unsigned long sp)
19810+#ifdef CONFIG_PAX_RANDKSTACK
19811+void pax_randomize_kstack(struct pt_regs *regs)
19812 {
19813- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
19814- sp -= get_random_int() % 8192;
19815- return sp & ~0xf;
19816-}
19817+ struct thread_struct *thread = &current->thread;
19818+ unsigned long time;
19819
19820-unsigned long arch_randomize_brk(struct mm_struct *mm)
19821-{
19822- unsigned long range_end = mm->brk + 0x02000000;
19823- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
19824-}
19825+ if (!randomize_va_space)
19826+ return;
19827+
19828+ if (v8086_mode(regs))
19829+ return;
19830
19831+ rdtscl(time);
19832+
19833+ /* P4 seems to return a 0 LSB, ignore it */
19834+#ifdef CONFIG_MPENTIUM4
19835+ time &= 0x3EUL;
19836+ time <<= 2;
19837+#elif defined(CONFIG_X86_64)
19838+ time &= 0xFUL;
19839+ time <<= 4;
19840+#else
19841+ time &= 0x1FUL;
19842+ time <<= 3;
19843+#endif
19844+
19845+ thread->sp0 ^= time;
19846+ load_sp0(init_tss + smp_processor_id(), thread);
19847+
19848+#ifdef CONFIG_X86_64
19849+ this_cpu_write(kernel_stack, thread->sp0);
19850+#endif
19851+}
19852+#endif
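pax_randomize_kstack above draws its entropy from the low TSC bits, with per-variant mask/shift pairs that all span the same 256-byte window: generic x86-32 uses (tsc & 0x1f) << 3 (32 slots, 8-byte aligned), Pentium 4 uses (tsc & 0x3e) << 2 to skip that CPU's stuck low TSC bit (again 32 slots, 8-byte aligned), and x86-64 uses (tsc & 0xf) << 4 (16 slots, 16-byte aligned to preserve ABI stack alignment). The slot arithmetic as standalone functions:

#include <stdint.h>

static uint32_t kstack_offset_i386(uint32_t tsc)
{
        return (tsc & 0x1FU) << 3;   /* 32 slots x 8 bytes = 256 bytes */
}

static uint32_t kstack_offset_p4(uint32_t tsc)
{
        return (tsc & 0x3EU) << 2;   /* bit 0 ignored; same 256-byte span */
}

static uint64_t kstack_offset_x86_64(uint64_t tsc)
{
        return (tsc & 0xFUL) << 4;   /* 16 slots x 16 bytes = 256 bytes */
}
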
19853diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
19854index 44e0bff..5ceb99c 100644
19855--- a/arch/x86/kernel/process_32.c
19856+++ b/arch/x86/kernel/process_32.c
19857@@ -65,6 +65,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
19858 unsigned long thread_saved_pc(struct task_struct *tsk)
19859 {
19860 return ((unsigned long *)tsk->thread.sp)[3];
19861+//XXX return tsk->thread.eip;
19862 }
19863
19864 void __show_regs(struct pt_regs *regs, int all)
19865@@ -74,21 +75,20 @@ void __show_regs(struct pt_regs *regs, int all)
19866 unsigned long sp;
19867 unsigned short ss, gs;
19868
19869- if (user_mode_vm(regs)) {
19870+ if (user_mode(regs)) {
19871 sp = regs->sp;
19872 ss = regs->ss & 0xffff;
19873- gs = get_user_gs(regs);
19874 } else {
19875 sp = kernel_stack_pointer(regs);
19876 savesegment(ss, ss);
19877- savesegment(gs, gs);
19878 }
19879+ gs = get_user_gs(regs);
19880
19881 show_regs_common();
19882
19883 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
19884 (u16)regs->cs, regs->ip, regs->flags,
19885- smp_processor_id());
19886+ raw_smp_processor_id());
19887 print_symbol("EIP is at %s\n", regs->ip);
19888
19889 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
19890@@ -131,20 +131,21 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
19891 unsigned long arg,
19892 struct task_struct *p, struct pt_regs *regs)
19893 {
19894- struct pt_regs *childregs = task_pt_regs(p);
19895+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
19896 struct task_struct *tsk;
19897 int err;
19898
19899 p->thread.sp = (unsigned long) childregs;
19900 p->thread.sp0 = (unsigned long) (childregs+1);
19901+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
19902
19903 if (unlikely(!regs)) {
19904 /* kernel thread */
19905 memset(childregs, 0, sizeof(struct pt_regs));
19906 p->thread.ip = (unsigned long) ret_from_kernel_thread;
19907- task_user_gs(p) = __KERNEL_STACK_CANARY;
19908- childregs->ds = __USER_DS;
19909- childregs->es = __USER_DS;
19910+ savesegment(gs, childregs->gs);
19911+ childregs->ds = __KERNEL_DS;
19912+ childregs->es = __KERNEL_DS;
19913 childregs->fs = __KERNEL_PERCPU;
19914 childregs->bx = sp; /* function */
19915 childregs->bp = arg;
19916@@ -250,7 +251,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19917 struct thread_struct *prev = &prev_p->thread,
19918 *next = &next_p->thread;
19919 int cpu = smp_processor_id();
19920- struct tss_struct *tss = &per_cpu(init_tss, cpu);
19921+ struct tss_struct *tss = init_tss + cpu;
19922 fpu_switch_t fpu;
19923
19924 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
19925@@ -274,6 +275,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19926 */
19927 lazy_save_gs(prev->gs);
19928
19929+#ifdef CONFIG_PAX_MEMORY_UDEREF
19930+ __set_fs(task_thread_info(next_p)->addr_limit);
19931+#endif
19932+
19933 /*
19934 * Load the per-thread Thread-Local Storage descriptor.
19935 */
19936@@ -304,6 +309,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19937 */
19938 arch_end_context_switch(next_p);
19939
19940+ this_cpu_write(current_task, next_p);
19941+ this_cpu_write(current_tinfo, &next_p->tinfo);
19942+
19943 /*
19944 * Restore %gs if needed (which is common)
19945 */
19946@@ -312,8 +320,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19947
19948 switch_fpu_finish(next_p, fpu);
19949
19950- this_cpu_write(current_task, next_p);
19951-
19952 return prev_p;
19953 }
19954
19955@@ -343,4 +349,3 @@ unsigned long get_wchan(struct task_struct *p)
19956 } while (count++ < 16);
19957 return 0;
19958 }
19959-
19960diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
19961index 16c6365..5d32218 100644
19962--- a/arch/x86/kernel/process_64.c
19963+++ b/arch/x86/kernel/process_64.c
19964@@ -153,10 +153,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
19965 struct pt_regs *childregs;
19966 struct task_struct *me = current;
19967
19968- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
19969+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
19970 childregs = task_pt_regs(p);
19971 p->thread.sp = (unsigned long) childregs;
19972 p->thread.usersp = me->thread.usersp;
19973+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
19974 set_tsk_thread_flag(p, TIF_FORK);
19975 p->fpu_counter = 0;
19976 p->thread.io_bitmap_ptr = NULL;
19977@@ -274,7 +275,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19978 struct thread_struct *prev = &prev_p->thread;
19979 struct thread_struct *next = &next_p->thread;
19980 int cpu = smp_processor_id();
19981- struct tss_struct *tss = &per_cpu(init_tss, cpu);
19982+ struct tss_struct *tss = init_tss + cpu;
19983 unsigned fsindex, gsindex;
19984 fpu_switch_t fpu;
19985
19986@@ -356,10 +357,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19987 prev->usersp = this_cpu_read(old_rsp);
19988 this_cpu_write(old_rsp, next->usersp);
19989 this_cpu_write(current_task, next_p);
19990+ this_cpu_write(current_tinfo, &next_p->tinfo);
19991
19992- this_cpu_write(kernel_stack,
19993- (unsigned long)task_stack_page(next_p) +
19994- THREAD_SIZE - KERNEL_STACK_OFFSET);
19995+ this_cpu_write(kernel_stack, next->sp0);
19996
19997 /*
19998 * Now maybe reload the debug registers and handle I/O bitmaps
19999@@ -428,12 +428,11 @@ unsigned long get_wchan(struct task_struct *p)
20000 if (!p || p == current || p->state == TASK_RUNNING)
20001 return 0;
20002 stack = (unsigned long)task_stack_page(p);
20003- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
20004+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
20005 return 0;
20006 fp = *(u64 *)(p->thread.sp);
20007 do {
20008- if (fp < (unsigned long)stack ||
20009- fp >= (unsigned long)stack+THREAD_SIZE)
20010+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
20011 return 0;
20012 ip = *(u64 *)(fp+8);
20013 if (!in_sched_functions(ip))
20014diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
20015index 974b67e..044111b 100644
20016--- a/arch/x86/kernel/ptrace.c
20017+++ b/arch/x86/kernel/ptrace.c
20018@@ -183,14 +183,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
20019 {
20020 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
20021 unsigned long sp = (unsigned long)&regs->sp;
20022- struct thread_info *tinfo;
20023
20024- if (context == (sp & ~(THREAD_SIZE - 1)))
20025+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
20026 return sp;
20027
20028- tinfo = (struct thread_info *)context;
20029- if (tinfo->previous_esp)
20030- return tinfo->previous_esp;
20031+ sp = *(unsigned long *)context;
20032+ if (sp)
20033+ return sp;
20034
20035 return (unsigned long)regs;
20036 }
20037@@ -855,7 +854,7 @@ long arch_ptrace(struct task_struct *child, long request,
20038 unsigned long addr, unsigned long data)
20039 {
20040 int ret;
20041- unsigned long __user *datap = (unsigned long __user *)data;
20042+ unsigned long __user *datap = (__force unsigned long __user *)data;
20043
20044 switch (request) {
20045 /* read the word at location addr in the USER area. */
20046@@ -940,14 +939,14 @@ long arch_ptrace(struct task_struct *child, long request,
20047 if ((int) addr < 0)
20048 return -EIO;
20049 ret = do_get_thread_area(child, addr,
20050- (struct user_desc __user *)data);
20051+ (__force struct user_desc __user *) data);
20052 break;
20053
20054 case PTRACE_SET_THREAD_AREA:
20055 if ((int) addr < 0)
20056 return -EIO;
20057 ret = do_set_thread_area(child, addr,
20058- (struct user_desc __user *)data, 0);
20059+ (__force struct user_desc __user *) data, 0);
20060 break;
20061 #endif
20062
20063@@ -1325,7 +1324,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
20064
20065 #ifdef CONFIG_X86_64
20066
20067-static struct user_regset x86_64_regsets[] __read_mostly = {
20068+static struct user_regset x86_64_regsets[] = {
20069 [REGSET_GENERAL] = {
20070 .core_note_type = NT_PRSTATUS,
20071 .n = sizeof(struct user_regs_struct) / sizeof(long),
20072@@ -1366,7 +1365,7 @@ static const struct user_regset_view user_x86_64_view = {
20073 #endif /* CONFIG_X86_64 */
20074
20075 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
20076-static struct user_regset x86_32_regsets[] __read_mostly = {
20077+static struct user_regset x86_32_regsets[] = {
20078 [REGSET_GENERAL] = {
20079 .core_note_type = NT_PRSTATUS,
20080 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
20081@@ -1419,13 +1418,13 @@ static const struct user_regset_view user_x86_32_view = {
20082 */
20083 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
20084
20085-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
20086+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
20087 {
20088 #ifdef CONFIG_X86_64
20089- x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
20090+ *(unsigned int *)&x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
20091 #endif
20092 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
20093- x86_32_regsets[REGSET_XSTATE].n = size / sizeof(u64);
20094+ *(unsigned int *)&x86_32_regsets[REGSET_XSTATE].n = size / sizeof(u64);
20095 #endif
20096 xstate_fx_sw_bytes[USER_XSTATE_XCR0_WORD] = xstate_mask;
20097 }
20098@@ -1454,7 +1453,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
20099 memset(info, 0, sizeof(*info));
20100 info->si_signo = SIGTRAP;
20101 info->si_code = si_code;
20102- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
20103+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
20104 }
20105
20106 void user_single_step_siginfo(struct task_struct *tsk,
20107@@ -1483,6 +1482,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
20108 # define IS_IA32 0
20109 #endif
20110
20111+#ifdef CONFIG_GRKERNSEC_SETXID
20112+extern void gr_delayed_cred_worker(void);
20113+#endif
20114+
20115 /*
20116 * We must return the syscall number to actually look up in the table.
20117 * This can be -1L to skip running any syscall at all.
20118@@ -1493,6 +1496,11 @@ long syscall_trace_enter(struct pt_regs *regs)
20119
20120 rcu_user_exit();
20121
20122+#ifdef CONFIG_GRKERNSEC_SETXID
20123+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
20124+ gr_delayed_cred_worker();
20125+#endif
20126+
20127 /*
20128 * If we stepped into a sysenter/syscall insn, it trapped in
20129 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
20130@@ -1548,6 +1556,11 @@ void syscall_trace_leave(struct pt_regs *regs)
20131 */
20132 rcu_user_exit();
20133
20134+#ifdef CONFIG_GRKERNSEC_SETXID
20135+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
20136+ gr_delayed_cred_worker();
20137+#endif
20138+
20139 audit_syscall_exit(regs);
20140
20141 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
20142diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
20143index 42eb330..139955c 100644
20144--- a/arch/x86/kernel/pvclock.c
20145+++ b/arch/x86/kernel/pvclock.c
20146@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
20147 return pv_tsc_khz;
20148 }
20149
20150-static atomic64_t last_value = ATOMIC64_INIT(0);
20151+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
20152
20153 void pvclock_resume(void)
20154 {
20155- atomic64_set(&last_value, 0);
20156+ atomic64_set_unchecked(&last_value, 0);
20157 }
20158
20159 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
20160@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
20161 * updating at the same time, and one of them could be slightly behind,
20162 * making the assumption that last_value always go forward fail to hold.
20163 */
20164- last = atomic64_read(&last_value);
20165+ last = atomic64_read_unchecked(&last_value);
20166 do {
20167 if (ret < last)
20168 return last;
20169- last = atomic64_cmpxchg(&last_value, last, ret);
20170+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
20171 } while (unlikely(last != ret));
20172
20173 return ret;
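The loop above is the standard lock-free idiom for a monotonic timestamp: read the last published value, return it if it is already ahead, otherwise try to publish your own reading with a compare-and-swap and retry on contention. The hunk only changes the counter's type to PaX's atomic64_unchecked_t, exempting this deliberately racing counter from REFCOUNT overflow instrumentation; the logic is untouched. A minimal user-space rendering of the same idiom with C11 atomics (illustrative, not from the patch):

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint64_t last_value;

    /* Never return a value below what any CPU has already seen,
     * even if this CPU's raw clock source is slightly behind. */
    uint64_t monotonic_read(uint64_t raw)
    {
        uint64_t last = atomic_load(&last_value);

        do {
            if (raw < last)        /* a later value was already published */
                return last;
            /* on CAS failure, last is reloaded for us and we retry */
        } while (!atomic_compare_exchange_weak(&last_value, &last, raw));

        return raw;
    }
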
20174diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
20175index 4e8ba39..e0186b3 100644
20176--- a/arch/x86/kernel/reboot.c
20177+++ b/arch/x86/kernel/reboot.c
20178@@ -36,7 +36,7 @@ void (*pm_power_off)(void);
20179 EXPORT_SYMBOL(pm_power_off);
20180
20181 static const struct desc_ptr no_idt = {};
20182-static int reboot_mode;
20183+static unsigned short reboot_mode;
20184 enum reboot_type reboot_type = BOOT_ACPI;
20185 int reboot_force;
20186
20187@@ -157,6 +157,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
20188
20189 void __noreturn machine_real_restart(unsigned int type)
20190 {
20191+
20192+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
20193+ struct desc_struct *gdt;
20194+#endif
20195+
20196 local_irq_disable();
20197
20198 /*
20199@@ -184,7 +189,29 @@ void __noreturn machine_real_restart(unsigned int type)
20200
20201 /* Jump to the identity-mapped low memory code */
20202 #ifdef CONFIG_X86_32
20203- asm volatile("jmpl *%0" : :
20204+
20205+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
20206+ gdt = get_cpu_gdt_table(smp_processor_id());
20207+ pax_open_kernel();
20208+#ifdef CONFIG_PAX_MEMORY_UDEREF
20209+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
20210+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
20211+ loadsegment(ds, __KERNEL_DS);
20212+ loadsegment(es, __KERNEL_DS);
20213+ loadsegment(ss, __KERNEL_DS);
20214+#endif
20215+#ifdef CONFIG_PAX_KERNEXEC
20216+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
20217+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
20218+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
20219+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
20220+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
20221+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
20222+#endif
20223+ pax_close_kernel();
20224+#endif
20225+
20226+ asm volatile("ljmpl *%0" : :
20227 "rm" (real_mode_header->machine_real_restart_asm),
20228 "a" (type));
20229 #else
20230@@ -531,7 +558,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
20231 * try to force a triple fault and then cycle between hitting the keyboard
20232 * controller and doing that
20233 */
20234-static void native_machine_emergency_restart(void)
20235+static void __noreturn native_machine_emergency_restart(void)
20236 {
20237 int i;
20238 int attempt = 0;
20239@@ -654,13 +681,13 @@ void native_machine_shutdown(void)
20240 #endif
20241 }
20242
20243-static void __machine_emergency_restart(int emergency)
20244+static void __noreturn __machine_emergency_restart(int emergency)
20245 {
20246 reboot_emergency = emergency;
20247 machine_ops.emergency_restart();
20248 }
20249
20250-static void native_machine_restart(char *__unused)
20251+static void __noreturn native_machine_restart(char *__unused)
20252 {
20253 pr_notice("machine restart\n");
20254
20255@@ -669,7 +696,7 @@ static void native_machine_restart(char *__unused)
20256 __machine_emergency_restart(0);
20257 }
20258
20259-static void native_machine_halt(void)
20260+static void __noreturn native_machine_halt(void)
20261 {
20262 /* Stop other cpus and apics */
20263 machine_shutdown();
20264@@ -679,7 +706,7 @@ static void native_machine_halt(void)
20265 stop_this_cpu(NULL);
20266 }
20267
20268-static void native_machine_power_off(void)
20269+static void __noreturn native_machine_power_off(void)
20270 {
20271 if (pm_power_off) {
20272 if (!reboot_force)
20273@@ -688,6 +715,7 @@ static void native_machine_power_off(void)
20274 }
20275 /* A fallback in case there is no PM info available */
20276 tboot_shutdown(TB_SHUTDOWN_HALT);
20277+ unreachable();
20278 }
20279
20280 struct machine_ops machine_ops = {
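Most of this hunk annotates the terminal reboot/halt paths with __noreturn so the compiler can prune impossible return paths and diagnose code after the calls; the jmpl-to-ljmpl switch pairs with the GDT edits above so the identity-mapped stub is entered through a freshly flattened code segment. The one easily missed addition is unreachable() after tboot_shutdown(): that function halts in practice but its prototype says it returns, so without the hint a __noreturn function would visibly fall off the end and GCC would warn. A compact sketch of the pattern (firmware_halt() is a made-up stand-in):

    #define __noreturn __attribute__((noreturn))

    /* Halts in practice, but the prototype does not promise it. */
    extern void firmware_halt(void);

    void __noreturn power_off_last_resort(void)
    {
        firmware_halt();
        /* the kernel spells this unreachable(): tells the optimizer
         * (and -Wreturn-type) that control never reaches this point */
        __builtin_unreachable();
    }
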
20281diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
20282index 7a6f3b3..bed145d7 100644
20283--- a/arch/x86/kernel/relocate_kernel_64.S
20284+++ b/arch/x86/kernel/relocate_kernel_64.S
20285@@ -11,6 +11,7 @@
20286 #include <asm/kexec.h>
20287 #include <asm/processor-flags.h>
20288 #include <asm/pgtable_types.h>
20289+#include <asm/alternative-asm.h>
20290
20291 /*
20292 * Must be relocatable PIC code callable as a C function
20293@@ -160,13 +161,14 @@ identity_mapped:
20294 xorq %rbp, %rbp
20295 xorq %r8, %r8
20296 xorq %r9, %r9
20297- xorq %r10, %r9
20298+ xorq %r10, %r10
20299 xorq %r11, %r11
20300 xorq %r12, %r12
20301 xorq %r13, %r13
20302 xorq %r14, %r14
20303 xorq %r15, %r15
20304
20305+ pax_force_retaddr 0, 1
20306 ret
20307
20308 1:
20309diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
20310index 86c524c..bae70c5 100644
20311--- a/arch/x86/kernel/setup.c
20312+++ b/arch/x86/kernel/setup.c
20313@@ -441,7 +441,7 @@ static void __init parse_setup_data(void)
20314
20315 switch (data->type) {
20316 case SETUP_E820_EXT:
20317- parse_e820_ext(data);
20318+ parse_e820_ext((struct setup_data __force_kernel *)data);
20319 break;
20320 case SETUP_DTB:
20321 add_dtb(pa_data);
20322@@ -708,7 +708,7 @@ static void __init trim_bios_range(void)
20323 * area (640->1Mb) as ram even though it is not.
20324 * take them out.
20325 */
20326- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
20327+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
20328
20329 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
20330 }
20331@@ -832,14 +832,14 @@ void __init setup_arch(char **cmdline_p)
20332
20333 if (!boot_params.hdr.root_flags)
20334 root_mountflags &= ~MS_RDONLY;
20335- init_mm.start_code = (unsigned long) _text;
20336- init_mm.end_code = (unsigned long) _etext;
20337+ init_mm.start_code = ktla_ktva((unsigned long) _text);
20338+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
20339 init_mm.end_data = (unsigned long) _edata;
20340 init_mm.brk = _brk_end;
20341
20342- code_resource.start = virt_to_phys(_text);
20343- code_resource.end = virt_to_phys(_etext)-1;
20344- data_resource.start = virt_to_phys(_etext);
20345+ code_resource.start = virt_to_phys(ktla_ktva(_text));
20346+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
20347+ data_resource.start = virt_to_phys(_sdata);
20348 data_resource.end = virt_to_phys(_edata)-1;
20349 bss_resource.start = virt_to_phys(&__bss_start);
20350 bss_resource.end = virt_to_phys(&__bss_stop)-1;
20351diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
20352index 5cdff03..5810740 100644
20353--- a/arch/x86/kernel/setup_percpu.c
20354+++ b/arch/x86/kernel/setup_percpu.c
20355@@ -21,19 +21,17 @@
20356 #include <asm/cpu.h>
20357 #include <asm/stackprotector.h>
20358
20359-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
20360+#ifdef CONFIG_SMP
20361+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
20362 EXPORT_PER_CPU_SYMBOL(cpu_number);
20363+#endif
20364
20365-#ifdef CONFIG_X86_64
20366 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
20367-#else
20368-#define BOOT_PERCPU_OFFSET 0
20369-#endif
20370
20371 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
20372 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
20373
20374-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
20375+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
20376 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
20377 };
20378 EXPORT_SYMBOL(__per_cpu_offset);
20379@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
20380 {
20381 #ifdef CONFIG_X86_32
20382 struct desc_struct gdt;
20383+ unsigned long base = per_cpu_offset(cpu);
20384
20385- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
20386- 0x2 | DESCTYPE_S, 0x8);
20387- gdt.s = 1;
20388+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
20389+ 0x83 | DESCTYPE_S, 0xC);
20390 write_gdt_entry(get_cpu_gdt_table(cpu),
20391 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
20392 #endif
20393@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
20394 /* alrighty, percpu areas up and running */
20395 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
20396 for_each_possible_cpu(cpu) {
20397+#ifdef CONFIG_CC_STACKPROTECTOR
20398+#ifdef CONFIG_X86_32
20399+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
20400+#endif
20401+#endif
20402 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
20403 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
20404 per_cpu(cpu_number, cpu) = cpu;
20405@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
20406 */
20407 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
20408 #endif
20409+#ifdef CONFIG_CC_STACKPROTECTOR
20410+#ifdef CONFIG_X86_32
20411+ if (!cpu)
20412+ per_cpu(stack_canary.canary, cpu) = canary;
20413+#endif
20414+#endif
20415 /*
20416 * Up to this point, the boot CPU has been using .init.data
20417 * area. Reload any changed state for the boot CPU.
20418diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
20419index 70b27ee..fcf827f 100644
20420--- a/arch/x86/kernel/signal.c
20421+++ b/arch/x86/kernel/signal.c
20422@@ -195,7 +195,7 @@ static unsigned long align_sigframe(unsigned long sp)
20423 * Align the stack pointer according to the i386 ABI,
20424 * i.e. so that on function entry ((sp + 4) & 15) == 0.
20425 */
20426- sp = ((sp + 4) & -16ul) - 4;
20427+ sp = ((sp - 12) & -16ul) - 4;
20428 #else /* !CONFIG_X86_32 */
20429 sp = round_down(sp, 16) - 8;
20430 #endif
20431@@ -303,9 +303,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
20432 }
20433
20434 if (current->mm->context.vdso)
20435- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
20436+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
20437 else
20438- restorer = &frame->retcode;
20439+ restorer = (void __user *)&frame->retcode;
20440 if (ka->sa.sa_flags & SA_RESTORER)
20441 restorer = ka->sa.sa_restorer;
20442
20443@@ -319,7 +319,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
20444 * reasons and because gdb uses it as a signature to notice
20445 * signal handler stack frames.
20446 */
20447- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
20448+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
20449
20450 if (err)
20451 return -EFAULT;
20452@@ -369,7 +369,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
20453 put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
20454
20455 /* Set up to return from userspace. */
20456- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
20457+ if (current->mm->context.vdso)
20458+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
20459+ else
20460+ restorer = (void __user *)&frame->retcode;
20461 if (ka->sa.sa_flags & SA_RESTORER)
20462 restorer = ka->sa.sa_restorer;
20463 put_user_ex(restorer, &frame->pretcode);
20464@@ -381,7 +384,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
20465 * reasons and because gdb uses it as a signature to notice
20466 * signal handler stack frames.
20467 */
20468- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
20469+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
20470 } put_user_catch(err);
20471
20472 err |= copy_siginfo_to_user(&frame->info, info);
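Both alignment formulas satisfy the i386 ABI rule quoted in the comment, ((sp + 4) & 15) == 0; the difference is that ((sp - 12) & -16ul) is exactly ((sp + 4) & -16ul) - 16, so the new form always reserves one extra 16-byte slot and lands strictly below the incoming sp, whereas the old form returns sp itself whenever sp ≡ 12 (mod 16). A quick user-space spot check of that claim:

    #include <assert.h>
    #include <stdio.h>

    static unsigned long align_old(unsigned long sp) { return ((sp + 4) & -16ul) - 4; }
    static unsigned long align_new(unsigned long sp) { return ((sp - 12) & -16ul) - 4; }

    int main(void)
    {
        for (unsigned long sp = 0x1000; sp < 0x1010; sp++) {
            unsigned long o = align_old(sp), n = align_new(sp);

            assert(((o + 4) & 15) == 0 && ((n + 4) & 15) == 0); /* ABI holds */
            assert(n == o - 16);  /* new form = old form minus one 16-byte slot */
            assert(n < sp);       /* so the result is always strictly below sp */
            if (o == sp)
                printf("sp=%#lx: old formula reserves no room\n", sp);
        }
        return 0;
    }
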
20473diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
20474index f3e2ec8..ad5287a 100644
20475--- a/arch/x86/kernel/smpboot.c
20476+++ b/arch/x86/kernel/smpboot.c
20477@@ -673,6 +673,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
20478 idle->thread.sp = (unsigned long) (((struct pt_regs *)
20479 (THREAD_SIZE + task_stack_page(idle))) - 1);
20480 per_cpu(current_task, cpu) = idle;
20481+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
20482
20483 #ifdef CONFIG_X86_32
20484 /* Stack for startup_32 can be just as for start_secondary onwards */
20485@@ -680,11 +681,13 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
20486 #else
20487 clear_tsk_thread_flag(idle, TIF_FORK);
20488 initial_gs = per_cpu_offset(cpu);
20489- per_cpu(kernel_stack, cpu) =
20490- (unsigned long)task_stack_page(idle) -
20491- KERNEL_STACK_OFFSET + THREAD_SIZE;
20492+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
20493 #endif
20494+
20495+ pax_open_kernel();
20496 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
20497+ pax_close_kernel();
20498+
20499 initial_code = (unsigned long)start_secondary;
20500 stack_start = idle->thread.sp;
20501
20502@@ -823,6 +826,12 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
20503 /* the FPU context is blank, nobody can own it */
20504 __cpu_disable_lazy_restore(cpu);
20505
20506+#ifdef CONFIG_PAX_PER_CPU_PGD
20507+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
20508+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20509+ KERNEL_PGD_PTRS);
20510+#endif
20511+
20515 err = do_boot_cpu(apicid, cpu, tidle);
20516 if (err) {
20517 pr_debug("do_boot_cpu failed %d\n", err);
20518diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
20519index cd3b243..4ba27a4 100644
20520--- a/arch/x86/kernel/step.c
20521+++ b/arch/x86/kernel/step.c
20522@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
20523 struct desc_struct *desc;
20524 unsigned long base;
20525
20526- seg &= ~7UL;
20527+ seg >>= 3;
20528
20529 mutex_lock(&child->mm->context.lock);
20530- if (unlikely((seg >> 3) >= child->mm->context.size))
20531+ if (unlikely(seg >= child->mm->context.size))
20532 addr = -1L; /* bogus selector, access would fault */
20533 else {
20534 desc = child->mm->context.ldt + seg;
20535@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
20536 addr += base;
20537 }
20538 mutex_unlock(&child->mm->context.lock);
20539- }
20540+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
20541+ addr = ktla_ktva(addr);
20542
20543 return addr;
20544 }
20545@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
20546 unsigned char opcode[15];
20547 unsigned long addr = convert_ip_to_linear(child, regs);
20548
20549+ if (addr == -1L)
20550+ return 0;
20551+
20552 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
20553 for (i = 0; i < copied; i++) {
20554 switch (opcode[i]) {
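The selector handling is equivalent before and after: an x86 selector packs index<<3 | TI<<2 | RPL, so masking off the low three bits yields a byte offset into an 8-byte-per-entry descriptor table, while shifting right by three yields the entry index directly; the rewrite simply keeps the index form throughout instead of re-deriving it for the bounds check, and the new else-branch maps kernel code segment addresses through ktla_ktva() so single-step opcode decoding also works on KERNEXEC-relocated kernel text. Illustrative decode:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t seg = 0x000f;      /* index 1, TI=1 (LDT), RPL=3 -- made up */

        printf("index=%u ti=%u rpl=%u byte_offset=%u\n",
               seg >> 3,            /* what the patched code keeps in seg */
               (seg >> 2) & 1,
               seg & 3,
               seg & ~7u);          /* what the old code kept in seg */
        return 0;
    }
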
20555diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
20556new file mode 100644
20557index 0000000..26bb1af
20558--- /dev/null
20559+++ b/arch/x86/kernel/sys_i386_32.c
20560@@ -0,0 +1,249 @@
20561+/*
20562+ * This file contains various random system calls that
20563+ * have a non-standard calling sequence on the Linux/i386
20564+ * platform.
20565+ */
20566+
20567+#include <linux/errno.h>
20568+#include <linux/sched.h>
20569+#include <linux/mm.h>
20570+#include <linux/fs.h>
20571+#include <linux/smp.h>
20572+#include <linux/sem.h>
20573+#include <linux/msg.h>
20574+#include <linux/shm.h>
20575+#include <linux/stat.h>
20576+#include <linux/syscalls.h>
20577+#include <linux/mman.h>
20578+#include <linux/file.h>
20579+#include <linux/utsname.h>
20580+#include <linux/ipc.h>
20581+
20582+#include <linux/uaccess.h>
20583+#include <linux/unistd.h>
20584+
20585+#include <asm/syscalls.h>
20586+
20587+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
20588+{
20589+ unsigned long pax_task_size = TASK_SIZE;
20590+
20591+#ifdef CONFIG_PAX_SEGMEXEC
20592+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
20593+ pax_task_size = SEGMEXEC_TASK_SIZE;
20594+#endif
20595+
20596+ if (len > pax_task_size || addr > pax_task_size - len)
20597+ return -EINVAL;
20598+
20599+ return 0;
20600+}
20601+
20602+unsigned long
20603+arch_get_unmapped_area(struct file *filp, unsigned long addr,
20604+ unsigned long len, unsigned long pgoff, unsigned long flags)
20605+{
20606+ struct mm_struct *mm = current->mm;
20607+ struct vm_area_struct *vma;
20608+ unsigned long start_addr, pax_task_size = TASK_SIZE;
20609+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
20610+
20611+#ifdef CONFIG_PAX_SEGMEXEC
20612+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20613+ pax_task_size = SEGMEXEC_TASK_SIZE;
20614+#endif
20615+
20616+ pax_task_size -= PAGE_SIZE;
20617+
20618+ if (len > pax_task_size)
20619+ return -ENOMEM;
20620+
20621+ if (flags & MAP_FIXED)
20622+ return addr;
20623+
20624+#ifdef CONFIG_PAX_RANDMMAP
20625+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
20626+#endif
20627+
20628+ if (addr) {
20629+ addr = PAGE_ALIGN(addr);
20630+ if (pax_task_size - len >= addr) {
20631+ vma = find_vma(mm, addr);
20632+ if (check_heap_stack_gap(vma, addr, len, offset))
20633+ return addr;
20634+ }
20635+ }
20636+ if (len > mm->cached_hole_size) {
20637+ start_addr = addr = mm->free_area_cache;
20638+ } else {
20639+ start_addr = addr = mm->mmap_base;
20640+ mm->cached_hole_size = 0;
20641+ }
20642+
20643+#ifdef CONFIG_PAX_PAGEEXEC
20644+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
20645+ start_addr = 0x00110000UL;
20646+
20647+#ifdef CONFIG_PAX_RANDMMAP
20648+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20649+ start_addr += mm->delta_mmap & 0x03FFF000UL;
20650+#endif
20651+
20652+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
20653+ start_addr = addr = mm->mmap_base;
20654+ else
20655+ addr = start_addr;
20656+ }
20657+#endif
20658+
20659+full_search:
20660+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
20661+ /* At this point: (!vma || addr < vma->vm_end). */
20662+ if (pax_task_size - len < addr) {
20663+ /*
20664+ * Start a new search - just in case we missed
20665+ * some holes.
20666+ */
20667+ if (start_addr != mm->mmap_base) {
20668+ start_addr = addr = mm->mmap_base;
20669+ mm->cached_hole_size = 0;
20670+ goto full_search;
20671+ }
20672+ return -ENOMEM;
20673+ }
20674+ if (check_heap_stack_gap(vma, addr, len, offset))
20675+ break;
20676+ if (addr + mm->cached_hole_size < vma->vm_start)
20677+ mm->cached_hole_size = vma->vm_start - addr;
20678+ addr = vma->vm_end;
20679+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
20680+ start_addr = addr = mm->mmap_base;
20681+ mm->cached_hole_size = 0;
20682+ goto full_search;
20683+ }
20684+ }
20685+
20686+ /*
20687+ * Remember the place where we stopped the search:
20688+ */
20689+ mm->free_area_cache = addr + len;
20690+ return addr;
20691+}
20692+
20693+unsigned long
20694+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20695+ const unsigned long len, const unsigned long pgoff,
20696+ const unsigned long flags)
20697+{
20698+ struct vm_area_struct *vma;
20699+ struct mm_struct *mm = current->mm;
20700+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
20701+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
20702+
20703+#ifdef CONFIG_PAX_SEGMEXEC
20704+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20705+ pax_task_size = SEGMEXEC_TASK_SIZE;
20706+#endif
20707+
20708+ pax_task_size -= PAGE_SIZE;
20709+
20710+ /* requested length too big for entire address space */
20711+ if (len > pax_task_size)
20712+ return -ENOMEM;
20713+
20714+ if (flags & MAP_FIXED)
20715+ return addr;
20716+
20717+#ifdef CONFIG_PAX_PAGEEXEC
20718+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
20719+ goto bottomup;
20720+#endif
20721+
20722+#ifdef CONFIG_PAX_RANDMMAP
20723+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
20724+#endif
20725+
20726+ /* requesting a specific address */
20727+ if (addr) {
20728+ addr = PAGE_ALIGN(addr);
20729+ if (pax_task_size - len >= addr) {
20730+ vma = find_vma(mm, addr);
20731+ if (check_heap_stack_gap(vma, addr, len, offset))
20732+ return addr;
20733+ }
20734+ }
20735+
20736+ /* check if free_area_cache is useful for us */
20737+ if (len <= mm->cached_hole_size) {
20738+ mm->cached_hole_size = 0;
20739+ mm->free_area_cache = mm->mmap_base;
20740+ }
20741+
20742+ /* either no address requested or can't fit in requested address hole */
20743+ addr = mm->free_area_cache;
20744+
20745+ /* make sure it can fit in the remaining address space */
20746+ if (addr > len) {
20747+ vma = find_vma(mm, addr-len);
20748+ if (check_heap_stack_gap(vma, addr - len, len, offset))
20749+ /* remember the address as a hint for next time */
20750+ return (mm->free_area_cache = addr-len);
20751+ }
20752+
20753+ if (mm->mmap_base < len)
20754+ goto bottomup;
20755+
20756+ addr = mm->mmap_base-len;
20757+
20758+ do {
20759+ /*
20760+ * Lookup failure means no vma is above this address,
20761+ * else if new region fits below vma->vm_start,
20762+ * return with success:
20763+ */
20764+ vma = find_vma(mm, addr);
20765+ if (check_heap_stack_gap(vma, addr, len, offset))
20766+ /* remember the address as a hint for next time */
20767+ return (mm->free_area_cache = addr);
20768+
20769+ /* remember the largest hole we saw so far */
20770+ if (addr + mm->cached_hole_size < vma->vm_start)
20771+ mm->cached_hole_size = vma->vm_start - addr;
20772+
20773+ /* try just below the current vma->vm_start */
20774+ addr = skip_heap_stack_gap(vma, len, offset);
20775+ } while (!IS_ERR_VALUE(addr));
20776+
20777+bottomup:
20778+ /*
20779+ * A failed mmap() very likely causes application failure,
20780+ * so fall back to the bottom-up function here. This scenario
20781+ * can happen with large stack limits and large mmap()
20782+ * allocations.
20783+ */
20784+
20785+#ifdef CONFIG_PAX_SEGMEXEC
20786+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20787+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
20788+ else
20789+#endif
20790+
20791+ mm->mmap_base = TASK_UNMAPPED_BASE;
20792+
20793+#ifdef CONFIG_PAX_RANDMMAP
20794+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20795+ mm->mmap_base += mm->delta_mmap;
20796+#endif
20797+
20798+ mm->free_area_cache = mm->mmap_base;
20799+ mm->cached_hole_size = ~0UL;
20800+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
20801+ /*
20802+ * Restore the topdown base:
20803+ */
20804+ mm->mmap_base = base;
20805+ mm->free_area_cache = base;
20806+ mm->cached_hole_size = ~0UL;
20807+
20808+ return addr;
20809+}
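check_heap_stack_gap() and skip_heap_stack_gap() are grsecurity helpers defined elsewhere in the patch; their contract can be read off the call sites above: the first accepts a candidate range [addr, addr+len) only if it fits below the next vma with the caller's randomized guard gap (the offset from gr_rand_threadstack_offset()) to spare, and the second computes the next lower candidate during a top-down walk, returning an error value (caught by IS_ERR_VALUE()) once the walk would underflow. A hedged kernel-context sketch of that contract, not the actual implementation:

    #include <linux/mm.h>

    /* Sketch only: the real helpers also special-case stack vmas
     * and the fixed stack guard gap. */
    static bool check_heap_stack_gap_sketch(const struct vm_area_struct *vma,
                                            unsigned long addr, unsigned long len,
                                            unsigned long offset)
    {
        if (!vma)
            return true;                     /* nothing above: candidate fits */
        return addr + len + offset <= vma->vm_start;
    }

    static unsigned long skip_heap_stack_gap_sketch(const struct vm_area_struct *vma,
                                                    unsigned long len,
                                                    unsigned long offset)
    {
        if (vma->vm_start <= len + offset)
            return -ENOMEM;                  /* would underflow: stop the walk */
        return vma->vm_start - len - offset; /* try just below vma, minus gap */
    }
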
20810diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
20811index b4d3c39..d699d77 100644
20812--- a/arch/x86/kernel/sys_x86_64.c
20813+++ b/arch/x86/kernel/sys_x86_64.c
20814@@ -95,8 +95,8 @@ out:
20815 return error;
20816 }
20817
20818-static void find_start_end(unsigned long flags, unsigned long *begin,
20819- unsigned long *end)
20820+static void find_start_end(struct mm_struct *mm, unsigned long flags,
20821+ unsigned long *begin, unsigned long *end)
20822 {
20823 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
20824 unsigned long new_begin;
20825@@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
20826 *begin = new_begin;
20827 }
20828 } else {
20829- *begin = TASK_UNMAPPED_BASE;
20830+ *begin = mm->mmap_base;
20831 *end = TASK_SIZE;
20832 }
20833 }
20834@@ -128,20 +128,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
20835 struct vm_area_struct *vma;
20836 unsigned long start_addr;
20837 unsigned long begin, end;
20838+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
20839
20840 if (flags & MAP_FIXED)
20841 return addr;
20842
20843- find_start_end(flags, &begin, &end);
20844+ find_start_end(mm, flags, &begin, &end);
20845
20846 if (len > end)
20847 return -ENOMEM;
20848
20849+#ifdef CONFIG_PAX_RANDMMAP
20850+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
20851+#endif
20852+
20853 if (addr) {
20854 addr = PAGE_ALIGN(addr);
20855 vma = find_vma(mm, addr);
20856- if (end - len >= addr &&
20857- (!vma || addr + len <= vma->vm_start))
20858+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
20859 return addr;
20860 }
20861 if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
20862@@ -172,7 +176,7 @@ full_search:
20863 }
20864 return -ENOMEM;
20865 }
20866- if (!vma || addr + len <= vma->vm_start) {
20867+ if (check_heap_stack_gap(vma, addr, len, offset)) {
20868 /*
20869 * Remember the place where we stopped the search:
20870 */
20871@@ -195,7 +199,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20872 {
20873 struct vm_area_struct *vma;
20874 struct mm_struct *mm = current->mm;
20875- unsigned long addr = addr0, start_addr;
20876+ unsigned long base = mm->mmap_base, addr = addr0, start_addr;
20877+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
20878
20879 /* requested length too big for entire address space */
20880 if (len > TASK_SIZE)
20881@@ -208,13 +213,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20882 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
20883 goto bottomup;
20884
20885+#ifdef CONFIG_PAX_RANDMMAP
20886+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
20887+#endif
20888+
20889 /* requesting a specific address */
20890 if (addr) {
20891 addr = PAGE_ALIGN(addr);
20892- vma = find_vma(mm, addr);
20893- if (TASK_SIZE - len >= addr &&
20894- (!vma || addr + len <= vma->vm_start))
20895- return addr;
20896+ if (TASK_SIZE - len >= addr) {
20897+ vma = find_vma(mm, addr);
20898+ if (check_heap_stack_gap(vma, addr, len, offset))
20899+ return addr;
20900+ }
20901 }
20902
20903 /* check if free_area_cache is useful for us */
20904@@ -240,7 +250,7 @@ try_again:
20905 * return with success:
20906 */
20907 vma = find_vma(mm, addr);
20908- if (!vma || addr+len <= vma->vm_start)
20909+ if (check_heap_stack_gap(vma, addr, len, offset))
20910 /* remember the address as a hint for next time */
20911 return mm->free_area_cache = addr;
20912
20913@@ -249,8 +259,8 @@ try_again:
20914 mm->cached_hole_size = vma->vm_start - addr;
20915
20916 /* try just below the current vma->vm_start */
20917- addr = vma->vm_start-len;
20918- } while (len < vma->vm_start);
20919+ addr = skip_heap_stack_gap(vma, len, offset);
20920+ } while (!IS_ERR_VALUE(addr));
20921
20922 fail:
20923 /*
20924@@ -270,13 +280,21 @@ bottomup:
20925 * can happen with large stack limits and large mmap()
20926 * allocations.
20927 */
20928+ mm->mmap_base = TASK_UNMAPPED_BASE;
20929+
20930+#ifdef CONFIG_PAX_RANDMMAP
20931+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20932+ mm->mmap_base += mm->delta_mmap;
20933+#endif
20934+
20935+ mm->free_area_cache = mm->mmap_base;
20936 mm->cached_hole_size = ~0UL;
20937- mm->free_area_cache = TASK_UNMAPPED_BASE;
20938 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
20939 /*
20940 * Restore the topdown base:
20941 */
20942- mm->free_area_cache = mm->mmap_base;
20943+ mm->mmap_base = base;
20944+ mm->free_area_cache = base;
20945 mm->cached_hole_size = ~0UL;
20946
20947 return addr;
20948diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
20949index f84fe00..93fe08f 100644
20950--- a/arch/x86/kernel/tboot.c
20951+++ b/arch/x86/kernel/tboot.c
20952@@ -220,7 +220,7 @@ static int tboot_setup_sleep(void)
20953
20954 void tboot_shutdown(u32 shutdown_type)
20955 {
20956- void (*shutdown)(void);
20957+ void (* __noreturn shutdown)(void);
20958
20959 if (!tboot_enabled())
20960 return;
20961@@ -242,7 +242,7 @@ void tboot_shutdown(u32 shutdown_type)
20962
20963 switch_to_tboot_pt();
20964
20965- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
20966+ shutdown = (void *)tboot->shutdown_entry;
20967 shutdown();
20968
20969 /* should not reach here */
20970@@ -300,7 +300,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
20971 return 0;
20972 }
20973
20974-static atomic_t ap_wfs_count;
20975+static atomic_unchecked_t ap_wfs_count;
20976
20977 static int tboot_wait_for_aps(int num_aps)
20978 {
20979@@ -324,9 +324,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
20980 {
20981 switch (action) {
20982 case CPU_DYING:
20983- atomic_inc(&ap_wfs_count);
20984+ atomic_inc_unchecked(&ap_wfs_count);
20985 if (num_online_cpus() == 1)
20986- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
20987+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
20988 return NOTIFY_BAD;
20989 break;
20990 }
20991@@ -345,7 +345,7 @@ static __init int tboot_late_init(void)
20992
20993 tboot_create_trampoline();
20994
20995- atomic_set(&ap_wfs_count, 0);
20996+ atomic_set_unchecked(&ap_wfs_count, 0);
20997 register_hotcpu_notifier(&tboot_cpu_notifier);
20998
20999 acpi_os_set_prepare_sleep(&tboot_sleep);
21000diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
21001index 24d3c91..d06b473 100644
21002--- a/arch/x86/kernel/time.c
21003+++ b/arch/x86/kernel/time.c
21004@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
21005 {
21006 unsigned long pc = instruction_pointer(regs);
21007
21008- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
21009+ if (!user_mode(regs) && in_lock_functions(pc)) {
21010 #ifdef CONFIG_FRAME_POINTER
21011- return *(unsigned long *)(regs->bp + sizeof(long));
21012+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
21013 #else
21014 unsigned long *sp =
21015 (unsigned long *)kernel_stack_pointer(regs);
21016@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
21017 * or above a saved flags. Eflags has bits 22-31 zero,
21018 * kernel addresses don't.
21019 */
21020+
21021+#ifdef CONFIG_PAX_KERNEXEC
21022+ return ktla_ktva(sp[0]);
21023+#else
21024 if (sp[0] >> 22)
21025 return sp[0];
21026 if (sp[1] >> 22)
21027 return sp[1];
21028 #endif
21029+
21030+#endif
21031 }
21032 return pc;
21033 }
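The heuristic this hunk touches rests on a disjointness argument: EFLAGS bits 22-31 are architecturally zero, while on a 32-bit kernel any text address lies at or above PAGE_OFFSET (0xC0000000 by default), so word >> 22 is non-zero exactly for the address case. Under KERNEXEC the patch skips the guesswork and returns ktla_ktva(sp[0]) unconditionally. The discriminator in isolation:

    #include <stdio.h>

    /* EFLAGS bits 22..31 are always zero; 32-bit kernel text sits at or
     * above PAGE_OFFSET (0xC0000000 by default), so its top bits are set. */
    static int looks_like_kernel_address(unsigned int word)
    {
        return (word >> 22) != 0;
    }

    int main(void)
    {
        unsigned int eflags = 0x00000246;  /* a typical saved EFLAGS image */
        unsigned int kaddr  = 0xc04a1b30;  /* made-up kernel text address */

        printf("eflags->%d kaddr->%d\n",
               looks_like_kernel_address(eflags),
               looks_like_kernel_address(kaddr));
        return 0;
    }
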
21034diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
21035index 9d9d2f9..cad418a 100644
21036--- a/arch/x86/kernel/tls.c
21037+++ b/arch/x86/kernel/tls.c
21038@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
21039 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
21040 return -EINVAL;
21041
21042+#ifdef CONFIG_PAX_SEGMEXEC
21043+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
21044+ return -EINVAL;
21045+#endif
21046+
21047 set_tls_desc(p, idx, &info, 1);
21048
21049 return 0;
21050@@ -204,7 +209,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
21051
21052 if (kbuf)
21053 info = kbuf;
21054- else if (__copy_from_user(infobuf, ubuf, count))
21055+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
21056 return -EFAULT;
21057 else
21058 info = infobuf;
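The added count > sizeof infobuf test is the generic pattern for a user-controlled length flowing into __copy_from_user(), which skips the checking that copy_from_user() performs: bound the length against the fixed-size destination first, then copy. Distilled as a kernel-context sketch (callers are assumed to have done access_ok() already, as the regset path has):

    #include <linux/uaccess.h>

    static int copy_bounded_from_user(void *dst, size_t dst_size,
                                      const void __user *src, size_t count)
    {
        if (count > dst_size)   /* user-controlled count: reject early */
            return -EFAULT;
        if (__copy_from_user(dst, src, count))
            return -EFAULT;
        return 0;
    }
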
21059diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
21060index 8276dc6..4ca48a2 100644
21061--- a/arch/x86/kernel/traps.c
21062+++ b/arch/x86/kernel/traps.c
21063@@ -71,12 +71,6 @@ asmlinkage int system_call(void);
21064
21065 /* Do we ignore FPU interrupts ? */
21066 char ignore_fpu_irq;
21067-
21068-/*
21069- * The IDT has to be page-aligned to simplify the Pentium
21070- * F0 0F bug workaround.
21071- */
21072-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
21073 #endif
21074
21075 DECLARE_BITMAP(used_vectors, NR_VECTORS);
21076@@ -109,11 +103,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
21077 }
21078
21079 static int __kprobes
21080-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
21081+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
21082 struct pt_regs *regs, long error_code)
21083 {
21084 #ifdef CONFIG_X86_32
21085- if (regs->flags & X86_VM_MASK) {
21086+ if (v8086_mode(regs)) {
21087 /*
21088 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
21089 * On nmi (interrupt 2), do_trap should not be called.
21090@@ -126,12 +120,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
21091 return -1;
21092 }
21093 #endif
21094- if (!user_mode(regs)) {
21095+ if (!user_mode_novm(regs)) {
21096 if (!fixup_exception(regs)) {
21097 tsk->thread.error_code = error_code;
21098 tsk->thread.trap_nr = trapnr;
21099+
21100+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21101+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
21102+ str = "PAX: suspicious stack segment fault";
21103+#endif
21104+
21105 die(str, regs, error_code);
21106 }
21107+
21108+#ifdef CONFIG_PAX_REFCOUNT
21109+ if (trapnr == 4)
21110+ pax_report_refcount_overflow(regs);
21111+#endif
21112+
21113 return 0;
21114 }
21115
21116@@ -139,7 +145,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
21117 }
21118
21119 static void __kprobes
21120-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
21121+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
21122 long error_code, siginfo_t *info)
21123 {
21124 struct task_struct *tsk = current;
21125@@ -163,7 +169,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
21126 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
21127 printk_ratelimit()) {
21128 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
21129- tsk->comm, tsk->pid, str,
21130+ tsk->comm, task_pid_nr(tsk), str,
21131 regs->ip, regs->sp, error_code);
21132 print_vma_addr(" in ", regs->ip);
21133 pr_cont("\n");
21134@@ -269,7 +275,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
21135 conditional_sti(regs);
21136
21137 #ifdef CONFIG_X86_32
21138- if (regs->flags & X86_VM_MASK) {
21139+ if (v8086_mode(regs)) {
21140 local_irq_enable();
21141 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
21142 goto exit;
21143@@ -277,18 +283,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
21144 #endif
21145
21146 tsk = current;
21147- if (!user_mode(regs)) {
21148+ if (!user_mode_novm(regs)) {
21149 if (fixup_exception(regs))
21150 goto exit;
21151
21152 tsk->thread.error_code = error_code;
21153 tsk->thread.trap_nr = X86_TRAP_GP;
21154 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
21155- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
21156+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
21157+
21158+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21159+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
21160+ die("PAX: suspicious general protection fault", regs, error_code);
21161+ else
21162+#endif
21163+
21164 die("general protection fault", regs, error_code);
21165+ }
21166 goto exit;
21167 }
21168
21169+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21170+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
21171+ struct mm_struct *mm = tsk->mm;
21172+ unsigned long limit;
21173+
21174+ down_write(&mm->mmap_sem);
21175+ limit = mm->context.user_cs_limit;
21176+ if (limit < TASK_SIZE) {
21177+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
21178+ up_write(&mm->mmap_sem);
21179+ return;
21180+ }
21181+ up_write(&mm->mmap_sem);
21182+ }
21183+#endif
21184+
21185 tsk->thread.error_code = error_code;
21186 tsk->thread.trap_nr = X86_TRAP_GP;
21187
21188@@ -443,7 +473,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
21189 /* It's safe to allow irq's after DR6 has been saved */
21190 preempt_conditional_sti(regs);
21191
21192- if (regs->flags & X86_VM_MASK) {
21193+ if (v8086_mode(regs)) {
21194 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
21195 X86_TRAP_DB);
21196 preempt_conditional_cli(regs);
21197@@ -458,7 +488,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
21198 * We already checked v86 mode above, so we can check for kernel mode
21199 * by just checking the CPL of CS.
21200 */
21201- if ((dr6 & DR_STEP) && !user_mode(regs)) {
21202+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
21203 tsk->thread.debugreg6 &= ~DR_STEP;
21204 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
21205 regs->flags &= ~X86_EFLAGS_TF;
21206@@ -490,7 +520,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
21207 return;
21208 conditional_sti(regs);
21209
21210- if (!user_mode_vm(regs))
21211+ if (!user_mode(regs))
21212 {
21213 if (!fixup_exception(regs)) {
21214 task->thread.error_code = error_code;
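The systematic user_mode_vm()-to-user_mode() and user_mode()-to-user_mode_novm() renames track a PaX change to the predicates themselves: the patch makes the short name do the vm86-aware check everywhere and keeps an explicit _novm variant for the rare CPL-only test. For reference, the vanilla semantics being shuffled are roughly (approximate reconstruction from this kernel era's headers):

    static inline int user_mode(struct pt_regs *regs)
    {
    #ifdef CONFIG_X86_32
            return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;   /* CPL only */
    #else
            return !!(regs->cs & 3);
    #endif
    }

    static inline int user_mode_vm(struct pt_regs *regs)
    {
    #ifdef CONFIG_X86_32
            /* also counts vm86 mode as user mode */
            return ((regs->cs & SEGMENT_RPL_MASK) |
                    (regs->flags & X86_VM_MASK)) >= USER_RPL;
    #else
            return user_mode(regs);
    #endif
    }
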
21215diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
21216index aafa555..a04691a 100644
21217--- a/arch/x86/kernel/uprobes.c
21218+++ b/arch/x86/kernel/uprobes.c
21219@@ -614,7 +614,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
21220 int ret = NOTIFY_DONE;
21221
21222 /* We are only interested in userspace traps */
21223- if (regs && !user_mode_vm(regs))
21224+ if (regs && !user_mode(regs))
21225 return NOTIFY_DONE;
21226
21227 switch (val) {
21228diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
21229index b9242ba..50c5edd 100644
21230--- a/arch/x86/kernel/verify_cpu.S
21231+++ b/arch/x86/kernel/verify_cpu.S
21232@@ -20,6 +20,7 @@
21233 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
21234 * arch/x86/kernel/trampoline_64.S: secondary processor verification
21235 * arch/x86/kernel/head_32.S: processor startup
21236+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
21237 *
21238 * verify_cpu, returns the status of longmode and SSE in register %eax.
21239 * 0: Success 1: Failure
21240diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
21241index 5c9687b..5f857d3 100644
21242--- a/arch/x86/kernel/vm86_32.c
21243+++ b/arch/x86/kernel/vm86_32.c
21244@@ -43,6 +43,7 @@
21245 #include <linux/ptrace.h>
21246 #include <linux/audit.h>
21247 #include <linux/stddef.h>
21248+#include <linux/grsecurity.h>
21249
21250 #include <asm/uaccess.h>
21251 #include <asm/io.h>
21252@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
21253 do_exit(SIGSEGV);
21254 }
21255
21256- tss = &per_cpu(init_tss, get_cpu());
21257+ tss = init_tss + get_cpu();
21258 current->thread.sp0 = current->thread.saved_sp0;
21259 current->thread.sysenter_cs = __KERNEL_CS;
21260 load_sp0(tss, &current->thread);
21261@@ -212,6 +213,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
21262 struct task_struct *tsk;
21263 int tmp, ret = -EPERM;
21264
21265+#ifdef CONFIG_GRKERNSEC_VM86
21266+ if (!capable(CAP_SYS_RAWIO)) {
21267+ gr_handle_vm86();
21268+ goto out;
21269+ }
21270+#endif
21271+
21272 tsk = current;
21273 if (tsk->thread.saved_sp0)
21274 goto out;
21275@@ -242,6 +250,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
21276 int tmp, ret;
21277 struct vm86plus_struct __user *v86;
21278
21279+#ifdef CONFIG_GRKERNSEC_VM86
21280+ if (!capable(CAP_SYS_RAWIO)) {
21281+ gr_handle_vm86();
21282+ ret = -EPERM;
21283+ goto out;
21284+ }
21285+#endif
21286+
21287 tsk = current;
21288 switch (cmd) {
21289 case VM86_REQUEST_IRQ:
21290@@ -328,7 +344,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
21291 tsk->thread.saved_fs = info->regs32->fs;
21292 tsk->thread.saved_gs = get_user_gs(info->regs32);
21293
21294- tss = &per_cpu(init_tss, get_cpu());
21295+ tss = init_tss + get_cpu();
21296 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
21297 if (cpu_has_sep)
21298 tsk->thread.sysenter_cs = 0;
21299@@ -535,7 +551,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
21300 goto cannot_handle;
21301 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
21302 goto cannot_handle;
21303- intr_ptr = (unsigned long __user *) (i << 2);
21304+ intr_ptr = (__force unsigned long __user *) (i << 2);
21305 if (get_user(segoffs, intr_ptr))
21306 goto cannot_handle;
21307 if ((segoffs >> 16) == BIOSSEG)
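The (i << 2) in do_int() indexes the real-mode interrupt vector table at linear address 0: each of the 256 vectors occupies 4 bytes, handler offset in the low word and handler segment in the high word, which is what the segoffs unpacking after the get_user() relies on. The added __force only placates sparse, since a fixed low linear address is being treated as a user pointer (vm86 memory lives in the task's user address space). The layout in isolation:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int i = 0x10;                 /* vector number */
        uint32_t ivt_offset = i << 2;          /* entry lives at i * 4 */
        uint32_t segoffs = 0xf000ff53;         /* made-up BIOS-style entry */

        printf("vector %#x at IVT+%u -> %04x:%04x\n",
               i, ivt_offset,
               segoffs >> 16,                  /* segment (checked vs BIOSSEG) */
               segoffs & 0xffff);              /* offset */
        return 0;
    }
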
21308diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
21309index 22a1530..8fbaaad 100644
21310--- a/arch/x86/kernel/vmlinux.lds.S
21311+++ b/arch/x86/kernel/vmlinux.lds.S
21312@@ -26,6 +26,13 @@
21313 #include <asm/page_types.h>
21314 #include <asm/cache.h>
21315 #include <asm/boot.h>
21316+#include <asm/segment.h>
21317+
21318+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21319+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
21320+#else
21321+#define __KERNEL_TEXT_OFFSET 0
21322+#endif
21323
21324 #undef i386 /* in case the preprocessor is a 32bit one */
21325
21326@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
21327
21328 PHDRS {
21329 text PT_LOAD FLAGS(5); /* R_E */
21330+#ifdef CONFIG_X86_32
21331+ module PT_LOAD FLAGS(5); /* R_E */
21332+#endif
21333+#ifdef CONFIG_XEN
21334+ rodata PT_LOAD FLAGS(5); /* R_E */
21335+#else
21336+ rodata PT_LOAD FLAGS(4); /* R__ */
21337+#endif
21338 data PT_LOAD FLAGS(6); /* RW_ */
21339-#ifdef CONFIG_X86_64
21340+ init.begin PT_LOAD FLAGS(6); /* RW_ */
21341 #ifdef CONFIG_SMP
21342 percpu PT_LOAD FLAGS(6); /* RW_ */
21343 #endif
21344+ text.init PT_LOAD FLAGS(5); /* R_E */
21345+ text.exit PT_LOAD FLAGS(5); /* R_E */
21346 init PT_LOAD FLAGS(7); /* RWE */
21347-#endif
21348 note PT_NOTE FLAGS(0); /* ___ */
21349 }
21350
21351 SECTIONS
21352 {
21353 #ifdef CONFIG_X86_32
21354- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
21355- phys_startup_32 = startup_32 - LOAD_OFFSET;
21356+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
21357 #else
21358- . = __START_KERNEL;
21359- phys_startup_64 = startup_64 - LOAD_OFFSET;
21360+ . = __START_KERNEL;
21361 #endif
21362
21363 /* Text and read-only data */
21364- .text : AT(ADDR(.text) - LOAD_OFFSET) {
21365- _text = .;
21366+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
21367 /* bootstrapping code */
21368+#ifdef CONFIG_X86_32
21369+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
21370+#else
21371+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
21372+#endif
21373+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
21374+ _text = .;
21375 HEAD_TEXT
21376 #ifdef CONFIG_X86_32
21377 . = ALIGN(PAGE_SIZE);
21378@@ -108,13 +128,48 @@ SECTIONS
21379 IRQENTRY_TEXT
21380 *(.fixup)
21381 *(.gnu.warning)
21382- /* End of text section */
21383- _etext = .;
21384 } :text = 0x9090
21385
21386- NOTES :text :note
21387+ . += __KERNEL_TEXT_OFFSET;
21388
21389- EXCEPTION_TABLE(16) :text = 0x9090
21390+#ifdef CONFIG_X86_32
21391+ . = ALIGN(PAGE_SIZE);
21392+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
21393+
21394+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
21395+ MODULES_EXEC_VADDR = .;
21396+ BYTE(0)
21397+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
21398+ . = ALIGN(HPAGE_SIZE) - 1;
21399+ MODULES_EXEC_END = .;
21400+#endif
21401+
21402+ } :module
21403+#endif
21404+
21405+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
21406+ /* End of text section */
21407+ BYTE(0)
21408+ _etext = . - __KERNEL_TEXT_OFFSET;
21409+ }
21410+
21411+#ifdef CONFIG_X86_32
21412+ . = ALIGN(PAGE_SIZE);
21413+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
21414+ *(.idt)
21415+ . = ALIGN(PAGE_SIZE);
21416+ *(.empty_zero_page)
21417+ *(.initial_pg_fixmap)
21418+ *(.initial_pg_pmd)
21419+ *(.initial_page_table)
21420+ *(.swapper_pg_dir)
21421+ } :rodata
21422+#endif
21423+
21424+ . = ALIGN(PAGE_SIZE);
21425+ NOTES :rodata :note
21426+
21427+ EXCEPTION_TABLE(16) :rodata
21428
21429 #if defined(CONFIG_DEBUG_RODATA)
21430 /* .text should occupy whole number of pages */
21431@@ -126,16 +181,20 @@ SECTIONS
21432
21433 /* Data */
21434 .data : AT(ADDR(.data) - LOAD_OFFSET) {
21435+
21436+#ifdef CONFIG_PAX_KERNEXEC
21437+ . = ALIGN(HPAGE_SIZE);
21438+#else
21439+ . = ALIGN(PAGE_SIZE);
21440+#endif
21441+
21442 /* Start of data section */
21443 _sdata = .;
21444
21445 /* init_task */
21446 INIT_TASK_DATA(THREAD_SIZE)
21447
21448-#ifdef CONFIG_X86_32
21449- /* 32 bit has nosave before _edata */
21450 NOSAVE_DATA
21451-#endif
21452
21453 PAGE_ALIGNED_DATA(PAGE_SIZE)
21454
21455@@ -176,12 +235,19 @@ SECTIONS
21456 #endif /* CONFIG_X86_64 */
21457
21458 /* Init code and data - will be freed after init */
21459- . = ALIGN(PAGE_SIZE);
21460 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
21461+ BYTE(0)
21462+
21463+#ifdef CONFIG_PAX_KERNEXEC
21464+ . = ALIGN(HPAGE_SIZE);
21465+#else
21466+ . = ALIGN(PAGE_SIZE);
21467+#endif
21468+
21469 __init_begin = .; /* paired with __init_end */
21470- }
21471+ } :init.begin
21472
21473-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
21474+#ifdef CONFIG_SMP
21475 /*
21476 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
21477 * output PHDR, so the next output section - .init.text - should
21478@@ -190,12 +256,27 @@ SECTIONS
21479 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
21480 #endif
21481
21482- INIT_TEXT_SECTION(PAGE_SIZE)
21483-#ifdef CONFIG_X86_64
21484- :init
21485-#endif
21486+ . = ALIGN(PAGE_SIZE);
21487+ init_begin = .;
21488+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
21489+ VMLINUX_SYMBOL(_sinittext) = .;
21490+ INIT_TEXT
21491+ VMLINUX_SYMBOL(_einittext) = .;
21492+ . = ALIGN(PAGE_SIZE);
21493+ } :text.init
21494
21495- INIT_DATA_SECTION(16)
21496+ /*
21497+ * .exit.text is discarded at runtime, not link time, to deal with
21498+ * references from .altinstructions and .eh_frame
21499+ */
21500+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
21501+ EXIT_TEXT
21502+ . = ALIGN(16);
21503+ } :text.exit
21504+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
21505+
21506+ . = ALIGN(PAGE_SIZE);
21507+ INIT_DATA_SECTION(16) :init
21508
21509 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
21510 __x86_cpu_dev_start = .;
21511@@ -257,19 +338,12 @@ SECTIONS
21512 }
21513
21514 . = ALIGN(8);
21515- /*
21516- * .exit.text is discard at runtime, not link time, to deal with
21517- * references from .altinstructions and .eh_frame
21518- */
21519- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
21520- EXIT_TEXT
21521- }
21522
21523 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
21524 EXIT_DATA
21525 }
21526
21527-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
21528+#ifndef CONFIG_SMP
21529 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
21530 #endif
21531
21532@@ -288,16 +362,10 @@ SECTIONS
21533 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
21534 __smp_locks = .;
21535 *(.smp_locks)
21536- . = ALIGN(PAGE_SIZE);
21537 __smp_locks_end = .;
21538+ . = ALIGN(PAGE_SIZE);
21539 }
21540
21541-#ifdef CONFIG_X86_64
21542- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
21543- NOSAVE_DATA
21544- }
21545-#endif
21546-
21547 /* BSS */
21548 . = ALIGN(PAGE_SIZE);
21549 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
21550@@ -313,6 +381,7 @@ SECTIONS
21551 __brk_base = .;
21552 . += 64 * 1024; /* 64k alignment slop space */
21553 *(.brk_reservation) /* areas brk users have reserved */
21554+ . = ALIGN(HPAGE_SIZE);
21555 __brk_limit = .;
21556 }
21557
21558@@ -339,13 +408,12 @@ SECTIONS
21559 * for the boot processor.
21560 */
21561 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
21562-INIT_PER_CPU(gdt_page);
21563 INIT_PER_CPU(irq_stack_union);
21564
21565 /*
21566 * Build-time check on the image size:
21567 */
21568-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
21569+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
21570 "kernel image bigger than KERNEL_IMAGE_SIZE");
21571
21572 #ifdef CONFIG_SMP
21573diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
21574index 992f890..0ab1aae 100644
21575--- a/arch/x86/kernel/vsmp_64.c
21576+++ b/arch/x86/kernel/vsmp_64.c
21577@@ -114,7 +114,7 @@ static void __init set_vsmp_pv_ops(void)
21578 pv_irq_ops.irq_enable = PV_CALLEE_SAVE(vsmp_irq_enable);
21579 pv_irq_ops.save_fl = PV_CALLEE_SAVE(vsmp_save_fl);
21580 pv_irq_ops.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl);
21581- pv_init_ops.patch = vsmp_patch;
21582+ *(void **)&pv_init_ops.patch = vsmp_patch;
21583 ctl &= ~(1 << 4);
21584 }
21585 writel(ctl, address + 4);
21586@@ -217,8 +217,8 @@ static void fill_vector_allocation_domain(int cpu, struct cpumask *retmask,
21587 static void vsmp_apic_post_init(void)
21588 {
21589 /* need to update phys_pkg_id */
21590- apic->phys_pkg_id = apicid_phys_pkg_id;
21591- apic->vector_allocation_domain = fill_vector_allocation_domain;
21592+ *(void **)&apic->phys_pkg_id = apicid_phys_pkg_id;
21593+ *(void **)&apic->vector_allocation_domain = fill_vector_allocation_domain;
21594 }
21595
21596 void __init vsmp_init(void)
21597@@ -227,7 +227,7 @@ void __init vsmp_init(void)
21598 if (!is_vsmp_box())
21599 return;
21600
21601- x86_platform.apic_post_init = vsmp_apic_post_init;
21602+ *(void **)&x86_platform.apic_post_init = vsmp_apic_post_init;
21603
21604 vsmp_cap_cpus();
21605
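The *(void **)&ops.member = fn form recurs throughout the patch because PaX constifies these ops structures; a plain assignment then no longer compiles, so legitimate boot-time fixups launder the lvalue through a void ** cast (and, where the backing page is also write-protected, bracket the store with pax_open_kernel()/pax_close_kernel()). A user-space model of the cast, where the const is only a typed view of writable storage, mirroring how PaX makes the page writable during the update:

    #include <stdio.h>

    struct ops {
        void (*patch)(void);
    };

    static void new_patch(void) { puts("patched"); }

    static struct ops ops_storage;                     /* writable backing */
    static const struct ops *const ops = &ops_storage; /* const-typed view */

    int main(void)
    {
        /* ops->patch = new_patch;  -- would not compile: const lvalue */
        *(void **)&ops->patch = (void *)new_patch;     /* launders the qualifier
                                                          (GNU/POSIX-style
                                                          function/object cast,
                                                          as in the kernel) */
        ops->patch();
        return 0;
    }
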
21606diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
21607index 3a3e8c9..1af9465 100644
21608--- a/arch/x86/kernel/vsyscall_64.c
21609+++ b/arch/x86/kernel/vsyscall_64.c
21610@@ -56,15 +56,13 @@
21611 DEFINE_VVAR(int, vgetcpu_mode);
21612 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
21613
21614-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
21615+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
21616
21617 static int __init vsyscall_setup(char *str)
21618 {
21619 if (str) {
21620 if (!strcmp("emulate", str))
21621 vsyscall_mode = EMULATE;
21622- else if (!strcmp("native", str))
21623- vsyscall_mode = NATIVE;
21624 else if (!strcmp("none", str))
21625 vsyscall_mode = NONE;
21626 else
21627@@ -315,8 +313,7 @@ done:
21628 return true;
21629
21630 sigsegv:
21631- force_sig(SIGSEGV, current);
21632- return true;
21633+ do_group_exit(SIGKILL);
21634 }
21635
21636 /*
21637@@ -369,10 +366,7 @@ void __init map_vsyscall(void)
21638 extern char __vvar_page;
21639 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
21640
21641- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
21642- vsyscall_mode == NATIVE
21643- ? PAGE_KERNEL_VSYSCALL
21644- : PAGE_KERNEL_VVAR);
21645+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
21646 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
21647 (unsigned long)VSYSCALL_START);
21648
21649diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
21650index 1330dd1..d220b99 100644
21651--- a/arch/x86/kernel/x8664_ksyms_64.c
21652+++ b/arch/x86/kernel/x8664_ksyms_64.c
21653@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
21654 EXPORT_SYMBOL(copy_user_generic_unrolled);
21655 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
21656 EXPORT_SYMBOL(__copy_user_nocache);
21657-EXPORT_SYMBOL(_copy_from_user);
21658-EXPORT_SYMBOL(_copy_to_user);
21659
21660 EXPORT_SYMBOL(copy_page);
21661 EXPORT_SYMBOL(clear_page);
21662diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
21663index ada87a3..afea76d 100644
21664--- a/arch/x86/kernel/xsave.c
21665+++ b/arch/x86/kernel/xsave.c
21666@@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
21667 {
21668 int err;
21669
21670+ buf = (struct xsave_struct __user *)____m(buf);
21671 if (use_xsave())
21672 err = xsave_user(buf);
21673 else if (use_fxsr())
21674@@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
21675 */
21676 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
21677 {
21678+ buf = (void __user *)____m(buf);
21679 if (use_xsave()) {
21680 if ((unsigned long)buf % 64 || fx_only) {
21681 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
21682diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
21683index ec79e77..420f5cc 100644
21684--- a/arch/x86/kvm/cpuid.c
21685+++ b/arch/x86/kvm/cpuid.c
21686@@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
21687 struct kvm_cpuid2 *cpuid,
21688 struct kvm_cpuid_entry2 __user *entries)
21689 {
21690- int r;
21691+ int r, i;
21692
21693 r = -E2BIG;
21694 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
21695 goto out;
21696 r = -EFAULT;
21697- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
21698- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21699+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21700 goto out;
21701+ for (i = 0; i < cpuid->nent; ++i) {
21702+ struct kvm_cpuid_entry2 cpuid_entry;
21703+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
21704+ goto out;
21705+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
21706+ }
21707 vcpu->arch.cpuid_nent = cpuid->nent;
21708 kvm_apic_set_version(vcpu);
21709 kvm_x86_ops->cpuid_update(vcpu);
21710@@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
21711 struct kvm_cpuid2 *cpuid,
21712 struct kvm_cpuid_entry2 __user *entries)
21713 {
21714- int r;
21715+ int r, i;
21716
21717 r = -E2BIG;
21718 if (cpuid->nent < vcpu->arch.cpuid_nent)
21719 goto out;
21720 r = -EFAULT;
21721- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
21722- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21723+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21724 goto out;
21725+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
21726+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
21727+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
21728+ goto out;
21729+ }
21730 return 0;
21731
21732 out:
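Both ioctl paths trade one variable-length bulk copy for an up-front access_ok() on the whole user range plus a loop of fixed-size element copies staged through a local; each individual __copy_from_user()/__copy_to_user() then has a compile-time-constant size, which is friendlier to the patch's size_overflow and usercopy instrumentation (that motivation is inferred, not stated in the hunk). The shape of the read side as a kernel-context sketch:

    #include <linux/kvm_host.h>
    #include <linux/uaccess.h>

    static int copy_cpuid_entries_in(struct kvm_cpuid_entry2 *dst,
                                     const struct kvm_cpuid_entry2 __user *src,
                                     unsigned int nent)
    {
            unsigned int i;

            /* nent is already bounded by the caller (KVM_MAX_CPUID_ENTRIES),
             * so the multiplication below cannot overflow */
            if (!access_ok(VERIFY_READ, src, nent * sizeof(*src)))
                    return -EFAULT;
            for (i = 0; i < nent; i++) {
                    struct kvm_cpuid_entry2 entry;

                    if (__copy_from_user(&entry, src + i, sizeof(entry)))
                            return -EFAULT;
                    dst[i] = entry;
            }
            return 0;
    }
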
21733diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
21734index bba39bf..296540a 100644
21735--- a/arch/x86/kvm/emulate.c
21736+++ b/arch/x86/kvm/emulate.c
21737@@ -292,6 +292,7 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
21738
21739 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
21740 do { \
21741+ unsigned long _tmp; \
21742 __asm__ __volatile__ ( \
21743 _PRE_EFLAGS("0", "4", "2") \
21744 _op _suffix " %"_x"3,%1; " \
21745@@ -306,8 +307,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
21746 /* Raw emulation: instruction has two explicit operands. */
21747 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
21748 do { \
21749- unsigned long _tmp; \
21750- \
21751 switch ((ctxt)->dst.bytes) { \
21752 case 2: \
21753 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
21754@@ -323,7 +322,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
21755
21756 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
21757 do { \
21758- unsigned long _tmp; \
21759 switch ((ctxt)->dst.bytes) { \
21760 case 1: \
21761 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
21762diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
21763index 43e9fad..3b7c059 100644
21764--- a/arch/x86/kvm/lapic.c
21765+++ b/arch/x86/kvm/lapic.c
21766@@ -55,7 +55,7 @@
21767 #define APIC_BUS_CYCLE_NS 1
21768
21769 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
21770-#define apic_debug(fmt, arg...)
21771+#define apic_debug(fmt, arg...) do {} while (0)
21772
21773 #define APIC_LVT_NUM 6
21774 /* 14 is the version for Xeon and Pentium 8.4.8*/
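The apic_debug() change swaps an empty expansion for the usual do {} while (0) idiom: the disabled macro then still forms a complete statement that consumes its trailing semicolon, instead of leaving a bare ';' that trips -Wempty-body and reads ambiguously inside if/else. A compilable illustration:

    #define apic_debug_old(fmt, arg...)                 /* expands to nothing */
    #define apic_debug_new(fmt, arg...) do {} while (0) /* a real statement  */

    void demo(int verbose)
    {
        if (verbose)
            apic_debug_new("apic state changed\n"); /* well-formed if-body */
        /* with apic_debug_old, the if-body would be a bare ';' */
    }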
21775diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
21776index 714e2c0..3f7a086 100644
21777--- a/arch/x86/kvm/paging_tmpl.h
21778+++ b/arch/x86/kvm/paging_tmpl.h
21779@@ -208,7 +208,7 @@ retry_walk:
21780 if (unlikely(kvm_is_error_hva(host_addr)))
21781 goto error;
21782
21783- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
21784+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
21785 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
21786 goto error;
21787 walker->ptep_user[walker->level - 1] = ptep_user;
21788diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
21789index d017df3..61ae42e 100644
21790--- a/arch/x86/kvm/svm.c
21791+++ b/arch/x86/kvm/svm.c
21792@@ -3500,7 +3500,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
21793 int cpu = raw_smp_processor_id();
21794
21795 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
21796+
21797+ pax_open_kernel();
21798 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
21799+ pax_close_kernel();
21800+
21801 load_TR_desc();
21802 }
21803
21804@@ -3874,6 +3878,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
21805 #endif
21806 #endif
21807
21808+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21809+ __set_fs(current_thread_info()->addr_limit);
21810+#endif
21811+
21812 reload_tss(vcpu);
21813
21814 local_irq_disable();
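pax_open_kernel()/pax_close_kernel() bracket the one-field GDT update in reload_tss() here (and again in vmx.c below) because KERNEXEC keeps structures like the GDT in read-only memory; the write has to lift write protection for its duration. A kernel-context sketch of the underlying idea, assuming the helpers toggle CR0.WP (the real PaX helpers also deal with preemption and per-CPU state, and these names are invented):

    /* Sketch only: read_cr0()/write_cr0() are the <asm/special_insns.h>
     * accessors and require ring 0. */
    #define X86_CR0_WP 0x00010000UL

    static inline unsigned long open_kernel_sketch(void)
    {
        unsigned long cr0 = read_cr0();
        write_cr0(cr0 & ~X86_CR0_WP);  /* permit writes to RO pages */
        return cr0;
    }

    static inline void close_kernel_sketch(unsigned long cr0)
    {
        write_cr0(cr0);                /* restore write protection */
    }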
21815diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
21816index f858159..4ab7dba 100644
21817--- a/arch/x86/kvm/vmx.c
21818+++ b/arch/x86/kvm/vmx.c
21819@@ -1332,7 +1332,11 @@ static void reload_tss(void)
21820 struct desc_struct *descs;
21821
21822 descs = (void *)gdt->address;
21823+
21824+ pax_open_kernel();
21825 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
21826+ pax_close_kernel();
21827+
21828 load_TR_desc();
21829 }
21830
21831@@ -1546,6 +1550,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
21832 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
21833 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
21834
21835+#ifdef CONFIG_PAX_PER_CPU_PGD
21836+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
21837+#endif
21838+
21839 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
21840 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
21841 vmx->loaded_vmcs->cpu = cpu;
21842@@ -2669,8 +2677,11 @@ static __init int hardware_setup(void)
21843 if (!cpu_has_vmx_flexpriority())
21844 flexpriority_enabled = 0;
21845
21846- if (!cpu_has_vmx_tpr_shadow())
21847- kvm_x86_ops->update_cr8_intercept = NULL;
21848+ if (!cpu_has_vmx_tpr_shadow()) {
21849+ pax_open_kernel();
21850+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
21851+ pax_close_kernel();
21852+ }
21853
21854 if (enable_ept && !cpu_has_vmx_ept_2m_page())
21855 kvm_disable_largepages();
21856@@ -3712,7 +3723,10 @@ static void vmx_set_constant_host_state(void)
21857
21858 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
21859 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
21860+
21861+#ifndef CONFIG_PAX_PER_CPU_PGD
21862 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
21863+#endif
21864
21865 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
21866 #ifdef CONFIG_X86_64
21867@@ -3733,7 +3747,7 @@ static void vmx_set_constant_host_state(void)
21868 native_store_idt(&dt);
21869 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
21870
21871- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
21872+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
21873
21874 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
21875 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
21876@@ -6279,6 +6293,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
21877 "jmp 2f \n\t"
21878 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
21879 "2: "
21880+
21881+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21882+ "ljmp %[cs],$3f\n\t"
21883+ "3: "
21884+#endif
21885+
21886 /* Save guest registers, load host registers, keep flags */
21887 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
21888 "pop %0 \n\t"
21889@@ -6331,6 +6351,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
21890 #endif
21891 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
21892 [wordsize]"i"(sizeof(ulong))
21893+
21894+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21895+ ,[cs]"i"(__KERNEL_CS)
21896+#endif
21897+
21898 : "cc", "memory"
21899 #ifdef CONFIG_X86_64
21900 , "rax", "rbx", "rdi", "rsi"
21901@@ -6344,7 +6369,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
21902 if (debugctlmsr)
21903 update_debugctlmsr(debugctlmsr);
21904
21905-#ifndef CONFIG_X86_64
21906+#ifdef CONFIG_X86_32
21907 /*
21908 * The sysexit path does not restore ds/es, so we must set them to
21909 * a reasonable value ourselves.
21910@@ -6353,8 +6378,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
21911 * may be executed in interrupt context, which saves and restores segments
21912 * around it, nullifying its effect.
21913 */
21914- loadsegment(ds, __USER_DS);
21915- loadsegment(es, __USER_DS);
21916+ loadsegment(ds, __KERNEL_DS);
21917+ loadsegment(es, __KERNEL_DS);
21918+ loadsegment(ss, __KERNEL_DS);
21919+
21920+#ifdef CONFIG_PAX_KERNEXEC
21921+ loadsegment(fs, __KERNEL_PERCPU);
21922+#endif
21923+
21924+#ifdef CONFIG_PAX_MEMORY_UDEREF
21925+ __set_fs(current_thread_info()->addr_limit);
21926+#endif
21927+
21928 #endif
21929
21930 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
21931diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
21932index 4f76417..93429b5 100644
21933--- a/arch/x86/kvm/x86.c
21934+++ b/arch/x86/kvm/x86.c
21935@@ -1390,8 +1390,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
21936 {
21937 struct kvm *kvm = vcpu->kvm;
21938 int lm = is_long_mode(vcpu);
21939- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
21940- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
21941+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
21942+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
21943 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
21944 : kvm->arch.xen_hvm_config.blob_size_32;
21945 u32 page_num = data & ~PAGE_MASK;
21946@@ -2255,6 +2255,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
21947 if (n < msr_list.nmsrs)
21948 goto out;
21949 r = -EFAULT;
21950+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
21951+ goto out;
21952 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
21953 num_msrs_to_save * sizeof(u32)))
21954 goto out;
21955@@ -2379,7 +2381,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
21956 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
21957 struct kvm_interrupt *irq)
21958 {
21959- if (irq->irq < 0 || irq->irq >= KVM_NR_INTERRUPTS)
21960+ if (irq->irq >= KVM_NR_INTERRUPTS)
21961 return -EINVAL;
21962 if (irqchip_in_kernel(vcpu->kvm))
21963 return -ENXIO;
21964@@ -4881,7 +4883,7 @@ static void kvm_set_mmio_spte_mask(void)
21965 kvm_mmu_set_mmio_spte_mask(mask);
21966 }
21967
21968-int kvm_arch_init(void *opaque)
21969+int kvm_arch_init(const void *opaque)
21970 {
21971 int r;
21972 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
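Two of the x86.c hunks are plain hardening fixes rather than PaX plumbing: kvm_arch_dev_ioctl() gains a bounds check on num_msrs_to_save before its copy_to_user(), and kvm_vcpu_ioctl_interrupt() drops the 'irq->irq < 0' test, which was dead code because struct kvm_interrupt.irq is a __u32. A small runnable reminder of the unsigned-comparison pitfall:

    #include <stdio.h>

    int main(void)
    {
        unsigned int irq = (unsigned int)-1; /* a "negative" value wraps */

        if (irq < 0)                  /* tautologically false; compilers warn */
            puts("never printed");
        if (irq >= 256)               /* stand-in for KVM_NR_INTERRUPTS */
            puts("the single unsigned bound check catches it");
        return 0;
    }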
21973diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
21974index 642d880..5dd034e 100644
21975--- a/arch/x86/lguest/boot.c
21976+++ b/arch/x86/lguest/boot.c
21977@@ -1116,12 +1116,12 @@ static u32 lguest_apic_safe_wait_icr_idle(void)
21978
21979 static void set_lguest_basic_apic_ops(void)
21980 {
21981- apic->read = lguest_apic_read;
21982- apic->write = lguest_apic_write;
21983- apic->icr_read = lguest_apic_icr_read;
21984- apic->icr_write = lguest_apic_icr_write;
21985- apic->wait_icr_idle = lguest_apic_wait_icr_idle;
21986- apic->safe_wait_icr_idle = lguest_apic_safe_wait_icr_idle;
21987+ *(void **)&apic->read = lguest_apic_read;
21988+ *(void **)&apic->write = lguest_apic_write;
21989+ *(void **)&apic->icr_read = lguest_apic_icr_read;
21990+ *(void **)&apic->icr_write = lguest_apic_icr_write;
21991+ *(void **)&apic->wait_icr_idle = lguest_apic_wait_icr_idle;
21992+ *(void **)&apic->safe_wait_icr_idle = lguest_apic_safe_wait_icr_idle;
21993 };
21994 #endif
21995
21996@@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
21997 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
21998 * Launcher to reboot us.
21999 */
22000-static void lguest_restart(char *reason)
22001+static __noreturn void lguest_restart(char *reason)
22002 {
22003 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
22004+ BUG();
22005 }
22006
22007 /*G:050
22008@@ -1292,28 +1293,28 @@ __init void lguest_init(void)
22009 pv_irq_ops.safe_halt = lguest_safe_halt;
22010
22011 /* Setup operations */
22012- pv_init_ops.patch = lguest_patch;
22013+ *(void **)&pv_init_ops.patch = lguest_patch;
22014
22015 /* Intercepts of various CPU instructions */
22016- pv_cpu_ops.load_gdt = lguest_load_gdt;
22017- pv_cpu_ops.cpuid = lguest_cpuid;
22018- pv_cpu_ops.load_idt = lguest_load_idt;
22019- pv_cpu_ops.iret = lguest_iret;
22020- pv_cpu_ops.load_sp0 = lguest_load_sp0;
22021- pv_cpu_ops.load_tr_desc = lguest_load_tr_desc;
22022- pv_cpu_ops.set_ldt = lguest_set_ldt;
22023- pv_cpu_ops.load_tls = lguest_load_tls;
22024- pv_cpu_ops.set_debugreg = lguest_set_debugreg;
22025- pv_cpu_ops.clts = lguest_clts;
22026- pv_cpu_ops.read_cr0 = lguest_read_cr0;
22027- pv_cpu_ops.write_cr0 = lguest_write_cr0;
22028- pv_cpu_ops.read_cr4 = lguest_read_cr4;
22029- pv_cpu_ops.write_cr4 = lguest_write_cr4;
22030- pv_cpu_ops.write_gdt_entry = lguest_write_gdt_entry;
22031- pv_cpu_ops.write_idt_entry = lguest_write_idt_entry;
22032- pv_cpu_ops.wbinvd = lguest_wbinvd;
22033- pv_cpu_ops.start_context_switch = paravirt_start_context_switch;
22034- pv_cpu_ops.end_context_switch = lguest_end_context_switch;
22035+ *(void **)&pv_cpu_ops.load_gdt = lguest_load_gdt;
22036+ *(void **)&pv_cpu_ops.cpuid = lguest_cpuid;
22037+ *(void **)&pv_cpu_ops.load_idt = lguest_load_idt;
22038+ *(void **)&pv_cpu_ops.iret = lguest_iret;
22039+ *(void **)&pv_cpu_ops.load_sp0 = lguest_load_sp0;
22040+ *(void **)&pv_cpu_ops.load_tr_desc = lguest_load_tr_desc;
22041+ *(void **)&pv_cpu_ops.set_ldt = lguest_set_ldt;
22042+ *(void **)&pv_cpu_ops.load_tls = lguest_load_tls;
22043+ *(void **)&pv_cpu_ops.set_debugreg = lguest_set_debugreg;
22044+ *(void **)&pv_cpu_ops.clts = lguest_clts;
22045+ *(void **)&pv_cpu_ops.read_cr0 = lguest_read_cr0;
22046+ *(void **)&pv_cpu_ops.write_cr0 = lguest_write_cr0;
22047+ *(void **)&pv_cpu_ops.read_cr4 = lguest_read_cr4;
22048+ *(void **)&pv_cpu_ops.write_cr4 = lguest_write_cr4;
22049+ *(void **)&pv_cpu_ops.write_gdt_entry = lguest_write_gdt_entry;
22050+ *(void **)&pv_cpu_ops.write_idt_entry = lguest_write_idt_entry;
22051+ *(void **)&pv_cpu_ops.wbinvd = lguest_wbinvd;
22052+ *(void **)&pv_cpu_ops.start_context_switch = paravirt_start_context_switch;
22053+ *(void **)&pv_cpu_ops.end_context_switch = lguest_end_context_switch;
22054
22055 /* Pagetable management */
22056 pv_mmu_ops.write_cr3 = lguest_write_cr3;
22057@@ -1341,11 +1342,11 @@ __init void lguest_init(void)
22058 set_lguest_basic_apic_ops();
22059 #endif
22060
22061- x86_init.resources.memory_setup = lguest_memory_setup;
22062- x86_init.irqs.intr_init = lguest_init_IRQ;
22063- x86_init.timers.timer_init = lguest_time_init;
22064- x86_platform.calibrate_tsc = lguest_tsc_khz;
22065- x86_platform.get_wallclock = lguest_get_wallclock;
22066+ *(void **)&x86_init.resources.memory_setup = lguest_memory_setup;
22067+ *(void **)&x86_init.irqs.intr_init = lguest_init_IRQ;
22068+ *(void **)&x86_init.timers.timer_init = lguest_time_init;
22069+ *(void **)&x86_platform.calibrate_tsc = lguest_tsc_khz;
22070+ *(void **)&x86_platform.get_wallclock = lguest_get_wallclock;
22071
22072 /*
22073 * Now is a good time to look at the implementations of these functions
22074@@ -1434,7 +1435,7 @@ __init void lguest_init(void)
22075 * routine.
22076 */
22077 pm_power_off = lguest_power_off;
22078- machine_ops.restart = lguest_restart;
22079+ *(void **)&machine_ops.restart = lguest_restart;
22080
22081 /*
22082 * Now we're set up, call i386_start_kernel() in head32.c and we proceed
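The wholesale rewrite of the lguest assignments into '*(void **)&ops->member = fn' is the pattern grsecurity uses wherever an ops structure has been constified: direct assignment to the now-const member no longer compiles, so boot-time wiring is done through a pointer cast (combined with pax_open_kernel() when the object also lives in read-only memory). A minimal illustration; the cast relies on the kernel's GNU C dialect, where object and function pointers interconvert:

    struct machine_ops_sketch {
        void (* const restart)(char *reason);  /* constified member */
    };

    static void my_restart(char *reason) { (void)reason; }

    static struct machine_ops_sketch machine;

    static void wire_up(void)
    {
        /* machine.restart = my_restart;
         *     => error: assignment of read-only member 'restart' */
        *(void **)&machine.restart = my_restart;  /* the patch's pattern */
    }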
22083diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
22084index 00933d5..3a64af9 100644
22085--- a/arch/x86/lib/atomic64_386_32.S
22086+++ b/arch/x86/lib/atomic64_386_32.S
22087@@ -48,6 +48,10 @@ BEGIN(read)
22088 movl (v), %eax
22089 movl 4(v), %edx
22090 RET_ENDP
22091+BEGIN(read_unchecked)
22092+ movl (v), %eax
22093+ movl 4(v), %edx
22094+RET_ENDP
22095 #undef v
22096
22097 #define v %esi
22098@@ -55,6 +59,10 @@ BEGIN(set)
22099 movl %ebx, (v)
22100 movl %ecx, 4(v)
22101 RET_ENDP
22102+BEGIN(set_unchecked)
22103+ movl %ebx, (v)
22104+ movl %ecx, 4(v)
22105+RET_ENDP
22106 #undef v
22107
22108 #define v %esi
22109@@ -70,6 +78,20 @@ RET_ENDP
22110 BEGIN(add)
22111 addl %eax, (v)
22112 adcl %edx, 4(v)
22113+
22114+#ifdef CONFIG_PAX_REFCOUNT
22115+ jno 0f
22116+ subl %eax, (v)
22117+ sbbl %edx, 4(v)
22118+ int $4
22119+0:
22120+ _ASM_EXTABLE(0b, 0b)
22121+#endif
22122+
22123+RET_ENDP
22124+BEGIN(add_unchecked)
22125+ addl %eax, (v)
22126+ adcl %edx, 4(v)
22127 RET_ENDP
22128 #undef v
22129
22130@@ -77,6 +99,24 @@ RET_ENDP
22131 BEGIN(add_return)
22132 addl (v), %eax
22133 adcl 4(v), %edx
22134+
22135+#ifdef CONFIG_PAX_REFCOUNT
22136+ into
22137+1234:
22138+ _ASM_EXTABLE(1234b, 2f)
22139+#endif
22140+
22141+ movl %eax, (v)
22142+ movl %edx, 4(v)
22143+
22144+#ifdef CONFIG_PAX_REFCOUNT
22145+2:
22146+#endif
22147+
22148+RET_ENDP
22149+BEGIN(add_return_unchecked)
22150+ addl (v), %eax
22151+ adcl 4(v), %edx
22152 movl %eax, (v)
22153 movl %edx, 4(v)
22154 RET_ENDP
22155@@ -86,6 +126,20 @@ RET_ENDP
22156 BEGIN(sub)
22157 subl %eax, (v)
22158 sbbl %edx, 4(v)
22159+
22160+#ifdef CONFIG_PAX_REFCOUNT
22161+ jno 0f
22162+ addl %eax, (v)
22163+ adcl %edx, 4(v)
22164+ int $4
22165+0:
22166+ _ASM_EXTABLE(0b, 0b)
22167+#endif
22168+
22169+RET_ENDP
22170+BEGIN(sub_unchecked)
22171+ subl %eax, (v)
22172+ sbbl %edx, 4(v)
22173 RET_ENDP
22174 #undef v
22175
22176@@ -96,6 +150,27 @@ BEGIN(sub_return)
22177 sbbl $0, %edx
22178 addl (v), %eax
22179 adcl 4(v), %edx
22180+
22181+#ifdef CONFIG_PAX_REFCOUNT
22182+ into
22183+1234:
22184+ _ASM_EXTABLE(1234b, 2f)
22185+#endif
22186+
22187+ movl %eax, (v)
22188+ movl %edx, 4(v)
22189+
22190+#ifdef CONFIG_PAX_REFCOUNT
22191+2:
22192+#endif
22193+
22194+RET_ENDP
22195+BEGIN(sub_return_unchecked)
22196+ negl %edx
22197+ negl %eax
22198+ sbbl $0, %edx
22199+ addl (v), %eax
22200+ adcl 4(v), %edx
22201 movl %eax, (v)
22202 movl %edx, 4(v)
22203 RET_ENDP
22204@@ -105,6 +180,20 @@ RET_ENDP
22205 BEGIN(inc)
22206 addl $1, (v)
22207 adcl $0, 4(v)
22208+
22209+#ifdef CONFIG_PAX_REFCOUNT
22210+ jno 0f
22211+ subl $1, (v)
22212+ sbbl $0, 4(v)
22213+ int $4
22214+0:
22215+ _ASM_EXTABLE(0b, 0b)
22216+#endif
22217+
22218+RET_ENDP
22219+BEGIN(inc_unchecked)
22220+ addl $1, (v)
22221+ adcl $0, 4(v)
22222 RET_ENDP
22223 #undef v
22224
22225@@ -114,6 +203,26 @@ BEGIN(inc_return)
22226 movl 4(v), %edx
22227 addl $1, %eax
22228 adcl $0, %edx
22229+
22230+#ifdef CONFIG_PAX_REFCOUNT
22231+ into
22232+1234:
22233+ _ASM_EXTABLE(1234b, 2f)
22234+#endif
22235+
22236+ movl %eax, (v)
22237+ movl %edx, 4(v)
22238+
22239+#ifdef CONFIG_PAX_REFCOUNT
22240+2:
22241+#endif
22242+
22243+RET_ENDP
22244+BEGIN(inc_return_unchecked)
22245+ movl (v), %eax
22246+ movl 4(v), %edx
22247+ addl $1, %eax
22248+ adcl $0, %edx
22249 movl %eax, (v)
22250 movl %edx, 4(v)
22251 RET_ENDP
22252@@ -123,6 +232,20 @@ RET_ENDP
22253 BEGIN(dec)
22254 subl $1, (v)
22255 sbbl $0, 4(v)
22256+
22257+#ifdef CONFIG_PAX_REFCOUNT
22258+ jno 0f
22259+ addl $1, (v)
22260+ adcl $0, 4(v)
22261+ int $4
22262+0:
22263+ _ASM_EXTABLE(0b, 0b)
22264+#endif
22265+
22266+RET_ENDP
22267+BEGIN(dec_unchecked)
22268+ subl $1, (v)
22269+ sbbl $0, 4(v)
22270 RET_ENDP
22271 #undef v
22272
22273@@ -132,6 +255,26 @@ BEGIN(dec_return)
22274 movl 4(v), %edx
22275 subl $1, %eax
22276 sbbl $0, %edx
22277+
22278+#ifdef CONFIG_PAX_REFCOUNT
22279+ into
22280+1234:
22281+ _ASM_EXTABLE(1234b, 2f)
22282+#endif
22283+
22284+ movl %eax, (v)
22285+ movl %edx, 4(v)
22286+
22287+#ifdef CONFIG_PAX_REFCOUNT
22288+2:
22289+#endif
22290+
22291+RET_ENDP
22292+BEGIN(dec_return_unchecked)
22293+ movl (v), %eax
22294+ movl 4(v), %edx
22295+ subl $1, %eax
22296+ sbbl $0, %edx
22297 movl %eax, (v)
22298 movl %edx, 4(v)
22299 RET_ENDP
22300@@ -143,6 +286,13 @@ BEGIN(add_unless)
22301 adcl %edx, %edi
22302 addl (v), %eax
22303 adcl 4(v), %edx
22304+
22305+#ifdef CONFIG_PAX_REFCOUNT
22306+ into
22307+1234:
22308+ _ASM_EXTABLE(1234b, 2f)
22309+#endif
22310+
22311 cmpl %eax, %ecx
22312 je 3f
22313 1:
22314@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
22315 1:
22316 addl $1, %eax
22317 adcl $0, %edx
22318+
22319+#ifdef CONFIG_PAX_REFCOUNT
22320+ into
22321+1234:
22322+ _ASM_EXTABLE(1234b, 2f)
22323+#endif
22324+
22325 movl %eax, (v)
22326 movl %edx, 4(v)
22327 movl $1, %eax
22328@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
22329 movl 4(v), %edx
22330 subl $1, %eax
22331 sbbl $0, %edx
22332+
22333+#ifdef CONFIG_PAX_REFCOUNT
22334+ into
22335+1234:
22336+ _ASM_EXTABLE(1234b, 1f)
22337+#endif
22338+
22339 js 1f
22340 movl %eax, (v)
22341 movl %edx, 4(v)
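The '_unchecked' twins and the jno/into sequences above are the PAX_REFCOUNT scheme applied to the 386 atomic64 fallbacks: after each signed update the overflow flag is tested; on overflow the operation is rolled back (or diverted through the exception table) and 'int $4' raises the overflow trap, while the *_unchecked variants keep raw wrapping semantics for counters that may legitimately overflow. The same check expressed in C, using a compiler builtin rather than the OF flag:

    #include <stdint.h>
    #include <stdlib.h>

    /* __builtin_add_overflow is a GCC/Clang builtin. */
    static int64_t checked_add64(int64_t a, int64_t b)
    {
        int64_t r;

        if (__builtin_add_overflow(a, b, &r))
            abort();   /* stands in for the int $4 / #OF trap */
        return r;
    }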
22342diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
22343index f5cc9eb..51fa319 100644
22344--- a/arch/x86/lib/atomic64_cx8_32.S
22345+++ b/arch/x86/lib/atomic64_cx8_32.S
22346@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
22347 CFI_STARTPROC
22348
22349 read64 %ecx
22350+ pax_force_retaddr
22351 ret
22352 CFI_ENDPROC
22353 ENDPROC(atomic64_read_cx8)
22354
22355+ENTRY(atomic64_read_unchecked_cx8)
22356+ CFI_STARTPROC
22357+
22358+ read64 %ecx
22359+ pax_force_retaddr
22360+ ret
22361+ CFI_ENDPROC
22362+ENDPROC(atomic64_read_unchecked_cx8)
22363+
22364 ENTRY(atomic64_set_cx8)
22365 CFI_STARTPROC
22366
22367@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
22368 cmpxchg8b (%esi)
22369 jne 1b
22370
22371+ pax_force_retaddr
22372 ret
22373 CFI_ENDPROC
22374 ENDPROC(atomic64_set_cx8)
22375
22376+ENTRY(atomic64_set_unchecked_cx8)
22377+ CFI_STARTPROC
22378+
22379+1:
22380+/* we don't need LOCK_PREFIX since aligned 64-bit writes
22381+ * are atomic on 586 and newer */
22382+ cmpxchg8b (%esi)
22383+ jne 1b
22384+
22385+ pax_force_retaddr
22386+ ret
22387+ CFI_ENDPROC
22388+ENDPROC(atomic64_set_unchecked_cx8)
22389+
22390 ENTRY(atomic64_xchg_cx8)
22391 CFI_STARTPROC
22392
22393@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
22394 cmpxchg8b (%esi)
22395 jne 1b
22396
22397+ pax_force_retaddr
22398 ret
22399 CFI_ENDPROC
22400 ENDPROC(atomic64_xchg_cx8)
22401
22402-.macro addsub_return func ins insc
22403-ENTRY(atomic64_\func\()_return_cx8)
22404+.macro addsub_return func ins insc unchecked=""
22405+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
22406 CFI_STARTPROC
22407 SAVE ebp
22408 SAVE ebx
22409@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
22410 movl %edx, %ecx
22411 \ins\()l %esi, %ebx
22412 \insc\()l %edi, %ecx
22413+
22414+.ifb \unchecked
22415+#ifdef CONFIG_PAX_REFCOUNT
22416+ into
22417+2:
22418+ _ASM_EXTABLE(2b, 3f)
22419+#endif
22420+.endif
22421+
22422 LOCK_PREFIX
22423 cmpxchg8b (%ebp)
22424 jne 1b
22425-
22426-10:
22427 movl %ebx, %eax
22428 movl %ecx, %edx
22429+
22430+.ifb \unchecked
22431+#ifdef CONFIG_PAX_REFCOUNT
22432+3:
22433+#endif
22434+.endif
22435+
22436 RESTORE edi
22437 RESTORE esi
22438 RESTORE ebx
22439 RESTORE ebp
22440+ pax_force_retaddr
22441 ret
22442 CFI_ENDPROC
22443-ENDPROC(atomic64_\func\()_return_cx8)
22444+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
22445 .endm
22446
22447 addsub_return add add adc
22448 addsub_return sub sub sbb
22449+addsub_return add add adc _unchecked
22450+addsub_return sub sub sbb _unchecked
22451
22452-.macro incdec_return func ins insc
22453-ENTRY(atomic64_\func\()_return_cx8)
22454+.macro incdec_return func ins insc unchecked=""
22455+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
22456 CFI_STARTPROC
22457 SAVE ebx
22458
22459@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
22460 movl %edx, %ecx
22461 \ins\()l $1, %ebx
22462 \insc\()l $0, %ecx
22463+
22464+.ifb \unchecked
22465+#ifdef CONFIG_PAX_REFCOUNT
22466+ into
22467+2:
22468+ _ASM_EXTABLE(2b, 3f)
22469+#endif
22470+.endif
22471+
22472 LOCK_PREFIX
22473 cmpxchg8b (%esi)
22474 jne 1b
22475
22476-10:
22477 movl %ebx, %eax
22478 movl %ecx, %edx
22479+
22480+.ifb \unchecked
22481+#ifdef CONFIG_PAX_REFCOUNT
22482+3:
22483+#endif
22484+.endif
22485+
22486 RESTORE ebx
22487+ pax_force_retaddr
22488 ret
22489 CFI_ENDPROC
22490-ENDPROC(atomic64_\func\()_return_cx8)
22491+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
22492 .endm
22493
22494 incdec_return inc add adc
22495 incdec_return dec sub sbb
22496+incdec_return inc add adc _unchecked
22497+incdec_return dec sub sbb _unchecked
22498
22499 ENTRY(atomic64_dec_if_positive_cx8)
22500 CFI_STARTPROC
22501@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
22502 movl %edx, %ecx
22503 subl $1, %ebx
22504 sbb $0, %ecx
22505+
22506+#ifdef CONFIG_PAX_REFCOUNT
22507+ into
22508+1234:
22509+ _ASM_EXTABLE(1234b, 2f)
22510+#endif
22511+
22512 js 2f
22513 LOCK_PREFIX
22514 cmpxchg8b (%esi)
22515@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
22516 movl %ebx, %eax
22517 movl %ecx, %edx
22518 RESTORE ebx
22519+ pax_force_retaddr
22520 ret
22521 CFI_ENDPROC
22522 ENDPROC(atomic64_dec_if_positive_cx8)
22523@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
22524 movl %edx, %ecx
22525 addl %ebp, %ebx
22526 adcl %edi, %ecx
22527+
22528+#ifdef CONFIG_PAX_REFCOUNT
22529+ into
22530+1234:
22531+ _ASM_EXTABLE(1234b, 3f)
22532+#endif
22533+
22534 LOCK_PREFIX
22535 cmpxchg8b (%esi)
22536 jne 1b
22537@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
22538 CFI_ADJUST_CFA_OFFSET -8
22539 RESTORE ebx
22540 RESTORE ebp
22541+ pax_force_retaddr
22542 ret
22543 4:
22544 cmpl %edx, 4(%esp)
22545@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
22546 xorl %ecx, %ecx
22547 addl $1, %ebx
22548 adcl %edx, %ecx
22549+
22550+#ifdef CONFIG_PAX_REFCOUNT
22551+ into
22552+1234:
22553+ _ASM_EXTABLE(1234b, 3f)
22554+#endif
22555+
22556 LOCK_PREFIX
22557 cmpxchg8b (%esi)
22558 jne 1b
22559@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
22560 movl $1, %eax
22561 3:
22562 RESTORE ebx
22563+ pax_force_retaddr
22564 ret
22565 CFI_ENDPROC
22566 ENDPROC(atomic64_inc_not_zero_cx8)
22567diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
22568index 2af5df3..62b1a5a 100644
22569--- a/arch/x86/lib/checksum_32.S
22570+++ b/arch/x86/lib/checksum_32.S
22571@@ -29,7 +29,8 @@
22572 #include <asm/dwarf2.h>
22573 #include <asm/errno.h>
22574 #include <asm/asm.h>
22575-
22576+#include <asm/segment.h>
22577+
22578 /*
22579 * computes a partial checksum, e.g. for TCP/UDP fragments
22580 */
22581@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
22582
22583 #define ARGBASE 16
22584 #define FP 12
22585-
22586-ENTRY(csum_partial_copy_generic)
22587+
22588+ENTRY(csum_partial_copy_generic_to_user)
22589 CFI_STARTPROC
22590+
22591+#ifdef CONFIG_PAX_MEMORY_UDEREF
22592+ pushl_cfi %gs
22593+ popl_cfi %es
22594+ jmp csum_partial_copy_generic
22595+#endif
22596+
22597+ENTRY(csum_partial_copy_generic_from_user)
22598+
22599+#ifdef CONFIG_PAX_MEMORY_UDEREF
22600+ pushl_cfi %gs
22601+ popl_cfi %ds
22602+#endif
22603+
22604+ENTRY(csum_partial_copy_generic)
22605 subl $4,%esp
22606 CFI_ADJUST_CFA_OFFSET 4
22607 pushl_cfi %edi
22608@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
22609 jmp 4f
22610 SRC(1: movw (%esi), %bx )
22611 addl $2, %esi
22612-DST( movw %bx, (%edi) )
22613+DST( movw %bx, %es:(%edi) )
22614 addl $2, %edi
22615 addw %bx, %ax
22616 adcl $0, %eax
22617@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
22618 SRC(1: movl (%esi), %ebx )
22619 SRC( movl 4(%esi), %edx )
22620 adcl %ebx, %eax
22621-DST( movl %ebx, (%edi) )
22622+DST( movl %ebx, %es:(%edi) )
22623 adcl %edx, %eax
22624-DST( movl %edx, 4(%edi) )
22625+DST( movl %edx, %es:4(%edi) )
22626
22627 SRC( movl 8(%esi), %ebx )
22628 SRC( movl 12(%esi), %edx )
22629 adcl %ebx, %eax
22630-DST( movl %ebx, 8(%edi) )
22631+DST( movl %ebx, %es:8(%edi) )
22632 adcl %edx, %eax
22633-DST( movl %edx, 12(%edi) )
22634+DST( movl %edx, %es:12(%edi) )
22635
22636 SRC( movl 16(%esi), %ebx )
22637 SRC( movl 20(%esi), %edx )
22638 adcl %ebx, %eax
22639-DST( movl %ebx, 16(%edi) )
22640+DST( movl %ebx, %es:16(%edi) )
22641 adcl %edx, %eax
22642-DST( movl %edx, 20(%edi) )
22643+DST( movl %edx, %es:20(%edi) )
22644
22645 SRC( movl 24(%esi), %ebx )
22646 SRC( movl 28(%esi), %edx )
22647 adcl %ebx, %eax
22648-DST( movl %ebx, 24(%edi) )
22649+DST( movl %ebx, %es:24(%edi) )
22650 adcl %edx, %eax
22651-DST( movl %edx, 28(%edi) )
22652+DST( movl %edx, %es:28(%edi) )
22653
22654 lea 32(%esi), %esi
22655 lea 32(%edi), %edi
22656@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
22657 shrl $2, %edx # This clears CF
22658 SRC(3: movl (%esi), %ebx )
22659 adcl %ebx, %eax
22660-DST( movl %ebx, (%edi) )
22661+DST( movl %ebx, %es:(%edi) )
22662 lea 4(%esi), %esi
22663 lea 4(%edi), %edi
22664 dec %edx
22665@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
22666 jb 5f
22667 SRC( movw (%esi), %cx )
22668 leal 2(%esi), %esi
22669-DST( movw %cx, (%edi) )
22670+DST( movw %cx, %es:(%edi) )
22671 leal 2(%edi), %edi
22672 je 6f
22673 shll $16,%ecx
22674 SRC(5: movb (%esi), %cl )
22675-DST( movb %cl, (%edi) )
22676+DST( movb %cl, %es:(%edi) )
22677 6: addl %ecx, %eax
22678 adcl $0, %eax
22679 7:
22680@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
22681
22682 6001:
22683 movl ARGBASE+20(%esp), %ebx # src_err_ptr
22684- movl $-EFAULT, (%ebx)
22685+ movl $-EFAULT, %ss:(%ebx)
22686
22687 # zero the complete destination - computing the rest
22688 # is too much work
22689@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
22690
22691 6002:
22692 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
22693- movl $-EFAULT,(%ebx)
22694+ movl $-EFAULT,%ss:(%ebx)
22695 jmp 5000b
22696
22697 .previous
22698
22699+ pushl_cfi %ss
22700+ popl_cfi %ds
22701+ pushl_cfi %ss
22702+ popl_cfi %es
22703 popl_cfi %ebx
22704 CFI_RESTORE ebx
22705 popl_cfi %esi
22706@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
22707 popl_cfi %ecx # equivalent to addl $4,%esp
22708 ret
22709 CFI_ENDPROC
22710-ENDPROC(csum_partial_copy_generic)
22711+ENDPROC(csum_partial_copy_generic_to_user)
22712
22713 #else
22714
22715 /* Version for PentiumII/PPro */
22716
22717 #define ROUND1(x) \
22718+ nop; nop; nop; \
22719 SRC(movl x(%esi), %ebx ) ; \
22720 addl %ebx, %eax ; \
22721- DST(movl %ebx, x(%edi) ) ;
22722+ DST(movl %ebx, %es:x(%edi)) ;
22723
22724 #define ROUND(x) \
22725+ nop; nop; nop; \
22726 SRC(movl x(%esi), %ebx ) ; \
22727 adcl %ebx, %eax ; \
22728- DST(movl %ebx, x(%edi) ) ;
22729+ DST(movl %ebx, %es:x(%edi)) ;
22730
22731 #define ARGBASE 12
22732-
22733-ENTRY(csum_partial_copy_generic)
22734+
22735+ENTRY(csum_partial_copy_generic_to_user)
22736 CFI_STARTPROC
22737+
22738+#ifdef CONFIG_PAX_MEMORY_UDEREF
22739+ pushl_cfi %gs
22740+ popl_cfi %es
22741+ jmp csum_partial_copy_generic
22742+#endif
22743+
22744+ENTRY(csum_partial_copy_generic_from_user)
22745+
22746+#ifdef CONFIG_PAX_MEMORY_UDEREF
22747+ pushl_cfi %gs
22748+ popl_cfi %ds
22749+#endif
22750+
22751+ENTRY(csum_partial_copy_generic)
22752 pushl_cfi %ebx
22753 CFI_REL_OFFSET ebx, 0
22754 pushl_cfi %edi
22755@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
22756 subl %ebx, %edi
22757 lea -1(%esi),%edx
22758 andl $-32,%edx
22759- lea 3f(%ebx,%ebx), %ebx
22760+ lea 3f(%ebx,%ebx,2), %ebx
22761 testl %esi, %esi
22762 jmp *%ebx
22763 1: addl $64,%esi
22764@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
22765 jb 5f
22766 SRC( movw (%esi), %dx )
22767 leal 2(%esi), %esi
22768-DST( movw %dx, (%edi) )
22769+DST( movw %dx, %es:(%edi) )
22770 leal 2(%edi), %edi
22771 je 6f
22772 shll $16,%edx
22773 5:
22774 SRC( movb (%esi), %dl )
22775-DST( movb %dl, (%edi) )
22776+DST( movb %dl, %es:(%edi) )
22777 6: addl %edx, %eax
22778 adcl $0, %eax
22779 7:
22780 .section .fixup, "ax"
22781 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
22782- movl $-EFAULT, (%ebx)
22783+ movl $-EFAULT, %ss:(%ebx)
22784 # zero the complete destination (computing the rest is too much work)
22785 movl ARGBASE+8(%esp),%edi # dst
22786 movl ARGBASE+12(%esp),%ecx # len
22787@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
22788 rep; stosb
22789 jmp 7b
22790 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
22791- movl $-EFAULT, (%ebx)
22792+ movl $-EFAULT, %ss:(%ebx)
22793 jmp 7b
22794 .previous
22795
22796+#ifdef CONFIG_PAX_MEMORY_UDEREF
22797+ pushl_cfi %ss
22798+ popl_cfi %ds
22799+ pushl_cfi %ss
22800+ popl_cfi %es
22801+#endif
22802+
22803 popl_cfi %esi
22804 CFI_RESTORE esi
22805 popl_cfi %edi
22806@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
22807 CFI_RESTORE ebx
22808 ret
22809 CFI_ENDPROC
22810-ENDPROC(csum_partial_copy_generic)
22811+ENDPROC(csum_partial_copy_generic_to_user)
22812
22813 #undef ROUND
22814 #undef ROUND1
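Beyond the UDEREF segment plumbing (separate _to_user/_from_user entry points that load %gs into %es or %ds so only the userland side of the copy goes through the user segment, with the fixup paths restoring %ds/%es from %ss), note the quiet 'lea 3f(%ebx,%ebx,2)' change: the one-byte %es: prefixes and the padding nops apparently change each unrolled ROUND's size, and the lea's scale factor moves from 2x to 3x so the computed entry points stay evenly spaced. A toy computed-goto version of that dispatch (GCC labels-as-values):

    #include <stdio.h>

    static int rounds_from(int start)
    {
        int sum = 0;
        void *entry[] = { &&r0, &&r1, &&r2, &&done };

        goto *entry[start];   /* like "jmp *%ebx" into the unrolled loop */
    r0: sum += 1;
    r1: sum += 1;
    r2: sum += 1;
    done:
        return sum;
    }

    int main(void)
    {
        printf("%d\n", rounds_from(1)); /* enters at round 1, prints 2 */
        return 0;
    }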
22815diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
22816index f2145cf..cea889d 100644
22817--- a/arch/x86/lib/clear_page_64.S
22818+++ b/arch/x86/lib/clear_page_64.S
22819@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
22820 movl $4096/8,%ecx
22821 xorl %eax,%eax
22822 rep stosq
22823+ pax_force_retaddr
22824 ret
22825 CFI_ENDPROC
22826 ENDPROC(clear_page_c)
22827@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
22828 movl $4096,%ecx
22829 xorl %eax,%eax
22830 rep stosb
22831+ pax_force_retaddr
22832 ret
22833 CFI_ENDPROC
22834 ENDPROC(clear_page_c_e)
22835@@ -43,6 +45,7 @@ ENTRY(clear_page)
22836 leaq 64(%rdi),%rdi
22837 jnz .Lloop
22838 nop
22839+ pax_force_retaddr
22840 ret
22841 CFI_ENDPROC
22842 .Lclear_page_end:
22843@@ -58,7 +61,7 @@ ENDPROC(clear_page)
22844
22845 #include <asm/cpufeature.h>
22846
22847- .section .altinstr_replacement,"ax"
22848+ .section .altinstr_replacement,"a"
22849 1: .byte 0xeb /* jmp <disp8> */
22850 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
22851 2: .byte 0xeb /* jmp <disp8> */
22852diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
22853index 1e572c5..2a162cd 100644
22854--- a/arch/x86/lib/cmpxchg16b_emu.S
22855+++ b/arch/x86/lib/cmpxchg16b_emu.S
22856@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
22857
22858 popf
22859 mov $1, %al
22860+ pax_force_retaddr
22861 ret
22862
22863 not_same:
22864 popf
22865 xor %al,%al
22866+ pax_force_retaddr
22867 ret
22868
22869 CFI_ENDPROC
22870diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
22871index 6b34d04..dccb07f 100644
22872--- a/arch/x86/lib/copy_page_64.S
22873+++ b/arch/x86/lib/copy_page_64.S
22874@@ -9,6 +9,7 @@ copy_page_c:
22875 CFI_STARTPROC
22876 movl $4096/8,%ecx
22877 rep movsq
22878+ pax_force_retaddr
22879 ret
22880 CFI_ENDPROC
22881 ENDPROC(copy_page_c)
22882@@ -20,12 +21,14 @@ ENDPROC(copy_page_c)
22883
22884 ENTRY(copy_page)
22885 CFI_STARTPROC
22886- subq $2*8,%rsp
22887- CFI_ADJUST_CFA_OFFSET 2*8
22888+ subq $3*8,%rsp
22889+ CFI_ADJUST_CFA_OFFSET 3*8
22890 movq %rbx,(%rsp)
22891 CFI_REL_OFFSET rbx, 0
22892 movq %r12,1*8(%rsp)
22893 CFI_REL_OFFSET r12, 1*8
22894+ movq %r13,2*8(%rsp)
22895+ CFI_REL_OFFSET r13, 2*8
22896
22897 movl $(4096/64)-5,%ecx
22898 .p2align 4
22899@@ -37,7 +40,7 @@ ENTRY(copy_page)
22900 movq 16 (%rsi), %rdx
22901 movq 24 (%rsi), %r8
22902 movq 32 (%rsi), %r9
22903- movq 40 (%rsi), %r10
22904+ movq 40 (%rsi), %r13
22905 movq 48 (%rsi), %r11
22906 movq 56 (%rsi), %r12
22907
22908@@ -48,7 +51,7 @@ ENTRY(copy_page)
22909 movq %rdx, 16 (%rdi)
22910 movq %r8, 24 (%rdi)
22911 movq %r9, 32 (%rdi)
22912- movq %r10, 40 (%rdi)
22913+ movq %r13, 40 (%rdi)
22914 movq %r11, 48 (%rdi)
22915 movq %r12, 56 (%rdi)
22916
22917@@ -67,7 +70,7 @@ ENTRY(copy_page)
22918 movq 16 (%rsi), %rdx
22919 movq 24 (%rsi), %r8
22920 movq 32 (%rsi), %r9
22921- movq 40 (%rsi), %r10
22922+ movq 40 (%rsi), %r13
22923 movq 48 (%rsi), %r11
22924 movq 56 (%rsi), %r12
22925
22926@@ -76,7 +79,7 @@ ENTRY(copy_page)
22927 movq %rdx, 16 (%rdi)
22928 movq %r8, 24 (%rdi)
22929 movq %r9, 32 (%rdi)
22930- movq %r10, 40 (%rdi)
22931+ movq %r13, 40 (%rdi)
22932 movq %r11, 48 (%rdi)
22933 movq %r12, 56 (%rdi)
22934
22935@@ -89,8 +92,11 @@ ENTRY(copy_page)
22936 CFI_RESTORE rbx
22937 movq 1*8(%rsp),%r12
22938 CFI_RESTORE r12
22939- addq $2*8,%rsp
22940- CFI_ADJUST_CFA_OFFSET -2*8
22941+ movq 2*8(%rsp),%r13
22942+ CFI_RESTORE r13
22943+ addq $3*8,%rsp
22944+ CFI_ADJUST_CFA_OFFSET -3*8
22945+ pax_force_retaddr
22946 ret
22947 .Lcopy_page_end:
22948 CFI_ENDPROC
22949@@ -101,7 +107,7 @@ ENDPROC(copy_page)
22950
22951 #include <asm/cpufeature.h>
22952
22953- .section .altinstr_replacement,"ax"
22954+ .section .altinstr_replacement,"a"
22955 1: .byte 0xeb /* jmp <disp8> */
22956 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
22957 2:
22958diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
22959index a30ca15..d25fab6 100644
22960--- a/arch/x86/lib/copy_user_64.S
22961+++ b/arch/x86/lib/copy_user_64.S
22962@@ -18,6 +18,7 @@
22963 #include <asm/alternative-asm.h>
22964 #include <asm/asm.h>
22965 #include <asm/smap.h>
22966+#include <asm/pgtable.h>
22967
22968 /*
22969 * By placing feature2 after feature1 in altinstructions section, we logically
22970@@ -31,7 +32,7 @@
22971 .byte 0xe9 /* 32bit jump */
22972 .long \orig-1f /* by default jump to orig */
22973 1:
22974- .section .altinstr_replacement,"ax"
22975+ .section .altinstr_replacement,"a"
22976 2: .byte 0xe9 /* near jump with 32bit immediate */
22977 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
22978 3: .byte 0xe9 /* near jump with 32bit immediate */
22979@@ -70,47 +71,20 @@
22980 #endif
22981 .endm
22982
22983-/* Standard copy_to_user with segment limit checking */
22984-ENTRY(_copy_to_user)
22985- CFI_STARTPROC
22986- GET_THREAD_INFO(%rax)
22987- movq %rdi,%rcx
22988- addq %rdx,%rcx
22989- jc bad_to_user
22990- cmpq TI_addr_limit(%rax),%rcx
22991- ja bad_to_user
22992- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
22993- copy_user_generic_unrolled,copy_user_generic_string, \
22994- copy_user_enhanced_fast_string
22995- CFI_ENDPROC
22996-ENDPROC(_copy_to_user)
22997-
22998-/* Standard copy_from_user with segment limit checking */
22999-ENTRY(_copy_from_user)
23000- CFI_STARTPROC
23001- GET_THREAD_INFO(%rax)
23002- movq %rsi,%rcx
23003- addq %rdx,%rcx
23004- jc bad_from_user
23005- cmpq TI_addr_limit(%rax),%rcx
23006- ja bad_from_user
23007- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
23008- copy_user_generic_unrolled,copy_user_generic_string, \
23009- copy_user_enhanced_fast_string
23010- CFI_ENDPROC
23011-ENDPROC(_copy_from_user)
23012-
23013 .section .fixup,"ax"
23014 /* must zero dest */
23015 ENTRY(bad_from_user)
23016 bad_from_user:
23017 CFI_STARTPROC
23018+ testl %edx,%edx
23019+ js bad_to_user
23020 movl %edx,%ecx
23021 xorl %eax,%eax
23022 rep
23023 stosb
23024 bad_to_user:
23025 movl %edx,%eax
23026+ pax_force_retaddr
23027 ret
23028 CFI_ENDPROC
23029 ENDPROC(bad_from_user)
23030@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
23031 jz 17f
23032 1: movq (%rsi),%r8
23033 2: movq 1*8(%rsi),%r9
23034-3: movq 2*8(%rsi),%r10
23035+3: movq 2*8(%rsi),%rax
23036 4: movq 3*8(%rsi),%r11
23037 5: movq %r8,(%rdi)
23038 6: movq %r9,1*8(%rdi)
23039-7: movq %r10,2*8(%rdi)
23040+7: movq %rax,2*8(%rdi)
23041 8: movq %r11,3*8(%rdi)
23042 9: movq 4*8(%rsi),%r8
23043 10: movq 5*8(%rsi),%r9
23044-11: movq 6*8(%rsi),%r10
23045+11: movq 6*8(%rsi),%rax
23046 12: movq 7*8(%rsi),%r11
23047 13: movq %r8,4*8(%rdi)
23048 14: movq %r9,5*8(%rdi)
23049-15: movq %r10,6*8(%rdi)
23050+15: movq %rax,6*8(%rdi)
23051 16: movq %r11,7*8(%rdi)
23052 leaq 64(%rsi),%rsi
23053 leaq 64(%rdi),%rdi
23054@@ -180,6 +154,7 @@ ENTRY(copy_user_generic_unrolled)
23055 jnz 21b
23056 23: xor %eax,%eax
23057 ASM_CLAC
23058+ pax_force_retaddr
23059 ret
23060
23061 .section .fixup,"ax"
23062@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
23063 movsb
23064 4: xorl %eax,%eax
23065 ASM_CLAC
23066+ pax_force_retaddr
23067 ret
23068
23069 .section .fixup,"ax"
23070@@ -286,6 +262,7 @@ ENTRY(copy_user_enhanced_fast_string)
23071 movsb
23072 2: xorl %eax,%eax
23073 ASM_CLAC
23074+ pax_force_retaddr
23075 ret
23076
23077 .section .fixup,"ax"
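Deleting the out-of-line _copy_to_user/_copy_from_user entry points matches the two EXPORT_SYMBOL removals in x8664_ksyms_64.c at the top of this stretch; the limit checks presumably move into inline wrappers in the uaccess headers elsewhere in the patch. The surviving bad_from_user fixup also gains 'testl %edx,%edx; js bad_to_user' so a huge (sign-bit-set) length is reported as a fault instead of driving the zeroing 'rep stosb' across gigabytes. A hypothetical shape for the inline replacement (the wrapper name is invented; access_ok/__copy_to_user are the 3.7-era primitives):

    static inline unsigned long
    copy_to_user_checked(void __user *to, const void *from, unsigned long n)
    {
        if (!access_ok(VERIFY_WRITE, to, n))
            return n;               /* report "n bytes not copied" */
        return __copy_to_user(to, from, n);
    }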
23078diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
23079index 6a4f43c..f5f9e26 100644
23080--- a/arch/x86/lib/copy_user_nocache_64.S
23081+++ b/arch/x86/lib/copy_user_nocache_64.S
23082@@ -8,6 +8,7 @@
23083
23084 #include <linux/linkage.h>
23085 #include <asm/dwarf2.h>
23086+#include <asm/alternative-asm.h>
23087
23088 #define FIX_ALIGNMENT 1
23089
23090@@ -16,6 +17,7 @@
23091 #include <asm/thread_info.h>
23092 #include <asm/asm.h>
23093 #include <asm/smap.h>
23094+#include <asm/pgtable.h>
23095
23096 .macro ALIGN_DESTINATION
23097 #ifdef FIX_ALIGNMENT
23098@@ -49,6 +51,15 @@
23099 */
23100 ENTRY(__copy_user_nocache)
23101 CFI_STARTPROC
23102+
23103+#ifdef CONFIG_PAX_MEMORY_UDEREF
23104+ mov $PAX_USER_SHADOW_BASE,%rcx
23105+ cmp %rcx,%rsi
23106+ jae 1f
23107+ add %rcx,%rsi
23108+1:
23109+#endif
23110+
23111 ASM_STAC
23112 cmpl $8,%edx
23113 jb 20f /* less than 8 bytes, go to byte copy loop */
23114@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
23115 jz 17f
23116 1: movq (%rsi),%r8
23117 2: movq 1*8(%rsi),%r9
23118-3: movq 2*8(%rsi),%r10
23119+3: movq 2*8(%rsi),%rax
23120 4: movq 3*8(%rsi),%r11
23121 5: movnti %r8,(%rdi)
23122 6: movnti %r9,1*8(%rdi)
23123-7: movnti %r10,2*8(%rdi)
23124+7: movnti %rax,2*8(%rdi)
23125 8: movnti %r11,3*8(%rdi)
23126 9: movq 4*8(%rsi),%r8
23127 10: movq 5*8(%rsi),%r9
23128-11: movq 6*8(%rsi),%r10
23129+11: movq 6*8(%rsi),%rax
23130 12: movq 7*8(%rsi),%r11
23131 13: movnti %r8,4*8(%rdi)
23132 14: movnti %r9,5*8(%rdi)
23133-15: movnti %r10,6*8(%rdi)
23134+15: movnti %rax,6*8(%rdi)
23135 16: movnti %r11,7*8(%rdi)
23136 leaq 64(%rsi),%rsi
23137 leaq 64(%rdi),%rdi
23138@@ -99,6 +110,7 @@ ENTRY(__copy_user_nocache)
23139 23: xorl %eax,%eax
23140 ASM_CLAC
23141 sfence
23142+ pax_force_retaddr
23143 ret
23144
23145 .section .fixup,"ax"
23146diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
23147index 2419d5f..953ee51 100644
23148--- a/arch/x86/lib/csum-copy_64.S
23149+++ b/arch/x86/lib/csum-copy_64.S
23150@@ -9,6 +9,7 @@
23151 #include <asm/dwarf2.h>
23152 #include <asm/errno.h>
23153 #include <asm/asm.h>
23154+#include <asm/alternative-asm.h>
23155
23156 /*
23157 * Checksum copy with exception handling.
23158@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
23159 CFI_RESTORE rbp
23160 addq $7*8, %rsp
23161 CFI_ADJUST_CFA_OFFSET -7*8
23162+ pax_force_retaddr 0, 1
23163 ret
23164 CFI_RESTORE_STATE
23165
23166diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
23167index 25b7ae8..169fafc 100644
23168--- a/arch/x86/lib/csum-wrappers_64.c
23169+++ b/arch/x86/lib/csum-wrappers_64.c
23170@@ -52,7 +52,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
23171 len -= 2;
23172 }
23173 }
23174- isum = csum_partial_copy_generic((__force const void *)src,
23175+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
23176 dst, len, isum, errp, NULL);
23177 if (unlikely(*errp))
23178 goto out_err;
23179@@ -105,7 +105,7 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
23180 }
23181
23182 *errp = 0;
23183- return csum_partial_copy_generic(src, (void __force *)dst,
23184+ return csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
23185 len, isum, NULL, errp);
23186 }
23187 EXPORT_SYMBOL(csum_partial_copy_to_user);
23188diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
23189index 156b9c8..b144132 100644
23190--- a/arch/x86/lib/getuser.S
23191+++ b/arch/x86/lib/getuser.S
23192@@ -34,17 +34,40 @@
23193 #include <asm/thread_info.h>
23194 #include <asm/asm.h>
23195 #include <asm/smap.h>
23196+#include <asm/segment.h>
23197+#include <asm/pgtable.h>
23198+#include <asm/alternative-asm.h>
23199+
23200+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
23201+#define __copyuser_seg gs;
23202+#else
23203+#define __copyuser_seg
23204+#endif
23205
23206 .text
23207 ENTRY(__get_user_1)
23208 CFI_STARTPROC
23209+
23210+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23211 GET_THREAD_INFO(%_ASM_DX)
23212 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
23213 jae bad_get_user
23214 ASM_STAC
23215-1: movzb (%_ASM_AX),%edx
23216+
23217+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23218+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
23219+ cmp %_ASM_DX,%_ASM_AX
23220+ jae 1234f
23221+ add %_ASM_DX,%_ASM_AX
23222+1234:
23223+#endif
23224+
23225+#endif
23226+
23227+1: __copyuser_seg movzb (%_ASM_AX),%edx
23228 xor %eax,%eax
23229 ASM_CLAC
23230+ pax_force_retaddr
23231 ret
23232 CFI_ENDPROC
23233 ENDPROC(__get_user_1)
23234@@ -52,14 +75,28 @@ ENDPROC(__get_user_1)
23235 ENTRY(__get_user_2)
23236 CFI_STARTPROC
23237 add $1,%_ASM_AX
23238+
23239+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23240 jc bad_get_user
23241 GET_THREAD_INFO(%_ASM_DX)
23242 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
23243 jae bad_get_user
23244 ASM_STAC
23245-2: movzwl -1(%_ASM_AX),%edx
23246+
23247+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23248+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
23249+ cmp %_ASM_DX,%_ASM_AX
23250+ jae 1234f
23251+ add %_ASM_DX,%_ASM_AX
23252+1234:
23253+#endif
23254+
23255+#endif
23256+
23257+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
23258 xor %eax,%eax
23259 ASM_CLAC
23260+ pax_force_retaddr
23261 ret
23262 CFI_ENDPROC
23263 ENDPROC(__get_user_2)
23264@@ -67,14 +104,28 @@ ENDPROC(__get_user_2)
23265 ENTRY(__get_user_4)
23266 CFI_STARTPROC
23267 add $3,%_ASM_AX
23268+
23269+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23270 jc bad_get_user
23271 GET_THREAD_INFO(%_ASM_DX)
23272 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
23273 jae bad_get_user
23274 ASM_STAC
23275-3: mov -3(%_ASM_AX),%edx
23276+
23277+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23278+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
23279+ cmp %_ASM_DX,%_ASM_AX
23280+ jae 1234f
23281+ add %_ASM_DX,%_ASM_AX
23282+1234:
23283+#endif
23284+
23285+#endif
23286+
23287+3: __copyuser_seg mov -3(%_ASM_AX),%edx
23288 xor %eax,%eax
23289 ASM_CLAC
23290+ pax_force_retaddr
23291 ret
23292 CFI_ENDPROC
23293 ENDPROC(__get_user_4)
23294@@ -87,10 +138,20 @@ ENTRY(__get_user_8)
23295 GET_THREAD_INFO(%_ASM_DX)
23296 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
23297 jae bad_get_user
23298+
23299+#ifdef CONFIG_PAX_MEMORY_UDEREF
23300+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
23301+ cmp %_ASM_DX,%_ASM_AX
23302+ jae 1234f
23303+ add %_ASM_DX,%_ASM_AX
23304+1234:
23305+#endif
23306+
23307 ASM_STAC
23308 4: movq -7(%_ASM_AX),%_ASM_DX
23309 xor %eax,%eax
23310 ASM_CLAC
23311+ pax_force_retaddr
23312 ret
23313 CFI_ENDPROC
23314 ENDPROC(__get_user_8)
23315@@ -101,6 +162,7 @@ bad_get_user:
23316 xor %edx,%edx
23317 mov $(-EFAULT),%_ASM_AX
23318 ASM_CLAC
23319+ pax_force_retaddr
23320 ret
23321 CFI_ENDPROC
23322 END(bad_get_user)
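The recurring PAX_USER_SHADOW_BASE sequence in the __get_user_* stubs is the 64-bit UDEREF address rebase: userland is also mapped at a high shadow offset, and any address below the shadow base is shifted into that mapping, so a kernel address smuggled in as a "user pointer" faults instead of being dereferenced. The asm pattern restated as C (the helper name is invented; the constant is the one the asm references):

    static inline const void *uderef_rebase(const void *uaddr)
    {
        unsigned long p = (unsigned long)uaddr;

        if (p < PAX_USER_SHADOW_BASE)   /* plausibly a real user pointer */
            p += PAX_USER_SHADOW_BASE;  /* redirect into the shadow map */
        return (const void *)p;
    }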
23323diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
23324index 54fcffe..7be149e 100644
23325--- a/arch/x86/lib/insn.c
23326+++ b/arch/x86/lib/insn.c
23327@@ -20,8 +20,10 @@
23328
23329 #ifdef __KERNEL__
23330 #include <linux/string.h>
23331+#include <asm/pgtable_types.h>
23332 #else
23333 #include <string.h>
23334+#define ktla_ktva(addr) addr
23335 #endif
23336 #include <asm/inat.h>
23337 #include <asm/insn.h>
23338@@ -53,8 +55,8 @@
23339 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
23340 {
23341 memset(insn, 0, sizeof(*insn));
23342- insn->kaddr = kaddr;
23343- insn->next_byte = kaddr;
23344+ insn->kaddr = ktla_ktva(kaddr);
23345+ insn->next_byte = ktla_ktva(kaddr);
23346 insn->x86_64 = x86_64 ? 1 : 0;
23347 insn->opnd_bytes = 4;
23348 if (x86_64)
23349diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
23350index 05a95e7..326f2fa 100644
23351--- a/arch/x86/lib/iomap_copy_64.S
23352+++ b/arch/x86/lib/iomap_copy_64.S
23353@@ -17,6 +17,7 @@
23354
23355 #include <linux/linkage.h>
23356 #include <asm/dwarf2.h>
23357+#include <asm/alternative-asm.h>
23358
23359 /*
23360 * override generic version in lib/iomap_copy.c
23361@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
23362 CFI_STARTPROC
23363 movl %edx,%ecx
23364 rep movsd
23365+ pax_force_retaddr
23366 ret
23367 CFI_ENDPROC
23368 ENDPROC(__iowrite32_copy)
23369diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
23370index 1c273be..da9cc0e 100644
23371--- a/arch/x86/lib/memcpy_64.S
23372+++ b/arch/x86/lib/memcpy_64.S
23373@@ -33,6 +33,7 @@
23374 rep movsq
23375 movl %edx, %ecx
23376 rep movsb
23377+ pax_force_retaddr
23378 ret
23379 .Lmemcpy_e:
23380 .previous
23381@@ -49,6 +50,7 @@
23382 movq %rdi, %rax
23383 movq %rdx, %rcx
23384 rep movsb
23385+ pax_force_retaddr
23386 ret
23387 .Lmemcpy_e_e:
23388 .previous
23389@@ -76,13 +78,13 @@ ENTRY(memcpy)
23390 */
23391 movq 0*8(%rsi), %r8
23392 movq 1*8(%rsi), %r9
23393- movq 2*8(%rsi), %r10
23394+ movq 2*8(%rsi), %rcx
23395 movq 3*8(%rsi), %r11
23396 leaq 4*8(%rsi), %rsi
23397
23398 movq %r8, 0*8(%rdi)
23399 movq %r9, 1*8(%rdi)
23400- movq %r10, 2*8(%rdi)
23401+ movq %rcx, 2*8(%rdi)
23402 movq %r11, 3*8(%rdi)
23403 leaq 4*8(%rdi), %rdi
23404 jae .Lcopy_forward_loop
23405@@ -105,12 +107,12 @@ ENTRY(memcpy)
23406 subq $0x20, %rdx
23407 movq -1*8(%rsi), %r8
23408 movq -2*8(%rsi), %r9
23409- movq -3*8(%rsi), %r10
23410+ movq -3*8(%rsi), %rcx
23411 movq -4*8(%rsi), %r11
23412 leaq -4*8(%rsi), %rsi
23413 movq %r8, -1*8(%rdi)
23414 movq %r9, -2*8(%rdi)
23415- movq %r10, -3*8(%rdi)
23416+ movq %rcx, -3*8(%rdi)
23417 movq %r11, -4*8(%rdi)
23418 leaq -4*8(%rdi), %rdi
23419 jae .Lcopy_backward_loop
23420@@ -130,12 +132,13 @@ ENTRY(memcpy)
23421 */
23422 movq 0*8(%rsi), %r8
23423 movq 1*8(%rsi), %r9
23424- movq -2*8(%rsi, %rdx), %r10
23425+ movq -2*8(%rsi, %rdx), %rcx
23426 movq -1*8(%rsi, %rdx), %r11
23427 movq %r8, 0*8(%rdi)
23428 movq %r9, 1*8(%rdi)
23429- movq %r10, -2*8(%rdi, %rdx)
23430+ movq %rcx, -2*8(%rdi, %rdx)
23431 movq %r11, -1*8(%rdi, %rdx)
23432+ pax_force_retaddr
23433 retq
23434 .p2align 4
23435 .Lless_16bytes:
23436@@ -148,6 +151,7 @@ ENTRY(memcpy)
23437 movq -1*8(%rsi, %rdx), %r9
23438 movq %r8, 0*8(%rdi)
23439 movq %r9, -1*8(%rdi, %rdx)
23440+ pax_force_retaddr
23441 retq
23442 .p2align 4
23443 .Lless_8bytes:
23444@@ -161,6 +165,7 @@ ENTRY(memcpy)
23445 movl -4(%rsi, %rdx), %r8d
23446 movl %ecx, (%rdi)
23447 movl %r8d, -4(%rdi, %rdx)
23448+ pax_force_retaddr
23449 retq
23450 .p2align 4
23451 .Lless_3bytes:
23452@@ -179,6 +184,7 @@ ENTRY(memcpy)
23453 movb %cl, (%rdi)
23454
23455 .Lend:
23456+ pax_force_retaddr
23457 retq
23458 CFI_ENDPROC
23459 ENDPROC(memcpy)
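Two changes repeat through memcpy, memmove, memset and copy_page: %r10 is vacated in favour of %rcx, %rax, %r9 or a saved %r13, apparently because one KERNEXEC plugin mode reserves %r10 for the return-address mask, and pax_force_retaddr is emitted before every ret so the saved return address is forced back into kernel space before being consumed. A conceptual model of the latter (the mask and helper are illustrative, not the real macro):

    #include <stdint.h>

    #define KERNEL_ADDR_MASK 0xffff800000000000ULL  /* hypothetical mask */

    static inline uint64_t force_kernel_retaddr(uint64_t retaddr)
    {
        /* ORing with a kernel-space mask means even a corrupted return
         * address cannot resolve to a userland mapping. */
        return retaddr | KERNEL_ADDR_MASK;
    }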
23460diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
23461index ee16461..c39c199 100644
23462--- a/arch/x86/lib/memmove_64.S
23463+++ b/arch/x86/lib/memmove_64.S
23464@@ -61,13 +61,13 @@ ENTRY(memmove)
23465 5:
23466 sub $0x20, %rdx
23467 movq 0*8(%rsi), %r11
23468- movq 1*8(%rsi), %r10
23469+ movq 1*8(%rsi), %rcx
23470 movq 2*8(%rsi), %r9
23471 movq 3*8(%rsi), %r8
23472 leaq 4*8(%rsi), %rsi
23473
23474 movq %r11, 0*8(%rdi)
23475- movq %r10, 1*8(%rdi)
23476+ movq %rcx, 1*8(%rdi)
23477 movq %r9, 2*8(%rdi)
23478 movq %r8, 3*8(%rdi)
23479 leaq 4*8(%rdi), %rdi
23480@@ -81,10 +81,10 @@ ENTRY(memmove)
23481 4:
23482 movq %rdx, %rcx
23483 movq -8(%rsi, %rdx), %r11
23484- lea -8(%rdi, %rdx), %r10
23485+ lea -8(%rdi, %rdx), %r9
23486 shrq $3, %rcx
23487 rep movsq
23488- movq %r11, (%r10)
23489+ movq %r11, (%r9)
23490 jmp 13f
23491 .Lmemmove_end_forward:
23492
23493@@ -95,14 +95,14 @@ ENTRY(memmove)
23494 7:
23495 movq %rdx, %rcx
23496 movq (%rsi), %r11
23497- movq %rdi, %r10
23498+ movq %rdi, %r9
23499 leaq -8(%rsi, %rdx), %rsi
23500 leaq -8(%rdi, %rdx), %rdi
23501 shrq $3, %rcx
23502 std
23503 rep movsq
23504 cld
23505- movq %r11, (%r10)
23506+ movq %r11, (%r9)
23507 jmp 13f
23508
23509 /*
23510@@ -127,13 +127,13 @@ ENTRY(memmove)
23511 8:
23512 subq $0x20, %rdx
23513 movq -1*8(%rsi), %r11
23514- movq -2*8(%rsi), %r10
23515+ movq -2*8(%rsi), %rcx
23516 movq -3*8(%rsi), %r9
23517 movq -4*8(%rsi), %r8
23518 leaq -4*8(%rsi), %rsi
23519
23520 movq %r11, -1*8(%rdi)
23521- movq %r10, -2*8(%rdi)
23522+ movq %rcx, -2*8(%rdi)
23523 movq %r9, -3*8(%rdi)
23524 movq %r8, -4*8(%rdi)
23525 leaq -4*8(%rdi), %rdi
23526@@ -151,11 +151,11 @@ ENTRY(memmove)
23527 * Move data from 16 bytes to 31 bytes.
23528 */
23529 movq 0*8(%rsi), %r11
23530- movq 1*8(%rsi), %r10
23531+ movq 1*8(%rsi), %rcx
23532 movq -2*8(%rsi, %rdx), %r9
23533 movq -1*8(%rsi, %rdx), %r8
23534 movq %r11, 0*8(%rdi)
23535- movq %r10, 1*8(%rdi)
23536+ movq %rcx, 1*8(%rdi)
23537 movq %r9, -2*8(%rdi, %rdx)
23538 movq %r8, -1*8(%rdi, %rdx)
23539 jmp 13f
23540@@ -167,9 +167,9 @@ ENTRY(memmove)
23541 * Move data from 8 bytes to 15 bytes.
23542 */
23543 movq 0*8(%rsi), %r11
23544- movq -1*8(%rsi, %rdx), %r10
23545+ movq -1*8(%rsi, %rdx), %r9
23546 movq %r11, 0*8(%rdi)
23547- movq %r10, -1*8(%rdi, %rdx)
23548+ movq %r9, -1*8(%rdi, %rdx)
23549 jmp 13f
23550 10:
23551 cmpq $4, %rdx
23552@@ -178,9 +178,9 @@ ENTRY(memmove)
23553 * Move data from 4 bytes to 7 bytes.
23554 */
23555 movl (%rsi), %r11d
23556- movl -4(%rsi, %rdx), %r10d
23557+ movl -4(%rsi, %rdx), %r9d
23558 movl %r11d, (%rdi)
23559- movl %r10d, -4(%rdi, %rdx)
23560+ movl %r9d, -4(%rdi, %rdx)
23561 jmp 13f
23562 11:
23563 cmp $2, %rdx
23564@@ -189,9 +189,9 @@ ENTRY(memmove)
23565 * Move data from 2 bytes to 3 bytes.
23566 */
23567 movw (%rsi), %r11w
23568- movw -2(%rsi, %rdx), %r10w
23569+ movw -2(%rsi, %rdx), %r9w
23570 movw %r11w, (%rdi)
23571- movw %r10w, -2(%rdi, %rdx)
23572+ movw %r9w, -2(%rdi, %rdx)
23573 jmp 13f
23574 12:
23575 cmp $1, %rdx
23576@@ -202,6 +202,7 @@ ENTRY(memmove)
23577 movb (%rsi), %r11b
23578 movb %r11b, (%rdi)
23579 13:
23580+ pax_force_retaddr
23581 retq
23582 CFI_ENDPROC
23583
23584@@ -210,6 +211,7 @@ ENTRY(memmove)
23585 /* Forward moving data. */
23586 movq %rdx, %rcx
23587 rep movsb
23588+ pax_force_retaddr
23589 retq
23590 .Lmemmove_end_forward_efs:
23591 .previous
23592diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
23593index 2dcb380..963660a 100644
23594--- a/arch/x86/lib/memset_64.S
23595+++ b/arch/x86/lib/memset_64.S
23596@@ -30,6 +30,7 @@
23597 movl %edx,%ecx
23598 rep stosb
23599 movq %r9,%rax
23600+ pax_force_retaddr
23601 ret
23602 .Lmemset_e:
23603 .previous
23604@@ -52,6 +53,7 @@
23605 movq %rdx,%rcx
23606 rep stosb
23607 movq %r9,%rax
23608+ pax_force_retaddr
23609 ret
23610 .Lmemset_e_e:
23611 .previous
23612@@ -59,7 +61,7 @@
23613 ENTRY(memset)
23614 ENTRY(__memset)
23615 CFI_STARTPROC
23616- movq %rdi,%r10
23617+ movq %rdi,%r11
23618
23619 /* expand byte value */
23620 movzbl %sil,%ecx
23621@@ -117,7 +119,8 @@ ENTRY(__memset)
23622 jnz .Lloop_1
23623
23624 .Lende:
23625- movq %r10,%rax
23626+ movq %r11,%rax
23627+ pax_force_retaddr
23628 ret
23629
23630 CFI_RESTORE_STATE
23631diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
23632index c9f2d9b..e7fd2c0 100644
23633--- a/arch/x86/lib/mmx_32.c
23634+++ b/arch/x86/lib/mmx_32.c
23635@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
23636 {
23637 void *p;
23638 int i;
23639+ unsigned long cr0;
23640
23641 if (unlikely(in_interrupt()))
23642 return __memcpy(to, from, len);
23643@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
23644 kernel_fpu_begin();
23645
23646 __asm__ __volatile__ (
23647- "1: prefetch (%0)\n" /* This set is 28 bytes */
23648- " prefetch 64(%0)\n"
23649- " prefetch 128(%0)\n"
23650- " prefetch 192(%0)\n"
23651- " prefetch 256(%0)\n"
23652+ "1: prefetch (%1)\n" /* This set is 28 bytes */
23653+ " prefetch 64(%1)\n"
23654+ " prefetch 128(%1)\n"
23655+ " prefetch 192(%1)\n"
23656+ " prefetch 256(%1)\n"
23657 "2: \n"
23658 ".section .fixup, \"ax\"\n"
23659- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23660+ "3: \n"
23661+
23662+#ifdef CONFIG_PAX_KERNEXEC
23663+ " movl %%cr0, %0\n"
23664+ " movl %0, %%eax\n"
23665+ " andl $0xFFFEFFFF, %%eax\n"
23666+ " movl %%eax, %%cr0\n"
23667+#endif
23668+
23669+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23670+
23671+#ifdef CONFIG_PAX_KERNEXEC
23672+ " movl %0, %%cr0\n"
23673+#endif
23674+
23675 " jmp 2b\n"
23676 ".previous\n"
23677 _ASM_EXTABLE(1b, 3b)
23678- : : "r" (from));
23679+ : "=&r" (cr0) : "r" (from) : "ax");
23680
23681 for ( ; i > 5; i--) {
23682 __asm__ __volatile__ (
23683- "1: prefetch 320(%0)\n"
23684- "2: movq (%0), %%mm0\n"
23685- " movq 8(%0), %%mm1\n"
23686- " movq 16(%0), %%mm2\n"
23687- " movq 24(%0), %%mm3\n"
23688- " movq %%mm0, (%1)\n"
23689- " movq %%mm1, 8(%1)\n"
23690- " movq %%mm2, 16(%1)\n"
23691- " movq %%mm3, 24(%1)\n"
23692- " movq 32(%0), %%mm0\n"
23693- " movq 40(%0), %%mm1\n"
23694- " movq 48(%0), %%mm2\n"
23695- " movq 56(%0), %%mm3\n"
23696- " movq %%mm0, 32(%1)\n"
23697- " movq %%mm1, 40(%1)\n"
23698- " movq %%mm2, 48(%1)\n"
23699- " movq %%mm3, 56(%1)\n"
23700+ "1: prefetch 320(%1)\n"
23701+ "2: movq (%1), %%mm0\n"
23702+ " movq 8(%1), %%mm1\n"
23703+ " movq 16(%1), %%mm2\n"
23704+ " movq 24(%1), %%mm3\n"
23705+ " movq %%mm0, (%2)\n"
23706+ " movq %%mm1, 8(%2)\n"
23707+ " movq %%mm2, 16(%2)\n"
23708+ " movq %%mm3, 24(%2)\n"
23709+ " movq 32(%1), %%mm0\n"
23710+ " movq 40(%1), %%mm1\n"
23711+ " movq 48(%1), %%mm2\n"
23712+ " movq 56(%1), %%mm3\n"
23713+ " movq %%mm0, 32(%2)\n"
23714+ " movq %%mm1, 40(%2)\n"
23715+ " movq %%mm2, 48(%2)\n"
23716+ " movq %%mm3, 56(%2)\n"
23717 ".section .fixup, \"ax\"\n"
23718- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23719+ "3:\n"
23720+
23721+#ifdef CONFIG_PAX_KERNEXEC
23722+ " movl %%cr0, %0\n"
23723+ " movl %0, %%eax\n"
23724+ " andl $0xFFFEFFFF, %%eax\n"
23725+ " movl %%eax, %%cr0\n"
23726+#endif
23727+
23728+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23729+
23730+#ifdef CONFIG_PAX_KERNEXEC
23731+ " movl %0, %%cr0\n"
23732+#endif
23733+
23734 " jmp 2b\n"
23735 ".previous\n"
23736 _ASM_EXTABLE(1b, 3b)
23737- : : "r" (from), "r" (to) : "memory");
23738+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
23739
23740 from += 64;
23741 to += 64;
23742@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
23743 static void fast_copy_page(void *to, void *from)
23744 {
23745 int i;
23746+ unsigned long cr0;
23747
23748 kernel_fpu_begin();
23749
23750@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
23751 * but that is for later. -AV
23752 */
23753 __asm__ __volatile__(
23754- "1: prefetch (%0)\n"
23755- " prefetch 64(%0)\n"
23756- " prefetch 128(%0)\n"
23757- " prefetch 192(%0)\n"
23758- " prefetch 256(%0)\n"
23759+ "1: prefetch (%1)\n"
23760+ " prefetch 64(%1)\n"
23761+ " prefetch 128(%1)\n"
23762+ " prefetch 192(%1)\n"
23763+ " prefetch 256(%1)\n"
23764 "2: \n"
23765 ".section .fixup, \"ax\"\n"
23766- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23767+ "3: \n"
23768+
23769+#ifdef CONFIG_PAX_KERNEXEC
23770+ " movl %%cr0, %0\n"
23771+ " movl %0, %%eax\n"
23772+ " andl $0xFFFEFFFF, %%eax\n"
23773+ " movl %%eax, %%cr0\n"
23774+#endif
23775+
23776+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23777+
23778+#ifdef CONFIG_PAX_KERNEXEC
23779+ " movl %0, %%cr0\n"
23780+#endif
23781+
23782 " jmp 2b\n"
23783 ".previous\n"
23784- _ASM_EXTABLE(1b, 3b) : : "r" (from));
23785+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
23786
23787 for (i = 0; i < (4096-320)/64; i++) {
23788 __asm__ __volatile__ (
23789- "1: prefetch 320(%0)\n"
23790- "2: movq (%0), %%mm0\n"
23791- " movntq %%mm0, (%1)\n"
23792- " movq 8(%0), %%mm1\n"
23793- " movntq %%mm1, 8(%1)\n"
23794- " movq 16(%0), %%mm2\n"
23795- " movntq %%mm2, 16(%1)\n"
23796- " movq 24(%0), %%mm3\n"
23797- " movntq %%mm3, 24(%1)\n"
23798- " movq 32(%0), %%mm4\n"
23799- " movntq %%mm4, 32(%1)\n"
23800- " movq 40(%0), %%mm5\n"
23801- " movntq %%mm5, 40(%1)\n"
23802- " movq 48(%0), %%mm6\n"
23803- " movntq %%mm6, 48(%1)\n"
23804- " movq 56(%0), %%mm7\n"
23805- " movntq %%mm7, 56(%1)\n"
23806+ "1: prefetch 320(%1)\n"
23807+ "2: movq (%1), %%mm0\n"
23808+ " movntq %%mm0, (%2)\n"
23809+ " movq 8(%1), %%mm1\n"
23810+ " movntq %%mm1, 8(%2)\n"
23811+ " movq 16(%1), %%mm2\n"
23812+ " movntq %%mm2, 16(%2)\n"
23813+ " movq 24(%1), %%mm3\n"
23814+ " movntq %%mm3, 24(%2)\n"
23815+ " movq 32(%1), %%mm4\n"
23816+ " movntq %%mm4, 32(%2)\n"
23817+ " movq 40(%1), %%mm5\n"
23818+ " movntq %%mm5, 40(%2)\n"
23819+ " movq 48(%1), %%mm6\n"
23820+ " movntq %%mm6, 48(%2)\n"
23821+ " movq 56(%1), %%mm7\n"
23822+ " movntq %%mm7, 56(%2)\n"
23823 ".section .fixup, \"ax\"\n"
23824- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23825+ "3:\n"
23826+
23827+#ifdef CONFIG_PAX_KERNEXEC
23828+ " movl %%cr0, %0\n"
23829+ " movl %0, %%eax\n"
23830+ " andl $0xFFFEFFFF, %%eax\n"
23831+ " movl %%eax, %%cr0\n"
23832+#endif
23833+
23834+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23835+
23836+#ifdef CONFIG_PAX_KERNEXEC
23837+ " movl %0, %%cr0\n"
23838+#endif
23839+
23840 " jmp 2b\n"
23841 ".previous\n"
23842- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
23843+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
23844
23845 from += 64;
23846 to += 64;
23847@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
23848 static void fast_copy_page(void *to, void *from)
23849 {
23850 int i;
23851+ unsigned long cr0;
23852
23853 kernel_fpu_begin();
23854
23855 __asm__ __volatile__ (
23856- "1: prefetch (%0)\n"
23857- " prefetch 64(%0)\n"
23858- " prefetch 128(%0)\n"
23859- " prefetch 192(%0)\n"
23860- " prefetch 256(%0)\n"
23861+ "1: prefetch (%1)\n"
23862+ " prefetch 64(%1)\n"
23863+ " prefetch 128(%1)\n"
23864+ " prefetch 192(%1)\n"
23865+ " prefetch 256(%1)\n"
23866 "2: \n"
23867 ".section .fixup, \"ax\"\n"
23868- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23869+ "3: \n"
23870+
23871+#ifdef CONFIG_PAX_KERNEXEC
23872+ " movl %%cr0, %0\n"
23873+ " movl %0, %%eax\n"
23874+ " andl $0xFFFEFFFF, %%eax\n"
23875+ " movl %%eax, %%cr0\n"
23876+#endif
23877+
23878+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23879+
23880+#ifdef CONFIG_PAX_KERNEXEC
23881+ " movl %0, %%cr0\n"
23882+#endif
23883+
23884 " jmp 2b\n"
23885 ".previous\n"
23886- _ASM_EXTABLE(1b, 3b) : : "r" (from));
23887+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
23888
23889 for (i = 0; i < 4096/64; i++) {
23890 __asm__ __volatile__ (
23891- "1: prefetch 320(%0)\n"
23892- "2: movq (%0), %%mm0\n"
23893- " movq 8(%0), %%mm1\n"
23894- " movq 16(%0), %%mm2\n"
23895- " movq 24(%0), %%mm3\n"
23896- " movq %%mm0, (%1)\n"
23897- " movq %%mm1, 8(%1)\n"
23898- " movq %%mm2, 16(%1)\n"
23899- " movq %%mm3, 24(%1)\n"
23900- " movq 32(%0), %%mm0\n"
23901- " movq 40(%0), %%mm1\n"
23902- " movq 48(%0), %%mm2\n"
23903- " movq 56(%0), %%mm3\n"
23904- " movq %%mm0, 32(%1)\n"
23905- " movq %%mm1, 40(%1)\n"
23906- " movq %%mm2, 48(%1)\n"
23907- " movq %%mm3, 56(%1)\n"
23908+ "1: prefetch 320(%1)\n"
23909+ "2: movq (%1), %%mm0\n"
23910+ " movq 8(%1), %%mm1\n"
23911+ " movq 16(%1), %%mm2\n"
23912+ " movq 24(%1), %%mm3\n"
23913+ " movq %%mm0, (%2)\n"
23914+ " movq %%mm1, 8(%2)\n"
23915+ " movq %%mm2, 16(%2)\n"
23916+ " movq %%mm3, 24(%2)\n"
23917+ " movq 32(%1), %%mm0\n"
23918+ " movq 40(%1), %%mm1\n"
23919+ " movq 48(%1), %%mm2\n"
23920+ " movq 56(%1), %%mm3\n"
23921+ " movq %%mm0, 32(%2)\n"
23922+ " movq %%mm1, 40(%2)\n"
23923+ " movq %%mm2, 48(%2)\n"
23924+ " movq %%mm3, 56(%2)\n"
23925 ".section .fixup, \"ax\"\n"
23926- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23927+ "3:\n"
23928+
23929+#ifdef CONFIG_PAX_KERNEXEC
23930+ " movl %%cr0, %0\n"
23931+ " movl %0, %%eax\n"
23932+ " andl $0xFFFEFFFF, %%eax\n"
23933+ " movl %%eax, %%cr0\n"
23934+#endif
23935+
23936+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23937+
23938+#ifdef CONFIG_PAX_KERNEXEC
23939+ " movl %0, %%cr0\n"
23940+#endif
23941+
23942 " jmp 2b\n"
23943 ".previous\n"
23944 _ASM_EXTABLE(1b, 3b)
23945- : : "r" (from), "r" (to) : "memory");
23946+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
23947
23948 from += 64;
23949 to += 64;
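The mmx_32.c hunks above make three related changes. Each asm gains an output operand for cr0, which is why the input operands shift from %0/%1 to %1/%2. The original fixup already patched kernel text at runtime: on a faulting prefetch it overwrites the instruction at label 1 with a two-byte short jump (0x1AEB is jmp .+0x1a, skipping the 26 bytes of prefetches; 0x05EB skips a single 5-byte prefetch). Under PAX_KERNEXEC kernel text is read-only, so that write would itself fault; the added code therefore clears CR0.WP (bit 16, hence the 0xFFFEFFFF mask) around the patch and restores the saved value, using %eax as scratch (the new "ax" clobber). A minimal sketch of the same WP toggle in C, with hypothetical helper names (the patch open-codes this inside the fixup asm):

    static inline unsigned long kernexec_open_sketch(void)
    {
        unsigned long cr0;
        asm volatile("mov %%cr0, %0" : "=r" (cr0));
        /* clear WP (bit 16) so read-only kernel text becomes writable */
        asm volatile("mov %0, %%cr0" : : "r" (cr0 & ~0x10000UL));
        return cr0;
    }

    static inline void kernexec_close_sketch(unsigned long cr0)
    {
        /* restore the saved CR0, re-enabling write protection */
        asm volatile("mov %0, %%cr0" : : "r" (cr0));
    }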
23950diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
23951index f6d13ee..aca5f0b 100644
23952--- a/arch/x86/lib/msr-reg.S
23953+++ b/arch/x86/lib/msr-reg.S
23954@@ -3,6 +3,7 @@
23955 #include <asm/dwarf2.h>
23956 #include <asm/asm.h>
23957 #include <asm/msr.h>
23958+#include <asm/alternative-asm.h>
23959
23960 #ifdef CONFIG_X86_64
23961 /*
23962@@ -16,7 +17,7 @@ ENTRY(\op\()_safe_regs)
23963 CFI_STARTPROC
23964 pushq_cfi %rbx
23965 pushq_cfi %rbp
23966- movq %rdi, %r10 /* Save pointer */
23967+ movq %rdi, %r9 /* Save pointer */
23968 xorl %r11d, %r11d /* Return value */
23969 movl (%rdi), %eax
23970 movl 4(%rdi), %ecx
23971@@ -27,16 +28,17 @@ ENTRY(\op\()_safe_regs)
23972 movl 28(%rdi), %edi
23973 CFI_REMEMBER_STATE
23974 1: \op
23975-2: movl %eax, (%r10)
23976+2: movl %eax, (%r9)
23977 movl %r11d, %eax /* Return value */
23978- movl %ecx, 4(%r10)
23979- movl %edx, 8(%r10)
23980- movl %ebx, 12(%r10)
23981- movl %ebp, 20(%r10)
23982- movl %esi, 24(%r10)
23983- movl %edi, 28(%r10)
23984+ movl %ecx, 4(%r9)
23985+ movl %edx, 8(%r9)
23986+ movl %ebx, 12(%r9)
23987+ movl %ebp, 20(%r9)
23988+ movl %esi, 24(%r9)
23989+ movl %edi, 28(%r9)
23990 popq_cfi %rbp
23991 popq_cfi %rbx
23992+ pax_force_retaddr
23993 ret
23994 3:
23995 CFI_RESTORE_STATE
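The msr-reg.S hunk moves the saved pointer from %r10 to %r9 and inserts pax_force_retaddr ahead of the ret. pax_force_retaddr hardens the return path: just before the function returns it forces the top bit of the on-stack return address, so a value an attacker managed to overwrite with a userland pointer becomes a non-canonical address and faults instead of transferring control to user memory (ret2usr). A rough sketch of one amd64 variant, offered as an assumption rather than the patch's exact expansion:

    /* Illustrative only: set the sign bit of the saved return address
     * immediately before 'ret'; a planted userland pointer then decodes
     * as a non-canonical address and faults on the return. */
    #define pax_force_retaddr_sketch() \
        asm volatile("btsq $63, (%%rsp)" : : : "memory", "cc")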
23996diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
23997index fc6ba17..04471c5 100644
23998--- a/arch/x86/lib/putuser.S
23999+++ b/arch/x86/lib/putuser.S
24000@@ -16,7 +16,9 @@
24001 #include <asm/errno.h>
24002 #include <asm/asm.h>
24003 #include <asm/smap.h>
24004-
24005+#include <asm/segment.h>
24006+#include <asm/pgtable.h>
24007+#include <asm/alternative-asm.h>
24008
24009 /*
24010 * __put_user_X
24011@@ -30,57 +32,125 @@
24012 * as they get called from within inline assembly.
24013 */
24014
24015-#define ENTER CFI_STARTPROC ; \
24016- GET_THREAD_INFO(%_ASM_BX)
24017-#define EXIT ASM_CLAC ; \
24018- ret ; \
24019+#define ENTER CFI_STARTPROC
24020+#define EXIT ASM_CLAC ; \
24021+ pax_force_retaddr ; \
24022+ ret ; \
24023 CFI_ENDPROC
24024
24025+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24026+#define _DEST %_ASM_CX,%_ASM_BX
24027+#else
24028+#define _DEST %_ASM_CX
24029+#endif
24030+
24031+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
24032+#define __copyuser_seg gs;
24033+#else
24034+#define __copyuser_seg
24035+#endif
24036+
24037 .text
24038 ENTRY(__put_user_1)
24039 ENTER
24040+
24041+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24042+ GET_THREAD_INFO(%_ASM_BX)
24043 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
24044 jae bad_put_user
24045 ASM_STAC
24046-1: movb %al,(%_ASM_CX)
24047+
24048+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24049+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
24050+ cmp %_ASM_BX,%_ASM_CX
24051+ jb 1234f
24052+ xor %ebx,%ebx
24053+1234:
24054+#endif
24055+
24056+#endif
24057+
24058+1: __copyuser_seg movb %al,(_DEST)
24059 xor %eax,%eax
24060 EXIT
24061 ENDPROC(__put_user_1)
24062
24063 ENTRY(__put_user_2)
24064 ENTER
24065+
24066+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24067+ GET_THREAD_INFO(%_ASM_BX)
24068 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
24069 sub $1,%_ASM_BX
24070 cmp %_ASM_BX,%_ASM_CX
24071 jae bad_put_user
24072 ASM_STAC
24073-2: movw %ax,(%_ASM_CX)
24074+
24075+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24076+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
24077+ cmp %_ASM_BX,%_ASM_CX
24078+ jb 1234f
24079+ xor %ebx,%ebx
24080+1234:
24081+#endif
24082+
24083+#endif
24084+
24085+2: __copyuser_seg movw %ax,(_DEST)
24086 xor %eax,%eax
24087 EXIT
24088 ENDPROC(__put_user_2)
24089
24090 ENTRY(__put_user_4)
24091 ENTER
24092+
24093+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24094+ GET_THREAD_INFO(%_ASM_BX)
24095 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
24096 sub $3,%_ASM_BX
24097 cmp %_ASM_BX,%_ASM_CX
24098 jae bad_put_user
24099 ASM_STAC
24100-3: movl %eax,(%_ASM_CX)
24101+
24102+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24103+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
24104+ cmp %_ASM_BX,%_ASM_CX
24105+ jb 1234f
24106+ xor %ebx,%ebx
24107+1234:
24108+#endif
24109+
24110+#endif
24111+
24112+3: __copyuser_seg movl %eax,(_DEST)
24113 xor %eax,%eax
24114 EXIT
24115 ENDPROC(__put_user_4)
24116
24117 ENTRY(__put_user_8)
24118 ENTER
24119+
24120+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24121+ GET_THREAD_INFO(%_ASM_BX)
24122 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
24123 sub $7,%_ASM_BX
24124 cmp %_ASM_BX,%_ASM_CX
24125 jae bad_put_user
24126 ASM_STAC
24127-4: mov %_ASM_AX,(%_ASM_CX)
24128+
24129+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24130+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
24131+ cmp %_ASM_BX,%_ASM_CX
24132+ jb 1234f
24133+ xor %ebx,%ebx
24134+1234:
24135+#endif
24136+
24137+#endif
24138+
24139+4: __copyuser_seg mov %_ASM_AX,(_DEST)
24140 #ifdef CONFIG_X86_32
24141-5: movl %edx,4(%_ASM_CX)
24142+5: __copyuser_seg movl %edx,4(_DEST)
24143 #endif
24144 xor %eax,%eax
24145 EXIT
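putuser.S is reworked for PAX_MEMORY_UDEREF. On i386 the __copyuser_seg macro prefixes the store with a %gs segment override: the kernel keeps %gs pointing at a segment whose limit only spans userland, so the store cannot reach kernel memory and the TI_addr_limit comparison is compiled out entirely. On amd64 there is no segmentation, so a userland destination is relocated into a shadow mapping instead: the jb/xor sequence leaves either PAX_USER_SHADOW_BASE or 0 in %rbx, which the (_DEST) addressing mode (%rcx,%rbx) then adds. The same adjustment, sketched in C:

    /* Sketch of the amd64 UDEREF adjustment coded above in asm:
     * pointers below PAX_USER_SHADOW_BASE are userland addresses and
     * get shifted up into the shadow mapping; anything higher passes
     * through unchanged. */
    static inline unsigned long uderef_shadow_sketch(unsigned long addr)
    {
        if (addr < PAX_USER_SHADOW_BASE)
            return addr + PAX_USER_SHADOW_BASE;
        return addr;
    }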
24146diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
24147index 1cad221..de671ee 100644
24148--- a/arch/x86/lib/rwlock.S
24149+++ b/arch/x86/lib/rwlock.S
24150@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
24151 FRAME
24152 0: LOCK_PREFIX
24153 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
24154+
24155+#ifdef CONFIG_PAX_REFCOUNT
24156+ jno 1234f
24157+ LOCK_PREFIX
24158+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
24159+ int $4
24160+1234:
24161+ _ASM_EXTABLE(1234b, 1234b)
24162+#endif
24163+
24164 1: rep; nop
24165 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
24166 jne 1b
24167 LOCK_PREFIX
24168 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
24169+
24170+#ifdef CONFIG_PAX_REFCOUNT
24171+ jno 1234f
24172+ LOCK_PREFIX
24173+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
24174+ int $4
24175+1234:
24176+ _ASM_EXTABLE(1234b, 1234b)
24177+#endif
24178+
24179 jnz 0b
24180 ENDFRAME
24181+ pax_force_retaddr
24182 ret
24183 CFI_ENDPROC
24184 END(__write_lock_failed)
24185@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
24186 FRAME
24187 0: LOCK_PREFIX
24188 READ_LOCK_SIZE(inc) (%__lock_ptr)
24189+
24190+#ifdef CONFIG_PAX_REFCOUNT
24191+ jno 1234f
24192+ LOCK_PREFIX
24193+ READ_LOCK_SIZE(dec) (%__lock_ptr)
24194+ int $4
24195+1234:
24196+ _ASM_EXTABLE(1234b, 1234b)
24197+#endif
24198+
24199 1: rep; nop
24200 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
24201 js 1b
24202 LOCK_PREFIX
24203 READ_LOCK_SIZE(dec) (%__lock_ptr)
24204+
24205+#ifdef CONFIG_PAX_REFCOUNT
24206+ jno 1234f
24207+ LOCK_PREFIX
24208+ READ_LOCK_SIZE(inc) (%__lock_ptr)
24209+ int $4
24210+1234:
24211+ _ASM_EXTABLE(1234b, 1234b)
24212+#endif
24213+
24214 js 0b
24215 ENDFRAME
24216+ pax_force_retaddr
24217 ret
24218 CFI_ENDPROC
24219 END(__read_lock_failed)
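The rwlock.S additions are the PAX_REFCOUNT pattern: after each locked add/inc/dec, jno falls through when no signed overflow occurred; on overflow the modification is undone and int $4 raises the x86 overflow exception so the kernel can report the (likely attacker-driven) counter wrap, while the _ASM_EXTABLE(1234b, 1234b) entry lets execution resume at the same label afterwards. The same idea applied to a plain counter, sketched in C:

    /* Sketch of the PAX_REFCOUNT overflow check used above, applied to
     * a simple atomic increment: detect signed wraparound, undo it, and
     * trap (#OF) so it can be reported instead of silently wrapping. */
    static inline void atomic_inc_checked_sketch(int *v)
    {
        asm volatile("lock incl %0\n\t"
                     "jno 0f\n\t"        /* OF clear: the normal case */
                     "lock decl %0\n\t"  /* undo the wrapped increment */
                     "int $4\n"          /* raise the overflow exception */
                     "0:"
                     : "+m" (*v) : : "memory", "cc");
    }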
24220diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
24221index 5dff5f0..cadebf4 100644
24222--- a/arch/x86/lib/rwsem.S
24223+++ b/arch/x86/lib/rwsem.S
24224@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
24225 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
24226 CFI_RESTORE __ASM_REG(dx)
24227 restore_common_regs
24228+ pax_force_retaddr
24229 ret
24230 CFI_ENDPROC
24231 ENDPROC(call_rwsem_down_read_failed)
24232@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
24233 movq %rax,%rdi
24234 call rwsem_down_write_failed
24235 restore_common_regs
24236+ pax_force_retaddr
24237 ret
24238 CFI_ENDPROC
24239 ENDPROC(call_rwsem_down_write_failed)
24240@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
24241 movq %rax,%rdi
24242 call rwsem_wake
24243 restore_common_regs
24244-1: ret
24245+1: pax_force_retaddr
24246+ ret
24247 CFI_ENDPROC
24248 ENDPROC(call_rwsem_wake)
24249
24250@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
24251 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
24252 CFI_RESTORE __ASM_REG(dx)
24253 restore_common_regs
24254+ pax_force_retaddr
24255 ret
24256 CFI_ENDPROC
24257 ENDPROC(call_rwsem_downgrade_wake)
24258diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
24259index a63efd6..ccecad8 100644
24260--- a/arch/x86/lib/thunk_64.S
24261+++ b/arch/x86/lib/thunk_64.S
24262@@ -8,6 +8,7 @@
24263 #include <linux/linkage.h>
24264 #include <asm/dwarf2.h>
24265 #include <asm/calling.h>
24266+#include <asm/alternative-asm.h>
24267
24268 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
24269 .macro THUNK name, func, put_ret_addr_in_rdi=0
24270@@ -41,5 +42,6 @@
24271 SAVE_ARGS
24272 restore:
24273 RESTORE_ARGS
24274+ pax_force_retaddr
24275 ret
24276 CFI_ENDPROC
24277diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
24278index 98f6d6b6..d27f045 100644
24279--- a/arch/x86/lib/usercopy_32.c
24280+++ b/arch/x86/lib/usercopy_32.c
24281@@ -42,11 +42,13 @@ do { \
24282 int __d0; \
24283 might_fault(); \
24284 __asm__ __volatile__( \
24285+ __COPYUSER_SET_ES \
24286 ASM_STAC "\n" \
24287 "0: rep; stosl\n" \
24288 " movl %2,%0\n" \
24289 "1: rep; stosb\n" \
24290 "2: " ASM_CLAC "\n" \
24291+ __COPYUSER_RESTORE_ES \
24292 ".section .fixup,\"ax\"\n" \
24293 "3: lea 0(%2,%0,4),%0\n" \
24294 " jmp 2b\n" \
24295@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
24296
24297 #ifdef CONFIG_X86_INTEL_USERCOPY
24298 static unsigned long
24299-__copy_user_intel(void __user *to, const void *from, unsigned long size)
24300+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
24301 {
24302 int d0, d1;
24303 __asm__ __volatile__(
24304@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
24305 " .align 2,0x90\n"
24306 "3: movl 0(%4), %%eax\n"
24307 "4: movl 4(%4), %%edx\n"
24308- "5: movl %%eax, 0(%3)\n"
24309- "6: movl %%edx, 4(%3)\n"
24310+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
24311+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
24312 "7: movl 8(%4), %%eax\n"
24313 "8: movl 12(%4),%%edx\n"
24314- "9: movl %%eax, 8(%3)\n"
24315- "10: movl %%edx, 12(%3)\n"
24316+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
24317+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
24318 "11: movl 16(%4), %%eax\n"
24319 "12: movl 20(%4), %%edx\n"
24320- "13: movl %%eax, 16(%3)\n"
24321- "14: movl %%edx, 20(%3)\n"
24322+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
24323+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
24324 "15: movl 24(%4), %%eax\n"
24325 "16: movl 28(%4), %%edx\n"
24326- "17: movl %%eax, 24(%3)\n"
24327- "18: movl %%edx, 28(%3)\n"
24328+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
24329+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
24330 "19: movl 32(%4), %%eax\n"
24331 "20: movl 36(%4), %%edx\n"
24332- "21: movl %%eax, 32(%3)\n"
24333- "22: movl %%edx, 36(%3)\n"
24334+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
24335+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
24336 "23: movl 40(%4), %%eax\n"
24337 "24: movl 44(%4), %%edx\n"
24338- "25: movl %%eax, 40(%3)\n"
24339- "26: movl %%edx, 44(%3)\n"
24340+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
24341+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
24342 "27: movl 48(%4), %%eax\n"
24343 "28: movl 52(%4), %%edx\n"
24344- "29: movl %%eax, 48(%3)\n"
24345- "30: movl %%edx, 52(%3)\n"
24346+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
24347+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
24348 "31: movl 56(%4), %%eax\n"
24349 "32: movl 60(%4), %%edx\n"
24350- "33: movl %%eax, 56(%3)\n"
24351- "34: movl %%edx, 60(%3)\n"
24352+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
24353+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
24354 " addl $-64, %0\n"
24355 " addl $64, %4\n"
24356 " addl $64, %3\n"
24357@@ -149,10 +151,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
24358 " shrl $2, %0\n"
24359 " andl $3, %%eax\n"
24360 " cld\n"
24361+ __COPYUSER_SET_ES
24362 "99: rep; movsl\n"
24363 "36: movl %%eax, %0\n"
24364 "37: rep; movsb\n"
24365 "100:\n"
24366+ __COPYUSER_RESTORE_ES
24367 ".section .fixup,\"ax\"\n"
24368 "101: lea 0(%%eax,%0,4),%0\n"
24369 " jmp 100b\n"
24370@@ -202,46 +206,150 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
24371 }
24372
24373 static unsigned long
24374+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
24375+{
24376+ int d0, d1;
24377+ __asm__ __volatile__(
24378+ " .align 2,0x90\n"
24379+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
24380+ " cmpl $67, %0\n"
24381+ " jbe 3f\n"
24382+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
24383+ " .align 2,0x90\n"
24384+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
24385+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
24386+ "5: movl %%eax, 0(%3)\n"
24387+ "6: movl %%edx, 4(%3)\n"
24388+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
24389+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
24390+ "9: movl %%eax, 8(%3)\n"
24391+ "10: movl %%edx, 12(%3)\n"
24392+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
24393+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
24394+ "13: movl %%eax, 16(%3)\n"
24395+ "14: movl %%edx, 20(%3)\n"
24396+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
24397+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
24398+ "17: movl %%eax, 24(%3)\n"
24399+ "18: movl %%edx, 28(%3)\n"
24400+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
24401+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
24402+ "21: movl %%eax, 32(%3)\n"
24403+ "22: movl %%edx, 36(%3)\n"
24404+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
24405+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
24406+ "25: movl %%eax, 40(%3)\n"
24407+ "26: movl %%edx, 44(%3)\n"
24408+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
24409+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
24410+ "29: movl %%eax, 48(%3)\n"
24411+ "30: movl %%edx, 52(%3)\n"
24412+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
24413+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
24414+ "33: movl %%eax, 56(%3)\n"
24415+ "34: movl %%edx, 60(%3)\n"
24416+ " addl $-64, %0\n"
24417+ " addl $64, %4\n"
24418+ " addl $64, %3\n"
24419+ " cmpl $63, %0\n"
24420+ " ja 1b\n"
24421+ "35: movl %0, %%eax\n"
24422+ " shrl $2, %0\n"
24423+ " andl $3, %%eax\n"
24424+ " cld\n"
24425+ "99: rep; "__copyuser_seg" movsl\n"
24426+ "36: movl %%eax, %0\n"
24427+ "37: rep; "__copyuser_seg" movsb\n"
24428+ "100:\n"
24429+ ".section .fixup,\"ax\"\n"
24430+ "101: lea 0(%%eax,%0,4),%0\n"
24431+ " jmp 100b\n"
24432+ ".previous\n"
24433+ _ASM_EXTABLE(1b,100b)
24434+ _ASM_EXTABLE(2b,100b)
24435+ _ASM_EXTABLE(3b,100b)
24436+ _ASM_EXTABLE(4b,100b)
24437+ _ASM_EXTABLE(5b,100b)
24438+ _ASM_EXTABLE(6b,100b)
24439+ _ASM_EXTABLE(7b,100b)
24440+ _ASM_EXTABLE(8b,100b)
24441+ _ASM_EXTABLE(9b,100b)
24442+ _ASM_EXTABLE(10b,100b)
24443+ _ASM_EXTABLE(11b,100b)
24444+ _ASM_EXTABLE(12b,100b)
24445+ _ASM_EXTABLE(13b,100b)
24446+ _ASM_EXTABLE(14b,100b)
24447+ _ASM_EXTABLE(15b,100b)
24448+ _ASM_EXTABLE(16b,100b)
24449+ _ASM_EXTABLE(17b,100b)
24450+ _ASM_EXTABLE(18b,100b)
24451+ _ASM_EXTABLE(19b,100b)
24452+ _ASM_EXTABLE(20b,100b)
24453+ _ASM_EXTABLE(21b,100b)
24454+ _ASM_EXTABLE(22b,100b)
24455+ _ASM_EXTABLE(23b,100b)
24456+ _ASM_EXTABLE(24b,100b)
24457+ _ASM_EXTABLE(25b,100b)
24458+ _ASM_EXTABLE(26b,100b)
24459+ _ASM_EXTABLE(27b,100b)
24460+ _ASM_EXTABLE(28b,100b)
24461+ _ASM_EXTABLE(29b,100b)
24462+ _ASM_EXTABLE(30b,100b)
24463+ _ASM_EXTABLE(31b,100b)
24464+ _ASM_EXTABLE(32b,100b)
24465+ _ASM_EXTABLE(33b,100b)
24466+ _ASM_EXTABLE(34b,100b)
24467+ _ASM_EXTABLE(35b,100b)
24468+ _ASM_EXTABLE(36b,100b)
24469+ _ASM_EXTABLE(37b,100b)
24470+ _ASM_EXTABLE(99b,101b)
24471+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
24472+ : "1"(to), "2"(from), "0"(size)
24473+ : "eax", "edx", "memory");
24474+ return size;
24475+}
24476+
24477+static unsigned long __size_overflow(3)
24478 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
24479 {
24480 int d0, d1;
24481 __asm__ __volatile__(
24482 " .align 2,0x90\n"
24483- "0: movl 32(%4), %%eax\n"
24484+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
24485 " cmpl $67, %0\n"
24486 " jbe 2f\n"
24487- "1: movl 64(%4), %%eax\n"
24488+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
24489 " .align 2,0x90\n"
24490- "2: movl 0(%4), %%eax\n"
24491- "21: movl 4(%4), %%edx\n"
24492+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
24493+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
24494 " movl %%eax, 0(%3)\n"
24495 " movl %%edx, 4(%3)\n"
24496- "3: movl 8(%4), %%eax\n"
24497- "31: movl 12(%4),%%edx\n"
24498+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
24499+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
24500 " movl %%eax, 8(%3)\n"
24501 " movl %%edx, 12(%3)\n"
24502- "4: movl 16(%4), %%eax\n"
24503- "41: movl 20(%4), %%edx\n"
24504+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
24505+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
24506 " movl %%eax, 16(%3)\n"
24507 " movl %%edx, 20(%3)\n"
24508- "10: movl 24(%4), %%eax\n"
24509- "51: movl 28(%4), %%edx\n"
24510+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
24511+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
24512 " movl %%eax, 24(%3)\n"
24513 " movl %%edx, 28(%3)\n"
24514- "11: movl 32(%4), %%eax\n"
24515- "61: movl 36(%4), %%edx\n"
24516+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
24517+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
24518 " movl %%eax, 32(%3)\n"
24519 " movl %%edx, 36(%3)\n"
24520- "12: movl 40(%4), %%eax\n"
24521- "71: movl 44(%4), %%edx\n"
24522+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
24523+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
24524 " movl %%eax, 40(%3)\n"
24525 " movl %%edx, 44(%3)\n"
24526- "13: movl 48(%4), %%eax\n"
24527- "81: movl 52(%4), %%edx\n"
24528+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
24529+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
24530 " movl %%eax, 48(%3)\n"
24531 " movl %%edx, 52(%3)\n"
24532- "14: movl 56(%4), %%eax\n"
24533- "91: movl 60(%4), %%edx\n"
24534+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
24535+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
24536 " movl %%eax, 56(%3)\n"
24537 " movl %%edx, 60(%3)\n"
24538 " addl $-64, %0\n"
24539@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
24540 " shrl $2, %0\n"
24541 " andl $3, %%eax\n"
24542 " cld\n"
24543- "6: rep; movsl\n"
24544+ "6: rep; "__copyuser_seg" movsl\n"
24545 " movl %%eax,%0\n"
24546- "7: rep; movsb\n"
24547+ "7: rep; "__copyuser_seg" movsb\n"
24548 "8:\n"
24549 ".section .fixup,\"ax\"\n"
24550 "9: lea 0(%%eax,%0,4),%0\n"
24551@@ -298,48 +406,48 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
24552 * hyoshiok@miraclelinux.com
24553 */
24554
24555-static unsigned long __copy_user_zeroing_intel_nocache(void *to,
24556+static unsigned long __size_overflow(3) __copy_user_zeroing_intel_nocache(void *to,
24557 const void __user *from, unsigned long size)
24558 {
24559 int d0, d1;
24560
24561 __asm__ __volatile__(
24562 " .align 2,0x90\n"
24563- "0: movl 32(%4), %%eax\n"
24564+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
24565 " cmpl $67, %0\n"
24566 " jbe 2f\n"
24567- "1: movl 64(%4), %%eax\n"
24568+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
24569 " .align 2,0x90\n"
24570- "2: movl 0(%4), %%eax\n"
24571- "21: movl 4(%4), %%edx\n"
24572+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
24573+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
24574 " movnti %%eax, 0(%3)\n"
24575 " movnti %%edx, 4(%3)\n"
24576- "3: movl 8(%4), %%eax\n"
24577- "31: movl 12(%4),%%edx\n"
24578+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
24579+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
24580 " movnti %%eax, 8(%3)\n"
24581 " movnti %%edx, 12(%3)\n"
24582- "4: movl 16(%4), %%eax\n"
24583- "41: movl 20(%4), %%edx\n"
24584+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
24585+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
24586 " movnti %%eax, 16(%3)\n"
24587 " movnti %%edx, 20(%3)\n"
24588- "10: movl 24(%4), %%eax\n"
24589- "51: movl 28(%4), %%edx\n"
24590+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
24591+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
24592 " movnti %%eax, 24(%3)\n"
24593 " movnti %%edx, 28(%3)\n"
24594- "11: movl 32(%4), %%eax\n"
24595- "61: movl 36(%4), %%edx\n"
24596+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
24597+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
24598 " movnti %%eax, 32(%3)\n"
24599 " movnti %%edx, 36(%3)\n"
24600- "12: movl 40(%4), %%eax\n"
24601- "71: movl 44(%4), %%edx\n"
24602+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
24603+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
24604 " movnti %%eax, 40(%3)\n"
24605 " movnti %%edx, 44(%3)\n"
24606- "13: movl 48(%4), %%eax\n"
24607- "81: movl 52(%4), %%edx\n"
24608+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
24609+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
24610 " movnti %%eax, 48(%3)\n"
24611 " movnti %%edx, 52(%3)\n"
24612- "14: movl 56(%4), %%eax\n"
24613- "91: movl 60(%4), %%edx\n"
24614+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
24615+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
24616 " movnti %%eax, 56(%3)\n"
24617 " movnti %%edx, 60(%3)\n"
24618 " addl $-64, %0\n"
24619@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
24620 " shrl $2, %0\n"
24621 " andl $3, %%eax\n"
24622 " cld\n"
24623- "6: rep; movsl\n"
24624+ "6: rep; "__copyuser_seg" movsl\n"
24625 " movl %%eax,%0\n"
24626- "7: rep; movsb\n"
24627+ "7: rep; "__copyuser_seg" movsb\n"
24628 "8:\n"
24629 ".section .fixup,\"ax\"\n"
24630 "9: lea 0(%%eax,%0,4),%0\n"
24631@@ -392,48 +500,48 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
24632 return size;
24633 }
24634
24635-static unsigned long __copy_user_intel_nocache(void *to,
24636+static unsigned long __size_overflow(3) __copy_user_intel_nocache(void *to,
24637 const void __user *from, unsigned long size)
24638 {
24639 int d0, d1;
24640
24641 __asm__ __volatile__(
24642 " .align 2,0x90\n"
24643- "0: movl 32(%4), %%eax\n"
24644+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
24645 " cmpl $67, %0\n"
24646 " jbe 2f\n"
24647- "1: movl 64(%4), %%eax\n"
24648+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
24649 " .align 2,0x90\n"
24650- "2: movl 0(%4), %%eax\n"
24651- "21: movl 4(%4), %%edx\n"
24652+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
24653+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
24654 " movnti %%eax, 0(%3)\n"
24655 " movnti %%edx, 4(%3)\n"
24656- "3: movl 8(%4), %%eax\n"
24657- "31: movl 12(%4),%%edx\n"
24658+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
24659+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
24660 " movnti %%eax, 8(%3)\n"
24661 " movnti %%edx, 12(%3)\n"
24662- "4: movl 16(%4), %%eax\n"
24663- "41: movl 20(%4), %%edx\n"
24664+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
24665+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
24666 " movnti %%eax, 16(%3)\n"
24667 " movnti %%edx, 20(%3)\n"
24668- "10: movl 24(%4), %%eax\n"
24669- "51: movl 28(%4), %%edx\n"
24670+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
24671+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
24672 " movnti %%eax, 24(%3)\n"
24673 " movnti %%edx, 28(%3)\n"
24674- "11: movl 32(%4), %%eax\n"
24675- "61: movl 36(%4), %%edx\n"
24676+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
24677+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
24678 " movnti %%eax, 32(%3)\n"
24679 " movnti %%edx, 36(%3)\n"
24680- "12: movl 40(%4), %%eax\n"
24681- "71: movl 44(%4), %%edx\n"
24682+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
24683+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
24684 " movnti %%eax, 40(%3)\n"
24685 " movnti %%edx, 44(%3)\n"
24686- "13: movl 48(%4), %%eax\n"
24687- "81: movl 52(%4), %%edx\n"
24688+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
24689+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
24690 " movnti %%eax, 48(%3)\n"
24691 " movnti %%edx, 52(%3)\n"
24692- "14: movl 56(%4), %%eax\n"
24693- "91: movl 60(%4), %%edx\n"
24694+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
24695+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
24696 " movnti %%eax, 56(%3)\n"
24697 " movnti %%edx, 60(%3)\n"
24698 " addl $-64, %0\n"
24699@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
24700 " shrl $2, %0\n"
24701 " andl $3, %%eax\n"
24702 " cld\n"
24703- "6: rep; movsl\n"
24704+ "6: rep; "__copyuser_seg" movsl\n"
24705 " movl %%eax,%0\n"
24706- "7: rep; movsb\n"
24707+ "7: rep; "__copyuser_seg" movsb\n"
24708 "8:\n"
24709 ".section .fixup,\"ax\"\n"
24710 "9: lea 0(%%eax,%0,4),%0\n"
24711@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
24712 */
24713 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
24714 unsigned long size);
24715-unsigned long __copy_user_intel(void __user *to, const void *from,
24716+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
24717+ unsigned long size);
24718+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
24719 unsigned long size);
24720 unsigned long __copy_user_zeroing_intel_nocache(void *to,
24721 const void __user *from, unsigned long size);
24722 #endif /* CONFIG_X86_INTEL_USERCOPY */
24723
24724 /* Generic arbitrary sized copy. */
24725-#define __copy_user(to, from, size) \
24726+#define __copy_user(to, from, size, prefix, set, restore) \
24727 do { \
24728 int __d0, __d1, __d2; \
24729 __asm__ __volatile__( \
24730+ set \
24731 " cmp $7,%0\n" \
24732 " jbe 1f\n" \
24733 " movl %1,%0\n" \
24734 " negl %0\n" \
24735 " andl $7,%0\n" \
24736 " subl %0,%3\n" \
24737- "4: rep; movsb\n" \
24738+ "4: rep; "prefix"movsb\n" \
24739 " movl %3,%0\n" \
24740 " shrl $2,%0\n" \
24741 " andl $3,%3\n" \
24742 " .align 2,0x90\n" \
24743- "0: rep; movsl\n" \
24744+ "0: rep; "prefix"movsl\n" \
24745 " movl %3,%0\n" \
24746- "1: rep; movsb\n" \
24747+ "1: rep; "prefix"movsb\n" \
24748 "2:\n" \
24749+ restore \
24750 ".section .fixup,\"ax\"\n" \
24751 "5: addl %3,%0\n" \
24752 " jmp 2b\n" \
24753@@ -538,14 +650,14 @@ do { \
24754 " negl %0\n" \
24755 " andl $7,%0\n" \
24756 " subl %0,%3\n" \
24757- "4: rep; movsb\n" \
24758+ "4: rep; "__copyuser_seg"movsb\n" \
24759 " movl %3,%0\n" \
24760 " shrl $2,%0\n" \
24761 " andl $3,%3\n" \
24762 " .align 2,0x90\n" \
24763- "0: rep; movsl\n" \
24764+ "0: rep; "__copyuser_seg"movsl\n" \
24765 " movl %3,%0\n" \
24766- "1: rep; movsb\n" \
24767+ "1: rep; "__copyuser_seg"movsb\n" \
24768 "2:\n" \
24769 ".section .fixup,\"ax\"\n" \
24770 "5: addl %3,%0\n" \
24771@@ -629,9 +741,9 @@ survive:
24772 #endif
24773 stac();
24774 if (movsl_is_ok(to, from, n))
24775- __copy_user(to, from, n);
24776+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
24777 else
24778- n = __copy_user_intel(to, from, n);
24779+ n = __generic_copy_to_user_intel(to, from, n);
24780 clac();
24781 return n;
24782 }
24783@@ -655,10 +767,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
24784 {
24785 stac();
24786 if (movsl_is_ok(to, from, n))
24787- __copy_user(to, from, n);
24788+ __copy_user(to, from, n, __copyuser_seg, "", "");
24789 else
24790- n = __copy_user_intel((void __user *)to,
24791- (const void *)from, n);
24792+ n = __generic_copy_from_user_intel(to, from, n);
24793 clac();
24794 return n;
24795 }
24796@@ -689,66 +800,51 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
24797 if (n > 64 && cpu_has_xmm2)
24798 n = __copy_user_intel_nocache(to, from, n);
24799 else
24800- __copy_user(to, from, n);
24801+ __copy_user(to, from, n, __copyuser_seg, "", "");
24802 #else
24803- __copy_user(to, from, n);
24804+ __copy_user(to, from, n, __copyuser_seg, "", "");
24805 #endif
24806 clac();
24807 return n;
24808 }
24809 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
24810
24811-/**
24812- * copy_to_user: - Copy a block of data into user space.
24813- * @to: Destination address, in user space.
24814- * @from: Source address, in kernel space.
24815- * @n: Number of bytes to copy.
24816- *
24817- * Context: User context only. This function may sleep.
24818- *
24819- * Copy data from kernel space to user space.
24820- *
24821- * Returns number of bytes that could not be copied.
24822- * On success, this will be zero.
24823- */
24824-unsigned long
24825-copy_to_user(void __user *to, const void *from, unsigned long n)
24826-{
24827- if (access_ok(VERIFY_WRITE, to, n))
24828- n = __copy_to_user(to, from, n);
24829- return n;
24830-}
24831-EXPORT_SYMBOL(copy_to_user);
24832-
24833-/**
24834- * copy_from_user: - Copy a block of data from user space.
24835- * @to: Destination address, in kernel space.
24836- * @from: Source address, in user space.
24837- * @n: Number of bytes to copy.
24838- *
24839- * Context: User context only. This function may sleep.
24840- *
24841- * Copy data from user space to kernel space.
24842- *
24843- * Returns number of bytes that could not be copied.
24844- * On success, this will be zero.
24845- *
24846- * If some data could not be copied, this function will pad the copied
24847- * data to the requested size using zero bytes.
24848- */
24849-unsigned long
24850-_copy_from_user(void *to, const void __user *from, unsigned long n)
24851-{
24852- if (access_ok(VERIFY_READ, from, n))
24853- n = __copy_from_user(to, from, n);
24854- else
24855- memset(to, 0, n);
24856- return n;
24857-}
24858-EXPORT_SYMBOL(_copy_from_user);
24859-
24860 void copy_from_user_overflow(void)
24861 {
24862 WARN(1, "Buffer overflow detected!\n");
24863 }
24864 EXPORT_SYMBOL(copy_from_user_overflow);
24865+
24866+void copy_to_user_overflow(void)
24867+{
24868+ WARN(1, "Buffer overflow detected!\n");
24869+}
24870+EXPORT_SYMBOL(copy_to_user_overflow);
24871+
24872+#ifdef CONFIG_PAX_MEMORY_UDEREF
24873+void __set_fs(mm_segment_t x)
24874+{
24875+ switch (x.seg) {
24876+ case 0:
24877+ loadsegment(gs, 0);
24878+ break;
24879+ case TASK_SIZE_MAX:
24880+ loadsegment(gs, __USER_DS);
24881+ break;
24882+ case -1UL:
24883+ loadsegment(gs, __KERNEL_DS);
24884+ break;
24885+ default:
24886+ BUG();
24887+ }
24888+ return;
24889+}
24890+EXPORT_SYMBOL(__set_fs);
24891+
24892+void set_fs(mm_segment_t x)
24893+{
24894+ current_thread_info()->addr_limit = x;
24895+ __set_fs(x);
24896+}
24897+EXPORT_SYMBOL(set_fs);
24898+#endif
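In usercopy_32.c the direction of each copy now determines its segment handling: copies to userland wrap the string instructions in __COPYUSER_SET_ES/__COPYUSER_RESTORE_ES so %es points at the user segment for the stores, while copies from userland prefix their loads with __copyuser_seg (%gs). To serve both cases with one body, __copy_user gains prefix/set/restore parameters, and __copy_user_intel is split into __generic_copy_to_user_intel plus the new __generic_copy_from_user_intel, whose movl pairs carry the override on the opposite side. The two call sites from the patch show the parameterization:

    /* to user: no load prefix, switch %es around the string ops */
    __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
    /* from user: %gs-prefixed loads, no %es switch needed */
    __copy_user(to, from, n, __copyuser_seg, "", "");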
24899diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
24900index 05928aa..b33dea1 100644
24901--- a/arch/x86/lib/usercopy_64.c
24902+++ b/arch/x86/lib/usercopy_64.c
24903@@ -39,7 +39,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
24904 _ASM_EXTABLE(0b,3b)
24905 _ASM_EXTABLE(1b,2b)
24906 : [size8] "=&c"(size), [dst] "=&D" (__d0)
24907- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
24908+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
24909 [zero] "r" (0UL), [eight] "r" (8UL));
24910 clac();
24911 return size;
24912@@ -54,12 +54,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
24913 }
24914 EXPORT_SYMBOL(clear_user);
24915
24916-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
24917+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
24918 {
24919- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
24920- return copy_user_generic((__force void *)to, (__force void *)from, len);
24921- }
24922- return len;
24923+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
24924+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
24925+ return len;
24926 }
24927 EXPORT_SYMBOL(copy_in_user);
24928
24929@@ -69,7 +68,7 @@ EXPORT_SYMBOL(copy_in_user);
24930 * it is not necessary to optimize tail handling.
24931 */
24932 unsigned long
24933-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
24934+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
24935 {
24936 char c;
24937 unsigned zero_len;
24938@@ -87,3 +86,15 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
24939 clac();
24940 return len;
24941 }
24942+
24943+void copy_from_user_overflow(void)
24944+{
24945+ WARN(1, "Buffer overflow detected!\n");
24946+}
24947+EXPORT_SYMBOL(copy_from_user_overflow);
24948+
24949+void copy_to_user_overflow(void)
24950+{
24951+ WARN(1, "Buffer overflow detected!\n");
24952+}
24953+EXPORT_SYMBOL(copy_to_user_overflow);
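usercopy_64.c widens the length parameters to unsigned long and routes both pointers of copy_in_user through ____m() (the amd64 shadow-base adjustment sketched earlier). Both files now export copy_to_user_overflow/copy_from_user_overflow, the WARN stubs reached by compile-time size checks, while the unchecked copy_to_user/_copy_from_user definitions are dropped from usercopy_32.c (replaced elsewhere in the patch). A hypothetical caller, sketched for illustration of how the stubs are reached:

    /* Hypothetical sketch: a wrapper whose destination size is known at
     * compile time branches to the stub instead of copying past the
     * object; the real checks live in the uaccess wrappers. */
    static inline unsigned long
    checked_copy_from_user_sketch(void *to, const void __user *from,
                                  unsigned long n, size_t objsize)
    {
        if (n > objsize) {
            copy_from_user_overflow();  /* WARNs "Buffer overflow detected!" */
            return n;
        }
        return copy_from_user(to, from, n);
    }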
24954diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
24955index 903ec1e..c4166b2 100644
24956--- a/arch/x86/mm/extable.c
24957+++ b/arch/x86/mm/extable.c
24958@@ -6,12 +6,24 @@
24959 static inline unsigned long
24960 ex_insn_addr(const struct exception_table_entry *x)
24961 {
24962- return (unsigned long)&x->insn + x->insn;
24963+ unsigned long reloc = 0;
24964+
24965+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
24966+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
24967+#endif
24968+
24969+ return (unsigned long)&x->insn + x->insn + reloc;
24970 }
24971 static inline unsigned long
24972 ex_fixup_addr(const struct exception_table_entry *x)
24973 {
24974- return (unsigned long)&x->fixup + x->fixup;
24975+ unsigned long reloc = 0;
24976+
24977+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
24978+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
24979+#endif
24980+
24981+ return (unsigned long)&x->fixup + x->fixup + reloc;
24982 }
24983
24984 int fixup_exception(struct pt_regs *regs)
24985@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
24986 unsigned long new_ip;
24987
24988 #ifdef CONFIG_PNPBIOS
24989- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
24990+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
24991 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
24992 extern u32 pnp_bios_is_utter_crap;
24993 pnp_bios_is_utter_crap = 1;
24994@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
24995 i += 4;
24996 p->fixup -= i;
24997 i += 4;
24998+
24999+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
25000+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
25001+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
25002+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
25003+#endif
25004+
25005 }
25006 }
25007
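extable.c compensates for the i386 KERNEXEC relocation: exception table entries hold 32-bit offsets relative to their own field, so once the kernel is moved by (____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR) the decoders ex_insn_addr/ex_fixup_addr must add that delta back, and the runtime sort path subtracts it from the stored offsets (with a BUILD_BUG_ON tying the scheme to CONFIG_BUILDTIME_EXTABLE_SORT). Decoding a self-relative entry, sketched:

    /* Sketch: decoding a self-relative exception table entry, with the
     * KERNEXEC load-address delta (0 on configurations without it). */
    struct extable_entry_sketch { int insn; int fixup; };

    static unsigned long ex_decode_sketch(const int *field, long reloc)
    {
        /* the stored value is an offset from the field's own address */
        return (unsigned long)field + *field + reloc;
    }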
25008diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
25009index 8e13ecb..831f2d0 100644
25010--- a/arch/x86/mm/fault.c
25011+++ b/arch/x86/mm/fault.c
25012@@ -13,12 +13,19 @@
25013 #include <linux/perf_event.h> /* perf_sw_event */
25014 #include <linux/hugetlb.h> /* hstate_index_to_shift */
25015 #include <linux/prefetch.h> /* prefetchw */
25016+#include <linux/unistd.h>
25017+#include <linux/compiler.h>
25018
25019 #include <asm/traps.h> /* dotraplinkage, ... */
25020 #include <asm/pgalloc.h> /* pgd_*(), ... */
25021 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
25022 #include <asm/fixmap.h> /* VSYSCALL_START */
25023 #include <asm/rcu.h> /* exception_enter(), ... */
25024+#include <asm/tlbflush.h>
25025+
25026+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25027+#include <asm/stacktrace.h>
25028+#endif
25029
25030 /*
25031 * Page fault error code bits:
25032@@ -56,7 +63,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
25033 int ret = 0;
25034
25035 /* kprobe_running() needs smp_processor_id() */
25036- if (kprobes_built_in() && !user_mode_vm(regs)) {
25037+ if (kprobes_built_in() && !user_mode(regs)) {
25038 preempt_disable();
25039 if (kprobe_running() && kprobe_fault_handler(regs, 14))
25040 ret = 1;
25041@@ -117,7 +124,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
25042 return !instr_lo || (instr_lo>>1) == 1;
25043 case 0x00:
25044 /* Prefetch instruction is 0x0F0D or 0x0F18 */
25045- if (probe_kernel_address(instr, opcode))
25046+ if (user_mode(regs)) {
25047+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
25048+ return 0;
25049+ } else if (probe_kernel_address(instr, opcode))
25050 return 0;
25051
25052 *prefetch = (instr_lo == 0xF) &&
25053@@ -151,7 +161,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
25054 while (instr < max_instr) {
25055 unsigned char opcode;
25056
25057- if (probe_kernel_address(instr, opcode))
25058+ if (user_mode(regs)) {
25059+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
25060+ break;
25061+ } else if (probe_kernel_address(instr, opcode))
25062 break;
25063
25064 instr++;
25065@@ -182,6 +195,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
25066 force_sig_info(si_signo, &info, tsk);
25067 }
25068
25069+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25070+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
25071+#endif
25072+
25073+#ifdef CONFIG_PAX_EMUTRAMP
25074+static int pax_handle_fetch_fault(struct pt_regs *regs);
25075+#endif
25076+
25077+#ifdef CONFIG_PAX_PAGEEXEC
25078+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
25079+{
25080+ pgd_t *pgd;
25081+ pud_t *pud;
25082+ pmd_t *pmd;
25083+
25084+ pgd = pgd_offset(mm, address);
25085+ if (!pgd_present(*pgd))
25086+ return NULL;
25087+ pud = pud_offset(pgd, address);
25088+ if (!pud_present(*pud))
25089+ return NULL;
25090+ pmd = pmd_offset(pud, address);
25091+ if (!pmd_present(*pmd))
25092+ return NULL;
25093+ return pmd;
25094+}
25095+#endif
25096+
25097 DEFINE_SPINLOCK(pgd_lock);
25098 LIST_HEAD(pgd_list);
25099
25100@@ -232,10 +273,22 @@ void vmalloc_sync_all(void)
25101 for (address = VMALLOC_START & PMD_MASK;
25102 address >= TASK_SIZE && address < FIXADDR_TOP;
25103 address += PMD_SIZE) {
25104+
25105+#ifdef CONFIG_PAX_PER_CPU_PGD
25106+ unsigned long cpu;
25107+#else
25108 struct page *page;
25109+#endif
25110
25111 spin_lock(&pgd_lock);
25112+
25113+#ifdef CONFIG_PAX_PER_CPU_PGD
25114+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25115+ pgd_t *pgd = get_cpu_pgd(cpu);
25116+ pmd_t *ret;
25117+#else
25118 list_for_each_entry(page, &pgd_list, lru) {
25119+ pgd_t *pgd = page_address(page);
25120 spinlock_t *pgt_lock;
25121 pmd_t *ret;
25122
25123@@ -243,8 +296,13 @@ void vmalloc_sync_all(void)
25124 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
25125
25126 spin_lock(pgt_lock);
25127- ret = vmalloc_sync_one(page_address(page), address);
25128+#endif
25129+
25130+ ret = vmalloc_sync_one(pgd, address);
25131+
25132+#ifndef CONFIG_PAX_PER_CPU_PGD
25133 spin_unlock(pgt_lock);
25134+#endif
25135
25136 if (!ret)
25137 break;
25138@@ -278,6 +336,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
25139 * an interrupt in the middle of a task switch..
25140 */
25141 pgd_paddr = read_cr3();
25142+
25143+#ifdef CONFIG_PAX_PER_CPU_PGD
25144+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
25145+#endif
25146+
25147 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
25148 if (!pmd_k)
25149 return -1;
25150@@ -373,7 +436,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
25151 * happen within a race in page table update. In the later
25152 * case just flush:
25153 */
25154+
25155+#ifdef CONFIG_PAX_PER_CPU_PGD
25156+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
25157+ pgd = pgd_offset_cpu(smp_processor_id(), address);
25158+#else
25159 pgd = pgd_offset(current->active_mm, address);
25160+#endif
25161+
25162 pgd_ref = pgd_offset_k(address);
25163 if (pgd_none(*pgd_ref))
25164 return -1;
25165@@ -541,7 +611,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
25166 static int is_errata100(struct pt_regs *regs, unsigned long address)
25167 {
25168 #ifdef CONFIG_X86_64
25169- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
25170+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
25171 return 1;
25172 #endif
25173 return 0;
25174@@ -568,7 +638,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
25175 }
25176
25177 static const char nx_warning[] = KERN_CRIT
25178-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
25179+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
25180
25181 static void
25182 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
25183@@ -577,15 +647,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
25184 if (!oops_may_print())
25185 return;
25186
25187- if (error_code & PF_INSTR) {
25188+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
25189 unsigned int level;
25190
25191 pte_t *pte = lookup_address(address, &level);
25192
25193 if (pte && pte_present(*pte) && !pte_exec(*pte))
25194- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
25195+ printk(nx_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
25196 }
25197
25198+#ifdef CONFIG_PAX_KERNEXEC
25199+ if (init_mm.start_code <= address && address < init_mm.end_code) {
25200+ if (current->signal->curr_ip)
25201+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
25202+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
25203+ else
25204+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
25205+ current->comm, task_pid_nr(current), current_uid(), current_euid());
25206+ }
25207+#endif
25208+
25209 printk(KERN_ALERT "BUG: unable to handle kernel ");
25210 if (address < PAGE_SIZE)
25211 printk(KERN_CONT "NULL pointer dereference");
25212@@ -749,6 +830,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
25213 }
25214 #endif
25215
25216+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25217+ if (pax_is_fetch_fault(regs, error_code, address)) {
25218+
25219+#ifdef CONFIG_PAX_EMUTRAMP
25220+ switch (pax_handle_fetch_fault(regs)) {
25221+ case 2:
25222+ return;
25223+ }
25224+#endif
25225+
25226+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
25227+ do_group_exit(SIGKILL);
25228+ }
25229+#endif
25230+
25231 if (unlikely(show_unhandled_signals))
25232 show_signal_msg(regs, error_code, address, tsk);
25233
25234@@ -845,7 +941,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
25235 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
25236 printk(KERN_ERR
25237 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
25238- tsk->comm, tsk->pid, address);
25239+ tsk->comm, task_pid_nr(tsk), address);
25240 code = BUS_MCEERR_AR;
25241 }
25242 #endif
25243@@ -901,6 +997,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
25244 return 1;
25245 }
25246
25247+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
25248+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
25249+{
25250+ pte_t *pte;
25251+ pmd_t *pmd;
25252+ spinlock_t *ptl;
25253+ unsigned char pte_mask;
25254+
25255+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
25256+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
25257+ return 0;
25258+
25259+ /* PaX: it's our fault, let's handle it if we can */
25260+
25261+ /* PaX: take a look at read faults before acquiring any locks */
25262+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
25263+ /* instruction fetch attempt from a protected page in user mode */
25264+ up_read(&mm->mmap_sem);
25265+
25266+#ifdef CONFIG_PAX_EMUTRAMP
25267+ switch (pax_handle_fetch_fault(regs)) {
25268+ case 2:
25269+ return 1;
25270+ }
25271+#endif
25272+
25273+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
25274+ do_group_exit(SIGKILL);
25275+ }
25276+
25277+ pmd = pax_get_pmd(mm, address);
25278+ if (unlikely(!pmd))
25279+ return 0;
25280+
25281+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
25282+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
25283+ pte_unmap_unlock(pte, ptl);
25284+ return 0;
25285+ }
25286+
25287+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
25288+ /* write attempt to a protected page in user mode */
25289+ pte_unmap_unlock(pte, ptl);
25290+ return 0;
25291+ }
25292+
25293+#ifdef CONFIG_SMP
25294+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
25295+#else
25296+ if (likely(address > get_limit(regs->cs)))
25297+#endif
25298+ {
25299+ set_pte(pte, pte_mkread(*pte));
25300+ __flush_tlb_one(address);
25301+ pte_unmap_unlock(pte, ptl);
25302+ up_read(&mm->mmap_sem);
25303+ return 1;
25304+ }
25305+
25306+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
25307+
25308+ /*
25309+ * PaX: fill DTLB with user rights and retry
25310+ */
25311+ __asm__ __volatile__ (
25312+ "orb %2,(%1)\n"
25313+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
25314+/*
25315+ * PaX: let this uncommented 'invlpg' remind us of the behaviour of Intel's
25316+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
25317+ * page fault when examined during a TLB load attempt. this is true not only
25318+ * for PTEs holding a non-present entry but also for present entries that will
25319+ * raise a page fault (such as those set up by PaX, or the copy-on-write
25320+ * mechanism). in effect it means that we do *not* need to flush the TLBs
25321+ * for our target pages since their PTEs are simply not in the TLBs at all.
25322+ *
25323+ * the best thing about omitting it is that we gain around 15-20% speed in the
25324+ * fast path of the page fault handler and can get rid of tracing since we
25325+ * can no longer flush unintended entries.
25326+ */
25327+ "invlpg (%0)\n"
25328+#endif
25329+ __copyuser_seg"testb $0,(%0)\n"
25330+ "xorb %3,(%1)\n"
25331+ :
25332+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
25333+ : "memory", "cc");
25334+ pte_unmap_unlock(pte, ptl);
25335+ up_read(&mm->mmap_sem);
25336+ return 1;
25337+}
25338+#endif
25339+
25340 /*
25341 * Handle a spurious fault caused by a stale TLB entry.
25342 *
25343@@ -973,6 +1162,9 @@ int show_unhandled_signals = 1;
25344 static inline int
25345 access_error(unsigned long error_code, struct vm_area_struct *vma)
25346 {
25347+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
25348+ return 1;
25349+
25350 if (error_code & PF_WRITE) {
25351 /* write, present and write, not present: */
25352 if (unlikely(!(vma->vm_flags & VM_WRITE)))
25353@@ -1001,7 +1193,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
25354 if (error_code & PF_USER)
25355 return false;
25356
25357- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
25358+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
25359 return false;
25360
25361 return true;
25362@@ -1017,18 +1209,33 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
25363 {
25364 struct vm_area_struct *vma;
25365 struct task_struct *tsk;
25366- unsigned long address;
25367 struct mm_struct *mm;
25368 int fault;
25369 int write = error_code & PF_WRITE;
25370 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
25371 (write ? FAULT_FLAG_WRITE : 0);
25372
25373- tsk = current;
25374- mm = tsk->mm;
25375-
25376 /* Get the faulting address: */
25377- address = read_cr2();
25378+ unsigned long address = read_cr2();
25379+
25380+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25381+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
25382+ if (!search_exception_tables(regs->ip)) {
25383+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
25384+ bad_area_nosemaphore(regs, error_code, address);
25385+ return;
25386+ }
25387+ if (address < PAX_USER_SHADOW_BASE) {
25388+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
25389+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
25390+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
25391+ } else
25392+ address -= PAX_USER_SHADOW_BASE;
25393+ }
25394+#endif
25395+
25396+ tsk = current;
25397+ mm = tsk->mm;
25398
25399 /*
25400 * Detect and handle instructions that would cause a page fault for
25401@@ -1089,7 +1296,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
25402 * User-mode registers count as a user access even for any
25403 * potential system fault or CPU buglet:
25404 */
25405- if (user_mode_vm(regs)) {
25406+ if (user_mode(regs)) {
25407 local_irq_enable();
25408 error_code |= PF_USER;
25409 } else {
25410@@ -1151,6 +1358,11 @@ retry:
25411 might_sleep();
25412 }
25413
25414+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
25415+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
25416+ return;
25417+#endif
25418+
25419 vma = find_vma(mm, address);
25420 if (unlikely(!vma)) {
25421 bad_area(regs, error_code, address);
25422@@ -1162,18 +1374,24 @@ retry:
25423 bad_area(regs, error_code, address);
25424 return;
25425 }
25426- if (error_code & PF_USER) {
25427- /*
25428- * Accessing the stack below %sp is always a bug.
25429- * The large cushion allows instructions like enter
25430- * and pusha to work. ("enter $65535, $31" pushes
25431- * 32 pointers and then decrements %sp by 65535.)
25432- */
25433- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
25434- bad_area(regs, error_code, address);
25435- return;
25436- }
25437+ /*
25438+ * Accessing the stack below %sp is always a bug.
25439+ * The large cushion allows instructions like enter
25440+ * and pusha to work. ("enter $65535, $31" pushes
25441+ * 32 pointers and then decrements %sp by 65535.)
25442+ */
25443+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
25444+ bad_area(regs, error_code, address);
25445+ return;
25446 }
25447+
25448+#ifdef CONFIG_PAX_SEGMEXEC
25449+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
25450+ bad_area(regs, error_code, address);
25451+ return;
25452+ }
25453+#endif
25454+
25455 if (unlikely(expand_stack(vma, address))) {
25456 bad_area(regs, error_code, address);
25457 return;
25458@@ -1237,3 +1455,292 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
25459 __do_page_fault(regs, error_code);
25460 exception_exit(regs);
25461 }
25462+
25463+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25464+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
25465+{
25466+ struct mm_struct *mm = current->mm;
25467+ unsigned long ip = regs->ip;
25468+
25469+ if (v8086_mode(regs))
25470+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
25471+
25472+#ifdef CONFIG_PAX_PAGEEXEC
25473+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
25474+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
25475+ return true;
25476+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
25477+ return true;
25478+ return false;
25479+ }
25480+#endif
25481+
25482+#ifdef CONFIG_PAX_SEGMEXEC
25483+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
25484+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
25485+ return true;
25486+ return false;
25487+ }
25488+#endif
25489+
25490+ return false;
25491+}
25492+#endif
25493+
25494+#ifdef CONFIG_PAX_EMUTRAMP
25495+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
25496+{
25497+ int err;
25498+
25499+ do { /* PaX: libffi trampoline emulation */
25500+ unsigned char mov, jmp;
25501+ unsigned int addr1, addr2;
25502+
25503+#ifdef CONFIG_X86_64
25504+ if ((regs->ip + 9) >> 32)
25505+ break;
25506+#endif
25507+
25508+ err = get_user(mov, (unsigned char __user *)regs->ip);
25509+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
25510+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
25511+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
25512+
25513+ if (err)
25514+ break;
25515+
25516+ if (mov == 0xB8 && jmp == 0xE9) {
25517+ regs->ax = addr1;
25518+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
25519+ return 2;
25520+ }
25521+ } while (0);
25522+
25523+ do { /* PaX: gcc trampoline emulation #1 */
25524+ unsigned char mov1, mov2;
25525+ unsigned short jmp;
25526+ unsigned int addr1, addr2;
25527+
25528+#ifdef CONFIG_X86_64
25529+ if ((regs->ip + 11) >> 32)
25530+ break;
25531+#endif
25532+
25533+ err = get_user(mov1, (unsigned char __user *)regs->ip);
25534+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
25535+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
25536+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
25537+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
25538+
25539+ if (err)
25540+ break;
25541+
25542+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
25543+ regs->cx = addr1;
25544+ regs->ax = addr2;
25545+ regs->ip = addr2;
25546+ return 2;
25547+ }
25548+ } while (0);
25549+
25550+ do { /* PaX: gcc trampoline emulation #2 */
25551+ unsigned char mov, jmp;
25552+ unsigned int addr1, addr2;
25553+
25554+#ifdef CONFIG_X86_64
25555+ if ((regs->ip + 9) >> 32)
25556+ break;
25557+#endif
25558+
25559+ err = get_user(mov, (unsigned char __user *)regs->ip);
25560+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
25561+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
25562+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
25563+
25564+ if (err)
25565+ break;
25566+
25567+ if (mov == 0xB9 && jmp == 0xE9) {
25568+ regs->cx = addr1;
25569+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
25570+ return 2;
25571+ }
25572+ } while (0);
25573+
25574+ return 1; /* PaX in action */
25575+}
25576+
25577+#ifdef CONFIG_X86_64
25578+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
25579+{
25580+ int err;
25581+
25582+ do { /* PaX: libffi trampoline emulation */
25583+ unsigned short mov1, mov2, jmp1;
25584+ unsigned char stcclc, jmp2;
25585+ unsigned long addr1, addr2;
25586+
25587+ err = get_user(mov1, (unsigned short __user *)regs->ip);
25588+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
25589+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
25590+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
25591+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
25592+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
25593+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
25594+
25595+ if (err)
25596+ break;
25597+
25598+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
25599+ regs->r11 = addr1;
25600+ regs->r10 = addr2;
25601+ if (stcclc == 0xF8)
25602+ regs->flags &= ~X86_EFLAGS_CF;
25603+ else
25604+ regs->flags |= X86_EFLAGS_CF;
25605+ regs->ip = addr1;
25606+ return 2;
25607+ }
25608+ } while (0);
25609+
25610+ do { /* PaX: gcc trampoline emulation #1 */
25611+ unsigned short mov1, mov2, jmp1;
25612+ unsigned char jmp2;
25613+ unsigned int addr1;
25614+ unsigned long addr2;
25615+
25616+ err = get_user(mov1, (unsigned short __user *)regs->ip);
25617+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
25618+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
25619+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
25620+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
25621+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
25622+
25623+ if (err)
25624+ break;
25625+
25626+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
25627+ regs->r11 = addr1;
25628+ regs->r10 = addr2;
25629+ regs->ip = addr1;
25630+ return 2;
25631+ }
25632+ } while (0);
25633+
25634+ do { /* PaX: gcc trampoline emulation #2 */
25635+ unsigned short mov1, mov2, jmp1;
25636+ unsigned char jmp2;
25637+ unsigned long addr1, addr2;
25638+
25639+ err = get_user(mov1, (unsigned short __user *)regs->ip);
25640+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
25641+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
25642+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
25643+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
25644+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
25645+
25646+ if (err)
25647+ break;
25648+
25649+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
25650+ regs->r11 = addr1;
25651+ regs->r10 = addr2;
25652+ regs->ip = addr1;
25653+ return 2;
25654+ }
25655+ } while (0);
25656+
25657+ return 1; /* PaX in action */
25658+}
25659+#endif
25660+
25661+/*
25662+ * PaX: decide what to do with offenders (regs->ip = fault address)
25663+ *
25664+ * returns 1 when task should be killed
25665+ * 2 when gcc trampoline was detected
25666+ */
25667+static int pax_handle_fetch_fault(struct pt_regs *regs)
25668+{
25669+ if (v8086_mode(regs))
25670+ return 1;
25671+
25672+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
25673+ return 1;
25674+
25675+#ifdef CONFIG_X86_32
25676+ return pax_handle_fetch_fault_32(regs);
25677+#else
25678+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
25679+ return pax_handle_fetch_fault_32(regs);
25680+ else
25681+ return pax_handle_fetch_fault_64(regs);
25682+#endif
25683+}
25684+#endif
25685+
25686+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25687+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
25688+{
25689+ long i;
25690+
25691+ printk(KERN_ERR "PAX: bytes at PC: ");
25692+ for (i = 0; i < 20; i++) {
25693+ unsigned char c;
25694+ if (get_user(c, (unsigned char __force_user *)pc+i))
25695+ printk(KERN_CONT "?? ");
25696+ else
25697+ printk(KERN_CONT "%02x ", c);
25698+ }
25699+ printk("\n");
25700+
25701+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
25702+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
25703+ unsigned long c;
25704+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
25705+#ifdef CONFIG_X86_32
25706+ printk(KERN_CONT "???????? ");
25707+#else
25708+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
25709+ printk(KERN_CONT "???????? ???????? ");
25710+ else
25711+ printk(KERN_CONT "???????????????? ");
25712+#endif
25713+ } else {
25714+#ifdef CONFIG_X86_64
25715+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
25716+ printk(KERN_CONT "%08x ", (unsigned int)c);
25717+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
25718+ } else
25719+#endif
25720+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
25721+ }
25722+ }
25723+ printk("\n");
25724+}
25725+#endif
25726+
25727+/**
25728+ * probe_kernel_write(): safely attempt to write to a location
25729+ * @dst: address to write to
25730+ * @src: pointer to the data that shall be written
25731+ * @size: size of the data chunk
25732+ *
25733+ * Safely write to address @dst from the buffer at @src. If a kernel fault
25734+ * happens, handle that and return -EFAULT.
25735+ */
25736+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
25737+{
25738+ long ret;
25739+ mm_segment_t old_fs = get_fs();
25740+
25741+ set_fs(KERNEL_DS);
25742+ pagefault_disable();
25743+ pax_open_kernel();
25744+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
25745+ pax_close_kernel();
25746+ pagefault_enable();
25747+ set_fs(old_fs);
25748+
25749+ return ret ? -EFAULT : 0;
25750+}
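The three pattern matchers in pax_handle_fetch_fault_32() above recognize the fixed byte sequences that libffi and gcc emit for closure/nested-function trampolines on the stack; rather than leave the stack executable, PaX performs the mov/jmp in software and resumes the task. A minimal userspace sketch (not part of the patch) of the first pattern, the 10-byte libffi trampoline B8 imm32 / E9 rel32:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Mirrors the first do-block above: mov == 0xB8, jmp == 0xE9. */
static int decode_libffi_tramp(const uint8_t *ip, uint32_t *eax, uint32_t *target)
{
    uint32_t imm, rel;

    if (ip[0] != 0xB8 || ip[5] != 0xE9)
        return 0;                         /* not this trampoline */
    memcpy(&imm, ip + 1, 4);              /* imm32 of mov $imm,%eax */
    memcpy(&rel, ip + 6, 4);              /* rel32 of jmp */
    *eax = imm;
    /* as in the patch, the jump target is relative to the end of the
     * 10-byte sequence, truncated to 32 bits */
    *target = (uint32_t)((uintptr_t)ip + rel + 10);
    return 1;
}

int main(void)
{
    /* mov $0x12345678,%eax ; jmp .+0 */
    uint8_t tramp[10] = { 0xB8, 0x78, 0x56, 0x34, 0x12,
                          0xE9, 0x00, 0x00, 0x00, 0x00 };
    uint32_t eax, target;

    if (decode_libffi_tramp(tramp, &eax, &target))
        printf("emulate: eax=%#x, new ip=%#x\n", eax, target);
    return 0;
}

The return codes of the real handler (1 = kill the task, 2 = trampoline emulated) are consumed by the PAGEEXEC/SEGMEXEC fault path that calls pax_handle_fetch_fault().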
25751diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
25752index dd74e46..7d26398 100644
25753--- a/arch/x86/mm/gup.c
25754+++ b/arch/x86/mm/gup.c
25755@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
25756 addr = start;
25757 len = (unsigned long) nr_pages << PAGE_SHIFT;
25758 end = start + len;
25759- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
25760+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
25761 (void __user *)start, len)))
25762 return 0;
25763
25764diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
25765index 6f31ee5..8ee4164 100644
25766--- a/arch/x86/mm/highmem_32.c
25767+++ b/arch/x86/mm/highmem_32.c
25768@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
25769 idx = type + KM_TYPE_NR*smp_processor_id();
25770 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25771 BUG_ON(!pte_none(*(kmap_pte-idx)));
25772+
25773+ pax_open_kernel();
25774 set_pte(kmap_pte-idx, mk_pte(page, prot));
25775+ pax_close_kernel();
25776+
25777 arch_flush_lazy_mmu_mode();
25778
25779 return (void *)vaddr;
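kmap_atomic_prot() now brackets its PTE write with pax_open_kernel()/pax_close_kernel(); the same pairing recurs in iomap_32.c, init_64.c and pageattr.c below. Under KERNEXEC the kernel's own page tables are kept read-only, and on x86 the bracket amounts to toggling the CR0.WP bit around the one legitimate supervisor write. A ring-0 sketch under that assumption (hypothetical names; the real helpers also handle preemption and paravirt):

/* sketch only: must run at CPL 0 with preemption already disabled */
static inline unsigned long sketch_open_kernel(void)
{
    unsigned long cr0;

    asm volatile("mov %%cr0, %0" : "=r" (cr0));
    asm volatile("mov %0, %%cr0" : : "r" (cr0 & ~0x10000UL)); /* clear WP */
    return cr0;
}

static inline void sketch_close_kernel(unsigned long cr0)
{
    asm volatile("mov %0, %%cr0" : : "r" (cr0));              /* restore WP */
}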
25780diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
25781index 937bff5..a354c44 100644
25782--- a/arch/x86/mm/hugetlbpage.c
25783+++ b/arch/x86/mm/hugetlbpage.c
25784@@ -276,13 +276,21 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
25785 struct hstate *h = hstate_file(file);
25786 struct mm_struct *mm = current->mm;
25787 struct vm_area_struct *vma;
25788- unsigned long start_addr;
25789+ unsigned long start_addr, pax_task_size = TASK_SIZE;
25790+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
25791+
25792+#ifdef CONFIG_PAX_SEGMEXEC
25793+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25794+ pax_task_size = SEGMEXEC_TASK_SIZE;
25795+#endif
25796+
25797+ pax_task_size -= PAGE_SIZE;
25798
25799 if (len > mm->cached_hole_size) {
25800- start_addr = mm->free_area_cache;
25801+ start_addr = mm->free_area_cache;
25802 } else {
25803- start_addr = TASK_UNMAPPED_BASE;
25804- mm->cached_hole_size = 0;
25805+ start_addr = mm->mmap_base;
25806+ mm->cached_hole_size = 0;
25807 }
25808
25809 full_search:
25810@@ -290,26 +298,27 @@ full_search:
25811
25812 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
25813 /* At this point: (!vma || addr < vma->vm_end). */
25814- if (TASK_SIZE - len < addr) {
25815+ if (pax_task_size - len < addr) {
25816 /*
25817 * Start a new search - just in case we missed
25818 * some holes.
25819 */
25820- if (start_addr != TASK_UNMAPPED_BASE) {
25821- start_addr = TASK_UNMAPPED_BASE;
25822+ if (start_addr != mm->mmap_base) {
25823+ start_addr = mm->mmap_base;
25824 mm->cached_hole_size = 0;
25825 goto full_search;
25826 }
25827 return -ENOMEM;
25828 }
25829- if (!vma || addr + len <= vma->vm_start) {
25830- mm->free_area_cache = addr + len;
25831- return addr;
25832- }
25833+ if (check_heap_stack_gap(vma, addr, len, offset))
25834+ break;
25835 if (addr + mm->cached_hole_size < vma->vm_start)
25836 mm->cached_hole_size = vma->vm_start - addr;
25837 addr = ALIGN(vma->vm_end, huge_page_size(h));
25838 }
25839+
25840+ mm->free_area_cache = addr + len;
25841+ return addr;
25842 }
25843
25844 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
25845@@ -320,9 +329,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
25846 struct mm_struct *mm = current->mm;
25847 struct vm_area_struct *vma;
25848 unsigned long base = mm->mmap_base;
25849- unsigned long addr = addr0;
25850+ unsigned long addr;
25851 unsigned long largest_hole = mm->cached_hole_size;
25852- unsigned long start_addr;
25853+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
25854
25855 /* don't allow allocations above current base */
25856 if (mm->free_area_cache > base)
25857@@ -332,16 +341,15 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
25858 largest_hole = 0;
25859 mm->free_area_cache = base;
25860 }
25861-try_again:
25862- start_addr = mm->free_area_cache;
25863
25864 /* make sure it can fit in the remaining address space */
25865 if (mm->free_area_cache < len)
25866 goto fail;
25867
25868 /* either no address requested or can't fit in requested address hole */
25869- addr = (mm->free_area_cache - len) & huge_page_mask(h);
25870+ addr = mm->free_area_cache - len;
25871 do {
25872+ addr &= huge_page_mask(h);
25873 /*
25874 * Lookup failure means no vma is above this address,
25875 * i.e. return with success:
25876@@ -350,10 +358,10 @@ try_again:
25877 if (!vma)
25878 return addr;
25879
25880- if (addr + len <= vma->vm_start) {
25881+ if (check_heap_stack_gap(vma, addr, len, offset)) {
25882 /* remember the address as a hint for next time */
25883- mm->cached_hole_size = largest_hole;
25884- return (mm->free_area_cache = addr);
25885+ mm->cached_hole_size = largest_hole;
25886+ return (mm->free_area_cache = addr);
25887 } else if (mm->free_area_cache == vma->vm_end) {
25888 /* pull free_area_cache down to the first hole */
25889 mm->free_area_cache = vma->vm_start;
25890@@ -362,29 +370,34 @@ try_again:
25891
25892 /* remember the largest hole we saw so far */
25893 if (addr + largest_hole < vma->vm_start)
25894- largest_hole = vma->vm_start - addr;
25895+ largest_hole = vma->vm_start - addr;
25896
25897 /* try just below the current vma->vm_start */
25898- addr = (vma->vm_start - len) & huge_page_mask(h);
25899- } while (len <= vma->vm_start);
25900+ addr = skip_heap_stack_gap(vma, len, offset);
25901+ } while (!IS_ERR_VALUE(addr));
25902
25903 fail:
25904 /*
25905- * if hint left us with no space for the requested
25906- * mapping then try again:
25907- */
25908- if (start_addr != base) {
25909- mm->free_area_cache = base;
25910- largest_hole = 0;
25911- goto try_again;
25912- }
25913- /*
25914 * A failed mmap() very likely causes application failure,
25915 * so fall back to the bottom-up function here. This scenario
25916 * can happen with large stack limits and large mmap()
25917 * allocations.
25918 */
25919- mm->free_area_cache = TASK_UNMAPPED_BASE;
25920+
25921+#ifdef CONFIG_PAX_SEGMEXEC
25922+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25923+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
25924+ else
25925+#endif
25926+
25927+ mm->mmap_base = TASK_UNMAPPED_BASE;
25928+
25929+#ifdef CONFIG_PAX_RANDMMAP
25930+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25931+ mm->mmap_base += mm->delta_mmap;
25932+#endif
25933+
25934+ mm->free_area_cache = mm->mmap_base;
25935 mm->cached_hole_size = ~0UL;
25936 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
25937 len, pgoff, flags);
25938@@ -392,6 +405,7 @@ fail:
25939 /*
25940 * Restore the topdown base:
25941 */
25942+ mm->mmap_base = base;
25943 mm->free_area_cache = base;
25944 mm->cached_hole_size = ~0UL;
25945
25946@@ -405,10 +419,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
25947 struct hstate *h = hstate_file(file);
25948 struct mm_struct *mm = current->mm;
25949 struct vm_area_struct *vma;
25950+ unsigned long pax_task_size = TASK_SIZE;
25951+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
25952
25953 if (len & ~huge_page_mask(h))
25954 return -EINVAL;
25955- if (len > TASK_SIZE)
25956+
25957+#ifdef CONFIG_PAX_SEGMEXEC
25958+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25959+ pax_task_size = SEGMEXEC_TASK_SIZE;
25960+#endif
25961+
25962+ pax_task_size -= PAGE_SIZE;
25963+
25964+ if (len > pax_task_size)
25965 return -ENOMEM;
25966
25967 if (flags & MAP_FIXED) {
25968@@ -420,8 +444,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
25969 if (addr) {
25970 addr = ALIGN(addr, huge_page_size(h));
25971 vma = find_vma(mm, addr);
25972- if (TASK_SIZE - len >= addr &&
25973- (!vma || addr + len <= vma->vm_start))
25974+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
25975 return addr;
25976 }
25977 if (mm->get_unmapped_area == arch_get_unmapped_area)
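Both hugetlb allocators above drop the plain addr + len <= vma->vm_start test in favor of check_heap_stack_gap(vma, addr, len, offset), with offset supplied by gr_rand_threadstack_offset(). Those helpers are defined elsewhere in this patch; conceptually, a candidate range is accepted only if it also clears a (possibly randomized) guard gap below the next mapping. A self-contained sketch of that acceptance test, using vm_start == 0 to stand for "no vma above":

#include <stdio.h>

static int gap_ok(unsigned long vm_start, unsigned long addr,
                  unsigned long len, unsigned long offset)
{
    if (addr + len < addr)              /* wrap-around: reject */
        return 0;
    if (!vm_start)                      /* nothing above: accept */
        return 1;
    return addr + len + offset <= vm_start;
}

int main(void)
{
    /* a 2 MiB huge page ending exactly at the next vma passes with no
     * guard requirement but fails once a one-page gap is demanded */
    printf("%d %d\n",
           gap_ok(0x40000000UL, 0x3fe00000UL, 0x200000UL, 0),
           gap_ok(0x40000000UL, 0x3fe00000UL, 0x200000UL, 0x1000UL));
    return 0;
}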
25978diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
25979index d7aea41..0fc945b 100644
25980--- a/arch/x86/mm/init.c
25981+++ b/arch/x86/mm/init.c
25982@@ -4,6 +4,7 @@
25983 #include <linux/swap.h>
25984 #include <linux/memblock.h>
25985 #include <linux/bootmem.h> /* for max_low_pfn */
25986+#include <linux/tboot.h>
25987
25988 #include <asm/cacheflush.h>
25989 #include <asm/e820.h>
25990@@ -16,6 +17,8 @@
25991 #include <asm/tlb.h>
25992 #include <asm/proto.h>
25993 #include <asm/dma.h> /* for MAX_DMA_PFN */
25994+#include <asm/desc.h>
25995+#include <asm/bios_ebda.h>
25996
25997 unsigned long __initdata pgt_buf_start;
25998 unsigned long __meminitdata pgt_buf_end;
25999@@ -44,7 +47,7 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
26000 {
26001 int i;
26002 unsigned long puds = 0, pmds = 0, ptes = 0, tables;
26003- unsigned long start = 0, good_end;
26004+ unsigned long start = 0x100000, good_end;
26005 phys_addr_t base;
26006
26007 for (i = 0; i < nr_range; i++) {
26008@@ -321,10 +324,40 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
26009 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
26010 * mmio resources as well as potential bios/acpi data regions.
26011 */
26012+
26013+#ifdef CONFIG_GRKERNSEC_KMEM
26014+static unsigned int ebda_start __read_only;
26015+static unsigned int ebda_end __read_only;
26016+#endif
26017+
26018 int devmem_is_allowed(unsigned long pagenr)
26019 {
26020- if (pagenr < 256)
26021+#ifdef CONFIG_GRKERNSEC_KMEM
26022+ /* allow BDA */
26023+ if (!pagenr)
26024 return 1;
26025+ /* allow EBDA */
26026+ if (pagenr >= ebda_start && pagenr < ebda_end)
26027+ return 1;
26028+ /* if tboot is in use, allow access to its hardcoded serial log range */
26029+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
26030+ return 1;
26031+#else
26032+ if (!pagenr)
26033+ return 1;
26034+#ifdef CONFIG_VM86
26035+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
26036+ return 1;
26037+#endif
26038+#endif
26039+
26040+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
26041+ return 1;
26042+#ifdef CONFIG_GRKERNSEC_KMEM
26043+ /* throw out everything else below 1MB */
26044+ if (pagenr <= 256)
26045+ return 0;
26046+#endif
26047 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
26048 return 0;
26049 if (!page_is_ram(pagenr))
26050@@ -381,8 +414,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
26051 #endif
26052 }
26053
26054+#ifdef CONFIG_GRKERNSEC_KMEM
26055+static inline void gr_init_ebda(void)
26056+{
26057+ unsigned int ebda_addr;
26058+ unsigned int ebda_size = 0;
26059+
26060+ ebda_addr = get_bios_ebda();
26061+ if (ebda_addr) {
26062+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
26063+ ebda_size <<= 10;
26064+ }
26065+ if (ebda_addr && ebda_size) {
26066+ ebda_start = ebda_addr >> PAGE_SHIFT;
26067+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
26068+ } else {
26069+ ebda_start = 0x9f000 >> PAGE_SHIFT;
26070+ ebda_end = 0xa0000 >> PAGE_SHIFT;
26071+ }
26072+}
26073+#else
26074+static inline void gr_init_ebda(void) { }
26075+#endif
26076+
26077 void free_initmem(void)
26078 {
26079+#ifdef CONFIG_PAX_KERNEXEC
26080+#ifdef CONFIG_X86_32
26081+ /* PaX: limit KERNEL_CS to actual size */
26082+ unsigned long addr, limit;
26083+ struct desc_struct d;
26084+ int cpu;
26085+#else
26086+ pgd_t *pgd;
26087+ pud_t *pud;
26088+ pmd_t *pmd;
26089+ unsigned long addr, end;
26090+#endif
26091+#endif
26092+
26093+ gr_init_ebda();
26094+
26095+#ifdef CONFIG_PAX_KERNEXEC
26096+#ifdef CONFIG_X86_32
26097+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
26098+ limit = (limit - 1UL) >> PAGE_SHIFT;
26099+
26100+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
26101+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
26102+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
26103+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
26104+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
26105+ }
26106+
26107+ /* PaX: make KERNEL_CS read-only */
26108+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
26109+ if (!paravirt_enabled())
26110+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
26111+/*
26112+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
26113+ pgd = pgd_offset_k(addr);
26114+ pud = pud_offset(pgd, addr);
26115+ pmd = pmd_offset(pud, addr);
26116+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
26117+ }
26118+*/
26119+#ifdef CONFIG_X86_PAE
26120+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
26121+/*
26122+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
26123+ pgd = pgd_offset_k(addr);
26124+ pud = pud_offset(pgd, addr);
26125+ pmd = pmd_offset(pud, addr);
26126+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
26127+ }
26128+*/
26129+#endif
26130+
26131+#ifdef CONFIG_MODULES
26132+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
26133+#endif
26134+
26135+#else
26136+ /* PaX: make kernel code/rodata read-only, rest non-executable */
26137+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
26138+ pgd = pgd_offset_k(addr);
26139+ pud = pud_offset(pgd, addr);
26140+ pmd = pmd_offset(pud, addr);
26141+ if (!pmd_present(*pmd))
26142+ continue;
26143+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
26144+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
26145+ else
26146+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
26147+ }
26148+
26149+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
26150+ end = addr + KERNEL_IMAGE_SIZE;
26151+ for (; addr < end; addr += PMD_SIZE) {
26152+ pgd = pgd_offset_k(addr);
26153+ pud = pud_offset(pgd, addr);
26154+ pmd = pmd_offset(pud, addr);
26155+ if (!pmd_present(*pmd))
26156+ continue;
26157+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
26158+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
26159+ }
26160+#endif
26161+
26162+ flush_tlb_all();
26163+#endif
26164+
26165 free_init_pages("unused kernel memory",
26166 (unsigned long)(&__init_begin),
26167 (unsigned long)(&__init_end));
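With CONFIG_GRKERNSEC_KMEM, the rewritten devmem_is_allowed() above shrinks what /dev/mem exposes in the first megabyte to the BDA page, the EBDA window computed by gr_init_ebda(), tboot's hardcoded serial-log range, and the legacy ISA hole; everything else at or below page 256 is refused before the stock iomem/RAM checks run. A userspace restatement of just that low-1MB policy (4 KiB pages, illustrative only):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12

static bool low_mem_allowed(unsigned long pagenr,
                            unsigned long ebda_start, unsigned long ebda_end,
                            bool tboot)
{
    if (!pagenr)                                        /* BDA */
        return true;
    if (pagenr >= ebda_start && pagenr < ebda_end)      /* EBDA */
        return true;
    if (tboot && (0x60000 >> PAGE_SHIFT) <= pagenr &&
        pagenr < (0x68000 >> PAGE_SHIFT))               /* tboot log */
        return true;
    if ((0xa0000 >> PAGE_SHIFT) <= pagenr &&
        pagenr < (0x100000 >> PAGE_SHIFT))              /* ISA hole */
        return true;
    return false;                                       /* rest of low 1MB */
}

int main(void)
{
    /* gr_init_ebda()'s fallback window is 0x9f000-0xa0000 */
    printf("EBDA page 0x9f: %d\n", low_mem_allowed(0x9f, 0x9f, 0xa0, false));
    printf("page 0x10:      %d\n", low_mem_allowed(0x10, 0x9f, 0xa0, false));
    return 0;
}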
26168diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
26169index 11a5800..4bd9977 100644
26170--- a/arch/x86/mm/init_32.c
26171+++ b/arch/x86/mm/init_32.c
26172@@ -73,36 +73,6 @@ static __init void *alloc_low_page(void)
26173 }
26174
26175 /*
26176- * Creates a middle page table and puts a pointer to it in the
26177- * given global directory entry. This only returns the gd entry
26178- * in non-PAE compilation mode, since the middle layer is folded.
26179- */
26180-static pmd_t * __init one_md_table_init(pgd_t *pgd)
26181-{
26182- pud_t *pud;
26183- pmd_t *pmd_table;
26184-
26185-#ifdef CONFIG_X86_PAE
26186- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
26187- if (after_bootmem)
26188- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
26189- else
26190- pmd_table = (pmd_t *)alloc_low_page();
26191- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
26192- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
26193- pud = pud_offset(pgd, 0);
26194- BUG_ON(pmd_table != pmd_offset(pud, 0));
26195-
26196- return pmd_table;
26197- }
26198-#endif
26199- pud = pud_offset(pgd, 0);
26200- pmd_table = pmd_offset(pud, 0);
26201-
26202- return pmd_table;
26203-}
26204-
26205-/*
26206 * Create a page table and place a pointer to it in a middle page
26207 * directory entry:
26208 */
26209@@ -122,13 +92,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
26210 page_table = (pte_t *)alloc_low_page();
26211
26212 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
26213+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
26214+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
26215+#else
26216 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
26217+#endif
26218 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
26219 }
26220
26221 return pte_offset_kernel(pmd, 0);
26222 }
26223
26224+static pmd_t * __init one_md_table_init(pgd_t *pgd)
26225+{
26226+ pud_t *pud;
26227+ pmd_t *pmd_table;
26228+
26229+ pud = pud_offset(pgd, 0);
26230+ pmd_table = pmd_offset(pud, 0);
26231+
26232+ return pmd_table;
26233+}
26234+
26235 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
26236 {
26237 int pgd_idx = pgd_index(vaddr);
26238@@ -202,6 +187,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
26239 int pgd_idx, pmd_idx;
26240 unsigned long vaddr;
26241 pgd_t *pgd;
26242+ pud_t *pud;
26243 pmd_t *pmd;
26244 pte_t *pte = NULL;
26245
26246@@ -211,8 +197,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
26247 pgd = pgd_base + pgd_idx;
26248
26249 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
26250- pmd = one_md_table_init(pgd);
26251- pmd = pmd + pmd_index(vaddr);
26252+ pud = pud_offset(pgd, vaddr);
26253+ pmd = pmd_offset(pud, vaddr);
26254+
26255+#ifdef CONFIG_X86_PAE
26256+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
26257+#endif
26258+
26259 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
26260 pmd++, pmd_idx++) {
26261 pte = page_table_kmap_check(one_page_table_init(pmd),
26262@@ -224,11 +215,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
26263 }
26264 }
26265
26266-static inline int is_kernel_text(unsigned long addr)
26267+static inline int is_kernel_text(unsigned long start, unsigned long end)
26268 {
26269- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
26270- return 1;
26271- return 0;
26272+ if ((start > ktla_ktva((unsigned long)_etext) ||
26273+ end <= ktla_ktva((unsigned long)_stext)) &&
26274+ (start > ktla_ktva((unsigned long)_einittext) ||
26275+ end <= ktla_ktva((unsigned long)_sinittext)) &&
26276+
26277+#ifdef CONFIG_ACPI_SLEEP
26278+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
26279+#endif
26280+
26281+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
26282+ return 0;
26283+ return 1;
26284 }
26285
26286 /*
26287@@ -245,9 +245,10 @@ kernel_physical_mapping_init(unsigned long start,
26288 unsigned long last_map_addr = end;
26289 unsigned long start_pfn, end_pfn;
26290 pgd_t *pgd_base = swapper_pg_dir;
26291- int pgd_idx, pmd_idx, pte_ofs;
26292+ unsigned int pgd_idx, pmd_idx, pte_ofs;
26293 unsigned long pfn;
26294 pgd_t *pgd;
26295+ pud_t *pud;
26296 pmd_t *pmd;
26297 pte_t *pte;
26298 unsigned pages_2m, pages_4k;
26299@@ -280,8 +281,13 @@ repeat:
26300 pfn = start_pfn;
26301 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
26302 pgd = pgd_base + pgd_idx;
26303- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
26304- pmd = one_md_table_init(pgd);
26305+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
26306+ pud = pud_offset(pgd, 0);
26307+ pmd = pmd_offset(pud, 0);
26308+
26309+#ifdef CONFIG_X86_PAE
26310+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
26311+#endif
26312
26313 if (pfn >= end_pfn)
26314 continue;
26315@@ -293,14 +299,13 @@ repeat:
26316 #endif
26317 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
26318 pmd++, pmd_idx++) {
26319- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
26320+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
26321
26322 /*
26323 * Map with big pages if possible, otherwise
26324 * create normal page tables:
26325 */
26326 if (use_pse) {
26327- unsigned int addr2;
26328 pgprot_t prot = PAGE_KERNEL_LARGE;
26329 /*
26330 * first pass will use the same initial
26331@@ -310,11 +315,7 @@ repeat:
26332 __pgprot(PTE_IDENT_ATTR |
26333 _PAGE_PSE);
26334
26335- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
26336- PAGE_OFFSET + PAGE_SIZE-1;
26337-
26338- if (is_kernel_text(addr) ||
26339- is_kernel_text(addr2))
26340+ if (is_kernel_text(address, address + PMD_SIZE))
26341 prot = PAGE_KERNEL_LARGE_EXEC;
26342
26343 pages_2m++;
26344@@ -331,7 +332,7 @@ repeat:
26345 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
26346 pte += pte_ofs;
26347 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
26348- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
26349+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
26350 pgprot_t prot = PAGE_KERNEL;
26351 /*
26352 * first pass will use the same initial
26353@@ -339,7 +340,7 @@ repeat:
26354 */
26355 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
26356
26357- if (is_kernel_text(addr))
26358+ if (is_kernel_text(address, address + PAGE_SIZE))
26359 prot = PAGE_KERNEL_EXEC;
26360
26361 pages_4k++;
26362@@ -465,7 +466,7 @@ void __init native_pagetable_init(void)
26363
26364 pud = pud_offset(pgd, va);
26365 pmd = pmd_offset(pud, va);
26366- if (!pmd_present(*pmd))
26367+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
26368 break;
26369
26370 pte = pte_offset_kernel(pmd, va);
26371@@ -514,12 +515,10 @@ void __init early_ioremap_page_table_range_init(void)
26372
26373 static void __init pagetable_init(void)
26374 {
26375- pgd_t *pgd_base = swapper_pg_dir;
26376-
26377- permanent_kmaps_init(pgd_base);
26378+ permanent_kmaps_init(swapper_pg_dir);
26379 }
26380
26381-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
26382+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
26383 EXPORT_SYMBOL_GPL(__supported_pte_mask);
26384
26385 /* user-defined highmem size */
26386@@ -731,6 +730,12 @@ void __init mem_init(void)
26387
26388 pci_iommu_alloc();
26389
26390+#ifdef CONFIG_PAX_PER_CPU_PGD
26391+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
26392+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
26393+ KERNEL_PGD_PTRS);
26394+#endif
26395+
26396 #ifdef CONFIG_FLATMEM
26397 BUG_ON(!mem_map);
26398 #endif
26399@@ -757,7 +762,7 @@ void __init mem_init(void)
26400 reservedpages++;
26401
26402 codesize = (unsigned long) &_etext - (unsigned long) &_text;
26403- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
26404+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
26405 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
26406
26407 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
26408@@ -798,10 +803,10 @@ void __init mem_init(void)
26409 ((unsigned long)&__init_end -
26410 (unsigned long)&__init_begin) >> 10,
26411
26412- (unsigned long)&_etext, (unsigned long)&_edata,
26413- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
26414+ (unsigned long)&_sdata, (unsigned long)&_edata,
26415+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
26416
26417- (unsigned long)&_text, (unsigned long)&_etext,
26418+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
26419 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
26420
26421 /*
26422@@ -879,6 +884,7 @@ void set_kernel_text_rw(void)
26423 if (!kernel_set_to_readonly)
26424 return;
26425
26426+ start = ktla_ktva(start);
26427 pr_debug("Set kernel text: %lx - %lx for read write\n",
26428 start, start+size);
26429
26430@@ -893,6 +899,7 @@ void set_kernel_text_ro(void)
26431 if (!kernel_set_to_readonly)
26432 return;
26433
26434+ start = ktla_ktva(start);
26435 pr_debug("Set kernel text: %lx - %lx for read only\n",
26436 start, start+size);
26437
26438@@ -921,6 +928,7 @@ void mark_rodata_ro(void)
26439 unsigned long start = PFN_ALIGN(_text);
26440 unsigned long size = PFN_ALIGN(_etext) - start;
26441
26442+ start = ktla_ktva(start);
26443 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
26444 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
26445 size >> 10);
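The two-argument is_kernel_text(start, end) above is a chain of range-overlap tests written in negated form: a candidate range counts as text if it touches the kernel text, the init text, the ACPI wakeup trampoline (0x4000 bytes), or the BIOS/ROM window at 0xc0000-0xfffff, with the text bounds passed through ktla_ktva() to follow the KERNEXEC remapping. Each clause is an instance of this predicate, modulo the patch's deliberately conservative boundary handling:

/* [start, end) overlaps [lo, hi) iff neither lies wholly on one side */
static int overlaps(unsigned long start, unsigned long end,
                    unsigned long lo, unsigned long hi)
{
    return start < hi && end > lo;
}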
26446diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
26447index 3baff25..8b37564 100644
26448--- a/arch/x86/mm/init_64.c
26449+++ b/arch/x86/mm/init_64.c
26450@@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpages_on);
26451 * around without checking the pgd every time.
26452 */
26453
26454-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
26455+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
26456 EXPORT_SYMBOL_GPL(__supported_pte_mask);
26457
26458 int force_personality32;
26459@@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
26460
26461 for (address = start; address <= end; address += PGDIR_SIZE) {
26462 const pgd_t *pgd_ref = pgd_offset_k(address);
26463+
26464+#ifdef CONFIG_PAX_PER_CPU_PGD
26465+ unsigned long cpu;
26466+#else
26467 struct page *page;
26468+#endif
26469
26470 if (pgd_none(*pgd_ref))
26471 continue;
26472
26473 spin_lock(&pgd_lock);
26474+
26475+#ifdef CONFIG_PAX_PER_CPU_PGD
26476+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
26477+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
26478+#else
26479 list_for_each_entry(page, &pgd_list, lru) {
26480 pgd_t *pgd;
26481 spinlock_t *pgt_lock;
26482@@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
26483 /* the pgt_lock only for Xen */
26484 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
26485 spin_lock(pgt_lock);
26486+#endif
26487
26488 if (pgd_none(*pgd))
26489 set_pgd(pgd, *pgd_ref);
26490@@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
26491 BUG_ON(pgd_page_vaddr(*pgd)
26492 != pgd_page_vaddr(*pgd_ref));
26493
26494+#ifndef CONFIG_PAX_PER_CPU_PGD
26495 spin_unlock(pgt_lock);
26496+#endif
26497+
26498 }
26499 spin_unlock(&pgd_lock);
26500 }
26501@@ -161,7 +175,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
26502 {
26503 if (pgd_none(*pgd)) {
26504 pud_t *pud = (pud_t *)spp_getpage();
26505- pgd_populate(&init_mm, pgd, pud);
26506+ pgd_populate_kernel(&init_mm, pgd, pud);
26507 if (pud != pud_offset(pgd, 0))
26508 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
26509 pud, pud_offset(pgd, 0));
26510@@ -173,7 +187,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
26511 {
26512 if (pud_none(*pud)) {
26513 pmd_t *pmd = (pmd_t *) spp_getpage();
26514- pud_populate(&init_mm, pud, pmd);
26515+ pud_populate_kernel(&init_mm, pud, pmd);
26516 if (pmd != pmd_offset(pud, 0))
26517 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
26518 pmd, pmd_offset(pud, 0));
26519@@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
26520 pmd = fill_pmd(pud, vaddr);
26521 pte = fill_pte(pmd, vaddr);
26522
26523+ pax_open_kernel();
26524 set_pte(pte, new_pte);
26525+ pax_close_kernel();
26526
26527 /*
26528 * It's enough to flush this one mapping.
26529@@ -261,14 +277,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
26530 pgd = pgd_offset_k((unsigned long)__va(phys));
26531 if (pgd_none(*pgd)) {
26532 pud = (pud_t *) spp_getpage();
26533- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
26534- _PAGE_USER));
26535+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
26536 }
26537 pud = pud_offset(pgd, (unsigned long)__va(phys));
26538 if (pud_none(*pud)) {
26539 pmd = (pmd_t *) spp_getpage();
26540- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
26541- _PAGE_USER));
26542+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
26543 }
26544 pmd = pmd_offset(pud, phys);
26545 BUG_ON(!pmd_none(*pmd));
26546@@ -329,7 +343,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
26547 if (pfn >= pgt_buf_top)
26548 panic("alloc_low_page: ran out of memory");
26549
26550- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
26551+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
26552 clear_page(adr);
26553 *phys = pfn * PAGE_SIZE;
26554 return adr;
26555@@ -345,7 +359,7 @@ static __ref void *map_low_page(void *virt)
26556
26557 phys = __pa(virt);
26558 left = phys & (PAGE_SIZE - 1);
26559- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
26560+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
26561 adr = (void *)(((unsigned long)adr) | left);
26562
26563 return adr;
26564@@ -553,7 +567,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
26565 unmap_low_page(pmd);
26566
26567 spin_lock(&init_mm.page_table_lock);
26568- pud_populate(&init_mm, pud, __va(pmd_phys));
26569+ pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
26570 spin_unlock(&init_mm.page_table_lock);
26571 }
26572 __flush_tlb_all();
26573@@ -599,7 +613,7 @@ kernel_physical_mapping_init(unsigned long start,
26574 unmap_low_page(pud);
26575
26576 spin_lock(&init_mm.page_table_lock);
26577- pgd_populate(&init_mm, pgd, __va(pud_phys));
26578+ pgd_populate_kernel(&init_mm, pgd, __va(pud_phys));
26579 spin_unlock(&init_mm.page_table_lock);
26580 pgd_changed = true;
26581 }
26582@@ -691,6 +705,12 @@ void __init mem_init(void)
26583
26584 pci_iommu_alloc();
26585
26586+#ifdef CONFIG_PAX_PER_CPU_PGD
26587+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
26588+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
26589+ KERNEL_PGD_PTRS);
26590+#endif
26591+
26592 /* clear_bss() already clear the empty_zero_page */
26593
26594 reservedpages = 0;
26595@@ -851,8 +871,8 @@ int kern_addr_valid(unsigned long addr)
26596 static struct vm_area_struct gate_vma = {
26597 .vm_start = VSYSCALL_START,
26598 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
26599- .vm_page_prot = PAGE_READONLY_EXEC,
26600- .vm_flags = VM_READ | VM_EXEC
26601+ .vm_page_prot = PAGE_READONLY,
26602+ .vm_flags = VM_READ
26603 };
26604
26605 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
26606@@ -886,7 +906,7 @@ int in_gate_area_no_mm(unsigned long addr)
26607
26608 const char *arch_vma_name(struct vm_area_struct *vma)
26609 {
26610- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26611+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26612 return "[vdso]";
26613 if (vma == &gate_vma)
26614 return "[vsyscall]";
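The PAX_PER_CPU_PGD hunks in mem_init() (here and in init_32.c above) seed CPU 0's private page-global directory by copying the kernel half of swapper_pg_dir. clone_pgd_range() is the stock kernel helper for that copy; it is a typed memcpy over consecutive pgd slots, as this sketch restates:

#include <string.h>

typedef struct { unsigned long pgd; } pgd_t;   /* stand-in for the sketch */

/* copy 'count' consecutive pgd entries from src to dst */
static inline void clone_pgd_range_sketch(pgd_t *dst, const pgd_t *src,
                                          int count)
{
    memcpy(dst, src, count * sizeof(pgd_t));
}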
26615diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
26616index 7b179b4..6bd1777 100644
26617--- a/arch/x86/mm/iomap_32.c
26618+++ b/arch/x86/mm/iomap_32.c
26619@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
26620 type = kmap_atomic_idx_push();
26621 idx = type + KM_TYPE_NR * smp_processor_id();
26622 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
26623+
26624+ pax_open_kernel();
26625 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
26626+ pax_close_kernel();
26627+
26628 arch_flush_lazy_mmu_mode();
26629
26630 return (void *)vaddr;
26631diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
26632index 78fe3f1..2f9433c 100644
26633--- a/arch/x86/mm/ioremap.c
26634+++ b/arch/x86/mm/ioremap.c
26635@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
26636 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
26637 int is_ram = page_is_ram(pfn);
26638
26639- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
26640+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
26641 return NULL;
26642 WARN_ON_ONCE(is_ram);
26643 }
26644@@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioremap_prot);
26645 *
26646 * Caller must ensure there is only one unmapping for the same pointer.
26647 */
26648-void iounmap(volatile void __iomem *addr)
26649+void iounmap(const volatile void __iomem *addr)
26650 {
26651 struct vm_struct *p, *o;
26652
26653@@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
26654
26655 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
26656 if (page_is_ram(start >> PAGE_SHIFT))
26657+#ifdef CONFIG_HIGHMEM
26658+ if ((start >> PAGE_SHIFT) < max_low_pfn)
26659+#endif
26660 return __va(phys);
26661
26662 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
26663@@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
26664 early_param("early_ioremap_debug", early_ioremap_debug_setup);
26665
26666 static __initdata int after_paging_init;
26667-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
26668+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
26669
26670 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
26671 {
26672@@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
26673 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
26674
26675 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
26676- memset(bm_pte, 0, sizeof(bm_pte));
26677- pmd_populate_kernel(&init_mm, pmd, bm_pte);
26678+ pmd_populate_user(&init_mm, pmd, bm_pte);
26679
26680 /*
26681 * The boot-ioremap range spans multiple pmds, for which
26682diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
26683index d87dd6d..bf3fa66 100644
26684--- a/arch/x86/mm/kmemcheck/kmemcheck.c
26685+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
26686@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
26687 * memory (e.g. tracked pages)? For now, we need this to avoid
26688 * invoking kmemcheck for PnP BIOS calls.
26689 */
26690- if (regs->flags & X86_VM_MASK)
26691+ if (v8086_mode(regs))
26692 return false;
26693- if (regs->cs != __KERNEL_CS)
26694+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
26695 return false;
26696
26697 pte = kmemcheck_pte_lookup(address);
26698diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
26699index 845df68..1d8d29f 100644
26700--- a/arch/x86/mm/mmap.c
26701+++ b/arch/x86/mm/mmap.c
26702@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
26703 * Leave an at least ~128 MB hole with possible stack randomization.
26704 */
26705 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
26706-#define MAX_GAP (TASK_SIZE/6*5)
26707+#define MAX_GAP (pax_task_size/6*5)
26708
26709 static int mmap_is_legacy(void)
26710 {
26711@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
26712 return rnd << PAGE_SHIFT;
26713 }
26714
26715-static unsigned long mmap_base(void)
26716+static unsigned long mmap_base(struct mm_struct *mm)
26717 {
26718 unsigned long gap = rlimit(RLIMIT_STACK);
26719+ unsigned long pax_task_size = TASK_SIZE;
26720+
26721+#ifdef CONFIG_PAX_SEGMEXEC
26722+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
26723+ pax_task_size = SEGMEXEC_TASK_SIZE;
26724+#endif
26725
26726 if (gap < MIN_GAP)
26727 gap = MIN_GAP;
26728 else if (gap > MAX_GAP)
26729 gap = MAX_GAP;
26730
26731- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
26732+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
26733 }
26734
26735 /*
26736 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
26737 * does, but not when emulating X86_32
26738 */
26739-static unsigned long mmap_legacy_base(void)
26740+static unsigned long mmap_legacy_base(struct mm_struct *mm)
26741 {
26742- if (mmap_is_ia32())
26743+ if (mmap_is_ia32()) {
26744+
26745+#ifdef CONFIG_PAX_SEGMEXEC
26746+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
26747+ return SEGMEXEC_TASK_UNMAPPED_BASE;
26748+ else
26749+#endif
26750+
26751 return TASK_UNMAPPED_BASE;
26752- else
26753+ } else
26754 return TASK_UNMAPPED_BASE + mmap_rnd();
26755 }
26756
26757@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
26758 void arch_pick_mmap_layout(struct mm_struct *mm)
26759 {
26760 if (mmap_is_legacy()) {
26761- mm->mmap_base = mmap_legacy_base();
26762+ mm->mmap_base = mmap_legacy_base(mm);
26763+
26764+#ifdef CONFIG_PAX_RANDMMAP
26765+ if (mm->pax_flags & MF_PAX_RANDMMAP)
26766+ mm->mmap_base += mm->delta_mmap;
26767+#endif
26768+
26769 mm->get_unmapped_area = arch_get_unmapped_area;
26770 mm->unmap_area = arch_unmap_area;
26771 } else {
26772- mm->mmap_base = mmap_base();
26773+ mm->mmap_base = mmap_base(mm);
26774+
26775+#ifdef CONFIG_PAX_RANDMMAP
26776+ if (mm->pax_flags & MF_PAX_RANDMMAP)
26777+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
26778+#endif
26779+
26780 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
26781 mm->unmap_area = arch_unmap_area_topdown;
26782 }
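mmap_base() above clamps the stack rlimit into [MIN_GAP, MAX_GAP], where MAX_GAP is now 5/6 of the (possibly SEGMEXEC-reduced) task size, and places the top-down mmap base that far below the end of the address space, minus the ASLR delta. A compilable restatement with illustrative 32-bit numbers:

#include <stdio.h>

#define PAGE_ALIGN(x)  (((x) + 4095UL) & ~4095UL)      /* 4 KiB pages */

static unsigned long base_sketch(unsigned long task_size, unsigned long gap,
                                 unsigned long min_gap, unsigned long max_gap,
                                 unsigned long rnd)
{
    if (gap < min_gap)
        gap = min_gap;
    else if (gap > max_gap)
        gap = max_gap;
    return PAGE_ALIGN(task_size - gap - rnd);
}

int main(void)
{
    /* 3 GiB task size, 8 MiB stack rlimit: gap clamps up to 128 MiB */
    unsigned long base = base_sketch(0xc0000000UL, 8UL << 20, 128UL << 20,
                                     0xc0000000UL / 6 * 5, 0);
    printf("mmap base: %#lx\n", base);                  /* 0xb8000000 */
    return 0;
}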
26783diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
26784index dc0b727..f612039 100644
26785--- a/arch/x86/mm/mmio-mod.c
26786+++ b/arch/x86/mm/mmio-mod.c
26787@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
26788 break;
26789 default:
26790 {
26791- unsigned char *ip = (unsigned char *)instptr;
26792+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
26793 my_trace->opcode = MMIO_UNKNOWN_OP;
26794 my_trace->width = 0;
26795 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
26796@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
26797 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
26798 void __iomem *addr)
26799 {
26800- static atomic_t next_id;
26801+ static atomic_unchecked_t next_id;
26802 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
26803 /* These are page-unaligned. */
26804 struct mmiotrace_map map = {
26805@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
26806 .private = trace
26807 },
26808 .phys = offset,
26809- .id = atomic_inc_return(&next_id)
26810+ .id = atomic_inc_return_unchecked(&next_id)
26811 };
26812 map.map_id = trace->id;
26813
26814@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
26815 ioremap_trace_core(offset, size, addr);
26816 }
26817
26818-static void iounmap_trace_core(volatile void __iomem *addr)
26819+static void iounmap_trace_core(const volatile void __iomem *addr)
26820 {
26821 struct mmiotrace_map map = {
26822 .phys = 0,
26823@@ -328,7 +328,7 @@ not_enabled:
26824 }
26825 }
26826
26827-void mmiotrace_iounmap(volatile void __iomem *addr)
26828+void mmiotrace_iounmap(const volatile void __iomem *addr)
26829 {
26830 might_sleep();
26831 if (is_enabled()) /* recheck and proper locking in *_core() */
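next_id above becomes atomic_unchecked_t: under PaX's REFCOUNT hardening, ordinary atomic_t arithmetic traps on overflow to catch reference-count bugs, so counters where wraparound is harmless (a trace id here) must opt out explicitly. A simplified sketch of the unchecked variant, assuming the real type is a one-int wrapper as in PaX (the real operations are arch assembly):

typedef struct { int counter; } atomic_unchecked_t;

static inline int atomic_inc_return_unchecked_sketch(atomic_unchecked_t *v)
{
    /* deliberately no overflow detection: wraparound is acceptable here */
    return __atomic_add_fetch(&v->counter, 1, __ATOMIC_SEQ_CST);
}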
26832diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
26833index b008656..773eac2 100644
26834--- a/arch/x86/mm/pageattr-test.c
26835+++ b/arch/x86/mm/pageattr-test.c
26836@@ -36,7 +36,7 @@ enum {
26837
26838 static int pte_testbit(pte_t pte)
26839 {
26840- return pte_flags(pte) & _PAGE_UNUSED1;
26841+ return pte_flags(pte) & _PAGE_CPA_TEST;
26842 }
26843
26844 struct split_state {
26845diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
26846index a718e0d..77419bc 100644
26847--- a/arch/x86/mm/pageattr.c
26848+++ b/arch/x86/mm/pageattr.c
26849@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
26850 */
26851 #ifdef CONFIG_PCI_BIOS
26852 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
26853- pgprot_val(forbidden) |= _PAGE_NX;
26854+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
26855 #endif
26856
26857 /*
26858@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
26859 * Does not cover __inittext since that is gone later on. On
26860 * 64bit we do not enforce !NX on the low mapping
26861 */
26862- if (within(address, (unsigned long)_text, (unsigned long)_etext))
26863- pgprot_val(forbidden) |= _PAGE_NX;
26864+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
26865+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
26866
26867+#ifdef CONFIG_DEBUG_RODATA
26868 /*
26869 * The .rodata section needs to be read-only. Using the pfn
26870 * catches all aliases.
26871@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
26872 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
26873 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
26874 pgprot_val(forbidden) |= _PAGE_RW;
26875+#endif
26876
26877 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
26878 /*
26879@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
26880 }
26881 #endif
26882
26883+#ifdef CONFIG_PAX_KERNEXEC
26884+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
26885+ pgprot_val(forbidden) |= _PAGE_RW;
26886+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
26887+ }
26888+#endif
26889+
26890 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
26891
26892 return prot;
26893@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
26894 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
26895 {
26896 /* change init_mm */
26897+ pax_open_kernel();
26898 set_pte_atomic(kpte, pte);
26899+
26900 #ifdef CONFIG_X86_32
26901 if (!SHARED_KERNEL_PMD) {
26902+
26903+#ifdef CONFIG_PAX_PER_CPU_PGD
26904+ unsigned long cpu;
26905+#else
26906 struct page *page;
26907+#endif
26908
26909+#ifdef CONFIG_PAX_PER_CPU_PGD
26910+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
26911+ pgd_t *pgd = get_cpu_pgd(cpu);
26912+#else
26913 list_for_each_entry(page, &pgd_list, lru) {
26914- pgd_t *pgd;
26915+ pgd_t *pgd = (pgd_t *)page_address(page);
26916+#endif
26917+
26918 pud_t *pud;
26919 pmd_t *pmd;
26920
26921- pgd = (pgd_t *)page_address(page) + pgd_index(address);
26922+ pgd += pgd_index(address);
26923 pud = pud_offset(pgd, address);
26924 pmd = pmd_offset(pud, address);
26925 set_pte_atomic((pte_t *)pmd, pte);
26926 }
26927 }
26928 #endif
26929+ pax_close_kernel();
26930 }
26931
26932 static int
26933diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
26934index 0eb572e..92f5c1e 100644
26935--- a/arch/x86/mm/pat.c
26936+++ b/arch/x86/mm/pat.c
26937@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
26938
26939 if (!entry) {
26940 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
26941- current->comm, current->pid, start, end - 1);
26942+ current->comm, task_pid_nr(current), start, end - 1);
26943 return -EINVAL;
26944 }
26945
26946@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26947
26948 while (cursor < to) {
26949 if (!devmem_is_allowed(pfn)) {
26950- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
26951- current->comm, from, to - 1);
26952+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
26953+ current->comm, from, to - 1, cursor);
26954 return 0;
26955 }
26956 cursor += PAGE_SIZE;
26957@@ -570,7 +570,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
26958 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
26959 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
26960 "for [mem %#010Lx-%#010Lx]\n",
26961- current->comm, current->pid,
26962+ current->comm, task_pid_nr(current),
26963 cattr_name(flags),
26964 base, (unsigned long long)(base + size-1));
26965 return -EINVAL;
26966@@ -605,7 +605,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
26967 flags = lookup_memtype(paddr);
26968 if (want_flags != flags) {
26969 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
26970- current->comm, current->pid,
26971+ current->comm, task_pid_nr(current),
26972 cattr_name(want_flags),
26973 (unsigned long long)paddr,
26974 (unsigned long long)(paddr + size - 1),
26975@@ -627,7 +627,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
26976 free_memtype(paddr, paddr + size);
26977 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
26978 " for [mem %#010Lx-%#010Lx], got %s\n",
26979- current->comm, current->pid,
26980+ current->comm, task_pid_nr(current),
26981 cattr_name(want_flags),
26982 (unsigned long long)paddr,
26983 (unsigned long long)(paddr + size - 1),
26984diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
26985index 9f0614d..92ae64a 100644
26986--- a/arch/x86/mm/pf_in.c
26987+++ b/arch/x86/mm/pf_in.c
26988@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
26989 int i;
26990 enum reason_type rv = OTHERS;
26991
26992- p = (unsigned char *)ins_addr;
26993+ p = (unsigned char *)ktla_ktva(ins_addr);
26994 p += skip_prefix(p, &prf);
26995 p += get_opcode(p, &opcode);
26996
26997@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
26998 struct prefix_bits prf;
26999 int i;
27000
27001- p = (unsigned char *)ins_addr;
27002+ p = (unsigned char *)ktla_ktva(ins_addr);
27003 p += skip_prefix(p, &prf);
27004 p += get_opcode(p, &opcode);
27005
27006@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
27007 struct prefix_bits prf;
27008 int i;
27009
27010- p = (unsigned char *)ins_addr;
27011+ p = (unsigned char *)ktla_ktva(ins_addr);
27012 p += skip_prefix(p, &prf);
27013 p += get_opcode(p, &opcode);
27014
27015@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
27016 struct prefix_bits prf;
27017 int i;
27018
27019- p = (unsigned char *)ins_addr;
27020+ p = (unsigned char *)ktla_ktva(ins_addr);
27021 p += skip_prefix(p, &prf);
27022 p += get_opcode(p, &opcode);
27023 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
27024@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
27025 struct prefix_bits prf;
27026 int i;
27027
27028- p = (unsigned char *)ins_addr;
27029+ p = (unsigned char *)ktla_ktva(ins_addr);
27030 p += skip_prefix(p, &prf);
27031 p += get_opcode(p, &opcode);
27032 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
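Every decoder in pf_in.c now reads instruction bytes through ktla_ktva(ins_addr), as mmio-mod.c did above. Under i386 KERNEXEC the kernel image is reachable at two addresses — the alias the CPU fetches code from and the ordinary linear mapping — and anything that inspects instruction bytes must convert a fault-time address to the alias that actually holds the text. A sketch under the assumption of a constant displacement (zero when KERNEXEC is off, making both conversions the identity):

#define KTEXT_DELTA 0UL     /* placeholder; the real value is link-time derived */

static inline unsigned long ktla_ktva_sketch(unsigned long addr)
{
    return addr + KTEXT_DELTA;  /* linear alias -> executable text alias */
}

static inline unsigned long ktva_ktla_sketch(unsigned long addr)
{
    return addr - KTEXT_DELTA;  /* executable text alias -> linear alias */
}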
27033diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
27034index 8573b83..4f3ed7e 100644
27035--- a/arch/x86/mm/pgtable.c
27036+++ b/arch/x86/mm/pgtable.c
27037@@ -84,10 +84,64 @@ static inline void pgd_list_del(pgd_t *pgd)
27038 list_del(&page->lru);
27039 }
27040
27041-#define UNSHARED_PTRS_PER_PGD \
27042- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
27043+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27044+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
27045
27046+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
27047+{
27048+ unsigned int count = USER_PGD_PTRS;
27049
27050+ while (count--)
27051+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
27052+}
27053+#endif
27054+
27055+#ifdef CONFIG_PAX_PER_CPU_PGD
27056+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
27057+{
27058+ unsigned int count = USER_PGD_PTRS;
27059+
27060+ while (count--) {
27061+ pgd_t pgd;
27062+
27063+#ifdef CONFIG_X86_64
27064+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
27065+#else
27066+ pgd = *src++;
27067+#endif
27068+
27069+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27070+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
27071+#endif
27072+
27073+ *dst++ = pgd;
27074+ }
27075+
27076+}
27077+#endif
27078+
27079+#ifdef CONFIG_X86_64
27080+#define pxd_t pud_t
27081+#define pyd_t pgd_t
27082+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
27083+#define pxd_free(mm, pud) pud_free((mm), (pud))
27084+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
27085+#define pyd_offset(mm, address) pgd_offset((mm), (address))
27086+#define PYD_SIZE PGDIR_SIZE
27087+#else
27088+#define pxd_t pmd_t
27089+#define pyd_t pud_t
27090+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
27091+#define pxd_free(mm, pud) pmd_free((mm), (pud))
27092+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
27093+#define pyd_offset(mm, address) pud_offset((mm), (address))
27094+#define PYD_SIZE PUD_SIZE
27095+#endif
27096+
27097+#ifdef CONFIG_PAX_PER_CPU_PGD
27098+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
27099+static inline void pgd_dtor(pgd_t *pgd) {}
27100+#else
27101 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
27102 {
27103 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
27104@@ -128,6 +182,7 @@ static void pgd_dtor(pgd_t *pgd)
27105 pgd_list_del(pgd);
27106 spin_unlock(&pgd_lock);
27107 }
27108+#endif
27109
27110 /*
27111 * List of all pgd's needed for non-PAE so it can invalidate entries
27112@@ -140,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
27113 * -- wli
27114 */
27115
27116-#ifdef CONFIG_X86_PAE
27117+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
27118 /*
27119 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
27120 * updating the top-level pagetable entries to guarantee the
27121@@ -152,7 +207,7 @@ static void pgd_dtor(pgd_t *pgd)
27122 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
27123 * and initialize the kernel pmds here.
27124 */
27125-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
27126+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
27127
27128 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
27129 {
27130@@ -170,36 +225,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
27131 */
27132 flush_tlb_mm(mm);
27133 }
27134+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
27135+#define PREALLOCATED_PXDS USER_PGD_PTRS
27136 #else /* !CONFIG_X86_PAE */
27137
27138 /* No need to prepopulate any pagetable entries in non-PAE modes. */
27139-#define PREALLOCATED_PMDS 0
27140+#define PREALLOCATED_PXDS 0
27141
27142 #endif /* CONFIG_X86_PAE */
27143
27144-static void free_pmds(pmd_t *pmds[])
27145+static void free_pxds(pxd_t *pxds[])
27146 {
27147 int i;
27148
27149- for(i = 0; i < PREALLOCATED_PMDS; i++)
27150- if (pmds[i])
27151- free_page((unsigned long)pmds[i]);
27152+ for(i = 0; i < PREALLOCATED_PXDS; i++)
27153+ if (pxds[i])
27154+ free_page((unsigned long)pxds[i]);
27155 }
27156
27157-static int preallocate_pmds(pmd_t *pmds[])
27158+static int preallocate_pxds(pxd_t *pxds[])
27159 {
27160 int i;
27161 bool failed = false;
27162
27163- for(i = 0; i < PREALLOCATED_PMDS; i++) {
27164- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
27165- if (pmd == NULL)
27166+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
27167+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
27168+ if (pxd == NULL)
27169 failed = true;
27170- pmds[i] = pmd;
27171+ pxds[i] = pxd;
27172 }
27173
27174 if (failed) {
27175- free_pmds(pmds);
27176+ free_pxds(pxds);
27177 return -ENOMEM;
27178 }
27179
27180@@ -212,51 +269,55 @@ static int preallocate_pmds(pmd_t *pmds[])
27181 * preallocate which never got a corresponding vma will need to be
27182 * freed manually.
27183 */
27184-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
27185+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
27186 {
27187 int i;
27188
27189- for(i = 0; i < PREALLOCATED_PMDS; i++) {
27190+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
27191 pgd_t pgd = pgdp[i];
27192
27193 if (pgd_val(pgd) != 0) {
27194- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
27195+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
27196
27197- pgdp[i] = native_make_pgd(0);
27198+ set_pgd(pgdp + i, native_make_pgd(0));
27199
27200- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
27201- pmd_free(mm, pmd);
27202+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
27203+ pxd_free(mm, pxd);
27204 }
27205 }
27206 }
27207
27208-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
27209+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
27210 {
27211- pud_t *pud;
27212+ pyd_t *pyd;
27213 unsigned long addr;
27214 int i;
27215
27216- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
27217+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
27218 return;
27219
27220- pud = pud_offset(pgd, 0);
27221+#ifdef CONFIG_X86_64
27222+ pyd = pyd_offset(mm, 0L);
27223+#else
27224+ pyd = pyd_offset(pgd, 0L);
27225+#endif
27226
27227- for (addr = i = 0; i < PREALLOCATED_PMDS;
27228- i++, pud++, addr += PUD_SIZE) {
27229- pmd_t *pmd = pmds[i];
27230+ for (addr = i = 0; i < PREALLOCATED_PXDS;
27231+ i++, pyd++, addr += PYD_SIZE) {
27232+ pxd_t *pxd = pxds[i];
27233
27234 if (i >= KERNEL_PGD_BOUNDARY)
27235- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
27236- sizeof(pmd_t) * PTRS_PER_PMD);
27237+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
27238+ sizeof(pxd_t) * PTRS_PER_PMD);
27239
27240- pud_populate(mm, pud, pmd);
27241+ pyd_populate(mm, pyd, pxd);
27242 }
27243 }
27244
27245 pgd_t *pgd_alloc(struct mm_struct *mm)
27246 {
27247 pgd_t *pgd;
27248- pmd_t *pmds[PREALLOCATED_PMDS];
27249+ pxd_t *pxds[PREALLOCATED_PXDS];
27250
27251 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
27252
27253@@ -265,11 +326,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
27254
27255 mm->pgd = pgd;
27256
27257- if (preallocate_pmds(pmds) != 0)
27258+ if (preallocate_pxds(pxds) != 0)
27259 goto out_free_pgd;
27260
27261 if (paravirt_pgd_alloc(mm) != 0)
27262- goto out_free_pmds;
27263+ goto out_free_pxds;
27264
27265 /*
27266 * Make sure that pre-populating the pmds is atomic with
27267@@ -279,14 +340,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
27268 spin_lock(&pgd_lock);
27269
27270 pgd_ctor(mm, pgd);
27271- pgd_prepopulate_pmd(mm, pgd, pmds);
27272+ pgd_prepopulate_pxd(mm, pgd, pxds);
27273
27274 spin_unlock(&pgd_lock);
27275
27276 return pgd;
27277
27278-out_free_pmds:
27279- free_pmds(pmds);
27280+out_free_pxds:
27281+ free_pxds(pxds);
27282 out_free_pgd:
27283 free_page((unsigned long)pgd);
27284 out:
27285@@ -295,7 +356,7 @@ out:
27286
27287 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
27288 {
27289- pgd_mop_up_pmds(mm, pgd);
27290+ pgd_mop_up_pxds(mm, pgd);
27291 pgd_dtor(pgd);
27292 paravirt_pgd_free(mm, pgd);
27293 free_page((unsigned long)pgd);
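
The pxd_t/pyd_t macro layer introduced above lets a single set of preallocation helpers compile against two different page-table levels: on 32-bit PAE the mid level is the pmd (preallocated under puds), while on 64-bit with per-CPU PGDs it is the pud (preallocated under pgds). A minimal standalone sketch of the same aliasing trick, with hypothetical names and a user-space allocator standing in for the kernel's page allocator:

    #include <stdlib.h>

    typedef struct { unsigned long val; } pmd_like;
    typedef struct { unsigned long val; } pud_like;

    #ifdef SIXTY_FOUR_BIT            /* hypothetical config switch */
    #define pxd_like pud_like        /* mid level is the pud */
    #else
    #define pxd_like pmd_like        /* mid level is the pmd */
    #endif

    /* one body, two instantiations: mirrors preallocate_pxds() above */
    static int preallocate(pxd_like *tab[], int n)
    {
            int i;

            for (i = 0; i < n; i++) {
                    tab[i] = calloc(1, 4096);  /* one zeroed page per table */
                    if (!tab[i])
                            return -1;         /* caller frees what it got */
            }
            return 0;
    }
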
27294diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
27295index a69bcb8..19068ab 100644
27296--- a/arch/x86/mm/pgtable_32.c
27297+++ b/arch/x86/mm/pgtable_32.c
27298@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
27299 return;
27300 }
27301 pte = pte_offset_kernel(pmd, vaddr);
27302+
27303+ pax_open_kernel();
27304 if (pte_val(pteval))
27305 set_pte_at(&init_mm, vaddr, pte, pteval);
27306 else
27307 pte_clear(&init_mm, vaddr, pte);
27308+ pax_close_kernel();
27309
27310 /*
27311 * It's enough to flush this one mapping.
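
The set_pte_vaddr() hunk above shows the recurring pax_open_kernel()/pax_close_kernel() idiom: under KERNEXEC the kernel's page tables and other critical data are mapped read-only, so a legitimate writer must open a write window around the update. A rough kernel-context sketch of one way such brackets can be implemented, assuming a CR0.WP-based scheme (PaX also has page-table-based variants); the helper names here are hypothetical, not the actual PaX code:

    #include <linux/preempt.h>
    #include <asm/processor.h>        /* read_cr0()/write_cr0() */
    #include <asm/processor-flags.h>  /* X86_CR0_WP */

    static inline unsigned long my_open_kernel(void)
    {
            unsigned long cr0;

            preempt_disable();                 /* the window must not migrate */
            cr0 = read_cr0();
            write_cr0(cr0 & ~X86_CR0_WP);      /* supervisor writes ignore RO */
            return cr0;
    }

    static inline void my_close_kernel(unsigned long cr0)
    {
            write_cr0(cr0);                    /* restore write protection */
            preempt_enable();
    }
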
27312diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
27313index 410531d..0f16030 100644
27314--- a/arch/x86/mm/setup_nx.c
27315+++ b/arch/x86/mm/setup_nx.c
27316@@ -5,8 +5,10 @@
27317 #include <asm/pgtable.h>
27318 #include <asm/proto.h>
27319
27320+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
27321 static int disable_nx __cpuinitdata;
27322
27323+#ifndef CONFIG_PAX_PAGEEXEC
27324 /*
27325 * noexec = on|off
27326 *
27327@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
27328 return 0;
27329 }
27330 early_param("noexec", noexec_setup);
27331+#endif
27332+
27333+#endif
27334
27335 void __cpuinit x86_configure_nx(void)
27336 {
27337+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
27338 if (cpu_has_nx && !disable_nx)
27339 __supported_pte_mask |= _PAGE_NX;
27340 else
27341+#endif
27342 __supported_pte_mask &= ~_PAGE_NX;
27343 }
27344
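
The net effect of the setup_nx.c changes is that _PAGE_NX lives or dies with __supported_pte_mask: with PAX_PAGEEXEC the "noexec=" override disappears, and on hardware without NX the bit is masked out of every protection value built from the mask. A standalone toy showing that gating (made-up constants, not kernel code):

    #include <stdio.h>

    #define PAGE_NX   (1UL << 63)
    #define BASE_PROT 0x63UL          /* present | rw | accessed | dirty */

    static unsigned long supported_pte_mask = ~0UL;

    static unsigned long mk_prot(void)
    {
            /* ANDing with the mask silently drops NX when unsupported */
            return BASE_PROT | (PAGE_NX & supported_pte_mask);
    }

    int main(void)
    {
            printf("NX capable:   %#lx\n", mk_prot());
            supported_pte_mask &= ~PAGE_NX;  /* what x86_configure_nx() does without NX */
            printf("NX incapable: %#lx\n", mk_prot());
            return 0;
    }
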
27345diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
27346index 60f926c..a710970 100644
27347--- a/arch/x86/mm/tlb.c
27348+++ b/arch/x86/mm/tlb.c
27349@@ -48,7 +48,11 @@ void leave_mm(int cpu)
27350 BUG();
27351 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
27352 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
27353+
27354+#ifndef CONFIG_PAX_PER_CPU_PGD
27355 load_cr3(swapper_pg_dir);
27356+#endif
27357+
27358 }
27359 }
27360 EXPORT_SYMBOL_GPL(leave_mm);
27361diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
27362index 877b9a1..a8ecf42 100644
27363--- a/arch/x86/net/bpf_jit.S
27364+++ b/arch/x86/net/bpf_jit.S
27365@@ -9,6 +9,7 @@
27366 */
27367 #include <linux/linkage.h>
27368 #include <asm/dwarf2.h>
27369+#include <asm/alternative-asm.h>
27370
27371 /*
27372 * Calling convention :
27373@@ -35,6 +36,7 @@ sk_load_word_positive_offset:
27374 jle bpf_slow_path_word
27375 mov (SKBDATA,%rsi),%eax
27376 bswap %eax /* ntohl() */
27377+ pax_force_retaddr
27378 ret
27379
27380 sk_load_half:
27381@@ -52,6 +54,7 @@ sk_load_half_positive_offset:
27382 jle bpf_slow_path_half
27383 movzwl (SKBDATA,%rsi),%eax
27384 rol $8,%ax # ntohs()
27385+ pax_force_retaddr
27386 ret
27387
27388 sk_load_byte:
27389@@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
27390 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
27391 jle bpf_slow_path_byte
27392 movzbl (SKBDATA,%rsi),%eax
27393+ pax_force_retaddr
27394 ret
27395
27396 /**
27397@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
27398 movzbl (SKBDATA,%rsi),%ebx
27399 and $15,%bl
27400 shl $2,%bl
27401+ pax_force_retaddr
27402 ret
27403
27404 /* rsi contains offset and can be scratched */
27405@@ -109,6 +114,7 @@ bpf_slow_path_word:
27406 js bpf_error
27407 mov -12(%rbp),%eax
27408 bswap %eax
27409+ pax_force_retaddr
27410 ret
27411
27412 bpf_slow_path_half:
27413@@ -117,12 +123,14 @@ bpf_slow_path_half:
27414 mov -12(%rbp),%ax
27415 rol $8,%ax
27416 movzwl %ax,%eax
27417+ pax_force_retaddr
27418 ret
27419
27420 bpf_slow_path_byte:
27421 bpf_slow_path_common(1)
27422 js bpf_error
27423 movzbl -12(%rbp),%eax
27424+ pax_force_retaddr
27425 ret
27426
27427 bpf_slow_path_byte_msh:
27428@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
27429 and $15,%al
27430 shl $2,%al
27431 xchg %eax,%ebx
27432+ pax_force_retaddr
27433 ret
27434
27435 #define sk_negative_common(SIZE) \
27436@@ -157,6 +166,7 @@ sk_load_word_negative_offset:
27437 sk_negative_common(4)
27438 mov (%rax), %eax
27439 bswap %eax
27440+ pax_force_retaddr
27441 ret
27442
27443 bpf_slow_path_half_neg:
27444@@ -168,6 +178,7 @@ sk_load_half_negative_offset:
27445 mov (%rax),%ax
27446 rol $8,%ax
27447 movzwl %ax,%eax
27448+ pax_force_retaddr
27449 ret
27450
27451 bpf_slow_path_byte_neg:
27452@@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
27453 .globl sk_load_byte_negative_offset
27454 sk_negative_common(1)
27455 movzbl (%rax), %eax
27456+ pax_force_retaddr
27457 ret
27458
27459 bpf_slow_path_byte_msh_neg:
27460@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
27461 and $15,%al
27462 shl $2,%al
27463 xchg %eax,%ebx
27464+ pax_force_retaddr
27465 ret
27466
27467 bpf_error:
27468@@ -197,4 +210,5 @@ bpf_error:
27469 xor %eax,%eax
27470 mov -8(%rbp),%rbx
27471 leaveq
27472+ pax_force_retaddr
27473 ret
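
Every pax_force_retaddr inserted above runs immediately before a ret. Under KERNEXEC the macro clamps the saved return address back into the kernel half of the canonical address space (on x86-64, by forcing the top bit on), so a return address corrupted to point at userland faults rather than transferring control there. The real macro is a couple of bytes of assembly operating on (%rsp); a conceptual C rendering of the idea, illustrative only:

    /* conceptual only: clamp a saved return address into kernel space */
    static inline unsigned long force_kernel_retaddr(unsigned long ra)
    {
            return ra | (1UL << 63);  /* kernel half of the 64-bit canonical range */
    }
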
27474diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
27475index 520d2bd..b895ef4 100644
27476--- a/arch/x86/net/bpf_jit_comp.c
27477+++ b/arch/x86/net/bpf_jit_comp.c
27478@@ -11,6 +11,7 @@
27479 #include <asm/cacheflush.h>
27480 #include <linux/netdevice.h>
27481 #include <linux/filter.h>
27482+#include <linux/random.h>
27483
27484 /*
27485 * Conventions :
27486@@ -48,13 +49,87 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
27487 return ptr + len;
27488 }
27489
27490+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
27491+#define MAX_INSTR_CODE_SIZE 96
27492+#else
27493+#define MAX_INSTR_CODE_SIZE 64
27494+#endif
27495+
27496 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
27497
27498 #define EMIT1(b1) EMIT(b1, 1)
27499 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
27500 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
27501 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
27502+
27503+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
27504+/* original constant will appear in ecx */
27505+#define DILUTE_CONST_SEQUENCE(_off, _key) \
27506+do { \
27507+ /* mov ecx, randkey */ \
27508+ EMIT1(0xb9); \
27509+ EMIT(_key, 4); \
27510+ /* xor ecx, randkey ^ off */ \
27511+ EMIT2(0x81, 0xf1); \
27512+ EMIT((_key) ^ (_off), 4); \
27513+} while (0)
27514+
27515+#define EMIT1_off32(b1, _off) \
27516+do { \
27517+ switch (b1) { \
27518+ case 0x05: /* add eax, imm32 */ \
27519+ case 0x2d: /* sub eax, imm32 */ \
27520+ case 0x25: /* and eax, imm32 */ \
27521+ case 0x0d: /* or eax, imm32 */ \
27522+ case 0xb8: /* mov eax, imm32 */ \
27523+ case 0x3d: /* cmp eax, imm32 */ \
27524+ case 0xa9: /* test eax, imm32 */ \
27525+ DILUTE_CONST_SEQUENCE(_off, randkey); \
27526+ EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
27527+ break; \
27528+ case 0xbb: /* mov ebx, imm32 */ \
27529+ DILUTE_CONST_SEQUENCE(_off, randkey); \
27530+ /* mov ebx, ecx */ \
27531+ EMIT2(0x89, 0xcb); \
27532+ break; \
27533+ case 0xbe: /* mov esi, imm32 */ \
27534+ DILUTE_CONST_SEQUENCE(_off, randkey); \
27535+ /* mov esi, ecx */ \
27536+ EMIT2(0x89, 0xce); \
27537+ break; \
27538+ case 0xe9: /* jmp rel imm32 */ \
27539+ EMIT1(b1); \
27540+ EMIT(_off, 4); \
27541+ /* prevent fall-through, we're not called if off = 0 */ \
27542+ EMIT(0xcccccccc, 4); \
27543+ EMIT(0xcccccccc, 4); \
27544+ break; \
27545+ default: \
27546+ EMIT1(b1); \
27547+ EMIT(_off, 4); \
27548+ } \
27549+} while (0)
27550+
27551+#define EMIT2_off32(b1, b2, _off) \
27552+do { \
27553+ if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \
27554+ EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \
27555+ EMIT(randkey, 4); \
27556+ EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \
27557+ EMIT((_off) - randkey, 4); \
27558+ } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
27559+ DILUTE_CONST_SEQUENCE(_off, randkey); \
27560+ /* imul eax, ecx */ \
27561+ EMIT3(0x0f, 0xaf, 0xc1); \
27562+ } else { \
27563+ EMIT2(b1, b2); \
27564+ EMIT(_off, 4); \
27565+ } \
27566+} while (0)
27567+#else
27568 #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
27569+#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
27570+#endif
27571
27572 #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
27573 #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
27574@@ -89,6 +164,24 @@ do { \
27575 #define X86_JBE 0x76
27576 #define X86_JA 0x77
27577
27578+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
27579+#define APPEND_FLOW_VERIFY() \
27580+do { \
27581+ /* mov ecx, randkey */ \
27582+ EMIT1(0xb9); \
27583+ EMIT(randkey, 4); \
27584+ /* cmp ecx, randkey */ \
27585+ EMIT2(0x81, 0xf9); \
27586+ EMIT(randkey, 4); \
27587+ /* jz after 8 int 3s */ \
27588+ EMIT2(0x74, 0x08); \
27589+ EMIT(0xcccccccc, 4); \
27590+ EMIT(0xcccccccc, 4); \
27591+} while (0)
27592+#else
27593+#define APPEND_FLOW_VERIFY() do { } while (0)
27594+#endif
27595+
27596 #define EMIT_COND_JMP(op, offset) \
27597 do { \
27598 if (is_near(offset)) \
27599@@ -96,6 +189,7 @@ do { \
27600 else { \
27601 EMIT2(0x0f, op + 0x10); \
27602 EMIT(offset, 4); /* jxx .+off32 */ \
27603+ APPEND_FLOW_VERIFY(); \
27604 } \
27605 } while (0)
27606
27607@@ -120,12 +214,17 @@ static inline void bpf_flush_icache(void *start, void *end)
27608 set_fs(old_fs);
27609 }
27610
27611+struct bpf_jit_work {
27612+ struct work_struct work;
27613+ void *image;
27614+};
27615+
27616 #define CHOOSE_LOAD_FUNC(K, func) \
27617 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
27618
27619 void bpf_jit_compile(struct sk_filter *fp)
27620 {
27621- u8 temp[64];
27622+ u8 temp[MAX_INSTR_CODE_SIZE];
27623 u8 *prog;
27624 unsigned int proglen, oldproglen = 0;
27625 int ilen, i;
27626@@ -138,6 +237,9 @@ void bpf_jit_compile(struct sk_filter *fp)
27627 unsigned int *addrs;
27628 const struct sock_filter *filter = fp->insns;
27629 int flen = fp->len;
27630+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
27631+ unsigned int randkey;
27632+#endif
27633
27634 if (!bpf_jit_enable)
27635 return;
27636@@ -146,11 +248,19 @@ void bpf_jit_compile(struct sk_filter *fp)
27637 if (addrs == NULL)
27638 return;
27639
27640+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
27641+ if (!fp->work)
27642+ goto out;
27643+
27644+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
27645+ randkey = get_random_int();
27646+#endif
27647+
27648 /* Before first pass, make a rough estimation of addrs[]
27649- * each bpf instruction is translated to less than 64 bytes
27650+ * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
27651 */
27652 for (proglen = 0, i = 0; i < flen; i++) {
27653- proglen += 64;
27654+ proglen += MAX_INSTR_CODE_SIZE;
27655 addrs[i] = proglen;
27656 }
27657 cleanup_addr = proglen; /* epilogue address */
27658@@ -258,10 +368,8 @@ void bpf_jit_compile(struct sk_filter *fp)
27659 case BPF_S_ALU_MUL_K: /* A *= K */
27660 if (is_imm8(K))
27661 EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
27662- else {
27663- EMIT2(0x69, 0xc0); /* imul imm32,%eax */
27664- EMIT(K, 4);
27665- }
27666+ else
27667+ EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
27668 break;
27669 case BPF_S_ALU_DIV_X: /* A /= X; */
27670 seen |= SEEN_XREG;
27671@@ -301,13 +409,23 @@ void bpf_jit_compile(struct sk_filter *fp)
27672 break;
27673 case BPF_S_ALU_MOD_K: /* A %= K; */
27674 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
27675+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
27676+ DILUTE_CONST_SEQUENCE(K, randkey);
27677+#else
27678 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
27679+#endif
27680 EMIT2(0xf7, 0xf1); /* div %ecx */
27681 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
27682 break;
27683 case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
27684+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
27685+ DILUTE_CONST_SEQUENCE(K, randkey);
27686+ // imul rax, rcx
27687+ EMIT4(0x48, 0x0f, 0xaf, 0xc1);
27688+#else
27689 EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
27690 EMIT(K, 4);
27691+#endif
27692 EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
27693 break;
27694 case BPF_S_ALU_AND_X:
27695@@ -543,8 +661,7 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
27696 if (is_imm8(K)) {
27697 EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
27698 } else {
27699- EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
27700- EMIT(K, 4);
27701+ EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
27702 }
27703 } else {
27704 EMIT2(0x89,0xde); /* mov %ebx,%esi */
27705@@ -627,17 +744,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
27706 break;
27707 default:
27708 /* hmm, too complex filter, give up with jit compiler */
27709- goto out;
27710+ goto error;
27711 }
27712 ilen = prog - temp;
27713 if (image) {
27714 if (unlikely(proglen + ilen > oldproglen)) {
27715 pr_err("bpb_jit_compile fatal error\n");
27716- kfree(addrs);
27717- module_free(NULL, image);
27718- return;
27719+ module_free_exec(NULL, image);
27720+ goto error;
27721 }
27722+ pax_open_kernel();
27723 memcpy(image + proglen, temp, ilen);
27724+ pax_close_kernel();
27725 }
27726 proglen += ilen;
27727 addrs[i] = proglen;
27728@@ -658,11 +776,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
27729 break;
27730 }
27731 if (proglen == oldproglen) {
27732- image = module_alloc(max_t(unsigned int,
27733- proglen,
27734- sizeof(struct work_struct)));
27735+ image = module_alloc_exec(proglen);
27736 if (!image)
27737- goto out;
27738+ goto error;
27739 }
27740 oldproglen = proglen;
27741 }
27742@@ -678,7 +794,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
27743 bpf_flush_icache(image, image + proglen);
27744
27745 fp->bpf_func = (void *)image;
27746- }
27747+ } else
27748+error:
27749+ kfree(fp->work);
27750+
27751 out:
27752 kfree(addrs);
27753 return;
27754@@ -686,18 +805,20 @@ out:
27755
27756 static void jit_free_defer(struct work_struct *arg)
27757 {
27758- module_free(NULL, arg);
27759+ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
27760+ kfree(arg);
27761 }
27762
27763 /* run from softirq, we must use a work_struct to call
27764- * module_free() from process context
27765+ * module_free_exec() from process context
27766 */
27767 void bpf_jit_free(struct sk_filter *fp)
27768 {
27769 if (fp->bpf_func != sk_run_filter) {
27770- struct work_struct *work = (struct work_struct *)fp->bpf_func;
27771+ struct work_struct *work = &fp->work->work;
27772
27773 INIT_WORK(work, jit_free_defer);
27774+ fp->work->image = fp->bpf_func;
27775 schedule_work(work);
27776 }
27777 }
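
The constant-blinding scheme above (CONFIG_GRKERNSEC_JIT_HARDEN) rests on a simple XOR identity: the JIT emits mov ecx, key followed by xor ecx, key ^ imm, so ecx ends up holding imm while the attacker-chosen immediate never appears verbatim in the executable JIT buffer, defeating shellcode smuggled through crafted constants. A standalone demonstration of the identity:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>

    int main(void)
    {
            uint32_t imm = 0xdeadbeef;         /* attacker-chosen BPF constant */
            uint32_t key = (uint32_t)rand();   /* per-filter random key */

            uint32_t word1 = key;              /* emitted: mov ecx, key       */
            uint32_t word2 = key ^ imm;        /* emitted: xor ecx, key ^ imm */
            uint32_t in_ecx = word1 ^ word2;   /* what ecx holds at run time  */

            printf("reconstructed %#x (expected %#x)\n", in_ecx, imm);
            return in_ecx == imm ? 0 : 1;
    }
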
27778diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
27779index d6aa6e8..266395a 100644
27780--- a/arch/x86/oprofile/backtrace.c
27781+++ b/arch/x86/oprofile/backtrace.c
27782@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
27783 struct stack_frame_ia32 *fp;
27784 unsigned long bytes;
27785
27786- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
27787+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
27788 if (bytes != sizeof(bufhead))
27789 return NULL;
27790
27791- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
27792+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
27793
27794 oprofile_add_trace(bufhead[0].return_address);
27795
27796@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
27797 struct stack_frame bufhead[2];
27798 unsigned long bytes;
27799
27800- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
27801+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
27802 if (bytes != sizeof(bufhead))
27803 return NULL;
27804
27805@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
27806 {
27807 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
27808
27809- if (!user_mode_vm(regs)) {
27810+ if (!user_mode(regs)) {
27811 unsigned long stack = kernel_stack_pointer(regs);
27812 if (depth)
27813 dump_trace(NULL, regs, (unsigned long *)stack, 0,
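
The __force_user/__force_kernel casts above are sparse annotations: with UDEREF, user and kernel pointers are conceptually distinct address spaces, and conversions between them have to be spelled out. For reference, the annotations expand roughly as follows under sparse (cf. the kernel's compiler.h; quoted from memory, so treat as an approximation):

    #ifdef __CHECKER__
    # define __user    __attribute__((noderef, address_space(1)))
    # define __kernel  __attribute__((address_space(0)))
    # define __force   __attribute__((force))
    #else
    # define __user
    # define __kernel
    # define __force
    #endif
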
27814diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
27815index 192397c..5ba6f9e 100644
27816--- a/arch/x86/pci/acpi.c
27817+++ b/arch/x86/pci/acpi.c
27818@@ -568,7 +568,7 @@ int __init pci_acpi_init(void)
27819 acpi_irq_penalty_init();
27820 pcibios_enable_irq = acpi_pci_irq_enable;
27821 pcibios_disable_irq = acpi_pci_irq_disable;
27822- x86_init.pci.init_irq = x86_init_noop;
27823+ *(void **)&x86_init.pci.init_irq = x86_init_noop;
27824
27825 if (pci_routeirq) {
27826 /*
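
The *(void **)& assignments above (repeated throughout the x86_init/x86_platform hunks that follow) are a consequence of the constify GCC plugin: ops structures full of function pointers become const-qualified, so the handful of deliberate boot-time overrides must launder the write through a cast. A minimal user-space analogue of the idiom; here the storage is genuinely writable and only the view is const, so the cast is well defined, whereas in the kernel the write additionally happens while the section is writable or inside pax_open_kernel brackets:

    #include <stdio.h>

    struct ops {
            void (*init)(void);
    };

    static void my_init(void) { puts("patched init"); }

    static struct ops real_ops;                     /* writable storage          */
    static const struct ops *boot_ops = &real_ops;  /* const view, as under constify */

    int main(void)
    {
            /* `boot_ops->init = my_init;` would not compile; the patch
             * uses the same cast to overrule the const qualifier */
            *(void **)&boot_ops->init = my_init;
            boot_ops->init();
            return 0;
    }
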
27827diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
27828index e14a2ff..3fd6b58 100644
27829--- a/arch/x86/pci/mrst.c
27830+++ b/arch/x86/pci/mrst.c
27831@@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
27832 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
27833 pci_mmcfg_late_init();
27834 pcibios_enable_irq = mrst_pci_irq_enable;
27835- pci_root_ops = pci_mrst_ops;
27836+ pax_open_kernel();
27837+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
27838+ pax_close_kernel();
27839 pci_soc_mode = 1;
27840 /* Continue with standard init */
27841 return 1;
27842diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
27843index da8fe05..7ee6704 100644
27844--- a/arch/x86/pci/pcbios.c
27845+++ b/arch/x86/pci/pcbios.c
27846@@ -79,50 +79,93 @@ union bios32 {
27847 static struct {
27848 unsigned long address;
27849 unsigned short segment;
27850-} bios32_indirect = { 0, __KERNEL_CS };
27851+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
27852
27853 /*
27854 * Returns the entry point for the given service, NULL on error
27855 */
27856
27857-static unsigned long bios32_service(unsigned long service)
27858+static unsigned long __devinit bios32_service(unsigned long service)
27859 {
27860 unsigned char return_code; /* %al */
27861 unsigned long address; /* %ebx */
27862 unsigned long length; /* %ecx */
27863 unsigned long entry; /* %edx */
27864 unsigned long flags;
27865+ struct desc_struct d, *gdt;
27866
27867 local_irq_save(flags);
27868- __asm__("lcall *(%%edi); cld"
27869+
27870+ gdt = get_cpu_gdt_table(smp_processor_id());
27871+
27872+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
27873+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
27874+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
27875+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
27876+
27877+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
27878 : "=a" (return_code),
27879 "=b" (address),
27880 "=c" (length),
27881 "=d" (entry)
27882 : "0" (service),
27883 "1" (0),
27884- "D" (&bios32_indirect));
27885+ "D" (&bios32_indirect),
27886+ "r"(__PCIBIOS_DS)
27887+ : "memory");
27888+
27889+ pax_open_kernel();
27890+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
27891+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
27892+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
27893+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
27894+ pax_close_kernel();
27895+
27896 local_irq_restore(flags);
27897
27898 switch (return_code) {
27899- case 0:
27900- return address + entry;
27901- case 0x80: /* Not present */
27902- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
27903- return 0;
27904- default: /* Shouldn't happen */
27905- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
27906- service, return_code);
27907+ case 0: {
27908+ int cpu;
27909+ unsigned char flags;
27910+
27911+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
27912+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
27913+ printk(KERN_WARNING "bios32_service: not valid\n");
27914 return 0;
27915+ }
27916+ address = address + PAGE_OFFSET;
27917+ length += 16UL; /* some BIOSs underreport this... */
27918+ flags = 4;
27919+ if (length >= 64*1024*1024) {
27920+ length >>= PAGE_SHIFT;
27921+ flags |= 8;
27922+ }
27923+
27924+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
27925+ gdt = get_cpu_gdt_table(cpu);
27926+ pack_descriptor(&d, address, length, 0x9b, flags);
27927+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
27928+ pack_descriptor(&d, address, length, 0x93, flags);
27929+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
27930+ }
27931+ return entry;
27932+ }
27933+ case 0x80: /* Not present */
27934+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
27935+ return 0;
27936+ default: /* Shouldn't happen */
27937+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
27938+ service, return_code);
27939+ return 0;
27940 }
27941 }
27942
27943 static struct {
27944 unsigned long address;
27945 unsigned short segment;
27946-} pci_indirect = { 0, __KERNEL_CS };
27947+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
27948
27949-static int pci_bios_present;
27950+static int pci_bios_present __read_only;
27951
27952 static int __devinit check_pcibios(void)
27953 {
27954@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
27955 unsigned long flags, pcibios_entry;
27956
27957 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
27958- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
27959+ pci_indirect.address = pcibios_entry;
27960
27961 local_irq_save(flags);
27962- __asm__(
27963- "lcall *(%%edi); cld\n\t"
27964+ __asm__("movw %w6, %%ds\n\t"
27965+ "lcall *%%ss:(%%edi); cld\n\t"
27966+ "push %%ss\n\t"
27967+ "pop %%ds\n\t"
27968 "jc 1f\n\t"
27969 "xor %%ah, %%ah\n"
27970 "1:"
27971@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
27972 "=b" (ebx),
27973 "=c" (ecx)
27974 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
27975- "D" (&pci_indirect)
27976+ "D" (&pci_indirect),
27977+ "r" (__PCIBIOS_DS)
27978 : "memory");
27979 local_irq_restore(flags);
27980
27981@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27982
27983 switch (len) {
27984 case 1:
27985- __asm__("lcall *(%%esi); cld\n\t"
27986+ __asm__("movw %w6, %%ds\n\t"
27987+ "lcall *%%ss:(%%esi); cld\n\t"
27988+ "push %%ss\n\t"
27989+ "pop %%ds\n\t"
27990 "jc 1f\n\t"
27991 "xor %%ah, %%ah\n"
27992 "1:"
27993@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27994 : "1" (PCIBIOS_READ_CONFIG_BYTE),
27995 "b" (bx),
27996 "D" ((long)reg),
27997- "S" (&pci_indirect));
27998+ "S" (&pci_indirect),
27999+ "r" (__PCIBIOS_DS));
28000 /*
28001 * Zero-extend the result beyond 8 bits, do not trust the
28002 * BIOS having done it:
28003@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
28004 *value &= 0xff;
28005 break;
28006 case 2:
28007- __asm__("lcall *(%%esi); cld\n\t"
28008+ __asm__("movw %w6, %%ds\n\t"
28009+ "lcall *%%ss:(%%esi); cld\n\t"
28010+ "push %%ss\n\t"
28011+ "pop %%ds\n\t"
28012 "jc 1f\n\t"
28013 "xor %%ah, %%ah\n"
28014 "1:"
28015@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
28016 : "1" (PCIBIOS_READ_CONFIG_WORD),
28017 "b" (bx),
28018 "D" ((long)reg),
28019- "S" (&pci_indirect));
28020+ "S" (&pci_indirect),
28021+ "r" (__PCIBIOS_DS));
28022 /*
28023 * Zero-extend the result beyond 16 bits, do not trust the
28024 * BIOS having done it:
28025@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
28026 *value &= 0xffff;
28027 break;
28028 case 4:
28029- __asm__("lcall *(%%esi); cld\n\t"
28030+ __asm__("movw %w6, %%ds\n\t"
28031+ "lcall *%%ss:(%%esi); cld\n\t"
28032+ "push %%ss\n\t"
28033+ "pop %%ds\n\t"
28034 "jc 1f\n\t"
28035 "xor %%ah, %%ah\n"
28036 "1:"
28037@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
28038 : "1" (PCIBIOS_READ_CONFIG_DWORD),
28039 "b" (bx),
28040 "D" ((long)reg),
28041- "S" (&pci_indirect));
28042+ "S" (&pci_indirect),
28043+ "r" (__PCIBIOS_DS));
28044 break;
28045 }
28046
28047@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
28048
28049 switch (len) {
28050 case 1:
28051- __asm__("lcall *(%%esi); cld\n\t"
28052+ __asm__("movw %w6, %%ds\n\t"
28053+ "lcall *%%ss:(%%esi); cld\n\t"
28054+ "push %%ss\n\t"
28055+ "pop %%ds\n\t"
28056 "jc 1f\n\t"
28057 "xor %%ah, %%ah\n"
28058 "1:"
28059@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
28060 "c" (value),
28061 "b" (bx),
28062 "D" ((long)reg),
28063- "S" (&pci_indirect));
28064+ "S" (&pci_indirect),
28065+ "r" (__PCIBIOS_DS));
28066 break;
28067 case 2:
28068- __asm__("lcall *(%%esi); cld\n\t"
28069+ __asm__("movw %w6, %%ds\n\t"
28070+ "lcall *%%ss:(%%esi); cld\n\t"
28071+ "push %%ss\n\t"
28072+ "pop %%ds\n\t"
28073 "jc 1f\n\t"
28074 "xor %%ah, %%ah\n"
28075 "1:"
28076@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
28077 "c" (value),
28078 "b" (bx),
28079 "D" ((long)reg),
28080- "S" (&pci_indirect));
28081+ "S" (&pci_indirect),
28082+ "r" (__PCIBIOS_DS));
28083 break;
28084 case 4:
28085- __asm__("lcall *(%%esi); cld\n\t"
28086+ __asm__("movw %w6, %%ds\n\t"
28087+ "lcall *%%ss:(%%esi); cld\n\t"
28088+ "push %%ss\n\t"
28089+ "pop %%ds\n\t"
28090 "jc 1f\n\t"
28091 "xor %%ah, %%ah\n"
28092 "1:"
28093@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
28094 "c" (value),
28095 "b" (bx),
28096 "D" ((long)reg),
28097- "S" (&pci_indirect));
28098+ "S" (&pci_indirect),
28099+ "r" (__PCIBIOS_DS));
28100 break;
28101 }
28102
28103@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
28104
28105 DBG("PCI: Fetching IRQ routing table... ");
28106 __asm__("push %%es\n\t"
28107+ "movw %w8, %%ds\n\t"
28108 "push %%ds\n\t"
28109 "pop %%es\n\t"
28110- "lcall *(%%esi); cld\n\t"
28111+ "lcall *%%ss:(%%esi); cld\n\t"
28112 "pop %%es\n\t"
28113+ "push %%ss\n\t"
28114+ "pop %%ds\n"
28115 "jc 1f\n\t"
28116 "xor %%ah, %%ah\n"
28117 "1:"
28118@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
28119 "1" (0),
28120 "D" ((long) &opt),
28121 "S" (&pci_indirect),
28122- "m" (opt)
28123+ "m" (opt),
28124+ "r" (__PCIBIOS_DS)
28125 : "memory");
28126 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
28127 if (ret & 0xff00)
28128@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
28129 {
28130 int ret;
28131
28132- __asm__("lcall *(%%esi); cld\n\t"
28133+ __asm__("movw %w5, %%ds\n\t"
28134+ "lcall *%%ss:(%%esi); cld\n\t"
28135+ "push %%ss\n\t"
28136+ "pop %%ds\n"
28137 "jc 1f\n\t"
28138 "xor %%ah, %%ah\n"
28139 "1:"
28140@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
28141 : "0" (PCIBIOS_SET_PCI_HW_INT),
28142 "b" ((dev->bus->number << 8) | dev->devfn),
28143 "c" ((irq << 8) | (pin + 10)),
28144- "S" (&pci_indirect));
28145+ "S" (&pci_indirect),
28146+ "r" (__PCIBIOS_DS));
28147 return !(ret & 0xff00);
28148 }
28149 EXPORT_SYMBOL(pcibios_set_irq_routing);
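
The rewritten bios32_service() above stops running the PCI BIOS on the kernel's flat 4 GiB __KERNEL_CS/__KERNEL_DS and instead builds dedicated __PCIBIOS_CS/__PCIBIOS_DS descriptors covering only the BIOS range, switching the limit to page granularity once it exceeds what byte granularity can express. A standalone sketch of just that limit/granularity computation (flag bit 3 is the granularity bit, as in the pack_descriptor() calls above; the numbers are hypothetical):

    #include <stdio.h>

    int main(void)
    {
            unsigned long length = 80UL << 20;  /* hypothetical BIOS span */
            unsigned char flags = 4;            /* 32-bit default-size segment */

            length += 16;                       /* pad: some BIOSes underreport */
            if (length >= 64UL << 20) {
                    length >>= 12;              /* limit counted in 4 KiB pages */
                    flags |= 8;                 /* set the granularity bit */
            }
            printf("limit=%#lx flags=%#x\n", length, flags);
            return 0;
    }
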
28150diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
28151index 56ab749..3cb792a 100644
28152--- a/arch/x86/pci/xen.c
28153+++ b/arch/x86/pci/xen.c
28154@@ -395,9 +395,9 @@ int __init pci_xen_init(void)
28155 #endif
28156
28157 #ifdef CONFIG_PCI_MSI
28158- x86_msi.setup_msi_irqs = xen_setup_msi_irqs;
28159- x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
28160- x86_msi.teardown_msi_irqs = xen_teardown_msi_irqs;
28161+ *(void **)&x86_msi.setup_msi_irqs = xen_setup_msi_irqs;
28162+ *(void **)&x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
28163+ *(void **)&x86_msi.teardown_msi_irqs = xen_teardown_msi_irqs;
28164 #endif
28165 return 0;
28166 }
28167@@ -416,8 +416,8 @@ int __init pci_xen_hvm_init(void)
28168 #endif
28169
28170 #ifdef CONFIG_PCI_MSI
28171- x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs;
28172- x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
28173+ *(void **)&x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs;
28174+ *(void **)&x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
28175 #endif
28176 return 0;
28177 }
28178@@ -474,9 +474,9 @@ int __init pci_xen_initial_domain(void)
28179 int irq;
28180
28181 #ifdef CONFIG_PCI_MSI
28182- x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs;
28183- x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
28184- x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs;
28185+ *(void **)&x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs;
28186+ *(void **)&x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
28187+ *(void **)&x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs;
28188 #endif
28189 xen_setup_acpi_sci();
28190 __acpi_register_gsi = acpi_register_gsi_xen;
28191diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
28192index ad44391..acef4b5 100644
28193--- a/arch/x86/platform/efi/efi.c
28194+++ b/arch/x86/platform/efi/efi.c
28195@@ -738,8 +738,8 @@ void __init efi_init(void)
28196 }
28197 #ifdef CONFIG_X86_32
28198 if (efi_is_native()) {
28199- x86_platform.get_wallclock = efi_get_time;
28200- x86_platform.set_wallclock = efi_set_rtc_mmss;
28201+ *(void **)&x86_platform.get_wallclock = efi_get_time;
28202+ *(void **)&x86_platform.set_wallclock = efi_set_rtc_mmss;
28203 }
28204 #endif
28205
28206diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
28207index 40e4469..1ab536e 100644
28208--- a/arch/x86/platform/efi/efi_32.c
28209+++ b/arch/x86/platform/efi/efi_32.c
28210@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
28211 {
28212 struct desc_ptr gdt_descr;
28213
28214+#ifdef CONFIG_PAX_KERNEXEC
28215+ struct desc_struct d;
28216+#endif
28217+
28218 local_irq_save(efi_rt_eflags);
28219
28220 load_cr3(initial_page_table);
28221 __flush_tlb_all();
28222
28223+#ifdef CONFIG_PAX_KERNEXEC
28224+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
28225+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
28226+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
28227+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
28228+#endif
28229+
28230 gdt_descr.address = __pa(get_cpu_gdt_table(0));
28231 gdt_descr.size = GDT_SIZE - 1;
28232 load_gdt(&gdt_descr);
28233@@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
28234 {
28235 struct desc_ptr gdt_descr;
28236
28237+#ifdef CONFIG_PAX_KERNEXEC
28238+ struct desc_struct d;
28239+
28240+ memset(&d, 0, sizeof d);
28241+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
28242+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
28243+#endif
28244+
28245 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
28246 gdt_descr.size = GDT_SIZE - 1;
28247 load_gdt(&gdt_descr);
28248diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
28249index fbe66e6..eae5e38 100644
28250--- a/arch/x86/platform/efi/efi_stub_32.S
28251+++ b/arch/x86/platform/efi/efi_stub_32.S
28252@@ -6,7 +6,9 @@
28253 */
28254
28255 #include <linux/linkage.h>
28256+#include <linux/init.h>
28257 #include <asm/page_types.h>
28258+#include <asm/segment.h>
28259
28260 /*
28261 * efi_call_phys(void *, ...) is a function with variable parameters.
28262@@ -20,7 +22,7 @@
28263 * service functions will comply with gcc calling convention, too.
28264 */
28265
28266-.text
28267+__INIT
28268 ENTRY(efi_call_phys)
28269 /*
28270 * 0. The function can only be called in Linux kernel. So CS has been
28271@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
28272 * The mapping of lower virtual memory has been created in prelog and
28273 * epilog.
28274 */
28275- movl $1f, %edx
28276- subl $__PAGE_OFFSET, %edx
28277- jmp *%edx
28278+#ifdef CONFIG_PAX_KERNEXEC
28279+ movl $(__KERNEXEC_EFI_DS), %edx
28280+ mov %edx, %ds
28281+ mov %edx, %es
28282+ mov %edx, %ss
28283+ addl $2f,(1f)
28284+ ljmp *(1f)
28285+
28286+__INITDATA
28287+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
28288+.previous
28289+
28290+2:
28291+ subl $2b,(1b)
28292+#else
28293+ jmp 1f-__PAGE_OFFSET
28294 1:
28295+#endif
28296
28297 /*
28298 * 2. Now on the top of stack is the return
28299@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
28300 * parameter 2, ..., param n. To make things easy, we save the return
28301 * address of efi_call_phys in a global variable.
28302 */
28303- popl %edx
28304- movl %edx, saved_return_addr
28305- /* get the function pointer into ECX*/
28306- popl %ecx
28307- movl %ecx, efi_rt_function_ptr
28308- movl $2f, %edx
28309- subl $__PAGE_OFFSET, %edx
28310- pushl %edx
28311+ popl (saved_return_addr)
28312+ popl (efi_rt_function_ptr)
28313
28314 /*
28315 * 3. Clear PG bit in %CR0.
28316@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
28317 /*
28318 * 5. Call the physical function.
28319 */
28320- jmp *%ecx
28321+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
28322
28323-2:
28324 /*
28325 * 6. After EFI runtime service returns, control will return to
28326 * following instruction. We'd better readjust stack pointer first.
28327@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
28328 movl %cr0, %edx
28329 orl $0x80000000, %edx
28330 movl %edx, %cr0
28331- jmp 1f
28332-1:
28333+
28334 /*
28335 * 8. Now restore the virtual mode from flat mode by
28336 * adding EIP with PAGE_OFFSET.
28337 */
28338- movl $1f, %edx
28339- jmp *%edx
28340+#ifdef CONFIG_PAX_KERNEXEC
28341+ movl $(__KERNEL_DS), %edx
28342+ mov %edx, %ds
28343+ mov %edx, %es
28344+ mov %edx, %ss
28345+ ljmp $(__KERNEL_CS),$1f
28346+#else
28347+ jmp 1f+__PAGE_OFFSET
28348+#endif
28349 1:
28350
28351 /*
28352 * 9. Balance the stack. And because EAX contain the return value,
28353 * we'd better not clobber it.
28354 */
28355- leal efi_rt_function_ptr, %edx
28356- movl (%edx), %ecx
28357- pushl %ecx
28358+ pushl (efi_rt_function_ptr)
28359
28360 /*
28361- * 10. Push the saved return address onto the stack and return.
28362+ * 10. Return to the saved return address.
28363 */
28364- leal saved_return_addr, %edx
28365- movl (%edx), %ecx
28366- pushl %ecx
28367- ret
28368+ jmpl *(saved_return_addr)
28369 ENDPROC(efi_call_phys)
28370 .previous
28371
28372-.data
28373+__INITDATA
28374 saved_return_addr:
28375 .long 0
28376 efi_rt_function_ptr:
28377diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
28378index 4c07cca..2c8427d 100644
28379--- a/arch/x86/platform/efi/efi_stub_64.S
28380+++ b/arch/x86/platform/efi/efi_stub_64.S
28381@@ -7,6 +7,7 @@
28382 */
28383
28384 #include <linux/linkage.h>
28385+#include <asm/alternative-asm.h>
28386
28387 #define SAVE_XMM \
28388 mov %rsp, %rax; \
28389@@ -40,6 +41,7 @@ ENTRY(efi_call0)
28390 call *%rdi
28391 addq $32, %rsp
28392 RESTORE_XMM
28393+ pax_force_retaddr 0, 1
28394 ret
28395 ENDPROC(efi_call0)
28396
28397@@ -50,6 +52,7 @@ ENTRY(efi_call1)
28398 call *%rdi
28399 addq $32, %rsp
28400 RESTORE_XMM
28401+ pax_force_retaddr 0, 1
28402 ret
28403 ENDPROC(efi_call1)
28404
28405@@ -60,6 +63,7 @@ ENTRY(efi_call2)
28406 call *%rdi
28407 addq $32, %rsp
28408 RESTORE_XMM
28409+ pax_force_retaddr 0, 1
28410 ret
28411 ENDPROC(efi_call2)
28412
28413@@ -71,6 +75,7 @@ ENTRY(efi_call3)
28414 call *%rdi
28415 addq $32, %rsp
28416 RESTORE_XMM
28417+ pax_force_retaddr 0, 1
28418 ret
28419 ENDPROC(efi_call3)
28420
28421@@ -83,6 +88,7 @@ ENTRY(efi_call4)
28422 call *%rdi
28423 addq $32, %rsp
28424 RESTORE_XMM
28425+ pax_force_retaddr 0, 1
28426 ret
28427 ENDPROC(efi_call4)
28428
28429@@ -96,6 +102,7 @@ ENTRY(efi_call5)
28430 call *%rdi
28431 addq $48, %rsp
28432 RESTORE_XMM
28433+ pax_force_retaddr 0, 1
28434 ret
28435 ENDPROC(efi_call5)
28436
28437@@ -112,5 +119,6 @@ ENTRY(efi_call6)
28438 call *%rdi
28439 addq $48, %rsp
28440 RESTORE_XMM
28441+ pax_force_retaddr 0, 1
28442 ret
28443 ENDPROC(efi_call6)
28444diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
28445index fd41a92..bc8091d 100644
28446--- a/arch/x86/platform/mrst/mrst.c
28447+++ b/arch/x86/platform/mrst/mrst.c
28448@@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
28449 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
28450 int sfi_mrtc_num;
28451
28452-static void mrst_power_off(void)
28453+static __noreturn void mrst_power_off(void)
28454 {
28455+ BUG();
28456 }
28457
28458-static void mrst_reboot(void)
28459+static __noreturn void mrst_reboot(void)
28460 {
28461 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
28462+ BUG();
28463 }
28464
28465 /* parse all the mtimer info to a static mtimer array */
28466@@ -233,14 +235,14 @@ static void __init mrst_time_init(void)
28467 case MRST_TIMER_APBT_ONLY:
28468 break;
28469 case MRST_TIMER_LAPIC_APBT:
28470- x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
28471- x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
28472+ *(void **)&x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
28473+ *(void **)&x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
28474 break;
28475 default:
28476 if (!boot_cpu_has(X86_FEATURE_ARAT))
28477 break;
28478- x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
28479- x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
28480+ *(void **)&x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
28481+ *(void **)&x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
28482 return;
28483 }
28484 /* we need at least one APB timer */
28485@@ -282,35 +284,35 @@ static unsigned char mrst_get_nmi_reason(void)
28486 */
28487 void __init x86_mrst_early_setup(void)
28488 {
28489- x86_init.resources.probe_roms = x86_init_noop;
28490- x86_init.resources.reserve_resources = x86_init_noop;
28491+ *(void **)&x86_init.resources.probe_roms = x86_init_noop;
28492+ *(void **)&x86_init.resources.reserve_resources = x86_init_noop;
28493
28494- x86_init.timers.timer_init = mrst_time_init;
28495- x86_init.timers.setup_percpu_clockev = x86_init_noop;
28496+ *(void **)&x86_init.timers.timer_init = mrst_time_init;
28497+ *(void **)&x86_init.timers.setup_percpu_clockev = x86_init_noop;
28498
28499- x86_init.irqs.pre_vector_init = x86_init_noop;
28500+ *(void **)&x86_init.irqs.pre_vector_init = x86_init_noop;
28501
28502- x86_init.oem.arch_setup = mrst_arch_setup;
28503+ *(void **)&x86_init.oem.arch_setup = mrst_arch_setup;
28504
28505- x86_cpuinit.setup_percpu_clockev = apbt_setup_secondary_clock;
28506+ *(void **)&x86_cpuinit.setup_percpu_clockev = apbt_setup_secondary_clock;
28507
28508- x86_platform.calibrate_tsc = mrst_calibrate_tsc;
28509- x86_platform.i8042_detect = mrst_i8042_detect;
28510- x86_init.timers.wallclock_init = mrst_rtc_init;
28511- x86_platform.get_nmi_reason = mrst_get_nmi_reason;
28512+ *(void **)&x86_platform.calibrate_tsc = mrst_calibrate_tsc;
28513+ *(void **)&x86_platform.i8042_detect = mrst_i8042_detect;
28514+ *(void **)&x86_init.timers.wallclock_init = mrst_rtc_init;
28515+ *(void **)&x86_platform.get_nmi_reason = mrst_get_nmi_reason;
28516
28517- x86_init.pci.init = pci_mrst_init;
28518- x86_init.pci.fixup_irqs = x86_init_noop;
28519+ *(void **)&x86_init.pci.init = pci_mrst_init;
28520+ *(void **)&x86_init.pci.fixup_irqs = x86_init_noop;
28521
28522 legacy_pic = &null_legacy_pic;
28523
28524 /* Moorestown specific power_off/restart method */
28525 pm_power_off = mrst_power_off;
28526- machine_ops.emergency_restart = mrst_reboot;
28527+ *(void **)&machine_ops.emergency_restart = mrst_reboot;
28528
28529 /* Avoid searching for BIOS MP tables */
28530- x86_init.mpparse.find_smp_config = x86_init_noop;
28531- x86_init.mpparse.get_smp_config = x86_init_uint_noop;
28532+ *(void **)&x86_init.mpparse.find_smp_config = x86_init_noop;
28533+ *(void **)&x86_init.mpparse.get_smp_config = x86_init_uint_noop;
28534 set_bit(MP_BUS_ISA, mp_bus_not_pci);
28535 }
28536
28537diff --git a/arch/x86/platform/mrst/vrtc.c b/arch/x86/platform/mrst/vrtc.c
28538index 225bd0f..22e8086 100644
28539--- a/arch/x86/platform/mrst/vrtc.c
28540+++ b/arch/x86/platform/mrst/vrtc.c
28541@@ -120,8 +120,8 @@ void __init mrst_rtc_init(void)
28542
28543 vrtc_virt_base = (void __iomem *)set_fixmap_offset_nocache(FIX_LNW_VRTC,
28544 vrtc_paddr);
28545- x86_platform.get_wallclock = vrtc_get_time;
28546- x86_platform.set_wallclock = vrtc_set_mmss;
28547+ *(void **)&x86_platform.get_wallclock = vrtc_get_time;
28548+ *(void **)&x86_platform.set_wallclock = vrtc_set_mmss;
28549 }
28550
28551 /*
28552diff --git a/arch/x86/platform/olpc/olpc.c b/arch/x86/platform/olpc/olpc.c
28553index 2737608..0d62cc2 100644
28554--- a/arch/x86/platform/olpc/olpc.c
28555+++ b/arch/x86/platform/olpc/olpc.c
28556@@ -395,7 +395,7 @@ static int __init olpc_init(void)
28557 * XO-1 only. */
28558 if (olpc_platform_info.boardrev < olpc_board_pre(0xd0) &&
28559 !cs5535_has_vsa2())
28560- x86_init.pci.arch_init = pci_olpc_init;
28561+ *(void **)&x86_init.pci.arch_init = pci_olpc_init;
28562 #endif
28563
28564 if (olpc_platform_info.boardrev < olpc_board_pre(0xd0)) { /* XO-1 */
28565diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
28566index d6ee929..3637cb5 100644
28567--- a/arch/x86/platform/olpc/olpc_dt.c
28568+++ b/arch/x86/platform/olpc/olpc_dt.c
28569@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
28570 return res;
28571 }
28572
28573-static struct of_pdt_ops prom_olpc_ops __initdata = {
28574+static struct of_pdt_ops prom_olpc_ops __initconst = {
28575 .nextprop = olpc_dt_nextprop,
28576 .getproplen = olpc_dt_getproplen,
28577 .getproperty = olpc_dt_getproperty,
28578diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
28579index 218cdb1..c1178eb 100644
28580--- a/arch/x86/power/cpu.c
28581+++ b/arch/x86/power/cpu.c
28582@@ -132,7 +132,7 @@ static void do_fpu_end(void)
28583 static void fix_processor_context(void)
28584 {
28585 int cpu = smp_processor_id();
28586- struct tss_struct *t = &per_cpu(init_tss, cpu);
28587+ struct tss_struct *t = init_tss + cpu;
28588
28589 set_tss_desc(cpu, t); /*
28590 * This just modifies memory; should not be
28591@@ -142,8 +142,6 @@ static void fix_processor_context(void)
28592 */
28593
28594 #ifdef CONFIG_X86_64
28595- get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
28596-
28597 syscall_init(); /* This sets MSR_*STAR and related */
28598 #endif
28599 load_TR_desc(); /* This does ltr */
28600diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
28601index cbca565..bae7133 100644
28602--- a/arch/x86/realmode/init.c
28603+++ b/arch/x86/realmode/init.c
28604@@ -62,7 +62,13 @@ void __init setup_real_mode(void)
28605 __va(real_mode_header->trampoline_header);
28606
28607 #ifdef CONFIG_X86_32
28608- trampoline_header->start = __pa(startup_32_smp);
28609+ trampoline_header->start = __pa(ktla_ktva(startup_32_smp));
28610+
28611+#ifdef CONFIG_PAX_KERNEXEC
28612+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
28613+#endif
28614+
28615+ trampoline_header->boot_cs = __BOOT_CS;
28616 trampoline_header->gdt_limit = __BOOT_DS + 7;
28617 trampoline_header->gdt_base = __pa(boot_gdt);
28618 #else
28619diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
28620index 8869287..d577672 100644
28621--- a/arch/x86/realmode/rm/Makefile
28622+++ b/arch/x86/realmode/rm/Makefile
28623@@ -78,5 +78,8 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
28624 $(call cc-option, -fno-unit-at-a-time)) \
28625 $(call cc-option, -fno-stack-protector) \
28626 $(call cc-option, -mpreferred-stack-boundary=2)
28627+ifdef CONSTIFY_PLUGIN
28628+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
28629+endif
28630 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
28631 GCOV_PROFILE := n
28632diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
28633index a28221d..93c40f1 100644
28634--- a/arch/x86/realmode/rm/header.S
28635+++ b/arch/x86/realmode/rm/header.S
28636@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
28637 #endif
28638 /* APM/BIOS reboot */
28639 .long pa_machine_real_restart_asm
28640-#ifdef CONFIG_X86_64
28641+#ifdef CONFIG_X86_32
28642+ .long __KERNEL_CS
28643+#else
28644 .long __KERNEL32_CS
28645 #endif
28646 END(real_mode_header)
28647diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
28648index c1b2791..f9e31c7 100644
28649--- a/arch/x86/realmode/rm/trampoline_32.S
28650+++ b/arch/x86/realmode/rm/trampoline_32.S
28651@@ -25,6 +25,12 @@
28652 #include <asm/page_types.h>
28653 #include "realmode.h"
28654
28655+#ifdef CONFIG_PAX_KERNEXEC
28656+#define ta(X) (X)
28657+#else
28658+#define ta(X) (pa_ ## X)
28659+#endif
28660+
28661 .text
28662 .code16
28663
28664@@ -39,8 +45,6 @@ ENTRY(trampoline_start)
28665
28666 cli # We should be safe anyway
28667
28668- movl tr_start, %eax # where we need to go
28669-
28670 movl $0xA5A5A5A5, trampoline_status
28671 # write marker for master knows we're running
28672
28673@@ -56,7 +60,7 @@ ENTRY(trampoline_start)
28674 movw $1, %dx # protected mode (PE) bit
28675 lmsw %dx # into protected mode
28676
28677- ljmpl $__BOOT_CS, $pa_startup_32
28678+ ljmpl *(trampoline_header)
28679
28680 .section ".text32","ax"
28681 .code32
28682@@ -67,7 +71,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
28683 .balign 8
28684 GLOBAL(trampoline_header)
28685 tr_start: .space 4
28686- tr_gdt_pad: .space 2
28687+ tr_boot_cs: .space 2
28688 tr_gdt: .space 6
28689 END(trampoline_header)
28690
28691diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
28692index bb360dc..3e5945f 100644
28693--- a/arch/x86/realmode/rm/trampoline_64.S
28694+++ b/arch/x86/realmode/rm/trampoline_64.S
28695@@ -107,7 +107,7 @@ ENTRY(startup_32)
28696 wrmsr
28697
28698 # Enable paging and in turn activate Long Mode
28699- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
28700+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
28701 movl %eax, %cr0
28702
28703 /*
28704diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
28705index 5a1847d..deccb30 100644
28706--- a/arch/x86/tools/relocs.c
28707+++ b/arch/x86/tools/relocs.c
28708@@ -12,10 +12,13 @@
28709 #include <regex.h>
28710 #include <tools/le_byteshift.h>
28711
28712+#include "../../../include/generated/autoconf.h"
28713+
28714 static void die(char *fmt, ...);
28715
28716 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
28717 static Elf32_Ehdr ehdr;
28718+static Elf32_Phdr *phdr;
28719 static unsigned long reloc_count, reloc_idx;
28720 static unsigned long *relocs;
28721 static unsigned long reloc16_count, reloc16_idx;
28722@@ -330,9 +333,39 @@ static void read_ehdr(FILE *fp)
28723 }
28724 }
28725
28726+static void read_phdrs(FILE *fp)
28727+{
28728+ unsigned int i;
28729+
28730+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
28731+ if (!phdr) {
28732+ die("Unable to allocate %d program headers\n",
28733+ ehdr.e_phnum);
28734+ }
28735+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
28736+ die("Seek to %d failed: %s\n",
28737+ ehdr.e_phoff, strerror(errno));
28738+ }
28739+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
28740+ die("Cannot read ELF program headers: %s\n",
28741+ strerror(errno));
28742+ }
28743+ for(i = 0; i < ehdr.e_phnum; i++) {
28744+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
28745+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
28746+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
28747+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
28748+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
28749+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
28750+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
28751+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
28752+ }
28753+
28754+}
28755+
28756 static void read_shdrs(FILE *fp)
28757 {
28758- int i;
28759+ unsigned int i;
28760 Elf32_Shdr shdr;
28761
28762 secs = calloc(ehdr.e_shnum, sizeof(struct section));
28763@@ -367,7 +400,7 @@ static void read_shdrs(FILE *fp)
28764
28765 static void read_strtabs(FILE *fp)
28766 {
28767- int i;
28768+ unsigned int i;
28769 for (i = 0; i < ehdr.e_shnum; i++) {
28770 struct section *sec = &secs[i];
28771 if (sec->shdr.sh_type != SHT_STRTAB) {
28772@@ -392,7 +425,7 @@ static void read_strtabs(FILE *fp)
28773
28774 static void read_symtabs(FILE *fp)
28775 {
28776- int i,j;
28777+ unsigned int i,j;
28778 for (i = 0; i < ehdr.e_shnum; i++) {
28779 struct section *sec = &secs[i];
28780 if (sec->shdr.sh_type != SHT_SYMTAB) {
28781@@ -423,9 +456,11 @@ static void read_symtabs(FILE *fp)
28782 }
28783
28784
28785-static void read_relocs(FILE *fp)
28786+static void read_relocs(FILE *fp, int use_real_mode)
28787 {
28788- int i,j;
28789+ unsigned int i,j;
28790+ uint32_t base;
28791+
28792 for (i = 0; i < ehdr.e_shnum; i++) {
28793 struct section *sec = &secs[i];
28794 if (sec->shdr.sh_type != SHT_REL) {
28795@@ -445,9 +480,22 @@ static void read_relocs(FILE *fp)
28796 die("Cannot read symbol table: %s\n",
28797 strerror(errno));
28798 }
28799+ base = 0;
28800+
28801+#ifdef CONFIG_X86_32
28802+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
28803+ if (phdr[j].p_type != PT_LOAD )
28804+ continue;
28805+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
28806+ continue;
28807+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
28808+ break;
28809+ }
28810+#endif
28811+
28812 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
28813 Elf32_Rel *rel = &sec->reltab[j];
28814- rel->r_offset = elf32_to_cpu(rel->r_offset);
28815+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
28816 rel->r_info = elf32_to_cpu(rel->r_info);
28817 }
28818 }
28819@@ -456,13 +504,13 @@ static void read_relocs(FILE *fp)
28820
28821 static void print_absolute_symbols(void)
28822 {
28823- int i;
28824+ unsigned int i;
28825 printf("Absolute symbols\n");
28826 printf(" Num: Value Size Type Bind Visibility Name\n");
28827 for (i = 0; i < ehdr.e_shnum; i++) {
28828 struct section *sec = &secs[i];
28829 char *sym_strtab;
28830- int j;
28831+ unsigned int j;
28832
28833 if (sec->shdr.sh_type != SHT_SYMTAB) {
28834 continue;
28835@@ -489,14 +537,14 @@ static void print_absolute_symbols(void)
28836
28837 static void print_absolute_relocs(void)
28838 {
28839- int i, printed = 0;
28840+ unsigned int i, printed = 0;
28841
28842 for (i = 0; i < ehdr.e_shnum; i++) {
28843 struct section *sec = &secs[i];
28844 struct section *sec_applies, *sec_symtab;
28845 char *sym_strtab;
28846 Elf32_Sym *sh_symtab;
28847- int j;
28848+ unsigned int j;
28849 if (sec->shdr.sh_type != SHT_REL) {
28850 continue;
28851 }
28852@@ -558,13 +606,13 @@ static void print_absolute_relocs(void)
28853 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
28854 int use_real_mode)
28855 {
28856- int i;
28857+ unsigned int i;
28858 /* Walk through the relocations */
28859 for (i = 0; i < ehdr.e_shnum; i++) {
28860 char *sym_strtab;
28861 Elf32_Sym *sh_symtab;
28862 struct section *sec_applies, *sec_symtab;
28863- int j;
28864+ unsigned int j;
28865 struct section *sec = &secs[i];
28866
28867 if (sec->shdr.sh_type != SHT_REL) {
28868@@ -588,6 +636,24 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
28869 sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
28870 r_type = ELF32_R_TYPE(rel->r_info);
28871
28872+ if (!use_real_mode) {
28873+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
28874+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
28875+ continue;
28876+
28877+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
28878+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
28879+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
28880+ continue;
28881+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
28882+ continue;
28883+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
28884+ continue;
28885+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
28886+ continue;
28887+#endif
28888+ }
28889+
28890 shn_abs = sym->st_shndx == SHN_ABS;
28891
28892 switch (r_type) {
28893@@ -681,7 +747,7 @@ static int write32(unsigned int v, FILE *f)
28894
28895 static void emit_relocs(int as_text, int use_real_mode)
28896 {
28897- int i;
28898+ unsigned int i;
28899 /* Count how many relocations I have and allocate space for them. */
28900 reloc_count = 0;
28901 walk_relocs(count_reloc, use_real_mode);
28902@@ -808,10 +874,11 @@ int main(int argc, char **argv)
28903 fname, strerror(errno));
28904 }
28905 read_ehdr(fp);
28906+ read_phdrs(fp);
28907 read_shdrs(fp);
28908 read_strtabs(fp);
28909 read_symtabs(fp);
28910- read_relocs(fp);
28911+ read_relocs(fp, use_real_mode);
28912 if (show_absolute_syms) {
28913 print_absolute_symbols();
28914 return 0;
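
The read_relocs() change above rebases every 32-bit relocation offset by base = CONFIG_PAGE_OFFSET + p_paddr - p_vaddr of the PT_LOAD segment containing it, which is zero for a conventionally linked kernel but nonzero once KERNEXEC links sections at shifted virtual addresses. A toy rendering of the arithmetic with hypothetical numbers:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t page_offset = 0xc0000000;  /* typical 32-bit CONFIG_PAGE_OFFSET */
            uint32_t p_vaddr = 0xc1000000;      /* segment's linked virtual address  */
            uint32_t p_paddr = 0x02000000;      /* segment's physical load address   */
            uint32_t r_offset = 0xc1234567;     /* relocation as stored in the ELF   */

            uint32_t base = page_offset + p_paddr - p_vaddr;  /* 0x01000000 here */
            printf("base=%#x adjusted r_offset=%#x\n", base, r_offset + base);
            return 0;
    }
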
28915diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
28916index fd14be1..e3c79c0 100644
28917--- a/arch/x86/vdso/Makefile
28918+++ b/arch/x86/vdso/Makefile
28919@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
28920 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
28921 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
28922
28923-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
28924+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
28925 GCOV_PROFILE := n
28926
28927 #
28928diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
28929index 0faad64..39ef157 100644
28930--- a/arch/x86/vdso/vdso32-setup.c
28931+++ b/arch/x86/vdso/vdso32-setup.c
28932@@ -25,6 +25,7 @@
28933 #include <asm/tlbflush.h>
28934 #include <asm/vdso.h>
28935 #include <asm/proto.h>
28936+#include <asm/mman.h>
28937
28938 enum {
28939 VDSO_DISABLED = 0,
28940@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
28941 void enable_sep_cpu(void)
28942 {
28943 int cpu = get_cpu();
28944- struct tss_struct *tss = &per_cpu(init_tss, cpu);
28945+ struct tss_struct *tss = init_tss + cpu;
28946
28947 if (!boot_cpu_has(X86_FEATURE_SEP)) {
28948 put_cpu();
28949@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
28950 gate_vma.vm_start = FIXADDR_USER_START;
28951 gate_vma.vm_end = FIXADDR_USER_END;
28952 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
28953- gate_vma.vm_page_prot = __P101;
28954+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
28955
28956 return 0;
28957 }
28958@@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
28959 if (compat)
28960 addr = VDSO_HIGH_BASE;
28961 else {
28962- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
28963+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
28964 if (IS_ERR_VALUE(addr)) {
28965 ret = addr;
28966 goto up_fail;
28967 }
28968 }
28969
28970- current->mm->context.vdso = (void *)addr;
28971+ current->mm->context.vdso = addr;
28972
28973 if (compat_uses_vma || !compat) {
28974 /*
28975@@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
28976 }
28977
28978 current_thread_info()->sysenter_return =
28979- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
28980+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
28981
28982 up_fail:
28983 if (ret)
28984- current->mm->context.vdso = NULL;
28985+ current->mm->context.vdso = 0;
28986
28987 up_write(&mm->mmap_sem);
28988
28989@@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
28990
28991 const char *arch_vma_name(struct vm_area_struct *vma)
28992 {
28993- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
28994+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
28995 return "[vdso]";
28996+
28997+#ifdef CONFIG_PAX_SEGMEXEC
28998+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
28999+ return "[vdso]";
29000+#endif
29001+
29002 return NULL;
29003 }
29004
29005@@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
29006 * Check to see if the corresponding task was created in compat vdso
29007 * mode.
29008 */
29009- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
29010+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
29011 return &gate_vma;
29012 return NULL;
29013 }
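
A recurring change in this file (and in vma.c below) is that mm->context.vdso is now treated as an unsigned long rather than a void *, which removes the casts at every comparison site. Assuming a mm_context_t that carries `unsigned long vdso;`, the vma check reduces to a plain integer compare, roughly as follows (vm_mirror is the PaX SEGMEXEC mirror pointer added elsewhere in this patch):

/* Sketch only, not the final kernel code. */
static int vma_is_vdso(const struct vm_area_struct *vma)
{
	if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
		return 1;
#ifdef CONFIG_PAX_SEGMEXEC
	if (vma->vm_mm && vma->vm_mirror &&
	    vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
		return 1;
#endif
	return 0;
}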
29014diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
29015index 00aaf04..4a26505 100644
29016--- a/arch/x86/vdso/vma.c
29017+++ b/arch/x86/vdso/vma.c
29018@@ -16,8 +16,6 @@
29019 #include <asm/vdso.h>
29020 #include <asm/page.h>
29021
29022-unsigned int __read_mostly vdso_enabled = 1;
29023-
29024 extern char vdso_start[], vdso_end[];
29025 extern unsigned short vdso_sync_cpuid;
29026
29027@@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
29028 * unaligned here as a result of stack start randomization.
29029 */
29030 addr = PAGE_ALIGN(addr);
29031- addr = align_addr(addr, NULL, ALIGN_VDSO);
29032
29033 return addr;
29034 }
29035@@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
29036 unsigned size)
29037 {
29038 struct mm_struct *mm = current->mm;
29039- unsigned long addr;
29040+ unsigned long addr = 0;
29041 int ret;
29042
29043- if (!vdso_enabled)
29044- return 0;
29045-
29046 down_write(&mm->mmap_sem);
29047+
29048+#ifdef CONFIG_PAX_RANDMMAP
29049+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
29050+#endif
29051+
29052 addr = vdso_addr(mm->start_stack, size);
29053+ addr = align_addr(addr, NULL, ALIGN_VDSO);
29054 addr = get_unmapped_area(NULL, addr, size, 0, 0);
29055 if (IS_ERR_VALUE(addr)) {
29056 ret = addr;
29057 goto up_fail;
29058 }
29059
29060- current->mm->context.vdso = (void *)addr;
29061+ mm->context.vdso = addr;
29062
29063 ret = install_special_mapping(mm, addr, size,
29064 VM_READ|VM_EXEC|
29065 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
29066 pages);
29067- if (ret) {
29068- current->mm->context.vdso = NULL;
29069- goto up_fail;
29070- }
29071+ if (ret)
29072+ mm->context.vdso = 0;
29073
29074 up_fail:
29075 up_write(&mm->mmap_sem);
29076@@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
29077 vdsox32_size);
29078 }
29079 #endif
29080-
29081-static __init int vdso_setup(char *s)
29082-{
29083- vdso_enabled = simple_strtoul(s, NULL, 0);
29084- return 0;
29085-}
29086-__setup("vdso=", vdso_setup);
29087diff --git a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c
29088index 7005ced..530d6eb 100644
29089--- a/arch/x86/xen/apic.c
29090+++ b/arch/x86/xen/apic.c
29091@@ -30,5 +30,5 @@ static unsigned int xen_io_apic_read(unsigned apic, unsigned reg)
29092
29093 void __init xen_init_apic(void)
29094 {
29095- x86_io_apic_ops.read = xen_io_apic_read;
29096+ *(void **)&x86_io_apic_ops.read = xen_io_apic_read;
29097 }
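
This one-line hunk is the template for dozens of Xen hunks that follow: under the grsecurity constify plugin, ops tables built purely from function pointers become read-only after init, so a normal member assignment is no longer allowed; boot-time overrides are instead written through a void ** alias of the slot. In isolation the idiom looks like this (illustrative struct, not the kernel's definition):

struct io_ops_sketch {
	unsigned int (*read)(unsigned int apic, unsigned int reg);
};

static unsigned int sketch_read(unsigned int apic, unsigned int reg)
{
	return 0;	/* placeholder backend */
}

static void install_ops(struct io_ops_sketch *ops)
{
	/* Writing through a void ** sidesteps the const qualification the
	 * plugin attaches to the member, mirroring the hunk above. */
	*(void **)&ops->read = sketch_read;
}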
29098diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
29099index 586d838..7082fc8 100644
29100--- a/arch/x86/xen/enlighten.c
29101+++ b/arch/x86/xen/enlighten.c
29102@@ -99,8 +99,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
29103
29104 struct shared_info xen_dummy_shared_info;
29105
29106-void *xen_initial_gdt;
29107-
29108 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
29109 __read_mostly int xen_have_vector_callback;
29110 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
29111@@ -523,7 +521,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
29112 unsigned long va = dtr->address;
29113 unsigned int size = dtr->size + 1;
29114 unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
29115- unsigned long frames[pages];
29116+ unsigned long frames[65536 / PAGE_SIZE];
29117 int f;
29118
29119 /*
29120@@ -918,21 +916,21 @@ static u32 xen_safe_apic_wait_icr_idle(void)
29121
29122 static void set_xen_basic_apic_ops(void)
29123 {
29124- apic->read = xen_apic_read;
29125- apic->write = xen_apic_write;
29126- apic->icr_read = xen_apic_icr_read;
29127- apic->icr_write = xen_apic_icr_write;
29128- apic->wait_icr_idle = xen_apic_wait_icr_idle;
29129- apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
29130- apic->set_apic_id = xen_set_apic_id;
29131- apic->get_apic_id = xen_get_apic_id;
29132+ *(void **)&apic->read = xen_apic_read;
29133+ *(void **)&apic->write = xen_apic_write;
29134+ *(void **)&apic->icr_read = xen_apic_icr_read;
29135+ *(void **)&apic->icr_write = xen_apic_icr_write;
29136+ *(void **)&apic->wait_icr_idle = xen_apic_wait_icr_idle;
29137+ *(void **)&apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
29138+ *(void **)&apic->set_apic_id = xen_set_apic_id;
29139+ *(void **)&apic->get_apic_id = xen_get_apic_id;
29140
29141 #ifdef CONFIG_SMP
29142- apic->send_IPI_allbutself = xen_send_IPI_allbutself;
29143- apic->send_IPI_mask_allbutself = xen_send_IPI_mask_allbutself;
29144- apic->send_IPI_mask = xen_send_IPI_mask;
29145- apic->send_IPI_all = xen_send_IPI_all;
29146- apic->send_IPI_self = xen_send_IPI_self;
29147+ *(void **)&apic->send_IPI_allbutself = xen_send_IPI_allbutself;
29148+ *(void **)&apic->send_IPI_mask_allbutself = xen_send_IPI_mask_allbutself;
29149+ *(void **)&apic->send_IPI_mask = xen_send_IPI_mask;
29150+ *(void **)&apic->send_IPI_all = xen_send_IPI_all;
29151+ *(void **)&apic->send_IPI_self = xen_send_IPI_self;
29152 #endif
29153 }
29154
29155@@ -1222,30 +1220,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
29156 #endif
29157 };
29158
29159-static void xen_reboot(int reason)
29160+static __noreturn void xen_reboot(int reason)
29161 {
29162 struct sched_shutdown r = { .reason = reason };
29163
29164- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
29165- BUG();
29166+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
29167+ BUG();
29168 }
29169
29170-static void xen_restart(char *msg)
29171+static __noreturn void xen_restart(char *msg)
29172 {
29173 xen_reboot(SHUTDOWN_reboot);
29174 }
29175
29176-static void xen_emergency_restart(void)
29177+static __noreturn void xen_emergency_restart(void)
29178 {
29179 xen_reboot(SHUTDOWN_reboot);
29180 }
29181
29182-static void xen_machine_halt(void)
29183+static __noreturn void xen_machine_halt(void)
29184 {
29185 xen_reboot(SHUTDOWN_poweroff);
29186 }
29187
29188-static void xen_machine_power_off(void)
29189+static __noreturn void xen_machine_power_off(void)
29190 {
29191 if (pm_power_off)
29192 pm_power_off();
29193@@ -1290,14 +1288,14 @@ static const struct machine_ops xen_machine_ops __initconst = {
29194 */
29195 static void __init xen_setup_stackprotector(void)
29196 {
29197- pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
29198- pv_cpu_ops.load_gdt = xen_load_gdt_boot;
29199+ *(void **)&pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
29200+ *(void **)&pv_cpu_ops.load_gdt = xen_load_gdt_boot;
29201
29202 setup_stack_canary_segment(0);
29203 switch_to_new_gdt(0);
29204
29205- pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
29206- pv_cpu_ops.load_gdt = xen_load_gdt;
29207+ *(void **)&pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
29208+ *(void **)&pv_cpu_ops.load_gdt = xen_load_gdt;
29209 }
29210
29211 /* First C function to be called on Xen boot */
29212@@ -1315,13 +1313,13 @@ asmlinkage void __init xen_start_kernel(void)
29213
29214 /* Install Xen paravirt ops */
29215 pv_info = xen_info;
29216- pv_init_ops = xen_init_ops;
29217- pv_cpu_ops = xen_cpu_ops;
29218- pv_apic_ops = xen_apic_ops;
29219+ memcpy((void *)&pv_init_ops, &xen_init_ops, sizeof pv_init_ops);
29220+ memcpy((void *)&pv_cpu_ops, &xen_cpu_ops, sizeof pv_cpu_ops);
29221+ memcpy((void *)&pv_apic_ops, &xen_apic_ops, sizeof pv_apic_ops);
29222
29223- x86_init.resources.memory_setup = xen_memory_setup;
29224- x86_init.oem.arch_setup = xen_arch_setup;
29225- x86_init.oem.banner = xen_banner;
29226+ *(void **)&x86_init.resources.memory_setup = xen_memory_setup;
29227+ *(void **)&x86_init.oem.arch_setup = xen_arch_setup;
29228+ *(void **)&x86_init.oem.banner = xen_banner;
29229
29230 xen_init_time_ops();
29231
29232@@ -1347,7 +1345,17 @@ asmlinkage void __init xen_start_kernel(void)
29233 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
29234
29235 /* Work out if we support NX */
29236- x86_configure_nx();
29237+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
29238+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
29239+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
29240+ unsigned l, h;
29241+
29242+ __supported_pte_mask |= _PAGE_NX;
29243+ rdmsr(MSR_EFER, l, h);
29244+ l |= EFER_NX;
29245+ wrmsr(MSR_EFER, l, h);
29246+ }
29247+#endif
29248
29249 xen_setup_features();
29250
29251@@ -1376,14 +1384,7 @@ asmlinkage void __init xen_start_kernel(void)
29252 pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
29253 }
29254
29255- machine_ops = xen_machine_ops;
29256-
29257- /*
29258- * The only reliable way to retain the initial address of the
29259- * percpu gdt_page is to remember it here, so we can go and
29260- * mark it RW later, when the initial percpu area is freed.
29261- */
29262- xen_initial_gdt = &per_cpu(gdt_page, 0);
29263+ memcpy((void *)&machine_ops, &xen_machine_ops, sizeof machine_ops);
29264
29265 xen_smp_init();
29266
29267@@ -1450,7 +1451,7 @@ asmlinkage void __init xen_start_kernel(void)
29268 add_preferred_console("tty", 0, NULL);
29269 add_preferred_console("hvc", 0, NULL);
29270 if (pci_xen)
29271- x86_init.pci.arch_init = pci_xen_init;
29272+ *(void **)&x86_init.pci.arch_init = pci_xen_init;
29273 } else {
29274 const struct dom0_vga_console_info *info =
29275 (void *)((char *)xen_start_info +
29276@@ -1476,8 +1477,8 @@ asmlinkage void __init xen_start_kernel(void)
29277 xen_acpi_sleep_register();
29278
29279 /* Avoid searching for BIOS MP tables */
29280- x86_init.mpparse.find_smp_config = x86_init_noop;
29281- x86_init.mpparse.get_smp_config = x86_init_uint_noop;
29282+ *(void **)&x86_init.mpparse.find_smp_config = x86_init_noop;
29283+ *(void **)&x86_init.mpparse.get_smp_config = x86_init_uint_noop;
29284 }
29285 #ifdef CONFIG_PCI
29286 /* PCI BIOS service won't work from a PV guest. */
29287@@ -1583,7 +1584,7 @@ static void __init xen_hvm_guest_init(void)
29288 xen_hvm_smp_init();
29289 register_cpu_notifier(&xen_hvm_cpu_notifier);
29290 xen_unplug_emulated_devices();
29291- x86_init.irqs.intr_init = xen_init_IRQ;
29292+ *(void **)&x86_init.irqs.intr_init = xen_init_IRQ;
29293 xen_hvm_init_time_ops();
29294 xen_hvm_init_mmu_ops();
29295 }
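
Among the enlighten.c changes, note the replacement of the variable-length array `frames[pages]` with a fixed `frames[65536 / PAGE_SIZE]`: the x86 GDT is architecturally capped at 64 KiB (8192 descriptors of 8 bytes each), so a fixed bound always suffices and the unbounded stack allocation disappears. A userspace model of the sizing, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE     4096UL
#define GDT_MAX_BYTES 65536UL	/* 8192 descriptors * 8 bytes */

int main(void)
{
	unsigned long frames[GDT_MAX_BYTES / PAGE_SIZE];	/* 16 slots, no VLA */

	printf("fixed frame slots: %zu\n", sizeof(frames) / sizeof(frames[0]));
	return 0;
}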
29296diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
29297index 01a4dc0..3ca0cc9 100644
29298--- a/arch/x86/xen/irq.c
29299+++ b/arch/x86/xen/irq.c
29300@@ -130,5 +130,5 @@ static const struct pv_irq_ops xen_irq_ops __initconst = {
29301 void __init xen_init_irq_ops(void)
29302 {
29303 pv_irq_ops = xen_irq_ops;
29304- x86_init.irqs.intr_init = xen_init_IRQ;
29305+ *(void **)&x86_init.irqs.intr_init = xen_init_IRQ;
29306 }
29307diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
29308index dcf5f2d..5f72fe7 100644
29309--- a/arch/x86/xen/mmu.c
29310+++ b/arch/x86/xen/mmu.c
29311@@ -1881,6 +1881,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
29312 /* L3_k[510] -> level2_kernel_pgt
29313 * L3_i[511] -> level2_fixmap_pgt */
29314 convert_pfn_mfn(level3_kernel_pgt);
29315+ convert_pfn_mfn(level3_vmalloc_start_pgt);
29316+ convert_pfn_mfn(level3_vmalloc_end_pgt);
29317+ convert_pfn_mfn(level3_vmemmap_pgt);
29318
29319 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
29320 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
29321@@ -1910,8 +1913,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
29322 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
29323 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
29324 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
29325+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
29326+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
29327+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
29328 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
29329 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
29330+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
29331 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
29332 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
29333
29334@@ -2097,6 +2104,7 @@ static void __init xen_post_allocator_init(void)
29335 pv_mmu_ops.set_pud = xen_set_pud;
29336 #if PAGETABLE_LEVELS == 4
29337 pv_mmu_ops.set_pgd = xen_set_pgd;
29338+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
29339 #endif
29340
29341 /* This will work as long as patching hasn't happened yet
29342@@ -2178,6 +2186,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
29343 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
29344 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
29345 .set_pgd = xen_set_pgd_hyper,
29346+ .set_pgd_batched = xen_set_pgd_hyper,
29347
29348 .alloc_pud = xen_alloc_pmd_init,
29349 .release_pud = xen_release_pmd_init,
29350@@ -2197,8 +2206,8 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
29351
29352 void __init xen_init_mmu_ops(void)
29353 {
29354- x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve;
29355- x86_init.paging.pagetable_init = xen_pagetable_init;
29356+ *(void **)&x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve;
29357+ *(void **)&x86_init.paging.pagetable_init = xen_pagetable_init;
29358 pv_mmu_ops = xen_mmu_ops;
29359
29360 memset(dummy_mapping, 0xff, PAGE_SIZE);
29361diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
29362index 353c50f..a0b9b0d 100644
29363--- a/arch/x86/xen/smp.c
29364+++ b/arch/x86/xen/smp.c
29365@@ -229,11 +229,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
29366 {
29367 BUG_ON(smp_processor_id() != 0);
29368 native_smp_prepare_boot_cpu();
29369-
29370- /* We've switched to the "real" per-cpu gdt, so make sure the
29371- old memory can be recycled */
29372- make_lowmem_page_readwrite(xen_initial_gdt);
29373-
29374 xen_filter_cpu_maps();
29375 xen_setup_vcpu_info_placement();
29376 }
29377@@ -300,12 +295,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
29378 gdt = get_cpu_gdt_table(cpu);
29379
29380 ctxt->flags = VGCF_IN_KERNEL;
29381- ctxt->user_regs.ds = __USER_DS;
29382- ctxt->user_regs.es = __USER_DS;
29383+ ctxt->user_regs.ds = __KERNEL_DS;
29384+ ctxt->user_regs.es = __KERNEL_DS;
29385 ctxt->user_regs.ss = __KERNEL_DS;
29386 #ifdef CONFIG_X86_32
29387 ctxt->user_regs.fs = __KERNEL_PERCPU;
29388- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
29389+ savesegment(gs, ctxt->user_regs.gs);
29390 #else
29391 ctxt->gs_base_kernel = per_cpu_offset(cpu);
29392 #endif
29393@@ -355,13 +350,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
29394 int rc;
29395
29396 per_cpu(current_task, cpu) = idle;
29397+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
29398 #ifdef CONFIG_X86_32
29399 irq_ctx_init(cpu);
29400 #else
29401 clear_tsk_thread_flag(idle, TIF_FORK);
29402- per_cpu(kernel_stack, cpu) =
29403- (unsigned long)task_stack_page(idle) -
29404- KERNEL_STACK_OFFSET + THREAD_SIZE;
29405+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
29406 #endif
29407 xen_setup_runstate_info(cpu);
29408 xen_setup_timer(cpu);
29409@@ -637,7 +631,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
29410
29411 void __init xen_smp_init(void)
29412 {
29413- smp_ops = xen_smp_ops;
29414+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
29415 xen_fill_possible_map();
29416 xen_init_spinlocks();
29417 }
29418@@ -672,10 +666,10 @@ void __init xen_hvm_smp_init(void)
29419 {
29420 if (!xen_have_vector_callback)
29421 return;
29422- smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
29423- smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
29424- smp_ops.cpu_up = xen_hvm_cpu_up;
29425- smp_ops.cpu_die = xen_hvm_cpu_die;
29426- smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
29427- smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
29428+ *(void **)&smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
29429+ *(void **)&smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
29430+ *(void **)&smp_ops.cpu_up = xen_hvm_cpu_up;
29431+ *(void **)&smp_ops.cpu_die = xen_hvm_cpu_die;
29432+ *(void **)&smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
29433+ *(void **)&smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
29434 }
29435diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
29436index 83e866d..ef60385 100644
29437--- a/arch/x86/xen/spinlock.c
29438+++ b/arch/x86/xen/spinlock.c
29439@@ -390,12 +390,12 @@ void __init xen_init_spinlocks(void)
29440 {
29441 BUILD_BUG_ON(sizeof(struct xen_spinlock) > sizeof(arch_spinlock_t));
29442
29443- pv_lock_ops.spin_is_locked = xen_spin_is_locked;
29444- pv_lock_ops.spin_is_contended = xen_spin_is_contended;
29445- pv_lock_ops.spin_lock = xen_spin_lock;
29446- pv_lock_ops.spin_lock_flags = xen_spin_lock_flags;
29447- pv_lock_ops.spin_trylock = xen_spin_trylock;
29448- pv_lock_ops.spin_unlock = xen_spin_unlock;
29449+ *(void **)&pv_lock_ops.spin_is_locked = xen_spin_is_locked;
29450+ *(void **)&pv_lock_ops.spin_is_contended = xen_spin_is_contended;
29451+ *(void **)&pv_lock_ops.spin_lock = xen_spin_lock;
29452+ *(void **)&pv_lock_ops.spin_lock_flags = xen_spin_lock_flags;
29453+ *(void **)&pv_lock_ops.spin_trylock = xen_spin_trylock;
29454+ *(void **)&pv_lock_ops.spin_unlock = xen_spin_unlock;
29455 }
29456
29457 #ifdef CONFIG_XEN_DEBUG_FS
29458diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
29459index 0296a95..3c51a2d 100644
29460--- a/arch/x86/xen/time.c
29461+++ b/arch/x86/xen/time.c
29462@@ -481,15 +481,15 @@ static void __init xen_time_init(void)
29463
29464 void __init xen_init_time_ops(void)
29465 {
29466- pv_time_ops = xen_time_ops;
29467+ memcpy((void *)&pv_time_ops, &xen_time_ops, sizeof pv_time_ops);
29468
29469- x86_init.timers.timer_init = xen_time_init;
29470- x86_init.timers.setup_percpu_clockev = x86_init_noop;
29471- x86_cpuinit.setup_percpu_clockev = x86_init_noop;
29472+ *(void **)&x86_init.timers.timer_init = xen_time_init;
29473+ *(void **)&x86_init.timers.setup_percpu_clockev = x86_init_noop;
29474+ *(void **)&x86_cpuinit.setup_percpu_clockev = x86_init_noop;
29475
29476- x86_platform.calibrate_tsc = xen_tsc_khz;
29477- x86_platform.get_wallclock = xen_get_wallclock;
29478- x86_platform.set_wallclock = xen_set_wallclock;
29479+ *(void **)&x86_platform.calibrate_tsc = xen_tsc_khz;
29480+ *(void **)&x86_platform.get_wallclock = xen_get_wallclock;
29481+ *(void **)&x86_platform.set_wallclock = xen_set_wallclock;
29482 }
29483
29484 #ifdef CONFIG_XEN_PVHVM
29485@@ -514,12 +514,12 @@ void __init xen_hvm_init_time_ops(void)
29486 return;
29487 }
29488
29489- pv_time_ops = xen_time_ops;
29490- x86_init.timers.setup_percpu_clockev = xen_time_init;
29491- x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;
29492+ memcpy((void *)&pv_time_ops, &xen_time_ops, sizeof pv_time_ops);
29493+ *(void **)&x86_init.timers.setup_percpu_clockev = xen_time_init;
29494+ *(void **)&x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;
29495
29496- x86_platform.calibrate_tsc = xen_tsc_khz;
29497- x86_platform.get_wallclock = xen_get_wallclock;
29498- x86_platform.set_wallclock = xen_set_wallclock;
29499+ *(void **)&x86_platform.calibrate_tsc = xen_tsc_khz;
29500+ *(void **)&x86_platform.get_wallclock = xen_get_wallclock;
29501+ *(void **)&x86_platform.set_wallclock = xen_set_wallclock;
29502 }
29503 #endif
29504diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
29505index f9643fc..602e8af 100644
29506--- a/arch/x86/xen/xen-asm_32.S
29507+++ b/arch/x86/xen/xen-asm_32.S
29508@@ -84,14 +84,14 @@ ENTRY(xen_iret)
29509 ESP_OFFSET=4 # bytes pushed onto stack
29510
29511 /*
29512- * Store vcpu_info pointer for easy access. Do it this way to
29513- * avoid having to reload %fs
29514+ * Store vcpu_info pointer for easy access.
29515 */
29516 #ifdef CONFIG_SMP
29517- GET_THREAD_INFO(%eax)
29518- movl TI_cpu(%eax), %eax
29519- movl __per_cpu_offset(,%eax,4), %eax
29520- mov xen_vcpu(%eax), %eax
29521+ push %fs
29522+ mov $(__KERNEL_PERCPU), %eax
29523+ mov %eax, %fs
29524+ mov PER_CPU_VAR(xen_vcpu), %eax
29525+ pop %fs
29526 #else
29527 movl xen_vcpu, %eax
29528 #endif
29529diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
29530index 7faed58..ba4427c 100644
29531--- a/arch/x86/xen/xen-head.S
29532+++ b/arch/x86/xen/xen-head.S
29533@@ -19,6 +19,17 @@ ENTRY(startup_xen)
29534 #ifdef CONFIG_X86_32
29535 mov %esi,xen_start_info
29536 mov $init_thread_union+THREAD_SIZE,%esp
29537+#ifdef CONFIG_SMP
29538+ movl $cpu_gdt_table,%edi
29539+ movl $__per_cpu_load,%eax
29540+ movw %ax,__KERNEL_PERCPU + 2(%edi)
29541+ rorl $16,%eax
29542+ movb %al,__KERNEL_PERCPU + 4(%edi)
29543+ movb %ah,__KERNEL_PERCPU + 7(%edi)
29544+ movl $__per_cpu_end - 1,%eax
29545+ subl $__per_cpu_start,%eax
29546+ movw %ax,__KERNEL_PERCPU + 0(%edi)
29547+#endif
29548 #else
29549 mov %rsi,xen_start_info
29550 mov $init_thread_union+THREAD_SIZE,%rsp
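
The assembly added to startup_xen hand-patches the __KERNEL_PERCPU descriptor in cpu_gdt_table: it installs __per_cpu_load as the segment base and the per-cpu area length minus one as the limit. The byte shuffling follows the x86 segment-descriptor layout; in C terms it does roughly the following (the flags bytes 5-6 are left untouched, as in the asm, and limit bits 16-19 are not written):

#include <stdint.h>

/* Sketch of the descriptor patching done in xen-head.S. */
static void set_base_limit(uint8_t desc[8], uint32_t base, uint32_t limit)
{
	desc[0] = limit & 0xff;		/* limit bits  0-7  */
	desc[1] = (limit >> 8) & 0xff;	/* limit bits  8-15 */
	desc[2] = base & 0xff;		/* base  bits  0-7  */
	desc[3] = (base >> 8) & 0xff;	/* base  bits  8-15 */
	desc[4] = (base >> 16) & 0xff;	/* base  bits 16-23 */
	desc[7] = (base >> 24) & 0xff;	/* base  bits 24-31 */
}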
29551diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
29552index a95b417..b6dbd0b 100644
29553--- a/arch/x86/xen/xen-ops.h
29554+++ b/arch/x86/xen/xen-ops.h
29555@@ -10,8 +10,6 @@
29556 extern const char xen_hypervisor_callback[];
29557 extern const char xen_failsafe_callback[];
29558
29559-extern void *xen_initial_gdt;
29560-
29561 struct trap_info;
29562 void xen_copy_trap_info(struct trap_info *traps);
29563
29564diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
29565index 525bd3d..ef888b1 100644
29566--- a/arch/xtensa/variants/dc232b/include/variant/core.h
29567+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
29568@@ -119,9 +119,9 @@
29569 ----------------------------------------------------------------------*/
29570
29571 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
29572-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
29573 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
29574 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
29575+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
29576
29577 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
29578 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
29579diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
29580index 2f33760..835e50a 100644
29581--- a/arch/xtensa/variants/fsf/include/variant/core.h
29582+++ b/arch/xtensa/variants/fsf/include/variant/core.h
29583@@ -11,6 +11,7 @@
29584 #ifndef _XTENSA_CORE_H
29585 #define _XTENSA_CORE_H
29586
29587+#include <linux/const.h>
29588
29589 /****************************************************************************
29590 Parameters Useful for Any Code, USER or PRIVILEGED
29591@@ -112,9 +113,9 @@
29592 ----------------------------------------------------------------------*/
29593
29594 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
29595-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
29596 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
29597 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
29598+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
29599
29600 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
29601 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
29602diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
29603index af00795..2bb8105 100644
29604--- a/arch/xtensa/variants/s6000/include/variant/core.h
29605+++ b/arch/xtensa/variants/s6000/include/variant/core.h
29606@@ -11,6 +11,7 @@
29607 #ifndef _XTENSA_CORE_CONFIGURATION_H
29608 #define _XTENSA_CORE_CONFIGURATION_H
29609
29610+#include <linux/const.h>
29611
29612 /****************************************************************************
29613 Parameters Useful for Any Code, USER or PRIVILEGED
29614@@ -118,9 +119,9 @@
29615 ----------------------------------------------------------------------*/
29616
29617 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
29618-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
29619 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
29620 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
29621+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
29622
29623 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
29624 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
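
All three xtensa variant headers get the same treatment: XCHAL_DCACHE_LINESIZE stops being an independent literal and is derived from XCHAL_DCACHE_LINEWIDTH via _AC(1,UL), so the two macros can never drift apart and the result is an unsigned-long constant that is also usable from assembly (where _AC() drops the suffix). The relationship, in miniature:

#include <stdio.h>

/* Userspace stand-in for linux/const.h's _AC(); in asm builds the real
 * macro expands without the UL suffix. */
#define AC(x, s)	x##s

#define LINEWIDTH	5
#define LINESIZE	(AC(1, UL) << LINEWIDTH)

int main(void)
{
	printf("line size = %lu bytes\n", LINESIZE);	/* 32 */
	return 0;
}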
29625diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
29626index 58916af..9cb880b 100644
29627--- a/block/blk-iopoll.c
29628+++ b/block/blk-iopoll.c
29629@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
29630 }
29631 EXPORT_SYMBOL(blk_iopoll_complete);
29632
29633-static void blk_iopoll_softirq(struct softirq_action *h)
29634+static void blk_iopoll_softirq(void)
29635 {
29636 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
29637 int rearm = 0, budget = blk_iopoll_budget;
29638diff --git a/block/blk-map.c b/block/blk-map.c
29639index 623e1cd..ca1e109 100644
29640--- a/block/blk-map.c
29641+++ b/block/blk-map.c
29642@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
29643 if (!len || !kbuf)
29644 return -EINVAL;
29645
29646- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
29647+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
29648 if (do_copy)
29649 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
29650 else
29651diff --git a/block/blk-softirq.c b/block/blk-softirq.c
29652index 467c8de..4bddc6d 100644
29653--- a/block/blk-softirq.c
29654+++ b/block/blk-softirq.c
29655@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
29656 * Softirq action handler - move entries to local list and loop over them
29657 * while passing them to the queue registered handler.
29658 */
29659-static void blk_done_softirq(struct softirq_action *h)
29660+static void blk_done_softirq(void)
29661 {
29662 struct list_head *cpu_list, local_list;
29663
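Both block softirq handlers lose their unused `struct softirq_action *` argument. This is part of a patch-wide normalization: once every handler has the exact type void (*)(void), indirect calls through the softirq vector can be checked against a single function-pointer type. In sketch form:

/* All handlers now share one signature, so the dispatch table is
 * uniformly typed. Sketch, not the kernel's softirq machinery. */
typedef void (*softirq_handler_t)(void);

static void blk_done_sketch(void)
{
	/* drain this CPU's completion list */
}

static softirq_handler_t softirq_vec_sketch[] = {
	blk_done_sketch,
};
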
29664diff --git a/block/bsg.c b/block/bsg.c
29665index ff64ae3..593560c 100644
29666--- a/block/bsg.c
29667+++ b/block/bsg.c
29668@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
29669 struct sg_io_v4 *hdr, struct bsg_device *bd,
29670 fmode_t has_write_perm)
29671 {
29672+ unsigned char tmpcmd[sizeof(rq->__cmd)];
29673+ unsigned char *cmdptr;
29674+
29675 if (hdr->request_len > BLK_MAX_CDB) {
29676 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
29677 if (!rq->cmd)
29678 return -ENOMEM;
29679- }
29680+ cmdptr = rq->cmd;
29681+ } else
29682+ cmdptr = tmpcmd;
29683
29684- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
29685+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
29686 hdr->request_len))
29687 return -EFAULT;
29688
29689+ if (cmdptr != rq->cmd)
29690+ memcpy(rq->cmd, cmdptr, hdr->request_len);
29691+
29692 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
29693 if (blk_verify_command(rq->cmd, has_write_perm))
29694 return -EPERM;
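
The bsg.c change (repeated below in scsi_ioctl.c) stops copy_from_user() from targeting rq->cmd directly when it aliases the fixed rq->__cmd array embedded in the request: user data is staged through a stack buffer of exactly sizeof(rq->__cmd) and committed afterwards, which appears to be aimed at keeping the destination of the user copy a simple stack object that the PAX_USERCOPY checker can size. A simplified userspace model of the staging:

#include <string.h>

#define CMD_FIXED_LEN 32	/* stands in for sizeof(rq->__cmd) */

/* copy_user_sketch() models copy_from_user(): returns 0 on success. */
static int copy_user_sketch(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);
	return 0;
}

static int fill_cmd(unsigned char *cmd, int cmd_is_fixed,
		    const unsigned char *ubuf, size_t len)
{
	unsigned char tmp[CMD_FIXED_LEN];
	unsigned char *dst = cmd_is_fixed ? tmp : cmd;

	if (cmd_is_fixed && len > sizeof(tmp))
		return -1;
	if (copy_user_sketch(dst, ubuf, len))
		return -1;
	if (dst != cmd)
		memcpy(cmd, dst, len);	/* commit only after the copy succeeded */
	return 0;
}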
29695diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
29696index 7c668c8..db3521c 100644
29697--- a/block/compat_ioctl.c
29698+++ b/block/compat_ioctl.c
29699@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
29700 err |= __get_user(f->spec1, &uf->spec1);
29701 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
29702 err |= __get_user(name, &uf->name);
29703- f->name = compat_ptr(name);
29704+ f->name = (void __force_kernel *)compat_ptr(name);
29705 if (err) {
29706 err = -EFAULT;
29707 goto out;
29708diff --git a/block/partitions/efi.c b/block/partitions/efi.c
29709index 6296b40..417c00f 100644
29710--- a/block/partitions/efi.c
29711+++ b/block/partitions/efi.c
29712@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
29713 if (!gpt)
29714 return NULL;
29715
29716+ if (!le32_to_cpu(gpt->num_partition_entries))
29717+ return NULL;
29718+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
29719+ if (!pte)
29720+ return NULL;
29721+
29722 count = le32_to_cpu(gpt->num_partition_entries) *
29723 le32_to_cpu(gpt->sizeof_partition_entry);
29724- if (!count)
29725- return NULL;
29726- pte = kzalloc(count, GFP_KERNEL);
29727- if (!pte)
29728- return NULL;
29729-
29730 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
29731 (u8 *) pte,
29732 count) < count) {
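
The efi.c hunk replaces `count = num * size; kzalloc(count)` with kcalloc(num, size, GFP_KERNEL): both GPT fields come straight from disk, so on 32-bit the multiplication can wrap and yield a tiny allocation that the subsequent read_lba() then overflows. kcalloc() performs a checked multiply and returns NULL instead of wrapping. A userspace demonstration of the wrap, with hypothetical on-disk values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t num  = 0x01000001;	/* hypothetical num_partition_entries */
	uint32_t size = 0x00000100;	/* hypothetical sizeof_partition_entry */
	uint32_t count = num * size;	/* wraps in 32 bits */

	printf("logical bytes : %llu\n", (unsigned long long)num * size);
	printf("allocated     : %u\n", count);	/* 256 -- far too small */
	return 0;
}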
29733diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
29734index 9a87daa..fb17486 100644
29735--- a/block/scsi_ioctl.c
29736+++ b/block/scsi_ioctl.c
29737@@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
29738 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
29739 struct sg_io_hdr *hdr, fmode_t mode)
29740 {
29741- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
29742+ unsigned char tmpcmd[sizeof(rq->__cmd)];
29743+ unsigned char *cmdptr;
29744+
29745+ if (rq->cmd != rq->__cmd)
29746+ cmdptr = rq->cmd;
29747+ else
29748+ cmdptr = tmpcmd;
29749+
29750+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
29751 return -EFAULT;
29752+
29753+ if (cmdptr != rq->cmd)
29754+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
29755+
29756 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
29757 return -EPERM;
29758
29759@@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
29760 int err;
29761 unsigned int in_len, out_len, bytes, opcode, cmdlen;
29762 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
29763+ unsigned char tmpcmd[sizeof(rq->__cmd)];
29764+ unsigned char *cmdptr;
29765
29766 if (!sic)
29767 return -EINVAL;
29768@@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
29769 */
29770 err = -EFAULT;
29771 rq->cmd_len = cmdlen;
29772- if (copy_from_user(rq->cmd, sic->data, cmdlen))
29773+
29774+ if (rq->cmd != rq->__cmd)
29775+ cmdptr = rq->cmd;
29776+ else
29777+ cmdptr = tmpcmd;
29778+
29779+ if (copy_from_user(cmdptr, sic->data, cmdlen))
29780 goto error;
29781
29782+ if (rq->cmd != cmdptr)
29783+ memcpy(rq->cmd, cmdptr, cmdlen);
29784+
29785 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
29786 goto error;
29787
29788diff --git a/crypto/cryptd.c b/crypto/cryptd.c
29789index 7bdd61b..afec999 100644
29790--- a/crypto/cryptd.c
29791+++ b/crypto/cryptd.c
29792@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
29793
29794 struct cryptd_blkcipher_request_ctx {
29795 crypto_completion_t complete;
29796-};
29797+} __no_const;
29798
29799 struct cryptd_hash_ctx {
29800 struct crypto_shash *child;
29801@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
29802
29803 struct cryptd_aead_request_ctx {
29804 crypto_completion_t complete;
29805-};
29806+} __no_const;
29807
29808 static void cryptd_queue_worker(struct work_struct *work);
29809
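The two cryptd request contexts gain a __no_const annotation. Under the constify plugin, any struct consisting only of function pointers is made read-only after init by default; these contexts legitimately have their completion callback assigned per request, so the type opts out. The shape of the opt-out (the macro expands to a plugin-recognized attribute in hardened kernel builds, and to nothing otherwise):

typedef void (*crypto_completion_sketch_t)(void *req, int err);

#define __no_const	/* plugin attribute in hardened builds */

struct request_ctx_sketch {
	crypto_completion_sketch_t complete;	/* set per request at runtime */
} __no_const;
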
29810diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
29811index e6defd8..c26a225 100644
29812--- a/drivers/acpi/apei/cper.c
29813+++ b/drivers/acpi/apei/cper.c
29814@@ -38,12 +38,12 @@
29815 */
29816 u64 cper_next_record_id(void)
29817 {
29818- static atomic64_t seq;
29819+ static atomic64_unchecked_t seq;
29820
29821- if (!atomic64_read(&seq))
29822- atomic64_set(&seq, ((u64)get_seconds()) << 32);
29823+ if (!atomic64_read_unchecked(&seq))
29824+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
29825
29826- return atomic64_inc_return(&seq);
29827+ return atomic64_inc_return_unchecked(&seq);
29828 }
29829 EXPORT_SYMBOL_GPL(cper_next_record_id);
29830
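cper_next_record_id() switches its sequence counter to atomic64_unchecked_t, the first of many such conversions in this patch (the ATM drivers below convert nearly every stats counter the same way). Under PAX_REFCOUNT, the plain atomic types trap on overflow to stop reference-count wraps; counters that are expected to wrap, such as statistics and record IDs, move to the _unchecked variants, which offer the same operations without the overflow trap. A sketch of the idea, using a GCC builtin in place of the per-arch implementation:

/* Illustrative only -- the real atomic64_unchecked_t and its helpers
 * live in the per-arch asm/atomic.h changes added by this patch. */
typedef struct {
	long long counter;
} atomic64_unchecked_sketch_t;

static inline long long
atomic64_inc_return_unchecked_sketch(atomic64_unchecked_sketch_t *v)
{
	/* Plain wrapping increment: no overflow detection, on purpose. */
	return __atomic_add_fetch(&v->counter, 1, __ATOMIC_SEQ_CST);
}
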
29831diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
29832index 7586544..636a2f0 100644
29833--- a/drivers/acpi/ec_sys.c
29834+++ b/drivers/acpi/ec_sys.c
29835@@ -12,6 +12,7 @@
29836 #include <linux/acpi.h>
29837 #include <linux/debugfs.h>
29838 #include <linux/module.h>
29839+#include <linux/uaccess.h>
29840 #include "internal.h"
29841
29842 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
29843@@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
29844 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
29845 */
29846 unsigned int size = EC_SPACE_SIZE;
29847- u8 *data = (u8 *) buf;
29848+ u8 data;
29849 loff_t init_off = *off;
29850 int err = 0;
29851
29852@@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
29853 size = count;
29854
29855 while (size) {
29856- err = ec_read(*off, &data[*off - init_off]);
29857+ err = ec_read(*off, &data);
29858 if (err)
29859 return err;
29860+ if (put_user(data, &buf[*off - init_off]))
29861+ return -EFAULT;
29862 *off += 1;
29863 size--;
29864 }
29865@@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
29866
29867 unsigned int size = count;
29868 loff_t init_off = *off;
29869- u8 *data = (u8 *) buf;
29870 int err = 0;
29871
29872 if (*off >= EC_SPACE_SIZE)
29873@@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
29874 }
29875
29876 while (size) {
29877- u8 byte_write = data[*off - init_off];
29878+ u8 byte_write;
29879+ if (get_user(byte_write, &buf[*off - init_off]))
29880+ return -EFAULT;
29881 err = ec_write(*off, byte_write);
29882 if (err)
29883 return err;
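
Before this change, acpi_ec_read_io() and acpi_ec_write_io() cast the __user buffer to a plain u8 * and dereferenced it directly, bypassing access checking entirely; on configurations with strict user/kernel separation (SMAP, PaX UDEREF) such a dereference simply faults. The fix moves every byte through put_user()/get_user(). A kernel-style sketch of the corrected read loop (requires the usual kernel headers; not standalone userspace code):

/* Sketch of the fixed pattern: each byte crosses the user/kernel
 * boundary through put_user(), never via a laundered pointer. */
static ssize_t ec_read_fixed_sketch(char __user *buf, size_t count,
				    loff_t *off, loff_t init_off)
{
	size_t size = count;

	while (size) {
		u8 data;
		int err = ec_read(*off, &data);

		if (err)
			return err;
		if (put_user(data, &buf[*off - init_off]))
			return -EFAULT;
		(*off)++;
		size--;
	}
	return count;
}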
29884diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
29885index 27adb09..1ed412d 100644
29886--- a/drivers/acpi/proc.c
29887+++ b/drivers/acpi/proc.c
29888@@ -360,19 +360,13 @@ acpi_system_write_wakeup_device(struct file *file,
29889 size_t count, loff_t * ppos)
29890 {
29891 struct list_head *node, *next;
29892- char strbuf[5];
29893- char str[5] = "";
29894- unsigned int len = count;
29895+ char strbuf[5] = {0};
29896
29897- if (len > 4)
29898- len = 4;
29899- if (len < 0)
29900+ if (count > 4)
29901+ count = 4;
29902+ if (copy_from_user(strbuf, buffer, count))
29903 return -EFAULT;
29904-
29905- if (copy_from_user(strbuf, buffer, len))
29906- return -EFAULT;
29907- strbuf[len] = '\0';
29908- sscanf(strbuf, "%s", str);
29909+ strbuf[count] = '\0';
29910
29911 mutex_lock(&acpi_device_lock);
29912 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
29913@@ -381,7 +375,7 @@ acpi_system_write_wakeup_device(struct file *file,
29914 if (!dev->wakeup.flags.valid)
29915 continue;
29916
29917- if (!strncmp(dev->pnp.bus_id, str, 4)) {
29918+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
29919 if (device_can_wakeup(&dev->dev)) {
29920 bool enable = !device_may_wakeup(&dev->dev);
29921 device_set_wakeup_enable(&dev->dev, enable);
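
The proc.c rewrite also quietly removes a dead check: `len` was an unsigned int, so `if (len < 0)` could never be true and the intended -EFAULT path was unreachable; clamping `count` directly and copying into a zero-initialized buffer makes the bounds handling real. The same pitfall is behind the BUG_ON change in processor_driver.c below, where `pr->id < 0` was likewise always false. The pitfall in isolation:

#include <stdio.h>

int main(void)
{
	unsigned int len = (unsigned int)-5;	/* a "negative" length */

	if (len < 0)				/* always false: len is unsigned */
		puts("caught");
	else
		printf("slipped through as %u\n", len);
	return 0;
}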
29922diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
29923index bd4e5dc..0497b66 100644
29924--- a/drivers/acpi/processor_driver.c
29925+++ b/drivers/acpi/processor_driver.c
29926@@ -552,7 +552,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
29927 return 0;
29928 #endif
29929
29930- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
29931+ BUG_ON(pr->id >= nr_cpu_ids);
29932
29933 /*
29934 * Buggy BIOS check
29935diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
29936index 586362e..ca71b9b 100644
29937--- a/drivers/ata/libata-core.c
29938+++ b/drivers/ata/libata-core.c
29939@@ -4775,7 +4775,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
29940 struct ata_port *ap;
29941 unsigned int tag;
29942
29943- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
29944+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
29945 ap = qc->ap;
29946
29947 qc->flags = 0;
29948@@ -4791,7 +4791,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
29949 struct ata_port *ap;
29950 struct ata_link *link;
29951
29952- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
29953+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
29954 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
29955 ap = qc->ap;
29956 link = qc->dev->link;
29957@@ -5887,6 +5887,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
29958 return;
29959
29960 spin_lock(&lock);
29961+ pax_open_kernel();
29962
29963 for (cur = ops->inherits; cur; cur = cur->inherits) {
29964 void **inherit = (void **)cur;
29965@@ -5900,8 +5901,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
29966 if (IS_ERR(*pp))
29967 *pp = NULL;
29968
29969- ops->inherits = NULL;
29970+ *(struct ata_port_operations **)&ops->inherits = NULL;
29971
29972+ pax_close_kernel();
29973 spin_unlock(&lock);
29974 }
29975
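ata_finalize_port_ops() now brackets its fix-up of the (constified) port-operations tables with pax_open_kernel()/pax_close_kernel(), and pata_arasan_cf.c below does the same for a quirk override. The pair briefly lifts kernel write protection (on x86, by toggling CR0.WP) so that an intentional late write to read-only data can land. The usage shape, as a sketch:

/* pax_open_kernel()/pax_close_kernel() are the grsecurity primitives
 * used in the hunks above; they are no-ops when the protection is off. */
static void patch_op(void **slot, void *fn)
{
	pax_open_kernel();	/* make rodata writable on this CPU */
	*slot = fn;
	pax_close_kernel();	/* restore write protection */
}
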
29976diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
29977index 371fd2c..0836c78 100644
29978--- a/drivers/ata/pata_arasan_cf.c
29979+++ b/drivers/ata/pata_arasan_cf.c
29980@@ -861,7 +861,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
29981 /* Handle platform specific quirks */
29982 if (pdata->quirk) {
29983 if (pdata->quirk & CF_BROKEN_PIO) {
29984- ap->ops->set_piomode = NULL;
29985+ pax_open_kernel();
29986+ *(void **)&ap->ops->set_piomode = NULL;
29987+ pax_close_kernel();
29988 ap->pio_mask = 0;
29989 }
29990 if (pdata->quirk & CF_BROKEN_MWDMA)
29991diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
29992index f9b983a..887b9d8 100644
29993--- a/drivers/atm/adummy.c
29994+++ b/drivers/atm/adummy.c
29995@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
29996 vcc->pop(vcc, skb);
29997 else
29998 dev_kfree_skb_any(skb);
29999- atomic_inc(&vcc->stats->tx);
30000+ atomic_inc_unchecked(&vcc->stats->tx);
30001
30002 return 0;
30003 }
30004diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
30005index ff7bb8a..568fc0b 100644
30006--- a/drivers/atm/ambassador.c
30007+++ b/drivers/atm/ambassador.c
30008@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
30009 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
30010
30011 // VC layer stats
30012- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
30013+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
30014
30015 // free the descriptor
30016 kfree (tx_descr);
30017@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
30018 dump_skb ("<<<", vc, skb);
30019
30020 // VC layer stats
30021- atomic_inc(&atm_vcc->stats->rx);
30022+ atomic_inc_unchecked(&atm_vcc->stats->rx);
30023 __net_timestamp(skb);
30024 // end of our responsibility
30025 atm_vcc->push (atm_vcc, skb);
30026@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
30027 } else {
30028 PRINTK (KERN_INFO, "dropped over-size frame");
30029 // should we count this?
30030- atomic_inc(&atm_vcc->stats->rx_drop);
30031+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
30032 }
30033
30034 } else {
30035@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
30036 }
30037
30038 if (check_area (skb->data, skb->len)) {
30039- atomic_inc(&atm_vcc->stats->tx_err);
30040+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
30041 return -ENOMEM; // ?
30042 }
30043
30044diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
30045index b22d71c..d6e1049 100644
30046--- a/drivers/atm/atmtcp.c
30047+++ b/drivers/atm/atmtcp.c
30048@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
30049 if (vcc->pop) vcc->pop(vcc,skb);
30050 else dev_kfree_skb(skb);
30051 if (dev_data) return 0;
30052- atomic_inc(&vcc->stats->tx_err);
30053+ atomic_inc_unchecked(&vcc->stats->tx_err);
30054 return -ENOLINK;
30055 }
30056 size = skb->len+sizeof(struct atmtcp_hdr);
30057@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
30058 if (!new_skb) {
30059 if (vcc->pop) vcc->pop(vcc,skb);
30060 else dev_kfree_skb(skb);
30061- atomic_inc(&vcc->stats->tx_err);
30062+ atomic_inc_unchecked(&vcc->stats->tx_err);
30063 return -ENOBUFS;
30064 }
30065 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
30066@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
30067 if (vcc->pop) vcc->pop(vcc,skb);
30068 else dev_kfree_skb(skb);
30069 out_vcc->push(out_vcc,new_skb);
30070- atomic_inc(&vcc->stats->tx);
30071- atomic_inc(&out_vcc->stats->rx);
30072+ atomic_inc_unchecked(&vcc->stats->tx);
30073+ atomic_inc_unchecked(&out_vcc->stats->rx);
30074 return 0;
30075 }
30076
30077@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
30078 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
30079 read_unlock(&vcc_sklist_lock);
30080 if (!out_vcc) {
30081- atomic_inc(&vcc->stats->tx_err);
30082+ atomic_inc_unchecked(&vcc->stats->tx_err);
30083 goto done;
30084 }
30085 skb_pull(skb,sizeof(struct atmtcp_hdr));
30086@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
30087 __net_timestamp(new_skb);
30088 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
30089 out_vcc->push(out_vcc,new_skb);
30090- atomic_inc(&vcc->stats->tx);
30091- atomic_inc(&out_vcc->stats->rx);
30092+ atomic_inc_unchecked(&vcc->stats->tx);
30093+ atomic_inc_unchecked(&out_vcc->stats->rx);
30094 done:
30095 if (vcc->pop) vcc->pop(vcc,skb);
30096 else dev_kfree_skb(skb);
30097diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
30098index 81e44f7..498ea36 100644
30099--- a/drivers/atm/eni.c
30100+++ b/drivers/atm/eni.c
30101@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
30102 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
30103 vcc->dev->number);
30104 length = 0;
30105- atomic_inc(&vcc->stats->rx_err);
30106+ atomic_inc_unchecked(&vcc->stats->rx_err);
30107 }
30108 else {
30109 length = ATM_CELL_SIZE-1; /* no HEC */
30110@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
30111 size);
30112 }
30113 eff = length = 0;
30114- atomic_inc(&vcc->stats->rx_err);
30115+ atomic_inc_unchecked(&vcc->stats->rx_err);
30116 }
30117 else {
30118 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
30119@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
30120 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
30121 vcc->dev->number,vcc->vci,length,size << 2,descr);
30122 length = eff = 0;
30123- atomic_inc(&vcc->stats->rx_err);
30124+ atomic_inc_unchecked(&vcc->stats->rx_err);
30125 }
30126 }
30127 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
30128@@ -767,7 +767,7 @@ rx_dequeued++;
30129 vcc->push(vcc,skb);
30130 pushed++;
30131 }
30132- atomic_inc(&vcc->stats->rx);
30133+ atomic_inc_unchecked(&vcc->stats->rx);
30134 }
30135 wake_up(&eni_dev->rx_wait);
30136 }
30137@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
30138 PCI_DMA_TODEVICE);
30139 if (vcc->pop) vcc->pop(vcc,skb);
30140 else dev_kfree_skb_irq(skb);
30141- atomic_inc(&vcc->stats->tx);
30142+ atomic_inc_unchecked(&vcc->stats->tx);
30143 wake_up(&eni_dev->tx_wait);
30144 dma_complete++;
30145 }
30146diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
30147index 86fed1b..6dc4721 100644
30148--- a/drivers/atm/firestream.c
30149+++ b/drivers/atm/firestream.c
30150@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
30151 }
30152 }
30153
30154- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
30155+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
30156
30157 fs_dprintk (FS_DEBUG_TXMEM, "i");
30158 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
30159@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
30160 #endif
30161 skb_put (skb, qe->p1 & 0xffff);
30162 ATM_SKB(skb)->vcc = atm_vcc;
30163- atomic_inc(&atm_vcc->stats->rx);
30164+ atomic_inc_unchecked(&atm_vcc->stats->rx);
30165 __net_timestamp(skb);
30166 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
30167 atm_vcc->push (atm_vcc, skb);
30168@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
30169 kfree (pe);
30170 }
30171 if (atm_vcc)
30172- atomic_inc(&atm_vcc->stats->rx_drop);
30173+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
30174 break;
30175 case 0x1f: /* Reassembly abort: no buffers. */
30176 /* Silently increment error counter. */
30177 if (atm_vcc)
30178- atomic_inc(&atm_vcc->stats->rx_drop);
30179+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
30180 break;
30181 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
30182 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
30183diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
30184index 361f5ae..7fc552d 100644
30185--- a/drivers/atm/fore200e.c
30186+++ b/drivers/atm/fore200e.c
30187@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
30188 #endif
30189 /* check error condition */
30190 if (*entry->status & STATUS_ERROR)
30191- atomic_inc(&vcc->stats->tx_err);
30192+ atomic_inc_unchecked(&vcc->stats->tx_err);
30193 else
30194- atomic_inc(&vcc->stats->tx);
30195+ atomic_inc_unchecked(&vcc->stats->tx);
30196 }
30197 }
30198
30199@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
30200 if (skb == NULL) {
30201 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
30202
30203- atomic_inc(&vcc->stats->rx_drop);
30204+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30205 return -ENOMEM;
30206 }
30207
30208@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
30209
30210 dev_kfree_skb_any(skb);
30211
30212- atomic_inc(&vcc->stats->rx_drop);
30213+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30214 return -ENOMEM;
30215 }
30216
30217 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
30218
30219 vcc->push(vcc, skb);
30220- atomic_inc(&vcc->stats->rx);
30221+ atomic_inc_unchecked(&vcc->stats->rx);
30222
30223 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
30224
30225@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
30226 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
30227 fore200e->atm_dev->number,
30228 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
30229- atomic_inc(&vcc->stats->rx_err);
30230+ atomic_inc_unchecked(&vcc->stats->rx_err);
30231 }
30232 }
30233
30234@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
30235 goto retry_here;
30236 }
30237
30238- atomic_inc(&vcc->stats->tx_err);
30239+ atomic_inc_unchecked(&vcc->stats->tx_err);
30240
30241 fore200e->tx_sat++;
30242 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
30243diff --git a/drivers/atm/he.c b/drivers/atm/he.c
30244index b182c2f..1c6fa8a 100644
30245--- a/drivers/atm/he.c
30246+++ b/drivers/atm/he.c
30247@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
30248
30249 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
30250 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
30251- atomic_inc(&vcc->stats->rx_drop);
30252+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30253 goto return_host_buffers;
30254 }
30255
30256@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
30257 RBRQ_LEN_ERR(he_dev->rbrq_head)
30258 ? "LEN_ERR" : "",
30259 vcc->vpi, vcc->vci);
30260- atomic_inc(&vcc->stats->rx_err);
30261+ atomic_inc_unchecked(&vcc->stats->rx_err);
30262 goto return_host_buffers;
30263 }
30264
30265@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
30266 vcc->push(vcc, skb);
30267 spin_lock(&he_dev->global_lock);
30268
30269- atomic_inc(&vcc->stats->rx);
30270+ atomic_inc_unchecked(&vcc->stats->rx);
30271
30272 return_host_buffers:
30273 ++pdus_assembled;
30274@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
30275 tpd->vcc->pop(tpd->vcc, tpd->skb);
30276 else
30277 dev_kfree_skb_any(tpd->skb);
30278- atomic_inc(&tpd->vcc->stats->tx_err);
30279+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
30280 }
30281 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
30282 return;
30283@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
30284 vcc->pop(vcc, skb);
30285 else
30286 dev_kfree_skb_any(skb);
30287- atomic_inc(&vcc->stats->tx_err);
30288+ atomic_inc_unchecked(&vcc->stats->tx_err);
30289 return -EINVAL;
30290 }
30291
30292@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
30293 vcc->pop(vcc, skb);
30294 else
30295 dev_kfree_skb_any(skb);
30296- atomic_inc(&vcc->stats->tx_err);
30297+ atomic_inc_unchecked(&vcc->stats->tx_err);
30298 return -EINVAL;
30299 }
30300 #endif
30301@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
30302 vcc->pop(vcc, skb);
30303 else
30304 dev_kfree_skb_any(skb);
30305- atomic_inc(&vcc->stats->tx_err);
30306+ atomic_inc_unchecked(&vcc->stats->tx_err);
30307 spin_unlock_irqrestore(&he_dev->global_lock, flags);
30308 return -ENOMEM;
30309 }
30310@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
30311 vcc->pop(vcc, skb);
30312 else
30313 dev_kfree_skb_any(skb);
30314- atomic_inc(&vcc->stats->tx_err);
30315+ atomic_inc_unchecked(&vcc->stats->tx_err);
30316 spin_unlock_irqrestore(&he_dev->global_lock, flags);
30317 return -ENOMEM;
30318 }
30319@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
30320 __enqueue_tpd(he_dev, tpd, cid);
30321 spin_unlock_irqrestore(&he_dev->global_lock, flags);
30322
30323- atomic_inc(&vcc->stats->tx);
30324+ atomic_inc_unchecked(&vcc->stats->tx);
30325
30326 return 0;
30327 }
30328diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
30329index 7d01c2a..4e3ac01 100644
30330--- a/drivers/atm/horizon.c
30331+++ b/drivers/atm/horizon.c
30332@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
30333 {
30334 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
30335 // VC layer stats
30336- atomic_inc(&vcc->stats->rx);
30337+ atomic_inc_unchecked(&vcc->stats->rx);
30338 __net_timestamp(skb);
30339 // end of our responsibility
30340 vcc->push (vcc, skb);
30341@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
30342 dev->tx_iovec = NULL;
30343
30344 // VC layer stats
30345- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
30346+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
30347
30348 // free the skb
30349 hrz_kfree_skb (skb);
30350diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
30351index 8974bd2..b856f85 100644
30352--- a/drivers/atm/idt77252.c
30353+++ b/drivers/atm/idt77252.c
30354@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
30355 else
30356 dev_kfree_skb(skb);
30357
30358- atomic_inc(&vcc->stats->tx);
30359+ atomic_inc_unchecked(&vcc->stats->tx);
30360 }
30361
30362 atomic_dec(&scq->used);
30363@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
30364 if ((sb = dev_alloc_skb(64)) == NULL) {
30365 printk("%s: Can't allocate buffers for aal0.\n",
30366 card->name);
30367- atomic_add(i, &vcc->stats->rx_drop);
30368+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
30369 break;
30370 }
30371 if (!atm_charge(vcc, sb->truesize)) {
30372 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
30373 card->name);
30374- atomic_add(i - 1, &vcc->stats->rx_drop);
30375+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
30376 dev_kfree_skb(sb);
30377 break;
30378 }
30379@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
30380 ATM_SKB(sb)->vcc = vcc;
30381 __net_timestamp(sb);
30382 vcc->push(vcc, sb);
30383- atomic_inc(&vcc->stats->rx);
30384+ atomic_inc_unchecked(&vcc->stats->rx);
30385
30386 cell += ATM_CELL_PAYLOAD;
30387 }
30388@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
30389 "(CDC: %08x)\n",
30390 card->name, len, rpp->len, readl(SAR_REG_CDC));
30391 recycle_rx_pool_skb(card, rpp);
30392- atomic_inc(&vcc->stats->rx_err);
30393+ atomic_inc_unchecked(&vcc->stats->rx_err);
30394 return;
30395 }
30396 if (stat & SAR_RSQE_CRC) {
30397 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
30398 recycle_rx_pool_skb(card, rpp);
30399- atomic_inc(&vcc->stats->rx_err);
30400+ atomic_inc_unchecked(&vcc->stats->rx_err);
30401 return;
30402 }
30403 if (skb_queue_len(&rpp->queue) > 1) {
30404@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
30405 RXPRINTK("%s: Can't alloc RX skb.\n",
30406 card->name);
30407 recycle_rx_pool_skb(card, rpp);
30408- atomic_inc(&vcc->stats->rx_err);
30409+ atomic_inc_unchecked(&vcc->stats->rx_err);
30410 return;
30411 }
30412 if (!atm_charge(vcc, skb->truesize)) {
30413@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
30414 __net_timestamp(skb);
30415
30416 vcc->push(vcc, skb);
30417- atomic_inc(&vcc->stats->rx);
30418+ atomic_inc_unchecked(&vcc->stats->rx);
30419
30420 return;
30421 }
30422@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
30423 __net_timestamp(skb);
30424
30425 vcc->push(vcc, skb);
30426- atomic_inc(&vcc->stats->rx);
30427+ atomic_inc_unchecked(&vcc->stats->rx);
30428
30429 if (skb->truesize > SAR_FB_SIZE_3)
30430 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
30431@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
30432 if (vcc->qos.aal != ATM_AAL0) {
30433 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
30434 card->name, vpi, vci);
30435- atomic_inc(&vcc->stats->rx_drop);
30436+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30437 goto drop;
30438 }
30439
30440 if ((sb = dev_alloc_skb(64)) == NULL) {
30441 printk("%s: Can't allocate buffers for AAL0.\n",
30442 card->name);
30443- atomic_inc(&vcc->stats->rx_err);
30444+ atomic_inc_unchecked(&vcc->stats->rx_err);
30445 goto drop;
30446 }
30447
30448@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
30449 ATM_SKB(sb)->vcc = vcc;
30450 __net_timestamp(sb);
30451 vcc->push(vcc, sb);
30452- atomic_inc(&vcc->stats->rx);
30453+ atomic_inc_unchecked(&vcc->stats->rx);
30454
30455 drop:
30456 skb_pull(queue, 64);
30457@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
30458
30459 if (vc == NULL) {
30460 printk("%s: NULL connection in send().\n", card->name);
30461- atomic_inc(&vcc->stats->tx_err);
30462+ atomic_inc_unchecked(&vcc->stats->tx_err);
30463 dev_kfree_skb(skb);
30464 return -EINVAL;
30465 }
30466 if (!test_bit(VCF_TX, &vc->flags)) {
30467 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
30468- atomic_inc(&vcc->stats->tx_err);
30469+ atomic_inc_unchecked(&vcc->stats->tx_err);
30470 dev_kfree_skb(skb);
30471 return -EINVAL;
30472 }
30473@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
30474 break;
30475 default:
30476 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
30477- atomic_inc(&vcc->stats->tx_err);
30478+ atomic_inc_unchecked(&vcc->stats->tx_err);
30479 dev_kfree_skb(skb);
30480 return -EINVAL;
30481 }
30482
30483 if (skb_shinfo(skb)->nr_frags != 0) {
30484 printk("%s: No scatter-gather yet.\n", card->name);
30485- atomic_inc(&vcc->stats->tx_err);
30486+ atomic_inc_unchecked(&vcc->stats->tx_err);
30487 dev_kfree_skb(skb);
30488 return -EINVAL;
30489 }
30490@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
30491
30492 err = queue_skb(card, vc, skb, oam);
30493 if (err) {
30494- atomic_inc(&vcc->stats->tx_err);
30495+ atomic_inc_unchecked(&vcc->stats->tx_err);
30496 dev_kfree_skb(skb);
30497 return err;
30498 }
30499@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
30500 skb = dev_alloc_skb(64);
30501 if (!skb) {
30502 printk("%s: Out of memory in send_oam().\n", card->name);
30503- atomic_inc(&vcc->stats->tx_err);
30504+ atomic_inc_unchecked(&vcc->stats->tx_err);
30505 return -ENOMEM;
30506 }
30507 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
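
All of the ATM driver hunks in this stretch follow one pattern: device statistics move from atomic_t to atomic_unchecked_t. Under PaX's REFCOUNT hardening, the plain atomic_inc()/atomic_add() family is instrumented to trap on signed overflow, so that reference-count overflows cannot be turned into use-after-free bugs; counters that are allowed to wrap, such as rx/tx packet statistics, opt out through the *_unchecked variants. A minimal sketch of the idea, assuming an x86-style implementation (the real PaX primitives are per-architecture and include exception-table fixups):

    /* atomic_t is the regular kernel type from <linux/types.h> */
    typedef struct { int counter; } atomic_unchecked_t;

    /* checked increment: undoes the bump and raises #OF on signed overflow */
    static inline void atomic_inc(atomic_t *v)
    {
            asm volatile("lock incl %0\n"
                         "jno 0f\n"
                         "lock decl %0\n"
                         "int $4\n"
                         "0:\n"
                         : "+m" (v->counter));
    }

    /* unchecked increment: the historical behaviour, free to wrap */
    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
            asm volatile("lock incl %0" : "+m" (v->counter));
    }
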
30508diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
30509index 96cce6d..62c3ec5 100644
30510--- a/drivers/atm/iphase.c
30511+++ b/drivers/atm/iphase.c
30512@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
30513 status = (u_short) (buf_desc_ptr->desc_mode);
30514 if (status & (RX_CER | RX_PTE | RX_OFL))
30515 {
30516- atomic_inc(&vcc->stats->rx_err);
30517+ atomic_inc_unchecked(&vcc->stats->rx_err);
30518 IF_ERR(printk("IA: bad packet, dropping it");)
30519 if (status & RX_CER) {
30520 IF_ERR(printk(" cause: packet CRC error\n");)
30521@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
30522 len = dma_addr - buf_addr;
30523 if (len > iadev->rx_buf_sz) {
30524 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
30525- atomic_inc(&vcc->stats->rx_err);
30526+ atomic_inc_unchecked(&vcc->stats->rx_err);
30527 goto out_free_desc;
30528 }
30529
30530@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
30531 ia_vcc = INPH_IA_VCC(vcc);
30532 if (ia_vcc == NULL)
30533 {
30534- atomic_inc(&vcc->stats->rx_err);
30535+ atomic_inc_unchecked(&vcc->stats->rx_err);
30536 atm_return(vcc, skb->truesize);
30537 dev_kfree_skb_any(skb);
30538 goto INCR_DLE;
30539@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
30540 if ((length > iadev->rx_buf_sz) || (length >
30541 (skb->len - sizeof(struct cpcs_trailer))))
30542 {
30543- atomic_inc(&vcc->stats->rx_err);
30544+ atomic_inc_unchecked(&vcc->stats->rx_err);
30545 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
30546 length, skb->len);)
30547 atm_return(vcc, skb->truesize);
30548@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
30549
30550 IF_RX(printk("rx_dle_intr: skb push");)
30551 vcc->push(vcc,skb);
30552- atomic_inc(&vcc->stats->rx);
30553+ atomic_inc_unchecked(&vcc->stats->rx);
30554 iadev->rx_pkt_cnt++;
30555 }
30556 INCR_DLE:
30557@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
30558 {
30559 struct k_sonet_stats *stats;
30560 stats = &PRIV(_ia_dev[board])->sonet_stats;
30561- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
30562- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
30563- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
30564- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
30565- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
30566- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
30567- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
30568- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
30569- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
30570+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
30571+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
30572+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
30573+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
30574+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
30575+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
30576+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
30577+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
30578+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
30579 }
30580 ia_cmds.status = 0;
30581 break;
30582@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
30583 if ((desc == 0) || (desc > iadev->num_tx_desc))
30584 {
30585 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
30586- atomic_inc(&vcc->stats->tx);
30587+ atomic_inc_unchecked(&vcc->stats->tx);
30588 if (vcc->pop)
30589 vcc->pop(vcc, skb);
30590 else
30591@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
30592 ATM_DESC(skb) = vcc->vci;
30593 skb_queue_tail(&iadev->tx_dma_q, skb);
30594
30595- atomic_inc(&vcc->stats->tx);
30596+ atomic_inc_unchecked(&vcc->stats->tx);
30597 iadev->tx_pkt_cnt++;
30598 /* Increment transaction counter */
30599 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
30600
30601 #if 0
30602 /* add flow control logic */
30603- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
30604+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
30605 if (iavcc->vc_desc_cnt > 10) {
30606 vcc->tx_quota = vcc->tx_quota * 3 / 4;
30607 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
30608diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
30609index 68c7588..7036683 100644
30610--- a/drivers/atm/lanai.c
30611+++ b/drivers/atm/lanai.c
30612@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
30613 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
30614 lanai_endtx(lanai, lvcc);
30615 lanai_free_skb(lvcc->tx.atmvcc, skb);
30616- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
30617+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
30618 }
30619
30620 /* Try to fill the buffer - don't call unless there is backlog */
30621@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
30622 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
30623 __net_timestamp(skb);
30624 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
30625- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
30626+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
30627 out:
30628 lvcc->rx.buf.ptr = end;
30629 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
30630@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30631 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
30632 "vcc %d\n", lanai->number, (unsigned int) s, vci);
30633 lanai->stats.service_rxnotaal5++;
30634- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30635+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30636 return 0;
30637 }
30638 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
30639@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30640 int bytes;
30641 read_unlock(&vcc_sklist_lock);
30642 DPRINTK("got trashed rx pdu on vci %d\n", vci);
30643- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30644+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30645 lvcc->stats.x.aal5.service_trash++;
30646 bytes = (SERVICE_GET_END(s) * 16) -
30647 (((unsigned long) lvcc->rx.buf.ptr) -
30648@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30649 }
30650 if (s & SERVICE_STREAM) {
30651 read_unlock(&vcc_sklist_lock);
30652- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30653+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30654 lvcc->stats.x.aal5.service_stream++;
30655 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
30656 "PDU on VCI %d!\n", lanai->number, vci);
30657@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30658 return 0;
30659 }
30660 DPRINTK("got rx crc error on vci %d\n", vci);
30661- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30662+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30663 lvcc->stats.x.aal5.service_rxcrc++;
30664 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
30665 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
30666diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
30667index 1c70c45..300718d 100644
30668--- a/drivers/atm/nicstar.c
30669+++ b/drivers/atm/nicstar.c
30670@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30671 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
30672 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
30673 card->index);
30674- atomic_inc(&vcc->stats->tx_err);
30675+ atomic_inc_unchecked(&vcc->stats->tx_err);
30676 dev_kfree_skb_any(skb);
30677 return -EINVAL;
30678 }
30679@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30680 if (!vc->tx) {
30681 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
30682 card->index);
30683- atomic_inc(&vcc->stats->tx_err);
30684+ atomic_inc_unchecked(&vcc->stats->tx_err);
30685 dev_kfree_skb_any(skb);
30686 return -EINVAL;
30687 }
30688@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30689 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
30690 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
30691 card->index);
30692- atomic_inc(&vcc->stats->tx_err);
30693+ atomic_inc_unchecked(&vcc->stats->tx_err);
30694 dev_kfree_skb_any(skb);
30695 return -EINVAL;
30696 }
30697
30698 if (skb_shinfo(skb)->nr_frags != 0) {
30699 printk("nicstar%d: No scatter-gather yet.\n", card->index);
30700- atomic_inc(&vcc->stats->tx_err);
30701+ atomic_inc_unchecked(&vcc->stats->tx_err);
30702 dev_kfree_skb_any(skb);
30703 return -EINVAL;
30704 }
30705@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30706 }
30707
30708 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
30709- atomic_inc(&vcc->stats->tx_err);
30710+ atomic_inc_unchecked(&vcc->stats->tx_err);
30711 dev_kfree_skb_any(skb);
30712 return -EIO;
30713 }
30714- atomic_inc(&vcc->stats->tx);
30715+ atomic_inc_unchecked(&vcc->stats->tx);
30716
30717 return 0;
30718 }
30719@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30720 printk
30721 ("nicstar%d: Can't allocate buffers for aal0.\n",
30722 card->index);
30723- atomic_add(i, &vcc->stats->rx_drop);
30724+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
30725 break;
30726 }
30727 if (!atm_charge(vcc, sb->truesize)) {
30728 RXPRINTK
30729 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
30730 card->index);
30731- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
30732+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
30733 dev_kfree_skb_any(sb);
30734 break;
30735 }
30736@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30737 ATM_SKB(sb)->vcc = vcc;
30738 __net_timestamp(sb);
30739 vcc->push(vcc, sb);
30740- atomic_inc(&vcc->stats->rx);
30741+ atomic_inc_unchecked(&vcc->stats->rx);
30742 cell += ATM_CELL_PAYLOAD;
30743 }
30744
30745@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30746 if (iovb == NULL) {
30747 printk("nicstar%d: Out of iovec buffers.\n",
30748 card->index);
30749- atomic_inc(&vcc->stats->rx_drop);
30750+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30751 recycle_rx_buf(card, skb);
30752 return;
30753 }
30754@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30755 small or large buffer itself. */
30756 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
30757 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
30758- atomic_inc(&vcc->stats->rx_err);
30759+ atomic_inc_unchecked(&vcc->stats->rx_err);
30760 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
30761 NS_MAX_IOVECS);
30762 NS_PRV_IOVCNT(iovb) = 0;
30763@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30764 ("nicstar%d: Expected a small buffer, and this is not one.\n",
30765 card->index);
30766 which_list(card, skb);
30767- atomic_inc(&vcc->stats->rx_err);
30768+ atomic_inc_unchecked(&vcc->stats->rx_err);
30769 recycle_rx_buf(card, skb);
30770 vc->rx_iov = NULL;
30771 recycle_iov_buf(card, iovb);
30772@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30773 ("nicstar%d: Expected a large buffer, and this is not one.\n",
30774 card->index);
30775 which_list(card, skb);
30776- atomic_inc(&vcc->stats->rx_err);
30777+ atomic_inc_unchecked(&vcc->stats->rx_err);
30778 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
30779 NS_PRV_IOVCNT(iovb));
30780 vc->rx_iov = NULL;
30781@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30782 printk(" - PDU size mismatch.\n");
30783 else
30784 printk(".\n");
30785- atomic_inc(&vcc->stats->rx_err);
30786+ atomic_inc_unchecked(&vcc->stats->rx_err);
30787 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
30788 NS_PRV_IOVCNT(iovb));
30789 vc->rx_iov = NULL;
30790@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30791 /* skb points to a small buffer */
30792 if (!atm_charge(vcc, skb->truesize)) {
30793 push_rxbufs(card, skb);
30794- atomic_inc(&vcc->stats->rx_drop);
30795+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30796 } else {
30797 skb_put(skb, len);
30798 dequeue_sm_buf(card, skb);
30799@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30800 ATM_SKB(skb)->vcc = vcc;
30801 __net_timestamp(skb);
30802 vcc->push(vcc, skb);
30803- atomic_inc(&vcc->stats->rx);
30804+ atomic_inc_unchecked(&vcc->stats->rx);
30805 }
30806 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
30807 struct sk_buff *sb;
30808@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30809 if (len <= NS_SMBUFSIZE) {
30810 if (!atm_charge(vcc, sb->truesize)) {
30811 push_rxbufs(card, sb);
30812- atomic_inc(&vcc->stats->rx_drop);
30813+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30814 } else {
30815 skb_put(sb, len);
30816 dequeue_sm_buf(card, sb);
30817@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30818 ATM_SKB(sb)->vcc = vcc;
30819 __net_timestamp(sb);
30820 vcc->push(vcc, sb);
30821- atomic_inc(&vcc->stats->rx);
30822+ atomic_inc_unchecked(&vcc->stats->rx);
30823 }
30824
30825 push_rxbufs(card, skb);
30826@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30827
30828 if (!atm_charge(vcc, skb->truesize)) {
30829 push_rxbufs(card, skb);
30830- atomic_inc(&vcc->stats->rx_drop);
30831+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30832 } else {
30833 dequeue_lg_buf(card, skb);
30834 #ifdef NS_USE_DESTRUCTORS
30835@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30836 ATM_SKB(skb)->vcc = vcc;
30837 __net_timestamp(skb);
30838 vcc->push(vcc, skb);
30839- atomic_inc(&vcc->stats->rx);
30840+ atomic_inc_unchecked(&vcc->stats->rx);
30841 }
30842
30843 push_rxbufs(card, sb);
30844@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30845 printk
30846 ("nicstar%d: Out of huge buffers.\n",
30847 card->index);
30848- atomic_inc(&vcc->stats->rx_drop);
30849+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30850 recycle_iovec_rx_bufs(card,
30851 (struct iovec *)
30852 iovb->data,
30853@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30854 card->hbpool.count++;
30855 } else
30856 dev_kfree_skb_any(hb);
30857- atomic_inc(&vcc->stats->rx_drop);
30858+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30859 } else {
30860 /* Copy the small buffer to the huge buffer */
30861 sb = (struct sk_buff *)iov->iov_base;
30862@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30863 #endif /* NS_USE_DESTRUCTORS */
30864 __net_timestamp(hb);
30865 vcc->push(vcc, hb);
30866- atomic_inc(&vcc->stats->rx);
30867+ atomic_inc_unchecked(&vcc->stats->rx);
30868 }
30869 }
30870
30871diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
30872index 1853a45..cf2426d 100644
30873--- a/drivers/atm/solos-pci.c
30874+++ b/drivers/atm/solos-pci.c
30875@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
30876 }
30877 atm_charge(vcc, skb->truesize);
30878 vcc->push(vcc, skb);
30879- atomic_inc(&vcc->stats->rx);
30880+ atomic_inc_unchecked(&vcc->stats->rx);
30881 break;
30882
30883 case PKT_STATUS:
30884@@ -1010,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_card *card)
30885 vcc = SKB_CB(oldskb)->vcc;
30886
30887 if (vcc) {
30888- atomic_inc(&vcc->stats->tx);
30889+ atomic_inc_unchecked(&vcc->stats->tx);
30890 solos_pop(vcc, oldskb);
30891 } else
30892 dev_kfree_skb_irq(oldskb);
30893diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
30894index 0215934..ce9f5b1 100644
30895--- a/drivers/atm/suni.c
30896+++ b/drivers/atm/suni.c
30897@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
30898
30899
30900 #define ADD_LIMITED(s,v) \
30901- atomic_add((v),&stats->s); \
30902- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
30903+ atomic_add_unchecked((v),&stats->s); \
30904+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
30905
30906
30907 static void suni_hz(unsigned long from_timer)
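
The suni PHY driver (and uPD98402 just below) wraps its SONET counter updates in an ADD_LIMITED() macro that clamps at INT_MAX rather than letting the value wrap negative, so the conversion here only swaps in the unchecked primitives; saturation is still handled by the macro itself. The same logic as a function, a sketch with assumed names:

    /* saturating add: once the sum wraps past INT_MAX, pin it at INT_MAX
     * (INT_MAX via <linux/kernel.h>) */
    static inline void add_limited(atomic_unchecked_t *s, int v)
    {
            atomic_add_unchecked(v, s);
            if (atomic_read_unchecked(s) < 0)   /* wrapped into negative range */
                    atomic_set_unchecked(s, INT_MAX);
    }
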
30908diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
30909index 5120a96..e2572bd 100644
30910--- a/drivers/atm/uPD98402.c
30911+++ b/drivers/atm/uPD98402.c
30912@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
30913 struct sonet_stats tmp;
30914 int error = 0;
30915
30916- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30917+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30918 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
30919 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
30920 if (zero && !error) {
30921@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
30922
30923
30924 #define ADD_LIMITED(s,v) \
30925- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
30926- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
30927- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30928+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
30929+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
30930+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30931
30932
30933 static void stat_event(struct atm_dev *dev)
30934@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
30935 if (reason & uPD98402_INT_PFM) stat_event(dev);
30936 if (reason & uPD98402_INT_PCO) {
30937 (void) GET(PCOCR); /* clear interrupt cause */
30938- atomic_add(GET(HECCT),
30939+ atomic_add_unchecked(GET(HECCT),
30940 &PRIV(dev)->sonet_stats.uncorr_hcs);
30941 }
30942 if ((reason & uPD98402_INT_RFO) &&
30943@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
30944 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
30945 uPD98402_INT_LOS),PIMR); /* enable them */
30946 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
30947- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30948- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
30949- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
30950+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30951+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
30952+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
30953 return 0;
30954 }
30955
30956diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
30957index abe4e20..83c4727 100644
30958--- a/drivers/atm/zatm.c
30959+++ b/drivers/atm/zatm.c
30960@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30961 }
30962 if (!size) {
30963 dev_kfree_skb_irq(skb);
30964- if (vcc) atomic_inc(&vcc->stats->rx_err);
30965+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
30966 continue;
30967 }
30968 if (!atm_charge(vcc,skb->truesize)) {
30969@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30970 skb->len = size;
30971 ATM_SKB(skb)->vcc = vcc;
30972 vcc->push(vcc,skb);
30973- atomic_inc(&vcc->stats->rx);
30974+ atomic_inc_unchecked(&vcc->stats->rx);
30975 }
30976 zout(pos & 0xffff,MTA(mbx));
30977 #if 0 /* probably a stupid idea */
30978@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
30979 skb_queue_head(&zatm_vcc->backlog,skb);
30980 break;
30981 }
30982- atomic_inc(&vcc->stats->tx);
30983+ atomic_inc_unchecked(&vcc->stats->tx);
30984 wake_up(&zatm_vcc->tx_wait);
30985 }
30986
30987diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
30988index 147d1a4..d0fd4b0 100644
30989--- a/drivers/base/devtmpfs.c
30990+++ b/drivers/base/devtmpfs.c
30991@@ -347,7 +347,7 @@ int devtmpfs_mount(const char *mntdir)
30992 if (!thread)
30993 return 0;
30994
30995- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
30996+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
30997 if (err)
30998 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
30999 else
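
sys_mount() is declared with __user pointer parameters, but devtmpfs invokes it from a kernel thread with kernel strings, which is safe in that context; the __force_user casts simply tell sparse that crossing the address-space annotation is deliberate. For reference, a sketch of the annotations involved, matching the usual definitions in the kernel's compiler headers (the __force_user shorthand itself is assumed to come from the grsecurity patch):

    #ifdef __CHECKER__
    # define __user  __attribute__((noderef, address_space(1)))
    # define __force __attribute__((force))
    #else
    # define __user
    # define __force
    #endif

    /* "this kernel pointer is intentionally passed where __user is expected" */
    #define __force_user __force __user
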
31000diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
31001index e6ee5e8..98ad7fc 100644
31002--- a/drivers/base/power/wakeup.c
31003+++ b/drivers/base/power/wakeup.c
31004@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
31005 * They need to be modified together atomically, so it's better to use one
31006 * atomic variable to hold them both.
31007 */
31008-static atomic_t combined_event_count = ATOMIC_INIT(0);
31009+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
31010
31011 #define IN_PROGRESS_BITS (sizeof(int) * 4)
31012 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
31013
31014 static void split_counters(unsigned int *cnt, unsigned int *inpr)
31015 {
31016- unsigned int comb = atomic_read(&combined_event_count);
31017+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
31018
31019 *cnt = (comb >> IN_PROGRESS_BITS);
31020 *inpr = comb & MAX_IN_PROGRESS;
31021@@ -389,7 +389,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
31022 ws->start_prevent_time = ws->last_time;
31023
31024 /* Increment the counter of events in progress. */
31025- cec = atomic_inc_return(&combined_event_count);
31026+ cec = atomic_inc_return_unchecked(&combined_event_count);
31027
31028 trace_wakeup_source_activate(ws->name, cec);
31029 }
31030@@ -515,7 +515,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
31031 * Increment the counter of registered wakeup events and decrement the
31032 	 * counter of wakeup events in progress simultaneously.
31033 */
31034- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
31035+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
31036 trace_wakeup_source_deactivate(ws->name, cec);
31037
31038 split_counters(&cnt, &inpr);
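
combined_event_count packs two coupled counters into one word precisely so that both can be updated by a single atomic operation: the low IN_PROGRESS_BITS hold the number of wakeup events in progress, the high bits the number of registered events. Activation adds 1; deactivation adds MAX_IN_PROGRESS, which is arithmetically the same as subtracting 1 from the low half while adding 1 to the high half via the carry. A worked sketch:

    #define IN_PROGRESS_BITS (sizeof(int) * 4)      /* 16, for 32-bit int */
    #define MAX_IN_PROGRESS  ((1 << IN_PROGRESS_BITS) - 1)

    unsigned int comb = (3u << IN_PROGRESS_BITS) | 2u;  /* cnt = 3, inpr = 2 */

    comb += MAX_IN_PROGRESS;    /* == comb - 1 + (1 << IN_PROGRESS_BITS),   */
                                /* one atomic add: inpr -= 1 and cnt += 1   */

    unsigned int cnt  = comb >> IN_PROGRESS_BITS;       /* now 4 */
    unsigned int inpr = comb & MAX_IN_PROGRESS;         /* now 1 */

The switch to atomic_unchecked_t fits the same REFCOUNT rationale as the ATM counters above: this word is a packed statistic, not a reference count, and the high half is expected to wrap eventually.
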
31039diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
31040index ca83f96..69d4ea9 100644
31041--- a/drivers/block/cciss.c
31042+++ b/drivers/block/cciss.c
31043@@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
31044 int err;
31045 u32 cp;
31046
31047+ memset(&arg64, 0, sizeof(arg64));
31048+
31049 err = 0;
31050 err |=
31051 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
31052@@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
31053 while (!list_empty(&h->reqQ)) {
31054 c = list_entry(h->reqQ.next, CommandList_struct, list);
31055 /* can't do anything if fifo is full */
31056- if ((h->access.fifo_full(h))) {
31057+ if ((h->access->fifo_full(h))) {
31058 dev_warn(&h->pdev->dev, "fifo full\n");
31059 break;
31060 }
31061@@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
31062 h->Qdepth--;
31063
31064 /* Tell the controller execute command */
31065- h->access.submit_command(h, c);
31066+ h->access->submit_command(h, c);
31067
31068 /* Put job onto the completed Q */
31069 addQ(&h->cmpQ, c);
31070@@ -3443,17 +3445,17 @@ startio:
31071
31072 static inline unsigned long get_next_completion(ctlr_info_t *h)
31073 {
31074- return h->access.command_completed(h);
31075+ return h->access->command_completed(h);
31076 }
31077
31078 static inline int interrupt_pending(ctlr_info_t *h)
31079 {
31080- return h->access.intr_pending(h);
31081+ return h->access->intr_pending(h);
31082 }
31083
31084 static inline long interrupt_not_for_us(ctlr_info_t *h)
31085 {
31086- return ((h->access.intr_pending(h) == 0) ||
31087+ return ((h->access->intr_pending(h) == 0) ||
31088 (h->interrupts_enabled == 0));
31089 }
31090
31091@@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
31092 u32 a;
31093
31094 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
31095- return h->access.command_completed(h);
31096+ return h->access->command_completed(h);
31097
31098 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
31099 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
31100@@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
31101 trans_support & CFGTBL_Trans_use_short_tags);
31102
31103 /* Change the access methods to the performant access methods */
31104- h->access = SA5_performant_access;
31105+ h->access = &SA5_performant_access;
31106 h->transMethod = CFGTBL_Trans_Performant;
31107
31108 return;
31109@@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
31110 if (prod_index < 0)
31111 return -ENODEV;
31112 h->product_name = products[prod_index].product_name;
31113- h->access = *(products[prod_index].access);
31114+ h->access = products[prod_index].access;
31115
31116 if (cciss_board_disabled(h)) {
31117 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
31118@@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
31119 }
31120
31121 /* make sure the board interrupts are off */
31122- h->access.set_intr_mask(h, CCISS_INTR_OFF);
31123+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
31124 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
31125 if (rc)
31126 goto clean2;
31127@@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
31128 * fake ones to scoop up any residual completions.
31129 */
31130 spin_lock_irqsave(&h->lock, flags);
31131- h->access.set_intr_mask(h, CCISS_INTR_OFF);
31132+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
31133 spin_unlock_irqrestore(&h->lock, flags);
31134 free_irq(h->intr[h->intr_mode], h);
31135 rc = cciss_request_irq(h, cciss_msix_discard_completions,
31136@@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
31137 dev_info(&h->pdev->dev, "Board READY.\n");
31138 dev_info(&h->pdev->dev,
31139 "Waiting for stale completions to drain.\n");
31140- h->access.set_intr_mask(h, CCISS_INTR_ON);
31141+ h->access->set_intr_mask(h, CCISS_INTR_ON);
31142 msleep(10000);
31143- h->access.set_intr_mask(h, CCISS_INTR_OFF);
31144+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
31145
31146 rc = controller_reset_failed(h->cfgtable);
31147 if (rc)
31148@@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
31149 cciss_scsi_setup(h);
31150
31151 /* Turn the interrupts on so we can service requests */
31152- h->access.set_intr_mask(h, CCISS_INTR_ON);
31153+ h->access->set_intr_mask(h, CCISS_INTR_ON);
31154
31155 /* Get the firmware version */
31156 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
31157@@ -5210,7 +5212,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
31158 kfree(flush_buf);
31159 if (return_code != IO_OK)
31160 dev_warn(&h->pdev->dev, "Error flushing cache\n");
31161- h->access.set_intr_mask(h, CCISS_INTR_OFF);
31162+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
31163 free_irq(h->intr[h->intr_mode], h);
31164 }
31165
31166diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
31167index 7fda30e..eb5dfe0 100644
31168--- a/drivers/block/cciss.h
31169+++ b/drivers/block/cciss.h
31170@@ -101,7 +101,7 @@ struct ctlr_info
31171 /* information about each logical volume */
31172 drive_info_struct *drv[CISS_MAX_LUN];
31173
31174- struct access_method access;
31175+ struct access_method *access;
31176
31177 /* queue and queue Info */
31178 struct list_head reqQ;
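
The cciss changes (and the cpqarray ones that follow) are constification: instead of copying the board's access_method table into a writable field of each controller structure, the structure keeps a pointer, so the per-board tables can be declared const and placed in read-only memory, out of reach of runtime tampering. Every call site changes from h->access.fn(h) to h->access->fn(h). A reduced before/after sketch with illustrative names:

    struct access_method {
            void (*submit_command)(void *h);
            unsigned long (*command_completed)(void *h);
    };

    static void sa5_submit(void *h) { (void)h; }
    static unsigned long sa5_completed(void *h) { (void)h; return 0; }

    static const struct access_method SA5_access = {    /* lands in .rodata */
            .submit_command    = sa5_submit,
            .command_completed = sa5_completed,
    };

    struct ctlr_info_sketch {
            const struct access_method *access; /* was: struct access_method access; */
    };

    /* probe:  h->access = &SA5_access;      was: h->access = *(products[i].access); */
    /* caller: h->access->submit_command(h); was: h->access.submit_command(h);       */
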
31179diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
31180index 9125bbe..eede5c8 100644
31181--- a/drivers/block/cpqarray.c
31182+++ b/drivers/block/cpqarray.c
31183@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
31184 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
31185 goto Enomem4;
31186 }
31187- hba[i]->access.set_intr_mask(hba[i], 0);
31188+ hba[i]->access->set_intr_mask(hba[i], 0);
31189 if (request_irq(hba[i]->intr, do_ida_intr,
31190 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
31191 {
31192@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
31193 add_timer(&hba[i]->timer);
31194
31195 /* Enable IRQ now that spinlock and rate limit timer are set up */
31196- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
31197+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
31198
31199 for(j=0; j<NWD; j++) {
31200 struct gendisk *disk = ida_gendisk[i][j];
31201@@ -694,7 +694,7 @@ DBGINFO(
31202 for(i=0; i<NR_PRODUCTS; i++) {
31203 if (board_id == products[i].board_id) {
31204 c->product_name = products[i].product_name;
31205- c->access = *(products[i].access);
31206+ c->access = products[i].access;
31207 break;
31208 }
31209 }
31210@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
31211 hba[ctlr]->intr = intr;
31212 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
31213 hba[ctlr]->product_name = products[j].product_name;
31214- hba[ctlr]->access = *(products[j].access);
31215+ hba[ctlr]->access = products[j].access;
31216 hba[ctlr]->ctlr = ctlr;
31217 hba[ctlr]->board_id = board_id;
31218 hba[ctlr]->pci_dev = NULL; /* not PCI */
31219@@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
31220
31221 while((c = h->reqQ) != NULL) {
31222 /* Can't do anything if we're busy */
31223- if (h->access.fifo_full(h) == 0)
31224+ if (h->access->fifo_full(h) == 0)
31225 return;
31226
31227 /* Get the first entry from the request Q */
31228@@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
31229 h->Qdepth--;
31230
31231 /* Tell the controller to do our bidding */
31232- h->access.submit_command(h, c);
31233+ h->access->submit_command(h, c);
31234
31235 /* Get onto the completion Q */
31236 addQ(&h->cmpQ, c);
31237@@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
31238 unsigned long flags;
31239 __u32 a,a1;
31240
31241- istat = h->access.intr_pending(h);
31242+ istat = h->access->intr_pending(h);
31243 /* Is this interrupt for us? */
31244 if (istat == 0)
31245 return IRQ_NONE;
31246@@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
31247 */
31248 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
31249 if (istat & FIFO_NOT_EMPTY) {
31250- while((a = h->access.command_completed(h))) {
31251+ while((a = h->access->command_completed(h))) {
31252 a1 = a; a &= ~3;
31253 if ((c = h->cmpQ) == NULL)
31254 {
31255@@ -1449,11 +1449,11 @@ static int sendcmd(
31256 /*
31257 * Disable interrupt
31258 */
31259- info_p->access.set_intr_mask(info_p, 0);
31260+ info_p->access->set_intr_mask(info_p, 0);
31261 /* Make sure there is room in the command FIFO */
31262 /* Actually it should be completely empty at this time. */
31263 for (i = 200000; i > 0; i--) {
31264- temp = info_p->access.fifo_full(info_p);
31265+ temp = info_p->access->fifo_full(info_p);
31266 if (temp != 0) {
31267 break;
31268 }
31269@@ -1466,7 +1466,7 @@ DBG(
31270 /*
31271 * Send the cmd
31272 */
31273- info_p->access.submit_command(info_p, c);
31274+ info_p->access->submit_command(info_p, c);
31275 complete = pollcomplete(ctlr);
31276
31277 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
31278@@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
31279 * we check the new geometry. Then turn interrupts back on when
31280 * we're done.
31281 */
31282- host->access.set_intr_mask(host, 0);
31283+ host->access->set_intr_mask(host, 0);
31284 getgeometry(ctlr);
31285- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
31286+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
31287
31288 for(i=0; i<NWD; i++) {
31289 struct gendisk *disk = ida_gendisk[ctlr][i];
31290@@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
31291 /* Wait (up to 2 seconds) for a command to complete */
31292
31293 for (i = 200000; i > 0; i--) {
31294- done = hba[ctlr]->access.command_completed(hba[ctlr]);
31295+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
31296 if (done == 0) {
31297 udelay(10); /* a short fixed delay */
31298 } else
31299diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
31300index be73e9d..7fbf140 100644
31301--- a/drivers/block/cpqarray.h
31302+++ b/drivers/block/cpqarray.h
31303@@ -99,7 +99,7 @@ struct ctlr_info {
31304 drv_info_t drv[NWD];
31305 struct proc_dir_entry *proc;
31306
31307- struct access_method access;
31308+ struct access_method *access;
31309
31310 cmdlist_t *reqQ;
31311 cmdlist_t *cmpQ;
31312diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
31313index b953cc7..e3dc580 100644
31314--- a/drivers/block/drbd/drbd_int.h
31315+++ b/drivers/block/drbd/drbd_int.h
31316@@ -735,7 +735,7 @@ struct drbd_request;
31317 struct drbd_epoch {
31318 struct list_head list;
31319 unsigned int barrier_nr;
31320- atomic_t epoch_size; /* increased on every request added. */
31321+ atomic_unchecked_t epoch_size; /* increased on every request added. */
31322 atomic_t active; /* increased on every req. added, and dec on every finished. */
31323 unsigned long flags;
31324 };
31325@@ -1116,7 +1116,7 @@ struct drbd_conf {
31326 void *int_dig_in;
31327 void *int_dig_vv;
31328 wait_queue_head_t seq_wait;
31329- atomic_t packet_seq;
31330+ atomic_unchecked_t packet_seq;
31331 unsigned int peer_seq;
31332 spinlock_t peer_seq_lock;
31333 unsigned int minor;
31334@@ -1658,30 +1658,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
31335
31336 static inline void drbd_tcp_cork(struct socket *sock)
31337 {
31338- int __user val = 1;
31339+ int val = 1;
31340 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
31341- (char __user *)&val, sizeof(val));
31342+ (char __force_user *)&val, sizeof(val));
31343 }
31344
31345 static inline void drbd_tcp_uncork(struct socket *sock)
31346 {
31347- int __user val = 0;
31348+ int val = 0;
31349 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
31350- (char __user *)&val, sizeof(val));
31351+ (char __force_user *)&val, sizeof(val));
31352 }
31353
31354 static inline void drbd_tcp_nodelay(struct socket *sock)
31355 {
31356- int __user val = 1;
31357+ int val = 1;
31358 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
31359- (char __user *)&val, sizeof(val));
31360+ (char __force_user *)&val, sizeof(val));
31361 }
31362
31363 static inline void drbd_tcp_quickack(struct socket *sock)
31364 {
31365- int __user val = 2;
31366+ int val = 2;
31367 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
31368- (char __user *)&val, sizeof(val));
31369+ (char __force_user *)&val, sizeof(val));
31370 }
31371
31372 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
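
Two separate fixes share this header hunk. packet_seq and epoch_size become unchecked for the usual reason: they are wrapping sequence and size counters, not reference counts. The drbd_tcp_*() helpers additionally lose a bogus annotation: "int __user val" claimed that a stack variable lived in user space, when only the pointer handed to the sockopt call needs to cross the boundary. A sketch of the corrected shape:

    static inline void drbd_tcp_cork_sketch(struct socket *sock)
    {
            int val = 1;    /* plain kernel stack variable, no __user */

            /* force the cast only at the call site that expects __user */
            (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
                                   (char __force_user *)&val, sizeof(val));
    }
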
31373diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
31374index f55683a..2101b96 100644
31375--- a/drivers/block/drbd/drbd_main.c
31376+++ b/drivers/block/drbd/drbd_main.c
31377@@ -2556,7 +2556,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
31378 p.sector = sector;
31379 p.block_id = block_id;
31380 p.blksize = blksize;
31381- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
31382+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
31383
31384 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
31385 return false;
31386@@ -2854,7 +2854,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
31387
31388 p.sector = cpu_to_be64(req->sector);
31389 p.block_id = (unsigned long)req;
31390- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
31391+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
31392
31393 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
31394
31395@@ -3139,7 +3139,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
31396 atomic_set(&mdev->unacked_cnt, 0);
31397 atomic_set(&mdev->local_cnt, 0);
31398 atomic_set(&mdev->net_cnt, 0);
31399- atomic_set(&mdev->packet_seq, 0);
31400+ atomic_set_unchecked(&mdev->packet_seq, 0);
31401 atomic_set(&mdev->pp_in_use, 0);
31402 atomic_set(&mdev->pp_in_use_by_net, 0);
31403 atomic_set(&mdev->rs_sect_in, 0);
31404@@ -3221,8 +3221,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
31405 mdev->receiver.t_state);
31406
31407 /* no need to lock it, I'm the only thread alive */
31408- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
31409- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
31410+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
31411+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
31412 mdev->al_writ_cnt =
31413 mdev->bm_writ_cnt =
31414 mdev->read_cnt =
31415diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
31416index edb490a..ecd69da 100644
31417--- a/drivers/block/drbd/drbd_nl.c
31418+++ b/drivers/block/drbd/drbd_nl.c
31419@@ -2407,7 +2407,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
31420 module_put(THIS_MODULE);
31421 }
31422
31423-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
31424+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
31425
31426 static unsigned short *
31427 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
31428@@ -2478,7 +2478,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
31429 cn_reply->id.idx = CN_IDX_DRBD;
31430 cn_reply->id.val = CN_VAL_DRBD;
31431
31432- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
31433+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
31434 cn_reply->ack = 0; /* not used here. */
31435 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
31436 (int)((char *)tl - (char *)reply->tag_list);
31437@@ -2510,7 +2510,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
31438 cn_reply->id.idx = CN_IDX_DRBD;
31439 cn_reply->id.val = CN_VAL_DRBD;
31440
31441- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
31442+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
31443 cn_reply->ack = 0; /* not used here. */
31444 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
31445 (int)((char *)tl - (char *)reply->tag_list);
31446@@ -2588,7 +2588,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
31447 cn_reply->id.idx = CN_IDX_DRBD;
31448 cn_reply->id.val = CN_VAL_DRBD;
31449
31450- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
31451+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
31452 cn_reply->ack = 0; // not used here.
31453 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
31454 (int)((char*)tl - (char*)reply->tag_list);
31455@@ -2627,7 +2627,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
31456 cn_reply->id.idx = CN_IDX_DRBD;
31457 cn_reply->id.val = CN_VAL_DRBD;
31458
31459- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
31460+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
31461 cn_reply->ack = 0; /* not used here. */
31462 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
31463 (int)((char *)tl - (char *)reply->tag_list);
31464diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
31465index c74ca2d..860c819 100644
31466--- a/drivers/block/drbd/drbd_receiver.c
31467+++ b/drivers/block/drbd/drbd_receiver.c
31468@@ -898,7 +898,7 @@ retry:
31469 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
31470 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
31471
31472- atomic_set(&mdev->packet_seq, 0);
31473+ atomic_set_unchecked(&mdev->packet_seq, 0);
31474 mdev->peer_seq = 0;
31475
31476 if (drbd_send_protocol(mdev) == -1)
31477@@ -999,7 +999,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
31478 do {
31479 next_epoch = NULL;
31480
31481- epoch_size = atomic_read(&epoch->epoch_size);
31482+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
31483
31484 switch (ev & ~EV_CLEANUP) {
31485 case EV_PUT:
31486@@ -1035,7 +1035,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
31487 rv = FE_DESTROYED;
31488 } else {
31489 epoch->flags = 0;
31490- atomic_set(&epoch->epoch_size, 0);
31491+ atomic_set_unchecked(&epoch->epoch_size, 0);
31492 /* atomic_set(&epoch->active, 0); is already zero */
31493 if (rv == FE_STILL_LIVE)
31494 rv = FE_RECYCLED;
31495@@ -1210,14 +1210,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
31496 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
31497 drbd_flush(mdev);
31498
31499- if (atomic_read(&mdev->current_epoch->epoch_size)) {
31500+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
31501 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
31502 if (epoch)
31503 break;
31504 }
31505
31506 epoch = mdev->current_epoch;
31507- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
31508+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
31509
31510 D_ASSERT(atomic_read(&epoch->active) == 0);
31511 D_ASSERT(epoch->flags == 0);
31512@@ -1229,11 +1229,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
31513 }
31514
31515 epoch->flags = 0;
31516- atomic_set(&epoch->epoch_size, 0);
31517+ atomic_set_unchecked(&epoch->epoch_size, 0);
31518 atomic_set(&epoch->active, 0);
31519
31520 spin_lock(&mdev->epoch_lock);
31521- if (atomic_read(&mdev->current_epoch->epoch_size)) {
31522+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
31523 list_add(&epoch->list, &mdev->current_epoch->list);
31524 mdev->current_epoch = epoch;
31525 mdev->epochs++;
31526@@ -1702,7 +1702,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
31527 spin_unlock(&mdev->peer_seq_lock);
31528
31529 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
31530- atomic_inc(&mdev->current_epoch->epoch_size);
31531+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
31532 return drbd_drain_block(mdev, data_size);
31533 }
31534
31535@@ -1732,7 +1732,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
31536
31537 spin_lock(&mdev->epoch_lock);
31538 e->epoch = mdev->current_epoch;
31539- atomic_inc(&e->epoch->epoch_size);
31540+ atomic_inc_unchecked(&e->epoch->epoch_size);
31541 atomic_inc(&e->epoch->active);
31542 spin_unlock(&mdev->epoch_lock);
31543
31544@@ -3954,7 +3954,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
31545 D_ASSERT(list_empty(&mdev->done_ee));
31546
31547 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
31548- atomic_set(&mdev->current_epoch->epoch_size, 0);
31549+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
31550 D_ASSERT(list_empty(&mdev->current_epoch->list));
31551 }
31552
31553diff --git a/drivers/block/loop.c b/drivers/block/loop.c
31554index 54046e5..7759c55 100644
31555--- a/drivers/block/loop.c
31556+++ b/drivers/block/loop.c
31557@@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
31558 mm_segment_t old_fs = get_fs();
31559
31560 set_fs(get_ds());
31561- bw = file->f_op->write(file, buf, len, &pos);
31562+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
31563 set_fs(old_fs);
31564 if (likely(bw == len))
31565 return 0;
31566diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
31567index d620b44..587561e 100644
31568--- a/drivers/cdrom/cdrom.c
31569+++ b/drivers/cdrom/cdrom.c
31570@@ -416,7 +416,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
31571 ENSURE(reset, CDC_RESET);
31572 ENSURE(generic_packet, CDC_GENERIC_PACKET);
31573 cdi->mc_flags = 0;
31574- cdo->n_minors = 0;
31575 cdi->options = CDO_USE_FFLAGS;
31576
31577 if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
31578@@ -436,8 +435,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
31579 else
31580 cdi->cdda_method = CDDA_OLD;
31581
31582- if (!cdo->generic_packet)
31583- cdo->generic_packet = cdrom_dummy_generic_packet;
31584+ if (!cdo->generic_packet) {
31585+ pax_open_kernel();
31586+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
31587+ pax_close_kernel();
31588+ }
31589
31590 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
31591 mutex_lock(&cdrom_mutex);
31592@@ -458,7 +460,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
31593 if (cdi->exit)
31594 cdi->exit(cdi);
31595
31596- cdi->ops->n_minors--;
31597 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
31598 }
31599
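
Elsewhere in this patch cdrom_device_ops tables are constified, which is why the n_minors writes disappear here and in gdrom below: the ops structure can no longer be scribbled on at registration time. The one write that must remain, installing the dummy generic_packet fallback, is bracketed with pax_open_kernel()/pax_close_kernel(), which temporarily lift kernel write protection around a deliberate store into read-only data (on x86 the PaX implementation toggles CR0.WP; details vary by architecture). The pattern, as a sketch:

    /* assumption: cdo points at an ops table living in read-only memory */
    if (!cdo->generic_packet) {
            pax_open_kernel();      /* make .rodata writable for this CPU */
            *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
            pax_close_kernel();     /* restore write protection */
    }

The *(void **)& cast is part of the idiom: it launders away the const qualifier that would otherwise make the assignment a compile error.
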
31600diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
31601index 75d485a..2809958 100644
31602--- a/drivers/cdrom/gdrom.c
31603+++ b/drivers/cdrom/gdrom.c
31604@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
31605 .audio_ioctl = gdrom_audio_ioctl,
31606 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
31607 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
31608- .n_minors = 1,
31609 };
31610
31611 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
31612diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
31613index 72bedad..8181ce1 100644
31614--- a/drivers/char/Kconfig
31615+++ b/drivers/char/Kconfig
31616@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
31617
31618 config DEVKMEM
31619 bool "/dev/kmem virtual device support"
31620- default y
31621+ default n
31622+ depends on !GRKERNSEC_KMEM
31623 help
31624 Say Y here if you want to support the /dev/kmem device. The
31625 /dev/kmem device is rarely used, but can be used for certain
31626@@ -581,6 +582,7 @@ config DEVPORT
31627 bool
31628 depends on !M68K
31629 depends on ISA || PCI
31630+ depends on !GRKERNSEC_KMEM
31631 default y
31632
31633 source "drivers/s390/char/Kconfig"
31634diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
31635index 2e04433..22afc64 100644
31636--- a/drivers/char/agp/frontend.c
31637+++ b/drivers/char/agp/frontend.c
31638@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
31639 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
31640 return -EFAULT;
31641
31642- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
31643+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
31644 return -EFAULT;
31645
31646 client = agp_find_client_by_pid(reserve.pid);
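
The agp bound check guards a multiplication used to size a later allocation. The original divided by sizeof(struct agp_segment), but the array allocated downstream holds the larger struct agp_segment_priv objects, so a seg_count that passed the old check could still overflow the real allocation size. The general overflow-safe allocation pattern, as a sketch (the helper name is hypothetical):

    /* refuse counts that would make count * elem wrap around
     * (SIZE_MAX via <linux/kernel.h>) */
    static void *alloc_array_sketch(size_t count, size_t elem)
    {
            if (elem && count >= SIZE_MAX / elem)
                    return NULL;            /* count * elem would overflow */
            return kmalloc(count * elem, GFP_KERNEL);
    }

The rule the fix encodes: always divide by the size of the type that is really allocated, not a lookalike.
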
31647diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
31648index 21cb980..f15107c 100644
31649--- a/drivers/char/genrtc.c
31650+++ b/drivers/char/genrtc.c
31651@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct file *file,
31652 switch (cmd) {
31653
31654 case RTC_PLL_GET:
31655+ memset(&pll, 0, sizeof(pll));
31656 if (get_rtc_pll(&pll))
31657 return -EINVAL;
31658 else
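
The genrtc fix is an information-leak repair: struct rtc_pll_info is declared on the stack, get_rtc_pll() may leave fields (and padding bytes) untouched, and the structure is subsequently copied to user space by the ioctl. Zeroing it first guarantees no stale kernel stack contents escape. The generic pattern, sketched (argp standing in for the ioctl's user pointer):

    struct rtc_pll_info pll;

    memset(&pll, 0, sizeof(pll));   /* clear every field and padding byte  */
    if (get_rtc_pll(&pll))          /* may fill the struct only partially  */
            return -EINVAL;
    if (copy_to_user(argp, &pll, sizeof(pll)))  /* nothing stale can leak  */
            return -EFAULT;
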
31659diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
31660index dfd7876..c0b0885 100644
31661--- a/drivers/char/hpet.c
31662+++ b/drivers/char/hpet.c
31663@@ -571,7 +571,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
31664 }
31665
31666 static int
31667-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
31668+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
31669 struct hpet_info *info)
31670 {
31671 struct hpet_timer __iomem *timer;
31672diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
31673index a0c84bb..9edcf60 100644
31674--- a/drivers/char/ipmi/ipmi_msghandler.c
31675+++ b/drivers/char/ipmi/ipmi_msghandler.c
31676@@ -420,7 +420,7 @@ struct ipmi_smi {
31677 struct proc_dir_entry *proc_dir;
31678 char proc_dir_name[10];
31679
31680- atomic_t stats[IPMI_NUM_STATS];
31681+ atomic_unchecked_t stats[IPMI_NUM_STATS];
31682
31683 /*
31684 * run_to_completion duplicate of smb_info, smi_info
31685@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
31686
31687
31688 #define ipmi_inc_stat(intf, stat) \
31689- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
31690+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
31691 #define ipmi_get_stat(intf, stat) \
31692- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
31693+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
31694
31695 static int is_lan_addr(struct ipmi_addr *addr)
31696 {
31697@@ -2884,7 +2884,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
31698 INIT_LIST_HEAD(&intf->cmd_rcvrs);
31699 init_waitqueue_head(&intf->waitq);
31700 for (i = 0; i < IPMI_NUM_STATS; i++)
31701- atomic_set(&intf->stats[i], 0);
31702+ atomic_set_unchecked(&intf->stats[i], 0);
31703
31704 intf->proc_dir = NULL;
31705
31706diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
31707index 32a6c7e..f6966a9 100644
31708--- a/drivers/char/ipmi/ipmi_si_intf.c
31709+++ b/drivers/char/ipmi/ipmi_si_intf.c
31710@@ -275,7 +275,7 @@ struct smi_info {
31711 unsigned char slave_addr;
31712
31713 /* Counters and things for the proc filesystem. */
31714- atomic_t stats[SI_NUM_STATS];
31715+ atomic_unchecked_t stats[SI_NUM_STATS];
31716
31717 struct task_struct *thread;
31718
31719@@ -284,9 +284,9 @@ struct smi_info {
31720 };
31721
31722 #define smi_inc_stat(smi, stat) \
31723- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
31724+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
31725 #define smi_get_stat(smi, stat) \
31726- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
31727+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
31728
31729 #define SI_MAX_PARMS 4
31730
31731@@ -3225,7 +3225,7 @@ static int try_smi_init(struct smi_info *new_smi)
31732 atomic_set(&new_smi->req_events, 0);
31733 new_smi->run_to_completion = 0;
31734 for (i = 0; i < SI_NUM_STATS; i++)
31735- atomic_set(&new_smi->stats[i], 0);
31736+ atomic_set_unchecked(&new_smi->stats[i], 0);
31737
31738 new_smi->interrupt_disabled = 1;
31739 atomic_set(&new_smi->stop_operation, 0);
31740diff --git a/drivers/char/mem.c b/drivers/char/mem.c
31741index 0537903..121c699 100644
31742--- a/drivers/char/mem.c
31743+++ b/drivers/char/mem.c
31744@@ -18,6 +18,7 @@
31745 #include <linux/raw.h>
31746 #include <linux/tty.h>
31747 #include <linux/capability.h>
31748+#include <linux/security.h>
31749 #include <linux/ptrace.h>
31750 #include <linux/device.h>
31751 #include <linux/highmem.h>
31752@@ -37,6 +38,10 @@
31753
31754 #define DEVPORT_MINOR 4
31755
31756+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31757+extern const struct file_operations grsec_fops;
31758+#endif
31759+
31760 static inline unsigned long size_inside_page(unsigned long start,
31761 unsigned long size)
31762 {
31763@@ -68,9 +73,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31764
31765 while (cursor < to) {
31766 if (!devmem_is_allowed(pfn)) {
31767+#ifdef CONFIG_GRKERNSEC_KMEM
31768+ gr_handle_mem_readwrite(from, to);
31769+#else
31770 printk(KERN_INFO
31771 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
31772 current->comm, from, to);
31773+#endif
31774 return 0;
31775 }
31776 cursor += PAGE_SIZE;
31777@@ -78,6 +87,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31778 }
31779 return 1;
31780 }
31781+#elif defined(CONFIG_GRKERNSEC_KMEM)
31782+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31783+{
31784+ return 0;
31785+}
31786 #else
31787 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31788 {
31789@@ -120,6 +134,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
31790
31791 while (count > 0) {
31792 unsigned long remaining;
31793+ char *temp;
31794
31795 sz = size_inside_page(p, count);
31796
31797@@ -135,7 +150,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
31798 if (!ptr)
31799 return -EFAULT;
31800
31801- remaining = copy_to_user(buf, ptr, sz);
31802+#ifdef CONFIG_PAX_USERCOPY
31803+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
31804+ if (!temp) {
31805+ unxlate_dev_mem_ptr(p, ptr);
31806+ return -ENOMEM;
31807+ }
31808+ memcpy(temp, ptr, sz);
31809+#else
31810+ temp = ptr;
31811+#endif
31812+
31813+ remaining = copy_to_user(buf, temp, sz);
31814+
31815+#ifdef CONFIG_PAX_USERCOPY
31816+ kfree(temp);
31817+#endif
31818+
31819 unxlate_dev_mem_ptr(p, ptr);
31820 if (remaining)
31821 return -EFAULT;
31822@@ -398,9 +429,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31823 size_t count, loff_t *ppos)
31824 {
31825 unsigned long p = *ppos;
31826- ssize_t low_count, read, sz;
31827+ ssize_t low_count, read, sz, err = 0;
31828 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
31829- int err = 0;
31830
31831 read = 0;
31832 if (p < (unsigned long) high_memory) {
31833@@ -422,6 +452,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31834 }
31835 #endif
31836 while (low_count > 0) {
31837+ char *temp;
31838+
31839 sz = size_inside_page(p, low_count);
31840
31841 /*
31842@@ -431,7 +463,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31843 */
31844 kbuf = xlate_dev_kmem_ptr((char *)p);
31845
31846- if (copy_to_user(buf, kbuf, sz))
31847+#ifdef CONFIG_PAX_USERCOPY
31848+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
31849+ if (!temp)
31850+ return -ENOMEM;
31851+ memcpy(temp, kbuf, sz);
31852+#else
31853+ temp = kbuf;
31854+#endif
31855+
31856+ err = copy_to_user(buf, temp, sz);
31857+
31858+#ifdef CONFIG_PAX_USERCOPY
31859+ kfree(temp);
31860+#endif
31861+
31862+ if (err)
31863 return -EFAULT;
31864 buf += sz;
31865 p += sz;
31866@@ -833,6 +880,9 @@ static const struct memdev {
31867 #ifdef CONFIG_CRASH_DUMP
31868 [12] = { "oldmem", 0, &oldmem_fops, NULL },
31869 #endif
31870+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31871+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
31872+#endif
31873 };
31874
31875 static int memory_open(struct inode *inode, struct file *filp)
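
The /dev/mem and /dev/kmem reader hunks implement a bounce buffer for PAX_USERCOPY. That feature validates the kernel side of every copy_to_user(): the source must be a recognizable object (a whitelisted slab allocation or the current stack frame) and the length must fit it. Data read straight out of arbitrary physical or vmalloc mappings fails that test, so the patch copies it first into a freshly allocated buffer, flagged GFP_USERCOPY so the checker will accept it. The shape of the pattern, as a sketch:

    /* sketch: GFP_USERCOPY is the grsecurity flag marking the allocation
     * as a legitimate source for user copies */
    static ssize_t bounce_to_user(char __user *buf, const void *src, size_t sz)
    {
            ssize_t ret = 0;
            char *tmp = kmalloc(sz, GFP_KERNEL | GFP_USERCOPY);

            if (!tmp)
                    return -ENOMEM;
            memcpy(tmp, src, sz);            /* internal copy, not checked  */
            if (copy_to_user(buf, tmp, sz))  /* USERCOPY validates tmp here */
                    ret = -EFAULT;
            kfree(tmp);
            return ret;
    }
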
31876diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
31877index 9df78e2..01ba9ae 100644
31878--- a/drivers/char/nvram.c
31879+++ b/drivers/char/nvram.c
31880@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
31881
31882 spin_unlock_irq(&rtc_lock);
31883
31884- if (copy_to_user(buf, contents, tmp - contents))
31885+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
31886 return -EFAULT;
31887
31888 *ppos = i;
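
nvram_read() builds its output in a fixed stack buffer and copies tmp - contents bytes out, trusting that the fill loop never advanced tmp past the end. The added length test converts any such overrun from a stack over-read into a clean -EFAULT. The defensive shape, sketched (fill_nvram_sketch and the 128-byte size are assumptions for illustration):

    char contents[128];                         /* fixed-size stack buffer    */
    char *tmp = fill_nvram_sketch(contents);    /* returns one past last byte */

    /* clamp the derived length to the buffer before letting it near a copy */
    if (tmp - contents > sizeof(contents) ||
        copy_to_user(buf, contents, tmp - contents))
            return -EFAULT;
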
31889diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
31890index 21721d2..4e98777 100644
31891--- a/drivers/char/pcmcia/synclink_cs.c
31892+++ b/drivers/char/pcmcia/synclink_cs.c
31893@@ -2346,9 +2346,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
31894
31895 if (debug_level >= DEBUG_LEVEL_INFO)
31896 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
31897- __FILE__,__LINE__, info->device_name, port->count);
31898+ __FILE__,__LINE__, info->device_name, atomic_read(&port->count));
31899
31900- WARN_ON(!port->count);
31901+ WARN_ON(!atomic_read(&port->count));
31902
31903 if (tty_port_close_start(port, tty, filp) == 0)
31904 goto cleanup;
31905@@ -2366,7 +2366,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
31906 cleanup:
31907 if (debug_level >= DEBUG_LEVEL_INFO)
31908 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__,__LINE__,
31909- tty->driver->name, port->count);
31910+ tty->driver->name, atomic_read(&port->count));
31911 }
31912
31913 /* Wait until the transmitter is empty.
31914@@ -2508,7 +2508,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
31915
31916 if (debug_level >= DEBUG_LEVEL_INFO)
31917 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
31918- __FILE__,__LINE__,tty->driver->name, port->count);
31919+ __FILE__,__LINE__,tty->driver->name, atomic_read(&port->count));
31920
31921 /* If port is closing, signal caller to try again */
31922 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
31923@@ -2528,11 +2528,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
31924 goto cleanup;
31925 }
31926 spin_lock(&port->lock);
31927- port->count++;
31928+ atomic_inc(&port->count);
31929 spin_unlock(&port->lock);
31930 spin_unlock_irqrestore(&info->netlock, flags);
31931
31932- if (port->count == 1) {
31933+ if (atomic_read(&port->count) == 1) {
31934 /* 1st open on this device, init hardware */
31935 retval = startup(info, tty);
31936 if (retval < 0)
31937@@ -3886,7 +3886,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
31938 unsigned short new_crctype;
31939
31940 /* return error if TTY interface open */
31941- if (info->port.count)
31942+ if (atomic_read(&info->port.count))
31943 return -EBUSY;
31944
31945 switch (encoding)
31946@@ -3989,7 +3989,7 @@ static int hdlcdev_open(struct net_device *dev)
31947
31948 /* arbitrate between network and tty opens */
31949 spin_lock_irqsave(&info->netlock, flags);
31950- if (info->port.count != 0 || info->netcount != 0) {
31951+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
31952 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
31953 spin_unlock_irqrestore(&info->netlock, flags);
31954 return -EBUSY;
31955@@ -4078,7 +4078,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
31956 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
31957
31958 /* return error if TTY interface open */
31959- if (info->port.count)
31960+ if (atomic_read(&info->port.count))
31961 return -EBUSY;
31962
31963 if (cmd != SIOCWANDEV)
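
Every access to the tty port's open count in synclink_cs is rewritten through the atomic API: grsecurity's REFCOUNT hardening treats plain int counters touched from multiple contexts as overflow hazards, and converting them to atomic_t both documents the concurrent use and lets the instrumentation catch wraps. A rough C11 model of the conversion (a sketch, not the kernel types):

#include <stdatomic.h>
#include <stdio.h>

struct port_model {
    atomic_int count;   /* was: int count; */
};

static void port_open(struct port_model *port)
{
    atomic_fetch_add(&port->count, 1);        /* was: port->count++; */
    if (atomic_load(&port->count) == 1)
        puts("first open on this device: init hardware");  /* startup() path */
}

static int port_busy(struct port_model *port)
{
    /* was: if (info->port.count) return -EBUSY; */
    return atomic_load(&port->count) != 0;
}
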
31964diff --git a/drivers/char/random.c b/drivers/char/random.c
31965index b86eae9..b9c2ed7 100644
31966--- a/drivers/char/random.c
31967+++ b/drivers/char/random.c
31968@@ -272,8 +272,13 @@
31969 /*
31970 * Configuration information
31971 */
31972+#ifdef CONFIG_GRKERNSEC_RANDNET
31973+#define INPUT_POOL_WORDS 512
31974+#define OUTPUT_POOL_WORDS 128
31975+#else
31976 #define INPUT_POOL_WORDS 128
31977 #define OUTPUT_POOL_WORDS 32
31978+#endif
31979 #define SEC_XFER_SIZE 512
31980 #define EXTRACT_SIZE 10
31981
31982@@ -313,10 +318,17 @@ static struct poolinfo {
31983 int poolwords;
31984 int tap1, tap2, tap3, tap4, tap5;
31985 } poolinfo_table[] = {
31986+#ifdef CONFIG_GRKERNSEC_RANDNET
31987+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
31988+ { 512, 411, 308, 208, 104, 1 },
31989+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
31990+ { 128, 103, 76, 51, 25, 1 },
31991+#else
31992 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
31993 { 128, 103, 76, 51, 25, 1 },
31994 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
31995 { 32, 26, 20, 14, 7, 1 },
31996+#endif
31997 #if 0
31998 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
31999 { 2048, 1638, 1231, 819, 411, 1 },
32000@@ -437,6 +449,7 @@ struct entropy_store {
32001 int entropy_count;
32002 int entropy_total;
32003 unsigned int initialized:1;
32004+ bool last_data_init;
32005 __u8 last_data[EXTRACT_SIZE];
32006 };
32007
32008@@ -527,8 +540,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
32009 input_rotate += i ? 7 : 14;
32010 }
32011
32012- ACCESS_ONCE(r->input_rotate) = input_rotate;
32013- ACCESS_ONCE(r->add_ptr) = i;
32014+ ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
32015+ ACCESS_ONCE_RW(r->add_ptr) = i;
32016 smp_wmb();
32017
32018 if (out)
32019@@ -957,6 +970,10 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
32020 ssize_t ret = 0, i;
32021 __u8 tmp[EXTRACT_SIZE];
32022
32023+ /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
32024+ if (fips_enabled && !r->last_data_init)
32025+ nbytes += EXTRACT_SIZE;
32026+
32027 trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_);
32028 xfer_secondary_pool(r, nbytes);
32029 nbytes = account(r, nbytes, min, reserved);
32030@@ -967,6 +984,17 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
32031 if (fips_enabled) {
32032 unsigned long flags;
32033
32034+
32035+ /* prime last_data value if need be, per fips 140-2 */
32036+ if (!r->last_data_init) {
32037+ spin_lock_irqsave(&r->lock, flags);
32038+ memcpy(r->last_data, tmp, EXTRACT_SIZE);
32039+ r->last_data_init = true;
32040+ nbytes -= EXTRACT_SIZE;
32041+ spin_unlock_irqrestore(&r->lock, flags);
32042+ extract_buf(r, tmp);
32043+ }
32044+
32045 spin_lock_irqsave(&r->lock, flags);
32046 if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
32047 panic("Hardware RNG duplicated output!\n");
32048@@ -1008,7 +1036,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
32049
32050 extract_buf(r, tmp);
32051 i = min_t(int, nbytes, EXTRACT_SIZE);
32052- if (copy_to_user(buf, tmp, i)) {
32053+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
32054 ret = -EFAULT;
32055 break;
32056 }
32057@@ -1086,6 +1114,7 @@ static void init_std_data(struct entropy_store *r)
32058
32059 r->entropy_count = 0;
32060 r->entropy_total = 0;
32061+ r->last_data_init = false;
32062 mix_pool_bytes(r, &now, sizeof(now), NULL);
32063 for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
32064 if (!arch_get_random_long(&rv))
32065@@ -1342,7 +1371,7 @@ EXPORT_SYMBOL(generate_random_uuid);
32066 #include <linux/sysctl.h>
32067
32068 static int min_read_thresh = 8, min_write_thresh;
32069-static int max_read_thresh = INPUT_POOL_WORDS * 32;
32070+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
32071 static int max_write_thresh = INPUT_POOL_WORDS * 32;
32072 static char sysctl_bootid[16];
32073
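
Besides the optional GRKERNSEC_RANDNET pool enlargement, the random.c hunks carry the FIPS 140-2 continuous self-test priming: last_data can only be compared against fresh output once it holds one prior EXTRACT_SIZE block, so the first extraction consumes an extra block to seed it (hence the nbytes adjustment). A simplified model of the test, with a stub extractor and the locking omitted (sketch):

#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#define EXTRACT_SIZE 10

static bool last_data_init;
static unsigned char last_data[EXTRACT_SIZE];

static void extract_buf(unsigned char out[EXTRACT_SIZE])
{
    for (int i = 0; i < EXTRACT_SIZE; i++)
        out[i] = (unsigned char)rand();   /* stand-in for the real extractor */
}

static void fips_extract(unsigned char out[EXTRACT_SIZE])
{
    unsigned char tmp[EXTRACT_SIZE];

    extract_buf(tmp);
    if (!last_data_init) {                /* prime on first use */
        memcpy(last_data, tmp, EXTRACT_SIZE);
        last_data_init = true;
        extract_buf(tmp);                 /* hand out a fresh block, not the primer */
    }
    /* two identical consecutive blocks mean a stuck generator; the kernel panics */
    assert(memcmp(tmp, last_data, EXTRACT_SIZE) != 0);
    memcpy(last_data, tmp, EXTRACT_SIZE);
    memcpy(out, tmp, EXTRACT_SIZE);
}
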
32074diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
32075index 9b4f011..b7e0a1a 100644
32076--- a/drivers/char/sonypi.c
32077+++ b/drivers/char/sonypi.c
32078@@ -54,6 +54,7 @@
32079
32080 #include <asm/uaccess.h>
32081 #include <asm/io.h>
32082+#include <asm/local.h>
32083
32084 #include <linux/sonypi.h>
32085
32086@@ -490,7 +491,7 @@ static struct sonypi_device {
32087 spinlock_t fifo_lock;
32088 wait_queue_head_t fifo_proc_list;
32089 struct fasync_struct *fifo_async;
32090- int open_count;
32091+ local_t open_count;
32092 int model;
32093 struct input_dev *input_jog_dev;
32094 struct input_dev *input_key_dev;
32095@@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
32096 static int sonypi_misc_release(struct inode *inode, struct file *file)
32097 {
32098 mutex_lock(&sonypi_device.lock);
32099- sonypi_device.open_count--;
32100+ local_dec(&sonypi_device.open_count);
32101 mutex_unlock(&sonypi_device.lock);
32102 return 0;
32103 }
32104@@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
32105 {
32106 mutex_lock(&sonypi_device.lock);
32107 /* Flush input queue on first open */
32108- if (!sonypi_device.open_count)
32109+ if (!local_read(&sonypi_device.open_count))
32110 kfifo_reset(&sonypi_device.fifo);
32111- sonypi_device.open_count++;
32112+ local_inc(&sonypi_device.open_count);
32113 mutex_unlock(&sonypi_device.lock);
32114
32115 return 0;
32116diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
32117index 93211df..c7805f7 100644
32118--- a/drivers/char/tpm/tpm.c
32119+++ b/drivers/char/tpm/tpm.c
32120@@ -410,7 +410,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
32121 chip->vendor.req_complete_val)
32122 goto out_recv;
32123
32124- if ((status == chip->vendor.req_canceled)) {
32125+ if (status == chip->vendor.req_canceled) {
32126 dev_err(chip->dev, "Operation Canceled\n");
32127 rc = -ECANCELED;
32128 goto out;
32129diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
32130index 56051d0..11cf3b7 100644
32131--- a/drivers/char/tpm/tpm_acpi.c
32132+++ b/drivers/char/tpm/tpm_acpi.c
32133@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
32134 virt = acpi_os_map_memory(start, len);
32135 if (!virt) {
32136 kfree(log->bios_event_log);
32137+ log->bios_event_log = NULL;
32138 printk("%s: ERROR - Unable to map memory\n", __func__);
32139 return -EIO;
32140 }
32141
32142- memcpy_fromio(log->bios_event_log, virt, len);
32143+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
32144
32145 acpi_os_unmap_memory(virt, len);
32146 return 0;
32147diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
32148index 84ddc55..1d32f1e 100644
32149--- a/drivers/char/tpm/tpm_eventlog.c
32150+++ b/drivers/char/tpm/tpm_eventlog.c
32151@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
32152 event = addr;
32153
32154 if ((event->event_type == 0 && event->event_size == 0) ||
32155- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
32156+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
32157 return NULL;
32158
32159 return addr;
32160@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
32161 return NULL;
32162
32163 if ((event->event_type == 0 && event->event_size == 0) ||
32164- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
32165+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
32166 return NULL;
32167
32168 (*pos)++;
32169@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
32170 int i;
32171
32172 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
32173- seq_putc(m, data[i]);
32174+ if (!seq_putc(m, data[i]))
32175+ return -EFAULT;
32176
32177 return 0;
32178 }
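
The tpm_eventlog.c rewrite above deserves a close look. The removed form, addr + sizeof(struct tcpa_event) + event_size >= limit, can wrap around when the attacker-influenced event_size is near the pointer-width maximum, so the check passes and the walker reads out of bounds; the added form keeps the untrusted value alone on one side and subtracts only values already known to be ordered, which cannot wrap. A self-contained demonstration (sketch; HDR_SIZE models sizeof(struct tcpa_event), and the fixed form assumes base + HDR_SIZE <= limit, which the seq walker guarantees):

#include <stdint.h>
#include <stdio.h>

#define HDR_SIZE 32u   /* models sizeof(struct tcpa_event) */

/* Broken form: 'base + HDR_SIZE + event_size' may wrap past zero. */
static int in_bounds_broken(uintptr_t base, uintptr_t limit, uintptr_t event_size)
{
    return base + HDR_SIZE + event_size < limit;
}

/* Fixed form: subtraction of known-ordered values cannot wrap. */
static int in_bounds_fixed(uintptr_t base, uintptr_t limit, uintptr_t event_size)
{
    return event_size < limit - base - HDR_SIZE;
}

int main(void)
{
    uintptr_t base = 0x1000, limit = 0x2000;
    uintptr_t evil = UINTPTR_MAX - HDR_SIZE - 0x800;   /* wraps the broken sum */

    printf("broken check says: %d (bogus pass)\n", in_bounds_broken(base, limit, evil));
    printf("fixed  check says: %d\n", in_bounds_fixed(base, limit, evil));
    return 0;
}
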
32179diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
32180index 8ab9c3d..c3e65d3 100644
32181--- a/drivers/char/virtio_console.c
32182+++ b/drivers/char/virtio_console.c
32183@@ -622,7 +622,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
32184 if (to_user) {
32185 ssize_t ret;
32186
32187- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
32188+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
32189 if (ret)
32190 return -EFAULT;
32191 } else {
32192@@ -721,7 +721,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
32193 if (!port_has_data(port) && !port->host_connected)
32194 return 0;
32195
32196- return fill_readbuf(port, ubuf, count, true);
32197+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
32198 }
32199
32200 static int wait_port_writable(struct port *port, bool nonblock)
32201diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
32202index e164c55..3aabb50 100644
32203--- a/drivers/edac/edac_pci_sysfs.c
32204+++ b/drivers/edac/edac_pci_sysfs.c
32205@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
32206 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
32207 static int edac_pci_poll_msec = 1000; /* one second workq period */
32208
32209-static atomic_t pci_parity_count = ATOMIC_INIT(0);
32210-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
32211+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
32212+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
32213
32214 static struct kobject *edac_pci_top_main_kobj;
32215 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
32216@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32217 edac_printk(KERN_CRIT, EDAC_PCI,
32218 "Signaled System Error on %s\n",
32219 pci_name(dev));
32220- atomic_inc(&pci_nonparity_count);
32221+ atomic_inc_unchecked(&pci_nonparity_count);
32222 }
32223
32224 if (status & (PCI_STATUS_PARITY)) {
32225@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32226 "Master Data Parity Error on %s\n",
32227 pci_name(dev));
32228
32229- atomic_inc(&pci_parity_count);
32230+ atomic_inc_unchecked(&pci_parity_count);
32231 }
32232
32233 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32234@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32235 "Detected Parity Error on %s\n",
32236 pci_name(dev));
32237
32238- atomic_inc(&pci_parity_count);
32239+ atomic_inc_unchecked(&pci_parity_count);
32240 }
32241 }
32242
32243@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32244 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
32245 "Signaled System Error on %s\n",
32246 pci_name(dev));
32247- atomic_inc(&pci_nonparity_count);
32248+ atomic_inc_unchecked(&pci_nonparity_count);
32249 }
32250
32251 if (status & (PCI_STATUS_PARITY)) {
32252@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32253 "Master Data Parity Error on "
32254 "%s\n", pci_name(dev));
32255
32256- atomic_inc(&pci_parity_count);
32257+ atomic_inc_unchecked(&pci_parity_count);
32258 }
32259
32260 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32261@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32262 "Detected Parity Error on %s\n",
32263 pci_name(dev));
32264
32265- atomic_inc(&pci_parity_count);
32266+ atomic_inc_unchecked(&pci_parity_count);
32267 }
32268 }
32269 }
32270@@ -676,7 +676,7 @@ void edac_pci_do_parity_check(void)
32271 if (!check_pci_errors)
32272 return;
32273
32274- before_count = atomic_read(&pci_parity_count);
32275+ before_count = atomic_read_unchecked(&pci_parity_count);
32276
32277 /* scan all PCI devices looking for a Parity Error on devices and
32278 * bridges.
32279@@ -688,7 +688,7 @@ void edac_pci_do_parity_check(void)
32280 /* Only if operator has selected panic on PCI Error */
32281 if (edac_pci_get_panic_on_pe()) {
32282 /* If the count is different 'after' from 'before' */
32283- if (before_count != atomic_read(&pci_parity_count))
32284+ if (before_count != atomic_read_unchecked(&pci_parity_count))
32285 panic("EDAC: PCI Parity Error");
32286 }
32287 }
32288diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
32289index 8c87a5e..a19cbd7 100644
32290--- a/drivers/edac/mce_amd.h
32291+++ b/drivers/edac/mce_amd.h
32292@@ -80,7 +80,7 @@ extern const char * const ii_msgs[];
32293 struct amd_decoder_ops {
32294 bool (*dc_mce)(u16, u8);
32295 bool (*ic_mce)(u16, u8);
32296-};
32297+} __no_const;
32298
32299 void amd_report_gart_errors(bool);
32300 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
32301diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
32302index 57ea7f4..789e3c3 100644
32303--- a/drivers/firewire/core-card.c
32304+++ b/drivers/firewire/core-card.c
32305@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
32306
32307 void fw_core_remove_card(struct fw_card *card)
32308 {
32309- struct fw_card_driver dummy_driver = dummy_driver_template;
32310+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
32311
32312 card->driver->update_phy_reg(card, 4,
32313 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
32314diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
32315index f8d2287..5aaf4db 100644
32316--- a/drivers/firewire/core-cdev.c
32317+++ b/drivers/firewire/core-cdev.c
32318@@ -1365,8 +1365,7 @@ static int init_iso_resource(struct client *client,
32319 int ret;
32320
32321 if ((request->channels == 0 && request->bandwidth == 0) ||
32322- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
32323- request->bandwidth < 0)
32324+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
32325 return -EINVAL;
32326
32327 r = kmalloc(sizeof(*r), GFP_KERNEL);
32328diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
32329index 28a94c7..58da63a 100644
32330--- a/drivers/firewire/core-transaction.c
32331+++ b/drivers/firewire/core-transaction.c
32332@@ -38,6 +38,7 @@
32333 #include <linux/timer.h>
32334 #include <linux/types.h>
32335 #include <linux/workqueue.h>
32336+#include <linux/sched.h>
32337
32338 #include <asm/byteorder.h>
32339
32340diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
32341index 515a42c..5ecf3ba 100644
32342--- a/drivers/firewire/core.h
32343+++ b/drivers/firewire/core.h
32344@@ -111,6 +111,7 @@ struct fw_card_driver {
32345
32346 int (*stop_iso)(struct fw_iso_context *ctx);
32347 };
32348+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
32349
32350 void fw_card_initialize(struct fw_card *card,
32351 const struct fw_card_driver *driver, struct device *device);
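
fw_card_driver is one of many ops structures that grsecurity's constify GCC plugin moves into read-only memory. fw_core_remove_card() legitimately needs a mutable stack copy to install a dummy driver, so the patch adds a __no_const typedef that opts that single use out of constification; the same pattern appears for struct bit_table and nouveau_fence_priv further down. The mechanism reduces to a type attribute, roughly as follows (sketch; without the plugin, __no_const expands to nothing, which is what the stock headers do):

/* Without the constify GCC plugin, __no_const expands to nothing;
 * under the plugin it is an attribute that exempts the type. */
#define __no_const

struct ops {
    int (*start)(void);
    int (*stop)(void);
};                                          /* plugin: implicitly const */
typedef struct ops __no_const ops_no_const; /* plugin: left writable */

static int dummy_start(void) { return 0; }
static int dummy_stop(void)  { return 0; }
static const struct ops dummy_template = { dummy_start, dummy_stop };

void remove_card(void)
{
    ops_no_const dummy = dummy_template;    /* mutable copy is now legal */
    dummy.stop = dummy_stop;                /* may be repointed at runtime */
    (void)dummy;
}
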
32352diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
32353index b298158..7ed8432 100644
32354--- a/drivers/firmware/dmi_scan.c
32355+++ b/drivers/firmware/dmi_scan.c
32356@@ -452,11 +452,6 @@ void __init dmi_scan_machine(void)
32357 }
32358 }
32359 else {
32360- /*
32361- * no iounmap() for that ioremap(); it would be a no-op, but
32362- * it's so early in setup that sucker gets confused into doing
32363- * what it shouldn't if we actually call it.
32364- */
32365 p = dmi_ioremap(0xF0000, 0x10000);
32366 if (p == NULL)
32367 goto error;
32368@@ -726,7 +721,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
32369 if (buf == NULL)
32370 return -1;
32371
32372- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
32373+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
32374
32375 iounmap(buf);
32376 return 0;
32377diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
32378index d10c987..ebe5400 100644
32379--- a/drivers/firmware/efivars.c
32380+++ b/drivers/firmware/efivars.c
32381@@ -1234,9 +1234,9 @@ efivars_init(void)
32382 return -ENOMEM;
32383 }
32384
32385- ops.get_variable = efi.get_variable;
32386- ops.set_variable = efi.set_variable;
32387- ops.get_next_variable = efi.get_next_variable;
32388+ *(void **)&ops.get_variable = efi.get_variable;
32389+ *(void **)&ops.set_variable = efi.set_variable;
32390+ *(void **)&ops.get_next_variable = efi.get_next_variable;
32391 error = register_efivars(&__efivars, &ops, efi_kobj);
32392 if (error)
32393 goto err_put;
32394diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
32395index 82d5c20..44a7177 100644
32396--- a/drivers/gpio/gpio-vr41xx.c
32397+++ b/drivers/gpio/gpio-vr41xx.c
32398@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
32399 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
32400 maskl, pendl, maskh, pendh);
32401
32402- atomic_inc(&irq_err_count);
32403+ atomic_inc_unchecked(&irq_err_count);
32404
32405 return -EINVAL;
32406 }
32407diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
32408index 1227adf..f2301c2 100644
32409--- a/drivers/gpu/drm/drm_crtc_helper.c
32410+++ b/drivers/gpu/drm/drm_crtc_helper.c
32411@@ -286,7 +286,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
32412 struct drm_crtc *tmp;
32413 int crtc_mask = 1;
32414
32415- WARN(!crtc, "checking null crtc?\n");
32416+ BUG_ON(!crtc);
32417
32418 dev = crtc->dev;
32419
32420diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
32421index be174ca..0bcbb71 100644
32422--- a/drivers/gpu/drm/drm_drv.c
32423+++ b/drivers/gpu/drm/drm_drv.c
32424@@ -307,7 +307,7 @@ module_exit(drm_core_exit);
32425 /**
32426 * Copy and IOCTL return string to user space
32427 */
32428-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
32429+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
32430 {
32431 int len;
32432
32433@@ -390,7 +390,7 @@ long drm_ioctl(struct file *filp,
32434 return -ENODEV;
32435
32436 atomic_inc(&dev->ioctl_count);
32437- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
32438+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
32439 ++file_priv->ioctl_count;
32440
32441 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
32442diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
32443index 133b413..fd68225 100644
32444--- a/drivers/gpu/drm/drm_fops.c
32445+++ b/drivers/gpu/drm/drm_fops.c
32446@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
32447 }
32448
32449 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
32450- atomic_set(&dev->counts[i], 0);
32451+ atomic_set_unchecked(&dev->counts[i], 0);
32452
32453 dev->sigdata.lock = NULL;
32454
32455@@ -134,7 +134,7 @@ int drm_open(struct inode *inode, struct file *filp)
32456 if (drm_device_is_unplugged(dev))
32457 return -ENODEV;
32458
32459- if (!dev->open_count++)
32460+ if (local_inc_return(&dev->open_count) == 1)
32461 need_setup = 1;
32462 mutex_lock(&dev->struct_mutex);
32463 old_mapping = dev->dev_mapping;
32464@@ -149,7 +149,7 @@ int drm_open(struct inode *inode, struct file *filp)
32465 retcode = drm_open_helper(inode, filp, dev);
32466 if (retcode)
32467 goto err_undo;
32468- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
32469+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
32470 if (need_setup) {
32471 retcode = drm_setup(dev);
32472 if (retcode)
32473@@ -164,7 +164,7 @@ err_undo:
32474 iput(container_of(dev->dev_mapping, struct inode, i_data));
32475 dev->dev_mapping = old_mapping;
32476 mutex_unlock(&dev->struct_mutex);
32477- dev->open_count--;
32478+ local_dec(&dev->open_count);
32479 return retcode;
32480 }
32481 EXPORT_SYMBOL(drm_open);
32482@@ -438,7 +438,7 @@ int drm_release(struct inode *inode, struct file *filp)
32483
32484 mutex_lock(&drm_global_mutex);
32485
32486- DRM_DEBUG("open_count = %d\n", dev->open_count);
32487+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
32488
32489 if (dev->driver->preclose)
32490 dev->driver->preclose(dev, file_priv);
32491@@ -447,10 +447,10 @@ int drm_release(struct inode *inode, struct file *filp)
32492 * Begin inline drm_release
32493 */
32494
32495- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
32496+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
32497 task_pid_nr(current),
32498 (long)old_encode_dev(file_priv->minor->device),
32499- dev->open_count);
32500+ local_read(&dev->open_count));
32501
32502 /* Release any auth tokens that might point to this file_priv,
32503 (do that under the drm_global_mutex) */
32504@@ -547,8 +547,8 @@ int drm_release(struct inode *inode, struct file *filp)
32505 * End inline drm_release
32506 */
32507
32508- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
32509- if (!--dev->open_count) {
32510+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
32511+ if (local_dec_and_test(&dev->open_count)) {
32512 if (atomic_read(&dev->ioctl_count)) {
32513 DRM_ERROR("Device busy: %d\n",
32514 atomic_read(&dev->ioctl_count));
32515diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
32516index f731116..629842c 100644
32517--- a/drivers/gpu/drm/drm_global.c
32518+++ b/drivers/gpu/drm/drm_global.c
32519@@ -36,7 +36,7 @@
32520 struct drm_global_item {
32521 struct mutex mutex;
32522 void *object;
32523- int refcount;
32524+ atomic_t refcount;
32525 };
32526
32527 static struct drm_global_item glob[DRM_GLOBAL_NUM];
32528@@ -49,7 +49,7 @@ void drm_global_init(void)
32529 struct drm_global_item *item = &glob[i];
32530 mutex_init(&item->mutex);
32531 item->object = NULL;
32532- item->refcount = 0;
32533+ atomic_set(&item->refcount, 0);
32534 }
32535 }
32536
32537@@ -59,7 +59,7 @@ void drm_global_release(void)
32538 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
32539 struct drm_global_item *item = &glob[i];
32540 BUG_ON(item->object != NULL);
32541- BUG_ON(item->refcount != 0);
32542+ BUG_ON(atomic_read(&item->refcount) != 0);
32543 }
32544 }
32545
32546@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
32547 void *object;
32548
32549 mutex_lock(&item->mutex);
32550- if (item->refcount == 0) {
32551+ if (atomic_read(&item->refcount) == 0) {
32552 item->object = kzalloc(ref->size, GFP_KERNEL);
32553 if (unlikely(item->object == NULL)) {
32554 ret = -ENOMEM;
32555@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
32556 goto out_err;
32557
32558 }
32559- ++item->refcount;
32560+ atomic_inc(&item->refcount);
32561 ref->object = item->object;
32562 object = item->object;
32563 mutex_unlock(&item->mutex);
32564@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
32565 struct drm_global_item *item = &glob[ref->global_type];
32566
32567 mutex_lock(&item->mutex);
32568- BUG_ON(item->refcount == 0);
32569+ BUG_ON(atomic_read(&item->refcount) == 0);
32570 BUG_ON(ref->object != item->object);
32571- if (--item->refcount == 0) {
32572+ if (atomic_dec_and_test(&item->refcount)) {
32573 ref->release(ref);
32574 item->object = NULL;
32575 }
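
drm_global_item's plain int refcount becomes an atomic_t, with the release path switched to atomic_dec_and_test() so the decrement and the zero test are one indivisible operation. The item mutex already serializes callers here, so this is less a race fix than consistency with the REFCOUNT instrumentation, but the shape is the canonical refcount pattern (C11 sketch):

#include <stdatomic.h>
#include <stddef.h>

struct item {
    atomic_int refcount;   /* was: int refcount; */
    void *object;
};

static void item_get(struct item *it)
{
    atomic_fetch_add(&it->refcount, 1);        /* was: ++item->refcount; */
}

static void item_put(struct item *it, void (*release)(struct item *))
{
    /* fetch_sub returns the PREVIOUS value: previous == 1 means now zero */
    if (atomic_fetch_sub(&it->refcount, 1) == 1) {
        release(it);                           /* was: if (--refcount == 0) */
        it->object = NULL;
    }
}
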
32576diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
32577index d4b20ce..77a8d41 100644
32578--- a/drivers/gpu/drm/drm_info.c
32579+++ b/drivers/gpu/drm/drm_info.c
32580@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
32581 struct drm_local_map *map;
32582 struct drm_map_list *r_list;
32583
32584- /* Hardcoded from _DRM_FRAME_BUFFER,
32585- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
32586- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
32587- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
32588+ static const char * const types[] = {
32589+ [_DRM_FRAME_BUFFER] = "FB",
32590+ [_DRM_REGISTERS] = "REG",
32591+ [_DRM_SHM] = "SHM",
32592+ [_DRM_AGP] = "AGP",
32593+ [_DRM_SCATTER_GATHER] = "SG",
32594+ [_DRM_CONSISTENT] = "PCI",
32595+ [_DRM_GEM] = "GEM" };
32596 const char *type;
32597 int i;
32598
32599@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
32600 map = r_list->map;
32601 if (!map)
32602 continue;
32603- if (map->type < 0 || map->type > 5)
32604+ if (map->type >= ARRAY_SIZE(types))
32605 type = "??";
32606 else
32607 type = types[map->type];
32608@@ -253,7 +257,11 @@ int drm_vma_info(struct seq_file *m, void *data)
32609 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
32610 vma->vm_flags & VM_LOCKED ? 'l' : '-',
32611 vma->vm_flags & VM_IO ? 'i' : '-',
32612+#ifdef CONFIG_GRKERNSEC_HIDESYM
32613+ 0);
32614+#else
32615 vma->vm_pgoff);
32616+#endif
32617
32618 #if defined(__i386__)
32619 pgprot = pgprot_val(vma->vm_page_prot);
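
The drm_vm_info() change replaces a positional string table plus the magic bound map->type > 5 with designated initializers keyed by the _DRM_* map-type constants and an ARRAY_SIZE() bound, so adding a map type can no longer silently desynchronize the table from the range check (it also gains the previously missing _DRM_GEM entry). The idiom in isolation (sketch; the enum values here are hypothetical):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum map_type { T_FB, T_REG, T_SHM, T_AGP, T_SG, T_PCI, T_GEM };

static const char * const types[] = {
    [T_FB]  = "FB",  [T_REG] = "REG", [T_SHM] = "SHM",
    [T_AGP] = "AGP", [T_SG]  = "SG",  [T_PCI] = "PCI",
    [T_GEM] = "GEM",
};

static const char *type_name(unsigned int t)
{
    /* unsigned compare + ARRAY_SIZE: no lower bound or magic number needed;
     * the NULL test guards holes a sparse designated array may contain */
    return t < ARRAY_SIZE(types) && types[t] ? types[t] : "??";
}

int main(void) { printf("%s %s\n", type_name(T_GEM), type_name(42)); return 0; }
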
32620diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
32621index 2f4c434..764794b 100644
32622--- a/drivers/gpu/drm/drm_ioc32.c
32623+++ b/drivers/gpu/drm/drm_ioc32.c
32624@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
32625 request = compat_alloc_user_space(nbytes);
32626 if (!access_ok(VERIFY_WRITE, request, nbytes))
32627 return -EFAULT;
32628- list = (struct drm_buf_desc *) (request + 1);
32629+ list = (struct drm_buf_desc __user *) (request + 1);
32630
32631 if (__put_user(count, &request->count)
32632 || __put_user(list, &request->list))
32633@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
32634 request = compat_alloc_user_space(nbytes);
32635 if (!access_ok(VERIFY_WRITE, request, nbytes))
32636 return -EFAULT;
32637- list = (struct drm_buf_pub *) (request + 1);
32638+ list = (struct drm_buf_pub __user *) (request + 1);
32639
32640 if (__put_user(count, &request->count)
32641 || __put_user(list, &request->list))
32642diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
32643index 23dd975..63e9801 100644
32644--- a/drivers/gpu/drm/drm_ioctl.c
32645+++ b/drivers/gpu/drm/drm_ioctl.c
32646@@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
32647 stats->data[i].value =
32648 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
32649 else
32650- stats->data[i].value = atomic_read(&dev->counts[i]);
32651+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
32652 stats->data[i].type = dev->types[i];
32653 }
32654
32655diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
32656index d752c96..fe08455 100644
32657--- a/drivers/gpu/drm/drm_lock.c
32658+++ b/drivers/gpu/drm/drm_lock.c
32659@@ -86,7 +86,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
32660 if (drm_lock_take(&master->lock, lock->context)) {
32661 master->lock.file_priv = file_priv;
32662 master->lock.lock_time = jiffies;
32663- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
32664+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
32665 break; /* Got lock */
32666 }
32667
32668@@ -157,7 +157,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
32669 return -EINVAL;
32670 }
32671
32672- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
32673+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
32674
32675 if (drm_lock_free(&master->lock, lock->context)) {
32676 /* FIXME: Should really bail out here. */
32677diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
32678index c236fd2..6b5f2e7 100644
32679--- a/drivers/gpu/drm/drm_stub.c
32680+++ b/drivers/gpu/drm/drm_stub.c
32681@@ -511,7 +511,7 @@ void drm_unplug_dev(struct drm_device *dev)
32682
32683 drm_device_set_unplugged(dev);
32684
32685- if (dev->open_count == 0) {
32686+ if (local_read(&dev->open_count) == 0) {
32687 drm_put_dev(dev);
32688 }
32689 mutex_unlock(&drm_global_mutex);
32690diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
32691index 004ecdf..db1f6e0 100644
32692--- a/drivers/gpu/drm/i810/i810_dma.c
32693+++ b/drivers/gpu/drm/i810/i810_dma.c
32694@@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
32695 dma->buflist[vertex->idx],
32696 vertex->discard, vertex->used);
32697
32698- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
32699- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
32700+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
32701+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
32702 sarea_priv->last_enqueue = dev_priv->counter - 1;
32703 sarea_priv->last_dispatch = (int)hw_status[5];
32704
32705@@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
32706 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
32707 mc->last_render);
32708
32709- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
32710- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
32711+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
32712+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
32713 sarea_priv->last_enqueue = dev_priv->counter - 1;
32714 sarea_priv->last_dispatch = (int)hw_status[5];
32715
32716diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
32717index 6e0acad..93c8289 100644
32718--- a/drivers/gpu/drm/i810/i810_drv.h
32719+++ b/drivers/gpu/drm/i810/i810_drv.h
32720@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
32721 int page_flipping;
32722
32723 wait_queue_head_t irq_queue;
32724- atomic_t irq_received;
32725- atomic_t irq_emitted;
32726+ atomic_unchecked_t irq_received;
32727+ atomic_unchecked_t irq_emitted;
32728
32729 int front_offset;
32730 } drm_i810_private_t;
32731diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
32732index da21b11..14c8749 100644
32733--- a/drivers/gpu/drm/i915/i915_debugfs.c
32734+++ b/drivers/gpu/drm/i915/i915_debugfs.c
32735@@ -495,7 +495,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
32736 I915_READ(GTIMR));
32737 }
32738 seq_printf(m, "Interrupts received: %d\n",
32739- atomic_read(&dev_priv->irq_received));
32740+ atomic_read_unchecked(&dev_priv->irq_received));
32741 for_each_ring(ring, dev_priv, i) {
32742 if (IS_GEN6(dev) || IS_GEN7(dev)) {
32743 seq_printf(m,
32744diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
32745index 61ae104..f8a4bc1 100644
32746--- a/drivers/gpu/drm/i915/i915_dma.c
32747+++ b/drivers/gpu/drm/i915/i915_dma.c
32748@@ -1274,7 +1274,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
32749 bool can_switch;
32750
32751 spin_lock(&dev->count_lock);
32752- can_switch = (dev->open_count == 0);
32753+ can_switch = (local_read(&dev->open_count) == 0);
32754 spin_unlock(&dev->count_lock);
32755 return can_switch;
32756 }
32757diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
32758index 92f1750..3beba74 100644
32759--- a/drivers/gpu/drm/i915/i915_drv.h
32760+++ b/drivers/gpu/drm/i915/i915_drv.h
32761@@ -430,7 +430,7 @@ typedef struct drm_i915_private {
32762
32763 struct resource mch_res;
32764
32765- atomic_t irq_received;
32766+ atomic_unchecked_t irq_received;
32767
32768 /* protects the irq masks */
32769 spinlock_t irq_lock;
32770@@ -1055,7 +1055,7 @@ struct drm_i915_gem_object {
32771 * will be page flipped away on the next vblank. When it
32772 * reaches 0, dev_priv->pending_flip_queue will be woken up.
32773 */
32774- atomic_t pending_flip;
32775+ atomic_unchecked_t pending_flip;
32776 };
32777
32778 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
32779@@ -1558,7 +1558,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
32780 struct drm_i915_private *dev_priv, unsigned port);
32781 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
32782 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
32783-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
32784+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
32785 {
32786 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
32787 }
32788diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
32789index 3eea143..a0b77db 100644
32790--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
32791+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
32792@@ -660,7 +660,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
32793 i915_gem_clflush_object(obj);
32794
32795 if (obj->base.pending_write_domain)
32796- flips |= atomic_read(&obj->pending_flip);
32797+ flips |= atomic_read_unchecked(&obj->pending_flip);
32798
32799 flush_domains |= obj->base.write_domain;
32800 }
32801@@ -691,9 +691,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
32802
32803 static int
32804 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
32805- int count)
32806+ unsigned int count)
32807 {
32808- int i;
32809+ unsigned int i;
32810
32811 for (i = 0; i < count; i++) {
32812 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
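
validate_exec_list() previously took the userspace-supplied object count as a signed int. A negative count skips the for (i = 0; i < count; i++) validation loop entirely, while code that later reinterprets the same value as a size can treat it as enormous; making both the parameter and the loop index unsigned closes that bypass. A demonstration of the failure mode (sketch):

#include <stdio.h>

static int validate_signed(int count)
{
    int checked = 0;
    for (int i = 0; i < count; i++)   /* count = -1: body never runs */
        checked++;
    return checked;
}

static unsigned validate_unsigned(unsigned count)
{
    unsigned checked = 0;
    for (unsigned i = 0; i < count; i++)   /* (unsigned)-1 = UINT_MAX: every
                                              entry really gets visited */
        checked++;
    return checked;
}

int main(void)
{
    printf("signed validation checked %d entries for count=-1\n",
           validate_signed(-1));
    /* validate_unsigned((unsigned)-1) would iterate 4G times; the real code
       pairs the unsigned type with explicit upper-bound checks elsewhere. */
    return 0;
}
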
32813diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
32814index dc29ace..137d83a 100644
32815--- a/drivers/gpu/drm/i915/i915_irq.c
32816+++ b/drivers/gpu/drm/i915/i915_irq.c
32817@@ -531,7 +531,7 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
32818 u32 pipe_stats[I915_MAX_PIPES];
32819 bool blc_event;
32820
32821- atomic_inc(&dev_priv->irq_received);
32822+ atomic_inc_unchecked(&dev_priv->irq_received);
32823
32824 while (true) {
32825 iir = I915_READ(VLV_IIR);
32826@@ -678,7 +678,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
32827 irqreturn_t ret = IRQ_NONE;
32828 int i;
32829
32830- atomic_inc(&dev_priv->irq_received);
32831+ atomic_inc_unchecked(&dev_priv->irq_received);
32832
32833 /* disable master interrupt before clearing iir */
32834 de_ier = I915_READ(DEIER);
32835@@ -753,7 +753,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
32836 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
32837 u32 hotplug_mask;
32838
32839- atomic_inc(&dev_priv->irq_received);
32840+ atomic_inc_unchecked(&dev_priv->irq_received);
32841
32842 /* disable master interrupt before clearing iir */
32843 de_ier = I915_READ(DEIER);
32844@@ -1762,7 +1762,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
32845 {
32846 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
32847
32848- atomic_set(&dev_priv->irq_received, 0);
32849+ atomic_set_unchecked(&dev_priv->irq_received, 0);
32850
32851 I915_WRITE(HWSTAM, 0xeffe);
32852
32853@@ -1788,7 +1788,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
32854 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
32855 int pipe;
32856
32857- atomic_set(&dev_priv->irq_received, 0);
32858+ atomic_set_unchecked(&dev_priv->irq_received, 0);
32859
32860 /* VLV magic */
32861 I915_WRITE(VLV_IMR, 0);
32862@@ -2093,7 +2093,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
32863 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
32864 int pipe;
32865
32866- atomic_set(&dev_priv->irq_received, 0);
32867+ atomic_set_unchecked(&dev_priv->irq_received, 0);
32868
32869 for_each_pipe(pipe)
32870 I915_WRITE(PIPESTAT(pipe), 0);
32871@@ -2144,7 +2144,7 @@ static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
32872 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
32873 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
32874
32875- atomic_inc(&dev_priv->irq_received);
32876+ atomic_inc_unchecked(&dev_priv->irq_received);
32877
32878 iir = I915_READ16(IIR);
32879 if (iir == 0)
32880@@ -2229,7 +2229,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
32881 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
32882 int pipe;
32883
32884- atomic_set(&dev_priv->irq_received, 0);
32885+ atomic_set_unchecked(&dev_priv->irq_received, 0);
32886
32887 if (I915_HAS_HOTPLUG(dev)) {
32888 I915_WRITE(PORT_HOTPLUG_EN, 0);
32889@@ -2324,7 +2324,7 @@ static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
32890 };
32891 int pipe, ret = IRQ_NONE;
32892
32893- atomic_inc(&dev_priv->irq_received);
32894+ atomic_inc_unchecked(&dev_priv->irq_received);
32895
32896 iir = I915_READ(IIR);
32897 do {
32898@@ -2450,7 +2450,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
32899 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
32900 int pipe;
32901
32902- atomic_set(&dev_priv->irq_received, 0);
32903+ atomic_set_unchecked(&dev_priv->irq_received, 0);
32904
32905 I915_WRITE(PORT_HOTPLUG_EN, 0);
32906 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
32907@@ -2557,7 +2557,7 @@ static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
32908 int irq_received;
32909 int ret = IRQ_NONE, pipe;
32910
32911- atomic_inc(&dev_priv->irq_received);
32912+ atomic_inc_unchecked(&dev_priv->irq_received);
32913
32914 iir = I915_READ(IIR);
32915
32916diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
32917index 4d3c7c6..eaac87b 100644
32918--- a/drivers/gpu/drm/i915/intel_display.c
32919+++ b/drivers/gpu/drm/i915/intel_display.c
32920@@ -2131,7 +2131,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
32921
32922 wait_event(dev_priv->pending_flip_queue,
32923 atomic_read(&dev_priv->mm.wedged) ||
32924- atomic_read(&obj->pending_flip) == 0);
32925+ atomic_read_unchecked(&obj->pending_flip) == 0);
32926
32927 /* Big Hammer, we also need to ensure that any pending
32928 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
32929@@ -6221,8 +6221,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
32930
32931 obj = work->old_fb_obj;
32932
32933- atomic_clear_mask(1 << intel_crtc->plane,
32934- &obj->pending_flip.counter);
32935+ atomic_clear_mask_unchecked(1 << intel_crtc->plane, &obj->pending_flip);
32936 wake_up(&dev_priv->pending_flip_queue);
32937
32938 queue_work(dev_priv->wq, &work->work);
32939@@ -6589,7 +6588,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
32940 /* Block clients from rendering to the new back buffer until
32941 * the flip occurs and the object is no longer visible.
32942 */
32943- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
32944+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
32945 atomic_inc(&intel_crtc->unpin_work_count);
32946
32947 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
32948@@ -6606,7 +6605,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
32949
32950 cleanup_pending:
32951 atomic_dec(&intel_crtc->unpin_work_count);
32952- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
32953+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
32954 drm_gem_object_unreference(&work->old_fb_obj->base);
32955 drm_gem_object_unreference(&obj->base);
32956 mutex_unlock(&dev->struct_mutex);
32957diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
32958index 54558a0..2d97005 100644
32959--- a/drivers/gpu/drm/mga/mga_drv.h
32960+++ b/drivers/gpu/drm/mga/mga_drv.h
32961@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
32962 u32 clear_cmd;
32963 u32 maccess;
32964
32965- atomic_t vbl_received; /**< Number of vblanks received. */
32966+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
32967 wait_queue_head_t fence_queue;
32968- atomic_t last_fence_retired;
32969+ atomic_unchecked_t last_fence_retired;
32970 u32 next_fence_to_post;
32971
32972 unsigned int fb_cpp;
32973diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
32974index 598c281..60d590e 100644
32975--- a/drivers/gpu/drm/mga/mga_irq.c
32976+++ b/drivers/gpu/drm/mga/mga_irq.c
32977@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
32978 if (crtc != 0)
32979 return 0;
32980
32981- return atomic_read(&dev_priv->vbl_received);
32982+ return atomic_read_unchecked(&dev_priv->vbl_received);
32983 }
32984
32985
32986@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
32987 /* VBLANK interrupt */
32988 if (status & MGA_VLINEPEN) {
32989 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
32990- atomic_inc(&dev_priv->vbl_received);
32991+ atomic_inc_unchecked(&dev_priv->vbl_received);
32992 drm_handle_vblank(dev, 0);
32993 handled = 1;
32994 }
32995@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
32996 if ((prim_start & ~0x03) != (prim_end & ~0x03))
32997 MGA_WRITE(MGA_PRIMEND, prim_end);
32998
32999- atomic_inc(&dev_priv->last_fence_retired);
33000+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
33001 DRM_WAKEUP(&dev_priv->fence_queue);
33002 handled = 1;
33003 }
33004@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
33005 * using fences.
33006 */
33007 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
33008- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
33009+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
33010 - *sequence) <= (1 << 23)));
33011
33012 *sequence = cur_fence;
33013diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
33014index 09fdef2..57f5c3b 100644
33015--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
33016+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
33017@@ -1240,7 +1240,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
33018 struct bit_table {
33019 const char id;
33020 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
33021-};
33022+} __no_const;
33023
33024 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
33025
33026diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
33027index a101699..a163f0a 100644
33028--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
33029+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
33030@@ -80,7 +80,7 @@ struct nouveau_drm {
33031 struct drm_global_reference mem_global_ref;
33032 struct ttm_bo_global_ref bo_global_ref;
33033 struct ttm_bo_device bdev;
33034- atomic_t validate_sequence;
33035+ atomic_unchecked_t validate_sequence;
33036 int (*move)(struct nouveau_channel *,
33037 struct ttm_buffer_object *,
33038 struct ttm_mem_reg *, struct ttm_mem_reg *);
33039diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
33040index cdb83ac..27f0a16 100644
33041--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
33042+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
33043@@ -43,7 +43,7 @@ struct nouveau_fence_priv {
33044 int (*sync)(struct nouveau_fence *, struct nouveau_channel *,
33045 struct nouveau_channel *);
33046 u32 (*read)(struct nouveau_channel *);
33047-};
33048+} __no_const;
33049
33050 #define nouveau_fence(drm) ((struct nouveau_fence_priv *)(drm)->fence)
33051
33052diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
33053index 5e2f521..0d21436 100644
33054--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
33055+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
33056@@ -321,7 +321,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
33057 int trycnt = 0;
33058 int ret, i;
33059
33060- sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
33061+ sequence = atomic_add_return_unchecked(1, &drm->ttm.validate_sequence);
33062 retry:
33063 if (++trycnt > 100000) {
33064 NV_ERROR(drm, "%s failed and gave up.\n", __func__);
33065diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
33066index 6f0ac64..9c2dfb4 100644
33067--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
33068+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
33069@@ -63,7 +63,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
33070 bool can_switch;
33071
33072 spin_lock(&dev->count_lock);
33073- can_switch = (dev->open_count == 0);
33074+ can_switch = (local_read(&dev->open_count) == 0);
33075 spin_unlock(&dev->count_lock);
33076 return can_switch;
33077 }
33078diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
33079index 9f6f55c..30e3a29 100644
33080--- a/drivers/gpu/drm/nouveau/nv50_evo.c
33081+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
33082@@ -152,9 +152,9 @@ nv50_evo_channel_new(struct drm_device *dev, int chid,
33083 kzalloc(sizeof(*evo->object->oclass), GFP_KERNEL);
33084 evo->object->oclass->ofuncs =
33085 kzalloc(sizeof(*evo->object->oclass->ofuncs), GFP_KERNEL);
33086- evo->object->oclass->ofuncs->rd32 = nv50_evo_rd32;
33087- evo->object->oclass->ofuncs->wr32 = nv50_evo_wr32;
33088- evo->object->oclass->ofuncs->rd08 =
33089+ *(void**)&evo->object->oclass->ofuncs->rd32 = nv50_evo_rd32;
33090+ *(void**)&evo->object->oclass->ofuncs->wr32 = nv50_evo_wr32;
33091+ *(void**)&evo->object->oclass->ofuncs->rd08 =
33092 ioremap(pci_resource_start(dev->pdev, 0) +
33093 NV50_PDISPLAY_USER(evo->handle), PAGE_SIZE);
33094 return 0;
33095diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
33096index b562b59..9d725a8 100644
33097--- a/drivers/gpu/drm/nouveau/nv50_sor.c
33098+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
33099@@ -317,7 +317,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
33100 }
33101
33102 if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
33103- struct dp_train_func func = {
33104+ static struct dp_train_func func = {
33105 .link_set = nv50_sor_dp_link_set,
33106 .train_set = nv50_sor_dp_train_set,
33107 .train_adj = nv50_sor_dp_train_adj
33108diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
33109index c402fca..f1d694b 100644
33110--- a/drivers/gpu/drm/nouveau/nvd0_display.c
33111+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
33112@@ -1389,7 +1389,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
33113 nv_wait(device, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
33114
33115 if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
33116- struct dp_train_func func = {
33117+ static struct dp_train_func func = {
33118 .link_set = nvd0_sor_dp_link_set,
33119 .train_set = nvd0_sor_dp_train_set,
33120 .train_adj = nvd0_sor_dp_train_adj
33121diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
33122index d4660cf..70dbe65 100644
33123--- a/drivers/gpu/drm/r128/r128_cce.c
33124+++ b/drivers/gpu/drm/r128/r128_cce.c
33125@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
33126
33127 /* GH: Simple idle check.
33128 */
33129- atomic_set(&dev_priv->idle_count, 0);
33130+ atomic_set_unchecked(&dev_priv->idle_count, 0);
33131
33132 /* We don't support anything other than bus-mastering ring mode,
33133 * but the ring can be in either AGP or PCI space for the ring
33134diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
33135index 930c71b..499aded 100644
33136--- a/drivers/gpu/drm/r128/r128_drv.h
33137+++ b/drivers/gpu/drm/r128/r128_drv.h
33138@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
33139 int is_pci;
33140 unsigned long cce_buffers_offset;
33141
33142- atomic_t idle_count;
33143+ atomic_unchecked_t idle_count;
33144
33145 int page_flipping;
33146 int current_page;
33147 u32 crtc_offset;
33148 u32 crtc_offset_cntl;
33149
33150- atomic_t vbl_received;
33151+ atomic_unchecked_t vbl_received;
33152
33153 u32 color_fmt;
33154 unsigned int front_offset;
33155diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
33156index 2ea4f09..d391371 100644
33157--- a/drivers/gpu/drm/r128/r128_irq.c
33158+++ b/drivers/gpu/drm/r128/r128_irq.c
33159@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
33160 if (crtc != 0)
33161 return 0;
33162
33163- return atomic_read(&dev_priv->vbl_received);
33164+ return atomic_read_unchecked(&dev_priv->vbl_received);
33165 }
33166
33167 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33168@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33169 /* VBLANK interrupt */
33170 if (status & R128_CRTC_VBLANK_INT) {
33171 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
33172- atomic_inc(&dev_priv->vbl_received);
33173+ atomic_inc_unchecked(&dev_priv->vbl_received);
33174 drm_handle_vblank(dev, 0);
33175 return IRQ_HANDLED;
33176 }
33177diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
33178index 19bb7e6..de7e2a2 100644
33179--- a/drivers/gpu/drm/r128/r128_state.c
33180+++ b/drivers/gpu/drm/r128/r128_state.c
33181@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
33182
33183 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
33184 {
33185- if (atomic_read(&dev_priv->idle_count) == 0)
33186+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
33187 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
33188 else
33189- atomic_set(&dev_priv->idle_count, 0);
33190+ atomic_set_unchecked(&dev_priv->idle_count, 0);
33191 }
33192
33193 #endif
33194diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
33195index 5a82b6b..9e69c73 100644
33196--- a/drivers/gpu/drm/radeon/mkregtable.c
33197+++ b/drivers/gpu/drm/radeon/mkregtable.c
33198@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
33199 regex_t mask_rex;
33200 regmatch_t match[4];
33201 char buf[1024];
33202- size_t end;
33203+ long end;
33204 int len;
33205 int done = 0;
33206 int r;
33207 unsigned o;
33208 struct offset *offset;
33209 char last_reg_s[10];
33210- int last_reg;
33211+ unsigned long last_reg;
33212
33213 if (regcomp
33214 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
33215diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
33216index ad4c973..aa27bcb 100644
33217--- a/drivers/gpu/drm/radeon/radeon_device.c
33218+++ b/drivers/gpu/drm/radeon/radeon_device.c
33219@@ -940,7 +940,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
33220 bool can_switch;
33221
33222 spin_lock(&dev->count_lock);
33223- can_switch = (dev->open_count == 0);
33224+ can_switch = (local_read(&dev->open_count) == 0);
33225 spin_unlock(&dev->count_lock);
33226 return can_switch;
33227 }
33228diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
33229index a1b59ca..86f2d44 100644
33230--- a/drivers/gpu/drm/radeon/radeon_drv.h
33231+++ b/drivers/gpu/drm/radeon/radeon_drv.h
33232@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
33233
33234 /* SW interrupt */
33235 wait_queue_head_t swi_queue;
33236- atomic_t swi_emitted;
33237+ atomic_unchecked_t swi_emitted;
33238 int vblank_crtc;
33239 uint32_t irq_enable_reg;
33240 uint32_t r500_disp_irq_reg;
33241diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
33242index c180df8..cd80dd2d 100644
33243--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
33244+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
33245@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
33246 request = compat_alloc_user_space(sizeof(*request));
33247 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
33248 || __put_user(req32.param, &request->param)
33249- || __put_user((void __user *)(unsigned long)req32.value,
33250+ || __put_user((unsigned long)req32.value,
33251 &request->value))
33252 return -EFAULT;
33253
33254diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
33255index e771033..a0bc6b3 100644
33256--- a/drivers/gpu/drm/radeon/radeon_irq.c
33257+++ b/drivers/gpu/drm/radeon/radeon_irq.c
33258@@ -224,8 +224,8 @@ static int radeon_emit_irq(struct drm_device * dev)
33259 unsigned int ret;
33260 RING_LOCALS;
33261
33262- atomic_inc(&dev_priv->swi_emitted);
33263- ret = atomic_read(&dev_priv->swi_emitted);
33264+ atomic_inc_unchecked(&dev_priv->swi_emitted);
33265+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
33266
33267 BEGIN_RING(4);
33268 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
33269@@ -351,7 +351,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
33270 drm_radeon_private_t *dev_priv =
33271 (drm_radeon_private_t *) dev->dev_private;
33272
33273- atomic_set(&dev_priv->swi_emitted, 0);
33274+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
33275 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
33276
33277 dev->max_vblank_count = 0x001fffff;
33278diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
33279index 8e9057b..af6dacb 100644
33280--- a/drivers/gpu/drm/radeon/radeon_state.c
33281+++ b/drivers/gpu/drm/radeon/radeon_state.c
33282@@ -2166,7 +2166,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
33283 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
33284 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
33285
33286- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
33287+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
33288 sarea_priv->nbox * sizeof(depth_boxes[0])))
33289 return -EFAULT;
33290
33291@@ -3029,7 +3029,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
33292 {
33293 drm_radeon_private_t *dev_priv = dev->dev_private;
33294 drm_radeon_getparam_t *param = data;
33295- int value;
33296+ int value = 0;
33297
33298 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
33299
33300diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
33301index 5ebe1b3..1ed9426 100644
33302--- a/drivers/gpu/drm/radeon/radeon_ttm.c
33303+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
33304@@ -822,8 +822,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33305 }
33306 if (unlikely(ttm_vm_ops == NULL)) {
33307 ttm_vm_ops = vma->vm_ops;
33308- radeon_ttm_vm_ops = *ttm_vm_ops;
33309- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
33310+ pax_open_kernel();
33311+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
33312+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
33313+ pax_close_kernel();
33314 }
33315 vma->vm_ops = &radeon_ttm_vm_ops;
33316 return 0;
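
radeon_mmap() patches a fault handler into a vm_operations_struct that constification places in read-only memory, so the plain structure assignment has to become an explicit memcpy() and pointer write bracketed by pax_open_kernel()/pax_close_kernel(), grsecurity-specific helpers that briefly lift kernel write protection (on x86, by toggling CR0.WP, as far as I can tell); the *(void **)& cast launders the const on the fault member. In shape (sketch; the pax_* helpers are modeled as no-ops so the snippet stands alone):

#include <string.h>

struct vm_ops { int (*fault)(void *); };

/* Grsecurity-specific write-protection toggles, modeled as no-ops here. */
static inline void pax_open_kernel(void)  { }
static inline void pax_close_kernel(void) { }

static int my_fault(void *vmf) { (void)vmf; return 0; }

static struct vm_ops patched_ops;   /* const under the constify plugin */

void install(const struct vm_ops *orig)
{
    pax_open_kernel();
    memcpy((void *)&patched_ops, orig, sizeof(patched_ops));
    /* function-pointer-through-void* write, exactly as the patch does it */
    *(void **)&patched_ops.fault = (void *)&my_fault;
    pax_close_kernel();
}
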
33317diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
33318index 5706d2a..17aedaa 100644
33319--- a/drivers/gpu/drm/radeon/rs690.c
33320+++ b/drivers/gpu/drm/radeon/rs690.c
33321@@ -304,9 +304,11 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
33322 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
33323 rdev->pm.sideport_bandwidth.full)
33324 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
33325- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
33326+ read_delay_latency.full = dfixed_const(800 * 1000);
33327 read_delay_latency.full = dfixed_div(read_delay_latency,
33328 rdev->pm.igp_sideport_mclk);
33329+ a.full = dfixed_const(370);
33330+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
33331 } else {
33332 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
33333 rdev->pm.k8_bandwidth.full)
33334diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
33335index bd2a3b4..122d9ad 100644
33336--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
33337+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
33338@@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
33339 static int ttm_pool_mm_shrink(struct shrinker *shrink,
33340 struct shrink_control *sc)
33341 {
33342- static atomic_t start_pool = ATOMIC_INIT(0);
33343+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
33344 unsigned i;
33345- unsigned pool_offset = atomic_add_return(1, &start_pool);
33346+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
33347 struct ttm_page_pool *pool;
33348 int shrink_pages = sc->nr_to_scan;
33349
33350diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
33351index 893a650..6190d3b 100644
33352--- a/drivers/gpu/drm/via/via_drv.h
33353+++ b/drivers/gpu/drm/via/via_drv.h
33354@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
33355 typedef uint32_t maskarray_t[5];
33356
33357 typedef struct drm_via_irq {
33358- atomic_t irq_received;
33359+ atomic_unchecked_t irq_received;
33360 uint32_t pending_mask;
33361 uint32_t enable_mask;
33362 wait_queue_head_t irq_queue;
33363@@ -75,7 +75,7 @@ typedef struct drm_via_private {
33364 struct timeval last_vblank;
33365 int last_vblank_valid;
33366 unsigned usec_per_vblank;
33367- atomic_t vbl_received;
33368+ atomic_unchecked_t vbl_received;
33369 drm_via_state_t hc_state;
33370 char pci_buf[VIA_PCI_BUF_SIZE];
33371 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
33372diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
33373index ac98964..5dbf512 100644
33374--- a/drivers/gpu/drm/via/via_irq.c
33375+++ b/drivers/gpu/drm/via/via_irq.c
33376@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
33377 if (crtc != 0)
33378 return 0;
33379
33380- return atomic_read(&dev_priv->vbl_received);
33381+ return atomic_read_unchecked(&dev_priv->vbl_received);
33382 }
33383
33384 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33385@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33386
33387 status = VIA_READ(VIA_REG_INTERRUPT);
33388 if (status & VIA_IRQ_VBLANK_PENDING) {
33389- atomic_inc(&dev_priv->vbl_received);
33390- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
33391+ atomic_inc_unchecked(&dev_priv->vbl_received);
33392+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
33393 do_gettimeofday(&cur_vblank);
33394 if (dev_priv->last_vblank_valid) {
33395 dev_priv->usec_per_vblank =
33396@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33397 dev_priv->last_vblank = cur_vblank;
33398 dev_priv->last_vblank_valid = 1;
33399 }
33400- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
33401+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
33402 DRM_DEBUG("US per vblank is: %u\n",
33403 dev_priv->usec_per_vblank);
33404 }
33405@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33406
33407 for (i = 0; i < dev_priv->num_irqs; ++i) {
33408 if (status & cur_irq->pending_mask) {
33409- atomic_inc(&cur_irq->irq_received);
33410+ atomic_inc_unchecked(&cur_irq->irq_received);
33411 DRM_WAKEUP(&cur_irq->irq_queue);
33412 handled = 1;
33413 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
33414@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
33415 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
33416 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
33417 masks[irq][4]));
33418- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
33419+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
33420 } else {
33421 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
33422 (((cur_irq_sequence =
33423- atomic_read(&cur_irq->irq_received)) -
33424+ atomic_read_unchecked(&cur_irq->irq_received)) -
33425 *sequence) <= (1 << 23)));
33426 }
33427 *sequence = cur_irq_sequence;
33428@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
33429 }
33430
33431 for (i = 0; i < dev_priv->num_irqs; ++i) {
33432- atomic_set(&cur_irq->irq_received, 0);
33433+ atomic_set_unchecked(&cur_irq->irq_received, 0);
33434 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
33435 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
33436 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
33437@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
33438 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
33439 case VIA_IRQ_RELATIVE:
33440 irqwait->request.sequence +=
33441- atomic_read(&cur_irq->irq_received);
33442+ atomic_read_unchecked(&cur_irq->irq_received);
33443 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
33444 case VIA_IRQ_ABSOLUTE:
33445 break;
33446diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
33447index 88a179e..57fe504 100644
33448--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
33449+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
33450@@ -263,7 +263,7 @@ struct vmw_private {
33451 * Fencing and IRQs.
33452 */
33453
33454- atomic_t marker_seq;
33455+ atomic_unchecked_t marker_seq;
33456 wait_queue_head_t fence_queue;
33457 wait_queue_head_t fifo_queue;
33458 int fence_queue_waiters; /* Protected by hw_mutex */
33459diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
33460index 3eb1486..0a47ee9 100644
33461--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
33462+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
33463@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
33464 (unsigned int) min,
33465 (unsigned int) fifo->capabilities);
33466
33467- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
33468+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
33469 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
33470 vmw_marker_queue_init(&fifo->marker_queue);
33471 return vmw_fifo_send_fence(dev_priv, &dummy);
33472@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
33473 if (reserveable)
33474 iowrite32(bytes, fifo_mem +
33475 SVGA_FIFO_RESERVED);
33476- return fifo_mem + (next_cmd >> 2);
33477+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
33478 } else {
33479 need_bounce = true;
33480 }
33481@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
33482
33483 fm = vmw_fifo_reserve(dev_priv, bytes);
33484 if (unlikely(fm == NULL)) {
33485- *seqno = atomic_read(&dev_priv->marker_seq);
33486+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
33487 ret = -ENOMEM;
33488 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
33489 false, 3*HZ);
33490@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
33491 }
33492
33493 do {
33494- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
33495+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
33496 } while (*seqno == 0);
33497
33498 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
33499diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
33500index 4640adb..e1384ed 100644
33501--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
33502+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
33503@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
33504 * emitted. Then the fence is stale and signaled.
33505 */
33506
33507- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
33508+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
33509 > VMW_FENCE_WRAP);
33510
33511 return ret;
33512@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
33513
33514 if (fifo_idle)
33515 down_read(&fifo_state->rwsem);
33516- signal_seq = atomic_read(&dev_priv->marker_seq);
33517+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
33518 ret = 0;
33519
33520 for (;;) {
33521diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
33522index 8a8725c..afed796 100644
33523--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
33524+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
33525@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
33526 while (!vmw_lag_lt(queue, us)) {
33527 spin_lock(&queue->lock);
33528 if (list_empty(&queue->head))
33529- seqno = atomic_read(&dev_priv->marker_seq);
33530+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
33531 else {
33532 marker = list_first_entry(&queue->head,
33533 struct vmw_marker, head);
33534diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
33535index 52146db..ae33762 100644
33536--- a/drivers/hid/hid-core.c
33537+++ b/drivers/hid/hid-core.c
33538@@ -2201,7 +2201,7 @@ static bool hid_ignore(struct hid_device *hdev)
33539
33540 int hid_add_device(struct hid_device *hdev)
33541 {
33542- static atomic_t id = ATOMIC_INIT(0);
33543+ static atomic_unchecked_t id = ATOMIC_INIT(0);
33544 int ret;
33545
33546 if (WARN_ON(hdev->status & HID_STAT_ADDED))
33547@@ -2236,7 +2236,7 @@ int hid_add_device(struct hid_device *hdev)
33548 /* XXX hack, any other cleaner solution after the driver core
33549 * is converted to allow more than 20 bytes as the device name? */
33550 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
33551- hdev->vendor, hdev->product, atomic_inc_return(&id));
33552+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
33553
33554 hid_debug_register(hdev, dev_name(&hdev->dev));
33555 ret = device_add(&hdev->dev);
33556diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
33557index eec3291..8ed706b 100644
33558--- a/drivers/hid/hid-wiimote-debug.c
33559+++ b/drivers/hid/hid-wiimote-debug.c
33560@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
33561 else if (size == 0)
33562 return -EIO;
33563
33564- if (copy_to_user(u, buf, size))
33565+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
33566 return -EFAULT;
33567
33568 *off += size;
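The hid-wiimote-debug hunk puts a size clamp in front of copy_to_user() so a caller-influenced length can never read past the fixed on-stack buffer; the same clamp-before-copy pattern recurs below in mousedev_read(), b1_load_t4file()/b1_load_config() and icn_writecmd(). A runnable stand-in (hypothetical names; memcpy() stands in for the user copy):

    #include <string.h>

    /* Reject oversized requests before copying out of a fixed buffer. */
    long bounded_read(char *dst, size_t want)
    {
        char buf[16];

        memset(buf, 0, sizeof(buf));    /* device data would go here */
        if (want > sizeof(buf))         /* the added guard */
            return -1;                  /* the kernel code returns -EFAULT */
        memcpy(dst, buf, want);         /* stands in for copy_to_user() */
        return (long)want;
    }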
33569diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
33570index 14599e2..711c965 100644
33571--- a/drivers/hid/usbhid/hiddev.c
33572+++ b/drivers/hid/usbhid/hiddev.c
33573@@ -625,7 +625,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
33574 break;
33575
33576 case HIDIOCAPPLICATION:
33577- if (arg < 0 || arg >= hid->maxapplication)
33578+ if (arg >= hid->maxapplication)
33579 break;
33580
33581 for (i = 0; i < hid->maxcollection; i++)
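The hiddev hunk removes a test that could never fire: the ioctl argument is an unsigned long, so "arg < 0" is always false and the upper bound is the only live guard. The pitfall in miniature:

    #include <stdbool.h>

    /* -Wtype-limits flags the first test: it can never be true. */
    bool in_range(unsigned long arg, unsigned long max)
    {
        if (arg < 0)        /* dead code: an unsigned value is never negative */
            return false;
        return arg < max;   /* the only guard that can actually fire */
    }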
33582diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
33583index f4c3d28..82f45a9 100644
33584--- a/drivers/hv/channel.c
33585+++ b/drivers/hv/channel.c
33586@@ -402,8 +402,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
33587 int ret = 0;
33588 int t;
33589
33590- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
33591- atomic_inc(&vmbus_connection.next_gpadl_handle);
33592+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
33593+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
33594
33595 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
33596 if (ret)
33597diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
33598index 3648f8f..30ef30d 100644
33599--- a/drivers/hv/hv.c
33600+++ b/drivers/hv/hv.c
33601@@ -111,7 +111,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
33602 u64 output_address = (output) ? virt_to_phys(output) : 0;
33603 u32 output_address_hi = output_address >> 32;
33604 u32 output_address_lo = output_address & 0xFFFFFFFF;
33605- void *hypercall_page = hv_context.hypercall_page;
33606+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
33607
33608 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
33609 "=a"(hv_status_lo) : "d" (control_hi),
33610diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
33611index d8d1fad..b91caf7 100644
33612--- a/drivers/hv/hyperv_vmbus.h
33613+++ b/drivers/hv/hyperv_vmbus.h
33614@@ -594,7 +594,7 @@ enum vmbus_connect_state {
33615 struct vmbus_connection {
33616 enum vmbus_connect_state conn_state;
33617
33618- atomic_t next_gpadl_handle;
33619+ atomic_unchecked_t next_gpadl_handle;
33620
33621 /*
33622 * Represents channel interrupts. Each bit position represents a
33623diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
33624index 8e1a9ec..4687821 100644
33625--- a/drivers/hv/vmbus_drv.c
33626+++ b/drivers/hv/vmbus_drv.c
33627@@ -629,10 +629,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
33628 {
33629 int ret = 0;
33630
33631- static atomic_t device_num = ATOMIC_INIT(0);
33632+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
33633
33634 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
33635- atomic_inc_return(&device_num));
33636+ atomic_inc_return_unchecked(&device_num));
33637
33638 child_device_obj->device.bus = &hv_bus;
33639 child_device_obj->device.parent = &hv_acpi_dev->dev;
33640diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
33641index 07a0c1a..0cac334 100644
33642--- a/drivers/hwmon/sht15.c
33643+++ b/drivers/hwmon/sht15.c
33644@@ -169,7 +169,7 @@ struct sht15_data {
33645 int supply_uV;
33646 bool supply_uV_valid;
33647 struct work_struct update_supply_work;
33648- atomic_t interrupt_handled;
33649+ atomic_unchecked_t interrupt_handled;
33650 };
33651
33652 /**
33653@@ -512,13 +512,13 @@ static int sht15_measurement(struct sht15_data *data,
33654 return ret;
33655
33656 gpio_direction_input(data->pdata->gpio_data);
33657- atomic_set(&data->interrupt_handled, 0);
33658+ atomic_set_unchecked(&data->interrupt_handled, 0);
33659
33660 enable_irq(gpio_to_irq(data->pdata->gpio_data));
33661 if (gpio_get_value(data->pdata->gpio_data) == 0) {
33662 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
33663 /* Only relevant if the interrupt hasn't occurred. */
33664- if (!atomic_read(&data->interrupt_handled))
33665+ if (!atomic_read_unchecked(&data->interrupt_handled))
33666 schedule_work(&data->read_work);
33667 }
33668 ret = wait_event_timeout(data->wait_queue,
33669@@ -785,7 +785,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
33670
33671 /* First disable the interrupt */
33672 disable_irq_nosync(irq);
33673- atomic_inc(&data->interrupt_handled);
33674+ atomic_inc_unchecked(&data->interrupt_handled);
33675 /* Then schedule a reading work struct */
33676 if (data->state != SHT15_READING_NOTHING)
33677 schedule_work(&data->read_work);
33678@@ -807,11 +807,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
33679 * If not, then start the interrupt again - care here as could
33680 * have gone low in meantime so verify it hasn't!
33681 */
33682- atomic_set(&data->interrupt_handled, 0);
33683+ atomic_set_unchecked(&data->interrupt_handled, 0);
33684 enable_irq(gpio_to_irq(data->pdata->gpio_data));
33685 /* If still not occurred or another handler was scheduled */
33686 if (gpio_get_value(data->pdata->gpio_data)
33687- || atomic_read(&data->interrupt_handled))
33688+ || atomic_read_unchecked(&data->interrupt_handled))
33689 return;
33690 }
33691
33692diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
33693index 378fcb5..5e91fa8 100644
33694--- a/drivers/i2c/busses/i2c-amd756-s4882.c
33695+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
33696@@ -43,7 +43,7 @@
33697 extern struct i2c_adapter amd756_smbus;
33698
33699 static struct i2c_adapter *s4882_adapter;
33700-static struct i2c_algorithm *s4882_algo;
33701+static i2c_algorithm_no_const *s4882_algo;
33702
33703 /* Wrapper access functions for multiplexed SMBus */
33704 static DEFINE_MUTEX(amd756_lock);
33705diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
33706index 29015eb..af2d8e9 100644
33707--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
33708+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
33709@@ -41,7 +41,7 @@
33710 extern struct i2c_adapter *nforce2_smbus;
33711
33712 static struct i2c_adapter *s4985_adapter;
33713-static struct i2c_algorithm *s4985_algo;
33714+static i2c_algorithm_no_const *s4985_algo;
33715
33716 /* Wrapper access functions for multiplexed SMBus */
33717 static DEFINE_MUTEX(nforce2_lock);
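Both SMBus multiplexer shims build their i2c_algorithm at runtime by copying the parent adapter's algorithm and rewiring its callbacks. Under the grsecurity constify plugin such ops structures become const by default, so the drivers declare the mutable copy as i2c_algorithm_no_const instead. A hedged sketch of the escape hatch (the __no_const attribute and the typedef spelling are assumed from elsewhere in the patch, not shown in this excerpt):

    /* In the i2c headers (sketch): same layout, opted out of constify. */
    typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;

    /* In a shim driver: a mutable runtime copy whose callbacks get
       rewired before registration. */
    static i2c_algorithm_no_const s48xx_algo;   /* hypothetical name */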
33718diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
33719index 8126824..55a2798 100644
33720--- a/drivers/ide/ide-cd.c
33721+++ b/drivers/ide/ide-cd.c
33722@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
33723 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
33724 if ((unsigned long)buf & alignment
33725 || blk_rq_bytes(rq) & q->dma_pad_mask
33726- || object_is_on_stack(buf))
33727+ || object_starts_on_stack(buf))
33728 drive->dma = 0;
33729 }
33730 }
33731diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
33732index 394fea2..c833880 100644
33733--- a/drivers/infiniband/core/cm.c
33734+++ b/drivers/infiniband/core/cm.c
33735@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
33736
33737 struct cm_counter_group {
33738 struct kobject obj;
33739- atomic_long_t counter[CM_ATTR_COUNT];
33740+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
33741 };
33742
33743 struct cm_counter_attribute {
33744@@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
33745 struct ib_mad_send_buf *msg = NULL;
33746 int ret;
33747
33748- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33749+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33750 counter[CM_REQ_COUNTER]);
33751
33752 /* Quick state check to discard duplicate REQs. */
33753@@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
33754 if (!cm_id_priv)
33755 return;
33756
33757- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33758+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33759 counter[CM_REP_COUNTER]);
33760 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
33761 if (ret)
33762@@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
33763 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
33764 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
33765 spin_unlock_irq(&cm_id_priv->lock);
33766- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33767+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33768 counter[CM_RTU_COUNTER]);
33769 goto out;
33770 }
33771@@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
33772 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
33773 dreq_msg->local_comm_id);
33774 if (!cm_id_priv) {
33775- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33776+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33777 counter[CM_DREQ_COUNTER]);
33778 cm_issue_drep(work->port, work->mad_recv_wc);
33779 return -EINVAL;
33780@@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
33781 case IB_CM_MRA_REP_RCVD:
33782 break;
33783 case IB_CM_TIMEWAIT:
33784- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33785+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33786 counter[CM_DREQ_COUNTER]);
33787 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
33788 goto unlock;
33789@@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
33790 cm_free_msg(msg);
33791 goto deref;
33792 case IB_CM_DREQ_RCVD:
33793- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33794+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33795 counter[CM_DREQ_COUNTER]);
33796 goto unlock;
33797 default:
33798@@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
33799 ib_modify_mad(cm_id_priv->av.port->mad_agent,
33800 cm_id_priv->msg, timeout)) {
33801 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
33802- atomic_long_inc(&work->port->
33803+ atomic_long_inc_unchecked(&work->port->
33804 counter_group[CM_RECV_DUPLICATES].
33805 counter[CM_MRA_COUNTER]);
33806 goto out;
33807@@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
33808 break;
33809 case IB_CM_MRA_REQ_RCVD:
33810 case IB_CM_MRA_REP_RCVD:
33811- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33812+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33813 counter[CM_MRA_COUNTER]);
33814 /* fall through */
33815 default:
33816@@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
33817 case IB_CM_LAP_IDLE:
33818 break;
33819 case IB_CM_MRA_LAP_SENT:
33820- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33821+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33822 counter[CM_LAP_COUNTER]);
33823 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
33824 goto unlock;
33825@@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
33826 cm_free_msg(msg);
33827 goto deref;
33828 case IB_CM_LAP_RCVD:
33829- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33830+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33831 counter[CM_LAP_COUNTER]);
33832 goto unlock;
33833 default:
33834@@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
33835 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
33836 if (cur_cm_id_priv) {
33837 spin_unlock_irq(&cm.lock);
33838- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33839+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33840 counter[CM_SIDR_REQ_COUNTER]);
33841 goto out; /* Duplicate message. */
33842 }
33843@@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
33844 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
33845 msg->retries = 1;
33846
33847- atomic_long_add(1 + msg->retries,
33848+ atomic_long_add_unchecked(1 + msg->retries,
33849 &port->counter_group[CM_XMIT].counter[attr_index]);
33850 if (msg->retries)
33851- atomic_long_add(msg->retries,
33852+ atomic_long_add_unchecked(msg->retries,
33853 &port->counter_group[CM_XMIT_RETRIES].
33854 counter[attr_index]);
33855
33856@@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
33857 }
33858
33859 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
33860- atomic_long_inc(&port->counter_group[CM_RECV].
33861+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
33862 counter[attr_id - CM_ATTR_ID_OFFSET]);
33863
33864 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
33865@@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
33866 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
33867
33868 return sprintf(buf, "%ld\n",
33869- atomic_long_read(&group->counter[cm_attr->index]));
33870+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
33871 }
33872
33873 static const struct sysfs_ops cm_counter_ops = {
33874diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
33875index 176c8f9..2627b62 100644
33876--- a/drivers/infiniband/core/fmr_pool.c
33877+++ b/drivers/infiniband/core/fmr_pool.c
33878@@ -98,8 +98,8 @@ struct ib_fmr_pool {
33879
33880 struct task_struct *thread;
33881
33882- atomic_t req_ser;
33883- atomic_t flush_ser;
33884+ atomic_unchecked_t req_ser;
33885+ atomic_unchecked_t flush_ser;
33886
33887 wait_queue_head_t force_wait;
33888 };
33889@@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
33890 struct ib_fmr_pool *pool = pool_ptr;
33891
33892 do {
33893- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
33894+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
33895 ib_fmr_batch_release(pool);
33896
33897- atomic_inc(&pool->flush_ser);
33898+ atomic_inc_unchecked(&pool->flush_ser);
33899 wake_up_interruptible(&pool->force_wait);
33900
33901 if (pool->flush_function)
33902@@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
33903 }
33904
33905 set_current_state(TASK_INTERRUPTIBLE);
33906- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
33907+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
33908 !kthread_should_stop())
33909 schedule();
33910 __set_current_state(TASK_RUNNING);
33911@@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
33912 pool->dirty_watermark = params->dirty_watermark;
33913 pool->dirty_len = 0;
33914 spin_lock_init(&pool->pool_lock);
33915- atomic_set(&pool->req_ser, 0);
33916- atomic_set(&pool->flush_ser, 0);
33917+ atomic_set_unchecked(&pool->req_ser, 0);
33918+ atomic_set_unchecked(&pool->flush_ser, 0);
33919 init_waitqueue_head(&pool->force_wait);
33920
33921 pool->thread = kthread_run(ib_fmr_cleanup_thread,
33922@@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
33923 }
33924 spin_unlock_irq(&pool->pool_lock);
33925
33926- serial = atomic_inc_return(&pool->req_ser);
33927+ serial = atomic_inc_return_unchecked(&pool->req_ser);
33928 wake_up_process(pool->thread);
33929
33930 if (wait_event_interruptible(pool->force_wait,
33931- atomic_read(&pool->flush_ser) - serial >= 0))
33932+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
33933 return -EINTR;
33934
33935 return 0;
33936@@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
33937 } else {
33938 list_add_tail(&fmr->list, &pool->dirty_list);
33939 if (++pool->dirty_len >= pool->dirty_watermark) {
33940- atomic_inc(&pool->req_ser);
33941+ atomic_inc_unchecked(&pool->req_ser);
33942 wake_up_process(pool->thread);
33943 }
33944 }
33945diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
33946index afd8179..598063f 100644
33947--- a/drivers/infiniband/hw/cxgb4/mem.c
33948+++ b/drivers/infiniband/hw/cxgb4/mem.c
33949@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
33950 int err;
33951 struct fw_ri_tpte tpt;
33952 u32 stag_idx;
33953- static atomic_t key;
33954+ static atomic_unchecked_t key;
33955
33956 if (c4iw_fatal_error(rdev))
33957 return -EIO;
33958@@ -139,7 +139,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
33959 if (rdev->stats.stag.cur > rdev->stats.stag.max)
33960 rdev->stats.stag.max = rdev->stats.stag.cur;
33961 mutex_unlock(&rdev->stats.lock);
33962- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
33963+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
33964 }
33965 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
33966 __func__, stag_state, type, pdid, stag_idx);
33967diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
33968index 79b3dbc..96e5fcc 100644
33969--- a/drivers/infiniband/hw/ipath/ipath_rc.c
33970+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
33971@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
33972 struct ib_atomic_eth *ateth;
33973 struct ipath_ack_entry *e;
33974 u64 vaddr;
33975- atomic64_t *maddr;
33976+ atomic64_unchecked_t *maddr;
33977 u64 sdata;
33978 u32 rkey;
33979 u8 next;
33980@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
33981 IB_ACCESS_REMOTE_ATOMIC)))
33982 goto nack_acc_unlck;
33983 /* Perform atomic OP and save result. */
33984- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
33985+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
33986 sdata = be64_to_cpu(ateth->swap_data);
33987 e = &qp->s_ack_queue[qp->r_head_ack_queue];
33988 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
33989- (u64) atomic64_add_return(sdata, maddr) - sdata :
33990+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
33991 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
33992 be64_to_cpu(ateth->compare_data),
33993 sdata);
33994diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
33995index 1f95bba..9530f87 100644
33996--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
33997+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
33998@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
33999 unsigned long flags;
34000 struct ib_wc wc;
34001 u64 sdata;
34002- atomic64_t *maddr;
34003+ atomic64_unchecked_t *maddr;
34004 enum ib_wc_status send_status;
34005
34006 /*
34007@@ -382,11 +382,11 @@ again:
34008 IB_ACCESS_REMOTE_ATOMIC)))
34009 goto acc_err;
34010 /* Perform atomic OP and save result. */
34011- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
34012+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
34013 sdata = wqe->wr.wr.atomic.compare_add;
34014 *(u64 *) sqp->s_sge.sge.vaddr =
34015 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
34016- (u64) atomic64_add_return(sdata, maddr) - sdata :
34017+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
34018 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
34019 sdata, wqe->wr.wr.atomic.swap);
34020 goto send_comp;
34021diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
34022index 748db2d..5f75cc3 100644
34023--- a/drivers/infiniband/hw/nes/nes.c
34024+++ b/drivers/infiniband/hw/nes/nes.c
34025@@ -98,7 +98,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
34026 LIST_HEAD(nes_adapter_list);
34027 static LIST_HEAD(nes_dev_list);
34028
34029-atomic_t qps_destroyed;
34030+atomic_unchecked_t qps_destroyed;
34031
34032 static unsigned int ee_flsh_adapter;
34033 static unsigned int sysfs_nonidx_addr;
34034@@ -267,7 +267,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
34035 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
34036 struct nes_adapter *nesadapter = nesdev->nesadapter;
34037
34038- atomic_inc(&qps_destroyed);
34039+ atomic_inc_unchecked(&qps_destroyed);
34040
34041 /* Free the control structures */
34042
34043diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
34044index 33cc589..3bd6538 100644
34045--- a/drivers/infiniband/hw/nes/nes.h
34046+++ b/drivers/infiniband/hw/nes/nes.h
34047@@ -177,17 +177,17 @@ extern unsigned int nes_debug_level;
34048 extern unsigned int wqm_quanta;
34049 extern struct list_head nes_adapter_list;
34050
34051-extern atomic_t cm_connects;
34052-extern atomic_t cm_accepts;
34053-extern atomic_t cm_disconnects;
34054-extern atomic_t cm_closes;
34055-extern atomic_t cm_connecteds;
34056-extern atomic_t cm_connect_reqs;
34057-extern atomic_t cm_rejects;
34058-extern atomic_t mod_qp_timouts;
34059-extern atomic_t qps_created;
34060-extern atomic_t qps_destroyed;
34061-extern atomic_t sw_qps_destroyed;
34062+extern atomic_unchecked_t cm_connects;
34063+extern atomic_unchecked_t cm_accepts;
34064+extern atomic_unchecked_t cm_disconnects;
34065+extern atomic_unchecked_t cm_closes;
34066+extern atomic_unchecked_t cm_connecteds;
34067+extern atomic_unchecked_t cm_connect_reqs;
34068+extern atomic_unchecked_t cm_rejects;
34069+extern atomic_unchecked_t mod_qp_timouts;
34070+extern atomic_unchecked_t qps_created;
34071+extern atomic_unchecked_t qps_destroyed;
34072+extern atomic_unchecked_t sw_qps_destroyed;
34073 extern u32 mh_detected;
34074 extern u32 mh_pauses_sent;
34075 extern u32 cm_packets_sent;
34076@@ -196,16 +196,16 @@ extern u32 cm_packets_created;
34077 extern u32 cm_packets_received;
34078 extern u32 cm_packets_dropped;
34079 extern u32 cm_packets_retrans;
34080-extern atomic_t cm_listens_created;
34081-extern atomic_t cm_listens_destroyed;
34082+extern atomic_unchecked_t cm_listens_created;
34083+extern atomic_unchecked_t cm_listens_destroyed;
34084 extern u32 cm_backlog_drops;
34085-extern atomic_t cm_loopbacks;
34086-extern atomic_t cm_nodes_created;
34087-extern atomic_t cm_nodes_destroyed;
34088-extern atomic_t cm_accel_dropped_pkts;
34089-extern atomic_t cm_resets_recvd;
34090-extern atomic_t pau_qps_created;
34091-extern atomic_t pau_qps_destroyed;
34092+extern atomic_unchecked_t cm_loopbacks;
34093+extern atomic_unchecked_t cm_nodes_created;
34094+extern atomic_unchecked_t cm_nodes_destroyed;
34095+extern atomic_unchecked_t cm_accel_dropped_pkts;
34096+extern atomic_unchecked_t cm_resets_recvd;
34097+extern atomic_unchecked_t pau_qps_created;
34098+extern atomic_unchecked_t pau_qps_destroyed;
34099
34100 extern u32 int_mod_timer_init;
34101 extern u32 int_mod_cq_depth_256;
34102diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
34103index cfaacaf..fa0722e 100644
34104--- a/drivers/infiniband/hw/nes/nes_cm.c
34105+++ b/drivers/infiniband/hw/nes/nes_cm.c
34106@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
34107 u32 cm_packets_retrans;
34108 u32 cm_packets_created;
34109 u32 cm_packets_received;
34110-atomic_t cm_listens_created;
34111-atomic_t cm_listens_destroyed;
34112+atomic_unchecked_t cm_listens_created;
34113+atomic_unchecked_t cm_listens_destroyed;
34114 u32 cm_backlog_drops;
34115-atomic_t cm_loopbacks;
34116-atomic_t cm_nodes_created;
34117-atomic_t cm_nodes_destroyed;
34118-atomic_t cm_accel_dropped_pkts;
34119-atomic_t cm_resets_recvd;
34120+atomic_unchecked_t cm_loopbacks;
34121+atomic_unchecked_t cm_nodes_created;
34122+atomic_unchecked_t cm_nodes_destroyed;
34123+atomic_unchecked_t cm_accel_dropped_pkts;
34124+atomic_unchecked_t cm_resets_recvd;
34125
34126 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
34127 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
34128@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
34129
34130 static struct nes_cm_core *g_cm_core;
34131
34132-atomic_t cm_connects;
34133-atomic_t cm_accepts;
34134-atomic_t cm_disconnects;
34135-atomic_t cm_closes;
34136-atomic_t cm_connecteds;
34137-atomic_t cm_connect_reqs;
34138-atomic_t cm_rejects;
34139+atomic_unchecked_t cm_connects;
34140+atomic_unchecked_t cm_accepts;
34141+atomic_unchecked_t cm_disconnects;
34142+atomic_unchecked_t cm_closes;
34143+atomic_unchecked_t cm_connecteds;
34144+atomic_unchecked_t cm_connect_reqs;
34145+atomic_unchecked_t cm_rejects;
34146
34147 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
34148 {
34149@@ -1281,7 +1281,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
34150 kfree(listener);
34151 listener = NULL;
34152 ret = 0;
34153- atomic_inc(&cm_listens_destroyed);
34154+ atomic_inc_unchecked(&cm_listens_destroyed);
34155 } else {
34156 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
34157 }
34158@@ -1480,7 +1480,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
34159 cm_node->rem_mac);
34160
34161 add_hte_node(cm_core, cm_node);
34162- atomic_inc(&cm_nodes_created);
34163+ atomic_inc_unchecked(&cm_nodes_created);
34164
34165 return cm_node;
34166 }
34167@@ -1538,7 +1538,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
34168 }
34169
34170 atomic_dec(&cm_core->node_cnt);
34171- atomic_inc(&cm_nodes_destroyed);
34172+ atomic_inc_unchecked(&cm_nodes_destroyed);
34173 nesqp = cm_node->nesqp;
34174 if (nesqp) {
34175 nesqp->cm_node = NULL;
34176@@ -1602,7 +1602,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
34177
34178 static void drop_packet(struct sk_buff *skb)
34179 {
34180- atomic_inc(&cm_accel_dropped_pkts);
34181+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
34182 dev_kfree_skb_any(skb);
34183 }
34184
34185@@ -1665,7 +1665,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
34186 {
34187
34188 int reset = 0; /* whether to send reset in case of err.. */
34189- atomic_inc(&cm_resets_recvd);
34190+ atomic_inc_unchecked(&cm_resets_recvd);
34191 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
34192 " refcnt=%d\n", cm_node, cm_node->state,
34193 atomic_read(&cm_node->ref_count));
34194@@ -2306,7 +2306,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
34195 rem_ref_cm_node(cm_node->cm_core, cm_node);
34196 return NULL;
34197 }
34198- atomic_inc(&cm_loopbacks);
34199+ atomic_inc_unchecked(&cm_loopbacks);
34200 loopbackremotenode->loopbackpartner = cm_node;
34201 loopbackremotenode->tcp_cntxt.rcv_wscale =
34202 NES_CM_DEFAULT_RCV_WND_SCALE;
34203@@ -2581,7 +2581,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
34204 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
34205 else {
34206 rem_ref_cm_node(cm_core, cm_node);
34207- atomic_inc(&cm_accel_dropped_pkts);
34208+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
34209 dev_kfree_skb_any(skb);
34210 }
34211 break;
34212@@ -2889,7 +2889,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
34213
34214 if ((cm_id) && (cm_id->event_handler)) {
34215 if (issue_disconn) {
34216- atomic_inc(&cm_disconnects);
34217+ atomic_inc_unchecked(&cm_disconnects);
34218 cm_event.event = IW_CM_EVENT_DISCONNECT;
34219 cm_event.status = disconn_status;
34220 cm_event.local_addr = cm_id->local_addr;
34221@@ -2911,7 +2911,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
34222 }
34223
34224 if (issue_close) {
34225- atomic_inc(&cm_closes);
34226+ atomic_inc_unchecked(&cm_closes);
34227 nes_disconnect(nesqp, 1);
34228
34229 cm_id->provider_data = nesqp;
34230@@ -3047,7 +3047,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
34231
34232 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
34233 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
34234- atomic_inc(&cm_accepts);
34235+ atomic_inc_unchecked(&cm_accepts);
34236
34237 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
34238 netdev_refcnt_read(nesvnic->netdev));
34239@@ -3242,7 +3242,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
34240 struct nes_cm_core *cm_core;
34241 u8 *start_buff;
34242
34243- atomic_inc(&cm_rejects);
34244+ atomic_inc_unchecked(&cm_rejects);
34245 cm_node = (struct nes_cm_node *)cm_id->provider_data;
34246 loopback = cm_node->loopbackpartner;
34247 cm_core = cm_node->cm_core;
34248@@ -3302,7 +3302,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
34249 ntohl(cm_id->local_addr.sin_addr.s_addr),
34250 ntohs(cm_id->local_addr.sin_port));
34251
34252- atomic_inc(&cm_connects);
34253+ atomic_inc_unchecked(&cm_connects);
34254 nesqp->active_conn = 1;
34255
34256 /* cache the cm_id in the qp */
34257@@ -3412,7 +3412,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
34258 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
34259 return err;
34260 }
34261- atomic_inc(&cm_listens_created);
34262+ atomic_inc_unchecked(&cm_listens_created);
34263 }
34264
34265 cm_id->add_ref(cm_id);
34266@@ -3513,7 +3513,7 @@ static void cm_event_connected(struct nes_cm_event *event)
34267
34268 if (nesqp->destroyed)
34269 return;
34270- atomic_inc(&cm_connecteds);
34271+ atomic_inc_unchecked(&cm_connecteds);
34272 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
34273 " local port 0x%04X. jiffies = %lu.\n",
34274 nesqp->hwqp.qp_id,
34275@@ -3693,7 +3693,7 @@ static void cm_event_reset(struct nes_cm_event *event)
34276
34277 cm_id->add_ref(cm_id);
34278 ret = cm_id->event_handler(cm_id, &cm_event);
34279- atomic_inc(&cm_closes);
34280+ atomic_inc_unchecked(&cm_closes);
34281 cm_event.event = IW_CM_EVENT_CLOSE;
34282 cm_event.status = 0;
34283 cm_event.provider_data = cm_id->provider_data;
34284@@ -3729,7 +3729,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
34285 return;
34286 cm_id = cm_node->cm_id;
34287
34288- atomic_inc(&cm_connect_reqs);
34289+ atomic_inc_unchecked(&cm_connect_reqs);
34290 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
34291 cm_node, cm_id, jiffies);
34292
34293@@ -3769,7 +3769,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
34294 return;
34295 cm_id = cm_node->cm_id;
34296
34297- atomic_inc(&cm_connect_reqs);
34298+ atomic_inc_unchecked(&cm_connect_reqs);
34299 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
34300 cm_node, cm_id, jiffies);
34301
34302diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
34303index 3ba7be3..c81f6ff 100644
34304--- a/drivers/infiniband/hw/nes/nes_mgt.c
34305+++ b/drivers/infiniband/hw/nes/nes_mgt.c
34306@@ -40,8 +40,8 @@
34307 #include "nes.h"
34308 #include "nes_mgt.h"
34309
34310-atomic_t pau_qps_created;
34311-atomic_t pau_qps_destroyed;
34312+atomic_unchecked_t pau_qps_created;
34313+atomic_unchecked_t pau_qps_destroyed;
34314
34315 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
34316 {
34317@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
34318 {
34319 struct sk_buff *skb;
34320 unsigned long flags;
34321- atomic_inc(&pau_qps_destroyed);
34322+ atomic_inc_unchecked(&pau_qps_destroyed);
34323
34324 /* Free packets that have not yet been forwarded */
34325 /* Lock is acquired by skb_dequeue when removing the skb */
34326@@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
34327 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
34328 skb_queue_head_init(&nesqp->pau_list);
34329 spin_lock_init(&nesqp->pau_lock);
34330- atomic_inc(&pau_qps_created);
34331+ atomic_inc_unchecked(&pau_qps_created);
34332 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
34333 }
34334
34335diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
34336index 0564be7..f68b0f1 100644
34337--- a/drivers/infiniband/hw/nes/nes_nic.c
34338+++ b/drivers/infiniband/hw/nes/nes_nic.c
34339@@ -1272,39 +1272,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
34340 target_stat_values[++index] = mh_detected;
34341 target_stat_values[++index] = mh_pauses_sent;
34342 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
34343- target_stat_values[++index] = atomic_read(&cm_connects);
34344- target_stat_values[++index] = atomic_read(&cm_accepts);
34345- target_stat_values[++index] = atomic_read(&cm_disconnects);
34346- target_stat_values[++index] = atomic_read(&cm_connecteds);
34347- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
34348- target_stat_values[++index] = atomic_read(&cm_rejects);
34349- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
34350- target_stat_values[++index] = atomic_read(&qps_created);
34351- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
34352- target_stat_values[++index] = atomic_read(&qps_destroyed);
34353- target_stat_values[++index] = atomic_read(&cm_closes);
34354+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
34355+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
34356+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
34357+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
34358+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
34359+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
34360+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
34361+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
34362+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
34363+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
34364+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
34365 target_stat_values[++index] = cm_packets_sent;
34366 target_stat_values[++index] = cm_packets_bounced;
34367 target_stat_values[++index] = cm_packets_created;
34368 target_stat_values[++index] = cm_packets_received;
34369 target_stat_values[++index] = cm_packets_dropped;
34370 target_stat_values[++index] = cm_packets_retrans;
34371- target_stat_values[++index] = atomic_read(&cm_listens_created);
34372- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
34373+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
34374+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
34375 target_stat_values[++index] = cm_backlog_drops;
34376- target_stat_values[++index] = atomic_read(&cm_loopbacks);
34377- target_stat_values[++index] = atomic_read(&cm_nodes_created);
34378- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
34379- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
34380- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
34381+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
34382+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
34383+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
34384+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
34385+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
34386 target_stat_values[++index] = nesadapter->free_4kpbl;
34387 target_stat_values[++index] = nesadapter->free_256pbl;
34388 target_stat_values[++index] = int_mod_timer_init;
34389 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
34390 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
34391 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
34392- target_stat_values[++index] = atomic_read(&pau_qps_created);
34393- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
34394+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
34395+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
34396 }
34397
34398 /**
34399diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
34400index 07e4fba..685f041 100644
34401--- a/drivers/infiniband/hw/nes/nes_verbs.c
34402+++ b/drivers/infiniband/hw/nes/nes_verbs.c
34403@@ -46,9 +46,9 @@
34404
34405 #include <rdma/ib_umem.h>
34406
34407-atomic_t mod_qp_timouts;
34408-atomic_t qps_created;
34409-atomic_t sw_qps_destroyed;
34410+atomic_unchecked_t mod_qp_timouts;
34411+atomic_unchecked_t qps_created;
34412+atomic_unchecked_t sw_qps_destroyed;
34413
34414 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
34415
34416@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
34417 if (init_attr->create_flags)
34418 return ERR_PTR(-EINVAL);
34419
34420- atomic_inc(&qps_created);
34421+ atomic_inc_unchecked(&qps_created);
34422 switch (init_attr->qp_type) {
34423 case IB_QPT_RC:
34424 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
34425@@ -1462,7 +1462,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
34426 struct iw_cm_event cm_event;
34427 int ret = 0;
34428
34429- atomic_inc(&sw_qps_destroyed);
34430+ atomic_inc_unchecked(&sw_qps_destroyed);
34431 nesqp->destroyed = 1;
34432
34433 /* Blow away the connection if it exists. */
34434diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
34435index 4d11575..3e890e5 100644
34436--- a/drivers/infiniband/hw/qib/qib.h
34437+++ b/drivers/infiniband/hw/qib/qib.h
34438@@ -51,6 +51,7 @@
34439 #include <linux/completion.h>
34440 #include <linux/kref.h>
34441 #include <linux/sched.h>
34442+#include <linux/slab.h>
34443
34444 #include "qib_common.h"
34445 #include "qib_verbs.h"
34446diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
34447index da739d9..da1c7f4 100644
34448--- a/drivers/input/gameport/gameport.c
34449+++ b/drivers/input/gameport/gameport.c
34450@@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
34451 */
34452 static void gameport_init_port(struct gameport *gameport)
34453 {
34454- static atomic_t gameport_no = ATOMIC_INIT(0);
34455+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
34456
34457 __module_get(THIS_MODULE);
34458
34459 mutex_init(&gameport->drv_mutex);
34460 device_initialize(&gameport->dev);
34461 dev_set_name(&gameport->dev, "gameport%lu",
34462- (unsigned long)atomic_inc_return(&gameport_no) - 1);
34463+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
34464 gameport->dev.bus = &gameport_bus;
34465 gameport->dev.release = gameport_release_port;
34466 if (gameport->parent)
34467diff --git a/drivers/input/input.c b/drivers/input/input.c
34468index 53a0dde..abffda7 100644
34469--- a/drivers/input/input.c
34470+++ b/drivers/input/input.c
34471@@ -1902,7 +1902,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
34472 */
34473 int input_register_device(struct input_dev *dev)
34474 {
34475- static atomic_t input_no = ATOMIC_INIT(0);
34476+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
34477 struct input_handler *handler;
34478 unsigned int packet_size;
34479 const char *path;
34480@@ -1945,7 +1945,7 @@ int input_register_device(struct input_dev *dev)
34481 dev->setkeycode = input_default_setkeycode;
34482
34483 dev_set_name(&dev->dev, "input%ld",
34484- (unsigned long) atomic_inc_return(&input_no) - 1);
34485+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
34486
34487 error = device_add(&dev->dev);
34488 if (error)
34489diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
34490index 04c69af..5f92d00 100644
34491--- a/drivers/input/joystick/sidewinder.c
34492+++ b/drivers/input/joystick/sidewinder.c
34493@@ -30,6 +30,7 @@
34494 #include <linux/kernel.h>
34495 #include <linux/module.h>
34496 #include <linux/slab.h>
34497+#include <linux/sched.h>
34498 #include <linux/init.h>
34499 #include <linux/input.h>
34500 #include <linux/gameport.h>
34501diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
34502index 83811e4..0822b90 100644
34503--- a/drivers/input/joystick/xpad.c
34504+++ b/drivers/input/joystick/xpad.c
34505@@ -726,7 +726,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
34506
34507 static int xpad_led_probe(struct usb_xpad *xpad)
34508 {
34509- static atomic_t led_seq = ATOMIC_INIT(0);
34510+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
34511 long led_no;
34512 struct xpad_led *led;
34513 struct led_classdev *led_cdev;
34514@@ -739,7 +739,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
34515 if (!led)
34516 return -ENOMEM;
34517
34518- led_no = (long)atomic_inc_return(&led_seq) - 1;
34519+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
34520
34521 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
34522 led->xpad = xpad;
34523diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
34524index 4c842c3..590b0bf 100644
34525--- a/drivers/input/mousedev.c
34526+++ b/drivers/input/mousedev.c
34527@@ -738,7 +738,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
34528
34529 spin_unlock_irq(&client->packet_lock);
34530
34531- if (copy_to_user(buffer, data, count))
34532+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
34533 return -EFAULT;
34534
34535 return count;
34536diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
34537index d0f7533..fb8215b 100644
34538--- a/drivers/input/serio/serio.c
34539+++ b/drivers/input/serio/serio.c
34540@@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
34541 */
34542 static void serio_init_port(struct serio *serio)
34543 {
34544- static atomic_t serio_no = ATOMIC_INIT(0);
34545+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
34546
34547 __module_get(THIS_MODULE);
34548
34549@@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
34550 mutex_init(&serio->drv_mutex);
34551 device_initialize(&serio->dev);
34552 dev_set_name(&serio->dev, "serio%ld",
34553- (long)atomic_inc_return(&serio_no) - 1);
34554+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
34555 serio->dev.bus = &serio_bus;
34556 serio->dev.release = serio_release_port;
34557 serio->dev.groups = serio_device_attr_groups;
34558diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
34559index 81837b0..d7470e8 100644
34560--- a/drivers/iommu/amd_iommu_init.c
34561+++ b/drivers/iommu/amd_iommu_init.c
34562@@ -1888,7 +1888,7 @@ static int __init state_next(void)
34563 case IOMMU_ACPI_FINISHED:
34564 early_enable_iommus();
34565 register_syscore_ops(&amd_iommu_syscore_ops);
34566- x86_platform.iommu_shutdown = disable_iommus;
34567+ *(void **)&x86_platform.iommu_shutdown = disable_iommus;
34568 init_state = IOMMU_ENABLED;
34569 break;
34570 case IOMMU_ENABLED:
34571@@ -2030,7 +2030,7 @@ int __init amd_iommu_detect(void)
34572
34573 amd_iommu_detected = true;
34574 iommu_detected = 1;
34575- x86_init.iommu.iommu_init = amd_iommu_init;
34576+ *(void **)&x86_init.iommu.iommu_init = amd_iommu_init;
34577
34578 return 0;
34579 }
34580diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
34581index 86e2f4a..d1cec5d 100644
34582--- a/drivers/iommu/dmar.c
34583+++ b/drivers/iommu/dmar.c
34584@@ -555,7 +555,7 @@ int __init detect_intel_iommu(void)
34585
34586 #ifdef CONFIG_X86
34587 if (ret)
34588- x86_init.iommu.iommu_init = intel_iommu_init;
34589+ *(void **)&x86_init.iommu.iommu_init = intel_iommu_init;
34590 #endif
34591 }
34592 early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
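The two IOMMU hunks use the same const-bypass store as the radeon_ttm change above: with constify, x86_init and x86_platform carry const function pointers, so boot-time registration of iommu_init and iommu_shutdown writes through a *(void **) cast rather than a plain assignment. A sketch on a hypothetical ops structure:

    /* Writing a const member is formally undefined in ISO C; the kernel
       relies on the object still sitting in writable memory at this
       point in early boot. */
    struct ops {
        int (* const init)(void);               /* constified member */
    };
    static struct ops o;

    static void set_init(int (*fn)(void))
    {
        *(void **)&o.init = fn;                 /* bypass the qualifier */
    }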
34593diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
34594index c679867..6e2e34d 100644
34595--- a/drivers/isdn/capi/capi.c
34596+++ b/drivers/isdn/capi/capi.c
34597@@ -83,8 +83,8 @@ struct capiminor {
34598
34599 struct capi20_appl *ap;
34600 u32 ncci;
34601- atomic_t datahandle;
34602- atomic_t msgid;
34603+ atomic_unchecked_t datahandle;
34604+ atomic_unchecked_t msgid;
34605
34606 struct tty_port port;
34607 int ttyinstop;
34608@@ -393,7 +393,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
34609 capimsg_setu16(s, 2, mp->ap->applid);
34610 capimsg_setu8 (s, 4, CAPI_DATA_B3);
34611 capimsg_setu8 (s, 5, CAPI_RESP);
34612- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
34613+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
34614 capimsg_setu32(s, 8, mp->ncci);
34615 capimsg_setu16(s, 12, datahandle);
34616 }
34617@@ -514,14 +514,14 @@ static void handle_minor_send(struct capiminor *mp)
34618 mp->outbytes -= len;
34619 spin_unlock_bh(&mp->outlock);
34620
34621- datahandle = atomic_inc_return(&mp->datahandle);
34622+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
34623 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
34624 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
34625 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
34626 capimsg_setu16(skb->data, 2, mp->ap->applid);
34627 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
34628 capimsg_setu8 (skb->data, 5, CAPI_REQ);
34629- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
34630+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
34631 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
34632 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
34633 capimsg_setu16(skb->data, 16, len); /* Data length */
34634diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
34635index 67abf3f..076b3a6 100644
34636--- a/drivers/isdn/gigaset/interface.c
34637+++ b/drivers/isdn/gigaset/interface.c
34638@@ -160,9 +160,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
34639 }
34640 tty->driver_data = cs;
34641
34642- ++cs->port.count;
34643+ atomic_inc(&cs->port.count);
34644
34645- if (cs->port.count == 1) {
34646+ if (atomic_read(&cs->port.count) == 1) {
34647 tty_port_tty_set(&cs->port, tty);
34648 tty->low_latency = 1;
34649 }
34650@@ -186,9 +186,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
34651
34652 if (!cs->connected)
34653 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
34654- else if (!cs->port.count)
34655+ else if (!atomic_read(&cs->port.count))
34656 dev_warn(cs->dev, "%s: device not opened\n", __func__);
34657- else if (!--cs->port.count)
34658+ else if (!atomic_dec_return(&cs->port.count))
34659 tty_port_tty_set(&cs->port, NULL);
34660
34661 mutex_unlock(&cs->mutex);
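The gigaset hunk, like the larger isdn_tty one below, follows this tree's conversion of struct tty_port's open count from a plain int to atomic_t, so open, close and hangup paths can adjust it without sharing one lock, detecting first-open and last-close from the returned value. A kernel-flavoured sketch of the resulting pattern (assumes the tree-wide atomic port.count; function names hypothetical):

    /* First opener wires up the tty, last closer unwires it. */
    static void example_open(struct tty_port *port, struct tty_struct *tty)
    {
        if (atomic_inc_return(&port->count) == 1)
            tty_port_tty_set(port, tty);
    }

    static void example_close(struct tty_port *port)
    {
        if (atomic_dec_return(&port->count) == 0)
            tty_port_tty_set(port, NULL);
    }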
34662diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
34663index 821f7ac..28d4030 100644
34664--- a/drivers/isdn/hardware/avm/b1.c
34665+++ b/drivers/isdn/hardware/avm/b1.c
34666@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
34667 }
34668 if (left) {
34669 if (t4file->user) {
34670- if (copy_from_user(buf, dp, left))
34671+ if (left > sizeof buf || copy_from_user(buf, dp, left))
34672 return -EFAULT;
34673 } else {
34674 memcpy(buf, dp, left);
34675@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
34676 }
34677 if (left) {
34678 if (config->user) {
34679- if (copy_from_user(buf, dp, left))
34680+ if (left > sizeof buf || copy_from_user(buf, dp, left))
34681 return -EFAULT;
34682 } else {
34683 memcpy(buf, dp, left);
34684diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
34685index b817809..409caff 100644
34686--- a/drivers/isdn/i4l/isdn_tty.c
34687+++ b/drivers/isdn/i4l/isdn_tty.c
34688@@ -1513,9 +1513,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
34689
34690 #ifdef ISDN_DEBUG_MODEM_OPEN
34691 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
34692- port->count);
34693+ atomic_read(&port->count));
34694 #endif
34695- port->count++;
34696+ atomic_inc(&port->count);
34697 port->tty = tty;
34698 /*
34699 * Start up serial port
34700@@ -1559,7 +1559,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
34701 #endif
34702 return;
34703 }
34704- if ((tty->count == 1) && (port->count != 1)) {
34705+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
34706 /*
34707 * Uh, oh. tty->count is 1, which means that the tty
34708 * structure will be freed. Info->count should always
34709@@ -1568,15 +1568,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
34710 * serial port won't be shutdown.
34711 */
34712 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
34713- "info->count is %d\n", port->count);
34714- port->count = 1;
34715+ "info->count is %d\n", atomic_read(&port->count));
34716+ atomic_set(&port->count, 1);
34717 }
34718- if (--port->count < 0) {
34719+ if (atomic_dec_return(&port->count) < 0) {
34720 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
34721- info->line, port->count);
34722- port->count = 0;
34723+ info->line, atomic_read(&port->count));
34724+ atomic_set(&port->count, 0);
34725 }
34726- if (port->count) {
34727+ if (atomic_read(&port->count)) {
34728 #ifdef ISDN_DEBUG_MODEM_OPEN
34729 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
34730 #endif
34731@@ -1630,7 +1630,7 @@ isdn_tty_hangup(struct tty_struct *tty)
34732 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
34733 return;
34734 isdn_tty_shutdown(info);
34735- port->count = 0;
34736+ atomic_set(&port->count, 0);
34737 port->flags &= ~ASYNC_NORMAL_ACTIVE;
34738 port->tty = NULL;
34739 wake_up_interruptible(&port->open_wait);
34740@@ -1971,7 +1971,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
34741 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
34742 modem_info *info = &dev->mdm.info[i];
34743
34744- if (info->port.count == 0)
34745+ if (atomic_read(&info->port.count) == 0)
34746 continue;
34747 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
34748 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
34749diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
34750index e74df7c..03a03ba 100644
34751--- a/drivers/isdn/icn/icn.c
34752+++ b/drivers/isdn/icn/icn.c
34753@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
34754 if (count > len)
34755 count = len;
34756 if (user) {
34757- if (copy_from_user(msg, buf, count))
34758+ if (count > sizeof msg || copy_from_user(msg, buf, count))
34759 return -EFAULT;
34760 } else
34761 memcpy(msg, buf, count);
34762diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
34763index b5fdcb7..5b6c59f 100644
34764--- a/drivers/lguest/core.c
34765+++ b/drivers/lguest/core.c
34766@@ -92,9 +92,17 @@ static __init int map_switcher(void)
34767 * it's worked so far. The end address needs +1 because __get_vm_area
34768 * allocates an extra guard page, so we need space for that.
34769 */
34770+
34771+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
34772+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
34773+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
34774+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
34775+#else
34776 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
34777 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
34778 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
34779+#endif
34780+
34781 if (!switcher_vma) {
34782 err = -ENOMEM;
34783 printk("lguest: could not map switcher pages high\n");
34784@@ -119,7 +127,7 @@ static __init int map_switcher(void)
34785 * Now the Switcher is mapped at the right address, we can't fail!
34786 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
34787 */
34788- memcpy(switcher_vma->addr, start_switcher_text,
34789+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
34790 end_switcher_text - start_switcher_text);
34791
34792 printk(KERN_INFO "lguest: mapped switcher at %p\n",
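The lguest hunk above reads the switcher code through ktla_ktva() and, on KERNEXEC kernels, maps the switcher VMA with VM_KERNEXEC: with KERNEXEC the kernel text lives at a read-only alias, so the plain linear address of start_switcher_text is not the address memcpy should read from. A conceptual sketch of the translation only; the offset constant here is hypothetical, not the real PaX macro body:

/* KERNEXEC keeps text at a fixed alias; translating "kernel text
 * linear address" to "kernel text virtual address" is one addition. */
#define KTLA_KTVA_DELTA_SKETCH	0x01000000UL	/* hypothetical offset */
#define ktla_ktva_sketch(addr) \
	((void *)((unsigned long)(addr) + KTLA_KTVA_DELTA_SKETCH))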
34793diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
34794index 4af12e1..0e89afe 100644
34795--- a/drivers/lguest/x86/core.c
34796+++ b/drivers/lguest/x86/core.c
34797@@ -59,7 +59,7 @@ static struct {
34798 /* Offset from where switcher.S was compiled to where we've copied it */
34799 static unsigned long switcher_offset(void)
34800 {
34801- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
34802+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
34803 }
34804
34805 /* This cpu's struct lguest_pages. */
34806@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
34807 * These copies are pretty cheap, so we do them unconditionally: */
34808 /* Save the current Host top-level page directory.
34809 */
34810+
34811+#ifdef CONFIG_PAX_PER_CPU_PGD
34812+ pages->state.host_cr3 = read_cr3();
34813+#else
34814 pages->state.host_cr3 = __pa(current->mm->pgd);
34815+#endif
34816+
34817 /*
34818 * Set up the Guest's page tables to see this CPU's pages (and no
34819 * other CPU's pages).
34820@@ -476,7 +482,7 @@ void __init lguest_arch_host_init(void)
34821 * compiled-in switcher code and the high-mapped copy we just made.
34822 */
34823 for (i = 0; i < IDT_ENTRIES; i++)
34824- default_idt_entries[i] += switcher_offset();
34825+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
34826
34827 /*
34828 * Set up the Switcher's per-cpu areas.
34829@@ -559,7 +565,7 @@ void __init lguest_arch_host_init(void)
34830 * it will be undisturbed when we switch. To change %cs and jump we
34831 * need this structure to feed to Intel's "lcall" instruction.
34832 */
34833- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
34834+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
34835 lguest_entry.segment = LGUEST_CS;
34836
34837 /*
34838diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
34839index 40634b0..4f5855e 100644
34840--- a/drivers/lguest/x86/switcher_32.S
34841+++ b/drivers/lguest/x86/switcher_32.S
34842@@ -87,6 +87,7 @@
34843 #include <asm/page.h>
34844 #include <asm/segment.h>
34845 #include <asm/lguest.h>
34846+#include <asm/processor-flags.h>
34847
34848 // We mark the start of the code to copy
34849 // It's placed in .text tho it's never run here
34850@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
34851 // Changes type when we load it: damn Intel!
34852 // For after we switch over our page tables
34853 // That entry will be read-only: we'd crash.
34854+
34855+#ifdef CONFIG_PAX_KERNEXEC
34856+ mov %cr0, %edx
34857+ xor $X86_CR0_WP, %edx
34858+ mov %edx, %cr0
34859+#endif
34860+
34861 movl $(GDT_ENTRY_TSS*8), %edx
34862 ltr %dx
34863
34864@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
34865 // Let's clear it again for our return.
34866 // The GDT descriptor of the Host
34867 // Points to the table after two "size" bytes
34868- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
34869+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
34870 // Clear "used" from type field (byte 5, bit 2)
34871- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
34872+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
34873+
34874+#ifdef CONFIG_PAX_KERNEXEC
34875+ mov %cr0, %eax
34876+ xor $X86_CR0_WP, %eax
34877+ mov %eax, %cr0
34878+#endif
34879
34880 // Once our page table's switched, the Guest is live!
34881 // The Host fades as we run this final step.
34882@@ -295,13 +309,12 @@ deliver_to_host:
34883 // I consulted gcc, and it gave
34884 // These instructions, which I gladly credit:
34885 leal (%edx,%ebx,8), %eax
34886- movzwl (%eax),%edx
34887- movl 4(%eax), %eax
34888- xorw %ax, %ax
34889- orl %eax, %edx
34890+ movl 4(%eax), %edx
34891+ movw (%eax), %dx
34892 // Now the address of the handler's in %edx
34893 // We call it now: its "iret" drops us home.
34894- jmp *%edx
34895+ ljmp $__KERNEL_CS, $1f
34896+1: jmp *%edx
34897
34898 // Every interrupt can come to us here
34899 // But we must truly tell each apart.
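The KERNEXEC portions of the switcher hunk above briefly clear CR0.WP so the TSS type and the GDT "used" bit can be rewritten in tables that are otherwise mapped read-only, then flip it back. The same toggle expressed as C with inline assembly for readability (a sketch; the helper name is hypothetical and the real patch does this directly in the .S file):

static inline void cr0_wp_toggle_sketch(void)
{
	unsigned long cr0;

	asm volatile("mov %%cr0, %0" : "=r" (cr0));
	cr0 ^= 0x00010000UL;			/* X86_CR0_WP, bit 16 */
	asm volatile("mov %0, %%cr0" : : "r" (cr0));
}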
34900diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
34901index 7155945..4bcc562 100644
34902--- a/drivers/md/bitmap.c
34903+++ b/drivers/md/bitmap.c
34904@@ -1779,7 +1779,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
34905 chunk_kb ? "KB" : "B");
34906 if (bitmap->storage.file) {
34907 seq_printf(seq, ", file: ");
34908- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
34909+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
34910 }
34911
34912 seq_printf(seq, "\n");
34913diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
34914index a651d52..82f8a95 100644
34915--- a/drivers/md/dm-ioctl.c
34916+++ b/drivers/md/dm-ioctl.c
34917@@ -1601,7 +1601,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
34918 cmd == DM_LIST_VERSIONS_CMD)
34919 return 0;
34920
34921- if ((cmd == DM_DEV_CREATE_CMD)) {
34922+ if (cmd == DM_DEV_CREATE_CMD) {
34923 if (!*param->name) {
34924 DMWARN("name not supplied when creating device");
34925 return -EINVAL;
34926diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
34927index fd61f98..8050783 100644
34928--- a/drivers/md/dm-raid1.c
34929+++ b/drivers/md/dm-raid1.c
34930@@ -40,7 +40,7 @@ enum dm_raid1_error {
34931
34932 struct mirror {
34933 struct mirror_set *ms;
34934- atomic_t error_count;
34935+ atomic_unchecked_t error_count;
34936 unsigned long error_type;
34937 struct dm_dev *dev;
34938 sector_t offset;
34939@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
34940 struct mirror *m;
34941
34942 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
34943- if (!atomic_read(&m->error_count))
34944+ if (!atomic_read_unchecked(&m->error_count))
34945 return m;
34946
34947 return NULL;
34948@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
34949 * simple way to tell if a device has encountered
34950 * errors.
34951 */
34952- atomic_inc(&m->error_count);
34953+ atomic_inc_unchecked(&m->error_count);
34954
34955 if (test_and_set_bit(error_type, &m->error_type))
34956 return;
34957@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
34958 struct mirror *m = get_default_mirror(ms);
34959
34960 do {
34961- if (likely(!atomic_read(&m->error_count)))
34962+ if (likely(!atomic_read_unchecked(&m->error_count)))
34963 return m;
34964
34965 if (m-- == ms->mirror)
34966@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
34967 {
34968 struct mirror *default_mirror = get_default_mirror(m->ms);
34969
34970- return !atomic_read(&default_mirror->error_count);
34971+ return !atomic_read_unchecked(&default_mirror->error_count);
34972 }
34973
34974 static int mirror_available(struct mirror_set *ms, struct bio *bio)
34975@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
34976 */
34977 if (likely(region_in_sync(ms, region, 1)))
34978 m = choose_mirror(ms, bio->bi_sector);
34979- else if (m && atomic_read(&m->error_count))
34980+ else if (m && atomic_read_unchecked(&m->error_count))
34981 m = NULL;
34982
34983 if (likely(m))
34984@@ -938,7 +938,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
34985 }
34986
34987 ms->mirror[mirror].ms = ms;
34988- atomic_set(&(ms->mirror[mirror].error_count), 0);
34989+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
34990 ms->mirror[mirror].error_type = 0;
34991 ms->mirror[mirror].offset = offset;
34992
34993@@ -1356,7 +1356,7 @@ static void mirror_resume(struct dm_target *ti)
34994 */
34995 static char device_status_char(struct mirror *m)
34996 {
34997- if (!atomic_read(&(m->error_count)))
34998+ if (!atomic_read_unchecked(&(m->error_count)))
34999 return 'A';
35000
35001 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
35002diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
35003index e2f87653..f279abe 100644
35004--- a/drivers/md/dm-stripe.c
35005+++ b/drivers/md/dm-stripe.c
35006@@ -20,7 +20,7 @@ struct stripe {
35007 struct dm_dev *dev;
35008 sector_t physical_start;
35009
35010- atomic_t error_count;
35011+ atomic_unchecked_t error_count;
35012 };
35013
35014 struct stripe_c {
35015@@ -183,7 +183,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
35016 kfree(sc);
35017 return r;
35018 }
35019- atomic_set(&(sc->stripe[i].error_count), 0);
35020+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
35021 }
35022
35023 ti->private = sc;
35024@@ -324,7 +324,7 @@ static int stripe_status(struct dm_target *ti, status_type_t type,
35025 DMEMIT("%d ", sc->stripes);
35026 for (i = 0; i < sc->stripes; i++) {
35027 DMEMIT("%s ", sc->stripe[i].dev->name);
35028- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
35029+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
35030 'D' : 'A';
35031 }
35032 buffer[i] = '\0';
35033@@ -371,8 +371,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
35034 */
35035 for (i = 0; i < sc->stripes; i++)
35036 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
35037- atomic_inc(&(sc->stripe[i].error_count));
35038- if (atomic_read(&(sc->stripe[i].error_count)) <
35039+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
35040+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
35041 DM_IO_ERROR_THRESHOLD)
35042 schedule_work(&sc->trigger_event);
35043 }
35044diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
35045index fa29557..d24a5b7 100644
35046--- a/drivers/md/dm-table.c
35047+++ b/drivers/md/dm-table.c
35048@@ -390,7 +390,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
35049 if (!dev_size)
35050 return 0;
35051
35052- if ((start >= dev_size) || (start + len > dev_size)) {
35053+ if ((start >= dev_size) || (len > dev_size - start)) {
35054 DMWARN("%s: %s too small for target: "
35055 "start=%llu, len=%llu, dev_size=%llu",
35056 dm_device_name(ti->table->md), bdevname(bdev, b),
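The dm-table.c hunk above is an integer-overflow fix rather than a hardening annotation: 'start + len' can wrap sector_t and slip past the bound, while 'len > dev_size - start' cannot, because the preceding 'start >= dev_size' test guarantees the subtraction does not underflow. The same check in isolation:

/* Overflow-safe range validation, mirroring the hunk above. */
static int range_is_invalid(unsigned long long start,
			    unsigned long long len,
			    unsigned long long dev_size)
{
	if (start >= dev_size)
		return 1;
	/* equivalent to start + len > dev_size, minus the wrap */
	return len > dev_size - start;
}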
35057diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
35058index 693e149..b7e0fde 100644
35059--- a/drivers/md/dm-thin-metadata.c
35060+++ b/drivers/md/dm-thin-metadata.c
35061@@ -397,7 +397,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
35062 {
35063 pmd->info.tm = pmd->tm;
35064 pmd->info.levels = 2;
35065- pmd->info.value_type.context = pmd->data_sm;
35066+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
35067 pmd->info.value_type.size = sizeof(__le64);
35068 pmd->info.value_type.inc = data_block_inc;
35069 pmd->info.value_type.dec = data_block_dec;
35070@@ -416,7 +416,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
35071
35072 pmd->bl_info.tm = pmd->tm;
35073 pmd->bl_info.levels = 1;
35074- pmd->bl_info.value_type.context = pmd->data_sm;
35075+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
35076 pmd->bl_info.value_type.size = sizeof(__le64);
35077 pmd->bl_info.value_type.inc = data_block_inc;
35078 pmd->bl_info.value_type.dec = data_block_dec;
35079diff --git a/drivers/md/dm.c b/drivers/md/dm.c
35080index 77e6eff..913d695 100644
35081--- a/drivers/md/dm.c
35082+++ b/drivers/md/dm.c
35083@@ -182,9 +182,9 @@ struct mapped_device {
35084 /*
35085 * Event handling.
35086 */
35087- atomic_t event_nr;
35088+ atomic_unchecked_t event_nr;
35089 wait_queue_head_t eventq;
35090- atomic_t uevent_seq;
35091+ atomic_unchecked_t uevent_seq;
35092 struct list_head uevent_list;
35093 spinlock_t uevent_lock; /* Protect access to uevent_list */
35094
35095@@ -1847,8 +1847,8 @@ static struct mapped_device *alloc_dev(int minor)
35096 rwlock_init(&md->map_lock);
35097 atomic_set(&md->holders, 1);
35098 atomic_set(&md->open_count, 0);
35099- atomic_set(&md->event_nr, 0);
35100- atomic_set(&md->uevent_seq, 0);
35101+ atomic_set_unchecked(&md->event_nr, 0);
35102+ atomic_set_unchecked(&md->uevent_seq, 0);
35103 INIT_LIST_HEAD(&md->uevent_list);
35104 spin_lock_init(&md->uevent_lock);
35105
35106@@ -1982,7 +1982,7 @@ static void event_callback(void *context)
35107
35108 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
35109
35110- atomic_inc(&md->event_nr);
35111+ atomic_inc_unchecked(&md->event_nr);
35112 wake_up(&md->eventq);
35113 }
35114
35115@@ -2637,18 +2637,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
35116
35117 uint32_t dm_next_uevent_seq(struct mapped_device *md)
35118 {
35119- return atomic_add_return(1, &md->uevent_seq);
35120+ return atomic_add_return_unchecked(1, &md->uevent_seq);
35121 }
35122
35123 uint32_t dm_get_event_nr(struct mapped_device *md)
35124 {
35125- return atomic_read(&md->event_nr);
35126+ return atomic_read_unchecked(&md->event_nr);
35127 }
35128
35129 int dm_wait_event(struct mapped_device *md, int event_nr)
35130 {
35131 return wait_event_interruptible(md->eventq,
35132- (event_nr != atomic_read(&md->event_nr)));
35133+ (event_nr != atomic_read_unchecked(&md->event_nr)));
35134 }
35135
35136 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
35137diff --git a/drivers/md/md.c b/drivers/md/md.c
35138index 6120071..31d9be2 100644
35139--- a/drivers/md/md.c
35140+++ b/drivers/md/md.c
35141@@ -240,10 +240,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
35142 * start build, activate spare
35143 */
35144 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
35145-static atomic_t md_event_count;
35146+static atomic_unchecked_t md_event_count;
35147 void md_new_event(struct mddev *mddev)
35148 {
35149- atomic_inc(&md_event_count);
35150+ atomic_inc_unchecked(&md_event_count);
35151 wake_up(&md_event_waiters);
35152 }
35153 EXPORT_SYMBOL_GPL(md_new_event);
35154@@ -253,7 +253,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
35155 */
35156 static void md_new_event_inintr(struct mddev *mddev)
35157 {
35158- atomic_inc(&md_event_count);
35159+ atomic_inc_unchecked(&md_event_count);
35160 wake_up(&md_event_waiters);
35161 }
35162
35163@@ -1504,7 +1504,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
35164 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
35165 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
35166 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
35167- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
35168+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
35169
35170 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
35171 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
35172@@ -1748,7 +1748,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
35173 else
35174 sb->resync_offset = cpu_to_le64(0);
35175
35176- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
35177+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
35178
35179 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
35180 sb->size = cpu_to_le64(mddev->dev_sectors);
35181@@ -2748,7 +2748,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
35182 static ssize_t
35183 errors_show(struct md_rdev *rdev, char *page)
35184 {
35185- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
35186+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
35187 }
35188
35189 static ssize_t
35190@@ -2757,7 +2757,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
35191 char *e;
35192 unsigned long n = simple_strtoul(buf, &e, 10);
35193 if (*buf && (*e == 0 || *e == '\n')) {
35194- atomic_set(&rdev->corrected_errors, n);
35195+ atomic_set_unchecked(&rdev->corrected_errors, n);
35196 return len;
35197 }
35198 return -EINVAL;
35199@@ -3204,8 +3204,8 @@ int md_rdev_init(struct md_rdev *rdev)
35200 rdev->sb_loaded = 0;
35201 rdev->bb_page = NULL;
35202 atomic_set(&rdev->nr_pending, 0);
35203- atomic_set(&rdev->read_errors, 0);
35204- atomic_set(&rdev->corrected_errors, 0);
35205+ atomic_set_unchecked(&rdev->read_errors, 0);
35206+ atomic_set_unchecked(&rdev->corrected_errors, 0);
35207
35208 INIT_LIST_HEAD(&rdev->same_set);
35209 init_waitqueue_head(&rdev->blocked_wait);
35210@@ -6984,7 +6984,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
35211
35212 spin_unlock(&pers_lock);
35213 seq_printf(seq, "\n");
35214- seq->poll_event = atomic_read(&md_event_count);
35215+ seq->poll_event = atomic_read_unchecked(&md_event_count);
35216 return 0;
35217 }
35218 if (v == (void*)2) {
35219@@ -7087,7 +7087,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
35220 return error;
35221
35222 seq = file->private_data;
35223- seq->poll_event = atomic_read(&md_event_count);
35224+ seq->poll_event = atomic_read_unchecked(&md_event_count);
35225 return error;
35226 }
35227
35228@@ -7101,7 +7101,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
35229 /* always allow read */
35230 mask = POLLIN | POLLRDNORM;
35231
35232- if (seq->poll_event != atomic_read(&md_event_count))
35233+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
35234 mask |= POLLERR | POLLPRI;
35235 return mask;
35236 }
35237@@ -7145,7 +7145,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
35238 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
35239 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
35240 (int)part_stat_read(&disk->part0, sectors[1]) -
35241- atomic_read(&disk->sync_io);
35242+ atomic_read_unchecked(&disk->sync_io);
35243 /* sync IO will cause sync_io to increase before the disk_stats
35244 * as sync_io is counted when a request starts, and
35245 * disk_stats is counted when it completes.
35246diff --git a/drivers/md/md.h b/drivers/md/md.h
35247index af443ab..0f93be3 100644
35248--- a/drivers/md/md.h
35249+++ b/drivers/md/md.h
35250@@ -94,13 +94,13 @@ struct md_rdev {
35251 * only maintained for arrays that
35252 * support hot removal
35253 */
35254- atomic_t read_errors; /* number of consecutive read errors that
35255+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
35256 * we have tried to ignore.
35257 */
35258 struct timespec last_read_error; /* monotonic time since our
35259 * last read error
35260 */
35261- atomic_t corrected_errors; /* number of corrected read errors,
35262+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
35263 * for reporting to userspace and storing
35264 * in superblock.
35265 */
35266@@ -432,7 +432,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
35267
35268 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
35269 {
35270- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
35271+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
35272 }
35273
35274 struct md_personality
35275diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
35276index 1cbfc6b..56e1dbb 100644
35277--- a/drivers/md/persistent-data/dm-space-map.h
35278+++ b/drivers/md/persistent-data/dm-space-map.h
35279@@ -60,6 +60,7 @@ struct dm_space_map {
35280 int (*root_size)(struct dm_space_map *sm, size_t *result);
35281 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
35282 };
35283+typedef struct dm_space_map __no_const dm_space_map_no_const;
35284
35285 /*----------------------------------------------------------------*/
35286
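The dm_space_map_no_const typedef added above pairs with the casts in the dm-thin-metadata hunks: the constify plugin makes ops-style structures (all function pointers) read-only by default, and the __no_const alias is the explicit opt-out for the one place a pointer to such a structure must land in a writable field. A minimal model, assuming the attribute expands to nothing on non-plugin builds:

#define __no_const_model	/* plugin attribute; empty without the plugin */

struct ops_model {		/* constified automatically by the plugin */
	int (*doit)(void);
};
typedef struct ops_model __no_const_model ops_model_no_const;

/* a value_type.context-style field that must stay assignable: */
struct holder_model {
	ops_model_no_const *context;
};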
35287diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
35288index a0f7309..5599dbc 100644
35289--- a/drivers/md/raid1.c
35290+++ b/drivers/md/raid1.c
35291@@ -1819,7 +1819,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
35292 if (r1_sync_page_io(rdev, sect, s,
35293 bio->bi_io_vec[idx].bv_page,
35294 READ) != 0)
35295- atomic_add(s, &rdev->corrected_errors);
35296+ atomic_add_unchecked(s, &rdev->corrected_errors);
35297 }
35298 sectors -= s;
35299 sect += s;
35300@@ -2041,7 +2041,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
35301 test_bit(In_sync, &rdev->flags)) {
35302 if (r1_sync_page_io(rdev, sect, s,
35303 conf->tmppage, READ)) {
35304- atomic_add(s, &rdev->corrected_errors);
35305+ atomic_add_unchecked(s, &rdev->corrected_errors);
35306 printk(KERN_INFO
35307 "md/raid1:%s: read error corrected "
35308 "(%d sectors at %llu on %s)\n",
35309diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
35310index c9acbd7..386cd3e 100644
35311--- a/drivers/md/raid10.c
35312+++ b/drivers/md/raid10.c
35313@@ -1878,7 +1878,7 @@ static void end_sync_read(struct bio *bio, int error)
35314 /* The write handler will notice the lack of
35315 * R10BIO_Uptodate and record any errors etc
35316 */
35317- atomic_add(r10_bio->sectors,
35318+ atomic_add_unchecked(r10_bio->sectors,
35319 &conf->mirrors[d].rdev->corrected_errors);
35320
35321 /* for reconstruct, we always reschedule after a read.
35322@@ -2227,7 +2227,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
35323 {
35324 struct timespec cur_time_mon;
35325 unsigned long hours_since_last;
35326- unsigned int read_errors = atomic_read(&rdev->read_errors);
35327+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
35328
35329 ktime_get_ts(&cur_time_mon);
35330
35331@@ -2249,9 +2249,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
35332 * overflowing the shift of read_errors by hours_since_last.
35333 */
35334 if (hours_since_last >= 8 * sizeof(read_errors))
35335- atomic_set(&rdev->read_errors, 0);
35336+ atomic_set_unchecked(&rdev->read_errors, 0);
35337 else
35338- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
35339+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
35340 }
35341
35342 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
35343@@ -2305,8 +2305,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
35344 return;
35345
35346 check_decay_read_errors(mddev, rdev);
35347- atomic_inc(&rdev->read_errors);
35348- if (atomic_read(&rdev->read_errors) > max_read_errors) {
35349+ atomic_inc_unchecked(&rdev->read_errors);
35350+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
35351 char b[BDEVNAME_SIZE];
35352 bdevname(rdev->bdev, b);
35353
35354@@ -2314,7 +2314,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
35355 "md/raid10:%s: %s: Raid device exceeded "
35356 "read_error threshold [cur %d:max %d]\n",
35357 mdname(mddev), b,
35358- atomic_read(&rdev->read_errors), max_read_errors);
35359+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
35360 printk(KERN_NOTICE
35361 "md/raid10:%s: %s: Failing raid device\n",
35362 mdname(mddev), b);
35363@@ -2469,7 +2469,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
35364 sect +
35365 choose_data_offset(r10_bio, rdev)),
35366 bdevname(rdev->bdev, b));
35367- atomic_add(s, &rdev->corrected_errors);
35368+ atomic_add_unchecked(s, &rdev->corrected_errors);
35369 }
35370
35371 rdev_dec_pending(rdev, mddev);
35372diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
35373index a450268..c4168a9 100644
35374--- a/drivers/md/raid5.c
35375+++ b/drivers/md/raid5.c
35376@@ -1789,21 +1789,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
35377 mdname(conf->mddev), STRIPE_SECTORS,
35378 (unsigned long long)s,
35379 bdevname(rdev->bdev, b));
35380- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
35381+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
35382 clear_bit(R5_ReadError, &sh->dev[i].flags);
35383 clear_bit(R5_ReWrite, &sh->dev[i].flags);
35384 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
35385 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
35386
35387- if (atomic_read(&rdev->read_errors))
35388- atomic_set(&rdev->read_errors, 0);
35389+ if (atomic_read_unchecked(&rdev->read_errors))
35390+ atomic_set_unchecked(&rdev->read_errors, 0);
35391 } else {
35392 const char *bdn = bdevname(rdev->bdev, b);
35393 int retry = 0;
35394 int set_bad = 0;
35395
35396 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
35397- atomic_inc(&rdev->read_errors);
35398+ atomic_inc_unchecked(&rdev->read_errors);
35399 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
35400 printk_ratelimited(
35401 KERN_WARNING
35402@@ -1831,7 +1831,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
35403 mdname(conf->mddev),
35404 (unsigned long long)s,
35405 bdn);
35406- } else if (atomic_read(&rdev->read_errors)
35407+ } else if (atomic_read_unchecked(&rdev->read_errors)
35408 > conf->max_nr_stripes)
35409 printk(KERN_WARNING
35410 "md/raid:%s: Too many read errors, failing device %s.\n",
35411diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
35412index d33101a..6b13069 100644
35413--- a/drivers/media/dvb-core/dvbdev.c
35414+++ b/drivers/media/dvb-core/dvbdev.c
35415@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
35416 const struct dvb_device *template, void *priv, int type)
35417 {
35418 struct dvb_device *dvbdev;
35419- struct file_operations *dvbdevfops;
35420+ file_operations_no_const *dvbdevfops;
35421 struct device *clsdev;
35422 int minor;
35423 int id;
35424diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
35425index 404f63a..4796533 100644
35426--- a/drivers/media/dvb-frontends/dib3000.h
35427+++ b/drivers/media/dvb-frontends/dib3000.h
35428@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
35429 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
35430 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
35431 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
35432-};
35433+} __no_const;
35434
35435 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
35436 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
35437diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
35438index 3aa6856..435ad25 100644
35439--- a/drivers/media/pci/cx88/cx88-alsa.c
35440+++ b/drivers/media/pci/cx88/cx88-alsa.c
35441@@ -749,7 +749,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
35442 * Only boards with eeprom and byte 1 at eeprom=1 have it
35443 */
35444
35445-static const struct pci_device_id cx88_audio_pci_tbl[] __devinitdata = {
35446+static const struct pci_device_id cx88_audio_pci_tbl[] __devinitconst = {
35447 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
35448 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
35449 {0, }
35450diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
35451index feff57e..66a2c67 100644
35452--- a/drivers/media/pci/ddbridge/ddbridge-core.c
35453+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
35454@@ -1679,7 +1679,7 @@ static struct ddb_info ddb_v6 = {
35455 .subvendor = _subvend, .subdevice = _subdev, \
35456 .driver_data = (unsigned long)&_driverdata }
35457
35458-static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
35459+static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
35460 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
35461 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
35462 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
35463diff --git a/drivers/media/pci/ngene/ngene-cards.c b/drivers/media/pci/ngene/ngene-cards.c
35464index 96a13ed..6df45b4 100644
35465--- a/drivers/media/pci/ngene/ngene-cards.c
35466+++ b/drivers/media/pci/ngene/ngene-cards.c
35467@@ -741,7 +741,7 @@ static struct ngene_info ngene_info_terratec = {
35468
35469 /****************************************************************************/
35470
35471-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
35472+static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
35473 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
35474 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
35475 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
35476diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
35477index a3b1a34..71ce0e3 100644
35478--- a/drivers/media/platform/omap/omap_vout.c
35479+++ b/drivers/media/platform/omap/omap_vout.c
35480@@ -65,7 +65,6 @@ enum omap_vout_channels {
35481 OMAP_VIDEO2,
35482 };
35483
35484-static struct videobuf_queue_ops video_vbq_ops;
35485 /* Variables configurable through module params*/
35486 static u32 video1_numbuffers = 3;
35487 static u32 video2_numbuffers = 3;
35488@@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
35489 {
35490 struct videobuf_queue *q;
35491 struct omap_vout_device *vout = NULL;
35492+ static struct videobuf_queue_ops video_vbq_ops = {
35493+ .buf_setup = omap_vout_buffer_setup,
35494+ .buf_prepare = omap_vout_buffer_prepare,
35495+ .buf_release = omap_vout_buffer_release,
35496+ .buf_queue = omap_vout_buffer_queue,
35497+ };
35498
35499 vout = video_drvdata(file);
35500 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
35501@@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
35502 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
35503
35504 q = &vout->vbq;
35505- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
35506- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
35507- video_vbq_ops.buf_release = omap_vout_buffer_release;
35508- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
35509 spin_lock_init(&vout->vbq_lock);
35510
35511 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
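The omap_vout hunks above replace a writable file-scope videobuf_queue_ops, filled in field by field at open() time, with a function-local static initialized at build time, removing the runtime stores into a global ops table. The shape of the pattern (names illustrative; the driver's struct stays non-const because the videobuf API takes a non-const pointer):

struct q_ops_model {
	void (*buf_setup)(void);
};

static void buf_setup_model(void)
{
}

static void open_model(void)
{
	/* initialized once, at build time, in its only user */
	static const struct q_ops_model ops = {
		.buf_setup = buf_setup_model,
	};

	ops.buf_setup();
}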
35512diff --git a/drivers/media/platform/timblogiw.c b/drivers/media/platform/timblogiw.c
35513index 02194c0..36d69c1 100644
35514--- a/drivers/media/platform/timblogiw.c
35515+++ b/drivers/media/platform/timblogiw.c
35516@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
35517
35518 /* Platform device functions */
35519
35520-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
35521+static struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
35522 .vidioc_querycap = timblogiw_querycap,
35523 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
35524 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
35525@@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
35526 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
35527 };
35528
35529-static __devinitconst struct v4l2_file_operations timblogiw_fops = {
35530+static struct v4l2_file_operations timblogiw_fops = {
35531 .owner = THIS_MODULE,
35532 .open = timblogiw_open,
35533 .release = timblogiw_close,
35534diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
35535index 697a421..16c5a5f 100644
35536--- a/drivers/media/radio/radio-cadet.c
35537+++ b/drivers/media/radio/radio-cadet.c
35538@@ -302,6 +302,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
35539 unsigned char readbuf[RDS_BUFFER];
35540 int i = 0;
35541
35542+ if (count > RDS_BUFFER)
35543+ return -EFAULT;
35544 mutex_lock(&dev->lock);
35545 if (dev->rdsstat == 0)
35546 cadet_start_rds(dev);
35547@@ -317,7 +319,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
35548 while (i < count && dev->rdsin != dev->rdsout)
35549 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
35550
35551- if (i && copy_to_user(data, readbuf, i))
35552+ if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
35553 i = -EFAULT;
35554 unlock:
35555 mutex_unlock(&dev->lock);
35556diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
35557index 3940bb0..fb3952a 100644
35558--- a/drivers/media/usb/dvb-usb/cxusb.c
35559+++ b/drivers/media/usb/dvb-usb/cxusb.c
35560@@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
35561
35562 struct dib0700_adapter_state {
35563 int (*set_param_save) (struct dvb_frontend *);
35564-};
35565+} __no_const;
35566
35567 static int dib7070_set_param_override(struct dvb_frontend *fe)
35568 {
35569diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
35570index 9382895..ac8093c 100644
35571--- a/drivers/media/usb/dvb-usb/dw2102.c
35572+++ b/drivers/media/usb/dvb-usb/dw2102.c
35573@@ -95,7 +95,7 @@ struct su3000_state {
35574
35575 struct s6x0_state {
35576 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
35577-};
35578+} __no_const;
35579
35580 /* debug */
35581 static int dvb_usb_dw2102_debug;
35582diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
35583index fb69baa..cf7ad22 100644
35584--- a/drivers/message/fusion/mptbase.c
35585+++ b/drivers/message/fusion/mptbase.c
35586@@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
35587 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
35588 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
35589
35590+#ifdef CONFIG_GRKERNSEC_HIDESYM
35591+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
35592+#else
35593 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
35594 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
35595+#endif
35596+
35597 /*
35598 * Rounding UP to nearest 4-kB boundary here...
35599 */
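The mptbase.c hunk above keeps raw kernel addresses out of a world-readable /proc file when GRKERNSEC_HIDESYM is set, by printing NULL in their place. Mainline's idiom for the same intent is the %pK format, which shows the real pointer only to sufficiently privileged readers, sketched below (an illustration, not this driver's code):

#include <linux/seq_file.h>

static void show_frames_sketch(struct seq_file *m, void *req_frames,
			       unsigned long req_frames_dma)
{
	/* %pK prints zeros unless the reader may see kernel pointers */
	seq_printf(m, " RequestFrames @ %pK (Dma @ %pK)\n",
		   req_frames, (void *)req_frames_dma);
}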
35600diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
35601index 551262e..7551198 100644
35602--- a/drivers/message/fusion/mptsas.c
35603+++ b/drivers/message/fusion/mptsas.c
35604@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
35605 return 0;
35606 }
35607
35608+static inline void
35609+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
35610+{
35611+ if (phy_info->port_details) {
35612+ phy_info->port_details->rphy = rphy;
35613+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
35614+ ioc->name, rphy));
35615+ }
35616+
35617+ if (rphy) {
35618+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
35619+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
35620+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
35621+ ioc->name, rphy, rphy->dev.release));
35622+ }
35623+}
35624+
35625 /* no mutex */
35626 static void
35627 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
35628@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
35629 return NULL;
35630 }
35631
35632-static inline void
35633-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
35634-{
35635- if (phy_info->port_details) {
35636- phy_info->port_details->rphy = rphy;
35637- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
35638- ioc->name, rphy));
35639- }
35640-
35641- if (rphy) {
35642- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
35643- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
35644- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
35645- ioc->name, rphy, rphy->dev.release));
35646- }
35647-}
35648-
35649 static inline struct sas_port *
35650 mptsas_get_port(struct mptsas_phyinfo *phy_info)
35651 {
35652diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
35653index 0c3ced7..1fe34ec 100644
35654--- a/drivers/message/fusion/mptscsih.c
35655+++ b/drivers/message/fusion/mptscsih.c
35656@@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
35657
35658 h = shost_priv(SChost);
35659
35660- if (h) {
35661- if (h->info_kbuf == NULL)
35662- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
35663- return h->info_kbuf;
35664- h->info_kbuf[0] = '\0';
35665+ if (!h)
35666+ return NULL;
35667
35668- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
35669- h->info_kbuf[size-1] = '\0';
35670- }
35671+ if (h->info_kbuf == NULL)
35672+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
35673+ return h->info_kbuf;
35674+ h->info_kbuf[0] = '\0';
35675+
35676+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
35677+ h->info_kbuf[size-1] = '\0';
35678
35679 return h->info_kbuf;
35680 }
35681diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
35682index 8001aa6..b137580 100644
35683--- a/drivers/message/i2o/i2o_proc.c
35684+++ b/drivers/message/i2o/i2o_proc.c
35685@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
35686 "Array Controller Device"
35687 };
35688
35689-static char *chtostr(char *tmp, u8 *chars, int n)
35690-{
35691- tmp[0] = 0;
35692- return strncat(tmp, (char *)chars, n);
35693-}
35694-
35695 static int i2o_report_query_status(struct seq_file *seq, int block_status,
35696 char *group)
35697 {
35698@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
35699 } *result;
35700
35701 i2o_exec_execute_ddm_table ddm_table;
35702- char tmp[28 + 1];
35703
35704 result = kmalloc(sizeof(*result), GFP_KERNEL);
35705 if (!result)
35706@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
35707
35708 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
35709 seq_printf(seq, "%-#8x", ddm_table.module_id);
35710- seq_printf(seq, "%-29s",
35711- chtostr(tmp, ddm_table.module_name_version, 28));
35712+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
35713 seq_printf(seq, "%9d ", ddm_table.data_size);
35714 seq_printf(seq, "%8d", ddm_table.code_size);
35715
35716@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
35717
35718 i2o_driver_result_table *result;
35719 i2o_driver_store_table *dst;
35720- char tmp[28 + 1];
35721
35722 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
35723 if (result == NULL)
35724@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
35725
35726 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
35727 seq_printf(seq, "%-#8x", dst->module_id);
35728- seq_printf(seq, "%-29s",
35729- chtostr(tmp, dst->module_name_version, 28));
35730- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
35731+ seq_printf(seq, "%-.28s", dst->module_name_version);
35732+ seq_printf(seq, "%-.8s", dst->date);
35733 seq_printf(seq, "%8d ", dst->module_size);
35734 seq_printf(seq, "%8d ", dst->mpb_size);
35735 seq_printf(seq, "0x%04x", dst->module_flags);
35736@@ -1250,7 +1240,6 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
35737 // == (allow) 512d bytes (max)
35738 static u16 *work16 = (u16 *) work32;
35739 int token;
35740- char tmp[16 + 1];
35741
35742 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
35743
35744@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
35745 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
35746 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
35747 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
35748- seq_printf(seq, "Vendor info : %s\n",
35749- chtostr(tmp, (u8 *) (work32 + 2), 16));
35750- seq_printf(seq, "Product info : %s\n",
35751- chtostr(tmp, (u8 *) (work32 + 6), 16));
35752- seq_printf(seq, "Description : %s\n",
35753- chtostr(tmp, (u8 *) (work32 + 10), 16));
35754- seq_printf(seq, "Product rev. : %s\n",
35755- chtostr(tmp, (u8 *) (work32 + 14), 8));
35756+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
35757+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
35758+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
35759+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
35760
35761 seq_printf(seq, "Serial number : ");
35762 print_serial_number(seq, (u8 *) (work32 + 16),
35763@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
35764 u8 pad[256]; // allow up to 256 byte (max) serial number
35765 } result;
35766
35767- char tmp[24 + 1];
35768-
35769 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
35770
35771 if (token < 0) {
35772@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
35773 }
35774
35775 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
35776- seq_printf(seq, "Module name : %s\n",
35777- chtostr(tmp, result.module_name, 24));
35778- seq_printf(seq, "Module revision : %s\n",
35779- chtostr(tmp, result.module_rev, 8));
35780+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
35781+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
35782
35783 seq_printf(seq, "Serial number : ");
35784 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
35785@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
35786 u8 instance_number[4];
35787 } result;
35788
35789- char tmp[64 + 1];
35790-
35791 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
35792
35793 if (token < 0) {
35794@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
35795 return 0;
35796 }
35797
35798- seq_printf(seq, "Device name : %s\n",
35799- chtostr(tmp, result.device_name, 64));
35800- seq_printf(seq, "Service name : %s\n",
35801- chtostr(tmp, result.service_name, 64));
35802- seq_printf(seq, "Physical name : %s\n",
35803- chtostr(tmp, result.physical_location, 64));
35804- seq_printf(seq, "Instance number : %s\n",
35805- chtostr(tmp, result.instance_number, 4));
35806+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
35807+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
35808+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
35809+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
35810
35811 return 0;
35812 }
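The i2o_proc.c hunks above delete the chtostr() helper, which staged possibly-unterminated firmware strings through a fixed stack buffer, and instead bound the read with a printf precision: a "%.28s"-style conversion may read at most that many bytes, so no copy and no stack buffer are needed. A runnable userspace demonstration of the precision rule (plain C, not kernel code):

#include <stdio.h>

int main(void)
{
	/* fixed-width record with no NUL terminator */
	const char name[8] = { 'm', 'o', 'd', 'u', 'l', 'e', 'v', '1' };

	printf("%-.8s\n", name);	/* reads at most 8 bytes: safe */
	return 0;
}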
35813diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
35814index a8c08f3..155fe3d 100644
35815--- a/drivers/message/i2o/iop.c
35816+++ b/drivers/message/i2o/iop.c
35817@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
35818
35819 spin_lock_irqsave(&c->context_list_lock, flags);
35820
35821- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
35822- atomic_inc(&c->context_list_counter);
35823+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
35824+ atomic_inc_unchecked(&c->context_list_counter);
35825
35826- entry->context = atomic_read(&c->context_list_counter);
35827+ entry->context = atomic_read_unchecked(&c->context_list_counter);
35828
35829 list_add(&entry->list, &c->context_list);
35830
35831@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
35832
35833 #if BITS_PER_LONG == 64
35834 spin_lock_init(&c->context_list_lock);
35835- atomic_set(&c->context_list_counter, 0);
35836+ atomic_set_unchecked(&c->context_list_counter, 0);
35837 INIT_LIST_HEAD(&c->context_list);
35838 #endif
35839
35840diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
35841index 965c480..71f2db9 100644
35842--- a/drivers/mfd/janz-cmodio.c
35843+++ b/drivers/mfd/janz-cmodio.c
35844@@ -13,6 +13,7 @@
35845
35846 #include <linux/kernel.h>
35847 #include <linux/module.h>
35848+#include <linux/slab.h>
35849 #include <linux/init.h>
35850 #include <linux/pci.h>
35851 #include <linux/interrupt.h>
35852diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
35853index 3aa9a96..59cf685 100644
35854--- a/drivers/misc/kgdbts.c
35855+++ b/drivers/misc/kgdbts.c
35856@@ -832,7 +832,7 @@ static void run_plant_and_detach_test(int is_early)
35857 char before[BREAK_INSTR_SIZE];
35858 char after[BREAK_INSTR_SIZE];
35859
35860- probe_kernel_read(before, (char *)kgdbts_break_test,
35861+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
35862 BREAK_INSTR_SIZE);
35863 init_simple_test();
35864 ts.tst = plant_and_detach_test;
35865@@ -840,7 +840,7 @@ static void run_plant_and_detach_test(int is_early)
35866 /* Activate test with initial breakpoint */
35867 if (!is_early)
35868 kgdb_breakpoint();
35869- probe_kernel_read(after, (char *)kgdbts_break_test,
35870+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
35871 BREAK_INSTR_SIZE);
35872 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
35873 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
35874diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
35875index 4a87e5c..76bdf5c 100644
35876--- a/drivers/misc/lis3lv02d/lis3lv02d.c
35877+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
35878@@ -498,7 +498,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
35879 * the lid is closed. This leads to interrupts as soon as a little move
35880 * is done.
35881 */
35882- atomic_inc(&lis3->count);
35883+ atomic_inc_unchecked(&lis3->count);
35884
35885 wake_up_interruptible(&lis3->misc_wait);
35886 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
35887@@ -584,7 +584,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
35888 if (lis3->pm_dev)
35889 pm_runtime_get_sync(lis3->pm_dev);
35890
35891- atomic_set(&lis3->count, 0);
35892+ atomic_set_unchecked(&lis3->count, 0);
35893 return 0;
35894 }
35895
35896@@ -617,7 +617,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
35897 add_wait_queue(&lis3->misc_wait, &wait);
35898 while (true) {
35899 set_current_state(TASK_INTERRUPTIBLE);
35900- data = atomic_xchg(&lis3->count, 0);
35901+ data = atomic_xchg_unchecked(&lis3->count, 0);
35902 if (data)
35903 break;
35904
35905@@ -658,7 +658,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
35906 struct lis3lv02d, miscdev);
35907
35908 poll_wait(file, &lis3->misc_wait, wait);
35909- if (atomic_read(&lis3->count))
35910+ if (atomic_read_unchecked(&lis3->count))
35911 return POLLIN | POLLRDNORM;
35912 return 0;
35913 }
35914diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
35915index c439c82..1f20f57 100644
35916--- a/drivers/misc/lis3lv02d/lis3lv02d.h
35917+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
35918@@ -297,7 +297,7 @@ struct lis3lv02d {
35919 struct input_polled_dev *idev; /* input device */
35920 struct platform_device *pdev; /* platform device */
35921 struct regulator_bulk_data regulators[2];
35922- atomic_t count; /* interrupt count after last read */
35923+ atomic_unchecked_t count; /* interrupt count after last read */
35924 union axis_conversion ac; /* hw -> logical axis */
35925 int mapped_btns[3];
35926
35927diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
35928index 2f30bad..c4c13d0 100644
35929--- a/drivers/misc/sgi-gru/gruhandles.c
35930+++ b/drivers/misc/sgi-gru/gruhandles.c
35931@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
35932 unsigned long nsec;
35933
35934 nsec = CLKS2NSEC(clks);
35935- atomic_long_inc(&mcs_op_statistics[op].count);
35936- atomic_long_add(nsec, &mcs_op_statistics[op].total);
35937+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
35938+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
35939 if (mcs_op_statistics[op].max < nsec)
35940 mcs_op_statistics[op].max = nsec;
35941 }
35942diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
35943index 950dbe9..eeef0f8 100644
35944--- a/drivers/misc/sgi-gru/gruprocfs.c
35945+++ b/drivers/misc/sgi-gru/gruprocfs.c
35946@@ -32,9 +32,9 @@
35947
35948 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
35949
35950-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
35951+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
35952 {
35953- unsigned long val = atomic_long_read(v);
35954+ unsigned long val = atomic_long_read_unchecked(v);
35955
35956 seq_printf(s, "%16lu %s\n", val, id);
35957 }
35958@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
35959
35960 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
35961 for (op = 0; op < mcsop_last; op++) {
35962- count = atomic_long_read(&mcs_op_statistics[op].count);
35963- total = atomic_long_read(&mcs_op_statistics[op].total);
35964+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
35965+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
35966 max = mcs_op_statistics[op].max;
35967 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
35968 count ? total / count : 0, max);
35969diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
35970index 5c3ce24..4915ccb 100644
35971--- a/drivers/misc/sgi-gru/grutables.h
35972+++ b/drivers/misc/sgi-gru/grutables.h
35973@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
35974 * GRU statistics.
35975 */
35976 struct gru_stats_s {
35977- atomic_long_t vdata_alloc;
35978- atomic_long_t vdata_free;
35979- atomic_long_t gts_alloc;
35980- atomic_long_t gts_free;
35981- atomic_long_t gms_alloc;
35982- atomic_long_t gms_free;
35983- atomic_long_t gts_double_allocate;
35984- atomic_long_t assign_context;
35985- atomic_long_t assign_context_failed;
35986- atomic_long_t free_context;
35987- atomic_long_t load_user_context;
35988- atomic_long_t load_kernel_context;
35989- atomic_long_t lock_kernel_context;
35990- atomic_long_t unlock_kernel_context;
35991- atomic_long_t steal_user_context;
35992- atomic_long_t steal_kernel_context;
35993- atomic_long_t steal_context_failed;
35994- atomic_long_t nopfn;
35995- atomic_long_t asid_new;
35996- atomic_long_t asid_next;
35997- atomic_long_t asid_wrap;
35998- atomic_long_t asid_reuse;
35999- atomic_long_t intr;
36000- atomic_long_t intr_cbr;
36001- atomic_long_t intr_tfh;
36002- atomic_long_t intr_spurious;
36003- atomic_long_t intr_mm_lock_failed;
36004- atomic_long_t call_os;
36005- atomic_long_t call_os_wait_queue;
36006- atomic_long_t user_flush_tlb;
36007- atomic_long_t user_unload_context;
36008- atomic_long_t user_exception;
36009- atomic_long_t set_context_option;
36010- atomic_long_t check_context_retarget_intr;
36011- atomic_long_t check_context_unload;
36012- atomic_long_t tlb_dropin;
36013- atomic_long_t tlb_preload_page;
36014- atomic_long_t tlb_dropin_fail_no_asid;
36015- atomic_long_t tlb_dropin_fail_upm;
36016- atomic_long_t tlb_dropin_fail_invalid;
36017- atomic_long_t tlb_dropin_fail_range_active;
36018- atomic_long_t tlb_dropin_fail_idle;
36019- atomic_long_t tlb_dropin_fail_fmm;
36020- atomic_long_t tlb_dropin_fail_no_exception;
36021- atomic_long_t tfh_stale_on_fault;
36022- atomic_long_t mmu_invalidate_range;
36023- atomic_long_t mmu_invalidate_page;
36024- atomic_long_t flush_tlb;
36025- atomic_long_t flush_tlb_gru;
36026- atomic_long_t flush_tlb_gru_tgh;
36027- atomic_long_t flush_tlb_gru_zero_asid;
36028+ atomic_long_unchecked_t vdata_alloc;
36029+ atomic_long_unchecked_t vdata_free;
36030+ atomic_long_unchecked_t gts_alloc;
36031+ atomic_long_unchecked_t gts_free;
36032+ atomic_long_unchecked_t gms_alloc;
36033+ atomic_long_unchecked_t gms_free;
36034+ atomic_long_unchecked_t gts_double_allocate;
36035+ atomic_long_unchecked_t assign_context;
36036+ atomic_long_unchecked_t assign_context_failed;
36037+ atomic_long_unchecked_t free_context;
36038+ atomic_long_unchecked_t load_user_context;
36039+ atomic_long_unchecked_t load_kernel_context;
36040+ atomic_long_unchecked_t lock_kernel_context;
36041+ atomic_long_unchecked_t unlock_kernel_context;
36042+ atomic_long_unchecked_t steal_user_context;
36043+ atomic_long_unchecked_t steal_kernel_context;
36044+ atomic_long_unchecked_t steal_context_failed;
36045+ atomic_long_unchecked_t nopfn;
36046+ atomic_long_unchecked_t asid_new;
36047+ atomic_long_unchecked_t asid_next;
36048+ atomic_long_unchecked_t asid_wrap;
36049+ atomic_long_unchecked_t asid_reuse;
36050+ atomic_long_unchecked_t intr;
36051+ atomic_long_unchecked_t intr_cbr;
36052+ atomic_long_unchecked_t intr_tfh;
36053+ atomic_long_unchecked_t intr_spurious;
36054+ atomic_long_unchecked_t intr_mm_lock_failed;
36055+ atomic_long_unchecked_t call_os;
36056+ atomic_long_unchecked_t call_os_wait_queue;
36057+ atomic_long_unchecked_t user_flush_tlb;
36058+ atomic_long_unchecked_t user_unload_context;
36059+ atomic_long_unchecked_t user_exception;
36060+ atomic_long_unchecked_t set_context_option;
36061+ atomic_long_unchecked_t check_context_retarget_intr;
36062+ atomic_long_unchecked_t check_context_unload;
36063+ atomic_long_unchecked_t tlb_dropin;
36064+ atomic_long_unchecked_t tlb_preload_page;
36065+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
36066+ atomic_long_unchecked_t tlb_dropin_fail_upm;
36067+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
36068+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
36069+ atomic_long_unchecked_t tlb_dropin_fail_idle;
36070+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
36071+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
36072+ atomic_long_unchecked_t tfh_stale_on_fault;
36073+ atomic_long_unchecked_t mmu_invalidate_range;
36074+ atomic_long_unchecked_t mmu_invalidate_page;
36075+ atomic_long_unchecked_t flush_tlb;
36076+ atomic_long_unchecked_t flush_tlb_gru;
36077+ atomic_long_unchecked_t flush_tlb_gru_tgh;
36078+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
36079
36080- atomic_long_t copy_gpa;
36081- atomic_long_t read_gpa;
36082+ atomic_long_unchecked_t copy_gpa;
36083+ atomic_long_unchecked_t read_gpa;
36084
36085- atomic_long_t mesq_receive;
36086- atomic_long_t mesq_receive_none;
36087- atomic_long_t mesq_send;
36088- atomic_long_t mesq_send_failed;
36089- atomic_long_t mesq_noop;
36090- atomic_long_t mesq_send_unexpected_error;
36091- atomic_long_t mesq_send_lb_overflow;
36092- atomic_long_t mesq_send_qlimit_reached;
36093- atomic_long_t mesq_send_amo_nacked;
36094- atomic_long_t mesq_send_put_nacked;
36095- atomic_long_t mesq_page_overflow;
36096- atomic_long_t mesq_qf_locked;
36097- atomic_long_t mesq_qf_noop_not_full;
36098- atomic_long_t mesq_qf_switch_head_failed;
36099- atomic_long_t mesq_qf_unexpected_error;
36100- atomic_long_t mesq_noop_unexpected_error;
36101- atomic_long_t mesq_noop_lb_overflow;
36102- atomic_long_t mesq_noop_qlimit_reached;
36103- atomic_long_t mesq_noop_amo_nacked;
36104- atomic_long_t mesq_noop_put_nacked;
36105- atomic_long_t mesq_noop_page_overflow;
36106+ atomic_long_unchecked_t mesq_receive;
36107+ atomic_long_unchecked_t mesq_receive_none;
36108+ atomic_long_unchecked_t mesq_send;
36109+ atomic_long_unchecked_t mesq_send_failed;
36110+ atomic_long_unchecked_t mesq_noop;
36111+ atomic_long_unchecked_t mesq_send_unexpected_error;
36112+ atomic_long_unchecked_t mesq_send_lb_overflow;
36113+ atomic_long_unchecked_t mesq_send_qlimit_reached;
36114+ atomic_long_unchecked_t mesq_send_amo_nacked;
36115+ atomic_long_unchecked_t mesq_send_put_nacked;
36116+ atomic_long_unchecked_t mesq_page_overflow;
36117+ atomic_long_unchecked_t mesq_qf_locked;
36118+ atomic_long_unchecked_t mesq_qf_noop_not_full;
36119+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
36120+ atomic_long_unchecked_t mesq_qf_unexpected_error;
36121+ atomic_long_unchecked_t mesq_noop_unexpected_error;
36122+ atomic_long_unchecked_t mesq_noop_lb_overflow;
36123+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
36124+ atomic_long_unchecked_t mesq_noop_amo_nacked;
36125+ atomic_long_unchecked_t mesq_noop_put_nacked;
36126+ atomic_long_unchecked_t mesq_noop_page_overflow;
36127
36128 };
36129
36130@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
36131 tghop_invalidate, mcsop_last};
36132
36133 struct mcs_op_statistic {
36134- atomic_long_t count;
36135- atomic_long_t total;
36136+ atomic_long_unchecked_t count;
36137+ atomic_long_unchecked_t total;
36138 unsigned long max;
36139 };
36140
36141@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
36142
36143 #define STAT(id) do { \
36144 if (gru_options & OPT_STATS) \
36145- atomic_long_inc(&gru_stats.id); \
36146+ atomic_long_inc_unchecked(&gru_stats.id); \
36147 } while (0)
36148
36149 #ifdef CONFIG_SGI_GRU_DEBUG
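
The hunk above is representative of a large class of changes in this patch: pure statistics counters move from atomic_long_t to atomic_long_unchecked_t, and the STAT() macro switches to atomic_long_inc_unchecked(). Under PaX's REFCOUNT hardening, ordinary atomic increments trap on overflow to catch reference-count bugs; event counters are expected to wrap, so they are opted out through the _unchecked variants. A minimal userspace sketch of the split follows; the type and function names mirror the patch, but the bodies (C11 atomics plus an assert standing in for the overflow trap) are illustrative assumptions, not the kernel's arch-specific implementation.

#include <assert.h>
#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

/* Checked flavour: overflow is treated as a bug (reference counts). */
typedef struct { atomic_long counter; } atomic_long_t;
/* Unchecked flavour: wraparound is acceptable (statistics). */
typedef struct { atomic_long counter; } atomic_long_unchecked_t;

static void atomic_long_inc(atomic_long_t *v)
{
	long old = atomic_fetch_add(&v->counter, 1);
	/* The real REFCOUNT feature raises a CPU trap here. */
	assert(old != LONG_MAX && "refcount overflow");
}

static void atomic_long_inc_unchecked(atomic_long_unchecked_t *v)
{
	atomic_fetch_add(&v->counter, 1);	/* no overflow check, may wrap */
}

int main(void)
{
	atomic_long_unchecked_t hits = { 0 };
	atomic_long_inc_unchecked(&hits);
	printf("hits = %ld\n", atomic_load(&hits.counter));
	return 0;
}
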
36150diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
36151index c862cd4..0d176fe 100644
36152--- a/drivers/misc/sgi-xp/xp.h
36153+++ b/drivers/misc/sgi-xp/xp.h
36154@@ -288,7 +288,7 @@ struct xpc_interface {
36155 xpc_notify_func, void *);
36156 void (*received) (short, int, void *);
36157 enum xp_retval (*partid_to_nasids) (short, void *);
36158-};
36159+} __no_const;
36160
36161 extern struct xpc_interface xpc_interface;
36162
36163diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
36164index b94d5f7..7f494c5 100644
36165--- a/drivers/misc/sgi-xp/xpc.h
36166+++ b/drivers/misc/sgi-xp/xpc.h
36167@@ -835,6 +835,7 @@ struct xpc_arch_operations {
36168 void (*received_payload) (struct xpc_channel *, void *);
36169 void (*notify_senders_of_disconnect) (struct xpc_channel *);
36170 };
36171+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
36172
36173 /* struct xpc_partition act_state values (for XPC HB) */
36174
36175@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
36176 /* found in xpc_main.c */
36177 extern struct device *xpc_part;
36178 extern struct device *xpc_chan;
36179-extern struct xpc_arch_operations xpc_arch_ops;
36180+extern xpc_arch_operations_no_const xpc_arch_ops;
36181 extern int xpc_disengage_timelimit;
36182 extern int xpc_disengage_timedout;
36183 extern int xpc_activate_IRQ_rcvd;
36184diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
36185index d971817..3805cce 100644
36186--- a/drivers/misc/sgi-xp/xpc_main.c
36187+++ b/drivers/misc/sgi-xp/xpc_main.c
36188@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
36189 .notifier_call = xpc_system_die,
36190 };
36191
36192-struct xpc_arch_operations xpc_arch_ops;
36193+xpc_arch_operations_no_const xpc_arch_ops;
36194
36195 /*
36196 * Timer function to enforce the timelimit on the partition disengage.
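
xpc_arch_ops is filled in at runtime depending on the platform, so this structure of function pointers, which the constify GCC plugin would normally force into read-only memory, must stay writable; the new xpc_arch_operations_no_const typedef carries the __no_const marker for exactly such instances. A hedged sketch of the idea, with plain C standing in for the plugin and an illustrative field name (the real struct carries many callbacks):

#include <stdio.h>

struct xpc_arch_operations {
	void (*setup)(void);
};

typedef struct xpc_arch_operations xpc_arch_operations_no_const;

static void uv_setup(void) { puts("uv setup"); }

/* Selected at runtime, so this instance must stay writable; in the
 * patch the typedef carries __no_const to tell the constify plugin. */
static xpc_arch_operations_no_const xpc_arch_ops;

int main(void)
{
	xpc_arch_ops.setup = uv_setup;	/* e.g. when running on UV hardware */
	xpc_arch_ops.setup();
	return 0;
}
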
36197diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
36198index a0e1720..ee63d0b 100644
36199--- a/drivers/mmc/core/mmc_ops.c
36200+++ b/drivers/mmc/core/mmc_ops.c
36201@@ -245,7 +245,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
36202 void *data_buf;
36203 int is_on_stack;
36204
36205- is_on_stack = object_is_on_stack(buf);
36206+ is_on_stack = object_starts_on_stack(buf);
36207 if (is_on_stack) {
36208 /*
36209 * dma onto stack is unsafe/nonportable, but callers to this
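
The rename from object_is_on_stack() to object_starts_on_stack() makes the check's actual semantics explicit: it only tests whether the buffer's first byte lies within the current stack, not whether the whole object does. A small userspace sketch of that predicate, with the stack bounds passed in by the caller (in the kernel they come from the task's stack area):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* True if the object's first byte lies inside [stack, stack + size);
 * says nothing about where the object ends, hence the new name. */
static bool object_starts_on_stack(const void *obj,
				   const char *stack, size_t stack_size)
{
	const char *p = obj;
	return p >= stack && p < stack + stack_size;
}

int main(void)
{
	char frame[64];		/* stand-in for the task stack */
	printf("%d\n", object_starts_on_stack(&frame[8], frame, sizeof(frame)));
	return 0;
}
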
36210diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
36211index 53b8fd9..615b462 100644
36212--- a/drivers/mmc/host/dw_mmc.h
36213+++ b/drivers/mmc/host/dw_mmc.h
36214@@ -205,5 +205,5 @@ struct dw_mci_drv_data {
36215 int (*parse_dt)(struct dw_mci *host);
36216 int (*setup_bus)(struct dw_mci *host,
36217 struct device_node *slot_np, u8 bus_width);
36218-};
36219+} __do_const;
36220 #endif /* _DW_MMC_H_ */
36221diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
36222index a4eb8b5..8c0628f 100644
36223--- a/drivers/mtd/devices/doc2000.c
36224+++ b/drivers/mtd/devices/doc2000.c
36225@@ -753,7 +753,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
36226
36227 /* The ECC will not be calculated correctly if less than 512 is written */
36228 /* DBB-
36229- if (len != 0x200 && eccbuf)
36230+ if (len != 0x200)
36231 printk(KERN_WARNING
36232 "ECC needs a full sector write (adr: %lx size %lx)\n",
36233 (long) to, (long) len);
36234diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
36235index e706a23..b3d262f 100644
36236--- a/drivers/mtd/nand/denali.c
36237+++ b/drivers/mtd/nand/denali.c
36238@@ -26,6 +26,7 @@
36239 #include <linux/pci.h>
36240 #include <linux/mtd/mtd.h>
36241 #include <linux/module.h>
36242+#include <linux/slab.h>
36243
36244 #include "denali.h"
36245
36246diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
36247index 51b9d6a..52af9a7 100644
36248--- a/drivers/mtd/nftlmount.c
36249+++ b/drivers/mtd/nftlmount.c
36250@@ -24,6 +24,7 @@
36251 #include <asm/errno.h>
36252 #include <linux/delay.h>
36253 #include <linux/slab.h>
36254+#include <linux/sched.h>
36255 #include <linux/mtd/mtd.h>
36256 #include <linux/mtd/nand.h>
36257 #include <linux/mtd/nftl.h>
36258diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
36259index 203ff9d..0968ca8 100644
36260--- a/drivers/net/ethernet/8390/ax88796.c
36261+++ b/drivers/net/ethernet/8390/ax88796.c
36262@@ -872,9 +872,11 @@ static int ax_probe(struct platform_device *pdev)
36263 if (ax->plat->reg_offsets)
36264 ei_local->reg_offset = ax->plat->reg_offsets;
36265 else {
36266+ resource_size_t _mem_size = mem_size;
36267+ do_div(_mem_size, 0x18);
36268 ei_local->reg_offset = ax->reg_offsets;
36269 for (ret = 0; ret < 0x18; ret++)
36270- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
36271+ ax->reg_offsets[ret] = _mem_size * ret;
36272 }
36273
36274 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
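
mem_size here is a resource_size_t, which can be 64 bits wide even on 32-bit configurations, and a plain '/' would emit a 64-by-32 division that such architectures only get via libgcc; do_div() divides the 64-bit value in place, returns the remainder, and hoisting it out of the loop also divides once instead of 24 times. A runnable stand-in showing the calling convention (GNU C statement expression; the real macro lives in <asm/div64.h>, and like it this toy evaluates its arguments more than once):

#include <stdint.h>
#include <stdio.h>

#define do_div(n, base) ({				\
	uint32_t __rem = (uint32_t)((n) % (base));	\
	(n) = (n) / (base);				\
	__rem;						\
})

int main(void)
{
	uint64_t mem_size = 0x1000;	/* e.g. an I/O resource size */
	uint64_t step = mem_size;	/* copy: do_div modifies its argument */
	uint32_t rem = do_div(step, 0x18);
	printf("step=%llu rem=%u\n", (unsigned long long)step, rem);
	return 0;
}
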
36275diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
36276index 9c5ea6c..eaad276 100644
36277--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
36278+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
36279@@ -1046,7 +1046,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
36280 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
36281 {
36282 /* RX_MODE controlling object */
36283- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
36284+ bnx2x_init_rx_mode_obj(bp);
36285
36286 /* multicast configuration controlling object */
36287 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
36288diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
36289index 614981c..11216c7 100644
36290--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
36291+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
36292@@ -2375,15 +2375,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
36293 return rc;
36294 }
36295
36296-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
36297- struct bnx2x_rx_mode_obj *o)
36298+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
36299 {
36300 if (CHIP_IS_E1x(bp)) {
36301- o->wait_comp = bnx2x_empty_rx_mode_wait;
36302- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
36303+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
36304+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
36305 } else {
36306- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
36307- o->config_rx_mode = bnx2x_set_rx_mode_e2;
36308+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
36309+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
36310 }
36311 }
36312
36313diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
36314index acf2fe4..efb96df 100644
36315--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
36316+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
36317@@ -1281,8 +1281,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
36318
36319 /********************* RX MODE ****************/
36320
36321-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
36322- struct bnx2x_rx_mode_obj *o);
36323+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
36324
36325 /**
36326 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
36327diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
36328index d9308c32..d87b824 100644
36329--- a/drivers/net/ethernet/broadcom/tg3.h
36330+++ b/drivers/net/ethernet/broadcom/tg3.h
36331@@ -140,6 +140,7 @@
36332 #define CHIPREV_ID_5750_A0 0x4000
36333 #define CHIPREV_ID_5750_A1 0x4001
36334 #define CHIPREV_ID_5750_A3 0x4003
36335+#define CHIPREV_ID_5750_C1 0x4201
36336 #define CHIPREV_ID_5750_C2 0x4202
36337 #define CHIPREV_ID_5752_A0_HW 0x5000
36338 #define CHIPREV_ID_5752_A0 0x6000
36339diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
36340index 8cffcdf..aadf043 100644
36341--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
36342+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
36343@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
36344 */
36345 struct l2t_skb_cb {
36346 arp_failure_handler_func arp_failure_handler;
36347-};
36348+} __no_const;
36349
36350 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
36351
36352diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
36353index f879e92..726f20f 100644
36354--- a/drivers/net/ethernet/dec/tulip/de4x5.c
36355+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
36356@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
36357 for (i=0; i<ETH_ALEN; i++) {
36358 tmp.addr[i] = dev->dev_addr[i];
36359 }
36360- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
36361+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
36362 break;
36363
36364 case DE4X5_SET_HWADDR: /* Set the hardware address */
36365@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
36366 spin_lock_irqsave(&lp->lock, flags);
36367 memcpy(&statbuf, &lp->pktStats, ioc->len);
36368 spin_unlock_irqrestore(&lp->lock, flags);
36369- if (copy_to_user(ioc->data, &statbuf, ioc->len))
36370+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
36371 return -EFAULT;
36372 break;
36373 }
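
ioc->len is caller-controlled and both hunks copy out of a fixed-size kernel object, so an oversized length would disclose adjacent kernel memory; the added guard rejects such lengths before copy_to_user() runs. The same clamp-before-copy pattern reappears later in this patch (e.g. in drivers/parport/procfs.c). A reduced userspace model, with memcpy standing in for copy_to_user():

#include <stddef.h>
#include <string.h>

/* 'user_len' is attacker-controlled; 'kbuf'/'kbuf_size' describe the
 * fixed kernel-side object. */
static int copy_out_bounded(void *user_dst, size_t user_len,
			    const void *kbuf, size_t kbuf_size)
{
	if (user_len > kbuf_size)	/* would leak adjacent memory */
		return -14;		/* -EFAULT in the kernel */
	memcpy(user_dst, kbuf, user_len);
	return 0;
}

int main(void)
{
	char dst[8], src[4] = "abc";
	return copy_out_bounded(dst, sizeof(src), src, sizeof(src));
}
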
36374diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
36375index d1b6cc5..cde0d97 100644
36376--- a/drivers/net/ethernet/emulex/benet/be_main.c
36377+++ b/drivers/net/ethernet/emulex/benet/be_main.c
36378@@ -403,7 +403,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
36379
36380 if (wrapped)
36381 newacc += 65536;
36382- ACCESS_ONCE(*acc) = newacc;
36383+ ACCESS_ONCE_RW(*acc) = newacc;
36384 }
36385
36386 void be_parse_stats(struct be_adapter *adapter)
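
Elsewhere this patch redefines ACCESS_ONCE() with a const-qualified cast so it can only be used for reads, which turns every racy write site into a compile error; ACCESS_ONCE_RW() is the write-capable escape hatch used here and in the ixgbe, sfc and ath9k hunks below. A sketch of the macro pair, which should match the compiler.h half of the patch up to qualifier order:

#include <stdio.h>

#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

int main(void)
{
	unsigned acc = 0;

	ACCESS_ONCE_RW(acc) = 5;	/* write site: RW flavour */
	unsigned v = ACCESS_ONCE(acc);	/* read site: const flavour */
	/* ACCESS_ONCE(acc) = 7;	   rejected: assignment to const */
	printf("%u\n", v);
	return 0;
}
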
36387diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
36388index 74d749e..eefb1bd 100644
36389--- a/drivers/net/ethernet/faraday/ftgmac100.c
36390+++ b/drivers/net/ethernet/faraday/ftgmac100.c
36391@@ -31,6 +31,8 @@
36392 #include <linux/netdevice.h>
36393 #include <linux/phy.h>
36394 #include <linux/platform_device.h>
36395+#include <linux/interrupt.h>
36396+#include <linux/irqreturn.h>
36397 #include <net/ip.h>
36398
36399 #include "ftgmac100.h"
36400diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
36401index b901a01..1ff32ee 100644
36402--- a/drivers/net/ethernet/faraday/ftmac100.c
36403+++ b/drivers/net/ethernet/faraday/ftmac100.c
36404@@ -31,6 +31,8 @@
36405 #include <linux/module.h>
36406 #include <linux/netdevice.h>
36407 #include <linux/platform_device.h>
36408+#include <linux/interrupt.h>
36409+#include <linux/irqreturn.h>
36410
36411 #include "ftmac100.h"
36412
36413diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
36414index d929131..aed108f 100644
36415--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
36416+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
36417@@ -865,7 +865,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
36418 /* store the new cycle speed */
36419 adapter->cycle_speed = cycle_speed;
36420
36421- ACCESS_ONCE(adapter->base_incval) = incval;
36422+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
36423 smp_mb();
36424
36425 /* grab the ptp lock */
36426diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
36427index c2e420a..26a75e0 100644
36428--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
36429+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
36430@@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
36431 struct __vxge_hw_fifo *fifo;
36432 struct vxge_hw_fifo_config *config;
36433 u32 txdl_size, txdl_per_memblock;
36434- struct vxge_hw_mempool_cbs fifo_mp_callback;
36435+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
36436+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
36437+ };
36438+
36439 struct __vxge_hw_virtualpath *vpath;
36440
36441 if ((vp == NULL) || (attr == NULL)) {
36442@@ -3544,8 +3547,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
36443 goto exit;
36444 }
36445
36446- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
36447-
36448 fifo->mempool =
36449 __vxge_hw_mempool_create(vpath->hldev,
36450 fifo->config->memblock_size,
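
fifo_mp_callback was an automatic variable whose function pointer was assigned at runtime; making it static with a designated initializer binds the pointer at compile time, stops re-initialising it on every call, and leaves the instance eligible for the constify plugin's read-only treatment. In outline (names abbreviated):

struct vxge_hw_mempool_cbs {
	void (*item_func_alloc)(void);
};

static void fifo_item_alloc(void) { }

/* One static instance, bound at compile time, constifiable;
 * previously a writable stack object filled in per call. */
static struct vxge_hw_mempool_cbs fifo_mp_callback = {
	.item_func_alloc = fifo_item_alloc,
};

int main(void)
{
	fifo_mp_callback.item_func_alloc();
	return 0;
}
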
36451diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
36452index 927aa33..a6c2518 100644
36453--- a/drivers/net/ethernet/realtek/r8169.c
36454+++ b/drivers/net/ethernet/realtek/r8169.c
36455@@ -747,22 +747,22 @@ struct rtl8169_private {
36456 struct mdio_ops {
36457 void (*write)(struct rtl8169_private *, int, int);
36458 int (*read)(struct rtl8169_private *, int);
36459- } mdio_ops;
36460+ } __no_const mdio_ops;
36461
36462 struct pll_power_ops {
36463 void (*down)(struct rtl8169_private *);
36464 void (*up)(struct rtl8169_private *);
36465- } pll_power_ops;
36466+ } __no_const pll_power_ops;
36467
36468 struct jumbo_ops {
36469 void (*enable)(struct rtl8169_private *);
36470 void (*disable)(struct rtl8169_private *);
36471- } jumbo_ops;
36472+ } __no_const jumbo_ops;
36473
36474 struct csi_ops {
36475 void (*write)(struct rtl8169_private *, int, int);
36476 u32 (*read)(struct rtl8169_private *, int);
36477- } csi_ops;
36478+ } __no_const csi_ops;
36479
36480 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
36481 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
36482diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
36483index 0767043f..08c2553 100644
36484--- a/drivers/net/ethernet/sfc/ptp.c
36485+++ b/drivers/net/ethernet/sfc/ptp.c
36486@@ -553,7 +553,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
36487 (u32)((u64)ptp->start.dma_addr >> 32));
36488
36489 /* Clear flag that signals MC ready */
36490- ACCESS_ONCE(*start) = 0;
36491+ ACCESS_ONCE_RW(*start) = 0;
36492 efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
36493 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
36494
36495diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
36496index 0c74a70..3bc6f68 100644
36497--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
36498+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
36499@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
36500
36501 writel(value, ioaddr + MMC_CNTRL);
36502
36503- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
36504- MMC_CNTRL, value);
36505+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
36506+// MMC_CNTRL, value);
36507 }
36508
36509 /* To mask all all interrupts.*/
36510diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
36511index 5fd6f46..ee1f265 100644
36512--- a/drivers/net/hyperv/hyperv_net.h
36513+++ b/drivers/net/hyperv/hyperv_net.h
36514@@ -101,7 +101,7 @@ struct rndis_device {
36515
36516 enum rndis_device_state state;
36517 bool link_state;
36518- atomic_t new_req_id;
36519+ atomic_unchecked_t new_req_id;
36520
36521 spinlock_t request_lock;
36522 struct list_head req_list;
36523diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
36524index 928148c..d83298e 100644
36525--- a/drivers/net/hyperv/rndis_filter.c
36526+++ b/drivers/net/hyperv/rndis_filter.c
36527@@ -107,7 +107,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
36528 * template
36529 */
36530 set = &rndis_msg->msg.set_req;
36531- set->req_id = atomic_inc_return(&dev->new_req_id);
36532+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
36533
36534 /* Add to the request list */
36535 spin_lock_irqsave(&dev->request_lock, flags);
36536@@ -760,7 +760,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
36537
36538 /* Setup the rndis set */
36539 halt = &request->request_msg.msg.halt_req;
36540- halt->req_id = atomic_inc_return(&dev->new_req_id);
36541+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
36542
36543 /* Ignore return since this msg is optional. */
36544 rndis_filter_send_request(dev, request);
36545diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
36546index 7d39add..037e1da 100644
36547--- a/drivers/net/ieee802154/fakehard.c
36548+++ b/drivers/net/ieee802154/fakehard.c
36549@@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
36550 phy->transmit_power = 0xbf;
36551
36552 dev->netdev_ops = &fake_ops;
36553- dev->ml_priv = &fake_mlme;
36554+ dev->ml_priv = (void *)&fake_mlme;
36555
36556 priv = netdev_priv(dev);
36557 priv->phy = phy;
36558diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
36559index 0f0f9ce..0ca5819 100644
36560--- a/drivers/net/macvtap.c
36561+++ b/drivers/net/macvtap.c
36562@@ -1100,7 +1100,7 @@ static int macvtap_device_event(struct notifier_block *unused,
36563 return NOTIFY_DONE;
36564 }
36565
36566-static struct notifier_block macvtap_notifier_block __read_mostly = {
36567+static struct notifier_block macvtap_notifier_block = {
36568 .notifier_call = macvtap_device_event,
36569 };
36570
36571diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
36572index daec9b0..6428fcb 100644
36573--- a/drivers/net/phy/mdio-bitbang.c
36574+++ b/drivers/net/phy/mdio-bitbang.c
36575@@ -234,6 +234,7 @@ void free_mdio_bitbang(struct mii_bus *bus)
36576 struct mdiobb_ctrl *ctrl = bus->priv;
36577
36578 module_put(ctrl->ops->owner);
36579+ mdiobus_unregister(bus);
36580 mdiobus_free(bus);
36581 }
36582 EXPORT_SYMBOL(free_mdio_bitbang);
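
The added call pairs teardown with setup: assuming callers of free_mdio_bitbang() do not unregister the bus themselves, the bus would otherwise be freed while still registered with the MDIO core. The resulting function, annotated:

void free_mdio_bitbang(struct mii_bus *bus)
{
	struct mdiobb_ctrl *ctrl = bus->priv;

	module_put(ctrl->ops->owner);	/* drop the ops module reference */
	mdiobus_unregister(bus);	/* added: undo mdiobus_register() */
	mdiobus_free(bus);		/* safe: bus no longer registered */
}
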
36583diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
36584index eb3f5ce..d773730 100644
36585--- a/drivers/net/ppp/ppp_generic.c
36586+++ b/drivers/net/ppp/ppp_generic.c
36587@@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
36588 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
36589 struct ppp_stats stats;
36590 struct ppp_comp_stats cstats;
36591- char *vers;
36592
36593 switch (cmd) {
36594 case SIOCGPPPSTATS:
36595@@ -1021,8 +1020,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
36596 break;
36597
36598 case SIOCGPPPVER:
36599- vers = PPP_VERSION;
36600- if (copy_to_user(addr, vers, strlen(vers) + 1))
36601+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
36602 break;
36603 err = 0;
36604 break;
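
PPP_VERSION is a string literal, so sizeof(PPP_VERSION) is its length including the terminating NUL and is computed at compile time; the old code materialised a pointer and called strlen() at runtime for the same value, and dropping the variable also shrinks the ioctl frame. A runnable illustration (the version string here is assumed for the demo):

#include <stdio.h>
#include <string.h>

#define PPP_VERSION "2.4.2"

int main(void)
{
	/* sizeof on the literal counts the NUL: strlen + 1, at compile time */
	printf("sizeof = %zu, strlen + 1 = %zu\n",
	       sizeof(PPP_VERSION), strlen(PPP_VERSION) + 1);
	return 0;
}
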
36605diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
36606index ad86660..9fd0884 100644
36607--- a/drivers/net/team/team.c
36608+++ b/drivers/net/team/team.c
36609@@ -2601,7 +2601,7 @@ static int team_device_event(struct notifier_block *unused,
36610 return NOTIFY_DONE;
36611 }
36612
36613-static struct notifier_block team_notifier_block __read_mostly = {
36614+static struct notifier_block team_notifier_block = {
36615 .notifier_call = team_device_event,
36616 };
36617
36618diff --git a/drivers/net/tun.c b/drivers/net/tun.c
36619index 0873cdc..ddb178e 100644
36620--- a/drivers/net/tun.c
36621+++ b/drivers/net/tun.c
36622@@ -1374,7 +1374,7 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
36623 }
36624
36625 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
36626- unsigned long arg, int ifreq_len)
36627+ unsigned long arg, size_t ifreq_len)
36628 {
36629 struct tun_file *tfile = file->private_data;
36630 struct tun_struct *tun;
36631@@ -1387,6 +1387,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
36632 int vnet_hdr_sz;
36633 int ret;
36634
36635+ if (ifreq_len > sizeof ifr)
36636+ return -EFAULT;
36637+
36638 if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) {
36639 if (copy_from_user(&ifr, argp, ifreq_len))
36640 return -EFAULT;
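
ifreq_len arrives from the native and compat ioctl entry points; retyping it as size_t and rejecting anything larger than the on-stack struct ifreq up front means none of the later copy_from_user()/copy_to_user() calls on &ifr can overrun it. A reduced model of the hardened entry check, with memcpy standing in for the user-copy helpers and -14 for -EFAULT:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct ifreq { char ifr_name[16]; char pad[24]; };	/* reduced model */

static long tun_ioctl_model(const void *user_arg, size_t ifreq_len)
{
	struct ifreq ifr;

	if (ifreq_len > sizeof(ifr))	/* clamp before any copy */
		return -14;
	memcpy(&ifr, user_arg, ifreq_len);
	return 0;
}

int main(void)
{
	char arg[64] = { 0 };
	printf("%ld %ld\n", tun_ioctl_model(arg, sizeof(struct ifreq)),
	       tun_ioctl_model(arg, sizeof(arg)));	/* second is rejected */
	return 0;
}
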
36641diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
36642index 605a4ba..a883dd1 100644
36643--- a/drivers/net/usb/hso.c
36644+++ b/drivers/net/usb/hso.c
36645@@ -71,7 +71,7 @@
36646 #include <asm/byteorder.h>
36647 #include <linux/serial_core.h>
36648 #include <linux/serial.h>
36649-
36650+#include <asm/local.h>
36651
36652 #define MOD_AUTHOR "Option Wireless"
36653 #define MOD_DESCRIPTION "USB High Speed Option driver"
36654@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
36655 struct urb *urb;
36656
36657 urb = serial->rx_urb[0];
36658- if (serial->port.count > 0) {
36659+ if (atomic_read(&serial->port.count) > 0) {
36660 count = put_rxbuf_data(urb, serial);
36661 if (count == -1)
36662 return;
36663@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
36664 DUMP1(urb->transfer_buffer, urb->actual_length);
36665
36666 /* Anyone listening? */
36667- if (serial->port.count == 0)
36668+ if (atomic_read(&serial->port.count) == 0)
36669 return;
36670
36671 if (status == 0) {
36672@@ -1298,8 +1298,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
36673 tty_port_tty_set(&serial->port, tty);
36674
36675 /* check for port already opened, if not set the termios */
36676- serial->port.count++;
36677- if (serial->port.count == 1) {
36678+ if (atomic_inc_return(&serial->port.count) == 1) {
36679 serial->rx_state = RX_IDLE;
36680 /* Force default termio settings */
36681 _hso_serial_set_termios(tty, NULL);
36682@@ -1311,7 +1310,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
36683 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
36684 if (result) {
36685 hso_stop_serial_device(serial->parent);
36686- serial->port.count--;
36687+ atomic_dec(&serial->port.count);
36688 kref_put(&serial->parent->ref, hso_serial_ref_free);
36689 }
36690 } else {
36691@@ -1348,10 +1347,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
36692
36693 /* reset the rts and dtr */
36694 /* do the actual close */
36695- serial->port.count--;
36696+ atomic_dec(&serial->port.count);
36697
36698- if (serial->port.count <= 0) {
36699- serial->port.count = 0;
36700+ if (atomic_read(&serial->port.count) <= 0) {
36701+ atomic_set(&serial->port.count, 0);
36702 tty_port_tty_set(&serial->port, NULL);
36703 if (!usb_gone)
36704 hso_stop_serial_device(serial->parent);
36705@@ -1427,7 +1426,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
36706
36707 /* the actual setup */
36708 spin_lock_irqsave(&serial->serial_lock, flags);
36709- if (serial->port.count)
36710+ if (atomic_read(&serial->port.count))
36711 _hso_serial_set_termios(tty, old);
36712 else
36713 tty->termios = *old;
36714@@ -1886,7 +1885,7 @@ static void intr_callback(struct urb *urb)
36715 D1("Pending read interrupt on port %d\n", i);
36716 spin_lock(&serial->serial_lock);
36717 if (serial->rx_state == RX_IDLE &&
36718- serial->port.count > 0) {
36719+ atomic_read(&serial->port.count) > 0) {
36720 /* Setup and send a ctrl req read on
36721 * port i */
36722 if (!serial->rx_urb_filled[0]) {
36723@@ -3078,7 +3077,7 @@ static int hso_resume(struct usb_interface *iface)
36724 /* Start all serial ports */
36725 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
36726 if (serial_table[i] && (serial_table[i]->interface == iface)) {
36727- if (dev2ser(serial_table[i])->port.count) {
36728+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
36729 result =
36730 hso_start_serial_device(serial_table[i], GFP_NOIO);
36731 hso_kick_transmit(dev2ser(serial_table[i]));
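
serial->port.count was a plain int read and written from the open/close paths as well as from URB completion context, so the open count could race; as an atomic_t, the "first opener initialises the port" decision collapses into a single atomic_inc_return() == 1 test. A userspace sketch of the same pattern with C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int port_count;

static void port_open(void)
{
	/* One atomic op decides who the first opener is; no window where
	 * two openers both observe count == 1 after separate ++ and ==. */
	if (atomic_fetch_add(&port_count, 1) + 1 == 1)
		puts("first open: set termios, start rx");
}

static void port_close(void)
{
	if (atomic_fetch_sub(&port_count, 1) - 1 <= 0) {
		atomic_store(&port_count, 0);
		puts("last close: stop device");
	}
}

int main(void) { port_open(); port_close(); return 0; }
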
36732diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
36733index 8d78253..bebbb68 100644
36734--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
36735+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
36736@@ -184,8 +184,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36737 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
36738 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
36739
36740- ACCESS_ONCE(ads->ds_link) = i->link;
36741- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
36742+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
36743+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
36744
36745 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
36746 ctl6 = SM(i->keytype, AR_EncrType);
36747@@ -199,26 +199,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36748
36749 if ((i->is_first || i->is_last) &&
36750 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
36751- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
36752+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
36753 | set11nTries(i->rates, 1)
36754 | set11nTries(i->rates, 2)
36755 | set11nTries(i->rates, 3)
36756 | (i->dur_update ? AR_DurUpdateEna : 0)
36757 | SM(0, AR_BurstDur);
36758
36759- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
36760+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
36761 | set11nRate(i->rates, 1)
36762 | set11nRate(i->rates, 2)
36763 | set11nRate(i->rates, 3);
36764 } else {
36765- ACCESS_ONCE(ads->ds_ctl2) = 0;
36766- ACCESS_ONCE(ads->ds_ctl3) = 0;
36767+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
36768+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
36769 }
36770
36771 if (!i->is_first) {
36772- ACCESS_ONCE(ads->ds_ctl0) = 0;
36773- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
36774- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
36775+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
36776+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
36777+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
36778 return;
36779 }
36780
36781@@ -243,7 +243,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36782 break;
36783 }
36784
36785- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
36786+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
36787 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
36788 | SM(i->txpower, AR_XmitPower)
36789 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
36790@@ -253,19 +253,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36791 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
36792 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
36793
36794- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
36795- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
36796+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
36797+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
36798
36799 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
36800 return;
36801
36802- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
36803+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
36804 | set11nPktDurRTSCTS(i->rates, 1);
36805
36806- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
36807+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
36808 | set11nPktDurRTSCTS(i->rates, 3);
36809
36810- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
36811+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
36812 | set11nRateFlags(i->rates, 1)
36813 | set11nRateFlags(i->rates, 2)
36814 | set11nRateFlags(i->rates, 3)
36815diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
36816index 301bf72..3f5654f 100644
36817--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
36818+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
36819@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36820 (i->qcu << AR_TxQcuNum_S) | desc_len;
36821
36822 checksum += val;
36823- ACCESS_ONCE(ads->info) = val;
36824+ ACCESS_ONCE_RW(ads->info) = val;
36825
36826 checksum += i->link;
36827- ACCESS_ONCE(ads->link) = i->link;
36828+ ACCESS_ONCE_RW(ads->link) = i->link;
36829
36830 checksum += i->buf_addr[0];
36831- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
36832+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
36833 checksum += i->buf_addr[1];
36834- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
36835+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
36836 checksum += i->buf_addr[2];
36837- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
36838+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
36839 checksum += i->buf_addr[3];
36840- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
36841+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
36842
36843 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
36844- ACCESS_ONCE(ads->ctl3) = val;
36845+ ACCESS_ONCE_RW(ads->ctl3) = val;
36846 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
36847- ACCESS_ONCE(ads->ctl5) = val;
36848+ ACCESS_ONCE_RW(ads->ctl5) = val;
36849 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
36850- ACCESS_ONCE(ads->ctl7) = val;
36851+ ACCESS_ONCE_RW(ads->ctl7) = val;
36852 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
36853- ACCESS_ONCE(ads->ctl9) = val;
36854+ ACCESS_ONCE_RW(ads->ctl9) = val;
36855
36856 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
36857- ACCESS_ONCE(ads->ctl10) = checksum;
36858+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
36859
36860 if (i->is_first || i->is_last) {
36861- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
36862+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
36863 | set11nTries(i->rates, 1)
36864 | set11nTries(i->rates, 2)
36865 | set11nTries(i->rates, 3)
36866 | (i->dur_update ? AR_DurUpdateEna : 0)
36867 | SM(0, AR_BurstDur);
36868
36869- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
36870+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
36871 | set11nRate(i->rates, 1)
36872 | set11nRate(i->rates, 2)
36873 | set11nRate(i->rates, 3);
36874 } else {
36875- ACCESS_ONCE(ads->ctl13) = 0;
36876- ACCESS_ONCE(ads->ctl14) = 0;
36877+ ACCESS_ONCE_RW(ads->ctl13) = 0;
36878+ ACCESS_ONCE_RW(ads->ctl14) = 0;
36879 }
36880
36881 ads->ctl20 = 0;
36882@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36883
36884 ctl17 = SM(i->keytype, AR_EncrType);
36885 if (!i->is_first) {
36886- ACCESS_ONCE(ads->ctl11) = 0;
36887- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
36888- ACCESS_ONCE(ads->ctl15) = 0;
36889- ACCESS_ONCE(ads->ctl16) = 0;
36890- ACCESS_ONCE(ads->ctl17) = ctl17;
36891- ACCESS_ONCE(ads->ctl18) = 0;
36892- ACCESS_ONCE(ads->ctl19) = 0;
36893+ ACCESS_ONCE_RW(ads->ctl11) = 0;
36894+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
36895+ ACCESS_ONCE_RW(ads->ctl15) = 0;
36896+ ACCESS_ONCE_RW(ads->ctl16) = 0;
36897+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
36898+ ACCESS_ONCE_RW(ads->ctl18) = 0;
36899+ ACCESS_ONCE_RW(ads->ctl19) = 0;
36900 return;
36901 }
36902
36903- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
36904+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
36905 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
36906 | SM(i->txpower, AR_XmitPower)
36907 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
36908@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36909 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
36910 ctl12 |= SM(val, AR_PAPRDChainMask);
36911
36912- ACCESS_ONCE(ads->ctl12) = ctl12;
36913- ACCESS_ONCE(ads->ctl17) = ctl17;
36914+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
36915+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
36916
36917- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
36918+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
36919 | set11nPktDurRTSCTS(i->rates, 1);
36920
36921- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
36922+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
36923 | set11nPktDurRTSCTS(i->rates, 3);
36924
36925- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
36926+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
36927 | set11nRateFlags(i->rates, 1)
36928 | set11nRateFlags(i->rates, 2)
36929 | set11nRateFlags(i->rates, 3)
36930 | SM(i->rtscts_rate, AR_RTSCTSRate);
36931
36932- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
36933+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
36934 }
36935
36936 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
36937diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
36938index dbc1b7a..67e2ca2 100644
36939--- a/drivers/net/wireless/ath/ath9k/hw.h
36940+++ b/drivers/net/wireless/ath/ath9k/hw.h
36941@@ -657,7 +657,7 @@ struct ath_hw_private_ops {
36942
36943 /* ANI */
36944 void (*ani_cache_ini_regs)(struct ath_hw *ah);
36945-};
36946+} __no_const;
36947
36948 /**
36949 * struct ath_hw_ops - callbacks used by hardware code and driver code
36950@@ -687,7 +687,7 @@ struct ath_hw_ops {
36951 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
36952 struct ath_hw_antcomb_conf *antconf);
36953 void (*antctrl_shared_chain_lnadiv)(struct ath_hw *hw, bool enable);
36954-};
36955+} __no_const;
36956
36957 struct ath_nf_limits {
36958 s16 max;
36959diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
36960index 71ced17..cd82b12 100644
36961--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
36962+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
36963@@ -184,7 +184,7 @@ struct brcmf_cfg80211_event_loop {
36964 struct net_device *ndev,
36965 const struct brcmf_event_msg *e,
36966 void *data);
36967-};
36968+} __no_const;
36969
36970 /* basic structure of scan request */
36971 struct brcmf_cfg80211_scan_req {
36972@@ -239,7 +239,7 @@ struct brcmf_cfg80211_profile {
36973 struct brcmf_cfg80211_iscan_eloop {
36974 s32 (*handler[WL_SCAN_ERSULTS_LAST])
36975 (struct brcmf_cfg80211_info *cfg);
36976-};
36977+} __no_const;
36978
36979 /* dongle iscan controller */
36980 struct brcmf_cfg80211_iscan_ctrl {
36981diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
36982index e252acb..6ad1e65 100644
36983--- a/drivers/net/wireless/iwlegacy/3945-mac.c
36984+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
36985@@ -3615,7 +3615,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
36986 */
36987 if (il3945_mod_params.disable_hw_scan) {
36988 D_INFO("Disabling hw_scan\n");
36989- il3945_mac_ops.hw_scan = NULL;
36990+ pax_open_kernel();
36991+ *(void **)&il3945_mac_ops.hw_scan = NULL;
36992+ pax_close_kernel();
36993 }
36994
36995 D_INFO("*** LOAD DRIVER ***\n");
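
il3945_mac_ops becomes read-only under this patch's constification, so a direct member store would fault; the write is bracketed by pax_open_kernel()/pax_close_kernel(), which temporarily lift kernel write protection (on x86 by toggling CR0.WP; other architectures differ), and the *(void **)& cast strips the const for the single store in between. The same idiom recurs below in mac80211_hwsim, wl1251, wl12xx, wl18xx and cpcihp_zt5550. Shape only, with stubbed primitives and an illustrative ops struct:

#include <stddef.h>
#include <stdio.h>

/* Stubs: the real pax_open_kernel()/pax_close_kernel() are arch code. */
static void pax_open_kernel(void)  { /* lift kernel write protection */ }
static void pax_close_kernel(void) { /* restore it */ }

struct mac_ops_model { int (*hw_scan)(void); };
static struct mac_ops_model mac_ops;	/* const, in .rodata, in the real tree */

static void disable_hw_scan(void)
{
	pax_open_kernel();
	*(void **)&mac_ops.hw_scan = NULL;	/* cast defeats the const */
	pax_close_kernel();
}

int main(void)
{
	disable_hw_scan();
	printf("hw_scan = %p\n", (void *)mac_ops.hw_scan);
	return 0;
}
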
36996diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
36997index 1a98fa3..51e6661 100644
36998--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
36999+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
37000@@ -203,7 +203,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
37001 {
37002 struct iwl_priv *priv = file->private_data;
37003 char buf[64];
37004- int buf_size;
37005+ size_t buf_size;
37006 u32 offset, len;
37007
37008 memset(buf, 0, sizeof(buf));
37009@@ -473,7 +473,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
37010 struct iwl_priv *priv = file->private_data;
37011
37012 char buf[8];
37013- int buf_size;
37014+ size_t buf_size;
37015 u32 reset_flag;
37016
37017 memset(buf, 0, sizeof(buf));
37018@@ -554,7 +554,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
37019 {
37020 struct iwl_priv *priv = file->private_data;
37021 char buf[8];
37022- int buf_size;
37023+ size_t buf_size;
37024 int ht40;
37025
37026 memset(buf, 0, sizeof(buf));
37027@@ -606,7 +606,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
37028 {
37029 struct iwl_priv *priv = file->private_data;
37030 char buf[8];
37031- int buf_size;
37032+ size_t buf_size;
37033 int value;
37034
37035 memset(buf, 0, sizeof(buf));
37036@@ -1871,7 +1871,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
37037 {
37038 struct iwl_priv *priv = file->private_data;
37039 char buf[8];
37040- int buf_size;
37041+ size_t buf_size;
37042 int clear;
37043
37044 memset(buf, 0, sizeof(buf));
37045@@ -1916,7 +1916,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
37046 {
37047 struct iwl_priv *priv = file->private_data;
37048 char buf[8];
37049- int buf_size;
37050+ size_t buf_size;
37051 int trace;
37052
37053 memset(buf, 0, sizeof(buf));
37054@@ -1987,7 +1987,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
37055 {
37056 struct iwl_priv *priv = file->private_data;
37057 char buf[8];
37058- int buf_size;
37059+ size_t buf_size;
37060 int missed;
37061
37062 memset(buf, 0, sizeof(buf));
37063@@ -2028,7 +2028,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
37064
37065 struct iwl_priv *priv = file->private_data;
37066 char buf[8];
37067- int buf_size;
37068+ size_t buf_size;
37069 int plcp;
37070
37071 memset(buf, 0, sizeof(buf));
37072@@ -2088,7 +2088,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
37073
37074 struct iwl_priv *priv = file->private_data;
37075 char buf[8];
37076- int buf_size;
37077+ size_t buf_size;
37078 int flush;
37079
37080 memset(buf, 0, sizeof(buf));
37081@@ -2178,7 +2178,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
37082
37083 struct iwl_priv *priv = file->private_data;
37084 char buf[8];
37085- int buf_size;
37086+ size_t buf_size;
37087 int rts;
37088
37089 if (!priv->cfg->ht_params)
37090@@ -2220,7 +2220,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
37091 {
37092 struct iwl_priv *priv = file->private_data;
37093 char buf[8];
37094- int buf_size;
37095+ size_t buf_size;
37096
37097 memset(buf, 0, sizeof(buf));
37098 buf_size = min(count, sizeof(buf) - 1);
37099@@ -2256,7 +2256,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
37100 struct iwl_priv *priv = file->private_data;
37101 u32 event_log_flag;
37102 char buf[8];
37103- int buf_size;
37104+ size_t buf_size;
37105
37106 /* check that the interface is up */
37107 if (!iwl_is_ready(priv))
37108@@ -2310,7 +2310,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
37109 struct iwl_priv *priv = file->private_data;
37110 char buf[8];
37111 u32 calib_disabled;
37112- int buf_size;
37113+ size_t buf_size;
37114
37115 memset(buf, 0, sizeof(buf));
37116 buf_size = min(count, sizeof(buf) - 1);
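
Every one of these debugfs write handlers clamps a user-supplied count with min(count, sizeof(buf) - 1); count is size_t, so declaring buf_size as int routed an unsigned, user-controlled length through a narrower signed type. The clamp keeps the value small in practice, but the patch, which also instruments such flows with the size_overflow plugin, normalises length variables to size_t; the same change repeats in the pcie/trans.c hunks that follow. A small demonstration of why lengths should not travel through int (assumes LP64):

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	/* A user-supplied byte count larger than 2^32. */
	size_t count = ((size_t)1 << 32) | 4;

	int as_int = (int)count;	/* truncates to 4 */
	size_t as_size = count;		/* preserved */

	/* A length that travels through int can shrink or go negative
	 * before it reaches a bounds check or a copy routine. */
	printf("int=%d size_t=%zu\n", as_int, as_size);
	return 0;
}
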
37117diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
37118index fe0fffd..b4c5724 100644
37119--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
37120+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
37121@@ -1967,7 +1967,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
37122 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
37123
37124 char buf[8];
37125- int buf_size;
37126+ size_t buf_size;
37127 u32 reset_flag;
37128
37129 memset(buf, 0, sizeof(buf));
37130@@ -1988,7 +1988,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
37131 {
37132 struct iwl_trans *trans = file->private_data;
37133 char buf[8];
37134- int buf_size;
37135+ size_t buf_size;
37136 int csr;
37137
37138 memset(buf, 0, sizeof(buf));
37139diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
37140index 429ca32..f86236b 100644
37141--- a/drivers/net/wireless/mac80211_hwsim.c
37142+++ b/drivers/net/wireless/mac80211_hwsim.c
37143@@ -1751,9 +1751,11 @@ static int __init init_mac80211_hwsim(void)
37144 return -EINVAL;
37145
37146 if (fake_hw_scan) {
37147- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
37148- mac80211_hwsim_ops.sw_scan_start = NULL;
37149- mac80211_hwsim_ops.sw_scan_complete = NULL;
37150+ pax_open_kernel();
37151+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
37152+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
37153+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
37154+ pax_close_kernel();
37155 }
37156
37157 spin_lock_init(&hwsim_radio_lock);
37158diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
37159index bd1f0cb..db85ab0 100644
37160--- a/drivers/net/wireless/rndis_wlan.c
37161+++ b/drivers/net/wireless/rndis_wlan.c
37162@@ -1235,7 +1235,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
37163
37164 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
37165
37166- if (rts_threshold < 0 || rts_threshold > 2347)
37167+ if (rts_threshold > 2347)
37168 rts_threshold = 2347;
37169
37170 tmp = cpu_to_le32(rts_threshold);
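
rts_threshold is a u32, so the dropped 'rts_threshold < 0' test could never be true; removing it silences tautological-compare warnings without changing behaviour. The surviving clamp, in isolation:

#include <stdint.h>

static uint32_t clamp_rts(uint32_t rts_threshold)
{
	/* 'rts_threshold < 0' is always false for an unsigned type;
	 * only the upper bound needs checking. */
	if (rts_threshold > 2347)
		rts_threshold = 2347;
	return rts_threshold;
}

int main(void) { return clamp_rts(9000) == 2347 ? 0 : 1; }
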
37171diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
37172index 0751b35..246ba3e 100644
37173--- a/drivers/net/wireless/rt2x00/rt2x00.h
37174+++ b/drivers/net/wireless/rt2x00/rt2x00.h
37175@@ -398,7 +398,7 @@ struct rt2x00_intf {
37176 * for hardware which doesn't support hardware
37177 * sequence counting.
37178 */
37179- atomic_t seqno;
37180+ atomic_unchecked_t seqno;
37181 };
37182
37183 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
37184diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
37185index e488b94..14b6a0c 100644
37186--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
37187+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
37188@@ -240,9 +240,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
37189 * sequence counter given by mac80211.
37190 */
37191 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
37192- seqno = atomic_add_return(0x10, &intf->seqno);
37193+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
37194 else
37195- seqno = atomic_read(&intf->seqno);
37196+ seqno = atomic_read_unchecked(&intf->seqno);
37197
37198 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
37199 hdr->seq_ctrl |= cpu_to_le16(seqno);
37200diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
37201index e2750a1..797e179 100644
37202--- a/drivers/net/wireless/ti/wl1251/sdio.c
37203+++ b/drivers/net/wireless/ti/wl1251/sdio.c
37204@@ -269,13 +269,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
37205
37206 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
37207
37208- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
37209- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
37210+ pax_open_kernel();
37211+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
37212+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
37213+ pax_close_kernel();
37214
37215 wl1251_info("using dedicated interrupt line");
37216 } else {
37217- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
37218- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
37219+ pax_open_kernel();
37220+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
37221+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
37222+ pax_close_kernel();
37223
37224 wl1251_info("using SDIO interrupt");
37225 }
37226diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
37227index dadf1db..d9db7a7 100644
37228--- a/drivers/net/wireless/ti/wl12xx/main.c
37229+++ b/drivers/net/wireless/ti/wl12xx/main.c
37230@@ -644,7 +644,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
37231 sizeof(wl->conf.mem));
37232
37233 /* read data preparation is only needed by wl127x */
37234- wl->ops->prepare_read = wl127x_prepare_read;
37235+ pax_open_kernel();
37236+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
37237+ pax_close_kernel();
37238
37239 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
37240 WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
37241@@ -665,7 +667,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
37242 sizeof(wl->conf.mem));
37243
37244 /* read data preparation is only needed by wl127x */
37245- wl->ops->prepare_read = wl127x_prepare_read;
37246+ pax_open_kernel();
37247+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
37248+ pax_close_kernel();
37249
37250 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
37251 WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
37252diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
37253index a39682a..1e8220c 100644
37254--- a/drivers/net/wireless/ti/wl18xx/main.c
37255+++ b/drivers/net/wireless/ti/wl18xx/main.c
37256@@ -1489,8 +1489,10 @@ static int wl18xx_setup(struct wl1271 *wl)
37257 }
37258
37259 if (!checksum_param) {
37260- wl18xx_ops.set_rx_csum = NULL;
37261- wl18xx_ops.init_vif = NULL;
37262+ pax_open_kernel();
37263+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
37264+ *(void **)&wl18xx_ops.init_vif = NULL;
37265+ pax_close_kernel();
37266 }
37267
37268 /* Enable 11a Band only if we have 5G antennas */
37269diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
37270index d93b2b6..ae50401 100644
37271--- a/drivers/oprofile/buffer_sync.c
37272+++ b/drivers/oprofile/buffer_sync.c
37273@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
37274 if (cookie == NO_COOKIE)
37275 offset = pc;
37276 if (cookie == INVALID_COOKIE) {
37277- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
37278+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
37279 offset = pc;
37280 }
37281 if (cookie != last_cookie) {
37282@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
37283 /* add userspace sample */
37284
37285 if (!mm) {
37286- atomic_inc(&oprofile_stats.sample_lost_no_mm);
37287+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
37288 return 0;
37289 }
37290
37291 cookie = lookup_dcookie(mm, s->eip, &offset);
37292
37293 if (cookie == INVALID_COOKIE) {
37294- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
37295+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
37296 return 0;
37297 }
37298
37299@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
37300 /* ignore backtraces if failed to add a sample */
37301 if (state == sb_bt_start) {
37302 state = sb_bt_ignore;
37303- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
37304+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
37305 }
37306 }
37307 release_mm(mm);
37308diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
37309index c0cc4e7..44d4e54 100644
37310--- a/drivers/oprofile/event_buffer.c
37311+++ b/drivers/oprofile/event_buffer.c
37312@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
37313 }
37314
37315 if (buffer_pos == buffer_size) {
37316- atomic_inc(&oprofile_stats.event_lost_overflow);
37317+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
37318 return;
37319 }
37320
37321diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
37322index ed2c3ec..deda85a 100644
37323--- a/drivers/oprofile/oprof.c
37324+++ b/drivers/oprofile/oprof.c
37325@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
37326 if (oprofile_ops.switch_events())
37327 return;
37328
37329- atomic_inc(&oprofile_stats.multiplex_counter);
37330+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
37331 start_switch_worker();
37332 }
37333
37334diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
37335index 917d28e..d62d981 100644
37336--- a/drivers/oprofile/oprofile_stats.c
37337+++ b/drivers/oprofile/oprofile_stats.c
37338@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
37339 cpu_buf->sample_invalid_eip = 0;
37340 }
37341
37342- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
37343- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
37344- atomic_set(&oprofile_stats.event_lost_overflow, 0);
37345- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
37346- atomic_set(&oprofile_stats.multiplex_counter, 0);
37347+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
37348+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
37349+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
37350+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
37351+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
37352 }
37353
37354
37355diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
37356index 38b6fc0..b5cbfce 100644
37357--- a/drivers/oprofile/oprofile_stats.h
37358+++ b/drivers/oprofile/oprofile_stats.h
37359@@ -13,11 +13,11 @@
37360 #include <linux/atomic.h>
37361
37362 struct oprofile_stat_struct {
37363- atomic_t sample_lost_no_mm;
37364- atomic_t sample_lost_no_mapping;
37365- atomic_t bt_lost_no_mapping;
37366- atomic_t event_lost_overflow;
37367- atomic_t multiplex_counter;
37368+ atomic_unchecked_t sample_lost_no_mm;
37369+ atomic_unchecked_t sample_lost_no_mapping;
37370+ atomic_unchecked_t bt_lost_no_mapping;
37371+ atomic_unchecked_t event_lost_overflow;
37372+ atomic_unchecked_t multiplex_counter;
37373 };
37374
37375 extern struct oprofile_stat_struct oprofile_stats;
37376diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
37377index 849357c..b83c1e0 100644
37378--- a/drivers/oprofile/oprofilefs.c
37379+++ b/drivers/oprofile/oprofilefs.c
37380@@ -185,7 +185,7 @@ static const struct file_operations atomic_ro_fops = {
37381
37382
37383 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
37384- char const *name, atomic_t *val)
37385+ char const *name, atomic_unchecked_t *val)
37386 {
37387 return __oprofilefs_create_file(sb, root, name,
37388 &atomic_ro_fops, 0444, val);
37389diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
37390index 3f56bc0..707d642 100644
37391--- a/drivers/parport/procfs.c
37392+++ b/drivers/parport/procfs.c
37393@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
37394
37395 *ppos += len;
37396
37397- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
37398+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
37399 }
37400
37401 #ifdef CONFIG_PARPORT_1284
37402@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
37403
37404 *ppos += len;
37405
37406- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
37407+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
37408 }
37409 #endif /* IEEE1284.3 support. */
37410
37411diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
37412index a6a71c4..c91097b 100644
37413--- a/drivers/pci/hotplug/cpcihp_generic.c
37414+++ b/drivers/pci/hotplug/cpcihp_generic.c
37415@@ -73,7 +73,6 @@ static u16 port;
37416 static unsigned int enum_bit;
37417 static u8 enum_mask;
37418
37419-static struct cpci_hp_controller_ops generic_hpc_ops;
37420 static struct cpci_hp_controller generic_hpc;
37421
37422 static int __init validate_parameters(void)
37423@@ -139,6 +138,10 @@ static int query_enum(void)
37424 return ((value & enum_mask) == enum_mask);
37425 }
37426
37427+static struct cpci_hp_controller_ops generic_hpc_ops = {
37428+ .query_enum = query_enum,
37429+};
37430+
37431 static int __init cpcihp_generic_init(void)
37432 {
37433 int status;
37434@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
37435 pci_dev_put(dev);
37436
37437 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
37438- generic_hpc_ops.query_enum = query_enum;
37439 generic_hpc.ops = &generic_hpc_ops;
37440
37441 status = cpci_hp_register_controller(&generic_hpc);
37442diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
37443index 6bf8d2a..9711ce0 100644
37444--- a/drivers/pci/hotplug/cpcihp_zt5550.c
37445+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
37446@@ -59,7 +59,6 @@
37447 /* local variables */
37448 static bool debug;
37449 static bool poll;
37450-static struct cpci_hp_controller_ops zt5550_hpc_ops;
37451 static struct cpci_hp_controller zt5550_hpc;
37452
37453 /* Primary cPCI bus bridge device */
37454@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
37455 return 0;
37456 }
37457
37458+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
37459+ .query_enum = zt5550_hc_query_enum,
37460+};
37461+
37462 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
37463 {
37464 int status;
37465@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
37466 dbg("returned from zt5550_hc_config");
37467
37468 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
37469- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
37470 zt5550_hpc.ops = &zt5550_hpc_ops;
37471 if(!poll) {
37472 zt5550_hpc.irq = hc_dev->irq;
37473 zt5550_hpc.irq_flags = IRQF_SHARED;
37474 zt5550_hpc.dev_id = hc_dev;
37475
37476- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
37477- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
37478- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
37479+ pax_open_kernel();
37480+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
37481+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
37482+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
36483+ pax_close_kernel();
36483+ pax_close_kernel();
37484 } else {
37485 info("using ENUM# polling mode");
37486 }
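
Both cPCI hotplug drivers (cpcihp_generic above and cpcihp_zt5550 here) get the same split: the callback known at build time moves into a static designated initializer so the ops structure can be constified, while the IRQ-mode callbacks chosen only at probe time are written through the write-protection bracket. A reduced sketch, using the GNU C function-pointer-through-void* store as the patch does:

struct cpci_hp_controller_ops {
	int (*query_enum)(void);
	int (*enable_irq)(void);
};

static int hc_query_enum(void) { return 0; }
static int hc_enable_irq(void) { return 0; }

/* Known at build time: designated initializer, constifiable. */
static struct cpci_hp_controller_ops hpc_ops = {
	.query_enum = hc_query_enum,
};

/* Known only at probe time (IRQ mode): in the patch this store sits
 * inside the pax_open_kernel()/pax_close_kernel() bracket. */
static void wire_irq_mode(void)
{
	*(void **)&hpc_ops.enable_irq = hc_enable_irq;	/* GNU C idiom */
}

int main(void)
{
	wire_irq_mode();
	return hpc_ops.query_enum() + hpc_ops.enable_irq();
}
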
37487diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
37488index 76ba8a1..20ca857 100644
37489--- a/drivers/pci/hotplug/cpqphp_nvram.c
37490+++ b/drivers/pci/hotplug/cpqphp_nvram.c
37491@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
37492
37493 void compaq_nvram_init (void __iomem *rom_start)
37494 {
37495+
37496+#ifndef CONFIG_PAX_KERNEXEC
37497 if (rom_start) {
37498 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
37499 }
37500+#endif
37501+
37502 dbg("int15 entry = %p\n", compaq_int15_entry_point);
37503
37504 /* initialize our int15 lock */
37505diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
37506index 213753b..b4abaac 100644
37507--- a/drivers/pci/pcie/aspm.c
37508+++ b/drivers/pci/pcie/aspm.c
37509@@ -27,9 +27,9 @@
37510 #define MODULE_PARAM_PREFIX "pcie_aspm."
37511
37512 /* Note: those are not register definitions */
37513-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
37514-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
37515-#define ASPM_STATE_L1 (4) /* L1 state */
37516+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
37517+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
37518+#define ASPM_STATE_L1 (4U) /* L1 state */
37519 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
37520 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
37521
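
The only change in the aspm.c hunk is the U suffix on the state masks. The aspm_state fields these masks are combined with are unsigned, and an expression like ~ASPM_STATE_L1 on a plain int constant is evaluated in signed arithmetic first; unsigned literals keep every bit operation unsigned end to end. Illustration, assuming nothing beyond standard C:

#define ASPM_STATE_L1 (4U)	/* as in the hunk above */

static unsigned int aspm_clear_l1(unsigned int state)
{
	return state & ~ASPM_STATE_L1;	/* ~ of unsigned stays unsigned */
}
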
37522diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
37523index ec909af..e7517f3 100644
37524--- a/drivers/pci/probe.c
37525+++ b/drivers/pci/probe.c
37526@@ -173,7 +173,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
37527 struct pci_bus_region region;
37528 bool bar_too_big = false, bar_disabled = false;
37529
37530- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
37531+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
37532
37533 /* No printks while decoding is disabled! */
37534 if (!dev->mmio_always_on) {
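
The cast in __pci_read_base() resolves a width mismatch: mask is u32, while PCI_ROM_ADDRESS_MASK is built from an unsigned long (~0x7ffUL in kernels of this vintage), so on 64-bit builds the assignment silently truncates; the explicit (u32) cast documents the intended truncation and keeps the whole conditional expression at the u32 width of mask. Sketch:

#include <stdint.h>

#define PCI_ROM_ADDRESS_MASK (~0x7ffUL)	/* 64 bits wide on LP64 */

/* Explicit truncation to the register width actually decoded: */
static const uint32_t rom_mask = (uint32_t)PCI_ROM_ADDRESS_MASK; /* 0xfffff800 */
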
37535diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
37536index 9b8505c..f00870a 100644
37537--- a/drivers/pci/proc.c
37538+++ b/drivers/pci/proc.c
37539@@ -465,7 +465,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
37540 static int __init pci_proc_init(void)
37541 {
37542 struct pci_dev *dev = NULL;
37543+
37544+#ifdef CONFIG_GRKERNSEC_PROC_ADD
37545+#ifdef CONFIG_GRKERNSEC_PROC_USER
37546+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
37547+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
37548+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
37549+#endif
37550+#else
37551 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
37552+#endif
37553 proc_create("devices", 0, proc_bus_pci_dir,
37554 &proc_bus_pci_dev_operations);
37555 proc_initialized = 1;
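
Functionally the pci_proc_init() hunk is only a mode change on the directory: S_IRUSR | S_IXUSR is 0500 (root only) under CONFIG_GRKERNSEC_PROC_USER, adding S_IRGRP | S_IXGRP gives 0550 (root plus the configured group) under CONFIG_GRKERNSEC_PROC_USERGROUP, and a stock kernel keeps proc_mkdir()'s default 0555. A small userspace probe of the result, assuming a kernel built with one of these options:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;

	/* Expect 0500 with GRKERNSEC_PROC_USER, 0550 with
	 * GRKERNSEC_PROC_USERGROUP, 0555 on a stock kernel. */
	if (stat("/proc/bus/pci", &st) == 0)
		printf("mode: %04o\n", (unsigned)(st.st_mode & 07777));
	return 0;
}
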
37556diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
37557index 75dd651..2af4c9a 100644
37558--- a/drivers/platform/x86/thinkpad_acpi.c
37559+++ b/drivers/platform/x86/thinkpad_acpi.c
37560@@ -2097,7 +2097,7 @@ static int hotkey_mask_get(void)
37561 return 0;
37562 }
37563
37564-void static hotkey_mask_warn_incomplete_mask(void)
37565+static void hotkey_mask_warn_incomplete_mask(void)
37566 {
37567 /* log only what the user can fix... */
37568 const u32 wantedmask = hotkey_driver_mask &
37569@@ -2328,11 +2328,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
37570 }
37571 }
37572
37573-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37574- struct tp_nvram_state *newn,
37575- const u32 event_mask)
37576-{
37577-
37578 #define TPACPI_COMPARE_KEY(__scancode, __member) \
37579 do { \
37580 if ((event_mask & (1 << __scancode)) && \
37581@@ -2346,36 +2341,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37582 tpacpi_hotkey_send_key(__scancode); \
37583 } while (0)
37584
37585- void issue_volchange(const unsigned int oldvol,
37586- const unsigned int newvol)
37587- {
37588- unsigned int i = oldvol;
37589+static void issue_volchange(const unsigned int oldvol,
37590+ const unsigned int newvol,
37591+ const u32 event_mask)
37592+{
37593+ unsigned int i = oldvol;
37594
37595- while (i > newvol) {
37596- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
37597- i--;
37598- }
37599- while (i < newvol) {
37600- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
37601- i++;
37602- }
37603+ while (i > newvol) {
37604+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
37605+ i--;
37606 }
37607+ while (i < newvol) {
37608+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
37609+ i++;
37610+ }
37611+}
37612
37613- void issue_brightnesschange(const unsigned int oldbrt,
37614- const unsigned int newbrt)
37615- {
37616- unsigned int i = oldbrt;
37617+static void issue_brightnesschange(const unsigned int oldbrt,
37618+ const unsigned int newbrt,
37619+ const u32 event_mask)
37620+{
37621+ unsigned int i = oldbrt;
37622
37623- while (i > newbrt) {
37624- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
37625- i--;
37626- }
37627- while (i < newbrt) {
37628- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
37629- i++;
37630- }
37631+ while (i > newbrt) {
37632+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
37633+ i--;
37634+ }
37635+ while (i < newbrt) {
37636+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
37637+ i++;
37638 }
37639+}
37640
37641+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37642+ struct tp_nvram_state *newn,
37643+ const u32 event_mask)
37644+{
37645 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
37646 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
37647 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
37648@@ -2409,7 +2410,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37649 oldn->volume_level != newn->volume_level) {
37650 /* recently muted, or repeated mute keypress, or
37651 * multiple presses ending in mute */
37652- issue_volchange(oldn->volume_level, newn->volume_level);
37653+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
37654 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
37655 }
37656 } else {
37657@@ -2419,7 +2420,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37658 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
37659 }
37660 if (oldn->volume_level != newn->volume_level) {
37661- issue_volchange(oldn->volume_level, newn->volume_level);
37662+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
37663 } else if (oldn->volume_toggle != newn->volume_toggle) {
37664 /* repeated vol up/down keypress at end of scale ? */
37665 if (newn->volume_level == 0)
37666@@ -2432,7 +2433,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37667 /* handle brightness */
37668 if (oldn->brightness_level != newn->brightness_level) {
37669 issue_brightnesschange(oldn->brightness_level,
37670- newn->brightness_level);
37671+ newn->brightness_level,
37672+ event_mask);
37673 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
37674 /* repeated key presses that didn't change state */
37675 if (newn->brightness_level == 0)
37676@@ -2441,10 +2443,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37677 && !tp_features.bright_unkfw)
37678 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
37679 }
37680+}
37681
37682 #undef TPACPI_COMPARE_KEY
37683 #undef TPACPI_MAY_SEND_KEY
37684-}
37685
37686 /*
37687 * Polling driver
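
The thinkpad_acpi hunks hoist two GCC nested functions (issue_volchange, issue_brightnesschange) out of hotkey_compare_and_issue_event() to file scope. A nested function may capture locals of the enclosing frame, here event_mask, so after hoisting that value has to travel as an explicit parameter, which is all the new event_mask argument does; avoiding the extension matters to PaX because taking a nested function's address makes GCC emit an executable-stack trampoline. A compressed sketch of the transformation, with hypothetical names:

/* Before (GCC extension, captures `mask` from the enclosing frame):
 *
 *	static void compare_and_issue(unsigned int mask, ...)
 *	{
 *		void issue_change(unsigned int o, unsigned int n)
 *		{ ... uses mask ... }
 *		...
 *	}
 *
 * After: file-scope static, the captured value passed explicitly. */
static void issue_change(unsigned int oldval, unsigned int newval,
			 unsigned int mask)	/* was captured, now a parameter */
{
	unsigned int i = oldval;

	while (i < newval) {
		if (mask)	/* stand-in for the event_mask test */
			;	/* ...send one "up" key event... */
		i++;
	}
}
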
37688diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
37689index 769d265..a3a05ca 100644
37690--- a/drivers/pnp/pnpbios/bioscalls.c
37691+++ b/drivers/pnp/pnpbios/bioscalls.c
37692@@ -58,7 +58,7 @@ do { \
37693 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
37694 } while(0)
37695
37696-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
37697+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
37698 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
37699
37700 /*
37701@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
37702
37703 cpu = get_cpu();
37704 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
37705+
37706+ pax_open_kernel();
37707 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
37708+ pax_close_kernel();
37709
37710 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
37711 spin_lock_irqsave(&pnp_bios_lock, flags);
37712@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
37713 :"memory");
37714 spin_unlock_irqrestore(&pnp_bios_lock, flags);
37715
37716+ pax_open_kernel();
37717 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
37718+ pax_close_kernel();
37719+
37720 put_cpu();
37721
37722 /* If we get here and this is set then the PnP BIOS faulted on us. */
37723@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
37724 return status;
37725 }
37726
37727-void pnpbios_calls_init(union pnp_bios_install_struct *header)
37728+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
37729 {
37730 int i;
37731
37732@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
37733 pnp_bios_callpoint.offset = header->fields.pm16offset;
37734 pnp_bios_callpoint.segment = PNP_CS16;
37735
37736+ pax_open_kernel();
37737+
37738 for_each_possible_cpu(i) {
37739 struct desc_struct *gdt = get_cpu_gdt_table(i);
37740 if (!gdt)
37741@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
37742 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
37743 (unsigned long)__va(header->fields.pm16dseg));
37744 }
37745+
37746+ pax_close_kernel();
37747 }
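
Two details in the bioscalls hunks deserve unpacking. First, every write to GDT entry 0x40/8 is now bracketed by pax_open_kernel()/pax_close_kernel(), because with KERNEXEC the GDT is kept read-only. Second, bad_bios_desc changes type from 0x4092 to 0x4093: the low byte is the x86 descriptor access byte, and 0x93 differs from 0x92 only in the accessed bit. Pre-setting that bit matters for a read-only descriptor, since the CPU otherwise sets it itself on first segment load, i.e. performs a hardware write into the GDT. Annotated, assuming the standard x86 descriptor layout:

/*
 * Access byte 0x93 = 1001 0011b:
 *   bit 7    P   = 1   segment present
 *   bits 6-5 DPL = 00  ring 0
 *   bit 4    S   = 1   code/data (not system) segment
 *   bits 3-1     = 001 data segment, writable, grows up
 *   bit 0    A   = 1   accessed -- pre-set so the CPU never needs
 *                      to write it back into a read-only GDT
 */
#define BAD_BIOS_DESC_TYPE 0x93
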
37748diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
37749index b0ecacb..7c9da2e 100644
37750--- a/drivers/pnp/resource.c
37751+++ b/drivers/pnp/resource.c
37752@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
37753 return 1;
37754
37755 /* check if the resource is valid */
37756- if (*irq < 0 || *irq > 15)
37757+ if (*irq > 15)
37758 return 0;
37759
37760 /* check if the resource is reserved */
37761@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
37762 return 1;
37763
37764 /* check if the resource is valid */
37765- if (*dma < 0 || *dma == 4 || *dma > 7)
37766+ if (*dma == 4 || *dma > 7)
37767 return 0;
37768
37769 /* check if the resource is reserved */
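
Both clauses removed from pnp/resource.c compare a dereferenced resource value against 0, but *irq and *dma are unsigned (resource_size_t), so the < 0 test is constant-false and only provokes compiler warnings; the surviving upper-bound test already rejects every out-of-range value, including wrapped negatives. For illustration, under plain C semantics:

#include <stdio.h>

int main(void)
{
	unsigned long irq = (unsigned long)-1;	/* wraps to ULONG_MAX */

	if (irq > 15)	/* the remaining check catches the wrap */
		puts("rejected");
	/* `irq < 0` would never be true here: unsigned values cannot
	 * be negative, which is why the clause was dropped. */
	return 0;
}
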
37770diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
37771index 7df7c5f..bd48c47 100644
37772--- a/drivers/power/pda_power.c
37773+++ b/drivers/power/pda_power.c
37774@@ -37,7 +37,11 @@ static int polling;
37775
37776 #ifdef CONFIG_USB_OTG_UTILS
37777 static struct usb_phy *transceiver;
37778-static struct notifier_block otg_nb;
37779+static int otg_handle_notification(struct notifier_block *nb,
37780+ unsigned long event, void *unused);
37781+static struct notifier_block otg_nb = {
37782+ .notifier_call = otg_handle_notification
37783+};
37784 #endif
37785
37786 static struct regulator *ac_draw;
37787@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
37788
37789 #ifdef CONFIG_USB_OTG_UTILS
37790 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
37791- otg_nb.notifier_call = otg_handle_notification;
37792 ret = usb_register_notifier(transceiver, &otg_nb);
37793 if (ret) {
37794 dev_err(dev, "failure to register otg notifier\n");
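
The pda_power hunk is the notifier variant of the same constification pattern: forward-declaring the callback lets otg_nb be initialized statically, so nothing has to write the notifier_block at probe time. A self-contained sketch of the shape (my_nb and my_event are hypothetical names):

#include <linux/notifier.h>

static int my_event(struct notifier_block *nb,
		    unsigned long event, void *data);

/* Static initialization: the struct can now sit in read-only data. */
static struct notifier_block my_nb = {
	.notifier_call = my_event,
};

static int my_event(struct notifier_block *nb,
		    unsigned long event, void *data)
{
	return NOTIFY_OK;	/* a real handler would inspect `event` */
}
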
37795diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
37796index 8d53174..04c65de 100644
37797--- a/drivers/regulator/max8660.c
37798+++ b/drivers/regulator/max8660.c
37799@@ -333,8 +333,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
37800 max8660->shadow_regs[MAX8660_OVER1] = 5;
37801 } else {
37802 /* Otherwise devices can be toggled via software */
37803- max8660_dcdc_ops.enable = max8660_dcdc_enable;
37804- max8660_dcdc_ops.disable = max8660_dcdc_disable;
37805+ pax_open_kernel();
37806+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
37807+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
37808+ pax_close_kernel();
37809 }
37810
37811 /*
37812diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
37813index 1fa6381..f58834e 100644
37814--- a/drivers/regulator/mc13892-regulator.c
37815+++ b/drivers/regulator/mc13892-regulator.c
37816@@ -540,10 +540,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
37817 }
37818 mc13xxx_unlock(mc13892);
37819
37820- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
37821+ pax_open_kernel();
37822+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
37823 = mc13892_vcam_set_mode;
37824- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
37825+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
37826 = mc13892_vcam_get_mode;
37827+ pax_close_kernel();
37828
37829 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
37830 ARRAY_SIZE(mc13892_regulators));
37831diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
37832index cace6d3..f623fda 100644
37833--- a/drivers/rtc/rtc-dev.c
37834+++ b/drivers/rtc/rtc-dev.c
37835@@ -14,6 +14,7 @@
37836 #include <linux/module.h>
37837 #include <linux/rtc.h>
37838 #include <linux/sched.h>
37839+#include <linux/grsecurity.h>
37840 #include "rtc-core.h"
37841
37842 static dev_t rtc_devt;
37843@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
37844 if (copy_from_user(&tm, uarg, sizeof(tm)))
37845 return -EFAULT;
37846
37847+ gr_log_timechange();
37848+
37849 return rtc_set_time(rtc, &tm);
37850
37851 case RTC_PIE_ON:
37852diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
37853index 4ad7e36..d004679 100644
37854--- a/drivers/scsi/bfa/bfa.h
37855+++ b/drivers/scsi/bfa/bfa.h
37856@@ -196,7 +196,7 @@ struct bfa_hwif_s {
37857 u32 *end);
37858 int cpe_vec_q0;
37859 int rme_vec_q0;
37860-};
37861+} __no_const;
37862 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
37863
37864 struct bfa_faa_cbfn_s {
37865diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
37866index e693af6..2e525b6 100644
37867--- a/drivers/scsi/bfa/bfa_fcpim.h
37868+++ b/drivers/scsi/bfa/bfa_fcpim.h
37869@@ -36,7 +36,7 @@ struct bfa_iotag_s {
37870
37871 struct bfa_itn_s {
37872 bfa_isr_func_t isr;
37873-};
37874+} __no_const;
37875
37876 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
37877 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
37878diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
37879index 23a90e7..9cf04ee 100644
37880--- a/drivers/scsi/bfa/bfa_ioc.h
37881+++ b/drivers/scsi/bfa/bfa_ioc.h
37882@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
37883 bfa_ioc_disable_cbfn_t disable_cbfn;
37884 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
37885 bfa_ioc_reset_cbfn_t reset_cbfn;
37886-};
37887+} __no_const;
37888
37889 /*
37890 * IOC event notification mechanism.
37891@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
37892 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
37893 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
37894 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
37895-};
37896+} __no_const;
37897
37898 /*
37899 * Queue element to wait for room in request queue. FIFO order is
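
The three bfa hunks all add __no_const, a grsecurity/PaX attribute consumed by the constify GCC plugin. The plugin makes structures consisting only of function pointers const by default; a struct like bfa_itn_s, whose isr member really is assigned at runtime by bfa_itn_create(), opts out with the attribute. Sketch, with a fallback define so it also compiles without the plugin:

#ifndef __no_const
#define __no_const	/* no-op without the constify plugin */
#endif

typedef void (*bfa_isr_func_t)(void *bfa, void *msg);

/* Written at runtime, so it must not be forced into read-only memory: */
struct bfa_itn_s {
	bfa_isr_func_t isr;
} __no_const;
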
37900diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
37901index 593085a..47aa999 100644
37902--- a/drivers/scsi/hosts.c
37903+++ b/drivers/scsi/hosts.c
37904@@ -42,7 +42,7 @@
37905 #include "scsi_logging.h"
37906
37907
37908-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
37909+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
37910
37911
37912 static void scsi_host_cls_release(struct device *dev)
37913@@ -361,7 +361,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
37914 * subtract one because we increment first then return, but we need to
37915 * know what the next host number was before increment
37916 */
37917- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
37918+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
37919 shost->dma_channel = 0xff;
37920
37921 /* These three are default values which can be overridden */
37922diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
37923index 4217e49..9c77e3e 100644
37924--- a/drivers/scsi/hpsa.c
37925+++ b/drivers/scsi/hpsa.c
37926@@ -554,7 +554,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
37927 unsigned long flags;
37928
37929 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
37930- return h->access.command_completed(h, q);
37931+ return h->access->command_completed(h, q);
37932
37933 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
37934 a = rq->head[rq->current_entry];
37935@@ -3374,7 +3374,7 @@ static void start_io(struct ctlr_info *h)
37936 while (!list_empty(&h->reqQ)) {
37937 c = list_entry(h->reqQ.next, struct CommandList, list);
37938 /* can't do anything if fifo is full */
37939- if ((h->access.fifo_full(h))) {
37940+ if ((h->access->fifo_full(h))) {
37941 dev_warn(&h->pdev->dev, "fifo full\n");
37942 break;
37943 }
37944@@ -3396,7 +3396,7 @@ static void start_io(struct ctlr_info *h)
37945
37946 /* Tell the controller execute command */
37947 spin_unlock_irqrestore(&h->lock, flags);
37948- h->access.submit_command(h, c);
37949+ h->access->submit_command(h, c);
37950 spin_lock_irqsave(&h->lock, flags);
37951 }
37952 spin_unlock_irqrestore(&h->lock, flags);
37953@@ -3404,17 +3404,17 @@ static void start_io(struct ctlr_info *h)
37954
37955 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
37956 {
37957- return h->access.command_completed(h, q);
37958+ return h->access->command_completed(h, q);
37959 }
37960
37961 static inline bool interrupt_pending(struct ctlr_info *h)
37962 {
37963- return h->access.intr_pending(h);
37964+ return h->access->intr_pending(h);
37965 }
37966
37967 static inline long interrupt_not_for_us(struct ctlr_info *h)
37968 {
37969- return (h->access.intr_pending(h) == 0) ||
37970+ return (h->access->intr_pending(h) == 0) ||
37971 (h->interrupts_enabled == 0);
37972 }
37973
37974@@ -4318,7 +4318,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
37975 if (prod_index < 0)
37976 return -ENODEV;
37977 h->product_name = products[prod_index].product_name;
37978- h->access = *(products[prod_index].access);
37979+ h->access = products[prod_index].access;
37980
37981 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
37982 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
37983@@ -4600,7 +4600,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
37984
37985 assert_spin_locked(&lockup_detector_lock);
37986 remove_ctlr_from_lockup_detector_list(h);
37987- h->access.set_intr_mask(h, HPSA_INTR_OFF);
37988+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
37989 spin_lock_irqsave(&h->lock, flags);
37990 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
37991 spin_unlock_irqrestore(&h->lock, flags);
37992@@ -4778,7 +4778,7 @@ reinit_after_soft_reset:
37993 }
37994
37995 /* make sure the board interrupts are off */
37996- h->access.set_intr_mask(h, HPSA_INTR_OFF);
37997+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
37998
37999 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
38000 goto clean2;
38001@@ -4812,7 +4812,7 @@ reinit_after_soft_reset:
38002 * fake ones to scoop up any residual completions.
38003 */
38004 spin_lock_irqsave(&h->lock, flags);
38005- h->access.set_intr_mask(h, HPSA_INTR_OFF);
38006+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
38007 spin_unlock_irqrestore(&h->lock, flags);
38008 free_irqs(h);
38009 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
38010@@ -4831,9 +4831,9 @@ reinit_after_soft_reset:
38011 dev_info(&h->pdev->dev, "Board READY.\n");
38012 dev_info(&h->pdev->dev,
38013 "Waiting for stale completions to drain.\n");
38014- h->access.set_intr_mask(h, HPSA_INTR_ON);
38015+ h->access->set_intr_mask(h, HPSA_INTR_ON);
38016 msleep(10000);
38017- h->access.set_intr_mask(h, HPSA_INTR_OFF);
38018+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
38019
38020 rc = controller_reset_failed(h->cfgtable);
38021 if (rc)
38022@@ -4854,7 +4854,7 @@ reinit_after_soft_reset:
38023 }
38024
38025 /* Turn the interrupts on so we can service requests */
38026- h->access.set_intr_mask(h, HPSA_INTR_ON);
38027+ h->access->set_intr_mask(h, HPSA_INTR_ON);
38028
38029 hpsa_hba_inquiry(h);
38030 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
38031@@ -4906,7 +4906,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
38032 * To write all data in the battery backed cache to disks
38033 */
38034 hpsa_flush_cache(h);
38035- h->access.set_intr_mask(h, HPSA_INTR_OFF);
38036+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
38037 hpsa_free_irqs_and_disable_msix(h);
38038 }
38039
38040@@ -5075,7 +5075,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
38041 return;
38042 }
38043 /* Change the access methods to the performant access methods */
38044- h->access = SA5_performant_access;
38045+ h->access = &SA5_performant_access;
38046 h->transMethod = CFGTBL_Trans_Performant;
38047 }
38048
38049diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
38050index 9816479..c5d4e97 100644
38051--- a/drivers/scsi/hpsa.h
38052+++ b/drivers/scsi/hpsa.h
38053@@ -79,7 +79,7 @@ struct ctlr_info {
38054 unsigned int msix_vector;
38055 unsigned int msi_vector;
38056 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
38057- struct access_method access;
38058+ struct access_method *access;
38059
38060 /* queue and queue Info */
38061 struct list_head reqQ;
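
The hpsa change removes a copy: instead of copying a struct full of function pointers into the writable ctlr_info (h->access = *ops), ctlr_info now keeps a pointer into the static products[] table, so the table entries themselves can be made read-only; every call site then switches from '.' to '->'. A reduced sketch with stubbed bodies (names shortened):

struct access_method {
	void (*set_intr_mask)(void *h, unsigned long mask);
};

static void sa5_intr_mask(void *h, unsigned long mask) { /* stub */ }

static const struct access_method SA5_access = {
	.set_intr_mask = sa5_intr_mask,
};

struct ctlr_info {
	const struct access_method *access;	/* was: held by value */
};

static void example(struct ctlr_info *h)
{
	h->access = &SA5_access;	/* point, don't copy */
	h->access->set_intr_mask(h, 0);	/* '.' becomes '->' */
}
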
38062diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
38063index c772d8d..35c362c 100644
38064--- a/drivers/scsi/libfc/fc_exch.c
38065+++ b/drivers/scsi/libfc/fc_exch.c
38066@@ -100,12 +100,12 @@ struct fc_exch_mgr {
38067 u16 pool_max_index;
38068
38069 struct {
38070- atomic_t no_free_exch;
38071- atomic_t no_free_exch_xid;
38072- atomic_t xid_not_found;
38073- atomic_t xid_busy;
38074- atomic_t seq_not_found;
38075- atomic_t non_bls_resp;
38076+ atomic_unchecked_t no_free_exch;
38077+ atomic_unchecked_t no_free_exch_xid;
38078+ atomic_unchecked_t xid_not_found;
38079+ atomic_unchecked_t xid_busy;
38080+ atomic_unchecked_t seq_not_found;
38081+ atomic_unchecked_t non_bls_resp;
38082 } stats;
38083 };
38084
38085@@ -725,7 +725,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
38086 /* allocate memory for exchange */
38087 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
38088 if (!ep) {
38089- atomic_inc(&mp->stats.no_free_exch);
38090+ atomic_inc_unchecked(&mp->stats.no_free_exch);
38091 goto out;
38092 }
38093 memset(ep, 0, sizeof(*ep));
38094@@ -786,7 +786,7 @@ out:
38095 return ep;
38096 err:
38097 spin_unlock_bh(&pool->lock);
38098- atomic_inc(&mp->stats.no_free_exch_xid);
38099+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
38100 mempool_free(ep, mp->ep_pool);
38101 return NULL;
38102 }
38103@@ -929,7 +929,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
38104 xid = ntohs(fh->fh_ox_id); /* we originated exch */
38105 ep = fc_exch_find(mp, xid);
38106 if (!ep) {
38107- atomic_inc(&mp->stats.xid_not_found);
38108+ atomic_inc_unchecked(&mp->stats.xid_not_found);
38109 reject = FC_RJT_OX_ID;
38110 goto out;
38111 }
38112@@ -959,7 +959,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
38113 ep = fc_exch_find(mp, xid);
38114 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
38115 if (ep) {
38116- atomic_inc(&mp->stats.xid_busy);
38117+ atomic_inc_unchecked(&mp->stats.xid_busy);
38118 reject = FC_RJT_RX_ID;
38119 goto rel;
38120 }
38121@@ -970,7 +970,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
38122 }
38123 xid = ep->xid; /* get our XID */
38124 } else if (!ep) {
38125- atomic_inc(&mp->stats.xid_not_found);
38126+ atomic_inc_unchecked(&mp->stats.xid_not_found);
38127 reject = FC_RJT_RX_ID; /* XID not found */
38128 goto out;
38129 }
38130@@ -987,7 +987,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
38131 } else {
38132 sp = &ep->seq;
38133 if (sp->id != fh->fh_seq_id) {
38134- atomic_inc(&mp->stats.seq_not_found);
38135+ atomic_inc_unchecked(&mp->stats.seq_not_found);
38136 if (f_ctl & FC_FC_END_SEQ) {
38137 /*
38138 * Update sequence_id based on incoming last
38139@@ -1437,22 +1437,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
38140
38141 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
38142 if (!ep) {
38143- atomic_inc(&mp->stats.xid_not_found);
38144+ atomic_inc_unchecked(&mp->stats.xid_not_found);
38145 goto out;
38146 }
38147 if (ep->esb_stat & ESB_ST_COMPLETE) {
38148- atomic_inc(&mp->stats.xid_not_found);
38149+ atomic_inc_unchecked(&mp->stats.xid_not_found);
38150 goto rel;
38151 }
38152 if (ep->rxid == FC_XID_UNKNOWN)
38153 ep->rxid = ntohs(fh->fh_rx_id);
38154 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
38155- atomic_inc(&mp->stats.xid_not_found);
38156+ atomic_inc_unchecked(&mp->stats.xid_not_found);
38157 goto rel;
38158 }
38159 if (ep->did != ntoh24(fh->fh_s_id) &&
38160 ep->did != FC_FID_FLOGI) {
38161- atomic_inc(&mp->stats.xid_not_found);
38162+ atomic_inc_unchecked(&mp->stats.xid_not_found);
38163 goto rel;
38164 }
38165 sof = fr_sof(fp);
38166@@ -1461,7 +1461,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
38167 sp->ssb_stat |= SSB_ST_RESP;
38168 sp->id = fh->fh_seq_id;
38169 } else if (sp->id != fh->fh_seq_id) {
38170- atomic_inc(&mp->stats.seq_not_found);
38171+ atomic_inc_unchecked(&mp->stats.seq_not_found);
38172 goto rel;
38173 }
38174
38175@@ -1525,9 +1525,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
38176 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
38177
38178 if (!sp)
38179- atomic_inc(&mp->stats.xid_not_found);
38180+ atomic_inc_unchecked(&mp->stats.xid_not_found);
38181 else
38182- atomic_inc(&mp->stats.non_bls_resp);
38183+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
38184
38185 fc_frame_free(fp);
38186 }
38187@@ -2174,13 +2174,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
38188
38189 list_for_each_entry(ema, &lport->ema_list, ema_list) {
38190 mp = ema->mp;
38191- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
38192+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
38193 st->fc_no_free_exch_xid +=
38194- atomic_read(&mp->stats.no_free_exch_xid);
38195- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
38196- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
38197- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
38198- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
38199+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
38200+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
38201+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
38202+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
38203+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
38204 }
38205 }
38206 EXPORT_SYMBOL(fc_exch_update_stats);
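
This fc_exch hunk is the core PAX_REFCOUNT idiom that recurs through the rest of this section: with the feature enabled, ordinary atomic_t operations trap on overflow to stop reference-count wraparound exploits, so counters that are allowed to wrap (pure statistics such as these exchange-manager counters, or sequence numbers) move to atomic_unchecked_t and its _unchecked accessors. A sketch of the idiom, with a stand-in fallback for an unpatched tree:

#include <linux/atomic.h>

#ifndef CONFIG_PAX_REFCOUNT
typedef atomic_t atomic_unchecked_t;	/* fallback for unpatched kernels */
#define atomic_inc_unchecked(v)		atomic_inc(v)
#define atomic_read_unchecked(v)	atomic_read(v)
#endif

static atomic_unchecked_t xid_not_found = ATOMIC_INIT(0);

static void count_miss(void)
{
	/* A wrapping statistics counter is harmless: no overflow trap. */
	atomic_inc_unchecked(&xid_not_found);
}
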
38207diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
38208index bdb81cd..d3c7c2c 100644
38209--- a/drivers/scsi/libsas/sas_ata.c
38210+++ b/drivers/scsi/libsas/sas_ata.c
38211@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
38212 .postreset = ata_std_postreset,
38213 .error_handler = ata_std_error_handler,
38214 .post_internal_cmd = sas_ata_post_internal,
38215- .qc_defer = ata_std_qc_defer,
38216+ .qc_defer = ata_std_qc_defer,
38217 .qc_prep = ata_noop_qc_prep,
38218 .qc_issue = sas_ata_qc_issue,
38219 .qc_fill_rtf = sas_ata_qc_fill_rtf,
38220diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
38221index 69b5993..1ac9dce 100644
38222--- a/drivers/scsi/lpfc/lpfc.h
38223+++ b/drivers/scsi/lpfc/lpfc.h
38224@@ -424,7 +424,7 @@ struct lpfc_vport {
38225 struct dentry *debug_nodelist;
38226 struct dentry *vport_debugfs_root;
38227 struct lpfc_debugfs_trc *disc_trc;
38228- atomic_t disc_trc_cnt;
38229+ atomic_unchecked_t disc_trc_cnt;
38230 #endif
38231 uint8_t stat_data_enabled;
38232 uint8_t stat_data_blocked;
38233@@ -840,8 +840,8 @@ struct lpfc_hba {
38234 struct timer_list fabric_block_timer;
38235 unsigned long bit_flags;
38236 #define FABRIC_COMANDS_BLOCKED 0
38237- atomic_t num_rsrc_err;
38238- atomic_t num_cmd_success;
38239+ atomic_unchecked_t num_rsrc_err;
38240+ atomic_unchecked_t num_cmd_success;
38241 unsigned long last_rsrc_error_time;
38242 unsigned long last_ramp_down_time;
38243 unsigned long last_ramp_up_time;
38244@@ -877,7 +877,7 @@ struct lpfc_hba {
38245
38246 struct dentry *debug_slow_ring_trc;
38247 struct lpfc_debugfs_trc *slow_ring_trc;
38248- atomic_t slow_ring_trc_cnt;
38249+ atomic_unchecked_t slow_ring_trc_cnt;
38250 /* iDiag debugfs sub-directory */
38251 struct dentry *idiag_root;
38252 struct dentry *idiag_pci_cfg;
38253diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
38254index f63f5ff..de29189 100644
38255--- a/drivers/scsi/lpfc/lpfc_debugfs.c
38256+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
38257@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
38258
38259 #include <linux/debugfs.h>
38260
38261-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
38262+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
38263 static unsigned long lpfc_debugfs_start_time = 0L;
38264
38265 /* iDiag */
38266@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
38267 lpfc_debugfs_enable = 0;
38268
38269 len = 0;
38270- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
38271+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
38272 (lpfc_debugfs_max_disc_trc - 1);
38273 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
38274 dtp = vport->disc_trc + i;
38275@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
38276 lpfc_debugfs_enable = 0;
38277
38278 len = 0;
38279- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
38280+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
38281 (lpfc_debugfs_max_slow_ring_trc - 1);
38282 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
38283 dtp = phba->slow_ring_trc + i;
38284@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
38285 !vport || !vport->disc_trc)
38286 return;
38287
38288- index = atomic_inc_return(&vport->disc_trc_cnt) &
38289+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
38290 (lpfc_debugfs_max_disc_trc - 1);
38291 dtp = vport->disc_trc + index;
38292 dtp->fmt = fmt;
38293 dtp->data1 = data1;
38294 dtp->data2 = data2;
38295 dtp->data3 = data3;
38296- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
38297+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
38298 dtp->jif = jiffies;
38299 #endif
38300 return;
38301@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
38302 !phba || !phba->slow_ring_trc)
38303 return;
38304
38305- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
38306+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
38307 (lpfc_debugfs_max_slow_ring_trc - 1);
38308 dtp = phba->slow_ring_trc + index;
38309 dtp->fmt = fmt;
38310 dtp->data1 = data1;
38311 dtp->data2 = data2;
38312 dtp->data3 = data3;
38313- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
38314+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
38315 dtp->jif = jiffies;
38316 #endif
38317 return;
38318@@ -4182,7 +4182,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
38319 "slow_ring buffer\n");
38320 goto debug_failed;
38321 }
38322- atomic_set(&phba->slow_ring_trc_cnt, 0);
38323+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
38324 memset(phba->slow_ring_trc, 0,
38325 (sizeof(struct lpfc_debugfs_trc) *
38326 lpfc_debugfs_max_slow_ring_trc));
38327@@ -4228,7 +4228,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
38328 "buffer\n");
38329 goto debug_failed;
38330 }
38331- atomic_set(&vport->disc_trc_cnt, 0);
38332+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
38333
38334 snprintf(name, sizeof(name), "discovery_trace");
38335 vport->debug_disc_trc =
38336diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
38337index 7dc4218..3436f08 100644
38338--- a/drivers/scsi/lpfc/lpfc_init.c
38339+++ b/drivers/scsi/lpfc/lpfc_init.c
38340@@ -10589,8 +10589,10 @@ lpfc_init(void)
38341 "misc_register returned with status %d", error);
38342
38343 if (lpfc_enable_npiv) {
38344- lpfc_transport_functions.vport_create = lpfc_vport_create;
38345- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
38346+ pax_open_kernel();
38347+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
38348+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
38349+ pax_close_kernel();
38350 }
38351 lpfc_transport_template =
38352 fc_attach_transport(&lpfc_transport_functions);
38353diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
38354index 7f45ac9..cf62eda 100644
38355--- a/drivers/scsi/lpfc/lpfc_scsi.c
38356+++ b/drivers/scsi/lpfc/lpfc_scsi.c
38357@@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
38358 uint32_t evt_posted;
38359
38360 spin_lock_irqsave(&phba->hbalock, flags);
38361- atomic_inc(&phba->num_rsrc_err);
38362+ atomic_inc_unchecked(&phba->num_rsrc_err);
38363 phba->last_rsrc_error_time = jiffies;
38364
38365 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
38366@@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
38367 unsigned long flags;
38368 struct lpfc_hba *phba = vport->phba;
38369 uint32_t evt_posted;
38370- atomic_inc(&phba->num_cmd_success);
38371+ atomic_inc_unchecked(&phba->num_cmd_success);
38372
38373 if (vport->cfg_lun_queue_depth <= queue_depth)
38374 return;
38375@@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
38376 unsigned long num_rsrc_err, num_cmd_success;
38377 int i;
38378
38379- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
38380- num_cmd_success = atomic_read(&phba->num_cmd_success);
38381+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
38382+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
38383
38384 /*
38385 * The error and success command counters are global per
38386@@ -419,8 +419,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
38387 }
38388 }
38389 lpfc_destroy_vport_work_array(phba, vports);
38390- atomic_set(&phba->num_rsrc_err, 0);
38391- atomic_set(&phba->num_cmd_success, 0);
38392+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
38393+ atomic_set_unchecked(&phba->num_cmd_success, 0);
38394 }
38395
38396 /**
38397@@ -454,8 +454,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
38398 }
38399 }
38400 lpfc_destroy_vport_work_array(phba, vports);
38401- atomic_set(&phba->num_rsrc_err, 0);
38402- atomic_set(&phba->num_cmd_success, 0);
38403+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
38404+ atomic_set_unchecked(&phba->num_cmd_success, 0);
38405 }
38406
38407 /**
38408diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
38409index af763ea..41904f7 100644
38410--- a/drivers/scsi/pmcraid.c
38411+++ b/drivers/scsi/pmcraid.c
38412@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
38413 res->scsi_dev = scsi_dev;
38414 scsi_dev->hostdata = res;
38415 res->change_detected = 0;
38416- atomic_set(&res->read_failures, 0);
38417- atomic_set(&res->write_failures, 0);
38418+ atomic_set_unchecked(&res->read_failures, 0);
38419+ atomic_set_unchecked(&res->write_failures, 0);
38420 rc = 0;
38421 }
38422 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
38423@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
38424
38425 /* If this was a SCSI read/write command keep count of errors */
38426 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
38427- atomic_inc(&res->read_failures);
38428+ atomic_inc_unchecked(&res->read_failures);
38429 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
38430- atomic_inc(&res->write_failures);
38431+ atomic_inc_unchecked(&res->write_failures);
38432
38433 if (!RES_IS_GSCSI(res->cfg_entry) &&
38434 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
38435@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
38436 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
38437 * hrrq_id assigned here in queuecommand
38438 */
38439- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
38440+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
38441 pinstance->num_hrrq;
38442 cmd->cmd_done = pmcraid_io_done;
38443
38444@@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
38445 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
38446 * hrrq_id assigned here in queuecommand
38447 */
38448- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
38449+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
38450 pinstance->num_hrrq;
38451
38452 if (request_size) {
38453@@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
38454
38455 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
38456 /* add resources only after host is added into system */
38457- if (!atomic_read(&pinstance->expose_resources))
38458+ if (!atomic_read_unchecked(&pinstance->expose_resources))
38459 return;
38460
38461 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
38462@@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
38463 init_waitqueue_head(&pinstance->reset_wait_q);
38464
38465 atomic_set(&pinstance->outstanding_cmds, 0);
38466- atomic_set(&pinstance->last_message_id, 0);
38467- atomic_set(&pinstance->expose_resources, 0);
38468+ atomic_set_unchecked(&pinstance->last_message_id, 0);
38469+ atomic_set_unchecked(&pinstance->expose_resources, 0);
38470
38471 INIT_LIST_HEAD(&pinstance->free_res_q);
38472 INIT_LIST_HEAD(&pinstance->used_res_q);
38473@@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
38474 /* Schedule worker thread to handle CCN and take care of adding and
38475 * removing devices to OS
38476 */
38477- atomic_set(&pinstance->expose_resources, 1);
38478+ atomic_set_unchecked(&pinstance->expose_resources, 1);
38479 schedule_work(&pinstance->worker_q);
38480 return rc;
38481
38482diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
38483index e1d150f..6c6df44 100644
38484--- a/drivers/scsi/pmcraid.h
38485+++ b/drivers/scsi/pmcraid.h
38486@@ -748,7 +748,7 @@ struct pmcraid_instance {
38487 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
38488
38489 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
38490- atomic_t last_message_id;
38491+ atomic_unchecked_t last_message_id;
38492
38493 /* configuration table */
38494 struct pmcraid_config_table *cfg_table;
38495@@ -777,7 +777,7 @@ struct pmcraid_instance {
38496 atomic_t outstanding_cmds;
38497
38498 /* should add/delete resources to mid-layer now ?*/
38499- atomic_t expose_resources;
38500+ atomic_unchecked_t expose_resources;
38501
38502
38503
38504@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
38505 struct pmcraid_config_table_entry_ext cfg_entry_ext;
38506 };
38507 struct scsi_device *scsi_dev; /* Link scsi_device structure */
38508- atomic_t read_failures; /* count of failed READ commands */
38509- atomic_t write_failures; /* count of failed WRITE commands */
38510+ atomic_unchecked_t read_failures; /* count of failed READ commands */
38511+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
38512
38513 /* To indicate add/delete/modify during CCN */
38514 u8 change_detected;
38515diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
38516index 83d7984..a27d947 100644
38517--- a/drivers/scsi/qla2xxx/qla_attr.c
38518+++ b/drivers/scsi/qla2xxx/qla_attr.c
38519@@ -1969,7 +1969,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
38520 return 0;
38521 }
38522
38523-struct fc_function_template qla2xxx_transport_functions = {
38524+fc_function_template_no_const qla2xxx_transport_functions = {
38525
38526 .show_host_node_name = 1,
38527 .show_host_port_name = 1,
38528@@ -2016,7 +2016,7 @@ struct fc_function_template qla2xxx_transport_functions = {
38529 .bsg_timeout = qla24xx_bsg_timeout,
38530 };
38531
38532-struct fc_function_template qla2xxx_transport_vport_functions = {
38533+fc_function_template_no_const qla2xxx_transport_vport_functions = {
38534
38535 .show_host_node_name = 1,
38536 .show_host_port_name = 1,
38537diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
38538index 6acb397..d86e3e0 100644
38539--- a/drivers/scsi/qla2xxx/qla_gbl.h
38540+++ b/drivers/scsi/qla2xxx/qla_gbl.h
38541@@ -515,8 +515,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
38542 struct device_attribute;
38543 extern struct device_attribute *qla2x00_host_attrs[];
38544 struct fc_function_template;
38545-extern struct fc_function_template qla2xxx_transport_functions;
38546-extern struct fc_function_template qla2xxx_transport_vport_functions;
38547+extern fc_function_template_no_const qla2xxx_transport_functions;
38548+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
38549 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
38550 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
38551 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
38552diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
38553index f4b1fc8..a1ce4dd 100644
38554--- a/drivers/scsi/qla2xxx/qla_os.c
38555+++ b/drivers/scsi/qla2xxx/qla_os.c
38556@@ -1462,8 +1462,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
38557 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
38558 /* Ok, a 64bit DMA mask is applicable. */
38559 ha->flags.enable_64bit_addressing = 1;
38560- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
38561- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
38562+ pax_open_kernel();
38563+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
38564+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
38565+ pax_close_kernel();
38566 return;
38567 }
38568 }
38569diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
38570index 329d553..f20d31d 100644
38571--- a/drivers/scsi/qla4xxx/ql4_def.h
38572+++ b/drivers/scsi/qla4xxx/ql4_def.h
38573@@ -273,7 +273,7 @@ struct ddb_entry {
38574 * (4000 only) */
38575 atomic_t relogin_timer; /* Max Time to wait for
38576 * relogin to complete */
38577- atomic_t relogin_retry_count; /* Num of times relogin has been
38578+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
38579 * retried */
38580 uint32_t default_time2wait; /* Default Min time between
38581 * relogins (+aens) */
38582diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
38583index fbc546e..c7d1b48 100644
38584--- a/drivers/scsi/qla4xxx/ql4_os.c
38585+++ b/drivers/scsi/qla4xxx/ql4_os.c
38586@@ -2621,12 +2621,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
38587 */
38588 if (!iscsi_is_session_online(cls_sess)) {
38589 /* Reset retry relogin timer */
38590- atomic_inc(&ddb_entry->relogin_retry_count);
38591+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
38592 DEBUG2(ql4_printk(KERN_INFO, ha,
38593 "%s: index[%d] relogin timed out-retrying"
38594 " relogin (%d), retry (%d)\n", __func__,
38595 ddb_entry->fw_ddb_index,
38596- atomic_read(&ddb_entry->relogin_retry_count),
38597+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
38598 ddb_entry->default_time2wait + 4));
38599 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
38600 atomic_set(&ddb_entry->retry_relogin_timer,
38601@@ -4738,7 +4738,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
38602
38603 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
38604 atomic_set(&ddb_entry->relogin_timer, 0);
38605- atomic_set(&ddb_entry->relogin_retry_count, 0);
38606+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
38607 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
38608 ddb_entry->default_relogin_timeout =
38609 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
38610diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
38611index 2c0d0ec..4e8681a 100644
38612--- a/drivers/scsi/scsi.c
38613+++ b/drivers/scsi/scsi.c
38614@@ -661,7 +661,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
38615 unsigned long timeout;
38616 int rtn = 0;
38617
38618- atomic_inc(&cmd->device->iorequest_cnt);
38619+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
38620
38621 /* check if the device is still usable */
38622 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
38623diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
38624index 9032e91..7a805d0 100644
38625--- a/drivers/scsi/scsi_lib.c
38626+++ b/drivers/scsi/scsi_lib.c
38627@@ -1454,7 +1454,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
38628 shost = sdev->host;
38629 scsi_init_cmd_errh(cmd);
38630 cmd->result = DID_NO_CONNECT << 16;
38631- atomic_inc(&cmd->device->iorequest_cnt);
38632+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
38633
38634 /*
38635 * SCSI request completion path will do scsi_device_unbusy(),
38636@@ -1480,9 +1480,9 @@ static void scsi_softirq_done(struct request *rq)
38637
38638 INIT_LIST_HEAD(&cmd->eh_entry);
38639
38640- atomic_inc(&cmd->device->iodone_cnt);
38641+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
38642 if (cmd->result)
38643- atomic_inc(&cmd->device->ioerr_cnt);
38644+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
38645
38646 disposition = scsi_decide_disposition(cmd);
38647 if (disposition != SUCCESS &&
38648diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
38649index 931a7d9..0c2a754 100644
38650--- a/drivers/scsi/scsi_sysfs.c
38651+++ b/drivers/scsi/scsi_sysfs.c
38652@@ -658,7 +658,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
38653 char *buf) \
38654 { \
38655 struct scsi_device *sdev = to_scsi_device(dev); \
38656- unsigned long long count = atomic_read(&sdev->field); \
38657+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
38658 return snprintf(buf, 20, "0x%llx\n", count); \
38659 } \
38660 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
38661diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
38662index 84a1fdf..693b0d6 100644
38663--- a/drivers/scsi/scsi_tgt_lib.c
38664+++ b/drivers/scsi/scsi_tgt_lib.c
38665@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
38666 int err;
38667
38668 dprintk("%lx %u\n", uaddr, len);
38669- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
38670+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
38671 if (err) {
38672 /*
38673 * TODO: need to fixup sg_tablesize, max_segment_size,
38674diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
38675index e894ca7..de9d7660 100644
38676--- a/drivers/scsi/scsi_transport_fc.c
38677+++ b/drivers/scsi/scsi_transport_fc.c
38678@@ -498,7 +498,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
38679 * Netlink Infrastructure
38680 */
38681
38682-static atomic_t fc_event_seq;
38683+static atomic_unchecked_t fc_event_seq;
38684
38685 /**
38686 * fc_get_event_number - Obtain the next sequential FC event number
38687@@ -511,7 +511,7 @@ static atomic_t fc_event_seq;
38688 u32
38689 fc_get_event_number(void)
38690 {
38691- return atomic_add_return(1, &fc_event_seq);
38692+ return atomic_add_return_unchecked(1, &fc_event_seq);
38693 }
38694 EXPORT_SYMBOL(fc_get_event_number);
38695
38696@@ -659,7 +659,7 @@ static __init int fc_transport_init(void)
38697 {
38698 int error;
38699
38700- atomic_set(&fc_event_seq, 0);
38701+ atomic_set_unchecked(&fc_event_seq, 0);
38702
38703 error = transport_class_register(&fc_host_class);
38704 if (error)
38705@@ -849,7 +849,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
38706 char *cp;
38707
38708 *val = simple_strtoul(buf, &cp, 0);
38709- if ((*cp && (*cp != '\n')) || (*val < 0))
38710+ if (*cp && (*cp != '\n'))
38711 return -EINVAL;
38712 /*
38713 * Check for overflow; dev_loss_tmo is u32
38714diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
38715index 31969f2..2b348f0 100644
38716--- a/drivers/scsi/scsi_transport_iscsi.c
38717+++ b/drivers/scsi/scsi_transport_iscsi.c
38718@@ -79,7 +79,7 @@ struct iscsi_internal {
38719 struct transport_container session_cont;
38720 };
38721
38722-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
38723+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
38724 static struct workqueue_struct *iscsi_eh_timer_workq;
38725
38726 static DEFINE_IDA(iscsi_sess_ida);
38727@@ -1064,7 +1064,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
38728 int err;
38729
38730 ihost = shost->shost_data;
38731- session->sid = atomic_add_return(1, &iscsi_session_nr);
38732+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
38733
38734 if (target_id == ISCSI_MAX_TARGET) {
38735 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
38736@@ -2943,7 +2943,7 @@ static __init int iscsi_transport_init(void)
38737 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
38738 ISCSI_TRANSPORT_VERSION);
38739
38740- atomic_set(&iscsi_session_nr, 0);
38741+ atomic_set_unchecked(&iscsi_session_nr, 0);
38742
38743 err = class_register(&iscsi_transport_class);
38744 if (err)
38745diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
38746index 21a045e..ec89e03 100644
38747--- a/drivers/scsi/scsi_transport_srp.c
38748+++ b/drivers/scsi/scsi_transport_srp.c
38749@@ -33,7 +33,7 @@
38750 #include "scsi_transport_srp_internal.h"
38751
38752 struct srp_host_attrs {
38753- atomic_t next_port_id;
38754+ atomic_unchecked_t next_port_id;
38755 };
38756 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
38757
38758@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
38759 struct Scsi_Host *shost = dev_to_shost(dev);
38760 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
38761
38762- atomic_set(&srp_host->next_port_id, 0);
38763+ atomic_set_unchecked(&srp_host->next_port_id, 0);
38764 return 0;
38765 }
38766
38767@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
38768 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
38769 rport->roles = ids->roles;
38770
38771- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
38772+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
38773 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
38774
38775 transport_setup_device(&rport->dev);
38776diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
38777index 352bc77..c049b14 100644
38778--- a/drivers/scsi/sd.c
38779+++ b/drivers/scsi/sd.c
38780@@ -2899,7 +2899,7 @@ static int sd_probe(struct device *dev)
38781 sdkp->disk = gd;
38782 sdkp->index = index;
38783 atomic_set(&sdkp->openers, 0);
38784- atomic_set(&sdkp->device->ioerr_cnt, 0);
38785+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
38786
38787 if (!sdp->request_queue->rq_timeout) {
38788 if (sdp->type != TYPE_MOD)
38789diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
38790index be2c9a6..275525c 100644
38791--- a/drivers/scsi/sg.c
38792+++ b/drivers/scsi/sg.c
38793@@ -1101,7 +1101,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
38794 sdp->disk->disk_name,
38795 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
38796 NULL,
38797- (char *)arg);
38798+ (char __user *)arg);
38799 case BLKTRACESTART:
38800 return blk_trace_startstop(sdp->device->request_queue, 1);
38801 case BLKTRACESTOP:
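
The sg ioctl change is a sparse-annotation fix, not a behavioral one: arg arrives as an unsigned long, and casting it to (char __user *) rather than (char *) tells sparse that the pointer lives in the user address space and may only be reached through the copy_*_user() family. A minimal sketch, using the kernel's usual fallback define for non-sparse builds:

#ifdef __CHECKER__
#define __user	__attribute__((noderef, address_space(1)))
#else
#define __user
#endif

static long example_ioctl(unsigned long arg)
{
	char __user *ubuf = (char __user *)arg;	/* annotated cast */

	(void)ubuf;	/* real code would copy_from_user(..., ubuf, ...) */
	return 0;
}
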
38802diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
38803index 84c2861..ece0a31 100644
38804--- a/drivers/spi/spi.c
38805+++ b/drivers/spi/spi.c
38806@@ -1453,7 +1453,7 @@ int spi_bus_unlock(struct spi_master *master)
38807 EXPORT_SYMBOL_GPL(spi_bus_unlock);
38808
38809 /* portable code must never pass more than 32 bytes */
38810-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
38811+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
38812
38813 static u8 *buf;
38814
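
SPI_BUFSIZ is compared against size_t transfer lengths later in this file (spi_write_then_read()), so the bound is made unsigned long: max(32, SMP_CACHE_BYTES) carries a signed literal into what should be a size, while 32UL keeps the expression, and every comparison against it, unsigned. A minimal illustration under plain C semantics:

#include <stddef.h>

#define BUFSIZ_OLD 32	/* int: `len > BUFSIZ_OLD` mixes signedness */
#define BUFSIZ_NEW 32UL	/* unsigned long: matches a size_t operand */

static int too_big(size_t len)
{
	return len > BUFSIZ_NEW;	/* unsigned vs. unsigned */
}
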
38815diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
38816index 34afc16..ffe44dd 100644
38817--- a/drivers/staging/octeon/ethernet-rx.c
38818+++ b/drivers/staging/octeon/ethernet-rx.c
38819@@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
38820 /* Increment RX stats for virtual ports */
38821 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
38822 #ifdef CONFIG_64BIT
38823- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
38824- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
38825+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
38826+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
38827 #else
38828- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
38829- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
38830+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
38831+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
38832 #endif
38833 }
38834 netif_receive_skb(skb);
38835@@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
38836 dev->name);
38837 */
38838 #ifdef CONFIG_64BIT
38839- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
38840+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
38841 #else
38842- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
38843+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
38844 #endif
38845 dev_kfree_skb_irq(skb);
38846 }
38847diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
38848index 683bedc..86dba9a 100644
38849--- a/drivers/staging/octeon/ethernet.c
38850+++ b/drivers/staging/octeon/ethernet.c
38851@@ -252,11 +252,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
38852 * since the RX tasklet also increments it.
38853 */
38854 #ifdef CONFIG_64BIT
38855- atomic64_add(rx_status.dropped_packets,
38856- (atomic64_t *)&priv->stats.rx_dropped);
38857+ atomic64_add_unchecked(rx_status.dropped_packets,
38858+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
38859 #else
38860- atomic_add(rx_status.dropped_packets,
38861- (atomic_t *)&priv->stats.rx_dropped);
38862+ atomic_add_unchecked(rx_status.dropped_packets,
38863+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
38864 #endif
38865 }
38866
38867diff --git a/drivers/staging/ramster/tmem.c b/drivers/staging/ramster/tmem.c
38868index a2b7e03..aaf3630 100644
38869--- a/drivers/staging/ramster/tmem.c
38870+++ b/drivers/staging/ramster/tmem.c
38871@@ -50,25 +50,25 @@
38872 * A tmem host implementation must use this function to register callbacks
38873 * for memory allocation.
38874 */
38875-static struct tmem_hostops tmem_hostops;
38876+static struct tmem_hostops *tmem_hostops;
38877
38878 static void tmem_objnode_tree_init(void);
38879
38880 void tmem_register_hostops(struct tmem_hostops *m)
38881 {
38882 tmem_objnode_tree_init();
38883- tmem_hostops = *m;
38884+ tmem_hostops = m;
38885 }
38886
38887 /*
38888 * A tmem host implementation must use this function to register
38889 * callbacks for a page-accessible memory (PAM) implementation.
38890 */
38891-static struct tmem_pamops tmem_pamops;
38892+static struct tmem_pamops *tmem_pamops;
38893
38894 void tmem_register_pamops(struct tmem_pamops *m)
38895 {
38896- tmem_pamops = *m;
38897+ tmem_pamops = m;
38898 }
38899
38900 /*
38901@@ -174,7 +174,7 @@ static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
38902 obj->pampd_count = 0;
38903 #ifdef CONFIG_RAMSTER
38904- if (tmem_pamops.new_obj != NULL)
38905- (*tmem_pamops.new_obj)(obj);
38906+ if (tmem_pamops->new_obj != NULL)
38906+ (tmem_pamops->new_obj)(obj);
38907 #endif
38908 SET_SENTINEL(obj, OBJ);
38909
38910@@ -210,7 +210,7 @@ static void tmem_pool_flush(struct tmem_pool *pool, bool destroy)
38911 rbnode = rb_next(rbnode);
38912 tmem_pampd_destroy_all_in_obj(obj, true);
38913 tmem_obj_free(obj, hb);
38914- (*tmem_hostops.obj_free)(obj, pool);
38915+ (tmem_hostops->obj_free)(obj, pool);
38916 }
38917 spin_unlock(&hb->lock);
38918 }
38919@@ -261,7 +261,7 @@ static struct tmem_objnode *tmem_objnode_alloc(struct tmem_obj *obj)
38920 ASSERT_SENTINEL(obj, OBJ);
38921 BUG_ON(obj->pool == NULL);
38922 ASSERT_SENTINEL(obj->pool, POOL);
38923- objnode = (*tmem_hostops.objnode_alloc)(obj->pool);
38924+ objnode = (tmem_hostops->objnode_alloc)(obj->pool);
38925 if (unlikely(objnode == NULL))
38926 goto out;
38927 objnode->obj = obj;
38928@@ -290,7 +290,7 @@ static void tmem_objnode_free(struct tmem_objnode *objnode)
38929 ASSERT_SENTINEL(pool, POOL);
38930 objnode->obj->objnode_count--;
38931 objnode->obj = NULL;
38932- (*tmem_hostops.objnode_free)(objnode, pool);
38933+ (tmem_hostops->objnode_free)(objnode, pool);
38934 }
38935
38936 /*
38937@@ -348,7 +348,7 @@ static void *tmem_pampd_replace_in_obj(struct tmem_obj *obj, uint32_t index,
38938 void *old_pampd = *(void **)slot;
38939 *(void **)slot = new_pampd;
38940 if (!no_free)
38941- (*tmem_pamops.free)(old_pampd, obj->pool,
38942+ (tmem_pamops->free)(old_pampd, obj->pool,
38943 NULL, 0, false);
38944 ret = new_pampd;
38945 }
38946@@ -505,7 +505,7 @@ static void tmem_objnode_node_destroy(struct tmem_obj *obj,
38947 if (objnode->slots[i]) {
38948 if (ht == 1) {
38949 obj->pampd_count--;
38950- (*tmem_pamops.free)(objnode->slots[i],
38951+ (tmem_pamops->free)(objnode->slots[i],
38952 obj->pool, NULL, 0, true);
38953 objnode->slots[i] = NULL;
38954 continue;
38955@@ -524,7 +524,7 @@ static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj,
38956 return;
38957 if (obj->objnode_tree_height == 0) {
38958 obj->pampd_count--;
38959- (*tmem_pamops.free)(obj->objnode_tree_root,
38960+ (tmem_pamops->free)(obj->objnode_tree_root,
38961 obj->pool, NULL, 0, true);
38962 } else {
38963 tmem_objnode_node_destroy(obj, obj->objnode_tree_root,
38964@@ -535,7 +535,7 @@ static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj,
38965 obj->objnode_tree_root = NULL;
38966 #ifdef CONFIG_RAMSTER
38967- if (tmem_pamops.free_obj != NULL)
38967+ if (tmem_pamops->free_obj != NULL)
38968- (*tmem_pamops.free_obj)(obj->pool, obj, pool_destroy);
38969+ (tmem_pamops->free_obj)(obj->pool, obj, pool_destroy);
38970 #endif
38971 }
38972
38973@@ -574,7 +574,7 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
38974 /* if found, is a dup put, flush the old one */
38975 pampd_del = tmem_pampd_delete_from_obj(obj, index);
38976 BUG_ON(pampd_del != pampd);
38977- (*tmem_pamops.free)(pampd, pool, oidp, index, true);
38978+ (tmem_pamops->free)(pampd, pool, oidp, index, true);
38979 if (obj->pampd_count == 0) {
38980 objnew = obj;
38981 objfound = NULL;
38982@@ -582,7 +582,7 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
38983 pampd = NULL;
38984 }
38985 } else {
38986- obj = objnew = (*tmem_hostops.obj_alloc)(pool);
38987+ obj = objnew = (tmem_hostops->obj_alloc)(pool);
38988 if (unlikely(obj == NULL)) {
38989 ret = -ENOMEM;
38990 goto out;
38991@@ -597,16 +597,16 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
38992 if (unlikely(ret == -ENOMEM))
38993 /* may have partially built objnode tree ("stump") */
38994 goto delete_and_free;
38995- (*tmem_pamops.create_finish)(pampd, is_ephemeral(pool));
38996+ (tmem_pamops->create_finish)(pampd, is_ephemeral(pool));
38997 goto out;
38998
38999 delete_and_free:
39000 (void)tmem_pampd_delete_from_obj(obj, index);
39001 if (pampd)
39002- (*tmem_pamops.free)(pampd, pool, NULL, 0, true);
39003+ (tmem_pamops->free)(pampd, pool, NULL, 0, true);
39004 if (objnew) {
39005 tmem_obj_free(objnew, hb);
39006- (*tmem_hostops.obj_free)(objnew, pool);
39007+ (tmem_hostops->obj_free)(objnew, pool);
39008 }
39009 out:
39010 spin_unlock(&hb->lock);
39011@@ -651,7 +651,7 @@ void tmem_localify_finish(struct tmem_obj *obj, uint32_t index,
39012 if (pampd != NULL) {
39013 BUG_ON(obj == NULL);
39014 (void)tmem_pampd_replace_in_obj(obj, index, pampd, 1);
39015- (*tmem_pamops.create_finish)(pampd, is_ephemeral(obj->pool));
39016+ (tmem_pamops->create_finish)(pampd, is_ephemeral(obj->pool));
39017 } else if (delete) {
39018 BUG_ON(obj == NULL);
39019 (void)tmem_pampd_delete_from_obj(obj, index);
39020@@ -671,7 +671,7 @@ static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
39021 int ret = 0;
39022
39023 if (!is_ephemeral(pool))
39024- new_pampd = (*tmem_pamops.repatriate_preload)(
39025+ new_pampd = (tmem_pamops->repatriate_preload)(
39026 old_pampd, pool, oidp, index, &intransit);
39027 if (intransit)
39028 ret = -EAGAIN;
39029@@ -680,7 +680,7 @@ static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
39030 /* must release the hb->lock else repatriate can't sleep */
39031 spin_unlock(&hb->lock);
39032 if (!intransit)
39033- ret = (*tmem_pamops.repatriate)(old_pampd, new_pampd, pool,
39034+ ret = (tmem_pamops->repatriate)(old_pampd, new_pampd, pool,
39035 oidp, index, free, data);
39036 if (ret == -EAGAIN) {
39037 /* rare I think, but should cond_resched()??? */
39038@@ -714,7 +714,7 @@ int tmem_replace(struct tmem_pool *pool, struct tmem_oid *oidp,
39039 new_pampd = tmem_pampd_replace_in_obj(obj, index, new_pampd, 0);
39040 /* if we bug here, pamops wasn't properly set up for ramster */
39041- BUG_ON(tmem_pamops.replace_in_obj == NULL);
39041+ BUG_ON(tmem_pamops->replace_in_obj == NULL);
39042- ret = (*tmem_pamops.replace_in_obj)(new_pampd, obj);
39043+ ret = (tmem_pamops->replace_in_obj)(new_pampd, obj);
39044 out:
39045 spin_unlock(&hb->lock);
39046 return ret;
39047@@ -776,15 +776,15 @@ int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
39048 if (free) {
39049 if (obj->pampd_count == 0) {
39050 tmem_obj_free(obj, hb);
39051- (*tmem_hostops.obj_free)(obj, pool);
39052+ (tmem_hostops->obj_free)(obj, pool);
39053 obj = NULL;
39054 }
39055 }
39056 if (free)
39057- ret = (*tmem_pamops.get_data_and_free)(
39058+ ret = (tmem_pamops->get_data_and_free)(
39059 data, sizep, raw, pampd, pool, oidp, index);
39060 else
39061- ret = (*tmem_pamops.get_data)(
39062+ ret = (tmem_pamops->get_data)(
39063 data, sizep, raw, pampd, pool, oidp, index);
39064 if (ret < 0)
39065 goto out;
39066@@ -816,10 +816,10 @@ int tmem_flush_page(struct tmem_pool *pool,
39067 pampd = tmem_pampd_delete_from_obj(obj, index);
39068 if (pampd == NULL)
39069 goto out;
39070- (*tmem_pamops.free)(pampd, pool, oidp, index, true);
39071+ (tmem_pamops->free)(pampd, pool, oidp, index, true);
39072 if (obj->pampd_count == 0) {
39073 tmem_obj_free(obj, hb);
39074- (*tmem_hostops.obj_free)(obj, pool);
39075+ (tmem_hostops->obj_free)(obj, pool);
39076 }
39077 ret = 0;
39078
39079@@ -844,7 +844,7 @@ int tmem_flush_object(struct tmem_pool *pool, struct tmem_oid *oidp)
39080 goto out;
39081 tmem_pampd_destroy_all_in_obj(obj, false);
39082 tmem_obj_free(obj, hb);
39083- (*tmem_hostops.obj_free)(obj, pool);
39084+ (tmem_hostops->obj_free)(obj, pool);
39085 ret = 0;
39086
39087 out:
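
The ramster hunks above replace by-value copies of tmem_hostops/tmem_pamops with stored pointers, so the caller's ops table is referenced instead of being duplicated into a writable static. A minimal sketch of the pattern, with invented names and assuming the registered table outlives all callers:

    struct host_ops {
            void *(*alloc)(void);
            void (*free)(void *);
    };

    /* before: a writable copy in .bss; a kernel write primitive could
     * retarget these function pointers at any time */
    static struct host_ops hops_copy;
    void register_hops_old(struct host_ops *m) { hops_copy = *m; }

    /* after: only the pointer is stored; the registered table itself can
     * stay in read-only memory, and call sites become hops->alloc() */
    static struct host_ops *hops;
    void register_hops_new(struct host_ops *m) { hops = m; }
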
39088diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
39089index dc23395..cf7e9b1 100644
39090--- a/drivers/staging/rtl8712/rtl871x_io.h
39091+++ b/drivers/staging/rtl8712/rtl871x_io.h
39092@@ -108,7 +108,7 @@ struct _io_ops {
39093 u8 *pmem);
39094 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
39095 u8 *pmem);
39096-};
39097+} __no_const;
39098
39099 struct io_req {
39100 struct list_head list;
39101diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
39102index 180c963..1f18377 100644
39103--- a/drivers/staging/sbe-2t3e3/netdev.c
39104+++ b/drivers/staging/sbe-2t3e3/netdev.c
39105@@ -51,7 +51,7 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
39106 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
39107
39108 if (rlen)
39109- if (copy_to_user(data, &resp, rlen))
39110+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
39111 return -EFAULT;
39112
39113 return 0;
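
The added length guard above is the standard clamp-before-copy pattern: if the card's config routine could ever report a response length larger than the on-stack reply buffer, the unguarded copy_to_user() would disclose adjacent kernel stack. A hedged, self-contained sketch with invented names:

    #include <linux/uaccess.h>

    static long copy_reply(void __user *dst, const void *resp,
                           size_t resp_size, size_t rlen)
    {
            if (rlen > resp_size)   /* reject oversized lengths first */
                    return -EFAULT;
            if (rlen && copy_to_user(dst, resp, rlen))
                    return -EFAULT;
            return 0;
    }
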
39114diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
39115index c66b8b3..a4a035b 100644
39116--- a/drivers/staging/usbip/vhci.h
39117+++ b/drivers/staging/usbip/vhci.h
39118@@ -83,7 +83,7 @@ struct vhci_hcd {
39119 unsigned resuming:1;
39120 unsigned long re_timeout;
39121
39122- atomic_t seqnum;
39123+ atomic_unchecked_t seqnum;
39124
39125 /*
39126 * NOTE:
39127diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
39128index 620d1be..1cd6711 100644
39129--- a/drivers/staging/usbip/vhci_hcd.c
39130+++ b/drivers/staging/usbip/vhci_hcd.c
39131@@ -471,7 +471,7 @@ static void vhci_tx_urb(struct urb *urb)
39132 return;
39133 }
39134
39135- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
39136+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
39137 if (priv->seqnum == 0xffff)
39138 dev_info(&urb->dev->dev, "seqnum max\n");
39139
39140@@ -723,7 +723,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
39141 return -ENOMEM;
39142 }
39143
39144- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
39145+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
39146 if (unlink->seqnum == 0xffff)
39147 pr_info("seqnum max\n");
39148
39149@@ -924,7 +924,7 @@ static int vhci_start(struct usb_hcd *hcd)
39150 vdev->rhport = rhport;
39151 }
39152
39153- atomic_set(&vhci->seqnum, 0);
39154+ atomic_set_unchecked(&vhci->seqnum, 0);
39155 spin_lock_init(&vhci->lock);
39156
39157 hcd->power_budget = 0; /* no limit */
39158diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
39159index f0eaf04..5a82e06 100644
39160--- a/drivers/staging/usbip/vhci_rx.c
39161+++ b/drivers/staging/usbip/vhci_rx.c
39162@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
39163 if (!urb) {
39164 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
39165 pr_info("max seqnum %d\n",
39166- atomic_read(&the_controller->seqnum));
39167+ atomic_read_unchecked(&the_controller->seqnum));
39168 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
39169 return;
39170 }
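
The seqnum conversions above illustrate the atomic_unchecked_t split: under PaX REFCOUNT, plain atomic_t increments trap on signed overflow to stop refcount-overflow exploits, but a sequence number like this one is expected to wrap, so it is moved to the unchecked variant. A rough model of the idea using a GCC builtin (illustrative only, not the real x86 implementation):

    typedef struct { int counter; } atomic_unchecked_t;

    static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
    {
            /* deliberately no overflow trap: wrapping is benign here */
            return __sync_add_and_fetch(&v->counter, 1);
    }
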
39171diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
39172index 67b1b88..6392fe9 100644
39173--- a/drivers/staging/vt6655/hostap.c
39174+++ b/drivers/staging/vt6655/hostap.c
39175@@ -73,14 +73,13 @@ static int msglevel =MSG_LEVEL_INFO;
39176 *
39177 */
39178
39179+static net_device_ops_no_const apdev_netdev_ops;
39180+
39181 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
39182 {
39183 PSDevice apdev_priv;
39184 struct net_device *dev = pDevice->dev;
39185 int ret;
39186- const struct net_device_ops apdev_netdev_ops = {
39187- .ndo_start_xmit = pDevice->tx_80211,
39188- };
39189
39190 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
39191
39192@@ -92,6 +91,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
39193 *apdev_priv = *pDevice;
39194 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
39195
39196+ /* only half broken now */
39197+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
39198 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
39199
39200 pDevice->apdev->type = ARPHRD_IEEE80211;
39201diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
39202index 0a73d40..6fda560 100644
39203--- a/drivers/staging/vt6656/hostap.c
39204+++ b/drivers/staging/vt6656/hostap.c
39205@@ -60,14 +60,13 @@ static int msglevel =MSG_LEVEL_INFO;
39206 *
39207 */
39208
39209+static net_device_ops_no_const apdev_netdev_ops;
39210+
39211 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
39212 {
39213 PSDevice apdev_priv;
39214 struct net_device *dev = pDevice->dev;
39215 int ret;
39216- const struct net_device_ops apdev_netdev_ops = {
39217- .ndo_start_xmit = pDevice->tx_80211,
39218- };
39219
39220 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
39221
39222@@ -79,6 +78,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
39223 *apdev_priv = *pDevice;
39224 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
39225
39226+ /* only half broken now */
39227+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
39228 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
39229
39230 pDevice->apdev->type = ARPHRD_IEEE80211;
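
Both the vt6655 and vt6656 hunks follow the same recipe: the ops table can no longer be a const stack local because the xmit hook is only known at runtime, so it becomes a static object of the no_const variant and just that one slot is assigned before use. A hedged sketch, with an invented driver type standing in for PSDevice:

    static net_device_ops_no_const apdev_netdev_ops;  /* writable singleton */

    static int enable_ap_mode(struct vnt_private *priv)
    {
            /* late-bind the only member that depends on the device */
            apdev_netdev_ops.ndo_start_xmit = priv->tx_80211;
            priv->apdev->netdev_ops = &apdev_netdev_ops;
            return 0;
    }

The singleton is shared by all device instances, which is the tradeoff the in-patch "only half broken now" comment alludes to.
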
39231diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
39232index 56c8e60..1920c63 100644
39233--- a/drivers/staging/zcache/tmem.c
39234+++ b/drivers/staging/zcache/tmem.c
39235@@ -39,7 +39,7 @@
39236 * A tmem host implementation must use this function to register callbacks
39237 * for memory allocation.
39238 */
39239-static struct tmem_hostops tmem_hostops;
39240+static tmem_hostops_no_const tmem_hostops;
39241
39242 static void tmem_objnode_tree_init(void);
39243
39244@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
39245 * A tmem host implementation must use this function to register
39246 * callbacks for a page-accessible memory (PAM) implementation
39247 */
39248-static struct tmem_pamops tmem_pamops;
39249+static tmem_pamops_no_const tmem_pamops;
39250
39251 void tmem_register_pamops(struct tmem_pamops *m)
39252 {
39253diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
39254index 0d4aa82..f7832d4 100644
39255--- a/drivers/staging/zcache/tmem.h
39256+++ b/drivers/staging/zcache/tmem.h
39257@@ -180,6 +180,7 @@ struct tmem_pamops {
39258 void (*new_obj)(struct tmem_obj *);
39259 int (*replace_in_obj)(void *, struct tmem_obj *);
39260 };
39261+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
39262 extern void tmem_register_pamops(struct tmem_pamops *m);
39263
39264 /* memory allocation methods provided by the host implementation */
39265@@ -189,6 +190,7 @@ struct tmem_hostops {
39266 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
39267 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
39268 };
39269+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
39270 extern void tmem_register_hostops(struct tmem_hostops *m);
39271
39272 /* core tmem accessor functions */
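
The tmem.h hunks show the escape hatch used when a whole ops struct must stay writable: the constify plugin force-consts structs made up of function pointers, and a __no_const typedef opts out one declared instance while every other instance of the struct stays read-only. Minimal sketch:

    struct ops { void (*fn)(void); };             /* constified everywhere */
    typedef struct ops __no_const ops_no_const;   /* per-alias opt-out */

    static ops_no_const late_bound_ops;           /* filled at register time */
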
39273diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
39274index 13fe16c..cbdc39a 100644
39275--- a/drivers/target/target_core_transport.c
39276+++ b/drivers/target/target_core_transport.c
39277@@ -1085,7 +1085,7 @@ struct se_device *transport_add_device_to_core_hba(
39278 spin_lock_init(&dev->se_port_lock);
39279 spin_lock_init(&dev->se_tmr_lock);
39280 spin_lock_init(&dev->qf_cmd_lock);
39281- atomic_set(&dev->dev_ordered_id, 0);
39282+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
39283
39284 se_dev_set_default_attribs(dev, dev_limits);
39285
39286@@ -1275,7 +1275,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
39287 * Used to determine when ORDERED commands should go from
39288 * Dormant to Active status.
39289 */
39290- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
39291+ cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
39292 smp_mb__after_atomic_inc();
39293 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
39294 cmd->se_ordered_id, cmd->sam_task_attr,
39295diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
39296index 0a6a0bc..5501b06 100644
39297--- a/drivers/tty/cyclades.c
39298+++ b/drivers/tty/cyclades.c
39299@@ -1589,10 +1589,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
39300 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
39301 info->port.count);
39302 #endif
39303- info->port.count++;
39304+ atomic_inc(&info->port.count);
39305 #ifdef CY_DEBUG_COUNT
39306 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
39307- current->pid, info->port.count);
39308+ current->pid, atomic_read(&info->port.count));
39309 #endif
39310
39311 /*
39312@@ -3989,7 +3989,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
39313 for (j = 0; j < cy_card[i].nports; j++) {
39314 info = &cy_card[i].ports[j];
39315
39316- if (info->port.count) {
39317+ if (atomic_read(&info->port.count)) {
39318 /* XXX is the ldisc num worth this? */
39319 struct tty_struct *tty;
39320 struct tty_ldisc *ld;
39321diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
39322index 13ee53b..418d164 100644
39323--- a/drivers/tty/hvc/hvc_console.c
39324+++ b/drivers/tty/hvc/hvc_console.c
39325@@ -338,7 +338,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
39326
39327 spin_lock_irqsave(&hp->port.lock, flags);
39328 /* Check and then increment for fast path open. */
39329- if (hp->port.count++ > 0) {
39330+ if (atomic_inc_return(&hp->port.count) > 1) {
39331 spin_unlock_irqrestore(&hp->port.lock, flags);
39332 hvc_kick();
39333 return 0;
39334@@ -388,7 +388,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
39335
39336 spin_lock_irqsave(&hp->port.lock, flags);
39337
39338- if (--hp->port.count == 0) {
39339+ if (atomic_dec_return(&hp->port.count) == 0) {
39340 spin_unlock_irqrestore(&hp->port.lock, flags);
39341 /* We are done with the tty pointer now. */
39342 tty_port_tty_set(&hp->port, NULL);
39343@@ -406,9 +406,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
39344 */
39345 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
39346 } else {
39347- if (hp->port.count < 0)
39348+ if (atomic_read(&hp->port.count) < 0)
39349 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
39350- hp->vtermno, hp->port.count);
39351+ hp->vtermno, atomic_read(&hp->port.count));
39352 spin_unlock_irqrestore(&hp->port.lock, flags);
39353 }
39354 }
39355@@ -438,12 +438,12 @@ static void hvc_hangup(struct tty_struct *tty)
39356 * open->hangup case this can be called after the final close so prevent
39357 * that from happening for now.
39358 */
39359- if (hp->port.count <= 0) {
39360+ if (atomic_read(&hp->port.count) <= 0) {
39361 spin_unlock_irqrestore(&hp->port.lock, flags);
39362 return;
39363 }
39364
39365- hp->port.count = 0;
39366+ atomic_set(&hp->port.count, 0);
39367 spin_unlock_irqrestore(&hp->port.lock, flags);
39368 tty_port_tty_set(&hp->port, NULL);
39369
39370@@ -491,7 +491,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
39371 return -EPIPE;
39372
39373 /* FIXME what's this (unprotected) check for? */
39374- if (hp->port.count <= 0)
39375+ if (atomic_read(&hp->port.count) <= 0)
39376 return -EIO;
39377
39378 spin_lock_irqsave(&hp->lock, flags);
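
One subtlety in the hvc conversion above: the original fast path tests the old value ("hp->port.count++ > 0"), while atomic_inc_return() yields the new value, so the equivalent comparison is "> 1"; likewise "--count == 0" maps to atomic_dec_return(&count) == 0. A userspace check of the equivalence, using C11 atomics as a stand-in:

    #include <assert.h>
    #include <stdatomic.h>

    int main(void)
    {
            int plain = 0;
            atomic_int at = 0;
            int was_open_old = plain++ > 0;                      /* old value */
            int was_open_new = atomic_fetch_add(&at, 1) + 1 > 1; /* new value */
            assert(was_open_old == was_open_new);
            return 0;
    }
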
39379diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
39380index cab5c7a..4cc66ea 100644
39381--- a/drivers/tty/hvc/hvcs.c
39382+++ b/drivers/tty/hvc/hvcs.c
39383@@ -83,6 +83,7 @@
39384 #include <asm/hvcserver.h>
39385 #include <asm/uaccess.h>
39386 #include <asm/vio.h>
39387+#include <asm/local.h>
39388
39389 /*
39390 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
39391@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
39392
39393 spin_lock_irqsave(&hvcsd->lock, flags);
39394
39395- if (hvcsd->port.count > 0) {
39396+ if (atomic_read(&hvcsd->port.count) > 0) {
39397 spin_unlock_irqrestore(&hvcsd->lock, flags);
39398 printk(KERN_INFO "HVCS: vterm state unchanged. "
39399 "The hvcs device node is still in use.\n");
39400@@ -1132,7 +1133,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
39401 }
39402 }
39403
39404- hvcsd->port.count = 0;
39405+ atomic_set(&hvcsd->port.count, 0);
39406 hvcsd->port.tty = tty;
39407 tty->driver_data = hvcsd;
39408
39409@@ -1185,7 +1186,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
39410 unsigned long flags;
39411
39412 spin_lock_irqsave(&hvcsd->lock, flags);
39413- hvcsd->port.count++;
39414+ atomic_inc(&hvcsd->port.count);
39415 hvcsd->todo_mask |= HVCS_SCHED_READ;
39416 spin_unlock_irqrestore(&hvcsd->lock, flags);
39417
39418@@ -1221,7 +1222,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
39419 hvcsd = tty->driver_data;
39420
39421 spin_lock_irqsave(&hvcsd->lock, flags);
39422- if (--hvcsd->port.count == 0) {
39423+ if (atomic_dec_and_test(&hvcsd->port.count)) {
39424
39425 vio_disable_interrupts(hvcsd->vdev);
39426
39427@@ -1246,10 +1247,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
39428
39429 free_irq(irq, hvcsd);
39430 return;
39431- } else if (hvcsd->port.count < 0) {
39432+ } else if (atomic_read(&hvcsd->port.count) < 0) {
39433 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
39434 " is missmanaged.\n",
39435- hvcsd->vdev->unit_address, hvcsd->port.count);
39436+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
39437 }
39438
39439 spin_unlock_irqrestore(&hvcsd->lock, flags);
39440@@ -1271,7 +1272,7 @@ static void hvcs_hangup(struct tty_struct * tty)
39441
39442 spin_lock_irqsave(&hvcsd->lock, flags);
39443 /* Preserve this so that we know how many kref refs to put */
39444- temp_open_count = hvcsd->port.count;
39445+ temp_open_count = atomic_read(&hvcsd->port.count);
39446
39447 /*
39448 * Don't kref put inside the spinlock because the destruction
39449@@ -1286,7 +1287,7 @@ static void hvcs_hangup(struct tty_struct * tty)
39450 tty->driver_data = NULL;
39451 hvcsd->port.tty = NULL;
39452
39453- hvcsd->port.count = 0;
39454+ atomic_set(&hvcsd->port.count, 0);
39455
39456 /* This will drop any buffered data on the floor which is OK in a hangup
39457 * scenario. */
39458@@ -1357,7 +1358,7 @@ static int hvcs_write(struct tty_struct *tty,
39459 * the middle of a write operation? This is a crummy place to do this
39460 * but we want to keep it all in the spinlock.
39461 */
39462- if (hvcsd->port.count <= 0) {
39463+ if (atomic_read(&hvcsd->port.count) <= 0) {
39464 spin_unlock_irqrestore(&hvcsd->lock, flags);
39465 return -ENODEV;
39466 }
39467@@ -1431,7 +1432,7 @@ static int hvcs_write_room(struct tty_struct *tty)
39468 {
39469 struct hvcs_struct *hvcsd = tty->driver_data;
39470
39471- if (!hvcsd || hvcsd->port.count <= 0)
39472+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
39473 return 0;
39474
39475 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
39476diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
39477index 160f0ad..588b853 100644
39478--- a/drivers/tty/ipwireless/tty.c
39479+++ b/drivers/tty/ipwireless/tty.c
39480@@ -29,6 +29,7 @@
39481 #include <linux/tty_driver.h>
39482 #include <linux/tty_flip.h>
39483 #include <linux/uaccess.h>
39484+#include <asm/local.h>
39485
39486 #include "tty.h"
39487 #include "network.h"
39488@@ -99,10 +100,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
39489 mutex_unlock(&tty->ipw_tty_mutex);
39490 return -ENODEV;
39491 }
39492- if (tty->port.count == 0)
39493+ if (atomic_read(&tty->port.count) == 0)
39494 tty->tx_bytes_queued = 0;
39495
39496- tty->port.count++;
39497+ atomic_inc(&tty->port.count);
39498
39499 tty->port.tty = linux_tty;
39500 linux_tty->driver_data = tty;
39501@@ -118,9 +119,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
39502
39503 static void do_ipw_close(struct ipw_tty *tty)
39504 {
39505- tty->port.count--;
39506-
39507- if (tty->port.count == 0) {
39508+ if (atomic_dec_return(&tty->port.count) == 0) {
39509 struct tty_struct *linux_tty = tty->port.tty;
39510
39511 if (linux_tty != NULL) {
39512@@ -141,7 +140,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
39513 return;
39514
39515 mutex_lock(&tty->ipw_tty_mutex);
39516- if (tty->port.count == 0) {
39517+ if (atomic_read(&tty->port.count) == 0) {
39518 mutex_unlock(&tty->ipw_tty_mutex);
39519 return;
39520 }
39521@@ -170,7 +169,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
39522 return;
39523 }
39524
39525- if (!tty->port.count) {
39526+ if (!atomic_read(&tty->port.count)) {
39527 mutex_unlock(&tty->ipw_tty_mutex);
39528 return;
39529 }
39530@@ -212,7 +211,7 @@ static int ipw_write(struct tty_struct *linux_tty,
39531 return -ENODEV;
39532
39533 mutex_lock(&tty->ipw_tty_mutex);
39534- if (!tty->port.count) {
39535+ if (!atomic_read(&tty->port.count)) {
39536 mutex_unlock(&tty->ipw_tty_mutex);
39537 return -EINVAL;
39538 }
39539@@ -252,7 +251,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
39540 if (!tty)
39541 return -ENODEV;
39542
39543- if (!tty->port.count)
39544+ if (!atomic_read(&tty->port.count))
39545 return -EINVAL;
39546
39547 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
39548@@ -294,7 +293,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
39549 if (!tty)
39550 return 0;
39551
39552- if (!tty->port.count)
39553+ if (!atomic_read(&tty->port.count))
39554 return 0;
39555
39556 return tty->tx_bytes_queued;
39557@@ -375,7 +374,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
39558 if (!tty)
39559 return -ENODEV;
39560
39561- if (!tty->port.count)
39562+ if (!atomic_read(&tty->port.count))
39563 return -EINVAL;
39564
39565 return get_control_lines(tty);
39566@@ -391,7 +390,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
39567 if (!tty)
39568 return -ENODEV;
39569
39570- if (!tty->port.count)
39571+ if (!atomic_read(&tty->port.count))
39572 return -EINVAL;
39573
39574 return set_control_lines(tty, set, clear);
39575@@ -405,7 +404,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
39576 if (!tty)
39577 return -ENODEV;
39578
39579- if (!tty->port.count)
39580+ if (!atomic_read(&tty->port.count))
39581 return -EINVAL;
39582
39583 /* FIXME: Exactly how is the tty object locked here .. */
39584@@ -561,7 +560,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
39585 * are gone */
39586 mutex_lock(&ttyj->ipw_tty_mutex);
39587 }
39588- while (ttyj->port.count)
39589+ while (atomic_read(&ttyj->port.count))
39590 do_ipw_close(ttyj);
39591 ipwireless_disassociate_network_ttys(network,
39592 ttyj->channel_idx);
39593diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
39594index 56e616b..9d9f10a 100644
39595--- a/drivers/tty/moxa.c
39596+++ b/drivers/tty/moxa.c
39597@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
39598 }
39599
39600 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
39601- ch->port.count++;
39602+ atomic_inc(&ch->port.count);
39603 tty->driver_data = ch;
39604 tty_port_tty_set(&ch->port, tty);
39605 mutex_lock(&ch->port.mutex);
39606diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
39607index 1e8e8ce..a9efc93 100644
39608--- a/drivers/tty/n_gsm.c
39609+++ b/drivers/tty/n_gsm.c
39610@@ -1638,7 +1638,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
39611 kref_init(&dlci->ref);
39612 mutex_init(&dlci->mutex);
39613 dlci->fifo = &dlci->_fifo;
39614- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
39615+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
39616 kfree(dlci);
39617 return NULL;
39618 }
39619@@ -2925,7 +2925,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
39620 struct gsm_dlci *dlci = tty->driver_data;
39621 struct tty_port *port = &dlci->port;
39622
39623- port->count++;
39624+ atomic_inc(&port->count);
39625 dlci_get(dlci);
39626 dlci_get(dlci->gsm->dlci[0]);
39627 mux_get(dlci->gsm);
39628diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
39629index 8c0b7b4..e88f052 100644
39630--- a/drivers/tty/n_tty.c
39631+++ b/drivers/tty/n_tty.c
39632@@ -2142,6 +2142,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
39633 {
39634 *ops = tty_ldisc_N_TTY;
39635 ops->owner = NULL;
39636- ops->refcount = ops->flags = 0;
39637+ atomic_set(&ops->refcount, 0);
39638+ ops->flags = 0;
39639 }
39640 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
39641diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
39642index 8cf8d0a..4ef9ed0 100644
39643--- a/drivers/tty/pty.c
39644+++ b/drivers/tty/pty.c
39645@@ -730,8 +730,10 @@ static void __init unix98_pty_init(void)
39646 panic("Couldn't register Unix98 pts driver");
39647
39648 /* Now create the /dev/ptmx special device */
39649+ pax_open_kernel();
39650 tty_default_fops(&ptmx_fops);
39651- ptmx_fops.open = ptmx_open;
39652+ *(void **)&ptmx_fops.open = ptmx_open;
39653+ pax_close_kernel();
39654
39655 cdev_init(&ptmx_cdev, &ptmx_fops);
39656 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
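
With the tty fops constified, the one-time ptmx .open fixup above has to be bracketed by pax_open_kernel()/pax_close_kernel(), which temporarily lift kernel write protection (on x86, by toggling CR0.WP); the *(void **) cast defeats the const qualifier for that single store. A conceptual helper, not the real implementation:

    static void set_rodata_ptr(void **slot, void *val)
    {
            pax_open_kernel();   /* allow writes to read-only data */
            *slot = val;
            pax_close_kernel();  /* restore write protection */
    }
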
39657diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
39658index 9700d34..df7520c 100644
39659--- a/drivers/tty/rocket.c
39660+++ b/drivers/tty/rocket.c
39661@@ -924,7 +924,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
39662 tty->driver_data = info;
39663 tty_port_tty_set(port, tty);
39664
39665- if (port->count++ == 0) {
39666+ if (atomic_inc_return(&port->count) == 1) {
39667 atomic_inc(&rp_num_ports_open);
39668
39669 #ifdef ROCKET_DEBUG_OPEN
39670@@ -933,7 +933,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
39671 #endif
39672 }
39673 #ifdef ROCKET_DEBUG_OPEN
39674- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
39675+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
39676 #endif
39677
39678 /*
39679@@ -1528,7 +1528,7 @@ static void rp_hangup(struct tty_struct *tty)
39680 spin_unlock_irqrestore(&info->port.lock, flags);
39681 return;
39682 }
39683- if (info->port.count)
39684+ if (atomic_read(&info->port.count))
39685 atomic_dec(&rp_num_ports_open);
39686 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
39687 spin_unlock_irqrestore(&info->port.lock, flags);
39688diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
39689index 1002054..dd644a8 100644
39690--- a/drivers/tty/serial/kgdboc.c
39691+++ b/drivers/tty/serial/kgdboc.c
39692@@ -24,8 +24,9 @@
39693 #define MAX_CONFIG_LEN 40
39694
39695 static struct kgdb_io kgdboc_io_ops;
39696+static struct kgdb_io kgdboc_io_ops_console;
39697
39698-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
39699+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
39700 static int configured = -1;
39701
39702 static char config[MAX_CONFIG_LEN];
39703@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
39704 kgdboc_unregister_kbd();
39705 if (configured == 1)
39706 kgdb_unregister_io_module(&kgdboc_io_ops);
39707+ else if (configured == 2)
39708+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
39709 }
39710
39711 static int configure_kgdboc(void)
39712@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
39713 int err;
39714 char *cptr = config;
39715 struct console *cons;
39716+ int is_console = 0;
39717
39718 err = kgdboc_option_setup(config);
39719 if (err || !strlen(config) || isspace(config[0]))
39720 goto noconfig;
39721
39722 err = -ENODEV;
39723- kgdboc_io_ops.is_console = 0;
39724 kgdb_tty_driver = NULL;
39725
39726 kgdboc_use_kms = 0;
39727@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
39728 int idx;
39729 if (cons->device && cons->device(cons, &idx) == p &&
39730 idx == tty_line) {
39731- kgdboc_io_ops.is_console = 1;
39732+ is_console = 1;
39733 break;
39734 }
39735 cons = cons->next;
39736@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
39737 kgdb_tty_line = tty_line;
39738
39739 do_register:
39740- err = kgdb_register_io_module(&kgdboc_io_ops);
39741+ if (is_console) {
39742+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
39743+ configured = 2;
39744+ } else {
39745+ err = kgdb_register_io_module(&kgdboc_io_ops);
39746+ configured = 1;
39747+ }
39748 if (err)
39749 goto noconfig;
39750
39751@@ -205,8 +214,6 @@ do_register:
39752 if (err)
39753 goto nmi_con_failed;
39754
39755- configured = 1;
39756-
39757 return 0;
39758
39759 nmi_con_failed:
39760@@ -223,7 +230,7 @@ noconfig:
39761 static int __init init_kgdboc(void)
39762 {
39763 /* Already configured? */
39764- if (configured == 1)
39765+ if (configured >= 1)
39766 return 0;
39767
39768 return configure_kgdboc();
39769@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
39770 if (config[len - 1] == '\n')
39771 config[len - 1] = '\0';
39772
39773- if (configured == 1)
39774+ if (configured >= 1)
39775 cleanup_kgdboc();
39776
39777 /* Go and configure with the new params. */
39778@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
39779 .post_exception = kgdboc_post_exp_handler,
39780 };
39781
39782+static struct kgdb_io kgdboc_io_ops_console = {
39783+ .name = "kgdboc",
39784+ .read_char = kgdboc_get_char,
39785+ .write_char = kgdboc_put_char,
39786+ .pre_exception = kgdboc_pre_exp_handler,
39787+ .post_exception = kgdboc_post_exp_handler,
39788+ .is_console = 1
39789+};
39790+
39791 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
39792 /* This is only available if kgdboc is a built in for early debugging */
39793 static int __init kgdboc_early_init(char *opt)
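
The kgdboc rework above avoids writing is_console into a (potentially read-only) kgdb_io struct at runtime: it provides two fully initialized static instances and records which one was registered in `configured` (1 = plain, 2 = console), so cleanup_kgdboc() can unregister the right one. A generic sketch of the pattern, with register_io standing in for kgdb_register_io_module and other names invented:

    struct io_ops { int is_console; /* plus handler hooks */ };

    static const struct io_ops ops_plain   = { .is_console = 0 };
    static const struct io_ops ops_console = { .is_console = 1 };

    static int configured;   /* 0 = none, 1 = plain, 2 = console */

    static int do_configure(int is_console)
    {
            int err = register_io(is_console ? &ops_console : &ops_plain);
            if (!err)
                    configured = is_console ? 2 : 1;  /* remember variant */
            return err;
    }
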
39794diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
39795index 7f04717..0f3794f 100644
39796--- a/drivers/tty/serial/samsung.c
39797+++ b/drivers/tty/serial/samsung.c
39798@@ -445,11 +445,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
39799 }
39800 }
39801
39802+static int s3c64xx_serial_startup(struct uart_port *port);
39803 static int s3c24xx_serial_startup(struct uart_port *port)
39804 {
39805 struct s3c24xx_uart_port *ourport = to_ourport(port);
39806 int ret;
39807
39808+ /* Startup sequence is different for s3c64xx and higher SoC's */
39809+ if (s3c24xx_serial_has_interrupt_mask(port))
39810+ return s3c64xx_serial_startup(port);
39811+
39812 dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
39813 port->mapbase, port->membase);
39814
39815@@ -1115,10 +1120,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
39816 /* setup info for port */
39817 port->dev = &platdev->dev;
39818
39819- /* Startup sequence is different for s3c64xx and higher SoC's */
39820- if (s3c24xx_serial_has_interrupt_mask(port))
39821- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
39822-
39823 port->uartclk = 1;
39824
39825 if (cfg->uart_flags & UPF_CONS_FLOW) {
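
The samsung.c change keeps s3c24xx_serial_ops immutable by moving the SoC decision from a one-time method-slot rewrite at init into the entry point itself. The resulting shape, with the original request_irq path elided:

    static int s3c24xx_serial_startup(struct uart_port *port)
    {
            if (s3c24xx_serial_has_interrupt_mask(port))  /* s3c64xx+ SoC */
                    return s3c64xx_serial_startup(port);
            /* original s3c24xx interrupt setup continues here */
            return 0;
    }
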
39826diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
39827index 0fcfd98..8244fce 100644
39828--- a/drivers/tty/serial/serial_core.c
39829+++ b/drivers/tty/serial/serial_core.c
39830@@ -1408,7 +1408,7 @@ static void uart_hangup(struct tty_struct *tty)
39831 uart_flush_buffer(tty);
39832 uart_shutdown(tty, state);
39833 spin_lock_irqsave(&port->lock, flags);
39834- port->count = 0;
39835+ atomic_set(&port->count, 0);
39836 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
39837 spin_unlock_irqrestore(&port->lock, flags);
39838 tty_port_tty_set(port, NULL);
39839@@ -1504,7 +1504,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
39840 goto end;
39841 }
39842
39843- port->count++;
39844+ atomic_inc(&port->count);
39845 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
39846 retval = -ENXIO;
39847 goto err_dec_count;
39848@@ -1531,7 +1531,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
39849 /*
39850 * Make sure the device is in D0 state.
39851 */
39852- if (port->count == 1)
39853+ if (atomic_read(&port->count) == 1)
39854 uart_change_pm(state, 0);
39855
39856 /*
39857@@ -1549,7 +1549,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
39858 end:
39859 return retval;
39860 err_dec_count:
39861- port->count--;
39862+ atomic_dec(&port->count);
39863 mutex_unlock(&port->mutex);
39864 goto end;
39865 }
39866diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
39867index 70e3a52..5742052 100644
39868--- a/drivers/tty/synclink.c
39869+++ b/drivers/tty/synclink.c
39870@@ -3095,7 +3095,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
39871
39872 if (debug_level >= DEBUG_LEVEL_INFO)
39873 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
39874- __FILE__,__LINE__, info->device_name, info->port.count);
39875+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
39876
39877 if (tty_port_close_start(&info->port, tty, filp) == 0)
39878 goto cleanup;
39879@@ -3113,7 +3113,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
39880 cleanup:
39881 if (debug_level >= DEBUG_LEVEL_INFO)
39882 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
39883- tty->driver->name, info->port.count);
39884+ tty->driver->name, atomic_read(&info->port.count));
39885
39886 } /* end of mgsl_close() */
39887
39888@@ -3212,8 +3212,8 @@ static void mgsl_hangup(struct tty_struct *tty)
39889
39890 mgsl_flush_buffer(tty);
39891 shutdown(info);
39892-
39893- info->port.count = 0;
39894+
39895+ atomic_set(&info->port.count, 0);
39896 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
39897 info->port.tty = NULL;
39898
39899@@ -3302,12 +3302,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
39900
39901 if (debug_level >= DEBUG_LEVEL_INFO)
39902 printk("%s(%d):block_til_ready before block on %s count=%d\n",
39903- __FILE__,__LINE__, tty->driver->name, port->count );
39904+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
39905
39906 spin_lock_irqsave(&info->irq_spinlock, flags);
39907 if (!tty_hung_up_p(filp)) {
39908 extra_count = true;
39909- port->count--;
39910+ atomic_dec(&port->count);
39911 }
39912 spin_unlock_irqrestore(&info->irq_spinlock, flags);
39913 port->blocked_open++;
39914@@ -3336,7 +3336,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
39915
39916 if (debug_level >= DEBUG_LEVEL_INFO)
39917 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
39918- __FILE__,__LINE__, tty->driver->name, port->count );
39919+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
39920
39921 tty_unlock(tty);
39922 schedule();
39923@@ -3348,12 +3348,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
39924
39925 /* FIXME: Racy on hangup during close wait */
39926 if (extra_count)
39927- port->count++;
39928+ atomic_inc(&port->count);
39929 port->blocked_open--;
39930
39931 if (debug_level >= DEBUG_LEVEL_INFO)
39932 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
39933- __FILE__,__LINE__, tty->driver->name, port->count );
39934+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
39935
39936 if (!retval)
39937 port->flags |= ASYNC_NORMAL_ACTIVE;
39938@@ -3405,7 +3405,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
39939
39940 if (debug_level >= DEBUG_LEVEL_INFO)
39941 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
39942- __FILE__,__LINE__,tty->driver->name, info->port.count);
39943+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
39944
39945 /* If port is closing, signal caller to try again */
39946 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
39947@@ -3424,10 +3424,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
39948 spin_unlock_irqrestore(&info->netlock, flags);
39949 goto cleanup;
39950 }
39951- info->port.count++;
39952+ atomic_inc(&info->port.count);
39953 spin_unlock_irqrestore(&info->netlock, flags);
39954
39955- if (info->port.count == 1) {
39956+ if (atomic_read(&info->port.count) == 1) {
39957 /* 1st open on this device, init hardware */
39958 retval = startup(info);
39959 if (retval < 0)
39960@@ -3451,8 +3451,8 @@ cleanup:
39961 if (retval) {
39962 if (tty->count == 1)
39963 info->port.tty = NULL; /* tty layer will release tty struct */
39964- if(info->port.count)
39965- info->port.count--;
39966+ if (atomic_read(&info->port.count))
39967+ atomic_dec(&info->port.count);
39968 }
39969
39970 return retval;
39971@@ -7661,7 +7661,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
39972 unsigned short new_crctype;
39973
39974 /* return error if TTY interface open */
39975- if (info->port.count)
39976+ if (atomic_read(&info->port.count))
39977 return -EBUSY;
39978
39979 switch (encoding)
39980@@ -7756,7 +7756,7 @@ static int hdlcdev_open(struct net_device *dev)
39981
39982 /* arbitrate between network and tty opens */
39983 spin_lock_irqsave(&info->netlock, flags);
39984- if (info->port.count != 0 || info->netcount != 0) {
39985+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
39986 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
39987 spin_unlock_irqrestore(&info->netlock, flags);
39988 return -EBUSY;
39989@@ -7842,7 +7842,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
39990 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
39991
39992 /* return error if TTY interface open */
39993- if (info->port.count)
39994+ if (atomic_read(&info->port.count))
39995 return -EBUSY;
39996
39997 if (cmd != SIOCWANDEV)
39998diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
39999index b38e954..ce45b38 100644
40000--- a/drivers/tty/synclink_gt.c
40001+++ b/drivers/tty/synclink_gt.c
40002@@ -671,7 +671,7 @@ static int open(struct tty_struct *tty, struct file *filp)
40003 tty->driver_data = info;
40004 info->port.tty = tty;
40005
40006- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
40007+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
40008
40009 /* If port is closing, signal caller to try again */
40010 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
40011@@ -692,10 +692,10 @@ static int open(struct tty_struct *tty, struct file *filp)
40012 mutex_unlock(&info->port.mutex);
40013 goto cleanup;
40014 }
40015- info->port.count++;
40016+ atomic_inc(&info->port.count);
40017 spin_unlock_irqrestore(&info->netlock, flags);
40018
40019- if (info->port.count == 1) {
40020+ if (atomic_read(&info->port.count) == 1) {
40021 /* 1st open on this device, init hardware */
40022 retval = startup(info);
40023 if (retval < 0) {
40024@@ -716,8 +716,8 @@ cleanup:
40025 if (retval) {
40026 if (tty->count == 1)
40027 info->port.tty = NULL; /* tty layer will release tty struct */
40028- if(info->port.count)
40029- info->port.count--;
40030+ if(atomic_read(&info->port.count))
40031+ atomic_dec(&info->port.count);
40032 }
40033
40034 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
40035@@ -730,7 +730,7 @@ static void close(struct tty_struct *tty, struct file *filp)
40036
40037 if (sanity_check(info, tty->name, "close"))
40038 return;
40039- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
40040+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
40041
40042 if (tty_port_close_start(&info->port, tty, filp) == 0)
40043 goto cleanup;
40044@@ -747,7 +747,7 @@ static void close(struct tty_struct *tty, struct file *filp)
40045 tty_port_close_end(&info->port, tty);
40046 info->port.tty = NULL;
40047 cleanup:
40048- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
40049+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
40050 }
40051
40052 static void hangup(struct tty_struct *tty)
40053@@ -765,7 +765,7 @@ static void hangup(struct tty_struct *tty)
40054 shutdown(info);
40055
40056 spin_lock_irqsave(&info->port.lock, flags);
40057- info->port.count = 0;
40058+ atomic_set(&info->port.count, 0);
40059 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
40060 info->port.tty = NULL;
40061 spin_unlock_irqrestore(&info->port.lock, flags);
40062@@ -1450,7 +1450,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
40063 unsigned short new_crctype;
40064
40065 /* return error if TTY interface open */
40066- if (info->port.count)
40067+ if (atomic_read(&info->port.count))
40068 return -EBUSY;
40069
40070 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
40071@@ -1545,7 +1545,7 @@ static int hdlcdev_open(struct net_device *dev)
40072
40073 /* arbitrate between network and tty opens */
40074 spin_lock_irqsave(&info->netlock, flags);
40075- if (info->port.count != 0 || info->netcount != 0) {
40076+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
40077 DBGINFO(("%s hdlc_open busy\n", dev->name));
40078 spin_unlock_irqrestore(&info->netlock, flags);
40079 return -EBUSY;
40080@@ -1630,7 +1630,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
40081 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
40082
40083 /* return error if TTY interface open */
40084- if (info->port.count)
40085+ if (atomic_read(&info->port.count))
40086 return -EBUSY;
40087
40088 if (cmd != SIOCWANDEV)
40089@@ -2419,7 +2419,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
40090 if (port == NULL)
40091 continue;
40092 spin_lock(&port->lock);
40093- if ((port->port.count || port->netcount) &&
40094+ if ((atomic_read(&port->port.count) || port->netcount) &&
40095 port->pending_bh && !port->bh_running &&
40096 !port->bh_requested) {
40097 DBGISR(("%s bh queued\n", port->device_name));
40098@@ -3308,7 +3308,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
40099 spin_lock_irqsave(&info->lock, flags);
40100 if (!tty_hung_up_p(filp)) {
40101 extra_count = true;
40102- port->count--;
40103+ atomic_dec(&port->count);
40104 }
40105 spin_unlock_irqrestore(&info->lock, flags);
40106 port->blocked_open++;
40107@@ -3345,7 +3345,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
40108 remove_wait_queue(&port->open_wait, &wait);
40109
40110 if (extra_count)
40111- port->count++;
40112+ atomic_inc(&port->count);
40113 port->blocked_open--;
40114
40115 if (!retval)
40116diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
40117index f17d9f3..27a041b 100644
40118--- a/drivers/tty/synclinkmp.c
40119+++ b/drivers/tty/synclinkmp.c
40120@@ -751,7 +751,7 @@ static int open(struct tty_struct *tty, struct file *filp)
40121
40122 if (debug_level >= DEBUG_LEVEL_INFO)
40123 printk("%s(%d):%s open(), old ref count = %d\n",
40124- __FILE__,__LINE__,tty->driver->name, info->port.count);
40125+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
40126
40127 /* If port is closing, signal caller to try again */
40128 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
40129@@ -770,10 +770,10 @@ static int open(struct tty_struct *tty, struct file *filp)
40130 spin_unlock_irqrestore(&info->netlock, flags);
40131 goto cleanup;
40132 }
40133- info->port.count++;
40134+ atomic_inc(&info->port.count);
40135 spin_unlock_irqrestore(&info->netlock, flags);
40136
40137- if (info->port.count == 1) {
40138+ if (atomic_read(&info->port.count) == 1) {
40139 /* 1st open on this device, init hardware */
40140 retval = startup(info);
40141 if (retval < 0)
40142@@ -797,8 +797,8 @@ cleanup:
40143 if (retval) {
40144 if (tty->count == 1)
40145 info->port.tty = NULL; /* tty layer will release tty struct */
40146- if(info->port.count)
40147- info->port.count--;
40148+ if(atomic_read(&info->port.count))
40149+ atomic_dec(&info->port.count);
40150 }
40151
40152 return retval;
40153@@ -816,7 +816,7 @@ static void close(struct tty_struct *tty, struct file *filp)
40154
40155 if (debug_level >= DEBUG_LEVEL_INFO)
40156 printk("%s(%d):%s close() entry, count=%d\n",
40157- __FILE__,__LINE__, info->device_name, info->port.count);
40158+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
40159
40160 if (tty_port_close_start(&info->port, tty, filp) == 0)
40161 goto cleanup;
40162@@ -835,7 +835,7 @@ static void close(struct tty_struct *tty, struct file *filp)
40163 cleanup:
40164 if (debug_level >= DEBUG_LEVEL_INFO)
40165 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
40166- tty->driver->name, info->port.count);
40167+ tty->driver->name, atomic_read(&info->port.count));
40168 }
40169
40170 /* Called by tty_hangup() when a hangup is signaled.
40171@@ -858,7 +858,7 @@ static void hangup(struct tty_struct *tty)
40172 shutdown(info);
40173
40174 spin_lock_irqsave(&info->port.lock, flags);
40175- info->port.count = 0;
40176+ atomic_set(&info->port.count, 0);
40177 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
40178 info->port.tty = NULL;
40179 spin_unlock_irqrestore(&info->port.lock, flags);
40180@@ -1566,7 +1566,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
40181 unsigned short new_crctype;
40182
40183 /* return error if TTY interface open */
40184- if (info->port.count)
40185+ if (atomic_read(&info->port.count))
40186 return -EBUSY;
40187
40188 switch (encoding)
40189@@ -1661,7 +1661,7 @@ static int hdlcdev_open(struct net_device *dev)
40190
40191 /* arbitrate between network and tty opens */
40192 spin_lock_irqsave(&info->netlock, flags);
40193- if (info->port.count != 0 || info->netcount != 0) {
40194+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
40195 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
40196 spin_unlock_irqrestore(&info->netlock, flags);
40197 return -EBUSY;
40198@@ -1747,7 +1747,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
40199 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
40200
40201 /* return error if TTY interface open */
40202- if (info->port.count)
40203+ if (atomic_read(&info->port.count))
40204 return -EBUSY;
40205
40206 if (cmd != SIOCWANDEV)
40207@@ -2632,7 +2632,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
40208 * do not request bottom half processing if the
40209 * device is not open in a normal mode.
40210 */
40211- if ( port && (port->port.count || port->netcount) &&
40212+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
40213 port->pending_bh && !port->bh_running &&
40214 !port->bh_requested ) {
40215 if ( debug_level >= DEBUG_LEVEL_ISR )
40216@@ -3330,12 +3330,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
40217
40218 if (debug_level >= DEBUG_LEVEL_INFO)
40219 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
40220- __FILE__,__LINE__, tty->driver->name, port->count );
40221+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
40222
40223 spin_lock_irqsave(&info->lock, flags);
40224 if (!tty_hung_up_p(filp)) {
40225 extra_count = true;
40226- port->count--;
40227+ atomic_dec(&port->count);
40228 }
40229 spin_unlock_irqrestore(&info->lock, flags);
40230 port->blocked_open++;
40231@@ -3364,7 +3364,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
40232
40233 if (debug_level >= DEBUG_LEVEL_INFO)
40234 printk("%s(%d):%s block_til_ready() count=%d\n",
40235- __FILE__,__LINE__, tty->driver->name, port->count );
40236+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
40237
40238 tty_unlock(tty);
40239 schedule();
40240@@ -3375,12 +3375,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
40241 remove_wait_queue(&port->open_wait, &wait);
40242
40243 if (extra_count)
40244- port->count++;
40245+ atomic_inc(&port->count);
40246 port->blocked_open--;
40247
40248 if (debug_level >= DEBUG_LEVEL_INFO)
40249 printk("%s(%d):%s block_til_ready() after, count=%d\n",
40250- __FILE__,__LINE__, tty->driver->name, port->count );
40251+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
40252
40253 if (!retval)
40254 port->flags |= ASYNC_NORMAL_ACTIVE;
40255diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
40256index 16ee6ce..bfcac57 100644
40257--- a/drivers/tty/sysrq.c
40258+++ b/drivers/tty/sysrq.c
40259@@ -866,7 +866,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
40260 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
40261 size_t count, loff_t *ppos)
40262 {
40263- if (count) {
40264+ if (count && capable(CAP_SYS_ADMIN)) {
40265 char c;
40266
40267 if (get_user(c, buf))
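
The sysrq hunk gates the /proc/sysrq-trigger write path on CAP_SYS_ADMIN, so unprivileged writes are consumed but never dispatched. The resulting handler logic, paraphrased (the count return value is unchanged so unprivileged writers do not spin):

    static ssize_t write_sysrq_trigger(struct file *file,
                                       const char __user *buf,
                                       size_t count, loff_t *ppos)
    {
            char c;

            if (count && capable(CAP_SYS_ADMIN)) {  /* privilege gate */
                    if (get_user(c, buf))
                            return -EFAULT;
                    __handle_sysrq(c, true);
            }
            return count;  /* silently swallow unprivileged input */
    }
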
40268diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
40269index 2ea176b..2877bc8 100644
40270--- a/drivers/tty/tty_io.c
40271+++ b/drivers/tty/tty_io.c
40272@@ -3395,7 +3395,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
40273
40274 void tty_default_fops(struct file_operations *fops)
40275 {
40276- *fops = tty_fops;
40277+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
40278 }
40279
40280 /*
40281diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
40282index 0f2a2c5..471e228 100644
40283--- a/drivers/tty/tty_ldisc.c
40284+++ b/drivers/tty/tty_ldisc.c
40285@@ -56,7 +56,7 @@ static void put_ldisc(struct tty_ldisc *ld)
40286 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
40287 struct tty_ldisc_ops *ldo = ld->ops;
40288
40289- ldo->refcount--;
40290+ atomic_dec(&ldo->refcount);
40291 module_put(ldo->owner);
40292 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
40293
40294@@ -91,7 +91,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
40295 spin_lock_irqsave(&tty_ldisc_lock, flags);
40296 tty_ldiscs[disc] = new_ldisc;
40297 new_ldisc->num = disc;
40298- new_ldisc->refcount = 0;
40299+ atomic_set(&new_ldisc->refcount, 0);
40300 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
40301
40302 return ret;
40303@@ -119,7 +119,7 @@ int tty_unregister_ldisc(int disc)
40304 return -EINVAL;
40305
40306 spin_lock_irqsave(&tty_ldisc_lock, flags);
40307- if (tty_ldiscs[disc]->refcount)
40308+ if (atomic_read(&tty_ldiscs[disc]->refcount))
40309 ret = -EBUSY;
40310 else
40311 tty_ldiscs[disc] = NULL;
40312@@ -140,7 +140,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
40313 if (ldops) {
40314 ret = ERR_PTR(-EAGAIN);
40315 if (try_module_get(ldops->owner)) {
40316- ldops->refcount++;
40317+ atomic_inc(&ldops->refcount);
40318 ret = ldops;
40319 }
40320 }
40321@@ -153,7 +153,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
40322 unsigned long flags;
40323
40324 spin_lock_irqsave(&tty_ldisc_lock, flags);
40325- ldops->refcount--;
40326+ atomic_dec(&ldops->refcount);
40327 module_put(ldops->owner);
40328 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
40329 }
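
Note the ldisc refcounts were already serialized by tty_ldisc_lock; the switch to atomic_t is not about dropping the lock but about routing every ref update through the atomic_* API that the REFCOUNT instrumentation covers. The converted get/put pair, simplified with the locking elided:

    static int get_ldops_ref(struct tty_ldisc_ops *ldops)
    {
            if (!try_module_get(ldops->owner))
                    return -EAGAIN;
            atomic_inc(&ldops->refcount);   /* overflow now traps */
            return 0;
    }

    static void put_ldops_ref(struct tty_ldisc_ops *ldops)
    {
            atomic_dec(&ldops->refcount);
            module_put(ldops->owner);
    }
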
40330diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
40331index d7bdd8d..feaef30 100644
40332--- a/drivers/tty/tty_port.c
40333+++ b/drivers/tty/tty_port.c
40334@@ -202,7 +202,7 @@ void tty_port_hangup(struct tty_port *port)
40335 unsigned long flags;
40336
40337 spin_lock_irqsave(&port->lock, flags);
40338- port->count = 0;
40339+ atomic_set(&port->count, 0);
40340 port->flags &= ~ASYNC_NORMAL_ACTIVE;
40341 if (port->tty) {
40342 set_bit(TTY_IO_ERROR, &port->tty->flags);
40343@@ -328,7 +328,7 @@ int tty_port_block_til_ready(struct tty_port *port,
40344 /* The port lock protects the port counts */
40345 spin_lock_irqsave(&port->lock, flags);
40346 if (!tty_hung_up_p(filp))
40347- port->count--;
40348+ atomic_dec(&port->count);
40349 port->blocked_open++;
40350 spin_unlock_irqrestore(&port->lock, flags);
40351
40352@@ -370,7 +370,7 @@ int tty_port_block_til_ready(struct tty_port *port,
40353 we must not mess that up further */
40354 spin_lock_irqsave(&port->lock, flags);
40355 if (!tty_hung_up_p(filp))
40356- port->count++;
40357+ atomic_inc(&port->count);
40358 port->blocked_open--;
40359 if (retval == 0)
40360 port->flags |= ASYNC_NORMAL_ACTIVE;
40361@@ -390,19 +390,19 @@ int tty_port_close_start(struct tty_port *port,
40362 return 0;
40363 }
40364
40365- if (tty->count == 1 && port->count != 1) {
40366+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
40367 printk(KERN_WARNING
40368 "tty_port_close_start: tty->count = 1 port count = %d.\n",
40369- port->count);
40370- port->count = 1;
40371+ atomic_read(&port->count));
40372+ atomic_set(&port->count, 1);
40373 }
40374- if (--port->count < 0) {
40375+ if (atomic_dec_return(&port->count) < 0) {
40376 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
40377- port->count);
40378- port->count = 0;
40379+ atomic_read(&port->count));
40380+ atomic_set(&port->count, 0);
40381 }
40382
40383- if (port->count) {
40384+ if (atomic_read(&port->count)) {
40385 spin_unlock_irqrestore(&port->lock, flags);
40386 if (port->ops->drop)
40387 port->ops->drop(port);
40388@@ -500,7 +500,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
40389 {
40390 spin_lock_irq(&port->lock);
40391 if (!tty_hung_up_p(filp))
40392- ++port->count;
40393+ atomic_inc(&port->count);
40394 spin_unlock_irq(&port->lock);
40395 tty_port_tty_set(port, tty);
40396
40397diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
40398index 681765b..d3ccdf2 100644
40399--- a/drivers/tty/vt/keyboard.c
40400+++ b/drivers/tty/vt/keyboard.c
40401@@ -660,6 +660,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
40402 kbd->kbdmode == VC_OFF) &&
40403 value != KVAL(K_SAK))
40404 return; /* SAK is allowed even in raw mode */
40405+
40406+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
40407+ {
40408+ void *func = fn_handler[value];
40409+ if (func == fn_show_state || func == fn_show_ptregs ||
40410+ func == fn_show_mem)
40411+ return;
40412+ }
40413+#endif
40414+
40415 fn_handler[value](vc);
40416 }
40417
40418@@ -1808,9 +1818,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
40419 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
40420 return -EFAULT;
40421
40422- if (!capable(CAP_SYS_TTY_CONFIG))
40423- perm = 0;
40424-
40425 switch (cmd) {
40426 case KDGKBENT:
40427 /* Ensure another thread doesn't free it under us */
40428@@ -1825,6 +1832,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
40429 spin_unlock_irqrestore(&kbd_event_lock, flags);
40430 return put_user(val, &user_kbe->kb_value);
40431 case KDSKBENT:
40432+ if (!capable(CAP_SYS_TTY_CONFIG))
40433+ perm = 0;
40434+
40435 if (!perm)
40436 return -EPERM;
40437 if (!i && v == K_NOSUCHMAP) {
40438@@ -1915,9 +1925,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
40439 int i, j, k;
40440 int ret;
40441
40442- if (!capable(CAP_SYS_TTY_CONFIG))
40443- perm = 0;
40444-
40445 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
40446 if (!kbs) {
40447 ret = -ENOMEM;
40448@@ -1951,6 +1958,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
40449 kfree(kbs);
40450 return ((p && *p) ? -EOVERFLOW : 0);
40451 case KDSKBSENT:
40452+ if (!capable(CAP_SYS_TTY_CONFIG))
40453+ perm = 0;
40454+
40455 if (!perm) {
40456 ret = -EPERM;
40457 goto reterr;
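
Two things happen in the keyboard.c diff: the console-state dump handlers (fn_show_state, fn_show_ptregs, fn_show_mem) are filtered out when GRKERNSEC_PROC hides kernel state, and the CAP_SYS_TTY_CONFIG test migrates from function entry down into the KDSKBENT/KDSKBSENT branches, so the get cases remain usable without the capability. A hedged sketch of that second pattern, where the command names and has_cap() are stand-ins for the kernel's capable() check:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum { DEMO_GET, DEMO_SET };

static bool has_cap(void) { return false; } /* pretend caller lacks CAP_SYS_TTY_CONFIG */
static int table_entry = 42;

static int demo_ioctl(int cmd, int *arg)
{
	switch (cmd) {
	case DEMO_GET:                  /* reads stay unprivileged */
		*arg = table_entry;
		return 0;
	case DEMO_SET:                  /* the check now lives beside the set case */
		if (!has_cap())
			return -EPERM;
		table_entry = *arg;
		return 0;
	}
	return -EINVAL;
}

int main(void)
{
	int v = 0;
	int rc = demo_ioctl(DEMO_GET, &v);
	printf("get: rc=%d v=%d\n", rc, v);     /* rc=0 v=42 */
	rc = demo_ioctl(DEMO_SET, &v);
	printf("set: rc=%d\n", rc);             /* rc=-EPERM */
	return 0;
}
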
40458diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
40459index 5110f36..8dc0a74 100644
40460--- a/drivers/uio/uio.c
40461+++ b/drivers/uio/uio.c
40462@@ -25,6 +25,7 @@
40463 #include <linux/kobject.h>
40464 #include <linux/cdev.h>
40465 #include <linux/uio_driver.h>
40466+#include <asm/local.h>
40467
40468 #define UIO_MAX_DEVICES (1U << MINORBITS)
40469
40470@@ -32,10 +33,10 @@ struct uio_device {
40471 struct module *owner;
40472 struct device *dev;
40473 int minor;
40474- atomic_t event;
40475+ atomic_unchecked_t event;
40476 struct fasync_struct *async_queue;
40477 wait_queue_head_t wait;
40478- int vma_count;
40479+ local_t vma_count;
40480 struct uio_info *info;
40481 struct kobject *map_dir;
40482 struct kobject *portio_dir;
40483@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
40484 struct device_attribute *attr, char *buf)
40485 {
40486 struct uio_device *idev = dev_get_drvdata(dev);
40487- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
40488+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
40489 }
40490
40491 static struct device_attribute uio_class_attributes[] = {
40492@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
40493 {
40494 struct uio_device *idev = info->uio_dev;
40495
40496- atomic_inc(&idev->event);
40497+ atomic_inc_unchecked(&idev->event);
40498 wake_up_interruptible(&idev->wait);
40499 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
40500 }
40501@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
40502 }
40503
40504 listener->dev = idev;
40505- listener->event_count = atomic_read(&idev->event);
40506+ listener->event_count = atomic_read_unchecked(&idev->event);
40507 filep->private_data = listener;
40508
40509 if (idev->info->open) {
40510@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
40511 return -EIO;
40512
40513 poll_wait(filep, &idev->wait, wait);
40514- if (listener->event_count != atomic_read(&idev->event))
40515+ if (listener->event_count != atomic_read_unchecked(&idev->event))
40516 return POLLIN | POLLRDNORM;
40517 return 0;
40518 }
40519@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
40520 do {
40521 set_current_state(TASK_INTERRUPTIBLE);
40522
40523- event_count = atomic_read(&idev->event);
40524+ event_count = atomic_read_unchecked(&idev->event);
40525 if (event_count != listener->event_count) {
40526 if (copy_to_user(buf, &event_count, count))
40527 retval = -EFAULT;
40528@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
40529 static void uio_vma_open(struct vm_area_struct *vma)
40530 {
40531 struct uio_device *idev = vma->vm_private_data;
40532- idev->vma_count++;
40533+ local_inc(&idev->vma_count);
40534 }
40535
40536 static void uio_vma_close(struct vm_area_struct *vma)
40537 {
40538 struct uio_device *idev = vma->vm_private_data;
40539- idev->vma_count--;
40540+ local_dec(&idev->vma_count);
40541 }
40542
40543 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
40544@@ -819,7 +820,7 @@ int __uio_register_device(struct module *owner,
40545 idev->owner = owner;
40546 idev->info = info;
40547 init_waitqueue_head(&idev->wait);
40548- atomic_set(&idev->event, 0);
40549+ atomic_set_unchecked(&idev->event, 0);
40550
40551 ret = uio_get_minor(idev);
40552 if (ret)
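
In uio.c the event counter moves to atomic_unchecked_t and vma_count to local_t: under PAX_REFCOUNT, plain atomic_t increments trap on overflow, so counters that are pure statistics, where wraparound is harmless, are switched to the _unchecked variants. The same idea expressed in portable C11, using an unsigned atomic whose overflow is well-defined (names are illustrative only):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint event_count;         /* plays the role of idev->event */

static void event_notify(void)
{
	atomic_fetch_add(&event_count, 1); /* may wrap; harmless for a statistic */
}

int main(void)
{
	atomic_store(&event_count, UINT_MAX); /* force the wraparound edge */
	event_notify();
	printf("counter wrapped to %u\n", atomic_load(&event_count)); /* 0 */
	return 0;
}
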
40553diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
40554index b7eb86a..36d28af 100644
40555--- a/drivers/usb/atm/cxacru.c
40556+++ b/drivers/usb/atm/cxacru.c
40557@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
40558 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
40559 if (ret < 2)
40560 return -EINVAL;
40561- if (index < 0 || index > 0x7f)
40562+ if (index > 0x7f)
40563 return -EINVAL;
40564 pos += tmp;
40565
40566diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
40567index 35f10bf..6a38a0b 100644
40568--- a/drivers/usb/atm/usbatm.c
40569+++ b/drivers/usb/atm/usbatm.c
40570@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
40571 if (printk_ratelimit())
40572 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
40573 __func__, vpi, vci);
40574- atomic_inc(&vcc->stats->rx_err);
40575+ atomic_inc_unchecked(&vcc->stats->rx_err);
40576 return;
40577 }
40578
40579@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
40580 if (length > ATM_MAX_AAL5_PDU) {
40581 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
40582 __func__, length, vcc);
40583- atomic_inc(&vcc->stats->rx_err);
40584+ atomic_inc_unchecked(&vcc->stats->rx_err);
40585 goto out;
40586 }
40587
40588@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
40589 if (sarb->len < pdu_length) {
40590 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
40591 __func__, pdu_length, sarb->len, vcc);
40592- atomic_inc(&vcc->stats->rx_err);
40593+ atomic_inc_unchecked(&vcc->stats->rx_err);
40594 goto out;
40595 }
40596
40597 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
40598 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
40599 __func__, vcc);
40600- atomic_inc(&vcc->stats->rx_err);
40601+ atomic_inc_unchecked(&vcc->stats->rx_err);
40602 goto out;
40603 }
40604
40605@@ -389,7 +389,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
40606 if (printk_ratelimit())
40607 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
40608 __func__, length);
40609- atomic_inc(&vcc->stats->rx_drop);
40610+ atomic_inc_unchecked(&vcc->stats->rx_drop);
40611 goto out;
40612 }
40613
40614@@ -417,7 +417,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
40615
40616 vcc->push(vcc, skb);
40617
40618- atomic_inc(&vcc->stats->rx);
40619+ atomic_inc_unchecked(&vcc->stats->rx);
40620 out:
40621 skb_trim(sarb, 0);
40622 }
40623@@ -623,7 +623,7 @@ static void usbatm_tx_process(unsigned long data)
40624 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
40625
40626 usbatm_pop(vcc, skb);
40627- atomic_inc(&vcc->stats->tx);
40628+ atomic_inc_unchecked(&vcc->stats->tx);
40629
40630 skb = skb_dequeue(&instance->sndqueue);
40631 }
40632@@ -779,11 +779,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
40633 if (!left--)
40634 return sprintf(page,
40635 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
40636- atomic_read(&atm_dev->stats.aal5.tx),
40637- atomic_read(&atm_dev->stats.aal5.tx_err),
40638- atomic_read(&atm_dev->stats.aal5.rx),
40639- atomic_read(&atm_dev->stats.aal5.rx_err),
40640- atomic_read(&atm_dev->stats.aal5.rx_drop));
40641+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
40642+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
40643+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
40644+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
40645+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
40646
40647 if (!left--) {
40648 if (instance->disconnected)
40649diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
40650index f460de3..95ba1f6 100644
40651--- a/drivers/usb/core/devices.c
40652+++ b/drivers/usb/core/devices.c
40653@@ -126,7 +126,7 @@ static const char format_endpt[] =
40654 * time it gets called.
40655 */
40656 static struct device_connect_event {
40657- atomic_t count;
40658+ atomic_unchecked_t count;
40659 wait_queue_head_t wait;
40660 } device_event = {
40661 .count = ATOMIC_INIT(1),
40662@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
40663
40664 void usbfs_conn_disc_event(void)
40665 {
40666- atomic_add(2, &device_event.count);
40667+ atomic_add_unchecked(2, &device_event.count);
40668 wake_up(&device_event.wait);
40669 }
40670
40671@@ -647,7 +647,7 @@ static unsigned int usb_device_poll(struct file *file,
40672
40673 poll_wait(file, &device_event.wait, wait);
40674
40675- event_count = atomic_read(&device_event.count);
40676+ event_count = atomic_read_unchecked(&device_event.count);
40677 if (file->f_version != event_count) {
40678 file->f_version = event_count;
40679 return POLLIN | POLLRDNORM;
40680diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
40681index f034716..aed0368 100644
40682--- a/drivers/usb/core/hcd.c
40683+++ b/drivers/usb/core/hcd.c
40684@@ -1478,7 +1478,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
40685 */
40686 usb_get_urb(urb);
40687 atomic_inc(&urb->use_count);
40688- atomic_inc(&urb->dev->urbnum);
40689+ atomic_inc_unchecked(&urb->dev->urbnum);
40690 usbmon_urb_submit(&hcd->self, urb);
40691
40692 /* NOTE requirements on root-hub callers (usbfs and the hub
40693@@ -1505,7 +1505,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
40694 urb->hcpriv = NULL;
40695 INIT_LIST_HEAD(&urb->urb_list);
40696 atomic_dec(&urb->use_count);
40697- atomic_dec(&urb->dev->urbnum);
40698+ atomic_dec_unchecked(&urb->dev->urbnum);
40699 if (atomic_read(&urb->reject))
40700 wake_up(&usb_kill_urb_queue);
40701 usb_put_urb(urb);
40702diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
40703index 818e4a0..0fc9589 100644
40704--- a/drivers/usb/core/sysfs.c
40705+++ b/drivers/usb/core/sysfs.c
40706@@ -226,7 +226,7 @@ show_urbnum(struct device *dev, struct device_attribute *attr, char *buf)
40707 struct usb_device *udev;
40708
40709 udev = to_usb_device(dev);
40710- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
40711+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
40712 }
40713 static DEVICE_ATTR(urbnum, S_IRUGO, show_urbnum, NULL);
40714
40715diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
40716index cd8fb44..17fbe0c 100644
40717--- a/drivers/usb/core/usb.c
40718+++ b/drivers/usb/core/usb.c
40719@@ -397,7 +397,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
40720 set_dev_node(&dev->dev, dev_to_node(bus->controller));
40721 dev->state = USB_STATE_ATTACHED;
40722 dev->lpm_disable_count = 1;
40723- atomic_set(&dev->urbnum, 0);
40724+ atomic_set_unchecked(&dev->urbnum, 0);
40725
40726 INIT_LIST_HEAD(&dev->ep0.urb_list);
40727 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
40728diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
40729index 4bfa78a..902bfbd 100644
40730--- a/drivers/usb/early/ehci-dbgp.c
40731+++ b/drivers/usb/early/ehci-dbgp.c
40732@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
40733
40734 #ifdef CONFIG_KGDB
40735 static struct kgdb_io kgdbdbgp_io_ops;
40736-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
40737+static struct kgdb_io kgdbdbgp_io_ops_console;
40738+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
40739 #else
40740 #define dbgp_kgdb_mode (0)
40741 #endif
40742@@ -1047,6 +1048,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
40743 .write_char = kgdbdbgp_write_char,
40744 };
40745
40746+static struct kgdb_io kgdbdbgp_io_ops_console = {
40747+ .name = "kgdbdbgp",
40748+ .read_char = kgdbdbgp_read_char,
40749+ .write_char = kgdbdbgp_write_char,
40750+ .is_console = 1
40751+};
40752+
40753 static int kgdbdbgp_wait_time;
40754
40755 static int __init kgdbdbgp_parse_config(char *str)
40756@@ -1062,8 +1070,10 @@ static int __init kgdbdbgp_parse_config(char *str)
40757 ptr++;
40758 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
40759 }
40760- kgdb_register_io_module(&kgdbdbgp_io_ops);
40761- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
40762+ if (early_dbgp_console.index != -1)
40763+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
40764+ else
40765+ kgdb_register_io_module(&kgdbdbgp_io_ops);
40766
40767 return 0;
40768 }
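
The ehci-dbgp.c change sidesteps a runtime write to kgdbdbgp_io_ops.is_console: with grsecurity constifying ops structures, patching a field after boot would fault in a read-only section, so the patch declares a second, console-flavored copy and registers whichever one matches the configuration. A small sketch of this pick-a-const-variant idiom (the struct and function names here are invented, not the kgdb API):

#include <stdbool.h>
#include <stdio.h>

struct demo_io_ops {
	const char *name;
	bool is_console;
};

/* Two immutable variants instead of one struct patched at runtime. */
static const struct demo_io_ops demo_ops         = { "demo-dbgp", false };
static const struct demo_io_ops demo_ops_console = { "demo-dbgp", true  };

static void demo_register(const struct demo_io_ops *ops)
{
	printf("registered %s (is_console=%d)\n", ops->name, ops->is_console);
}

int main(void)
{
	int console_index = 0;          /* stands in for early_dbgp_console.index */
	demo_register(console_index != -1 ? &demo_ops_console : &demo_ops);
	return 0;
}
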
40769diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
40770index f173952..83d6ec0 100644
40771--- a/drivers/usb/gadget/u_serial.c
40772+++ b/drivers/usb/gadget/u_serial.c
40773@@ -735,9 +735,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
40774 spin_lock_irq(&port->port_lock);
40775
40776 /* already open? Great. */
40777- if (port->port.count) {
40778+ if (atomic_read(&port->port.count)) {
40779 status = 0;
40780- port->port.count++;
40781+ atomic_inc(&port->port.count);
40782
40783 /* currently opening/closing? wait ... */
40784 } else if (port->openclose) {
40785@@ -796,7 +796,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
40786 tty->driver_data = port;
40787 port->port.tty = tty;
40788
40789- port->port.count = 1;
40790+ atomic_set(&port->port.count, 1);
40791 port->openclose = false;
40792
40793 /* if connected, start the I/O stream */
40794@@ -838,11 +838,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
40795
40796 spin_lock_irq(&port->port_lock);
40797
40798- if (port->port.count != 1) {
40799- if (port->port.count == 0)
40800+ if (atomic_read(&port->port.count) != 1) {
40801+ if (atomic_read(&port->port.count) == 0)
40802 WARN_ON(1);
40803 else
40804- --port->port.count;
40805+ atomic_dec(&port->port.count);
40806 goto exit;
40807 }
40808
40809@@ -852,7 +852,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
40810 * and sleep if necessary
40811 */
40812 port->openclose = true;
40813- port->port.count = 0;
40814+ atomic_set(&port->port.count, 0);
40815
40816 gser = port->port_usb;
40817 if (gser && gser->disconnect)
40818@@ -1157,7 +1157,7 @@ static int gs_closed(struct gs_port *port)
40819 int cond;
40820
40821 spin_lock_irq(&port->port_lock);
40822- cond = (port->port.count == 0) && !port->openclose;
40823+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
40824 spin_unlock_irq(&port->port_lock);
40825 return cond;
40826 }
40827@@ -1270,7 +1270,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
40828 /* if it's already open, start I/O ... and notify the serial
40829 * protocol about open/close status (connect/disconnect).
40830 */
40831- if (port->port.count) {
40832+ if (atomic_read(&port->port.count)) {
40833 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
40834 gs_start_io(port);
40835 if (gser->connect)
40836@@ -1317,7 +1317,7 @@ void gserial_disconnect(struct gserial *gser)
40837
40838 port->port_usb = NULL;
40839 gser->ioport = NULL;
40840- if (port->port.count > 0 || port->openclose) {
40841+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
40842 wake_up_interruptible(&port->drain_wait);
40843 if (port->port.tty)
40844 tty_hangup(port->port.tty);
40845@@ -1333,7 +1333,7 @@ void gserial_disconnect(struct gserial *gser)
40846
40847 /* finally, free any unused/unusable I/O buffers */
40848 spin_lock_irqsave(&port->port_lock, flags);
40849- if (port->port.count == 0 && !port->openclose)
40850+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
40851 gs_buf_free(&port->port_write_buf);
40852 gs_free_requests(gser->out, &port->read_pool, NULL);
40853 gs_free_requests(gser->out, &port->read_queue, NULL);
40854diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
40855index 5f3bcd3..bfca43f 100644
40856--- a/drivers/usb/serial/console.c
40857+++ b/drivers/usb/serial/console.c
40858@@ -124,7 +124,7 @@ static int usb_console_setup(struct console *co, char *options)
40859
40860 info->port = port;
40861
40862- ++port->port.count;
40863+ atomic_inc(&port->port.count);
40864 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
40865 if (serial->type->set_termios) {
40866 /*
40867@@ -174,7 +174,7 @@ static int usb_console_setup(struct console *co, char *options)
40868 }
40869 /* Now that any required fake tty operations are completed restore
40870 * the tty port count */
40871- --port->port.count;
40872+ atomic_dec(&port->port.count);
40873 /* The console is special in terms of closing the device so
40874 * indicate this port is now acting as a system console. */
40875 port->port.console = 1;
40876@@ -187,7 +187,7 @@ static int usb_console_setup(struct console *co, char *options)
40877 free_tty:
40878 kfree(tty);
40879 reset_open_count:
40880- port->port.count = 0;
40881+ atomic_set(&port->port.count, 0);
40882 usb_autopm_put_interface(serial->interface);
40883 error_get_interface:
40884 usb_serial_put(serial);
40885diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
40886index d6bea3e..60b250e 100644
40887--- a/drivers/usb/wusbcore/wa-hc.h
40888+++ b/drivers/usb/wusbcore/wa-hc.h
40889@@ -192,7 +192,7 @@ struct wahc {
40890 struct list_head xfer_delayed_list;
40891 spinlock_t xfer_list_lock;
40892 struct work_struct xfer_work;
40893- atomic_t xfer_id_count;
40894+ atomic_unchecked_t xfer_id_count;
40895 };
40896
40897
40898@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
40899 INIT_LIST_HEAD(&wa->xfer_delayed_list);
40900 spin_lock_init(&wa->xfer_list_lock);
40901 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
40902- atomic_set(&wa->xfer_id_count, 1);
40903+ atomic_set_unchecked(&wa->xfer_id_count, 1);
40904 }
40905
40906 /**
40907diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
40908index 57c01ab..8a05959 100644
40909--- a/drivers/usb/wusbcore/wa-xfer.c
40910+++ b/drivers/usb/wusbcore/wa-xfer.c
40911@@ -296,7 +296,7 @@ out:
40912 */
40913 static void wa_xfer_id_init(struct wa_xfer *xfer)
40914 {
40915- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
40916+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
40917 }
40918
40919 /*
40920diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
40921index dedaf81..b0f11ab 100644
40922--- a/drivers/vhost/vhost.c
40923+++ b/drivers/vhost/vhost.c
40924@@ -634,7 +634,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
40925 return 0;
40926 }
40927
40928-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
40929+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
40930 {
40931 struct file *eventfp, *filep = NULL;
40932 bool pollstart = false, pollstop = false;
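
vhost_set_vring's ioctl parameter widens from int to unsigned int; ioctl command numbers use all 32 bits (direction and size are encoded in the high bits), so a signed copy can go negative and silently invert range comparisons. A toy demonstration, with an illustrative command value:

#include <stdio.h>

int main(void)
{
	unsigned int cmd = 0xc008af00u; /* shape of a typical _IOWR() encoding */
	int as_int = (int)cmd;          /* implementation-defined; negative on
	                                   common two's-complement targets */

	printf("unsigned view: %u\n", cmd);
	printf("signed view:   %d\n", as_int);
	/* a range test that flips once the value is interpreted as negative: */
	printf("as_int >= 0x40000000 ? %d\n", as_int >= 0x40000000);
	return 0;
}
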
40933diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
40934index 0fefa84..7a9d581 100644
40935--- a/drivers/video/aty/aty128fb.c
40936+++ b/drivers/video/aty/aty128fb.c
40937@@ -149,7 +149,7 @@ enum {
40938 };
40939
40940 /* Must match above enum */
40941-static char * const r128_family[] __devinitconst = {
40942+static const char * const r128_family[] __devinitconst = {
40943 "AGP",
40944 "PCI",
40945 "PRO AGP",
40946diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
40947index 5c3960d..15cf8fc 100644
40948--- a/drivers/video/fbcmap.c
40949+++ b/drivers/video/fbcmap.c
40950@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
40951 rc = -ENODEV;
40952 goto out;
40953 }
40954- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
40955- !info->fbops->fb_setcmap)) {
40956+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
40957 rc = -EINVAL;
40958 goto out1;
40959 }
40960diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
40961index 3ff0105..7589d98 100644
40962--- a/drivers/video/fbmem.c
40963+++ b/drivers/video/fbmem.c
40964@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
40965 image->dx += image->width + 8;
40966 }
40967 } else if (rotate == FB_ROTATE_UD) {
40968- for (x = 0; x < num && image->dx >= 0; x++) {
40969+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
40970 info->fbops->fb_imageblit(info, image);
40971 image->dx -= image->width + 8;
40972 }
40973@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
40974 image->dy += image->height + 8;
40975 }
40976 } else if (rotate == FB_ROTATE_CCW) {
40977- for (x = 0; x < num && image->dy >= 0; x++) {
40978+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
40979 info->fbops->fb_imageblit(info, image);
40980 image->dy -= image->height + 8;
40981 }
40982@@ -1166,7 +1166,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
40983 return -EFAULT;
40984 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
40985 return -EINVAL;
40986- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
40987+ if (con2fb.framebuffer >= FB_MAX)
40988 return -EINVAL;
40989 if (!registered_fb[con2fb.framebuffer])
40990 request_module("fb%d", con2fb.framebuffer);
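
Both fbmem.c fixes target the same pitfall: image->dx and image->dy are u32, so a guard like image->dx >= 0 is a tautology and the reverse-rotation loops could run away once the coordinate wrapped; the (__s32) cast restores a real termination test, and the con2fb bound check likewise drops a meaningless < 0 arm. A compact reproduction of the wraparound (values are arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t dx = 16;               /* stands in for image->dx (a u32) */
	int steps = 0;

	/* Buggy form: "dx >= 0" is always true for an unsigned type, so the
	 * loop would never stop on its own. The (int32_t) cast is the patched
	 * form and exits once dx wraps past zero (wraps to a negative signed
	 * value on real-world two's-complement compilers). */
	while ((int32_t)dx >= 0 && steps < 100) { /* steps bound: demo safety net */
		dx -= 8;                /* 16 -> 8 -> 0 -> wraps to 0xfffffff8 */
		steps++;
	}
	printf("stopped after %d steps, dx=%u\n", steps, dx);
	return 0;
}
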
40991diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
40992index 7672d2e..b56437f 100644
40993--- a/drivers/video/i810/i810_accel.c
40994+++ b/drivers/video/i810/i810_accel.c
40995@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
40996 }
40997 }
40998 printk("ringbuffer lockup!!!\n");
40999+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
41000 i810_report_error(mmio);
41001 par->dev_flags |= LOCKUP;
41002 info->pixmap.scan_align = 1;
41003diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
41004index 3c14e43..eafa544 100644
41005--- a/drivers/video/logo/logo_linux_clut224.ppm
41006+++ b/drivers/video/logo/logo_linux_clut224.ppm
41007@@ -1,1604 +1,1123 @@
41008 P3
41009-# Standard 224-color Linux logo
41010 80 80
41011 255
[~1,050 further lines of this hunk omitted here, truncated mid-stream by the scrape: raw RGB triplets deleting the standard 80x80, 224-color Linux boot logo; per the @@ -1,1604 +1,1123 @@ header above, the full patch removes the 1,604-line pixel table and substitutes a new 1,123-line one.]
42066- 2 2 6 6 6 6 30 30 30 2 2 6
42067- 2 2 6 2 2 6 2 2 6 2 2 6
42068- 2 2 6 66 66 66 58 58 58 22 22 22
42069- 6 6 6 0 0 0 0 0 0 0 0 0
42070- 0 0 0 0 0 0 0 0 0 0 0 0
42071- 0 0 0 0 0 0 0 0 0 0 0 0
42072- 0 0 0 0 0 0 0 0 0 0 0 0
42073- 0 0 0 0 0 0 0 0 0 0 0 0
42074- 0 0 0 0 0 0 0 0 0 0 0 0
42075- 0 0 0 0 0 0 6 6 6 22 22 22
42076- 58 58 58 62 62 62 2 2 6 2 2 6
42077- 2 2 6 2 2 6 30 30 30 78 78 78
42078-250 250 250 253 253 253 253 253 253 253 253 253
42079-253 253 253 253 253 253 253 253 253 253 253 253
42080-253 253 253 253 253 253 231 231 231 246 246 246
42081-253 253 253 253 253 253 253 253 253 253 253 253
42082-253 253 253 253 253 253 253 253 253 253 253 253
42083-253 253 253 253 253 253 253 253 253 253 253 253
42084-253 253 253 253 253 253 253 253 253 253 253 253
42085-253 253 253 253 253 253 206 206 206 2 2 6
42086- 22 22 22 34 34 34 18 14 6 22 22 22
42087- 26 26 26 18 18 18 6 6 6 2 2 6
42088- 2 2 6 82 82 82 54 54 54 18 18 18
42089- 6 6 6 0 0 0 0 0 0 0 0 0
42090- 0 0 0 0 0 0 0 0 0 0 0 0
42091- 0 0 0 0 0 0 0 0 0 0 0 0
42092- 0 0 0 0 0 0 0 0 0 0 0 0
42093- 0 0 0 0 0 0 0 0 0 0 0 0
42094- 0 0 0 0 0 0 0 0 0 0 0 0
42095- 0 0 0 0 0 0 6 6 6 26 26 26
42096- 62 62 62 106 106 106 74 54 14 185 133 11
42097-210 162 10 121 92 8 6 6 6 62 62 62
42098-238 238 238 253 253 253 253 253 253 253 253 253
42099-253 253 253 253 253 253 253 253 253 253 253 253
42100-253 253 253 253 253 253 231 231 231 246 246 246
42101-253 253 253 253 253 253 253 253 253 253 253 253
42102-253 253 253 253 253 253 253 253 253 253 253 253
42103-253 253 253 253 253 253 253 253 253 253 253 253
42104-253 253 253 253 253 253 253 253 253 253 253 253
42105-253 253 253 253 253 253 158 158 158 18 18 18
42106- 14 14 14 2 2 6 2 2 6 2 2 6
42107- 6 6 6 18 18 18 66 66 66 38 38 38
42108- 6 6 6 94 94 94 50 50 50 18 18 18
42109- 6 6 6 0 0 0 0 0 0 0 0 0
42110- 0 0 0 0 0 0 0 0 0 0 0 0
42111- 0 0 0 0 0 0 0 0 0 0 0 0
42112- 0 0 0 0 0 0 0 0 0 0 0 0
42113- 0 0 0 0 0 0 0 0 0 0 0 0
42114- 0 0 0 0 0 0 0 0 0 6 6 6
42115- 10 10 10 10 10 10 18 18 18 38 38 38
42116- 78 78 78 142 134 106 216 158 10 242 186 14
42117-246 190 14 246 190 14 156 118 10 10 10 10
42118- 90 90 90 238 238 238 253 253 253 253 253 253
42119-253 253 253 253 253 253 253 253 253 253 253 253
42120-253 253 253 253 253 253 231 231 231 250 250 250
42121-253 253 253 253 253 253 253 253 253 253 253 253
42122-253 253 253 253 253 253 253 253 253 253 253 253
42123-253 253 253 253 253 253 253 253 253 253 253 253
42124-253 253 253 253 253 253 253 253 253 246 230 190
42125-238 204 91 238 204 91 181 142 44 37 26 9
42126- 2 2 6 2 2 6 2 2 6 2 2 6
42127- 2 2 6 2 2 6 38 38 38 46 46 46
42128- 26 26 26 106 106 106 54 54 54 18 18 18
42129- 6 6 6 0 0 0 0 0 0 0 0 0
42130- 0 0 0 0 0 0 0 0 0 0 0 0
42131- 0 0 0 0 0 0 0 0 0 0 0 0
42132- 0 0 0 0 0 0 0 0 0 0 0 0
42133- 0 0 0 0 0 0 0 0 0 0 0 0
42134- 0 0 0 6 6 6 14 14 14 22 22 22
42135- 30 30 30 38 38 38 50 50 50 70 70 70
42136-106 106 106 190 142 34 226 170 11 242 186 14
42137-246 190 14 246 190 14 246 190 14 154 114 10
42138- 6 6 6 74 74 74 226 226 226 253 253 253
42139-253 253 253 253 253 253 253 253 253 253 253 253
42140-253 253 253 253 253 253 231 231 231 250 250 250
42141-253 253 253 253 253 253 253 253 253 253 253 253
42142-253 253 253 253 253 253 253 253 253 253 253 253
42143-253 253 253 253 253 253 253 253 253 253 253 253
42144-253 253 253 253 253 253 253 253 253 228 184 62
42145-241 196 14 241 208 19 232 195 16 38 30 10
42146- 2 2 6 2 2 6 2 2 6 2 2 6
42147- 2 2 6 6 6 6 30 30 30 26 26 26
42148-203 166 17 154 142 90 66 66 66 26 26 26
42149- 6 6 6 0 0 0 0 0 0 0 0 0
42150- 0 0 0 0 0 0 0 0 0 0 0 0
42151- 0 0 0 0 0 0 0 0 0 0 0 0
42152- 0 0 0 0 0 0 0 0 0 0 0 0
42153- 0 0 0 0 0 0 0 0 0 0 0 0
42154- 6 6 6 18 18 18 38 38 38 58 58 58
42155- 78 78 78 86 86 86 101 101 101 123 123 123
42156-175 146 61 210 150 10 234 174 13 246 186 14
42157-246 190 14 246 190 14 246 190 14 238 190 10
42158-102 78 10 2 2 6 46 46 46 198 198 198
42159-253 253 253 253 253 253 253 253 253 253 253 253
42160-253 253 253 253 253 253 234 234 234 242 242 242
42161-253 253 253 253 253 253 253 253 253 253 253 253
42162-253 253 253 253 253 253 253 253 253 253 253 253
42163-253 253 253 253 253 253 253 253 253 253 253 253
42164-253 253 253 253 253 253 253 253 253 224 178 62
42165-242 186 14 241 196 14 210 166 10 22 18 6
42166- 2 2 6 2 2 6 2 2 6 2 2 6
42167- 2 2 6 2 2 6 6 6 6 121 92 8
42168-238 202 15 232 195 16 82 82 82 34 34 34
42169- 10 10 10 0 0 0 0 0 0 0 0 0
42170- 0 0 0 0 0 0 0 0 0 0 0 0
42171- 0 0 0 0 0 0 0 0 0 0 0 0
42172- 0 0 0 0 0 0 0 0 0 0 0 0
42173- 0 0 0 0 0 0 0 0 0 0 0 0
42174- 14 14 14 38 38 38 70 70 70 154 122 46
42175-190 142 34 200 144 11 197 138 11 197 138 11
42176-213 154 11 226 170 11 242 186 14 246 190 14
42177-246 190 14 246 190 14 246 190 14 246 190 14
42178-225 175 15 46 32 6 2 2 6 22 22 22
42179-158 158 158 250 250 250 253 253 253 253 253 253
42180-253 253 253 253 253 253 253 253 253 253 253 253
42181-253 253 253 253 253 253 253 253 253 253 253 253
42182-253 253 253 253 253 253 253 253 253 253 253 253
42183-253 253 253 253 253 253 253 253 253 253 253 253
42184-253 253 253 250 250 250 242 242 242 224 178 62
42185-239 182 13 236 186 11 213 154 11 46 32 6
42186- 2 2 6 2 2 6 2 2 6 2 2 6
42187- 2 2 6 2 2 6 61 42 6 225 175 15
42188-238 190 10 236 186 11 112 100 78 42 42 42
42189- 14 14 14 0 0 0 0 0 0 0 0 0
42190- 0 0 0 0 0 0 0 0 0 0 0 0
42191- 0 0 0 0 0 0 0 0 0 0 0 0
42192- 0 0 0 0 0 0 0 0 0 0 0 0
42193- 0 0 0 0 0 0 0 0 0 6 6 6
42194- 22 22 22 54 54 54 154 122 46 213 154 11
42195-226 170 11 230 174 11 226 170 11 226 170 11
42196-236 178 12 242 186 14 246 190 14 246 190 14
42197-246 190 14 246 190 14 246 190 14 246 190 14
42198-241 196 14 184 144 12 10 10 10 2 2 6
42199- 6 6 6 116 116 116 242 242 242 253 253 253
42200-253 253 253 253 253 253 253 253 253 253 253 253
42201-253 253 253 253 253 253 253 253 253 253 253 253
42202-253 253 253 253 253 253 253 253 253 253 253 253
42203-253 253 253 253 253 253 253 253 253 253 253 253
42204-253 253 253 231 231 231 198 198 198 214 170 54
42205-236 178 12 236 178 12 210 150 10 137 92 6
42206- 18 14 6 2 2 6 2 2 6 2 2 6
42207- 6 6 6 70 47 6 200 144 11 236 178 12
42208-239 182 13 239 182 13 124 112 88 58 58 58
42209- 22 22 22 6 6 6 0 0 0 0 0 0
42210- 0 0 0 0 0 0 0 0 0 0 0 0
42211- 0 0 0 0 0 0 0 0 0 0 0 0
42212- 0 0 0 0 0 0 0 0 0 0 0 0
42213- 0 0 0 0 0 0 0 0 0 10 10 10
42214- 30 30 30 70 70 70 180 133 36 226 170 11
42215-239 182 13 242 186 14 242 186 14 246 186 14
42216-246 190 14 246 190 14 246 190 14 246 190 14
42217-246 190 14 246 190 14 246 190 14 246 190 14
42218-246 190 14 232 195 16 98 70 6 2 2 6
42219- 2 2 6 2 2 6 66 66 66 221 221 221
42220-253 253 253 253 253 253 253 253 253 253 253 253
42221-253 253 253 253 253 253 253 253 253 253 253 253
42222-253 253 253 253 253 253 253 253 253 253 253 253
42223-253 253 253 253 253 253 253 253 253 253 253 253
42224-253 253 253 206 206 206 198 198 198 214 166 58
42225-230 174 11 230 174 11 216 158 10 192 133 9
42226-163 110 8 116 81 8 102 78 10 116 81 8
42227-167 114 7 197 138 11 226 170 11 239 182 13
42228-242 186 14 242 186 14 162 146 94 78 78 78
42229- 34 34 34 14 14 14 6 6 6 0 0 0
42230- 0 0 0 0 0 0 0 0 0 0 0 0
42231- 0 0 0 0 0 0 0 0 0 0 0 0
42232- 0 0 0 0 0 0 0 0 0 0 0 0
42233- 0 0 0 0 0 0 0 0 0 6 6 6
42234- 30 30 30 78 78 78 190 142 34 226 170 11
42235-239 182 13 246 190 14 246 190 14 246 190 14
42236-246 190 14 246 190 14 246 190 14 246 190 14
42237-246 190 14 246 190 14 246 190 14 246 190 14
42238-246 190 14 241 196 14 203 166 17 22 18 6
42239- 2 2 6 2 2 6 2 2 6 38 38 38
42240-218 218 218 253 253 253 253 253 253 253 253 253
42241-253 253 253 253 253 253 253 253 253 253 253 253
42242-253 253 253 253 253 253 253 253 253 253 253 253
42243-253 253 253 253 253 253 253 253 253 253 253 253
42244-250 250 250 206 206 206 198 198 198 202 162 69
42245-226 170 11 236 178 12 224 166 10 210 150 10
42246-200 144 11 197 138 11 192 133 9 197 138 11
42247-210 150 10 226 170 11 242 186 14 246 190 14
42248-246 190 14 246 186 14 225 175 15 124 112 88
42249- 62 62 62 30 30 30 14 14 14 6 6 6
42250- 0 0 0 0 0 0 0 0 0 0 0 0
42251- 0 0 0 0 0 0 0 0 0 0 0 0
42252- 0 0 0 0 0 0 0 0 0 0 0 0
42253- 0 0 0 0 0 0 0 0 0 10 10 10
42254- 30 30 30 78 78 78 174 135 50 224 166 10
42255-239 182 13 246 190 14 246 190 14 246 190 14
42256-246 190 14 246 190 14 246 190 14 246 190 14
42257-246 190 14 246 190 14 246 190 14 246 190 14
42258-246 190 14 246 190 14 241 196 14 139 102 15
42259- 2 2 6 2 2 6 2 2 6 2 2 6
42260- 78 78 78 250 250 250 253 253 253 253 253 253
42261-253 253 253 253 253 253 253 253 253 253 253 253
42262-253 253 253 253 253 253 253 253 253 253 253 253
42263-253 253 253 253 253 253 253 253 253 253 253 253
42264-250 250 250 214 214 214 198 198 198 190 150 46
42265-219 162 10 236 178 12 234 174 13 224 166 10
42266-216 158 10 213 154 11 213 154 11 216 158 10
42267-226 170 11 239 182 13 246 190 14 246 190 14
42268-246 190 14 246 190 14 242 186 14 206 162 42
42269-101 101 101 58 58 58 30 30 30 14 14 14
42270- 6 6 6 0 0 0 0 0 0 0 0 0
42271- 0 0 0 0 0 0 0 0 0 0 0 0
42272- 0 0 0 0 0 0 0 0 0 0 0 0
42273- 0 0 0 0 0 0 0 0 0 10 10 10
42274- 30 30 30 74 74 74 174 135 50 216 158 10
42275-236 178 12 246 190 14 246 190 14 246 190 14
42276-246 190 14 246 190 14 246 190 14 246 190 14
42277-246 190 14 246 190 14 246 190 14 246 190 14
42278-246 190 14 246 190 14 241 196 14 226 184 13
42279- 61 42 6 2 2 6 2 2 6 2 2 6
42280- 22 22 22 238 238 238 253 253 253 253 253 253
42281-253 253 253 253 253 253 253 253 253 253 253 253
42282-253 253 253 253 253 253 253 253 253 253 253 253
42283-253 253 253 253 253 253 253 253 253 253 253 253
42284-253 253 253 226 226 226 187 187 187 180 133 36
42285-216 158 10 236 178 12 239 182 13 236 178 12
42286-230 174 11 226 170 11 226 170 11 230 174 11
42287-236 178 12 242 186 14 246 190 14 246 190 14
42288-246 190 14 246 190 14 246 186 14 239 182 13
42289-206 162 42 106 106 106 66 66 66 34 34 34
42290- 14 14 14 6 6 6 0 0 0 0 0 0
42291- 0 0 0 0 0 0 0 0 0 0 0 0
42292- 0 0 0 0 0 0 0 0 0 0 0 0
42293- 0 0 0 0 0 0 0 0 0 6 6 6
42294- 26 26 26 70 70 70 163 133 67 213 154 11
42295-236 178 12 246 190 14 246 190 14 246 190 14
42296-246 190 14 246 190 14 246 190 14 246 190 14
42297-246 190 14 246 190 14 246 190 14 246 190 14
42298-246 190 14 246 190 14 246 190 14 241 196 14
42299-190 146 13 18 14 6 2 2 6 2 2 6
42300- 46 46 46 246 246 246 253 253 253 253 253 253
42301-253 253 253 253 253 253 253 253 253 253 253 253
42302-253 253 253 253 253 253 253 253 253 253 253 253
42303-253 253 253 253 253 253 253 253 253 253 253 253
42304-253 253 253 221 221 221 86 86 86 156 107 11
42305-216 158 10 236 178 12 242 186 14 246 186 14
42306-242 186 14 239 182 13 239 182 13 242 186 14
42307-242 186 14 246 186 14 246 190 14 246 190 14
42308-246 190 14 246 190 14 246 190 14 246 190 14
42309-242 186 14 225 175 15 142 122 72 66 66 66
42310- 30 30 30 10 10 10 0 0 0 0 0 0
42311- 0 0 0 0 0 0 0 0 0 0 0 0
42312- 0 0 0 0 0 0 0 0 0 0 0 0
42313- 0 0 0 0 0 0 0 0 0 6 6 6
42314- 26 26 26 70 70 70 163 133 67 210 150 10
42315-236 178 12 246 190 14 246 190 14 246 190 14
42316-246 190 14 246 190 14 246 190 14 246 190 14
42317-246 190 14 246 190 14 246 190 14 246 190 14
42318-246 190 14 246 190 14 246 190 14 246 190 14
42319-232 195 16 121 92 8 34 34 34 106 106 106
42320-221 221 221 253 253 253 253 253 253 253 253 253
42321-253 253 253 253 253 253 253 253 253 253 253 253
42322-253 253 253 253 253 253 253 253 253 253 253 253
42323-253 253 253 253 253 253 253 253 253 253 253 253
42324-242 242 242 82 82 82 18 14 6 163 110 8
42325-216 158 10 236 178 12 242 186 14 246 190 14
42326-246 190 14 246 190 14 246 190 14 246 190 14
42327-246 190 14 246 190 14 246 190 14 246 190 14
42328-246 190 14 246 190 14 246 190 14 246 190 14
42329-246 190 14 246 190 14 242 186 14 163 133 67
42330- 46 46 46 18 18 18 6 6 6 0 0 0
42331- 0 0 0 0 0 0 0 0 0 0 0 0
42332- 0 0 0 0 0 0 0 0 0 0 0 0
42333- 0 0 0 0 0 0 0 0 0 10 10 10
42334- 30 30 30 78 78 78 163 133 67 210 150 10
42335-236 178 12 246 186 14 246 190 14 246 190 14
42336-246 190 14 246 190 14 246 190 14 246 190 14
42337-246 190 14 246 190 14 246 190 14 246 190 14
42338-246 190 14 246 190 14 246 190 14 246 190 14
42339-241 196 14 215 174 15 190 178 144 253 253 253
42340-253 253 253 253 253 253 253 253 253 253 253 253
42341-253 253 253 253 253 253 253 253 253 253 253 253
42342-253 253 253 253 253 253 253 253 253 253 253 253
42343-253 253 253 253 253 253 253 253 253 218 218 218
42344- 58 58 58 2 2 6 22 18 6 167 114 7
42345-216 158 10 236 178 12 246 186 14 246 190 14
42346-246 190 14 246 190 14 246 190 14 246 190 14
42347-246 190 14 246 190 14 246 190 14 246 190 14
42348-246 190 14 246 190 14 246 190 14 246 190 14
42349-246 190 14 246 186 14 242 186 14 190 150 46
42350- 54 54 54 22 22 22 6 6 6 0 0 0
42351- 0 0 0 0 0 0 0 0 0 0 0 0
42352- 0 0 0 0 0 0 0 0 0 0 0 0
42353- 0 0 0 0 0 0 0 0 0 14 14 14
42354- 38 38 38 86 86 86 180 133 36 213 154 11
42355-236 178 12 246 186 14 246 190 14 246 190 14
42356-246 190 14 246 190 14 246 190 14 246 190 14
42357-246 190 14 246 190 14 246 190 14 246 190 14
42358-246 190 14 246 190 14 246 190 14 246 190 14
42359-246 190 14 232 195 16 190 146 13 214 214 214
42360-253 253 253 253 253 253 253 253 253 253 253 253
42361-253 253 253 253 253 253 253 253 253 253 253 253
42362-253 253 253 253 253 253 253 253 253 253 253 253
42363-253 253 253 250 250 250 170 170 170 26 26 26
42364- 2 2 6 2 2 6 37 26 9 163 110 8
42365-219 162 10 239 182 13 246 186 14 246 190 14
42366-246 190 14 246 190 14 246 190 14 246 190 14
42367-246 190 14 246 190 14 246 190 14 246 190 14
42368-246 190 14 246 190 14 246 190 14 246 190 14
42369-246 186 14 236 178 12 224 166 10 142 122 72
42370- 46 46 46 18 18 18 6 6 6 0 0 0
42371- 0 0 0 0 0 0 0 0 0 0 0 0
42372- 0 0 0 0 0 0 0 0 0 0 0 0
42373- 0 0 0 0 0 0 6 6 6 18 18 18
42374- 50 50 50 109 106 95 192 133 9 224 166 10
42375-242 186 14 246 190 14 246 190 14 246 190 14
42376-246 190 14 246 190 14 246 190 14 246 190 14
42377-246 190 14 246 190 14 246 190 14 246 190 14
42378-246 190 14 246 190 14 246 190 14 246 190 14
42379-242 186 14 226 184 13 210 162 10 142 110 46
42380-226 226 226 253 253 253 253 253 253 253 253 253
42381-253 253 253 253 253 253 253 253 253 253 253 253
42382-253 253 253 253 253 253 253 253 253 253 253 253
42383-198 198 198 66 66 66 2 2 6 2 2 6
42384- 2 2 6 2 2 6 50 34 6 156 107 11
42385-219 162 10 239 182 13 246 186 14 246 190 14
42386-246 190 14 246 190 14 246 190 14 246 190 14
42387-246 190 14 246 190 14 246 190 14 246 190 14
42388-246 190 14 246 190 14 246 190 14 242 186 14
42389-234 174 13 213 154 11 154 122 46 66 66 66
42390- 30 30 30 10 10 10 0 0 0 0 0 0
42391- 0 0 0 0 0 0 0 0 0 0 0 0
42392- 0 0 0 0 0 0 0 0 0 0 0 0
42393- 0 0 0 0 0 0 6 6 6 22 22 22
42394- 58 58 58 154 121 60 206 145 10 234 174 13
42395-242 186 14 246 186 14 246 190 14 246 190 14
42396-246 190 14 246 190 14 246 190 14 246 190 14
42397-246 190 14 246 190 14 246 190 14 246 190 14
42398-246 190 14 246 190 14 246 190 14 246 190 14
42399-246 186 14 236 178 12 210 162 10 163 110 8
42400- 61 42 6 138 138 138 218 218 218 250 250 250
42401-253 253 253 253 253 253 253 253 253 250 250 250
42402-242 242 242 210 210 210 144 144 144 66 66 66
42403- 6 6 6 2 2 6 2 2 6 2 2 6
42404- 2 2 6 2 2 6 61 42 6 163 110 8
42405-216 158 10 236 178 12 246 190 14 246 190 14
42406-246 190 14 246 190 14 246 190 14 246 190 14
42407-246 190 14 246 190 14 246 190 14 246 190 14
42408-246 190 14 239 182 13 230 174 11 216 158 10
42409-190 142 34 124 112 88 70 70 70 38 38 38
42410- 18 18 18 6 6 6 0 0 0 0 0 0
42411- 0 0 0 0 0 0 0 0 0 0 0 0
42412- 0 0 0 0 0 0 0 0 0 0 0 0
42413- 0 0 0 0 0 0 6 6 6 22 22 22
42414- 62 62 62 168 124 44 206 145 10 224 166 10
42415-236 178 12 239 182 13 242 186 14 242 186 14
42416-246 186 14 246 190 14 246 190 14 246 190 14
42417-246 190 14 246 190 14 246 190 14 246 190 14
42418-246 190 14 246 190 14 246 190 14 246 190 14
42419-246 190 14 236 178 12 216 158 10 175 118 6
42420- 80 54 7 2 2 6 6 6 6 30 30 30
42421- 54 54 54 62 62 62 50 50 50 38 38 38
42422- 14 14 14 2 2 6 2 2 6 2 2 6
42423- 2 2 6 2 2 6 2 2 6 2 2 6
42424- 2 2 6 6 6 6 80 54 7 167 114 7
42425-213 154 11 236 178 12 246 190 14 246 190 14
42426-246 190 14 246 190 14 246 190 14 246 190 14
42427-246 190 14 242 186 14 239 182 13 239 182 13
42428-230 174 11 210 150 10 174 135 50 124 112 88
42429- 82 82 82 54 54 54 34 34 34 18 18 18
42430- 6 6 6 0 0 0 0 0 0 0 0 0
42431- 0 0 0 0 0 0 0 0 0 0 0 0
42432- 0 0 0 0 0 0 0 0 0 0 0 0
42433- 0 0 0 0 0 0 6 6 6 18 18 18
42434- 50 50 50 158 118 36 192 133 9 200 144 11
42435-216 158 10 219 162 10 224 166 10 226 170 11
42436-230 174 11 236 178 12 239 182 13 239 182 13
42437-242 186 14 246 186 14 246 190 14 246 190 14
42438-246 190 14 246 190 14 246 190 14 246 190 14
42439-246 186 14 230 174 11 210 150 10 163 110 8
42440-104 69 6 10 10 10 2 2 6 2 2 6
42441- 2 2 6 2 2 6 2 2 6 2 2 6
42442- 2 2 6 2 2 6 2 2 6 2 2 6
42443- 2 2 6 2 2 6 2 2 6 2 2 6
42444- 2 2 6 6 6 6 91 60 6 167 114 7
42445-206 145 10 230 174 11 242 186 14 246 190 14
42446-246 190 14 246 190 14 246 186 14 242 186 14
42447-239 182 13 230 174 11 224 166 10 213 154 11
42448-180 133 36 124 112 88 86 86 86 58 58 58
42449- 38 38 38 22 22 22 10 10 10 6 6 6
42450- 0 0 0 0 0 0 0 0 0 0 0 0
42451- 0 0 0 0 0 0 0 0 0 0 0 0
42452- 0 0 0 0 0 0 0 0 0 0 0 0
42453- 0 0 0 0 0 0 0 0 0 14 14 14
42454- 34 34 34 70 70 70 138 110 50 158 118 36
42455-167 114 7 180 123 7 192 133 9 197 138 11
42456-200 144 11 206 145 10 213 154 11 219 162 10
42457-224 166 10 230 174 11 239 182 13 242 186 14
42458-246 186 14 246 186 14 246 186 14 246 186 14
42459-239 182 13 216 158 10 185 133 11 152 99 6
42460-104 69 6 18 14 6 2 2 6 2 2 6
42461- 2 2 6 2 2 6 2 2 6 2 2 6
42462- 2 2 6 2 2 6 2 2 6 2 2 6
42463- 2 2 6 2 2 6 2 2 6 2 2 6
42464- 2 2 6 6 6 6 80 54 7 152 99 6
42465-192 133 9 219 162 10 236 178 12 239 182 13
42466-246 186 14 242 186 14 239 182 13 236 178 12
42467-224 166 10 206 145 10 192 133 9 154 121 60
42468- 94 94 94 62 62 62 42 42 42 22 22 22
42469- 14 14 14 6 6 6 0 0 0 0 0 0
42470- 0 0 0 0 0 0 0 0 0 0 0 0
42471- 0 0 0 0 0 0 0 0 0 0 0 0
42472- 0 0 0 0 0 0 0 0 0 0 0 0
42473- 0 0 0 0 0 0 0 0 0 6 6 6
42474- 18 18 18 34 34 34 58 58 58 78 78 78
42475-101 98 89 124 112 88 142 110 46 156 107 11
42476-163 110 8 167 114 7 175 118 6 180 123 7
42477-185 133 11 197 138 11 210 150 10 219 162 10
42478-226 170 11 236 178 12 236 178 12 234 174 13
42479-219 162 10 197 138 11 163 110 8 130 83 6
42480- 91 60 6 10 10 10 2 2 6 2 2 6
42481- 18 18 18 38 38 38 38 38 38 38 38 38
42482- 38 38 38 38 38 38 38 38 38 38 38 38
42483- 38 38 38 38 38 38 26 26 26 2 2 6
42484- 2 2 6 6 6 6 70 47 6 137 92 6
42485-175 118 6 200 144 11 219 162 10 230 174 11
42486-234 174 13 230 174 11 219 162 10 210 150 10
42487-192 133 9 163 110 8 124 112 88 82 82 82
42488- 50 50 50 30 30 30 14 14 14 6 6 6
42489- 0 0 0 0 0 0 0 0 0 0 0 0
42490- 0 0 0 0 0 0 0 0 0 0 0 0
42491- 0 0 0 0 0 0 0 0 0 0 0 0
42492- 0 0 0 0 0 0 0 0 0 0 0 0
42493- 0 0 0 0 0 0 0 0 0 0 0 0
42494- 6 6 6 14 14 14 22 22 22 34 34 34
42495- 42 42 42 58 58 58 74 74 74 86 86 86
42496-101 98 89 122 102 70 130 98 46 121 87 25
42497-137 92 6 152 99 6 163 110 8 180 123 7
42498-185 133 11 197 138 11 206 145 10 200 144 11
42499-180 123 7 156 107 11 130 83 6 104 69 6
42500- 50 34 6 54 54 54 110 110 110 101 98 89
42501- 86 86 86 82 82 82 78 78 78 78 78 78
42502- 78 78 78 78 78 78 78 78 78 78 78 78
42503- 78 78 78 82 82 82 86 86 86 94 94 94
42504-106 106 106 101 101 101 86 66 34 124 80 6
42505-156 107 11 180 123 7 192 133 9 200 144 11
42506-206 145 10 200 144 11 192 133 9 175 118 6
42507-139 102 15 109 106 95 70 70 70 42 42 42
42508- 22 22 22 10 10 10 0 0 0 0 0 0
42509- 0 0 0 0 0 0 0 0 0 0 0 0
42510- 0 0 0 0 0 0 0 0 0 0 0 0
42511- 0 0 0 0 0 0 0 0 0 0 0 0
42512- 0 0 0 0 0 0 0 0 0 0 0 0
42513- 0 0 0 0 0 0 0 0 0 0 0 0
42514- 0 0 0 0 0 0 6 6 6 10 10 10
42515- 14 14 14 22 22 22 30 30 30 38 38 38
42516- 50 50 50 62 62 62 74 74 74 90 90 90
42517-101 98 89 112 100 78 121 87 25 124 80 6
42518-137 92 6 152 99 6 152 99 6 152 99 6
42519-138 86 6 124 80 6 98 70 6 86 66 30
42520-101 98 89 82 82 82 58 58 58 46 46 46
42521- 38 38 38 34 34 34 34 34 34 34 34 34
42522- 34 34 34 34 34 34 34 34 34 34 34 34
42523- 34 34 34 34 34 34 38 38 38 42 42 42
42524- 54 54 54 82 82 82 94 86 76 91 60 6
42525-134 86 6 156 107 11 167 114 7 175 118 6
42526-175 118 6 167 114 7 152 99 6 121 87 25
42527-101 98 89 62 62 62 34 34 34 18 18 18
42528- 6 6 6 0 0 0 0 0 0 0 0 0
42529- 0 0 0 0 0 0 0 0 0 0 0 0
42530- 0 0 0 0 0 0 0 0 0 0 0 0
42531- 0 0 0 0 0 0 0 0 0 0 0 0
42532- 0 0 0 0 0 0 0 0 0 0 0 0
42533- 0 0 0 0 0 0 0 0 0 0 0 0
42534- 0 0 0 0 0 0 0 0 0 0 0 0
42535- 0 0 0 6 6 6 6 6 6 10 10 10
42536- 18 18 18 22 22 22 30 30 30 42 42 42
42537- 50 50 50 66 66 66 86 86 86 101 98 89
42538-106 86 58 98 70 6 104 69 6 104 69 6
42539-104 69 6 91 60 6 82 62 34 90 90 90
42540- 62 62 62 38 38 38 22 22 22 14 14 14
42541- 10 10 10 10 10 10 10 10 10 10 10 10
42542- 10 10 10 10 10 10 6 6 6 10 10 10
42543- 10 10 10 10 10 10 10 10 10 14 14 14
42544- 22 22 22 42 42 42 70 70 70 89 81 66
42545- 80 54 7 104 69 6 124 80 6 137 92 6
42546-134 86 6 116 81 8 100 82 52 86 86 86
42547- 58 58 58 30 30 30 14 14 14 6 6 6
42548- 0 0 0 0 0 0 0 0 0 0 0 0
42549- 0 0 0 0 0 0 0 0 0 0 0 0
42550- 0 0 0 0 0 0 0 0 0 0 0 0
42551- 0 0 0 0 0 0 0 0 0 0 0 0
42552- 0 0 0 0 0 0 0 0 0 0 0 0
42553- 0 0 0 0 0 0 0 0 0 0 0 0
42554- 0 0 0 0 0 0 0 0 0 0 0 0
42555- 0 0 0 0 0 0 0 0 0 0 0 0
42556- 0 0 0 6 6 6 10 10 10 14 14 14
42557- 18 18 18 26 26 26 38 38 38 54 54 54
42558- 70 70 70 86 86 86 94 86 76 89 81 66
42559- 89 81 66 86 86 86 74 74 74 50 50 50
42560- 30 30 30 14 14 14 6 6 6 0 0 0
42561- 0 0 0 0 0 0 0 0 0 0 0 0
42562- 0 0 0 0 0 0 0 0 0 0 0 0
42563- 0 0 0 0 0 0 0 0 0 0 0 0
42564- 6 6 6 18 18 18 34 34 34 58 58 58
42565- 82 82 82 89 81 66 89 81 66 89 81 66
42566- 94 86 66 94 86 76 74 74 74 50 50 50
42567- 26 26 26 14 14 14 6 6 6 0 0 0
42568- 0 0 0 0 0 0 0 0 0 0 0 0
42569- 0 0 0 0 0 0 0 0 0 0 0 0
42570- 0 0 0 0 0 0 0 0 0 0 0 0
42571- 0 0 0 0 0 0 0 0 0 0 0 0
42572- 0 0 0 0 0 0 0 0 0 0 0 0
42573- 0 0 0 0 0 0 0 0 0 0 0 0
42574- 0 0 0 0 0 0 0 0 0 0 0 0
42575- 0 0 0 0 0 0 0 0 0 0 0 0
42576- 0 0 0 0 0 0 0 0 0 0 0 0
42577- 6 6 6 6 6 6 14 14 14 18 18 18
42578- 30 30 30 38 38 38 46 46 46 54 54 54
42579- 50 50 50 42 42 42 30 30 30 18 18 18
42580- 10 10 10 0 0 0 0 0 0 0 0 0
42581- 0 0 0 0 0 0 0 0 0 0 0 0
42582- 0 0 0 0 0 0 0 0 0 0 0 0
42583- 0 0 0 0 0 0 0 0 0 0 0 0
42584- 0 0 0 6 6 6 14 14 14 26 26 26
42585- 38 38 38 50 50 50 58 58 58 58 58 58
42586- 54 54 54 42 42 42 30 30 30 18 18 18
42587- 10 10 10 0 0 0 0 0 0 0 0 0
42588- 0 0 0 0 0 0 0 0 0 0 0 0
42589- 0 0 0 0 0 0 0 0 0 0 0 0
42590- 0 0 0 0 0 0 0 0 0 0 0 0
42591- 0 0 0 0 0 0 0 0 0 0 0 0
42592- 0 0 0 0 0 0 0 0 0 0 0 0
42593- 0 0 0 0 0 0 0 0 0 0 0 0
42594- 0 0 0 0 0 0 0 0 0 0 0 0
42595- 0 0 0 0 0 0 0 0 0 0 0 0
42596- 0 0 0 0 0 0 0 0 0 0 0 0
42597- 0 0 0 0 0 0 0 0 0 6 6 6
42598- 6 6 6 10 10 10 14 14 14 18 18 18
42599- 18 18 18 14 14 14 10 10 10 6 6 6
42600- 0 0 0 0 0 0 0 0 0 0 0 0
42601- 0 0 0 0 0 0 0 0 0 0 0 0
42602- 0 0 0 0 0 0 0 0 0 0 0 0
42603- 0 0 0 0 0 0 0 0 0 0 0 0
42604- 0 0 0 0 0 0 0 0 0 6 6 6
42605- 14 14 14 18 18 18 22 22 22 22 22 22
42606- 18 18 18 14 14 14 10 10 10 6 6 6
42607- 0 0 0 0 0 0 0 0 0 0 0 0
42608- 0 0 0 0 0 0 0 0 0 0 0 0
42609- 0 0 0 0 0 0 0 0 0 0 0 0
42610- 0 0 0 0 0 0 0 0 0 0 0 0
42611- 0 0 0 0 0 0 0 0 0 0 0 0
+[... raw image data: several hundred lines of ASCII RGB pixel triplets (rows of the replacement bitmap) added by this hunk; values omitted ...]
43243+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
43244+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
43245+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
43246+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
43247+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
43248+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
43249+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
43250+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
43251+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
43252+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
43253+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43254+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43255+4 4 4 4 4 4
43256+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
43257+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
43258+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
43259+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
43260+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
43261+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
43262+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
43263+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
43264+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
43265+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
43266+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
43267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43268+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43269+4 4 4 4 4 4
43270+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
43271+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
43272+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
43273+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
43274+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
43275+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
43276+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
43277+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
43278+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
43279+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
43280+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
43281+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43282+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43283+4 4 4 4 4 4
43284+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
43285+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
43286+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
43287+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
43288+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
43289+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
43290+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
43291+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
43292+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
43293+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
43294+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
43295+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43296+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43297+4 4 4 4 4 4
43298+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
43299+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
43300+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
43301+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
43302+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
43303+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
43304+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
43305+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
43306+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
43307+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
43308+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
43309+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43310+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43311+4 4 4 4 4 4
43312+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
43313+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
43314+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
43315+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
43316+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
43317+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
43318+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
43319+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
43320+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
43321+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
43322+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
43323+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43324+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43325+4 4 4 4 4 4
43326+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
43327+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
43328+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
43329+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
43330+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
43331+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
43332+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
43333+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
43334+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
43335+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
43336+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
43337+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43338+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43339+4 4 4 4 4 4
43340+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
43341+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
43342+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
43343+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
43344+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
43345+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
43346+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
43347+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
43348+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
43349+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
43350+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
43351+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43352+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43353+4 4 4 4 4 4
43354+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
43355+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
43356+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
43357+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
43358+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
43359+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
43360+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
43361+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
43362+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
43363+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
43364+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
43365+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43366+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43367+4 4 4 4 4 4
43368+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
43369+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
43370+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
43371+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
43372+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
43373+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
43374+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
43375+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
43376+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
43377+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
43378+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
43379+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43380+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43381+4 4 4 4 4 4
43382+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
43383+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
43384+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
43385+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
43386+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
43387+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
43388+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
43389+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
43390+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
43391+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
43392+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43393+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43394+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43395+4 4 4 4 4 4
43396+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
43397+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
43398+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
43399+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
43400+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
43401+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
43402+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43403+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
43404+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
43405+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
43406+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
43407+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43408+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43409+4 4 4 4 4 4
43410+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
43411+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
43412+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
43413+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
43414+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
43415+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
43416+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
43417+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
43418+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
43419+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
43420+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43421+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43422+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43423+4 4 4 4 4 4
43424+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
43425+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
43426+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
43427+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
43428+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
43429+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
43430+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
43431+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
43432+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
43433+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
43434+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43435+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43436+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43437+4 4 4 4 4 4
43438+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
43439+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
43440+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
43441+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
43442+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
43443+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
43444+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
43445+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
43446+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
43447+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
43448+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43449+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43450+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43451+4 4 4 4 4 4
43452+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
43453+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
43454+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
43455+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
43456+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
43457+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
43458+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
43459+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
43460+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
43461+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
43462+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43463+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43464+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43465+4 4 4 4 4 4
43466+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
43467+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
43468+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
43469+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
43470+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
43471+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
43472+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
43473+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
43474+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
43475+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
43476+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43477+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43478+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43479+4 4 4 4 4 4
43480+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
43481+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
43482+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
43483+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
43484+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
43485+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
43486+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
43487+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
43488+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
43489+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43490+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43491+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43492+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43493+4 4 4 4 4 4
43494+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
43495+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
43496+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
43497+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
43498+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
43499+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
43500+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
43501+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
43502+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
43503+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43504+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43505+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43506+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43507+4 4 4 4 4 4
43508+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
43509+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
43510+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
43511+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
43512+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
43513+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
43514+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
43515+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
43516+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43517+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43518+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43519+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43520+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43521+4 4 4 4 4 4
43522+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
43523+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
43524+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
43525+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
43526+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
43527+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
43528+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
43529+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
43530+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43531+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43532+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43533+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43534+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43535+4 4 4 4 4 4
43536+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
43537+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
43538+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
43539+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
43540+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
43541+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
43542+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
43543+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
43544+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43545+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43546+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43547+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43548+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43549+4 4 4 4 4 4
43550+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
43551+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
43552+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
43553+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
43554+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
43555+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
43556+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
43557+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
43558+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43559+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43560+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43561+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43562+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43563+4 4 4 4 4 4
43564+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43565+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
43566+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
43567+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
43568+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
43569+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
43570+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
43571+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
43572+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43573+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43574+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43575+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43576+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43577+4 4 4 4 4 4
43578+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43579+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
43580+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
43581+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
43582+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
43583+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
43584+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
43585+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
43586+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43587+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43588+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43589+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43590+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43591+4 4 4 4 4 4
43592+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43593+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43594+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
43595+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
43596+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
43597+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
43598+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
43599+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
43600+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43601+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43602+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43603+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43604+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43605+4 4 4 4 4 4
43606+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43607+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43608+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
43609+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
43610+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
43611+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
43612+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
43613+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43614+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43615+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43616+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43617+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43618+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43619+4 4 4 4 4 4
43620+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43621+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43622+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43623+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
43624+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
43625+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
43626+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
43627+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43628+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43629+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43630+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43631+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43632+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43633+4 4 4 4 4 4
43634+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43635+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43636+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43637+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
43638+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
43639+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
43640+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
43641+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43642+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43643+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43644+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43645+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43646+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43647+4 4 4 4 4 4
43648+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43649+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43650+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43651+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
43652+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
43653+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
43654+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
43655+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43656+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43657+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43658+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43659+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43660+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43661+4 4 4 4 4 4
43662+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43663+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43664+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43665+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
43666+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
43667+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
43668+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
43669+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43670+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43671+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43672+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43673+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43674+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43675+4 4 4 4 4 4
43676+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43677+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43678+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43679+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43680+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
43681+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
43682+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
43683+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43684+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43685+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43686+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43687+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43688+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43689+4 4 4 4 4 4
43690+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43691+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43692+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43693+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43694+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
43695+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
43696+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43697+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43698+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43699+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43700+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43701+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43702+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43703+4 4 4 4 4 4
43704+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43705+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43706+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43707+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43708+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
43709+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
43710+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43711+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43712+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43713+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43714+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43715+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43716+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43717+4 4 4 4 4 4
43718+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43719+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43720+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43721+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43722+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
43723+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
43724+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43725+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43726+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43727+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43728+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43729+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43730+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43731+4 4 4 4 4 4
43732diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
43733index 86d449e..af6a7f7 100644
43734--- a/drivers/video/udlfb.c
43735+++ b/drivers/video/udlfb.c
43736@@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
43737 dlfb_urb_completion(urb);
43738
43739 error:
43740- atomic_add(bytes_sent, &dev->bytes_sent);
43741- atomic_add(bytes_identical, &dev->bytes_identical);
43742- atomic_add(width*height*2, &dev->bytes_rendered);
43743+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
43744+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
43745+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
43746 end_cycles = get_cycles();
43747- atomic_add(((unsigned int) ((end_cycles - start_cycles)
43748+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
43749 >> 10)), /* Kcycles */
43750 &dev->cpu_kcycles_used);
43751
43752@@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
43753 dlfb_urb_completion(urb);
43754
43755 error:
43756- atomic_add(bytes_sent, &dev->bytes_sent);
43757- atomic_add(bytes_identical, &dev->bytes_identical);
43758- atomic_add(bytes_rendered, &dev->bytes_rendered);
43759+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
43760+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
43761+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
43762 end_cycles = get_cycles();
43763- atomic_add(((unsigned int) ((end_cycles - start_cycles)
43764+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
43765 >> 10)), /* Kcycles */
43766 &dev->cpu_kcycles_used);
43767 }
43768@@ -1372,7 +1372,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
43769 struct fb_info *fb_info = dev_get_drvdata(fbdev);
43770 struct dlfb_data *dev = fb_info->par;
43771 return snprintf(buf, PAGE_SIZE, "%u\n",
43772- atomic_read(&dev->bytes_rendered));
43773+ atomic_read_unchecked(&dev->bytes_rendered));
43774 }
43775
43776 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
43777@@ -1380,7 +1380,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
43778 struct fb_info *fb_info = dev_get_drvdata(fbdev);
43779 struct dlfb_data *dev = fb_info->par;
43780 return snprintf(buf, PAGE_SIZE, "%u\n",
43781- atomic_read(&dev->bytes_identical));
43782+ atomic_read_unchecked(&dev->bytes_identical));
43783 }
43784
43785 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
43786@@ -1388,7 +1388,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
43787 struct fb_info *fb_info = dev_get_drvdata(fbdev);
43788 struct dlfb_data *dev = fb_info->par;
43789 return snprintf(buf, PAGE_SIZE, "%u\n",
43790- atomic_read(&dev->bytes_sent));
43791+ atomic_read_unchecked(&dev->bytes_sent));
43792 }
43793
43794 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
43795@@ -1396,7 +1396,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
43796 struct fb_info *fb_info = dev_get_drvdata(fbdev);
43797 struct dlfb_data *dev = fb_info->par;
43798 return snprintf(buf, PAGE_SIZE, "%u\n",
43799- atomic_read(&dev->cpu_kcycles_used));
43800+ atomic_read_unchecked(&dev->cpu_kcycles_used));
43801 }
43802
43803 static ssize_t edid_show(
43804@@ -1456,10 +1456,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
43805 struct fb_info *fb_info = dev_get_drvdata(fbdev);
43806 struct dlfb_data *dev = fb_info->par;
43807
43808- atomic_set(&dev->bytes_rendered, 0);
43809- atomic_set(&dev->bytes_identical, 0);
43810- atomic_set(&dev->bytes_sent, 0);
43811- atomic_set(&dev->cpu_kcycles_used, 0);
43812+ atomic_set_unchecked(&dev->bytes_rendered, 0);
43813+ atomic_set_unchecked(&dev->bytes_identical, 0);
43814+ atomic_set_unchecked(&dev->bytes_sent, 0);
43815+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
43816
43817 return count;
43818 }
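
The udlfb hunks above belong to PaX's REFCOUNT hardening: atomic_t gains overflow detection, and counters where wraparound is harmless (pure statistics such as bytes_sent or cpu_kcycles_used) are switched to the opt-out atomic_unchecked_t type so they never trip the overflow handler. A minimal user-space sketch of the unchecked variant, assuming the plain-int fallback (the real PaX definitions are per-architecture):

	/* counter type that deliberately skips overflow detection */
	typedef struct { int counter; } atomic_unchecked_t;

	static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
	{
		v->counter += i;	/* wraparound is tolerated here */
	}

	static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
	{
		return v->counter;
	}

	static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
	{
		v->counter = i;
	}
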
43819diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
43820index 2f8f82d..191de37 100644
43821--- a/drivers/video/uvesafb.c
43822+++ b/drivers/video/uvesafb.c
43823@@ -19,6 +19,7 @@
43824 #include <linux/io.h>
43825 #include <linux/mutex.h>
43826 #include <linux/slab.h>
43827+#include <linux/moduleloader.h>
43828 #include <video/edid.h>
43829 #include <video/uvesafb.h>
43830 #ifdef CONFIG_X86
43831@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
43832 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
43833 par->pmi_setpal = par->ypan = 0;
43834 } else {
43835+
43836+#ifdef CONFIG_PAX_KERNEXEC
43837+#ifdef CONFIG_MODULES
43838+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
43839+#endif
43840+ if (!par->pmi_code) {
43841+ par->pmi_setpal = par->ypan = 0;
43842+ return 0;
43843+ }
43844+#endif
43845+
43846 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
43847 + task->t.regs.edi);
43848+
43849+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
43850+ pax_open_kernel();
43851+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
43852+ pax_close_kernel();
43853+
43854+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
43855+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
43856+#else
43857 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
43858 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
43859+#endif
43860+
43861 printk(KERN_INFO "uvesafb: protected mode interface info at "
43862 "%04x:%04x\n",
43863 (u16)task->t.regs.es, (u16)task->t.regs.edi);
43864@@ -818,13 +841,14 @@ static int __devinit uvesafb_vbe_init(struct fb_info *info)
43865 par->ypan = ypan;
43866
43867 if (par->pmi_setpal || par->ypan) {
43868+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
43869 if (__supported_pte_mask & _PAGE_NX) {
43870 par->pmi_setpal = par->ypan = 0;
43871 printk(KERN_WARNING "uvesafb: NX protection is actively."
43872 "We have better not to use the PMI.\n");
43873- } else {
43874+ } else
43875+#endif
43876 uvesafb_vbe_getpmi(task, par);
43877- }
43878 }
43879 #else
43880 /* The protected mode interface is not available on non-x86. */
43881@@ -1838,6 +1862,11 @@ out:
43882 if (par->vbe_modes)
43883 kfree(par->vbe_modes);
43884
43885+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
43886+ if (par->pmi_code)
43887+ module_free_exec(NULL, par->pmi_code);
43888+#endif
43889+
43890 framebuffer_release(info);
43891 return err;
43892 }
43893@@ -1864,6 +1893,12 @@ static int uvesafb_remove(struct platform_device *dev)
43894 kfree(par->vbe_state_orig);
43895 if (par->vbe_state_saved)
43896 kfree(par->vbe_state_saved);
43897+
43898+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
43899+ if (par->pmi_code)
43900+ module_free_exec(NULL, par->pmi_code);
43901+#endif
43902+
43903 }
43904
43905 framebuffer_release(info);
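
The uvesafb changes work around KERNEXEC: the x86 BIOS "protected mode interface" (PMI) code would otherwise run straight out of the direct mapping, which KERNEXEC makes non-executable, so the patch copies it into executable module memory and frees that copy on the error and removal paths. A condensed sketch of the pattern, reusing the helper names from the hunks above (error handling trimmed, fields as in the patch):

	u16 pmi_size = (u16)task->t.regs.ecx;		/* size reported by the BIOS */
	par->pmi_code = module_alloc_exec(pmi_size);	/* executable allocation */
	if (par->pmi_code) {
		pax_open_kernel();			/* briefly permit the write */
		memcpy(par->pmi_code, par->pmi_base, pmi_size);
		pax_close_kernel();
		/* translate to the executable alias of the mapping */
		par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
		par->pmi_pal   = ktva_ktla(par->pmi_code + par->pmi_base[2]);
	}
	/* teardown, mirrored on both exit paths: */
	if (par->pmi_code)
		module_free_exec(NULL, par->pmi_code);

Note also the !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC) guard in uvesafb_vbe_init: once the PMI runs from its own executable copy, the NX fallback that previously disabled the PMI is only needed when that copy cannot be made.
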
43906diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
43907index 501b340..86bd4cf 100644
43908--- a/drivers/video/vesafb.c
43909+++ b/drivers/video/vesafb.c
43910@@ -9,6 +9,7 @@
43911 */
43912
43913 #include <linux/module.h>
43914+#include <linux/moduleloader.h>
43915 #include <linux/kernel.h>
43916 #include <linux/errno.h>
43917 #include <linux/string.h>
43918@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
43919 static int vram_total __initdata; /* Set total amount of memory */
43920 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
43921 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
43922-static void (*pmi_start)(void) __read_mostly;
43923-static void (*pmi_pal) (void) __read_mostly;
43924+static void (*pmi_start)(void) __read_only;
43925+static void (*pmi_pal) (void) __read_only;
43926 static int depth __read_mostly;
43927 static int vga_compat __read_mostly;
43928 /* --------------------------------------------------------------------- */
43929@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
43930 unsigned int size_vmode;
43931 unsigned int size_remap;
43932 unsigned int size_total;
43933+ void *pmi_code = NULL;
43934
43935 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
43936 return -ENODEV;
43937@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
43938 size_remap = size_total;
43939 vesafb_fix.smem_len = size_remap;
43940
43941-#ifndef __i386__
43942- screen_info.vesapm_seg = 0;
43943-#endif
43944-
43945 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
43946 printk(KERN_WARNING
43947 "vesafb: cannot reserve video memory at 0x%lx\n",
43948@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
43949 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
43950 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
43951
43952+#ifdef __i386__
43953+
43954+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
43955+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
43956+ if (!pmi_code)
43957+#elif !defined(CONFIG_PAX_KERNEXEC)
43958+ if (0)
43959+#endif
43960+
43961+#endif
43962+ screen_info.vesapm_seg = 0;
43963+
43964 if (screen_info.vesapm_seg) {
43965- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
43966- screen_info.vesapm_seg,screen_info.vesapm_off);
43967+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
43968+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
43969 }
43970
43971 if (screen_info.vesapm_seg < 0xc000)
43972@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
43973
43974 if (ypan || pmi_setpal) {
43975 unsigned short *pmi_base;
43976+
43977 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
43978- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
43979- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
43980+
43981+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
43982+ pax_open_kernel();
43983+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
43984+#else
43985+ pmi_code = pmi_base;
43986+#endif
43987+
43988+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
43989+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
43990+
43991+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
43992+ pmi_start = ktva_ktla(pmi_start);
43993+ pmi_pal = ktva_ktla(pmi_pal);
43994+ pax_close_kernel();
43995+#endif
43996+
43997 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
43998 if (pmi_base[3]) {
43999 printk(KERN_INFO "vesafb: pmi: ports = ");
44000@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
44001 info->node, info->fix.id);
44002 return 0;
44003 err:
44004+
44005+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
44006+ module_free_exec(NULL, pmi_code);
44007+#endif
44008+
44009 if (info->screen_base)
44010 iounmap(info->screen_base);
44011 framebuffer_release(info);
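
vesafb gets the analogous PMI treatment, plus one extra step: pmi_start and pmi_pal go from __read_mostly to __read_only, so these function pointers can only be assigned inside the pax_open_kernel()/pax_close_kernel() window during probe and sit on read-only pages afterwards. A hedged sketch of what the annotation is assumed to expand to (grsecurity places such objects in a section that is remapped read-only after init):

	#define __read_only	__attribute__((__section__(".data..read_only")))

An attacker with an arbitrary kernel write can then no longer redirect these pointers, which matters because both are later called with full kernel privileges.
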
44012diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
44013index 88714ae..16c2e11 100644
44014--- a/drivers/video/via/via_clock.h
44015+++ b/drivers/video/via/via_clock.h
44016@@ -56,7 +56,7 @@ struct via_clock {
44017
44018 void (*set_engine_pll_state)(u8 state);
44019 void (*set_engine_pll)(struct via_pll_config config);
44020-};
44021+} __no_const;
44022
44023
44024 static inline u32 get_pll_internal_frequency(u32 ref_freq,
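
Tagging struct via_clock with __no_const ties into PaX's "constify" GCC plugin: structures consisting purely of function pointers are const-ified automatically, and __no_const opts this one out because its members are assigned at runtime. The marker is assumed to reduce to a plugin-recognized attribute, roughly:

	#ifdef CONSTIFY_PLUGIN
	#define __no_const __attribute__((no_const))
	#else
	#define __no_const
	#endif
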
44025diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
44026index 6b1b7e1..b2fa4d5 100644
44027--- a/drivers/virtio/virtio_mmio.c
44028+++ b/drivers/virtio/virtio_mmio.c
44029@@ -530,7 +530,7 @@ static int vm_cmdline_set(const char *device,
44030
44031 resources[0].end = memparse(device, &str) - 1;
44032
44033- processed = sscanf(str, "@%lli:%u%n:%d%n",
44034+ processed = sscanf(str, "@%lli:%llu%n:%d%n",
44035 &base, &resources[1].start, &consumed,
44036 &vm_cmdline_id, &consumed);
44037
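
The virtio_mmio change is a plain format-string width fix: resources[1].start is a resource_size_t, which is 64 bits wide on PHYS_ADDR_T_64BIT configurations, so scanning it with %u would write only the low 32 bits of the field; %llu matches the destination width. A small stand-alone illustration of why the conversion width must match:

	#include <stdio.h>

	int main(void)
	{
		long long base = 0;
		unsigned long long start = 0;	/* stands in for resource_size_t */
		int id = 0;

		/* conversion widths now match the destination types */
		sscanf("@0x1000:4096:7", "@%lli:%llu:%d", &base, &start, &id);
		printf("base=%lld start=%llu id=%d\n", base, start, id);
		return 0;
	}
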
44038diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
44039index fef20db..d28b1ab 100644
44040--- a/drivers/xen/xenfs/xenstored.c
44041+++ b/drivers/xen/xenfs/xenstored.c
44042@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
44043 static int xsd_kva_open(struct inode *inode, struct file *file)
44044 {
44045 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
44046+#ifdef CONFIG_GRKERNSEC_HIDESYM
44047+ NULL);
44048+#else
44049 xen_store_interface);
44050+#endif
44051+
44052 if (!file->private_data)
44053 return -ENOMEM;
44054 return 0;
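
The xenstored change is a GRKERNSEC_HIDESYM information-leak fix: the kva file previously handed userspace the kernel virtual address of the xenstore interface page via "%p", a ready-made defeat for kernel address-space secrecy. With HIDESYM enabled the same format string is fed a NULL pointer, so the file keeps its shape but discloses nothing.
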
44055diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
44056index 890bed5..17ae73e 100644
44057--- a/fs/9p/vfs_inode.c
44058+++ b/fs/9p/vfs_inode.c
44059@@ -1329,7 +1329,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
44060 void
44061 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
44062 {
44063- char *s = nd_get_link(nd);
44064+ const char *s = nd_get_link(nd);
44065
44066 p9_debug(P9_DEBUG_VFS, " %s %s\n",
44067 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
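
The v9fs change (and the identical befs change further down) is const-correctness: the string returned by nd_get_link() is the cached symlink body, which follow_link consumers must treat as read-only, and typing it const char * lets the compiler enforce that.
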
44068diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
44069index 0efd152..b5802ad 100644
44070--- a/fs/Kconfig.binfmt
44071+++ b/fs/Kconfig.binfmt
44072@@ -89,7 +89,7 @@ config HAVE_AOUT
44073
44074 config BINFMT_AOUT
44075 tristate "Kernel support for a.out and ECOFF binaries"
44076- depends on HAVE_AOUT
44077+ depends on HAVE_AOUT && BROKEN
44078 ---help---
44079 A.out (Assembler.OUTput) is a set of formats for libraries and
44080 executables used in the earliest versions of UNIX. Linux used
44081diff --git a/fs/aio.c b/fs/aio.c
44082index 71f613c..9d01f1f 100644
44083--- a/fs/aio.c
44084+++ b/fs/aio.c
44085@@ -111,7 +111,7 @@ static int aio_setup_ring(struct kioctx *ctx)
44086 size += sizeof(struct io_event) * nr_events;
44087 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
44088
44089- if (nr_pages < 0)
44090+ if (nr_pages <= 0)
44091 return -EINVAL;
44092
44093 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
44094@@ -1373,18 +1373,19 @@ static ssize_t aio_fsync(struct kiocb *iocb)
44095 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
44096 {
44097 ssize_t ret;
44098+ struct iovec iovstack;
44099
44100 #ifdef CONFIG_COMPAT
44101 if (compat)
44102 ret = compat_rw_copy_check_uvector(type,
44103 (struct compat_iovec __user *)kiocb->ki_buf,
44104- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
44105+ kiocb->ki_nbytes, 1, &iovstack,
44106 &kiocb->ki_iovec);
44107 else
44108 #endif
44109 ret = rw_copy_check_uvector(type,
44110 (struct iovec __user *)kiocb->ki_buf,
44111- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
44112+ kiocb->ki_nbytes, 1, &iovstack,
44113 &kiocb->ki_iovec);
44114 if (ret < 0)
44115 goto out;
44116@@ -1393,6 +1394,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
44117 if (ret < 0)
44118 goto out;
44119
44120+ if (kiocb->ki_iovec == &iovstack) {
44121+ kiocb->ki_inline_vec = iovstack;
44122+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
44123+ }
44124 kiocb->ki_nr_segs = kiocb->ki_nbytes;
44125 kiocb->ki_cur_seg = 0;
44126 /* ki_nbytes/left now reflect bytes instead of segs */
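
Two separate fixes land in fs/aio.c. First, the ring-setup bound becomes nr_pages <= 0: the page count is derived from a caller-influenced size, and an arithmetic result of zero would previously have slipped past the < 0 check. Second, the vectored read/write setup validates the user iovec into a local stack copy (iovstack) and only commits it to kiocb->ki_inline_vec after the checks succeed, instead of letting rw_copy_check_uvector() write into the kiocb's embedded vector directly; the final pointer fix-up keeps ki_iovec valid once iovstack goes out of scope.
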
44127diff --git a/fs/attr.c b/fs/attr.c
44128index cce7df5..eaa2731 100644
44129--- a/fs/attr.c
44130+++ b/fs/attr.c
44131@@ -100,6 +100,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
44132 unsigned long limit;
44133
44134 limit = rlimit(RLIMIT_FSIZE);
44135+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
44136 if (limit != RLIM_INFINITY && offset > limit)
44137 goto out_sig;
44138 if (offset > inode->i_sb->s_maxbytes)
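
gr_learn_resource() feeds grsecurity's RBAC learning mode: each time a task bumps against a resource-limit check, the requested amount is recorded so a learning run can emit a policy whose limits fit observed behavior. The prototype, as assumed from grsecurity headers of this era (hedged):

	void gr_learn_resource(const struct task_struct *task, const int res,
			       const unsigned long wanted, const int gt);

Here res is the RLIMIT_* index, wanted is the amount the task tried to use, and gt selects whether the eventual comparison is strict.
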
44139diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
44140index dce436e..55e670d 100644
44141--- a/fs/autofs4/waitq.c
44142+++ b/fs/autofs4/waitq.c
44143@@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
44144 {
44145 unsigned long sigpipe, flags;
44146 mm_segment_t fs;
44147- const char *data = (const char *)addr;
44148+ const char __user *data = (const char __force_user *)addr;
44149 ssize_t wr = 0;
44150
44151 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
44152@@ -347,6 +347,10 @@ static int validate_request(struct autofs_wait_queue **wait,
44153 return 1;
44154 }
44155
44156+#ifdef CONFIG_GRKERNSEC_HIDESYM
44157+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
44158+#endif
44159+
44160 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
44161 enum autofs_notify notify)
44162 {
44163@@ -380,7 +384,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
44164
44165 /* If this is a direct mount request create a dummy name */
44166 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
44167+#ifdef CONFIG_GRKERNSEC_HIDESYM
44168+ /* this name does get written to userland via autofs4_write() */
44169+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
44170+#else
44171 qstr.len = sprintf(name, "%p", dentry);
44172+#endif
44173 else {
44174 qstr.len = autofs4_getpath(sbi, dentry, &name);
44175 if (!qstr.len) {
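
Another HIDESYM fix: the dummy name for direct-mount requests was sprintf("%p", dentry), and since autofs4_write() sends that name to the userspace daemon, it leaked a live kernel heap address. The replacement derives the name from a global counter (an atomic_unchecked_t, wraparound again being harmless), which stays effectively unique per request without disclosing anything.
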
44176diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
44177index 2b3bda8..6a2d4be 100644
44178--- a/fs/befs/linuxvfs.c
44179+++ b/fs/befs/linuxvfs.c
44180@@ -510,7 +510,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
44181 {
44182 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
44183 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
44184- char *link = nd_get_link(nd);
44185+ const char *link = nd_get_link(nd);
44186 if (!IS_ERR(link))
44187 kfree(link);
44188 }
44189diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
44190index 0e7a6f8..332b1ca 100644
44191--- a/fs/binfmt_aout.c
44192+++ b/fs/binfmt_aout.c
44193@@ -16,6 +16,7 @@
44194 #include <linux/string.h>
44195 #include <linux/fs.h>
44196 #include <linux/file.h>
44197+#include <linux/security.h>
44198 #include <linux/stat.h>
44199 #include <linux/fcntl.h>
44200 #include <linux/ptrace.h>
44201@@ -59,6 +60,8 @@ static int aout_core_dump(struct coredump_params *cprm)
44202 #endif
44203 # define START_STACK(u) ((void __user *)u.start_stack)
44204
44205+ memset(&dump, 0, sizeof(dump));
44206+
44207 fs = get_fs();
44208 set_fs(KERNEL_DS);
44209 has_dumped = 1;
44210@@ -70,10 +73,12 @@ static int aout_core_dump(struct coredump_params *cprm)
44211
44212 /* If the size of the dump file exceeds the rlimit, then see what would happen
44213 if we wrote the stack, but not the data area. */
44214+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
44215 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
44216 dump.u_dsize = 0;
44217
44218 /* Make sure we have enough room to write the stack and data areas. */
44219+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
44220 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
44221 dump.u_ssize = 0;
44222
44223@@ -233,6 +238,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
44224 rlim = rlimit(RLIMIT_DATA);
44225 if (rlim >= RLIM_INFINITY)
44226 rlim = ~0;
44227+
44228+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
44229 if (ex.a_data + ex.a_bss > rlim)
44230 return -ENOMEM;
44231
44232@@ -267,6 +274,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
44233
44234 install_exec_creds(bprm);
44235
44236+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44237+ current->mm->pax_flags = 0UL;
44238+#endif
44239+
44240+#ifdef CONFIG_PAX_PAGEEXEC
44241+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
44242+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
44243+
44244+#ifdef CONFIG_PAX_EMUTRAMP
44245+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
44246+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
44247+#endif
44248+
44249+#ifdef CONFIG_PAX_MPROTECT
44250+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
44251+ current->mm->pax_flags |= MF_PAX_MPROTECT;
44252+#endif
44253+
44254+ }
44255+#endif
44256+
44257 if (N_MAGIC(ex) == OMAGIC) {
44258 unsigned long text_addr, map_size;
44259 loff_t pos;
44260@@ -332,7 +360,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
44261 }
44262
44263 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
44264- PROT_READ | PROT_WRITE | PROT_EXEC,
44265+ PROT_READ | PROT_WRITE,
44266 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
44267 fd_offset + ex.a_text);
44268 if (error != N_DATADDR(ex)) {
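
The a.out loader picks up several fixes at once: the Kconfig hunk above marks BINFMT_AOUT as depending on BROKEN so the legacy loader is not built by default; memset(&dump, 0, sizeof(dump)) stops uninitialized fields and struct padding from being written into core dumps (a kernel-stack infoleak); gr_learn_resource() records the RLIMIT_CORE and RLIMIT_DATA demands for learning mode; the PAGEEXEC/EMUTRAMP/MPROTECT bits of the executable's N_FLAGS are translated into per-mm pax_flags; and the data segment is now mapped PROT_READ | PROT_WRITE without PROT_EXEC, enforcing W^X on a.out data.
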
44269diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
44270index fbd9f60..d4edac0 100644
44271--- a/fs/binfmt_elf.c
44272+++ b/fs/binfmt_elf.c
44273@@ -33,6 +33,7 @@
44274 #include <linux/elf.h>
44275 #include <linux/utsname.h>
44276 #include <linux/coredump.h>
44277+#include <linux/xattr.h>
44278 #include <asm/uaccess.h>
44279 #include <asm/param.h>
44280 #include <asm/page.h>
44281@@ -59,6 +60,10 @@ static int elf_core_dump(struct coredump_params *cprm);
44282 #define elf_core_dump NULL
44283 #endif
44284
44285+#ifdef CONFIG_PAX_MPROTECT
44286+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
44287+#endif
44288+
44289 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
44290 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
44291 #else
44292@@ -78,6 +83,11 @@ static struct linux_binfmt elf_format = {
44293 .load_binary = load_elf_binary,
44294 .load_shlib = load_elf_library,
44295 .core_dump = elf_core_dump,
44296+
44297+#ifdef CONFIG_PAX_MPROTECT
44298+ .handle_mprotect= elf_handle_mprotect,
44299+#endif
44300+
44301 .min_coredump = ELF_EXEC_PAGESIZE,
44302 };
44303
44304@@ -85,6 +95,8 @@ static struct linux_binfmt elf_format = {
44305
44306 static int set_brk(unsigned long start, unsigned long end)
44307 {
44308+ unsigned long e = end;
44309+
44310 start = ELF_PAGEALIGN(start);
44311 end = ELF_PAGEALIGN(end);
44312 if (end > start) {
44313@@ -93,7 +105,7 @@ static int set_brk(unsigned long start, unsigned long end)
44314 if (BAD_ADDR(addr))
44315 return addr;
44316 }
44317- current->mm->start_brk = current->mm->brk = end;
44318+ current->mm->start_brk = current->mm->brk = e;
44319 return 0;
44320 }
44321
44322@@ -154,12 +166,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
44323 elf_addr_t __user *u_rand_bytes;
44324 const char *k_platform = ELF_PLATFORM;
44325 const char *k_base_platform = ELF_BASE_PLATFORM;
44326- unsigned char k_rand_bytes[16];
44327+ u32 k_rand_bytes[4];
44328 int items;
44329 elf_addr_t *elf_info;
44330 int ei_index = 0;
44331 const struct cred *cred = current_cred();
44332 struct vm_area_struct *vma;
44333+ unsigned long saved_auxv[AT_VECTOR_SIZE];
44334
44335 /*
44336 * In some cases (e.g. Hyper-Threading), we want to avoid L1
44337@@ -201,8 +214,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
44338 * Generate 16 random bytes for userspace PRNG seeding.
44339 */
44340 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
44341- u_rand_bytes = (elf_addr_t __user *)
44342- STACK_ALLOC(p, sizeof(k_rand_bytes));
44343+ srandom32(k_rand_bytes[0] ^ random32());
44344+ srandom32(k_rand_bytes[1] ^ random32());
44345+ srandom32(k_rand_bytes[2] ^ random32());
44346+ srandom32(k_rand_bytes[3] ^ random32());
44347+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
44348+ u_rand_bytes = (elf_addr_t __user *) p;
44349 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
44350 return -EFAULT;
44351
44352@@ -314,9 +331,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
44353 return -EFAULT;
44354 current->mm->env_end = p;
44355
44356+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
44357+
44358 /* Put the elf_info on the stack in the right place. */
44359 sp = (elf_addr_t __user *)envp + 1;
44360- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
44361+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
44362 return -EFAULT;
44363 return 0;
44364 }
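
In create_elf_tables() the AT_RANDOM bytes are now generated as four u32 values, each additionally xor-folded into srandom32() (reseeding the fast PRNG as a side effect), and the vector is placed at an explicitly rounded stack position instead of via STACK_ALLOC. The auxiliary vector is likewise staged through a kernel-stack copy (saved_auxv) before copy_to_user(), so the version written to the new process's stack is a stable snapshot of elf_info.
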
44365@@ -380,15 +399,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
44366 an ELF header */
44367
44368 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
44369- struct file *interpreter, unsigned long *interp_map_addr,
44370- unsigned long no_base)
44371+ struct file *interpreter, unsigned long no_base)
44372 {
44373 struct elf_phdr *elf_phdata;
44374 struct elf_phdr *eppnt;
44375- unsigned long load_addr = 0;
44376+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
44377 int load_addr_set = 0;
44378 unsigned long last_bss = 0, elf_bss = 0;
44379- unsigned long error = ~0UL;
44380+ unsigned long error = -EINVAL;
44381 unsigned long total_size;
44382 int retval, i, size;
44383
44384@@ -434,6 +452,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
44385 goto out_close;
44386 }
44387
44388+#ifdef CONFIG_PAX_SEGMEXEC
44389+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
44390+ pax_task_size = SEGMEXEC_TASK_SIZE;
44391+#endif
44392+
44393 eppnt = elf_phdata;
44394 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
44395 if (eppnt->p_type == PT_LOAD) {
44396@@ -457,8 +480,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
44397 map_addr = elf_map(interpreter, load_addr + vaddr,
44398 eppnt, elf_prot, elf_type, total_size);
44399 total_size = 0;
44400- if (!*interp_map_addr)
44401- *interp_map_addr = map_addr;
44402 error = map_addr;
44403 if (BAD_ADDR(map_addr))
44404 goto out_close;
44405@@ -477,8 +498,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
44406 k = load_addr + eppnt->p_vaddr;
44407 if (BAD_ADDR(k) ||
44408 eppnt->p_filesz > eppnt->p_memsz ||
44409- eppnt->p_memsz > TASK_SIZE ||
44410- TASK_SIZE - eppnt->p_memsz < k) {
44411+ eppnt->p_memsz > pax_task_size ||
44412+ pax_task_size - eppnt->p_memsz < k) {
44413 error = -ENOMEM;
44414 goto out_close;
44415 }
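
load_elf_interp() is adjusted for SEGMEXEC, under which the usable address space is halved: the per-segment bounds checks compare against pax_task_size (SEGMEXEC_TASK_SIZE when the mm runs segmented, TASK_SIZE otherwise) instead of a hard-coded TASK_SIZE. The function also starts from error = -EINVAL rather than ~0UL, so failures propagate as a real errno, and the now-unused interp_map_addr out-parameter is dropped from the signature.
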
44416@@ -530,6 +551,315 @@ out:
44417 return error;
44418 }
44419
44420+#ifdef CONFIG_PAX_PT_PAX_FLAGS
44421+#ifdef CONFIG_PAX_SOFTMODE
44422+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
44423+{
44424+ unsigned long pax_flags = 0UL;
44425+
44426+#ifdef CONFIG_PAX_PAGEEXEC
44427+ if (elf_phdata->p_flags & PF_PAGEEXEC)
44428+ pax_flags |= MF_PAX_PAGEEXEC;
44429+#endif
44430+
44431+#ifdef CONFIG_PAX_SEGMEXEC
44432+ if (elf_phdata->p_flags & PF_SEGMEXEC)
44433+ pax_flags |= MF_PAX_SEGMEXEC;
44434+#endif
44435+
44436+#ifdef CONFIG_PAX_EMUTRAMP
44437+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
44438+ pax_flags |= MF_PAX_EMUTRAMP;
44439+#endif
44440+
44441+#ifdef CONFIG_PAX_MPROTECT
44442+ if (elf_phdata->p_flags & PF_MPROTECT)
44443+ pax_flags |= MF_PAX_MPROTECT;
44444+#endif
44445+
44446+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
44447+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
44448+ pax_flags |= MF_PAX_RANDMMAP;
44449+#endif
44450+
44451+ return pax_flags;
44452+}
44453+#endif
44454+
44455+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
44456+{
44457+ unsigned long pax_flags = 0UL;
44458+
44459+#ifdef CONFIG_PAX_PAGEEXEC
44460+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
44461+ pax_flags |= MF_PAX_PAGEEXEC;
44462+#endif
44463+
44464+#ifdef CONFIG_PAX_SEGMEXEC
44465+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
44466+ pax_flags |= MF_PAX_SEGMEXEC;
44467+#endif
44468+
44469+#ifdef CONFIG_PAX_EMUTRAMP
44470+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
44471+ pax_flags |= MF_PAX_EMUTRAMP;
44472+#endif
44473+
44474+#ifdef CONFIG_PAX_MPROTECT
44475+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
44476+ pax_flags |= MF_PAX_MPROTECT;
44477+#endif
44478+
44479+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
44480+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
44481+ pax_flags |= MF_PAX_RANDMMAP;
44482+#endif
44483+
44484+ return pax_flags;
44485+}
44486+#endif
44487+
44488+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
44489+#ifdef CONFIG_PAX_SOFTMODE
44490+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
44491+{
44492+ unsigned long pax_flags = 0UL;
44493+
44494+#ifdef CONFIG_PAX_PAGEEXEC
44495+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
44496+ pax_flags |= MF_PAX_PAGEEXEC;
44497+#endif
44498+
44499+#ifdef CONFIG_PAX_SEGMEXEC
44500+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
44501+ pax_flags |= MF_PAX_SEGMEXEC;
44502+#endif
44503+
44504+#ifdef CONFIG_PAX_EMUTRAMP
44505+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
44506+ pax_flags |= MF_PAX_EMUTRAMP;
44507+#endif
44508+
44509+#ifdef CONFIG_PAX_MPROTECT
44510+ if (pax_flags_softmode & MF_PAX_MPROTECT)
44511+ pax_flags |= MF_PAX_MPROTECT;
44512+#endif
44513+
44514+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
44515+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
44516+ pax_flags |= MF_PAX_RANDMMAP;
44517+#endif
44518+
44519+ return pax_flags;
44520+}
44521+#endif
44522+
44523+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
44524+{
44525+ unsigned long pax_flags = 0UL;
44526+
44527+#ifdef CONFIG_PAX_PAGEEXEC
44528+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
44529+ pax_flags |= MF_PAX_PAGEEXEC;
44530+#endif
44531+
44532+#ifdef CONFIG_PAX_SEGMEXEC
44533+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
44534+ pax_flags |= MF_PAX_SEGMEXEC;
44535+#endif
44536+
44537+#ifdef CONFIG_PAX_EMUTRAMP
44538+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
44539+ pax_flags |= MF_PAX_EMUTRAMP;
44540+#endif
44541+
44542+#ifdef CONFIG_PAX_MPROTECT
44543+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
44544+ pax_flags |= MF_PAX_MPROTECT;
44545+#endif
44546+
44547+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
44548+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
44549+ pax_flags |= MF_PAX_RANDMMAP;
44550+#endif
44551+
44552+ return pax_flags;
44553+}
44554+#endif
44555+
44556+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44557+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
44558+{
44559+ unsigned long pax_flags = 0UL;
44560+
44561+#ifdef CONFIG_PAX_EI_PAX
44562+
44563+#ifdef CONFIG_PAX_PAGEEXEC
44564+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
44565+ pax_flags |= MF_PAX_PAGEEXEC;
44566+#endif
44567+
44568+#ifdef CONFIG_PAX_SEGMEXEC
44569+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
44570+ pax_flags |= MF_PAX_SEGMEXEC;
44571+#endif
44572+
44573+#ifdef CONFIG_PAX_EMUTRAMP
44574+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
44575+ pax_flags |= MF_PAX_EMUTRAMP;
44576+#endif
44577+
44578+#ifdef CONFIG_PAX_MPROTECT
44579+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
44580+ pax_flags |= MF_PAX_MPROTECT;
44581+#endif
44582+
44583+#ifdef CONFIG_PAX_ASLR
44584+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
44585+ pax_flags |= MF_PAX_RANDMMAP;
44586+#endif
44587+
44588+#else
44589+
44590+#ifdef CONFIG_PAX_PAGEEXEC
44591+ pax_flags |= MF_PAX_PAGEEXEC;
44592+#endif
44593+
44594+#ifdef CONFIG_PAX_SEGMEXEC
44595+ pax_flags |= MF_PAX_SEGMEXEC;
44596+#endif
44597+
44598+#ifdef CONFIG_PAX_MPROTECT
44599+ pax_flags |= MF_PAX_MPROTECT;
44600+#endif
44601+
44602+#ifdef CONFIG_PAX_RANDMMAP
44603+ if (randomize_va_space)
44604+ pax_flags |= MF_PAX_RANDMMAP;
44605+#endif
44606+
44607+#endif
44608+
44609+ return pax_flags;
44610+}
44611+
44612+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
44613+{
44614+
44615+#ifdef CONFIG_PAX_PT_PAX_FLAGS
44616+ unsigned long i;
44617+
44618+ for (i = 0UL; i < elf_ex->e_phnum; i++)
44619+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
44620+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
44621+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
44622+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
44623+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
44624+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
44625+ return ~0UL;
44626+
44627+#ifdef CONFIG_PAX_SOFTMODE
44628+ if (pax_softmode)
44629+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
44630+ else
44631+#endif
44632+
44633+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
44634+ break;
44635+ }
44636+#endif
44637+
44638+ return ~0UL;
44639+}
44640+
44641+static unsigned long pax_parse_xattr_pax(struct file * const file)
44642+{
44643+
44644+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
44645+ ssize_t xattr_size, i;
44646+ unsigned char xattr_value[5];
44647+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
44648+
44649+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
44650+ if (xattr_size <= 0 || xattr_size > 5)
44651+ return ~0UL;
44652+
44653+ for (i = 0; i < xattr_size; i++)
44654+ switch (xattr_value[i]) {
44655+ default:
44656+ return ~0UL;
44657+
44658+#define parse_flag(option1, option2, flag) \
44659+ case option1: \
44660+ if (pax_flags_hardmode & MF_PAX_##flag) \
44661+ return ~0UL; \
44662+ pax_flags_hardmode |= MF_PAX_##flag; \
44663+ break; \
44664+ case option2: \
44665+ if (pax_flags_softmode & MF_PAX_##flag) \
44666+ return ~0UL; \
44667+ pax_flags_softmode |= MF_PAX_##flag; \
44668+ break;
44669+
44670+ parse_flag('p', 'P', PAGEEXEC);
44671+ parse_flag('e', 'E', EMUTRAMP);
44672+ parse_flag('m', 'M', MPROTECT);
44673+ parse_flag('r', 'R', RANDMMAP);
44674+ parse_flag('s', 'S', SEGMEXEC);
44675+
44676+#undef parse_flag
44677+ }
44678+
44679+ if (pax_flags_hardmode & pax_flags_softmode)
44680+ return ~0UL;
44681+
44682+#ifdef CONFIG_PAX_SOFTMODE
44683+ if (pax_softmode)
44684+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
44685+ else
44686+#endif
44687+
44688+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
44689+#else
44690+ return ~0UL;
44691+#endif
44692+
44693+}
44694+
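
pax_parse_xattr_pax() reads the marking from an extended attribute instead of the ELF image, so flags can be toggled without rewriting the binary. Each character in the attribute value covers one feature: a lowercase letter requests the feature disabled, an uppercase letter requests it enabled, the same feature in both cases is rejected, and which set takes effect depends on the global pax_softmode switch. Assuming XATTR_NAME_PAX_FLAGS expands to "user.pax.flags", as in the PaX userland of this era, a file could be marked from userspace roughly like this (a sketch, not the official tooling; setfattr(1) or paxctl would normally be used):

#include <stdio.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
	/* "m" = MPROTECT off, "E" = EMUTRAMP on, per parse_flag() above */
	const char value[] = "mE";

	if (argc < 2) {
		fprintf(stderr, "usage: %s <binary>\n", argv[0]);
		return 1;
	}
	if (setxattr(argv[1], "user.pax.flags", value, sizeof(value) - 1, 0) < 0) {
		perror("setxattr");
		return 1;
	}
	return 0;
}
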
44695+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
44696+{
44697+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
44698+
44699+ pax_flags = pax_parse_ei_pax(elf_ex);
44700+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
44701+ xattr_pax_flags = pax_parse_xattr_pax(file);
44702+
44703+ if (pt_pax_flags == ~0UL)
44704+ pt_pax_flags = xattr_pax_flags;
44705+ else if (xattr_pax_flags == ~0UL)
44706+ xattr_pax_flags = pt_pax_flags;
44707+ if (pt_pax_flags != xattr_pax_flags)
44708+ return -EINVAL;
44709+ if (pt_pax_flags != ~0UL)
44710+ pax_flags = pt_pax_flags;
44711+
44712+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
44713+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
44714+ if ((__supported_pte_mask & _PAGE_NX))
44715+ pax_flags &= ~MF_PAX_SEGMEXEC;
44716+ else
44717+ pax_flags &= ~MF_PAX_PAGEEXEC;
44718+ }
44719+#endif
44720+
44721+ if (0 > pax_check_flags(&pax_flags))
44722+ return -EINVAL;
44723+
44724+ current->mm->pax_flags = pax_flags;
44725+ return 0;
44726+}
44727+#endif
44728+
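
pax_parse_pax_flags() above reconciles the three marking sources, with ~0UL serving as the "no marking" sentinel: the EI_PAX bits give a baseline, a PT_PAX header or xattr marking overrides it, and if both of the newer sources are present they must agree or the exec is refused. The final #ifdef block then arbitrates between the mutually exclusive PAGEEXEC and SEGMEXEC schemes, preferring hardware NX (PAGEEXEC) when _PAGE_NX is available. The precedence rule in isolation, as a sketch with illustrative names:

static long merge_markings(unsigned long ei, unsigned long pt,
                           unsigned long xattr, unsigned long *out)
{
	if (pt == ~0UL)                 /* no PT_PAX header: defer to xattr */
		pt = xattr;
	else if (xattr == ~0UL)         /* no xattr: defer to PT_PAX */
		xattr = pt;
	if (pt != xattr)                /* both present and disagreeing */
		return -1;              /* load_elf_binary() gets -EINVAL */
	*out = (pt != ~0UL) ? pt : ei;  /* fall back to the EI_PAX baseline */
	return 0;
}
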
44729 /*
44730 * These are the functions used to load ELF style executables and shared
44731 * libraries. There is no binary dependent code anywhere else.
44732@@ -546,6 +876,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
44733 {
44734 unsigned int random_variable = 0;
44735
44736+#ifdef CONFIG_PAX_RANDUSTACK
44737+ if (randomize_va_space)
44738+ return stack_top - current->mm->delta_stack;
44739+#endif
44740+
44741 if ((current->flags & PF_RANDOMIZE) &&
44742 !(current->personality & ADDR_NO_RANDOMIZE)) {
44743 random_variable = get_random_int() & STACK_RND_MASK;
44744@@ -564,7 +899,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
44745 unsigned long load_addr = 0, load_bias = 0;
44746 int load_addr_set = 0;
44747 char * elf_interpreter = NULL;
44748- unsigned long error;
44749+ unsigned long error = 0;
44750 struct elf_phdr *elf_ppnt, *elf_phdata;
44751 unsigned long elf_bss, elf_brk;
44752 int retval, i;
44753@@ -574,11 +909,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
44754 unsigned long start_code, end_code, start_data, end_data;
44755 unsigned long reloc_func_desc __maybe_unused = 0;
44756 int executable_stack = EXSTACK_DEFAULT;
44757- unsigned long def_flags = 0;
44758 struct {
44759 struct elfhdr elf_ex;
44760 struct elfhdr interp_elf_ex;
44761 } *loc;
44762+ unsigned long pax_task_size = TASK_SIZE;
44763
44764 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
44765 if (!loc) {
44766@@ -714,11 +1049,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
44767 goto out_free_dentry;
44768
44769 /* OK, This is the point of no return */
44770- current->mm->def_flags = def_flags;
44771+
44772+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44773+ current->mm->pax_flags = 0UL;
44774+#endif
44775+
44776+#ifdef CONFIG_PAX_DLRESOLVE
44777+ current->mm->call_dl_resolve = 0UL;
44778+#endif
44779+
44780+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
44781+ current->mm->call_syscall = 0UL;
44782+#endif
44783+
44784+#ifdef CONFIG_PAX_ASLR
44785+ current->mm->delta_mmap = 0UL;
44786+ current->mm->delta_stack = 0UL;
44787+#endif
44788+
44789+ current->mm->def_flags = 0;
44790+
44791+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44792+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
44793+ send_sig(SIGKILL, current, 0);
44794+ goto out_free_dentry;
44795+ }
44796+#endif
44797+
44798+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
44799+ pax_set_initial_flags(bprm);
44800+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
44801+ if (pax_set_initial_flags_func)
44802+ (pax_set_initial_flags_func)(bprm);
44803+#endif
44804+
44805+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
44806+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
44807+ current->mm->context.user_cs_limit = PAGE_SIZE;
44808+ current->mm->def_flags |= VM_PAGEEXEC;
44809+ }
44810+#endif
44811+
44812+#ifdef CONFIG_PAX_SEGMEXEC
44813+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
44814+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
44815+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
44816+ pax_task_size = SEGMEXEC_TASK_SIZE;
44817+ current->mm->def_flags |= VM_NOHUGEPAGE;
44818+ }
44819+#endif
44820+
44821+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
44822+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
44823+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
44824+ put_cpu();
44825+ }
44826+#endif
44827
44828 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
44829 may depend on the personality. */
44830 SET_PERSONALITY(loc->elf_ex);
44831+
44832+#ifdef CONFIG_PAX_ASLR
44833+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
44834+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
44835+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
44836+ }
44837+#endif
44838+
44839+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
44840+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
44841+ executable_stack = EXSTACK_DISABLE_X;
44842+ current->personality &= ~READ_IMPLIES_EXEC;
44843+ } else
44844+#endif
44845+
44846 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
44847 current->personality |= READ_IMPLIES_EXEC;
44848
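
This hunk replaces the stock def_flags bookkeeping with per-mm PaX state and derives the ASLR deltas at exec time: delta_mmap and delta_stack each keep PAX_DELTA_*_LEN random bits and scale them to whole pages, so the randomisation span is 2^LEN pages. A worked instance of the arithmetic (PAX_DELTA_MMAP_LEN is per-architecture; 16 is an assumed value here):

#define PAGE_SHIFT_EX         12        /* 4 KiB pages */
#define PAX_DELTA_MMAP_LEN_EX 16        /* assumed; the real value is per-arch */

static unsigned long compute_delta(unsigned long rnd)
{
	/*
	 * Keep LEN random bits, then scale them to whole pages: with
	 * 16 bits and 4 KiB pages the base can shift anywhere inside
	 * a 2^16 * 4 KiB = 256 MiB window.
	 */
	return (rnd & ((1UL << PAX_DELTA_MMAP_LEN_EX) - 1)) << PAGE_SHIFT_EX;
}
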
44849@@ -809,6 +1214,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
44850 #else
44851 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
44852 #endif
44853+
44854+#ifdef CONFIG_PAX_RANDMMAP
44855+ /* PaX: randomize base address at the default exe base if requested */
44856+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
44857+#ifdef CONFIG_SPARC64
44858+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
44859+#else
44860+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
44861+#endif
44862+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
44863+ elf_flags |= MAP_FIXED;
44864+ }
44865+#endif
44866+
44867 }
44868
44869 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
44870@@ -841,9 +1260,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
44871 * allowed task size. Note that p_filesz must always be
44872 * <= p_memsz so it is only necessary to check p_memsz.
44873 */
44874- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
44875- elf_ppnt->p_memsz > TASK_SIZE ||
44876- TASK_SIZE - elf_ppnt->p_memsz < k) {
44877+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
44878+ elf_ppnt->p_memsz > pax_task_size ||
44879+ pax_task_size - elf_ppnt->p_memsz < k) {
44880 /* set_brk can never work. Avoid overflows. */
44881 send_sig(SIGKILL, current, 0);
44882 retval = -EINVAL;
44883@@ -882,17 +1301,44 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
44884 goto out_free_dentry;
44885 }
44886 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
44887- send_sig(SIGSEGV, current, 0);
44888- retval = -EFAULT; /* Nobody gets to see this, but.. */
44889- goto out_free_dentry;
44890+ /*
44891+ * This bss-zeroing can fail if the ELF
44892+ * file specifies odd protections. So
44893+ * we don't check the return value
44894+ */
44895 }
44896
44897+#ifdef CONFIG_PAX_RANDMMAP
44898+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
44899+ unsigned long start, size;
44900+
44901+ start = ELF_PAGEALIGN(elf_brk);
44902+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
44903+ down_read(&current->mm->mmap_sem);
44904+ retval = -ENOMEM;
44905+ if (!find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
44906+ unsigned long prot = PROT_NONE;
44907+
44908+ up_read(&current->mm->mmap_sem);
44909+ current->mm->brk_gap = PAGE_ALIGN(size) >> PAGE_SHIFT;
44910+// if (current->personality & ADDR_NO_RANDOMIZE)
44911+// prot = PROT_READ;
44912+ start = vm_mmap(NULL, start, size, prot, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, 0);
44913+ retval = IS_ERR_VALUE(start) ? start : 0;
44914+ } else
44915+ up_read(&current->mm->mmap_sem);
44916+ if (retval == 0)
44917+ retval = set_brk(start + size, start + size + PAGE_SIZE);
44918+ if (retval < 0) {
44919+ send_sig(SIGKILL, current, 0);
44920+ goto out_free_dentry;
44921+ }
44922+ }
44923+#endif
44924+
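
The block above is the PaX brk gap: when RANDMMAP is active, a randomly sized PROT_NONE region is inserted between the end of the bss and the start of the brk heap (set_brk() then places the heap just past it), so the heap base sits at an unpredictable offset from the image. The size arithmetic, restated as a sketch assuming 4 KiB pages:

static unsigned long brk_gap_size(unsigned long rnd)
{
	/* one guard page plus up to ((1UL << 22) - 1) * 16 bytes, ~64 MiB */
	return 4096UL + ((rnd & ((1UL << 22) - 1UL)) << 4);
}
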
44925 if (elf_interpreter) {
44926- unsigned long interp_map_addr = 0;
44927-
44928 elf_entry = load_elf_interp(&loc->interp_elf_ex,
44929 interpreter,
44930- &interp_map_addr,
44931 load_bias);
44932 if (!IS_ERR((void *)elf_entry)) {
44933 /*
44934@@ -1114,7 +1560,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
44935 * Decide what to dump of a segment, part, all or none.
44936 */
44937 static unsigned long vma_dump_size(struct vm_area_struct *vma,
44938- unsigned long mm_flags)
44939+ unsigned long mm_flags, long signr)
44940 {
44941 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
44942
44943@@ -1151,7 +1597,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
44944 if (vma->vm_file == NULL)
44945 return 0;
44946
44947- if (FILTER(MAPPED_PRIVATE))
44948+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
44949 goto whole;
44950
44951 /*
44952@@ -1373,9 +1819,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
44953 {
44954 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
44955 int i = 0;
44956- do
44957+ do {
44958 i += 2;
44959- while (auxv[i - 2] != AT_NULL);
44960+ } while (auxv[i - 2] != AT_NULL);
44961 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
44962 }
44963
44964@@ -2003,14 +2449,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
44965 }
44966
44967 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
44968- unsigned long mm_flags)
44969+ struct coredump_params *cprm)
44970 {
44971 struct vm_area_struct *vma;
44972 size_t size = 0;
44973
44974 for (vma = first_vma(current, gate_vma); vma != NULL;
44975 vma = next_vma(vma, gate_vma))
44976- size += vma_dump_size(vma, mm_flags);
44977+ size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
44978 return size;
44979 }
44980
44981@@ -2104,7 +2550,7 @@ static int elf_core_dump(struct coredump_params *cprm)
44982
44983 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
44984
44985- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
44986+ offset += elf_core_vma_data_size(gate_vma, cprm);
44987 offset += elf_core_extra_data_size();
44988 e_shoff = offset;
44989
44990@@ -2118,10 +2564,12 @@ static int elf_core_dump(struct coredump_params *cprm)
44991 offset = dataoff;
44992
44993 size += sizeof(*elf);
44994+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
44995 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
44996 goto end_coredump;
44997
44998 size += sizeof(*phdr4note);
44999+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
45000 if (size > cprm->limit
45001 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
45002 goto end_coredump;
45003@@ -2135,7 +2583,7 @@ static int elf_core_dump(struct coredump_params *cprm)
45004 phdr.p_offset = offset;
45005 phdr.p_vaddr = vma->vm_start;
45006 phdr.p_paddr = 0;
45007- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
45008+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
45009 phdr.p_memsz = vma->vm_end - vma->vm_start;
45010 offset += phdr.p_filesz;
45011 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
45012@@ -2146,6 +2594,7 @@ static int elf_core_dump(struct coredump_params *cprm)
45013 phdr.p_align = ELF_EXEC_PAGESIZE;
45014
45015 size += sizeof(phdr);
45016+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
45017 if (size > cprm->limit
45018 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
45019 goto end_coredump;
45020@@ -2170,7 +2619,7 @@ static int elf_core_dump(struct coredump_params *cprm)
45021 unsigned long addr;
45022 unsigned long end;
45023
45024- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
45025+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
45026
45027 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
45028 struct page *page;
45029@@ -2179,6 +2628,7 @@ static int elf_core_dump(struct coredump_params *cprm)
45030 page = get_dump_page(addr);
45031 if (page) {
45032 void *kaddr = kmap(page);
45033+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
45034 stop = ((size += PAGE_SIZE) > cprm->limit) ||
45035 !dump_write(cprm->file, kaddr,
45036 PAGE_SIZE);
45037@@ -2196,6 +2646,7 @@ static int elf_core_dump(struct coredump_params *cprm)
45038
45039 if (e_phnum == PN_XNUM) {
45040 size += sizeof(*shdr4extnum);
45041+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
45042 if (size > cprm->limit
45043 || !dump_write(cprm->file, shdr4extnum,
45044 sizeof(*shdr4extnum)))
45045@@ -2216,6 +2667,97 @@ out:
45046
45047 #endif /* CONFIG_ELF_CORE */
45048
45049+#ifdef CONFIG_PAX_MPROTECT
45050+/* PaX: non-PIC ELF libraries need relocations on their executable segments;
45051+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly,
45052+ * we'll remove VM_MAYWRITE for good on RELRO segments.
45053+ *
45054+ * The checks favour ld-linux.so behaviour, which operates on a per-ELF-segment
45055+ * basis, because we want to allow the common case and not the special ones.
45056+ */
45057+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
45058+{
45059+ struct elfhdr elf_h;
45060+ struct elf_phdr elf_p;
45061+ unsigned long i;
45062+ unsigned long oldflags;
45063+ bool is_textrel_rw, is_textrel_rx, is_relro;
45064+
45065+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
45066+ return;
45067+
45068+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
45069+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
45070+
45071+#ifdef CONFIG_PAX_ELFRELOCS
45072+ /* possible TEXTREL */
45073+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
45074+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
45075+#else
45076+ is_textrel_rw = false;
45077+ is_textrel_rx = false;
45078+#endif
45079+
45080+ /* possible RELRO */
45081+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
45082+
45083+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
45084+ return;
45085+
45086+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
45087+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
45088+
45089+#ifdef CONFIG_PAX_ETEXECRELOCS
45090+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
45091+#else
45092+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
45093+#endif
45094+
45095+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
45096+ !elf_check_arch(&elf_h) ||
45097+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
45098+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
45099+ return;
45100+
45101+ for (i = 0UL; i < elf_h.e_phnum; i++) {
45102+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
45103+ return;
45104+ switch (elf_p.p_type) {
45105+ case PT_DYNAMIC:
45106+ if (!is_textrel_rw && !is_textrel_rx)
45107+ continue;
45108+ i = 0UL;
45109+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
45110+ elf_dyn dyn;
45111+
45112+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
45113+ return;
45114+ if (dyn.d_tag == DT_NULL)
45115+ return;
45116+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
45117+ gr_log_textrel(vma);
45118+ if (is_textrel_rw)
45119+ vma->vm_flags |= VM_MAYWRITE;
45120+ else
45121+ /* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
45122+ vma->vm_flags &= ~VM_MAYWRITE;
45123+ return;
45124+ }
45125+ i++;
45126+ }
45127+ return;
45128+
45129+ case PT_GNU_RELRO:
45130+ if (!is_relro)
45131+ continue;
45132+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
45133+ vma->vm_flags &= ~VM_MAYWRITE;
45134+ return;
45135+ }
45136+ }
45137+}
45138+#endif
45139+
45140 static int __init init_elf_binfmt(void)
45141 {
45142 register_binfmt(&elf_format);
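
elf_handle_mprotect() above is the enforcement hook behind MPROTECT's two exceptions: it re-reads the program headers from the backing file and only then grants VM_MAYWRITE for a genuine TEXTREL segment (logging it via gr_log_textrel()) or strips VM_MAYWRITE for good once a PT_GNU_RELRO segment has been made read-only. The same DT_TEXTREL/DT_FLAGS test can be reproduced from userspace; a sketch (64-bit ELF assumed for brevity, whereas the kernel code uses the binfmt's own elf_* types):

#include <elf.h>
#include <stdio.h>

static int has_textrel(FILE *f)
{
	Elf64_Ehdr eh;
	Elf64_Phdr ph;
	Elf64_Dyn dyn;
	int i;

	if (fread(&eh, sizeof(eh), 1, f) != 1)
		return -1;
	for (i = 0; i < eh.e_phnum; i++) {
		if (fseek(f, eh.e_phoff + i * sizeof(ph), SEEK_SET) != 0 ||
		    fread(&ph, sizeof(ph), 1, f) != 1)
			return -1;
		if (ph.p_type != PT_DYNAMIC)
			continue;
		if (fseek(f, ph.p_offset, SEEK_SET) != 0)
			return -1;
		/* same condition the kernel code above tests */
		while (fread(&dyn, sizeof(dyn), 1, f) == 1 && dyn.d_tag != DT_NULL)
			if (dyn.d_tag == DT_TEXTREL ||
			    (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL)))
				return 1;
		return 0;
	}
	return 0;
}

int main(int argc, char **argv)
{
	FILE *f;
	int ret;

	if (argc < 2 || !(f = fopen(argv[1], "rb")))
		return 2;
	ret = has_textrel(f);
	fclose(f);
	printf("%s: %s\n", argv[1], ret == 1 ? "TEXTREL" : ret ? "error" : "clean");
	return 0;
}
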
45143diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
45144index e280352..7b2f231 100644
45145--- a/fs/binfmt_flat.c
45146+++ b/fs/binfmt_flat.c
45147@@ -562,7 +562,9 @@ static int load_flat_file(struct linux_binprm * bprm,
45148 realdatastart = (unsigned long) -ENOMEM;
45149 printk("Unable to allocate RAM for process data, errno %d\n",
45150 (int)-realdatastart);
45151+ down_write(&current->mm->mmap_sem);
45152 vm_munmap(textpos, text_len);
45153+ up_write(&current->mm->mmap_sem);
45154 ret = realdatastart;
45155 goto err;
45156 }
45157@@ -586,8 +588,10 @@ static int load_flat_file(struct linux_binprm * bprm,
45158 }
45159 if (IS_ERR_VALUE(result)) {
45160 printk("Unable to read data+bss, errno %d\n", (int)-result);
45161+ down_write(&current->mm->mmap_sem);
45162 vm_munmap(textpos, text_len);
45163 vm_munmap(realdatastart, len);
45164+ up_write(&current->mm->mmap_sem);
45165 ret = result;
45166 goto err;
45167 }
45168@@ -654,8 +658,10 @@ static int load_flat_file(struct linux_binprm * bprm,
45169 }
45170 if (IS_ERR_VALUE(result)) {
45171 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
45172+ down_write(&current->mm->mmap_sem);
45173 vm_munmap(textpos, text_len + data_len + extra +
45174 MAX_SHARED_LIBS * sizeof(unsigned long));
45175+ up_write(&current->mm->mmap_sem);
45176 ret = result;
45177 goto err;
45178 }
45179diff --git a/fs/bio.c b/fs/bio.c
45180index b96fc6c..431d628 100644
45181--- a/fs/bio.c
45182+++ b/fs/bio.c
45183@@ -818,7 +818,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
45184 /*
45185 * Overflow, abort
45186 */
45187- if (end < start)
45188+ if (end < start || end - start > INT_MAX - nr_pages)
45189 return ERR_PTR(-EINVAL);
45190
45191 nr_pages += end - start;
45192@@ -952,7 +952,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
45193 /*
45194 * Overflow, abort
45195 */
45196- if (end < start)
45197+ if (end < start || end - start > INT_MAX - nr_pages)
45198 return ERR_PTR(-EINVAL);
45199
45200 nr_pages += end - start;
45201@@ -1214,7 +1214,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
45202 const int read = bio_data_dir(bio) == READ;
45203 struct bio_map_data *bmd = bio->bi_private;
45204 int i;
45205- char *p = bmd->sgvecs[0].iov_base;
45206+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
45207
45208 __bio_for_each_segment(bvec, bio, i, 0) {
45209 char *addr = page_address(bvec->bv_page);
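
The two bio.c checks follow the standard overflow-safe pattern: instead of adding end - start to nr_pages and testing afterwards, the headroom INT_MAX - nr_pages is compared first, so the int accumulator that later sizes allocations can never wrap. In isolation:

#include <limits.h>

/* test the headroom before adding, never after (nr_pages >= 0 assumed) */
static int add_page_count(int nr_pages, unsigned long start, unsigned long end)
{
	if (end < start || end - start > (unsigned long)(INT_MAX - nr_pages))
		return -1;      /* would overflow: reject, as bio.c now does */
	return nr_pages + (int)(end - start);
}
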
45210diff --git a/fs/block_dev.c b/fs/block_dev.c
45211index ab3a456..7da538b 100644
45212--- a/fs/block_dev.c
45213+++ b/fs/block_dev.c
45214@@ -651,7 +651,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
45215 else if (bdev->bd_contains == bdev)
45216 return true; /* is a whole device which isn't held */
45217
45218- else if (whole->bd_holder == bd_may_claim)
45219+ else if (whole->bd_holder == (void *)bd_may_claim)
45220 return true; /* is a partition of a device that is being partitioned */
45221 else if (whole->bd_holder != NULL)
45222 return false; /* is a partition of a held device */
45223diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
45224index cdfb4c4..da736d4 100644
45225--- a/fs/btrfs/ctree.c
45226+++ b/fs/btrfs/ctree.c
45227@@ -1035,9 +1035,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
45228 free_extent_buffer(buf);
45229 add_root_to_dirty_list(root);
45230 } else {
45231- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
45232- parent_start = parent->start;
45233- else
45234+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
45235+ if (parent)
45236+ parent_start = parent->start;
45237+ else
45238+ parent_start = 0;
45239+ } else
45240 parent_start = 0;
45241
45242 WARN_ON(trans->transid != btrfs_header_generation(parent));
45243diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
45244index 95542a1..95a8727 100644
45245--- a/fs/btrfs/inode.c
45246+++ b/fs/btrfs/inode.c
45247@@ -7243,7 +7243,7 @@ fail:
45248 return -ENOMEM;
45249 }
45250
45251-static int btrfs_getattr(struct vfsmount *mnt,
45252+int btrfs_getattr(struct vfsmount *mnt,
45253 struct dentry *dentry, struct kstat *stat)
45254 {
45255 struct inode *inode = dentry->d_inode;
45256@@ -7257,6 +7257,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
45257 return 0;
45258 }
45259
45260+EXPORT_SYMBOL(btrfs_getattr);
45261+
45262+dev_t get_btrfs_dev_from_inode(struct inode *inode)
45263+{
45264+ return BTRFS_I(inode)->root->anon_dev;
45265+}
45266+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
45267+
45268 /*
45269 * If a file is moved, it will inherit the cow and compression flags of the new
45270 * directory.
45271diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
45272index 8fcf9a5..a200000 100644
45273--- a/fs/btrfs/ioctl.c
45274+++ b/fs/btrfs/ioctl.c
45275@@ -2965,9 +2965,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
45276 for (i = 0; i < num_types; i++) {
45277 struct btrfs_space_info *tmp;
45278
45279+ /* Don't copy in more than we allocated */
45280 if (!slot_count)
45281 break;
45282
45283+ slot_count--;
45284+
45285 info = NULL;
45286 rcu_read_lock();
45287 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
45288@@ -2989,10 +2992,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
45289 memcpy(dest, &space, sizeof(space));
45290 dest++;
45291 space_args.total_spaces++;
45292- slot_count--;
45293 }
45294- if (!slot_count)
45295- break;
45296 }
45297 up_read(&info->groups_sem);
45298 }
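
The ioctl fix moves the slot accounting ahead of the copy: a slot is consumed before anything is written into it, so the loop can never emit more entries than the reply buffer sized from slot_count can hold (the added comment states the intent). The shape of the corrected loop, with illustrative names:

#include <stddef.h>

static size_t copy_bounded(int *dst, size_t slot_count,
                           const int *src, size_t src_count)
{
	size_t copied = 0;

	while (copied < src_count) {
		if (!slot_count)
			break;                  /* out of caller-provided slots */
		slot_count--;                   /* consume the slot first... */
		dst[copied] = src[copied];      /* ...then produce into it */
		copied++;
	}
	return copied;
}
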
45299diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
45300index 776f0aa..3aad281 100644
45301--- a/fs/btrfs/relocation.c
45302+++ b/fs/btrfs/relocation.c
45303@@ -1269,7 +1269,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
45304 }
45305 spin_unlock(&rc->reloc_root_tree.lock);
45306
45307- BUG_ON((struct btrfs_root *)node->data != root);
45308+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
45309
45310 if (!del) {
45311 spin_lock(&rc->reloc_root_tree.lock);
45312diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
45313index 622f469..e8d2d55 100644
45314--- a/fs/cachefiles/bind.c
45315+++ b/fs/cachefiles/bind.c
45316@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
45317 args);
45318
45319 /* start by checking things over */
45320- ASSERT(cache->fstop_percent >= 0 &&
45321- cache->fstop_percent < cache->fcull_percent &&
45322+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
45323 cache->fcull_percent < cache->frun_percent &&
45324 cache->frun_percent < 100);
45325
45326- ASSERT(cache->bstop_percent >= 0 &&
45327- cache->bstop_percent < cache->bcull_percent &&
45328+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
45329 cache->bcull_percent < cache->brun_percent &&
45330 cache->brun_percent < 100);
45331
45332diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
45333index 0a1467b..6a53245 100644
45334--- a/fs/cachefiles/daemon.c
45335+++ b/fs/cachefiles/daemon.c
45336@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
45337 if (n > buflen)
45338 return -EMSGSIZE;
45339
45340- if (copy_to_user(_buffer, buffer, n) != 0)
45341+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
45342 return -EFAULT;
45343
45344 return n;
45345@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
45346 if (test_bit(CACHEFILES_DEAD, &cache->flags))
45347 return -EIO;
45348
45349- if (datalen < 0 || datalen > PAGE_SIZE - 1)
45350+ if (datalen > PAGE_SIZE - 1)
45351 return -EOPNOTSUPP;
45352
45353 /* drag the command string into the kernel so we can parse it */
45354@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
45355 if (args[0] != '%' || args[1] != '\0')
45356 return -EINVAL;
45357
45358- if (fstop < 0 || fstop >= cache->fcull_percent)
45359+ if (fstop >= cache->fcull_percent)
45360 return cachefiles_daemon_range_error(cache, args);
45361
45362 cache->fstop_percent = fstop;
45363@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
45364 if (args[0] != '%' || args[1] != '\0')
45365 return -EINVAL;
45366
45367- if (bstop < 0 || bstop >= cache->bcull_percent)
45368+ if (bstop >= cache->bcull_percent)
45369 return cachefiles_daemon_range_error(cache, args);
45370
45371 cache->bstop_percent = bstop;
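
The dropped comparisons in bind.c and daemon.c were dead code: fstop, bstop and datalen have unsigned types, so a "< 0" test can never be true, and newer compilers flag exactly that. A small reminder of the pitfall:

#include <stdio.h>

int main(void)
{
	unsigned int fstop = -1;        /* wraps to UINT_MAX, never negative */

	if (fstop < 0)                  /* always false for an unsigned type */
		puts("unreachable");
	printf("%u\n", fstop);          /* prints 4294967295 */
	return 0;
}
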
45372diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
45373index bd6bc1b..b627b53 100644
45374--- a/fs/cachefiles/internal.h
45375+++ b/fs/cachefiles/internal.h
45376@@ -57,7 +57,7 @@ struct cachefiles_cache {
45377 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
45378 struct rb_root active_nodes; /* active nodes (can't be culled) */
45379 rwlock_t active_lock; /* lock for active_nodes */
45380- atomic_t gravecounter; /* graveyard uniquifier */
45381+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
45382 unsigned frun_percent; /* when to stop culling (% files) */
45383 unsigned fcull_percent; /* when to start culling (% files) */
45384 unsigned fstop_percent; /* when to stop allocating (% files) */
45385@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
45386 * proc.c
45387 */
45388 #ifdef CONFIG_CACHEFILES_HISTOGRAM
45389-extern atomic_t cachefiles_lookup_histogram[HZ];
45390-extern atomic_t cachefiles_mkdir_histogram[HZ];
45391-extern atomic_t cachefiles_create_histogram[HZ];
45392+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
45393+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
45394+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
45395
45396 extern int __init cachefiles_proc_init(void);
45397 extern void cachefiles_proc_cleanup(void);
45398 static inline
45399-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
45400+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
45401 {
45402 unsigned long jif = jiffies - start_jif;
45403 if (jif >= HZ)
45404 jif = HZ - 1;
45405- atomic_inc(&histogram[jif]);
45406+ atomic_inc_unchecked(&histogram[jif]);
45407 }
45408
45409 #else
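
atomic_unchecked_t is the opt-out side of PaX's REFCOUNT hardening: plain atomic_t operations gain overflow detection, so counters that may legitimately wrap, like these histogram buckets and the graveyard uniquifier, are converted to the unchecked variant with the matching *_unchecked operations. Roughly, the type mirrors atomic_t; the definition below is a sketch (the real one, and its arch-specific operations, live elsewhere in this patch, and the increment shown uses a GCC builtin rather than the kernel's asm):

typedef struct {
	int counter;
} atomic_unchecked_t;

/* same semantics as atomic_inc(), minus the overflow trap */
static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	__sync_fetch_and_add(&v->counter, 1);   /* illustrative only */
}

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return v->counter;
}
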
45410diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
45411index b0b5f7c..039bb26 100644
45412--- a/fs/cachefiles/namei.c
45413+++ b/fs/cachefiles/namei.c
45414@@ -318,7 +318,7 @@ try_again:
45415 /* first step is to make up a grave dentry in the graveyard */
45416 sprintf(nbuffer, "%08x%08x",
45417 (uint32_t) get_seconds(),
45418- (uint32_t) atomic_inc_return(&cache->gravecounter));
45419+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
45420
45421 /* do the multiway lock magic */
45422 trap = lock_rename(cache->graveyard, dir);
45423diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
45424index eccd339..4c1d995 100644
45425--- a/fs/cachefiles/proc.c
45426+++ b/fs/cachefiles/proc.c
45427@@ -14,9 +14,9 @@
45428 #include <linux/seq_file.h>
45429 #include "internal.h"
45430
45431-atomic_t cachefiles_lookup_histogram[HZ];
45432-atomic_t cachefiles_mkdir_histogram[HZ];
45433-atomic_t cachefiles_create_histogram[HZ];
45434+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
45435+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
45436+atomic_unchecked_t cachefiles_create_histogram[HZ];
45437
45438 /*
45439 * display the latency histogram
45440@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
45441 return 0;
45442 default:
45443 index = (unsigned long) v - 3;
45444- x = atomic_read(&cachefiles_lookup_histogram[index]);
45445- y = atomic_read(&cachefiles_mkdir_histogram[index]);
45446- z = atomic_read(&cachefiles_create_histogram[index]);
45447+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
45448+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
45449+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
45450 if (x == 0 && y == 0 && z == 0)
45451 return 0;
45452
45453diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
45454index c994691..2a1537f 100644
45455--- a/fs/cachefiles/rdwr.c
45456+++ b/fs/cachefiles/rdwr.c
45457@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
45458 old_fs = get_fs();
45459 set_fs(KERNEL_DS);
45460 ret = file->f_op->write(
45461- file, (const void __user *) data, len, &pos);
45462+ file, (const void __force_user *) data, len, &pos);
45463 set_fs(old_fs);
45464 kunmap(page);
45465 if (ret != len)
45466diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
45467index e5b7731..b9c59fb 100644
45468--- a/fs/ceph/dir.c
45469+++ b/fs/ceph/dir.c
45470@@ -243,7 +243,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
45471 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
45472 struct ceph_mds_client *mdsc = fsc->mdsc;
45473 unsigned frag = fpos_frag(filp->f_pos);
45474- int off = fpos_off(filp->f_pos);
45475+ unsigned int off = fpos_off(filp->f_pos);
45476 int err;
45477 u32 ftype;
45478 struct ceph_mds_reply_info_parsed *rinfo;
45479diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
45480index d9ea6ed..1e6c8ac 100644
45481--- a/fs/cifs/cifs_debug.c
45482+++ b/fs/cifs/cifs_debug.c
45483@@ -267,8 +267,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
45484
45485 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
45486 #ifdef CONFIG_CIFS_STATS2
45487- atomic_set(&totBufAllocCount, 0);
45488- atomic_set(&totSmBufAllocCount, 0);
45489+ atomic_set_unchecked(&totBufAllocCount, 0);
45490+ atomic_set_unchecked(&totSmBufAllocCount, 0);
45491 #endif /* CONFIG_CIFS_STATS2 */
45492 spin_lock(&cifs_tcp_ses_lock);
45493 list_for_each(tmp1, &cifs_tcp_ses_list) {
45494@@ -281,7 +281,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
45495 tcon = list_entry(tmp3,
45496 struct cifs_tcon,
45497 tcon_list);
45498- atomic_set(&tcon->num_smbs_sent, 0);
45499+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
45500 if (server->ops->clear_stats)
45501 server->ops->clear_stats(tcon);
45502 }
45503@@ -313,8 +313,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
45504 smBufAllocCount.counter, cifs_min_small);
45505 #ifdef CONFIG_CIFS_STATS2
45506 seq_printf(m, "Total Large %d Small %d Allocations\n",
45507- atomic_read(&totBufAllocCount),
45508- atomic_read(&totSmBufAllocCount));
45509+ atomic_read_unchecked(&totBufAllocCount),
45510+ atomic_read_unchecked(&totSmBufAllocCount));
45511 #endif /* CONFIG_CIFS_STATS2 */
45512
45513 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
45514@@ -343,7 +343,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
45515 if (tcon->need_reconnect)
45516 seq_puts(m, "\tDISCONNECTED ");
45517 seq_printf(m, "\nSMBs: %d",
45518- atomic_read(&tcon->num_smbs_sent));
45519+ atomic_read_unchecked(&tcon->num_smbs_sent));
45520 if (server->ops->print_stats)
45521 server->ops->print_stats(m, tcon);
45522 }
45523diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
45524index e7931cc..76a1ab9 100644
45525--- a/fs/cifs/cifsfs.c
45526+++ b/fs/cifs/cifsfs.c
45527@@ -999,7 +999,7 @@ cifs_init_request_bufs(void)
45528 /* cERROR(1, "CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize); */
45529 cifs_req_cachep = kmem_cache_create("cifs_request",
45530 CIFSMaxBufSize + max_hdr_size, 0,
45531- SLAB_HWCACHE_ALIGN, NULL);
45532+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
45533 if (cifs_req_cachep == NULL)
45534 return -ENOMEM;
45535
45536@@ -1026,7 +1026,7 @@ cifs_init_request_bufs(void)
45537 efficient to alloc 1 per page off the slab compared to 17K (5page)
45538 alloc of large cifs buffers even when page debugging is on */
45539 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
45540- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
45541+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
45542 NULL);
45543 if (cifs_sm_req_cachep == NULL) {
45544 mempool_destroy(cifs_req_poolp);
45545@@ -1111,8 +1111,8 @@ init_cifs(void)
45546 atomic_set(&bufAllocCount, 0);
45547 atomic_set(&smBufAllocCount, 0);
45548 #ifdef CONFIG_CIFS_STATS2
45549- atomic_set(&totBufAllocCount, 0);
45550- atomic_set(&totSmBufAllocCount, 0);
45551+ atomic_set_unchecked(&totBufAllocCount, 0);
45552+ atomic_set_unchecked(&totSmBufAllocCount, 0);
45553 #endif /* CONFIG_CIFS_STATS2 */
45554
45555 atomic_set(&midCount, 0);
45556diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
45557index f5af252..489b5f2 100644
45558--- a/fs/cifs/cifsglob.h
45559+++ b/fs/cifs/cifsglob.h
45560@@ -751,35 +751,35 @@ struct cifs_tcon {
45561 __u16 Flags; /* optional support bits */
45562 enum statusEnum tidStatus;
45563 #ifdef CONFIG_CIFS_STATS
45564- atomic_t num_smbs_sent;
45565+ atomic_unchecked_t num_smbs_sent;
45566 union {
45567 struct {
45568- atomic_t num_writes;
45569- atomic_t num_reads;
45570- atomic_t num_flushes;
45571- atomic_t num_oplock_brks;
45572- atomic_t num_opens;
45573- atomic_t num_closes;
45574- atomic_t num_deletes;
45575- atomic_t num_mkdirs;
45576- atomic_t num_posixopens;
45577- atomic_t num_posixmkdirs;
45578- atomic_t num_rmdirs;
45579- atomic_t num_renames;
45580- atomic_t num_t2renames;
45581- atomic_t num_ffirst;
45582- atomic_t num_fnext;
45583- atomic_t num_fclose;
45584- atomic_t num_hardlinks;
45585- atomic_t num_symlinks;
45586- atomic_t num_locks;
45587- atomic_t num_acl_get;
45588- atomic_t num_acl_set;
45589+ atomic_unchecked_t num_writes;
45590+ atomic_unchecked_t num_reads;
45591+ atomic_unchecked_t num_flushes;
45592+ atomic_unchecked_t num_oplock_brks;
45593+ atomic_unchecked_t num_opens;
45594+ atomic_unchecked_t num_closes;
45595+ atomic_unchecked_t num_deletes;
45596+ atomic_unchecked_t num_mkdirs;
45597+ atomic_unchecked_t num_posixopens;
45598+ atomic_unchecked_t num_posixmkdirs;
45599+ atomic_unchecked_t num_rmdirs;
45600+ atomic_unchecked_t num_renames;
45601+ atomic_unchecked_t num_t2renames;
45602+ atomic_unchecked_t num_ffirst;
45603+ atomic_unchecked_t num_fnext;
45604+ atomic_unchecked_t num_fclose;
45605+ atomic_unchecked_t num_hardlinks;
45606+ atomic_unchecked_t num_symlinks;
45607+ atomic_unchecked_t num_locks;
45608+ atomic_unchecked_t num_acl_get;
45609+ atomic_unchecked_t num_acl_set;
45610 } cifs_stats;
45611 #ifdef CONFIG_CIFS_SMB2
45612 struct {
45613- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
45614- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
45615+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
45616+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
45617 } smb2_stats;
45618 #endif /* CONFIG_CIFS_SMB2 */
45619 } stats;
45620@@ -1094,7 +1094,7 @@ build_path_to_root(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
45621 }
45622
45623 #ifdef CONFIG_CIFS_STATS
45624-#define cifs_stats_inc atomic_inc
45625+#define cifs_stats_inc atomic_inc_unchecked
45626
45627 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
45628 unsigned int bytes)
45629@@ -1459,8 +1459,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
45630 /* Various Debug counters */
45631 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
45632 #ifdef CONFIG_CIFS_STATS2
45633-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
45634-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
45635+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
45636+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
45637 #endif
45638 GLOBAL_EXTERN atomic_t smBufAllocCount;
45639 GLOBAL_EXTERN atomic_t midCount;
45640diff --git a/fs/cifs/link.c b/fs/cifs/link.c
45641index 51dc2fb..1e12a33 100644
45642--- a/fs/cifs/link.c
45643+++ b/fs/cifs/link.c
45644@@ -616,7 +616,7 @@ symlink_exit:
45645
45646 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
45647 {
45648- char *p = nd_get_link(nd);
45649+ const char *p = nd_get_link(nd);
45650 if (!IS_ERR(p))
45651 kfree(p);
45652 }
45653diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
45654index 3a00c0d..42d901c 100644
45655--- a/fs/cifs/misc.c
45656+++ b/fs/cifs/misc.c
45657@@ -169,7 +169,7 @@ cifs_buf_get(void)
45658 memset(ret_buf, 0, buf_size + 3);
45659 atomic_inc(&bufAllocCount);
45660 #ifdef CONFIG_CIFS_STATS2
45661- atomic_inc(&totBufAllocCount);
45662+ atomic_inc_unchecked(&totBufAllocCount);
45663 #endif /* CONFIG_CIFS_STATS2 */
45664 }
45665
45666@@ -204,7 +204,7 @@ cifs_small_buf_get(void)
45667 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
45668 atomic_inc(&smBufAllocCount);
45669 #ifdef CONFIG_CIFS_STATS2
45670- atomic_inc(&totSmBufAllocCount);
45671+ atomic_inc_unchecked(&totSmBufAllocCount);
45672 #endif /* CONFIG_CIFS_STATS2 */
45673
45674 }
45675diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
45676index 591bf19..690d600 100644
45677--- a/fs/cifs/smb1ops.c
45678+++ b/fs/cifs/smb1ops.c
45679@@ -617,27 +617,27 @@ static void
45680 cifs_clear_stats(struct cifs_tcon *tcon)
45681 {
45682 #ifdef CONFIG_CIFS_STATS
45683- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
45684- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
45685- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
45686- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
45687- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
45688- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
45689- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
45690- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
45691- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
45692- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
45693- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
45694- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
45695- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
45696- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
45697- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
45698- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
45699- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
45700- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
45701- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
45702- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
45703- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
45704+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
45705+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
45706+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
45707+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
45708+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
45709+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
45710+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
45711+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
45712+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
45713+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
45714+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
45715+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
45716+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
45717+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
45718+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
45719+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
45720+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
45721+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
45722+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
45723+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
45724+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
45725 #endif
45726 }
45727
45728@@ -646,36 +646,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
45729 {
45730 #ifdef CONFIG_CIFS_STATS
45731 seq_printf(m, " Oplocks breaks: %d",
45732- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
45733+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
45734 seq_printf(m, "\nReads: %d Bytes: %llu",
45735- atomic_read(&tcon->stats.cifs_stats.num_reads),
45736+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
45737 (long long)(tcon->bytes_read));
45738 seq_printf(m, "\nWrites: %d Bytes: %llu",
45739- atomic_read(&tcon->stats.cifs_stats.num_writes),
45740+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
45741 (long long)(tcon->bytes_written));
45742 seq_printf(m, "\nFlushes: %d",
45743- atomic_read(&tcon->stats.cifs_stats.num_flushes));
45744+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
45745 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
45746- atomic_read(&tcon->stats.cifs_stats.num_locks),
45747- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
45748- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
45749+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
45750+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
45751+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
45752 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
45753- atomic_read(&tcon->stats.cifs_stats.num_opens),
45754- atomic_read(&tcon->stats.cifs_stats.num_closes),
45755- atomic_read(&tcon->stats.cifs_stats.num_deletes));
45756+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
45757+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
45758+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
45759 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
45760- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
45761- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
45762+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
45763+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
45764 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
45765- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
45766- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
45767+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
45768+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
45769 seq_printf(m, "\nRenames: %d T2 Renames %d",
45770- atomic_read(&tcon->stats.cifs_stats.num_renames),
45771- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
45772+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
45773+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
45774 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
45775- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
45776- atomic_read(&tcon->stats.cifs_stats.num_fnext),
45777- atomic_read(&tcon->stats.cifs_stats.num_fclose));
45778+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
45779+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
45780+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
45781 #endif
45782 }
45783
45784diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
45785index 4d9dbe0..0af4601 100644
45786--- a/fs/cifs/smb2ops.c
45787+++ b/fs/cifs/smb2ops.c
45788@@ -291,8 +291,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
45789 #ifdef CONFIG_CIFS_STATS
45790 int i;
45791 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
45792- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
45793- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
45794+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
45795+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
45796 }
45797 #endif
45798 }
45799@@ -301,66 +301,66 @@ static void
45800 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
45801 {
45802 #ifdef CONFIG_CIFS_STATS
45803- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
45804- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
45805+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
45806+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
45807 seq_printf(m, "\nNegotiates: %d sent %d failed",
45808- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
45809- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
45810+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
45811+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
45812 seq_printf(m, "\nSessionSetups: %d sent %d failed",
45813- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
45814- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
45815+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
45816+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
45817 #define SMB2LOGOFF 0x0002 /* trivial request/resp */
45818 seq_printf(m, "\nLogoffs: %d sent %d failed",
45819- atomic_read(&sent[SMB2_LOGOFF_HE]),
45820- atomic_read(&failed[SMB2_LOGOFF_HE]));
45821+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
45822+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
45823 seq_printf(m, "\nTreeConnects: %d sent %d failed",
45824- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
45825- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
45826+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
45827+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
45828 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
45829- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
45830- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
45831+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
45832+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
45833 seq_printf(m, "\nCreates: %d sent %d failed",
45834- atomic_read(&sent[SMB2_CREATE_HE]),
45835- atomic_read(&failed[SMB2_CREATE_HE]));
45836+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
45837+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
45838 seq_printf(m, "\nCloses: %d sent %d failed",
45839- atomic_read(&sent[SMB2_CLOSE_HE]),
45840- atomic_read(&failed[SMB2_CLOSE_HE]));
45841+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
45842+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
45843 seq_printf(m, "\nFlushes: %d sent %d failed",
45844- atomic_read(&sent[SMB2_FLUSH_HE]),
45845- atomic_read(&failed[SMB2_FLUSH_HE]));
45846+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
45847+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
45848 seq_printf(m, "\nReads: %d sent %d failed",
45849- atomic_read(&sent[SMB2_READ_HE]),
45850- atomic_read(&failed[SMB2_READ_HE]));
45851+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
45852+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
45853 seq_printf(m, "\nWrites: %d sent %d failed",
45854- atomic_read(&sent[SMB2_WRITE_HE]),
45855- atomic_read(&failed[SMB2_WRITE_HE]));
45856+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
45857+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
45858 seq_printf(m, "\nLocks: %d sent %d failed",
45859- atomic_read(&sent[SMB2_LOCK_HE]),
45860- atomic_read(&failed[SMB2_LOCK_HE]));
45861+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
45862+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
45863 seq_printf(m, "\nIOCTLs: %d sent %d failed",
45864- atomic_read(&sent[SMB2_IOCTL_HE]),
45865- atomic_read(&failed[SMB2_IOCTL_HE]));
45866+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
45867+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
45868 seq_printf(m, "\nCancels: %d sent %d failed",
45869- atomic_read(&sent[SMB2_CANCEL_HE]),
45870- atomic_read(&failed[SMB2_CANCEL_HE]));
45871+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
45872+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
45873 seq_printf(m, "\nEchos: %d sent %d failed",
45874- atomic_read(&sent[SMB2_ECHO_HE]),
45875- atomic_read(&failed[SMB2_ECHO_HE]));
45876+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
45877+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
45878 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
45879- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
45880- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
45881+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
45882+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
45883 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
45884- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
45885- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
45886+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
45887+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
45888 seq_printf(m, "\nQueryInfos: %d sent %d failed",
45889- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
45890- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
45891+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
45892+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
45893 seq_printf(m, "\nSetInfos: %d sent %d failed",
45894- atomic_read(&sent[SMB2_SET_INFO_HE]),
45895- atomic_read(&failed[SMB2_SET_INFO_HE]));
45896+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
45897+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
45898 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
45899- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
45900- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
45901+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
45902+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
45903 #endif
45904 }
45905
45906diff --git a/fs/coda/cache.c b/fs/coda/cache.c
45907index 958ae0e..505c9d0 100644
45908--- a/fs/coda/cache.c
45909+++ b/fs/coda/cache.c
45910@@ -24,7 +24,7 @@
45911 #include "coda_linux.h"
45912 #include "coda_cache.h"
45913
45914-static atomic_t permission_epoch = ATOMIC_INIT(0);
45915+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
45916
45917 /* replace or extend an acl cache hit */
45918 void coda_cache_enter(struct inode *inode, int mask)
45919@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
45920 struct coda_inode_info *cii = ITOC(inode);
45921
45922 spin_lock(&cii->c_lock);
45923- cii->c_cached_epoch = atomic_read(&permission_epoch);
45924+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
45925 if (cii->c_uid != current_fsuid()) {
45926 cii->c_uid = current_fsuid();
45927 cii->c_cached_perm = mask;
45928@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
45929 {
45930 struct coda_inode_info *cii = ITOC(inode);
45931 spin_lock(&cii->c_lock);
45932- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
45933+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
45934 spin_unlock(&cii->c_lock);
45935 }
45936
45937 /* remove all acl caches */
45938 void coda_cache_clear_all(struct super_block *sb)
45939 {
45940- atomic_inc(&permission_epoch);
45941+ atomic_inc_unchecked(&permission_epoch);
45942 }
45943
45944
45945@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
45946 spin_lock(&cii->c_lock);
45947 hit = (mask & cii->c_cached_perm) == mask &&
45948 cii->c_uid == current_fsuid() &&
45949- cii->c_cached_epoch == atomic_read(&permission_epoch);
45950+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
45951 spin_unlock(&cii->c_lock);
45952
45953 return hit;
45954diff --git a/fs/compat.c b/fs/compat.c
45955index 015e1e1..5ce8e54 100644
45956--- a/fs/compat.c
45957+++ b/fs/compat.c
45958@@ -490,7 +490,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
45959
45960 set_fs(KERNEL_DS);
45961 /* The __user pointer cast is valid because of the set_fs() */
45962- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
45963+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
45964 set_fs(oldfs);
45965 /* truncating is ok because it's a user address */
45966 if (!ret)
45967@@ -548,7 +548,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
45968 goto out;
45969
45970 ret = -EINVAL;
45971- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
45972+ if (nr_segs > UIO_MAXIOV)
45973 goto out;
45974 if (nr_segs > fast_segs) {
45975 ret = -ENOMEM;
45976@@ -831,6 +831,7 @@ struct compat_old_linux_dirent {
45977
45978 struct compat_readdir_callback {
45979 struct compat_old_linux_dirent __user *dirent;
45980+ struct file * file;
45981 int result;
45982 };
45983
45984@@ -848,6 +849,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
45985 buf->result = -EOVERFLOW;
45986 return -EOVERFLOW;
45987 }
45988+
45989+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45990+ return 0;
45991+
45992 buf->result++;
45993 dirent = buf->dirent;
45994 if (!access_ok(VERIFY_WRITE, dirent,
45995@@ -878,6 +883,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
45996
45997 buf.result = 0;
45998 buf.dirent = dirent;
45999+ buf.file = f.file;
46000
46001 error = vfs_readdir(f.file, compat_fillonedir, &buf);
46002 if (buf.result)
46003@@ -897,6 +903,7 @@ struct compat_linux_dirent {
46004 struct compat_getdents_callback {
46005 struct compat_linux_dirent __user *current_dir;
46006 struct compat_linux_dirent __user *previous;
46007+ struct file * file;
46008 int count;
46009 int error;
46010 };
46011@@ -918,6 +925,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
46012 buf->error = -EOVERFLOW;
46013 return -EOVERFLOW;
46014 }
46015+
46016+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46017+ return 0;
46018+
46019 dirent = buf->previous;
46020 if (dirent) {
46021 if (__put_user(offset, &dirent->d_off))
46022@@ -963,6 +974,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
46023 buf.previous = NULL;
46024 buf.count = count;
46025 buf.error = 0;
46026+ buf.file = f.file;
46027
46028 error = vfs_readdir(f.file, compat_filldir, &buf);
46029 if (error >= 0)
46030@@ -983,6 +995,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
46031 struct compat_getdents_callback64 {
46032 struct linux_dirent64 __user *current_dir;
46033 struct linux_dirent64 __user *previous;
46034+ struct file * file;
46035 int count;
46036 int error;
46037 };
46038@@ -999,6 +1012,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
46039 buf->error = -EINVAL; /* only used if we fail.. */
46040 if (reclen > buf->count)
46041 return -EINVAL;
46042+
46043+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46044+ return 0;
46045+
46046 dirent = buf->previous;
46047
46048 if (dirent) {
46049@@ -1048,13 +1065,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
46050 buf.previous = NULL;
46051 buf.count = count;
46052 buf.error = 0;
46053+ buf.file = f.file;
46054
46055 error = vfs_readdir(f.file, compat_filldir64, &buf);
46056 if (error >= 0)
46057 error = buf.error;
46058 lastdirent = buf.previous;
46059 if (lastdirent) {
46060- typeof(lastdirent->d_off) d_off = f.file->f_pos;
46061+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
46062 if (__put_user_unaligned(d_off, &lastdirent->d_off))
46063 error = -EFAULT;
46064 else
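All three compat readdir paths above receive the same two-part change: the directory's struct file is threaded into the callback context, and gr_acl_handle_filldir() is given a veto over each entry. Returning 0 from a filldir callback skips the entry without aborting the walk, so denied names are hidden silently. Condensed sketch of the idiom; real_filldir is a hypothetical stand-in for the original callback body:

    static int filtering_filldir(void *__buf, const char *name, int namlen,
                                 loff_t offset, u64 ino, unsigned int d_type)
    {
            struct compat_getdents_callback *buf = __buf;

            if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
                    return 0;       /* hide this entry, keep iterating */

            return real_filldir(buf, name, namlen, offset, ino, d_type);
    }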
46065diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
46066index a81147e..20bf2b5 100644
46067--- a/fs/compat_binfmt_elf.c
46068+++ b/fs/compat_binfmt_elf.c
46069@@ -30,11 +30,13 @@
46070 #undef elf_phdr
46071 #undef elf_shdr
46072 #undef elf_note
46073+#undef elf_dyn
46074 #undef elf_addr_t
46075 #define elfhdr elf32_hdr
46076 #define elf_phdr elf32_phdr
46077 #define elf_shdr elf32_shdr
46078 #define elf_note elf32_note
46079+#define elf_dyn Elf32_Dyn
46080 #define elf_addr_t Elf32_Addr
46081
46082 /*
46083diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
46084index 4c6285f..b7a2411 100644
46085--- a/fs/compat_ioctl.c
46086+++ b/fs/compat_ioctl.c
46087@@ -623,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
46088 return -EFAULT;
46089 if (__get_user(udata, &ss32->iomem_base))
46090 return -EFAULT;
46091- ss.iomem_base = compat_ptr(udata);
46092+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
46093 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
46094 __get_user(ss.port_high, &ss32->port_high))
46095 return -EFAULT;
46096@@ -798,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
46097 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
46098 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
46099 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
46100- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
46101+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
46102 return -EFAULT;
46103
46104 return ioctl_preallocate(file, p);
46105@@ -1617,8 +1617,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
46106 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
46107 {
46108 unsigned int a, b;
46109- a = *(unsigned int *)p;
46110- b = *(unsigned int *)q;
46111+ a = *(const unsigned int *)p;
46112+ b = *(const unsigned int *)q;
46113 if (a > b)
46114 return 1;
46115 if (a < b)
46116diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
46117index 7414ae2..d98ad6d 100644
46118--- a/fs/configfs/dir.c
46119+++ b/fs/configfs/dir.c
46120@@ -1564,7 +1564,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
46121 }
46122 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
46123 struct configfs_dirent *next;
46124- const char * name;
46125+ const unsigned char * name;
46126+ char d_name[sizeof(next->s_dentry->d_iname)];
46127 int len;
46128 struct inode *inode = NULL;
46129
46130@@ -1574,7 +1575,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
46131 continue;
46132
46133 name = configfs_get_name(next);
46134- len = strlen(name);
46135+ if (next->s_dentry && name == next->s_dentry->d_iname) {
46136+ len = next->s_dentry->d_name.len;
46137+ memcpy(d_name, name, len);
46138+ name = d_name;
46139+ } else
46140+ len = strlen(name);
46141
46142 /*
46143 * We'll have a dentry and an inode for
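The configfs change snapshots names that live in the dentry's inline buffer: d_iname can be rewritten by a concurrent rename, so the length is taken from d_name.len and the bytes are copied into a stack buffer of the same size before use. The same guard as a standalone helper (hypothetical name, sketch only; buf must be sizeof(dentry->d_iname) bytes):

    static const char *stable_dentry_name(struct dentry *dentry,
                                          const char *name,
                                          char *buf, int *len)
    {
            if (dentry && name == dentry->d_iname) {
                    *len = dentry->d_name.len;
                    memcpy(buf, name, *len);  /* snapshot before a rename races */
                    return buf;
            }
            *len = strlen(name);
            return name;
    }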
46144diff --git a/fs/coredump.c b/fs/coredump.c
46145index ce47379..68c8e43 100644
46146--- a/fs/coredump.c
46147+++ b/fs/coredump.c
46148@@ -52,7 +52,7 @@ struct core_name {
46149 char *corename;
46150 int used, size;
46151 };
46152-static atomic_t call_count = ATOMIC_INIT(1);
46153+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
46154
46155 /* The maximal length of core_pattern is also specified in sysctl.c */
46156
46157@@ -60,7 +60,7 @@ static int expand_corename(struct core_name *cn)
46158 {
46159 char *old_corename = cn->corename;
46160
46161- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
46162+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
46163 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
46164
46165 if (!cn->corename) {
46166@@ -157,7 +157,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm)
46167 int pid_in_pattern = 0;
46168 int err = 0;
46169
46170- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
46171+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
46172 cn->corename = kmalloc(cn->size, GFP_KERNEL);
46173 cn->used = 0;
46174
46175@@ -414,17 +414,17 @@ static void wait_for_dump_helpers(struct file *file)
46176 pipe = file->f_path.dentry->d_inode->i_pipe;
46177
46178 pipe_lock(pipe);
46179- pipe->readers++;
46180- pipe->writers--;
46181+ atomic_inc(&pipe->readers);
46182+ atomic_dec(&pipe->writers);
46183
46184- while ((pipe->readers > 1) && (!signal_pending(current))) {
46185+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
46186 wake_up_interruptible_sync(&pipe->wait);
46187 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
46188 pipe_wait(pipe);
46189 }
46190
46191- pipe->readers--;
46192- pipe->writers++;
46193+ atomic_dec(&pipe->readers);
46194+ atomic_inc(&pipe->writers);
46195 pipe_unlock(pipe);
46196
46197 }
46198@@ -471,7 +471,8 @@ void do_coredump(siginfo_t *siginfo, struct pt_regs *regs)
46199 int ispipe;
46200 struct files_struct *displaced;
46201 bool need_nonrelative = false;
46202- static atomic_t core_dump_count = ATOMIC_INIT(0);
46203+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
46204+ long signr = siginfo->si_signo;
46205 struct coredump_params cprm = {
46206 .siginfo = siginfo,
46207 .regs = regs,
46208@@ -484,7 +485,10 @@ void do_coredump(siginfo_t *siginfo, struct pt_regs *regs)
46209 .mm_flags = mm->flags,
46210 };
46211
46212- audit_core_dumps(siginfo->si_signo);
46213+ audit_core_dumps(signr);
46214+
46215+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
46216+ gr_handle_brute_attach(cprm.mm_flags);
46217
46218 binfmt = mm->binfmt;
46219 if (!binfmt || !binfmt->core_dump)
46220@@ -508,7 +512,7 @@ void do_coredump(siginfo_t *siginfo, struct pt_regs *regs)
46221 need_nonrelative = true;
46222 }
46223
46224- retval = coredump_wait(siginfo->si_signo, &core_state);
46225+ retval = coredump_wait(signr, &core_state);
46226 if (retval < 0)
46227 goto fail_creds;
46228
46229@@ -556,7 +560,7 @@ void do_coredump(siginfo_t *siginfo, struct pt_regs *regs)
46230 }
46231 cprm.limit = RLIM_INFINITY;
46232
46233- dump_count = atomic_inc_return(&core_dump_count);
46234+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
46235 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
46236 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
46237 task_tgid_vnr(current), current->comm);
46238@@ -583,6 +587,8 @@ void do_coredump(siginfo_t *siginfo, struct pt_regs *regs)
46239 } else {
46240 struct inode *inode;
46241
46242+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
46243+
46244 if (cprm.limit < binfmt->min_coredump)
46245 goto fail_unlock;
46246
46247@@ -640,7 +646,7 @@ close_fail:
46248 filp_close(cprm.file, NULL);
46249 fail_dropcount:
46250 if (ispipe)
46251- atomic_dec(&core_dump_count);
46252+ atomic_dec_unchecked(&core_dump_count);
46253 fail_unlock:
46254 kfree(cn.corename);
46255 fail_corename:
46256@@ -659,7 +665,7 @@ fail:
46257 */
46258 int dump_write(struct file *file, const void *addr, int nr)
46259 {
46260- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
46261+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
46262 }
46263 EXPORT_SYMBOL(dump_write);
46264
46265diff --git a/fs/dcache.c b/fs/dcache.c
46266index 0d0adb6..f4646e9 100644
46267--- a/fs/dcache.c
46268+++ b/fs/dcache.c
46269@@ -3164,7 +3164,7 @@ void __init vfs_caches_init(unsigned long mempages)
46270 mempages -= reserve;
46271
46272 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
46273- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
46274+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
46275
46276 dcache_init();
46277 inode_init();
46278diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
46279index b607d92..41fda09 100644
46280--- a/fs/debugfs/inode.c
46281+++ b/fs/debugfs/inode.c
46282@@ -416,7 +416,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
46283 */
46284 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
46285 {
46286+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
46287+ return __create_file(name, S_IFDIR | S_IRWXU,
46288+#else
46289 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
46290+#endif
46291 parent, NULL, NULL);
46292 }
46293 EXPORT_SYMBOL_GPL(debugfs_create_dir);
46294diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
46295index cc7709e..7e7211f 100644
46296--- a/fs/ecryptfs/inode.c
46297+++ b/fs/ecryptfs/inode.c
46298@@ -674,7 +674,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
46299 old_fs = get_fs();
46300 set_fs(get_ds());
46301 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
46302- (char __user *)lower_buf,
46303+ (char __force_user *)lower_buf,
46304 PATH_MAX);
46305 set_fs(old_fs);
46306 if (rc < 0)
46307@@ -706,7 +706,7 @@ out:
46308 static void
46309 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
46310 {
46311- char *buf = nd_get_link(nd);
46312+ const char *buf = nd_get_link(nd);
46313 if (!IS_ERR(buf)) {
46314 /* Free the char* */
46315 kfree(buf);
46316diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
46317index 412e6ed..4292d22 100644
46318--- a/fs/ecryptfs/miscdev.c
46319+++ b/fs/ecryptfs/miscdev.c
46320@@ -315,7 +315,7 @@ check_list:
46321 goto out_unlock_msg_ctx;
46322 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
46323 if (msg_ctx->msg) {
46324- if (copy_to_user(&buf[i], packet_length, packet_length_size))
46325+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
46326 goto out_unlock_msg_ctx;
46327 i += packet_length_size;
46328 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
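The miscdev fix applies the standard rule of validating a length against the source object before copy_to_user(); the added comparison relies on || short-circuiting so the copy never runs with an oversized count. Generic shape (illustrative, hypothetical function):

    static int send_packet(char __user *ubuf, const void *pkt, size_t len,
                           size_t pkt_max)
    {
            /* || short-circuits: copy_to_user() never sees a bad len */
            if (len > pkt_max || copy_to_user(ubuf, pkt, len))
                    return -EFAULT;
            return 0;
    }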
46329diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
46330index b2a34a1..162fa69 100644
46331--- a/fs/ecryptfs/read_write.c
46332+++ b/fs/ecryptfs/read_write.c
46333@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
46334 return -EIO;
46335 fs_save = get_fs();
46336 set_fs(get_ds());
46337- rc = vfs_write(lower_file, data, size, &offset);
46338+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
46339 set_fs(fs_save);
46340 mark_inode_dirty_sync(ecryptfs_inode);
46341 return rc;
46342@@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
46343 return -EIO;
46344 fs_save = get_fs();
46345 set_fs(get_ds());
46346- rc = vfs_read(lower_file, data, size, &offset);
46347+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
46348 set_fs(fs_save);
46349 return rc;
46350 }
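The __force_user casts in the ecryptfs hunks (and in fs/compat.c earlier) all mark one idiom: after set_fs(get_ds()) the user/kernel address-limit check admits kernel pointers, so handing a kernel buffer to vfs_read()/vfs_write() is legitimate, and __force_user records that fact for sparse instead of hiding it behind a plain cast. The idiom as a self-contained helper (hypothetical name):

    static ssize_t kernel_vfs_read(struct file *file, char *kernel_buf,
                                   size_t size, loff_t *pos)
    {
            mm_segment_t old_fs = get_fs();
            ssize_t rc;

            set_fs(get_ds());       /* address limit now covers kernel space */
            rc = vfs_read(file, (char __force_user *)kernel_buf, size, pos);
            set_fs(old_fs);         /* always restore before returning */
            return rc;
    }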
46351diff --git a/fs/exec.c b/fs/exec.c
46352index c6e6de4..de6841c 100644
46353--- a/fs/exec.c
46354+++ b/fs/exec.c
46355@@ -55,6 +55,16 @@
46356 #include <linux/pipe_fs_i.h>
46357 #include <linux/oom.h>
46358 #include <linux/compat.h>
46359+#include <linux/random.h>
46360+#include <linux/seq_file.h>
46361+#include <linux/coredump.h>
46362+
46363+#ifdef CONFIG_PAX_REFCOUNT
46364+#include <linux/kallsyms.h>
46365+#include <linux/kdebug.h>
46366+#endif
46367+
46368+#include <trace/events/fs.h>
46369
46370 #include <asm/uaccess.h>
46371 #include <asm/mmu_context.h>
46372@@ -66,6 +76,18 @@
46373
46374 #include <trace/events/sched.h>
46375
46376+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
46377+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
46378+{
46379+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
46380+}
46381+#endif
46382+
46383+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
46384+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
46385+EXPORT_SYMBOL(pax_set_initial_flags_func);
46386+#endif
46387+
46388 int suid_dumpable = 0;
46389
46390 static LIST_HEAD(formats);
46391@@ -180,18 +202,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
46392 int write)
46393 {
46394 struct page *page;
46395- int ret;
46396
46397-#ifdef CONFIG_STACK_GROWSUP
46398- if (write) {
46399- ret = expand_downwards(bprm->vma, pos);
46400- if (ret < 0)
46401- return NULL;
46402- }
46403-#endif
46404- ret = get_user_pages(current, bprm->mm, pos,
46405- 1, write, 1, &page, NULL);
46406- if (ret <= 0)
46407+ if (0 > expand_downwards(bprm->vma, pos))
46408+ return NULL;
46409+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
46410 return NULL;
46411
46412 if (write) {
46413@@ -207,6 +221,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
46414 if (size <= ARG_MAX)
46415 return page;
46416
46417+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46418+ // only allow 512KB for argv+env on suid/sgid binaries
46419+ // to prevent easy ASLR exhaustion
46420+ if (((bprm->cred->euid != current_euid()) ||
46421+ (bprm->cred->egid != current_egid())) &&
46422+ (size > (512 * 1024))) {
46423+ put_page(page);
46424+ return NULL;
46425+ }
46426+#endif
46427+
46428 /*
46429 * Limit to 1/4-th the stack size for the argv+env strings.
46430 * This ensures that:
46431@@ -266,6 +291,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
46432 vma->vm_end = STACK_TOP_MAX;
46433 vma->vm_start = vma->vm_end - PAGE_SIZE;
46434 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
46435+
46436+#ifdef CONFIG_PAX_SEGMEXEC
46437+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
46438+#endif
46439+
46440 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
46441 INIT_LIST_HEAD(&vma->anon_vma_chain);
46442
46443@@ -276,6 +306,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
46444 mm->stack_vm = mm->total_vm = 1;
46445 up_write(&mm->mmap_sem);
46446 bprm->p = vma->vm_end - sizeof(void *);
46447+
46448+#ifdef CONFIG_PAX_RANDUSTACK
46449+ if (randomize_va_space)
46450+ bprm->p ^= random32() & ~PAGE_MASK;
46451+#endif
46452+
46453 return 0;
46454 err:
46455 up_write(&mm->mmap_sem);
46456@@ -384,19 +420,7 @@ err:
46457 return err;
46458 }
46459
46460-struct user_arg_ptr {
46461-#ifdef CONFIG_COMPAT
46462- bool is_compat;
46463-#endif
46464- union {
46465- const char __user *const __user *native;
46466-#ifdef CONFIG_COMPAT
46467- const compat_uptr_t __user *compat;
46468-#endif
46469- } ptr;
46470-};
46471-
46472-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
46473+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
46474 {
46475 const char __user *native;
46476
46477@@ -405,14 +429,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
46478 compat_uptr_t compat;
46479
46480 if (get_user(compat, argv.ptr.compat + nr))
46481- return ERR_PTR(-EFAULT);
46482+ return (const char __force_user *)ERR_PTR(-EFAULT);
46483
46484 return compat_ptr(compat);
46485 }
46486 #endif
46487
46488 if (get_user(native, argv.ptr.native + nr))
46489- return ERR_PTR(-EFAULT);
46490+ return (const char __force_user *)ERR_PTR(-EFAULT);
46491
46492 return native;
46493 }
46494@@ -431,11 +455,12 @@ static int count(struct user_arg_ptr argv, int max)
46495 if (!p)
46496 break;
46497
46498- if (IS_ERR(p))
46499+ if (IS_ERR((const char __force_kernel *)p))
46500 return -EFAULT;
46501
46502- if (i++ >= max)
46503+ if (i >= max)
46504 return -E2BIG;
46505+ ++i;
46506
46507 if (fatal_signal_pending(current))
46508 return -ERESTARTNOHAND;
46509@@ -465,7 +490,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
46510
46511 ret = -EFAULT;
46512 str = get_user_arg_ptr(argv, argc);
46513- if (IS_ERR(str))
46514+ if (IS_ERR((const char __force_kernel *)str))
46515 goto out;
46516
46517 len = strnlen_user(str, MAX_ARG_STRLEN);
46518@@ -547,7 +572,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
46519 int r;
46520 mm_segment_t oldfs = get_fs();
46521 struct user_arg_ptr argv = {
46522- .ptr.native = (const char __user *const __user *)__argv,
46523+ .ptr.native = (const char __force_user *const __force_user *)__argv,
46524 };
46525
46526 set_fs(KERNEL_DS);
46527@@ -582,7 +607,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
46528 unsigned long new_end = old_end - shift;
46529 struct mmu_gather tlb;
46530
46531- BUG_ON(new_start > new_end);
46532+ if (new_start >= new_end || new_start < mmap_min_addr)
46533+ return -ENOMEM;
46534
46535 /*
46536 * ensure there are no vmas between where we want to go
46537@@ -591,6 +617,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
46538 if (vma != find_vma(mm, new_start))
46539 return -EFAULT;
46540
46541+#ifdef CONFIG_PAX_SEGMEXEC
46542+ BUG_ON(pax_find_mirror_vma(vma));
46543+#endif
46544+
46545 /*
46546 * cover the whole range: [new_start, old_end)
46547 */
46548@@ -671,10 +701,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
46549 stack_top = arch_align_stack(stack_top);
46550 stack_top = PAGE_ALIGN(stack_top);
46551
46552- if (unlikely(stack_top < mmap_min_addr) ||
46553- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
46554- return -ENOMEM;
46555-
46556 stack_shift = vma->vm_end - stack_top;
46557
46558 bprm->p -= stack_shift;
46559@@ -686,8 +712,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
46560 bprm->exec -= stack_shift;
46561
46562 down_write(&mm->mmap_sem);
46563+
46564+ /* Move stack pages down in memory. */
46565+ if (stack_shift) {
46566+ ret = shift_arg_pages(vma, stack_shift);
46567+ if (ret)
46568+ goto out_unlock;
46569+ }
46570+
46571 vm_flags = VM_STACK_FLAGS;
46572
46573+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
46574+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46575+ vm_flags &= ~VM_EXEC;
46576+
46577+#ifdef CONFIG_PAX_MPROTECT
46578+ if (mm->pax_flags & MF_PAX_MPROTECT)
46579+ vm_flags &= ~VM_MAYEXEC;
46580+#endif
46581+
46582+ }
46583+#endif
46584+
46585 /*
46586 * Adjust stack execute permissions; explicitly enable for
46587 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
46588@@ -706,13 +752,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
46589 goto out_unlock;
46590 BUG_ON(prev != vma);
46591
46592- /* Move stack pages down in memory. */
46593- if (stack_shift) {
46594- ret = shift_arg_pages(vma, stack_shift);
46595- if (ret)
46596- goto out_unlock;
46597- }
46598-
46599 /* mprotect_fixup is overkill to remove the temporary stack flags */
46600 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
46601
46602@@ -771,6 +810,8 @@ struct file *open_exec(const char *name)
46603
46604 fsnotify_open(file);
46605
46606+ trace_open_exec(name);
46607+
46608 err = deny_write_access(file);
46609 if (err)
46610 goto exit;
46611@@ -794,7 +835,7 @@ int kernel_read(struct file *file, loff_t offset,
46612 old_fs = get_fs();
46613 set_fs(get_ds());
46614 /* The cast to a user pointer is valid due to the set_fs() */
46615- result = vfs_read(file, (void __user *)addr, count, &pos);
46616+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
46617 set_fs(old_fs);
46618 return result;
46619 }
46620@@ -1246,7 +1287,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
46621 }
46622 rcu_read_unlock();
46623
46624- if (p->fs->users > n_fs) {
46625+ if (atomic_read(&p->fs->users) > n_fs) {
46626 bprm->unsafe |= LSM_UNSAFE_SHARE;
46627 } else {
46628 res = -EAGAIN;
46629@@ -1449,6 +1490,28 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
46630
46631 EXPORT_SYMBOL(search_binary_handler);
46632
46633+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46634+static DEFINE_PER_CPU(u64, exec_counter);
46635+static int __init init_exec_counters(void)
46636+{
46637+ unsigned int cpu;
46638+
46639+ for_each_possible_cpu(cpu) {
46640+ per_cpu(exec_counter, cpu) = (u64)cpu;
46641+ }
46642+
46643+ return 0;
46644+}
46645+early_initcall(init_exec_counters);
46646+static inline void increment_exec_counter(void)
46647+{
46648+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
46649+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
46650+}
46651+#else
46652+static inline void increment_exec_counter(void) {}
46653+#endif
46654+
46655 /*
46656 * sys_execve() executes a new program.
46657 */
46658@@ -1457,6 +1520,11 @@ static int do_execve_common(const char *filename,
46659 struct user_arg_ptr envp,
46660 struct pt_regs *regs)
46661 {
46662+#ifdef CONFIG_GRKERNSEC
46663+ struct file *old_exec_file;
46664+ struct acl_subject_label *old_acl;
46665+ struct rlimit old_rlim[RLIM_NLIMITS];
46666+#endif
46667 struct linux_binprm *bprm;
46668 struct file *file;
46669 struct files_struct *displaced;
46670@@ -1464,6 +1532,8 @@ static int do_execve_common(const char *filename,
46671 int retval;
46672 const struct cred *cred = current_cred();
46673
46674+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
46675+
46676 /*
46677 * We move the actual failure in case of RLIMIT_NPROC excess from
46678 * set*uid() to execve() because too many poorly written programs
46679@@ -1504,12 +1574,27 @@ static int do_execve_common(const char *filename,
46680 if (IS_ERR(file))
46681 goto out_unmark;
46682
46683+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
46684+ retval = -EPERM;
46685+ goto out_file;
46686+ }
46687+
46688 sched_exec();
46689
46690 bprm->file = file;
46691 bprm->filename = filename;
46692 bprm->interp = filename;
46693
46694+ if (gr_process_user_ban()) {
46695+ retval = -EPERM;
46696+ goto out_file;
46697+ }
46698+
46699+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
46700+ retval = -EACCES;
46701+ goto out_file;
46702+ }
46703+
46704 retval = bprm_mm_init(bprm);
46705 if (retval)
46706 goto out_file;
46707@@ -1526,24 +1611,65 @@ static int do_execve_common(const char *filename,
46708 if (retval < 0)
46709 goto out;
46710
46711+#ifdef CONFIG_GRKERNSEC
46712+ old_acl = current->acl;
46713+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
46714+ old_exec_file = current->exec_file;
46715+ get_file(file);
46716+ current->exec_file = file;
46717+#endif
46718+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46719+ /* limit suid stack to 8MB
46720+ we saved the old limits above and will restore them if this exec fails
46721+ */
46722+ if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
46723+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
46724+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
46725+#endif
46726+
46727+ if (!gr_tpe_allow(file)) {
46728+ retval = -EACCES;
46729+ goto out_fail;
46730+ }
46731+
46732+ if (gr_check_crash_exec(file)) {
46733+ retval = -EACCES;
46734+ goto out_fail;
46735+ }
46736+
46737+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
46738+ bprm->unsafe);
46739+ if (retval < 0)
46740+ goto out_fail;
46741+
46742 retval = copy_strings_kernel(1, &bprm->filename, bprm);
46743 if (retval < 0)
46744- goto out;
46745+ goto out_fail;
46746
46747 bprm->exec = bprm->p;
46748 retval = copy_strings(bprm->envc, envp, bprm);
46749 if (retval < 0)
46750- goto out;
46751+ goto out_fail;
46752
46753 retval = copy_strings(bprm->argc, argv, bprm);
46754 if (retval < 0)
46755- goto out;
46756+ goto out_fail;
46757+
46758+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
46759+
46760+ gr_handle_exec_args(bprm, argv);
46761
46762 retval = search_binary_handler(bprm,regs);
46763 if (retval < 0)
46764- goto out;
46765+ goto out_fail;
46766+#ifdef CONFIG_GRKERNSEC
46767+ if (old_exec_file)
46768+ fput(old_exec_file);
46769+#endif
46770
46771 /* execve succeeded */
46772+
46773+ increment_exec_counter();
46774 current->fs->in_exec = 0;
46775 current->in_execve = 0;
46776 acct_update_integrals(current);
46777@@ -1552,6 +1678,14 @@ static int do_execve_common(const char *filename,
46778 put_files_struct(displaced);
46779 return retval;
46780
46781+out_fail:
46782+#ifdef CONFIG_GRKERNSEC
46783+ current->acl = old_acl;
46784+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
46785+ fput(current->exec_file);
46786+ current->exec_file = old_exec_file;
46787+#endif
46788+
46789 out:
46790 if (bprm->mm) {
46791 acct_arg_size(bprm, 0);
46792@@ -1727,3 +1861,253 @@ int kernel_execve(const char *filename,
46793 ret_from_kernel_execve(p);
46794 }
46795 #endif
46796+
46797+int pax_check_flags(unsigned long *flags)
46798+{
46799+ int retval = 0;
46800+
46801+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
46802+ if (*flags & MF_PAX_SEGMEXEC)
46803+ {
46804+ *flags &= ~MF_PAX_SEGMEXEC;
46805+ retval = -EINVAL;
46806+ }
46807+#endif
46808+
46809+ if ((*flags & MF_PAX_PAGEEXEC)
46810+
46811+#ifdef CONFIG_PAX_PAGEEXEC
46812+ && (*flags & MF_PAX_SEGMEXEC)
46813+#endif
46814+
46815+ )
46816+ {
46817+ *flags &= ~MF_PAX_PAGEEXEC;
46818+ retval = -EINVAL;
46819+ }
46820+
46821+ if ((*flags & MF_PAX_MPROTECT)
46822+
46823+#ifdef CONFIG_PAX_MPROTECT
46824+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
46825+#endif
46826+
46827+ )
46828+ {
46829+ *flags &= ~MF_PAX_MPROTECT;
46830+ retval = -EINVAL;
46831+ }
46832+
46833+ if ((*flags & MF_PAX_EMUTRAMP)
46834+
46835+#ifdef CONFIG_PAX_EMUTRAMP
46836+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
46837+#endif
46838+
46839+ )
46840+ {
46841+ *flags &= ~MF_PAX_EMUTRAMP;
46842+ retval = -EINVAL;
46843+ }
46844+
46845+ return retval;
46846+}
46847+
46848+EXPORT_SYMBOL(pax_check_flags);
46849+
46850+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
46851+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
46852+{
46853+ struct task_struct *tsk = current;
46854+ struct mm_struct *mm = current->mm;
46855+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
46856+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
46857+ char *path_exec = NULL;
46858+ char *path_fault = NULL;
46859+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
46860+ siginfo_t info = { };
46861+
46862+ if (buffer_exec && buffer_fault) {
46863+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
46864+
46865+ down_read(&mm->mmap_sem);
46866+ vma = mm->mmap;
46867+ while (vma && (!vma_exec || !vma_fault)) {
46868+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
46869+ vma_exec = vma;
46870+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
46871+ vma_fault = vma;
46872+ vma = vma->vm_next;
46873+ }
46874+ if (vma_exec) {
46875+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
46876+ if (IS_ERR(path_exec))
46877+ path_exec = "<path too long>";
46878+ else {
46879+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
46880+ if (path_exec) {
46881+ *path_exec = 0;
46882+ path_exec = buffer_exec;
46883+ } else
46884+ path_exec = "<path too long>";
46885+ }
46886+ }
46887+ if (vma_fault) {
46888+ start = vma_fault->vm_start;
46889+ end = vma_fault->vm_end;
46890+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
46891+ if (vma_fault->vm_file) {
46892+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
46893+ if (IS_ERR(path_fault))
46894+ path_fault = "<path too long>";
46895+ else {
46896+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
46897+ if (path_fault) {
46898+ *path_fault = 0;
46899+ path_fault = buffer_fault;
46900+ } else
46901+ path_fault = "<path too long>";
46902+ }
46903+ } else
46904+ path_fault = "<anonymous mapping>";
46905+ }
46906+ up_read(&mm->mmap_sem);
46907+ }
46908+ if (tsk->signal->curr_ip)
46909+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
46910+ else
46911+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
46912+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
46913+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
46914+ task_uid(tsk), task_euid(tsk), pc, sp);
46915+ free_page((unsigned long)buffer_exec);
46916+ free_page((unsigned long)buffer_fault);
46917+ pax_report_insns(regs, pc, sp);
46918+ info.si_signo = SIGKILL;
46919+ info.si_errno = 0;
46920+ info.si_code = SI_KERNEL;
46921+ info.si_pid = 0;
46922+ info.si_uid = 0;
46923+ do_coredump(&info, regs);
46924+}
46925+#endif
46926+
46927+#ifdef CONFIG_PAX_REFCOUNT
46928+void pax_report_refcount_overflow(struct pt_regs *regs)
46929+{
46930+ if (current->signal->curr_ip)
46931+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
46932+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
46933+ else
46934+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
46935+ current->comm, task_pid_nr(current), current_uid(), current_euid());
46936+	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
46937+ show_regs(regs);
46938+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
46939+}
46940+#endif
46941+
46942+#ifdef CONFIG_PAX_USERCOPY
46943+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
46944+static noinline int check_stack_object(const void *obj, unsigned long len)
46945+{
46946+ const void * const stack = task_stack_page(current);
46947+ const void * const stackend = stack + THREAD_SIZE;
46948+
46949+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
46950+ const void *frame = NULL;
46951+ const void *oldframe;
46952+#endif
46953+
46954+ if (obj + len < obj)
46955+ return -1;
46956+
46957+ if (obj + len <= stack || stackend <= obj)
46958+ return 0;
46959+
46960+ if (obj < stack || stackend < obj + len)
46961+ return -1;
46962+
46963+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
46964+ oldframe = __builtin_frame_address(1);
46965+ if (oldframe)
46966+ frame = __builtin_frame_address(2);
46967+ /*
46968+ low ----------------------------------------------> high
46969+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
46970+ ^----------------^
46971+ allow copies only within here
46972+ */
46973+ while (stack <= frame && frame < stackend) {
46974+ /* if obj + len extends past the last frame, this
46975+ check won't pass and the next frame will be 0,
46976+ causing us to bail out and correctly report
46977+ the copy as invalid
46978+ */
46979+ if (obj + len <= frame)
46980+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
46981+ oldframe = frame;
46982+ frame = *(const void * const *)frame;
46983+ }
46984+ return -1;
46985+#else
46986+ return 1;
46987+#endif
46988+}
46989+
46990+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
46991+{
46992+ if (current->signal->curr_ip)
46993+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
46994+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
46995+ else
46996+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
46997+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
46998+ dump_stack();
46999+ gr_handle_kernel_exploit();
47000+ do_group_exit(SIGKILL);
47001+}
47002+#endif
47003+
47004+void check_object_size(const void *ptr, unsigned long n, bool to)
47005+{
47006+
47007+#ifdef CONFIG_PAX_USERCOPY
47008+ const char *type;
47009+
47010+ if (!n)
47011+ return;
47012+
47013+ type = check_heap_object(ptr, n);
47014+ if (!type) {
47015+ if (check_stack_object(ptr, n) != -1)
47016+ return;
47017+ type = "<process stack>";
47018+ }
47019+
47020+ pax_report_usercopy(ptr, n, to, type);
47021+#endif
47022+
47023+}
47024+EXPORT_SYMBOL(check_object_size);
47025+
47026+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
47027+void pax_track_stack(void)
47028+{
47029+ unsigned long sp = (unsigned long)&sp;
47030+ if (sp < current_thread_info()->lowest_stack &&
47031+ sp > (unsigned long)task_stack_page(current))
47032+ current_thread_info()->lowest_stack = sp;
47033+}
47034+EXPORT_SYMBOL(pax_track_stack);
47035+#endif
47036+
47037+#ifdef CONFIG_PAX_SIZE_OVERFLOW
47038+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
47039+{
47040+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
47041+ dump_stack();
47042+ do_group_exit(SIGKILL);
47043+}
47044+EXPORT_SYMBOL(report_size_overflow);
47045+#endif
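The block appended to fs/exec.c is the arch-independent half of several PaX features; the most frequently exercised entry point is check_object_size(), which PAX_USERCOPY wires into the user-copy primitives. A hedged sketch of a call site (in the patch itself the real callers are the per-arch uaccess routines, not this file):

    static inline unsigned long
    copy_to_user_checked(void __user *to, const void *from, unsigned long n)
    {
            /* "true" = kernel memory flowing out to userland; an object
             * that straddles a heap or stack-frame boundary terminates
             * the task instead of leaking adjacent memory */
            check_object_size(from, n, true);
            return copy_to_user(to, from, n);
    }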
47046diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
47047index 2616d0e..2ffdec9 100644
47048--- a/fs/ext2/balloc.c
47049+++ b/fs/ext2/balloc.c
47050@@ -1190,10 +1190,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
47051
47052 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
47053 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
47054- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
47055+ if (free_blocks < root_blocks + 1 &&
47056 !uid_eq(sbi->s_resuid, current_fsuid()) &&
47057 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
47058- !in_group_p (sbi->s_resgid))) {
47059+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
47060 return 0;
47061 }
47062 return 1;
47063diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
47064index 22548f5..41521d8 100644
47065--- a/fs/ext3/balloc.c
47066+++ b/fs/ext3/balloc.c
47067@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
47068
47069 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
47070 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
47071- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
47072+ if (free_blocks < root_blocks + 1 &&
47073 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
47074 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
47075- !in_group_p (sbi->s_resgid))) {
47076+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
47077 return 0;
47078 }
47079 return 1;
47080diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
47081index cf18217..8f6b9c3 100644
47082--- a/fs/ext4/balloc.c
47083+++ b/fs/ext4/balloc.c
47084@@ -498,8 +498,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
47085 /* Hm, nope. Are (enough) root reserved clusters available? */
47086 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
47087 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
47088- capable(CAP_SYS_RESOURCE) ||
47089- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
47090+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
47091+ capable_nolog(CAP_SYS_RESOURCE)) {
47092
47093 if (free_clusters >= (nclusters + dirty_clusters))
47094 return 1;
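The ext2/ext3/ext4 reserved-block checks are reordered the same way in all three hunks: capable() moves to the end of the && chain and becomes capable_nolog(). capable() is not a pure predicate, since it flags the task as having used privilege (and grsecurity additionally logs the check), so it belongs after the cheap uid/gid tests, and the _nolog variant keeps routine reserved-pool probes out of the audit trail. Condensed shape (names hypothetical except capable_nolog):

    static int has_free_blocks(u64 free_blocks, u64 root_blocks)
    {
            if (free_blocks >= root_blocks + 1)
                    return 1;               /* common case, no capability check */
            if (is_reserved_uid() || is_reserved_gid())  /* hypothetical cheap tests */
                    return 1;
            return capable_nolog(CAP_SYS_RESOURCE);      /* rare, unlogged fallback */
    }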
47095diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
47096index 3c20de1..6ff2460 100644
47097--- a/fs/ext4/ext4.h
47098+++ b/fs/ext4/ext4.h
47099@@ -1247,19 +1247,19 @@ struct ext4_sb_info {
47100 unsigned long s_mb_last_start;
47101
47102 /* stats for buddy allocator */
47103- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
47104- atomic_t s_bal_success; /* we found long enough chunks */
47105- atomic_t s_bal_allocated; /* in blocks */
47106- atomic_t s_bal_ex_scanned; /* total extents scanned */
47107- atomic_t s_bal_goals; /* goal hits */
47108- atomic_t s_bal_breaks; /* too long searches */
47109- atomic_t s_bal_2orders; /* 2^order hits */
47110+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
47111+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
47112+ atomic_unchecked_t s_bal_allocated; /* in blocks */
47113+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
47114+ atomic_unchecked_t s_bal_goals; /* goal hits */
47115+ atomic_unchecked_t s_bal_breaks; /* too long searches */
47116+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
47117 spinlock_t s_bal_lock;
47118 unsigned long s_mb_buddies_generated;
47119 unsigned long long s_mb_generation_time;
47120- atomic_t s_mb_lost_chunks;
47121- atomic_t s_mb_preallocated;
47122- atomic_t s_mb_discarded;
47123+ atomic_unchecked_t s_mb_lost_chunks;
47124+ atomic_unchecked_t s_mb_preallocated;
47125+ atomic_unchecked_t s_mb_discarded;
47126 atomic_t s_lock_busy;
47127
47128 /* locality groups */
47129diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
47130index 526e553..3f2de85 100644
47131--- a/fs/ext4/mballoc.c
47132+++ b/fs/ext4/mballoc.c
47133@@ -1747,7 +1747,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
47134 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
47135
47136 if (EXT4_SB(sb)->s_mb_stats)
47137- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
47138+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
47139
47140 break;
47141 }
47142@@ -2044,7 +2044,7 @@ repeat:
47143 ac->ac_status = AC_STATUS_CONTINUE;
47144 ac->ac_flags |= EXT4_MB_HINT_FIRST;
47145 cr = 3;
47146- atomic_inc(&sbi->s_mb_lost_chunks);
47147+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
47148 goto repeat;
47149 }
47150 }
47151@@ -2552,25 +2552,25 @@ int ext4_mb_release(struct super_block *sb)
47152 if (sbi->s_mb_stats) {
47153 ext4_msg(sb, KERN_INFO,
47154 "mballoc: %u blocks %u reqs (%u success)",
47155- atomic_read(&sbi->s_bal_allocated),
47156- atomic_read(&sbi->s_bal_reqs),
47157- atomic_read(&sbi->s_bal_success));
47158+ atomic_read_unchecked(&sbi->s_bal_allocated),
47159+ atomic_read_unchecked(&sbi->s_bal_reqs),
47160+ atomic_read_unchecked(&sbi->s_bal_success));
47161 ext4_msg(sb, KERN_INFO,
47162 "mballoc: %u extents scanned, %u goal hits, "
47163 "%u 2^N hits, %u breaks, %u lost",
47164- atomic_read(&sbi->s_bal_ex_scanned),
47165- atomic_read(&sbi->s_bal_goals),
47166- atomic_read(&sbi->s_bal_2orders),
47167- atomic_read(&sbi->s_bal_breaks),
47168- atomic_read(&sbi->s_mb_lost_chunks));
47169+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
47170+ atomic_read_unchecked(&sbi->s_bal_goals),
47171+ atomic_read_unchecked(&sbi->s_bal_2orders),
47172+ atomic_read_unchecked(&sbi->s_bal_breaks),
47173+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
47174 ext4_msg(sb, KERN_INFO,
47175 "mballoc: %lu generated and it took %Lu",
47176 sbi->s_mb_buddies_generated,
47177 sbi->s_mb_generation_time);
47178 ext4_msg(sb, KERN_INFO,
47179 "mballoc: %u preallocated, %u discarded",
47180- atomic_read(&sbi->s_mb_preallocated),
47181- atomic_read(&sbi->s_mb_discarded));
47182+ atomic_read_unchecked(&sbi->s_mb_preallocated),
47183+ atomic_read_unchecked(&sbi->s_mb_discarded));
47184 }
47185
47186 free_percpu(sbi->s_locality_groups);
47187@@ -3052,16 +3052,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
47188 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
47189
47190 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
47191- atomic_inc(&sbi->s_bal_reqs);
47192- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
47193+ atomic_inc_unchecked(&sbi->s_bal_reqs);
47194+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
47195 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
47196- atomic_inc(&sbi->s_bal_success);
47197- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
47198+ atomic_inc_unchecked(&sbi->s_bal_success);
47199+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
47200 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
47201 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
47202- atomic_inc(&sbi->s_bal_goals);
47203+ atomic_inc_unchecked(&sbi->s_bal_goals);
47204 if (ac->ac_found > sbi->s_mb_max_to_scan)
47205- atomic_inc(&sbi->s_bal_breaks);
47206+ atomic_inc_unchecked(&sbi->s_bal_breaks);
47207 }
47208
47209 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
47210@@ -3461,7 +3461,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
47211 trace_ext4_mb_new_inode_pa(ac, pa);
47212
47213 ext4_mb_use_inode_pa(ac, pa);
47214- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
47215+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
47216
47217 ei = EXT4_I(ac->ac_inode);
47218 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
47219@@ -3521,7 +3521,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
47220 trace_ext4_mb_new_group_pa(ac, pa);
47221
47222 ext4_mb_use_group_pa(ac, pa);
47223- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
47224+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
47225
47226 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
47227 lg = ac->ac_lg;
47228@@ -3610,7 +3610,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
47229 * from the bitmap and continue.
47230 */
47231 }
47232- atomic_add(free, &sbi->s_mb_discarded);
47233+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
47234
47235 return err;
47236 }
47237@@ -3628,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
47238 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
47239 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
47240 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
47241- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
47242+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
47243 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
47244
47245 return 0;
47246diff --git a/fs/ext4/super.c b/fs/ext4/super.c
47247index d59b351..775f8c8 100644
47248--- a/fs/ext4/super.c
47249+++ b/fs/ext4/super.c
47250@@ -3212,7 +3212,6 @@ int ext4_calculate_overhead(struct super_block *sb)
47251 ext4_fsblk_t overhead = 0;
47252 char *buf = (char *) get_zeroed_page(GFP_KERNEL);
47253
47254- memset(buf, 0, PAGE_SIZE);
47255 if (!buf)
47256 return -ENOMEM;
47257
47258diff --git a/fs/fcntl.c b/fs/fcntl.c
47259index 71a600a..20d87b1 100644
47260--- a/fs/fcntl.c
47261+++ b/fs/fcntl.c
47262@@ -107,6 +107,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
47263 if (err)
47264 return err;
47265
47266+ if (gr_handle_chroot_fowner(pid, type))
47267+ return -ENOENT;
47268+ if (gr_check_protected_task_fowner(pid, type))
47269+ return -EACCES;
47270+
47271 f_modown(filp, pid, type, force);
47272 return 0;
47273 }
47274diff --git a/fs/fifo.c b/fs/fifo.c
47275index cf6f434..3d7942c 100644
47276--- a/fs/fifo.c
47277+++ b/fs/fifo.c
47278@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
47279 */
47280 filp->f_op = &read_pipefifo_fops;
47281 pipe->r_counter++;
47282- if (pipe->readers++ == 0)
47283+ if (atomic_inc_return(&pipe->readers) == 1)
47284 wake_up_partner(inode);
47285
47286- if (!pipe->writers) {
47287+ if (!atomic_read(&pipe->writers)) {
47288 if ((filp->f_flags & O_NONBLOCK)) {
47289 /* suppress POLLHUP until we have
47290 * seen a writer */
47291@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
47292 * errno=ENXIO when there is no process reading the FIFO.
47293 */
47294 ret = -ENXIO;
47295- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
47296+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
47297 goto err;
47298
47299 filp->f_op = &write_pipefifo_fops;
47300 pipe->w_counter++;
47301- if (!pipe->writers++)
47302+ if (atomic_inc_return(&pipe->writers) == 1)
47303 wake_up_partner(inode);
47304
47305- if (!pipe->readers) {
47306+ if (!atomic_read(&pipe->readers)) {
47307 if (wait_for_partner(inode, &pipe->r_counter))
47308 goto err_wr;
47309 }
47310@@ -104,11 +104,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
47311 */
47312 filp->f_op = &rdwr_pipefifo_fops;
47313
47314- pipe->readers++;
47315- pipe->writers++;
47316+ atomic_inc(&pipe->readers);
47317+ atomic_inc(&pipe->writers);
47318 pipe->r_counter++;
47319 pipe->w_counter++;
47320- if (pipe->readers == 1 || pipe->writers == 1)
47321+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
47322 wake_up_partner(inode);
47323 break;
47324
47325@@ -122,19 +122,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
47326 return 0;
47327
47328 err_rd:
47329- if (!--pipe->readers)
47330+ if (atomic_dec_and_test(&pipe->readers))
47331 wake_up_interruptible(&pipe->wait);
47332 ret = -ERESTARTSYS;
47333 goto err;
47334
47335 err_wr:
47336- if (!--pipe->writers)
47337+ if (atomic_dec_and_test(&pipe->writers))
47338 wake_up_interruptible(&pipe->wait);
47339 ret = -ERESTARTSYS;
47340 goto err;
47341
47342 err:
47343- if (!pipe->readers && !pipe->writers)
47344+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
47345 free_pipe_info(inode);
47346
47347 err_nocleanup:
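The fifo_open() conversion is mechanical once pipe->readers and pipe->writers become atomics; every open-coded counter idiom has a one-to-one replacement that preserves the old test: x++ == 0 becomes atomic_inc_return(&x) == 1, !--x becomes atomic_dec_and_test(&x), and bare reads become atomic_read(). The recipe in isolation (illustrative; wake_up_partner() stands in for the real wakeups):

    static atomic_t readers = ATOMIC_INIT(0);

    static void reader_open(void)
    {
            if (atomic_inc_return(&readers) == 1)   /* was: if (readers++ == 0) */
                    wake_up_partner();
    }

    static void reader_close(void)
    {
            if (atomic_dec_and_test(&readers))      /* was: if (!--readers) */
                    wake_up_partner();
    }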
47348diff --git a/fs/file.c b/fs/file.c
47349index eff2316..8c8930c 100644
47350--- a/fs/file.c
47351+++ b/fs/file.c
47352@@ -16,6 +16,7 @@
47353 #include <linux/slab.h>
47354 #include <linux/vmalloc.h>
47355 #include <linux/file.h>
47356+#include <linux/security.h>
47357 #include <linux/fdtable.h>
47358 #include <linux/bitops.h>
47359 #include <linux/interrupt.h>
47360@@ -898,6 +899,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
47361 if (!file)
47362 return __close_fd(files, fd);
47363
47364+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
47365 if (fd >= rlimit(RLIMIT_NOFILE))
47366 return -EBADF;
47367
47368@@ -924,6 +926,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
47369 if (unlikely(oldfd == newfd))
47370 return -EINVAL;
47371
47372+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
47373 if (newfd >= rlimit(RLIMIT_NOFILE))
47374 return -EBADF;
47375
47376@@ -979,6 +982,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
47377 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
47378 {
47379 int err;
47380+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
47381 if (from >= rlimit(RLIMIT_NOFILE))
47382 return -EINVAL;
47383 err = alloc_fd(from, flags);
47384diff --git a/fs/filesystems.c b/fs/filesystems.c
47385index da165f6..3671bdb 100644
47386--- a/fs/filesystems.c
47387+++ b/fs/filesystems.c
47388@@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
47389 int len = dot ? dot - name : strlen(name);
47390
47391 fs = __get_fs_type(name, len);
47392+
47393+#ifdef CONFIG_GRKERNSEC_MODHARDEN
47394+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
47395+#else
47396 if (!fs && (request_module("%.*s", len, name) == 0))
47397+#endif
47398 fs = __get_fs_type(name, len);
47399
47400 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
47401diff --git a/fs/fs_struct.c b/fs/fs_struct.c
47402index 5df4775..9d9336f 100644
47403--- a/fs/fs_struct.c
47404+++ b/fs/fs_struct.c
47405@@ -4,6 +4,7 @@
47406 #include <linux/path.h>
47407 #include <linux/slab.h>
47408 #include <linux/fs_struct.h>
47409+#include <linux/grsecurity.h>
47410 #include "internal.h"
47411
47412 /*
47413@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
47414 write_seqcount_begin(&fs->seq);
47415 old_root = fs->root;
47416 fs->root = *path;
47417+ gr_set_chroot_entries(current, path);
47418 write_seqcount_end(&fs->seq);
47419 spin_unlock(&fs->lock);
47420 if (old_root.dentry)
47421@@ -53,6 +55,21 @@ static inline int replace_path(struct path *p, const struct path *old, const str
47422 return 1;
47423 }
47424
47425+static inline int replace_root_path(struct task_struct *task, struct path *p, const struct path *old, struct path *new)
47426+{
47427+ if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
47428+ return 0;
47429+ *p = *new;
47430+
47431+ /* This function is only called from pivot_root(). Leave our
47432+ gr_chroot_dentry and is_chrooted flags as-is, so that a
47433+ pivoted root isn't treated as a chroot
47434+ */
47435+ //gr_set_chroot_entries(task, new);
47436+
47437+ return 1;
47438+}
47439+
47440 void chroot_fs_refs(struct path *old_root, struct path *new_root)
47441 {
47442 struct task_struct *g, *p;
47443@@ -67,7 +84,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
47444 int hits = 0;
47445 spin_lock(&fs->lock);
47446 write_seqcount_begin(&fs->seq);
47447- hits += replace_path(&fs->root, old_root, new_root);
47448+ hits += replace_root_path(p, &fs->root, old_root, new_root);
47449 hits += replace_path(&fs->pwd, old_root, new_root);
47450 write_seqcount_end(&fs->seq);
47451 while (hits--) {
47452@@ -99,7 +116,8 @@ void exit_fs(struct task_struct *tsk)
47453 task_lock(tsk);
47454 spin_lock(&fs->lock);
47455 tsk->fs = NULL;
47456- kill = !--fs->users;
47457+ gr_clear_chroot_entries(tsk);
47458+ kill = !atomic_dec_return(&fs->users);
47459 spin_unlock(&fs->lock);
47460 task_unlock(tsk);
47461 if (kill)
47462@@ -112,7 +130,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
47463 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
47464 /* We don't need to lock fs - think why ;-) */
47465 if (fs) {
47466- fs->users = 1;
47467+ atomic_set(&fs->users, 1);
47468 fs->in_exec = 0;
47469 spin_lock_init(&fs->lock);
47470 seqcount_init(&fs->seq);
47471@@ -121,6 +139,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
47472 spin_lock(&old->lock);
47473 fs->root = old->root;
47474 path_get(&fs->root);
47475+ /* instead of calling gr_set_chroot_entries here,
47476+ we call it from every caller of this function
47477+ */
47478 fs->pwd = old->pwd;
47479 path_get(&fs->pwd);
47480 spin_unlock(&old->lock);
47481@@ -139,8 +160,9 @@ int unshare_fs_struct(void)
47482
47483 task_lock(current);
47484 spin_lock(&fs->lock);
47485- kill = !--fs->users;
47486+ kill = !atomic_dec_return(&fs->users);
47487 current->fs = new_fs;
47488+ gr_set_chroot_entries(current, &new_fs->root);
47489 spin_unlock(&fs->lock);
47490 task_unlock(current);
47491
47492@@ -153,13 +175,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
47493
47494 int current_umask(void)
47495 {
47496- return current->fs->umask;
47497+ return current->fs->umask | gr_acl_umask();
47498 }
47499 EXPORT_SYMBOL(current_umask);
47500
47501 /* to be mentioned only in INIT_TASK */
47502 struct fs_struct init_fs = {
47503- .users = 1,
47504+ .users = ATOMIC_INIT(1),
47505 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
47506 .seq = SEQCNT_ZERO,
47507 .umask = 0022,
47508@@ -175,12 +197,13 @@ void daemonize_fs_struct(void)
47509 task_lock(current);
47510
47511 spin_lock(&init_fs.lock);
47512- init_fs.users++;
47513+ atomic_inc(&init_fs.users);
47514 spin_unlock(&init_fs.lock);
47515
47516 spin_lock(&fs->lock);
47517 current->fs = &init_fs;
47518- kill = !--fs->users;
47519+ gr_set_chroot_entries(current, &current->fs->root);
47520+ kill = !atomic_dec_return(&fs->users);
47521 spin_unlock(&fs->lock);
47522
47523 task_unlock(current);
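fs_struct.users follows the same recipe, with one consequence visible earlier in this patch: once the count is an atomic_t, the check_unsafe_exec() hunk in fs/exec.c can read it with atomic_read() and no extra locking. Sketch of the drop-reference path (illustrative; free_fs_struct() is the kernel's real destructor, the rest is condensed):

    static void drop_fs_struct(struct fs_struct *fs)
    {
            int kill;

            spin_lock(&fs->lock);
            kill = !atomic_dec_return(&fs->users);  /* was: kill = !--fs->users; */
            spin_unlock(&fs->lock);
            if (kill)
                    free_fs_struct(fs);
    }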
47524diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
47525index 9905350..02eaec4 100644
47526--- a/fs/fscache/cookie.c
47527+++ b/fs/fscache/cookie.c
47528@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
47529 parent ? (char *) parent->def->name : "<no-parent>",
47530 def->name, netfs_data);
47531
47532- fscache_stat(&fscache_n_acquires);
47533+ fscache_stat_unchecked(&fscache_n_acquires);
47534
47535 /* if there's no parent cookie, then we don't create one here either */
47536 if (!parent) {
47537- fscache_stat(&fscache_n_acquires_null);
47538+ fscache_stat_unchecked(&fscache_n_acquires_null);
47539 _leave(" [no parent]");
47540 return NULL;
47541 }
47542@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
47543 /* allocate and initialise a cookie */
47544 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
47545 if (!cookie) {
47546- fscache_stat(&fscache_n_acquires_oom);
47547+ fscache_stat_unchecked(&fscache_n_acquires_oom);
47548 _leave(" [ENOMEM]");
47549 return NULL;
47550 }
47551@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
47552
47553 switch (cookie->def->type) {
47554 case FSCACHE_COOKIE_TYPE_INDEX:
47555- fscache_stat(&fscache_n_cookie_index);
47556+ fscache_stat_unchecked(&fscache_n_cookie_index);
47557 break;
47558 case FSCACHE_COOKIE_TYPE_DATAFILE:
47559- fscache_stat(&fscache_n_cookie_data);
47560+ fscache_stat_unchecked(&fscache_n_cookie_data);
47561 break;
47562 default:
47563- fscache_stat(&fscache_n_cookie_special);
47564+ fscache_stat_unchecked(&fscache_n_cookie_special);
47565 break;
47566 }
47567
47568@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
47569 if (fscache_acquire_non_index_cookie(cookie) < 0) {
47570 atomic_dec(&parent->n_children);
47571 __fscache_cookie_put(cookie);
47572- fscache_stat(&fscache_n_acquires_nobufs);
47573+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
47574 _leave(" = NULL");
47575 return NULL;
47576 }
47577 }
47578
47579- fscache_stat(&fscache_n_acquires_ok);
47580+ fscache_stat_unchecked(&fscache_n_acquires_ok);
47581 _leave(" = %p", cookie);
47582 return cookie;
47583 }
47584@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
47585 cache = fscache_select_cache_for_object(cookie->parent);
47586 if (!cache) {
47587 up_read(&fscache_addremove_sem);
47588- fscache_stat(&fscache_n_acquires_no_cache);
47589+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
47590 _leave(" = -ENOMEDIUM [no cache]");
47591 return -ENOMEDIUM;
47592 }
47593@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
47594 object = cache->ops->alloc_object(cache, cookie);
47595 fscache_stat_d(&fscache_n_cop_alloc_object);
47596 if (IS_ERR(object)) {
47597- fscache_stat(&fscache_n_object_no_alloc);
47598+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
47599 ret = PTR_ERR(object);
47600 goto error;
47601 }
47602
47603- fscache_stat(&fscache_n_object_alloc);
47604+ fscache_stat_unchecked(&fscache_n_object_alloc);
47605
47606 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
47607
47608@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
47609 struct fscache_object *object;
47610 struct hlist_node *_p;
47611
47612- fscache_stat(&fscache_n_updates);
47613+ fscache_stat_unchecked(&fscache_n_updates);
47614
47615 if (!cookie) {
47616- fscache_stat(&fscache_n_updates_null);
47617+ fscache_stat_unchecked(&fscache_n_updates_null);
47618 _leave(" [no cookie]");
47619 return;
47620 }
47621@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
47622 struct fscache_object *object;
47623 unsigned long event;
47624
47625- fscache_stat(&fscache_n_relinquishes);
47626+ fscache_stat_unchecked(&fscache_n_relinquishes);
47627 if (retire)
47628- fscache_stat(&fscache_n_relinquishes_retire);
47629+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
47630
47631 if (!cookie) {
47632- fscache_stat(&fscache_n_relinquishes_null);
47633+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
47634 _leave(" [no cookie]");
47635 return;
47636 }
47637@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
47638
47639 /* wait for the cookie to finish being instantiated (or to fail) */
47640 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
47641- fscache_stat(&fscache_n_relinquishes_waitcrt);
47642+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
47643 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
47644 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
47645 }
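Note: everything below in fs/fscache is one mechanical conversion — event counters move from atomic_t to PaX's atomic_unchecked_t. Under PAX_REFCOUNT, ordinary atomic_t operations trap on overflow to catch reference-count wraps; pure statistics counters are expected to wrap eventually and would raise false alarms, so they use the unchecked variant. A rough sketch of the type and two accessors, assuming the shape PaX gives them elsewhere in the patch (the real helpers are per-arch assembly mirroring their checked counterparts):

	typedef struct {
		int counter;
	} atomic_unchecked_t;

	static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
	{
		/* plain wrapping increment, no overflow trap */
		__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
	}

	static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
	{
		return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
	}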
47646diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
47647index f6aad48..88dcf26 100644
47648--- a/fs/fscache/internal.h
47649+++ b/fs/fscache/internal.h
47650@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
47651 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
47652 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
47653
47654-extern atomic_t fscache_n_op_pend;
47655-extern atomic_t fscache_n_op_run;
47656-extern atomic_t fscache_n_op_enqueue;
47657-extern atomic_t fscache_n_op_deferred_release;
47658-extern atomic_t fscache_n_op_release;
47659-extern atomic_t fscache_n_op_gc;
47660-extern atomic_t fscache_n_op_cancelled;
47661-extern atomic_t fscache_n_op_rejected;
47662+extern atomic_unchecked_t fscache_n_op_pend;
47663+extern atomic_unchecked_t fscache_n_op_run;
47664+extern atomic_unchecked_t fscache_n_op_enqueue;
47665+extern atomic_unchecked_t fscache_n_op_deferred_release;
47666+extern atomic_unchecked_t fscache_n_op_release;
47667+extern atomic_unchecked_t fscache_n_op_gc;
47668+extern atomic_unchecked_t fscache_n_op_cancelled;
47669+extern atomic_unchecked_t fscache_n_op_rejected;
47670
47671-extern atomic_t fscache_n_attr_changed;
47672-extern atomic_t fscache_n_attr_changed_ok;
47673-extern atomic_t fscache_n_attr_changed_nobufs;
47674-extern atomic_t fscache_n_attr_changed_nomem;
47675-extern atomic_t fscache_n_attr_changed_calls;
47676+extern atomic_unchecked_t fscache_n_attr_changed;
47677+extern atomic_unchecked_t fscache_n_attr_changed_ok;
47678+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
47679+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
47680+extern atomic_unchecked_t fscache_n_attr_changed_calls;
47681
47682-extern atomic_t fscache_n_allocs;
47683-extern atomic_t fscache_n_allocs_ok;
47684-extern atomic_t fscache_n_allocs_wait;
47685-extern atomic_t fscache_n_allocs_nobufs;
47686-extern atomic_t fscache_n_allocs_intr;
47687-extern atomic_t fscache_n_allocs_object_dead;
47688-extern atomic_t fscache_n_alloc_ops;
47689-extern atomic_t fscache_n_alloc_op_waits;
47690+extern atomic_unchecked_t fscache_n_allocs;
47691+extern atomic_unchecked_t fscache_n_allocs_ok;
47692+extern atomic_unchecked_t fscache_n_allocs_wait;
47693+extern atomic_unchecked_t fscache_n_allocs_nobufs;
47694+extern atomic_unchecked_t fscache_n_allocs_intr;
47695+extern atomic_unchecked_t fscache_n_allocs_object_dead;
47696+extern atomic_unchecked_t fscache_n_alloc_ops;
47697+extern atomic_unchecked_t fscache_n_alloc_op_waits;
47698
47699-extern atomic_t fscache_n_retrievals;
47700-extern atomic_t fscache_n_retrievals_ok;
47701-extern atomic_t fscache_n_retrievals_wait;
47702-extern atomic_t fscache_n_retrievals_nodata;
47703-extern atomic_t fscache_n_retrievals_nobufs;
47704-extern atomic_t fscache_n_retrievals_intr;
47705-extern atomic_t fscache_n_retrievals_nomem;
47706-extern atomic_t fscache_n_retrievals_object_dead;
47707-extern atomic_t fscache_n_retrieval_ops;
47708-extern atomic_t fscache_n_retrieval_op_waits;
47709+extern atomic_unchecked_t fscache_n_retrievals;
47710+extern atomic_unchecked_t fscache_n_retrievals_ok;
47711+extern atomic_unchecked_t fscache_n_retrievals_wait;
47712+extern atomic_unchecked_t fscache_n_retrievals_nodata;
47713+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
47714+extern atomic_unchecked_t fscache_n_retrievals_intr;
47715+extern atomic_unchecked_t fscache_n_retrievals_nomem;
47716+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
47717+extern atomic_unchecked_t fscache_n_retrieval_ops;
47718+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
47719
47720-extern atomic_t fscache_n_stores;
47721-extern atomic_t fscache_n_stores_ok;
47722-extern atomic_t fscache_n_stores_again;
47723-extern atomic_t fscache_n_stores_nobufs;
47724-extern atomic_t fscache_n_stores_oom;
47725-extern atomic_t fscache_n_store_ops;
47726-extern atomic_t fscache_n_store_calls;
47727-extern atomic_t fscache_n_store_pages;
47728-extern atomic_t fscache_n_store_radix_deletes;
47729-extern atomic_t fscache_n_store_pages_over_limit;
47730+extern atomic_unchecked_t fscache_n_stores;
47731+extern atomic_unchecked_t fscache_n_stores_ok;
47732+extern atomic_unchecked_t fscache_n_stores_again;
47733+extern atomic_unchecked_t fscache_n_stores_nobufs;
47734+extern atomic_unchecked_t fscache_n_stores_oom;
47735+extern atomic_unchecked_t fscache_n_store_ops;
47736+extern atomic_unchecked_t fscache_n_store_calls;
47737+extern atomic_unchecked_t fscache_n_store_pages;
47738+extern atomic_unchecked_t fscache_n_store_radix_deletes;
47739+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
47740
47741-extern atomic_t fscache_n_store_vmscan_not_storing;
47742-extern atomic_t fscache_n_store_vmscan_gone;
47743-extern atomic_t fscache_n_store_vmscan_busy;
47744-extern atomic_t fscache_n_store_vmscan_cancelled;
47745+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
47746+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
47747+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
47748+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
47749
47750-extern atomic_t fscache_n_marks;
47751-extern atomic_t fscache_n_uncaches;
47752+extern atomic_unchecked_t fscache_n_marks;
47753+extern atomic_unchecked_t fscache_n_uncaches;
47754
47755-extern atomic_t fscache_n_acquires;
47756-extern atomic_t fscache_n_acquires_null;
47757-extern atomic_t fscache_n_acquires_no_cache;
47758-extern atomic_t fscache_n_acquires_ok;
47759-extern atomic_t fscache_n_acquires_nobufs;
47760-extern atomic_t fscache_n_acquires_oom;
47761+extern atomic_unchecked_t fscache_n_acquires;
47762+extern atomic_unchecked_t fscache_n_acquires_null;
47763+extern atomic_unchecked_t fscache_n_acquires_no_cache;
47764+extern atomic_unchecked_t fscache_n_acquires_ok;
47765+extern atomic_unchecked_t fscache_n_acquires_nobufs;
47766+extern atomic_unchecked_t fscache_n_acquires_oom;
47767
47768-extern atomic_t fscache_n_updates;
47769-extern atomic_t fscache_n_updates_null;
47770-extern atomic_t fscache_n_updates_run;
47771+extern atomic_unchecked_t fscache_n_updates;
47772+extern atomic_unchecked_t fscache_n_updates_null;
47773+extern atomic_unchecked_t fscache_n_updates_run;
47774
47775-extern atomic_t fscache_n_relinquishes;
47776-extern atomic_t fscache_n_relinquishes_null;
47777-extern atomic_t fscache_n_relinquishes_waitcrt;
47778-extern atomic_t fscache_n_relinquishes_retire;
47779+extern atomic_unchecked_t fscache_n_relinquishes;
47780+extern atomic_unchecked_t fscache_n_relinquishes_null;
47781+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
47782+extern atomic_unchecked_t fscache_n_relinquishes_retire;
47783
47784-extern atomic_t fscache_n_cookie_index;
47785-extern atomic_t fscache_n_cookie_data;
47786-extern atomic_t fscache_n_cookie_special;
47787+extern atomic_unchecked_t fscache_n_cookie_index;
47788+extern atomic_unchecked_t fscache_n_cookie_data;
47789+extern atomic_unchecked_t fscache_n_cookie_special;
47790
47791-extern atomic_t fscache_n_object_alloc;
47792-extern atomic_t fscache_n_object_no_alloc;
47793-extern atomic_t fscache_n_object_lookups;
47794-extern atomic_t fscache_n_object_lookups_negative;
47795-extern atomic_t fscache_n_object_lookups_positive;
47796-extern atomic_t fscache_n_object_lookups_timed_out;
47797-extern atomic_t fscache_n_object_created;
47798-extern atomic_t fscache_n_object_avail;
47799-extern atomic_t fscache_n_object_dead;
47800+extern atomic_unchecked_t fscache_n_object_alloc;
47801+extern atomic_unchecked_t fscache_n_object_no_alloc;
47802+extern atomic_unchecked_t fscache_n_object_lookups;
47803+extern atomic_unchecked_t fscache_n_object_lookups_negative;
47804+extern atomic_unchecked_t fscache_n_object_lookups_positive;
47805+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
47806+extern atomic_unchecked_t fscache_n_object_created;
47807+extern atomic_unchecked_t fscache_n_object_avail;
47808+extern atomic_unchecked_t fscache_n_object_dead;
47809
47810-extern atomic_t fscache_n_checkaux_none;
47811-extern atomic_t fscache_n_checkaux_okay;
47812-extern atomic_t fscache_n_checkaux_update;
47813-extern atomic_t fscache_n_checkaux_obsolete;
47814+extern atomic_unchecked_t fscache_n_checkaux_none;
47815+extern atomic_unchecked_t fscache_n_checkaux_okay;
47816+extern atomic_unchecked_t fscache_n_checkaux_update;
47817+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
47818
47819 extern atomic_t fscache_n_cop_alloc_object;
47820 extern atomic_t fscache_n_cop_lookup_object;
47821@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
47822 atomic_inc(stat);
47823 }
47824
47825+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
47826+{
47827+ atomic_inc_unchecked(stat);
47828+}
47829+
47830 static inline void fscache_stat_d(atomic_t *stat)
47831 {
47832 atomic_dec(stat);
47833@@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
47834
47835 #define __fscache_stat(stat) (NULL)
47836 #define fscache_stat(stat) do {} while (0)
47837+#define fscache_stat_unchecked(stat) do {} while (0)
47838 #define fscache_stat_d(stat) do {} while (0)
47839 #endif
47840
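Note how internal.h keeps the new helper symmetrical with the existing stats plumbing: with CONFIG_FSCACHE_STATS it really increments, and in the no-stats build it compiles away, exactly like fscache_stat(). Condensing the two hunks above into one view:

	#ifdef CONFIG_FSCACHE_STATS
	static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
	{
		atomic_inc_unchecked(stat);
	}
	#else
	#define fscache_stat_unchecked(stat) do {} while (0)
	#endif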
47841diff --git a/fs/fscache/object.c b/fs/fscache/object.c
47842index b6b897c..0ffff9c 100644
47843--- a/fs/fscache/object.c
47844+++ b/fs/fscache/object.c
47845@@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
47846 /* update the object metadata on disk */
47847 case FSCACHE_OBJECT_UPDATING:
47848 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
47849- fscache_stat(&fscache_n_updates_run);
47850+ fscache_stat_unchecked(&fscache_n_updates_run);
47851 fscache_stat(&fscache_n_cop_update_object);
47852 object->cache->ops->update_object(object);
47853 fscache_stat_d(&fscache_n_cop_update_object);
47854@@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
47855 spin_lock(&object->lock);
47856 object->state = FSCACHE_OBJECT_DEAD;
47857 spin_unlock(&object->lock);
47858- fscache_stat(&fscache_n_object_dead);
47859+ fscache_stat_unchecked(&fscache_n_object_dead);
47860 goto terminal_transit;
47861
47862 /* handle the parent cache of this object being withdrawn from
47863@@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
47864 spin_lock(&object->lock);
47865 object->state = FSCACHE_OBJECT_DEAD;
47866 spin_unlock(&object->lock);
47867- fscache_stat(&fscache_n_object_dead);
47868+ fscache_stat_unchecked(&fscache_n_object_dead);
47869 goto terminal_transit;
47870
47871 /* complain about the object being woken up once it is
47872@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
47873 parent->cookie->def->name, cookie->def->name,
47874 object->cache->tag->name);
47875
47876- fscache_stat(&fscache_n_object_lookups);
47877+ fscache_stat_unchecked(&fscache_n_object_lookups);
47878 fscache_stat(&fscache_n_cop_lookup_object);
47879 ret = object->cache->ops->lookup_object(object);
47880 fscache_stat_d(&fscache_n_cop_lookup_object);
47881@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
47882 if (ret == -ETIMEDOUT) {
47883 /* probably stuck behind another object, so move this one to
47884 * the back of the queue */
47885- fscache_stat(&fscache_n_object_lookups_timed_out);
47886+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
47887 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
47888 }
47889
47890@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
47891
47892 spin_lock(&object->lock);
47893 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
47894- fscache_stat(&fscache_n_object_lookups_negative);
47895+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
47896
47897 /* transit here to allow write requests to begin stacking up
47898 * and read requests to begin returning ENODATA */
47899@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
47900 * result, in which case there may be data available */
47901 spin_lock(&object->lock);
47902 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
47903- fscache_stat(&fscache_n_object_lookups_positive);
47904+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
47905
47906 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
47907
47908@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
47909 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
47910 } else {
47911 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
47912- fscache_stat(&fscache_n_object_created);
47913+ fscache_stat_unchecked(&fscache_n_object_created);
47914
47915 object->state = FSCACHE_OBJECT_AVAILABLE;
47916 spin_unlock(&object->lock);
47917@@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
47918 fscache_enqueue_dependents(object);
47919
47920 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
47921- fscache_stat(&fscache_n_object_avail);
47922+ fscache_stat_unchecked(&fscache_n_object_avail);
47923
47924 _leave("");
47925 }
47926@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
47927 enum fscache_checkaux result;
47928
47929 if (!object->cookie->def->check_aux) {
47930- fscache_stat(&fscache_n_checkaux_none);
47931+ fscache_stat_unchecked(&fscache_n_checkaux_none);
47932 return FSCACHE_CHECKAUX_OKAY;
47933 }
47934
47935@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
47936 switch (result) {
47937 /* entry okay as is */
47938 case FSCACHE_CHECKAUX_OKAY:
47939- fscache_stat(&fscache_n_checkaux_okay);
47940+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
47941 break;
47942
47943 /* entry requires update */
47944 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
47945- fscache_stat(&fscache_n_checkaux_update);
47946+ fscache_stat_unchecked(&fscache_n_checkaux_update);
47947 break;
47948
47949 /* entry requires deletion */
47950 case FSCACHE_CHECKAUX_OBSOLETE:
47951- fscache_stat(&fscache_n_checkaux_obsolete);
47952+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
47953 break;
47954
47955 default:
47956diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
47957index 30afdfa..2256596 100644
47958--- a/fs/fscache/operation.c
47959+++ b/fs/fscache/operation.c
47960@@ -17,7 +17,7 @@
47961 #include <linux/slab.h>
47962 #include "internal.h"
47963
47964-atomic_t fscache_op_debug_id;
47965+atomic_unchecked_t fscache_op_debug_id;
47966 EXPORT_SYMBOL(fscache_op_debug_id);
47967
47968 /**
47969@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
47970 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
47971 ASSERTCMP(atomic_read(&op->usage), >, 0);
47972
47973- fscache_stat(&fscache_n_op_enqueue);
47974+ fscache_stat_unchecked(&fscache_n_op_enqueue);
47975 switch (op->flags & FSCACHE_OP_TYPE) {
47976 case FSCACHE_OP_ASYNC:
47977 _debug("queue async");
47978@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
47979 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
47980 if (op->processor)
47981 fscache_enqueue_operation(op);
47982- fscache_stat(&fscache_n_op_run);
47983+ fscache_stat_unchecked(&fscache_n_op_run);
47984 }
47985
47986 /*
47987@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
47988 if (object->n_ops > 1) {
47989 atomic_inc(&op->usage);
47990 list_add_tail(&op->pend_link, &object->pending_ops);
47991- fscache_stat(&fscache_n_op_pend);
47992+ fscache_stat_unchecked(&fscache_n_op_pend);
47993 } else if (!list_empty(&object->pending_ops)) {
47994 atomic_inc(&op->usage);
47995 list_add_tail(&op->pend_link, &object->pending_ops);
47996- fscache_stat(&fscache_n_op_pend);
47997+ fscache_stat_unchecked(&fscache_n_op_pend);
47998 fscache_start_operations(object);
47999 } else {
48000 ASSERTCMP(object->n_in_progress, ==, 0);
48001@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
48002 object->n_exclusive++; /* reads and writes must wait */
48003 atomic_inc(&op->usage);
48004 list_add_tail(&op->pend_link, &object->pending_ops);
48005- fscache_stat(&fscache_n_op_pend);
48006+ fscache_stat_unchecked(&fscache_n_op_pend);
48007 ret = 0;
48008 } else {
48009 /* not allowed to submit ops in any other state */
48010@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
48011 if (object->n_exclusive > 0) {
48012 atomic_inc(&op->usage);
48013 list_add_tail(&op->pend_link, &object->pending_ops);
48014- fscache_stat(&fscache_n_op_pend);
48015+ fscache_stat_unchecked(&fscache_n_op_pend);
48016 } else if (!list_empty(&object->pending_ops)) {
48017 atomic_inc(&op->usage);
48018 list_add_tail(&op->pend_link, &object->pending_ops);
48019- fscache_stat(&fscache_n_op_pend);
48020+ fscache_stat_unchecked(&fscache_n_op_pend);
48021 fscache_start_operations(object);
48022 } else {
48023 ASSERTCMP(object->n_exclusive, ==, 0);
48024@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
48025 object->n_ops++;
48026 atomic_inc(&op->usage);
48027 list_add_tail(&op->pend_link, &object->pending_ops);
48028- fscache_stat(&fscache_n_op_pend);
48029+ fscache_stat_unchecked(&fscache_n_op_pend);
48030 ret = 0;
48031 } else if (object->state == FSCACHE_OBJECT_DYING ||
48032 object->state == FSCACHE_OBJECT_LC_DYING ||
48033 object->state == FSCACHE_OBJECT_WITHDRAWING) {
48034- fscache_stat(&fscache_n_op_rejected);
48035+ fscache_stat_unchecked(&fscache_n_op_rejected);
48036 ret = -ENOBUFS;
48037 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
48038 fscache_report_unexpected_submission(object, op, ostate);
48039@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
48040
48041 ret = -EBUSY;
48042 if (!list_empty(&op->pend_link)) {
48043- fscache_stat(&fscache_n_op_cancelled);
48044+ fscache_stat_unchecked(&fscache_n_op_cancelled);
48045 list_del_init(&op->pend_link);
48046 object->n_ops--;
48047 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
48048@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
48049 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
48050 BUG();
48051
48052- fscache_stat(&fscache_n_op_release);
48053+ fscache_stat_unchecked(&fscache_n_op_release);
48054
48055 if (op->release) {
48056 op->release(op);
48057@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
48058 * lock, and defer it otherwise */
48059 if (!spin_trylock(&object->lock)) {
48060 _debug("defer put");
48061- fscache_stat(&fscache_n_op_deferred_release);
48062+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
48063
48064 cache = object->cache;
48065 spin_lock(&cache->op_gc_list_lock);
48066@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
48067
48068 _debug("GC DEFERRED REL OBJ%x OP%x",
48069 object->debug_id, op->debug_id);
48070- fscache_stat(&fscache_n_op_gc);
48071+ fscache_stat_unchecked(&fscache_n_op_gc);
48072
48073 ASSERTCMP(atomic_read(&op->usage), ==, 0);
48074
48075diff --git a/fs/fscache/page.c b/fs/fscache/page.c
48076index 3f7a59b..cf196cc 100644
48077--- a/fs/fscache/page.c
48078+++ b/fs/fscache/page.c
48079@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
48080 val = radix_tree_lookup(&cookie->stores, page->index);
48081 if (!val) {
48082 rcu_read_unlock();
48083- fscache_stat(&fscache_n_store_vmscan_not_storing);
48084+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
48085 __fscache_uncache_page(cookie, page);
48086 return true;
48087 }
48088@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
48089 spin_unlock(&cookie->stores_lock);
48090
48091 if (xpage) {
48092- fscache_stat(&fscache_n_store_vmscan_cancelled);
48093- fscache_stat(&fscache_n_store_radix_deletes);
48094+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
48095+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
48096 ASSERTCMP(xpage, ==, page);
48097 } else {
48098- fscache_stat(&fscache_n_store_vmscan_gone);
48099+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
48100 }
48101
48102 wake_up_bit(&cookie->flags, 0);
48103@@ -107,7 +107,7 @@ page_busy:
48104 /* we might want to wait here, but that could deadlock the allocator as
48105 * the work threads writing to the cache may all end up sleeping
48106 * on memory allocation */
48107- fscache_stat(&fscache_n_store_vmscan_busy);
48108+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
48109 return false;
48110 }
48111 EXPORT_SYMBOL(__fscache_maybe_release_page);
48112@@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
48113 FSCACHE_COOKIE_STORING_TAG);
48114 if (!radix_tree_tag_get(&cookie->stores, page->index,
48115 FSCACHE_COOKIE_PENDING_TAG)) {
48116- fscache_stat(&fscache_n_store_radix_deletes);
48117+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
48118 xpage = radix_tree_delete(&cookie->stores, page->index);
48119 }
48120 spin_unlock(&cookie->stores_lock);
48121@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
48122
48123 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
48124
48125- fscache_stat(&fscache_n_attr_changed_calls);
48126+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
48127
48128 if (fscache_object_is_active(object)) {
48129 fscache_stat(&fscache_n_cop_attr_changed);
48130@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
48131
48132 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
48133
48134- fscache_stat(&fscache_n_attr_changed);
48135+ fscache_stat_unchecked(&fscache_n_attr_changed);
48136
48137 op = kzalloc(sizeof(*op), GFP_KERNEL);
48138 if (!op) {
48139- fscache_stat(&fscache_n_attr_changed_nomem);
48140+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
48141 _leave(" = -ENOMEM");
48142 return -ENOMEM;
48143 }
48144@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
48145 if (fscache_submit_exclusive_op(object, op) < 0)
48146 goto nobufs;
48147 spin_unlock(&cookie->lock);
48148- fscache_stat(&fscache_n_attr_changed_ok);
48149+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
48150 fscache_put_operation(op);
48151 _leave(" = 0");
48152 return 0;
48153@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
48154 nobufs:
48155 spin_unlock(&cookie->lock);
48156 kfree(op);
48157- fscache_stat(&fscache_n_attr_changed_nobufs);
48158+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
48159 _leave(" = %d", -ENOBUFS);
48160 return -ENOBUFS;
48161 }
48162@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
48163 /* allocate a retrieval operation and attempt to submit it */
48164 op = kzalloc(sizeof(*op), GFP_NOIO);
48165 if (!op) {
48166- fscache_stat(&fscache_n_retrievals_nomem);
48167+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
48168 return NULL;
48169 }
48170
48171@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
48172 return 0;
48173 }
48174
48175- fscache_stat(&fscache_n_retrievals_wait);
48176+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
48177
48178 jif = jiffies;
48179 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
48180 fscache_wait_bit_interruptible,
48181 TASK_INTERRUPTIBLE) != 0) {
48182- fscache_stat(&fscache_n_retrievals_intr);
48183+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
48184 _leave(" = -ERESTARTSYS");
48185 return -ERESTARTSYS;
48186 }
48187@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
48188 */
48189 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
48190 struct fscache_retrieval *op,
48191- atomic_t *stat_op_waits,
48192- atomic_t *stat_object_dead)
48193+ atomic_unchecked_t *stat_op_waits,
48194+ atomic_unchecked_t *stat_object_dead)
48195 {
48196 int ret;
48197
48198@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
48199 goto check_if_dead;
48200
48201 _debug(">>> WT");
48202- fscache_stat(stat_op_waits);
48203+ fscache_stat_unchecked(stat_op_waits);
48204 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
48205 fscache_wait_bit_interruptible,
48206 TASK_INTERRUPTIBLE) < 0) {
48207@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
48208
48209 check_if_dead:
48210 if (unlikely(fscache_object_is_dead(object))) {
48211- fscache_stat(stat_object_dead);
48212+ fscache_stat_unchecked(stat_object_dead);
48213 return -ENOBUFS;
48214 }
48215 return 0;
48216@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
48217
48218 _enter("%p,%p,,,", cookie, page);
48219
48220- fscache_stat(&fscache_n_retrievals);
48221+ fscache_stat_unchecked(&fscache_n_retrievals);
48222
48223 if (hlist_empty(&cookie->backing_objects))
48224 goto nobufs;
48225@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
48226 goto nobufs_unlock;
48227 spin_unlock(&cookie->lock);
48228
48229- fscache_stat(&fscache_n_retrieval_ops);
48230+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
48231
48232 /* pin the netfs read context in case we need to do the actual netfs
48233 * read because we've encountered a cache read failure */
48234@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
48235
48236 error:
48237 if (ret == -ENOMEM)
48238- fscache_stat(&fscache_n_retrievals_nomem);
48239+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
48240 else if (ret == -ERESTARTSYS)
48241- fscache_stat(&fscache_n_retrievals_intr);
48242+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
48243 else if (ret == -ENODATA)
48244- fscache_stat(&fscache_n_retrievals_nodata);
48245+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
48246 else if (ret < 0)
48247- fscache_stat(&fscache_n_retrievals_nobufs);
48248+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
48249 else
48250- fscache_stat(&fscache_n_retrievals_ok);
48251+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
48252
48253 fscache_put_retrieval(op);
48254 _leave(" = %d", ret);
48255@@ -429,7 +429,7 @@ nobufs_unlock:
48256 spin_unlock(&cookie->lock);
48257 kfree(op);
48258 nobufs:
48259- fscache_stat(&fscache_n_retrievals_nobufs);
48260+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
48261 _leave(" = -ENOBUFS");
48262 return -ENOBUFS;
48263 }
48264@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
48265
48266 _enter("%p,,%d,,,", cookie, *nr_pages);
48267
48268- fscache_stat(&fscache_n_retrievals);
48269+ fscache_stat_unchecked(&fscache_n_retrievals);
48270
48271 if (hlist_empty(&cookie->backing_objects))
48272 goto nobufs;
48273@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
48274 goto nobufs_unlock;
48275 spin_unlock(&cookie->lock);
48276
48277- fscache_stat(&fscache_n_retrieval_ops);
48278+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
48279
48280 /* pin the netfs read context in case we need to do the actual netfs
48281 * read because we've encountered a cache read failure */
48282@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
48283
48284 error:
48285 if (ret == -ENOMEM)
48286- fscache_stat(&fscache_n_retrievals_nomem);
48287+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
48288 else if (ret == -ERESTARTSYS)
48289- fscache_stat(&fscache_n_retrievals_intr);
48290+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
48291 else if (ret == -ENODATA)
48292- fscache_stat(&fscache_n_retrievals_nodata);
48293+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
48294 else if (ret < 0)
48295- fscache_stat(&fscache_n_retrievals_nobufs);
48296+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
48297 else
48298- fscache_stat(&fscache_n_retrievals_ok);
48299+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
48300
48301 fscache_put_retrieval(op);
48302 _leave(" = %d", ret);
48303@@ -545,7 +545,7 @@ nobufs_unlock:
48304 spin_unlock(&cookie->lock);
48305 kfree(op);
48306 nobufs:
48307- fscache_stat(&fscache_n_retrievals_nobufs);
48308+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
48309 _leave(" = -ENOBUFS");
48310 return -ENOBUFS;
48311 }
48312@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
48313
48314 _enter("%p,%p,,,", cookie, page);
48315
48316- fscache_stat(&fscache_n_allocs);
48317+ fscache_stat_unchecked(&fscache_n_allocs);
48318
48319 if (hlist_empty(&cookie->backing_objects))
48320 goto nobufs;
48321@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
48322 goto nobufs_unlock;
48323 spin_unlock(&cookie->lock);
48324
48325- fscache_stat(&fscache_n_alloc_ops);
48326+ fscache_stat_unchecked(&fscache_n_alloc_ops);
48327
48328 ret = fscache_wait_for_retrieval_activation(
48329 object, op,
48330@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
48331
48332 error:
48333 if (ret == -ERESTARTSYS)
48334- fscache_stat(&fscache_n_allocs_intr);
48335+ fscache_stat_unchecked(&fscache_n_allocs_intr);
48336 else if (ret < 0)
48337- fscache_stat(&fscache_n_allocs_nobufs);
48338+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
48339 else
48340- fscache_stat(&fscache_n_allocs_ok);
48341+ fscache_stat_unchecked(&fscache_n_allocs_ok);
48342
48343 fscache_put_retrieval(op);
48344 _leave(" = %d", ret);
48345@@ -625,7 +625,7 @@ nobufs_unlock:
48346 spin_unlock(&cookie->lock);
48347 kfree(op);
48348 nobufs:
48349- fscache_stat(&fscache_n_allocs_nobufs);
48350+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
48351 _leave(" = -ENOBUFS");
48352 return -ENOBUFS;
48353 }
48354@@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
48355
48356 spin_lock(&cookie->stores_lock);
48357
48358- fscache_stat(&fscache_n_store_calls);
48359+ fscache_stat_unchecked(&fscache_n_store_calls);
48360
48361 /* find a page to store */
48362 page = NULL;
48363@@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
48364 page = results[0];
48365 _debug("gang %d [%lx]", n, page->index);
48366 if (page->index > op->store_limit) {
48367- fscache_stat(&fscache_n_store_pages_over_limit);
48368+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
48369 goto superseded;
48370 }
48371
48372@@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
48373 spin_unlock(&cookie->stores_lock);
48374 spin_unlock(&object->lock);
48375
48376- fscache_stat(&fscache_n_store_pages);
48377+ fscache_stat_unchecked(&fscache_n_store_pages);
48378 fscache_stat(&fscache_n_cop_write_page);
48379 ret = object->cache->ops->write_page(op, page);
48380 fscache_stat_d(&fscache_n_cop_write_page);
48381@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
48382 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
48383 ASSERT(PageFsCache(page));
48384
48385- fscache_stat(&fscache_n_stores);
48386+ fscache_stat_unchecked(&fscache_n_stores);
48387
48388 op = kzalloc(sizeof(*op), GFP_NOIO);
48389 if (!op)
48390@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
48391 spin_unlock(&cookie->stores_lock);
48392 spin_unlock(&object->lock);
48393
48394- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
48395+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
48396 op->store_limit = object->store_limit;
48397
48398 if (fscache_submit_op(object, &op->op) < 0)
48399@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
48400
48401 spin_unlock(&cookie->lock);
48402 radix_tree_preload_end();
48403- fscache_stat(&fscache_n_store_ops);
48404- fscache_stat(&fscache_n_stores_ok);
48405+ fscache_stat_unchecked(&fscache_n_store_ops);
48406+ fscache_stat_unchecked(&fscache_n_stores_ok);
48407
48408 /* the work queue now carries its own ref on the object */
48409 fscache_put_operation(&op->op);
48410@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
48411 return 0;
48412
48413 already_queued:
48414- fscache_stat(&fscache_n_stores_again);
48415+ fscache_stat_unchecked(&fscache_n_stores_again);
48416 already_pending:
48417 spin_unlock(&cookie->stores_lock);
48418 spin_unlock(&object->lock);
48419 spin_unlock(&cookie->lock);
48420 radix_tree_preload_end();
48421 kfree(op);
48422- fscache_stat(&fscache_n_stores_ok);
48423+ fscache_stat_unchecked(&fscache_n_stores_ok);
48424 _leave(" = 0");
48425 return 0;
48426
48427@@ -851,14 +851,14 @@ nobufs:
48428 spin_unlock(&cookie->lock);
48429 radix_tree_preload_end();
48430 kfree(op);
48431- fscache_stat(&fscache_n_stores_nobufs);
48432+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
48433 _leave(" = -ENOBUFS");
48434 return -ENOBUFS;
48435
48436 nomem_free:
48437 kfree(op);
48438 nomem:
48439- fscache_stat(&fscache_n_stores_oom);
48440+ fscache_stat_unchecked(&fscache_n_stores_oom);
48441 _leave(" = -ENOMEM");
48442 return -ENOMEM;
48443 }
48444@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
48445 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
48446 ASSERTCMP(page, !=, NULL);
48447
48448- fscache_stat(&fscache_n_uncaches);
48449+ fscache_stat_unchecked(&fscache_n_uncaches);
48450
48451 /* cache withdrawal may beat us to it */
48452 if (!PageFsCache(page))
48453@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
48454 unsigned long loop;
48455
48456 #ifdef CONFIG_FSCACHE_STATS
48457- atomic_add(pagevec->nr, &fscache_n_marks);
48458+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
48459 #endif
48460
48461 for (loop = 0; loop < pagevec->nr; loop++) {
48462diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
48463index 4765190..2a067f2 100644
48464--- a/fs/fscache/stats.c
48465+++ b/fs/fscache/stats.c
48466@@ -18,95 +18,95 @@
48467 /*
48468 * operation counters
48469 */
48470-atomic_t fscache_n_op_pend;
48471-atomic_t fscache_n_op_run;
48472-atomic_t fscache_n_op_enqueue;
48473-atomic_t fscache_n_op_requeue;
48474-atomic_t fscache_n_op_deferred_release;
48475-atomic_t fscache_n_op_release;
48476-atomic_t fscache_n_op_gc;
48477-atomic_t fscache_n_op_cancelled;
48478-atomic_t fscache_n_op_rejected;
48479+atomic_unchecked_t fscache_n_op_pend;
48480+atomic_unchecked_t fscache_n_op_run;
48481+atomic_unchecked_t fscache_n_op_enqueue;
48482+atomic_unchecked_t fscache_n_op_requeue;
48483+atomic_unchecked_t fscache_n_op_deferred_release;
48484+atomic_unchecked_t fscache_n_op_release;
48485+atomic_unchecked_t fscache_n_op_gc;
48486+atomic_unchecked_t fscache_n_op_cancelled;
48487+atomic_unchecked_t fscache_n_op_rejected;
48488
48489-atomic_t fscache_n_attr_changed;
48490-atomic_t fscache_n_attr_changed_ok;
48491-atomic_t fscache_n_attr_changed_nobufs;
48492-atomic_t fscache_n_attr_changed_nomem;
48493-atomic_t fscache_n_attr_changed_calls;
48494+atomic_unchecked_t fscache_n_attr_changed;
48495+atomic_unchecked_t fscache_n_attr_changed_ok;
48496+atomic_unchecked_t fscache_n_attr_changed_nobufs;
48497+atomic_unchecked_t fscache_n_attr_changed_nomem;
48498+atomic_unchecked_t fscache_n_attr_changed_calls;
48499
48500-atomic_t fscache_n_allocs;
48501-atomic_t fscache_n_allocs_ok;
48502-atomic_t fscache_n_allocs_wait;
48503-atomic_t fscache_n_allocs_nobufs;
48504-atomic_t fscache_n_allocs_intr;
48505-atomic_t fscache_n_allocs_object_dead;
48506-atomic_t fscache_n_alloc_ops;
48507-atomic_t fscache_n_alloc_op_waits;
48508+atomic_unchecked_t fscache_n_allocs;
48509+atomic_unchecked_t fscache_n_allocs_ok;
48510+atomic_unchecked_t fscache_n_allocs_wait;
48511+atomic_unchecked_t fscache_n_allocs_nobufs;
48512+atomic_unchecked_t fscache_n_allocs_intr;
48513+atomic_unchecked_t fscache_n_allocs_object_dead;
48514+atomic_unchecked_t fscache_n_alloc_ops;
48515+atomic_unchecked_t fscache_n_alloc_op_waits;
48516
48517-atomic_t fscache_n_retrievals;
48518-atomic_t fscache_n_retrievals_ok;
48519-atomic_t fscache_n_retrievals_wait;
48520-atomic_t fscache_n_retrievals_nodata;
48521-atomic_t fscache_n_retrievals_nobufs;
48522-atomic_t fscache_n_retrievals_intr;
48523-atomic_t fscache_n_retrievals_nomem;
48524-atomic_t fscache_n_retrievals_object_dead;
48525-atomic_t fscache_n_retrieval_ops;
48526-atomic_t fscache_n_retrieval_op_waits;
48527+atomic_unchecked_t fscache_n_retrievals;
48528+atomic_unchecked_t fscache_n_retrievals_ok;
48529+atomic_unchecked_t fscache_n_retrievals_wait;
48530+atomic_unchecked_t fscache_n_retrievals_nodata;
48531+atomic_unchecked_t fscache_n_retrievals_nobufs;
48532+atomic_unchecked_t fscache_n_retrievals_intr;
48533+atomic_unchecked_t fscache_n_retrievals_nomem;
48534+atomic_unchecked_t fscache_n_retrievals_object_dead;
48535+atomic_unchecked_t fscache_n_retrieval_ops;
48536+atomic_unchecked_t fscache_n_retrieval_op_waits;
48537
48538-atomic_t fscache_n_stores;
48539-atomic_t fscache_n_stores_ok;
48540-atomic_t fscache_n_stores_again;
48541-atomic_t fscache_n_stores_nobufs;
48542-atomic_t fscache_n_stores_oom;
48543-atomic_t fscache_n_store_ops;
48544-atomic_t fscache_n_store_calls;
48545-atomic_t fscache_n_store_pages;
48546-atomic_t fscache_n_store_radix_deletes;
48547-atomic_t fscache_n_store_pages_over_limit;
48548+atomic_unchecked_t fscache_n_stores;
48549+atomic_unchecked_t fscache_n_stores_ok;
48550+atomic_unchecked_t fscache_n_stores_again;
48551+atomic_unchecked_t fscache_n_stores_nobufs;
48552+atomic_unchecked_t fscache_n_stores_oom;
48553+atomic_unchecked_t fscache_n_store_ops;
48554+atomic_unchecked_t fscache_n_store_calls;
48555+atomic_unchecked_t fscache_n_store_pages;
48556+atomic_unchecked_t fscache_n_store_radix_deletes;
48557+atomic_unchecked_t fscache_n_store_pages_over_limit;
48558
48559-atomic_t fscache_n_store_vmscan_not_storing;
48560-atomic_t fscache_n_store_vmscan_gone;
48561-atomic_t fscache_n_store_vmscan_busy;
48562-atomic_t fscache_n_store_vmscan_cancelled;
48563+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
48564+atomic_unchecked_t fscache_n_store_vmscan_gone;
48565+atomic_unchecked_t fscache_n_store_vmscan_busy;
48566+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
48567
48568-atomic_t fscache_n_marks;
48569-atomic_t fscache_n_uncaches;
48570+atomic_unchecked_t fscache_n_marks;
48571+atomic_unchecked_t fscache_n_uncaches;
48572
48573-atomic_t fscache_n_acquires;
48574-atomic_t fscache_n_acquires_null;
48575-atomic_t fscache_n_acquires_no_cache;
48576-atomic_t fscache_n_acquires_ok;
48577-atomic_t fscache_n_acquires_nobufs;
48578-atomic_t fscache_n_acquires_oom;
48579+atomic_unchecked_t fscache_n_acquires;
48580+atomic_unchecked_t fscache_n_acquires_null;
48581+atomic_unchecked_t fscache_n_acquires_no_cache;
48582+atomic_unchecked_t fscache_n_acquires_ok;
48583+atomic_unchecked_t fscache_n_acquires_nobufs;
48584+atomic_unchecked_t fscache_n_acquires_oom;
48585
48586-atomic_t fscache_n_updates;
48587-atomic_t fscache_n_updates_null;
48588-atomic_t fscache_n_updates_run;
48589+atomic_unchecked_t fscache_n_updates;
48590+atomic_unchecked_t fscache_n_updates_null;
48591+atomic_unchecked_t fscache_n_updates_run;
48592
48593-atomic_t fscache_n_relinquishes;
48594-atomic_t fscache_n_relinquishes_null;
48595-atomic_t fscache_n_relinquishes_waitcrt;
48596-atomic_t fscache_n_relinquishes_retire;
48597+atomic_unchecked_t fscache_n_relinquishes;
48598+atomic_unchecked_t fscache_n_relinquishes_null;
48599+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
48600+atomic_unchecked_t fscache_n_relinquishes_retire;
48601
48602-atomic_t fscache_n_cookie_index;
48603-atomic_t fscache_n_cookie_data;
48604-atomic_t fscache_n_cookie_special;
48605+atomic_unchecked_t fscache_n_cookie_index;
48606+atomic_unchecked_t fscache_n_cookie_data;
48607+atomic_unchecked_t fscache_n_cookie_special;
48608
48609-atomic_t fscache_n_object_alloc;
48610-atomic_t fscache_n_object_no_alloc;
48611-atomic_t fscache_n_object_lookups;
48612-atomic_t fscache_n_object_lookups_negative;
48613-atomic_t fscache_n_object_lookups_positive;
48614-atomic_t fscache_n_object_lookups_timed_out;
48615-atomic_t fscache_n_object_created;
48616-atomic_t fscache_n_object_avail;
48617-atomic_t fscache_n_object_dead;
48618+atomic_unchecked_t fscache_n_object_alloc;
48619+atomic_unchecked_t fscache_n_object_no_alloc;
48620+atomic_unchecked_t fscache_n_object_lookups;
48621+atomic_unchecked_t fscache_n_object_lookups_negative;
48622+atomic_unchecked_t fscache_n_object_lookups_positive;
48623+atomic_unchecked_t fscache_n_object_lookups_timed_out;
48624+atomic_unchecked_t fscache_n_object_created;
48625+atomic_unchecked_t fscache_n_object_avail;
48626+atomic_unchecked_t fscache_n_object_dead;
48627
48628-atomic_t fscache_n_checkaux_none;
48629-atomic_t fscache_n_checkaux_okay;
48630-atomic_t fscache_n_checkaux_update;
48631-atomic_t fscache_n_checkaux_obsolete;
48632+atomic_unchecked_t fscache_n_checkaux_none;
48633+atomic_unchecked_t fscache_n_checkaux_okay;
48634+atomic_unchecked_t fscache_n_checkaux_update;
48635+atomic_unchecked_t fscache_n_checkaux_obsolete;
48636
48637 atomic_t fscache_n_cop_alloc_object;
48638 atomic_t fscache_n_cop_lookup_object;
48639@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
48640 seq_puts(m, "FS-Cache statistics\n");
48641
48642 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
48643- atomic_read(&fscache_n_cookie_index),
48644- atomic_read(&fscache_n_cookie_data),
48645- atomic_read(&fscache_n_cookie_special));
48646+ atomic_read_unchecked(&fscache_n_cookie_index),
48647+ atomic_read_unchecked(&fscache_n_cookie_data),
48648+ atomic_read_unchecked(&fscache_n_cookie_special));
48649
48650 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
48651- atomic_read(&fscache_n_object_alloc),
48652- atomic_read(&fscache_n_object_no_alloc),
48653- atomic_read(&fscache_n_object_avail),
48654- atomic_read(&fscache_n_object_dead));
48655+ atomic_read_unchecked(&fscache_n_object_alloc),
48656+ atomic_read_unchecked(&fscache_n_object_no_alloc),
48657+ atomic_read_unchecked(&fscache_n_object_avail),
48658+ atomic_read_unchecked(&fscache_n_object_dead));
48659 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
48660- atomic_read(&fscache_n_checkaux_none),
48661- atomic_read(&fscache_n_checkaux_okay),
48662- atomic_read(&fscache_n_checkaux_update),
48663- atomic_read(&fscache_n_checkaux_obsolete));
48664+ atomic_read_unchecked(&fscache_n_checkaux_none),
48665+ atomic_read_unchecked(&fscache_n_checkaux_okay),
48666+ atomic_read_unchecked(&fscache_n_checkaux_update),
48667+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
48668
48669 seq_printf(m, "Pages : mrk=%u unc=%u\n",
48670- atomic_read(&fscache_n_marks),
48671- atomic_read(&fscache_n_uncaches));
48672+ atomic_read_unchecked(&fscache_n_marks),
48673+ atomic_read_unchecked(&fscache_n_uncaches));
48674
48675 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
48676 " oom=%u\n",
48677- atomic_read(&fscache_n_acquires),
48678- atomic_read(&fscache_n_acquires_null),
48679- atomic_read(&fscache_n_acquires_no_cache),
48680- atomic_read(&fscache_n_acquires_ok),
48681- atomic_read(&fscache_n_acquires_nobufs),
48682- atomic_read(&fscache_n_acquires_oom));
48683+ atomic_read_unchecked(&fscache_n_acquires),
48684+ atomic_read_unchecked(&fscache_n_acquires_null),
48685+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
48686+ atomic_read_unchecked(&fscache_n_acquires_ok),
48687+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
48688+ atomic_read_unchecked(&fscache_n_acquires_oom));
48689
48690 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
48691- atomic_read(&fscache_n_object_lookups),
48692- atomic_read(&fscache_n_object_lookups_negative),
48693- atomic_read(&fscache_n_object_lookups_positive),
48694- atomic_read(&fscache_n_object_created),
48695- atomic_read(&fscache_n_object_lookups_timed_out));
48696+ atomic_read_unchecked(&fscache_n_object_lookups),
48697+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
48698+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
48699+ atomic_read_unchecked(&fscache_n_object_created),
48700+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
48701
48702 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
48703- atomic_read(&fscache_n_updates),
48704- atomic_read(&fscache_n_updates_null),
48705- atomic_read(&fscache_n_updates_run));
48706+ atomic_read_unchecked(&fscache_n_updates),
48707+ atomic_read_unchecked(&fscache_n_updates_null),
48708+ atomic_read_unchecked(&fscache_n_updates_run));
48709
48710 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
48711- atomic_read(&fscache_n_relinquishes),
48712- atomic_read(&fscache_n_relinquishes_null),
48713- atomic_read(&fscache_n_relinquishes_waitcrt),
48714- atomic_read(&fscache_n_relinquishes_retire));
48715+ atomic_read_unchecked(&fscache_n_relinquishes),
48716+ atomic_read_unchecked(&fscache_n_relinquishes_null),
48717+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
48718+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
48719
48720 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
48721- atomic_read(&fscache_n_attr_changed),
48722- atomic_read(&fscache_n_attr_changed_ok),
48723- atomic_read(&fscache_n_attr_changed_nobufs),
48724- atomic_read(&fscache_n_attr_changed_nomem),
48725- atomic_read(&fscache_n_attr_changed_calls));
48726+ atomic_read_unchecked(&fscache_n_attr_changed),
48727+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
48728+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
48729+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
48730+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
48731
48732 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
48733- atomic_read(&fscache_n_allocs),
48734- atomic_read(&fscache_n_allocs_ok),
48735- atomic_read(&fscache_n_allocs_wait),
48736- atomic_read(&fscache_n_allocs_nobufs),
48737- atomic_read(&fscache_n_allocs_intr));
48738+ atomic_read_unchecked(&fscache_n_allocs),
48739+ atomic_read_unchecked(&fscache_n_allocs_ok),
48740+ atomic_read_unchecked(&fscache_n_allocs_wait),
48741+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
48742+ atomic_read_unchecked(&fscache_n_allocs_intr));
48743 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
48744- atomic_read(&fscache_n_alloc_ops),
48745- atomic_read(&fscache_n_alloc_op_waits),
48746- atomic_read(&fscache_n_allocs_object_dead));
48747+ atomic_read_unchecked(&fscache_n_alloc_ops),
48748+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
48749+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
48750
48751 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
48752 " int=%u oom=%u\n",
48753- atomic_read(&fscache_n_retrievals),
48754- atomic_read(&fscache_n_retrievals_ok),
48755- atomic_read(&fscache_n_retrievals_wait),
48756- atomic_read(&fscache_n_retrievals_nodata),
48757- atomic_read(&fscache_n_retrievals_nobufs),
48758- atomic_read(&fscache_n_retrievals_intr),
48759- atomic_read(&fscache_n_retrievals_nomem));
48760+ atomic_read_unchecked(&fscache_n_retrievals),
48761+ atomic_read_unchecked(&fscache_n_retrievals_ok),
48762+ atomic_read_unchecked(&fscache_n_retrievals_wait),
48763+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
48764+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
48765+ atomic_read_unchecked(&fscache_n_retrievals_intr),
48766+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
48767 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
48768- atomic_read(&fscache_n_retrieval_ops),
48769- atomic_read(&fscache_n_retrieval_op_waits),
48770- atomic_read(&fscache_n_retrievals_object_dead));
48771+ atomic_read_unchecked(&fscache_n_retrieval_ops),
48772+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
48773+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
48774
48775 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
48776- atomic_read(&fscache_n_stores),
48777- atomic_read(&fscache_n_stores_ok),
48778- atomic_read(&fscache_n_stores_again),
48779- atomic_read(&fscache_n_stores_nobufs),
48780- atomic_read(&fscache_n_stores_oom));
48781+ atomic_read_unchecked(&fscache_n_stores),
48782+ atomic_read_unchecked(&fscache_n_stores_ok),
48783+ atomic_read_unchecked(&fscache_n_stores_again),
48784+ atomic_read_unchecked(&fscache_n_stores_nobufs),
48785+ atomic_read_unchecked(&fscache_n_stores_oom));
48786 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
48787- atomic_read(&fscache_n_store_ops),
48788- atomic_read(&fscache_n_store_calls),
48789- atomic_read(&fscache_n_store_pages),
48790- atomic_read(&fscache_n_store_radix_deletes),
48791- atomic_read(&fscache_n_store_pages_over_limit));
48792+ atomic_read_unchecked(&fscache_n_store_ops),
48793+ atomic_read_unchecked(&fscache_n_store_calls),
48794+ atomic_read_unchecked(&fscache_n_store_pages),
48795+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
48796+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
48797
48798 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
48799- atomic_read(&fscache_n_store_vmscan_not_storing),
48800- atomic_read(&fscache_n_store_vmscan_gone),
48801- atomic_read(&fscache_n_store_vmscan_busy),
48802- atomic_read(&fscache_n_store_vmscan_cancelled));
48803+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
48804+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
48805+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
48806+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
48807
48808 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
48809- atomic_read(&fscache_n_op_pend),
48810- atomic_read(&fscache_n_op_run),
48811- atomic_read(&fscache_n_op_enqueue),
48812- atomic_read(&fscache_n_op_cancelled),
48813- atomic_read(&fscache_n_op_rejected));
48814+ atomic_read_unchecked(&fscache_n_op_pend),
48815+ atomic_read_unchecked(&fscache_n_op_run),
48816+ atomic_read_unchecked(&fscache_n_op_enqueue),
48817+ atomic_read_unchecked(&fscache_n_op_cancelled),
48818+ atomic_read_unchecked(&fscache_n_op_rejected));
48819 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
48820- atomic_read(&fscache_n_op_deferred_release),
48821- atomic_read(&fscache_n_op_release),
48822- atomic_read(&fscache_n_op_gc));
48823+ atomic_read_unchecked(&fscache_n_op_deferred_release),
48824+ atomic_read_unchecked(&fscache_n_op_release),
48825+ atomic_read_unchecked(&fscache_n_op_gc));
48826
48827 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
48828 atomic_read(&fscache_n_cop_alloc_object),
48829diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
48830index ee8d550..7189d8c 100644
48831--- a/fs/fuse/cuse.c
48832+++ b/fs/fuse/cuse.c
48833@@ -585,10 +585,12 @@ static int __init cuse_init(void)
48834 INIT_LIST_HEAD(&cuse_conntbl[i]);
48835
48836 /* inherit and extend fuse_dev_operations */
48837- cuse_channel_fops = fuse_dev_operations;
48838- cuse_channel_fops.owner = THIS_MODULE;
48839- cuse_channel_fops.open = cuse_channel_open;
48840- cuse_channel_fops.release = cuse_channel_release;
48841+ pax_open_kernel();
48842+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
48843+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
48844+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
48845+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
48846+ pax_close_kernel();
48847
48848 cuse_class = class_create(THIS_MODULE, "cuse");
48849 if (IS_ERR(cuse_class))
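Note: the cuse.c change follows the standard PaX constify pattern. Structures made entirely of function pointers (file_operations among them) are placed in read-only memory at build time, so the old plain member assignments would fault. pax_open_kernel()/pax_close_kernel() temporarily lift kernel write protection, and the writes go through void ** casts to shed the const qualifier. The same hunk, annotated:

	pax_open_kernel();		/* allow writes to .rodata */
	memcpy((void *)&cuse_channel_fops, &fuse_dev_operations,
	       sizeof(fuse_dev_operations));
	*(void **)&cuse_channel_fops.owner   = THIS_MODULE;
	*(void **)&cuse_channel_fops.open    = cuse_channel_open;
	*(void **)&cuse_channel_fops.release = cuse_channel_release;
	pax_close_kernel();		/* restore write protection */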
48850diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
48851index 8c23fa7..0e3aac7 100644
48852--- a/fs/fuse/dev.c
48853+++ b/fs/fuse/dev.c
48854@@ -1241,7 +1241,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
48855 ret = 0;
48856 pipe_lock(pipe);
48857
48858- if (!pipe->readers) {
48859+ if (!atomic_read(&pipe->readers)) {
48860 send_sig(SIGPIPE, current, 0);
48861 if (!ret)
48862 ret = -EPIPE;
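Note: the one-liner in fuse/dev.c exists because the patch converts pipe_inode_info's readers/writers counts to atomic_t in hunks outside this excerpt (an inference from the new accessor); fuse's splice path just has to read the count through the atomic API now:

	if (!atomic_read(&pipe->readers)) {	/* no readers left: raise EPIPE */
		send_sig(SIGPIPE, current, 0);
		if (!ret)
			ret = -EPIPE;
	}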
48863diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
48864index 324bc08..4fdd56e 100644
48865--- a/fs/fuse/dir.c
48866+++ b/fs/fuse/dir.c
48867@@ -1226,7 +1226,7 @@ static char *read_link(struct dentry *dentry)
48868 return link;
48869 }
48870
48871-static void free_link(char *link)
48872+static void free_link(const char *link)
48873 {
48874 if (!IS_ERR(link))
48875 free_page((unsigned long) link);
48876diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
48877index 381893c..3793318 100644
48878--- a/fs/gfs2/inode.c
48879+++ b/fs/gfs2/inode.c
48880@@ -1490,7 +1490,7 @@ out:
48881
48882 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
48883 {
48884- char *s = nd_get_link(nd);
48885+ const char *s = nd_get_link(nd);
48886 if (!IS_ERR(s))
48887 kfree(s);
48888 }
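Note: fuse/dir.c and gfs2/inode.c get the same const-correctness touch-up — the symlink body handed back by nd_get_link() is treated as read-only text, matching a constification of the nameidata helpers elsewhere in the patch (an inference; those hunks are not in this excerpt). The free paths shed the qualifier only at the allocator boundary, e.g.:

	static void free_link(const char *link)
	{
		if (!IS_ERR(link))
			free_page((unsigned long)link);	/* cast drops const */
	}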
48889diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
48890index c5bc355..5a513bb 100644
48891--- a/fs/hugetlbfs/inode.c
48892+++ b/fs/hugetlbfs/inode.c
48893@@ -923,7 +923,7 @@ static struct file_system_type hugetlbfs_fs_type = {
48894 .kill_sb = kill_litter_super,
48895 };
48896
48897-static struct vfsmount *hugetlbfs_vfsmount;
48898+struct vfsmount *hugetlbfs_vfsmount;
48899
48900 static int can_do_hugetlb_shm(void)
48901 {
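Note: hugetlbfs_vfsmount loses its static qualifier so that code elsewhere in the patch can reach the internal mount (presumably one of the grsecurity checks; the consumer is outside this excerpt). Any such user would pair it with a declaration like:

	extern struct vfsmount *hugetlbfs_vfsmount;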
48902diff --git a/fs/inode.c b/fs/inode.c
48903index 64999f1..8fad608 100644
48904--- a/fs/inode.c
48905+++ b/fs/inode.c
48906@@ -880,8 +880,8 @@ unsigned int get_next_ino(void)
48907
48908 #ifdef CONFIG_SMP
48909 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
48910- static atomic_t shared_last_ino;
48911- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
48912+ static atomic_unchecked_t shared_last_ino;
48913+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
48914
48915 res = next - LAST_INO_BATCH;
48916 }
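Note: get_next_ino() is another legitimate-wraparound case. The shared counter hands each CPU a block of LAST_INO_BATCH inode numbers and is fully expected to roll over on long-lived systems, so it moves to the unchecked type. A sketch of the batching scheme (grab_ino_batch is a hypothetical helper name; LAST_INO_BATCH is defined as 1024 nearby in fs/inode.c):

	static atomic_unchecked_t shared_last_ino;

	static unsigned int grab_ino_batch(void)
	{
		int next = atomic_add_return_unchecked(LAST_INO_BATCH,
						       &shared_last_ino);
		return next - LAST_INO_BATCH;	/* first number of this batch */
	}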
48917diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
48918index 4a6cf28..d3a29d3 100644
48919--- a/fs/jffs2/erase.c
48920+++ b/fs/jffs2/erase.c
48921@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
48922 struct jffs2_unknown_node marker = {
48923 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
48924 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
48925- .totlen = cpu_to_je32(c->cleanmarker_size)
48926+ .totlen = cpu_to_je32(c->cleanmarker_size),
48927+ .hdr_crc = cpu_to_je32(0)
48928 };
48929
48930 jffs2_prealloc_raw_node_refs(c, jeb, 1);
48931diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
48932index a6597d6..41b30ec 100644
48933--- a/fs/jffs2/wbuf.c
48934+++ b/fs/jffs2/wbuf.c
48935@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
48936 {
48937 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
48938 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
48939- .totlen = constant_cpu_to_je32(8)
48940+ .totlen = constant_cpu_to_je32(8),
48941+ .hdr_crc = constant_cpu_to_je32(0)
48942 };
48943
48944 /*
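Note: both jffs2 hunks add an explicit .hdr_crc = 0 to the cleanmarker initializers. With C99 designated initializers the omitted member already reads as zero, so the change looks like documentation rather than behaviour — every wire-visible field of the on-flash node is now spelled out (the motivation is an assumption; the patch does not say). Illustration:

	/* hdr_crc is zero-initialized even when omitted... */
	struct jffs2_unknown_node m = {
		.magic    = cpu_to_je16(JFFS2_MAGIC_BITMASK),
		.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
		.totlen   = cpu_to_je32(8),
	};
	/* ...the patch simply makes that explicit:
	 *	.hdr_crc  = cpu_to_je32(0)
	 */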
48945diff --git a/fs/jfs/super.c b/fs/jfs/super.c
48946index 1a543be..d803c40 100644
48947--- a/fs/jfs/super.c
48948+++ b/fs/jfs/super.c
48949@@ -855,7 +855,7 @@ static int __init init_jfs_fs(void)
48950
48951 jfs_inode_cachep =
48952 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
48953- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
48954+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
48955 init_once);
48956 if (jfs_inode_cachep == NULL)
48957 return -ENOMEM;
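Note: SLAB_USERCOPY is the PAX_USERCOPY whitelist flag — only caches created with it may be the source or destination of copies between slab objects and userspace. jfs_ip objects hold inline data (short symlink bodies) that readlink copies straight out to userland, which is presumably why this cache needs whitelisting (the inline-data detail is an inference, not visible in the hunk). The general pattern:

	/* hypothetical cache whose objects user copies may touch */
	cache = kmem_cache_create("example_cache", sizeof(struct example),
				  0, SLAB_RECLAIM_ACCOUNT | SLAB_USERCOPY,
				  NULL);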
48958diff --git a/fs/libfs.c b/fs/libfs.c
48959index 7cc37ca..b3e3eec 100644
48960--- a/fs/libfs.c
48961+++ b/fs/libfs.c
48962@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
48963
48964 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
48965 struct dentry *next;
48966+ char d_name[sizeof(next->d_iname)];
48967+ const unsigned char *name;
48968+
48969 next = list_entry(p, struct dentry, d_u.d_child);
48970 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
48971 if (!simple_positive(next)) {
48972@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
48973
48974 spin_unlock(&next->d_lock);
48975 spin_unlock(&dentry->d_lock);
48976- if (filldir(dirent, next->d_name.name,
48977+ name = next->d_name.name;
48978+ if (name == next->d_iname) {
48979+ memcpy(d_name, name, next->d_name.len);
48980+ name = d_name;
48981+ }
48982+ if (filldir(dirent, name,
48983 next->d_name.len, filp->f_pos,
48984 next->d_inode->i_ino,
48985 dt_type(next->d_inode)) < 0)
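The dcache_readdir() change snapshots a short inline name (d_iname) into a stack buffer before the dentry lock is dropped, since filldir() runs unlocked and a concurrent rename could otherwise rewrite the bytes mid-copy. A simplified sketch of the snapshot-then-unlock pattern, with a pthread mutex standing in for the dentry spinlock and an arbitrary buffer size:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define NAME_INLINE_LEN 32

struct entry {
        pthread_mutex_t lock;
        char name[NAME_INLINE_LEN];
};

/* Copy the lock-protected name into a private buffer, then drop the
 * lock; the callback can now run without holding it, and a concurrent
 * rename cannot rewrite the bytes it sees. */
static void visit(struct entry *e, void (*cb)(const char *))
{
        char snapshot[NAME_INLINE_LEN];

        pthread_mutex_lock(&e->lock);
        memcpy(snapshot, e->name, sizeof(snapshot));
        pthread_mutex_unlock(&e->lock);

        cb(snapshot);
}

static void print_name(const char *name)
{
        printf("%s\n", name);
}

int main(void)
{
        struct entry e = { PTHREAD_MUTEX_INITIALIZER, "example" };

        visit(&e, print_name);
        return 0;
}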
48986diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
48987index 05d2912..760abfa 100644
48988--- a/fs/lockd/clntproc.c
48989+++ b/fs/lockd/clntproc.c
48990@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
48991 /*
48992 * Cookie counter for NLM requests
48993 */
48994-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
48995+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
48996
48997 void nlmclnt_next_cookie(struct nlm_cookie *c)
48998 {
48999- u32 cookie = atomic_inc_return(&nlm_cookie);
49000+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
49001
49002 memcpy(c->data, &cookie, 4);
49003 c->len=4;
49004diff --git a/fs/locks.c b/fs/locks.c
49005index a94e331..060bce3 100644
49006--- a/fs/locks.c
49007+++ b/fs/locks.c
49008@@ -2064,16 +2064,16 @@ void locks_remove_flock(struct file *filp)
49009 return;
49010
49011 if (filp->f_op && filp->f_op->flock) {
49012- struct file_lock fl = {
49013+ struct file_lock flock = {
49014 .fl_pid = current->tgid,
49015 .fl_file = filp,
49016 .fl_flags = FL_FLOCK,
49017 .fl_type = F_UNLCK,
49018 .fl_end = OFFSET_MAX,
49019 };
49020- filp->f_op->flock(filp, F_SETLKW, &fl);
49021- if (fl.fl_ops && fl.fl_ops->fl_release_private)
49022- fl.fl_ops->fl_release_private(&fl);
49023+ filp->f_op->flock(filp, F_SETLKW, &flock);
49024+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
49025+ flock.fl_ops->fl_release_private(&flock);
49026 }
49027
49028 lock_flocks();
49029diff --git a/fs/namei.c b/fs/namei.c
49030index 5f4cdf3..959a013 100644
49031--- a/fs/namei.c
49032+++ b/fs/namei.c
49033@@ -319,16 +319,32 @@ int generic_permission(struct inode *inode, int mask)
49034 if (ret != -EACCES)
49035 return ret;
49036
49037+#ifdef CONFIG_GRKERNSEC
49038+ /* we'll block if we have to log due to a denied capability use */
49039+ if (mask & MAY_NOT_BLOCK)
49040+ return -ECHILD;
49041+#endif
49042+
49043 if (S_ISDIR(inode->i_mode)) {
49044 /* DACs are overridable for directories */
49045- if (inode_capable(inode, CAP_DAC_OVERRIDE))
49046- return 0;
49047 if (!(mask & MAY_WRITE))
49048- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
49049+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
49050+ inode_capable(inode, CAP_DAC_READ_SEARCH))
49051 return 0;
49052+ if (inode_capable(inode, CAP_DAC_OVERRIDE))
49053+ return 0;
49054 return -EACCES;
49055 }
49056 /*
49057+ * Searching includes executable on directories, else just read.
49058+ */
49059+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
49060+ if (mask == MAY_READ)
49061+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
49062+ inode_capable(inode, CAP_DAC_READ_SEARCH))
49063+ return 0;
49064+
49065+ /*
49066 * Read/write DACs are always overridable.
49067 * Executable DACs are overridable when there is
49068 * at least one exec bit set.
49069@@ -337,14 +353,6 @@ int generic_permission(struct inode *inode, int mask)
49070 if (inode_capable(inode, CAP_DAC_OVERRIDE))
49071 return 0;
49072
49073- /*
49074- * Searching includes executable on directories, else just read.
49075- */
49076- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
49077- if (mask == MAY_READ)
49078- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
49079- return 0;
49080-
49081 return -EACCES;
49082 }
49083
49084@@ -826,7 +834,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
49085 {
49086 struct dentry *dentry = link->dentry;
49087 int error;
49088- char *s;
49089+ const char *s;
49090
49091 BUG_ON(nd->flags & LOOKUP_RCU);
49092
49093@@ -847,6 +855,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
49094 if (error)
49095 goto out_put_nd_path;
49096
49097+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
49098+ dentry->d_inode, dentry, nd->path.mnt)) {
49099+ error = -EACCES;
49100+ goto out_put_nd_path;
49101+ }
49102+
49103 nd->last_type = LAST_BIND;
49104 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
49105 error = PTR_ERR(*p);
49106@@ -1605,6 +1619,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
49107 break;
49108 res = walk_component(nd, path, &nd->last,
49109 nd->last_type, LOOKUP_FOLLOW);
49110+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
49111+ res = -EACCES;
49112 put_link(nd, &link, cookie);
49113 } while (res > 0);
49114
49115@@ -1703,7 +1719,7 @@ EXPORT_SYMBOL(full_name_hash);
49116 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
49117 {
49118 unsigned long a, b, adata, bdata, mask, hash, len;
49119- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
49120+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
49121
49122 hash = a = 0;
49123 len = -sizeof(unsigned long);
49124@@ -1993,6 +2009,8 @@ static int path_lookupat(int dfd, const char *name,
49125 if (err)
49126 break;
49127 err = lookup_last(nd, &path);
49128+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
49129+ err = -EACCES;
49130 put_link(nd, &link, cookie);
49131 }
49132 }
49133@@ -2000,6 +2018,21 @@ static int path_lookupat(int dfd, const char *name,
49134 if (!err)
49135 err = complete_walk(nd);
49136
49137+ if (!(nd->flags & LOOKUP_PARENT)) {
49138+#ifdef CONFIG_GRKERNSEC
49139+ if (flags & LOOKUP_RCU) {
49140+ if (!err)
49141+ path_put(&nd->path);
49142+ err = -ECHILD;
49143+ } else
49144+#endif
49145+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
49146+ if (!err)
49147+ path_put(&nd->path);
49148+ err = -ENOENT;
49149+ }
49150+ }
49151+
49152 if (!err && nd->flags & LOOKUP_DIRECTORY) {
49153 if (!nd->inode->i_op->lookup) {
49154 path_put(&nd->path);
49155@@ -2027,8 +2060,17 @@ static int filename_lookup(int dfd, struct filename *name,
49156 retval = path_lookupat(dfd, name->name,
49157 flags | LOOKUP_REVAL, nd);
49158
49159- if (likely(!retval))
49160+ if (likely(!retval)) {
49161+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
49162+#ifdef CONFIG_GRKERNSEC
49163+ if (flags & LOOKUP_RCU)
49164+ return -ECHILD;
49165+#endif
49166+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
49167+ return -ENOENT;
49168+ }
49169 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
49170+ }
49171 return retval;
49172 }
49173
49174@@ -2402,6 +2444,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
49175 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
49176 return -EPERM;
49177
49178+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
49179+ return -EPERM;
49180+ if (gr_handle_rawio(inode))
49181+ return -EPERM;
49182+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
49183+ return -EACCES;
49184+
49185 return 0;
49186 }
49187
49188@@ -2623,7 +2672,7 @@ looked_up:
49189 * cleared otherwise prior to returning.
49190 */
49191 static int lookup_open(struct nameidata *nd, struct path *path,
49192- struct file *file,
49193+ struct path *link, struct file *file,
49194 const struct open_flags *op,
49195 bool got_write, int *opened)
49196 {
49197@@ -2658,6 +2707,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
49198 /* Negative dentry, just create the file */
49199 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
49200 umode_t mode = op->mode;
49201+
49202+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
49203+ error = -EACCES;
49204+ goto out_dput;
49205+ }
49206+
49207+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
49208+ error = -EACCES;
49209+ goto out_dput;
49210+ }
49211+
49212 if (!IS_POSIXACL(dir->d_inode))
49213 mode &= ~current_umask();
49214 /*
49215@@ -2679,6 +2739,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
49216 nd->flags & LOOKUP_EXCL);
49217 if (error)
49218 goto out_dput;
49219+ else
49220+ gr_handle_create(dentry, nd->path.mnt);
49221 }
49222 out_no_open:
49223 path->dentry = dentry;
49224@@ -2693,7 +2755,7 @@ out_dput:
49225 /*
49226 * Handle the last step of open()
49227 */
49228-static int do_last(struct nameidata *nd, struct path *path,
49229+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
49230 struct file *file, const struct open_flags *op,
49231 int *opened, struct filename *name)
49232 {
49233@@ -2722,16 +2784,44 @@ static int do_last(struct nameidata *nd, struct path *path,
49234 error = complete_walk(nd);
49235 if (error)
49236 return error;
49237+#ifdef CONFIG_GRKERNSEC
49238+ if (nd->flags & LOOKUP_RCU) {
49239+ error = -ECHILD;
49240+ goto out;
49241+ }
49242+#endif
49243+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
49244+ error = -ENOENT;
49245+ goto out;
49246+ }
49247 audit_inode(name, nd->path.dentry, 0);
49248 if (open_flag & O_CREAT) {
49249 error = -EISDIR;
49250 goto out;
49251 }
49252+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
49253+ error = -EACCES;
49254+ goto out;
49255+ }
49256 goto finish_open;
49257 case LAST_BIND:
49258 error = complete_walk(nd);
49259 if (error)
49260 return error;
49261+#ifdef CONFIG_GRKERNSEC
49262+ if (nd->flags & LOOKUP_RCU) {
49263+ error = -ECHILD;
49264+ goto out;
49265+ }
49266+#endif
49267+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
49268+ error = -ENOENT;
49269+ goto out;
49270+ }
49271+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
49272+ error = -EACCES;
49273+ goto out;
49274+ }
49275 audit_inode(name, dir, 0);
49276 goto finish_open;
49277 }
49278@@ -2780,7 +2870,7 @@ retry_lookup:
49279 */
49280 }
49281 mutex_lock(&dir->d_inode->i_mutex);
49282- error = lookup_open(nd, path, file, op, got_write, opened);
49283+ error = lookup_open(nd, path, link, file, op, got_write, opened);
49284 mutex_unlock(&dir->d_inode->i_mutex);
49285
49286 if (error <= 0) {
49287@@ -2804,11 +2894,28 @@ retry_lookup:
49288 goto finish_open_created;
49289 }
49290
49291+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
49292+ error = -ENOENT;
49293+ goto exit_dput;
49294+ }
49295+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
49296+ error = -EACCES;
49297+ goto exit_dput;
49298+ }
49299+
49300 /*
49301 * create/update audit record if it already exists.
49302 */
49303- if (path->dentry->d_inode)
49304+ if (path->dentry->d_inode) {
49305+ /* only check here if O_CREAT is specified; all other checks need
49306+ to go into may_open */
49307+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
49308+ error = -EACCES;
49309+ goto exit_dput;
49310+ }
49311+
49312 audit_inode(name, path->dentry, 0);
49313+ }
49314
49315 /*
49316 * If atomic_open() acquired write access it is dropped now due to
49317@@ -2849,6 +2956,11 @@ finish_lookup:
49318 }
49319 }
49320 BUG_ON(inode != path->dentry->d_inode);
49321+ /* if we're resolving a symlink to another symlink */
49322+ if (link && gr_handle_symlink_owner(link, inode)) {
49323+ error = -EACCES;
49324+ goto out;
49325+ }
49326 return 1;
49327 }
49328
49329@@ -2858,7 +2970,6 @@ finish_lookup:
49330 save_parent.dentry = nd->path.dentry;
49331 save_parent.mnt = mntget(path->mnt);
49332 nd->path.dentry = path->dentry;
49333-
49334 }
49335 nd->inode = inode;
49336 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
49337@@ -2867,6 +2978,22 @@ finish_lookup:
49338 path_put(&save_parent);
49339 return error;
49340 }
49341+
49342+#ifdef CONFIG_GRKERNSEC
49343+ if (nd->flags & LOOKUP_RCU) {
49344+ error = -ECHILD;
49345+ goto out;
49346+ }
49347+#endif
49348+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
49349+ error = -ENOENT;
49350+ goto out;
49351+ }
49352+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
49353+ error = -EACCES;
49354+ goto out;
49355+ }
49356+
49357 error = -EISDIR;
49358 if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
49359 goto out;
49360@@ -2965,7 +3092,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
49361 if (unlikely(error))
49362 goto out;
49363
49364- error = do_last(nd, &path, file, op, &opened, pathname);
49365+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
49366 while (unlikely(error > 0)) { /* trailing symlink */
49367 struct path link = path;
49368 void *cookie;
49369@@ -2983,7 +3110,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
49370 error = follow_link(&link, nd, &cookie);
49371 if (unlikely(error))
49372 break;
49373- error = do_last(nd, &path, file, op, &opened, pathname);
49374+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
49375 put_link(nd, &link, cookie);
49376 }
49377 out:
49378@@ -3073,8 +3200,12 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
49379 goto unlock;
49380
49381 error = -EEXIST;
49382- if (dentry->d_inode)
49383+ if (dentry->d_inode) {
49384+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
49385+ error = -ENOENT;
49386+ }
49387 goto fail;
49388+ }
49389 /*
49390 * Special case - lookup gave negative, but... we had foo/bar/
49391 * From the vfs_mknod() POV we just have a negative dentry -
49392@@ -3125,6 +3256,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
49393 }
49394 EXPORT_SYMBOL(user_path_create);
49395
49396+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, int is_dir)
49397+{
49398+ struct filename *tmp = getname(pathname);
49399+ struct dentry *res;
49400+ if (IS_ERR(tmp))
49401+ return ERR_CAST(tmp);
49402+ res = kern_path_create(dfd, tmp->name, path, is_dir);
49403+ if (IS_ERR(res))
49404+ putname(tmp);
49405+ else
49406+ *to = tmp;
49407+ return res;
49408+}
49409+
49410 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
49411 {
49412 int error = may_create(dir, dentry);
49413@@ -3186,6 +3331,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
49414
49415 if (!IS_POSIXACL(path.dentry->d_inode))
49416 mode &= ~current_umask();
49417+
49418+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
49419+ error = -EPERM;
49420+ goto out;
49421+ }
49422+
49423+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
49424+ error = -EACCES;
49425+ goto out;
49426+ }
49427+
49428 error = security_path_mknod(&path, dentry, mode, dev);
49429 if (error)
49430 goto out;
49431@@ -3202,6 +3358,8 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
49432 break;
49433 }
49434 out:
49435+ if (!error)
49436+ gr_handle_create(dentry, path.mnt);
49437 done_path_create(&path, dentry);
49438 return error;
49439 }
49440@@ -3248,9 +3406,18 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
49441
49442 if (!IS_POSIXACL(path.dentry->d_inode))
49443 mode &= ~current_umask();
49444+
49445+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
49446+ error = -EACCES;
49447+ goto out;
49448+ }
49449+
49450 error = security_path_mkdir(&path, dentry, mode);
49451 if (!error)
49452 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
49453+ if (!error)
49454+ gr_handle_create(dentry, path.mnt);
49455+out:
49456 done_path_create(&path, dentry);
49457 return error;
49458 }
49459@@ -3327,6 +3494,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
49460 struct filename *name;
49461 struct dentry *dentry;
49462 struct nameidata nd;
49463+ ino_t saved_ino = 0;
49464+ dev_t saved_dev = 0;
49465
49466 name = user_path_parent(dfd, pathname, &nd);
49467 if (IS_ERR(name))
49468@@ -3358,10 +3527,21 @@ static long do_rmdir(int dfd, const char __user *pathname)
49469 error = -ENOENT;
49470 goto exit3;
49471 }
49472+
49473+ saved_ino = dentry->d_inode->i_ino;
49474+ saved_dev = gr_get_dev_from_dentry(dentry);
49475+
49476+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
49477+ error = -EACCES;
49478+ goto exit3;
49479+ }
49480+
49481 error = security_path_rmdir(&nd.path, dentry);
49482 if (error)
49483 goto exit3;
49484 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
49485+ if (!error && (saved_dev || saved_ino))
49486+ gr_handle_delete(saved_ino, saved_dev);
49487 exit3:
49488 dput(dentry);
49489 exit2:
49490@@ -3423,6 +3603,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
49491 struct dentry *dentry;
49492 struct nameidata nd;
49493 struct inode *inode = NULL;
49494+ ino_t saved_ino = 0;
49495+ dev_t saved_dev = 0;
49496
49497 name = user_path_parent(dfd, pathname, &nd);
49498 if (IS_ERR(name))
49499@@ -3448,10 +3630,22 @@ static long do_unlinkat(int dfd, const char __user *pathname)
49500 if (!inode)
49501 goto slashes;
49502 ihold(inode);
49503+
49504+ if (inode->i_nlink <= 1) {
49505+ saved_ino = inode->i_ino;
49506+ saved_dev = gr_get_dev_from_dentry(dentry);
49507+ }
49508+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
49509+ error = -EACCES;
49510+ goto exit2;
49511+ }
49512+
49513 error = security_path_unlink(&nd.path, dentry);
49514 if (error)
49515 goto exit2;
49516 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
49517+ if (!error && (saved_ino || saved_dev))
49518+ gr_handle_delete(saved_ino, saved_dev);
49519 exit2:
49520 dput(dentry);
49521 }
49522@@ -3523,9 +3717,17 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
49523 if (IS_ERR(dentry))
49524 goto out_putname;
49525
49526+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
49527+ error = -EACCES;
49528+ goto out;
49529+ }
49530+
49531 error = security_path_symlink(&path, dentry, from->name);
49532 if (!error)
49533 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
49534+ if (!error)
49535+ gr_handle_create(dentry, path.mnt);
49536+out:
49537 done_path_create(&path, dentry);
49538 out_putname:
49539 putname(from);
49540@@ -3595,6 +3797,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
49541 {
49542 struct dentry *new_dentry;
49543 struct path old_path, new_path;
49544+ struct filename *to = NULL;
49545 int how = 0;
49546 int error;
49547
49548@@ -3618,7 +3821,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
49549 if (error)
49550 return error;
49551
49552- new_dentry = user_path_create(newdfd, newname, &new_path, 0);
49553+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
49554 error = PTR_ERR(new_dentry);
49555 if (IS_ERR(new_dentry))
49556 goto out;
49557@@ -3629,11 +3832,28 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
49558 error = may_linkat(&old_path);
49559 if (unlikely(error))
49560 goto out_dput;
49561+
49562+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
49563+ old_path.dentry->d_inode,
49564+ old_path.dentry->d_inode->i_mode, to)) {
49565+ error = -EACCES;
49566+ goto out_dput;
49567+ }
49568+
49569+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
49570+ old_path.dentry, old_path.mnt, to)) {
49571+ error = -EACCES;
49572+ goto out_dput;
49573+ }
49574+
49575 error = security_path_link(old_path.dentry, &new_path, new_dentry);
49576 if (error)
49577 goto out_dput;
49578 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
49579+ if (!error)
49580+ gr_handle_create(new_dentry, new_path.mnt);
49581 out_dput:
49582+ putname(to);
49583 done_path_create(&new_path, new_dentry);
49584 out:
49585 path_put(&old_path);
49586@@ -3873,12 +4093,21 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
49587 if (new_dentry == trap)
49588 goto exit5;
49589
49590+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
49591+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
49592+ to);
49593+ if (error)
49594+ goto exit5;
49595+
49596 error = security_path_rename(&oldnd.path, old_dentry,
49597 &newnd.path, new_dentry);
49598 if (error)
49599 goto exit5;
49600 error = vfs_rename(old_dir->d_inode, old_dentry,
49601 new_dir->d_inode, new_dentry);
49602+ if (!error)
49603+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
49604+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
49605 exit5:
49606 dput(new_dentry);
49607 exit4:
49608@@ -3903,6 +4132,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
49609
49610 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
49611 {
49612+ char tmpbuf[64];
49613+ const char *newlink;
49614 int len;
49615
49616 len = PTR_ERR(link);
49617@@ -3912,7 +4143,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
49618 len = strlen(link);
49619 if (len > (unsigned) buflen)
49620 len = buflen;
49621- if (copy_to_user(buffer, link, len))
49622+
49623+ if (len < sizeof(tmpbuf)) {
49624+ memcpy(tmpbuf, link, len);
49625+ newlink = tmpbuf;
49626+ } else
49627+ newlink = link;
49628+
49629+ if (copy_to_user(buffer, newlink, len))
49630 len = -EFAULT;
49631 out:
49632 return len;
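The vfs_readlink() change above bounces link bodies shorter than 64 bytes through a stack buffer, so the memory handed to copy_to_user() is private to this call rather than a buffer another context might rewrite or free during the (possibly faulting) user copy. The shape of the bounce, sketched in userspace with memcpy in place of copy_to_user and illustrative sizes:

#include <stdio.h>
#include <string.h>

/* Copy short payloads into a stack buffer first, so the user-facing
 * copy never touches the shared source after the length was fixed. */
static int emit_link(char *dst, size_t dstlen, const char *link)
{
        char tmpbuf[64];
        const char *src = link;
        size_t len = strlen(link);

        if (len > dstlen)
                len = dstlen;
        if (len < sizeof(tmpbuf)) {
                memcpy(tmpbuf, link, len);
                src = tmpbuf;
        }
        memcpy(dst, src, len);  /* stands in for copy_to_user() */
        return (int)len;
}

int main(void)
{
        char out[32];
        int n = emit_link(out, sizeof(out), "symlink-target");

        printf("%.*s\n", n, out);
        return 0;
}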
49633diff --git a/fs/namespace.c b/fs/namespace.c
49634index 2496062..e26f6d6 100644
49635--- a/fs/namespace.c
49636+++ b/fs/namespace.c
49637@@ -1212,6 +1212,9 @@ static int do_umount(struct mount *mnt, int flags)
49638 if (!(sb->s_flags & MS_RDONLY))
49639 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
49640 up_write(&sb->s_umount);
49641+
49642+ gr_log_remount(mnt->mnt_devname, retval);
49643+
49644 return retval;
49645 }
49646
49647@@ -1231,6 +1234,9 @@ static int do_umount(struct mount *mnt, int flags)
49648 br_write_unlock(&vfsmount_lock);
49649 up_write(&namespace_sem);
49650 release_mounts(&umount_list);
49651+
49652+ gr_log_unmount(mnt->mnt_devname, retval);
49653+
49654 return retval;
49655 }
49656
49657@@ -2244,6 +2250,16 @@ long do_mount(const char *dev_name, const char *dir_name,
49658 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
49659 MS_STRICTATIME);
49660
49661+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
49662+ retval = -EPERM;
49663+ goto dput_out;
49664+ }
49665+
49666+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
49667+ retval = -EPERM;
49668+ goto dput_out;
49669+ }
49670+
49671 if (flags & MS_REMOUNT)
49672 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
49673 data_page);
49674@@ -2258,6 +2274,9 @@ long do_mount(const char *dev_name, const char *dir_name,
49675 dev_name, data_page);
49676 dput_out:
49677 path_put(&path);
49678+
49679+ gr_log_mount(dev_name, dir_name, retval);
49680+
49681 return retval;
49682 }
49683
49684@@ -2516,6 +2535,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
49685 if (error)
49686 goto out2;
49687
49688+ if (gr_handle_chroot_pivot()) {
49689+ error = -EPERM;
49690+ goto out2;
49691+ }
49692+
49693 get_fs_root(current->fs, &root);
49694 error = lock_mount(&old);
49695 if (error)
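The namespace.c hooks (gr_log_mount, gr_log_remount, gr_log_unmount) share one placement rule: they run after the operation has resolved and receive its return value, so both successes and failures are recorded. A trivial sketch of outcome logging in that style, with hypothetical names:

#include <stdio.h>

/* Record the outcome after the fact, success or failure alike. */
static void log_mount(const char *dev, const char *dir, int retval)
{
        fprintf(stderr, "mount %s on %s -> %d\n", dev, dir, retval);
}

static int do_mount(const char *dev, const char *dir)
{
        int retval = 0;         /* ...perform the actual mount here... */

        log_mount(dev, dir, retval);
        return retval;
}

int main(void)
{
        return do_mount("/dev/sda1", "/mnt");
}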
49696diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
49697index 6fa01ae..2790820 100644
49698--- a/fs/nfs/inode.c
49699+++ b/fs/nfs/inode.c
49700@@ -1029,16 +1029,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
49701 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
49702 }
49703
49704-static atomic_long_t nfs_attr_generation_counter;
49705+static atomic_long_unchecked_t nfs_attr_generation_counter;
49706
49707 static unsigned long nfs_read_attr_generation_counter(void)
49708 {
49709- return atomic_long_read(&nfs_attr_generation_counter);
49710+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
49711 }
49712
49713 unsigned long nfs_inc_attr_generation_counter(void)
49714 {
49715- return atomic_long_inc_return(&nfs_attr_generation_counter);
49716+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
49717 }
49718
49719 void nfs_fattr_init(struct nfs_fattr *fattr)
49720diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
49721index f59169e..fd7d359 100644
49722--- a/fs/nfsd/vfs.c
49723+++ b/fs/nfsd/vfs.c
49724@@ -941,7 +941,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
49725 } else {
49726 oldfs = get_fs();
49727 set_fs(KERNEL_DS);
49728- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
49729+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
49730 set_fs(oldfs);
49731 }
49732
49733@@ -1045,7 +1045,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
49734
49735 /* Write the data. */
49736 oldfs = get_fs(); set_fs(KERNEL_DS);
49737- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
49738+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
49739 set_fs(oldfs);
49740 if (host_err < 0)
49741 goto out_nfserr;
49742@@ -1587,7 +1587,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
49743 */
49744
49745 oldfs = get_fs(); set_fs(KERNEL_DS);
49746- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
49747+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
49748 set_fs(oldfs);
49749
49750 if (host_err < 0)
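__force_user is a grsecurity sparse annotation: with the address limit widened via set_fs(KERNEL_DS), a kernel buffer legitimately takes the place of a user pointer, and the forced cast documents that the address-space mismatch is deliberate rather than a bug. A compile-time sketch of how such annotations degrade to no-ops outside the static checker (macro spellings assumed; sparse itself uses these attributes):

#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

#include <stddef.h>

/* Normally accepts only pointers tagged as user memory. */
static long consume_user(const char __user *buf, size_t len)
{
        (void)buf;
        return (long)len;
}

int main(void)
{
        char kbuf[8] = "data";
        /* The forced cast silences the checker for this one deliberate
         * kernel-buffer-as-user-pointer call. */
        return consume_user((__force const char __user *)kbuf, 4) == 4 ? 0 : 1;
}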
49751diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
49752index 6fcaeb8..9d16d04 100644
49753--- a/fs/notify/fanotify/fanotify_user.c
49754+++ b/fs/notify/fanotify/fanotify_user.c
49755@@ -250,8 +250,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
49756
49757 fd = fanotify_event_metadata.fd;
49758 ret = -EFAULT;
49759- if (copy_to_user(buf, &fanotify_event_metadata,
49760- fanotify_event_metadata.event_len))
49761+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
49762+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
49763 goto out_close_fd;
49764
49765 ret = prepare_for_access_response(group, event, fd);
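The fanotify fix validates the self-described event_len against the real size of the metadata structure before copying, so a too-large length can no longer leak the kernel stack bytes that sit beyond it. A userspace sketch of the bound (hypothetical event_metadata layout, memcpy in place of copy_to_user):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct event_metadata {
        uint32_t event_len;
        int32_t  fd;
        uint64_t mask;
};

/* Refuse any self-described length larger than the object itself (or
 * the destination), so the copy cannot spill neighboring memory. */
static int copy_event(void *buf, size_t buflen, const struct event_metadata *ev)
{
        if (ev->event_len > sizeof(*ev) || ev->event_len > buflen)
                return -1;
        memcpy(buf, ev, ev->event_len);
        return 0;
}

int main(void)
{
        struct event_metadata ev = { .event_len = sizeof(ev), .fd = 3 };
        char buf[sizeof(ev)];

        printf("ok: %d\n", copy_event(buf, sizeof(buf), &ev));
        ev.event_len = 4096;    /* corrupted length must be rejected */
        printf("rejected: %d\n", copy_event(buf, sizeof(buf), &ev));
        return 0;
}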
49766diff --git a/fs/notify/notification.c b/fs/notify/notification.c
49767index c887b13..0fdf472 100644
49768--- a/fs/notify/notification.c
49769+++ b/fs/notify/notification.c
49770@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
49771 * get set to 0 so it will never get 'freed'
49772 */
49773 static struct fsnotify_event *q_overflow_event;
49774-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
49775+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
49776
49777 /**
49778 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
49779@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
49780 */
49781 u32 fsnotify_get_cookie(void)
49782 {
49783- return atomic_inc_return(&fsnotify_sync_cookie);
49784+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
49785 }
49786 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
49787
49788diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
49789index 99e3610..02c1068 100644
49790--- a/fs/ntfs/dir.c
49791+++ b/fs/ntfs/dir.c
49792@@ -1329,7 +1329,7 @@ find_next_index_buffer:
49793 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
49794 ~(s64)(ndir->itype.index.block_size - 1)));
49795 /* Bounds checks. */
49796- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
49797+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
49798 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
49799 "inode 0x%lx or driver bug.", vdir->i_ino);
49800 goto err_out;
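The ntfs check gains a !kaddr guard ahead of the existing range comparison: pointer arithmetic on a NULL base would leave both bounds tests meaningless, so the null case is rejected explicitly first. The guard in isolation, with a stand-in page size:

#include <stdint.h>

#define PAGE_CACHE_SIZE 4096

/* Reject a NULL base before doing arithmetic on it; only then are the
 * two range comparisons well-defined. */
static int index_in_page(const uint8_t *kaddr, const uint8_t *ia)
{
        if (!kaddr || ia < kaddr || ia > kaddr + PAGE_CACHE_SIZE)
                return 0;
        return 1;
}

int main(void)
{
        uint8_t page[PAGE_CACHE_SIZE];

        return index_in_page(page, page + 16) ? 0 : 1;
}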
49801diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
49802index 1ecf464..e1ff8bf 100644
49803--- a/fs/ntfs/file.c
49804+++ b/fs/ntfs/file.c
49805@@ -2232,6 +2232,6 @@ const struct inode_operations ntfs_file_inode_ops = {
49806 #endif /* NTFS_RW */
49807 };
49808
49809-const struct file_operations ntfs_empty_file_ops = {};
49810+const struct file_operations ntfs_empty_file_ops __read_only;
49811
49812-const struct inode_operations ntfs_empty_inode_ops = {};
49813+const struct inode_operations ntfs_empty_inode_ops __read_only;
49814diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
49815index a9f78c7..ed8a381 100644
49816--- a/fs/ocfs2/localalloc.c
49817+++ b/fs/ocfs2/localalloc.c
49818@@ -1279,7 +1279,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
49819 goto bail;
49820 }
49821
49822- atomic_inc(&osb->alloc_stats.moves);
49823+ atomic_inc_unchecked(&osb->alloc_stats.moves);
49824
49825 bail:
49826 if (handle)
49827diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
49828index d355e6e..578d905 100644
49829--- a/fs/ocfs2/ocfs2.h
49830+++ b/fs/ocfs2/ocfs2.h
49831@@ -235,11 +235,11 @@ enum ocfs2_vol_state
49832
49833 struct ocfs2_alloc_stats
49834 {
49835- atomic_t moves;
49836- atomic_t local_data;
49837- atomic_t bitmap_data;
49838- atomic_t bg_allocs;
49839- atomic_t bg_extends;
49840+ atomic_unchecked_t moves;
49841+ atomic_unchecked_t local_data;
49842+ atomic_unchecked_t bitmap_data;
49843+ atomic_unchecked_t bg_allocs;
49844+ atomic_unchecked_t bg_extends;
49845 };
49846
49847 enum ocfs2_local_alloc_state
49848diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
49849index f169da4..9112253 100644
49850--- a/fs/ocfs2/suballoc.c
49851+++ b/fs/ocfs2/suballoc.c
49852@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
49853 mlog_errno(status);
49854 goto bail;
49855 }
49856- atomic_inc(&osb->alloc_stats.bg_extends);
49857+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
49858
49859 /* You should never ask for this much metadata */
49860 BUG_ON(bits_wanted >
49861@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
49862 mlog_errno(status);
49863 goto bail;
49864 }
49865- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
49866+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
49867
49868 *suballoc_loc = res.sr_bg_blkno;
49869 *suballoc_bit_start = res.sr_bit_offset;
49870@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
49871 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
49872 res->sr_bits);
49873
49874- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
49875+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
49876
49877 BUG_ON(res->sr_bits != 1);
49878
49879@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
49880 mlog_errno(status);
49881 goto bail;
49882 }
49883- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
49884+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
49885
49886 BUG_ON(res.sr_bits != 1);
49887
49888@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
49889 cluster_start,
49890 num_clusters);
49891 if (!status)
49892- atomic_inc(&osb->alloc_stats.local_data);
49893+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
49894 } else {
49895 if (min_clusters > (osb->bitmap_cpg - 1)) {
49896 /* The only paths asking for contiguousness
49897@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
49898 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
49899 res.sr_bg_blkno,
49900 res.sr_bit_offset);
49901- atomic_inc(&osb->alloc_stats.bitmap_data);
49902+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
49903 *num_clusters = res.sr_bits;
49904 }
49905 }
49906diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
49907index 0e91ec2..f4b3fc6 100644
49908--- a/fs/ocfs2/super.c
49909+++ b/fs/ocfs2/super.c
49910@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
49911 "%10s => GlobalAllocs: %d LocalAllocs: %d "
49912 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
49913 "Stats",
49914- atomic_read(&osb->alloc_stats.bitmap_data),
49915- atomic_read(&osb->alloc_stats.local_data),
49916- atomic_read(&osb->alloc_stats.bg_allocs),
49917- atomic_read(&osb->alloc_stats.moves),
49918- atomic_read(&osb->alloc_stats.bg_extends));
49919+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
49920+ atomic_read_unchecked(&osb->alloc_stats.local_data),
49921+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
49922+ atomic_read_unchecked(&osb->alloc_stats.moves),
49923+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
49924
49925 out += snprintf(buf + out, len - out,
49926 "%10s => State: %u Descriptor: %llu Size: %u bits "
49927@@ -2121,11 +2121,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
49928 spin_lock_init(&osb->osb_xattr_lock);
49929 ocfs2_init_steal_slots(osb);
49930
49931- atomic_set(&osb->alloc_stats.moves, 0);
49932- atomic_set(&osb->alloc_stats.local_data, 0);
49933- atomic_set(&osb->alloc_stats.bitmap_data, 0);
49934- atomic_set(&osb->alloc_stats.bg_allocs, 0);
49935- atomic_set(&osb->alloc_stats.bg_extends, 0);
49936+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
49937+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
49938+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
49939+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
49940+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
49941
49942 /* Copy the blockcheck stats from the superblock probe */
49943 osb->osb_ecc_stats = *stats;
49944diff --git a/fs/open.c b/fs/open.c
49945index 59071f5..c6229a0 100644
49946--- a/fs/open.c
49947+++ b/fs/open.c
49948@@ -31,6 +31,8 @@
49949 #include <linux/ima.h>
49950 #include <linux/dnotify.h>
49951
49952+#define CREATE_TRACE_POINTS
49953+#include <trace/events/fs.h>
49954 #include "internal.h"
49955
49956 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
49957@@ -112,6 +114,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
49958 error = locks_verify_truncate(inode, NULL, length);
49959 if (!error)
49960 error = security_path_truncate(&path);
49961+
49962+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
49963+ error = -EACCES;
49964+
49965 if (!error)
49966 error = do_truncate(path.dentry, length, 0, NULL);
49967
49968@@ -362,6 +368,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
49969 if (__mnt_is_readonly(path.mnt))
49970 res = -EROFS;
49971
49972+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
49973+ res = -EACCES;
49974+
49975 out_path_release:
49976 path_put(&path);
49977 out:
49978@@ -388,6 +397,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
49979 if (error)
49980 goto dput_and_out;
49981
49982+ gr_log_chdir(path.dentry, path.mnt);
49983+
49984 set_fs_pwd(current->fs, &path);
49985
49986 dput_and_out:
49987@@ -413,6 +424,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
49988 goto out_putf;
49989
49990 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
49991+
49992+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
49993+ error = -EPERM;
49994+
49995+ if (!error)
49996+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
49997+
49998 if (!error)
49999 set_fs_pwd(current->fs, &f.file->f_path);
50000 out_putf:
50001@@ -441,7 +459,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
50002 if (error)
50003 goto dput_and_out;
50004
50005+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
50006+ goto dput_and_out;
50007+
50008 set_fs_root(current->fs, &path);
50009+
50010+ gr_handle_chroot_chdir(&path);
50011+
50012 error = 0;
50013 dput_and_out:
50014 path_put(&path);
50015@@ -459,6 +483,16 @@ static int chmod_common(struct path *path, umode_t mode)
50016 if (error)
50017 return error;
50018 mutex_lock(&inode->i_mutex);
50019+
50020+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
50021+ error = -EACCES;
50022+ goto out_unlock;
50023+ }
50024+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
50025+ error = -EACCES;
50026+ goto out_unlock;
50027+ }
50028+
50029 error = security_path_chmod(path, mode);
50030 if (error)
50031 goto out_unlock;
50032@@ -514,6 +548,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
50033 uid = make_kuid(current_user_ns(), user);
50034 gid = make_kgid(current_user_ns(), group);
50035
50036+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
50037+ return -EACCES;
50038+
50039 newattrs.ia_valid = ATTR_CTIME;
50040 if (user != (uid_t) -1) {
50041 if (!uid_valid(uid))
50042@@ -925,6 +962,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
50043 } else {
50044 fsnotify_open(f);
50045 fd_install(fd, f);
50046+ trace_do_sys_open(tmp->name, flags, mode);
50047 }
50048 }
50049 putname(tmp);
50050diff --git a/fs/pipe.c b/fs/pipe.c
50051index bd3479d..fb92c4d 100644
50052--- a/fs/pipe.c
50053+++ b/fs/pipe.c
50054@@ -438,9 +438,9 @@ redo:
50055 }
50056 if (bufs) /* More to do? */
50057 continue;
50058- if (!pipe->writers)
50059+ if (!atomic_read(&pipe->writers))
50060 break;
50061- if (!pipe->waiting_writers) {
50062+ if (!atomic_read(&pipe->waiting_writers)) {
50063 /* syscall merging: Usually we must not sleep
50064 * if O_NONBLOCK is set, or if we got some data.
50065 * But if a writer sleeps in kernel space, then
50066@@ -504,7 +504,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
50067 mutex_lock(&inode->i_mutex);
50068 pipe = inode->i_pipe;
50069
50070- if (!pipe->readers) {
50071+ if (!atomic_read(&pipe->readers)) {
50072 send_sig(SIGPIPE, current, 0);
50073 ret = -EPIPE;
50074 goto out;
50075@@ -553,7 +553,7 @@ redo1:
50076 for (;;) {
50077 int bufs;
50078
50079- if (!pipe->readers) {
50080+ if (!atomic_read(&pipe->readers)) {
50081 send_sig(SIGPIPE, current, 0);
50082 if (!ret)
50083 ret = -EPIPE;
50084@@ -644,9 +644,9 @@ redo2:
50085 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
50086 do_wakeup = 0;
50087 }
50088- pipe->waiting_writers++;
50089+ atomic_inc(&pipe->waiting_writers);
50090 pipe_wait(pipe);
50091- pipe->waiting_writers--;
50092+ atomic_dec(&pipe->waiting_writers);
50093 }
50094 out:
50095 mutex_unlock(&inode->i_mutex);
50096@@ -716,7 +716,7 @@ pipe_poll(struct file *filp, poll_table *wait)
50097 mask = 0;
50098 if (filp->f_mode & FMODE_READ) {
50099 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
50100- if (!pipe->writers && filp->f_version != pipe->w_counter)
50101+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
50102 mask |= POLLHUP;
50103 }
50104
50105@@ -726,7 +726,7 @@ pipe_poll(struct file *filp, poll_table *wait)
50106 * Most Unices do not set POLLERR for FIFOs but on Linux they
50107 * behave exactly like pipes for poll().
50108 */
50109- if (!pipe->readers)
50110+ if (!atomic_read(&pipe->readers))
50111 mask |= POLLERR;
50112 }
50113
50114@@ -740,10 +740,10 @@ pipe_release(struct inode *inode, int decr, int decw)
50115
50116 mutex_lock(&inode->i_mutex);
50117 pipe = inode->i_pipe;
50118- pipe->readers -= decr;
50119- pipe->writers -= decw;
50120+ atomic_sub(decr, &pipe->readers);
50121+ atomic_sub(decw, &pipe->writers);
50122
50123- if (!pipe->readers && !pipe->writers) {
50124+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
50125 free_pipe_info(inode);
50126 } else {
50127 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
50128@@ -833,7 +833,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
50129
50130 if (inode->i_pipe) {
50131 ret = 0;
50132- inode->i_pipe->readers++;
50133+ atomic_inc(&inode->i_pipe->readers);
50134 }
50135
50136 mutex_unlock(&inode->i_mutex);
50137@@ -850,7 +850,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
50138
50139 if (inode->i_pipe) {
50140 ret = 0;
50141- inode->i_pipe->writers++;
50142+ atomic_inc(&inode->i_pipe->writers);
50143 }
50144
50145 mutex_unlock(&inode->i_mutex);
50146@@ -868,9 +868,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
50147 if (inode->i_pipe) {
50148 ret = 0;
50149 if (filp->f_mode & FMODE_READ)
50150- inode->i_pipe->readers++;
50151+ atomic_inc(&inode->i_pipe->readers);
50152 if (filp->f_mode & FMODE_WRITE)
50153- inode->i_pipe->writers++;
50154+ atomic_inc(&inode->i_pipe->writers);
50155 }
50156
50157 mutex_unlock(&inode->i_mutex);
50158@@ -962,7 +962,7 @@ void free_pipe_info(struct inode *inode)
50159 inode->i_pipe = NULL;
50160 }
50161
50162-static struct vfsmount *pipe_mnt __read_mostly;
50163+struct vfsmount *pipe_mnt __read_mostly;
50164
50165 /*
50166 * pipefs_dname() is called from d_path().
50167@@ -992,7 +992,8 @@ static struct inode * get_pipe_inode(void)
50168 goto fail_iput;
50169 inode->i_pipe = pipe;
50170
50171- pipe->readers = pipe->writers = 1;
50172+ atomic_set(&pipe->readers, 1);
50173+ atomic_set(&pipe->writers, 1);
50174 inode->i_fop = &rdwr_pipefifo_fops;
50175
50176 /*
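The pipe conversion turns the readers, writers, and waiting_writers counts into atomics, presumably so that code paths added elsewhere in the patch can read them without holding i_mutex while concurrent open/release still cannot lose an update. A reduced userspace model with C11 atomics (field set trimmed for illustration):

#include <stdatomic.h>
#include <stdio.h>

struct pipe_state {
        atomic_int readers;
        atomic_int writers;
};

static void reader_open(struct pipe_state *p)
{
        atomic_fetch_add(&p->readers, 1);
}

static void reader_release(struct pipe_state *p)
{
        atomic_fetch_sub(&p->readers, 1);
}

/* Safe to evaluate without the pipe mutex: both loads are atomic. */
static int pipe_unused(struct pipe_state *p)
{
        return atomic_load(&p->readers) == 0 && atomic_load(&p->writers) == 0;
}

int main(void)
{
        struct pipe_state ps = { 0 };

        reader_open(&ps);
        reader_release(&ps);
        printf("unused: %d\n", pipe_unused(&ps));
        return 0;
}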
50177diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
50178index 15af622..0e9f4467 100644
50179--- a/fs/proc/Kconfig
50180+++ b/fs/proc/Kconfig
50181@@ -30,12 +30,12 @@ config PROC_FS
50182
50183 config PROC_KCORE
50184 bool "/proc/kcore support" if !ARM
50185- depends on PROC_FS && MMU
50186+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
50187
50188 config PROC_VMCORE
50189 bool "/proc/vmcore support"
50190- depends on PROC_FS && CRASH_DUMP
50191- default y
50192+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
50193+ default n
50194 help
50195 Exports the dump image of crashed kernel in ELF format.
50196
50197@@ -59,8 +59,8 @@ config PROC_SYSCTL
50198 limited in memory.
50199
50200 config PROC_PAGE_MONITOR
50201- default y
50202- depends on PROC_FS && MMU
50203+ default n
50204+ depends on PROC_FS && MMU && !GRKERNSEC
50205 bool "Enable /proc page monitoring" if EXPERT
50206 help
50207 Various /proc files exist to monitor process memory utilization:
50208diff --git a/fs/proc/array.c b/fs/proc/array.c
50209index bd31e02..15cae71 100644
50210--- a/fs/proc/array.c
50211+++ b/fs/proc/array.c
50212@@ -60,6 +60,7 @@
50213 #include <linux/tty.h>
50214 #include <linux/string.h>
50215 #include <linux/mman.h>
50216+#include <linux/grsecurity.h>
50217 #include <linux/proc_fs.h>
50218 #include <linux/ioport.h>
50219 #include <linux/uaccess.h>
50220@@ -346,6 +347,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
50221 seq_putc(m, '\n');
50222 }
50223
50224+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
50225+static inline void task_pax(struct seq_file *m, struct task_struct *p)
50226+{
50227+ if (p->mm)
50228+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
50229+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
50230+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
50231+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
50232+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
50233+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
50234+ else
50235+ seq_printf(m, "PaX:\t-----\n");
50236+}
50237+#endif
50238+
50239 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
50240 struct pid *pid, struct task_struct *task)
50241 {
50242@@ -363,9 +379,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
50243 task_cpus_allowed(m, task);
50244 cpuset_task_status_allowed(m, task);
50245 task_context_switch_counts(m, task);
50246+
50247+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
50248+ task_pax(m, task);
50249+#endif
50250+
50251+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
50252+ task_grsec_rbac(m, task);
50253+#endif
50254+
50255 return 0;
50256 }
50257
50258+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50259+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
50260+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
50261+ _mm->pax_flags & MF_PAX_SEGMEXEC))
50262+#endif
50263+
50264 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
50265 struct pid *pid, struct task_struct *task, int whole)
50266 {
50267@@ -387,6 +418,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
50268 char tcomm[sizeof(task->comm)];
50269 unsigned long flags;
50270
50271+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50272+ if (current->exec_id != m->exec_id) {
50273+ gr_log_badprocpid("stat");
50274+ return 0;
50275+ }
50276+#endif
50277+
50278 state = *get_task_state(task);
50279 vsize = eip = esp = 0;
50280 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
50281@@ -458,6 +496,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
50282 gtime = task->gtime;
50283 }
50284
50285+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50286+ if (PAX_RAND_FLAGS(mm)) {
50287+ eip = 0;
50288+ esp = 0;
50289+ wchan = 0;
50290+ }
50291+#endif
50292+#ifdef CONFIG_GRKERNSEC_HIDESYM
50293+ wchan = 0;
50294+ eip = 0;
50295+ esp = 0;
50296+#endif
50297+
50298 /* scale priority and nice values from timeslices to -20..20 */
50299 /* to make it look like a "normal" Unix priority/nice value */
50300 priority = task_prio(task);
50301@@ -494,9 +545,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
50302 seq_put_decimal_ull(m, ' ', vsize);
50303 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
50304 seq_put_decimal_ull(m, ' ', rsslim);
50305+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50306+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
50307+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
50308+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
50309+#else
50310 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
50311 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
50312 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
50313+#endif
50314 seq_put_decimal_ull(m, ' ', esp);
50315 seq_put_decimal_ull(m, ' ', eip);
50316 /* The signal information here is obsolete.
50317@@ -518,7 +575,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
50318 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
50319 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
50320
50321- if (mm && permitted) {
50322+ if (mm && permitted
50323+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50324+ && !PAX_RAND_FLAGS(mm)
50325+#endif
50326+ ) {
50327 seq_put_decimal_ull(m, ' ', mm->start_data);
50328 seq_put_decimal_ull(m, ' ', mm->end_data);
50329 seq_put_decimal_ull(m, ' ', mm->start_brk);
50330@@ -556,8 +617,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
50331 struct pid *pid, struct task_struct *task)
50332 {
50333 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
50334- struct mm_struct *mm = get_task_mm(task);
50335+ struct mm_struct *mm;
50336
50337+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50338+ if (current->exec_id != m->exec_id) {
50339+ gr_log_badprocpid("statm");
50340+ return 0;
50341+ }
50342+#endif
50343+ mm = get_task_mm(task);
50344 if (mm) {
50345 size = task_statm(mm, &shared, &text, &data, &resident);
50346 mmput(mm);
50347@@ -580,6 +648,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
50348 return 0;
50349 }
50350
50351+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
50352+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
50353+{
50354+ return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
50355+}
50356+#endif
50357+
50358 #ifdef CONFIG_CHECKPOINT_RESTORE
50359 static struct pid *
50360 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
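Under GRKERNSEC_PROC_MEMMAP, /proc files such as stat, statm, mem, and environ are stamped at open time with the opener's exec_id and return empty reads once that id no longer matches, so a descriptor opened before a privilege-changing exec() cannot keep reading the new image's data. A toy model of the staleness check, with a counter simulating exec generations:

#include <stdio.h>

/* One global generation counter simulates the per-task exec_id. */
static unsigned long current_exec_id = 1;

struct proc_handle {
        unsigned long exec_id;  /* generation captured at open time */
};

static struct proc_handle proc_open(void)
{
        return (struct proc_handle){ .exec_id = current_exec_id };
}

static int proc_read(const struct proc_handle *h)
{
        if (h->exec_id != current_exec_id)
                return 0;       /* stale handle: act as an empty file */
        return 1;               /* fresh: serve the real contents */
}

int main(void)
{
        struct proc_handle h = proc_open();

        printf("before exec: %d\n", proc_read(&h));
        current_exec_id++;      /* simulate exec() bumping the generation */
        printf("after exec:  %d\n", proc_read(&h));
        return 0;
}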
50361diff --git a/fs/proc/base.c b/fs/proc/base.c
50362index 9e28356..c485b3c 100644
50363--- a/fs/proc/base.c
50364+++ b/fs/proc/base.c
50365@@ -111,6 +111,14 @@ struct pid_entry {
50366 union proc_op op;
50367 };
50368
50369+struct getdents_callback {
50370+ struct linux_dirent __user * current_dir;
50371+ struct linux_dirent __user * previous;
50372+ struct file * file;
50373+ int count;
50374+ int error;
50375+};
50376+
50377 #define NOD(NAME, MODE, IOP, FOP, OP) { \
50378 .name = (NAME), \
50379 .len = sizeof(NAME) - 1, \
50380@@ -208,6 +216,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
50381 if (!mm->arg_end)
50382 goto out_mm; /* Shh! No looking before we're done */
50383
50384+ if (gr_acl_handle_procpidmem(task))
50385+ goto out_mm;
50386+
50387 len = mm->arg_end - mm->arg_start;
50388
50389 if (len > PAGE_SIZE)
50390@@ -235,12 +246,28 @@ out:
50391 return res;
50392 }
50393
50394+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50395+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
50396+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
50397+ _mm->pax_flags & MF_PAX_SEGMEXEC))
50398+#endif
50399+
50400 static int proc_pid_auxv(struct task_struct *task, char *buffer)
50401 {
50402 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
50403 int res = PTR_ERR(mm);
50404 if (mm && !IS_ERR(mm)) {
50405 unsigned int nwords = 0;
50406+
50407+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50408+ /* allow if we're currently ptracing this task */
50409+ if (PAX_RAND_FLAGS(mm) &&
50410+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
50411+ mmput(mm);
50412+ return 0;
50413+ }
50414+#endif
50415+
50416 do {
50417 nwords += 2;
50418 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
50419@@ -254,7 +281,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
50420 }
50421
50422
50423-#ifdef CONFIG_KALLSYMS
50424+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
50425 /*
50426 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
50427 * Returns the resolved symbol. If that fails, simply return the address.
50428@@ -293,7 +320,7 @@ static void unlock_trace(struct task_struct *task)
50429 mutex_unlock(&task->signal->cred_guard_mutex);
50430 }
50431
50432-#ifdef CONFIG_STACKTRACE
50433+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
50434
50435 #define MAX_STACK_TRACE_DEPTH 64
50436
50437@@ -485,7 +512,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
50438 return count;
50439 }
50440
50441-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
50442+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
50443 static int proc_pid_syscall(struct task_struct *task, char *buffer)
50444 {
50445 long nr;
50446@@ -514,7 +541,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
50447 /************************************************************************/
50448
50449 /* permission checks */
50450-static int proc_fd_access_allowed(struct inode *inode)
50451+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
50452 {
50453 struct task_struct *task;
50454 int allowed = 0;
50455@@ -524,7 +551,10 @@ static int proc_fd_access_allowed(struct inode *inode)
50456 */
50457 task = get_proc_task(inode);
50458 if (task) {
50459- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
50460+ if (log)
50461+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
50462+ else
50463+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
50464 put_task_struct(task);
50465 }
50466 return allowed;
50467@@ -562,10 +592,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
50468 struct task_struct *task,
50469 int hide_pid_min)
50470 {
50471+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
50472+ return false;
50473+
50474+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50475+ rcu_read_lock();
50476+ {
50477+ const struct cred *tmpcred = current_cred();
50478+ const struct cred *cred = __task_cred(task);
50479+
50480+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
50481+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
50482+ || in_group_p(grsec_proc_gid)
50483+#endif
50484+ ) {
50485+ rcu_read_unlock();
50486+ return true;
50487+ }
50488+ }
50489+ rcu_read_unlock();
50490+
50491+ if (!pid->hide_pid)
50492+ return false;
50493+#endif
50494+
50495 if (pid->hide_pid < hide_pid_min)
50496 return true;
50497 if (in_group_p(pid->pid_gid))
50498 return true;
50499+
50500 return ptrace_may_access(task, PTRACE_MODE_READ);
50501 }
50502
50503@@ -583,7 +638,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
50504 put_task_struct(task);
50505
50506 if (!has_perms) {
50507+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50508+ {
50509+#else
50510 if (pid->hide_pid == 2) {
50511+#endif
50512 /*
50513 * Let's make getdents(), stat(), and open()
50514 * consistent with each other. If a process
50515@@ -681,6 +740,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
50516 if (!task)
50517 return -ESRCH;
50518
50519+ if (gr_acl_handle_procpidmem(task)) {
50520+ put_task_struct(task);
50521+ return -EPERM;
50522+ }
50523+
50524 mm = mm_access(task, mode);
50525 put_task_struct(task);
50526
50527@@ -696,6 +760,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
50528
50529 file->private_data = mm;
50530
50531+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50532+ file->f_version = current->exec_id;
50533+#endif
50534+
50535 return 0;
50536 }
50537
50538@@ -717,6 +785,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
50539 ssize_t copied;
50540 char *page;
50541
50542+#ifdef CONFIG_GRKERNSEC
50543+ if (write)
50544+ return -EPERM;
50545+#endif
50546+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50547+ if (file->f_version != current->exec_id) {
50548+ gr_log_badprocpid("mem");
50549+ return 0;
50550+ }
50551+#endif
50552+
50553 if (!mm)
50554 return 0;
50555
50556@@ -821,6 +900,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
50557 if (!mm)
50558 return 0;
50559
50560+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50561+ if (file->f_version != current->exec_id) {
50562+ gr_log_badprocpid("environ");
50563+ return 0;
50564+ }
50565+#endif
50566+
50567 page = (char *)__get_free_page(GFP_TEMPORARY);
50568 if (!page)
50569 return -ENOMEM;
50570@@ -1436,7 +1522,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
50571 int error = -EACCES;
50572
50573 /* Are we allowed to snoop on the tasks file descriptors? */
50574- if (!proc_fd_access_allowed(inode))
50575+ if (!proc_fd_access_allowed(inode, 0))
50576 goto out;
50577
50578 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
50579@@ -1480,8 +1566,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
50580 struct path path;
50581
50582 /* Are we allowed to snoop on the tasks file descriptors? */
50583- if (!proc_fd_access_allowed(inode))
50584- goto out;
50585+ /* Logging this access is needed for learning on Chromium to work
50586+ properly, but we don't want to flood the logs from 'ps', which does a
50587+ readlink on /proc/fd/2 of every task in the listing; nor do we want
50588+ 'ps' to learn CAP_SYS_PTRACE, as it isn't necessary for its basic
50589+ functionality. */
50590+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
50591+ if (!proc_fd_access_allowed(inode, 0))
50592+ goto out;
50593+ } else {
50594+ if (!proc_fd_access_allowed(inode, 1))
50595+ goto out;
50596+ }
50597
50598 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
50599 if (error)
50600@@ -1531,7 +1627,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
50601 rcu_read_lock();
50602 cred = __task_cred(task);
50603 inode->i_uid = cred->euid;
50604+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
50605+ inode->i_gid = grsec_proc_gid;
50606+#else
50607 inode->i_gid = cred->egid;
50608+#endif
50609 rcu_read_unlock();
50610 }
50611 security_task_to_inode(task, inode);
50612@@ -1567,10 +1667,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
50613 return -ENOENT;
50614 }
50615 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
50616+#ifdef CONFIG_GRKERNSEC_PROC_USER
50617+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
50618+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50619+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
50620+#endif
50621 task_dumpable(task)) {
50622 cred = __task_cred(task);
50623 stat->uid = cred->euid;
50624+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
50625+ stat->gid = grsec_proc_gid;
50626+#else
50627 stat->gid = cred->egid;
50628+#endif
50629 }
50630 }
50631 rcu_read_unlock();
50632@@ -1608,11 +1717,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
50633
50634 if (task) {
50635 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
50636+#ifdef CONFIG_GRKERNSEC_PROC_USER
50637+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
50638+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50639+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
50640+#endif
50641 task_dumpable(task)) {
50642 rcu_read_lock();
50643 cred = __task_cred(task);
50644 inode->i_uid = cred->euid;
50645+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
50646+ inode->i_gid = grsec_proc_gid;
50647+#else
50648 inode->i_gid = cred->egid;
50649+#endif
50650 rcu_read_unlock();
50651 } else {
50652 inode->i_uid = GLOBAL_ROOT_UID;
50653@@ -2065,6 +2183,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
50654 if (!task)
50655 goto out_no_task;
50656
50657+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
50658+ goto out;
50659+
50660 /*
50661 * Yes, it does not scale. And it should not. Don't add
50662 * new entries into /proc/<tgid>/ without very good reasons.
50663@@ -2109,6 +2230,9 @@ static int proc_pident_readdir(struct file *filp,
50664 if (!task)
50665 goto out_no_task;
50666
50667+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
50668+ goto out;
50669+
50670 ret = 0;
50671 i = filp->f_pos;
50672 switch (i) {
50673@@ -2380,7 +2504,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
50674 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
50675 void *cookie)
50676 {
50677- char *s = nd_get_link(nd);
50678+ const char *s = nd_get_link(nd);
50679 if (!IS_ERR(s))
50680 kfree(s);
50681 }
50682@@ -2662,7 +2786,7 @@ static const struct pid_entry tgid_base_stuff[] = {
50683 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
50684 #endif
50685 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
50686-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
50687+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
50688 INF("syscall", S_IRUGO, proc_pid_syscall),
50689 #endif
50690 INF("cmdline", S_IRUGO, proc_pid_cmdline),
50691@@ -2687,10 +2811,10 @@ static const struct pid_entry tgid_base_stuff[] = {
50692 #ifdef CONFIG_SECURITY
50693 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
50694 #endif
50695-#ifdef CONFIG_KALLSYMS
50696+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
50697 INF("wchan", S_IRUGO, proc_pid_wchan),
50698 #endif
50699-#ifdef CONFIG_STACKTRACE
50700+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
50701 ONE("stack", S_IRUGO, proc_pid_stack),
50702 #endif
50703 #ifdef CONFIG_SCHEDSTATS
50704@@ -2724,6 +2848,9 @@ static const struct pid_entry tgid_base_stuff[] = {
50705 #ifdef CONFIG_HARDWALL
50706 INF("hardwall", S_IRUGO, proc_pid_hardwall),
50707 #endif
50708+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
50709+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
50710+#endif
50711 #ifdef CONFIG_USER_NS
50712 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
50713 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
50714@@ -2856,7 +2983,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
50715 if (!inode)
50716 goto out;
50717
50718+#ifdef CONFIG_GRKERNSEC_PROC_USER
50719+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
50720+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50721+ inode->i_gid = grsec_proc_gid;
50722+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
50723+#else
50724 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
50725+#endif
50726 inode->i_op = &proc_tgid_base_inode_operations;
50727 inode->i_fop = &proc_tgid_base_operations;
50728 inode->i_flags|=S_IMMUTABLE;
50729@@ -2898,7 +3032,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
50730 if (!task)
50731 goto out;
50732
50733+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
50734+ goto out_put_task;
50735+
50736 result = proc_pid_instantiate(dir, dentry, task, NULL);
50737+out_put_task:
50738 put_task_struct(task);
50739 out:
50740 return result;
50741@@ -2961,6 +3099,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
50742 static int fake_filldir(void *buf, const char *name, int namelen,
50743 loff_t offset, u64 ino, unsigned d_type)
50744 {
50745+ struct getdents_callback * __buf = (struct getdents_callback *) buf;
50746+ __buf->error = -EINVAL;
50747 return 0;
50748 }
50749
50750@@ -3027,7 +3167,7 @@ static const struct pid_entry tid_base_stuff[] = {
50751 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
50752 #endif
50753 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
50754-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
50755+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
50756 INF("syscall", S_IRUGO, proc_pid_syscall),
50757 #endif
50758 INF("cmdline", S_IRUGO, proc_pid_cmdline),
50759@@ -3054,10 +3194,10 @@ static const struct pid_entry tid_base_stuff[] = {
50760 #ifdef CONFIG_SECURITY
50761 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
50762 #endif
50763-#ifdef CONFIG_KALLSYMS
50764+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
50765 INF("wchan", S_IRUGO, proc_pid_wchan),
50766 #endif
50767-#ifdef CONFIG_STACKTRACE
50768+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
50769 ONE("stack", S_IRUGO, proc_pid_stack),
50770 #endif
50771 #ifdef CONFIG_SCHEDSTATS
50772diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
50773index 82676e3..5f8518a 100644
50774--- a/fs/proc/cmdline.c
50775+++ b/fs/proc/cmdline.c
50776@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
50777
50778 static int __init proc_cmdline_init(void)
50779 {
50780+#ifdef CONFIG_GRKERNSEC_PROC_ADD
50781+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
50782+#else
50783 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
50784+#endif
50785 return 0;
50786 }
50787 module_init(proc_cmdline_init);
50788diff --git a/fs/proc/devices.c b/fs/proc/devices.c
50789index b143471..bb105e5 100644
50790--- a/fs/proc/devices.c
50791+++ b/fs/proc/devices.c
50792@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
50793
50794 static int __init proc_devices_init(void)
50795 {
50796+#ifdef CONFIG_GRKERNSEC_PROC_ADD
50797+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
50798+#else
50799 proc_create("devices", 0, NULL, &proc_devinfo_operations);
50800+#endif
50801 return 0;
50802 }
50803 module_init(proc_devices_init);
50804diff --git a/fs/proc/fd.c b/fs/proc/fd.c
50805index f28a875..c467953 100644
50806--- a/fs/proc/fd.c
50807+++ b/fs/proc/fd.c
50808@@ -25,7 +25,8 @@ static int seq_show(struct seq_file *m, void *v)
50809 if (!task)
50810 return -ENOENT;
50811
50812- files = get_files_struct(task);
50813+ if (!gr_acl_handle_procpidmem(task))
50814+ files = get_files_struct(task);
50815 put_task_struct(task);
50816
50817 if (files) {
50818@@ -300,11 +301,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
50819 */
50820 int proc_fd_permission(struct inode *inode, int mask)
50821 {
50822+ struct task_struct *task;
50823 int rv = generic_permission(inode, mask);
50824- if (rv == 0)
50825- return 0;
50826+
50827 if (task_pid(current) == proc_pid(inode))
50828 rv = 0;
50829+
50830+ task = get_proc_task(inode);
50831+ if (task == NULL)
50832+ return rv;
50833+
50834+ if (gr_acl_handle_procpidmem(task))
50835+ rv = -EACCES;
50836+
50837+ put_task_struct(task);
50838+
50839 return rv;
50840 }
50841
50842diff --git a/fs/proc/inode.c b/fs/proc/inode.c
50843index 3b22bbd..895b58c 100644
50844--- a/fs/proc/inode.c
50845+++ b/fs/proc/inode.c
50846@@ -21,11 +21,17 @@
50847 #include <linux/seq_file.h>
50848 #include <linux/slab.h>
50849 #include <linux/mount.h>
50850+#include <linux/grsecurity.h>
50851
50852 #include <asm/uaccess.h>
50853
50854 #include "internal.h"
50855
50856+#ifdef CONFIG_PROC_SYSCTL
50857+extern const struct inode_operations proc_sys_inode_operations;
50858+extern const struct inode_operations proc_sys_dir_operations;
50859+#endif
50860+
50861 static void proc_evict_inode(struct inode *inode)
50862 {
50863 struct proc_dir_entry *de;
50864@@ -51,6 +57,13 @@ static void proc_evict_inode(struct inode *inode)
50865 ns_ops = PROC_I(inode)->ns_ops;
50866 if (ns_ops && ns_ops->put)
50867 ns_ops->put(PROC_I(inode)->ns);
50868+
50869+#ifdef CONFIG_PROC_SYSCTL
50870+ if (inode->i_op == &proc_sys_inode_operations ||
50871+ inode->i_op == &proc_sys_dir_operations)
50872+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
50873+#endif
50874+
50875 }
50876
50877 static struct kmem_cache * proc_inode_cachep;
50878@@ -455,7 +468,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
50879 if (de->mode) {
50880 inode->i_mode = de->mode;
50881 inode->i_uid = de->uid;
50882+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
50883+ inode->i_gid = grsec_proc_gid;
50884+#else
50885 inode->i_gid = de->gid;
50886+#endif
50887 }
50888 if (de->size)
50889 inode->i_size = de->size;
50890diff --git a/fs/proc/internal.h b/fs/proc/internal.h
50891index 43973b0..a20e704 100644
50892--- a/fs/proc/internal.h
50893+++ b/fs/proc/internal.h
50894@@ -54,6 +54,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
50895 struct pid *pid, struct task_struct *task);
50896 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
50897 struct pid *pid, struct task_struct *task);
50898+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
50899+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
50900+#endif
50901 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
50902
50903 extern const struct file_operations proc_tid_children_operations;
50904diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
50905index 86c67ee..cdca321 100644
50906--- a/fs/proc/kcore.c
50907+++ b/fs/proc/kcore.c
50908@@ -480,9 +480,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
50909 * the addresses in the elf_phdr on our list.
50910 */
50911 start = kc_offset_to_vaddr(*fpos - elf_buflen);
50912- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
50913+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
50914+ if (tsz > buflen)
50915 tsz = buflen;
50916-
50917+
50918 while (buflen) {
50919 struct kcore_list *m;
50920
50921@@ -511,20 +512,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
50922 kfree(elf_buf);
50923 } else {
50924 if (kern_addr_valid(start)) {
50925- unsigned long n;
50926+ char *elf_buf;
50927+ mm_segment_t oldfs;
50928
50929- n = copy_to_user(buffer, (char *)start, tsz);
50930- /*
50931- * We cannot distinguish between fault on source
50932- * and fault on destination. When this happens
50933- * we clear too and hope it will trigger the
50934- * EFAULT again.
50935- */
50936- if (n) {
50937- if (clear_user(buffer + tsz - n,
50938- n))
50939+ elf_buf = kmalloc(tsz, GFP_KERNEL);
50940+ if (!elf_buf)
50941+ return -ENOMEM;
50942+ oldfs = get_fs();
50943+ set_fs(KERNEL_DS);
50944+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
50945+ set_fs(oldfs);
50946+ if (copy_to_user(buffer, elf_buf, tsz)) {
50947+ kfree(elf_buf);
50948 return -EFAULT;
50949+ }
50950 }
50951+ set_fs(oldfs);
50952+ kfree(elf_buf);
50953 } else {
50954 if (clear_user(buffer, tsz))
50955 return -EFAULT;
50956@@ -544,6 +548,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
50957
50958 static int open_kcore(struct inode *inode, struct file *filp)
50959 {
50960+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
50961+ return -EPERM;
50962+#endif
50963 if (!capable(CAP_SYS_RAWIO))
50964 return -EPERM;
50965 if (kcore_need_update)
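The rewritten read path above avoids copying straight from a kernel virtual
address to userspace, where a fault on the source could not be told apart
from a fault on the destination. Instead it bounces through a kmalloc'd
buffer: __copy_from_user() under KERNEL_DS catches source-side faults, then a
normal copy_to_user() handles the user side. A minimal sketch of the pattern
(names and error handling simplified from the hunk above):

	static ssize_t bounce_read_kernel(char __user *ubuf,
					  unsigned long kern_addr, size_t len)
	{
		mm_segment_t oldfs;
		char *buf = kmalloc(len, GFP_KERNEL);
		ssize_t ret = len;

		if (!buf)
			return -ENOMEM;

		oldfs = get_fs();
		set_fs(KERNEL_DS);
		/* a fault while reading the kernel-side source is caught here */
		if (__copy_from_user(buf, (const void __user *)kern_addr, len))
			ret = -EFAULT;
		set_fs(oldfs);

		/* a fault on the user-side destination is reported separately */
		if (ret > 0 && copy_to_user(ubuf, buf, len))
			ret = -EFAULT;

		kfree(buf);
		return ret;
	}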
50966diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
50967index 80e4645..53e5fcf 100644
50968--- a/fs/proc/meminfo.c
50969+++ b/fs/proc/meminfo.c
50970@@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
50971 vmi.used >> 10,
50972 vmi.largest_chunk >> 10
50973 #ifdef CONFIG_MEMORY_FAILURE
50974- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
50975+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
50976 #endif
50977 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
50978 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
50979diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
50980index b1822dd..df622cb 100644
50981--- a/fs/proc/nommu.c
50982+++ b/fs/proc/nommu.c
50983@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
50984 if (len < 1)
50985 len = 1;
50986 seq_printf(m, "%*c", len, ' ');
50987- seq_path(m, &file->f_path, "");
50988+ seq_path(m, &file->f_path, "\n\\");
50989 }
50990
50991 seq_putc(m, '\n');
50992diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
50993index fe72cd0..cb9b67d 100644
50994--- a/fs/proc/proc_net.c
50995+++ b/fs/proc/proc_net.c
50996@@ -23,6 +23,7 @@
50997 #include <linux/nsproxy.h>
50998 #include <net/net_namespace.h>
50999 #include <linux/seq_file.h>
51000+#include <linux/grsecurity.h>
51001
51002 #include "internal.h"
51003
51004@@ -105,6 +106,17 @@ static struct net *get_proc_task_net(struct inode *dir)
51005 struct task_struct *task;
51006 struct nsproxy *ns;
51007 struct net *net = NULL;
51008+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51009+ const struct cred *cred = current_cred();
51010+#endif
51011+
51012+#ifdef CONFIG_GRKERNSEC_PROC_USER
51013+ if (cred->fsuid)
51014+ return net;
51015+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51016+ if (cred->fsuid && !in_group_p(grsec_proc_gid))
51017+ return net;
51018+#endif
51019
51020 rcu_read_lock();
51021 task = pid_task(proc_pid(dir), PIDTYPE_PID);
51022diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
51023index a781bdf..6665284 100644
51024--- a/fs/proc/proc_sysctl.c
51025+++ b/fs/proc/proc_sysctl.c
51026@@ -12,11 +12,15 @@
51027 #include <linux/module.h>
51028 #include "internal.h"
51029
51030+extern int gr_handle_chroot_sysctl(const int op);
51031+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
51032+ const int op);
51033+
51034 static const struct dentry_operations proc_sys_dentry_operations;
51035 static const struct file_operations proc_sys_file_operations;
51036-static const struct inode_operations proc_sys_inode_operations;
51037+const struct inode_operations proc_sys_inode_operations;
51038 static const struct file_operations proc_sys_dir_file_operations;
51039-static const struct inode_operations proc_sys_dir_operations;
51040+const struct inode_operations proc_sys_dir_operations;
51041
51042 void proc_sys_poll_notify(struct ctl_table_poll *poll)
51043 {
51044@@ -465,6 +469,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
51045
51046 err = NULL;
51047 d_set_d_op(dentry, &proc_sys_dentry_operations);
51048+
51049+ gr_handle_proc_create(dentry, inode);
51050+
51051 d_add(dentry, inode);
51052
51053 out:
51054@@ -480,18 +487,20 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
51055 struct inode *inode = filp->f_path.dentry->d_inode;
51056 struct ctl_table_header *head = grab_header(inode);
51057 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
51058+ int op = write ? MAY_WRITE : MAY_READ;
51059 ssize_t error;
51060 size_t res;
51061
51062 if (IS_ERR(head))
51063 return PTR_ERR(head);
51064
51065+
51066 /*
51067 * At this point we know that the sysctl was not unregistered
51068 * and won't be until we finish.
51069 */
51070 error = -EPERM;
51071- if (sysctl_perm(head->root, table, write ? MAY_WRITE : MAY_READ))
51072+ if (sysctl_perm(head->root, table, op))
51073 goto out;
51074
51075 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
51076@@ -499,6 +508,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
51077 if (!table->proc_handler)
51078 goto out;
51079
51080+#ifdef CONFIG_GRKERNSEC
51081+ error = -EPERM;
51082+ if (gr_handle_chroot_sysctl(op))
51083+ goto out;
51084+ dget(filp->f_path.dentry);
51085+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
51086+ dput(filp->f_path.dentry);
51087+ goto out;
51088+ }
51089+ dput(filp->f_path.dentry);
51090+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
51091+ goto out;
51092+ if (write && !capable(CAP_SYS_ADMIN))
51093+ goto out;
51094+#endif
51095+
51096 /* careful: calling conventions are nasty here */
51097 res = count;
51098 error = table->proc_handler(table, write, buf, &res, ppos);
51099@@ -596,6 +621,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
51100 return -ENOMEM;
51101 } else {
51102 d_set_d_op(child, &proc_sys_dentry_operations);
51103+
51104+ gr_handle_proc_create(child, inode);
51105+
51106 d_add(child, inode);
51107 }
51108 } else {
51109@@ -639,6 +667,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
51110 if ((*pos)++ < file->f_pos)
51111 return 0;
51112
51113+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
51114+ return 0;
51115+
51116 if (unlikely(S_ISLNK(table->mode)))
51117 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
51118 else
51119@@ -756,6 +787,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
51120 if (IS_ERR(head))
51121 return PTR_ERR(head);
51122
51123+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
51124+ return -ENOENT;
51125+
51126 generic_fillattr(inode, stat);
51127 if (table)
51128 stat->mode = (stat->mode & S_IFMT) | table->mode;
51129@@ -778,13 +812,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
51130 .llseek = generic_file_llseek,
51131 };
51132
51133-static const struct inode_operations proc_sys_inode_operations = {
51134+const struct inode_operations proc_sys_inode_operations = {
51135 .permission = proc_sys_permission,
51136 .setattr = proc_sys_setattr,
51137 .getattr = proc_sys_getattr,
51138 };
51139
51140-static const struct inode_operations proc_sys_dir_operations = {
51141+const struct inode_operations proc_sys_dir_operations = {
51142 .lookup = proc_sys_lookup,
51143 .permission = proc_sys_permission,
51144 .setattr = proc_sys_setattr,
51145diff --git a/fs/proc/root.c b/fs/proc/root.c
51146index 9889a92..2613b48 100644
51147--- a/fs/proc/root.c
51148+++ b/fs/proc/root.c
51149@@ -187,7 +187,15 @@ void __init proc_root_init(void)
51150 #ifdef CONFIG_PROC_DEVICETREE
51151 proc_device_tree_init();
51152 #endif
51153+#ifdef CONFIG_GRKERNSEC_PROC_ADD
51154+#ifdef CONFIG_GRKERNSEC_PROC_USER
51155+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
51156+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51157+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
51158+#endif
51159+#else
51160 proc_mkdir("bus", NULL);
51161+#endif
51162 proc_sys_init();
51163 }
51164
51165diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
51166index 90c63f9..e662cfc 100644
51167--- a/fs/proc/task_mmu.c
51168+++ b/fs/proc/task_mmu.c
51169@@ -11,12 +11,19 @@
51170 #include <linux/rmap.h>
51171 #include <linux/swap.h>
51172 #include <linux/swapops.h>
51173+#include <linux/grsecurity.h>
51174
51175 #include <asm/elf.h>
51176 #include <asm/uaccess.h>
51177 #include <asm/tlbflush.h>
51178 #include "internal.h"
51179
51180+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51181+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
51182+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
51183+ _mm->pax_flags & MF_PAX_SEGMEXEC))
51184+#endif
51185+
51186 void task_mem(struct seq_file *m, struct mm_struct *mm)
51187 {
51188 unsigned long data, text, lib, swap;
51189@@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
51190 "VmExe:\t%8lu kB\n"
51191 "VmLib:\t%8lu kB\n"
51192 "VmPTE:\t%8lu kB\n"
51193- "VmSwap:\t%8lu kB\n",
51194- hiwater_vm << (PAGE_SHIFT-10),
51195+ "VmSwap:\t%8lu kB\n"
51196+
51197+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
51198+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
51199+#endif
51200+
51201+ ,hiwater_vm << (PAGE_SHIFT-10),
51202 total_vm << (PAGE_SHIFT-10),
51203 mm->locked_vm << (PAGE_SHIFT-10),
51204 mm->pinned_vm << (PAGE_SHIFT-10),
51205@@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
51206 data << (PAGE_SHIFT-10),
51207 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
51208 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
51209- swap << (PAGE_SHIFT-10));
51210+ swap << (PAGE_SHIFT-10)
51211+
51212+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
51213+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51214+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
51215+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
51216+#else
51217+ , mm->context.user_cs_base
51218+ , mm->context.user_cs_limit
51219+#endif
51220+#endif
51221+
51222+ );
51223 }
51224
51225 unsigned long task_vsize(struct mm_struct *mm)
51226@@ -277,13 +301,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
51227 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
51228 }
51229
51230- /* We don't show the stack guard page in /proc/maps */
51231+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51232+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
51233+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
51234+#else
51235 start = vma->vm_start;
51236- if (stack_guard_page_start(vma, start))
51237- start += PAGE_SIZE;
51238 end = vma->vm_end;
51239- if (stack_guard_page_end(vma, end))
51240- end -= PAGE_SIZE;
51241+#endif
51242
51243 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
51244 start,
51245@@ -292,7 +316,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
51246 flags & VM_WRITE ? 'w' : '-',
51247 flags & VM_EXEC ? 'x' : '-',
51248 flags & VM_MAYSHARE ? 's' : 'p',
51249+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51250+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
51251+#else
51252 pgoff,
51253+#endif
51254 MAJOR(dev), MINOR(dev), ino, &len);
51255
51256 /*
51257@@ -301,7 +329,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
51258 */
51259 if (file) {
51260 pad_len_spaces(m, len);
51261- seq_path(m, &file->f_path, "\n");
51262+ seq_path(m, &file->f_path, "\n\\");
51263 goto done;
51264 }
51265
51266@@ -327,8 +355,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
51267 * Thread stack in /proc/PID/task/TID/maps or
51268 * the main process stack.
51269 */
51270- if (!is_pid || (vma->vm_start <= mm->start_stack &&
51271- vma->vm_end >= mm->start_stack)) {
51272+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
51273+ (vma->vm_start <= mm->start_stack &&
51274+ vma->vm_end >= mm->start_stack)) {
51275 name = "[stack]";
51276 } else {
51277 /* Thread stack in /proc/PID/maps */
51278@@ -352,6 +381,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
51279 struct proc_maps_private *priv = m->private;
51280 struct task_struct *task = priv->task;
51281
51282+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51283+ if (current->exec_id != m->exec_id) {
51284+ gr_log_badprocpid("maps");
51285+ return 0;
51286+ }
51287+#endif
51288+
51289 show_map_vma(m, vma, is_pid);
51290
51291 if (m->count < m->size) /* vma is copied successfully */
51292@@ -538,12 +574,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
51293 .private = &mss,
51294 };
51295
51296+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51297+ if (current->exec_id != m->exec_id) {
51298+ gr_log_badprocpid("smaps");
51299+ return 0;
51300+ }
51301+#endif
51302 memset(&mss, 0, sizeof mss);
51303- mss.vma = vma;
51304- /* mmap_sem is held in m_start */
51305- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
51306- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
51307-
51308+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51309+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
51310+#endif
51311+ mss.vma = vma;
51312+ /* mmap_sem is held in m_start */
51313+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
51314+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
51315+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51316+ }
51317+#endif
51318 show_map_vma(m, vma, is_pid);
51319
51320 seq_printf(m,
51321@@ -561,7 +608,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
51322 "KernelPageSize: %8lu kB\n"
51323 "MMUPageSize: %8lu kB\n"
51324 "Locked: %8lu kB\n",
51325+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51326+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
51327+#else
51328 (vma->vm_end - vma->vm_start) >> 10,
51329+#endif
51330 mss.resident >> 10,
51331 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
51332 mss.shared_clean >> 10,
51333@@ -1211,6 +1262,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
51334 int n;
51335 char buffer[50];
51336
51337+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51338+ if (current->exec_id != m->exec_id) {
51339+ gr_log_badprocpid("numa_maps");
51340+ return 0;
51341+ }
51342+#endif
51343+
51344 if (!mm)
51345 return 0;
51346
51347@@ -1228,11 +1286,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
51348 mpol_to_str(buffer, sizeof(buffer), pol, 0);
51349 mpol_cond_put(pol);
51350
51351+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51352+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
51353+#else
51354 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
51355+#endif
51356
51357 if (file) {
51358 seq_printf(m, " file=");
51359- seq_path(m, &file->f_path, "\n\t= ");
51360+ seq_path(m, &file->f_path, "\n\t\\= ");
51361 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
51362 seq_printf(m, " heap");
51363 } else {
51364diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
51365index 1ccfa53..0848f95 100644
51366--- a/fs/proc/task_nommu.c
51367+++ b/fs/proc/task_nommu.c
51368@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
51369 else
51370 bytes += kobjsize(mm);
51371
51372- if (current->fs && current->fs->users > 1)
51373+ if (current->fs && atomic_read(&current->fs->users) > 1)
51374 sbytes += kobjsize(current->fs);
51375 else
51376 bytes += kobjsize(current->fs);
51377@@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
51378
51379 if (file) {
51380 pad_len_spaces(m, len);
51381- seq_path(m, &file->f_path, "");
51382+ seq_path(m, &file->f_path, "\n\\");
51383 } else if (mm) {
51384 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
51385
51386diff --git a/fs/pstore/ftrace.c b/fs/pstore/ftrace.c
51387index 2d57e1a..43b1280 100644
51388--- a/fs/pstore/ftrace.c
51389+++ b/fs/pstore/ftrace.c
51390@@ -28,7 +28,9 @@
51391 #include "internal.h"
51392
51393 static void notrace pstore_ftrace_call(unsigned long ip,
51394- unsigned long parent_ip)
51395+ unsigned long parent_ip,
51396+ struct ftrace_ops *op,
51397+ struct pt_regs *regs)
51398 {
51399 unsigned long flags;
51400 struct pstore_ftrace_record rec = {};
51401diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
51402index 16e8abb..2dcf914 100644
51403--- a/fs/quota/netlink.c
51404+++ b/fs/quota/netlink.c
51405@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
51406 void quota_send_warning(struct kqid qid, dev_t dev,
51407 const char warntype)
51408 {
51409- static atomic_t seq;
51410+ static atomic_unchecked_t seq;
51411 struct sk_buff *skb;
51412 void *msg_head;
51413 int ret;
51414@@ -49,7 +49,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
51415 "VFS: Not enough memory to send quota warning.\n");
51416 return;
51417 }
51418- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
51419+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
51420 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
51421 if (!msg_head) {
51422 printk(KERN_ERR
51423diff --git a/fs/read_write.c b/fs/read_write.c
51424index d065348..8e2b43d 100644
51425--- a/fs/read_write.c
51426+++ b/fs/read_write.c
51427@@ -935,6 +935,8 @@ ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, size_t count,
51428 if (retval > 0) {
51429 add_rchar(current, retval);
51430 add_wchar(current, retval);
51431+ fsnotify_access(in.file);
51432+ fsnotify_modify(out.file);
51433 }
51434
51435 inc_syscr(current);
51436diff --git a/fs/readdir.c b/fs/readdir.c
51437index 5e69ef5..e5d9099 100644
51438--- a/fs/readdir.c
51439+++ b/fs/readdir.c
51440@@ -17,6 +17,7 @@
51441 #include <linux/security.h>
51442 #include <linux/syscalls.h>
51443 #include <linux/unistd.h>
51444+#include <linux/namei.h>
51445
51446 #include <asm/uaccess.h>
51447
51448@@ -67,6 +68,7 @@ struct old_linux_dirent {
51449
51450 struct readdir_callback {
51451 struct old_linux_dirent __user * dirent;
51452+ struct file * file;
51453 int result;
51454 };
51455
51456@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
51457 buf->result = -EOVERFLOW;
51458 return -EOVERFLOW;
51459 }
51460+
51461+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
51462+ return 0;
51463+
51464 buf->result++;
51465 dirent = buf->dirent;
51466 if (!access_ok(VERIFY_WRITE, dirent,
51467@@ -114,6 +120,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
51468
51469 buf.result = 0;
51470 buf.dirent = dirent;
51471+ buf.file = f.file;
51472
51473 error = vfs_readdir(f.file, fillonedir, &buf);
51474 if (buf.result)
51475@@ -139,6 +146,7 @@ struct linux_dirent {
51476 struct getdents_callback {
51477 struct linux_dirent __user * current_dir;
51478 struct linux_dirent __user * previous;
51479+ struct file * file;
51480 int count;
51481 int error;
51482 };
51483@@ -160,6 +168,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
51484 buf->error = -EOVERFLOW;
51485 return -EOVERFLOW;
51486 }
51487+
51488+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
51489+ return 0;
51490+
51491 dirent = buf->previous;
51492 if (dirent) {
51493 if (__put_user(offset, &dirent->d_off))
51494@@ -205,6 +217,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
51495 buf.previous = NULL;
51496 buf.count = count;
51497 buf.error = 0;
51498+ buf.file = f.file;
51499
51500 error = vfs_readdir(f.file, filldir, &buf);
51501 if (error >= 0)
51502@@ -223,6 +236,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
51503 struct getdents_callback64 {
51504 struct linux_dirent64 __user * current_dir;
51505 struct linux_dirent64 __user * previous;
51506+ struct file *file;
51507 int count;
51508 int error;
51509 };
51510@@ -238,6 +252,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
51511 buf->error = -EINVAL; /* only used if we fail.. */
51512 if (reclen > buf->count)
51513 return -EINVAL;
51514+
51515+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
51516+ return 0;
51517+
51518 dirent = buf->previous;
51519 if (dirent) {
51520 if (__put_user(offset, &dirent->d_off))
51521@@ -283,6 +301,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
51522
51523 buf.current_dir = dirent;
51524 buf.previous = NULL;
51525+ buf.file = f.file;
51526 buf.count = count;
51527 buf.error = 0;
51528
51529@@ -291,7 +310,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
51530 error = buf.error;
51531 lastdirent = buf.previous;
51532 if (lastdirent) {
51533- typeof(lastdirent->d_off) d_off = f.file->f_pos;
51534+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
51535 if (__put_user(d_off, &lastdirent->d_off))
51536 error = -EFAULT;
51537 else
51538diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
51539index 2b7882b..1c5ef48 100644
51540--- a/fs/reiserfs/do_balan.c
51541+++ b/fs/reiserfs/do_balan.c
51542@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
51543 return;
51544 }
51545
51546- atomic_inc(&(fs_generation(tb->tb_sb)));
51547+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
51548 do_balance_starts(tb);
51549
51550 /* balance leaf returns 0 except if combining L R and S into
51551diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
51552index e60e870..f40ac16 100644
51553--- a/fs/reiserfs/procfs.c
51554+++ b/fs/reiserfs/procfs.c
51555@@ -112,7 +112,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
51556 "SMALL_TAILS " : "NO_TAILS ",
51557 replay_only(sb) ? "REPLAY_ONLY " : "",
51558 convert_reiserfs(sb) ? "CONV " : "",
51559- atomic_read(&r->s_generation_counter),
51560+ atomic_read_unchecked(&r->s_generation_counter),
51561 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
51562 SF(s_do_balance), SF(s_unneeded_left_neighbor),
51563 SF(s_good_search_by_key_reada), SF(s_bmaps),
51564diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
51565index 33215f5..c5d427a 100644
51566--- a/fs/reiserfs/reiserfs.h
51567+++ b/fs/reiserfs/reiserfs.h
51568@@ -453,7 +453,7 @@ struct reiserfs_sb_info {
51569 /* Comment? -Hans */
51570 wait_queue_head_t s_wait;
51571 /* To be obsoleted soon by per buffer seals.. -Hans */
51572- atomic_t s_generation_counter; // increased by one every time the
51573+ atomic_unchecked_t s_generation_counter; // increased by one every time the
51574 // tree gets re-balanced
51575 unsigned long s_properties; /* File system properties. Currently holds
51576 on-disk FS format */
51577@@ -1978,7 +1978,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
51578 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
51579
51580 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
51581-#define get_generation(s) atomic_read (&fs_generation(s))
51582+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
51583 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
51584 #define __fs_changed(gen,s) (gen != get_generation (s))
51585 #define fs_changed(gen,s) \
51586diff --git a/fs/select.c b/fs/select.c
51587index 2ef72d9..f213b17 100644
51588--- a/fs/select.c
51589+++ b/fs/select.c
51590@@ -20,6 +20,7 @@
51591 #include <linux/export.h>
51592 #include <linux/slab.h>
51593 #include <linux/poll.h>
51594+#include <linux/security.h>
51595 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
51596 #include <linux/file.h>
51597 #include <linux/fdtable.h>
51598@@ -826,6 +827,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
51599 struct poll_list *walk = head;
51600 unsigned long todo = nfds;
51601
51602+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
51603 if (nfds > rlimit(RLIMIT_NOFILE))
51604 return -EINVAL;
51605
51606diff --git a/fs/seq_file.c b/fs/seq_file.c
51607index 99dffab..e4fcb71 100644
51608--- a/fs/seq_file.c
51609+++ b/fs/seq_file.c
51610@@ -10,6 +10,7 @@
51611 #include <linux/seq_file.h>
51612 #include <linux/slab.h>
51613 #include <linux/cred.h>
51614+#include <linux/sched.h>
51615
51616 #include <asm/uaccess.h>
51617 #include <asm/page.h>
51618@@ -60,6 +61,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
51619 #ifdef CONFIG_USER_NS
51620 p->user_ns = file->f_cred->user_ns;
51621 #endif
51622+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51623+ p->exec_id = current->exec_id;
51624+#endif
51625
51626 /*
51627 * Wrappers around seq_open(e.g. swaps_open) need to be
51628@@ -96,7 +100,7 @@ static int traverse(struct seq_file *m, loff_t offset)
51629 return 0;
51630 }
51631 if (!m->buf) {
51632- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
51633+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
51634 if (!m->buf)
51635 return -ENOMEM;
51636 }
51637@@ -136,7 +140,7 @@ static int traverse(struct seq_file *m, loff_t offset)
51638 Eoverflow:
51639 m->op->stop(m, p);
51640 kfree(m->buf);
51641- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
51642+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
51643 return !m->buf ? -ENOMEM : -EAGAIN;
51644 }
51645
51646@@ -191,7 +195,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
51647
51648 /* grab buffer if we didn't have one */
51649 if (!m->buf) {
51650- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
51651+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
51652 if (!m->buf)
51653 goto Enomem;
51654 }
51655@@ -232,7 +236,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
51656 goto Fill;
51657 m->op->stop(m, p);
51658 kfree(m->buf);
51659- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
51660+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
51661 if (!m->buf)
51662 goto Enomem;
51663 m->count = 0;
51664@@ -581,7 +585,7 @@ static void single_stop(struct seq_file *p, void *v)
51665 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
51666 void *data)
51667 {
51668- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
51669+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
51670 int res = -ENOMEM;
51671
51672 if (op) {
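This hunk records the opener's exec_id on every seq_file; the
CONFIG_GRKERNSEC_PROC_MEMMAP checks added earlier in fs/proc/base.c and
fs/proc/task_mmu.c compare that stamp against current->exec_id on each read,
so a descriptor opened before an execve() cannot be replayed against the new
program image. The two halves, condensed (exec_id is a counter the patch adds
to task_struct and seq_file; gr_log_badprocpid() is its logging helper):

	/* open time: remember which program image opened the file */
	p->exec_id = current->exec_id;

	/* every ->show()/read path: refuse to serve a stale descriptor */
	if (current->exec_id != m->exec_id) {
		gr_log_badprocpid("maps");	/* log the suspicious access */
		return 0;			/* return no data */
	}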
51673diff --git a/fs/splice.c b/fs/splice.c
51674index 48c7bd1..d0740e4 100644
51675--- a/fs/splice.c
51676+++ b/fs/splice.c
51677@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
51678 pipe_lock(pipe);
51679
51680 for (;;) {
51681- if (!pipe->readers) {
51682+ if (!atomic_read(&pipe->readers)) {
51683 send_sig(SIGPIPE, current, 0);
51684 if (!ret)
51685 ret = -EPIPE;
51686@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
51687 do_wakeup = 0;
51688 }
51689
51690- pipe->waiting_writers++;
51691+ atomic_inc(&pipe->waiting_writers);
51692 pipe_wait(pipe);
51693- pipe->waiting_writers--;
51694+ atomic_dec(&pipe->waiting_writers);
51695 }
51696
51697 pipe_unlock(pipe);
51698@@ -563,7 +563,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
51699 old_fs = get_fs();
51700 set_fs(get_ds());
51701 /* The cast to a user pointer is valid due to the set_fs() */
51702- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
51703+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
51704 set_fs(old_fs);
51705
51706 return res;
51707@@ -578,7 +578,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
51708 old_fs = get_fs();
51709 set_fs(get_ds());
51710 /* The cast to a user pointer is valid due to the set_fs() */
51711- res = vfs_write(file, (const char __user *)buf, count, &pos);
51712+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
51713 set_fs(old_fs);
51714
51715 return res;
51716@@ -630,7 +630,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
51717 goto err;
51718
51719 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
51720- vec[i].iov_base = (void __user *) page_address(page);
51721+ vec[i].iov_base = (void __force_user *) page_address(page);
51722 vec[i].iov_len = this_len;
51723 spd.pages[i] = page;
51724 spd.nr_pages++;
51725@@ -851,10 +851,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
51726 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
51727 {
51728 while (!pipe->nrbufs) {
51729- if (!pipe->writers)
51730+ if (!atomic_read(&pipe->writers))
51731 return 0;
51732
51733- if (!pipe->waiting_writers && sd->num_spliced)
51734+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
51735 return 0;
51736
51737 if (sd->flags & SPLICE_F_NONBLOCK)
51738@@ -1192,7 +1192,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
51739 * out of the pipe right after the splice_to_pipe(). So set
51740 * PIPE_READERS appropriately.
51741 */
51742- pipe->readers = 1;
51743+ atomic_set(&pipe->readers, 1);
51744
51745 current->splice_pipe = pipe;
51746 }
51747@@ -1741,9 +1741,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
51748 ret = -ERESTARTSYS;
51749 break;
51750 }
51751- if (!pipe->writers)
51752+ if (!atomic_read(&pipe->writers))
51753 break;
51754- if (!pipe->waiting_writers) {
51755+ if (!atomic_read(&pipe->waiting_writers)) {
51756 if (flags & SPLICE_F_NONBLOCK) {
51757 ret = -EAGAIN;
51758 break;
51759@@ -1775,7 +1775,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
51760 pipe_lock(pipe);
51761
51762 while (pipe->nrbufs >= pipe->buffers) {
51763- if (!pipe->readers) {
51764+ if (!atomic_read(&pipe->readers)) {
51765 send_sig(SIGPIPE, current, 0);
51766 ret = -EPIPE;
51767 break;
51768@@ -1788,9 +1788,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
51769 ret = -ERESTARTSYS;
51770 break;
51771 }
51772- pipe->waiting_writers++;
51773+ atomic_inc(&pipe->waiting_writers);
51774 pipe_wait(pipe);
51775- pipe->waiting_writers--;
51776+ atomic_dec(&pipe->waiting_writers);
51777 }
51778
51779 pipe_unlock(pipe);
51780@@ -1826,14 +1826,14 @@ retry:
51781 pipe_double_lock(ipipe, opipe);
51782
51783 do {
51784- if (!opipe->readers) {
51785+ if (!atomic_read(&opipe->readers)) {
51786 send_sig(SIGPIPE, current, 0);
51787 if (!ret)
51788 ret = -EPIPE;
51789 break;
51790 }
51791
51792- if (!ipipe->nrbufs && !ipipe->writers)
51793+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
51794 break;
51795
51796 /*
51797@@ -1930,7 +1930,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
51798 pipe_double_lock(ipipe, opipe);
51799
51800 do {
51801- if (!opipe->readers) {
51802+ if (!atomic_read(&opipe->readers)) {
51803 send_sig(SIGPIPE, current, 0);
51804 if (!ret)
51805 ret = -EPIPE;
51806@@ -1975,7 +1975,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
51807 * return EAGAIN if we have the potential of some data in the
51808 * future, otherwise just return 0
51809 */
51810- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
51811+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
51812 ret = -EAGAIN;
51813
51814 pipe_unlock(ipipe);
51815diff --git a/fs/stat.c b/fs/stat.c
51816index eae4946..6198f55 100644
51817--- a/fs/stat.c
51818+++ b/fs/stat.c
51819@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
51820 stat->gid = inode->i_gid;
51821 stat->rdev = inode->i_rdev;
51822 stat->size = i_size_read(inode);
51823- stat->atime = inode->i_atime;
51824- stat->mtime = inode->i_mtime;
51825+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
51826+ stat->atime = inode->i_ctime;
51827+ stat->mtime = inode->i_ctime;
51828+ } else {
51829+ stat->atime = inode->i_atime;
51830+ stat->mtime = inode->i_mtime;
51831+ }
51832 stat->ctime = inode->i_ctime;
51833 stat->blksize = (1 << inode->i_blkbits);
51834 stat->blocks = inode->i_blocks;
51835@@ -46,8 +51,14 @@ int vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
51836 if (retval)
51837 return retval;
51838
51839- if (inode->i_op->getattr)
51840- return inode->i_op->getattr(mnt, dentry, stat);
51841+ if (inode->i_op->getattr) {
51842+ retval = inode->i_op->getattr(mnt, dentry, stat);
51843+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
51844+ stat->atime = stat->ctime;
51845+ stat->mtime = stat->ctime;
51846+ }
51847+ return retval;
51848+ }
51849
51850 generic_fillattr(inode, stat);
51851 return 0;
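The intent of the fs/stat.c change above: on "side-channel" devices such as
shared terminals, atime/mtime reveal when another user last read or typed, so
for callers without CAP_MKNOD both are masked with ctime. Condensed from the
hunk (is_sidechannel_device() and capable_nolog() are helpers the patch adds
elsewhere):

	if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
		/* hide real activity times from unprivileged observers */
		stat->atime = inode->i_ctime;
		stat->mtime = inode->i_ctime;
	} else {
		stat->atime = inode->i_atime;
		stat->mtime = inode->i_mtime;
	}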
51852diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
51853index 2fbdff6..5530a61 100644
51854--- a/fs/sysfs/dir.c
51855+++ b/fs/sysfs/dir.c
51856@@ -685,6 +685,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
51857 struct sysfs_dirent *sd;
51858 int rc;
51859
51860+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
51861+ const char *parent_name = parent_sd->s_name;
51862+
51863+ mode = S_IFDIR | S_IRWXU;
51864+
51865+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
51866+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
51867+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
51868+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
51869+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
51870+#endif
51871+
51872 /* allocate */
51873 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
51874 if (!sd)
51875diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
51876index 00012e3..8392349 100644
51877--- a/fs/sysfs/file.c
51878+++ b/fs/sysfs/file.c
51879@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
51880
51881 struct sysfs_open_dirent {
51882 atomic_t refcnt;
51883- atomic_t event;
51884+ atomic_unchecked_t event;
51885 wait_queue_head_t poll;
51886 struct list_head buffers; /* goes through sysfs_buffer.list */
51887 };
51888@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
51889 if (!sysfs_get_active(attr_sd))
51890 return -ENODEV;
51891
51892- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
51893+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
51894 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
51895
51896 sysfs_put_active(attr_sd);
51897@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
51898 return -ENOMEM;
51899
51900 atomic_set(&new_od->refcnt, 0);
51901- atomic_set(&new_od->event, 1);
51902+ atomic_set_unchecked(&new_od->event, 1);
51903 init_waitqueue_head(&new_od->poll);
51904 INIT_LIST_HEAD(&new_od->buffers);
51905 goto retry;
51906@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
51907
51908 sysfs_put_active(attr_sd);
51909
51910- if (buffer->event != atomic_read(&od->event))
51911+ if (buffer->event != atomic_read_unchecked(&od->event))
51912 goto trigger;
51913
51914 return DEFAULT_POLLMASK;
51915@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
51916
51917 od = sd->s_attr.open;
51918 if (od) {
51919- atomic_inc(&od->event);
51920+ atomic_inc_unchecked(&od->event);
51921 wake_up_interruptible(&od->poll);
51922 }
51923
51924diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
51925index 3c9eb56..9dea5be 100644
51926--- a/fs/sysfs/symlink.c
51927+++ b/fs/sysfs/symlink.c
51928@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
51929
51930 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
51931 {
51932- char *page = nd_get_link(nd);
51933+ const char *page = nd_get_link(nd);
51934 if (!IS_ERR(page))
51935 free_page((unsigned long)page);
51936 }
51937diff --git a/fs/udf/misc.c b/fs/udf/misc.c
51938index c175b4d..8f36a16 100644
51939--- a/fs/udf/misc.c
51940+++ b/fs/udf/misc.c
51941@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
51942
51943 u8 udf_tag_checksum(const struct tag *t)
51944 {
51945- u8 *data = (u8 *)t;
51946+ const u8 *data = (const u8 *)t;
51947 u8 checksum = 0;
51948 int i;
51949 for (i = 0; i < sizeof(struct tag); ++i)
51950diff --git a/fs/utimes.c b/fs/utimes.c
51951index bb0696a..552054b 100644
51952--- a/fs/utimes.c
51953+++ b/fs/utimes.c
51954@@ -1,6 +1,7 @@
51955 #include <linux/compiler.h>
51956 #include <linux/file.h>
51957 #include <linux/fs.h>
51958+#include <linux/security.h>
51959 #include <linux/linkage.h>
51960 #include <linux/mount.h>
51961 #include <linux/namei.h>
51962@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
51963 goto mnt_drop_write_and_out;
51964 }
51965 }
51966+
51967+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
51968+ error = -EACCES;
51969+ goto mnt_drop_write_and_out;
51970+ }
51971+
51972 mutex_lock(&inode->i_mutex);
51973 error = notify_change(path->dentry, &newattrs);
51974 mutex_unlock(&inode->i_mutex);
51975diff --git a/fs/xattr.c b/fs/xattr.c
51976index e21c119..21dfc7c 100644
51977--- a/fs/xattr.c
51978+++ b/fs/xattr.c
51979@@ -319,7 +319,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
51980 * Extended attribute SET operations
51981 */
51982 static long
51983-setxattr(struct dentry *d, const char __user *name, const void __user *value,
51984+setxattr(struct path *path, const char __user *name, const void __user *value,
51985 size_t size, int flags)
51986 {
51987 int error;
51988@@ -355,7 +355,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
51989 posix_acl_fix_xattr_from_user(kvalue, size);
51990 }
51991
51992- error = vfs_setxattr(d, kname, kvalue, size, flags);
51993+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
51994+ error = -EACCES;
51995+ goto out;
51996+ }
51997+
51998+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
51999 out:
52000 if (vvalue)
52001 vfree(vvalue);
52002@@ -376,7 +381,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
52003 return error;
52004 error = mnt_want_write(path.mnt);
52005 if (!error) {
52006- error = setxattr(path.dentry, name, value, size, flags);
52007+ error = setxattr(&path, name, value, size, flags);
52008 mnt_drop_write(path.mnt);
52009 }
52010 path_put(&path);
52011@@ -395,7 +400,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
52012 return error;
52013 error = mnt_want_write(path.mnt);
52014 if (!error) {
52015- error = setxattr(path.dentry, name, value, size, flags);
52016+ error = setxattr(&path, name, value, size, flags);
52017 mnt_drop_write(path.mnt);
52018 }
52019 path_put(&path);
52020@@ -406,16 +411,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
52021 const void __user *,value, size_t, size, int, flags)
52022 {
52023 struct fd f = fdget(fd);
52024- struct dentry *dentry;
52025 int error = -EBADF;
52026
52027 if (!f.file)
52028 return error;
52029- dentry = f.file->f_path.dentry;
52030- audit_inode(NULL, dentry, 0);
52031+ audit_inode(NULL, f.file->f_path.dentry, 0);
52032 error = mnt_want_write_file(f.file);
52033 if (!error) {
52034- error = setxattr(dentry, name, value, size, flags);
52035+ error = setxattr(&f.file->f_path, name, value, size, flags);
52036 mnt_drop_write_file(f.file);
52037 }
52038 fdput(f);
52039diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
52040index 9fbea87..6b19972 100644
52041--- a/fs/xattr_acl.c
52042+++ b/fs/xattr_acl.c
52043@@ -76,8 +76,8 @@ struct posix_acl *
52044 posix_acl_from_xattr(struct user_namespace *user_ns,
52045 const void *value, size_t size)
52046 {
52047- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
52048- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
52049+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
52050+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
52051 int count;
52052 struct posix_acl *acl;
52053 struct posix_acl_entry *acl_e;
52054diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
52055index 83d0cf3..2ef526b 100644
52056--- a/fs/xfs/xfs_bmap.c
52057+++ b/fs/xfs/xfs_bmap.c
52058@@ -189,7 +189,7 @@ xfs_bmap_validate_ret(
52059 int nmap,
52060 int ret_nmap);
52061 #else
52062-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
52063+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
52064 #endif /* DEBUG */
52065
52066 STATIC int
52067diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
52068index 1b9fc3e..e1bdde0 100644
52069--- a/fs/xfs/xfs_dir2_sf.c
52070+++ b/fs/xfs/xfs_dir2_sf.c
52071@@ -851,7 +851,15 @@ xfs_dir2_sf_getdents(
52072 }
52073
52074 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
52075- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
52076+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
52077+ char name[sfep->namelen];
52078+ memcpy(name, sfep->name, sfep->namelen);
52079+ if (filldir(dirent, name, sfep->namelen,
52080+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
52081+ *offset = off & 0x7fffffff;
52082+ return 0;
52083+ }
52084+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
52085 off & 0x7fffffff, ino, DT_UNKNOWN)) {
52086 *offset = off & 0x7fffffff;
52087 return 0;
52088diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
52089index c1df3c6..f987db6 100644
52090--- a/fs/xfs/xfs_ioctl.c
52091+++ b/fs/xfs/xfs_ioctl.c
52092@@ -126,7 +126,7 @@ xfs_find_handle(
52093 }
52094
52095 error = -EFAULT;
52096- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
52097+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
52098 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
52099 goto out_put;
52100
52101diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
52102index 4e00cf0..3374374 100644
52103--- a/fs/xfs/xfs_iops.c
52104+++ b/fs/xfs/xfs_iops.c
52105@@ -394,7 +394,7 @@ xfs_vn_put_link(
52106 struct nameidata *nd,
52107 void *p)
52108 {
52109- char *s = nd_get_link(nd);
52110+ const char *s = nd_get_link(nd);
52111
52112 if (!IS_ERR(s))
52113 kfree(s);
52114diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
52115new file mode 100644
52116index 0000000..5ce8347
52117--- /dev/null
52118+++ b/grsecurity/Kconfig
52119@@ -0,0 +1,1015 @@
52120+#
52121+# grsecurity configuration
52122+#
52123+menu "Memory Protections"
52124+depends on GRKERNSEC
52125+
52126+config GRKERNSEC_KMEM
52127+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
52128+ default y if GRKERNSEC_CONFIG_AUTO
52129+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
52130+ help
52131+	  If you say Y here, /dev/kmem and /dev/mem can no longer be read from
52132+	  or written to, so they cannot be used to modify or leak the contents
52133+	  of the running kernel, and /dev/port cannot be opened at all. If you
52134+	  have module support disabled, enabling this closes off four ways that
52135+	  are currently used to insert malicious code into the running kernel.
52136+ Even with all these features enabled, we still highly recommend that
52137+ you use the RBAC system, as it is still possible for an attacker to
52138+ modify the running kernel through privileged I/O granted by ioperm/iopl.
52139+ If you are not using XFree86, you may be able to stop this additional
52140+ case by enabling the 'Disable privileged I/O' option. Though nothing
52141+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
52142+ but only to video memory, which is the only writing we allow in this
52143+	  case. If /dev/kmem or /dev/mem is mmaped without PROT_WRITE, the
52144+	  mapping will not be allowed to be mprotect()ed with PROT_WRITE later.
52145+ It is highly recommended that you say Y here if you meet all the
52146+ conditions above.
52147+
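Schematically, the option above is enforced at the top of the raw memory
device file operations, in the same early-return style as the open_kcore()
hunk earlier in this patch; a hedged sketch of the guard only:

	/* at the top of the /dev/mem and /dev/kmem read/write/open handlers */
	#ifdef CONFIG_GRKERNSEC_KMEM
		return -EPERM;	/* no reads or writes of raw kernel memory */
	#endif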
52148+config GRKERNSEC_VM86
52149+ bool "Restrict VM86 mode"
52150+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
52151+ depends on X86_32
52152+
52153+ help
52154+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
52155+ make use of a special execution mode on 32bit x86 processors called
52156+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
52157+ video cards and will still work with this option enabled. The purpose
52158+ of the option is to prevent exploitation of emulation errors in
52159+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
52160+ Nearly all users should be able to enable this option.
52161+
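The enforcement point for this option is the vm86/vm86old syscall entry. A
minimal sketch, assuming a logging helper in the style of the other
gr_handle_*() calls in this patch (the exact helper name is an assumption):

	/* early in the sys_vm86()/sys_vm86old() path */
	#ifdef CONFIG_GRKERNSEC_VM86
		if (!capable(CAP_SYS_RAWIO)) {
			gr_handle_vm86();	/* assumed helper: log the denial */
			return -EPERM;
		}
	#endif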
52162+config GRKERNSEC_IO
52163+ bool "Disable privileged I/O"
52164+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
52165+ depends on X86
52166+ select RTC_CLASS
52167+ select RTC_INTF_DEV
52168+ select RTC_DRV_CMOS
52169+
52170+ help
52171+ If you say Y here, all ioperm and iopl calls will return an error.
52172+ Ioperm and iopl can be used to modify the running kernel.
52173+ Unfortunately, some programs need this access to operate properly,
52174+ the most notable of which are XFree86 and hwclock. hwclock can be
52175+ remedied by having RTC support in the kernel, so real-time
52176+ clock support is enabled if this option is enabled, to ensure
52177+ that hwclock operates correctly. XFree86 still will not
52178+ operate correctly with this option enabled, so DO NOT CHOOSE Y
52179+ IF YOU USE XFree86. If you use XFree86 and you still want to
52180+ protect your kernel against modification, use the RBAC system.
52181+
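Correspondingly, the ioperm and iopl entry points simply fail when this
option is on; a sketch of the guard (again assuming a gr_handle_*() style
logging helper, name illustrative):

	/* at the top of sys_ioperm()/sys_iopl() */
	#ifdef CONFIG_GRKERNSEC_IO
		gr_handle_ioperm();	/* assumed helper: log the attempt */
		return -EPERM;
	#endif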
52182+config GRKERNSEC_JIT_HARDEN
52183+ bool "Harden BPF JIT against spray attacks"
52184+ default y if GRKERNSEC_CONFIG_AUTO
52185+ depends on BPF_JIT
52186+ help
52187+ If you say Y here, the native code generated by the kernel's Berkeley
52188+ Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
52189+ attacks that attempt to fit attacker-beneficial instructions in
52190+ 32bit immediate fields of JIT-generated native instructions. The
52191+ attacker will generally aim to cause an unintended instruction sequence
52192+ of JIT-generated native code to execute by jumping into the middle of
52193+ a generated instruction. This feature effectively randomizes the 32bit
52194+ immediate constants present in the generated code to thwart such attacks.
52195+
52196+ If you're using KERNEXEC, it's recommended that you enable this option
52197+ to supplement the hardening of the kernel.
52198+
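The randomization described above is constant blinding: the JIT never emits
an attacker-chosen 32bit immediate verbatim, but stores it XOR-masked and
undoes the mask at run time with a second instruction. A schematic sketch
(emit_mov_imm()/emit_xor_imm() stand in for the real x86 emitters, which this
excerpt does not show):

	static u32 jit_cookie;	/* random mask chosen when the filter is JITed */

	static void emit_blinded_imm(u8 **prog, int reg, u32 imm)
	{
		/* the code page never contains the raw immediate ... */
		emit_mov_imm(prog, reg, imm ^ jit_cookie);
		/* ... a second instruction recovers it at run time */
		emit_xor_imm(prog, reg, jit_cookie);
	}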
52199+config GRKERNSEC_RAND_THREADSTACK
52200+ bool "Insert random gaps between thread stacks"
52201+ default y if GRKERNSEC_CONFIG_AUTO
52202+ depends on PAX_RANDMMAP && !PPC
52203+ help
52204+ If you say Y here, a random-sized gap will be enforced between allocated
52205+ thread stacks. Glibc's NPTL and other threading libraries that
52206+ pass MAP_STACK to the kernel for thread stack allocation are supported.
52207+ The implementation currently provides 8 bits of entropy for the gap.
52208+
52209+ Many distributions do not compile threaded remote services with the
52210+ -fstack-check argument to GCC, causing the variable-sized stack-based
52211+ allocator, alloca(), to not probe the stack on allocation. This
52212+ permits an unbounded alloca() to skip over any guard page and potentially
52213+ modify another thread's stack reliably. An enforced random gap
52214+ reduces the reliability of such an attack and increases the chance
52215+ that such a read/write to another thread's stack instead lands in
52216+ an unmapped area, causing a crash and triggering grsecurity's
52217+ anti-bruteforcing logic.
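
 For reference, a sketch of the allocation pattern this option hooks:
 threading libraries request stacks with the standard MAP_STACK mmap
 flag, and with this option the kernel places each such mapping after
 a random gap that is invisible to the caller:

    /* sketch: an NPTL-style thread stack allocation; with
     * GRKERNSEC_RAND_THREADSTACK a random gap precedes the mapping */
    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t sz = 8 * 1024 * 1024;    /* typical default stack size */
        void *stack = mmap(NULL, sz, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
        if (stack == MAP_FAILED) { perror("mmap"); return 1; }
        printf("stack at %p\n", stack);
        munmap(stack, sz);
        return 0;
    }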
52218+
52219+config GRKERNSEC_PROC_MEMMAP
52220+ bool "Harden ASLR against information leaks and entropy reduction"
52221+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
52222+ depends on PAX_NOEXEC || PAX_ASLR
52223+ help
52224+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
52225+ give no information about the addresses of the task's mappings if
52226+ PaX features that rely on random addresses are enabled on the task.
52227+ In addition to sanitizing this information and disabling other
52228+ dangerous sources of information, this option causes reads of sensitive
52229+ /proc/<pid> entries to be denied where the file descriptor was opened
52230+ in a different task than the one performing the read. Such attempts are logged.
52231+ This option also limits argv/env strings for suid/sgid binaries
52232+ to 512KB to prevent a complete exhaustion of the stack entropy provided
52233+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
52234+ binaries to prevent alternative mmap layouts from being abused.
52235+
52236+ If you use PaX it is essential that you say Y here as it closes up
52237+ several holes that make full ASLR useless locally.
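
 A quick way to observe the sanitization, as a sketch (what the output
 shows depends on whether PaX ASLR/NOEXEC is active on the task):

    /* sketch: dump /proc/self/maps; on a PaX-protected task with this
     * option the address columns are sanitized */
    #include <stdio.h>

    int main(void)
    {
        char line[512];
        FILE *f = fopen("/proc/self/maps", "r");
        if (!f) { perror("fopen"); return 1; }
        while (fgets(line, sizeof(line), f))
            fputs(line, stdout);
        fclose(f);
        return 0;
    }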
52238+
52239+config GRKERNSEC_BRUTE
52240+ bool "Deter exploit bruteforcing"
52241+ default y if GRKERNSEC_CONFIG_AUTO
52242+ help
52243+ If you say Y here, attempts to bruteforce exploits against forking
52244+ daemons such as apache or sshd, as well as against suid/sgid binaries
52245+ will be deterred. When a child of a forking daemon is killed by PaX
52246+ or crashes due to an illegal instruction or other suspicious signal,
52247+ the parent process will be delayed 30 seconds upon every subsequent
52248+ fork until the administrator is able to assess the situation and
52249+ restart the daemon.
52250+ In the suid/sgid case, the attempt is logged, the user has all their
52251+ processes terminated, and they are prevented from executing any further
52252+ processes for 15 minutes.
52253+ It is recommended that you also enable signal logging in the auditing
52254+ section so that logs are generated when a process triggers a suspicious
52255+ signal.
52256+ If the sysctl option is enabled, a sysctl option with name
52257+ "deter_bruteforce" is created.
52258+
52260+config GRKERNSEC_MODHARDEN
52261+ bool "Harden module auto-loading"
52262+ default y if GRKERNSEC_CONFIG_AUTO
52263+ depends on MODULES
52264+ help
52265+ If you say Y here, module auto-loading in response to use of some
52266+ feature implemented by an unloaded module will be restricted to
52267+ root users. Enabling this option helps defend against attacks
52268+ by unprivileged users who abuse the auto-loading behavior to
52269+ cause a vulnerable module to load that is then exploited.
52270+
52271+ If this option prevents a legitimate use of auto-loading for a
52272+ non-root user, the administrator can execute modprobe manually
52273+ with the exact name of the module mentioned in the alert log.
52274+ Alternatively, the administrator can add the module to the list
52275+ of modules loaded at boot by modifying init scripts.
52276+
52277+ Modification of init scripts will most likely be needed on
52278+ Ubuntu servers with encrypted home directory support enabled,
52279+ as the first non-root user logging in will cause the ecb(aes),
52280+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
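
 As an illustration of the auto-loading path this option restricts (the
 address family below is only an example of a protocol typically built
 as a module):

    /* sketch: creating a socket for a not-yet-loaded protocol family
     * normally triggers module auto-loading; with GRKERNSEC_MODHARDEN
     * this is denied for unprivileged users (expect EAFNOSUPPORT) */
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = socket(AF_APPLETALK, SOCK_DGRAM, 0);
        if (fd < 0)
            perror("socket");
        else
            close(fd);
        return 0;
    }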
52281+
52282+config GRKERNSEC_HIDESYM
52283+ bool "Hide kernel symbols"
52284+ default y if GRKERNSEC_CONFIG_AUTO
52285+ select PAX_USERCOPY_SLABS
52286+ help
52287+ If you say Y here, getting information on loaded modules, and
52288+ displaying all kernel symbols through a syscall will be restricted
52289+ to users with CAP_SYS_MODULE. For software compatibility reasons,
52290+ /proc/kallsyms will be restricted to the root user. The RBAC
52291+ system can hide that entry even from root.
52292+
52293+ This option also prevents leaking of kernel addresses through
52294+ several /proc entries.
52295+
52296+ Note that this option is only effective provided the following
52297+ conditions are met:
52298+ 1) The kernel using grsecurity is not precompiled by some distribution
52299+ 2) You have also enabled GRKERNSEC_DMESG
52300+ 3) You are using the RBAC system and hiding other files such as your
52301+ kernel image and System.map. Alternatively, enabling this option
52302+ causes the permissions on /boot, /lib/modules, and the kernel
52303+ source directory to change at compile time to prevent
52304+ reading by non-root users.
52305+ If the above conditions are met, this option will aid in providing a
52306+ useful protection against local kernel exploitation of overflows
52307+ and arbitrary read/write vulnerabilities.
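
 To see the effect, a sketch: with this option /proc/kallsyms is
 readable only by root, so an unprivileged run of the following fails
 where it would normally print a symbol line:

    /* sketch: non-root reads of /proc/kallsyms are denied when
     * GRKERNSEC_HIDESYM is enabled */
    #include <stdio.h>

    int main(void)
    {
        char line[256];
        FILE *f = fopen("/proc/kallsyms", "r");
        if (!f) { perror("fopen"); return 1; }
        if (fgets(line, sizeof(line), f))
            fputs(line, stdout);    /* first symbol line */
        fclose(f);
        return 0;
    }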
52308+
52309+config GRKERNSEC_KERN_LOCKOUT
52310+ bool "Active kernel exploit response"
52311+ default y if GRKERNSEC_CONFIG_AUTO
52312+ depends on X86 || ARM || PPC || SPARC
52313+ help
52314+ If you say Y here, when a PaX alert is triggered due to suspicious
52315+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
52316+ or an OOPS occurs due to bad memory accesses, instead of just
52317+ terminating the offending process (and potentially allowing
52318+ a subsequent exploit from the same user), we will take one of two
52319+ actions:
52320+ If the user was root, we will panic the system
52321+ If the user was non-root, we will log the attempt, terminate
52322+ all processes owned by the user, then prevent them from creating
52323+ any new processes until the system is restarted
52324+ This deters repeated kernel exploitation/bruteforcing attempts
52325+ and is useful for later forensics.
52326+
52327+endmenu
52328+menu "Role Based Access Control Options"
52329+depends on GRKERNSEC
52330+
52331+config GRKERNSEC_RBAC_DEBUG
52332+ bool
52333+
52334+config GRKERNSEC_NO_RBAC
52335+ bool "Disable RBAC system"
52336+ help
52337+ If you say Y here, the /dev/grsec device will be removed from the kernel,
52338+ preventing the RBAC system from being enabled. You should only say Y
52339+ here if you have no intention of using the RBAC system, so as to prevent
52340+ an attacker with root access from misusing the RBAC system to hide files
52341+ and processes when loadable module support and /dev/[k]mem have been
52342+ locked down.
52343+
52344+config GRKERNSEC_ACL_HIDEKERN
52345+ bool "Hide kernel processes"
52346+ help
52347+ If you say Y here, all kernel threads will be hidden from all
52348+ processes but those whose subject has the "view hidden processes"
52349+ flag.
52350+
52351+config GRKERNSEC_ACL_MAXTRIES
52352+ int "Maximum tries before password lockout"
52353+ default 3
52354+ help
52355+ This option enforces the maximum number of times a user can attempt
52356+ to authorize themselves with the grsecurity RBAC system before being
52357+ denied the ability to attempt authorization again for a specified time.
52358+ The lower the number, the harder it will be to brute-force a password.
52359+
52360+config GRKERNSEC_ACL_TIMEOUT
52361+ int "Time to wait after max password tries, in seconds"
52362+ default 30
52363+ help
52364+ This option specifies the time the user must wait after attempting to
52365+ authorize to the RBAC system with the maximum number of invalid
52366+ passwords. The higher the number, the harder it will be to brute-force
52367+ a password.
52368+
52369+endmenu
52370+menu "Filesystem Protections"
52371+depends on GRKERNSEC
52372+
52373+config GRKERNSEC_PROC
52374+ bool "Proc restrictions"
52375+ default y if GRKERNSEC_CONFIG_AUTO
52376+ help
52377+ If you say Y here, the permissions of the /proc filesystem
52378+ will be altered to enhance system security and privacy. You MUST
52379+ choose either a user only restriction or a user and group restriction.
52380+ With the "restrict to user only" option, users can see only the
52381+ processes they themselves run; with the group option, you can
52382+ additionally choose a group that can view all processes and files
52383+ normally restricted to root. NOTE: If you're running identd or
52384+ ntpd as a non-root user, you will have to run it as the group you
52385+ specify here.
52386+
52387+config GRKERNSEC_PROC_USER
52388+ bool "Restrict /proc to user only"
52389+ depends on GRKERNSEC_PROC
52390+ help
52391+ If you say Y here, non-root users will only be able to view their own
52392+ processes and will be restricted from viewing network-related
52393+ information and kernel symbol and module information.
52394+
52395+config GRKERNSEC_PROC_USERGROUP
52396+ bool "Allow special group"
52397+ default y if GRKERNSEC_CONFIG_AUTO
52398+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
52399+ help
52400+ If you say Y here, you will be able to select a group that will be
52401+ able to view all processes and network-related information. If you've
52402+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
52403+ remain hidden. This option is useful if you want to run identd as
52404+ a non-root user. The group you select may also be chosen at boot time
52405+ via "grsec_proc_gid=" on the kernel command line.
52406+
52407+config GRKERNSEC_PROC_GID
52408+ int "GID for special group"
52409+ depends on GRKERNSEC_PROC_USERGROUP
52410+ default 1001
52411+
52412+config GRKERNSEC_PROC_ADD
52413+ bool "Additional restrictions"
52414+ default y if GRKERNSEC_CONFIG_AUTO
52415+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
52416+ help
52417+ If you say Y here, additional restrictions will be placed on
52418+ /proc that keep normal users from viewing device information and
52419+ slabinfo information that could be useful for exploits.
52420+
52421+config GRKERNSEC_LINK
52422+ bool "Linking restrictions"
52423+ default y if GRKERNSEC_CONFIG_AUTO
52424+ help
52425+ If you say Y here, /tmp race exploits will be prevented, since users
52426+ will no longer be able to follow symlinks owned by other users in
52427+ world-writable +t directories (e.g. /tmp), unless the owner of the
52428+ symlink is the owner of the directory. Users will also not be
52429+ able to hardlink to files they do not own. If the sysctl option is
52430+ enabled, a sysctl option with name "linking_restrictions" is created.
52431+
52432+config GRKERNSEC_SYMLINKOWN
52433+ bool "Kernel-enforced SymlinksIfOwnerMatch"
52434+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
52435+ help
52436+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
52437+ that prevents it from being used as a security feature. As Apache
52438+ verifies the symlink by performing a stat() against the target of
52439+ the symlink before it is followed, an attacker can set up a symlink
52440+ to point to a same-owned file, then replace the symlink with one
52441+ that targets another user's file just after Apache "validates" the
52442+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
52443+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
52444+ will be in place for the group you specify. If the sysctl option
52445+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
52446+ created.
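
 The racy check this option replaces looks roughly like the following
 sketch (a hypothetical helper for illustration, not Apache's actual
 code; the path is made up): the gap between stat() and open() is
 where the symlink can be swapped:

    /* sketch of the inherently racy SymlinksIfOwnerMatch check:
     * between stat() and open() the symlink target can be replaced */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    /* hypothetical illustration, not Apache's implementation */
    static int open_if_owner_matches(const char *path, uid_t owner)
    {
        struct stat st;
        if (stat(path, &st) != 0 || st.st_uid != owner)
            return -1;
        /* TOCTOU window: attacker swaps the symlink here */
        return open(path, O_RDONLY);
    }

    int main(void)
    {
        int fd = open_if_owner_matches("/var/www/html/page.html", getuid());
        if (fd < 0) { perror("open_if_owner_matches"); return 1; }
        close(fd);
        return 0;
    }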
52447+
52448+config GRKERNSEC_SYMLINKOWN_GID
52449+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
52450+ depends on GRKERNSEC_SYMLINKOWN
52451+ default 1006
52452+ help
52453+ Setting this GID determines what group kernel-enforced
52454+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
52455+ is enabled, a sysctl option with name "symlinkown_gid" is created.
52456+
52457+config GRKERNSEC_FIFO
52458+ bool "FIFO restrictions"
52459+ default y if GRKERNSEC_CONFIG_AUTO
52460+ help
52461+ If you say Y here, users will not be able to write to FIFOs they don't
52462+ own in world-writable +t directories (e.g. /tmp), unless the owner of
52463+ the FIFO is the same owner of the directory it's held in. If the sysctl
52464+ option is enabled, a sysctl option with name "fifo_restrictions" is
52465+ created.
52466+
52467+config GRKERNSEC_SYSFS_RESTRICT
52468+ bool "Sysfs/debugfs restriction"
52469+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
52470+ depends on SYSFS
52471+ help
52472+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
52473+ any filesystem normally mounted under it (e.g. debugfs) will be
52474+ mostly accessible only by root. These filesystems generally provide access
52475+ to hardware and debug information that isn't appropriate for unprivileged
52476+ users of the system. Sysfs and debugfs have also become a large source
52477+ of new vulnerabilities, ranging from infoleaks to local compromise.
52478+ There has been very little oversight with an eye toward security involved
52479+ in adding new exporters of information to these filesystems, so their
52480+ use is discouraged.
52481+ For reasons of compatibility, a few directories have been whitelisted
52482+ for access by non-root users:
52483+ /sys/fs/selinux
52484+ /sys/fs/fuse
52485+ /sys/devices/system/cpu
52486+
52487+config GRKERNSEC_ROFS
52488+ bool "Runtime read-only mount protection"
52489+ help
52490+ If you say Y here, a sysctl option with name "romount_protect" will
52491+ be created. By setting this option to 1 at runtime, filesystems
52492+ will be protected in the following ways:
52493+ * No new writable mounts will be allowed
52494+ * Existing read-only mounts won't be able to be remounted read/write
52495+ * Write operations will be denied on all block devices
52496+ This option acts independently of grsec_lock: once it is set to 1,
52497+ it cannot be turned off. Therefore, please be mindful of the resulting
52498+ behavior if this option is enabled in an init script on a read-only
52499+ filesystem. This feature is mainly intended for secure embedded systems.
52500+
52501+config GRKERNSEC_DEVICE_SIDECHANNEL
52502+ bool "Eliminate stat/notify-based device sidechannels"
52503+ default y if GRKERNSEC_CONFIG_AUTO
52504+ help
52505+ If you say Y here, timing analyses on block or character
52506+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
52507+ will be thwarted for unprivileged users. If a process without
52508+ CAP_MKNOD stats such a device, the last access and last modify times
52509+ will match the device's create time. No access or modify events
52510+ will be triggered through inotify/dnotify/fanotify for such devices.
52511+ This feature will prevent attacks that may at a minimum
52512+ allow an attacker to determine the administrator's password length.
52513+
52514+config GRKERNSEC_CHROOT
52515+ bool "Chroot jail restrictions"
52516+ default y if GRKERNSEC_CONFIG_AUTO
52517+ help
52518+ If you say Y here, you will be able to choose several options that will
52519+ make breaking out of a chrooted jail much more difficult. If you
52520+ encounter no software incompatibilities with the following options, it
52521+ is recommended that you enable each one.
52522+
52523+config GRKERNSEC_CHROOT_MOUNT
52524+ bool "Deny mounts"
52525+ default y if GRKERNSEC_CONFIG_AUTO
52526+ depends on GRKERNSEC_CHROOT
52527+ help
52528+ If you say Y here, processes inside a chroot will not be able to
52529+ mount or remount filesystems. If the sysctl option is enabled, a
52530+ sysctl option with name "chroot_deny_mount" is created.
52531+
52532+config GRKERNSEC_CHROOT_DOUBLE
52533+ bool "Deny double-chroots"
52534+ default y if GRKERNSEC_CONFIG_AUTO
52535+ depends on GRKERNSEC_CHROOT
52536+ help
52537+ If you say Y here, processes inside a chroot will not be able to chroot
52538+ again outside the chroot. This is a widely used method of breaking
52539+ out of a chroot jail and should not be allowed. If the sysctl
52540+ option is enabled, a sysctl option with name
52541+ "chroot_deny_chroot" is created.
52542+
52543+config GRKERNSEC_CHROOT_PIVOT
52544+ bool "Deny pivot_root in chroot"
52545+ default y if GRKERNSEC_CONFIG_AUTO
52546+ depends on GRKERNSEC_CHROOT
52547+ help
52548+ If you say Y here, processes inside a chroot will not be able to use
52549+ a function called pivot_root() that was introduced in Linux 2.3.41. It
52550+ works similarly to chroot in that it changes the root filesystem. This
52551+ function could be misused in a chrooted process to attempt to break out
52552+ of the chroot, and therefore should not be allowed. If the sysctl
52553+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
52554+ created.
52555+
52556+config GRKERNSEC_CHROOT_CHDIR
52557+ bool "Enforce chdir(\"/\") on all chroots"
52558+ default y if GRKERNSEC_CONFIG_AUTO
52559+ depends on GRKERNSEC_CHROOT
52560+ help
52561+ If you say Y here, the current working directory of all newly-chrooted
52562+ applications will be set to the root directory of the chroot.
52563+ The man page on chroot(2) states:
52564+ Note that this call does not change the current working
52565+ directory, so that `.' can be outside the tree rooted at
52566+ `/'. In particular, the super-user can escape from a
52567+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
52568+
52569+ It is recommended that you say Y here, since it's not known to break
52570+ any software. If the sysctl option is enabled, a sysctl option with
52571+ name "chroot_enforce_chdir" is created.
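
 The man-page escape quoted above, written out as a sketch (it requires
 root inside the jail; with this option and GRKERNSEC_CHROOT_DOUBLE it
 no longer works):

    /* sketch of the classic escape the quoted man page describes:
     * chroot() without chdir() leaves "." outside the new root */
    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
        if (mkdir("foo", 0700) != 0) perror("mkdir");
        if (chroot("foo") != 0) { perror("chroot"); return 1; }
        /* cwd is now outside the tree rooted at the new "/" */
        int i;
        for (i = 0; i < 64; i++)
            chdir("..");
        chroot(".");
        return execl("/bin/sh", "sh", (char *)NULL);
    }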
52572+
52573+config GRKERNSEC_CHROOT_CHMOD
52574+ bool "Deny (f)chmod +s"
52575+ default y if GRKERNSEC_CONFIG_AUTO
52576+ depends on GRKERNSEC_CHROOT
52577+ help
52578+ If you say Y here, processes inside a chroot will not be able to chmod
52579+ or fchmod files to make them have suid or sgid bits. This protects
52580+ against another published method of breaking a chroot. If the sysctl
52581+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
52582+ created.
52583+
52584+config GRKERNSEC_CHROOT_FCHDIR
52585+ bool "Deny fchdir out of chroot"
52586+ default y if GRKERNSEC_CONFIG_AUTO
52587+ depends on GRKERNSEC_CHROOT
52588+ help
52589+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
52590+ to a file descriptor of the chrooting process that points to a directory
52591+ outside the chroot will be stopped. If the sysctl option
52592+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
52593+
52594+config GRKERNSEC_CHROOT_MKNOD
52595+ bool "Deny mknod"
52596+ default y if GRKERNSEC_CONFIG_AUTO
52597+ depends on GRKERNSEC_CHROOT
52598+ help
52599+ If you say Y here, processes inside a chroot will not be allowed to
52600+ mknod. The problem with using mknod inside a chroot is that it
52601+ would allow an attacker to create a device entry that is the same
52602+ as one on the physical root of your system, which could be anything
52603+ from the console device to a device for your hard drive (which
52604+ they could then use to wipe the drive or steal data). It is recommended
52605+ that you say Y here, unless you run into software incompatibilities.
52606+ If the sysctl option is enabled, a sysctl option with name
52607+ "chroot_deny_mknod" is created.
52608+
52609+config GRKERNSEC_CHROOT_SHMAT
52610+ bool "Deny shmat() out of chroot"
52611+ default y if GRKERNSEC_CONFIG_AUTO
52612+ depends on GRKERNSEC_CHROOT
52613+ help
52614+ If you say Y here, processes inside a chroot will not be able to attach
52615+ to shared memory segments that were created outside of the chroot jail.
52616+ It is recommended that you say Y here. If the sysctl option is enabled,
52617+ a sysctl option with name "chroot_deny_shmat" is created.
52618+
52619+config GRKERNSEC_CHROOT_UNIX
52620+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
52621+ default y if GRKERNSEC_CONFIG_AUTO
52622+ depends on GRKERNSEC_CHROOT
52623+ help
52624+ If you say Y here, processes inside a chroot will not be able to
52625+ connect to abstract (meaning not belonging to a filesystem) Unix
52626+ domain sockets that were bound outside of a chroot. It is recommended
52627+ that you say Y here. If the sysctl option is enabled, a sysctl option
52628+ with name "chroot_deny_unix" is created.
52629+
52630+config GRKERNSEC_CHROOT_FINDTASK
52631+ bool "Protect outside processes"
52632+ default y if GRKERNSEC_CONFIG_AUTO
52633+ depends on GRKERNSEC_CHROOT
52634+ help
52635+ If you say Y here, processes inside a chroot will not be able to
52636+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
52637+ getsid, or view any process outside of the chroot. If the sysctl
52638+ option is enabled, a sysctl option with name "chroot_findtask" is
52639+ created.
52640+
52641+config GRKERNSEC_CHROOT_NICE
52642+ bool "Restrict priority changes"
52643+ default y if GRKERNSEC_CONFIG_AUTO
52644+ depends on GRKERNSEC_CHROOT
52645+ help
52646+ If you say Y here, processes inside a chroot will not be able to raise
52647+ the priority of processes in the chroot, or alter the priority of
52648+ processes outside the chroot. This provides more security than simply
52649+ removing CAP_SYS_NICE from the process' capability set. If the
52650+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
52651+ is created.
52652+
52653+config GRKERNSEC_CHROOT_SYSCTL
52654+ bool "Deny sysctl writes"
52655+ default y if GRKERNSEC_CONFIG_AUTO
52656+ depends on GRKERNSEC_CHROOT
52657+ help
52658+ If you say Y here, an attacker in a chroot will not be able to
52659+ write to sysctl entries, either by sysctl(2) or through a /proc
52660+ interface. It is strongly recommended that you say Y here. If the
52661+ sysctl option is enabled, a sysctl option with name
52662+ "chroot_deny_sysctl" is created.
52663+
52664+config GRKERNSEC_CHROOT_CAPS
52665+ bool "Capability restrictions"
52666+ default y if GRKERNSEC_CONFIG_AUTO
52667+ depends on GRKERNSEC_CHROOT
52668+ help
52669+ If you say Y here, the capabilities on all processes within a
52670+ chroot jail will be lowered to stop module insertion, raw i/o,
52671+ system and net admin tasks, rebooting the system, modifying immutable
52672+ files, modifying IPC owned by another, and changing the system time.
52673+ This is left an option because it can break some apps. Disable this
52674+ if your chrooted apps are having problems performing those kinds of
52675+ tasks. If the sysctl option is enabled, a sysctl option with
52676+ name "chroot_caps" is created.
52677+
52678+endmenu
52679+menu "Kernel Auditing"
52680+depends on GRKERNSEC
52681+
52682+config GRKERNSEC_AUDIT_GROUP
52683+ bool "Single group for auditing"
52684+ help
52685+ If you say Y here, the exec, chdir, and (un)mount logging features
52686+ will only operate on a group you specify. This option is recommended
52687+ if you only want to watch certain users instead of having a large
52688+ amount of logs from the entire system. If the sysctl option is enabled,
52689+ a sysctl option with name "audit_group" is created.
52690+
52691+config GRKERNSEC_AUDIT_GID
52692+ int "GID for auditing"
52693+ depends on GRKERNSEC_AUDIT_GROUP
52694+ default 1007
52695+
52696+config GRKERNSEC_EXECLOG
52697+ bool "Exec logging"
52698+ help
52699+ If you say Y here, all execve() calls will be logged (since the
52700+ other exec*() calls are frontends to execve(), all execution
52701+ will be logged). Useful for shell-servers that like to keep track
52702+ of their users. If the sysctl option is enabled, a sysctl option with
52703+ name "exec_logging" is created.
52704+ WARNING: This option when enabled will produce a LOT of logs, especially
52705+ on an active system.
52706+
52707+config GRKERNSEC_RESLOG
52708+ bool "Resource logging"
52709+ default y if GRKERNSEC_CONFIG_AUTO
52710+ help
52711+ If you say Y here, all attempts to overstep resource limits will
52712+ be logged with the resource name, the requested size, and the current
52713+ limit. It is highly recommended that you say Y here. If the sysctl
52714+ option is enabled, a sysctl option with name "resource_logging" is
52715+ created. If the RBAC system is enabled, the sysctl value is ignored.
52716+
52717+config GRKERNSEC_CHROOT_EXECLOG
52718+ bool "Log execs within chroot"
52719+ help
52720+ If you say Y here, all executions inside a chroot jail will be logged
52721+ to syslog. This can cause a large amount of logs if certain
52722+ applications (e.g. djb's daemontools) are installed on the system, and
52723+ is therefore left as an option. If the sysctl option is enabled, a
52724+ sysctl option with name "chroot_execlog" is created.
52725+
52726+config GRKERNSEC_AUDIT_PTRACE
52727+ bool "Ptrace logging"
52728+ help
52729+ If you say Y here, all attempts to attach to a process via ptrace
52730+ will be logged. If the sysctl option is enabled, a sysctl option
52731+ with name "audit_ptrace" is created.
52732+
52733+config GRKERNSEC_AUDIT_CHDIR
52734+ bool "Chdir logging"
52735+ help
52736+ If you say Y here, all chdir() calls will be logged. If the sysctl
52737+ option is enabled, a sysctl option with name "audit_chdir" is created.
52738+
52739+config GRKERNSEC_AUDIT_MOUNT
52740+ bool "(Un)Mount logging"
52741+ help
52742+ If you say Y here, all mounts and unmounts will be logged. If the
52743+ sysctl option is enabled, a sysctl option with name "audit_mount" is
52744+ created.
52745+
52746+config GRKERNSEC_SIGNAL
52747+ bool "Signal logging"
52748+ default y if GRKERNSEC_CONFIG_AUTO
52749+ help
52750+ If you say Y here, certain important signals will be logged, such as
52751+ SIGSEGV, which will, as a result, inform you when an error in a program
52752+ occurred, which in some cases could mean a possible exploit attempt.
52753+ If the sysctl option is enabled, a sysctl option with name
52754+ "signal_logging" is created.
52755+
52756+config GRKERNSEC_FORKFAIL
52757+ bool "Fork failure logging"
52758+ help
52759+ If you say Y here, all failed fork() attempts will be logged.
52760+ This could suggest a fork bomb, or someone attempting to overstep
52761+ their process limit. If the sysctl option is enabled, a sysctl option
52762+ with name "forkfail_logging" is created.
52763+
52764+config GRKERNSEC_TIME
52765+ bool "Time change logging"
52766+ default y if GRKERNSEC_CONFIG_AUTO
52767+ help
52768+ If you say Y here, any changes of the system clock will be logged.
52769+ If the sysctl option is enabled, a sysctl option with name
52770+ "timechange_logging" is created.
52771+
52772+config GRKERNSEC_PROC_IPADDR
52773+ bool "/proc/<pid>/ipaddr support"
52774+ default y if GRKERNSEC_CONFIG_AUTO
52775+ help
52776+ If you say Y here, a new entry will be added to each /proc/<pid>
52777+ directory that contains the IP address of the person using the task.
52778+ The IP is carried across local TCP and AF_UNIX stream sockets.
52779+ This information can be useful for IDS/IPSes to perform remote response
52780+ to a local attack. The entry is readable by only the owner of the
52781+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
52782+ the RBAC system), and thus does not create privacy concerns.
52783+
52784+config GRKERNSEC_RWXMAP_LOG
52785+ bool 'Denied RWX mmap/mprotect logging'
52786+ default y if GRKERNSEC_CONFIG_AUTO
52787+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
52788+ help
52789+ If you say Y here, calls to mmap() and mprotect() with explicit
52790+ usage of PROT_WRITE and PROT_EXEC together will be logged when
52791+ denied by the PAX_MPROTECT feature. If the sysctl option is
52792+ enabled, a sysctl option with name "rwxmap_logging" is created.
52793+
52794+config GRKERNSEC_AUDIT_TEXTREL
52795+ bool 'ELF text relocations logging (READ HELP)'
52796+ depends on PAX_MPROTECT
52797+ help
52798+ If you say Y here, text relocations will be logged with the filename
52799+ of the offending library or binary. The purpose of the feature is
52800+ to help Linux distribution developers get rid of libraries and
52801+ binaries that need text relocations which hinder the future progress
52802+ of PaX. Only Linux distribution developers should say Y here, and
52803+ never on a production machine, as this option creates an information
52804+ leak that could aid an attacker in defeating the randomization of
52805+ a single memory region. If the sysctl option is enabled, a sysctl
52806+ option with name "audit_textrel" is created.
52807+
52808+endmenu
52809+
52810+menu "Executable Protections"
52811+depends on GRKERNSEC
52812+
52813+config GRKERNSEC_DMESG
52814+ bool "Dmesg(8) restriction"
52815+ default y if GRKERNSEC_CONFIG_AUTO
52816+ help
52817+ If you say Y here, non-root users will not be able to use dmesg(8)
52818+ to view the contents of the kernel's circular log buffer.
52819+ The kernel's log buffer often contains kernel addresses and other
52820+ identifying information useful to an attacker in fingerprinting a
52821+ system for a targeted exploit.
52822+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
52823+ created.
52824+
52825+config GRKERNSEC_HARDEN_PTRACE
52826+ bool "Deter ptrace-based process snooping"
52827+ default y if GRKERNSEC_CONFIG_AUTO
52828+ help
52829+ If you say Y here, TTY sniffers and other malicious monitoring
52830+ programs implemented through ptrace will be defeated. If you
52831+ have been using the RBAC system, this option has already been
52832+ enabled for several years for all users, with the ability to make
52833+ fine-grained exceptions.
52834+
52835+ This option only affects the ability of non-root users to ptrace
52836+ processes that are not a descendant of the ptracing process.
52837+ This means that strace ./binary and gdb ./binary will still work,
52838+ but attaching to arbitrary processes will not. If the sysctl
52839+ option is enabled, a sysctl option with name "harden_ptrace" is
52840+ created.
52841+
52842+config GRKERNSEC_PTRACE_READEXEC
52843+ bool "Require read access to ptrace sensitive binaries"
52844+ default y if GRKERNSEC_CONFIG_AUTO
52845+ help
52846+ If you say Y here, unprivileged users will not be able to ptrace unreadable
52847+ binaries. This option is useful in environments that
52848+ remove the read bits (e.g. file mode 4711) from suid binaries to
52849+ prevent infoleaking of their contents. This option adds
52850+ consistency to the use of that file mode, as the binary's contents
52851+ could otherwise be read out by ptracing it when run without privileges.
52852+
52853+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
52854+ is created.
52855+
52856+config GRKERNSEC_SETXID
52857+ bool "Enforce consistent multithreaded privileges"
52858+ default y if GRKERNSEC_CONFIG_AUTO
52859+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
52860+ help
52861+ If you say Y here, a change from a root uid to a non-root uid
52862+ in a multithreaded application will cause the resulting uids,
52863+ gids, supplementary groups, and capabilities in that thread
52864+ to be propagated to the other threads of the process. In most
52865+ cases this is unnecessary, as glibc will emulate this behavior
52866+ on behalf of the application. Other libcs do not act in the
52867+ same way, allowing the other threads of the process to continue
52868+ running with root privileges. If the sysctl option is enabled,
52869+ a sysctl option with name "consistent_setxid" is created.
52870+
52871+config GRKERNSEC_TPE
52872+ bool "Trusted Path Execution (TPE)"
52873+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
52874+ help
52875+ If you say Y here, you will be able to choose a gid to add to the
52876+ supplementary groups of users you want to mark as "untrusted."
52877+ These users will not be able to execute any files that are not in
52878+ root-owned directories writable only by root. If the sysctl option
52879+ is enabled, a sysctl option with name "tpe" is created.
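
 Observed from a TPE-restricted account, the effect looks like the
 sketch below (the path is a made-up example of a location that is not
 root-owned and writable only by root):

    /* sketch: a TPE-untrusted user executing a binary from a
     * non-trusted directory gets the exec denied */
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        execl("/tmp/mytool", "mytool", (char *)NULL);  /* hypothetical path */
        perror("execl");    /* expect EACCES under TPE */
        return 1;
    }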
52880+
52881+config GRKERNSEC_TPE_ALL
52882+ bool "Partially restrict all non-root users"
52883+ depends on GRKERNSEC_TPE
52884+ help
52885+ If you say Y here, all non-root users will be covered under
52886+ a weaker TPE restriction. This is separate from, and in addition to,
52887+ the main TPE options that you have selected elsewhere. Thus, if a
52888+ "trusted" GID is chosen, this restriction applies to even that GID.
52889+ Under this restriction, all non-root users will only be allowed to
52890+ execute files in directories they own that are not group or
52891+ world-writable, or in directories owned by root and writable only by
52892+ root. If the sysctl option is enabled, a sysctl option with name
52893+ "tpe_restrict_all" is created.
52894+
52895+config GRKERNSEC_TPE_INVERT
52896+ bool "Invert GID option"
52897+ depends on GRKERNSEC_TPE
52898+ help
52899+ If you say Y here, the group you specify in the TPE configuration will
52900+ decide what group TPE restrictions will be *disabled* for. This
52901+ option is useful if you want TPE restrictions to be applied to most
52902+ users on the system. If the sysctl option is enabled, a sysctl option
52903+ with name "tpe_invert" is created. Unlike other sysctl options, this
52904+ entry will default to on for backward-compatibility.
52905+
52906+config GRKERNSEC_TPE_GID
52907+ int
52908+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
52909+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
52910+
52911+config GRKERNSEC_TPE_UNTRUSTED_GID
52912+ int "GID for TPE-untrusted users"
52913+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
52914+ default 1005
52915+ help
52916+ Setting this GID determines what group TPE restrictions will be
52917+ *enabled* for. If the sysctl option is enabled, a sysctl option
52918+ with name "tpe_gid" is created.
52919+
52920+config GRKERNSEC_TPE_TRUSTED_GID
52921+ int "GID for TPE-trusted users"
52922+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
52923+ default 1005
52924+ help
52925+ Setting this GID determines what group TPE restrictions will be
52926+ *disabled* for. If the sysctl option is enabled, a sysctl option
52927+ with name "tpe_gid" is created.
52928+
52929+endmenu
52930+menu "Network Protections"
52931+depends on GRKERNSEC
52932+
52933+config GRKERNSEC_RANDNET
52934+ bool "Larger entropy pools"
52935+ default y if GRKERNSEC_CONFIG_AUTO
52936+ help
52937+ If you say Y here, the entropy pools used for many features of Linux
52938+ and grsecurity will be doubled in size. Since several grsecurity
52939+ features use additional randomness, it is recommended that you say Y
52940+ here. Saying Y here has a similar effect as modifying
52941+ /proc/sys/kernel/random/poolsize.
52942+
52943+config GRKERNSEC_BLACKHOLE
52944+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
52945+ default y if GRKERNSEC_CONFIG_AUTO
52946+ depends on NET
52947+ help
52948+ If you say Y here, neither TCP resets nor ICMP
52949+ destination-unreachable packets will be sent in response to packets
52950+ sent to ports for which no associated listening process exists.
52951+ This feature supports both IPv4 and IPv6 and exempts the
52952+ loopback interface from blackholing. Enabling this feature
52953+ makes a host more resilient to DoS attacks and reduces network
52954+ visibility against scanners.
52955+
52956+ The blackhole feature as-implemented is equivalent to the FreeBSD
52957+ blackhole feature, as it prevents RST responses to all packets, not
52958+ just SYNs. Under most application behavior this causes no
52959+ problems, but applications (like haproxy) may not close certain
52960+ connections in a way that cleanly terminates them on the remote
52961+ end, leaving the remote host in LAST_ACK state. Because of this
52962+ side-effect and to prevent intentional LAST_ACK DoSes, this
52963+ feature also adds automatic mitigation against such attacks.
52964+ The mitigation drastically reduces the amount of time a socket
52965+ can spend in LAST_ACK state. If you're using haproxy and not
52966+ all servers it connects to have this option enabled, consider
52967+ disabling this feature on the haproxy host.
52968+
52969+ If the sysctl option is enabled, two sysctl options with names
52970+ "ip_blackhole" and "lastack_retries" will be created.
52971+ While "ip_blackhole" takes the standard zero/non-zero on/off
52972+ toggle, "lastack_retries" uses the same kinds of values as
52973+ "tcp_retries1" and "tcp_retries2". The default value of 4
52974+ prevents a socket from lasting more than 45 seconds in LAST_ACK
52975+ state.
52976+
52977+config GRKERNSEC_NO_SIMULT_CONNECT
52978+ bool "Disable TCP Simultaneous Connect"
52979+ default y if GRKERNSEC_CONFIG_AUTO
52980+ depends on NET
52981+ help
52982+ If you say Y here, a feature by Willy Tarreau will be enabled that
52983+ removes a weakness in Linux's strict implementation of TCP that
52984+ allows two clients to connect to each other without either entering
52985+ a listening state. The weakness allows an attacker to easily prevent
52986+ a client from connecting to a known server provided the source port
52987+ for the connection is guessed correctly.
52988+
52989+ As the weakness could be used to prevent an antivirus or IPS from
52990+ fetching updates, or prevent an SSL gateway from fetching a CRL,
52991+ it should be eliminated by enabling this option. Though Linux is
52992+ one of few operating systems supporting simultaneous connect, it
52993+ has no legitimate use in practice and is rarely supported by firewalls.
52994+
52995+config GRKERNSEC_SOCKET
52996+ bool "Socket restrictions"
52997+ depends on NET
52998+ help
52999+ If you say Y here, you will be able to choose from several options.
53000+ If you assign a GID on your system and add it to the supplementary
53001+ groups of users you want to restrict socket access to, this patch
53002+ will perform up to three things, based on the option(s) you choose.
53003+
53004+config GRKERNSEC_SOCKET_ALL
53005+ bool "Deny any sockets to group"
53006+ depends on GRKERNSEC_SOCKET
53007+ help
53008+ If you say Y here, you will be able to choose a GID whose users will
53009+ be unable to connect to other hosts from your machine or run server
53010+ applications from your machine. If the sysctl option is enabled, a
53011+ sysctl option with name "socket_all" is created.
53012+
53013+config GRKERNSEC_SOCKET_ALL_GID
53014+ int "GID to deny all sockets for"
53015+ depends on GRKERNSEC_SOCKET_ALL
53016+ default 1004
53017+ help
53018+ Here you can choose the GID to disable socket access for. Remember to
53019+ add the users you want socket access disabled for to the GID
53020+ specified here. If the sysctl option is enabled, a sysctl option
53021+ with name "socket_all_gid" is created.
53022+
53023+config GRKERNSEC_SOCKET_CLIENT
53024+ bool "Deny client sockets to group"
53025+ depends on GRKERNSEC_SOCKET
53026+ help
53027+ If you say Y here, you will be able to choose a GID whose users will
53028+ be unable to connect to other hosts from your machine, but will be
53029+ able to run servers. If this option is enabled, all users in the group
53030+ you specify will have to use passive mode when initiating ftp transfers
53031+ from the shell on your machine. If the sysctl option is enabled, a
53032+ sysctl option with name "socket_client" is created.
53033+
53034+config GRKERNSEC_SOCKET_CLIENT_GID
53035+ int "GID to deny client sockets for"
53036+ depends on GRKERNSEC_SOCKET_CLIENT
53037+ default 1003
53038+ help
53039+ Here you can choose the GID to disable client socket access for.
53040+ Remember to add the users you want client socket access disabled for to
53041+ the GID specified here. If the sysctl option is enabled, a sysctl
53042+ option with name "socket_client_gid" is created.
53043+
53044+config GRKERNSEC_SOCKET_SERVER
53045+ bool "Deny server sockets to group"
53046+ depends on GRKERNSEC_SOCKET
53047+ help
53048+ If you say Y here, you will be able to choose a GID whose users will
53049+ be unable to run server applications from your machine. If the sysctl
53050+ option is enabled, a sysctl option with name "socket_server" is created.
53051+
53052+config GRKERNSEC_SOCKET_SERVER_GID
53053+ int "GID to deny server sockets for"
53054+ depends on GRKERNSEC_SOCKET_SERVER
53055+ default 1002
53056+ help
53057+ Here you can choose the GID to disable server socket access for.
53058+ Remember to add the users you want server socket access disabled for to
53059+ the GID specified here. If the sysctl option is enabled, a sysctl
53060+ option with name "socket_server_gid" is created.
53061+
53062+endmenu
53063+menu "Sysctl Support"
53064+depends on GRKERNSEC && SYSCTL
53065+
53066+config GRKERNSEC_SYSCTL
53067+ bool "Sysctl support"
53068+ default y if GRKERNSEC_CONFIG_AUTO
53069+ help
53070+ If you say Y here, you will be able to change the options that
53071+ grsecurity runs with at bootup, without having to recompile your
53072+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
53073+ to enable (1) or disable (0) various features. All the sysctl entries
53074+ are mutable until the "grsec_lock" entry is set to a non-zero value.
53075+ All features enabled in the kernel configuration are disabled at boot
53076+ if you do not say Y to the "Turn on features by default" option.
53077+ All options should be set at startup, and the grsec_lock entry should
53078+ be set to a non-zero value after all the options are set.
53079+ *THIS IS EXTREMELY IMPORTANT*
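
 A boot-time init sketch of the workflow described above (the sysctl
 names are ones created by this menu; adjust the feature toggles to
 what you enabled):

    /* sketch: set grsecurity sysctls at startup, then lock them */
    #include <stdio.h>

    static void grsec_set(const char *name, const char *val)
    {
        char path[256];
        snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
        FILE *f = fopen(path, "w");
        if (!f) { perror(path); return; }
        fputs(val, f);
        fclose(f);
    }

    int main(void)
    {
        grsec_set("exec_logging", "1");  /* example feature toggle */
        grsec_set("grsec_lock", "1");    /* MUST be last: locks all entries */
        return 0;
    }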
53080+
53081+config GRKERNSEC_SYSCTL_DISTRO
53082+ bool "Extra sysctl support for distro makers (READ HELP)"
53083+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
53084+ help
53085+ If you say Y here, additional sysctl options will be created
53086+ for features that affect processes running as root. Therefore,
53087+ it is critical when using this option that the grsec_lock entry be
53088+ enabled after boot. Only distros shipping prebuilt kernel packages
53089+ with this option enabled, and that can ensure grsec_lock is set
53090+ after boot, should use this option.
53091+ *Failure to set grsec_lock after boot makes all grsec features
53092+ this option covers useless*
53093+
53094+ Currently this option creates the following sysctl entries:
53095+ "Disable Privileged I/O": "disable_priv_io"
53096+
53097+config GRKERNSEC_SYSCTL_ON
53098+ bool "Turn on features by default"
53099+ default y if GRKERNSEC_CONFIG_AUTO
53100+ depends on GRKERNSEC_SYSCTL
53101+ help
53102+ If you say Y here, instead of having all features enabled in the
53103+ kernel configuration disabled at boot time, the features will be
53104+ enabled at boot time. It is recommended you say Y here unless
53105+ there is some reason you would want all sysctl-tunable features to
53106+ be disabled by default. As mentioned elsewhere, it is important
53107+ to enable the grsec_lock entry once you have finished modifying
53108+ the sysctl entries.
53109+
53110+endmenu
53111+menu "Logging Options"
53112+depends on GRKERNSEC
53113+
53114+config GRKERNSEC_FLOODTIME
53115+ int "Seconds in between log messages (minimum)"
53116+ default 10
53117+ help
53118+ This option allows you to enforce the minimum number of seconds between
53119+ grsecurity log messages. The default should be suitable for most
53120+ people; however, if you choose to change it, choose a value small enough
53121+ to allow informative logs to be produced, but large enough to
53122+ prevent flooding.
53123+
53124+config GRKERNSEC_FLOODBURST
53125+ int "Number of messages in a burst (maximum)"
53126+ default 6
53127+ help
53128+ This option allows you to choose the maximum number of messages allowed
53129+ within the flood time interval you chose in a separate option. The
53130+ default should be suitable for most people; however, if you find that
53131+ many of your logs are being interpreted as flooding, you may want to
53132+ raise this value.
53133+
53134+endmenu
53135diff --git a/grsecurity/Makefile b/grsecurity/Makefile
53136new file mode 100644
53137index 0000000..1b9afa9
53138--- /dev/null
53139+++ b/grsecurity/Makefile
53140@@ -0,0 +1,38 @@
53141+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
53142+# during 2001-2009 it was completely redesigned by Brad Spengler
53143+# into an RBAC system
53144+#
53145+# All code in this directory and various hooks inserted throughout the kernel
53146+# are copyright Brad Spengler - Open Source Security, Inc., and released
53147+# under the GPL v2 or higher
53148+
53149+KBUILD_CFLAGS += -Werror
53150+
53151+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
53152+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
53153+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
53154+
53155+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
53156+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
53157+ gracl_learn.o grsec_log.o
53158+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
53159+
53160+ifdef CONFIG_NET
53161+obj-y += grsec_sock.o
53162+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
53163+endif
53164+
53165+ifndef CONFIG_GRKERNSEC
53166+obj-y += grsec_disabled.o
53167+endif
53168+
53169+ifdef CONFIG_GRKERNSEC_HIDESYM
53170+extra-y := grsec_hidesym.o
53171+$(obj)/grsec_hidesym.o:
53172+ @-chmod -f 500 /boot
53173+ @-chmod -f 500 /lib/modules
53174+ @-chmod -f 500 /lib64/modules
53175+ @-chmod -f 500 /lib32/modules
53176+ @-chmod -f 700 .
53177+ @echo ' grsec: protected kernel image paths'
53178+endif
53179diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
53180new file mode 100644
53181index 0000000..78353bd
53182--- /dev/null
53183+++ b/grsecurity/gracl.c
53184@@ -0,0 +1,4039 @@
53185+#include <linux/kernel.h>
53186+#include <linux/module.h>
53187+#include <linux/sched.h>
53188+#include <linux/mm.h>
53189+#include <linux/file.h>
53190+#include <linux/fs.h>
53191+#include <linux/namei.h>
53192+#include <linux/mount.h>
53193+#include <linux/tty.h>
53194+#include <linux/proc_fs.h>
53195+#include <linux/lglock.h>
53196+#include <linux/slab.h>
53197+#include <linux/vmalloc.h>
53198+#include <linux/types.h>
53199+#include <linux/sysctl.h>
53200+#include <linux/netdevice.h>
53201+#include <linux/ptrace.h>
53202+#include <linux/gracl.h>
53203+#include <linux/gralloc.h>
53204+#include <linux/security.h>
53205+#include <linux/grinternal.h>
53206+#include <linux/pid_namespace.h>
53207+#include <linux/stop_machine.h>
53208+#include <linux/fdtable.h>
53209+#include <linux/percpu.h>
53211+#include "../fs/mount.h"
53212+
53213+#include <asm/uaccess.h>
53214+#include <asm/errno.h>
53215+#include <asm/mman.h>
53216+
53217+extern struct lglock vfsmount_lock;
53218+
53219+static struct acl_role_db acl_role_set;
53220+static struct name_db name_set;
53221+static struct inodev_db inodev_set;
53222+
53223+/* for keeping track of userspace pointers used for subjects, so we
53224+ can share references in the kernel as well
53225+*/
53226+
53227+static struct path real_root;
53228+
53229+static struct acl_subj_map_db subj_map_set;
53230+
53231+static struct acl_role_label *default_role;
53232+
53233+static struct acl_role_label *role_list;
53234+
53235+static u16 acl_sp_role_value;
53236+
53237+extern char *gr_shared_page[4];
53238+static DEFINE_MUTEX(gr_dev_mutex);
53239+DEFINE_RWLOCK(gr_inode_lock);
53240+
53241+struct gr_arg *gr_usermode;
53242+
53243+static unsigned int gr_status __read_only = GR_STATUS_INIT;
53244+
53245+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
53246+extern void gr_clear_learn_entries(void);
53247+
53248+#ifdef CONFIG_GRKERNSEC_RESLOG
53249+extern void gr_log_resource(const struct task_struct *task,
53250+ const int res, const unsigned long wanted, const int gt);
53251+#endif
53252+
53253+unsigned char *gr_system_salt;
53254+unsigned char *gr_system_sum;
53255+
53256+static struct sprole_pw **acl_special_roles = NULL;
53257+static __u16 num_sprole_pws = 0;
53258+
53259+static struct acl_role_label *kernel_role = NULL;
53260+
53261+static unsigned int gr_auth_attempts = 0;
53262+static unsigned long gr_auth_expires = 0UL;
53263+
53264+#ifdef CONFIG_NET
53265+extern struct vfsmount *sock_mnt;
53266+#endif
53267+
53268+extern struct vfsmount *pipe_mnt;
53269+extern struct vfsmount *shm_mnt;
53270+#ifdef CONFIG_HUGETLBFS
53271+extern struct vfsmount *hugetlbfs_vfsmount;
53272+#endif
53273+
53274+static struct acl_object_label *fakefs_obj_rw;
53275+static struct acl_object_label *fakefs_obj_rwx;
53276+
53277+extern int gr_init_uidset(void);
53278+extern void gr_free_uidset(void);
53279+extern void gr_remove_uid(uid_t uid);
53280+extern int gr_find_uid(uid_t uid);
53281+
53282+__inline__ int
53283+gr_acl_is_enabled(void)
53284+{
53285+ return (gr_status & GR_READY);
53286+}
53287+
53288+#ifdef CONFIG_BTRFS_FS
53289+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
53290+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
53291+#endif
53292+
53293+static inline dev_t __get_dev(const struct dentry *dentry)
53294+{
53295+#ifdef CONFIG_BTRFS_FS
53296+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
53297+ return get_btrfs_dev_from_inode(dentry->d_inode);
53298+ else
53299+#endif
53300+ return dentry->d_inode->i_sb->s_dev;
53301+}
53302+
53303+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
53304+{
53305+ return __get_dev(dentry);
53306+}
53307+
53308+static char gr_task_roletype_to_char(struct task_struct *task)
53309+{
53310+ switch (task->role->roletype &
53311+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
53312+ GR_ROLE_SPECIAL)) {
53313+ case GR_ROLE_DEFAULT:
53314+ return 'D';
53315+ case GR_ROLE_USER:
53316+ return 'U';
53317+ case GR_ROLE_GROUP:
53318+ return 'G';
53319+ case GR_ROLE_SPECIAL:
53320+ return 'S';
53321+ }
53322+
53323+ return 'X';
53324+}
53325+
53326+char gr_roletype_to_char(void)
53327+{
53328+ return gr_task_roletype_to_char(current);
53329+}
53330+
53331+__inline__ int
53332+gr_acl_tpe_check(void)
53333+{
53334+ if (unlikely(!(gr_status & GR_READY)))
53335+ return 0;
53336+ if (current->role->roletype & GR_ROLE_TPE)
53337+ return 1;
53338+ else
53339+ return 0;
53340+}
53341+
53342+int
53343+gr_handle_rawio(const struct inode *inode)
53344+{
53345+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53346+ if (inode && S_ISBLK(inode->i_mode) &&
53347+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
53348+ !capable(CAP_SYS_RAWIO))
53349+ return 1;
53350+#endif
53351+ return 0;
53352+}
53353+
53354+static int
53355+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
53356+{
53357+ if (likely(lena != lenb))
53358+ return 0;
53359+
53360+ return !memcmp(a, b, lena);
53361+}
53362+
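/* the helpers below build pathnames right-to-left: prepend() moves the
 * cursor in *buffer back by namelen before copying, with *buflen
 * tracking the space remaining at the front of the buffer */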
53363+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
53364+{
53365+ *buflen -= namelen;
53366+ if (*buflen < 0)
53367+ return -ENAMETOOLONG;
53368+ *buffer -= namelen;
53369+ memcpy(*buffer, str, namelen);
53370+ return 0;
53371+}
53372+
53373+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
53374+{
53375+ return prepend(buffer, buflen, name->name, name->len);
53376+}
53377+
53378+static int prepend_path(const struct path *path, struct path *root,
53379+ char **buffer, int *buflen)
53380+{
53381+ struct dentry *dentry = path->dentry;
53382+ struct vfsmount *vfsmnt = path->mnt;
53383+ struct mount *mnt = real_mount(vfsmnt);
53384+ bool slash = false;
53385+ int error = 0;
53386+
53387+ while (dentry != root->dentry || vfsmnt != root->mnt) {
53388+ struct dentry * parent;
53389+
53390+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
53391+ /* Global root? */
53392+ if (!mnt_has_parent(mnt)) {
53393+ goto out;
53394+ }
53395+ dentry = mnt->mnt_mountpoint;
53396+ mnt = mnt->mnt_parent;
53397+ vfsmnt = &mnt->mnt;
53398+ continue;
53399+ }
53400+ parent = dentry->d_parent;
53401+ prefetch(parent);
53402+ spin_lock(&dentry->d_lock);
53403+ error = prepend_name(buffer, buflen, &dentry->d_name);
53404+ spin_unlock(&dentry->d_lock);
53405+ if (!error)
53406+ error = prepend(buffer, buflen, "/", 1);
53407+ if (error)
53408+ break;
53409+
53410+ slash = true;
53411+ dentry = parent;
53412+ }
53413+
53414+out:
53415+ if (!error && !slash)
53416+ error = prepend(buffer, buflen, "/", 1);
53417+
53418+ return error;
53419+}
53420+
53421+/* this must be called with vfsmount_lock and rename_lock held */
53422+
53423+static char *__our_d_path(const struct path *path, struct path *root,
53424+ char *buf, int buflen)
53425+{
53426+ char *res = buf + buflen;
53427+ int error;
53428+
53429+ prepend(&res, &buflen, "\0", 1);
53430+ error = prepend_path(path, root, &res, &buflen);
53431+ if (error)
53432+ return ERR_PTR(error);
53433+
53434+ return res;
53435+}
53436+
53437+static char *
53438+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
53439+{
53440+ char *retval;
53441+
53442+ retval = __our_d_path(path, root, buf, buflen);
53443+ if (unlikely(IS_ERR(retval)))
53444+ retval = strcpy(buf, "<path too long>");
53445+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
53446+ retval[1] = '\0';
53447+
53448+ return retval;
53449+}
53450+
53451+static char *
53452+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
53453+ char *buf, int buflen)
53454+{
53455+ struct path path;
53456+ char *res;
53457+
53458+ path.dentry = (struct dentry *)dentry;
53459+ path.mnt = (struct vfsmount *)vfsmnt;
53460+
53461+ /* we can use real_root.dentry, real_root.mnt, because this is only called
53462+ by the RBAC system */
53463+ res = gen_full_path(&path, &real_root, buf, buflen);
53464+
53465+ return res;
53466+}
53467+
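/* d_real_path() resolves against the init task's real root (unlike
 * __d_real_path(), which uses the RBAC-captured real_root) and takes
 * rename_lock/vfsmount_lock itself, so callers must not hold them */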
53468+static char *
53469+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
53470+ char *buf, int buflen)
53471+{
53472+ char *res;
53473+ struct path path;
53474+ struct path root;
53475+ struct task_struct *reaper = init_pid_ns.child_reaper;
53476+
53477+ path.dentry = (struct dentry *)dentry;
53478+ path.mnt = (struct vfsmount *)vfsmnt;
53479+
53480+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
53481+ get_fs_root(reaper->fs, &root);
53482+
53483+ write_seqlock(&rename_lock);
53484+ br_read_lock(&vfsmount_lock);
53485+ res = gen_full_path(&path, &root, buf, buflen);
53486+ br_read_unlock(&vfsmount_lock);
53487+ write_sequnlock(&rename_lock);
53488+
53489+ path_put(&root);
53490+ return res;
53491+}
53492+
53493+static char *
53494+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
53495+{
53496+ char *ret;
53497+ write_seqlock(&rename_lock);
53498+ br_read_lock(&vfsmount_lock);
53499+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
53500+ PAGE_SIZE);
53501+ br_read_unlock(&vfsmount_lock);
53502+ write_sequnlock(&rename_lock);
53503+ return ret;
53504+}
53505+
53506+static char *
53507+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
53508+{
53509+ char *ret;
53510+ char *buf;
53511+ int buflen;
53512+
53513+ write_seqlock(&rename_lock);
53514+ br_read_lock(&vfsmount_lock);
53515+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
53516+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
53517+ buflen = (int)(ret - buf);
53518+ if (buflen >= 5)
53519+ prepend(&ret, &buflen, "/proc", 5);
53520+ else
53521+ ret = strcpy(buf, "<path too long>");
53522+ br_read_unlock(&vfsmount_lock);
53523+ write_sequnlock(&rename_lock);
53524+ return ret;
53525+}
53526+
53527+char *
53528+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
53529+{
53530+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
53531+ PAGE_SIZE);
53532+}
53533+
53534+char *
53535+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
53536+{
53537+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
53538+ PAGE_SIZE);
53539+}
53540+
53541+char *
53542+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
53543+{
53544+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
53545+ PAGE_SIZE);
53546+}
53547+
53548+char *
53549+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
53550+{
53551+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
53552+ PAGE_SIZE);
53553+}
53554+
53555+char *
53556+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
53557+{
53558+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
53559+ PAGE_SIZE);
53560+}
53561+
53562+__inline__ __u32
53563+to_gr_audit(const __u32 reqmode)
53564+{
53565+ /* strips any audit flags from the request, shifts the remaining
53566+    permission flags into their audit-flag positions, and adds the
53567+    special case of append auditing when write is requested */
53568+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
53569+}
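+/* Worked example (flag values assumed purely for illustration; the << 10
+ * implies each GR_AUDIT_* bit sits ten positions above its permission bit):
+ * with GR_READ = 0x1 and GR_WRITE = 0x2, reqmode = GR_READ | GR_WRITE gives
+ * (reqmode & ~GR_AUDITS) << 10 = GR_AUDIT_READ | GR_AUDIT_WRITE, and the
+ * GR_WRITE test then ORs in GR_AUDIT_APPEND, since a granted write also
+ * permits append.
+ */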
53570+
53571+struct acl_subject_label *
53572+lookup_subject_map(const struct acl_subject_label *userp)
53573+{
53574+ unsigned int index = gr_shash(userp, subj_map_set.s_size);
53575+ struct subject_map *match;
53576+
53577+ match = subj_map_set.s_hash[index];
53578+
53579+ while (match && match->user != userp)
53580+ match = match->next;
53581+
53582+ if (match != NULL)
53583+ return match->kernel;
53584+ else
53585+ return NULL;
53586+}
53587+
53588+static void
53589+insert_subj_map_entry(struct subject_map *subjmap)
53590+{
53591+ unsigned int index = gr_shash(subjmap->user, subj_map_set.s_size);
53592+ struct subject_map **curr;
53593+
53594+ subjmap->prev = NULL;
53595+
53596+ curr = &subj_map_set.s_hash[index];
53597+ if (*curr != NULL)
53598+ (*curr)->prev = subjmap;
53599+
53600+ subjmap->next = *curr;
53601+ *curr = subjmap;
53602+
53603+ return;
53604+}
53605+
53606+static struct acl_role_label *
53607+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
53608+ const gid_t gid)
53609+{
53610+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
53611+ struct acl_role_label *match;
53612+ struct role_allowed_ip *ipp;
53613+ unsigned int x;
53614+ u32 curr_ip = task->signal->curr_ip;
53615+
53616+ task->signal->saved_ip = curr_ip;
53617+
53618+ match = acl_role_set.r_hash[index];
53619+
53620+ while (match) {
53621+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
53622+ for (x = 0; x < match->domain_child_num; x++) {
53623+ if (match->domain_children[x] == uid)
53624+ goto found;
53625+ }
53626+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
53627+ break;
53628+ match = match->next;
53629+ }
53630+found:
53631+ if (match == NULL) {
53632+ try_group:
53633+ index = gr_rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
53634+ match = acl_role_set.r_hash[index];
53635+
53636+ while (match) {
53637+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
53638+ for (x = 0; x < match->domain_child_num; x++) {
53639+ if (match->domain_children[x] == gid)
53640+ goto found2;
53641+ }
53642+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
53643+ break;
53644+ match = match->next;
53645+ }
53646+found2:
53647+ if (match == NULL)
53648+ match = default_role;
53649+ if (match->allowed_ips == NULL)
53650+ return match;
53651+ else {
53652+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
53653+ if (likely
53654+ ((ntohl(curr_ip) & ipp->netmask) ==
53655+ (ntohl(ipp->addr) & ipp->netmask)))
53656+ return match;
53657+ }
53658+ match = default_role;
53659+ }
53660+ } else if (match->allowed_ips == NULL) {
53661+ return match;
53662+ } else {
53663+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
53664+ if (likely
53665+ ((ntohl(curr_ip) & ipp->netmask) ==
53666+ (ntohl(ipp->addr) & ipp->netmask)))
53667+ return match;
53668+ }
53669+ goto try_group;
53670+ }
53671+
53672+ return match;
53673+}
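+/* Example of the allowed-IP filtering above, with assumed sample values:
+ * given an entry with addr = 10.0.0.0 and netmask = 0xffffff00 (a /24), a
+ * task whose saved curr_ip is 10.0.0.42 passes
+ * (ntohl(curr_ip) & netmask) == (ntohl(addr) & netmask) and keeps the
+ * matched role, while 10.0.1.42 fails every entry: a user role then retries
+ * via try_group, and a group role falls back to default_role.
+ */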
53674+
53675+struct acl_subject_label *
53676+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
53677+ const struct acl_role_label *role)
53678+{
53679+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
53680+ struct acl_subject_label *match;
53681+
53682+ match = role->subj_hash[index];
53683+
53684+ while (match && (match->inode != ino || match->device != dev ||
53685+ (match->mode & GR_DELETED))) {
53686+ match = match->next;
53687+ }
53688+
53689+ if (match && !(match->mode & GR_DELETED))
53690+ return match;
53691+ else
53692+ return NULL;
53693+}
53694+
53695+struct acl_subject_label *
53696+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
53697+ const struct acl_role_label *role)
53698+{
53699+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
53700+ struct acl_subject_label *match;
53701+
53702+ match = role->subj_hash[index];
53703+
53704+ while (match && (match->inode != ino || match->device != dev ||
53705+ !(match->mode & GR_DELETED))) {
53706+ match = match->next;
53707+ }
53708+
53709+ if (match && (match->mode & GR_DELETED))
53710+ return match;
53711+ else
53712+ return NULL;
53713+}
53714+
53715+static struct acl_object_label *
53716+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
53717+ const struct acl_subject_label *subj)
53718+{
53719+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
53720+ struct acl_object_label *match;
53721+
53722+ match = subj->obj_hash[index];
53723+
53724+ while (match && (match->inode != ino || match->device != dev ||
53725+ (match->mode & GR_DELETED))) {
53726+ match = match->next;
53727+ }
53728+
53729+ if (match && !(match->mode & GR_DELETED))
53730+ return match;
53731+ else
53732+ return NULL;
53733+}
53734+
53735+static struct acl_object_label *
53736+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
53737+ const struct acl_subject_label *subj)
53738+{
53739+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
53740+ struct acl_object_label *match;
53741+
53742+ match = subj->obj_hash[index];
53743+
53744+ while (match && (match->inode != ino || match->device != dev ||
53745+ !(match->mode & GR_DELETED))) {
53746+ match = match->next;
53747+ }
53748+
53749+ if (match && (match->mode & GR_DELETED))
53750+ return match;
53751+
53752+ match = subj->obj_hash[index];
53753+
53754+ while (match && (match->inode != ino || match->device != dev ||
53755+ (match->mode & GR_DELETED))) {
53756+ match = match->next;
53757+ }
53758+
53759+ if (match && !(match->mode & GR_DELETED))
53760+ return match;
53761+ else
53762+ return NULL;
53763+}
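+/* The two-pass create-time lookups above deliberately prefer a GR_DELETED
+ * entry: presumably so that when a labeled path is unlinked and later
+ * recreated, the new file first picks up the label of the deleted object,
+ * and only if no such entry exists does the second pass accept a live
+ * (non-deleted) match.
+ */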
53764+
53765+static struct name_entry *
53766+lookup_name_entry(const char *name)
53767+{
53768+ unsigned int len = strlen(name);
53769+ unsigned int key = full_name_hash(name, len);
53770+ unsigned int index = key % name_set.n_size;
53771+ struct name_entry *match;
53772+
53773+ match = name_set.n_hash[index];
53774+
53775+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
53776+ match = match->next;
53777+
53778+ return match;
53779+}
53780+
53781+static struct name_entry *
53782+lookup_name_entry_create(const char *name)
53783+{
53784+ unsigned int len = strlen(name);
53785+ unsigned int key = full_name_hash(name, len);
53786+ unsigned int index = key % name_set.n_size;
53787+ struct name_entry *match;
53788+
53789+ match = name_set.n_hash[index];
53790+
53791+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
53792+ !match->deleted))
53793+ match = match->next;
53794+
53795+ if (match && match->deleted)
53796+ return match;
53797+
53798+ match = name_set.n_hash[index];
53799+
53800+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
53801+ match->deleted))
53802+ match = match->next;
53803+
53804+ if (match && !match->deleted)
53805+ return match;
53806+ else
53807+ return NULL;
53808+}
53809+
53810+static struct inodev_entry *
53811+lookup_inodev_entry(const ino_t ino, const dev_t dev)
53812+{
53813+ unsigned int index = gr_fhash(ino, dev, inodev_set.i_size);
53814+ struct inodev_entry *match;
53815+
53816+ match = inodev_set.i_hash[index];
53817+
53818+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
53819+ match = match->next;
53820+
53821+ return match;
53822+}
53823+
53824+static void
53825+insert_inodev_entry(struct inodev_entry *entry)
53826+{
53827+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
53828+ inodev_set.i_size);
53829+ struct inodev_entry **curr;
53830+
53831+ entry->prev = NULL;
53832+
53833+ curr = &inodev_set.i_hash[index];
53834+ if (*curr != NULL)
53835+ (*curr)->prev = entry;
53836+
53837+ entry->next = *curr;
53838+ *curr = entry;
53839+
53840+ return;
53841+}
53842+
53843+static void
53844+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
53845+{
53846+ unsigned int index =
53847+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
53848+ struct acl_role_label **curr;
53849+ struct acl_role_label *tmp, *tmp2;
53850+
53851+ curr = &acl_role_set.r_hash[index];
53852+
53853+ /* simple case, slot is empty, just set it to our role */
53854+ if (*curr == NULL) {
53855+ *curr = role;
53856+ } else {
53857+ /* example:
53858+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
53859+ 2 -> 3
53860+ */
53861+ /* first check to see if we can already be reached via this slot */
53862+ tmp = *curr;
53863+ while (tmp && tmp != role)
53864+ tmp = tmp->next;
53865+ if (tmp == role) {
53866+ /* we don't need to add ourselves to this slot's chain */
53867+ return;
53868+ }
53869+ /* we need to add ourselves to this chain, two cases */
53870+ if (role->next == NULL) {
53871+ /* simple case, append the current chain to our role */
53872+ role->next = *curr;
53873+ *curr = role;
53874+ } else {
53875+ /* 1 -> 2 -> 3 -> 4
53876+ 2 -> 3 -> 4
53877+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
53878+ */
53879+ /* trickier case: walk our role's chain until we find
53880+ the role for the start of the current slot's chain */
53881+ tmp = role;
53882+ tmp2 = *curr;
53883+ while (tmp->next && tmp->next != tmp2)
53884+ tmp = tmp->next;
53885+ if (tmp->next == tmp2) {
53886+ /* from example above, we found 3, so just
53887+ replace this slot's chain with ours */
53888+ *curr = role;
53889+ } else {
53890+ /* we didn't find a subset of our role's chain
53891+ in the current slot's chain, so append their
53892+ chain to ours, and set us as the first role in
53893+ the slot's chain
53894+
53895+ we could fold this case with the case above,
53896+ but making it explicit for clarity
53897+ */
53898+ tmp->next = tmp2;
53899+ *curr = role;
53900+ }
53901+ }
53902+ }
53903+
53904+ return;
53905+}
53906+
53907+static void
53908+insert_acl_role_label(struct acl_role_label *role)
53909+{
53910+ int i;
53911+
53912+ if (role_list == NULL) {
53913+ role_list = role;
53914+ role->prev = NULL;
53915+ } else {
53916+ role->prev = role_list;
53917+ role_list = role;
53918+ }
53919+
53920+ /* used for hash chains */
53921+ role->next = NULL;
53922+
53923+ if (role->roletype & GR_ROLE_DOMAIN) {
53924+ for (i = 0; i < role->domain_child_num; i++)
53925+ __insert_acl_role_label(role, role->domain_children[i]);
53926+ } else
53927+ __insert_acl_role_label(role, role->uidgid);
53928+}
53929+
53930+static int
53931+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
53932+{
53933+ struct name_entry **curr, *nentry;
53934+ struct inodev_entry *ientry;
53935+ unsigned int len = strlen(name);
53936+ unsigned int key = full_name_hash(name, len);
53937+ unsigned int index = key % name_set.n_size;
53938+
53939+ curr = &name_set.n_hash[index];
53940+
53941+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
53942+ curr = &((*curr)->next);
53943+
53944+ if (*curr != NULL)
53945+ return 1;
53946+
53947+ nentry = acl_alloc(sizeof (struct name_entry));
53948+ if (nentry == NULL)
53949+ return 0;
53950+ ientry = acl_alloc(sizeof (struct inodev_entry));
53951+ if (ientry == NULL)
53952+ return 0;
53953+ ientry->nentry = nentry;
53954+
53955+ nentry->key = key;
53956+ nentry->name = name;
53957+ nentry->inode = inode;
53958+ nentry->device = device;
53959+ nentry->len = len;
53960+ nentry->deleted = deleted;
53961+
53962+ nentry->prev = NULL;
53963+ curr = &name_set.n_hash[index];
53964+ if (*curr != NULL)
53965+ (*curr)->prev = nentry;
53966+ nentry->next = *curr;
53967+ *curr = nentry;
53968+
53969+ /* insert us into the table searchable by inode/dev */
53970+ insert_inodev_entry(ientry);
53971+
53972+ return 1;
53973+}
53974+
53975+static void
53976+insert_acl_obj_label(struct acl_object_label *obj,
53977+ struct acl_subject_label *subj)
53978+{
53979+ unsigned int index =
53980+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
53981+ struct acl_object_label **curr;
53982+
53983+
53984+ obj->prev = NULL;
53985+
53986+ curr = &subj->obj_hash[index];
53987+ if (*curr != NULL)
53988+ (*curr)->prev = obj;
53989+
53990+ obj->next = *curr;
53991+ *curr = obj;
53992+
53993+ return;
53994+}
53995+
53996+static void
53997+insert_acl_subj_label(struct acl_subject_label *obj,
53998+ struct acl_role_label *role)
53999+{
54000+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
54001+ struct acl_subject_label **curr;
54002+
54003+ obj->prev = NULL;
54004+
54005+ curr = &role->subj_hash[index];
54006+ if (*curr != NULL)
54007+ (*curr)->prev = obj;
54008+
54009+ obj->next = *curr;
54010+ *curr = obj;
54011+
54012+ return;
54013+}
54014+
54015+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
54016+
54017+static void *
54018+create_table(__u32 * len, int elementsize)
54019+{
54020+ unsigned int table_sizes[] = {
54021+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
54022+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
54023+ 4194301, 8388593, 16777213, 33554393, 67108859
54024+ };
54025+ void *newtable = NULL;
54026+ unsigned int pwr = 0;
54027+
54028+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
54029+ table_sizes[pwr] <= *len)
54030+ pwr++;
54031+
54032+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
54033+ return newtable;
54034+
54035+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
54036+ newtable =
54037+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
54038+ else
54039+ newtable = vmalloc(table_sizes[pwr] * elementsize);
54040+
54041+ *len = table_sizes[pwr];
54042+
54043+ return newtable;
54044+}
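+/* Sizing example for the table above: the entries of table_sizes[] are
+ * primes just below powers of two, and the loop stops at the first size
+ * strictly greater than the requested *len, so a request for 1000 buckets
+ * yields 1021 and a load factor (the lambda of the comment above) just
+ * under 1. The kmalloc/vmalloc split simply avoids multi-page kmalloc
+ * allocations once the table exceeds a page.
+ */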
54045+
54046+static int
54047+init_variables(const struct gr_arg *arg)
54048+{
54049+ struct task_struct *reaper = init_pid_ns.child_reaper;
54050+ unsigned int stacksize;
54051+
54052+ subj_map_set.s_size = arg->role_db.num_subjects;
54053+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
54054+ name_set.n_size = arg->role_db.num_objects;
54055+ inodev_set.i_size = arg->role_db.num_objects;
54056+
54057+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
54058+ !name_set.n_size || !inodev_set.i_size)
54059+ return 1;
54060+
54061+ if (!gr_init_uidset())
54062+ return 1;
54063+
54064+ /* set up the stack that holds allocation info */
54065+
54066+ stacksize = arg->role_db.num_pointers + 5;
54067+
54068+ if (!acl_alloc_stack_init(stacksize))
54069+ return 1;
54070+
54071+ /* grab reference for the real root dentry and vfsmount */
54072+ get_fs_root(reaper->fs, &real_root);
54073+
54074+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54075+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
54076+#endif
54077+
54078+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
54079+ if (fakefs_obj_rw == NULL)
54080+ return 1;
54081+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
54082+
54083+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
54084+ if (fakefs_obj_rwx == NULL)
54085+ return 1;
54086+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
54087+
54088+ subj_map_set.s_hash =
54089+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
54090+ acl_role_set.r_hash =
54091+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
54092+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
54093+ inodev_set.i_hash =
54094+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
54095+
54096+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
54097+ !name_set.n_hash || !inodev_set.i_hash)
54098+ return 1;
54099+
54100+ memset(subj_map_set.s_hash, 0,
54101+ sizeof(struct subject_map *) * subj_map_set.s_size);
54102+ memset(acl_role_set.r_hash, 0,
54103+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
54104+ memset(name_set.n_hash, 0,
54105+ sizeof (struct name_entry *) * name_set.n_size);
54106+ memset(inodev_set.i_hash, 0,
54107+ sizeof (struct inodev_entry *) * inodev_set.i_size);
54108+
54109+ return 0;
54110+}
54111+
54112+/* free information not needed after startup;
54113+   currently this is the user->kernel pointer mappings for subjects
54114+*/
54115+
54116+static void
54117+free_init_variables(void)
54118+{
54119+ __u32 i;
54120+
54121+ if (subj_map_set.s_hash) {
54122+ for (i = 0; i < subj_map_set.s_size; i++) {
54123+ if (subj_map_set.s_hash[i]) {
54124+ kfree(subj_map_set.s_hash[i]);
54125+ subj_map_set.s_hash[i] = NULL;
54126+ }
54127+ }
54128+
54129+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
54130+ PAGE_SIZE)
54131+ kfree(subj_map_set.s_hash);
54132+ else
54133+ vfree(subj_map_set.s_hash);
54134+ }
54135+
54136+ return;
54137+}
54138+
54139+static void
54140+free_variables(void)
54141+{
54142+ struct acl_subject_label *s;
54143+ struct acl_role_label *r;
54144+ struct task_struct *task, *task2;
54145+ unsigned int x;
54146+
54147+ gr_clear_learn_entries();
54148+
54149+ read_lock(&tasklist_lock);
54150+ do_each_thread(task2, task) {
54151+ task->acl_sp_role = 0;
54152+ task->acl_role_id = 0;
54153+ task->acl = NULL;
54154+ task->role = NULL;
54155+ } while_each_thread(task2, task);
54156+ read_unlock(&tasklist_lock);
54157+
54158+ /* release the reference to the real root dentry and vfsmount */
54159+ path_put(&real_root);
54160+ memset(&real_root, 0, sizeof(real_root));
54161+
54162+ /* free all object hash tables */
54163+
54164+ FOR_EACH_ROLE_START(r)
54165+ if (r->subj_hash == NULL)
54166+ goto next_role;
54167+ FOR_EACH_SUBJECT_START(r, s, x)
54168+ if (s->obj_hash == NULL)
54169+ break;
54170+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
54171+ kfree(s->obj_hash);
54172+ else
54173+ vfree(s->obj_hash);
54174+ FOR_EACH_SUBJECT_END(s, x)
54175+ FOR_EACH_NESTED_SUBJECT_START(r, s)
54176+ if (s->obj_hash == NULL)
54177+ break;
54178+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
54179+ kfree(s->obj_hash);
54180+ else
54181+ vfree(s->obj_hash);
54182+ FOR_EACH_NESTED_SUBJECT_END(s)
54183+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
54184+ kfree(r->subj_hash);
54185+ else
54186+ vfree(r->subj_hash);
54187+ r->subj_hash = NULL;
54188+next_role:
54189+ FOR_EACH_ROLE_END(r)
54190+
54191+ acl_free_all();
54192+
54193+ if (acl_role_set.r_hash) {
54194+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
54195+ PAGE_SIZE)
54196+ kfree(acl_role_set.r_hash);
54197+ else
54198+ vfree(acl_role_set.r_hash);
54199+ }
54200+ if (name_set.n_hash) {
54201+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
54202+ PAGE_SIZE)
54203+ kfree(name_set.n_hash);
54204+ else
54205+ vfree(name_set.n_hash);
54206+ }
54207+
54208+ if (inodev_set.i_hash) {
54209+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
54210+ PAGE_SIZE)
54211+ kfree(inodev_set.i_hash);
54212+ else
54213+ vfree(inodev_set.i_hash);
54214+ }
54215+
54216+ gr_free_uidset();
54217+
54218+ memset(&name_set, 0, sizeof (struct name_db));
54219+ memset(&inodev_set, 0, sizeof (struct inodev_db));
54220+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
54221+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
54222+
54223+ default_role = NULL;
54224+ kernel_role = NULL;
54225+ role_list = NULL;
54226+
54227+ return;
54228+}
54229+
54230+static __u32
54231+count_user_objs(struct acl_object_label *userp)
54232+{
54233+ struct acl_object_label o_tmp;
54234+ __u32 num = 0;
54235+
54236+ while (userp) {
54237+ if (copy_from_user(&o_tmp, userp,
54238+ sizeof (struct acl_object_label)))
54239+ break;
54240+
54241+ userp = o_tmp.prev;
54242+ num++;
54243+ }
54244+
54245+ return num;
54246+}
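+/* List convention worth noting here and in all the copy routines below:
+ * the chains handed in from userspace are walked through ->prev, so
+ * counting and copying both start from what the userspace tools apparently
+ * treat as the tail of each list and terminate when ->prev reaches NULL
+ * (or early, if copy_from_user() faults).
+ */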
54247+
54248+static struct acl_subject_label *
54249+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
54250+
54251+static int
54252+copy_user_glob(struct acl_object_label *obj)
54253+{
54254+ struct acl_object_label *g_tmp, **guser;
54255+ unsigned int len;
54256+ char *tmp;
54257+
54258+ if (obj->globbed == NULL)
54259+ return 0;
54260+
54261+ guser = &obj->globbed;
54262+ while (*guser) {
54263+ g_tmp = (struct acl_object_label *)
54264+ acl_alloc(sizeof (struct acl_object_label));
54265+ if (g_tmp == NULL)
54266+ return -ENOMEM;
54267+
54268+ if (copy_from_user(g_tmp, *guser,
54269+ sizeof (struct acl_object_label)))
54270+ return -EFAULT;
54271+
54272+ len = strnlen_user(g_tmp->filename, PATH_MAX);
54273+
54274+ if (!len || len >= PATH_MAX)
54275+ return -EINVAL;
54276+
54277+ if ((tmp = (char *) acl_alloc(len)) == NULL)
54278+ return -ENOMEM;
54279+
54280+ if (copy_from_user(tmp, g_tmp->filename, len))
54281+ return -EFAULT;
54282+ tmp[len-1] = '\0';
54283+ g_tmp->filename = tmp;
54284+
54285+ *guser = g_tmp;
54286+ guser = &(g_tmp->next);
54287+ }
54288+
54289+ return 0;
54290+}
54291+
54292+static int
54293+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
54294+ struct acl_role_label *role)
54295+{
54296+ struct acl_object_label *o_tmp;
54297+ unsigned int len;
54298+ int ret;
54299+ char *tmp;
54300+
54301+ while (userp) {
54302+ if ((o_tmp = (struct acl_object_label *)
54303+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
54304+ return -ENOMEM;
54305+
54306+ if (copy_from_user(o_tmp, userp,
54307+ sizeof (struct acl_object_label)))
54308+ return -EFAULT;
54309+
54310+ userp = o_tmp->prev;
54311+
54312+ len = strnlen_user(o_tmp->filename, PATH_MAX);
54313+
54314+ if (!len || len >= PATH_MAX)
54315+ return -EINVAL;
54316+
54317+ if ((tmp = (char *) acl_alloc(len)) == NULL)
54318+ return -ENOMEM;
54319+
54320+ if (copy_from_user(tmp, o_tmp->filename, len))
54321+ return -EFAULT;
54322+ tmp[len-1] = '\0';
54323+ o_tmp->filename = tmp;
54324+
54325+ insert_acl_obj_label(o_tmp, subj);
54326+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
54327+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
54328+ return -ENOMEM;
54329+
54330+ ret = copy_user_glob(o_tmp);
54331+ if (ret)
54332+ return ret;
54333+
54334+ if (o_tmp->nested) {
54335+ int already_copied;
54336+
54337+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
54338+ if (IS_ERR(o_tmp->nested))
54339+ return PTR_ERR(o_tmp->nested);
54340+
54341+ /* insert into nested subject list if we haven't copied this one yet
54342+ to prevent duplicate entries */
54343+ if (!already_copied) {
54344+ o_tmp->nested->next = role->hash->first;
54345+ role->hash->first = o_tmp->nested;
54346+ }
54347+ }
54348+ }
54349+
54350+ return 0;
54351+}
54352+
54353+static __u32
54354+count_user_subjs(struct acl_subject_label *userp)
54355+{
54356+ struct acl_subject_label s_tmp;
54357+ __u32 num = 0;
54358+
54359+ while (userp) {
54360+ if (copy_from_user(&s_tmp, userp,
54361+ sizeof (struct acl_subject_label)))
54362+ break;
54363+
53364+ userp = s_tmp.prev;
+ num++;
54365+ }
54366+
54367+ return num;
54368+}
54369+
54370+static int
54371+copy_user_allowedips(struct acl_role_label *rolep)
54372+{
54373+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
54374+
54375+ ruserip = rolep->allowed_ips;
54376+
54377+ while (ruserip) {
54378+ rlast = rtmp;
54379+
54380+ if ((rtmp = (struct role_allowed_ip *)
54381+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
54382+ return -ENOMEM;
54383+
54384+ if (copy_from_user(rtmp, ruserip,
54385+ sizeof (struct role_allowed_ip)))
54386+ return -EFAULT;
54387+
54388+ ruserip = rtmp->prev;
54389+
54390+ if (!rlast) {
54391+ rtmp->prev = NULL;
54392+ rolep->allowed_ips = rtmp;
54393+ } else {
54394+ rlast->next = rtmp;
54395+ rtmp->prev = rlast;
54396+ }
54397+
54398+ if (!ruserip)
54399+ rtmp->next = NULL;
54400+ }
54401+
54402+ return 0;
54403+}
54404+
54405+static int
54406+copy_user_transitions(struct acl_role_label *rolep)
54407+{
54408+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
54409+
54410+ unsigned int len;
54411+ char *tmp;
54412+
54413+ rusertp = rolep->transitions;
54414+
54415+ while (rusertp) {
54416+ rlast = rtmp;
54417+
54418+ if ((rtmp = (struct role_transition *)
54419+ acl_alloc(sizeof (struct role_transition))) == NULL)
54420+ return -ENOMEM;
54421+
54422+ if (copy_from_user(rtmp, rusertp,
54423+ sizeof (struct role_transition)))
54424+ return -EFAULT;
54425+
54426+ rusertp = rtmp->prev;
54427+
54428+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
54429+
54430+ if (!len || len >= GR_SPROLE_LEN)
54431+ return -EINVAL;
54432+
54433+ if ((tmp = (char *) acl_alloc(len)) == NULL)
54434+ return -ENOMEM;
54435+
54436+ if (copy_from_user(tmp, rtmp->rolename, len))
54437+ return -EFAULT;
54438+ tmp[len-1] = '\0';
54439+ rtmp->rolename = tmp;
54440+
54441+ if (!rlast) {
54442+ rtmp->prev = NULL;
54443+ rolep->transitions = rtmp;
54444+ } else {
54445+ rlast->next = rtmp;
54446+ rtmp->prev = rlast;
54447+ }
54448+
54449+ if (!rusertp)
54450+ rtmp->next = NULL;
54451+ }
54452+
54453+ return 0;
54454+}
54455+
54456+static struct acl_subject_label *
54457+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
54458+{
54459+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
54460+ unsigned int len;
54461+ char *tmp;
54462+ __u32 num_objs;
54463+ struct acl_ip_label **i_tmp, *i_utmp2;
54464+ struct gr_hash_struct ghash;
54465+ struct subject_map *subjmap;
54466+ unsigned int i_num;
54467+ int err;
54468+
54469+ if (already_copied != NULL)
54470+ *already_copied = 0;
54471+
54472+ s_tmp = lookup_subject_map(userp);
54473+
54474+ /* we've already copied this subject into the kernel, just return
54475+ the reference to it, and don't copy it over again
54476+ */
54477+ if (s_tmp) {
54478+ if (already_copied != NULL)
54479+ *already_copied = 1;
54480+ return(s_tmp);
54481+ }
54482+
54483+ if ((s_tmp = (struct acl_subject_label *)
54484+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
54485+ return ERR_PTR(-ENOMEM);
54486+
54487+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
54488+ if (subjmap == NULL)
54489+ return ERR_PTR(-ENOMEM);
54490+
54491+ subjmap->user = userp;
54492+ subjmap->kernel = s_tmp;
54493+ insert_subj_map_entry(subjmap);
54494+
54495+ if (copy_from_user(s_tmp, userp,
54496+ sizeof (struct acl_subject_label)))
54497+ return ERR_PTR(-EFAULT);
54498+
54499+ len = strnlen_user(s_tmp->filename, PATH_MAX);
54500+
54501+ if (!len || len >= PATH_MAX)
54502+ return ERR_PTR(-EINVAL);
54503+
54504+ if ((tmp = (char *) acl_alloc(len)) == NULL)
54505+ return ERR_PTR(-ENOMEM);
54506+
54507+ if (copy_from_user(tmp, s_tmp->filename, len))
54508+ return ERR_PTR(-EFAULT);
54509+ tmp[len-1] = '\0';
54510+ s_tmp->filename = tmp;
54511+
54512+ if (!strcmp(s_tmp->filename, "/"))
54513+ role->root_label = s_tmp;
54514+
54515+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
54516+ return ERR_PTR(-EFAULT);
54517+
54518+ /* copy user and group transition tables */
54519+
54520+ if (s_tmp->user_trans_num) {
54521+ uid_t *uidlist;
54522+
54523+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
54524+ if (uidlist == NULL)
54525+ return ERR_PTR(-ENOMEM);
54526+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
54527+ return ERR_PTR(-EFAULT);
54528+
54529+ s_tmp->user_transitions = uidlist;
54530+ }
54531+
54532+ if (s_tmp->group_trans_num) {
54533+ gid_t *gidlist;
54534+
54535+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
54536+ if (gidlist == NULL)
54537+ return ERR_PTR(-ENOMEM);
54538+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
54539+ return ERR_PTR(-EFAULT);
54540+
54541+ s_tmp->group_transitions = gidlist;
54542+ }
54543+
54544+ /* set up object hash table */
54545+ num_objs = count_user_objs(ghash.first);
54546+
54547+ s_tmp->obj_hash_size = num_objs;
54548+ s_tmp->obj_hash =
54549+ (struct acl_object_label **)
54550+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
54551+
54552+ if (!s_tmp->obj_hash)
54553+ return ERR_PTR(-ENOMEM);
54554+
54555+ memset(s_tmp->obj_hash, 0,
54556+ s_tmp->obj_hash_size *
54557+ sizeof (struct acl_object_label *));
54558+
54559+ /* add in objects */
54560+ err = copy_user_objs(ghash.first, s_tmp, role);
54561+
54562+ if (err)
54563+ return ERR_PTR(err);
54564+
54565+ /* set pointer for parent subject */
54566+ if (s_tmp->parent_subject) {
54567+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
54568+
54569+ if (IS_ERR(s_tmp2))
54570+ return s_tmp2;
54571+
54572+ s_tmp->parent_subject = s_tmp2;
54573+ }
54574+
54575+ /* add in ip acls */
54576+
54577+ if (!s_tmp->ip_num) {
54578+ s_tmp->ips = NULL;
54579+ goto insert;
54580+ }
54581+
54582+ i_tmp =
54583+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
54584+ sizeof (struct acl_ip_label *));
54585+
54586+ if (!i_tmp)
54587+ return ERR_PTR(-ENOMEM);
54588+
54589+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
54590+ *(i_tmp + i_num) =
54591+ (struct acl_ip_label *)
54592+ acl_alloc(sizeof (struct acl_ip_label));
54593+ if (!*(i_tmp + i_num))
54594+ return ERR_PTR(-ENOMEM);
54595+
54596+ if (copy_from_user
54597+ (&i_utmp2, s_tmp->ips + i_num,
54598+ sizeof (struct acl_ip_label *)))
54599+ return ERR_PTR(-EFAULT);
54600+
54601+ if (copy_from_user
54602+ (*(i_tmp + i_num), i_utmp2,
54603+ sizeof (struct acl_ip_label)))
54604+ return ERR_PTR(-EFAULT);
54605+
54606+ if ((*(i_tmp + i_num))->iface == NULL)
54607+ continue;
54608+
54609+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
54610+ if (!len || len >= IFNAMSIZ)
54611+ return ERR_PTR(-EINVAL);
54612+ tmp = acl_alloc(len);
54613+ if (tmp == NULL)
54614+ return ERR_PTR(-ENOMEM);
54615+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
54616+ return ERR_PTR(-EFAULT);
54617+ (*(i_tmp + i_num))->iface = tmp;
54618+ }
54619+
54620+ s_tmp->ips = i_tmp;
54621+
54622+insert:
54623+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
54624+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
54625+ return ERR_PTR(-ENOMEM);
54626+
54627+ return s_tmp;
54628+}
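+/* Recursion guard in the function above: the user->kernel map entry is
+ * inserted *before* the subject body is copied in, so a policy whose
+ * subjects reach an ancestor again through parent_subject or a nested
+ * object resolves it via lookup_subject_map() at the top instead of
+ * recursing without bound.
+ */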
54629+
54630+static int
54631+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
54632+{
54633+ struct acl_subject_label s_pre;
54634+ struct acl_subject_label * ret;
54635+ int err;
54636+
54637+ while (userp) {
54638+ if (copy_from_user(&s_pre, userp,
54639+ sizeof (struct acl_subject_label)))
54640+ return -EFAULT;
54641+
54642+ ret = do_copy_user_subj(userp, role, NULL);
54643+
54644+ err = PTR_ERR(ret);
54645+ if (IS_ERR(ret))
54646+ return err;
54647+
54648+ insert_acl_subj_label(ret, role);
54649+
54650+ userp = s_pre.prev;
54651+ }
54652+
54653+ return 0;
54654+}
54655+
54656+static int
54657+copy_user_acl(struct gr_arg *arg)
54658+{
54659+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
54660+ struct acl_subject_label *subj_list;
54661+ struct sprole_pw *sptmp;
54662+ struct gr_hash_struct *ghash;
54663+ uid_t *domainlist;
54664+ unsigned int r_num;
54665+ unsigned int len;
54666+ char *tmp;
54667+ int err = 0;
54668+ __u16 i;
54669+ __u32 num_subjs;
54670+
54671+ /* we need a default and kernel role */
54672+ if (arg->role_db.num_roles < 2)
54673+ return -EINVAL;
54674+
54675+ /* copy special role authentication info from userspace */
54676+
54677+ num_sprole_pws = arg->num_sprole_pws;
54678+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
54679+
54680+ if (!acl_special_roles && num_sprole_pws)
54681+ return -ENOMEM;
54682+
54683+ for (i = 0; i < num_sprole_pws; i++) {
54684+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
54685+ if (!sptmp)
54686+ return -ENOMEM;
54687+ if (copy_from_user(sptmp, arg->sprole_pws + i,
54688+ sizeof (struct sprole_pw)))
54689+ return -EFAULT;
54690+
54691+ len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
54692+
54693+ if (!len || len >= GR_SPROLE_LEN)
54694+ return -EINVAL;
54695+
54696+ if ((tmp = (char *) acl_alloc(len)) == NULL)
54697+ return -ENOMEM;
54698+
54699+ if (copy_from_user(tmp, sptmp->rolename, len))
54700+ return -EFAULT;
54701+
54702+ tmp[len-1] = '\0';
54703+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54704+ printk(KERN_ALERT "Copying special role %s\n", tmp);
54705+#endif
54706+ sptmp->rolename = tmp;
54707+ acl_special_roles[i] = sptmp;
54708+ }
54709+
54710+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
54711+
54712+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
54713+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
54714+
54715+ if (!r_tmp)
54716+ return -ENOMEM;
54717+
54718+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
54719+ sizeof (struct acl_role_label *)))
54720+ return -EFAULT;
54721+
54722+ if (copy_from_user(r_tmp, r_utmp2,
54723+ sizeof (struct acl_role_label)))
54724+ return -EFAULT;
54725+
54726+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
54727+
54728+ if (!len || len >= GR_SPROLE_LEN)
54729+ return -EINVAL;
54730+
54731+ if ((tmp = (char *) acl_alloc(len)) == NULL)
54732+ return -ENOMEM;
54733+
54734+ if (copy_from_user(tmp, r_tmp->rolename, len))
54735+ return -EFAULT;
54736+
54737+ tmp[len-1] = '\0';
54738+ r_tmp->rolename = tmp;
54739+
54740+ if (!strcmp(r_tmp->rolename, "default")
54741+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
54742+ default_role = r_tmp;
54743+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
54744+ kernel_role = r_tmp;
54745+ }
54746+
54747+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
54748+ return -ENOMEM;
54749+
54750+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
54751+ return -EFAULT;
54752+
54753+ r_tmp->hash = ghash;
54754+
54755+ num_subjs = count_user_subjs(r_tmp->hash->first);
54756+
54757+ r_tmp->subj_hash_size = num_subjs;
54758+ r_tmp->subj_hash =
54759+ (struct acl_subject_label **)
54760+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
54761+
54762+ if (!r_tmp->subj_hash)
54763+ return -ENOMEM;
54764+
54765+ err = copy_user_allowedips(r_tmp);
54766+ if (err)
54767+ return err;
54768+
54769+ /* copy domain info */
54770+ if (r_tmp->domain_children != NULL) {
54771+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
54772+ if (domainlist == NULL)
54773+ return -ENOMEM;
54774+
54775+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
54776+ return -EFAULT;
54777+
54778+ r_tmp->domain_children = domainlist;
54779+ }
54780+
54781+ err = copy_user_transitions(r_tmp);
54782+ if (err)
54783+ return err;
54784+
54785+ memset(r_tmp->subj_hash, 0,
54786+ r_tmp->subj_hash_size *
54787+ sizeof (struct acl_subject_label *));
54788+
54789+ /* acquire the list of subjects, then NULL out
54790+ the list prior to parsing the subjects for this role,
54791+ as during this parsing the list is replaced with a list
54792+ of *nested* subjects for the role
54793+ */
54794+ subj_list = r_tmp->hash->first;
54795+
54796+ /* set nested subject list to null */
54797+ r_tmp->hash->first = NULL;
54798+
54799+ err = copy_user_subjs(subj_list, r_tmp);
54800+
54801+ if (err)
54802+ return err;
54803+
54804+ insert_acl_role_label(r_tmp);
54805+ }
54806+
54807+ if (default_role == NULL || kernel_role == NULL)
54808+ return -EINVAL;
54809+
54810+ return err;
54811+}
54812+
54813+static int
54814+gracl_init(struct gr_arg *args)
54815+{
54816+ int error = 0;
54817+
54818+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
54819+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
54820+
54821+ if (init_variables(args)) {
54822+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
54823+ error = -ENOMEM;
54824+ free_variables();
54825+ goto out;
54826+ }
54827+
54828+ error = copy_user_acl(args);
54829+ free_init_variables();
54830+ if (error) {
54831+ free_variables();
54832+ goto out;
54833+ }
54834+
54835+ if ((error = gr_set_acls(0))) {
54836+ free_variables();
54837+ goto out;
54838+ }
54839+
54840+ pax_open_kernel();
54841+ gr_status |= GR_READY;
54842+ pax_close_kernel();
54843+
54844+ out:
54845+ return error;
54846+}
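+/* Ordering note for gracl_init(): the user->kernel subject mappings built
+ * while parsing are discarded by free_init_variables() before the policy is
+ * activated, every error path tears the whole state back down through
+ * free_variables(), and GR_READY is flipped last, inside
+ * pax_open_kernel()/pax_close_kernel() (presumably because gr_status lives
+ * in write-protected memory), so no enforcement hook ever sees a
+ * half-constructed policy.
+ */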
54847+
54848+/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
54849+
54850+static int
54851+glob_match(const char *p, const char *n)
54852+{
54853+ char c;
54854+
54855+ while ((c = *p++) != '\0') {
54856+ switch (c) {
54857+ case '?':
54858+ if (*n == '\0')
54859+ return 1;
54860+ else if (*n == '/')
54861+ return 1;
54862+ break;
54863+ case '\\':
54864+ if (*n != c)
54865+ return 1;
54866+ break;
54867+ case '*':
54868+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
54869+ if (*n == '/')
54870+ return 1;
54871+ else if (c == '?') {
54872+ if (*n == '\0')
54873+ return 1;
54874+ else
54875+ ++n;
54876+ }
54877+ }
54878+ if (c == '\0') {
54879+ return 0;
54880+ } else {
54881+ const char *endp;
54882+
54883+ if ((endp = strchr(n, '/')) == NULL)
54884+ endp = n + strlen(n);
54885+
54886+ if (c == '[') {
54887+ for (--p; n < endp; ++n)
54888+ if (!glob_match(p, n))
54889+ return 0;
54890+ } else if (c == '/') {
54891+ while (*n != '\0' && *n != '/')
54892+ ++n;
54893+ if (*n == '/' && !glob_match(p, n + 1))
54894+ return 0;
54895+ } else {
54896+ for (--p; n < endp; ++n)
54897+ if (*n == c && !glob_match(p, n))
54898+ return 0;
54899+ }
54900+
54901+ return 1;
54902+ }
54903+ case '[':
54904+ {
54905+ int not;
54906+ char cold;
54907+
54908+ if (*n == '\0' || *n == '/')
54909+ return 1;
54910+
54911+ not = (*p == '!' || *p == '^');
54912+ if (not)
54913+ ++p;
54914+
54915+ c = *p++;
54916+ for (;;) {
54917+ unsigned char fn = (unsigned char)*n;
54918+
54919+ if (c == '\0')
54920+ return 1;
54921+ else {
54922+ if (c == fn)
54923+ goto matched;
54924+ cold = c;
54925+ c = *p++;
54926+
54927+ if (c == '-' && *p != ']') {
54928+ unsigned char cend = *p++;
54929+
54930+ if (cend == '\0')
54931+ return 1;
54932+
54933+ if (cold <= fn && fn <= cend)
54934+ goto matched;
54935+
54936+ c = *p++;
54937+ }
54938+ }
54939+
54940+ if (c == ']')
54941+ break;
54942+ }
54943+ if (!not)
54944+ return 1;
54945+ break;
54946+ matched:
54947+ while (c != ']') {
54948+ if (c == '\0')
54949+ return 1;
54950+
54951+ c = *p++;
54952+ }
54953+ if (not)
54954+ return 1;
54955+ }
54956+ break;
54957+ default:
54958+ if (c != *n)
54959+ return 1;
54960+ }
54961+
54962+ ++n;
54963+ }
54964+
54965+ if (*n == '\0')
54966+ return 0;
54967+
54968+ if (*n == '/')
54969+ return 0;
54970+
54971+ return 1;
54972+}
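+/* Sample behaviour, derivable from the cases above (0 = match, 1 = no
+ * match): glob_match("/tmp/*", "/tmp/foo") == 0;
+ * glob_match("/tmp/??", "/tmp/a/") == 1, because '?' never matches '/';
+ * glob_match("file[0-9]", "file5") == 0 via the character-range case; and
+ * glob_match("file[!0-9]", "file5") == 1 through the '!'/'^' negation.
+ */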
54973+
54974+static struct acl_object_label *
54975+chk_glob_label(struct acl_object_label *globbed,
54976+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
54977+{
54978+ struct acl_object_label *tmp;
54979+
54980+ if (*path == NULL)
54981+ *path = gr_to_filename_nolock(dentry, mnt);
54982+
54983+ tmp = globbed;
54984+
54985+ while (tmp) {
54986+ if (!glob_match(tmp->filename, *path))
54987+ return tmp;
54988+ tmp = tmp->next;
54989+ }
54990+
54991+ return NULL;
54992+}
54993+
54994+static struct acl_object_label *
54995+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
54996+ const ino_t curr_ino, const dev_t curr_dev,
54997+ const struct acl_subject_label *subj, char **path, const int checkglob)
54998+{
54999+ struct acl_subject_label *tmpsubj;
55000+ struct acl_object_label *retval;
55001+ struct acl_object_label *retval2;
55002+
55003+ tmpsubj = (struct acl_subject_label *) subj;
55004+ read_lock(&gr_inode_lock);
55005+ do {
55006+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
55007+ if (retval) {
55008+ if (checkglob && retval->globbed) {
55009+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
55010+ if (retval2)
55011+ retval = retval2;
55012+ }
55013+ break;
55014+ }
55015+ } while ((tmpsubj = tmpsubj->parent_subject));
55016+ read_unlock(&gr_inode_lock);
55017+
55018+ return retval;
55019+}
55020+
55021+static __inline__ struct acl_object_label *
55022+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
55023+ struct dentry *curr_dentry,
55024+ const struct acl_subject_label *subj, char **path, const int checkglob)
55025+{
55026+ int newglob = checkglob;
55027+ ino_t inode;
55028+ dev_t device;
55029+
55030+ /* if we aren't yet checking a subdirectory of the original path, skip glob
55031+    checking, as we don't want a / * rule to match instead of the / object.
55032+    create lookups that call this function are exempt, though, since they look
55033+    up on the parent and thus need globbing checks on all paths
55034+ */
55035+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
55036+ newglob = GR_NO_GLOB;
55037+
55038+ spin_lock(&curr_dentry->d_lock);
55039+ inode = curr_dentry->d_inode->i_ino;
55040+ device = __get_dev(curr_dentry);
55041+ spin_unlock(&curr_dentry->d_lock);
55042+
55043+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
55044+}
55045+
55046+static struct acl_object_label *
55047+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
55048+ const struct acl_subject_label *subj, char *path, const int checkglob)
55049+{
55050+ struct dentry *dentry = (struct dentry *) l_dentry;
55051+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
55052+ struct mount *real_mnt = real_mount(mnt);
55053+ struct acl_object_label *retval;
55054+ struct dentry *parent;
55055+
55056+ write_seqlock(&rename_lock);
55057+ br_read_lock(&vfsmount_lock);
55058+
55059+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
55060+#ifdef CONFIG_NET
55061+ mnt == sock_mnt ||
55062+#endif
55063+#ifdef CONFIG_HUGETLBFS
55064+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
55065+#endif
55066+ /* ignore Eric Biederman */
55067+ IS_PRIVATE(l_dentry->d_inode))) {
55068+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
55069+ goto out;
55070+ }
55071+
55072+ for (;;) {
55073+ if (dentry == real_root.dentry && mnt == real_root.mnt)
55074+ break;
55075+
55076+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
55077+ if (!mnt_has_parent(real_mnt))
55078+ break;
55079+
55080+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
55081+ if (retval != NULL)
55082+ goto out;
55083+
55084+ dentry = real_mnt->mnt_mountpoint;
55085+ real_mnt = real_mnt->mnt_parent;
55086+ mnt = &real_mnt->mnt;
55087+ continue;
55088+ }
55089+
55090+ parent = dentry->d_parent;
55091+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
55092+ if (retval != NULL)
55093+ goto out;
55094+
55095+ dentry = parent;
55096+ }
55097+
55098+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
55099+
55100+ /* real_root is pinned so we don't have to hold a reference */
55101+ if (retval == NULL)
55102+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
55103+out:
55104+ br_read_unlock(&vfsmount_lock);
55105+ write_sequnlock(&rename_lock);
55106+
55107+ BUG_ON(retval == NULL);
55108+
55109+ return retval;
55110+}
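+/* Walk illustration for the loop above, using hypothetical paths: checking
+ * /mnt/usb/dir/file where /mnt/usb is its own mount tries file, then dir,
+ * then the root dentry of the usb mount, then hops through
+ * real_mnt->mnt_mountpoint/mnt_parent to continue with /mnt/usb, /mnt and /
+ * in the parent mount until some object label matches. The final fallback
+ * looks up real_root itself, which any loaded policy is expected to label
+ * (hence the BUG_ON above).
+ */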
55111+
55112+static __inline__ struct acl_object_label *
55113+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
55114+ const struct acl_subject_label *subj)
55115+{
55116+ char *path = NULL;
55117+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
55118+}
55119+
55120+static __inline__ struct acl_object_label *
55121+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
55122+ const struct acl_subject_label *subj)
55123+{
55124+ char *path = NULL;
55125+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
55126+}
55127+
55128+static __inline__ struct acl_object_label *
55129+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
55130+ const struct acl_subject_label *subj, char *path)
55131+{
55132+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
55133+}
55134+
55135+static struct acl_subject_label *
55136+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
55137+ const struct acl_role_label *role)
55138+{
55139+ struct dentry *dentry = (struct dentry *) l_dentry;
55140+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
55141+ struct mount *real_mnt = real_mount(mnt);
55142+ struct acl_subject_label *retval;
55143+ struct dentry *parent;
55144+
55145+ write_seqlock(&rename_lock);
55146+ br_read_lock(&vfsmount_lock);
55147+
55148+ for (;;) {
55149+ if (dentry == real_root.dentry && mnt == real_root.mnt)
55150+ break;
55151+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
55152+ if (!mnt_has_parent(real_mnt))
55153+ break;
55154+
55155+ spin_lock(&dentry->d_lock);
55156+ read_lock(&gr_inode_lock);
55157+ retval =
55158+ lookup_acl_subj_label(dentry->d_inode->i_ino,
55159+ __get_dev(dentry), role);
55160+ read_unlock(&gr_inode_lock);
55161+ spin_unlock(&dentry->d_lock);
55162+ if (retval != NULL)
55163+ goto out;
55164+
55165+ dentry = real_mnt->mnt_mountpoint;
55166+ real_mnt = real_mnt->mnt_parent;
55167+ mnt = &real_mnt->mnt;
55168+ continue;
55169+ }
55170+
55171+ spin_lock(&dentry->d_lock);
55172+ read_lock(&gr_inode_lock);
55173+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
55174+ __get_dev(dentry), role);
55175+ read_unlock(&gr_inode_lock);
55176+ parent = dentry->d_parent;
55177+ spin_unlock(&dentry->d_lock);
55178+
55179+ if (retval != NULL)
55180+ goto out;
55181+
55182+ dentry = parent;
55183+ }
55184+
55185+ spin_lock(&dentry->d_lock);
55186+ read_lock(&gr_inode_lock);
55187+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
55188+ __get_dev(dentry), role);
55189+ read_unlock(&gr_inode_lock);
55190+ spin_unlock(&dentry->d_lock);
55191+
55192+ if (unlikely(retval == NULL)) {
55193+ /* real_root is pinned, we don't need to hold a reference */
55194+ read_lock(&gr_inode_lock);
55195+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
55196+ __get_dev(real_root.dentry), role);
55197+ read_unlock(&gr_inode_lock);
55198+ }
55199+out:
55200+ br_read_unlock(&vfsmount_lock);
55201+ write_sequnlock(&rename_lock);
55202+
55203+ BUG_ON(retval == NULL);
55204+
55205+ return retval;
55206+}
55207+
55208+static void
55209+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
55210+{
55211+ struct task_struct *task = current;
55212+ const struct cred *cred = current_cred();
55213+
55214+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
55215+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
55216+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
55217+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
55218+
55219+ return;
55220+}
55221+
55222+static void
55223+gr_log_learn_id_change(const char type, const unsigned int real,
55224+ const unsigned int effective, const unsigned int fs)
55225+{
55226+ struct task_struct *task = current;
55227+ const struct cred *cred = current_cred();
55228+
55229+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
55230+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
55231+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
55232+ type, real, effective, fs, &task->signal->saved_ip);
55233+
55234+ return;
55235+}
55236+
55237+__u32
55238+gr_search_file(const struct dentry * dentry, const __u32 mode,
55239+ const struct vfsmount * mnt)
55240+{
55241+ __u32 retval = mode;
55242+ struct acl_subject_label *curracl;
55243+ struct acl_object_label *currobj;
55244+
55245+ if (unlikely(!(gr_status & GR_READY)))
55246+ return (mode & ~GR_AUDITS);
55247+
55248+ curracl = current->acl;
55249+
55250+ currobj = chk_obj_label(dentry, mnt, curracl);
55251+ retval = currobj->mode & mode;
55252+
55253+ /* if we're opening a specified transfer file for writing
55254+ (e.g. /dev/initctl), then transfer our role to init
55255+ */
55256+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
55257+ current->role->roletype & GR_ROLE_PERSIST)) {
55258+ struct task_struct *task = init_pid_ns.child_reaper;
55259+
55260+ if (task->role != current->role) {
55261+ task->acl_sp_role = 0;
55262+ task->acl_role_id = current->acl_role_id;
55263+ task->role = current->role;
55264+ rcu_read_lock();
55265+ read_lock(&grsec_exec_file_lock);
55266+ gr_apply_subject_to_task(task);
55267+ read_unlock(&grsec_exec_file_lock);
55268+ rcu_read_unlock();
55269+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
55270+ }
55271+ }
55272+
55273+ if (unlikely
55274+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
55275+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
55276+ __u32 new_mode = mode;
55277+
55278+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
55279+
55280+ retval = new_mode;
55281+
55282+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
55283+ new_mode |= GR_INHERIT;
55284+
55285+ if (!(mode & GR_NOLEARN))
55286+ gr_log_learn(dentry, mnt, new_mode);
55287+ }
55288+
55289+ return retval;
55290+}
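+/* Learn-mode behaviour in the tail of the function above, with an assumed
+ * request: a subject running under GR_LEARN that asks for GR_READ|GR_WRITE
+ * on an object granting only GR_READ is handed back the full request
+ * (audit and suppress bits stripped) and the miss is recorded through
+ * gr_log_learn(), so the learning log accumulates the accesses a tightened
+ * policy would have to grant.
+ */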
55291+
55292+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
55293+ const struct dentry *parent,
55294+ const struct vfsmount *mnt)
55295+{
55296+ struct name_entry *match;
55297+ struct acl_object_label *matchpo;
55298+ struct acl_subject_label *curracl;
55299+ char *path;
55300+
55301+ if (unlikely(!(gr_status & GR_READY)))
55302+ return NULL;
55303+
55304+ preempt_disable();
55305+ path = gr_to_filename_rbac(new_dentry, mnt);
55306+ match = lookup_name_entry_create(path);
55307+
55308+ curracl = current->acl;
55309+
55310+ if (match) {
55311+ read_lock(&gr_inode_lock);
55312+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
55313+ read_unlock(&gr_inode_lock);
55314+
55315+ if (matchpo) {
55316+ preempt_enable();
55317+ return matchpo;
55318+ }
55319+ }
55320+
55321+ // lookup parent
55322+
55323+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
55324+
55325+ preempt_enable();
55326+ return matchpo;
55327+}
55328+
55329+__u32
55330+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
55331+ const struct vfsmount * mnt, const __u32 mode)
55332+{
55333+ struct acl_object_label *matchpo;
55334+ __u32 retval;
55335+
55336+ if (unlikely(!(gr_status & GR_READY)))
55337+ return (mode & ~GR_AUDITS);
55338+
55339+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
55340+
55341+ retval = matchpo->mode & mode;
55342+
55343+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
55344+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
55345+ __u32 new_mode = mode;
55346+
55347+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
55348+
55349+ gr_log_learn(new_dentry, mnt, new_mode);
55350+ return new_mode;
55351+ }
55352+
55353+ return retval;
55354+}
55355+
55356+__u32
55357+gr_check_link(const struct dentry * new_dentry,
55358+ const struct dentry * parent_dentry,
55359+ const struct vfsmount * parent_mnt,
55360+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
55361+{
55362+ struct acl_object_label *obj;
55363+ __u32 oldmode, newmode;
55364+ __u32 needmode;
55365+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
55366+ GR_DELETE | GR_INHERIT;
55367+
55368+ if (unlikely(!(gr_status & GR_READY)))
55369+ return (GR_CREATE | GR_LINK);
55370+
55371+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
55372+ oldmode = obj->mode;
55373+
55374+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
55375+ newmode = obj->mode;
55376+
55377+ needmode = newmode & checkmodes;
55378+
55379+ // old name for hardlink must have at least the permissions of the new name
55380+ if ((oldmode & needmode) != needmode)
55381+ goto bad;
55382+
55383+ // if old name had restrictions/auditing, make sure the new name does as well
55384+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
55385+
55386+ // don't allow hardlinking of suid/sgid/fcapped files without permission
55387+ if (is_privileged_binary(old_dentry))
55388+ needmode |= GR_SETID;
55389+
55390+ if ((newmode & needmode) != needmode)
55391+ goto bad;
55392+
55393+ // enforce minimum permissions
55394+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
55395+ return newmode;
55396+bad:
55397+ needmode = oldmode;
55398+ if (is_privileged_binary(old_dentry))
55399+ needmode |= GR_SETID;
55400+
55401+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
55402+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
55403+ return (GR_CREATE | GR_LINK);
55404+ } else if (newmode & GR_SUPPRESS)
55405+ return GR_SUPPRESS;
55406+ else
55407+ return 0;
55408+}
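+/* Hardlink policy encoded above, with assumed modes for illustration: if
+ * the new name would be created with mode rw but the old name only grants
+ * r, the first check fails, since the old name must carry at least the new
+ * name's checkmodes bits; conversely, auditing or GR_NOPTRACE restrictions
+ * on the old name must reappear on the new one, so a link can never shed
+ * restrictions, and suid/sgid/fcapped targets additionally demand GR_SETID.
+ */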
55409+
55410+int
55411+gr_check_hidden_task(const struct task_struct *task)
55412+{
55413+ if (unlikely(!(gr_status & GR_READY)))
55414+ return 0;
55415+
55416+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
55417+ return 1;
55418+
55419+ return 0;
55420+}
55421+
55422+int
55423+gr_check_protected_task(const struct task_struct *task)
55424+{
55425+ if (unlikely(!(gr_status & GR_READY) || !task))
55426+ return 0;
55427+
55428+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
55429+ task->acl != current->acl)
55430+ return 1;
55431+
55432+ return 0;
55433+}
55434+
55435+int
55436+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
55437+{
55438+ struct task_struct *p;
55439+ int ret = 0;
55440+
55441+ if (unlikely(!(gr_status & GR_READY) || !pid))
55442+ return ret;
55443+
55444+ read_lock(&tasklist_lock);
55445+ do_each_pid_task(pid, type, p) {
55446+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
55447+ p->acl != current->acl) {
55448+ ret = 1;
55449+ goto out;
55450+ }
55451+ } while_each_pid_task(pid, type, p);
55452+out:
55453+ read_unlock(&tasklist_lock);
55454+
55455+ return ret;
55456+}
55457+
55458+void
55459+gr_copy_label(struct task_struct *tsk)
55460+{
55461+ tsk->signal->used_accept = 0;
55462+ tsk->acl_sp_role = 0;
55463+ tsk->acl_role_id = current->acl_role_id;
55464+ tsk->acl = current->acl;
55465+ tsk->role = current->role;
55466+ tsk->signal->curr_ip = current->signal->curr_ip;
55467+ tsk->signal->saved_ip = current->signal->saved_ip;
55468+ if (current->exec_file)
55469+ get_file(current->exec_file);
55470+ tsk->exec_file = current->exec_file;
55471+ tsk->is_writable = current->is_writable;
55472+ if (unlikely(current->signal->used_accept)) {
55473+ current->signal->curr_ip = 0;
55474+ current->signal->saved_ip = 0;
55475+ }
55476+
55477+ return;
55478+}
55479+
55480+static void
55481+gr_set_proc_res(struct task_struct *task)
55482+{
55483+ struct acl_subject_label *proc;
55484+ unsigned short i;
55485+
55486+ proc = task->acl;
55487+
55488+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
55489+ return;
55490+
55491+ for (i = 0; i < RLIM_NLIMITS; i++) {
55492+ if (!(proc->resmask & (1 << i)))
55493+ continue;
55494+
55495+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
55496+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
55497+ }
55498+
55499+ return;
55500+}
55501+
55502+extern int __gr_process_user_ban(struct user_struct *user);
55503+
55504+int
55505+gr_check_user_change(int real, int effective, int fs)
55506+{
55507+ unsigned int i;
55508+ __u16 num;
55509+ uid_t *uidlist;
55510+ int curuid;
55511+ int realok = 0;
55512+ int effectiveok = 0;
55513+ int fsok = 0;
55514+
55515+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
55516+ struct user_struct *user;
55517+
55518+ if (real == -1)
55519+ goto skipit;
55520+
55521+ user = find_user(real);
55522+ if (user == NULL)
55523+ goto skipit;
55524+
55525+ if (__gr_process_user_ban(user)) {
55526+ /* for find_user */
55527+ free_uid(user);
55528+ return 1;
55529+ }
55530+
55531+ /* for find_user */
55532+ free_uid(user);
55533+
55534+skipit:
55535+#endif
55536+
55537+ if (unlikely(!(gr_status & GR_READY)))
55538+ return 0;
55539+
55540+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
55541+ gr_log_learn_id_change('u', real, effective, fs);
55542+
55543+ num = current->acl->user_trans_num;
55544+ uidlist = current->acl->user_transitions;
55545+
55546+ if (uidlist == NULL)
55547+ return 0;
55548+
55549+ if (real == -1)
55550+ realok = 1;
55551+ if (effective == -1)
55552+ effectiveok = 1;
55553+ if (fs == -1)
55554+ fsok = 1;
55555+
55556+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
55557+ for (i = 0; i < num; i++) {
55558+ curuid = (int)uidlist[i];
55559+ if (real == curuid)
55560+ realok = 1;
55561+ if (effective == curuid)
55562+ effectiveok = 1;
55563+ if (fs == curuid)
55564+ fsok = 1;
55565+ }
55566+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
55567+ for (i = 0; i < num; i++) {
55568+ curuid = (int)uidlist[i];
55569+ if (real == curuid)
55570+ break;
55571+ if (effective == curuid)
55572+ break;
55573+ if (fs == curuid)
55574+ break;
55575+ }
55576+ /* not in deny list */
55577+ if (i == num) {
55578+ realok = 1;
55579+ effectiveok = 1;
55580+ fsok = 1;
55581+ }
55582+ }
55583+
55584+ if (realok && effectiveok && fsok)
55585+ return 0;
55586+ else {
55587+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
55588+ return 1;
55589+ }
55590+}
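The user_transitions list above is interpreted in one of two modes: with GR_ID_ALLOW every requested id must appear in the list, with GR_ID_DENY a single hit fails the change, and -1 means "leave this id unchanged". A self-contained per-id sketch of that decision (a simplification: the kernel version checks real/effective/fs ids together), using stand-in names:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

enum trans_type { ID_ALLOW, ID_DENY };

static bool id_change_ok(int id, const int *list, size_t n, enum trans_type t)
{
	if (id == -1)                   /* id not being changed */
		return true;
	for (size_t i = 0; i < n; i++)
		if (list[i] == id)      /* listed: ok iff it's an allow list */
			return t == ID_ALLOW;
	return t == ID_DENY;            /* unlisted: ok iff it's a deny list */
}

int main(void)
{
	int list[] = { 1000, 1001 };

	assert(id_change_ok(1000, list, 2, ID_ALLOW));
	assert(!id_change_ok(0, list, 2, ID_ALLOW));
	assert(!id_change_ok(1000, list, 2, ID_DENY));
	assert(id_change_ok(0, list, 2, ID_DENY));
	return 0;
}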
55591+
55592+int
55593+gr_check_group_change(int real, int effective, int fs)
55594+{
55595+ unsigned int i;
55596+ __u16 num;
55597+ gid_t *gidlist;
55598+ int curgid;
55599+ int realok = 0;
55600+ int effectiveok = 0;
55601+ int fsok = 0;
55602+
55603+ if (unlikely(!(gr_status & GR_READY)))
55604+ return 0;
55605+
55606+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
55607+ gr_log_learn_id_change('g', real, effective, fs);
55608+
55609+ num = current->acl->group_trans_num;
55610+ gidlist = current->acl->group_transitions;
55611+
55612+ if (gidlist == NULL)
55613+ return 0;
55614+
55615+ if (real == -1)
55616+ realok = 1;
55617+ if (effective == -1)
55618+ effectiveok = 1;
55619+ if (fs == -1)
55620+ fsok = 1;
55621+
55622+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
55623+ for (i = 0; i < num; i++) {
55624+ curgid = (int)gidlist[i];
55625+ if (real == curgid)
55626+ realok = 1;
55627+ if (effective == curgid)
55628+ effectiveok = 1;
55629+ if (fs == curgid)
55630+ fsok = 1;
55631+ }
55632+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
55633+ for (i = 0; i < num; i++) {
55634+ curgid = (int)gidlist[i];
55635+ if (real == curgid)
55636+ break;
55637+ if (effective == curgid)
55638+ break;
55639+ if (fs == curgid)
55640+ break;
55641+ }
55642+ /* not in deny list */
55643+ if (i == num) {
55644+ realok = 1;
55645+ effectiveok = 1;
55646+ fsok = 1;
55647+ }
55648+ }
55649+
55650+ if (realok && effectiveok && fsok)
55651+ return 0;
55652+ else {
55653+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
55654+ return 1;
55655+ }
55656+}
55657+
55658+extern int gr_acl_is_capable(const int cap);
55659+
55660+void
55661+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
55662+{
55663+ struct acl_role_label *role = task->role;
55664+ struct acl_subject_label *subj = NULL;
55665+ struct acl_object_label *obj;
55666+ struct file *filp;
55667+
55668+ if (unlikely(!(gr_status & GR_READY)))
55669+ return;
55670+
55671+ filp = task->exec_file;
55672+
55673+ /* kernel process, we'll give them the kernel role */
55674+ if (unlikely(!filp)) {
55675+ task->role = kernel_role;
55676+ task->acl = kernel_role->root_label;
55677+ return;
55678+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
55679+ role = lookup_acl_role_label(task, uid, gid);
55680+
55681+ /* don't change the role if we're not a privileged process */
55682+ if (role && task->role != role &&
55683+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
55684+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
55685+ return;
55686+
55687+	/* perform the subject lookup in the possibly new role;
55688+	   we can reuse this result below in the case where role == task->role
55689+	 */
55690+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
55691+
55692+	/* if we changed uid/gid but ended up in the same role
55693+	   and are using inheritance, don't lose the inherited subject:
55694+	   if the current subject differs from what a normal lookup
55695+	   would return, we arrived at it via inheritance, so keep
55696+	   that subject
55697+	 */
55698+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
55699+ (subj == task->acl)))
55700+ task->acl = subj;
55701+
55702+ task->role = role;
55703+
55704+ task->is_writable = 0;
55705+
55706+ /* ignore additional mmap checks for processes that are writable
55707+ by the default ACL */
55708+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
55709+ if (unlikely(obj->mode & GR_WRITE))
55710+ task->is_writable = 1;
55711+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
55712+ if (unlikely(obj->mode & GR_WRITE))
55713+ task->is_writable = 1;
55714+
55715+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
55716+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
55717+#endif
55718+
55719+ gr_set_proc_res(task);
55720+
55721+ return;
55722+}
55723+
55724+int
55725+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
55726+ const int unsafe_flags)
55727+{
55728+ struct task_struct *task = current;
55729+ struct acl_subject_label *newacl;
55730+ struct acl_object_label *obj;
55731+ __u32 retmode;
55732+
55733+ if (unlikely(!(gr_status & GR_READY)))
55734+ return 0;
55735+
55736+ newacl = chk_subj_label(dentry, mnt, task->role);
55737+
55738+	/* special handling for the case where we did an strace -f -p <pid> from an admin
55739+	   role and the traced pid then did an exec
55740+	*/
55741+ rcu_read_lock();
55742+ read_lock(&tasklist_lock);
55743+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
55744+ (task->parent->acl->mode & GR_POVERRIDE))) {
55745+ read_unlock(&tasklist_lock);
55746+ rcu_read_unlock();
55747+ goto skip_check;
55748+ }
55749+ read_unlock(&tasklist_lock);
55750+ rcu_read_unlock();
55751+
55752+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
55753+ !(task->role->roletype & GR_ROLE_GOD) &&
55754+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
55755+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
55756+ if (unsafe_flags & LSM_UNSAFE_SHARE)
55757+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
55758+ else
55759+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
55760+ return -EACCES;
55761+ }
55762+
55763+skip_check:
55764+
55765+ obj = chk_obj_label(dentry, mnt, task->acl);
55766+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
55767+
55768+ if (!(task->acl->mode & GR_INHERITLEARN) &&
55769+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
55770+ if (obj->nested)
55771+ task->acl = obj->nested;
55772+ else
55773+ task->acl = newacl;
55774+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
55775+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
55776+
55777+ task->is_writable = 0;
55778+
55779+ /* ignore additional mmap checks for processes that are writable
55780+ by the default ACL */
55781+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
55782+ if (unlikely(obj->mode & GR_WRITE))
55783+ task->is_writable = 1;
55784+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
55785+ if (unlikely(obj->mode & GR_WRITE))
55786+ task->is_writable = 1;
55787+
55788+ gr_set_proc_res(task);
55789+
55790+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
55791+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
55792+#endif
55793+ return 0;
55794+}
55795+
55796+/* always called with valid inodev ptr */
55797+static void
55798+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
55799+{
55800+ struct acl_object_label *matchpo;
55801+ struct acl_subject_label *matchps;
55802+ struct acl_subject_label *subj;
55803+ struct acl_role_label *role;
55804+ unsigned int x;
55805+
55806+ FOR_EACH_ROLE_START(role)
55807+ FOR_EACH_SUBJECT_START(role, subj, x)
55808+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
55809+ matchpo->mode |= GR_DELETED;
55810+ FOR_EACH_SUBJECT_END(subj,x)
55811+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
55812+ /* nested subjects aren't in the role's subj_hash table */
55813+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
55814+ matchpo->mode |= GR_DELETED;
55815+ FOR_EACH_NESTED_SUBJECT_END(subj)
55816+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
55817+ matchps->mode |= GR_DELETED;
55818+ FOR_EACH_ROLE_END(role)
55819+
55820+ inodev->nentry->deleted = 1;
55821+
55822+ return;
55823+}
55824+
55825+void
55826+gr_handle_delete(const ino_t ino, const dev_t dev)
55827+{
55828+ struct inodev_entry *inodev;
55829+
55830+ if (unlikely(!(gr_status & GR_READY)))
55831+ return;
55832+
55833+ write_lock(&gr_inode_lock);
55834+ inodev = lookup_inodev_entry(ino, dev);
55835+ if (inodev != NULL)
55836+ do_handle_delete(inodev, ino, dev);
55837+ write_unlock(&gr_inode_lock);
55838+
55839+ return;
55840+}
55841+
55842+static void
55843+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
55844+ const ino_t newinode, const dev_t newdevice,
55845+ struct acl_subject_label *subj)
55846+{
55847+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
55848+ struct acl_object_label *match;
55849+
55850+ match = subj->obj_hash[index];
55851+
55852+ while (match && (match->inode != oldinode ||
55853+ match->device != olddevice ||
55854+ !(match->mode & GR_DELETED)))
55855+ match = match->next;
55856+
55857+ if (match && (match->inode == oldinode)
55858+ && (match->device == olddevice)
55859+ && (match->mode & GR_DELETED)) {
55860+ if (match->prev == NULL) {
55861+ subj->obj_hash[index] = match->next;
55862+ if (match->next != NULL)
55863+ match->next->prev = NULL;
55864+ } else {
55865+ match->prev->next = match->next;
55866+ if (match->next != NULL)
55867+ match->next->prev = match->prev;
55868+ }
55869+ match->prev = NULL;
55870+ match->next = NULL;
55871+ match->inode = newinode;
55872+ match->device = newdevice;
55873+ match->mode &= ~GR_DELETED;
55874+
55875+ insert_acl_obj_label(match, subj);
55876+ }
55877+
55878+ return;
55879+}
55880+
55881+static void
55882+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
55883+ const ino_t newinode, const dev_t newdevice,
55884+ struct acl_role_label *role)
55885+{
55886+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
55887+ struct acl_subject_label *match;
55888+
55889+ match = role->subj_hash[index];
55890+
55891+ while (match && (match->inode != oldinode ||
55892+ match->device != olddevice ||
55893+ !(match->mode & GR_DELETED)))
55894+ match = match->next;
55895+
55896+ if (match && (match->inode == oldinode)
55897+ && (match->device == olddevice)
55898+ && (match->mode & GR_DELETED)) {
55899+ if (match->prev == NULL) {
55900+ role->subj_hash[index] = match->next;
55901+ if (match->next != NULL)
55902+ match->next->prev = NULL;
55903+ } else {
55904+ match->prev->next = match->next;
55905+ if (match->next != NULL)
55906+ match->next->prev = match->prev;
55907+ }
55908+ match->prev = NULL;
55909+ match->next = NULL;
55910+ match->inode = newinode;
55911+ match->device = newdevice;
55912+ match->mode &= ~GR_DELETED;
55913+
55914+ insert_acl_subj_label(match, role);
55915+ }
55916+
55917+ return;
55918+}
55919+
55920+static void
55921+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
55922+ const ino_t newinode, const dev_t newdevice)
55923+{
55924+ unsigned int index = gr_fhash(oldinode, olddevice, inodev_set.i_size);
55925+ struct inodev_entry *match;
55926+
55927+ match = inodev_set.i_hash[index];
55928+
55929+ while (match && (match->nentry->inode != oldinode ||
55930+ match->nentry->device != olddevice || !match->nentry->deleted))
55931+ match = match->next;
55932+
55933+ if (match && (match->nentry->inode == oldinode)
55934+ && (match->nentry->device == olddevice) &&
55935+ match->nentry->deleted) {
55936+ if (match->prev == NULL) {
55937+ inodev_set.i_hash[index] = match->next;
55938+ if (match->next != NULL)
55939+ match->next->prev = NULL;
55940+ } else {
55941+ match->prev->next = match->next;
55942+ if (match->next != NULL)
55943+ match->next->prev = match->prev;
55944+ }
55945+ match->prev = NULL;
55946+ match->next = NULL;
55947+ match->nentry->inode = newinode;
55948+ match->nentry->device = newdevice;
55949+ match->nentry->deleted = 0;
55950+
55951+ insert_inodev_entry(match);
55952+ }
55953+
55954+ return;
55955+}
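The three update_* helpers above share one pattern: find the stale (deleted-flagged) entry in its doubly linked hash chain, unlink it (fixing up the bucket head when the entry is first in the chain), re-key it with the new inode/device, and re-insert it under its new hash. A minimal sketch of the unlink step, with an illustrative node type:

#include <stddef.h>

struct node {
	struct node *prev, *next;
};

/* detach n from its chain; *bucket is the head pointer of the
 * hash bucket the chain hangs off */
static void unlink_node(struct node **bucket, struct node *n)
{
	if (n->prev == NULL) {
		*bucket = n->next;              /* n was the chain head */
		if (n->next)
			n->next->prev = NULL;
	} else {
		n->prev->next = n->next;
		if (n->next)
			n->next->prev = n->prev;
	}
	n->prev = n->next = NULL;               /* ready for re-insertion */
}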
55956+
55957+static void
55958+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
55959+{
55960+ struct acl_subject_label *subj;
55961+ struct acl_role_label *role;
55962+ unsigned int x;
55963+
55964+ FOR_EACH_ROLE_START(role)
55965+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
55966+
55967+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
55968+ if ((subj->inode == ino) && (subj->device == dev)) {
55969+ subj->inode = ino;
55970+ subj->device = dev;
55971+ }
55972+ /* nested subjects aren't in the role's subj_hash table */
55973+ update_acl_obj_label(matchn->inode, matchn->device,
55974+ ino, dev, subj);
55975+ FOR_EACH_NESTED_SUBJECT_END(subj)
55976+ FOR_EACH_SUBJECT_START(role, subj, x)
55977+ update_acl_obj_label(matchn->inode, matchn->device,
55978+ ino, dev, subj);
55979+ FOR_EACH_SUBJECT_END(subj,x)
55980+ FOR_EACH_ROLE_END(role)
55981+
55982+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
55983+
55984+ return;
55985+}
55986+
55987+static void
55988+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
55989+ const struct vfsmount *mnt)
55990+{
55991+ ino_t ino = dentry->d_inode->i_ino;
55992+ dev_t dev = __get_dev(dentry);
55993+
55994+ __do_handle_create(matchn, ino, dev);
55995+
55996+ return;
55997+}
55998+
55999+void
56000+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
56001+{
56002+ struct name_entry *matchn;
56003+
56004+ if (unlikely(!(gr_status & GR_READY)))
56005+ return;
56006+
56007+ preempt_disable();
56008+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
56009+
56010+ if (unlikely((unsigned long)matchn)) {
56011+ write_lock(&gr_inode_lock);
56012+ do_handle_create(matchn, dentry, mnt);
56013+ write_unlock(&gr_inode_lock);
56014+ }
56015+ preempt_enable();
56016+
56017+ return;
56018+}
56019+
56020+void
56021+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
56022+{
56023+ struct name_entry *matchn;
56024+
56025+ if (unlikely(!(gr_status & GR_READY)))
56026+ return;
56027+
56028+ preempt_disable();
56029+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
56030+
56031+ if (unlikely((unsigned long)matchn)) {
56032+ write_lock(&gr_inode_lock);
56033+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
56034+ write_unlock(&gr_inode_lock);
56035+ }
56036+ preempt_enable();
56037+
56038+ return;
56039+}
56040+
56041+void
56042+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
56043+ struct dentry *old_dentry,
56044+ struct dentry *new_dentry,
56045+ struct vfsmount *mnt, const __u8 replace)
56046+{
56047+ struct name_entry *matchn;
56048+ struct inodev_entry *inodev;
56049+ struct inode *inode = new_dentry->d_inode;
56050+ ino_t old_ino = old_dentry->d_inode->i_ino;
56051+ dev_t old_dev = __get_dev(old_dentry);
56052+
56053+	/* vfs_rename swaps the name and parent link for old_dentry and
56054+	   new_dentry;
56055+	   at this point, old_dentry has the new name, parent link, and inode
56056+	   for the renamed file;
56057+	   if a file is being replaced by a rename, new_dentry has the inode
56058+	   and name for the replaced file
56059+	*/
56060+
56061+ if (unlikely(!(gr_status & GR_READY)))
56062+ return;
56063+
56064+ preempt_disable();
56065+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
56066+
56067+ /* we wouldn't have to check d_inode if it weren't for
56068+ NFS silly-renaming
56069+ */
56070+
56071+ write_lock(&gr_inode_lock);
56072+ if (unlikely(replace && inode)) {
56073+ ino_t new_ino = inode->i_ino;
56074+ dev_t new_dev = __get_dev(new_dentry);
56075+
56076+ inodev = lookup_inodev_entry(new_ino, new_dev);
56077+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
56078+ do_handle_delete(inodev, new_ino, new_dev);
56079+ }
56080+
56081+ inodev = lookup_inodev_entry(old_ino, old_dev);
56082+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
56083+ do_handle_delete(inodev, old_ino, old_dev);
56084+
56085+ if (unlikely((unsigned long)matchn))
56086+ do_handle_create(matchn, old_dentry, mnt);
56087+
56088+ write_unlock(&gr_inode_lock);
56089+ preempt_enable();
56090+
56091+ return;
56092+}
56093+
56094+static int
56095+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
56096+ unsigned char **sum)
56097+{
56098+ struct acl_role_label *r;
56099+ struct role_allowed_ip *ipp;
56100+ struct role_transition *trans;
56101+ unsigned int i;
56102+ int found = 0;
56103+ u32 curr_ip = current->signal->curr_ip;
56104+
56105+ current->signal->saved_ip = curr_ip;
56106+
56107+ /* check transition table */
56108+
56109+ for (trans = current->role->transitions; trans; trans = trans->next) {
56110+ if (!strcmp(rolename, trans->rolename)) {
56111+ found = 1;
56112+ break;
56113+ }
56114+ }
56115+
56116+ if (!found)
56117+ return 0;
56118+
56119+ /* handle special roles that do not require authentication
56120+ and check ip */
56121+
56122+ FOR_EACH_ROLE_START(r)
56123+ if (!strcmp(rolename, r->rolename) &&
56124+ (r->roletype & GR_ROLE_SPECIAL)) {
56125+ found = 0;
56126+ if (r->allowed_ips != NULL) {
56127+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
56128+ if ((ntohl(curr_ip) & ipp->netmask) ==
56129+ (ntohl(ipp->addr) & ipp->netmask))
56130+ found = 1;
56131+ }
56132+ } else
56133+ found = 2;
56134+ if (!found)
56135+ return 0;
56136+
56137+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
56138+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
56139+ *salt = NULL;
56140+ *sum = NULL;
56141+ return 1;
56142+ }
56143+ }
56144+ FOR_EACH_ROLE_END(r)
56145+
56146+ for (i = 0; i < num_sprole_pws; i++) {
56147+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
56148+ *salt = acl_special_roles[i]->salt;
56149+ *sum = acl_special_roles[i]->sum;
56150+ return 1;
56151+ }
56152+ }
56153+
56154+ return 0;
56155+}
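The allowed_ips check above is a plain netmask comparison: a client address matches an entry when both agree on every bit the netmask covers. The same test in isolation (host byte order, illustrative names):

#include <stdbool.h>
#include <stdint.h>

static bool ip_matches(uint32_t client, uint32_t entry, uint32_t netmask)
{
	return (client & netmask) == (entry & netmask);
}

/* e.g. ip_matches(0x0A000105, 0x0A000100, 0xFFFFFF00) -> true
 * (10.0.1.5 is inside 10.0.1.0/24) */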
56156+
56157+static void
56158+assign_special_role(char *rolename)
56159+{
56160+ struct acl_object_label *obj;
56161+ struct acl_role_label *r;
56162+ struct acl_role_label *assigned = NULL;
56163+ struct task_struct *tsk;
56164+ struct file *filp;
56165+
56166+ FOR_EACH_ROLE_START(r)
56167+ if (!strcmp(rolename, r->rolename) &&
56168+ (r->roletype & GR_ROLE_SPECIAL)) {
56169+ assigned = r;
56170+ break;
56171+ }
56172+ FOR_EACH_ROLE_END(r)
56173+
56174+ if (!assigned)
56175+ return;
56176+
56177+ read_lock(&tasklist_lock);
56178+ read_lock(&grsec_exec_file_lock);
56179+
56180+ tsk = current->real_parent;
56181+ if (tsk == NULL)
56182+ goto out_unlock;
56183+
56184+ filp = tsk->exec_file;
56185+ if (filp == NULL)
56186+ goto out_unlock;
56187+
56188+ tsk->is_writable = 0;
56189+
56190+ tsk->acl_sp_role = 1;
56191+ tsk->acl_role_id = ++acl_sp_role_value;
56192+ tsk->role = assigned;
56193+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
56194+
56195+ /* ignore additional mmap checks for processes that are writable
56196+ by the default ACL */
56197+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
56198+ if (unlikely(obj->mode & GR_WRITE))
56199+ tsk->is_writable = 1;
56200+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
56201+ if (unlikely(obj->mode & GR_WRITE))
56202+ tsk->is_writable = 1;
56203+
56204+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
56205+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
56206+#endif
56207+
56208+out_unlock:
56209+ read_unlock(&grsec_exec_file_lock);
56210+ read_unlock(&tasklist_lock);
56211+ return;
56212+}
56213+
56214+int gr_check_secure_terminal(struct task_struct *task)
56215+{
56216+ struct task_struct *p, *p2, *p3;
56217+ struct files_struct *files;
56218+ struct fdtable *fdt;
56219+ struct file *our_file = NULL, *file;
56220+ int i;
56221+
56222+ if (task->signal->tty == NULL)
56223+ return 1;
56224+
56225+ files = get_files_struct(task);
56226+ if (files != NULL) {
56227+ rcu_read_lock();
56228+ fdt = files_fdtable(files);
56229+ for (i=0; i < fdt->max_fds; i++) {
56230+ file = fcheck_files(files, i);
56231+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
56232+ get_file(file);
56233+ our_file = file;
56234+ }
56235+ }
56236+ rcu_read_unlock();
56237+ put_files_struct(files);
56238+ }
56239+
56240+ if (our_file == NULL)
56241+ return 1;
56242+
56243+ read_lock(&tasklist_lock);
56244+ do_each_thread(p2, p) {
56245+ files = get_files_struct(p);
56246+ if (files == NULL ||
56247+ (p->signal && p->signal->tty == task->signal->tty)) {
56248+ if (files != NULL)
56249+ put_files_struct(files);
56250+ continue;
56251+ }
56252+ rcu_read_lock();
56253+ fdt = files_fdtable(files);
56254+ for (i=0; i < fdt->max_fds; i++) {
56255+ file = fcheck_files(files, i);
56256+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
56257+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
56258+ p3 = task;
56259+ while (p3->pid > 0) {
56260+ if (p3 == p)
56261+ break;
56262+ p3 = p3->real_parent;
56263+ }
56264+ if (p3 == p)
56265+ break;
56266+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
56267+ gr_handle_alertkill(p);
56268+ rcu_read_unlock();
56269+ put_files_struct(files);
56270+ read_unlock(&tasklist_lock);
56271+ fput(our_file);
56272+ return 0;
56273+ }
56274+ }
56275+ rcu_read_unlock();
56276+ put_files_struct(files);
56277+ } while_each_thread(p2, p);
56278+ read_unlock(&tasklist_lock);
56279+
56280+ fput(our_file);
56281+ return 1;
56282+}
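The inner loop above decides whether a process holding the terminal open is an ancestor of the task being checked, by walking real_parent links until it either meets the candidate or reaches pid 0. A standalone sketch of that walk, with an illustrative process type:

#include <stdbool.h>

struct proc {
	int pid;
	const struct proc *real_parent;
};

/* true if p appears on t's real-parent chain (including t itself) */
static bool is_ancestor(const struct proc *p, const struct proc *t)
{
	while (t->pid > 0) {
		if (t == p)
			return true;
		t = t->real_parent;
	}
	return t == p;      /* allow a match on the pid-0 endpoint too */
}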
56283+
56284+static int gr_rbac_disable(void *unused)
56285+{
56286+ pax_open_kernel();
56287+ gr_status &= ~GR_READY;
56288+ pax_close_kernel();
56289+
56290+ return 0;
56291+}
56292+
56293+ssize_t
56294+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
56295+{
56296+ struct gr_arg_wrapper uwrap;
56297+ unsigned char *sprole_salt = NULL;
56298+ unsigned char *sprole_sum = NULL;
56299+ int error = sizeof (struct gr_arg_wrapper);
56300+ int error2 = 0;
56301+
56302+ mutex_lock(&gr_dev_mutex);
56303+
56304+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
56305+ error = -EPERM;
56306+ goto out;
56307+ }
56308+
56309+ if (count != sizeof (struct gr_arg_wrapper)) {
56310+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
56311+ error = -EINVAL;
56312+ goto out;
56313+ }
56314+
56315+
56316+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
56317+ gr_auth_expires = 0;
56318+ gr_auth_attempts = 0;
56319+ }
56320+
56321+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
56322+ error = -EFAULT;
56323+ goto out;
56324+ }
56325+
56326+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
56327+ error = -EINVAL;
56328+ goto out;
56329+ }
56330+
56331+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
56332+ error = -EFAULT;
56333+ goto out;
56334+ }
56335+
56336+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
56337+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
56338+ time_after(gr_auth_expires, get_seconds())) {
56339+ error = -EBUSY;
56340+ goto out;
56341+ }
56342+
56343+	/* if a non-root user is trying to do anything other than use a
56344+	   special role, do not attempt authentication and do not count it
56345+	   towards authentication locking
56346+	*/
56347+
56348+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
56349+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
56350+ !uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
56351+ error = -EPERM;
56352+ goto out;
56353+ }
56354+
56355+ /* ensure pw and special role name are null terminated */
56356+
56357+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
56358+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
56359+
56360+	/* Okay.
56361+	 * We have enough of the argument structure (we have yet
56362+	 * to copy_from_user the tables themselves). Copy the tables
56363+	 * only if we need them, i.e. for loading operations. */
56364+
56365+ switch (gr_usermode->mode) {
56366+ case GR_STATUS:
56367+ if (gr_status & GR_READY) {
56368+ error = 1;
56369+ if (!gr_check_secure_terminal(current))
56370+ error = 3;
56371+ } else
56372+ error = 2;
56373+ goto out;
56374+ case GR_SHUTDOWN:
56375+ if ((gr_status & GR_READY)
56376+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
56377+ stop_machine(gr_rbac_disable, NULL, NULL);
56378+ free_variables();
56379+ memset(gr_usermode, 0, sizeof (struct gr_arg));
56380+ memset(gr_system_salt, 0, GR_SALT_LEN);
56381+ memset(gr_system_sum, 0, GR_SHA_LEN);
56382+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
56383+ } else if (gr_status & GR_READY) {
56384+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
56385+ error = -EPERM;
56386+ } else {
56387+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
56388+ error = -EAGAIN;
56389+ }
56390+ break;
56391+ case GR_ENABLE:
56392+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
56393+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
56394+ else {
56395+ if (gr_status & GR_READY)
56396+ error = -EAGAIN;
56397+ else
56398+ error = error2;
56399+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
56400+ }
56401+ break;
56402+ case GR_RELOAD:
56403+ if (!(gr_status & GR_READY)) {
56404+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
56405+ error = -EAGAIN;
56406+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
56407+ stop_machine(gr_rbac_disable, NULL, NULL);
56408+ free_variables();
56409+ error2 = gracl_init(gr_usermode);
56410+ if (!error2)
56411+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
56412+ else {
56413+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
56414+ error = error2;
56415+ }
56416+ } else {
56417+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
56418+ error = -EPERM;
56419+ }
56420+ break;
56421+ case GR_SEGVMOD:
56422+ if (unlikely(!(gr_status & GR_READY))) {
56423+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
56424+ error = -EAGAIN;
56425+ break;
56426+ }
56427+
56428+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
56429+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
56430+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
56431+ struct acl_subject_label *segvacl;
56432+ segvacl =
56433+ lookup_acl_subj_label(gr_usermode->segv_inode,
56434+ gr_usermode->segv_device,
56435+ current->role);
56436+ if (segvacl) {
56437+ segvacl->crashes = 0;
56438+ segvacl->expires = 0;
56439+ }
56440+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
56441+ gr_remove_uid(gr_usermode->segv_uid);
56442+ }
56443+ } else {
56444+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
56445+ error = -EPERM;
56446+ }
56447+ break;
56448+ case GR_SPROLE:
56449+ case GR_SPROLEPAM:
56450+ if (unlikely(!(gr_status & GR_READY))) {
56451+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
56452+ error = -EAGAIN;
56453+ break;
56454+ }
56455+
56456+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
56457+ current->role->expires = 0;
56458+ current->role->auth_attempts = 0;
56459+ }
56460+
56461+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
56462+ time_after(current->role->expires, get_seconds())) {
56463+ error = -EBUSY;
56464+ goto out;
56465+ }
56466+
56467+ if (lookup_special_role_auth
56468+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
56469+ && ((!sprole_salt && !sprole_sum)
56470+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
56471+ char *p = "";
56472+ assign_special_role(gr_usermode->sp_role);
56473+ read_lock(&tasklist_lock);
56474+ if (current->real_parent)
56475+ p = current->real_parent->role->rolename;
56476+ read_unlock(&tasklist_lock);
56477+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
56478+ p, acl_sp_role_value);
56479+ } else {
56480+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
56481+ error = -EPERM;
56482+			if (!(current->role->auth_attempts++))
56483+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
56484+
56485+ goto out;
56486+ }
56487+ break;
56488+ case GR_UNSPROLE:
56489+ if (unlikely(!(gr_status & GR_READY))) {
56490+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
56491+ error = -EAGAIN;
56492+ break;
56493+ }
56494+
56495+ if (current->role->roletype & GR_ROLE_SPECIAL) {
56496+ char *p = "";
56497+ int i = 0;
56498+
56499+ read_lock(&tasklist_lock);
56500+ if (current->real_parent) {
56501+ p = current->real_parent->role->rolename;
56502+ i = current->real_parent->acl_role_id;
56503+ }
56504+ read_unlock(&tasklist_lock);
56505+
56506+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
56507+ gr_set_acls(1);
56508+ } else {
56509+ error = -EPERM;
56510+ goto out;
56511+ }
56512+ break;
56513+ default:
56514+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
56515+ error = -EINVAL;
56516+ break;
56517+ }
56518+
56519+ if (error != -EPERM)
56520+ goto out;
56521+
56522+	if (!(gr_auth_attempts++))
56523+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
56524+
56525+ out:
56526+ mutex_unlock(&gr_dev_mutex);
56527+ return error;
56528+}
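write_grsec_handler() rate-limits failed authentications with a pair of globals: the first failure opens a timeout window, further failures are counted, and once the count reaches the configured maximum all attempts are refused until the window expires, which resets both. A minimal userspace model of that policy; MAXTRIES and TIMEOUT stand in for the CONFIG_GRKERNSEC_ACL_* values:

#include <stdbool.h>
#include <time.h>

#define MAXTRIES 3
#define TIMEOUT  30 /* seconds */

static unsigned int attempts;
static time_t expires;

static bool locked_out(time_t now)
{
	if (expires && now >= expires) {        /* window over: reset */
		expires = 0;
		attempts = 0;
	}
	return attempts >= MAXTRIES && now < expires;
}

static void record_failure(time_t now)
{
	if (attempts++ == 0)                    /* first failure opens window */
		expires = now + TIMEOUT;
}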
56529+
56530+/* must be called with
56531+ rcu_read_lock();
56532+ read_lock(&tasklist_lock);
56533+ read_lock(&grsec_exec_file_lock);
56534+*/
56535+int gr_apply_subject_to_task(struct task_struct *task)
56536+{
56537+ struct acl_object_label *obj;
56538+ char *tmpname;
56539+ struct acl_subject_label *tmpsubj;
56540+ struct file *filp;
56541+ struct name_entry *nmatch;
56542+
56543+ filp = task->exec_file;
56544+ if (filp == NULL)
56545+ return 0;
56546+
56547+	/* the following applies the correct subject
56548+	   to binaries that were already running when
56549+	   the RBAC system was enabled and that have
56550+	   been replaced or deleted since their execution
56551+	   -----
56552+	   when the RBAC system starts, the inode/dev
56553+	   from exec_file will be one the RBAC system
56554+	   is unaware of; it only knows the inode/dev
56555+	   of the file currently on disk, or the absence
56556+	   of it
56557+	*/
56558+ preempt_disable();
56559+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
56560+
56561+ nmatch = lookup_name_entry(tmpname);
56562+ preempt_enable();
56563+ tmpsubj = NULL;
56564+ if (nmatch) {
56565+ if (nmatch->deleted)
56566+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
56567+ else
56568+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
56569+ if (tmpsubj != NULL)
56570+ task->acl = tmpsubj;
56571+ }
56572+ if (tmpsubj == NULL)
56573+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
56574+ task->role);
56575+ if (task->acl) {
56576+ task->is_writable = 0;
56577+ /* ignore additional mmap checks for processes that are writable
56578+ by the default ACL */
56579+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
56580+ if (unlikely(obj->mode & GR_WRITE))
56581+ task->is_writable = 1;
56582+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
56583+ if (unlikely(obj->mode & GR_WRITE))
56584+ task->is_writable = 1;
56585+
56586+ gr_set_proc_res(task);
56587+
56588+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
56589+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
56590+#endif
56591+ } else {
56592+ return 1;
56593+ }
56594+
56595+ return 0;
56596+}
56597+
56598+int
56599+gr_set_acls(const int type)
56600+{
56601+ struct task_struct *task, *task2;
56602+ struct acl_role_label *role = current->role;
56603+ __u16 acl_role_id = current->acl_role_id;
56604+ const struct cred *cred;
56605+ int ret;
56606+
56607+ rcu_read_lock();
56608+ read_lock(&tasklist_lock);
56609+ read_lock(&grsec_exec_file_lock);
56610+ do_each_thread(task2, task) {
56611+ /* check to see if we're called from the exit handler,
56612+ if so, only replace ACLs that have inherited the admin
56613+ ACL */
56614+
56615+ if (type && (task->role != role ||
56616+ task->acl_role_id != acl_role_id))
56617+ continue;
56618+
56619+ task->acl_role_id = 0;
56620+ task->acl_sp_role = 0;
56621+
56622+ if (task->exec_file) {
56623+ cred = __task_cred(task);
56624+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
56625+ ret = gr_apply_subject_to_task(task);
56626+ if (ret) {
56627+ read_unlock(&grsec_exec_file_lock);
56628+ read_unlock(&tasklist_lock);
56629+ rcu_read_unlock();
56630+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
56631+ return ret;
56632+ }
56633+ } else {
56634+ // it's a kernel process
56635+ task->role = kernel_role;
56636+ task->acl = kernel_role->root_label;
56637+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
56638+ task->acl->mode &= ~GR_PROCFIND;
56639+#endif
56640+ }
56641+ } while_each_thread(task2, task);
56642+ read_unlock(&grsec_exec_file_lock);
56643+ read_unlock(&tasklist_lock);
56644+ rcu_read_unlock();
56645+
56646+ return 0;
56647+}
56648+
56649+void
56650+gr_learn_resource(const struct task_struct *task,
56651+ const int res, const unsigned long wanted, const int gt)
56652+{
56653+ struct acl_subject_label *acl;
56654+ const struct cred *cred;
56655+
56656+ if (unlikely((gr_status & GR_READY) &&
56657+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
56658+ goto skip_reslog;
56659+
56660+#ifdef CONFIG_GRKERNSEC_RESLOG
56661+ gr_log_resource(task, res, wanted, gt);
56662+#endif
56663+ skip_reslog:
56664+
56665+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
56666+ return;
56667+
56668+ acl = task->acl;
56669+
56670+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
56671+ !(acl->resmask & (1 << (unsigned short) res))))
56672+ return;
56673+
56674+ if (wanted >= acl->res[res].rlim_cur) {
56675+ unsigned long res_add;
56676+
56677+ res_add = wanted;
56678+ switch (res) {
56679+ case RLIMIT_CPU:
56680+ res_add += GR_RLIM_CPU_BUMP;
56681+ break;
56682+ case RLIMIT_FSIZE:
56683+ res_add += GR_RLIM_FSIZE_BUMP;
56684+ break;
56685+ case RLIMIT_DATA:
56686+ res_add += GR_RLIM_DATA_BUMP;
56687+ break;
56688+ case RLIMIT_STACK:
56689+ res_add += GR_RLIM_STACK_BUMP;
56690+ break;
56691+ case RLIMIT_CORE:
56692+ res_add += GR_RLIM_CORE_BUMP;
56693+ break;
56694+ case RLIMIT_RSS:
56695+ res_add += GR_RLIM_RSS_BUMP;
56696+ break;
56697+ case RLIMIT_NPROC:
56698+ res_add += GR_RLIM_NPROC_BUMP;
56699+ break;
56700+ case RLIMIT_NOFILE:
56701+ res_add += GR_RLIM_NOFILE_BUMP;
56702+ break;
56703+ case RLIMIT_MEMLOCK:
56704+ res_add += GR_RLIM_MEMLOCK_BUMP;
56705+ break;
56706+ case RLIMIT_AS:
56707+ res_add += GR_RLIM_AS_BUMP;
56708+ break;
56709+ case RLIMIT_LOCKS:
56710+ res_add += GR_RLIM_LOCKS_BUMP;
56711+ break;
56712+ case RLIMIT_SIGPENDING:
56713+ res_add += GR_RLIM_SIGPENDING_BUMP;
56714+ break;
56715+ case RLIMIT_MSGQUEUE:
56716+ res_add += GR_RLIM_MSGQUEUE_BUMP;
56717+ break;
56718+ case RLIMIT_NICE:
56719+ res_add += GR_RLIM_NICE_BUMP;
56720+ break;
56721+ case RLIMIT_RTPRIO:
56722+ res_add += GR_RLIM_RTPRIO_BUMP;
56723+ break;
56724+ case RLIMIT_RTTIME:
56725+ res_add += GR_RLIM_RTTIME_BUMP;
56726+ break;
56727+ }
56728+
56729+ acl->res[res].rlim_cur = res_add;
56730+
56731+ if (wanted > acl->res[res].rlim_max)
56732+ acl->res[res].rlim_max = res_add;
56733+
56734+ /* only log the subject filename, since resource logging is supported for
56735+ single-subject learning only */
56736+ rcu_read_lock();
56737+ cred = __task_cred(task);
56738+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
56739+ task->role->roletype, cred->uid, cred->gid, acl->filename,
56740+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
56741+ "", (unsigned long) res, &task->signal->saved_ip);
56742+ rcu_read_unlock();
56743+ }
56744+
56745+ return;
56746+}
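In learning mode, when a process wants more of a resource than its current soft limit, the learned limit becomes the wanted value plus a per-resource slack (the GR_RLIM_*_BUMP constants above), so the generated policy isn't exactly as tight as one observed run. A sketch of the shape of that computation; the bump values here are invented, not grsecurity's:

enum res_id { R_CPU, R_FSIZE, R_NOFILE };

static unsigned long bumped_limit(enum res_id res, unsigned long wanted)
{
	unsigned long bump;

	switch (res) {
	case R_CPU:    bump = 60;   break;      /* illustrative values */
	case R_FSIZE:  bump = 8192; break;
	case R_NOFILE: bump = 8;    break;
	default:       bump = 0;    break;
	}
	return wanted + bump;
}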
56747+
56748+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
56749+void
56750+pax_set_initial_flags(struct linux_binprm *bprm)
56751+{
56752+ struct task_struct *task = current;
56753+ struct acl_subject_label *proc;
56754+ unsigned long flags;
56755+
56756+ if (unlikely(!(gr_status & GR_READY)))
56757+ return;
56758+
56759+ flags = pax_get_flags(task);
56760+
56761+ proc = task->acl;
56762+
56763+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
56764+ flags &= ~MF_PAX_PAGEEXEC;
56765+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
56766+ flags &= ~MF_PAX_SEGMEXEC;
56767+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
56768+ flags &= ~MF_PAX_RANDMMAP;
56769+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
56770+ flags &= ~MF_PAX_EMUTRAMP;
56771+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
56772+ flags &= ~MF_PAX_MPROTECT;
56773+
56774+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
56775+ flags |= MF_PAX_PAGEEXEC;
56776+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
56777+ flags |= MF_PAX_SEGMEXEC;
56778+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
56779+ flags |= MF_PAX_RANDMMAP;
56780+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
56781+ flags |= MF_PAX_EMUTRAMP;
56782+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
56783+ flags |= MF_PAX_MPROTECT;
56784+
56785+ pax_set_flags(task, flags);
56786+
56787+ return;
56788+}
56789+#endif
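pax_set_initial_flags() applies the subject's PaX overrides in two passes: every explicit "disable" bit clears its flag first, then every explicit "enable" bit sets its flag, so a policy can push each feature in either direction while flags it doesn't mention keep their inherited value. A reduced sketch with two flags; all names are stand-ins:

#include <stdint.h>

#define F_PAGEEXEC (1u << 0)
#define F_MPROTECT (1u << 1)

#define ACL_DIS_PAGEEXEC (1u << 0)
#define ACL_EN_PAGEEXEC  (1u << 1)
#define ACL_DIS_MPROTECT (1u << 2)
#define ACL_EN_MPROTECT  (1u << 3)

static uint32_t apply_pax_overrides(uint32_t flags, uint32_t acl)
{
	/* pass 1: explicit disables */
	if (acl & ACL_DIS_PAGEEXEC) flags &= ~F_PAGEEXEC;
	if (acl & ACL_DIS_MPROTECT) flags &= ~F_MPROTECT;

	/* pass 2: explicit enables */
	if (acl & ACL_EN_PAGEEXEC)  flags |= F_PAGEEXEC;
	if (acl & ACL_EN_MPROTECT)  flags |= F_MPROTECT;

	return flags;
}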
56790+
56791+int
56792+gr_handle_proc_ptrace(struct task_struct *task)
56793+{
56794+ struct file *filp;
56795+ struct task_struct *tmp = task;
56796+ struct task_struct *curtemp = current;
56797+ __u32 retmode;
56798+
56799+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
56800+ if (unlikely(!(gr_status & GR_READY)))
56801+ return 0;
56802+#endif
56803+
56804+ read_lock(&tasklist_lock);
56805+ read_lock(&grsec_exec_file_lock);
56806+ filp = task->exec_file;
56807+
56808+ while (tmp->pid > 0) {
56809+ if (tmp == curtemp)
56810+ break;
56811+ tmp = tmp->real_parent;
56812+ }
56813+
56814+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && !uid_eq(current_uid(), GLOBAL_ROOT_UID) && !(gr_status & GR_READY)) ||
56815+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
56816+ read_unlock(&grsec_exec_file_lock);
56817+ read_unlock(&tasklist_lock);
56818+ return 1;
56819+ }
56820+
56821+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
56822+ if (!(gr_status & GR_READY)) {
56823+ read_unlock(&grsec_exec_file_lock);
56824+ read_unlock(&tasklist_lock);
56825+ return 0;
56826+ }
56827+#endif
56828+
56829+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
56830+ read_unlock(&grsec_exec_file_lock);
56831+ read_unlock(&tasklist_lock);
56832+
56833+ if (retmode & GR_NOPTRACE)
56834+ return 1;
56835+
56836+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
56837+ && (current->acl != task->acl || (current->acl != current->role->root_label
56838+ && current->pid != task->pid)))
56839+ return 1;
56840+
56841+ return 0;
56842+}
56843+
56844+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
56845+{
56846+ if (unlikely(!(gr_status & GR_READY)))
56847+ return;
56848+
56849+ if (!(current->role->roletype & GR_ROLE_GOD))
56850+ return;
56851+
56852+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
56853+ p->role->rolename, gr_task_roletype_to_char(p),
56854+ p->acl->filename);
56855+}
56856+
56857+int
56858+gr_handle_ptrace(struct task_struct *task, const long request)
56859+{
56860+ struct task_struct *tmp = task;
56861+ struct task_struct *curtemp = current;
56862+ __u32 retmode;
56863+
56864+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
56865+ if (unlikely(!(gr_status & GR_READY)))
56866+ return 0;
56867+#endif
56868+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
56869+ read_lock(&tasklist_lock);
56870+ while (tmp->pid > 0) {
56871+ if (tmp == curtemp)
56872+ break;
56873+ tmp = tmp->real_parent;
56874+ }
56875+
56876+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && !uid_eq(current_uid(), GLOBAL_ROOT_UID) && !(gr_status & GR_READY)) ||
56877+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
56878+ read_unlock(&tasklist_lock);
56879+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
56880+ return 1;
56881+ }
56882+ read_unlock(&tasklist_lock);
56883+ }
56884+
56885+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
56886+ if (!(gr_status & GR_READY))
56887+ return 0;
56888+#endif
56889+
56890+ read_lock(&grsec_exec_file_lock);
56891+ if (unlikely(!task->exec_file)) {
56892+ read_unlock(&grsec_exec_file_lock);
56893+ return 0;
56894+ }
56895+
56896+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
56897+ read_unlock(&grsec_exec_file_lock);
56898+
56899+ if (retmode & GR_NOPTRACE) {
56900+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
56901+ return 1;
56902+ }
56903+
56904+ if (retmode & GR_PTRACERD) {
56905+ switch (request) {
56906+ case PTRACE_SEIZE:
56907+ case PTRACE_POKETEXT:
56908+ case PTRACE_POKEDATA:
56909+ case PTRACE_POKEUSR:
56910+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
56911+ case PTRACE_SETREGS:
56912+ case PTRACE_SETFPREGS:
56913+#endif
56914+#ifdef CONFIG_X86
56915+ case PTRACE_SETFPXREGS:
56916+#endif
56917+#ifdef CONFIG_ALTIVEC
56918+ case PTRACE_SETVRREGS:
56919+#endif
56920+ return 1;
56921+ default:
56922+ return 0;
56923+ }
56924+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
56925+ !(current->role->roletype & GR_ROLE_GOD) &&
56926+ (current->acl != task->acl)) {
56927+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
56928+ return 1;
56929+ }
56930+
56931+ return 0;
56932+}
56933+
56934+static int is_writable_mmap(const struct file *filp)
56935+{
56936+ struct task_struct *task = current;
56937+ struct acl_object_label *obj, *obj2;
56938+
56939+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
56940+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
56941+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
56942+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
56943+ task->role->root_label);
56944+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
56945+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
56946+ return 1;
56947+ }
56948+ }
56949+ return 0;
56950+}
56951+
56952+int
56953+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
56954+{
56955+ __u32 mode;
56956+
56957+ if (unlikely(!file || !(prot & PROT_EXEC)))
56958+ return 1;
56959+
56960+ if (is_writable_mmap(file))
56961+ return 0;
56962+
56963+ mode =
56964+ gr_search_file(file->f_path.dentry,
56965+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
56966+ file->f_path.mnt);
56967+
56968+ if (!gr_tpe_allow(file))
56969+ return 0;
56970+
56971+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
56972+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
56973+ return 0;
56974+ } else if (unlikely(!(mode & GR_EXEC))) {
56975+ return 0;
56976+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
56977+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
56978+ return 1;
56979+ }
56980+
56981+ return 1;
56982+}
56983+
56984+int
56985+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
56986+{
56987+ __u32 mode;
56988+
56989+ if (unlikely(!file || !(prot & PROT_EXEC)))
56990+ return 1;
56991+
56992+ if (is_writable_mmap(file))
56993+ return 0;
56994+
56995+ mode =
56996+ gr_search_file(file->f_path.dentry,
56997+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
56998+ file->f_path.mnt);
56999+
57000+ if (!gr_tpe_allow(file))
57001+ return 0;
57002+
57003+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
57004+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
57005+ return 0;
57006+ } else if (unlikely(!(mode & GR_EXEC))) {
57007+ return 0;
57008+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
57009+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
57010+ return 1;
57011+ }
57012+
57013+ return 1;
57014+}
57015+
57016+void
57017+gr_acl_handle_psacct(struct task_struct *task, const long code)
57018+{
57019+ unsigned long runtime;
57020+ unsigned long cputime;
57021+ unsigned int wday, cday;
57022+ __u8 whr, chr;
57023+ __u8 wmin, cmin;
57024+ __u8 wsec, csec;
57025+ struct timespec timeval;
57026+
57027+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
57028+ !(task->acl->mode & GR_PROCACCT)))
57029+ return;
57030+
57031+ do_posix_clock_monotonic_gettime(&timeval);
57032+ runtime = timeval.tv_sec - task->start_time.tv_sec;
57033+ wday = runtime / (3600 * 24);
57034+ runtime -= wday * (3600 * 24);
57035+ whr = runtime / 3600;
57036+ runtime -= whr * 3600;
57037+ wmin = runtime / 60;
57038+ runtime -= wmin * 60;
57039+ wsec = runtime;
57040+
57041+ cputime = (task->utime + task->stime) / HZ;
57042+ cday = cputime / (3600 * 24);
57043+ cputime -= cday * (3600 * 24);
57044+ chr = cputime / 3600;
57045+ cputime -= chr * 3600;
57046+ cmin = cputime / 60;
57047+ cputime -= cmin * 60;
57048+ csec = cputime;
57049+
57050+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
57051+
57052+ return;
57053+}
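The accounting handler above decomposes two second counts (wall time since start, and CPU time) into days/hours/minutes/seconds before logging. The decomposition in isolation, with an illustrative result type:

struct dhms {
	unsigned int d;
	unsigned char h, m, s;
};

static struct dhms split_seconds(unsigned long t)
{
	struct dhms r;

	r.d = t / (3600 * 24);
	t  -= (unsigned long)r.d * 3600 * 24;
	r.h = t / 3600;
	t  -= (unsigned long)r.h * 3600;
	r.m = t / 60;
	t  -= (unsigned long)r.m * 60;
	r.s = t;
	return r;
}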
57054+
57055+void gr_set_kernel_label(struct task_struct *task)
57056+{
57057+ if (gr_status & GR_READY) {
57058+ task->role = kernel_role;
57059+ task->acl = kernel_role->root_label;
57060+ }
57061+ return;
57062+}
57063+
57064+#ifdef CONFIG_TASKSTATS
57065+int gr_is_taskstats_denied(int pid)
57066+{
57067+ struct task_struct *task;
57068+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
57069+ const struct cred *cred;
57070+#endif
57071+ int ret = 0;
57072+
57073+ /* restrict taskstats viewing to un-chrooted root users
57074+ who have the 'view' subject flag if the RBAC system is enabled
57075+ */
57076+
57077+ rcu_read_lock();
57078+ read_lock(&tasklist_lock);
57079+ task = find_task_by_vpid(pid);
57080+ if (task) {
57081+#ifdef CONFIG_GRKERNSEC_CHROOT
57082+ if (proc_is_chrooted(task))
57083+ ret = -EACCES;
57084+#endif
57085+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
57086+ cred = __task_cred(task);
57087+#ifdef CONFIG_GRKERNSEC_PROC_USER
57088+ if (!uid_eq(cred->uid, GLOBAL_ROOT_UID))
57089+ ret = -EACCES;
57090+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
57091+ if (!uid_eq(cred->uid, GLOBAL_ROOT_UID) && !groups_search(cred->group_info, grsec_proc_gid))
57092+ ret = -EACCES;
57093+#endif
57094+#endif
57095+ if (gr_status & GR_READY) {
57096+ if (!(task->acl->mode & GR_VIEW))
57097+ ret = -EACCES;
57098+ }
57099+ } else
57100+ ret = -ENOENT;
57101+
57102+ read_unlock(&tasklist_lock);
57103+ rcu_read_unlock();
57104+
57105+ return ret;
57106+}
57107+#endif
57108+
57109+/* AUXV entries are filled via a descendant of search_binary_handler
57110+ after we've already applied the subject for the target
57111+*/
57112+int gr_acl_enable_at_secure(void)
57113+{
57114+ if (unlikely(!(gr_status & GR_READY)))
57115+ return 0;
57116+
57117+ if (current->acl->mode & GR_ATSECURE)
57118+ return 1;
57119+
57120+ return 0;
57121+}
57122+
57123+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
57124+{
57125+ struct task_struct *task = current;
57126+ struct dentry *dentry = file->f_path.dentry;
57127+ struct vfsmount *mnt = file->f_path.mnt;
57128+ struct acl_object_label *obj, *tmp;
57129+ struct acl_subject_label *subj;
57130+ unsigned int bufsize;
57131+ int is_not_root;
57132+ char *path;
57133+ dev_t dev = __get_dev(dentry);
57134+
57135+ if (unlikely(!(gr_status & GR_READY)))
57136+ return 1;
57137+
57138+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
57139+ return 1;
57140+
57141+ /* ignore Eric Biederman */
57142+ if (IS_PRIVATE(dentry->d_inode))
57143+ return 1;
57144+
57145+ subj = task->acl;
57146+ read_lock(&gr_inode_lock);
57147+ do {
57148+ obj = lookup_acl_obj_label(ino, dev, subj);
57149+ if (obj != NULL) {
57150+ read_unlock(&gr_inode_lock);
57151+ return (obj->mode & GR_FIND) ? 1 : 0;
57152+ }
57153+ } while ((subj = subj->parent_subject));
57154+ read_unlock(&gr_inode_lock);
57155+
57156+	/* this is purely an optimization, since we're looking for an object
57157+	   for the directory we're doing a readdir on;
57158+	   if it's possible for any globbed object to match the entry we're
57159+	   filling into the directory, then the object we find here will be
57160+	   an anchor point with attached globbed objects
57161+	 */
57162+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
57163+ if (obj->globbed == NULL)
57164+ return (obj->mode & GR_FIND) ? 1 : 0;
57165+
57166+ is_not_root = ((obj->filename[0] == '/') &&
57167+ (obj->filename[1] == '\0')) ? 0 : 1;
57168+ bufsize = PAGE_SIZE - namelen - is_not_root;
57169+
57170+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
57171+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
57172+ return 1;
57173+
57174+ preempt_disable();
57175+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
57176+ bufsize);
57177+
57178+ bufsize = strlen(path);
57179+
57180+ /* if base is "/", don't append an additional slash */
57181+ if (is_not_root)
57182+ *(path + bufsize) = '/';
57183+ memcpy(path + bufsize + is_not_root, name, namelen);
57184+ *(path + bufsize + namelen + is_not_root) = '\0';
57185+
57186+ tmp = obj->globbed;
57187+ while (tmp) {
57188+ if (!glob_match(tmp->filename, path)) {
57189+ preempt_enable();
57190+ return (tmp->mode & GR_FIND) ? 1 : 0;
57191+ }
57192+ tmp = tmp->next;
57193+ }
57194+ preempt_enable();
57195+ return (obj->mode & GR_FIND) ? 1 : 0;
57196+}
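The bounds check in the handler above, (bufsize - 1) > (PAGE_SIZE - 1), folds two conditions into one unsigned comparison: when bufsize is 0 the subtraction wraps to the maximum unsigned value, so both the empty and the oversized case are caught. A small demonstration:

#include <assert.h>

#define PAGE_SZ 4096u

static int bufsize_invalid(unsigned int bufsize)
{
	/* bufsize == 0 wraps to UINT_MAX, which also exceeds PAGE_SZ - 1 */
	return (bufsize - 1) > (PAGE_SZ - 1);
}

int main(void)
{
	assert(bufsize_invalid(0));
	assert(!bufsize_invalid(1));
	assert(!bufsize_invalid(PAGE_SZ));
	assert(bufsize_invalid(PAGE_SZ + 1));
	return 0;
}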
57197+
57198+void gr_put_exec_file(struct task_struct *task)
57199+{
57200+ struct file *filp;
57201+
57202+ write_lock(&grsec_exec_file_lock);
57203+ filp = task->exec_file;
57204+ task->exec_file = NULL;
57205+ write_unlock(&grsec_exec_file_lock);
57206+
57207+ if (filp)
57208+ fput(filp);
57209+
57210+ return;
57211+}
57212+
57213+
57214+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
57215+EXPORT_SYMBOL(gr_acl_is_enabled);
57216+#endif
57217+EXPORT_SYMBOL(gr_learn_resource);
57218+EXPORT_SYMBOL(gr_set_kernel_label);
57219+#ifdef CONFIG_SECURITY
57220+EXPORT_SYMBOL(gr_check_user_change);
57221+EXPORT_SYMBOL(gr_check_group_change);
57222+#endif
57223+
57224diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
57225new file mode 100644
57226index 0000000..34fefda
57227--- /dev/null
57228+++ b/grsecurity/gracl_alloc.c
57229@@ -0,0 +1,105 @@
57230+#include <linux/kernel.h>
57231+#include <linux/mm.h>
57232+#include <linux/slab.h>
57233+#include <linux/vmalloc.h>
57234+#include <linux/gracl.h>
57235+#include <linux/grsecurity.h>
57236+
57237+static unsigned long alloc_stack_next = 1;
57238+static unsigned long alloc_stack_size = 1;
57239+static void **alloc_stack;
57240+
57241+static __inline__ int
57242+alloc_pop(void)
57243+{
57244+ if (alloc_stack_next == 1)
57245+ return 0;
57246+
57247+ kfree(alloc_stack[alloc_stack_next - 2]);
57248+
57249+ alloc_stack_next--;
57250+
57251+ return 1;
57252+}
57253+
57254+static __inline__ int
57255+alloc_push(void *buf)
57256+{
57257+ if (alloc_stack_next >= alloc_stack_size)
57258+ return 1;
57259+
57260+ alloc_stack[alloc_stack_next - 1] = buf;
57261+
57262+ alloc_stack_next++;
57263+
57264+ return 0;
57265+}
57266+
57267+void *
57268+acl_alloc(unsigned long len)
57269+{
57270+ void *ret = NULL;
57271+
57272+ if (!len || len > PAGE_SIZE)
57273+ goto out;
57274+
57275+ ret = kmalloc(len, GFP_KERNEL);
57276+
57277+ if (ret) {
57278+ if (alloc_push(ret)) {
57279+ kfree(ret);
57280+ ret = NULL;
57281+ }
57282+ }
57283+
57284+out:
57285+ return ret;
57286+}
57287+
57288+void *
57289+acl_alloc_num(unsigned long num, unsigned long len)
57290+{
57291+ if (!len || (num > (PAGE_SIZE / len)))
57292+ return NULL;
57293+
57294+ return acl_alloc(num * len);
57295+}
57296+
57297+void
57298+acl_free_all(void)
57299+{
57300+ if (gr_acl_is_enabled() || !alloc_stack)
57301+ return;
57302+
57303+ while (alloc_pop()) ;
57304+
57305+ if (alloc_stack) {
57306+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
57307+ kfree(alloc_stack);
57308+ else
57309+ vfree(alloc_stack);
57310+ }
57311+
57312+ alloc_stack = NULL;
57313+ alloc_stack_size = 1;
57314+ alloc_stack_next = 1;
57315+
57316+ return;
57317+}
57318+
57319+int
57320+acl_alloc_stack_init(unsigned long size)
57321+{
57322+ if ((size * sizeof (void *)) <= PAGE_SIZE)
57323+ alloc_stack =
57324+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
57325+ else
57326+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
57327+
57328+ alloc_stack_size = size;
57329+
57330+ if (!alloc_stack)
57331+ return 0;
57332+ else
57333+ return 1;
57334+}
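gracl_alloc.c implements a one-shot allocation tracker: every acl_alloc() result is pushed onto a pre-sized pointer stack so that acl_free_all() can release an entire policy in one sweep, with no per-object ownership bookkeeping. A userspace model of the same idea (plain malloc/free in place of kmalloc/vmalloc, error handling reduced to the essentials):

#include <stdlib.h>

static void **stack;
static unsigned long top, capacity;

static int track_init(unsigned long n)
{
	stack = malloc(n * sizeof(*stack));
	capacity = stack ? n : 0;
	top = 0;
	return stack != NULL;
}

/* allocate and remember the pointer for later bulk release */
static void *track_alloc(size_t len)
{
	void *p;

	if (top >= capacity || (p = malloc(len)) == NULL)
		return NULL;
	stack[top++] = p;
	return p;
}

static void track_free_all(void)
{
	while (top)
		free(stack[--top]);
	free(stack);
	stack = NULL;
	capacity = 0;
}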
57335diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
57336new file mode 100644
57337index 0000000..6d21049
57338--- /dev/null
57339+++ b/grsecurity/gracl_cap.c
57340@@ -0,0 +1,110 @@
57341+#include <linux/kernel.h>
57342+#include <linux/module.h>
57343+#include <linux/sched.h>
57344+#include <linux/gracl.h>
57345+#include <linux/grsecurity.h>
57346+#include <linux/grinternal.h>
57347+
57348+extern const char *captab_log[];
57349+extern int captab_log_entries;
57350+
57351+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
57352+{
57353+ struct acl_subject_label *curracl;
57354+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
57355+ kernel_cap_t cap_audit = __cap_empty_set;
57356+
57357+ if (!gr_acl_is_enabled())
57358+ return 1;
57359+
57360+ curracl = task->acl;
57361+
57362+ cap_drop = curracl->cap_lower;
57363+ cap_mask = curracl->cap_mask;
57364+ cap_audit = curracl->cap_invert_audit;
57365+
57366+ while ((curracl = curracl->parent_subject)) {
57367+		/* if the cap isn't decided yet in the computed mask but is specified
57368+		   in the current level subject, record it in the computed mask, and
57369+		   if it is lowered in the current level subject, add it to the set
57370+		   of dropped capabilities as well
57371+		*/
57372+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
57373+ cap_raise(cap_mask, cap);
57374+ if (cap_raised(curracl->cap_lower, cap))
57375+ cap_raise(cap_drop, cap);
57376+ if (cap_raised(curracl->cap_invert_audit, cap))
57377+ cap_raise(cap_audit, cap);
57378+ }
57379+ }
57380+
57381+ if (!cap_raised(cap_drop, cap)) {
57382+ if (cap_raised(cap_audit, cap))
57383+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
57384+ return 1;
57385+ }
57386+
57387+ curracl = task->acl;
57388+
57389+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
57390+ && cap_raised(cred->cap_effective, cap)) {
57391+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
57392+ task->role->roletype, cred->uid,
57393+ cred->gid, task->exec_file ?
57394+ gr_to_filename(task->exec_file->f_path.dentry,
57395+ task->exec_file->f_path.mnt) : curracl->filename,
57396+ curracl->filename, 0UL,
57397+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
57398+ return 1;
57399+ }
57400+
57401+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
57402+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
57403+
57404+ return 0;
57405+}
57406+
57407+int
57408+gr_acl_is_capable(const int cap)
57409+{
57410+ return gr_task_acl_is_capable(current, current_cred(), cap);
57411+}
57412+
57413+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
57414+{
57415+ struct acl_subject_label *curracl;
57416+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
57417+
57418+ if (!gr_acl_is_enabled())
57419+ return 1;
57420+
57421+ curracl = task->acl;
57422+
57423+ cap_drop = curracl->cap_lower;
57424+ cap_mask = curracl->cap_mask;
57425+
57426+ while ((curracl = curracl->parent_subject)) {
57427+ /* if the cap isn't specified in the current computed mask but is specified in the
57428+ current level subject, and is lowered in the current level subject, then add
57429+ it to the set of dropped capabilities;
57430+ otherwise, add the current level subject's mask to the current computed mask
57431+ */
57432+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
57433+ cap_raise(cap_mask, cap);
57434+ if (cap_raised(curracl->cap_lower, cap))
57435+ cap_raise(cap_drop, cap);
57436+ }
57437+ }
57438+
57439+ if (!cap_raised(cap_drop, cap))
57440+ return 1;
57441+
57442+ return 0;
57443+}
57444+
57445+int
57446+gr_acl_is_capable_nolog(const int cap)
57447+{
57448+ return gr_task_acl_is_capable_nolog(current, cap);
57449+}
57450+
57451diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
57452new file mode 100644
57453index 0000000..a340c17
57454--- /dev/null
57455+++ b/grsecurity/gracl_fs.c
57456@@ -0,0 +1,431 @@
57457+#include <linux/kernel.h>
57458+#include <linux/sched.h>
57459+#include <linux/types.h>
57460+#include <linux/fs.h>
57461+#include <linux/file.h>
57462+#include <linux/stat.h>
57463+#include <linux/grsecurity.h>
57464+#include <linux/grinternal.h>
57465+#include <linux/gracl.h>
57466+
57467+umode_t
57468+gr_acl_umask(void)
57469+{
57470+ if (unlikely(!gr_acl_is_enabled()))
57471+ return 0;
57472+
57473+ return current->role->umask;
57474+}
57475+
57476+__u32
57477+gr_acl_handle_hidden_file(const struct dentry * dentry,
57478+ const struct vfsmount * mnt)
57479+{
57480+ __u32 mode;
57481+
57482+ if (unlikely(!dentry->d_inode))
57483+ return GR_FIND;
57484+
57485+ mode =
57486+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
57487+
57488+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
57489+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
57490+ return mode;
57491+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
57492+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
57493+ return 0;
57494+ } else if (unlikely(!(mode & GR_FIND)))
57495+ return 0;
57496+
57497+ return GR_FIND;
57498+}
57499+
57500+__u32
57501+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
57502+ int acc_mode)
57503+{
57504+ __u32 reqmode = GR_FIND;
57505+ __u32 mode;
57506+
57507+ if (unlikely(!dentry->d_inode))
57508+ return reqmode;
57509+
57510+ if (acc_mode & MAY_APPEND)
57511+ reqmode |= GR_APPEND;
57512+ else if (acc_mode & MAY_WRITE)
57513+ reqmode |= GR_WRITE;
57514+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
57515+ reqmode |= GR_READ;
57516+
57517+ mode =
57518+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
57519+ mnt);
57520+
57521+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
57522+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
57523+ reqmode & GR_READ ? " reading" : "",
57524+ reqmode & GR_WRITE ? " writing" : reqmode &
57525+ GR_APPEND ? " appending" : "");
57526+ return reqmode;
57527+ } else
57528+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
57529+ {
57530+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
57531+ reqmode & GR_READ ? " reading" : "",
57532+ reqmode & GR_WRITE ? " writing" : reqmode &
57533+ GR_APPEND ? " appending" : "");
57534+ return 0;
57535+ } else if (unlikely((mode & reqmode) != reqmode))
57536+ return 0;
57537+
57538+ return reqmode;
57539+}
57540+
57541+__u32
57542+gr_acl_handle_creat(const struct dentry * dentry,
57543+ const struct dentry * p_dentry,
57544+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
57545+ const int imode)
57546+{
57547+ __u32 reqmode = GR_WRITE | GR_CREATE;
57548+ __u32 mode;
57549+
57550+ if (acc_mode & MAY_APPEND)
57551+ reqmode |= GR_APPEND;
57552+ // if a directory was required or the directory already exists, then
57553+ // don't count this open as a read
57554+ if ((acc_mode & MAY_READ) &&
57555+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
57556+ reqmode |= GR_READ;
57557+ if ((open_flags & O_CREAT) &&
57558+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
57559+ reqmode |= GR_SETID;
57560+
57561+ mode =
57562+ gr_check_create(dentry, p_dentry, p_mnt,
57563+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
57564+
57565+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
57566+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
57567+ reqmode & GR_READ ? " reading" : "",
57568+ reqmode & GR_WRITE ? " writing" : reqmode &
57569+ GR_APPEND ? " appending" : "");
57570+ return reqmode;
57571+ } else
57572+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
57573+ {
57574+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
57575+ reqmode & GR_READ ? " reading" : "",
57576+ reqmode & GR_WRITE ? " writing" : reqmode &
57577+ GR_APPEND ? " appending" : "");
57578+ return 0;
57579+ } else if (unlikely((mode & reqmode) != reqmode))
57580+ return 0;
57581+
57582+ return reqmode;
57583+}
57584+
57585+__u32
57586+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
57587+ const int fmode)
57588+{
57589+ __u32 mode, reqmode = GR_FIND;
57590+
57591+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
57592+ reqmode |= GR_EXEC;
57593+ if (fmode & S_IWOTH)
57594+ reqmode |= GR_WRITE;
57595+ if (fmode & S_IROTH)
57596+ reqmode |= GR_READ;
57597+
57598+ mode =
57599+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
57600+ mnt);
57601+
57602+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
57603+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
57604+ reqmode & GR_READ ? " reading" : "",
57605+ reqmode & GR_WRITE ? " writing" : "",
57606+ reqmode & GR_EXEC ? " executing" : "");
57607+ return reqmode;
57608+ } else
57609+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
57610+ {
57611+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
57612+ reqmode & GR_READ ? " reading" : "",
57613+ reqmode & GR_WRITE ? " writing" : "",
57614+ reqmode & GR_EXEC ? " executing" : "");
57615+ return 0;
57616+ } else if (unlikely((mode & reqmode) != reqmode))
57617+ return 0;
57618+
57619+ return reqmode;
57620+}
57621+
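+/* common helper for the simple fs hooks below: query the ACL for @reqmode,
+   log an audit message on an audited success, log the denial unless
+   GR_SUPPRESS is set, and return a nonzero granted mode or 0 on denial */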
57622+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
57623+{
57624+ __u32 mode;
57625+
57626+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
57627+
57628+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
57629+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
57630+ return mode;
57631+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
57632+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
57633+ return 0;
57634+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
57635+ return 0;
57636+
57637+ return (reqmode);
57638+}
57639+
57640+__u32
57641+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
57642+{
57643+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
57644+}
57645+
57646+__u32
57647+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
57648+{
57649+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
57650+}
57651+
57652+__u32
57653+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
57654+{
57655+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
57656+}
57657+
57658+__u32
57659+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
57660+{
57661+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
57662+}
57663+
57664+__u32
57665+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
57666+ umode_t *modeptr)
57667+{
57668+ umode_t mode;
57669+
57670+ *modeptr &= ~gr_acl_umask();
57671+ mode = *modeptr;
57672+
57673+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
57674+ return 1;
57675+
57676+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
57677+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
57678+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
57679+ GR_CHMOD_ACL_MSG);
57680+ } else {
57681+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
57682+ }
57683+}
57684+
57685+__u32
57686+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
57687+{
57688+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
57689+}
57690+
57691+__u32
57692+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
57693+{
57694+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
57695+}
57696+
57697+__u32
57698+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
57699+{
57700+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
57701+}
57702+
57703+__u32
57704+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
57705+{
57706+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
57707+ GR_UNIXCONNECT_ACL_MSG);
57708+}
57709+
57710+/* hardlinks require at minimum create and link permission;
57711+ any additional privilege required is based on the
57712+ privilege of the file being linked to
57713+*/
57714+__u32
57715+gr_acl_handle_link(const struct dentry * new_dentry,
57716+ const struct dentry * parent_dentry,
57717+ const struct vfsmount * parent_mnt,
57718+ const struct dentry * old_dentry,
57719+ const struct vfsmount * old_mnt, const struct filename *to)
57720+{
57721+ __u32 mode;
57722+ __u32 needmode = GR_CREATE | GR_LINK;
57723+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
57724+
57725+ mode =
57726+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
57727+ old_mnt);
57728+
57729+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
57730+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
57731+ return mode;
57732+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
57733+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
57734+ return 0;
57735+ } else if (unlikely((mode & needmode) != needmode))
57736+ return 0;
57737+
57738+ return 1;
57739+}
57740+
57741+__u32
57742+gr_acl_handle_symlink(const struct dentry * new_dentry,
57743+ const struct dentry * parent_dentry,
57744+ const struct vfsmount * parent_mnt, const struct filename *from)
57745+{
57746+ __u32 needmode = GR_WRITE | GR_CREATE;
57747+ __u32 mode;
57748+
57749+ mode =
57750+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
57751+ GR_CREATE | GR_AUDIT_CREATE |
57752+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
57753+
57754+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
57755+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
57756+ return mode;
57757+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
57758+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
57759+ return 0;
57760+ } else if (unlikely((mode & needmode) != needmode))
57761+ return 0;
57762+
57763+ return (GR_WRITE | GR_CREATE);
57764+}
57765+
57766+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
57767+{
57768+ __u32 mode;
57769+
57770+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
57771+
57772+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
57773+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
57774+ return mode;
57775+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
57776+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
57777+ return 0;
57778+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
57779+ return 0;
57780+
57781+ return (reqmode);
57782+}
57783+
57784+__u32
57785+gr_acl_handle_mknod(const struct dentry * new_dentry,
57786+ const struct dentry * parent_dentry,
57787+ const struct vfsmount * parent_mnt,
57788+ const int mode)
57789+{
57790+ __u32 reqmode = GR_WRITE | GR_CREATE;
57791+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
57792+ reqmode |= GR_SETID;
57793+
57794+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
57795+ reqmode, GR_MKNOD_ACL_MSG);
57796+}
57797+
57798+__u32
57799+gr_acl_handle_mkdir(const struct dentry *new_dentry,
57800+ const struct dentry *parent_dentry,
57801+ const struct vfsmount *parent_mnt)
57802+{
57803+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
57804+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
57805+}
57806+
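+/* a rename is permitted only when both the source and destination checks
+   grant read and write */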
57807+#define RENAME_CHECK_SUCCESS(old, new) \
57808+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
57809+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
57810+
57811+int
57812+gr_acl_handle_rename(struct dentry *new_dentry,
57813+ struct dentry *parent_dentry,
57814+ const struct vfsmount *parent_mnt,
57815+ struct dentry *old_dentry,
57816+ struct inode *old_parent_inode,
57817+ struct vfsmount *old_mnt, const struct filename *newname)
57818+{
57819+ __u32 comp1, comp2;
57820+ int error = 0;
57821+
57822+ if (unlikely(!gr_acl_is_enabled()))
57823+ return 0;
57824+
57825+ if (!new_dentry->d_inode) {
57826+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
57827+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
57828+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
57829+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
57830+ GR_DELETE | GR_AUDIT_DELETE |
57831+ GR_AUDIT_READ | GR_AUDIT_WRITE |
57832+ GR_SUPPRESS, old_mnt);
57833+ } else {
57834+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
57835+ GR_CREATE | GR_DELETE |
57836+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
57837+ GR_AUDIT_READ | GR_AUDIT_WRITE |
57838+ GR_SUPPRESS, parent_mnt);
57839+ comp2 =
57840+ gr_search_file(old_dentry,
57841+ GR_READ | GR_WRITE | GR_AUDIT_READ |
57842+ GR_DELETE | GR_AUDIT_DELETE |
57843+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
57844+ }
57845+
57846+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
57847+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
57848+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
57849+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
57850+ && !(comp2 & GR_SUPPRESS)) {
57851+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
57852+ error = -EACCES;
57853+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
57854+ error = -EACCES;
57855+
57856+ return error;
57857+}
57858+
57859+void
57860+gr_acl_handle_exit(void)
57861+{
57862+ u16 id;
57863+ char *rolename;
57864+
57865+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
57866+ !(current->role->roletype & GR_ROLE_PERSIST))) {
57867+ id = current->acl_role_id;
57868+ rolename = current->role->rolename;
57869+ gr_set_acls(1);
57870+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
57871+ }
57872+
57873+ gr_put_exec_file(current);
57874+ return;
57875+}
57876+
57877+int
57878+gr_acl_handle_procpidmem(const struct task_struct *task)
57879+{
57880+ if (unlikely(!gr_acl_is_enabled()))
57881+ return 0;
57882+
57883+ if (task != current && task->acl->mode & GR_PROTPROCFD)
57884+ return -EACCES;
57885+
57886+ return 0;
57887+}
57888diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
57889new file mode 100644
57890index 0000000..58800a7
57891--- /dev/null
57892+++ b/grsecurity/gracl_ip.c
57893@@ -0,0 +1,384 @@
57894+#include <linux/kernel.h>
57895+#include <asm/uaccess.h>
57896+#include <asm/errno.h>
57897+#include <net/sock.h>
57898+#include <linux/file.h>
57899+#include <linux/fs.h>
57900+#include <linux/net.h>
57901+#include <linux/in.h>
57902+#include <linux/skbuff.h>
57903+#include <linux/ip.h>
57904+#include <linux/udp.h>
57905+#include <linux/types.h>
57906+#include <linux/sched.h>
57907+#include <linux/netdevice.h>
57908+#include <linux/inetdevice.h>
57909+#include <linux/gracl.h>
57910+#include <linux/grsecurity.h>
57911+#include <linux/grinternal.h>
57912+
57913+#define GR_BIND 0x01
57914+#define GR_CONNECT 0x02
57915+#define GR_INVERT 0x04
57916+#define GR_BINDOVERRIDE 0x08
57917+#define GR_CONNECTOVERRIDE 0x10
57918+#define GR_SOCK_FAMILY 0x20
57919+
57920+static const char * gr_protocols[IPPROTO_MAX] = {
57921+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
57922+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
57923+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
57924+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
57925+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
57926+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
57927+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
57928+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
57929+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
57930+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
57931+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
57932+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
57933+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
57934+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
57935+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
57936+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
57937+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
57938+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
57939+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
57940+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
57941+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
57942+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
57943+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
57944+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
57945+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
57946+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
57947+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
57948+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
57949+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
57950+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
57951+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
57952+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
57953+ };
57954+
57955+static const char * gr_socktypes[SOCK_MAX] = {
57956+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
57957+ "unknown:7", "unknown:8", "unknown:9", "packet"
57958+ };
57959+
57960+static const char * gr_sockfamilies[AF_MAX+1] = {
57961+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
57962+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
57963+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
57964+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
57965+ };
57966+
57967+const char *
57968+gr_proto_to_name(unsigned char proto)
57969+{
57970+ return gr_protocols[proto];
57971+}
57972+
57973+const char *
57974+gr_socktype_to_name(unsigned char type)
57975+{
57976+ return gr_socktypes[type];
57977+}
57978+
57979+const char *
57980+gr_sockfamily_to_name(unsigned char family)
57981+{
57982+ return gr_sockfamilies[family];
57983+}
57984+
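+/* socket-creation hook: validate the (domain, type, protocol) triple against
+   the subject's allowed socket families and, for PF_INET, its allowed types
+   and protocols; emits learn entries in learn mode; returns 1 to allow,
+   0 to deny */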
57985+int
57986+gr_search_socket(const int domain, const int type, const int protocol)
57987+{
57988+ struct acl_subject_label *curr;
57989+ const struct cred *cred = current_cred();
57990+
57991+ if (unlikely(!gr_acl_is_enabled()))
57992+ goto exit;
57993+
57994+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
57995+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
57996+ goto exit; // let the kernel handle it
57997+
57998+ curr = current->acl;
57999+
58000+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
58001+ /* the family is allowed; if this is PF_INET, allow it only if
58002+ the extra sock type/protocol checks pass */
58003+ if (domain == PF_INET)
58004+ goto inet_check;
58005+ goto exit;
58006+ } else {
58007+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
58008+ __u32 fakeip = 0;
58009+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
58010+ current->role->roletype, cred->uid,
58011+ cred->gid, current->exec_file ?
58012+ gr_to_filename(current->exec_file->f_path.dentry,
58013+ current->exec_file->f_path.mnt) :
58014+ curr->filename, curr->filename,
58015+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
58016+ &current->signal->saved_ip);
58017+ goto exit;
58018+ }
58019+ goto exit_fail;
58020+ }
58021+
58022+inet_check:
58023+ /* the rest of this checking is for IPv4 only */
58024+ if (!curr->ips)
58025+ goto exit;
58026+
58027+ if ((curr->ip_type & (1 << type)) &&
58028+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
58029+ goto exit;
58030+
58031+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
58032+ /* we don't place acls on raw sockets, and sometimes
58033+ dgram/ip sockets are opened for ioctl and not
58034+ bind/connect, so we'll fake a bind learn log */
58035+ if (type == SOCK_RAW || type == SOCK_PACKET) {
58036+ __u32 fakeip = 0;
58037+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
58038+ current->role->roletype, cred->uid,
58039+ cred->gid, current->exec_file ?
58040+ gr_to_filename(current->exec_file->f_path.dentry,
58041+ current->exec_file->f_path.mnt) :
58042+ curr->filename, curr->filename,
58043+ &fakeip, 0, type,
58044+ protocol, GR_CONNECT, &current->signal->saved_ip);
58045+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
58046+ __u32 fakeip = 0;
58047+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
58048+ current->role->roletype, cred->uid,
58049+ cred->gid, current->exec_file ?
58050+ gr_to_filename(current->exec_file->f_path.dentry,
58051+ current->exec_file->f_path.mnt) :
58052+ curr->filename, curr->filename,
58053+ &fakeip, 0, type,
58054+ protocol, GR_BIND, &current->signal->saved_ip);
58055+ }
58056+ /* we'll log when they use connect or bind */
58057+ goto exit;
58058+ }
58059+
58060+exit_fail:
58061+ if (domain == PF_INET)
58062+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
58063+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
58064+ else
58065+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
58066+ gr_socktype_to_name(type), protocol);
58067+
58068+ return 0;
58069+exit:
58070+ return 1;
58071+}
58072+
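+/* match a single ACL IP rule against an address/port/protocol/type tuple;
+   returns 1 if the rule explicitly allows it, 2 if GR_INVERT marks it as
+   explicitly denied, and 0 if the rule doesn't apply and evaluation of the
+   remaining rules should continue */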
58073+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
58074+{
58075+ if ((ip->mode & mode) &&
58076+ (ip_port >= ip->low) &&
58077+ (ip_port <= ip->high) &&
58078+ ((ntohl(ip_addr) & our_netmask) ==
58079+ (ntohl(our_addr) & our_netmask))
58080+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
58081+ && (ip->type & (1 << type))) {
58082+ if (ip->mode & GR_INVERT)
58083+ return 2; // specifically denied
58084+ else
58085+ return 1; // allowed
58086+ }
58087+
58088+ return 0; // not specifically allowed, may continue parsing
58089+}
58090+
58091+static int
58092+gr_search_connectbind(const int full_mode, struct sock *sk,
58093+ struct sockaddr_in *addr, const int type)
58094+{
58095+ char iface[IFNAMSIZ] = {0};
58096+ struct acl_subject_label *curr;
58097+ struct acl_ip_label *ip;
58098+ struct inet_sock *isk;
58099+ struct net_device *dev;
58100+ struct in_device *idev;
58101+ unsigned long i;
58102+ int ret;
58103+ int mode = full_mode & (GR_BIND | GR_CONNECT);
58104+ __u32 ip_addr = 0;
58105+ __u32 our_addr;
58106+ __u32 our_netmask;
58107+ char *p;
58108+ __u16 ip_port = 0;
58109+ const struct cred *cred = current_cred();
58110+
58111+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
58112+ return 0;
58113+
58114+ curr = current->acl;
58115+ isk = inet_sk(sk);
58116+
58117+ /* INADDR_ANY overriding for binds; inaddr_any_override is already in network order */
58118+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
58119+ addr->sin_addr.s_addr = curr->inaddr_any_override;
58120+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
58121+ struct sockaddr_in saddr;
58122+ int err;
58123+
58124+ saddr.sin_family = AF_INET;
58125+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
58126+ saddr.sin_port = isk->inet_sport;
58127+
58128+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
58129+ if (err)
58130+ return err;
58131+
58132+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
58133+ if (err)
58134+ return err;
58135+ }
58136+
58137+ if (!curr->ips)
58138+ return 0;
58139+
58140+ ip_addr = addr->sin_addr.s_addr;
58141+ ip_port = ntohs(addr->sin_port);
58142+
58143+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
58144+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
58145+ current->role->roletype, cred->uid,
58146+ cred->gid, current->exec_file ?
58147+ gr_to_filename(current->exec_file->f_path.dentry,
58148+ current->exec_file->f_path.mnt) :
58149+ curr->filename, curr->filename,
58150+ &ip_addr, ip_port, type,
58151+ sk->sk_protocol, mode, &current->signal->saved_ip);
58152+ return 0;
58153+ }
58154+
58155+ for (i = 0; i < curr->ip_num; i++) {
58156+ ip = *(curr->ips + i);
58157+ if (ip->iface != NULL) {
58158+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
58159+ p = strchr(iface, ':');
58160+ if (p != NULL)
58161+ *p = '\0';
58162+ dev = dev_get_by_name(sock_net(sk), iface);
58163+ if (dev == NULL)
58164+ continue;
58165+ idev = in_dev_get(dev);
58166+ if (idev == NULL) {
58167+ dev_put(dev);
58168+ continue;
58169+ }
58170+ rcu_read_lock();
58171+ for_ifa(idev) {
58172+ if (!strcmp(ip->iface, ifa->ifa_label)) {
58173+ our_addr = ifa->ifa_address;
58174+ our_netmask = 0xffffffff;
58175+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
58176+ if (ret == 1) {
58177+ rcu_read_unlock();
58178+ in_dev_put(idev);
58179+ dev_put(dev);
58180+ return 0;
58181+ } else if (ret == 2) {
58182+ rcu_read_unlock();
58183+ in_dev_put(idev);
58184+ dev_put(dev);
58185+ goto denied;
58186+ }
58187+ }
58188+ } endfor_ifa(idev);
58189+ rcu_read_unlock();
58190+ in_dev_put(idev);
58191+ dev_put(dev);
58192+ } else {
58193+ our_addr = ip->addr;
58194+ our_netmask = ip->netmask;
58195+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
58196+ if (ret == 1)
58197+ return 0;
58198+ else if (ret == 2)
58199+ goto denied;
58200+ }
58201+ }
58202+
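+ /* no rule explicitly allowed the request: fall through and deny */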
58203+denied:
58204+ if (mode == GR_BIND)
58205+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
58206+ else if (mode == GR_CONNECT)
58207+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
58208+
58209+ return -EACCES;
58210+}
58211+
58212+int
58213+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
58214+{
58215+ /* always allow disconnection of dgram sockets with connect */
58216+ if (addr->sin_family == AF_UNSPEC)
58217+ return 0;
58218+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
58219+}
58220+
58221+int
58222+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
58223+{
58224+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
58225+}
58226+
58227+int gr_search_listen(struct socket *sock)
58228+{
58229+ struct sock *sk = sock->sk;
58230+ struct sockaddr_in addr;
58231+
58232+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
58233+ addr.sin_port = inet_sk(sk)->inet_sport;
58234+
58235+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
58236+}
58237+
58238+int gr_search_accept(struct socket *sock)
58239+{
58240+ struct sock *sk = sock->sk;
58241+ struct sockaddr_in addr;
58242+
58243+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
58244+ addr.sin_port = inet_sk(sk)->inet_sport;
58245+
58246+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
58247+}
58248+
58249+int
58250+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
58251+{
58252+ if (addr)
58253+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
58254+ else {
58255+ struct sockaddr_in sin;
58256+ const struct inet_sock *inet = inet_sk(sk);
58257+
58258+ sin.sin_addr.s_addr = inet->inet_daddr;
58259+ sin.sin_port = inet->inet_dport;
58260+
58261+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
58262+ }
58263+}
58264+
58265+int
58266+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
58267+{
58268+ struct sockaddr_in sin;
58269+
58270+ if (unlikely(skb->len < sizeof (struct udphdr)))
58271+ return 0; // skip this packet
58272+
58273+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
58274+ sin.sin_port = udp_hdr(skb)->source;
58275+
58276+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
58277+}
58278diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
58279new file mode 100644
58280index 0000000..25f54ef
58281--- /dev/null
58282+++ b/grsecurity/gracl_learn.c
58283@@ -0,0 +1,207 @@
58284+#include <linux/kernel.h>
58285+#include <linux/mm.h>
58286+#include <linux/sched.h>
58287+#include <linux/poll.h>
58288+#include <linux/string.h>
58289+#include <linux/file.h>
58290+#include <linux/types.h>
58291+#include <linux/vmalloc.h>
58292+#include <linux/grinternal.h>
58293+
58294+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
58295+ size_t count, loff_t *ppos);
58296+extern int gr_acl_is_enabled(void);
58297+
58298+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
58299+static int gr_learn_attached;
58300+
58301+/* use a 512k buffer */
58302+#define LEARN_BUFFER_SIZE (512 * 1024)
58303+
58304+static DEFINE_SPINLOCK(gr_learn_lock);
58305+static DEFINE_MUTEX(gr_learn_user_mutex);
58306+
58307+/* we need to maintain two buffers, so that the kernel context of grlearn
58308+ uses a mutex around the userspace copying, and the other kernel contexts
58309+ use a spinlock when copying into the buffer, since they cannot sleep
58310+*/
58311+static char *learn_buffer;
58312+static char *learn_buffer_user;
58313+static int learn_buffer_len;
58314+static int learn_buffer_user_len;
58315+
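+/* blocking read for the grlearn daemon: sleep until learn data is queued,
+   snapshot it into learn_buffer_user under the spinlock, then copy it out
+   to userspace under the mutex, since copy_to_user may sleep */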
58316+static ssize_t
58317+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
58318+{
58319+ DECLARE_WAITQUEUE(wait, current);
58320+ ssize_t retval = 0;
58321+
58322+ add_wait_queue(&learn_wait, &wait);
58323+ set_current_state(TASK_INTERRUPTIBLE);
58324+ do {
58325+ mutex_lock(&gr_learn_user_mutex);
58326+ spin_lock(&gr_learn_lock);
58327+ if (learn_buffer_len)
58328+ break;
58329+ spin_unlock(&gr_learn_lock);
58330+ mutex_unlock(&gr_learn_user_mutex);
58331+ if (file->f_flags & O_NONBLOCK) {
58332+ retval = -EAGAIN;
58333+ goto out;
58334+ }
58335+ if (signal_pending(current)) {
58336+ retval = -ERESTARTSYS;
58337+ goto out;
58338+ }
58339+
58340+ schedule();
58341+ } while (1);
58342+
58343+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
58344+ learn_buffer_user_len = learn_buffer_len;
58345+ retval = learn_buffer_len;
58346+ learn_buffer_len = 0;
58347+
58348+ spin_unlock(&gr_learn_lock);
58349+
58350+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
58351+ retval = -EFAULT;
58352+
58353+ mutex_unlock(&gr_learn_user_mutex);
58354+out:
58355+ set_current_state(TASK_RUNNING);
58356+ remove_wait_queue(&learn_wait, &wait);
58357+ return retval;
58358+}
58359+
58360+static unsigned int
58361+poll_learn(struct file * file, poll_table * wait)
58362+{
58363+ poll_wait(file, &learn_wait, wait);
58364+
58365+ if (learn_buffer_len)
58366+ return (POLLIN | POLLRDNORM);
58367+
58368+ return 0;
58369+}
58370+
58371+void
58372+gr_clear_learn_entries(void)
58373+{
58374+ char *tmp;
58375+
58376+ mutex_lock(&gr_learn_user_mutex);
58377+ spin_lock(&gr_learn_lock);
58378+ tmp = learn_buffer;
58379+ learn_buffer = NULL;
58380+ spin_unlock(&gr_learn_lock);
58381+ if (tmp)
58382+ vfree(tmp);
58383+ if (learn_buffer_user != NULL) {
58384+ vfree(learn_buffer_user);
58385+ learn_buffer_user = NULL;
58386+ }
58387+ learn_buffer_len = 0;
58388+ mutex_unlock(&gr_learn_user_mutex);
58389+
58390+ return;
58391+}
58392+
58393+void
58394+gr_add_learn_entry(const char *fmt, ...)
58395+{
58396+ va_list args;
58397+ unsigned int len;
58398+
58399+ if (!gr_learn_attached)
58400+ return;
58401+
58402+ spin_lock(&gr_learn_lock);
58403+
58404+ /* leave a gap at the end so we know when it's "full" but don't have to
58405+ compute the exact length of the string we're trying to append
58406+ */
58407+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
58408+ spin_unlock(&gr_learn_lock);
58409+ wake_up_interruptible(&learn_wait);
58410+ return;
58411+ }
58412+ if (learn_buffer == NULL) {
58413+ spin_unlock(&gr_learn_lock);
58414+ return;
58415+ }
58416+
58417+ va_start(args, fmt);
58418+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
58419+ va_end(args);
58420+
58421+ learn_buffer_len += len + 1;
58422+
58423+ spin_unlock(&gr_learn_lock);
58424+ wake_up_interruptible(&learn_wait);
58425+
58426+ return;
58427+}
58428+
58429+static int
58430+open_learn(struct inode *inode, struct file *file)
58431+{
58432+ if (file->f_mode & FMODE_READ && gr_learn_attached)
58433+ return -EBUSY;
58434+ if (file->f_mode & FMODE_READ) {
58435+ int retval = 0;
58436+ mutex_lock(&gr_learn_user_mutex);
58437+ if (learn_buffer == NULL)
58438+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
58439+ if (learn_buffer_user == NULL)
58440+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
58441+ if (learn_buffer == NULL) {
58442+ retval = -ENOMEM;
58443+ goto out_error;
58444+ }
58445+ if (learn_buffer_user == NULL) {
58446+ retval = -ENOMEM;
58447+ goto out_error;
58448+ }
58449+ learn_buffer_len = 0;
58450+ learn_buffer_user_len = 0;
58451+ gr_learn_attached = 1;
58452+out_error:
58453+ mutex_unlock(&gr_learn_user_mutex);
58454+ return retval;
58455+ }
58456+ return 0;
58457+}
58458+
58459+static int
58460+close_learn(struct inode *inode, struct file *file)
58461+{
58462+ if (file->f_mode & FMODE_READ) {
58463+ char *tmp = NULL;
58464+ mutex_lock(&gr_learn_user_mutex);
58465+ spin_lock(&gr_learn_lock);
58466+ tmp = learn_buffer;
58467+ learn_buffer = NULL;
58468+ spin_unlock(&gr_learn_lock);
58469+ if (tmp)
58470+ vfree(tmp);
58471+ if (learn_buffer_user != NULL) {
58472+ vfree(learn_buffer_user);
58473+ learn_buffer_user = NULL;
58474+ }
58475+ learn_buffer_len = 0;
58476+ learn_buffer_user_len = 0;
58477+ gr_learn_attached = 0;
58478+ mutex_unlock(&gr_learn_user_mutex);
58479+ }
58480+
58481+ return 0;
58482+}
58483+
58484+const struct file_operations grsec_fops = {
58485+ .read = read_learn,
58486+ .write = write_grsec_handler,
58487+ .open = open_learn,
58488+ .release = close_learn,
58489+ .poll = poll_learn,
58490+};
58491diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
58492new file mode 100644
58493index 0000000..39645c9
58494--- /dev/null
58495+++ b/grsecurity/gracl_res.c
58496@@ -0,0 +1,68 @@
58497+#include <linux/kernel.h>
58498+#include <linux/sched.h>
58499+#include <linux/gracl.h>
58500+#include <linux/grinternal.h>
58501+
58502+static const char *restab_log[] = {
58503+ [RLIMIT_CPU] = "RLIMIT_CPU",
58504+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
58505+ [RLIMIT_DATA] = "RLIMIT_DATA",
58506+ [RLIMIT_STACK] = "RLIMIT_STACK",
58507+ [RLIMIT_CORE] = "RLIMIT_CORE",
58508+ [RLIMIT_RSS] = "RLIMIT_RSS",
58509+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
58510+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
58511+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
58512+ [RLIMIT_AS] = "RLIMIT_AS",
58513+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
58514+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
58515+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
58516+ [RLIMIT_NICE] = "RLIMIT_NICE",
58517+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
58518+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
58519+ [GR_CRASH_RES] = "RLIMIT_CRASH"
58520+};
58521+
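+/* log @task requesting @wanted of resource @res beyond its limit (@gt picks
+   > vs. >= as the comparison); the hard limit is used for RLIMIT_CPU and
+   RLIMIT_RTTIME, the current limit otherwise, and NPROC/MEMLOCK/NICE
+   overruns are ignored when the matching capability is held */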
58522+void
58523+gr_log_resource(const struct task_struct *task,
58524+ const int res, const unsigned long wanted, const int gt)
58525+{
58526+ const struct cred *cred;
58527+ unsigned long rlim;
58528+
58529+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
58530+ return;
58531+
58532+ // not yet supported resource
58533+ if (unlikely(!restab_log[res]))
58534+ return;
58535+
58536+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
58537+ rlim = task_rlimit_max(task, res);
58538+ else
58539+ rlim = task_rlimit(task, res);
58540+
58541+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
58542+ return;
58543+
58544+ rcu_read_lock();
58545+ cred = __task_cred(task);
58546+
58547+ if (res == RLIMIT_NPROC &&
58548+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
58549+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
58550+ goto out_rcu_unlock;
58551+ else if (res == RLIMIT_MEMLOCK &&
58552+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
58553+ goto out_rcu_unlock;
58554+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
58555+ goto out_rcu_unlock;
58556+ rcu_read_unlock();
58557+
58558+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
58559+
58560+ return;
58561+out_rcu_unlock:
58562+ rcu_read_unlock();
58563+ return;
58564+}
58565diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
58566new file mode 100644
58567index 0000000..25197e9
58568--- /dev/null
58569+++ b/grsecurity/gracl_segv.c
58570@@ -0,0 +1,299 @@
58571+#include <linux/kernel.h>
58572+#include <linux/mm.h>
58573+#include <asm/uaccess.h>
58574+#include <asm/errno.h>
58575+#include <asm/mman.h>
58576+#include <net/sock.h>
58577+#include <linux/file.h>
58578+#include <linux/fs.h>
58579+#include <linux/net.h>
58580+#include <linux/in.h>
58581+#include <linux/slab.h>
58582+#include <linux/types.h>
58583+#include <linux/sched.h>
58584+#include <linux/timer.h>
58585+#include <linux/gracl.h>
58586+#include <linux/grsecurity.h>
58587+#include <linux/grinternal.h>
58588+
58589+static struct crash_uid *uid_set;
58590+static unsigned short uid_used;
58591+static DEFINE_SPINLOCK(gr_uid_lock);
58592+extern rwlock_t gr_inode_lock;
58593+extern struct acl_subject_label *
58594+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
58595+ struct acl_role_label *role);
58596+
58597+#ifdef CONFIG_BTRFS_FS
58598+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
58599+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
58600+#endif
58601+
58602+static inline dev_t __get_dev(const struct dentry *dentry)
58603+{
58604+#ifdef CONFIG_BTRFS_FS
58605+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
58606+ return get_btrfs_dev_from_inode(dentry->d_inode);
58607+ else
58608+#endif
58609+ return dentry->d_inode->i_sb->s_dev;
58610+}
58611+
58612+int
58613+gr_init_uidset(void)
58614+{
58615+ uid_set =
58616+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
58617+ uid_used = 0;
58618+
58619+ return uid_set ? 1 : 0;
58620+}
58621+
58622+void
58623+gr_free_uidset(void)
58624+{
58625+ if (uid_set)
58626+ kfree(uid_set);
58627+
58628+ return;
58629+}
58630+
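+/* binary search of uid_set, which gr_insertsort() below keeps ordered by
+   uid; returns the index of @uid or -1 if it isn't present */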
58631+int
58632+gr_find_uid(const uid_t uid)
58633+{
58634+ struct crash_uid *tmp = uid_set;
58635+ uid_t buid;
58636+ int low = 0, high = uid_used - 1, mid;
58637+
58638+ while (high >= low) {
58639+ mid = (low + high) >> 1;
58640+ buid = tmp[mid].uid;
58641+ if (buid == uid)
58642+ return mid;
58643+ if (buid > uid)
58644+ high = mid - 1;
58645+ if (buid < uid)
58646+ low = mid + 1;
58647+ }
58648+
58649+ return -1;
58650+}
58651+
58652+static __inline__ void
58653+gr_insertsort(void)
58654+{
58655+ unsigned short i, j;
58656+ struct crash_uid index;
58657+
58658+ for (i = 1; i < uid_used; i++) {
58659+ index = uid_set[i];
58660+ j = i;
58661+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
58662+ uid_set[j] = uid_set[j - 1];
58663+ j--;
58664+ }
58665+ uid_set[j] = index;
58666+ }
58667+
58668+ return;
58669+}
58670+
58671+static __inline__ void
58672+gr_insert_uid(const uid_t uid, const unsigned long expires)
58673+{
58674+ int loc;
58675+
58676+ if (uid_used == GR_UIDTABLE_MAX)
58677+ return;
58678+
58679+ loc = gr_find_uid(uid);
58680+
58681+ if (loc >= 0) {
58682+ uid_set[loc].expires = expires;
58683+ return;
58684+ }
58685+
58686+ uid_set[uid_used].uid = uid;
58687+ uid_set[uid_used].expires = expires;
58688+ uid_used++;
58689+
58690+ gr_insertsort();
58691+
58692+ return;
58693+}
58694+
58695+void
58696+gr_remove_uid(const unsigned short loc)
58697+{
58698+ unsigned short i;
58699+
58700+ for (i = loc + 1; i < uid_used; i++)
58701+ uid_set[i - 1] = uid_set[i];
58702+
58703+ uid_used--;
58704+
58705+ return;
58706+}
58707+
58708+int
58709+gr_check_crash_uid(const uid_t uid)
58710+{
58711+ int loc;
58712+ int ret = 0;
58713+
58714+ if (unlikely(!gr_acl_is_enabled()))
58715+ return 0;
58716+
58717+ spin_lock(&gr_uid_lock);
58718+ loc = gr_find_uid(uid);
58719+
58720+ if (loc < 0)
58721+ goto out_unlock;
58722+
58723+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
58724+ gr_remove_uid(loc);
58725+ else
58726+ ret = 1;
58727+
58728+out_unlock:
58729+ spin_unlock(&gr_uid_lock);
58730+ return ret;
58731+}
58732+
58733+static __inline__ int
58734+proc_is_setxid(const struct cred *cred)
58735+{
58736+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
58737+ !uid_eq(cred->uid, cred->fsuid))
58738+ return 1;
58739+ if (!uid_eq(cred->gid, cred->egid) || !uid_eq(cred->gid, cred->sgid) ||
58740+ !uid_eq(cred->gid, cred->fsgid))
58741+ return 1;
58742+
58743+ return 0;
58744+}
58745+
58746+extern int gr_fake_force_sig(int sig, struct task_struct *t);
58747+
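+/* RES_CRASH enforcement: count fatal signals per subject inside the
+   configured time window; once the threshold is reached, either ban the
+   crashing setxid uid (and kill its tasks) or kill every task running the
+   same subject and binary */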
58748+void
58749+gr_handle_crash(struct task_struct *task, const int sig)
58750+{
58751+ struct acl_subject_label *curr;
58752+ struct task_struct *tsk, *tsk2;
58753+ const struct cred *cred;
58754+ const struct cred *cred2;
58755+
58756+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
58757+ return;
58758+
58759+ if (unlikely(!gr_acl_is_enabled()))
58760+ return;
58761+
58762+ curr = task->acl;
58763+
58764+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
58765+ return;
58766+
58767+ if (time_before_eq(curr->expires, get_seconds())) {
58768+ curr->expires = 0;
58769+ curr->crashes = 0;
58770+ }
58771+
58772+ curr->crashes++;
58773+
58774+ if (!curr->expires)
58775+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
58776+
58777+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
58778+ time_after(curr->expires, get_seconds())) {
58779+ rcu_read_lock();
58780+ cred = __task_cred(task);
58781+ if (!uid_eq(cred->uid, GLOBAL_ROOT_UID) && proc_is_setxid(cred)) {
58782+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
58783+ spin_lock(&gr_uid_lock);
58784+ gr_insert_uid(cred->uid, curr->expires);
58785+ spin_unlock(&gr_uid_lock);
58786+ curr->expires = 0;
58787+ curr->crashes = 0;
58788+ read_lock(&tasklist_lock);
58789+ do_each_thread(tsk2, tsk) {
58790+ cred2 = __task_cred(tsk);
58791+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
58792+ gr_fake_force_sig(SIGKILL, tsk);
58793+ } while_each_thread(tsk2, tsk);
58794+ read_unlock(&tasklist_lock);
58795+ } else {
58796+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
58797+ read_lock(&tasklist_lock);
58798+ read_lock(&grsec_exec_file_lock);
58799+ do_each_thread(tsk2, tsk) {
58800+ if (likely(tsk != task)) {
58801+ // if this thread has the same subject as the one that triggered
58802+ // RES_CRASH and it's the same binary, kill it
58803+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
58804+ gr_fake_force_sig(SIGKILL, tsk);
58805+ }
58806+ } while_each_thread(tsk2, tsk);
58807+ read_unlock(&grsec_exec_file_lock);
58808+ read_unlock(&tasklist_lock);
58809+ }
58810+ rcu_read_unlock();
58811+ }
58812+
58813+ return;
58814+}
58815+
58816+int
58817+gr_check_crash_exec(const struct file *filp)
58818+{
58819+ struct acl_subject_label *curr;
58820+
58821+ if (unlikely(!gr_acl_is_enabled()))
58822+ return 0;
58823+
58824+ read_lock(&gr_inode_lock);
58825+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
58826+ __get_dev(filp->f_path.dentry),
58827+ current->role);
58828+ read_unlock(&gr_inode_lock);
58829+
58830+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
58831+ (!curr->crashes && !curr->expires))
58832+ return 0;
58833+
58834+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
58835+ time_after(curr->expires, get_seconds()))
58836+ return 1;
58837+ else if (time_before_eq(curr->expires, get_seconds())) {
58838+ curr->crashes = 0;
58839+ curr->expires = 0;
58840+ }
58841+
58842+ return 0;
58843+}
58844+
58845+void
58846+gr_handle_alertkill(struct task_struct *task)
58847+{
58848+ struct acl_subject_label *curracl;
58849+ __u32 curr_ip;
58850+ struct task_struct *p, *p2;
58851+
58852+ if (unlikely(!gr_acl_is_enabled()))
58853+ return;
58854+
58855+ curracl = task->acl;
58856+ curr_ip = task->signal->curr_ip;
58857+
58858+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
58859+ read_lock(&tasklist_lock);
58860+ do_each_thread(p2, p) {
58861+ if (p->signal->curr_ip == curr_ip)
58862+ gr_fake_force_sig(SIGKILL, p);
58863+ } while_each_thread(p2, p);
58864+ read_unlock(&tasklist_lock);
58865+ } else if (curracl->mode & GR_KILLPROC)
58866+ gr_fake_force_sig(SIGKILL, task);
58867+
58868+ return;
58869+}
58870diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
58871new file mode 100644
58872index 0000000..9d83a69
58873--- /dev/null
58874+++ b/grsecurity/gracl_shm.c
58875@@ -0,0 +1,40 @@
58876+#include <linux/kernel.h>
58877+#include <linux/mm.h>
58878+#include <linux/sched.h>
58879+#include <linux/file.h>
58880+#include <linux/ipc.h>
58881+#include <linux/gracl.h>
58882+#include <linux/grsecurity.h>
58883+#include <linux/grinternal.h>
58884+
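+/* deny shmat() when the segment's creator (or last attacher) is still
+   running under a GR_PROTSHM subject different from the caller's;
+   returns 1 to allow, 0 to deny */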
58885+int
58886+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
58887+ const time_t shm_createtime, const uid_t cuid, const int shmid)
58888+{
58889+ struct task_struct *task;
58890+
58891+ if (!gr_acl_is_enabled())
58892+ return 1;
58893+
58894+ rcu_read_lock();
58895+ read_lock(&tasklist_lock);
58896+
58897+ task = find_task_by_vpid(shm_cprid);
58898+
58899+ if (unlikely(!task))
58900+ task = find_task_by_vpid(shm_lapid);
58901+
58902+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
58903+ (task->pid == shm_lapid)) &&
58904+ (task->acl->mode & GR_PROTSHM) &&
58905+ (task->acl != current->acl))) {
58906+ read_unlock(&tasklist_lock);
58907+ rcu_read_unlock();
58908+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
58909+ return 0;
58910+ }
58911+ read_unlock(&tasklist_lock);
58912+ rcu_read_unlock();
58913+
58914+ return 1;
58915+}
58916diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
58917new file mode 100644
58918index 0000000..bc0be01
58919--- /dev/null
58920+++ b/grsecurity/grsec_chdir.c
58921@@ -0,0 +1,19 @@
58922+#include <linux/kernel.h>
58923+#include <linux/sched.h>
58924+#include <linux/fs.h>
58925+#include <linux/file.h>
58926+#include <linux/grsecurity.h>
58927+#include <linux/grinternal.h>
58928+
58929+void
58930+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
58931+{
58932+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
58933+ if ((grsec_enable_chdir && grsec_enable_group &&
58934+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
58935+ !grsec_enable_group)) {
58936+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
58937+ }
58938+#endif
58939+ return;
58940+}
58941diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
58942new file mode 100644
58943index 0000000..9807ee2
58944--- /dev/null
58945+++ b/grsecurity/grsec_chroot.c
58946@@ -0,0 +1,368 @@
58947+#include <linux/kernel.h>
58948+#include <linux/module.h>
58949+#include <linux/sched.h>
58950+#include <linux/file.h>
58951+#include <linux/fs.h>
58952+#include <linux/mount.h>
58953+#include <linux/types.h>
58954+#include "../fs/mount.h"
58955+#include <linux/grsecurity.h>
58956+#include <linux/grinternal.h>
58957+
58958+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
58959+{
58960+#ifdef CONFIG_GRKERNSEC
58961+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
58962+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
58963+ task->gr_is_chrooted = 1;
58964+ else
58965+ task->gr_is_chrooted = 0;
58966+
58967+ task->gr_chroot_dentry = path->dentry;
58968+#endif
58969+ return;
58970+}
58971+
58972+void gr_clear_chroot_entries(struct task_struct *task)
58973+{
58974+#ifdef CONFIG_GRKERNSEC
58975+ task->gr_is_chrooted = 0;
58976+ task->gr_chroot_dentry = NULL;
58977+#endif
58978+ return;
58979+}
58980+
58981+int
58982+gr_handle_chroot_unix(const pid_t pid)
58983+{
58984+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
58985+ struct task_struct *p;
58986+
58987+ if (unlikely(!grsec_enable_chroot_unix))
58988+ return 1;
58989+
58990+ if (likely(!proc_is_chrooted(current)))
58991+ return 1;
58992+
58993+ rcu_read_lock();
58994+ read_lock(&tasklist_lock);
58995+ p = find_task_by_vpid_unrestricted(pid);
58996+ if (unlikely(p && !have_same_root(current, p))) {
58997+ read_unlock(&tasklist_lock);
58998+ rcu_read_unlock();
58999+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
59000+ return 0;
59001+ }
59002+ read_unlock(&tasklist_lock);
59003+ rcu_read_unlock();
59004+#endif
59005+ return 1;
59006+}
59007+
59008+int
59009+gr_handle_chroot_nice(void)
59010+{
59011+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
59012+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
59013+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
59014+ return -EPERM;
59015+ }
59016+#endif
59017+ return 0;
59018+}
59019+
59020+int
59021+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
59022+{
59023+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
59024+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
59025+ && proc_is_chrooted(current)) {
59026+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
59027+ return -EACCES;
59028+ }
59029+#endif
59030+ return 0;
59031+}
59032+
59033+int
59034+gr_handle_chroot_rawio(const struct inode *inode)
59035+{
59036+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
59037+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
59038+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
59039+ return 1;
59040+#endif
59041+ return 0;
59042+}
59043+
59044+int
59045+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
59046+{
59047+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59048+ struct task_struct *p;
59049+ int ret = 0;
59050+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
59051+ return ret;
59052+
59053+ read_lock(&tasklist_lock);
59054+ do_each_pid_task(pid, type, p) {
59055+ if (!have_same_root(current, p)) {
59056+ ret = 1;
59057+ goto out;
59058+ }
59059+ } while_each_pid_task(pid, type, p);
59060+out:
59061+ read_unlock(&tasklist_lock);
59062+ return ret;
59063+#endif
59064+ return 0;
59065+}
59066+
59067+int
59068+gr_pid_is_chrooted(struct task_struct *p)
59069+{
59070+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59071+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
59072+ return 0;
59073+
59074+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
59075+ !have_same_root(current, p)) {
59076+ return 1;
59077+ }
59078+#endif
59079+ return 0;
59080+}
59081+
59082+EXPORT_SYMBOL(gr_pid_is_chrooted);
59083+
59084+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
59085+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
59086+{
59087+ struct path path, currentroot;
59088+ int ret = 0;
59089+
59090+ path.dentry = (struct dentry *)u_dentry;
59091+ path.mnt = (struct vfsmount *)u_mnt;
59092+ get_fs_root(current->fs, &currentroot);
59093+ if (path_is_under(&path, &currentroot))
59094+ ret = 1;
59095+ path_put(&currentroot);
59096+
59097+ return ret;
59098+}
59099+#endif
59100+
59101+int
59102+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
59103+{
59104+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
59105+ if (!grsec_enable_chroot_fchdir)
59106+ return 1;
59107+
59108+ if (!proc_is_chrooted(current))
59109+ return 1;
59110+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
59111+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
59112+ return 0;
59113+ }
59114+#endif
59115+ return 1;
59116+}
59117+
59118+int
59119+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
59120+ const time_t shm_createtime)
59121+{
59122+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
59123+ struct task_struct *p;
59124+ time_t starttime;
59125+
59126+ if (unlikely(!grsec_enable_chroot_shmat))
59127+ return 1;
59128+
59129+ if (likely(!proc_is_chrooted(current)))
59130+ return 1;
59131+
59132+ rcu_read_lock();
59133+ read_lock(&tasklist_lock);
59134+
59135+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
59136+ starttime = p->start_time.tv_sec;
59137+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
59138+ if (have_same_root(current, p)) {
59139+ goto allow;
59140+ } else {
59141+ read_unlock(&tasklist_lock);
59142+ rcu_read_unlock();
59143+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
59144+ return 0;
59145+ }
59146+ }
59147+ /* creator exited, pid reuse, fall through to next check */
59148+ }
59149+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
59150+ if (unlikely(!have_same_root(current, p))) {
59151+ read_unlock(&tasklist_lock);
59152+ rcu_read_unlock();
59153+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
59154+ return 0;
59155+ }
59156+ }
59157+
59158+allow:
59159+ read_unlock(&tasklist_lock);
59160+ rcu_read_unlock();
59161+#endif
59162+ return 1;
59163+}
59164+
59165+void
59166+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
59167+{
59168+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
59169+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
59170+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
59171+#endif
59172+ return;
59173+}
59174+
59175+int
59176+gr_handle_chroot_mknod(const struct dentry *dentry,
59177+ const struct vfsmount *mnt, const int mode)
59178+{
59179+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
59180+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
59181+ proc_is_chrooted(current)) {
59182+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
59183+ return -EPERM;
59184+ }
59185+#endif
59186+ return 0;
59187+}
59188+
59189+int
59190+gr_handle_chroot_mount(const struct dentry *dentry,
59191+ const struct vfsmount *mnt, const char *dev_name)
59192+{
59193+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
59194+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
59195+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
59196+ return -EPERM;
59197+ }
59198+#endif
59199+ return 0;
59200+}
59201+
59202+int
59203+gr_handle_chroot_pivot(void)
59204+{
59205+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
59206+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
59207+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
59208+ return -EPERM;
59209+ }
59210+#endif
59211+ return 0;
59212+}
59213+
59214+int
59215+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
59216+{
59217+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
59218+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
59219+ !gr_is_outside_chroot(dentry, mnt)) {
59220+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
59221+ return -EPERM;
59222+ }
59223+#endif
59224+ return 0;
59225+}
59226+
59227+extern const char *captab_log[];
59228+extern int captab_log_entries;
59229+
59230+int
59231+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
59232+{
59233+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
59234+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
59235+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
59236+ if (cap_raised(chroot_caps, cap)) {
59237+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
59238+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
59239+ }
59240+ return 0;
59241+ }
59242+ }
59243+#endif
59244+ return 1;
59245+}
59246+
59247+int
59248+gr_chroot_is_capable(const int cap)
59249+{
59250+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
59251+ return gr_task_chroot_is_capable(current, current_cred(), cap);
59252+#endif
59253+ return 1;
59254+}
59255+
59256+int
59257+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
59258+{
59259+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
59260+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
59261+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
59262+ if (cap_raised(chroot_caps, cap)) {
59263+ return 0;
59264+ }
59265+ }
59266+#endif
59267+ return 1;
59268+}
59269+
59270+int
59271+gr_chroot_is_capable_nolog(const int cap)
59272+{
59273+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
59274+ return gr_task_chroot_is_capable_nolog(current, cap);
59275+#endif
59276+ return 1;
59277+}
59278+
59279+int
59280+gr_handle_chroot_sysctl(const int op)
59281+{
59282+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
59283+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
59284+ proc_is_chrooted(current))
59285+ return -EACCES;
59286+#endif
59287+ return 0;
59288+}
59289+
59290+void
59291+gr_handle_chroot_chdir(struct path *path)
59292+{
59293+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
59294+ if (grsec_enable_chroot_chdir)
59295+ set_fs_pwd(current->fs, path);
59296+#endif
59297+ return;
59298+}
59299+
59300+int
59301+gr_handle_chroot_chmod(const struct dentry *dentry,
59302+ const struct vfsmount *mnt, const int mode)
59303+{
59304+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
59305+ /* allow chmod +s on directories, but not files */
59306+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
59307+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
59308+ proc_is_chrooted(current)) {
59309+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
59310+ return -EPERM;
59311+ }
59312+#endif
59313+ return 0;
59314+}
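
The capability checks above all reduce to one set-membership test: GR_CHROOT_CAPS is a fixed bitmask of capabilities stripped from chrooted tasks, and cap_raised() tests a single bit of it. A minimal user-space sketch of that test follows; the mask contents here are illustrative stand-ins, not the actual GR_CHROOT_CAPS set, though the CAP_* numbers match linux/capability.h.

#include <stdio.h>

enum { CAP_SETUID = 7, CAP_NET_ADMIN = 12, CAP_SYS_CHROOT = 18, CAP_SYS_ADMIN = 21 };

/* one bit per capability, like kernel_cap_t for the first 64 caps */
typedef unsigned long long cap_mask_t;
#define CAP_BIT(c) (1ULL << (c))

/* hypothetical stand-in for GR_CHROOT_CAPS: caps denied inside a chroot */
static const cap_mask_t chroot_denied =
	CAP_BIT(CAP_NET_ADMIN) | CAP_BIT(CAP_SYS_ADMIN) | CAP_BIT(CAP_SYS_CHROOT);

/* mirrors cap_raised(): is capability c present in mask m? */
static int cap_raised(cap_mask_t m, int c) { return (m & CAP_BIT(c)) != 0; }

/* mirrors gr_task_chroot_is_capable(): 0 = denied, 1 = allowed */
static int chroot_is_capable(int in_chroot, int cap)
{
	if (in_chroot && cap_raised(chroot_denied, cap))
		return 0;
	return 1;
}

int main(void)
{
	printf("CAP_SYS_ADMIN in chroot: %d\n", chroot_is_capable(1, CAP_SYS_ADMIN)); /* 0 */
	printf("CAP_SETUID in chroot: %d\n", chroot_is_capable(1, CAP_SETUID));       /* 1 */
	return 0;
}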
59315diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
59316new file mode 100644
59317index 0000000..7de2055
59318--- /dev/null
59319+++ b/grsecurity/grsec_disabled.c
59320@@ -0,0 +1,442 @@
59321+#include <linux/kernel.h>
59322+#include <linux/module.h>
59323+#include <linux/sched.h>
59324+#include <linux/file.h>
59325+#include <linux/fs.h>
59326+#include <linux/kdev_t.h>
59327+#include <linux/net.h>
59328+#include <linux/in.h>
59329+#include <linux/ip.h>
59330+#include <linux/skbuff.h>
59331+#include <linux/sysctl.h>
59332+
59333+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
59334+void
59335+pax_set_initial_flags(struct linux_binprm *bprm)
59336+{
59337+ return;
59338+}
59339+#endif
59340+
59341+#ifdef CONFIG_SYSCTL
59342+__u32
59343+gr_handle_sysctl(const struct ctl_table * table, const int op)
59344+{
59345+ return 0;
59346+}
59347+#endif
59348+
59349+#ifdef CONFIG_TASKSTATS
59350+int gr_is_taskstats_denied(int pid)
59351+{
59352+ return 0;
59353+}
59354+#endif
59355+
59356+int
59357+gr_acl_is_enabled(void)
59358+{
59359+ return 0;
59360+}
59361+
59362+void
59363+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
59364+{
59365+ return;
59366+}
59367+
59368+int
59369+gr_handle_rawio(const struct inode *inode)
59370+{
59371+ return 0;
59372+}
59373+
59374+void
59375+gr_acl_handle_psacct(struct task_struct *task, const long code)
59376+{
59377+ return;
59378+}
59379+
59380+int
59381+gr_handle_ptrace(struct task_struct *task, const long request)
59382+{
59383+ return 0;
59384+}
59385+
59386+int
59387+gr_handle_proc_ptrace(struct task_struct *task)
59388+{
59389+ return 0;
59390+}
59391+
59392+void
59393+gr_learn_resource(const struct task_struct *task,
59394+ const int res, const unsigned long wanted, const int gt)
59395+{
59396+ return;
59397+}
59398+
59399+int
59400+gr_set_acls(const int type)
59401+{
59402+ return 0;
59403+}
59404+
59405+int
59406+gr_check_hidden_task(const struct task_struct *tsk)
59407+{
59408+ return 0;
59409+}
59410+
59411+int
59412+gr_check_protected_task(const struct task_struct *task)
59413+{
59414+ return 0;
59415+}
59416+
59417+int
59418+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
59419+{
59420+ return 0;
59421+}
59422+
59423+void
59424+gr_copy_label(struct task_struct *tsk)
59425+{
59426+ return;
59427+}
59428+
59429+void
59430+gr_set_pax_flags(struct task_struct *task)
59431+{
59432+ return;
59433+}
59434+
59435+int
59436+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
59437+ const int unsafe_share)
59438+{
59439+ return 0;
59440+}
59441+
59442+void
59443+gr_handle_delete(const ino_t ino, const dev_t dev)
59444+{
59445+ return;
59446+}
59447+
59448+void
59449+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
59450+{
59451+ return;
59452+}
59453+
59454+void
59455+gr_handle_crash(struct task_struct *task, const int sig)
59456+{
59457+ return;
59458+}
59459+
59460+int
59461+gr_check_crash_exec(const struct file *filp)
59462+{
59463+ return 0;
59464+}
59465+
59466+int
59467+gr_check_crash_uid(const uid_t uid)
59468+{
59469+ return 0;
59470+}
59471+
59472+void
59473+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59474+ struct dentry *old_dentry,
59475+ struct dentry *new_dentry,
59476+ struct vfsmount *mnt, const __u8 replace)
59477+{
59478+ return;
59479+}
59480+
59481+int
59482+gr_search_socket(const int family, const int type, const int protocol)
59483+{
59484+ return 1;
59485+}
59486+
59487+int
59488+gr_search_connectbind(const int mode, const struct socket *sock,
59489+ const struct sockaddr_in *addr)
59490+{
59491+ return 0;
59492+}
59493+
59494+void
59495+gr_handle_alertkill(struct task_struct *task)
59496+{
59497+ return;
59498+}
59499+
59500+__u32
59501+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
59502+{
59503+ return 1;
59504+}
59505+
59506+__u32
59507+gr_acl_handle_hidden_file(const struct dentry * dentry,
59508+ const struct vfsmount * mnt)
59509+{
59510+ return 1;
59511+}
59512+
59513+__u32
59514+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
59515+ int acc_mode)
59516+{
59517+ return 1;
59518+}
59519+
59520+__u32
59521+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
59522+{
59523+ return 1;
59524+}
59525+
59526+__u32
59527+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
59528+{
59529+ return 1;
59530+}
59531+
59532+int
59533+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
59534+ unsigned int *vm_flags)
59535+{
59536+ return 1;
59537+}
59538+
59539+__u32
59540+gr_acl_handle_truncate(const struct dentry * dentry,
59541+ const struct vfsmount * mnt)
59542+{
59543+ return 1;
59544+}
59545+
59546+__u32
59547+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
59548+{
59549+ return 1;
59550+}
59551+
59552+__u32
59553+gr_acl_handle_access(const struct dentry * dentry,
59554+ const struct vfsmount * mnt, const int fmode)
59555+{
59556+ return 1;
59557+}
59558+
59559+__u32
59560+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
59561+ umode_t *mode)
59562+{
59563+ return 1;
59564+}
59565+
59566+__u32
59567+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
59568+{
59569+ return 1;
59570+}
59571+
59572+__u32
59573+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
59574+{
59575+ return 1;
59576+}
59577+
59578+void
59579+grsecurity_init(void)
59580+{
59581+ return;
59582+}
59583+
59584+umode_t gr_acl_umask(void)
59585+{
59586+ return 0;
59587+}
59588+
59589+__u32
59590+gr_acl_handle_mknod(const struct dentry * new_dentry,
59591+ const struct dentry * parent_dentry,
59592+ const struct vfsmount * parent_mnt,
59593+ const int mode)
59594+{
59595+ return 1;
59596+}
59597+
59598+__u32
59599+gr_acl_handle_mkdir(const struct dentry * new_dentry,
59600+ const struct dentry * parent_dentry,
59601+ const struct vfsmount * parent_mnt)
59602+{
59603+ return 1;
59604+}
59605+
59606+__u32
59607+gr_acl_handle_symlink(const struct dentry * new_dentry,
59608+ const struct dentry * parent_dentry,
59609+ const struct vfsmount * parent_mnt, const struct filename *from)
59610+{
59611+ return 1;
59612+}
59613+
59614+__u32
59615+gr_acl_handle_link(const struct dentry * new_dentry,
59616+ const struct dentry * parent_dentry,
59617+ const struct vfsmount * parent_mnt,
59618+ const struct dentry * old_dentry,
59619+ const struct vfsmount * old_mnt, const struct filename *to)
59620+{
59621+ return 1;
59622+}
59623+
59624+int
59625+gr_acl_handle_rename(const struct dentry *new_dentry,
59626+ const struct dentry *parent_dentry,
59627+ const struct vfsmount *parent_mnt,
59628+ const struct dentry *old_dentry,
59629+ const struct inode *old_parent_inode,
59630+ const struct vfsmount *old_mnt, const struct filename *newname)
59631+{
59632+ return 0;
59633+}
59634+
59635+int
59636+gr_acl_handle_filldir(const struct file *file, const char *name,
59637+ const int namelen, const ino_t ino)
59638+{
59639+ return 1;
59640+}
59641+
59642+int
59643+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
59644+ const time_t shm_createtime, const uid_t cuid, const int shmid)
59645+{
59646+ return 1;
59647+}
59648+
59649+int
59650+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
59651+{
59652+ return 0;
59653+}
59654+
59655+int
59656+gr_search_accept(const struct socket *sock)
59657+{
59658+ return 0;
59659+}
59660+
59661+int
59662+gr_search_listen(const struct socket *sock)
59663+{
59664+ return 0;
59665+}
59666+
59667+int
59668+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
59669+{
59670+ return 0;
59671+}
59672+
59673+__u32
59674+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
59675+{
59676+ return 1;
59677+}
59678+
59679+__u32
59680+gr_acl_handle_creat(const struct dentry * dentry,
59681+ const struct dentry * p_dentry,
59682+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
59683+ const int imode)
59684+{
59685+ return 1;
59686+}
59687+
59688+void
59689+gr_acl_handle_exit(void)
59690+{
59691+ return;
59692+}
59693+
59694+int
59695+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
59696+{
59697+ return 1;
59698+}
59699+
59700+void
59701+gr_set_role_label(const uid_t uid, const gid_t gid)
59702+{
59703+ return;
59704+}
59705+
59706+int
59707+gr_acl_handle_procpidmem(const struct task_struct *task)
59708+{
59709+ return 0;
59710+}
59711+
59712+int
59713+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
59714+{
59715+ return 0;
59716+}
59717+
59718+int
59719+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
59720+{
59721+ return 0;
59722+}
59723+
59724+void
59725+gr_set_kernel_label(struct task_struct *task)
59726+{
59727+ return;
59728+}
59729+
59730+int
59731+gr_check_user_change(int real, int effective, int fs)
59732+{
59733+ return 0;
59734+}
59735+
59736+int
59737+gr_check_group_change(int real, int effective, int fs)
59738+{
59739+ return 0;
59740+}
59741+
59742+int gr_acl_enable_at_secure(void)
59743+{
59744+ return 0;
59745+}
59746+
59747+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
59748+{
59749+ return dentry->d_inode->i_sb->s_dev;
59750+}
59751+
59752+void gr_put_exec_file(struct task_struct *task)
59753+{
59754+ return;
59755+}
59756+
59757+EXPORT_SYMBOL(gr_learn_resource);
59758+EXPORT_SYMBOL(gr_set_kernel_label);
59759+#ifdef CONFIG_SECURITY
59760+EXPORT_SYMBOL(gr_check_user_change);
59761+EXPORT_SYMBOL(gr_check_group_change);
59762+#endif
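
grsec_disabled.c exists so call sites never need #ifdef CONFIG_GRKERNSEC: every hook always links, and with the feature compiled out each one collapses to a constant (0 on the "is it restricted/denied" paths, 1 on the __u32 ACL "is it allowed" paths). A compile-time sketch of the same pattern, with hypothetical names:

#include <stdio.h>

/* toggle at build time, e.g. cc -DFEATURE_ENABLED=1 demo.c */
#ifndef FEATURE_ENABLED
#define FEATURE_ENABLED 0
#endif

#if FEATURE_ENABLED
/* stand-in for a real policy check */
static int policy_allow(int id) { return (id % 2) == 0; }
#else
/* stub: feature compiled out, everything is allowed */
static int policy_allow(int id) { (void)id; return 1; }
#endif

int main(void)
{
	/* the call site is identical either way; no #ifdef needed here */
	printf("id 3 allowed? %d\n", policy_allow(3));
	return 0;
}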
59763diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
59764new file mode 100644
59765index 0000000..abfa971
59766--- /dev/null
59767+++ b/grsecurity/grsec_exec.c
59768@@ -0,0 +1,174 @@
59769+#include <linux/kernel.h>
59770+#include <linux/sched.h>
59771+#include <linux/file.h>
59772+#include <linux/binfmts.h>
59773+#include <linux/fs.h>
59774+#include <linux/types.h>
59775+#include <linux/grdefs.h>
59776+#include <linux/grsecurity.h>
59777+#include <linux/grinternal.h>
59778+#include <linux/capability.h>
59779+#include <linux/module.h>
59780+
59781+#include <asm/uaccess.h>
59782+
59783+#ifdef CONFIG_GRKERNSEC_EXECLOG
59784+static char gr_exec_arg_buf[132];
59785+static DEFINE_MUTEX(gr_exec_arg_mutex);
59786+#endif
59787+
59788+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
59789+
59790+void
59791+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
59792+{
59793+#ifdef CONFIG_GRKERNSEC_EXECLOG
59794+ char *grarg = gr_exec_arg_buf;
59795+ unsigned int i, x, execlen = 0;
59796+ char c;
59797+
59798+ if (!((grsec_enable_execlog && grsec_enable_group &&
59799+ in_group_p(grsec_audit_gid))
59800+ || (grsec_enable_execlog && !grsec_enable_group)))
59801+ return;
59802+
59803+ mutex_lock(&gr_exec_arg_mutex);
59804+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
59805+
59806+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
59807+ const char __user *p;
59808+ unsigned int len;
59809+
59810+ p = get_user_arg_ptr(argv, i);
59811+ if (IS_ERR(p))
59812+ goto log;
59813+
59814+ len = strnlen_user(p, 128 - execlen);
59815+ if (len > 128 - execlen)
59816+ len = 128 - execlen;
59817+ else if (len > 0)
59818+ len--;
59819+ if (copy_from_user(grarg + execlen, p, len))
59820+ goto log;
59821+
59822+ /* rewrite unprintable characters */
59823+ for (x = 0; x < len; x++) {
59824+ c = *(grarg + execlen + x);
59825+ if (c < 32 || c > 126)
59826+ *(grarg + execlen + x) = ' ';
59827+ }
59828+
59829+ execlen += len;
59830+ *(grarg + execlen) = ' ';
59831+ *(grarg + execlen + 1) = '\0';
59832+ execlen++;
59833+ }
59834+
59835+ log:
59836+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
59837+ bprm->file->f_path.mnt, grarg);
59838+ mutex_unlock(&gr_exec_arg_mutex);
59839+#endif
59840+ return;
59841+}
59842+
59843+#ifdef CONFIG_GRKERNSEC
59844+extern int gr_acl_is_capable(const int cap);
59845+extern int gr_acl_is_capable_nolog(const int cap);
59846+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
59847+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
59848+extern int gr_chroot_is_capable(const int cap);
59849+extern int gr_chroot_is_capable_nolog(const int cap);
59850+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
59851+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
59852+#endif
59853+
59854+const char *captab_log[] = {
59855+ "CAP_CHOWN",
59856+ "CAP_DAC_OVERRIDE",
59857+ "CAP_DAC_READ_SEARCH",
59858+ "CAP_FOWNER",
59859+ "CAP_FSETID",
59860+ "CAP_KILL",
59861+ "CAP_SETGID",
59862+ "CAP_SETUID",
59863+ "CAP_SETPCAP",
59864+ "CAP_LINUX_IMMUTABLE",
59865+ "CAP_NET_BIND_SERVICE",
59866+ "CAP_NET_BROADCAST",
59867+ "CAP_NET_ADMIN",
59868+ "CAP_NET_RAW",
59869+ "CAP_IPC_LOCK",
59870+ "CAP_IPC_OWNER",
59871+ "CAP_SYS_MODULE",
59872+ "CAP_SYS_RAWIO",
59873+ "CAP_SYS_CHROOT",
59874+ "CAP_SYS_PTRACE",
59875+ "CAP_SYS_PACCT",
59876+ "CAP_SYS_ADMIN",
59877+ "CAP_SYS_BOOT",
59878+ "CAP_SYS_NICE",
59879+ "CAP_SYS_RESOURCE",
59880+ "CAP_SYS_TIME",
59881+ "CAP_SYS_TTY_CONFIG",
59882+ "CAP_MKNOD",
59883+ "CAP_LEASE",
59884+ "CAP_AUDIT_WRITE",
59885+ "CAP_AUDIT_CONTROL",
59886+ "CAP_SETFCAP",
59887+ "CAP_MAC_OVERRIDE",
59888+ "CAP_MAC_ADMIN",
59889+ "CAP_SYSLOG",
59890+ "CAP_WAKE_ALARM"
59891+};
59892+
59893+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
59894+
59895+int gr_is_capable(const int cap)
59896+{
59897+#ifdef CONFIG_GRKERNSEC
59898+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
59899+ return 1;
59900+ return 0;
59901+#else
59902+ return 1;
59903+#endif
59904+}
59905+
59906+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
59907+{
59908+#ifdef CONFIG_GRKERNSEC
59909+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
59910+ return 1;
59911+ return 0;
59912+#else
59913+ return 1;
59914+#endif
59915+}
59916+
59917+int gr_is_capable_nolog(const int cap)
59918+{
59919+#ifdef CONFIG_GRKERNSEC
59920+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
59921+ return 1;
59922+ return 0;
59923+#else
59924+ return 1;
59925+#endif
59926+}
59927+
59928+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
59929+{
59930+#ifdef CONFIG_GRKERNSEC
59931+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
59932+ return 1;
59933+ return 0;
59934+#else
59935+ return 1;
59936+#endif
59937+}
59938+
59939+EXPORT_SYMBOL(gr_is_capable);
59940+EXPORT_SYMBOL(gr_is_capable_nolog);
59941+EXPORT_SYMBOL(gr_task_is_capable);
59942+EXPORT_SYMBOL(gr_task_is_capable_nolog);
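
gr_handle_exec_args above packs argv into a single 128-byte log record: each argument is length-capped against the remaining space, copied in, scrubbed of unprintable bytes (anything outside 0x20-0x7e becomes a space), and space-separated. The same packing logic as a plain user-space sketch; the buffer size and function name are just for the demo:

#include <stdio.h>
#include <string.h>

#define LOGCAP 128

/* pack argv the way gr_handle_exec_args does: cap total length,
 * replace unprintable bytes with spaces, space-separate arguments */
static void pack_args(char *buf, size_t bufsz, int argc, char **argv)
{
	size_t execlen = 0;

	memset(buf, 0, bufsz);
	for (int i = 0; i < argc && execlen < LOGCAP; i++) {
		size_t len = strnlen(argv[i], LOGCAP - execlen);

		if (len > LOGCAP - execlen)
			len = LOGCAP - execlen;
		memcpy(buf + execlen, argv[i], len);
		for (size_t x = 0; x < len; x++)
			if (buf[execlen + x] < 32 || buf[execlen + x] > 126)
				buf[execlen + x] = ' ';
		execlen += len;
		buf[execlen++] = ' ';	/* argument separator, as in the patch */
	}
	buf[execlen] = '\0';
}

int main(int argc, char **argv)
{
	char buf[LOGCAP + 4];	/* slack for trailing " \0", like the 132-byte kernel buffer */

	pack_args(buf, sizeof(buf), argc, argv);
	printf("exec log: %s\n", buf);
	return 0;
}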
59943diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
59944new file mode 100644
59945index 0000000..d3ee748
59946--- /dev/null
59947+++ b/grsecurity/grsec_fifo.c
59948@@ -0,0 +1,24 @@
59949+#include <linux/kernel.h>
59950+#include <linux/sched.h>
59951+#include <linux/fs.h>
59952+#include <linux/file.h>
59953+#include <linux/grinternal.h>
59954+
59955+int
59956+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
59957+ const struct dentry *dir, const int flag, const int acc_mode)
59958+{
59959+#ifdef CONFIG_GRKERNSEC_FIFO
59960+ const struct cred *cred = current_cred();
59961+
59962+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
59963+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
59964+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
59965+ (cred->fsuid != dentry->d_inode->i_uid)) {
59966+ if (!inode_permission(dentry->d_inode, acc_mode)) /* log only when DAC would have granted the access */
59967+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
59968+ return -EACCES;
59969+ }
59970+#endif
59971+ return 0;
59972+}
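
The FIFO check denies opening a FIFO that sits in a sticky (+t) directory when the FIFO's owner is neither the opener nor the directory's owner and O_EXCL was not used: the classic /tmp FIFO-squatting defense. The predicate in isolation, as a user-space sketch with plain uids and hypothetical names:

#include <stdio.h>

#define S_ISVTX_BIT 01000	/* sticky bit, as in sys/stat.h */

/* mirrors the gr_handle_fifo condition: nonzero means deny */
static int fifo_denied(int is_fifo, int o_excl,
		       unsigned dir_mode, unsigned dir_uid,
		       unsigned fifo_uid, unsigned fsuid)
{
	return is_fifo && !o_excl &&
	       (dir_mode & S_ISVTX_BIT) &&
	       fifo_uid != dir_uid &&
	       fsuid != fifo_uid;
}

int main(void)
{
	/* attacker (uid 1001) pre-created a FIFO in root-owned sticky /tmp;
	 * victim (uid 1000) opens it without O_EXCL -> denied */
	printf("denied? %d\n", fifo_denied(1, 0, 01777, 0, 1001, 1000));  /* 1 */
	/* opening your own FIFO is fine */
	printf("denied? %d\n", fifo_denied(1, 0, 01777, 0, 1000, 1000));  /* 0 */
	return 0;
}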
59973diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
59974new file mode 100644
59975index 0000000..8ca18bf
59976--- /dev/null
59977+++ b/grsecurity/grsec_fork.c
59978@@ -0,0 +1,23 @@
59979+#include <linux/kernel.h>
59980+#include <linux/sched.h>
59981+#include <linux/grsecurity.h>
59982+#include <linux/grinternal.h>
59983+#include <linux/errno.h>
59984+
59985+void
59986+gr_log_forkfail(const int retval)
59987+{
59988+#ifdef CONFIG_GRKERNSEC_FORKFAIL
59989+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
59990+ switch (retval) {
59991+ case -EAGAIN:
59992+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
59993+ break;
59994+ case -ENOMEM:
59995+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
59996+ break;
59997+ }
59998+ }
59999+#endif
60000+ return;
60001+}
60002diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
60003new file mode 100644
60004index 0000000..05a6015
60005--- /dev/null
60006+++ b/grsecurity/grsec_init.c
60007@@ -0,0 +1,283 @@
60008+#include <linux/kernel.h>
60009+#include <linux/sched.h>
60010+#include <linux/mm.h>
60011+#include <linux/gracl.h>
60012+#include <linux/slab.h>
60013+#include <linux/vmalloc.h>
60014+#include <linux/percpu.h>
60015+#include <linux/module.h>
60016+
60017+int grsec_enable_ptrace_readexec;
60018+int grsec_enable_setxid;
60019+int grsec_enable_symlinkown;
60020+int grsec_symlinkown_gid;
60021+int grsec_enable_brute;
60022+int grsec_enable_link;
60023+int grsec_enable_dmesg;
60024+int grsec_enable_harden_ptrace;
60025+int grsec_enable_fifo;
60026+int grsec_enable_execlog;
60027+int grsec_enable_signal;
60028+int grsec_enable_forkfail;
60029+int grsec_enable_audit_ptrace;
60030+int grsec_enable_time;
60031+int grsec_enable_audit_textrel;
60032+int grsec_enable_group;
60033+int grsec_audit_gid;
60034+int grsec_enable_chdir;
60035+int grsec_enable_mount;
60036+int grsec_enable_rofs;
60037+int grsec_enable_chroot_findtask;
60038+int grsec_enable_chroot_mount;
60039+int grsec_enable_chroot_shmat;
60040+int grsec_enable_chroot_fchdir;
60041+int grsec_enable_chroot_double;
60042+int grsec_enable_chroot_pivot;
60043+int grsec_enable_chroot_chdir;
60044+int grsec_enable_chroot_chmod;
60045+int grsec_enable_chroot_mknod;
60046+int grsec_enable_chroot_nice;
60047+int grsec_enable_chroot_execlog;
60048+int grsec_enable_chroot_caps;
60049+int grsec_enable_chroot_sysctl;
60050+int grsec_enable_chroot_unix;
60051+int grsec_enable_tpe;
60052+int grsec_tpe_gid;
60053+int grsec_enable_blackhole;
60054+#ifdef CONFIG_IPV6_MODULE
60055+EXPORT_SYMBOL(grsec_enable_blackhole);
60056+#endif
60057+int grsec_lastack_retries;
60058+int grsec_enable_tpe_all;
60059+int grsec_enable_tpe_invert;
60060+int grsec_enable_socket_all;
60061+int grsec_socket_all_gid;
60062+int grsec_enable_socket_client;
60063+int grsec_socket_client_gid;
60064+int grsec_enable_socket_server;
60065+int grsec_socket_server_gid;
60066+int grsec_resource_logging;
60067+int grsec_disable_privio;
60068+int grsec_enable_log_rwxmaps;
60069+int grsec_lock;
60070+
60071+DEFINE_SPINLOCK(grsec_alert_lock);
60072+unsigned long grsec_alert_wtime = 0;
60073+unsigned long grsec_alert_fyet = 0;
60074+
60075+DEFINE_SPINLOCK(grsec_audit_lock);
60076+
60077+DEFINE_RWLOCK(grsec_exec_file_lock);
60078+
60079+char *gr_shared_page[4];
60080+
60081+char *gr_alert_log_fmt;
60082+char *gr_audit_log_fmt;
60083+char *gr_alert_log_buf;
60084+char *gr_audit_log_buf;
60085+
60086+extern struct gr_arg *gr_usermode;
60087+extern unsigned char *gr_system_salt;
60088+extern unsigned char *gr_system_sum;
60089+
60090+void __init
60091+grsecurity_init(void)
60092+{
60093+ int j;
60094+ /* create the per-cpu shared pages */
60095+
60096+#ifdef CONFIG_X86
60097+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36); /* wipe the BIOS keyboard buffer (BDA 0x41a-0x43d) so boot-time keystrokes don't linger in low memory */
60098+#endif
60099+
60100+ for (j = 0; j < 4; j++) {
60101+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
60102+ if (gr_shared_page[j] == NULL) {
60103+ panic("Unable to allocate grsecurity shared page");
60104+ return;
60105+ }
60106+ }
60107+
60108+ /* allocate log buffers */
60109+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
60110+ if (!gr_alert_log_fmt) {
60111+ panic("Unable to allocate grsecurity alert log format buffer");
60112+ return;
60113+ }
60114+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
60115+ if (!gr_audit_log_fmt) {
60116+ panic("Unable to allocate grsecurity audit log format buffer");
60117+ return;
60118+ }
60119+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
60120+ if (!gr_alert_log_buf) {
60121+ panic("Unable to allocate grsecurity alert log buffer");
60122+ return;
60123+ }
60124+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
60125+ if (!gr_audit_log_buf) {
60126+ panic("Unable to allocate grsecurity audit log buffer");
60127+ return;
60128+ }
60129+
60130+ /* allocate memory for authentication structure */
60131+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
60132+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
60133+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
60134+
60135+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
60136+ panic("Unable to allocate grsecurity authentication structure");
60137+ return;
60138+ }
60139+
60140+
60141+#ifdef CONFIG_GRKERNSEC_IO
60142+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
60143+ grsec_disable_privio = 1;
60144+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
60145+ grsec_disable_privio = 1;
60146+#else
60147+ grsec_disable_privio = 0;
60148+#endif
60149+#endif
60150+
60151+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
60152+ /* for backward compatibility, tpe_invert always defaults to on if
60153+ enabled in the kernel
60154+ */
60155+ grsec_enable_tpe_invert = 1;
60156+#endif
60157+
60158+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
60159+#ifndef CONFIG_GRKERNSEC_SYSCTL
60160+ grsec_lock = 1;
60161+#endif
60162+
60163+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
60164+ grsec_enable_audit_textrel = 1;
60165+#endif
60166+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
60167+ grsec_enable_log_rwxmaps = 1;
60168+#endif
60169+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
60170+ grsec_enable_group = 1;
60171+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
60172+#endif
60173+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
60174+ grsec_enable_ptrace_readexec = 1;
60175+#endif
60176+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
60177+ grsec_enable_chdir = 1;
60178+#endif
60179+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
60180+ grsec_enable_harden_ptrace = 1;
60181+#endif
60182+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
60183+ grsec_enable_mount = 1;
60184+#endif
60185+#ifdef CONFIG_GRKERNSEC_LINK
60186+ grsec_enable_link = 1;
60187+#endif
60188+#ifdef CONFIG_GRKERNSEC_BRUTE
60189+ grsec_enable_brute = 1;
60190+#endif
60191+#ifdef CONFIG_GRKERNSEC_DMESG
60192+ grsec_enable_dmesg = 1;
60193+#endif
60194+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
60195+ grsec_enable_blackhole = 1;
60196+ grsec_lastack_retries = 4;
60197+#endif
60198+#ifdef CONFIG_GRKERNSEC_FIFO
60199+ grsec_enable_fifo = 1;
60200+#endif
60201+#ifdef CONFIG_GRKERNSEC_EXECLOG
60202+ grsec_enable_execlog = 1;
60203+#endif
60204+#ifdef CONFIG_GRKERNSEC_SETXID
60205+ grsec_enable_setxid = 1;
60206+#endif
60207+#ifdef CONFIG_GRKERNSEC_SIGNAL
60208+ grsec_enable_signal = 1;
60209+#endif
60210+#ifdef CONFIG_GRKERNSEC_FORKFAIL
60211+ grsec_enable_forkfail = 1;
60212+#endif
60213+#ifdef CONFIG_GRKERNSEC_TIME
60214+ grsec_enable_time = 1;
60215+#endif
60216+#ifdef CONFIG_GRKERNSEC_RESLOG
60217+ grsec_resource_logging = 1;
60218+#endif
60219+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
60220+ grsec_enable_chroot_findtask = 1;
60221+#endif
60222+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
60223+ grsec_enable_chroot_unix = 1;
60224+#endif
60225+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
60226+ grsec_enable_chroot_mount = 1;
60227+#endif
60228+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
60229+ grsec_enable_chroot_fchdir = 1;
60230+#endif
60231+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
60232+ grsec_enable_chroot_shmat = 1;
60233+#endif
60234+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
60235+ grsec_enable_audit_ptrace = 1;
60236+#endif
60237+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
60238+ grsec_enable_chroot_double = 1;
60239+#endif
60240+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
60241+ grsec_enable_chroot_pivot = 1;
60242+#endif
60243+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
60244+ grsec_enable_chroot_chdir = 1;
60245+#endif
60246+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
60247+ grsec_enable_chroot_chmod = 1;
60248+#endif
60249+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
60250+ grsec_enable_chroot_mknod = 1;
60251+#endif
60252+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
60253+ grsec_enable_chroot_nice = 1;
60254+#endif
60255+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
60256+ grsec_enable_chroot_execlog = 1;
60257+#endif
60258+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
60259+ grsec_enable_chroot_caps = 1;
60260+#endif
60261+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
60262+ grsec_enable_chroot_sysctl = 1;
60263+#endif
60264+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
60265+ grsec_enable_symlinkown = 1;
60266+ grsec_symlinkown_gid = CONFIG_GRKERNSEC_SYMLINKOWN_GID;
60267+#endif
60268+#ifdef CONFIG_GRKERNSEC_TPE
60269+ grsec_enable_tpe = 1;
60270+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
60271+#ifdef CONFIG_GRKERNSEC_TPE_ALL
60272+ grsec_enable_tpe_all = 1;
60273+#endif
60274+#endif
60275+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
60276+ grsec_enable_socket_all = 1;
60277+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
60278+#endif
60279+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
60280+ grsec_enable_socket_client = 1;
60281+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
60282+#endif
60283+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
60284+ grsec_enable_socket_server = 1;
60285+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
60286+#endif
60287+#endif
60288+
60289+ return;
60290+}
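
The CONFIG_X86 memset near the top of grsecurity_init() zeroes 36 bytes at physical 0x41a. Assuming the standard BIOS Data Area layout, that span is the keyboard buffer head pointer (0x41a), tail pointer (0x41c), and the 32-byte keystroke ring buffer itself (0x41e-0x43d), so passphrases typed at early boot prompts cannot be recovered later. A sketch of the arithmetic:

#include <stdio.h>

int main(void)
{
	const unsigned head = 0x41a;	/* BDA: keyboard buffer head pointer (2 bytes) */
	const unsigned tail = 0x41c;	/* BDA: keyboard buffer tail pointer (2 bytes) */
	const unsigned ring = 0x41e;	/* BDA: 32-byte keystroke ring buffer */
	const unsigned end  = ring + 32;	/* one past the last buffer byte */

	(void)tail;
	printf("wipe starts at 0x%x, covers %u bytes (up to 0x%x)\n",
	       head, end - head, end - 1);	/* 0x41a, 36 bytes, up to 0x43d */
	return 0;
}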
60291diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
60292new file mode 100644
60293index 0000000..6095407
60294--- /dev/null
60295+++ b/grsecurity/grsec_link.c
60296@@ -0,0 +1,58 @@
60297+#include <linux/kernel.h>
60298+#include <linux/sched.h>
60299+#include <linux/fs.h>
60300+#include <linux/file.h>
60301+#include <linux/grinternal.h>
60302+
60303+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
60304+{
60305+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
60306+ const struct inode *link_inode = link->dentry->d_inode;
60307+
60308+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
60309+ /* ignore root-owned links, e.g. /proc/self */
60310+ !uid_eq(link_inode->i_uid, GLOBAL_ROOT_UID) && target &&
60311+ !uid_eq(link_inode->i_uid, target->i_uid)) {
60312+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
60313+ return 1;
60314+ }
60315+#endif
60316+ return 0;
60317+}
60318+
60319+int
60320+gr_handle_follow_link(const struct inode *parent,
60321+ const struct inode *inode,
60322+ const struct dentry *dentry, const struct vfsmount *mnt)
60323+{
60324+#ifdef CONFIG_GRKERNSEC_LINK
60325+ const struct cred *cred = current_cred();
60326+
60327+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
60328+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
60329+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
60330+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
60331+ return -EACCES;
60332+ }
60333+#endif
60334+ return 0;
60335+}
60336+
60337+int
60338+gr_handle_hardlink(const struct dentry *dentry,
60339+ const struct vfsmount *mnt,
60340+ struct inode *inode, const int mode, const struct filename *to)
60341+{
60342+#ifdef CONFIG_GRKERNSEC_LINK
60343+ const struct cred *cred = current_cred();
60344+
60345+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
60346+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
60347+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
60348+ !capable(CAP_FOWNER) && !uid_eq(cred->uid, GLOBAL_ROOT_UID)) {
60349+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
60350+ return -EPERM;
60351+ }
60352+#endif
60353+ return 0;
60354+}
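
gr_handle_hardlink denies linking to a file you do not own unless it is a plain, non-privileged file you could already read and write, defeating hardlink attacks in world-writable directories. A user-space sketch of the predicate; can_rw stands in for inode_permission(MAY_READ | MAY_WRITE) succeeding, and privileged for is_privileged_binary():

#include <stdio.h>

/* mirrors gr_handle_hardlink: nonzero means -EPERM */
static int hardlink_denied(unsigned fsuid, unsigned target_uid,
			   int is_reg, int privileged, int can_rw,
			   int cap_fowner, int is_root)
{
	return fsuid != target_uid &&
	       (!is_reg || privileged || !can_rw) &&
	       !cap_fowner && !is_root;
}

int main(void)
{
	/* uid 1000 hardlinks root's setuid binary into /tmp -> denied */
	printf("denied? %d\n", hardlink_denied(1000, 0, 1, 1, 0, 0, 0)); /* 1 */
	/* uid 1000 hardlinks a plain file it can read and write -> allowed */
	printf("denied? %d\n", hardlink_denied(1000, 0, 1, 0, 1, 0, 0)); /* 0 */
	return 0;
}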
60355diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
60356new file mode 100644
60357index 0000000..a45d2e9
60358--- /dev/null
60359+++ b/grsecurity/grsec_log.c
60360@@ -0,0 +1,322 @@
60361+#include <linux/kernel.h>
60362+#include <linux/sched.h>
60363+#include <linux/file.h>
60364+#include <linux/tty.h>
60365+#include <linux/fs.h>
60366+#include <linux/grinternal.h>
60367+
60368+#ifdef CONFIG_TREE_PREEMPT_RCU
60369+#define DISABLE_PREEMPT() preempt_disable()
60370+#define ENABLE_PREEMPT() preempt_enable()
60371+#else
60372+#define DISABLE_PREEMPT()
60373+#define ENABLE_PREEMPT()
60374+#endif
60375+
60376+#define BEGIN_LOCKS(x) \
60377+ DISABLE_PREEMPT(); \
60378+ rcu_read_lock(); \
60379+ read_lock(&tasklist_lock); \
60380+ read_lock(&grsec_exec_file_lock); \
60381+ if (x != GR_DO_AUDIT) \
60382+ spin_lock(&grsec_alert_lock); \
60383+ else \
60384+ spin_lock(&grsec_audit_lock)
60385+
60386+#define END_LOCKS(x) \
60387+ if (x != GR_DO_AUDIT) \
60388+ spin_unlock(&grsec_alert_lock); \
60389+ else \
60390+ spin_unlock(&grsec_audit_lock); \
60391+ read_unlock(&grsec_exec_file_lock); \
60392+ read_unlock(&tasklist_lock); \
60393+ rcu_read_unlock(); \
60394+ ENABLE_PREEMPT(); \
60395+ if (x == GR_DONT_AUDIT) \
60396+ gr_handle_alertkill(current)
60397+
60398+enum {
60399+ FLOODING,
60400+ NO_FLOODING
60401+};
60402+
60403+extern char *gr_alert_log_fmt;
60404+extern char *gr_audit_log_fmt;
60405+extern char *gr_alert_log_buf;
60406+extern char *gr_audit_log_buf;
60407+
60408+static int gr_log_start(int audit)
60409+{
60410+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
60411+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
60412+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
60413+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
60414+ unsigned long curr_secs = get_seconds();
60415+
60416+ if (audit == GR_DO_AUDIT)
60417+ goto set_fmt;
60418+
60419+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
60420+ grsec_alert_wtime = curr_secs;
60421+ grsec_alert_fyet = 0;
60422+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
60423+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
60424+ grsec_alert_fyet++;
60425+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
60426+ grsec_alert_wtime = curr_secs;
60427+ grsec_alert_fyet++;
60428+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
60429+ return FLOODING;
60430+ }
60431+ else return FLOODING;
60432+
60433+set_fmt:
60434+#endif
60435+ memset(buf, 0, PAGE_SIZE);
60436+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
60437+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
60438+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
60439+ } else if (current->signal->curr_ip) {
60440+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
60441+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
60442+ } else if (gr_acl_is_enabled()) {
60443+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
60444+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
60445+ } else {
60446+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
60447+ strcpy(buf, fmt);
60448+ }
60449+
60450+ return NO_FLOODING;
60451+}
60452+
60453+static void gr_log_middle(int audit, const char *msg, va_list ap)
60454+ __attribute__ ((format (printf, 2, 0)));
60455+
60456+static void gr_log_middle(int audit, const char *msg, va_list ap)
60457+{
60458+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
60459+ unsigned int len = strlen(buf);
60460+
60461+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
60462+
60463+ return;
60464+}
60465+
60466+static void gr_log_middle_varargs(int audit, const char *msg, ...)
60467+ __attribute__ ((format (printf, 2, 3)));
60468+
60469+static void gr_log_middle_varargs(int audit, const char *msg, ...)
60470+{
60471+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
60472+ unsigned int len = strlen(buf);
60473+ va_list ap;
60474+
60475+ va_start(ap, msg);
60476+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
60477+ va_end(ap);
60478+
60479+ return;
60480+}
60481+
60482+static void gr_log_end(int audit, int append_default)
60483+{
60484+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
60485+
60486+ if (append_default) {
60487+ unsigned int len = strlen(buf);
60488+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
60489+ }
60490+
60491+ printk("%s\n", buf);
60492+
60493+ return;
60494+}
60495+
60496+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
60497+{
60498+ int logtype;
60499+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
60500+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
60501+ void *voidptr = NULL;
60502+ int num1 = 0, num2 = 0;
60503+ unsigned long ulong1 = 0, ulong2 = 0;
60504+ struct dentry *dentry = NULL;
60505+ struct vfsmount *mnt = NULL;
60506+ struct file *file = NULL;
60507+ struct task_struct *task = NULL;
60508+ const struct cred *cred, *pcred;
60509+ va_list ap;
60510+
60511+ BEGIN_LOCKS(audit);
60512+ logtype = gr_log_start(audit);
60513+ if (logtype == FLOODING) {
60514+ END_LOCKS(audit);
60515+ return;
60516+ }
60517+ va_start(ap, argtypes);
60518+ switch (argtypes) {
60519+ case GR_TTYSNIFF:
60520+ task = va_arg(ap, struct task_struct *);
60521+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
60522+ break;
60523+ case GR_SYSCTL_HIDDEN:
60524+ str1 = va_arg(ap, char *);
60525+ gr_log_middle_varargs(audit, msg, result, str1);
60526+ break;
60527+ case GR_RBAC:
60528+ dentry = va_arg(ap, struct dentry *);
60529+ mnt = va_arg(ap, struct vfsmount *);
60530+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
60531+ break;
60532+ case GR_RBAC_STR:
60533+ dentry = va_arg(ap, struct dentry *);
60534+ mnt = va_arg(ap, struct vfsmount *);
60535+ str1 = va_arg(ap, char *);
60536+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
60537+ break;
60538+ case GR_STR_RBAC:
60539+ str1 = va_arg(ap, char *);
60540+ dentry = va_arg(ap, struct dentry *);
60541+ mnt = va_arg(ap, struct vfsmount *);
60542+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
60543+ break;
60544+ case GR_RBAC_MODE2:
60545+ dentry = va_arg(ap, struct dentry *);
60546+ mnt = va_arg(ap, struct vfsmount *);
60547+ str1 = va_arg(ap, char *);
60548+ str2 = va_arg(ap, char *);
60549+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
60550+ break;
60551+ case GR_RBAC_MODE3:
60552+ dentry = va_arg(ap, struct dentry *);
60553+ mnt = va_arg(ap, struct vfsmount *);
60554+ str1 = va_arg(ap, char *);
60555+ str2 = va_arg(ap, char *);
60556+ str3 = va_arg(ap, char *);
60557+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
60558+ break;
60559+ case GR_FILENAME:
60560+ dentry = va_arg(ap, struct dentry *);
60561+ mnt = va_arg(ap, struct vfsmount *);
60562+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
60563+ break;
60564+ case GR_STR_FILENAME:
60565+ str1 = va_arg(ap, char *);
60566+ dentry = va_arg(ap, struct dentry *);
60567+ mnt = va_arg(ap, struct vfsmount *);
60568+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
60569+ break;
60570+ case GR_FILENAME_STR:
60571+ dentry = va_arg(ap, struct dentry *);
60572+ mnt = va_arg(ap, struct vfsmount *);
60573+ str1 = va_arg(ap, char *);
60574+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
60575+ break;
60576+ case GR_FILENAME_TWO_INT:
60577+ dentry = va_arg(ap, struct dentry *);
60578+ mnt = va_arg(ap, struct vfsmount *);
60579+ num1 = va_arg(ap, int);
60580+ num2 = va_arg(ap, int);
60581+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
60582+ break;
60583+ case GR_FILENAME_TWO_INT_STR:
60584+ dentry = va_arg(ap, struct dentry *);
60585+ mnt = va_arg(ap, struct vfsmount *);
60586+ num1 = va_arg(ap, int);
60587+ num2 = va_arg(ap, int);
60588+ str1 = va_arg(ap, char *);
60589+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
60590+ break;
60591+ case GR_TEXTREL:
60592+ file = va_arg(ap, struct file *);
60593+ ulong1 = va_arg(ap, unsigned long);
60594+ ulong2 = va_arg(ap, unsigned long);
60595+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
60596+ break;
60597+ case GR_PTRACE:
60598+ task = va_arg(ap, struct task_struct *);
60599+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
60600+ break;
60601+ case GR_RESOURCE:
60602+ task = va_arg(ap, struct task_struct *);
60603+ cred = __task_cred(task);
60604+ pcred = __task_cred(task->real_parent);
60605+ ulong1 = va_arg(ap, unsigned long);
60606+ str1 = va_arg(ap, char *);
60607+ ulong2 = va_arg(ap, unsigned long);
60608+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
60609+ break;
60610+ case GR_CAP:
60611+ task = va_arg(ap, struct task_struct *);
60612+ cred = __task_cred(task);
60613+ pcred = __task_cred(task->real_parent);
60614+ str1 = va_arg(ap, char *);
60615+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
60616+ break;
60617+ case GR_SIG:
60618+ str1 = va_arg(ap, char *);
60619+ voidptr = va_arg(ap, void *);
60620+ gr_log_middle_varargs(audit, msg, str1, voidptr);
60621+ break;
60622+ case GR_SIG2:
60623+ task = va_arg(ap, struct task_struct *);
60624+ cred = __task_cred(task);
60625+ pcred = __task_cred(task->real_parent);
60626+ num1 = va_arg(ap, int);
60627+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
60628+ break;
60629+ case GR_CRASH1:
60630+ task = va_arg(ap, struct task_struct *);
60631+ cred = __task_cred(task);
60632+ pcred = __task_cred(task->real_parent);
60633+ ulong1 = va_arg(ap, unsigned long);
60634+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
60635+ break;
60636+ case GR_CRASH2:
60637+ task = va_arg(ap, struct task_struct *);
60638+ cred = __task_cred(task);
60639+ pcred = __task_cred(task->real_parent);
60640+ ulong1 = va_arg(ap, unsigned long);
60641+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
60642+ break;
60643+ case GR_RWXMAP:
60644+ file = va_arg(ap, struct file *);
60645+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
60646+ break;
60647+ case GR_PSACCT:
60648+ {
60649+ unsigned int wday, cday;
60650+ __u8 whr, chr;
60651+ __u8 wmin, cmin;
60652+ __u8 wsec, csec;
60653+ char cur_tty[64] = { 0 };
60654+ char parent_tty[64] = { 0 };
60655+
60656+ task = va_arg(ap, struct task_struct *);
60657+ wday = va_arg(ap, unsigned int);
60658+ cday = va_arg(ap, unsigned int);
60659+ whr = va_arg(ap, int);
60660+ chr = va_arg(ap, int);
60661+ wmin = va_arg(ap, int);
60662+ cmin = va_arg(ap, int);
60663+ wsec = va_arg(ap, int);
60664+ csec = va_arg(ap, int);
60665+ ulong1 = va_arg(ap, unsigned long);
60666+ cred = __task_cred(task);
60667+ pcred = __task_cred(task->real_parent);
60668+
60669+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
60670+ }
60671+ break;
60672+ default:
60673+ gr_log_middle(audit, msg, ap);
60674+ }
60675+ va_end(ap);
60676+ // these don't need DEFAULTSECARGS printed on the end
60677+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
60678+ gr_log_end(audit, 0);
60679+ else
60680+ gr_log_end(audit, 1);
60681+ END_LOCKS(audit);
60682+}
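
The flood control in gr_log_start() works in windows: up to 1 + FLOODBURST alerts pass per FLOODTIME-second window, then one final "logging disabled" notice is printed and everything is dropped until a full quiet window elapses. A self-contained sketch of that window logic with stand-in constants:

#include <stdio.h>

/* stand-ins for CONFIG_GRKERNSEC_FLOODTIME / _FLOODBURST */
#define FLOODTIME  10	/* seconds */
#define FLOODBURST 4	/* extra messages per window */

static unsigned long wtime;	/* start of current window */
static unsigned long fyet;	/* messages emitted in this window */

/* mirrors the window logic in gr_log_start(): 1 = log it, 0 = flooding */
static int may_log(unsigned long now)
{
	if (!wtime || now > wtime + FLOODTIME) {
		wtime = now;		/* quiet period elapsed: new window */
		fyet = 0;
		return 1;
	}
	if (fyet < FLOODBURST) {
		fyet++;
		return 1;
	}
	if (fyet == FLOODBURST) {
		wtime = now;		/* restart the window at the cutoff */
		fyet++;			/* fyet > FLOODBURST now: silent until expiry */
		printf("[%lus] more alerts, logging disabled for %d seconds\n",
		       now, FLOODTIME);
	}
	return 0;
}

int main(void)
{
	for (unsigned long t = 1; t <= 20; t++)
		if (may_log(t))
			printf("[%lus] alert logged\n", t);
	return 0;
}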
60683diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
60684new file mode 100644
60685index 0000000..f536303
60686--- /dev/null
60687+++ b/grsecurity/grsec_mem.c
60688@@ -0,0 +1,40 @@
60689+#include <linux/kernel.h>
60690+#include <linux/sched.h>
60691+#include <linux/mm.h>
60692+#include <linux/mman.h>
60693+#include <linux/grinternal.h>
60694+
60695+void
60696+gr_handle_ioperm(void)
60697+{
60698+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
60699+ return;
60700+}
60701+
60702+void
60703+gr_handle_iopl(void)
60704+{
60705+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
60706+ return;
60707+}
60708+
60709+void
60710+gr_handle_mem_readwrite(u64 from, u64 to)
60711+{
60712+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
60713+ return;
60714+}
60715+
60716+void
60717+gr_handle_vm86(void)
60718+{
60719+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
60720+ return;
60721+}
60722+
60723+void
60724+gr_log_badprocpid(const char *entry)
60725+{
60726+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
60727+ return;
60728+}
60729diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
60730new file mode 100644
60731index 0000000..2131422
60732--- /dev/null
60733+++ b/grsecurity/grsec_mount.c
60734@@ -0,0 +1,62 @@
60735+#include <linux/kernel.h>
60736+#include <linux/sched.h>
60737+#include <linux/mount.h>
60738+#include <linux/grsecurity.h>
60739+#include <linux/grinternal.h>
60740+
60741+void
60742+gr_log_remount(const char *devname, const int retval)
60743+{
60744+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
60745+ if (grsec_enable_mount && (retval >= 0))
60746+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
60747+#endif
60748+ return;
60749+}
60750+
60751+void
60752+gr_log_unmount(const char *devname, const int retval)
60753+{
60754+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
60755+ if (grsec_enable_mount && (retval >= 0))
60756+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
60757+#endif
60758+ return;
60759+}
60760+
60761+void
60762+gr_log_mount(const char *from, const char *to, const int retval)
60763+{
60764+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
60765+ if (grsec_enable_mount && (retval >= 0))
60766+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
60767+#endif
60768+ return;
60769+}
60770+
60771+int
60772+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
60773+{
60774+#ifdef CONFIG_GRKERNSEC_ROFS
60775+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
60776+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
60777+ return -EPERM;
60778+ } else
60779+ return 0;
60780+#endif
60781+ return 0;
60782+}
60783+
60784+int
60785+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
60786+{
60787+#ifdef CONFIG_GRKERNSEC_ROFS
60788+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
60789+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
60790+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
60791+ return -EPERM;
60792+ } else
60793+ return 0;
60794+#endif
60795+ return 0;
60796+}
60797diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
60798new file mode 100644
60799index 0000000..a3b12a0
60800--- /dev/null
60801+++ b/grsecurity/grsec_pax.c
60802@@ -0,0 +1,36 @@
60803+#include <linux/kernel.h>
60804+#include <linux/sched.h>
60805+#include <linux/mm.h>
60806+#include <linux/file.h>
60807+#include <linux/grinternal.h>
60808+#include <linux/grsecurity.h>
60809+
60810+void
60811+gr_log_textrel(struct vm_area_struct * vma)
60812+{
60813+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
60814+ if (grsec_enable_audit_textrel)
60815+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
60816+#endif
60817+ return;
60818+}
60819+
60820+void
60821+gr_log_rwxmmap(struct file *file)
60822+{
60823+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
60824+ if (grsec_enable_log_rwxmaps)
60825+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
60826+#endif
60827+ return;
60828+}
60829+
60830+void
60831+gr_log_rwxmprotect(struct file *file)
60832+{
60833+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
60834+ if (grsec_enable_log_rwxmaps)
60835+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
60836+#endif
60837+ return;
60838+}
60839diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
60840new file mode 100644
60841index 0000000..f7f29aa
60842--- /dev/null
60843+++ b/grsecurity/grsec_ptrace.c
60844@@ -0,0 +1,30 @@
60845+#include <linux/kernel.h>
60846+#include <linux/sched.h>
60847+#include <linux/grinternal.h>
60848+#include <linux/security.h>
60849+
60850+void
60851+gr_audit_ptrace(struct task_struct *task)
60852+{
60853+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
60854+ if (grsec_enable_audit_ptrace)
60855+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
60856+#endif
60857+ return;
60858+}
60859+
60860+int
60861+gr_ptrace_readexec(struct file *file, int unsafe_flags)
60862+{
60863+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
60864+ const struct dentry *dentry = file->f_path.dentry;
60865+ const struct vfsmount *mnt = file->f_path.mnt;
60866+
60867+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
60868+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
60869+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
60870+ return -EACCES;
60871+ }
60872+#endif
60873+ return 0;
60874+}
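
gr_ptrace_readexec closes a disclosure hole: a tracer attached across exec could use ptrace to dump the image of a binary it cannot open, so the exec is refused when the new binary fails either the DAC read check or the RBAC open check. The predicate alone, as a sketch; dac_read_ok stands in for inode_permission(MAY_READ) succeeding, acl_read_ok for gr_acl_handle_open() allowing it:

#include <stdio.h>

/* mirrors gr_ptrace_readexec: nonzero means -EACCES */
static int readexec_denied(int traced, int dac_read_ok, int acl_read_ok)
{
	return traced && (!dac_read_ok || !acl_read_ok);
}

int main(void)
{
	printf("denied? %d\n", readexec_denied(1, 0, 1)); /* unreadable binary under ptrace: 1 */
	printf("denied? %d\n", readexec_denied(0, 0, 1)); /* not traced: 0 */
	return 0;
}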
60875diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
60876new file mode 100644
60877index 0000000..5c00416
60878--- /dev/null
60879+++ b/grsecurity/grsec_sig.c
60880@@ -0,0 +1,222 @@
60881+#include <linux/kernel.h>
60882+#include <linux/sched.h>
60883+#include <linux/delay.h>
60884+#include <linux/grsecurity.h>
60885+#include <linux/grinternal.h>
60886+#include <linux/hardirq.h>
60887+
60888+char *signames[] = {
60889+ [SIGSEGV] = "Segmentation fault",
60890+ [SIGILL] = "Illegal instruction",
60891+ [SIGABRT] = "Abort",
60892+ [SIGBUS] = "Invalid alignment/Bus error"
60893+};
60894+
60895+void
60896+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
60897+{
60898+#ifdef CONFIG_GRKERNSEC_SIGNAL
60899+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
60900+ (sig == SIGABRT) || (sig == SIGBUS))) {
60901+ if (t->pid == current->pid) {
60902+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
60903+ } else {
60904+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
60905+ }
60906+ }
60907+#endif
60908+ return;
60909+}
60910+
60911+int
60912+gr_handle_signal(const struct task_struct *p, const int sig)
60913+{
60914+#ifdef CONFIG_GRKERNSEC
60915+ /* ignore the 0 signal for protected task checks */
60916+ if (current->pid > 1 && sig && gr_check_protected_task(p)) {
60917+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
60918+ return -EPERM;
60919+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
60920+ return -EPERM;
60921+ }
60922+#endif
60923+ return 0;
60924+}
60925+
60926+#ifdef CONFIG_GRKERNSEC
60927+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
60928+
60929+int gr_fake_force_sig(int sig, struct task_struct *t)
60930+{
60931+ unsigned long int flags;
60932+ int ret, blocked, ignored;
60933+ struct k_sigaction *action;
60934+
60935+ spin_lock_irqsave(&t->sighand->siglock, flags);
60936+ action = &t->sighand->action[sig-1];
60937+ ignored = action->sa.sa_handler == SIG_IGN;
60938+ blocked = sigismember(&t->blocked, sig);
60939+ if (blocked || ignored) {
60940+ action->sa.sa_handler = SIG_DFL;
60941+ if (blocked) {
60942+ sigdelset(&t->blocked, sig);
60943+ recalc_sigpending_and_wake(t);
60944+ }
60945+ }
60946+ if (action->sa.sa_handler == SIG_DFL)
60947+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
60948+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
60949+
60950+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
60951+
60952+ return ret;
60953+}
60954+#endif
60955+
60956+#ifdef CONFIG_GRKERNSEC_BRUTE
60957+#define GR_USER_BAN_TIME (15 * 60) /* seconds */
60958+#define GR_DAEMON_BRUTE_TIME (30 * 60) /* seconds */
60959+
60960+static int __get_dumpable(unsigned long mm_flags)
60961+{
60962+ int ret;
60963+
60964+ ret = mm_flags & MMF_DUMPABLE_MASK;
60965+ return (ret >= 2) ? 2 : ret;
60966+}
60967+#endif
60968+
60969+void gr_handle_brute_attach(unsigned long mm_flags)
60970+{
60971+#ifdef CONFIG_GRKERNSEC_BRUTE
60972+ struct task_struct *p = current;
60973+ kuid_t uid = GLOBAL_ROOT_UID;
60974+ int daemon = 0;
60975+
60976+ if (!grsec_enable_brute)
60977+ return;
60978+
60979+ rcu_read_lock();
60980+ read_lock(&tasklist_lock);
60981+ read_lock(&grsec_exec_file_lock);
60982+ if (p->real_parent && p->real_parent->exec_file == p->exec_file) {
60983+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
60984+ p->real_parent->brute = 1;
60985+ daemon = 1;
60986+ } else {
60987+ const struct cred *cred = __task_cred(p), *cred2;
60988+ struct task_struct *tsk, *tsk2;
60989+
60990+ if (!__get_dumpable(mm_flags) && !uid_eq(cred->uid, GLOBAL_ROOT_UID)) {
60991+ struct user_struct *user;
60992+
60993+ uid = cred->uid;
60994+
60995+ /* this ref is put (dropped) at the next exec after the ban expires */
60996+ user = find_user(uid);
60997+ if (user == NULL)
60998+ goto unlock;
60999+ user->banned = 1;
61000+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
61001+ if (user->ban_expires == ~0UL) /* ~0UL is reserved to mean a permanent ban */
61002+ user->ban_expires--;
61003+
61004+ do_each_thread(tsk2, tsk) {
61005+ cred2 = __task_cred(tsk);
61006+ if (tsk != p && uid_eq(cred2->uid, uid))
61007+ gr_fake_force_sig(SIGKILL, tsk);
61008+ } while_each_thread(tsk2, tsk);
61009+ }
61010+ }
61011+unlock:
61012+ read_unlock(&grsec_exec_file_lock);
61013+ read_unlock(&tasklist_lock);
61014+ rcu_read_unlock();
61015+
61016+ if (!uid_eq(uid, GLOBAL_ROOT_UID))
61017+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n",
61018+ from_kuid_munged(&init_user_ns, uid), GR_USER_BAN_TIME / 60);
61019+ else if (daemon)
61020+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
61021+
61022+#endif
61023+ return;
61024+}
61025+
61026+void gr_handle_brute_check(void)
61027+{
61028+#ifdef CONFIG_GRKERNSEC_BRUTE
61029+ struct task_struct *p = current;
61030+
61031+ if (unlikely(p->brute)) {
61032+ if (!grsec_enable_brute)
61033+ p->brute = 0;
61034+ else if (time_before(get_seconds(), p->brute_expires))
61035+ msleep(30 * 1000);
61036+ }
61037+#endif
61038+ return;
61039+}
61040+
61041+void gr_handle_kernel_exploit(void)
61042+{
61043+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
61044+ const struct cred *cred;
61045+ struct task_struct *tsk, *tsk2;
61046+ struct user_struct *user;
61047+ kuid_t uid;
61048+
61049+ if (in_irq() || in_serving_softirq() || in_nmi())
61050+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
61051+
61052+ uid = current_uid();
61053+
61054+ if (uid_eq(uid, GLOBAL_ROOT_UID))
61055+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
61056+ else {
61057+ /* kill all the processes of this user, hold a reference
61058+ to their creds struct, and prevent them from creating
61059+ another process until system reset
61060+ */
61061+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
61062+ from_kuid_munged(&init_user_ns, uid));
61063+ /* we intentionally leak this ref */
61064+ user = get_uid(current->cred->user);
61065+ if (user) {
61066+ user->banned = 1;
61067+ user->ban_expires = ~0UL;
61068+ }
61069+
61070+ read_lock(&tasklist_lock);
61071+ do_each_thread(tsk2, tsk) {
61072+ cred = __task_cred(tsk);
61073+ if (uid_eq(cred->uid, uid))
61074+ gr_fake_force_sig(SIGKILL, tsk);
61075+ } while_each_thread(tsk2, tsk);
61076+ read_unlock(&tasklist_lock);
61077+ }
61078+#endif
61079+}
61080+
61081+int __gr_process_user_ban(struct user_struct *user)
61082+{
61083+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
61084+ if (unlikely(user->banned)) {
61085+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
61086+ user->banned = 0;
61087+ user->ban_expires = 0;
61088+ free_uid(user);
61089+ } else
61090+ return -EPERM;
61091+ }
61092+#endif
61093+ return 0;
61094+}
61095+
61096+int gr_process_user_ban(void)
61097+{
61098+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
61099+ return __gr_process_user_ban(current->cred->user);
61100+#endif
61101+ return 0;
61102+}
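A self-contained userspace model (illustrative only; the struct and function names are invented) of the __gr_process_user_ban() logic above: a ban either times out, at which point the flag is cleared and the deliberately leaked uid reference would be dropped, or it carries the ~0UL sentinel and holds until reboot.

#include <errno.h>
#include <stdio.h>
#include <time.h>

struct fake_user {
	int banned;
	unsigned long ban_expires;	/* in seconds; ~0UL means "until reboot" */
};

static int process_user_ban(struct fake_user *user, unsigned long now)
{
	if (user->banned) {
		if (user->ban_expires != ~0UL && now >= user->ban_expires) {
			user->banned = 0;
			user->ban_expires = 0;
			/* the kernel version calls free_uid() here to drop the ref */
		} else {
			return -EPERM;	/* still banned */
		}
	}
	return 0;
}

int main(void)
{
	unsigned long now = (unsigned long)time(NULL);
	struct fake_user u = { .banned = 1, .ban_expires = now - 1 };

	printf("expired ban   -> %d\n", process_user_ban(&u, now));	/* 0 */
	u.banned = 1;
	u.ban_expires = ~0UL;
	printf("permanent ban -> %d\n", process_user_ban(&u, now));	/* -EPERM */
	return 0;
}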
61103diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
61104new file mode 100644
61105index 0000000..4030d57
61106--- /dev/null
61107+++ b/grsecurity/grsec_sock.c
61108@@ -0,0 +1,244 @@
61109+#include <linux/kernel.h>
61110+#include <linux/module.h>
61111+#include <linux/sched.h>
61112+#include <linux/file.h>
61113+#include <linux/net.h>
61114+#include <linux/in.h>
61115+#include <linux/ip.h>
61116+#include <net/sock.h>
61117+#include <net/inet_sock.h>
61118+#include <linux/grsecurity.h>
61119+#include <linux/grinternal.h>
61120+#include <linux/gracl.h>
61121+
61122+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
61123+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
61124+
61125+EXPORT_SYMBOL(gr_search_udp_recvmsg);
61126+EXPORT_SYMBOL(gr_search_udp_sendmsg);
61127+
61128+#ifdef CONFIG_UNIX_MODULE
61129+EXPORT_SYMBOL(gr_acl_handle_unix);
61130+EXPORT_SYMBOL(gr_acl_handle_mknod);
61131+EXPORT_SYMBOL(gr_handle_chroot_unix);
61132+EXPORT_SYMBOL(gr_handle_create);
61133+#endif
61134+
61135+#ifdef CONFIG_GRKERNSEC
61136+#define gr_conn_table_size 32749
61137+struct conn_table_entry {
61138+ struct conn_table_entry *next;
61139+ struct signal_struct *sig;
61140+};
61141+
61142+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
61143+DEFINE_SPINLOCK(gr_conn_table_lock);
61144+
61145+extern const char * gr_socktype_to_name(unsigned char type);
61146+extern const char * gr_proto_to_name(unsigned char proto);
61147+extern const char * gr_sockfamily_to_name(unsigned char family);
61148+
61149+static __inline__ int
61150+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
61151+{
61152+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
61153+}
61154+
61155+static __inline__ int
61156+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
61157+ __u16 sport, __u16 dport)
61158+{
61159+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
61160+ sig->gr_sport == sport && sig->gr_dport == dport))
61161+ return 1;
61162+ else
61163+ return 0;
61164+}
61165+
61166+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
61167+{
61168+ struct conn_table_entry **match;
61169+ unsigned int index;
61170+
61171+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
61172+ sig->gr_sport, sig->gr_dport,
61173+ gr_conn_table_size);
61174+
61175+ newent->sig = sig;
61176+
61177+ match = &gr_conn_table[index];
61178+ newent->next = *match;
61179+ *match = newent;
61180+
61181+ return;
61182+}
61183+
61184+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
61185+{
61186+ struct conn_table_entry *match, *last = NULL;
61187+ unsigned int index;
61188+
61189+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
61190+ sig->gr_sport, sig->gr_dport,
61191+ gr_conn_table_size);
61192+
61193+ match = gr_conn_table[index];
61194+ while (match && !conn_match(match->sig,
61195+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
61196+ sig->gr_dport)) {
61197+ last = match;
61198+ match = match->next;
61199+ }
61200+
61201+ if (match) {
61202+ if (last)
61203+ last->next = match->next;
61204+ else
61205+ gr_conn_table[index] = match->next;
61206+ kfree(match);
61207+ }
61208+
61209+ return;
61210+}
61211+
61212+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
61213+ __u16 sport, __u16 dport)
61214+{
61215+ struct conn_table_entry *match;
61216+ unsigned int index;
61217+
61218+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
61219+
61220+ match = gr_conn_table[index];
61221+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
61222+ match = match->next;
61223+
61224+ if (match)
61225+ return match->sig;
61226+ else
61227+ return NULL;
61228+}
61229+
61230+#endif
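The table above is a plain chained hash keyed on the TCP 4-tuple. A compilable userspace sketch of the same hash-insert-lookup scheme (invented names; the table is deliberately tiny so collisions are easy to provoke). Note that gr_attach_curr_ip() below queries the table with source and destination swapped, because each entry is recorded from the opposite endpoint's point of view.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TABLE_SIZE 31

struct entry {
	struct entry *next;
	uint32_t saddr, daddr;
	uint16_t sport, dport;
};

static struct entry *table[TABLE_SIZE];

static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
			      uint16_t sport, uint16_t dport)
{
	/* same mixing as the kernel helper above, reduced modulo table size */
	return (daddr + saddr + (sport << 8) + (dport << 16)) % TABLE_SIZE;
}

static void insert(uint32_t s, uint32_t d, uint16_t sp, uint16_t dp)
{
	struct entry *e = malloc(sizeof(*e));
	unsigned int i = conn_hash(s, d, sp, dp);

	e->saddr = s; e->daddr = d; e->sport = sp; e->dport = dp;
	e->next = table[i];	/* push onto the head of the chain */
	table[i] = e;
}

static struct entry *lookup(uint32_t s, uint32_t d, uint16_t sp, uint16_t dp)
{
	struct entry *e = table[conn_hash(s, d, sp, dp)];

	while (e && !(e->saddr == s && e->daddr == d &&
		      e->sport == sp && e->dport == dp))
		e = e->next;
	return e;
}

int main(void)
{
	insert(0x0a000001, 0x0a000002, 12345, 80);
	printf("hit:  %p\n", (void *)lookup(0x0a000001, 0x0a000002, 12345, 80));
	printf("miss: %p\n", (void *)lookup(0x0a000001, 0x0a000002, 12345, 443));
	return 0;
}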
61231+
61232+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
61233+{
61234+#ifdef CONFIG_GRKERNSEC
61235+ struct signal_struct *sig = task->signal;
61236+ struct conn_table_entry *newent;
61237+
61238+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
61239+ if (newent == NULL)
61240+ return;
61241+ /* no bh lock needed since we are called with bh disabled */
61242+ spin_lock(&gr_conn_table_lock);
61243+ gr_del_task_from_ip_table_nolock(sig);
61244+ sig->gr_saddr = inet->inet_rcv_saddr;
61245+ sig->gr_daddr = inet->inet_daddr;
61246+ sig->gr_sport = inet->inet_sport;
61247+ sig->gr_dport = inet->inet_dport;
61248+ gr_add_to_task_ip_table_nolock(sig, newent);
61249+ spin_unlock(&gr_conn_table_lock);
61250+#endif
61251+ return;
61252+}
61253+
61254+void gr_del_task_from_ip_table(struct task_struct *task)
61255+{
61256+#ifdef CONFIG_GRKERNSEC
61257+ spin_lock_bh(&gr_conn_table_lock);
61258+ gr_del_task_from_ip_table_nolock(task->signal);
61259+ spin_unlock_bh(&gr_conn_table_lock);
61260+#endif
61261+ return;
61262+}
61263+
61264+void
61265+gr_attach_curr_ip(const struct sock *sk)
61266+{
61267+#ifdef CONFIG_GRKERNSEC
61268+ struct signal_struct *p, *set;
61269+ const struct inet_sock *inet = inet_sk(sk);
61270+
61271+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
61272+ return;
61273+
61274+ set = current->signal;
61275+
61276+ spin_lock_bh(&gr_conn_table_lock);
61277+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
61278+ inet->inet_dport, inet->inet_sport);
61279+ if (unlikely(p != NULL)) {
61280+ set->curr_ip = p->curr_ip;
61281+ set->used_accept = 1;
61282+ gr_del_task_from_ip_table_nolock(p);
61283+ spin_unlock_bh(&gr_conn_table_lock);
61284+ return;
61285+ }
61286+ spin_unlock_bh(&gr_conn_table_lock);
61287+
61288+ set->curr_ip = inet->inet_daddr;
61289+ set->used_accept = 1;
61290+#endif
61291+ return;
61292+}
61293+
61294+int
61295+gr_handle_sock_all(const int family, const int type, const int protocol)
61296+{
61297+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
61298+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
61299+ (family != AF_UNIX)) {
61300+ if (family == AF_INET)
61301+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
61302+ else
61303+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
61304+ return -EACCES;
61305+ }
61306+#endif
61307+ return 0;
61308+}
61309+
61310+int
61311+gr_handle_sock_server(const struct sockaddr *sck)
61312+{
61313+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
61314+ if (grsec_enable_socket_server &&
61315+ in_group_p(grsec_socket_server_gid) &&
61316+ sck && (sck->sa_family != AF_UNIX) &&
61317+ (sck->sa_family != AF_LOCAL)) {
61318+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
61319+ return -EACCES;
61320+ }
61321+#endif
61322+ return 0;
61323+}
61324+
61325+int
61326+gr_handle_sock_server_other(const struct sock *sck)
61327+{
61328+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
61329+ if (grsec_enable_socket_server &&
61330+ in_group_p(grsec_socket_server_gid) &&
61331+ sck && (sck->sk_family != AF_UNIX) &&
61332+ (sck->sk_family != AF_LOCAL)) {
61333+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
61334+ return -EACCES;
61335+ }
61336+#endif
61337+ return 0;
61338+}
61339+
61340+int
61341+gr_handle_sock_client(const struct sockaddr *sck)
61342+{
61343+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
61344+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
61345+ sck && (sck->sa_family != AF_UNIX) &&
61346+ (sck->sa_family != AF_LOCAL)) {
61347+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
61348+ return -EACCES;
61349+ }
61350+#endif
61351+ return 0;
61352+}
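All three socket hooks above share one shape: if the feature is enabled and the caller sits in the configured restriction group, any non-local socket is refused with -EACCES. A hedged userspace approximation (the GID value and helper names are made up; the kernel uses in_group_p() against the sysctl-configured gid):

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

static int in_group(gid_t gid)
{
	gid_t groups[NGROUPS_MAX];
	int n = getgroups(NGROUPS_MAX, groups);

	for (int i = 0; i < n; i++)
		if (groups[i] == gid)
			return 1;
	return 0;
}

/* mirrors gr_handle_sock_all(): 0 allows, -EACCES denies */
static int handle_sock_all(int enabled, gid_t socket_all_gid, int family)
{
	if (enabled && in_group(socket_all_gid) && family != AF_UNIX)
		return -EACCES;
	return 0;
}

int main(void)
{
	gid_t gid = 1004;	/* hypothetical grsec_socket_all_gid */

	printf("AF_INET: %d\n", handle_sock_all(1, gid, AF_INET));
	printf("AF_UNIX: %d\n", handle_sock_all(1, gid, AF_UNIX));
	return 0;
}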
61353diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
61354new file mode 100644
61355index 0000000..f55ef0f
61356--- /dev/null
61357+++ b/grsecurity/grsec_sysctl.c
61358@@ -0,0 +1,469 @@
61359+#include <linux/kernel.h>
61360+#include <linux/sched.h>
61361+#include <linux/sysctl.h>
61362+#include <linux/grsecurity.h>
61363+#include <linux/grinternal.h>
61364+
61365+int
61366+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
61367+{
61368+#ifdef CONFIG_GRKERNSEC_SYSCTL
61369+ if (dirname == NULL || name == NULL)
61370+ return 0;
61371+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
61372+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
61373+ return -EACCES;
61374+ }
61375+#endif
61376+ return 0;
61377+}
61378+
61379+#ifdef CONFIG_GRKERNSEC_ROFS
61380+static int __maybe_unused one = 1;
61381+#endif
61382+
61383+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
61384+struct ctl_table grsecurity_table[] = {
61385+#ifdef CONFIG_GRKERNSEC_SYSCTL
61386+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
61387+#ifdef CONFIG_GRKERNSEC_IO
61388+ {
61389+ .procname = "disable_priv_io",
61390+ .data = &grsec_disable_privio,
61391+ .maxlen = sizeof(int),
61392+ .mode = 0600,
61393+ .proc_handler = &proc_dointvec,
61394+ },
61395+#endif
61396+#endif
61397+#ifdef CONFIG_GRKERNSEC_LINK
61398+ {
61399+ .procname = "linking_restrictions",
61400+ .data = &grsec_enable_link,
61401+ .maxlen = sizeof(int),
61402+ .mode = 0600,
61403+ .proc_handler = &proc_dointvec,
61404+ },
61405+#endif
61406+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
61407+ {
61408+ .procname = "enforce_symlinksifowner",
61409+ .data = &grsec_enable_symlinkown,
61410+ .maxlen = sizeof(int),
61411+ .mode = 0600,
61412+ .proc_handler = &proc_dointvec,
61413+ },
61414+ {
61415+ .procname = "symlinkown_gid",
61416+ .data = &grsec_symlinkown_gid,
61417+ .maxlen = sizeof(int),
61418+ .mode = 0600,
61419+ .proc_handler = &proc_dointvec,
61420+ },
61421+#endif
61422+#ifdef CONFIG_GRKERNSEC_BRUTE
61423+ {
61424+ .procname = "deter_bruteforce",
61425+ .data = &grsec_enable_brute,
61426+ .maxlen = sizeof(int),
61427+ .mode = 0600,
61428+ .proc_handler = &proc_dointvec,
61429+ },
61430+#endif
61431+#ifdef CONFIG_GRKERNSEC_FIFO
61432+ {
61433+ .procname = "fifo_restrictions",
61434+ .data = &grsec_enable_fifo,
61435+ .maxlen = sizeof(int),
61436+ .mode = 0600,
61437+ .proc_handler = &proc_dointvec,
61438+ },
61439+#endif
61440+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
61441+ {
61442+ .procname = "ptrace_readexec",
61443+ .data = &grsec_enable_ptrace_readexec,
61444+ .maxlen = sizeof(int),
61445+ .mode = 0600,
61446+ .proc_handler = &proc_dointvec,
61447+ },
61448+#endif
61449+#ifdef CONFIG_GRKERNSEC_SETXID
61450+ {
61451+ .procname = "consistent_setxid",
61452+ .data = &grsec_enable_setxid,
61453+ .maxlen = sizeof(int),
61454+ .mode = 0600,
61455+ .proc_handler = &proc_dointvec,
61456+ },
61457+#endif
61458+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
61459+ {
61460+ .procname = "ip_blackhole",
61461+ .data = &grsec_enable_blackhole,
61462+ .maxlen = sizeof(int),
61463+ .mode = 0600,
61464+ .proc_handler = &proc_dointvec,
61465+ },
61466+ {
61467+ .procname = "lastack_retries",
61468+ .data = &grsec_lastack_retries,
61469+ .maxlen = sizeof(int),
61470+ .mode = 0600,
61471+ .proc_handler = &proc_dointvec,
61472+ },
61473+#endif
61474+#ifdef CONFIG_GRKERNSEC_EXECLOG
61475+ {
61476+ .procname = "exec_logging",
61477+ .data = &grsec_enable_execlog,
61478+ .maxlen = sizeof(int),
61479+ .mode = 0600,
61480+ .proc_handler = &proc_dointvec,
61481+ },
61482+#endif
61483+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
61484+ {
61485+ .procname = "rwxmap_logging",
61486+ .data = &grsec_enable_log_rwxmaps,
61487+ .maxlen = sizeof(int),
61488+ .mode = 0600,
61489+ .proc_handler = &proc_dointvec,
61490+ },
61491+#endif
61492+#ifdef CONFIG_GRKERNSEC_SIGNAL
61493+ {
61494+ .procname = "signal_logging",
61495+ .data = &grsec_enable_signal,
61496+ .maxlen = sizeof(int),
61497+ .mode = 0600,
61498+ .proc_handler = &proc_dointvec,
61499+ },
61500+#endif
61501+#ifdef CONFIG_GRKERNSEC_FORKFAIL
61502+ {
61503+ .procname = "forkfail_logging",
61504+ .data = &grsec_enable_forkfail,
61505+ .maxlen = sizeof(int),
61506+ .mode = 0600,
61507+ .proc_handler = &proc_dointvec,
61508+ },
61509+#endif
61510+#ifdef CONFIG_GRKERNSEC_TIME
61511+ {
61512+ .procname = "timechange_logging",
61513+ .data = &grsec_enable_time,
61514+ .maxlen = sizeof(int),
61515+ .mode = 0600,
61516+ .proc_handler = &proc_dointvec,
61517+ },
61518+#endif
61519+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
61520+ {
61521+ .procname = "chroot_deny_shmat",
61522+ .data = &grsec_enable_chroot_shmat,
61523+ .maxlen = sizeof(int),
61524+ .mode = 0600,
61525+ .proc_handler = &proc_dointvec,
61526+ },
61527+#endif
61528+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
61529+ {
61530+ .procname = "chroot_deny_unix",
61531+ .data = &grsec_enable_chroot_unix,
61532+ .maxlen = sizeof(int),
61533+ .mode = 0600,
61534+ .proc_handler = &proc_dointvec,
61535+ },
61536+#endif
61537+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
61538+ {
61539+ .procname = "chroot_deny_mount",
61540+ .data = &grsec_enable_chroot_mount,
61541+ .maxlen = sizeof(int),
61542+ .mode = 0600,
61543+ .proc_handler = &proc_dointvec,
61544+ },
61545+#endif
61546+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
61547+ {
61548+ .procname = "chroot_deny_fchdir",
61549+ .data = &grsec_enable_chroot_fchdir,
61550+ .maxlen = sizeof(int),
61551+ .mode = 0600,
61552+ .proc_handler = &proc_dointvec,
61553+ },
61554+#endif
61555+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
61556+ {
61557+ .procname = "chroot_deny_chroot",
61558+ .data = &grsec_enable_chroot_double,
61559+ .maxlen = sizeof(int),
61560+ .mode = 0600,
61561+ .proc_handler = &proc_dointvec,
61562+ },
61563+#endif
61564+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
61565+ {
61566+ .procname = "chroot_deny_pivot",
61567+ .data = &grsec_enable_chroot_pivot,
61568+ .maxlen = sizeof(int),
61569+ .mode = 0600,
61570+ .proc_handler = &proc_dointvec,
61571+ },
61572+#endif
61573+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
61574+ {
61575+ .procname = "chroot_enforce_chdir",
61576+ .data = &grsec_enable_chroot_chdir,
61577+ .maxlen = sizeof(int),
61578+ .mode = 0600,
61579+ .proc_handler = &proc_dointvec,
61580+ },
61581+#endif
61582+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
61583+ {
61584+ .procname = "chroot_deny_chmod",
61585+ .data = &grsec_enable_chroot_chmod,
61586+ .maxlen = sizeof(int),
61587+ .mode = 0600,
61588+ .proc_handler = &proc_dointvec,
61589+ },
61590+#endif
61591+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
61592+ {
61593+ .procname = "chroot_deny_mknod",
61594+ .data = &grsec_enable_chroot_mknod,
61595+ .maxlen = sizeof(int),
61596+ .mode = 0600,
61597+ .proc_handler = &proc_dointvec,
61598+ },
61599+#endif
61600+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
61601+ {
61602+ .procname = "chroot_restrict_nice",
61603+ .data = &grsec_enable_chroot_nice,
61604+ .maxlen = sizeof(int),
61605+ .mode = 0600,
61606+ .proc_handler = &proc_dointvec,
61607+ },
61608+#endif
61609+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
61610+ {
61611+ .procname = "chroot_execlog",
61612+ .data = &grsec_enable_chroot_execlog,
61613+ .maxlen = sizeof(int),
61614+ .mode = 0600,
61615+ .proc_handler = &proc_dointvec,
61616+ },
61617+#endif
61618+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
61619+ {
61620+ .procname = "chroot_caps",
61621+ .data = &grsec_enable_chroot_caps,
61622+ .maxlen = sizeof(int),
61623+ .mode = 0600,
61624+ .proc_handler = &proc_dointvec,
61625+ },
61626+#endif
61627+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
61628+ {
61629+ .procname = "chroot_deny_sysctl",
61630+ .data = &grsec_enable_chroot_sysctl,
61631+ .maxlen = sizeof(int),
61632+ .mode = 0600,
61633+ .proc_handler = &proc_dointvec,
61634+ },
61635+#endif
61636+#ifdef CONFIG_GRKERNSEC_TPE
61637+ {
61638+ .procname = "tpe",
61639+ .data = &grsec_enable_tpe,
61640+ .maxlen = sizeof(int),
61641+ .mode = 0600,
61642+ .proc_handler = &proc_dointvec,
61643+ },
61644+ {
61645+ .procname = "tpe_gid",
61646+ .data = &grsec_tpe_gid,
61647+ .maxlen = sizeof(int),
61648+ .mode = 0600,
61649+ .proc_handler = &proc_dointvec,
61650+ },
61651+#endif
61652+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
61653+ {
61654+ .procname = "tpe_invert",
61655+ .data = &grsec_enable_tpe_invert,
61656+ .maxlen = sizeof(int),
61657+ .mode = 0600,
61658+ .proc_handler = &proc_dointvec,
61659+ },
61660+#endif
61661+#ifdef CONFIG_GRKERNSEC_TPE_ALL
61662+ {
61663+ .procname = "tpe_restrict_all",
61664+ .data = &grsec_enable_tpe_all,
61665+ .maxlen = sizeof(int),
61666+ .mode = 0600,
61667+ .proc_handler = &proc_dointvec,
61668+ },
61669+#endif
61670+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
61671+ {
61672+ .procname = "socket_all",
61673+ .data = &grsec_enable_socket_all,
61674+ .maxlen = sizeof(int),
61675+ .mode = 0600,
61676+ .proc_handler = &proc_dointvec,
61677+ },
61678+ {
61679+ .procname = "socket_all_gid",
61680+ .data = &grsec_socket_all_gid,
61681+ .maxlen = sizeof(int),
61682+ .mode = 0600,
61683+ .proc_handler = &proc_dointvec,
61684+ },
61685+#endif
61686+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
61687+ {
61688+ .procname = "socket_client",
61689+ .data = &grsec_enable_socket_client,
61690+ .maxlen = sizeof(int),
61691+ .mode = 0600,
61692+ .proc_handler = &proc_dointvec,
61693+ },
61694+ {
61695+ .procname = "socket_client_gid",
61696+ .data = &grsec_socket_client_gid,
61697+ .maxlen = sizeof(int),
61698+ .mode = 0600,
61699+ .proc_handler = &proc_dointvec,
61700+ },
61701+#endif
61702+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
61703+ {
61704+ .procname = "socket_server",
61705+ .data = &grsec_enable_socket_server,
61706+ .maxlen = sizeof(int),
61707+ .mode = 0600,
61708+ .proc_handler = &proc_dointvec,
61709+ },
61710+ {
61711+ .procname = "socket_server_gid",
61712+ .data = &grsec_socket_server_gid,
61713+ .maxlen = sizeof(int),
61714+ .mode = 0600,
61715+ .proc_handler = &proc_dointvec,
61716+ },
61717+#endif
61718+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
61719+ {
61720+ .procname = "audit_group",
61721+ .data = &grsec_enable_group,
61722+ .maxlen = sizeof(int),
61723+ .mode = 0600,
61724+ .proc_handler = &proc_dointvec,
61725+ },
61726+ {
61727+ .procname = "audit_gid",
61728+ .data = &grsec_audit_gid,
61729+ .maxlen = sizeof(int),
61730+ .mode = 0600,
61731+ .proc_handler = &proc_dointvec,
61732+ },
61733+#endif
61734+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
61735+ {
61736+ .procname = "audit_chdir",
61737+ .data = &grsec_enable_chdir,
61738+ .maxlen = sizeof(int),
61739+ .mode = 0600,
61740+ .proc_handler = &proc_dointvec,
61741+ },
61742+#endif
61743+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
61744+ {
61745+ .procname = "audit_mount",
61746+ .data = &grsec_enable_mount,
61747+ .maxlen = sizeof(int),
61748+ .mode = 0600,
61749+ .proc_handler = &proc_dointvec,
61750+ },
61751+#endif
61752+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
61753+ {
61754+ .procname = "audit_textrel",
61755+ .data = &grsec_enable_audit_textrel,
61756+ .maxlen = sizeof(int),
61757+ .mode = 0600,
61758+ .proc_handler = &proc_dointvec,
61759+ },
61760+#endif
61761+#ifdef CONFIG_GRKERNSEC_DMESG
61762+ {
61763+ .procname = "dmesg",
61764+ .data = &grsec_enable_dmesg,
61765+ .maxlen = sizeof(int),
61766+ .mode = 0600,
61767+ .proc_handler = &proc_dointvec,
61768+ },
61769+#endif
61770+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61771+ {
61772+ .procname = "chroot_findtask",
61773+ .data = &grsec_enable_chroot_findtask,
61774+ .maxlen = sizeof(int),
61775+ .mode = 0600,
61776+ .proc_handler = &proc_dointvec,
61777+ },
61778+#endif
61779+#ifdef CONFIG_GRKERNSEC_RESLOG
61780+ {
61781+ .procname = "resource_logging",
61782+ .data = &grsec_resource_logging,
61783+ .maxlen = sizeof(int),
61784+ .mode = 0600,
61785+ .proc_handler = &proc_dointvec,
61786+ },
61787+#endif
61788+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
61789+ {
61790+ .procname = "audit_ptrace",
61791+ .data = &grsec_enable_audit_ptrace,
61792+ .maxlen = sizeof(int),
61793+ .mode = 0600,
61794+ .proc_handler = &proc_dointvec,
61795+ },
61796+#endif
61797+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
61798+ {
61799+ .procname = "harden_ptrace",
61800+ .data = &grsec_enable_harden_ptrace,
61801+ .maxlen = sizeof(int),
61802+ .mode = 0600,
61803+ .proc_handler = &proc_dointvec,
61804+ },
61805+#endif
61806+ {
61807+ .procname = "grsec_lock",
61808+ .data = &grsec_lock,
61809+ .maxlen = sizeof(int),
61810+ .mode = 0600,
61811+ .proc_handler = &proc_dointvec,
61812+ },
61813+#endif
61814+#ifdef CONFIG_GRKERNSEC_ROFS
61815+ {
61816+ .procname = "romount_protect",
61817+ .data = &grsec_enable_rofs,
61818+ .maxlen = sizeof(int),
61819+ .mode = 0600,
61820+ .proc_handler = &proc_dointvec_minmax,
61821+ .extra1 = &one,
61822+ .extra2 = &one,
61823+ },
61824+#endif
61825+ { }
61826+};
61827+#endif
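The table above only defines the knobs; it still has to be hung somewhere under /proc/sys. A hedged sketch of how a table like this is typically wired up on a ~3.7 kernel (the wrapper tables and init function here are invented; the actual patch performs its registration in kernel/sysctl.c): once registered, the entries appear as e.g. /proc/sys/kernel/grsecurity/deter_bruteforce and can be toggled with sysctl -w.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/sysctl.h>

extern struct ctl_table grsecurity_table[];

static struct ctl_table grsec_dir_table[] = {
	{
		.procname = "grsecurity",
		.mode = 0500,
		.child = grsecurity_table,	/* the table defined above */
	},
	{ }
};

static struct ctl_table grsec_root_table[] = {
	{
		.procname = "kernel",
		.mode = 0555,
		.child = grsec_dir_table,
	},
	{ }
};

static int __init grsec_sysctl_example_init(void)
{
	/* yields /proc/sys/kernel/grsecurity/<knobs> */
	return register_sysctl_table(grsec_root_table) ? 0 : -ENOMEM;
}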
61828diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
61829new file mode 100644
61830index 0000000..0dc13c3
61831--- /dev/null
61832+++ b/grsecurity/grsec_time.c
61833@@ -0,0 +1,16 @@
61834+#include <linux/kernel.h>
61835+#include <linux/sched.h>
61836+#include <linux/grinternal.h>
61837+#include <linux/module.h>
61838+
61839+void
61840+gr_log_timechange(void)
61841+{
61842+#ifdef CONFIG_GRKERNSEC_TIME
61843+ if (grsec_enable_time)
61844+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
61845+#endif
61846+ return;
61847+}
61848+
61849+EXPORT_SYMBOL(gr_log_timechange);
61850diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
61851new file mode 100644
61852index 0000000..07e0dc0
61853--- /dev/null
61854+++ b/grsecurity/grsec_tpe.c
61855@@ -0,0 +1,73 @@
61856+#include <linux/kernel.h>
61857+#include <linux/sched.h>
61858+#include <linux/file.h>
61859+#include <linux/fs.h>
61860+#include <linux/grinternal.h>
61861+
61862+extern int gr_acl_tpe_check(void);
61863+
61864+int
61865+gr_tpe_allow(const struct file *file)
61866+{
61867+#ifdef CONFIG_GRKERNSEC
61868+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
61869+ const struct cred *cred = current_cred();
61870+ char *msg = NULL;
61871+ char *msg2 = NULL;
61872+
61873+ // never restrict root
61874+ if (!cred->uid)
61875+ return 1;
61876+
61877+ if (grsec_enable_tpe) {
61878+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
61879+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
61880+ msg = "not being in trusted group";
61881+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
61882+ msg = "being in untrusted group";
61883+#else
61884+ if (in_group_p(grsec_tpe_gid))
61885+ msg = "being in untrusted group";
61886+#endif
61887+ }
61888+ if (!msg && gr_acl_tpe_check())
61889+ msg = "being in untrusted role";
61890+
61891+ // not in any affected group/role
61892+ if (!msg)
61893+ goto next_check;
61894+
61895+ if (inode->i_uid)
61896+ msg2 = "file in non-root-owned directory";
61897+ else if (inode->i_mode & S_IWOTH)
61898+ msg2 = "file in world-writable directory";
61899+ else if (inode->i_mode & S_IWGRP)
61900+ msg2 = "file in group-writable directory";
61901+
61902+ if (msg && msg2) {
61903+ char fullmsg[70] = {0};
61904+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
61905+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
61906+ return 0;
61907+ }
61908+ msg = NULL;
61909+next_check:
61910+#ifdef CONFIG_GRKERNSEC_TPE_ALL
61911+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
61912+ return 1;
61913+
61914+ if (inode->i_uid && (inode->i_uid != cred->uid))
61915+ msg = "directory not owned by user";
61916+ else if (inode->i_mode & S_IWOTH)
61917+ msg = "file in world-writable directory";
61918+ else if (inode->i_mode & S_IWGRP)
61919+ msg = "file in group-writable directory";
61920+
61921+ if (msg) {
61922+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
61923+ return 0;
61924+ }
61925+#endif
61926+#endif
61927+ return 1;
61928+}
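gr_tpe_allow() trusts a binary only when the directory holding it could not have been tampered with by an untrusted user. A userspace approximation (illustrative; it stats the parent directory of a path, while the kernel works from the dentry it already holds):

#include <libgen.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

/* returns NULL when the directory looks trusted, else a denial reason */
static const char *tpe_dir_check(const char *path, uid_t trusted_uid)
{
	char buf[4096];
	struct stat st;

	snprintf(buf, sizeof(buf), "%s", path);
	if (stat(dirname(buf), &st) != 0)
		return "stat failed";
	if (st.st_uid != 0 && st.st_uid != trusted_uid)
		return "directory not owned by root or the user";
	if (st.st_mode & S_IWOTH)
		return "file in world-writable directory";
	if (st.st_mode & S_IWGRP)
		return "file in group-writable directory";
	return NULL;
}

int main(int argc, char **argv)
{
	const char *msg = tpe_dir_check(argc > 1 ? argv[1] : "/bin/true",
					getuid());

	puts(msg ? msg : "allowed");
	return 0;
}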
61929diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
61930new file mode 100644
61931index 0000000..9f7b1ac
61932--- /dev/null
61933+++ b/grsecurity/grsum.c
61934@@ -0,0 +1,61 @@
61935+#include <linux/err.h>
61936+#include <linux/kernel.h>
61937+#include <linux/sched.h>
61938+#include <linux/mm.h>
61939+#include <linux/scatterlist.h>
61940+#include <linux/crypto.h>
61941+#include <linux/gracl.h>
61942+
61943+
61944+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
61945+#error "crypto and sha256 must be built into the kernel"
61946+#endif
61947+
61948+int
61949+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
61950+{
61951+ char *p;
61952+ struct crypto_hash *tfm;
61953+ struct hash_desc desc;
61954+ struct scatterlist sg;
61955+ unsigned char temp_sum[GR_SHA_LEN];
61956+ volatile int retval = 0;
61957+ volatile int dummy = 0;
61958+ unsigned int i;
61959+
61960+ sg_init_table(&sg, 1);
61961+
61962+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
61963+ if (IS_ERR(tfm)) {
61964+ /* should never happen, since sha256 should be built in */
61965+ return 1;
61966+ }
61967+
61968+ desc.tfm = tfm;
61969+ desc.flags = 0;
61970+
61971+ crypto_hash_init(&desc);
61972+
61973+ p = salt;
61974+ sg_set_buf(&sg, p, GR_SALT_LEN);
61975+ crypto_hash_update(&desc, &sg, sg.length);
61976+
61977+ p = entry->pw;
61978+ sg_set_buf(&sg, p, strlen(p));
61979+
61980+ crypto_hash_update(&desc, &sg, sg.length);
61981+
61982+ crypto_hash_final(&desc, temp_sum);
61983+
61984+ memset(entry->pw, 0, GR_PW_LEN);
61985+
61986+ for (i = 0; i < GR_SHA_LEN; i++)
61987+ if (sum[i] != temp_sum[i])
61988+ retval = 1;
61989+ else
61990+ dummy = 1; // waste a cycle
61991+
61992+ crypto_free_hash(tfm);
61993+
61994+ return retval;
61995+}
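The final loop in chkpw() is written so that every byte of the two digests is always examined, with the dummy assignment keeping both branches roughly the same cost, rather than bailing out at the first mismatch the way memcmp() may. A standalone sketch of the same fixed-time comparison idiom (helper name invented):

#include <stddef.h>
#include <stdio.h>

static int fixed_time_cmp(const unsigned char *a, const unsigned char *b,
			  size_t len)
{
	unsigned char diff = 0;

	for (size_t i = 0; i < len; i++)
		diff |= a[i] ^ b[i];	/* accumulate; never break early */
	return diff != 0;		/* 1 on mismatch, like chkpw()'s retval */
}

int main(void)
{
	unsigned char x[] = { 'a', 'b', 'c', 'd' };
	unsigned char y[] = { 'a', 'b', 'z', 'd' };

	printf("%d %d\n", fixed_time_cmp(x, x, 4), fixed_time_cmp(x, y, 4));
	return 0;
}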
61996diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
61997index 77ff547..181834f 100644
61998--- a/include/asm-generic/4level-fixup.h
61999+++ b/include/asm-generic/4level-fixup.h
62000@@ -13,8 +13,10 @@
62001 #define pmd_alloc(mm, pud, address) \
62002 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
62003 NULL: pmd_offset(pud, address))
62004+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
62005
62006 #define pud_alloc(mm, pgd, address) (pgd)
62007+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
62008 #define pud_offset(pgd, start) (pgd)
62009 #define pud_none(pud) 0
62010 #define pud_bad(pud) 0
62011diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
62012index b7babf0..04ad282 100644
62013--- a/include/asm-generic/atomic-long.h
62014+++ b/include/asm-generic/atomic-long.h
62015@@ -22,6 +22,12 @@
62016
62017 typedef atomic64_t atomic_long_t;
62018
62019+#ifdef CONFIG_PAX_REFCOUNT
62020+typedef atomic64_unchecked_t atomic_long_unchecked_t;
62021+#else
62022+typedef atomic64_t atomic_long_unchecked_t;
62023+#endif
62024+
62025 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
62026
62027 static inline long atomic_long_read(atomic_long_t *l)
62028@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
62029 return (long)atomic64_read(v);
62030 }
62031
62032+#ifdef CONFIG_PAX_REFCOUNT
62033+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
62034+{
62035+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
62036+
62037+ return (long)atomic64_read_unchecked(v);
62038+}
62039+#endif
62040+
62041 static inline void atomic_long_set(atomic_long_t *l, long i)
62042 {
62043 atomic64_t *v = (atomic64_t *)l;
62044@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
62045 atomic64_set(v, i);
62046 }
62047
62048+#ifdef CONFIG_PAX_REFCOUNT
62049+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
62050+{
62051+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
62052+
62053+ atomic64_set_unchecked(v, i);
62054+}
62055+#endif
62056+
62057 static inline void atomic_long_inc(atomic_long_t *l)
62058 {
62059 atomic64_t *v = (atomic64_t *)l;
62060@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
62061 atomic64_inc(v);
62062 }
62063
62064+#ifdef CONFIG_PAX_REFCOUNT
62065+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
62066+{
62067+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
62068+
62069+ atomic64_inc_unchecked(v);
62070+}
62071+#endif
62072+
62073 static inline void atomic_long_dec(atomic_long_t *l)
62074 {
62075 atomic64_t *v = (atomic64_t *)l;
62076@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
62077 atomic64_dec(v);
62078 }
62079
62080+#ifdef CONFIG_PAX_REFCOUNT
62081+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
62082+{
62083+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
62084+
62085+ atomic64_dec_unchecked(v);
62086+}
62087+#endif
62088+
62089 static inline void atomic_long_add(long i, atomic_long_t *l)
62090 {
62091 atomic64_t *v = (atomic64_t *)l;
62092@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
62093 atomic64_add(i, v);
62094 }
62095
62096+#ifdef CONFIG_PAX_REFCOUNT
62097+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
62098+{
62099+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
62100+
62101+ atomic64_add_unchecked(i, v);
62102+}
62103+#endif
62104+
62105 static inline void atomic_long_sub(long i, atomic_long_t *l)
62106 {
62107 atomic64_t *v = (atomic64_t *)l;
62108@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
62109 atomic64_sub(i, v);
62110 }
62111
62112+#ifdef CONFIG_PAX_REFCOUNT
62113+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
62114+{
62115+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
62116+
62117+ atomic64_sub_unchecked(i, v);
62118+}
62119+#endif
62120+
62121 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
62122 {
62123 atomic64_t *v = (atomic64_t *)l;
62124@@ -101,6 +161,15 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
62125 return (long)atomic64_add_return(i, v);
62126 }
62127
62128+#ifdef CONFIG_PAX_REFCOUNT
62129+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
62130+{
62131+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
62132+
62133+ return (long)atomic64_add_return_unchecked(i, v);
62134+}
62135+#endif
62136+
62137 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
62138 {
62139 atomic64_t *v = (atomic64_t *)l;
62140@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
62141 return (long)atomic64_inc_return(v);
62142 }
62143
62144+#ifdef CONFIG_PAX_REFCOUNT
62145+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
62146+{
62147+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
62148+
62149+ return (long)atomic64_inc_return_unchecked(v);
62150+}
62151+#endif
62152+
62153 static inline long atomic_long_dec_return(atomic_long_t *l)
62154 {
62155 atomic64_t *v = (atomic64_t *)l;
62156@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
62157
62158 typedef atomic_t atomic_long_t;
62159
62160+#ifdef CONFIG_PAX_REFCOUNT
62161+typedef atomic_unchecked_t atomic_long_unchecked_t;
62162+#else
62163+typedef atomic_t atomic_long_unchecked_t;
62164+#endif
62165+
62166 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
62167 static inline long atomic_long_read(atomic_long_t *l)
62168 {
62169@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
62170 return (long)atomic_read(v);
62171 }
62172
62173+#ifdef CONFIG_PAX_REFCOUNT
62174+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
62175+{
62176+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
62177+
62178+ return (long)atomic_read_unchecked(v);
62179+}
62180+#endif
62181+
62182 static inline void atomic_long_set(atomic_long_t *l, long i)
62183 {
62184 atomic_t *v = (atomic_t *)l;
62185@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
62186 atomic_set(v, i);
62187 }
62188
62189+#ifdef CONFIG_PAX_REFCOUNT
62190+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
62191+{
62192+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
62193+
62194+ atomic_set_unchecked(v, i);
62195+}
62196+#endif
62197+
62198 static inline void atomic_long_inc(atomic_long_t *l)
62199 {
62200 atomic_t *v = (atomic_t *)l;
62201@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
62202 atomic_inc(v);
62203 }
62204
62205+#ifdef CONFIG_PAX_REFCOUNT
62206+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
62207+{
62208+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
62209+
62210+ atomic_inc_unchecked(v);
62211+}
62212+#endif
62213+
62214 static inline void atomic_long_dec(atomic_long_t *l)
62215 {
62216 atomic_t *v = (atomic_t *)l;
62217@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
62218 atomic_dec(v);
62219 }
62220
62221+#ifdef CONFIG_PAX_REFCOUNT
62222+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
62223+{
62224+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
62225+
62226+ atomic_dec_unchecked(v);
62227+}
62228+#endif
62229+
62230 static inline void atomic_long_add(long i, atomic_long_t *l)
62231 {
62232 atomic_t *v = (atomic_t *)l;
62233@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
62234 atomic_add(i, v);
62235 }
62236
62237+#ifdef CONFIG_PAX_REFCOUNT
62238+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
62239+{
62240+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
62241+
62242+ atomic_add_unchecked(i, v);
62243+}
62244+#endif
62245+
62246 static inline void atomic_long_sub(long i, atomic_long_t *l)
62247 {
62248 atomic_t *v = (atomic_t *)l;
62249@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
62250 atomic_sub(i, v);
62251 }
62252
62253+#ifdef CONFIG_PAX_REFCOUNT
62254+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
62255+{
62256+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
62257+
62258+ atomic_sub_unchecked(i, v);
62259+}
62260+#endif
62261+
62262 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
62263 {
62264 atomic_t *v = (atomic_t *)l;
62265@@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
62266 return (long)atomic_add_return(i, v);
62267 }
62268
62269+#ifdef CONFIG_PAX_REFCOUNT
62270+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
62271+{
62272+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
62273+
62274+ return (long)atomic_add_return_unchecked(i, v);
62275+}
62276+
62277+#endif
62278+
62279 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
62280 {
62281 atomic_t *v = (atomic_t *)l;
62282@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
62283 return (long)atomic_inc_return(v);
62284 }
62285
62286+#ifdef CONFIG_PAX_REFCOUNT
62287+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
62288+{
62289+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
62290+
62291+ return (long)atomic_inc_return_unchecked(v);
62292+}
62293+#endif
62294+
62295 static inline long atomic_long_dec_return(atomic_long_t *l)
62296 {
62297 atomic_t *v = (atomic_t *)l;
62298@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
62299
62300 #endif /* BITS_PER_LONG == 64 */
62301
62302+#ifdef CONFIG_PAX_REFCOUNT
62303+static inline void pax_refcount_needs_these_functions(void)
62304+{
62305+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
62306+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
62307+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
62308+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
62309+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
62310+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
62311+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
62312+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
62313+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
62314+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
62315+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
62316+#ifdef CONFIG_X86
62317+ atomic_clear_mask_unchecked(0, NULL);
62318+ atomic_set_mask_unchecked(0, NULL);
62319+#endif
62320+
62321+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
62322+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
62323+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
62324+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
62325+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
62326+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
62327+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
62328+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
62329+}
62330+#else
62331+#define atomic_read_unchecked(v) atomic_read(v)
62332+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
62333+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
62334+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
62335+#define atomic_inc_unchecked(v) atomic_inc(v)
62336+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
62337+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
62338+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
62339+#define atomic_dec_unchecked(v) atomic_dec(v)
62340+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
62341+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
62342+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
62343+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
62344+
62345+#define atomic_long_read_unchecked(v) atomic_long_read(v)
62346+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
62347+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
62348+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
62349+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
62350+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
62351+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
62352+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
62353+#endif
62354+
62355 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
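A hedged usage sketch for the checked/unchecked split this header sets up (variable names invented): under CONFIG_PAX_REFCOUNT the plain atomic ops detect signed overflow, so the unchecked type exists for the minority of counters that are genuinely allowed to wrap, while everything that behaves like a reference count keeps the overflow check; without the option both types collapse to the same thing.

#include <linux/atomic.h>

static atomic_t obj_refcount = ATOMIC_INIT(1);		/* overflow = bug: keep checked */
static atomic_unchecked_t rx_drop_stat = ATOMIC_INIT(0);	/* free-running stat: may wrap */

static void counters_example(void)
{
	atomic_inc(&obj_refcount);		/* trips the refcount check on overflow */
	atomic_inc_unchecked(&rx_drop_stat);	/* ordinary wrapping increment */
}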
62356diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
62357index 1ced641..c896ee8 100644
62358--- a/include/asm-generic/atomic.h
62359+++ b/include/asm-generic/atomic.h
62360@@ -159,7 +159,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
62361 * Atomically clears the bits set in @mask from @v
62362 */
62363 #ifndef atomic_clear_mask
62364-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
62365+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
62366 {
62367 unsigned long flags;
62368
62369diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
62370index b18ce4f..2ee2843 100644
62371--- a/include/asm-generic/atomic64.h
62372+++ b/include/asm-generic/atomic64.h
62373@@ -16,6 +16,8 @@ typedef struct {
62374 long long counter;
62375 } atomic64_t;
62376
62377+typedef atomic64_t atomic64_unchecked_t;
62378+
62379 #define ATOMIC64_INIT(i) { (i) }
62380
62381 extern long long atomic64_read(const atomic64_t *v);
62382@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
62383 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
62384 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
62385
62386+#define atomic64_read_unchecked(v) atomic64_read(v)
62387+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
62388+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
62389+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
62390+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
62391+#define atomic64_inc_unchecked(v) atomic64_inc(v)
62392+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
62393+#define atomic64_dec_unchecked(v) atomic64_dec(v)
62394+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
62395+
62396 #endif /* _ASM_GENERIC_ATOMIC64_H */
62397diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
62398index 1bfcfe5..e04c5c9 100644
62399--- a/include/asm-generic/cache.h
62400+++ b/include/asm-generic/cache.h
62401@@ -6,7 +6,7 @@
62402 * cache lines need to provide their own cache.h.
62403 */
62404
62405-#define L1_CACHE_SHIFT 5
62406-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
62407+#define L1_CACHE_SHIFT 5UL
62408+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
62409
62410 #endif /* __ASM_GENERIC_CACHE_H */
62411diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
62412index 0d68a1e..b74a761 100644
62413--- a/include/asm-generic/emergency-restart.h
62414+++ b/include/asm-generic/emergency-restart.h
62415@@ -1,7 +1,7 @@
62416 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
62417 #define _ASM_GENERIC_EMERGENCY_RESTART_H
62418
62419-static inline void machine_emergency_restart(void)
62420+static inline __noreturn void machine_emergency_restart(void)
62421 {
62422 machine_restart(NULL);
62423 }
62424diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
62425index 90f99c7..00ce236 100644
62426--- a/include/asm-generic/kmap_types.h
62427+++ b/include/asm-generic/kmap_types.h
62428@@ -2,9 +2,9 @@
62429 #define _ASM_GENERIC_KMAP_TYPES_H
62430
62431 #ifdef __WITH_KM_FENCE
62432-# define KM_TYPE_NR 41
62433+# define KM_TYPE_NR 42
62434 #else
62435-# define KM_TYPE_NR 20
62436+# define KM_TYPE_NR 21
62437 #endif
62438
62439 #endif
62440diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
62441index 9ceb03b..62b0b8f 100644
62442--- a/include/asm-generic/local.h
62443+++ b/include/asm-generic/local.h
62444@@ -23,24 +23,37 @@ typedef struct
62445 atomic_long_t a;
62446 } local_t;
62447
62448+typedef struct {
62449+ atomic_long_unchecked_t a;
62450+} local_unchecked_t;
62451+
62452 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
62453
62454 #define local_read(l) atomic_long_read(&(l)->a)
62455+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
62456 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
62457+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
62458 #define local_inc(l) atomic_long_inc(&(l)->a)
62459+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
62460 #define local_dec(l) atomic_long_dec(&(l)->a)
62461+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
62462 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
62463+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
62464 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
62465+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
62466
62467 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
62468 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
62469 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
62470 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
62471 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
62472+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
62473 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
62474 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
62475+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
62476
62477 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
62478+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
62479 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
62480 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
62481 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
62482diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
62483index 725612b..9cc513a 100644
62484--- a/include/asm-generic/pgtable-nopmd.h
62485+++ b/include/asm-generic/pgtable-nopmd.h
62486@@ -1,14 +1,19 @@
62487 #ifndef _PGTABLE_NOPMD_H
62488 #define _PGTABLE_NOPMD_H
62489
62490-#ifndef __ASSEMBLY__
62491-
62492 #include <asm-generic/pgtable-nopud.h>
62493
62494-struct mm_struct;
62495-
62496 #define __PAGETABLE_PMD_FOLDED
62497
62498+#define PMD_SHIFT PUD_SHIFT
62499+#define PTRS_PER_PMD 1
62500+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
62501+#define PMD_MASK (~(PMD_SIZE-1))
62502+
62503+#ifndef __ASSEMBLY__
62504+
62505+struct mm_struct;
62506+
62507 /*
62508 * Having the pmd type consist of a pud gets the size right, and allows
62509 * us to conceptually access the pud entry that this pmd is folded into
62510@@ -16,11 +21,6 @@ struct mm_struct;
62511 */
62512 typedef struct { pud_t pud; } pmd_t;
62513
62514-#define PMD_SHIFT PUD_SHIFT
62515-#define PTRS_PER_PMD 1
62516-#define PMD_SIZE (1UL << PMD_SHIFT)
62517-#define PMD_MASK (~(PMD_SIZE-1))
62518-
62519 /*
62520 * The "pud_xxx()" functions here are trivial for a folded two-level
62521 * setup: the pmd is never bad, and a pmd always exists (as it's folded
62522diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
62523index 810431d..0ec4804f 100644
62524--- a/include/asm-generic/pgtable-nopud.h
62525+++ b/include/asm-generic/pgtable-nopud.h
62526@@ -1,10 +1,15 @@
62527 #ifndef _PGTABLE_NOPUD_H
62528 #define _PGTABLE_NOPUD_H
62529
62530-#ifndef __ASSEMBLY__
62531-
62532 #define __PAGETABLE_PUD_FOLDED
62533
62534+#define PUD_SHIFT PGDIR_SHIFT
62535+#define PTRS_PER_PUD 1
62536+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
62537+#define PUD_MASK (~(PUD_SIZE-1))
62538+
62539+#ifndef __ASSEMBLY__
62540+
62541 /*
62542 * Having the pud type consist of a pgd gets the size right, and allows
62543 * us to conceptually access the pgd entry that this pud is folded into
62544@@ -12,11 +17,6 @@
62545 */
62546 typedef struct { pgd_t pgd; } pud_t;
62547
62548-#define PUD_SHIFT PGDIR_SHIFT
62549-#define PTRS_PER_PUD 1
62550-#define PUD_SIZE (1UL << PUD_SHIFT)
62551-#define PUD_MASK (~(PUD_SIZE-1))
62552-
62553 /*
62554 * The "pgd_xxx()" functions here are trivial for a folded two-level
62555 * setup: the pud is never bad, and a pud always exists (as it's folded
62556@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
62557 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
62558
62559 #define pgd_populate(mm, pgd, pud) do { } while (0)
62560+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
62561 /*
62562 * (puds are folded into pgds so this doesn't get actually called,
62563 * but the define is needed for a generic inline function.)
62564diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
62565index b36ce40..019426d 100644
62566--- a/include/asm-generic/pgtable.h
62567+++ b/include/asm-generic/pgtable.h
62568@@ -554,6 +554,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
62569 #endif
62570 }
62571
62572+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
62573+static inline unsigned long pax_open_kernel(void) { return 0; }
62574+#endif
62575+
62576+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
62577+static inline unsigned long pax_close_kernel(void) { return 0; }
62578+#endif
62579+
62580 #endif /* CONFIG_MMU */
62581
62582 #endif /* !__ASSEMBLY__ */
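The two stubs above give every architecture a no-op default; architectures with KERNEXEC-style write protection override them. A sketch of the calling convention (the target variable is hypothetical): writes to normally read-only kernel data are bracketed by the open/close pair.

static unsigned long example_rodata_slot;	/* imagine this lives in a read-only section */

static void patch_rodata_example(unsigned long val)
{
	pax_open_kernel();	/* e.g. clears CR0.WP on x86 implementations */
	example_rodata_slot = val;
	pax_close_kernel();	/* restore write protection */
}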
62583diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
62584index d1ea7ce..b1ebf2a 100644
62585--- a/include/asm-generic/vmlinux.lds.h
62586+++ b/include/asm-generic/vmlinux.lds.h
62587@@ -218,6 +218,7 @@
62588 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
62589 VMLINUX_SYMBOL(__start_rodata) = .; \
62590 *(.rodata) *(.rodata.*) \
62591+ *(.data..read_only) \
62592 *(__vermagic) /* Kernel version magic */ \
62593 . = ALIGN(8); \
62594 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
62595@@ -725,17 +726,18 @@
62596 * section in the linker script will go there too. @phdr should have
62597 * a leading colon.
62598 *
62599- * Note that this macros defines __per_cpu_load as an absolute symbol.
62600+ * Note that this macro defines per_cpu_load as an absolute symbol.
62601 * If there is no need to put the percpu section at a predetermined
62602 * address, use PERCPU_SECTION.
62603 */
62604 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
62605- VMLINUX_SYMBOL(__per_cpu_load) = .; \
62606- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
62607+ per_cpu_load = .; \
62608+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
62609 - LOAD_OFFSET) { \
62610+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
62611 PERCPU_INPUT(cacheline) \
62612 } phdr \
62613- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
62614+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
62615
62616 /**
62617 * PERCPU_SECTION - define output section for percpu area, simple version
62618diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
62619index 418d270..bfd2794 100644
62620--- a/include/crypto/algapi.h
62621+++ b/include/crypto/algapi.h
62622@@ -34,7 +34,7 @@ struct crypto_type {
62623 unsigned int maskclear;
62624 unsigned int maskset;
62625 unsigned int tfmsize;
62626-};
62627+} __do_const;
62628
62629 struct crypto_instance {
62630 struct crypto_alg alg;
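__do_const and __no_const only mean something under the constify gcc plugin, which moves structures consisting purely of function pointers into read-only memory; __no_const opts a struct back out because it is legitimately written at runtime, and __do_const forces the treatment on a struct the plugin would otherwise skip. A plain-C approximation of the net effect (names invented):

struct example_ops {
	int (*start)(void);
	int (*stop)(void);
};

static int start_fn(void) { return 0; }
static int stop_fn(void)  { return 0; }

/* what constified instances effectively become: const, hence read-only */
static const struct example_ops ops = {
	.start = start_fn,
	.stop  = stop_fn,
};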
62631diff --git a/include/drm/drmP.h b/include/drm/drmP.h
62632index 3fd8280..2b3c415 100644
62633--- a/include/drm/drmP.h
62634+++ b/include/drm/drmP.h
62635@@ -72,6 +72,7 @@
62636 #include <linux/workqueue.h>
62637 #include <linux/poll.h>
62638 #include <asm/pgalloc.h>
62639+#include <asm/local.h>
62640 #include <drm/drm.h>
62641 #include <drm/drm_sarea.h>
62642
62643@@ -1068,7 +1069,7 @@ struct drm_device {
62644
62645 /** \name Usage Counters */
62646 /*@{ */
62647- int open_count; /**< Outstanding files open */
62648+ local_t open_count; /**< Outstanding files open */
62649 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
62650 atomic_t vma_count; /**< Outstanding vma areas open */
62651 int buf_use; /**< Buffers in use -- cannot alloc */
62652@@ -1079,7 +1080,7 @@ struct drm_device {
62653 /*@{ */
62654 unsigned long counters;
62655 enum drm_stat_type types[15];
62656- atomic_t counts[15];
62657+ atomic_unchecked_t counts[15];
62658 /*@} */
62659
62660 struct list_head filelist;
62661diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
62662index e01cc80..6fb6f25 100644
62663--- a/include/drm/drm_crtc_helper.h
62664+++ b/include/drm/drm_crtc_helper.h
62665@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
62666 struct drm_connector *connector);
62667 /* disable encoder when not in use - more explicit than dpms off */
62668 void (*disable)(struct drm_encoder *encoder);
62669-};
62670+} __no_const;
62671
62672 /**
62673 * drm_connector_helper_funcs - helper operations for connectors
62674diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
62675index d6d1da4..fdd1ac5 100644
62676--- a/include/drm/ttm/ttm_memory.h
62677+++ b/include/drm/ttm/ttm_memory.h
62678@@ -48,7 +48,7 @@
62679
62680 struct ttm_mem_shrink {
62681 int (*do_shrink) (struct ttm_mem_shrink *);
62682-};
62683+} __no_const;
62684
62685 /**
62686 * struct ttm_mem_global - Global memory accounting structure.
62687diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
62688index 22ef21c..75904ba 100644
62689--- a/include/linux/atmdev.h
62690+++ b/include/linux/atmdev.h
62691@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
62692 #endif
62693
62694 struct k_atm_aal_stats {
62695-#define __HANDLE_ITEM(i) atomic_t i
62696+#define __HANDLE_ITEM(i) atomic_unchecked_t i
62697 __AAL_STAT_ITEMS
62698 #undef __HANDLE_ITEM
62699 };
62700diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
62701index de0628e..38f42eb 100644
62702--- a/include/linux/binfmts.h
62703+++ b/include/linux/binfmts.h
62704@@ -75,6 +75,7 @@ struct linux_binfmt {
62705 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
62706 int (*load_shlib)(struct file *);
62707 int (*core_dump)(struct coredump_params *cprm);
62708+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
62709 unsigned long min_coredump; /* minimal dump size */
62710 };
62711
62712diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
62713index 1756001..ab117ec 100644
62714--- a/include/linux/blkdev.h
62715+++ b/include/linux/blkdev.h
62716@@ -1478,7 +1478,7 @@ struct block_device_operations {
62717 /* this callback is with swap_lock and sometimes page table lock held */
62718 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
62719 struct module *owner;
62720-};
62721+} __do_const;
62722
62723 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
62724 unsigned long);
62725diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
62726index 7c2e030..b72475d 100644
62727--- a/include/linux/blktrace_api.h
62728+++ b/include/linux/blktrace_api.h
62729@@ -23,7 +23,7 @@ struct blk_trace {
62730 struct dentry *dir;
62731 struct dentry *dropped_file;
62732 struct dentry *msg_file;
62733- atomic_t dropped;
62734+ atomic_unchecked_t dropped;
62735 };
62736
62737 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
62738diff --git a/include/linux/cache.h b/include/linux/cache.h
62739index 4c57065..4307975 100644
62740--- a/include/linux/cache.h
62741+++ b/include/linux/cache.h
62742@@ -16,6 +16,10 @@
62743 #define __read_mostly
62744 #endif
62745
62746+#ifndef __read_only
62747+#define __read_only __read_mostly
62748+#endif
62749+
62750 #ifndef ____cacheline_aligned
62751 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
62752 #endif
62753diff --git a/include/linux/capability.h b/include/linux/capability.h
62754index 98503b7..cc36d18 100644
62755--- a/include/linux/capability.h
62756+++ b/include/linux/capability.h
62757@@ -211,8 +211,13 @@ extern bool capable(int cap);
62758 extern bool ns_capable(struct user_namespace *ns, int cap);
62759 extern bool nsown_capable(int cap);
62760 extern bool inode_capable(const struct inode *inode, int cap);
62761+extern bool capable_nolog(int cap);
62762+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
62763+extern bool inode_capable_nolog(const struct inode *inode, int cap);
62764
62765 /* audit system wants to get cap info from files as well */
62766 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
62767
62768+extern int is_privileged_binary(const struct dentry *dentry);
62769+
62770 #endif /* !_LINUX_CAPABILITY_H */
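A hedged usage sketch for the _nolog helpers declared above (the caller is invented): they evaluate exactly like capable()/ns_capable(), but a denial is not logged, which suits speculative probes where failing the check is the normal, uninteresting case.

#include <linux/capability.h>
#include <linux/types.h>

static bool may_quietly_admin(void)
{
	/* expected to fail for most callers; don't spam the audit log */
	return capable_nolog(CAP_SYS_ADMIN);
}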
62771diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
62772index 8609d57..86e4d79 100644
62773--- a/include/linux/cdrom.h
62774+++ b/include/linux/cdrom.h
62775@@ -87,7 +87,6 @@ struct cdrom_device_ops {
62776
62777 /* driver specifications */
62778 const int capability; /* capability flags */
62779- int n_minors; /* number of active minor devices */
62780 /* handle uniform packets for scsi type devices (scsi,atapi) */
62781 int (*generic_packet) (struct cdrom_device_info *,
62782 struct packet_command *);
62783diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
62784index 42e55de..1cd0e66 100644
62785--- a/include/linux/cleancache.h
62786+++ b/include/linux/cleancache.h
62787@@ -31,7 +31,7 @@ struct cleancache_ops {
62788 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
62789 void (*invalidate_inode)(int, struct cleancache_filekey);
62790 void (*invalidate_fs)(int);
62791-};
62792+} __no_const;
62793
62794 extern struct cleancache_ops
62795 cleancache_register_ops(struct cleancache_ops *ops);
62796diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
62797index 412bc6c..c31666e 100644
62798--- a/include/linux/compiler-gcc4.h
62799+++ b/include/linux/compiler-gcc4.h
62800@@ -32,6 +32,21 @@
62801 #define __linktime_error(message) __attribute__((__error__(message)))
62802
62803 #if __GNUC_MINOR__ >= 5
62804+
62805+#ifdef CONSTIFY_PLUGIN
62806+#define __no_const __attribute__((no_const))
62807+#define __do_const __attribute__((do_const))
62808+#endif
62809+
62810+#ifdef SIZE_OVERFLOW_PLUGIN
62811+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
62812+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
62813+#endif
62814+
62815+#ifdef LATENT_ENTROPY_PLUGIN
62816+#define __latent_entropy __attribute__((latent_entropy))
62817+#endif
62818+
62819 /*
62820 * Mark a position in code as unreachable. This can be used to
62821 * suppress control flow warnings after asm blocks that transfer
62822@@ -47,6 +62,11 @@
62823 #define __noclone __attribute__((__noclone__))
62824
62825 #endif
62826+
62827+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
62828+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
62829+#define __bos0(ptr) __bos((ptr), 0)
62830+#define __bos1(ptr) __bos((ptr), 1)
62831 #endif
62832
62833 #if __GNUC_MINOR__ >= 6
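/*
 * __alloc_size tells gcc which argument carries an allocation's size,
 * letting __builtin_object_size() (wrapped as __bos/__bos0/__bos1
 * above) bound objects returned by custom allocators; the patch's
 * hardened user-copy checks presumably build on this. A sketch
 * (example_alloc is hypothetical; the builtin folds at -O1 and above,
 * and the leak is for illustration only):
 */
void *example_alloc(unsigned long n) __alloc_size(1);

static inline unsigned long example_bound(void)
{
	char *p = example_alloc(16);

	return __builtin_object_size(p, 0);	/* 16, or -1UL if unknown */
}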
62834diff --git a/include/linux/compiler.h b/include/linux/compiler.h
62835index f430e41..38be90f 100644
62836--- a/include/linux/compiler.h
62837+++ b/include/linux/compiler.h
62838@@ -5,31 +5,62 @@
62839
62840 #ifdef __CHECKER__
62841 # define __user __attribute__((noderef, address_space(1)))
62842+# define __force_user __force __user
62843 # define __kernel __attribute__((address_space(0)))
62844+# define __force_kernel __force __kernel
62845 # define __safe __attribute__((safe))
62846 # define __force __attribute__((force))
62847 # define __nocast __attribute__((nocast))
62848 # define __iomem __attribute__((noderef, address_space(2)))
62849+# define __force_iomem __force __iomem
62850 # define __acquires(x) __attribute__((context(x,0,1)))
62851 # define __releases(x) __attribute__((context(x,1,0)))
62852 # define __acquire(x) __context__(x,1)
62853 # define __release(x) __context__(x,-1)
62854 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
62855 # define __percpu __attribute__((noderef, address_space(3)))
62856+# define __force_percpu __force __percpu
62857 #ifdef CONFIG_SPARSE_RCU_POINTER
62858 # define __rcu __attribute__((noderef, address_space(4)))
62859+# define __force_rcu __force __rcu
62860 #else
62861 # define __rcu
62862+# define __force_rcu
62863 #endif
62864 extern void __chk_user_ptr(const volatile void __user *);
62865 extern void __chk_io_ptr(const volatile void __iomem *);
62866+#elif defined(CHECKER_PLUGIN)
62867+//# define __user
62868+//# define __force_user
62869+//# define __kernel
62870+//# define __force_kernel
62871+# define __safe
62872+# define __force
62873+# define __nocast
62874+# define __iomem
62875+# define __force_iomem
62876+# define __chk_user_ptr(x) (void)0
62877+# define __chk_io_ptr(x) (void)0
62878+# define __builtin_warning(x, y...) (1)
62879+# define __acquires(x)
62880+# define __releases(x)
62881+# define __acquire(x) (void)0
62882+# define __release(x) (void)0
62883+# define __cond_lock(x,c) (c)
62884+# define __percpu
62885+# define __force_percpu
62886+# define __rcu
62887+# define __force_rcu
62888 #else
62889 # define __user
62890+# define __force_user
62891 # define __kernel
62892+# define __force_kernel
62893 # define __safe
62894 # define __force
62895 # define __nocast
62896 # define __iomem
62897+# define __force_iomem
62898 # define __chk_user_ptr(x) (void)0
62899 # define __chk_io_ptr(x) (void)0
62900 # define __builtin_warning(x, y...) (1)
62901@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
62902 # define __release(x) (void)0
62903 # define __cond_lock(x,c) (c)
62904 # define __percpu
62905+# define __force_percpu
62906 # define __rcu
62907+# define __force_rcu
62908 #endif
62909
62910 #ifdef __KERNEL__
62911@@ -264,6 +297,26 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
62912 # define __attribute_const__ /* unimplemented */
62913 #endif
62914
62915+#ifndef __no_const
62916+# define __no_const
62917+#endif
62918+
62919+#ifndef __do_const
62920+# define __do_const
62921+#endif
62922+
62923+#ifndef __size_overflow
62924+# define __size_overflow(...)
62925+#endif
62926+
62927+#ifndef __intentional_overflow
62928+# define __intentional_overflow(...)
62929+#endif
62930+
62931+#ifndef __latent_entropy
62932+# define __latent_entropy
62933+#endif
62934+
62935 /*
62936 * Tell gcc if a function is cold. The compiler will assume any path
62937 * directly leading to the call is unlikely.
62938@@ -273,6 +326,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
62939 #define __cold
62940 #endif
62941
62942+#ifndef __alloc_size
62943+#define __alloc_size(...)
62944+#endif
62945+
62946+#ifndef __bos
62947+#define __bos(ptr, arg)
62948+#endif
62949+
62950+#ifndef __bos0
62951+#define __bos0(ptr)
62952+#endif
62953+
62954+#ifndef __bos1
62955+#define __bos1(ptr)
62956+#endif
62957+
62958 /* Simple shorthand for a section definition */
62959 #ifndef __section
62960 # define __section(S) __attribute__ ((__section__(#S)))
62961@@ -312,6 +381,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
62962 * use is to mediate communication between process-level code and irq/NMI
62963 * handlers, all running on the same CPU.
62964 */
62965-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
62966+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
62967+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
62968
62969 #endif /* __LINUX_COMPILER_H */
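/*
 * The hardened ACCESS_ONCE() casts through a const-qualified volatile
 * pointer, so the common read-only use keeps working while accidental
 * writes through it become compile errors; intentional writes must use
 * the new ACCESS_ONCE_RW(). A sketch:
 */
static inline void example_once(int *p)
{
	int v = ACCESS_ONCE(*p);	/* volatile load, as before */

	/* ACCESS_ONCE(*p) = v + 1; no longer compiles: lvalue is const */
	ACCESS_ONCE_RW(*p) = v + 1;	/* writes are now explicit */
}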
62970diff --git a/include/linux/cred.h b/include/linux/cred.h
62971index ebbed2c..908cc2c 100644
62972--- a/include/linux/cred.h
62973+++ b/include/linux/cred.h
62974@@ -208,6 +208,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
62975 static inline void validate_process_creds(void)
62976 {
62977 }
62978+static inline void validate_task_creds(struct task_struct *task)
62979+{
62980+}
62981 #endif
62982
62983 /**
62984diff --git a/include/linux/crypto.h b/include/linux/crypto.h
62985index b92eadf..b4ecdc1 100644
62986--- a/include/linux/crypto.h
62987+++ b/include/linux/crypto.h
62988@@ -373,7 +373,7 @@ struct cipher_tfm {
62989 const u8 *key, unsigned int keylen);
62990 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
62991 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
62992-};
62993+} __no_const;
62994
62995 struct hash_tfm {
62996 int (*init)(struct hash_desc *desc);
62997@@ -394,13 +394,13 @@ struct compress_tfm {
62998 int (*cot_decompress)(struct crypto_tfm *tfm,
62999 const u8 *src, unsigned int slen,
63000 u8 *dst, unsigned int *dlen);
63001-};
63002+} __no_const;
63003
63004 struct rng_tfm {
63005 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
63006 unsigned int dlen);
63007 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
63008-};
63009+} __no_const;
63010
63011 #define crt_ablkcipher crt_u.ablkcipher
63012 #define crt_aead crt_u.aead
63013diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
63014index 7925bf0..d5143d2 100644
63015--- a/include/linux/decompress/mm.h
63016+++ b/include/linux/decompress/mm.h
63017@@ -77,7 +77,7 @@ static void free(void *where)
63018 * warnings when not needed (indeed large_malloc / large_free are not
63019 * needed by inflate */
63020
63021-#define malloc(a) kmalloc(a, GFP_KERNEL)
63022+#define malloc(a) kmalloc((a), GFP_KERNEL)
63023 #define free(a) kfree(a)
63024
63025 #define large_malloc(a) vmalloc(a)
63026diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
63027index 94af418..b1ca7a2 100644
63028--- a/include/linux/dma-mapping.h
63029+++ b/include/linux/dma-mapping.h
63030@@ -54,7 +54,7 @@ struct dma_map_ops {
63031 u64 (*get_required_mask)(struct device *dev);
63032 #endif
63033 int is_phys;
63034-};
63035+} __do_const;
63036
63037 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
63038
63039diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
63040index d3201e4..8281e63 100644
63041--- a/include/linux/dmaengine.h
63042+++ b/include/linux/dmaengine.h
63043@@ -1018,9 +1018,9 @@ struct dma_pinned_list {
63044 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
63045 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
63046
63047-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
63048+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
63049 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
63050-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
63051+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
63052 struct dma_pinned_list *pinned_list, struct page *page,
63053 unsigned int offset, size_t len);
63054
63055diff --git a/include/linux/elf.h b/include/linux/elf.h
63056index 8c9048e..16a4665 100644
63057--- a/include/linux/elf.h
63058+++ b/include/linux/elf.h
63059@@ -20,6 +20,7 @@ extern Elf32_Dyn _DYNAMIC [];
63060 #define elf_note elf32_note
63061 #define elf_addr_t Elf32_Off
63062 #define Elf_Half Elf32_Half
63063+#define elf_dyn Elf32_Dyn
63064
63065 #else
63066
63067@@ -30,6 +31,7 @@ extern Elf64_Dyn _DYNAMIC [];
63068 #define elf_note elf64_note
63069 #define elf_addr_t Elf64_Off
63070 #define Elf_Half Elf64_Half
63071+#define elf_dyn Elf64_Dyn
63072
63073 #endif
63074
63075diff --git a/include/linux/filter.h b/include/linux/filter.h
63076index 24d251f..7afb83d 100644
63077--- a/include/linux/filter.h
63078+++ b/include/linux/filter.h
63079@@ -20,6 +20,7 @@ struct compat_sock_fprog {
63080
63081 struct sk_buff;
63082 struct sock;
63083+struct bpf_jit_work;
63084
63085 struct sk_filter
63086 {
63087@@ -27,6 +28,9 @@ struct sk_filter
63088 unsigned int len; /* Number of filter blocks */
63089 unsigned int (*bpf_func)(const struct sk_buff *skb,
63090 const struct sock_filter *filter);
63091+#ifdef CONFIG_BPF_JIT
63092+ struct bpf_jit_work *work;
63093+#endif
63094 struct rcu_head rcu;
63095 struct sock_filter insns[0];
63096 };
63097diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
63098index 3044254..9767f41 100644
63099--- a/include/linux/frontswap.h
63100+++ b/include/linux/frontswap.h
63101@@ -11,7 +11,7 @@ struct frontswap_ops {
63102 int (*load)(unsigned, pgoff_t, struct page *);
63103 void (*invalidate_page)(unsigned, pgoff_t);
63104 void (*invalidate_area)(unsigned);
63105-};
63106+} __no_const;
63107
63108 extern bool frontswap_enabled;
63109 extern struct frontswap_ops
63110diff --git a/include/linux/fs.h b/include/linux/fs.h
63111index 75fe9a1..8417cac 100644
63112--- a/include/linux/fs.h
63113+++ b/include/linux/fs.h
63114@@ -1543,7 +1543,8 @@ struct file_operations {
63115 int (*setlease)(struct file *, long, struct file_lock **);
63116 long (*fallocate)(struct file *file, int mode, loff_t offset,
63117 loff_t len);
63118-};
63119+} __do_const;
63120+typedef struct file_operations __no_const file_operations_no_const;
63121
63122 struct inode_operations {
63123 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
63124@@ -2667,4 +2668,14 @@ static inline void inode_has_no_xattr(struct inode *inode)
63125 inode->i_flags |= S_NOSEC;
63126 }
63127
63128+static inline bool is_sidechannel_device(const struct inode *inode)
63129+{
63130+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
63131+ umode_t mode = inode->i_mode;
63132+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
63133+#else
63134+ return false;
63135+#endif
63136+}
63137+
63138 #endif /* _LINUX_FS_H */
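/*
 * A "sidechannel device" here is a character or block node that is
 * world-readable or world-writable (S_IROTH | S_IWOTH); with
 * GRKERNSEC_DEVICE_SIDECHANNEL the fsnotify hooks in the next hunk drop
 * access/modify events on such inodes, so unprivileged watchers (e.g.
 * inotify on a tty) cannot mine them for timing information. The same
 * predicate over a userspace stat(2) mode, as a standalone sketch:
 */
#include <stdbool.h>
#include <sys/stat.h>

static bool is_sidechannel_mode(mode_t mode)
{
	return (S_ISCHR(mode) || S_ISBLK(mode)) &&
	       (mode & (S_IROTH | S_IWOTH));
}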
63139diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
63140index 003dc0f..3c4ea97 100644
63141--- a/include/linux/fs_struct.h
63142+++ b/include/linux/fs_struct.h
63143@@ -6,7 +6,7 @@
63144 #include <linux/seqlock.h>
63145
63146 struct fs_struct {
63147- int users;
63148+ atomic_t users;
63149 spinlock_t lock;
63150 seqcount_t seq;
63151 int umask;
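/*
 * Converting fs_struct.users from a plain int to atomic_t lets the
 * reference count be manipulated without relying on fs->lock for the
 * counter itself (and, with PaX REFCOUNT, gains overflow trapping).
 * The matching updates to fs/fs_struct.c appear elsewhere in this
 * patch; the usual pattern becomes (sketch, with free_fs_struct() being
 * the existing teardown helper in fs/fs_struct.c):
 */
static void example_put_fs(struct fs_struct *fs)
{
	if (atomic_dec_and_test(&fs->users))	/* drop a reference */
		free_fs_struct(fs);		/* last user frees */
}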
63152diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
63153index ce31408..b1ad003 100644
63154--- a/include/linux/fscache-cache.h
63155+++ b/include/linux/fscache-cache.h
63156@@ -102,7 +102,7 @@ struct fscache_operation {
63157 fscache_operation_release_t release;
63158 };
63159
63160-extern atomic_t fscache_op_debug_id;
63161+extern atomic_unchecked_t fscache_op_debug_id;
63162 extern void fscache_op_work_func(struct work_struct *work);
63163
63164 extern void fscache_enqueue_operation(struct fscache_operation *);
63165@@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
63166 {
63167 INIT_WORK(&op->work, fscache_op_work_func);
63168 atomic_set(&op->usage, 1);
63169- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
63170+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
63171 op->processor = processor;
63172 op->release = release;
63173 INIT_LIST_HEAD(&op->pend_link);
63174diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
63175index 0fbfb46..508eb0d 100644
63176--- a/include/linux/fsnotify.h
63177+++ b/include/linux/fsnotify.h
63178@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
63179 struct inode *inode = path->dentry->d_inode;
63180 __u32 mask = FS_ACCESS;
63181
63182+ if (is_sidechannel_device(inode))
63183+ return;
63184+
63185 if (S_ISDIR(inode->i_mode))
63186 mask |= FS_ISDIR;
63187
63188@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
63189 struct inode *inode = path->dentry->d_inode;
63190 __u32 mask = FS_MODIFY;
63191
63192+ if (is_sidechannel_device(inode))
63193+ return;
63194+
63195 if (S_ISDIR(inode->i_mode))
63196 mask |= FS_ISDIR;
63197
63198@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
63199 */
63200 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
63201 {
63202- return kstrdup(name, GFP_KERNEL);
63203+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
63204 }
63205
63206 /*
63207diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
63208index 642928c..93afe6a 100644
63209--- a/include/linux/ftrace_event.h
63210+++ b/include/linux/ftrace_event.h
63211@@ -266,7 +266,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
63212 extern int trace_add_event_call(struct ftrace_event_call *call);
63213 extern void trace_remove_event_call(struct ftrace_event_call *call);
63214
63215-#define is_signed_type(type) (((type)(-1)) < 0)
63216+#define is_signed_type(type) (((type)(-1)) < (type)1)
63217
63218 int trace_set_clr_event(const char *system, const char *event, int set);
63219
63220diff --git a/include/linux/genhd.h b/include/linux/genhd.h
63221index 4f440b3..342233a 100644
63222--- a/include/linux/genhd.h
63223+++ b/include/linux/genhd.h
63224@@ -190,7 +190,7 @@ struct gendisk {
63225 struct kobject *slave_dir;
63226
63227 struct timer_rand_state *random;
63228- atomic_t sync_io; /* RAID */
63229+ atomic_unchecked_t sync_io; /* RAID */
63230 struct disk_events *ev;
63231 #ifdef CONFIG_BLK_DEV_INTEGRITY
63232 struct blk_integrity *integrity;
63233diff --git a/include/linux/gfp.h b/include/linux/gfp.h
63234index d0a7967..63c4c47 100644
63235--- a/include/linux/gfp.h
63236+++ b/include/linux/gfp.h
63237@@ -35,6 +35,12 @@ struct vm_area_struct;
63238 #define ___GFP_OTHER_NODE 0x800000u
63239 #define ___GFP_WRITE 0x1000000u
63240
63241+#ifdef CONFIG_PAX_USERCOPY_SLABS
63242+#define ___GFP_USERCOPY 0x2000000u
63243+#else
63244+#define ___GFP_USERCOPY 0
63245+#endif
63246+
63247 /*
63248 * GFP bitmasks..
63249 *
63250@@ -89,6 +95,7 @@ struct vm_area_struct;
63251 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
63252 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
63253 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
63254+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
63255
63256 /*
63257 * This may seem redundant, but it's a way of annotating false positives vs.
63258@@ -96,7 +103,7 @@ struct vm_area_struct;
63259 */
63260 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
63261
63262-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
63263+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
63264 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
63265
63266 /* This equals 0, but use constants in case they ever change */
63267@@ -150,6 +157,8 @@ struct vm_area_struct;
63268 /* 4GB DMA on some platforms */
63269 #define GFP_DMA32 __GFP_DMA32
63270
63271+#define GFP_USERCOPY __GFP_USERCOPY
63272+
63273 /* Convert GFP flags to their corresponding migrate type */
63274 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
63275 {
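/*
 * PAX_USERCOPY_SLABS segregates objects that are copied to or from
 * userland into dedicated slab caches; GFP_USERCOPY tags an allocation
 * as such a copy target, and __GFP_BITS_SHIFT grows from 25 to 26 to
 * make room for the new bit. When the option is off, ___GFP_USERCOPY is
 * 0, so callers may pass the flag unconditionally. Sketch:
 */
static inline void *example_user_buffer(size_t len)
{
	/* contents will round-trip through copy_{to,from}_user() */
	return kmalloc(len, GFP_KERNEL | GFP_USERCOPY);
}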
63276diff --git a/include/linux/gracl.h b/include/linux/gracl.h
63277new file mode 100644
63278index 0000000..ebe6d72
63279--- /dev/null
63280+++ b/include/linux/gracl.h
63281@@ -0,0 +1,319 @@
63282+#ifndef GR_ACL_H
63283+#define GR_ACL_H
63284+
63285+#include <linux/grdefs.h>
63286+#include <linux/resource.h>
63287+#include <linux/capability.h>
63288+#include <linux/dcache.h>
63289+#include <asm/resource.h>
63290+
63291+/* Major status information */
63292+
63293+#define GR_VERSION "grsecurity 2.9.1"
63294+#define GRSECURITY_VERSION 0x2901
63295+
63296+enum {
63297+ GR_SHUTDOWN = 0,
63298+ GR_ENABLE = 1,
63299+ GR_SPROLE = 2,
63300+ GR_RELOAD = 3,
63301+ GR_SEGVMOD = 4,
63302+ GR_STATUS = 5,
63303+ GR_UNSPROLE = 6,
63304+ GR_PASSSET = 7,
63305+ GR_SPROLEPAM = 8,
63306+};
63307+
63308+/* Password setup definitions
63309+ * kernel/grhash.c */
63310+enum {
63311+ GR_PW_LEN = 128,
63312+ GR_SALT_LEN = 16,
63313+ GR_SHA_LEN = 32,
63314+};
63315+
63316+enum {
63317+ GR_SPROLE_LEN = 64,
63318+};
63319+
63320+enum {
63321+ GR_NO_GLOB = 0,
63322+ GR_REG_GLOB,
63323+ GR_CREATE_GLOB
63324+};
63325+
63326+#define GR_NLIMITS 32
63327+
63328+/* Begin Data Structures */
63329+
63330+struct sprole_pw {
63331+ unsigned char *rolename;
63332+ unsigned char salt[GR_SALT_LEN];
63333+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
63334+};
63335+
63336+struct name_entry {
63337+ __u32 key;
63338+ ino_t inode;
63339+ dev_t device;
63340+ char *name;
63341+ __u16 len;
63342+ __u8 deleted;
63343+ struct name_entry *prev;
63344+ struct name_entry *next;
63345+};
63346+
63347+struct inodev_entry {
63348+ struct name_entry *nentry;
63349+ struct inodev_entry *prev;
63350+ struct inodev_entry *next;
63351+};
63352+
63353+struct acl_role_db {
63354+ struct acl_role_label **r_hash;
63355+ __u32 r_size;
63356+};
63357+
63358+struct inodev_db {
63359+ struct inodev_entry **i_hash;
63360+ __u32 i_size;
63361+};
63362+
63363+struct name_db {
63364+ struct name_entry **n_hash;
63365+ __u32 n_size;
63366+};
63367+
63368+struct crash_uid {
63369+ uid_t uid;
63370+ unsigned long expires;
63371+};
63372+
63373+struct gr_hash_struct {
63374+ void **table;
63375+ void **nametable;
63376+ void *first;
63377+ __u32 table_size;
63378+ __u32 used_size;
63379+ int type;
63380+};
63381+
63382+/* Userspace Grsecurity ACL data structures */
63383+
63384+struct acl_subject_label {
63385+ char *filename;
63386+ ino_t inode;
63387+ dev_t device;
63388+ __u32 mode;
63389+ kernel_cap_t cap_mask;
63390+ kernel_cap_t cap_lower;
63391+ kernel_cap_t cap_invert_audit;
63392+
63393+ struct rlimit res[GR_NLIMITS];
63394+ __u32 resmask;
63395+
63396+ __u8 user_trans_type;
63397+ __u8 group_trans_type;
63398+ uid_t *user_transitions;
63399+ gid_t *group_transitions;
63400+ __u16 user_trans_num;
63401+ __u16 group_trans_num;
63402+
63403+ __u32 sock_families[2];
63404+ __u32 ip_proto[8];
63405+ __u32 ip_type;
63406+ struct acl_ip_label **ips;
63407+ __u32 ip_num;
63408+ __u32 inaddr_any_override;
63409+
63410+ __u32 crashes;
63411+ unsigned long expires;
63412+
63413+ struct acl_subject_label *parent_subject;
63414+ struct gr_hash_struct *hash;
63415+ struct acl_subject_label *prev;
63416+ struct acl_subject_label *next;
63417+
63418+ struct acl_object_label **obj_hash;
63419+ __u32 obj_hash_size;
63420+ __u16 pax_flags;
63421+};
63422+
63423+struct role_allowed_ip {
63424+ __u32 addr;
63425+ __u32 netmask;
63426+
63427+ struct role_allowed_ip *prev;
63428+ struct role_allowed_ip *next;
63429+};
63430+
63431+struct role_transition {
63432+ char *rolename;
63433+
63434+ struct role_transition *prev;
63435+ struct role_transition *next;
63436+};
63437+
63438+struct acl_role_label {
63439+ char *rolename;
63440+ uid_t uidgid;
63441+ __u16 roletype;
63442+
63443+ __u16 auth_attempts;
63444+ unsigned long expires;
63445+
63446+ struct acl_subject_label *root_label;
63447+ struct gr_hash_struct *hash;
63448+
63449+ struct acl_role_label *prev;
63450+ struct acl_role_label *next;
63451+
63452+ struct role_transition *transitions;
63453+ struct role_allowed_ip *allowed_ips;
63454+ uid_t *domain_children;
63455+ __u16 domain_child_num;
63456+
63457+ umode_t umask;
63458+
63459+ struct acl_subject_label **subj_hash;
63460+ __u32 subj_hash_size;
63461+};
63462+
63463+struct user_acl_role_db {
63464+ struct acl_role_label **r_table;
63465+ __u32 num_pointers; /* Number of allocations to track */
63466+ __u32 num_roles; /* Number of roles */
63467+ __u32 num_domain_children; /* Number of domain children */
63468+ __u32 num_subjects; /* Number of subjects */
63469+ __u32 num_objects; /* Number of objects */
63470+};
63471+
63472+struct acl_object_label {
63473+ char *filename;
63474+ ino_t inode;
63475+ dev_t device;
63476+ __u32 mode;
63477+
63478+ struct acl_subject_label *nested;
63479+ struct acl_object_label *globbed;
63480+
63481+ /* next two structures not used */
63482+
63483+ struct acl_object_label *prev;
63484+ struct acl_object_label *next;
63485+};
63486+
63487+struct acl_ip_label {
63488+ char *iface;
63489+ __u32 addr;
63490+ __u32 netmask;
63491+ __u16 low, high;
63492+ __u8 mode;
63493+ __u32 type;
63494+ __u32 proto[8];
63495+
63496+ /* next two structures not used */
63497+
63498+ struct acl_ip_label *prev;
63499+ struct acl_ip_label *next;
63500+};
63501+
63502+struct gr_arg {
63503+ struct user_acl_role_db role_db;
63504+ unsigned char pw[GR_PW_LEN];
63505+ unsigned char salt[GR_SALT_LEN];
63506+ unsigned char sum[GR_SHA_LEN];
63507+ unsigned char sp_role[GR_SPROLE_LEN];
63508+ struct sprole_pw *sprole_pws;
63509+ dev_t segv_device;
63510+ ino_t segv_inode;
63511+ uid_t segv_uid;
63512+ __u16 num_sprole_pws;
63513+ __u16 mode;
63514+};
63515+
63516+struct gr_arg_wrapper {
63517+ struct gr_arg *arg;
63518+ __u32 version;
63519+ __u32 size;
63520+};
63521+
63522+struct subject_map {
63523+ struct acl_subject_label *user;
63524+ struct acl_subject_label *kernel;
63525+ struct subject_map *prev;
63526+ struct subject_map *next;
63527+};
63528+
63529+struct acl_subj_map_db {
63530+ struct subject_map **s_hash;
63531+ __u32 s_size;
63532+};
63533+
63534+/* End Data Structures Section */
63535+
63536+/* Hash functions tuned empirically by Brad Spengler.
63537+ Makes good use of the low bits of the inode. Generally 0-1 loop
63538+ iterations for a successful match, 0-3 for an unsuccessful match.
63539+ Shift/add algorithm with modulus of table size and an XOR. */
63540+
63541+static __inline__ unsigned int
63542+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
63543+{
63544+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
63545+}
63546+
63547+static __inline__ unsigned int
63548+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
63549+{
63550+ return ((const unsigned long)userp % sz);
63551+}
63552+
63553+static __inline__ unsigned int
63554+gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
63555+{
63556+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
63557+}
63558+
63559+static __inline__ unsigned int
63560+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
63561+{
63562+ return full_name_hash((const unsigned char *)name, len) % sz;
63563+}
63564+
63565+#define FOR_EACH_ROLE_START(role) \
63566+ role = role_list; \
63567+ while (role) {
63568+
63569+#define FOR_EACH_ROLE_END(role) \
63570+ role = role->prev; \
63571+ }
63572+
63573+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
63574+ subj = NULL; \
63575+ iter = 0; \
63576+ while (iter < role->subj_hash_size) { \
63577+ if (subj == NULL) \
63578+ subj = role->subj_hash[iter]; \
63579+ if (subj == NULL) { \
63580+ iter++; \
63581+ continue; \
63582+ }
63583+
63584+#define FOR_EACH_SUBJECT_END(subj,iter) \
63585+ subj = subj->next; \
63586+ if (subj == NULL) \
63587+ iter++; \
63588+ }
63589+
63590+
63591+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
63592+ subj = role->hash->first; \
63593+ while (subj != NULL) {
63594+
63595+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
63596+ subj = subj->next; \
63597+ }
63598+
63599+#endif
63600+
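/*
 * The gr_*hash() helpers above are simple shift/XOR schemes reduced
 * modulo the table size. A standalone userspace illustration of
 * gr_fhash() bucketing (the inode/device/table values are arbitrary):
 */
#include <stdio.h>

static unsigned int fhash(unsigned long ino, unsigned long dev,
			  unsigned int sz)
{
	return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
}

int main(void)
{
	printf("bucket %u of 256\n", fhash(131090UL, 0x801UL, 256));
	return 0;
}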
63601diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
63602new file mode 100644
63603index 0000000..323ecf2
63604--- /dev/null
63605+++ b/include/linux/gralloc.h
63606@@ -0,0 +1,9 @@
63607+#ifndef __GRALLOC_H
63608+#define __GRALLOC_H
63609+
63610+void acl_free_all(void);
63611+int acl_alloc_stack_init(unsigned long size);
63612+void *acl_alloc(unsigned long len);
63613+void *acl_alloc_num(unsigned long num, unsigned long len);
63614+
63615+#endif
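/*
 * The four declarations above describe an allocate-only arena used
 * while a policy is loaded: acl_alloc_stack_init() sizes a stack of
 * allocation records, acl_alloc()/acl_alloc_num() hand out zeroed
 * pieces, and acl_free_all() releases everything at once on teardown.
 * A minimal userspace sketch of an allocator with this shape (the
 * kernel implementation lives elsewhere in this patch):
 */
#include <stdlib.h>

static void **alloc_stack;
static unsigned long stack_top, stack_size;

int acl_alloc_stack_init(unsigned long size)
{
	alloc_stack = malloc(size * sizeof(void *));
	stack_size = size;
	stack_top = 0;
	return alloc_stack != NULL;
}

void *acl_alloc(unsigned long len)
{
	void *p;

	if (stack_top >= stack_size || !(p = calloc(1, len)))
		return NULL;
	alloc_stack[stack_top++] = p;	/* record it for bulk free */
	return p;
}

void *acl_alloc_num(unsigned long num, unsigned long len)
{
	return acl_alloc(num * len);	/* callers bound num elsewhere */
}

void acl_free_all(void)
{
	while (stack_top)
		free(alloc_stack[--stack_top]);
	free(alloc_stack);
	alloc_stack = NULL;
}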
63616diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
63617new file mode 100644
63618index 0000000..be66033
63619--- /dev/null
63620+++ b/include/linux/grdefs.h
63621@@ -0,0 +1,140 @@
63622+#ifndef GRDEFS_H
63623+#define GRDEFS_H
63624+
63625+/* Begin grsecurity status declarations */
63626+
63627+enum {
63628+ GR_READY = 0x01,
63629+ GR_STATUS_INIT = 0x00 // disabled state
63630+};
63631+
63632+/* Begin ACL declarations */
63633+
63634+/* Role flags */
63635+
63636+enum {
63637+ GR_ROLE_USER = 0x0001,
63638+ GR_ROLE_GROUP = 0x0002,
63639+ GR_ROLE_DEFAULT = 0x0004,
63640+ GR_ROLE_SPECIAL = 0x0008,
63641+ GR_ROLE_AUTH = 0x0010,
63642+ GR_ROLE_NOPW = 0x0020,
63643+ GR_ROLE_GOD = 0x0040,
63644+ GR_ROLE_LEARN = 0x0080,
63645+ GR_ROLE_TPE = 0x0100,
63646+ GR_ROLE_DOMAIN = 0x0200,
63647+ GR_ROLE_PAM = 0x0400,
63648+ GR_ROLE_PERSIST = 0x0800
63649+};
63650+
63651+/* ACL Subject and Object mode flags */
63652+enum {
63653+ GR_DELETED = 0x80000000
63654+};
63655+
63656+/* ACL Object-only mode flags */
63657+enum {
63658+ GR_READ = 0x00000001,
63659+ GR_APPEND = 0x00000002,
63660+ GR_WRITE = 0x00000004,
63661+ GR_EXEC = 0x00000008,
63662+ GR_FIND = 0x00000010,
63663+ GR_INHERIT = 0x00000020,
63664+ GR_SETID = 0x00000040,
63665+ GR_CREATE = 0x00000080,
63666+ GR_DELETE = 0x00000100,
63667+ GR_LINK = 0x00000200,
63668+ GR_AUDIT_READ = 0x00000400,
63669+ GR_AUDIT_APPEND = 0x00000800,
63670+ GR_AUDIT_WRITE = 0x00001000,
63671+ GR_AUDIT_EXEC = 0x00002000,
63672+ GR_AUDIT_FIND = 0x00004000,
63673+ GR_AUDIT_INHERIT= 0x00008000,
63674+ GR_AUDIT_SETID = 0x00010000,
63675+ GR_AUDIT_CREATE = 0x00020000,
63676+ GR_AUDIT_DELETE = 0x00040000,
63677+ GR_AUDIT_LINK = 0x00080000,
63678+ GR_PTRACERD = 0x00100000,
63679+ GR_NOPTRACE = 0x00200000,
63680+ GR_SUPPRESS = 0x00400000,
63681+ GR_NOLEARN = 0x00800000,
63682+ GR_INIT_TRANSFER= 0x01000000
63683+};
63684+
63685+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
63686+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
63687+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
63688+
63689+/* ACL subject-only mode flags */
63690+enum {
63691+ GR_KILL = 0x00000001,
63692+ GR_VIEW = 0x00000002,
63693+ GR_PROTECTED = 0x00000004,
63694+ GR_LEARN = 0x00000008,
63695+ GR_OVERRIDE = 0x00000010,
63696+ /* just a placeholder, this mode is only used in userspace */
63697+ GR_DUMMY = 0x00000020,
63698+ GR_PROTSHM = 0x00000040,
63699+ GR_KILLPROC = 0x00000080,
63700+ GR_KILLIPPROC = 0x00000100,
63701+ /* just a placeholder, this mode is only used in userspace */
63702+ GR_NOTROJAN = 0x00000200,
63703+ GR_PROTPROCFD = 0x00000400,
63704+ GR_PROCACCT = 0x00000800,
63705+ GR_RELAXPTRACE = 0x00001000,
63706+ //GR_NESTED = 0x00002000,
63707+ GR_INHERITLEARN = 0x00004000,
63708+ GR_PROCFIND = 0x00008000,
63709+ GR_POVERRIDE = 0x00010000,
63710+ GR_KERNELAUTH = 0x00020000,
63711+ GR_ATSECURE = 0x00040000,
63712+ GR_SHMEXEC = 0x00080000
63713+};
63714+
63715+enum {
63716+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
63717+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
63718+ GR_PAX_ENABLE_MPROTECT = 0x0004,
63719+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
63720+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
63721+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
63722+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
63723+ GR_PAX_DISABLE_MPROTECT = 0x0400,
63724+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
63725+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
63726+};
63727+
63728+enum {
63729+ GR_ID_USER = 0x01,
63730+ GR_ID_GROUP = 0x02,
63731+};
63732+
63733+enum {
63734+ GR_ID_ALLOW = 0x01,
63735+ GR_ID_DENY = 0x02,
63736+};
63737+
63738+#define GR_CRASH_RES 31
63739+#define GR_UIDTABLE_MAX 500
63740+
63741+/* begin resource learning section */
63742+enum {
63743+ GR_RLIM_CPU_BUMP = 60,
63744+ GR_RLIM_FSIZE_BUMP = 50000,
63745+ GR_RLIM_DATA_BUMP = 10000,
63746+ GR_RLIM_STACK_BUMP = 1000,
63747+ GR_RLIM_CORE_BUMP = 10000,
63748+ GR_RLIM_RSS_BUMP = 500000,
63749+ GR_RLIM_NPROC_BUMP = 1,
63750+ GR_RLIM_NOFILE_BUMP = 5,
63751+ GR_RLIM_MEMLOCK_BUMP = 50000,
63752+ GR_RLIM_AS_BUMP = 500000,
63753+ GR_RLIM_LOCKS_BUMP = 2,
63754+ GR_RLIM_SIGPENDING_BUMP = 5,
63755+ GR_RLIM_MSGQUEUE_BUMP = 10000,
63756+ GR_RLIM_NICE_BUMP = 1,
63757+ GR_RLIM_RTPRIO_BUMP = 1,
63758+ GR_RLIM_RTTIME_BUMP = 1000000
63759+};
63760+
63761+#endif
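/*
 * Subject and object modes are bitmasks built from the enums above. A
 * userspace sketch that names the set object flags, assuming grdefs.h
 * is visible (the wording is illustrative, not the RBAC policy syntax):
 */
#include <stdio.h>

static void example_dump_object_mode(unsigned int mode)
{
	if (mode & GR_READ)	printf("read ");
	if (mode & GR_WRITE)	printf("write ");
	if (mode & GR_APPEND)	printf("append ");
	if (mode & GR_EXEC)	printf("exec ");
	if (mode & GR_SUPPRESS)	printf("suppress ");
	if (mode & GR_AUDITS)	printf("audited ");
	putchar('\n');
}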
63762diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
63763new file mode 100644
63764index 0000000..c9292f7
63765--- /dev/null
63766+++ b/include/linux/grinternal.h
63767@@ -0,0 +1,223 @@
63768+#ifndef __GRINTERNAL_H
63769+#define __GRINTERNAL_H
63770+
63771+#ifdef CONFIG_GRKERNSEC
63772+
63773+#include <linux/fs.h>
63774+#include <linux/mnt_namespace.h>
63775+#include <linux/nsproxy.h>
63776+#include <linux/gracl.h>
63777+#include <linux/grdefs.h>
63778+#include <linux/grmsg.h>
63779+
63780+void gr_add_learn_entry(const char *fmt, ...)
63781+ __attribute__ ((format (printf, 1, 2)));
63782+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
63783+ const struct vfsmount *mnt);
63784+__u32 gr_check_create(const struct dentry *new_dentry,
63785+ const struct dentry *parent,
63786+ const struct vfsmount *mnt, const __u32 mode);
63787+int gr_check_protected_task(const struct task_struct *task);
63788+__u32 to_gr_audit(const __u32 reqmode);
63789+int gr_set_acls(const int type);
63790+int gr_apply_subject_to_task(struct task_struct *task);
63791+int gr_acl_is_enabled(void);
63792+char gr_roletype_to_char(void);
63793+
63794+void gr_handle_alertkill(struct task_struct *task);
63795+char *gr_to_filename(const struct dentry *dentry,
63796+ const struct vfsmount *mnt);
63797+char *gr_to_filename1(const struct dentry *dentry,
63798+ const struct vfsmount *mnt);
63799+char *gr_to_filename2(const struct dentry *dentry,
63800+ const struct vfsmount *mnt);
63801+char *gr_to_filename3(const struct dentry *dentry,
63802+ const struct vfsmount *mnt);
63803+
63804+extern int grsec_enable_ptrace_readexec;
63805+extern int grsec_enable_harden_ptrace;
63806+extern int grsec_enable_link;
63807+extern int grsec_enable_fifo;
63808+extern int grsec_enable_execve;
63809+extern int grsec_enable_shm;
63810+extern int grsec_enable_execlog;
63811+extern int grsec_enable_signal;
63812+extern int grsec_enable_audit_ptrace;
63813+extern int grsec_enable_forkfail;
63814+extern int grsec_enable_time;
63815+extern int grsec_enable_rofs;
63816+extern int grsec_enable_chroot_shmat;
63817+extern int grsec_enable_chroot_mount;
63818+extern int grsec_enable_chroot_double;
63819+extern int grsec_enable_chroot_pivot;
63820+extern int grsec_enable_chroot_chdir;
63821+extern int grsec_enable_chroot_chmod;
63822+extern int grsec_enable_chroot_mknod;
63823+extern int grsec_enable_chroot_fchdir;
63824+extern int grsec_enable_chroot_nice;
63825+extern int grsec_enable_chroot_execlog;
63826+extern int grsec_enable_chroot_caps;
63827+extern int grsec_enable_chroot_sysctl;
63828+extern int grsec_enable_chroot_unix;
63829+extern int grsec_enable_symlinkown;
63830+extern int grsec_symlinkown_gid;
63831+extern int grsec_enable_tpe;
63832+extern int grsec_tpe_gid;
63833+extern int grsec_enable_tpe_all;
63834+extern int grsec_enable_tpe_invert;
63835+extern int grsec_enable_socket_all;
63836+extern int grsec_socket_all_gid;
63837+extern int grsec_enable_socket_client;
63838+extern int grsec_socket_client_gid;
63839+extern int grsec_enable_socket_server;
63840+extern int grsec_socket_server_gid;
63841+extern int grsec_audit_gid;
63842+extern int grsec_enable_group;
63843+extern int grsec_enable_audit_textrel;
63844+extern int grsec_enable_log_rwxmaps;
63845+extern int grsec_enable_mount;
63846+extern int grsec_enable_chdir;
63847+extern int grsec_resource_logging;
63848+extern int grsec_enable_blackhole;
63849+extern int grsec_lastack_retries;
63850+extern int grsec_enable_brute;
63851+extern int grsec_lock;
63852+
63853+extern spinlock_t grsec_alert_lock;
63854+extern unsigned long grsec_alert_wtime;
63855+extern unsigned long grsec_alert_fyet;
63856+
63857+extern spinlock_t grsec_audit_lock;
63858+
63859+extern rwlock_t grsec_exec_file_lock;
63860+
63861+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
63862+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
63863+ (tsk)->exec_file->f_vfsmnt) : "/")
63864+
63865+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
63866+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
63867+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
63868+
63869+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
63870+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
63871+ (tsk)->exec_file->f_vfsmnt) : "/")
63872+
63873+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
63874+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
63875+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
63876+
63877+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
63878+
63879+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
63880+
63881+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
63882+ (task)->pid, (cred)->uid, \
63883+ (cred)->euid, (cred)->gid, (cred)->egid, \
63884+ gr_parent_task_fullpath(task), \
63885+ (task)->real_parent->comm, (task)->real_parent->pid, \
63886+ (pcred)->uid, (pcred)->euid, \
63887+ (pcred)->gid, (pcred)->egid
63888+
63889+#define GR_CHROOT_CAPS {{ \
63890+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
63891+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
63892+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
63893+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
63894+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
63895+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
63896+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
63897+
63898+#define security_learn(normal_msg,args...) \
63899+({ \
63900+ read_lock(&grsec_exec_file_lock); \
63901+ gr_add_learn_entry(normal_msg "\n", ## args); \
63902+ read_unlock(&grsec_exec_file_lock); \
63903+})
63904+
63905+enum {
63906+ GR_DO_AUDIT,
63907+ GR_DONT_AUDIT,
63908+ /* used for non-audit messages that we shouldn't kill the task on */
63909+ GR_DONT_AUDIT_GOOD
63910+};
63911+
63912+enum {
63913+ GR_TTYSNIFF,
63914+ GR_RBAC,
63915+ GR_RBAC_STR,
63916+ GR_STR_RBAC,
63917+ GR_RBAC_MODE2,
63918+ GR_RBAC_MODE3,
63919+ GR_FILENAME,
63920+ GR_SYSCTL_HIDDEN,
63921+ GR_NOARGS,
63922+ GR_ONE_INT,
63923+ GR_ONE_INT_TWO_STR,
63924+ GR_ONE_STR,
63925+ GR_STR_INT,
63926+ GR_TWO_STR_INT,
63927+ GR_TWO_INT,
63928+ GR_TWO_U64,
63929+ GR_THREE_INT,
63930+ GR_FIVE_INT_TWO_STR,
63931+ GR_TWO_STR,
63932+ GR_THREE_STR,
63933+ GR_FOUR_STR,
63934+ GR_STR_FILENAME,
63935+ GR_FILENAME_STR,
63936+ GR_FILENAME_TWO_INT,
63937+ GR_FILENAME_TWO_INT_STR,
63938+ GR_TEXTREL,
63939+ GR_PTRACE,
63940+ GR_RESOURCE,
63941+ GR_CAP,
63942+ GR_SIG,
63943+ GR_SIG2,
63944+ GR_CRASH1,
63945+ GR_CRASH2,
63946+ GR_PSACCT,
63947+ GR_RWXMAP
63948+};
63949+
63950+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
63951+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
63952+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
63953+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
63954+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
63955+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
63956+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
63957+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
63958+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
63959+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
63960+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
63961+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
63962+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
63963+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
63964+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
63965+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
63966+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
63967+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
63968+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
63969+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
63970+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
63971+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
63972+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
63973+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
63974+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
63975+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
63976+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
63977+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
63978+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
63979+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
63980+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
63981+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
63982+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
63983+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
63984+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
63985+
63986+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
63987+
63988+#endif
63989+
63990+#endif
63991diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
63992new file mode 100644
63993index 0000000..2bd4c8d
63994--- /dev/null
63995+++ b/include/linux/grmsg.h
63996@@ -0,0 +1,111 @@
63997+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
63998+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
63999+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
64000+#define GR_STOPMOD_MSG "denied modification of module state by "
64001+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
64002+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
64003+#define GR_IOPERM_MSG "denied use of ioperm() by "
64004+#define GR_IOPL_MSG "denied use of iopl() by "
64005+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
64006+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
64007+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
64008+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
64009+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
64010+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
64011+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
64012+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
64013+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
64014+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
64015+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
64016+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
64017+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
64018+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
64019+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
64020+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
64021+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
64022+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
64023+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
64024+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
64025+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
64026+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
64027+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
64028+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
64029+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
64030+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
64031+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
64032+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
64033+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
64034+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
64035+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
64036+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
64037+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
64038+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
64039+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
64040+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
64041+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
64042+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
64043+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
64044+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
64045+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
64046+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
64047+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
64048+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
64049+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
64050+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
64051+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
64052+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
64053+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
64054+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
64055+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
64056+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
64057+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
64058+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
64059+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
64060+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
64061+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
64062+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
64063+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
64064+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
64065+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
64066+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
64067+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
64068+#define GR_FAILFORK_MSG "failed fork with errno %s by "
64069+#define GR_NICE_CHROOT_MSG "denied priority change by "
64070+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
64071+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
64072+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
64073+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
64074+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
64075+#define GR_TIME_MSG "time set by "
64076+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
64077+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
64078+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
64079+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
64080+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
64081+#define GR_BIND_MSG "denied bind() by "
64082+#define GR_CONNECT_MSG "denied connect() by "
64083+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
64084+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
64085+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
64086+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
64087+#define GR_CAP_ACL_MSG "use of %s denied for "
64088+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
64089+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
64090+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
64091+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
64092+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
64093+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
64094+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
64095+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
64096+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
64097+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
64098+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
64099+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
64100+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
64101+#define GR_VM86_MSG "denied use of vm86 by "
64102+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
64103+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
64104+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
64105+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
64106+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
64107+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
64108diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
64109new file mode 100644
64110index 0000000..f2f5d5b
64111--- /dev/null
64112+++ b/include/linux/grsecurity.h
64113@@ -0,0 +1,239 @@
64114+#ifndef GR_SECURITY_H
64115+#define GR_SECURITY_H
64116+#include <linux/fs.h>
64117+#include <linux/fs_struct.h>
64118+#include <linux/binfmts.h>
64119+#include <linux/gracl.h>
64120+
64121+/* notify of brain-dead configs */
64122+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64123+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
64124+#endif
64125+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
64126+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
64127+#endif
64128+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
64129+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
64130+#endif
64131+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
64132+#error "CONFIG_PAX enabled, but no PaX options are enabled."
64133+#endif
64134+
64135+#include <linux/compat.h>
64136+
64137+struct user_arg_ptr {
64138+#ifdef CONFIG_COMPAT
64139+ bool is_compat;
64140+#endif
64141+ union {
64142+ const char __user *const __user *native;
64143+#ifdef CONFIG_COMPAT
64144+ const compat_uptr_t __user *compat;
64145+#endif
64146+ } ptr;
64147+};
64148+
64149+void gr_handle_brute_attach(unsigned long mm_flags);
64150+void gr_handle_brute_check(void);
64151+void gr_handle_kernel_exploit(void);
64152+int gr_process_user_ban(void);
64153+
64154+char gr_roletype_to_char(void);
64155+
64156+int gr_acl_enable_at_secure(void);
64157+
64158+int gr_check_user_change(int real, int effective, int fs);
64159+int gr_check_group_change(int real, int effective, int fs);
64160+
64161+void gr_del_task_from_ip_table(struct task_struct *p);
64162+
64163+int gr_pid_is_chrooted(struct task_struct *p);
64164+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
64165+int gr_handle_chroot_nice(void);
64166+int gr_handle_chroot_sysctl(const int op);
64167+int gr_handle_chroot_setpriority(struct task_struct *p,
64168+ const int niceval);
64169+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
64170+int gr_handle_chroot_chroot(const struct dentry *dentry,
64171+ const struct vfsmount *mnt);
64172+void gr_handle_chroot_chdir(struct path *path);
64173+int gr_handle_chroot_chmod(const struct dentry *dentry,
64174+ const struct vfsmount *mnt, const int mode);
64175+int gr_handle_chroot_mknod(const struct dentry *dentry,
64176+ const struct vfsmount *mnt, const int mode);
64177+int gr_handle_chroot_mount(const struct dentry *dentry,
64178+ const struct vfsmount *mnt,
64179+ const char *dev_name);
64180+int gr_handle_chroot_pivot(void);
64181+int gr_handle_chroot_unix(const pid_t pid);
64182+
64183+int gr_handle_rawio(const struct inode *inode);
64184+
64185+void gr_handle_ioperm(void);
64186+void gr_handle_iopl(void);
64187+
64188+umode_t gr_acl_umask(void);
64189+
64190+int gr_tpe_allow(const struct file *file);
64191+
64192+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
64193+void gr_clear_chroot_entries(struct task_struct *task);
64194+
64195+void gr_log_forkfail(const int retval);
64196+void gr_log_timechange(void);
64197+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
64198+void gr_log_chdir(const struct dentry *dentry,
64199+ const struct vfsmount *mnt);
64200+void gr_log_chroot_exec(const struct dentry *dentry,
64201+ const struct vfsmount *mnt);
64202+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
64203+void gr_log_remount(const char *devname, const int retval);
64204+void gr_log_unmount(const char *devname, const int retval);
64205+void gr_log_mount(const char *from, const char *to, const int retval);
64206+void gr_log_textrel(struct vm_area_struct *vma);
64207+void gr_log_rwxmmap(struct file *file);
64208+void gr_log_rwxmprotect(struct file *file);
64209+
64210+int gr_handle_follow_link(const struct inode *parent,
64211+ const struct inode *inode,
64212+ const struct dentry *dentry,
64213+ const struct vfsmount *mnt);
64214+int gr_handle_fifo(const struct dentry *dentry,
64215+ const struct vfsmount *mnt,
64216+ const struct dentry *dir, const int flag,
64217+ const int acc_mode);
64218+int gr_handle_hardlink(const struct dentry *dentry,
64219+ const struct vfsmount *mnt,
64220+ struct inode *inode,
64221+ const int mode, const struct filename *to);
64222+
64223+int gr_is_capable(const int cap);
64224+int gr_is_capable_nolog(const int cap);
64225+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
64226+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
64227+
64228+void gr_learn_resource(const struct task_struct *task, const int limit,
64229+ const unsigned long wanted, const int gt);
64230+void gr_copy_label(struct task_struct *tsk);
64231+void gr_handle_crash(struct task_struct *task, const int sig);
64232+int gr_handle_signal(const struct task_struct *p, const int sig);
64233+int gr_check_crash_uid(const uid_t uid);
64234+int gr_check_protected_task(const struct task_struct *task);
64235+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
64236+int gr_acl_handle_mmap(const struct file *file,
64237+ const unsigned long prot);
64238+int gr_acl_handle_mprotect(const struct file *file,
64239+ const unsigned long prot);
64240+int gr_check_hidden_task(const struct task_struct *tsk);
64241+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
64242+ const struct vfsmount *mnt);
64243+__u32 gr_acl_handle_utime(const struct dentry *dentry,
64244+ const struct vfsmount *mnt);
64245+__u32 gr_acl_handle_access(const struct dentry *dentry,
64246+ const struct vfsmount *mnt, const int fmode);
64247+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
64248+ const struct vfsmount *mnt, umode_t *mode);
64249+__u32 gr_acl_handle_chown(const struct dentry *dentry,
64250+ const struct vfsmount *mnt);
64251+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
64252+ const struct vfsmount *mnt);
64253+int gr_handle_ptrace(struct task_struct *task, const long request);
64254+int gr_handle_proc_ptrace(struct task_struct *task);
64255+__u32 gr_acl_handle_execve(const struct dentry *dentry,
64256+ const struct vfsmount *mnt);
64257+int gr_check_crash_exec(const struct file *filp);
64258+int gr_acl_is_enabled(void);
64259+void gr_set_kernel_label(struct task_struct *task);
64260+void gr_set_role_label(struct task_struct *task, const uid_t uid,
64261+ const gid_t gid);
64262+int gr_set_proc_label(const struct dentry *dentry,
64263+ const struct vfsmount *mnt,
64264+ const int unsafe_flags);
64265+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
64266+ const struct vfsmount *mnt);
64267+__u32 gr_acl_handle_open(const struct dentry *dentry,
64268+ const struct vfsmount *mnt, int acc_mode);
64269+__u32 gr_acl_handle_creat(const struct dentry *dentry,
64270+ const struct dentry *p_dentry,
64271+ const struct vfsmount *p_mnt,
64272+ int open_flags, int acc_mode, const int imode);
64273+void gr_handle_create(const struct dentry *dentry,
64274+ const struct vfsmount *mnt);
64275+void gr_handle_proc_create(const struct dentry *dentry,
64276+ const struct inode *inode);
64277+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
64278+ const struct dentry *parent_dentry,
64279+ const struct vfsmount *parent_mnt,
64280+ const int mode);
64281+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
64282+ const struct dentry *parent_dentry,
64283+ const struct vfsmount *parent_mnt);
64284+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
64285+ const struct vfsmount *mnt);
64286+void gr_handle_delete(const ino_t ino, const dev_t dev);
64287+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
64288+ const struct vfsmount *mnt);
64289+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
64290+ const struct dentry *parent_dentry,
64291+ const struct vfsmount *parent_mnt,
64292+ const struct filename *from);
64293+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
64294+ const struct dentry *parent_dentry,
64295+ const struct vfsmount *parent_mnt,
64296+ const struct dentry *old_dentry,
64297+ const struct vfsmount *old_mnt, const struct filename *to);
64298+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
64299+int gr_acl_handle_rename(struct dentry *new_dentry,
64300+ struct dentry *parent_dentry,
64301+ const struct vfsmount *parent_mnt,
64302+ struct dentry *old_dentry,
64303+ struct inode *old_parent_inode,
64304+ struct vfsmount *old_mnt, const struct filename *newname);
64305+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
64306+ struct dentry *old_dentry,
64307+ struct dentry *new_dentry,
64308+ struct vfsmount *mnt, const __u8 replace);
64309+__u32 gr_check_link(const struct dentry *new_dentry,
64310+ const struct dentry *parent_dentry,
64311+ const struct vfsmount *parent_mnt,
64312+ const struct dentry *old_dentry,
64313+ const struct vfsmount *old_mnt);
64314+int gr_acl_handle_filldir(const struct file *file, const char *name,
64315+ const unsigned int namelen, const ino_t ino);
64316+
64317+__u32 gr_acl_handle_unix(const struct dentry *dentry,
64318+ const struct vfsmount *mnt);
64319+void gr_acl_handle_exit(void);
64320+void gr_acl_handle_psacct(struct task_struct *task, const long code);
64321+int gr_acl_handle_procpidmem(const struct task_struct *task);
64322+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
64323+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
64324+void gr_audit_ptrace(struct task_struct *task);
64325+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
64326+void gr_put_exec_file(struct task_struct *task);
64327+
64328+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
64329+
64330+#ifdef CONFIG_GRKERNSEC
64331+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
64332+void gr_handle_vm86(void);
64333+void gr_handle_mem_readwrite(u64 from, u64 to);
64334+
64335+void gr_log_badprocpid(const char *entry);
64336+
64337+extern int grsec_enable_dmesg;
64338+extern int grsec_disable_privio;
64339+
64340+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
64341+extern int grsec_proc_gid;
64342+#endif
64343+
64344+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
64345+extern int grsec_enable_chroot_findtask;
64346+#endif
64347+#ifdef CONFIG_GRKERNSEC_SETXID
64348+extern int grsec_enable_setxid;
64349+#endif
64350+#endif
64351+
64352+#endif
64353diff --git a/include/linux/grsock.h b/include/linux/grsock.h
64354new file mode 100644
64355index 0000000..e7ffaaf
64356--- /dev/null
64357+++ b/include/linux/grsock.h
64358@@ -0,0 +1,19 @@
64359+#ifndef __GRSOCK_H
64360+#define __GRSOCK_H
64361+
64362+extern void gr_attach_curr_ip(const struct sock *sk);
64363+extern int gr_handle_sock_all(const int family, const int type,
64364+ const int protocol);
64365+extern int gr_handle_sock_server(const struct sockaddr *sck);
64366+extern int gr_handle_sock_server_other(const struct sock *sck);
64367+extern int gr_handle_sock_client(const struct sockaddr *sck);
64368+extern int gr_search_connect(struct socket * sock,
64369+ struct sockaddr_in * addr);
64370+extern int gr_search_bind(struct socket * sock,
64371+ struct sockaddr_in * addr);
64372+extern int gr_search_listen(struct socket * sock);
64373+extern int gr_search_accept(struct socket * sock);
64374+extern int gr_search_socket(const int domain, const int type,
64375+ const int protocol);
64376+
64377+#endif
64378diff --git a/include/linux/highmem.h b/include/linux/highmem.h
64379index ef788b5..ac41b7b 100644
64380--- a/include/linux/highmem.h
64381+++ b/include/linux/highmem.h
64382@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
64383 kunmap_atomic(kaddr);
64384 }
64385
64386+static inline void sanitize_highpage(struct page *page)
64387+{
64388+ void *kaddr;
64389+ unsigned long flags;
64390+
64391+ local_irq_save(flags);
64392+ kaddr = kmap_atomic(page);
64393+ clear_page(kaddr);
64394+ kunmap_atomic(kaddr);
64395+ local_irq_restore(flags);
64396+}
64397+
64398 static inline void zero_user_segments(struct page *page,
64399 unsigned start1, unsigned end1,
64400 unsigned start2, unsigned end2)
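
The sanitize_highpage() helper above scrubs a page's contents through a short-lived kmap_atomic() mapping, with interrupts held off for the duration of the window. PaX's memory-sanitize feature uses this kind of helper to zero pages as they are freed, so stale data cannot leak to the page's next owner. A minimal sketch of a caller, where the config gate and the final-free helper are assumed names for illustration, not symbols from this patch:

        #include <linux/highmem.h>

        static void free_page_sanitized(struct page *page)
        {
                /* pax_sanitize_enabled and release_to_allocator() are
                 * placeholders, not definitions from this patch */
                if (pax_sanitize_enabled)
                        sanitize_highpage(page);  /* zero before reuse */
                release_to_allocator(page);
        }
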
64401diff --git a/include/linux/i2c.h b/include/linux/i2c.h
64402index 800de22..7a2fa46 100644
64403--- a/include/linux/i2c.h
64404+++ b/include/linux/i2c.h
64405@@ -367,6 +367,7 @@ struct i2c_algorithm {
64406 /* To determine what the adapter supports */
64407 u32 (*functionality) (struct i2c_adapter *);
64408 };
64409+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
64410
64411 /*
64412 * i2c_adapter is the structure used to identify a physical i2c bus along
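
The i2c hunk shows one half of a pattern applied throughout this patch: a constify plugin turns ops structures into read-only data, and a __no_const typedef gives the rare driver that must assemble its table at runtime an explicitly writable alias. A sketch of both sides, with my_functionality assumed for illustration:

        #include <linux/i2c.h>

        static u32 my_functionality(struct i2c_adapter *adap)
        {
                return I2C_FUNC_I2C;
        }

        /* the common case: a fixed table, made read-only by the plugin */
        static struct i2c_algorithm fixed_algo = {
                .functionality = my_functionality,
        };

        /* the escape hatch: filled in at probe time, so it stays writable */
        static i2c_algorithm_no_const runtime_algo;

        static void my_probe_setup(void)
        {
                runtime_algo.functionality = my_functionality;
        }
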
64413diff --git a/include/linux/i2o.h b/include/linux/i2o.h
64414index d23c3c2..eb63c81 100644
64415--- a/include/linux/i2o.h
64416+++ b/include/linux/i2o.h
64417@@ -565,7 +565,7 @@ struct i2o_controller {
64418 struct i2o_device *exec; /* Executive */
64419 #if BITS_PER_LONG == 64
64420 spinlock_t context_list_lock; /* lock for context_list */
64421- atomic_t context_list_counter; /* needed for unique contexts */
64422+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
64423 struct list_head context_list; /* list of context id's
64424 and pointers */
64425 #endif
64426diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
64427index aff7ad8..3942bbd 100644
64428--- a/include/linux/if_pppox.h
64429+++ b/include/linux/if_pppox.h
64430@@ -76,7 +76,7 @@ struct pppox_proto {
64431 int (*ioctl)(struct socket *sock, unsigned int cmd,
64432 unsigned long arg);
64433 struct module *owner;
64434-};
64435+} __do_const;
64436
64437 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
64438 extern void unregister_pppox_proto(int proto_num);
64439diff --git a/include/linux/init.h b/include/linux/init.h
64440index e59041e..df0a975 100644
64441--- a/include/linux/init.h
64442+++ b/include/linux/init.h
64443@@ -39,9 +39,36 @@
64444 * Also note, that this data cannot be "const".
64445 */
64446
64447+#ifdef MODULE
64448+#define add_init_latent_entropy
64449+#define add_devinit_latent_entropy
64450+#define add_cpuinit_latent_entropy
64451+#define add_meminit_latent_entropy
64452+#else
64453+#define add_init_latent_entropy __latent_entropy
64454+
64455+#ifdef CONFIG_HOTPLUG
64456+#define add_devinit_latent_entropy
64457+#else
64458+#define add_devinit_latent_entropy __latent_entropy
64459+#endif
64460+
64461+#ifdef CONFIG_HOTPLUG_CPU
64462+#define add_cpuinit_latent_entropy
64463+#else
64464+#define add_cpuinit_latent_entropy __latent_entropy
64465+#endif
64466+
64467+#ifdef CONFIG_MEMORY_HOTPLUG
64468+#define add_meminit_latent_entropy
64469+#else
64470+#define add_meminit_latent_entropy __latent_entropy
64471+#endif
64472+#endif
64473+
64474 /* These are for everybody (although not all archs will actually
64475 discard it in modules) */
64476-#define __init __section(.init.text) __cold notrace
64477+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
64478 #define __initdata __section(.init.data)
64479 #define __initconst __constsection(.init.rodata)
64480 #define __exitdata __section(.exit.data)
64481@@ -94,7 +121,7 @@
64482 #define __exit __section(.exit.text) __exitused __cold notrace
64483
64484 /* Used for HOTPLUG */
64485-#define __devinit __section(.devinit.text) __cold notrace
64486+#define __devinit __section(.devinit.text) __cold notrace add_devinit_latent_entropy
64487 #define __devinitdata __section(.devinit.data)
64488 #define __devinitconst __constsection(.devinit.rodata)
64489 #define __devexit __section(.devexit.text) __exitused __cold notrace
64490@@ -102,7 +129,7 @@
64491 #define __devexitconst __constsection(.devexit.rodata)
64492
64493 /* Used for HOTPLUG_CPU */
64494-#define __cpuinit __section(.cpuinit.text) __cold notrace
64495+#define __cpuinit __section(.cpuinit.text) __cold notrace add_cpuinit_latent_entropy
64496 #define __cpuinitdata __section(.cpuinit.data)
64497 #define __cpuinitconst __constsection(.cpuinit.rodata)
64498 #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
64499@@ -110,7 +137,7 @@
64500 #define __cpuexitconst __constsection(.cpuexit.rodata)
64501
64502 /* Used for MEMORY_HOTPLUG */
64503-#define __meminit __section(.meminit.text) __cold notrace
64504+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
64505 #define __meminitdata __section(.meminit.data)
64506 #define __meminitconst __constsection(.meminit.rodata)
64507 #define __memexit __section(.memexit.text) __exitused __cold notrace
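
The gating above follows one rule: the __latent_entropy attribute (a gcc plugin that mixes each annotated function's run into the boot-time entropy pool) is applied only to code guaranteed to run once, at boot, in the built-in kernel. Module builds get nothing, and each hotplug-capable section drops the attribute as soon as the matching hotplug option allows that code to run again later. For example, the effective definition of __cpuinit under the two settings:

        /* CONFIG_HOTPLUG_CPU=n: boot-only code, instrumented */
        #define __cpuinit __section(.cpuinit.text) __cold notrace __latent_entropy

        /* CONFIG_HOTPLUG_CPU=y: may run again on hotplug, left alone */
        #define __cpuinit __section(.cpuinit.text) __cold notrace
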
64508diff --git a/include/linux/init_task.h b/include/linux/init_task.h
64509index 6d087c5..401cab8 100644
64510--- a/include/linux/init_task.h
64511+++ b/include/linux/init_task.h
64512@@ -143,6 +143,12 @@ extern struct task_group root_task_group;
64513
64514 #define INIT_TASK_COMM "swapper"
64515
64516+#ifdef CONFIG_X86
64517+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
64518+#else
64519+#define INIT_TASK_THREAD_INFO
64520+#endif
64521+
64522 /*
64523 * INIT_TASK is used to set up the first task table, touch at
64524 * your own risk!. Base=0, limit=0x1fffff (=2MB)
64525@@ -182,6 +188,7 @@ extern struct task_group root_task_group;
64526 RCU_POINTER_INITIALIZER(cred, &init_cred), \
64527 .comm = INIT_TASK_COMM, \
64528 .thread = INIT_THREAD, \
64529+ INIT_TASK_THREAD_INFO \
64530 .fs = &init_fs, \
64531 .files = &init_files, \
64532 .signal = &init_signals, \
64533diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
64534index 5e4e617..eee383d 100644
64535--- a/include/linux/interrupt.h
64536+++ b/include/linux/interrupt.h
64537@@ -435,7 +435,7 @@ enum
64538 /* map softirq index to softirq name. update 'softirq_to_name' in
64539 * kernel/softirq.c when adding a new softirq.
64540 */
64541-extern char *softirq_to_name[NR_SOFTIRQS];
64542+extern const char * const softirq_to_name[NR_SOFTIRQS];
64543
64544 /* softirq mask and active fields moved to irq_cpustat_t in
64545 * asm/hardirq.h to get better cache usage. KAO
64546@@ -443,12 +443,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
64547
64548 struct softirq_action
64549 {
64550- void (*action)(struct softirq_action *);
64551+ void (*action)(void);
64552 };
64553
64554 asmlinkage void do_softirq(void);
64555 asmlinkage void __do_softirq(void);
64556-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
64557+extern void open_softirq(int nr, void (*action)(void));
64558 extern void softirq_init(void);
64559 extern void __raise_softirq_irqoff(unsigned int nr);
64560
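No softirq handler ever used its struct softirq_action * argument (each handler statically knows which softirq it serves), so the hunk above removes the parameter end to end. A narrower uniform prototype also means a corrupted softirq function pointer can only reach functions of type void (*)(void), the kind of type-based restriction these patches favor. Schematically, for an in-tree handler:

        /* before: a dead parameter on every handler */
        static void tasklet_action(struct softirq_action *a);

        /* after: handler and registration drop it together */
        static void tasklet_action(void);

        static void softirq_setup(void)
        {
                open_softirq(TASKLET_SOFTIRQ, tasklet_action);
        }
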
64561diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
64562index 6883e19..06992b1 100644
64563--- a/include/linux/kallsyms.h
64564+++ b/include/linux/kallsyms.h
64565@@ -15,7 +15,8 @@
64566
64567 struct module;
64568
64569-#ifdef CONFIG_KALLSYMS
64570+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
64571+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
64572 /* Lookup the address for a symbol. Returns 0 if not found. */
64573 unsigned long kallsyms_lookup_name(const char *name);
64574
64575@@ -106,6 +107,17 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
64576 /* Stupid that this does nothing, but I didn't create this mess. */
64577 #define __print_symbol(fmt, addr)
64578 #endif /*CONFIG_KALLSYMS*/
64579+#else /* when included by kallsyms.c, vsnprintf.c, or
64580+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
64581+extern void __print_symbol(const char *fmt, unsigned long address);
64582+extern int sprint_backtrace(char *buffer, unsigned long address);
64583+extern int sprint_symbol(char *buffer, unsigned long address);
64584+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
64585+const char *kallsyms_lookup(unsigned long addr,
64586+ unsigned long *symbolsize,
64587+ unsigned long *offset,
64588+ char **modname, char *namebuf);
64589+#endif
64590
64591 /* This macro allows us to keep printk typechecking */
64592 static __printf(1, 2)
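
The nested preprocessor logic above implements a simple policy despite its looks: without GRKERNSEC_HIDESYM everything behaves as stock, and with it the real kallsyms prototypes are exposed only to the few files that legitimately format symbols (the comment names kallsyms.c, vsnprintf.c and the x86 dumpstack code); everyone else silently gets the no-op stubs, so kernel addresses stay out of general code. The whitelisted files opt in before the include:

        /* opt-in used only by the whitelisted files under HIDESYM */
        #define __INCLUDED_BY_HIDESYM 1
        #include <linux/kallsyms.h>
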
64593diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
64594index 4dff0c6..1ca9b72 100644
64595--- a/include/linux/kgdb.h
64596+++ b/include/linux/kgdb.h
64597@@ -53,7 +53,7 @@ extern int kgdb_connected;
64598 extern int kgdb_io_module_registered;
64599
64600 extern atomic_t kgdb_setting_breakpoint;
64601-extern atomic_t kgdb_cpu_doing_single_step;
64602+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
64603
64604 extern struct task_struct *kgdb_usethread;
64605 extern struct task_struct *kgdb_contthread;
64606@@ -255,7 +255,7 @@ struct kgdb_arch {
64607 void (*correct_hw_break)(void);
64608
64609 void (*enable_nmi)(bool on);
64610-};
64611+} __do_const;
64612
64613 /**
64614 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
64615@@ -280,7 +280,7 @@ struct kgdb_io {
64616 void (*pre_exception) (void);
64617 void (*post_exception) (void);
64618 int is_console;
64619-};
64620+} __do_const;
64621
64622 extern struct kgdb_arch arch_kgdb_ops;
64623
64624diff --git a/include/linux/kmod.h b/include/linux/kmod.h
64625index 5398d58..5883a34 100644
64626--- a/include/linux/kmod.h
64627+++ b/include/linux/kmod.h
64628@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
64629 * usually useless though. */
64630 extern __printf(2, 3)
64631 int __request_module(bool wait, const char *name, ...);
64632+extern __printf(3, 4)
64633+int ___request_module(bool wait, char *param_name, const char *name, ...);
64634 #define request_module(mod...) __request_module(true, mod)
64635 #define request_module_nowait(mod...) __request_module(false, mod)
64636 #define try_then_request_module(x, mod...) \
64637diff --git a/include/linux/kobject.h b/include/linux/kobject.h
64638index 1e57449..4fede7b 100644
64639--- a/include/linux/kobject.h
64640+++ b/include/linux/kobject.h
64641@@ -111,7 +111,7 @@ struct kobj_type {
64642 struct attribute **default_attrs;
64643 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
64644 const void *(*namespace)(struct kobject *kobj);
64645-};
64646+} __do_const;
64647
64648 struct kobj_uevent_env {
64649 char *envp[UEVENT_NUM_ENVP];
64650diff --git a/include/linux/kref.h b/include/linux/kref.h
64651index 65af688..0592677 100644
64652--- a/include/linux/kref.h
64653+++ b/include/linux/kref.h
64654@@ -64,7 +64,7 @@ static inline void kref_get(struct kref *kref)
64655 static inline int kref_sub(struct kref *kref, unsigned int count,
64656 void (*release)(struct kref *kref))
64657 {
64658- WARN_ON(release == NULL);
64659+ BUG_ON(release == NULL);
64660
64661 if (atomic_sub_and_test((int) count, &kref->refcount)) {
64662 release(kref);
64663diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
64664index ecc5543..0e96bcc 100644
64665--- a/include/linux/kvm_host.h
64666+++ b/include/linux/kvm_host.h
64667@@ -403,7 +403,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
64668 int __must_check vcpu_load(struct kvm_vcpu *vcpu);
64669 void vcpu_put(struct kvm_vcpu *vcpu);
64670
64671-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
64672+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
64673 struct module *module);
64674 void kvm_exit(void);
64675
64676@@ -558,7 +558,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
64677 struct kvm_guest_debug *dbg);
64678 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
64679
64680-int kvm_arch_init(void *opaque);
64681+int kvm_arch_init(const void *opaque);
64682 void kvm_arch_exit(void);
64683
64684 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
64685diff --git a/include/linux/libata.h b/include/linux/libata.h
64686index e931c9a..7aa8f6f 100644
64687--- a/include/linux/libata.h
64688+++ b/include/linux/libata.h
64689@@ -915,7 +915,7 @@ struct ata_port_operations {
64690 * fields must be pointers.
64691 */
64692 const struct ata_port_operations *inherits;
64693-};
64694+} __do_const;
64695
64696 struct ata_port_info {
64697 unsigned long flags;
64698diff --git a/include/linux/list.h b/include/linux/list.h
64699index cc6d2aa..71febca 100644
64700--- a/include/linux/list.h
64701+++ b/include/linux/list.h
64702@@ -112,6 +112,9 @@ extern void __list_del_entry(struct list_head *entry);
64703 extern void list_del(struct list_head *entry);
64704 #endif
64705
64706+extern void pax_list_add_tail(struct list_head *new, struct list_head *head);
64707+extern void pax_list_del(struct list_head *entry);
64708+
64709 /**
64710 * list_replace - replace old entry by new one
64711 * @old : the element to be replaced
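
pax_list_add_tail() and pax_list_del() exist for lists whose nodes the patch places in read-only memory (constified ops structures, notifier blocks and the like), where a plain list_add() or list_del() would fault under KERNEXEC. A sketch of the shape such a helper takes, assuming the pax_open_kernel()/pax_close_kernel() write-window primitives defined elsewhere in this patch; this is not the patch's verbatim definition:

        #include <linux/list.h>
        #include <linux/poison.h>

        void pax_list_del(struct list_head *entry)
        {
                pax_open_kernel();      /* briefly allow writes to r/o data */
                __list_del(entry->prev, entry->next);
                entry->next = LIST_POISON1;
                entry->prev = LIST_POISON2;
                pax_close_kernel();
        }
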
64712diff --git a/include/linux/mm.h b/include/linux/mm.h
64713index 280dae5..39046ec 100644
64714--- a/include/linux/mm.h
64715+++ b/include/linux/mm.h
64716@@ -101,6 +101,11 @@ extern unsigned int kobjsize(const void *objp);
64717 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
64718 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
64719 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
64720+
64721+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64722+#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
64723+#endif
64724+
64725 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
64726
64727 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
64728@@ -1039,34 +1044,6 @@ int set_page_dirty(struct page *page);
64729 int set_page_dirty_lock(struct page *page);
64730 int clear_page_dirty_for_io(struct page *page);
64731
64732-/* Is the vma a continuation of the stack vma above it? */
64733-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
64734-{
64735- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
64736-}
64737-
64738-static inline int stack_guard_page_start(struct vm_area_struct *vma,
64739- unsigned long addr)
64740-{
64741- return (vma->vm_flags & VM_GROWSDOWN) &&
64742- (vma->vm_start == addr) &&
64743- !vma_growsdown(vma->vm_prev, addr);
64744-}
64745-
64746-/* Is the vma a continuation of the stack vma below it? */
64747-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
64748-{
64749- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
64750-}
64751-
64752-static inline int stack_guard_page_end(struct vm_area_struct *vma,
64753- unsigned long addr)
64754-{
64755- return (vma->vm_flags & VM_GROWSUP) &&
64756- (vma->vm_end == addr) &&
64757- !vma_growsup(vma->vm_next, addr);
64758-}
64759-
64760 extern pid_t
64761 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
64762
64763@@ -1166,6 +1143,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
64764 }
64765 #endif
64766
64767+#ifdef CONFIG_MMU
64768+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
64769+#else
64770+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
64771+{
64772+ return __pgprot(0);
64773+}
64774+#endif
64775+
64776 int vma_wants_writenotify(struct vm_area_struct *vma);
64777
64778 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
64779@@ -1184,8 +1170,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
64780 {
64781 return 0;
64782 }
64783+
64784+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
64785+ unsigned long address)
64786+{
64787+ return 0;
64788+}
64789 #else
64790 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
64791+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
64792 #endif
64793
64794 #ifdef __PAGETABLE_PMD_FOLDED
64795@@ -1194,8 +1187,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
64796 {
64797 return 0;
64798 }
64799+
64800+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
64801+ unsigned long address)
64802+{
64803+ return 0;
64804+}
64805 #else
64806 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
64807+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
64808 #endif
64809
64810 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
64811@@ -1213,11 +1213,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
64812 NULL: pud_offset(pgd, address);
64813 }
64814
64815+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
64816+{
64817+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
64818+ NULL: pud_offset(pgd, address);
64819+}
64820+
64821 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
64822 {
64823 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
64824 NULL: pmd_offset(pud, address);
64825 }
64826+
64827+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
64828+{
64829+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
64830+ NULL: pmd_offset(pud, address);
64831+}
64832 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
64833
64834 #if USE_SPLIT_PTLOCKS
64835@@ -1447,6 +1459,7 @@ extern unsigned long do_mmap_pgoff(struct file *, unsigned long,
64836 unsigned long, unsigned long,
64837 unsigned long, unsigned long);
64838 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
64839+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
64840
64841 /* These take the mm semaphore themselves */
64842 extern unsigned long vm_brk(unsigned long, unsigned long);
64843@@ -1510,6 +1523,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
64844 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
64845 struct vm_area_struct **pprev);
64846
64847+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
64848+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
64849+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
64850+
64851 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
64852 NULL if none. Assume start_addr < end_addr. */
64853 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
64854@@ -1538,15 +1555,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
64855 return vma;
64856 }
64857
64858-#ifdef CONFIG_MMU
64859-pgprot_t vm_get_page_prot(unsigned long vm_flags);
64860-#else
64861-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
64862-{
64863- return __pgprot(0);
64864-}
64865-#endif
64866-
64867 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
64868 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
64869 unsigned long pfn, unsigned long size, pgprot_t);
64870@@ -1652,7 +1660,7 @@ extern int unpoison_memory(unsigned long pfn);
64871 extern int sysctl_memory_failure_early_kill;
64872 extern int sysctl_memory_failure_recovery;
64873 extern void shake_page(struct page *p, int access);
64874-extern atomic_long_t mce_bad_pages;
64875+extern atomic_long_unchecked_t mce_bad_pages;
64876 extern int soft_offline_page(struct page *page, int flags);
64877
64878 extern void dump_page(struct page *page);
64879@@ -1683,5 +1691,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
64880 static inline bool page_is_guard(struct page *page) { return false; }
64881 #endif /* CONFIG_DEBUG_PAGEALLOC */
64882
64883+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
64884+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
64885+#else
64886+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
64887+#endif
64888+
64889 #endif /* __KERNEL__ */
64890 #endif /* _LINUX_MM_H */
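
The new __pud_alloc_kernel()/__pmd_alloc_kernel() pair and their inline wrappers mirror the user-space allocators one level up; the kernel variants exist so kernel page tables can be grown with the *_populate_kernel constructors rather than the user ones. A hypothetical walker over a kernel virtual address shows how they compose (a sketch: using init_mm as the owner and assuming the pte level is already populated):

        #include <linux/mm.h>

        static pte_t *walk_kernel_addr(unsigned long addr)
        {
                pgd_t *pgd = pgd_offset_k(addr);
                pud_t *pud = pud_alloc_kernel(&init_mm, pgd, addr);
                pmd_t *pmd;

                if (!pud)
                        return NULL;
                pmd = pmd_alloc_kernel(&init_mm, pud, addr);
                if (!pmd)
                        return NULL;
                return pte_offset_kernel(pmd, addr);
        }
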
64891diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
64892index 31f8a3a..499f1db 100644
64893--- a/include/linux/mm_types.h
64894+++ b/include/linux/mm_types.h
64895@@ -275,6 +275,8 @@ struct vm_area_struct {
64896 #ifdef CONFIG_NUMA
64897 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
64898 #endif
64899+
64900+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
64901 };
64902
64903 struct core_thread {
64904@@ -348,7 +350,7 @@ struct mm_struct {
64905 unsigned long def_flags;
64906 unsigned long nr_ptes; /* Page table pages */
64907 unsigned long start_code, end_code, start_data, end_data;
64908- unsigned long start_brk, brk, start_stack;
64909+ unsigned long brk_gap, start_brk, brk, start_stack;
64910 unsigned long arg_start, arg_end, env_start, env_end;
64911
64912 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
64913@@ -399,6 +401,24 @@ struct mm_struct {
64914 struct cpumask cpumask_allocation;
64915 #endif
64916 struct uprobes_state uprobes_state;
64917+
64918+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
64919+ unsigned long pax_flags;
64920+#endif
64921+
64922+#ifdef CONFIG_PAX_DLRESOLVE
64923+ unsigned long call_dl_resolve;
64924+#endif
64925+
64926+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
64927+ unsigned long call_syscall;
64928+#endif
64929+
64930+#ifdef CONFIG_PAX_ASLR
64931+ unsigned long delta_mmap; /* randomized offset */
64932+ unsigned long delta_stack; /* randomized offset */
64933+#endif
64934+
64935 };
64936
64937 static inline void mm_init_cpumask(struct mm_struct *mm)
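
The mm_struct additions carry PaX state per address space: pax_flags records the hardening mode chosen for the binary, while delta_mmap and delta_stack hold randomization offsets picked once at exec() and applied to every later placement, so PAX_RANDMMAP shifts the whole layout instead of jittering individual mappings. Roughly (the real adjustment lives in the arch mmap-layout code; this is only a sketch):

        static void pick_mmap_layout_sketch(struct mm_struct *mm)
        {
                mm->mmap_base = TASK_UNMAPPED_BASE;

        #ifdef CONFIG_PAX_ASLR
                if (mm->pax_flags & MF_PAX_RANDMMAP)
                        mm->mmap_base += mm->delta_mmap; /* fixed per-exec shift */
        #endif
        }
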
64938diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
64939index c5d5278..f0b68c8 100644
64940--- a/include/linux/mmiotrace.h
64941+++ b/include/linux/mmiotrace.h
64942@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
64943 /* Called from ioremap.c */
64944 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
64945 void __iomem *addr);
64946-extern void mmiotrace_iounmap(volatile void __iomem *addr);
64947+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
64948
64949 /* For anyone to insert markers. Remember trailing newline. */
64950 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
64951@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
64952 {
64953 }
64954
64955-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
64956+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
64957 {
64958 }
64959
64960diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
64961index a23923b..073fee4 100644
64962--- a/include/linux/mmzone.h
64963+++ b/include/linux/mmzone.h
64964@@ -421,7 +421,7 @@ struct zone {
64965 unsigned long flags; /* zone flags, see below */
64966
64967 /* Zone statistics */
64968- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64969+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64970
64971 /*
64972 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
64973diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
64974index fed3def..7cc3f93 100644
64975--- a/include/linux/mod_devicetable.h
64976+++ b/include/linux/mod_devicetable.h
64977@@ -12,7 +12,7 @@
64978 typedef unsigned long kernel_ulong_t;
64979 #endif
64980
64981-#define PCI_ANY_ID (~0)
64982+#define PCI_ANY_ID ((__u16)~0)
64983
64984 struct pci_device_id {
64985 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
64986@@ -139,7 +139,7 @@ struct usb_device_id {
64987 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
64988 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
64989
64990-#define HID_ANY_ID (~0)
64991+#define HID_ANY_ID (~0U)
64992 #define HID_BUS_ANY 0xffff
64993 #define HID_GROUP_ANY 0x0000
64994
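Both wildcard changes above give the constant an explicit unsigned type of the intended width: plain ~0 is the signed int -1, which sign-extends or narrows depending on what it meets, exactly the mixed-sign arithmetic the patch's size_overflow instrumentation objects to. A stand-alone demonstration of the difference:

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint32_t old_any = ~0;           /* -1 converted: 0xffffffff */
                uint32_t new_any = (uint16_t)~0; /* 0x0000ffff, no extension */

                printf("old: %#x  new: %#x\n", old_any, new_any);
                return 0;
        }
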
64995diff --git a/include/linux/module.h b/include/linux/module.h
64996index 7760c6d..983ee18 100644
64997--- a/include/linux/module.h
64998+++ b/include/linux/module.h
64999@@ -17,9 +17,11 @@
65000 #include <linux/moduleparam.h>
65001 #include <linux/tracepoint.h>
65002 #include <linux/export.h>
65003+#include <linux/fs.h>
65004
65005 #include <linux/percpu.h>
65006 #include <asm/module.h>
65007+#include <asm/pgtable.h>
65008
65009 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
65010 #define MODULE_SIG_STRING "~Module signature appended~\n"
65011@@ -281,19 +283,16 @@ struct module
65012 int (*init)(void);
65013
65014 /* If this is non-NULL, vfree after init() returns */
65015- void *module_init;
65016+ void *module_init_rx, *module_init_rw;
65017
65018 /* Here is the actual code + data, vfree'd on unload. */
65019- void *module_core;
65020+ void *module_core_rx, *module_core_rw;
65021
65022 /* Here are the sizes of the init and core sections */
65023- unsigned int init_size, core_size;
65024+ unsigned int init_size_rw, core_size_rw;
65025
65026 /* The size of the executable code in each section. */
65027- unsigned int init_text_size, core_text_size;
65028-
65029- /* Size of RO sections of the module (text+rodata) */
65030- unsigned int init_ro_size, core_ro_size;
65031+ unsigned int init_size_rx, core_size_rx;
65032
65033 /* Arch-specific module values */
65034 struct mod_arch_specific arch;
65035@@ -349,6 +348,10 @@ struct module
65036 #ifdef CONFIG_EVENT_TRACING
65037 struct ftrace_event_call **trace_events;
65038 unsigned int num_trace_events;
65039+ struct file_operations trace_id;
65040+ struct file_operations trace_enable;
65041+ struct file_operations trace_format;
65042+ struct file_operations trace_filter;
65043 #endif
65044 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
65045 unsigned int num_ftrace_callsites;
65046@@ -396,16 +399,46 @@ bool is_module_address(unsigned long addr);
65047 bool is_module_percpu_address(unsigned long addr);
65048 bool is_module_text_address(unsigned long addr);
65049
65050+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
65051+{
65052+
65053+#ifdef CONFIG_PAX_KERNEXEC
65054+ if (ktla_ktva(addr) >= (unsigned long)start &&
65055+ ktla_ktva(addr) < (unsigned long)start + size)
65056+ return 1;
65057+#endif
65058+
65059+ return ((void *)addr >= start && (void *)addr < start + size);
65060+}
65061+
65062+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
65063+{
65064+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
65065+}
65066+
65067+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
65068+{
65069+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
65070+}
65071+
65072+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
65073+{
65074+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
65075+}
65076+
65077+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
65078+{
65079+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
65080+}
65081+
65082 static inline int within_module_core(unsigned long addr, struct module *mod)
65083 {
65084- return (unsigned long)mod->module_core <= addr &&
65085- addr < (unsigned long)mod->module_core + mod->core_size;
65086+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
65087 }
65088
65089 static inline int within_module_init(unsigned long addr, struct module *mod)
65090 {
65091- return (unsigned long)mod->module_init <= addr &&
65092- addr < (unsigned long)mod->module_init + mod->init_size;
65093+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
65094 }
65095
65096 /* Search for module by name: must hold module_mutex. */
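
Under KERNEXEC the loader splits every module into an executable read-only half (module_core_rx, core_size_rx) and a writable non-executable half (module_core_rw, core_size_rw), which is why the single region pointers and the separate text/rodata size bookkeeping disappear. The within_module_* helpers keep the old predicates working across both halves, and within_module_range() consults ktla_ktva() so addresses match whether they arrive as the kernel text alias or the linear mapping. A caller interested only in code would test just the RX halves:

        #include <linux/module.h>

        /* sketch: under the split, code can live only in the RX regions */
        static int addr_in_module_text(unsigned long addr, struct module *mod)
        {
                return within_module_core_rx(addr, mod) ||
                       within_module_init_rx(addr, mod);
        }
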
65097diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
65098index 560ca53..5ee8d73 100644
65099--- a/include/linux/moduleloader.h
65100+++ b/include/linux/moduleloader.h
65101@@ -23,11 +23,23 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
65102
65103 /* Allocator used for allocating struct module, core sections and init
65104 sections. Returns NULL on failure. */
65105-void *module_alloc(unsigned long size);
65106+void *module_alloc(unsigned long size) __size_overflow(1);
65107+
65108+#ifdef CONFIG_PAX_KERNEXEC
65109+void *module_alloc_exec(unsigned long size) __size_overflow(1);
65110+#else
65111+#define module_alloc_exec(x) module_alloc(x)
65112+#endif
65113
65114 /* Free memory returned from module_alloc. */
65115 void module_free(struct module *mod, void *module_region);
65116
65117+#ifdef CONFIG_PAX_KERNEXEC
65118+void module_free_exec(struct module *mod, void *module_region);
65119+#else
65120+#define module_free_exec(x, y) module_free((x), (y))
65121+#endif
65122+
65123 /*
65124 * Apply the given relocation to the (simplified) ELF. Return -error
65125 * or 0.
65126@@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
65127 unsigned int relsec,
65128 struct module *me)
65129 {
65130+#ifdef CONFIG_MODULES
65131 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
65132+#endif
65133 return -ENOEXEC;
65134 }
65135 #endif
65136@@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
65137 unsigned int relsec,
65138 struct module *me)
65139 {
65140+#ifdef CONFIG_MODULES
65141 	printk(KERN_ERR "module %s: RELA relocation unsupported\n", me->name);
65142+#endif
65143 return -ENOEXEC;
65144 }
65145 #endif
65146diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
65147index d6a5806..7c13347 100644
65148--- a/include/linux/moduleparam.h
65149+++ b/include/linux/moduleparam.h
65150@@ -286,7 +286,7 @@ static inline void __kernel_param_unlock(void)
65151 * @len is usually just sizeof(string).
65152 */
65153 #define module_param_string(name, string, len, perm) \
65154- static const struct kparam_string __param_string_##name \
65155+ static const struct kparam_string __param_string_##name __used \
65156 = { len, string }; \
65157 __module_param_call(MODULE_PARAM_PREFIX, name, \
65158 &param_ops_string, \
65159@@ -425,7 +425,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
65160 */
65161 #define module_param_array_named(name, array, type, nump, perm) \
65162 param_check_##type(name, &(array)[0]); \
65163- static const struct kparam_array __param_arr_##name \
65164+ static const struct kparam_array __param_arr_##name __used \
65165 = { .max = ARRAY_SIZE(array), .num = nump, \
65166 .ops = &param_ops_##type, \
65167 .elemsize = sizeof(array[0]), .elem = array }; \
65168diff --git a/include/linux/namei.h b/include/linux/namei.h
65169index 4bf19d8..5268cea 100644
65170--- a/include/linux/namei.h
65171+++ b/include/linux/namei.h
65172@@ -18,7 +18,7 @@ struct nameidata {
65173 unsigned seq;
65174 int last_type;
65175 unsigned depth;
65176- char *saved_names[MAX_NESTED_LINKS + 1];
65177+ const char *saved_names[MAX_NESTED_LINKS + 1];
65178 };
65179
65180 /*
65181@@ -83,12 +83,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
65182
65183 extern void nd_jump_link(struct nameidata *nd, struct path *path);
65184
65185-static inline void nd_set_link(struct nameidata *nd, char *path)
65186+static inline void nd_set_link(struct nameidata *nd, const char *path)
65187 {
65188 nd->saved_names[nd->depth] = path;
65189 }
65190
65191-static inline char *nd_get_link(struct nameidata *nd)
65192+static inline const char *nd_get_link(const struct nameidata *nd)
65193 {
65194 return nd->saved_names[nd->depth];
65195 }
65196diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
65197index a848ffc..3bbbaee 100644
65198--- a/include/linux/netdevice.h
65199+++ b/include/linux/netdevice.h
65200@@ -999,6 +999,7 @@ struct net_device_ops {
65201 struct net_device *dev,
65202 int idx);
65203 };
65204+typedef struct net_device_ops __no_const net_device_ops_no_const;
65205
65206 /*
65207 * The DEVICE structure.
65208@@ -1059,7 +1060,7 @@ struct net_device {
65209 int iflink;
65210
65211 struct net_device_stats stats;
65212- atomic_long_t rx_dropped; /* dropped packets by core network
65213+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
65214 * Do not use this in drivers.
65215 */
65216
65217diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
65218index 7958e84..ed74d7a 100644
65219--- a/include/linux/netfilter/ipset/ip_set.h
65220+++ b/include/linux/netfilter/ipset/ip_set.h
65221@@ -98,7 +98,7 @@ struct ip_set_type_variant {
65222 /* Return true if "b" set is the same as "a"
65223 * according to the create set parameters */
65224 bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
65225-};
65226+} __do_const;
65227
65228 /* The core set type structure */
65229 struct ip_set_type {
65230diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
65231index 4966dde..7d8ce06 100644
65232--- a/include/linux/netfilter/nfnetlink.h
65233+++ b/include/linux/netfilter/nfnetlink.h
65234@@ -16,7 +16,7 @@ struct nfnl_callback {
65235 const struct nlattr * const cda[]);
65236 const struct nla_policy *policy; /* netlink attribute policy */
65237 const u_int16_t attr_count; /* number of nlattr's */
65238-};
65239+} __do_const;
65240
65241 struct nfnetlink_subsystem {
65242 const char *name;
65243diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
65244new file mode 100644
65245index 0000000..33f4af8
65246--- /dev/null
65247+++ b/include/linux/netfilter/xt_gradm.h
65248@@ -0,0 +1,9 @@
65249+#ifndef _LINUX_NETFILTER_XT_GRADM_H
65250+#define _LINUX_NETFILTER_XT_GRADM_H 1
65251+
65252+struct xt_gradm_mtinfo {
65253+ __u16 flags;
65254+ __u16 invflags;
65255+};
65256+
65257+#endif
65258diff --git a/include/linux/notifier.h b/include/linux/notifier.h
65259index d65746e..62e72c2 100644
65260--- a/include/linux/notifier.h
65261+++ b/include/linux/notifier.h
65262@@ -51,7 +51,8 @@ struct notifier_block {
65263 int (*notifier_call)(struct notifier_block *, unsigned long, void *);
65264 struct notifier_block __rcu *next;
65265 int priority;
65266-};
65267+} __do_const;
65268+typedef struct notifier_block __no_const notifier_block_no_const;
65269
65270 struct atomic_notifier_head {
65271 spinlock_t lock;
65272diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
65273index a4c5624..79d6d88 100644
65274--- a/include/linux/oprofile.h
65275+++ b/include/linux/oprofile.h
65276@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
65277 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
65278 char const * name, ulong * val);
65279
65280-/** Create a file for read-only access to an atomic_t. */
65281+/** Create a file for read-only access to an atomic_unchecked_t. */
65282 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
65283- char const * name, atomic_t * val);
65284+ char const * name, atomic_unchecked_t * val);
65285
65286 /** create a directory */
65287 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
65288diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
65289index 6bfb2faa..1204767 100644
65290--- a/include/linux/perf_event.h
65291+++ b/include/linux/perf_event.h
65292@@ -328,8 +328,8 @@ struct perf_event {
65293
65294 enum perf_event_active_state state;
65295 unsigned int attach_state;
65296- local64_t count;
65297- atomic64_t child_count;
65298+ local64_t count; /* PaX: fix it one day */
65299+ atomic64_unchecked_t child_count;
65300
65301 /*
65302 * These are the total time in nanoseconds that the event
65303@@ -380,8 +380,8 @@ struct perf_event {
65304 * These accumulate total time (in nanoseconds) that children
65305 * events have been enabled and running, respectively.
65306 */
65307- atomic64_t child_total_time_enabled;
65308- atomic64_t child_total_time_running;
65309+ atomic64_unchecked_t child_total_time_enabled;
65310+ atomic64_unchecked_t child_total_time_running;
65311
65312 /*
65313 * Protect attach/detach and child_list:
65314diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
65315index ad1a427..6419649 100644
65316--- a/include/linux/pipe_fs_i.h
65317+++ b/include/linux/pipe_fs_i.h
65318@@ -45,9 +45,9 @@ struct pipe_buffer {
65319 struct pipe_inode_info {
65320 wait_queue_head_t wait;
65321 unsigned int nrbufs, curbuf, buffers;
65322- unsigned int readers;
65323- unsigned int writers;
65324- unsigned int waiting_writers;
65325+ atomic_t readers;
65326+ atomic_t writers;
65327+ atomic_t waiting_writers;
65328 unsigned int r_counter;
65329 unsigned int w_counter;
65330 struct page *tmp_page;
65331diff --git a/include/linux/platform_data/usb-ehci-s5p.h b/include/linux/platform_data/usb-ehci-s5p.h
65332index 5f28cae..3d23723 100644
65333--- a/include/linux/platform_data/usb-ehci-s5p.h
65334+++ b/include/linux/platform_data/usb-ehci-s5p.h
65335@@ -14,7 +14,7 @@
65336 struct s5p_ehci_platdata {
65337 int (*phy_init)(struct platform_device *pdev, int type);
65338 int (*phy_exit)(struct platform_device *pdev, int type);
65339-};
65340+} __no_const;
65341
65342 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
65343
65344diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
65345index f271860..6b3bec5 100644
65346--- a/include/linux/pm_runtime.h
65347+++ b/include/linux/pm_runtime.h
65348@@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
65349
65350 static inline void pm_runtime_mark_last_busy(struct device *dev)
65351 {
65352- ACCESS_ONCE(dev->power.last_busy) = jiffies;
65353+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
65354 }
65355
65356 #else /* !CONFIG_PM_RUNTIME */
65357diff --git a/include/linux/poison.h b/include/linux/poison.h
65358index 2110a81..13a11bb 100644
65359--- a/include/linux/poison.h
65360+++ b/include/linux/poison.h
65361@@ -19,8 +19,8 @@
65362 * under normal circumstances, used to verify that nobody uses
65363 * non-initialized list entries.
65364 */
65365-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
65366-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
65367+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
65368+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
65369
65370 /********** include/linux/timer.h **********/
65371 /*
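
The stock poison values sit at low addresses (0x00100100 and 0x00200200 plus a per-arch delta) that a user process may be able to map; an attacker who maps that page turns a later dereference of a poisoned pointer into a read or write of attacker-controlled memory instead of a fault. The replacements sit at the very top of the address space, which userland can never map, so the same bug now faults reliably. The foreclosed trick, as a userspace sketch:

        #include <sys/mman.h>

        /* with the stock value, the "poisoned" address can be made valid: */
        static void *map_poison_page(void)
        {
                return mmap((void *)0x00100000, 4096, PROT_READ | PROT_WRITE,
                            MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        }
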
65372diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
65373index 4a496eb..d9c5659 100644
65374--- a/include/linux/power/smartreflex.h
65375+++ b/include/linux/power/smartreflex.h
65376@@ -238,7 +238,7 @@ struct omap_sr_class_data {
65377 int (*notify)(struct omap_sr *sr, u32 status);
65378 u8 notify_flags;
65379 u8 class_type;
65380-};
65381+} __do_const;
65382
65383 /**
65384 * struct omap_sr_nvalue_table - Smartreflex n-target value info
65385diff --git a/include/linux/printk.h b/include/linux/printk.h
65386index 9afc01e..92c32e8 100644
65387--- a/include/linux/printk.h
65388+++ b/include/linux/printk.h
65389@@ -101,6 +101,8 @@ void early_printk(const char *fmt, ...);
65390 extern int printk_needs_cpu(int cpu);
65391 extern void printk_tick(void);
65392
65393+extern int kptr_restrict;
65394+
65395 #ifdef CONFIG_PRINTK
65396 asmlinkage __printf(5, 0)
65397 int vprintk_emit(int facility, int level,
65398@@ -135,7 +137,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
65399
65400 extern int printk_delay_msec;
65401 extern int dmesg_restrict;
65402-extern int kptr_restrict;
65403
65404 void log_buf_kexec_setup(void);
65405 void __init setup_log_buf(int early);
65406diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
65407index 3fd2e87..75db910 100644
65408--- a/include/linux/proc_fs.h
65409+++ b/include/linux/proc_fs.h
65410@@ -155,6 +155,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
65411 return proc_create_data(name, mode, parent, proc_fops, NULL);
65412 }
65413
65414+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
65415+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
65416+{
65417+#ifdef CONFIG_GRKERNSEC_PROC_USER
65418+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
65419+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65420+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
65421+#else
65422+ return proc_create_data(name, mode, parent, proc_fops, NULL);
65423+#endif
65424+}
65425+
65426 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
65427 umode_t mode, struct proc_dir_entry *base,
65428 read_proc_t *read_proc, void * data)
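
proc_create_grsec() lets a caller request its usual mode while the proc-restriction options override it: S_IRUSR (root only) under GRKERNSEC_PROC_USER, S_IRUSR|S_IRGRP (root plus the configured group) under GRKERNSEC_PROC_USERGROUP, and the caller's own mode otherwise. A hypothetical caller, with mystat_open assumed as ordinary seq_file boilerplate:

        #include <linux/proc_fs.h>
        #include <linux/seq_file.h>

        static const struct file_operations mystat_fops = {
                .open    = mystat_open,   /* assumed helper */
                .read    = seq_read,
                .llseek  = seq_lseek,
                .release = single_release,
        };

        static int __init mystat_init(void)
        {
                /* asks for 0444; under GRKERNSEC_PROC_USER it becomes 0400 */
                proc_create_grsec("mystat", 0444, NULL, &mystat_fops);
                return 0;
        }
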
65429diff --git a/include/linux/random.h b/include/linux/random.h
65430index 6330ed4..419c6c3 100644
65431--- a/include/linux/random.h
65432+++ b/include/linux/random.h
65433@@ -30,12 +30,17 @@ void srandom32(u32 seed);
65434
65435 u32 prandom32(struct rnd_state *);
65436
65437+static inline unsigned long pax_get_random_long(void)
65438+{
65439+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
65440+}
65441+
65442 /*
65443 * Handle minimum values for seeds
65444 */
65445 static inline u32 __seed(u32 x, u32 m)
65446 {
65447- return (x < m) ? x + m : x;
65448+ return (x <= m) ? x + m + 1 : x;
65449 }
65450
65451 /**
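
The __seed() change fixes a boundary bug: the Tausworthe generator behind prandom32() needs each state word strictly greater than its per-word minimum m, but the old expression returned x unchanged whenever x >= m, so x == m slipped through, and x == 0 with m == 1 likewise yielded exactly 1. The new form always lands strictly above m. A userspace check of the boundary:

        #include <stdio.h>
        #include <stdint.h>

        static uint32_t seed_old(uint32_t x, uint32_t m)
        {
                return (x < m) ? x + m : x;
        }

        static uint32_t seed_new(uint32_t x, uint32_t m)
        {
                return (x <= m) ? x + m + 1 : x;
        }

        int main(void)
        {
                printf("x=0 m=1: old=%u new=%u\n", seed_old(0, 1), seed_new(0, 1));
                printf("x=1 m=1: old=%u new=%u\n", seed_old(1, 1), seed_new(1, 1));
                return 0;
        }
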
65452diff --git a/include/linux/reboot.h b/include/linux/reboot.h
65453index 23b3630..e1bc12b 100644
65454--- a/include/linux/reboot.h
65455+++ b/include/linux/reboot.h
65456@@ -18,9 +18,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
65457 * Architecture-specific implementations of sys_reboot commands.
65458 */
65459
65460-extern void machine_restart(char *cmd);
65461-extern void machine_halt(void);
65462-extern void machine_power_off(void);
65463+extern void machine_restart(char *cmd) __noreturn;
65464+extern void machine_halt(void) __noreturn;
65465+extern void machine_power_off(void) __noreturn;
65466
65467 extern void machine_shutdown(void);
65468 struct pt_regs;
65469@@ -31,9 +31,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
65470 */
65471
65472 extern void kernel_restart_prepare(char *cmd);
65473-extern void kernel_restart(char *cmd);
65474-extern void kernel_halt(void);
65475-extern void kernel_power_off(void);
65476+extern void kernel_restart(char *cmd) __noreturn;
65477+extern void kernel_halt(void) __noreturn;
65478+extern void kernel_power_off(void) __noreturn;
65479
65480 extern int C_A_D; /* for sysctl */
65481 void ctrl_alt_del(void);
65482@@ -47,7 +47,7 @@ extern int orderly_poweroff(bool force);
65483 * Emergency restart, callable from an interrupt handler.
65484 */
65485
65486-extern void emergency_restart(void);
65487+extern void emergency_restart(void) __noreturn;
65488 #include <asm/emergency-restart.h>
65489
65490 #endif /* _LINUX_REBOOT_H */
65491diff --git a/include/linux/regset.h b/include/linux/regset.h
65492index 8e0c9fe..fdb64bc 100644
65493--- a/include/linux/regset.h
65494+++ b/include/linux/regset.h
65495@@ -161,7 +161,7 @@ struct user_regset {
65496 unsigned int align;
65497 unsigned int bias;
65498 unsigned int core_note_type;
65499-};
65500+} __do_const;
65501
65502 /**
65503 * struct user_regset_view - available regsets
65504diff --git a/include/linux/relay.h b/include/linux/relay.h
65505index 91cacc3..b55ff74 100644
65506--- a/include/linux/relay.h
65507+++ b/include/linux/relay.h
65508@@ -160,7 +160,7 @@ struct rchan_callbacks
65509 * The callback should return 0 if successful, negative if not.
65510 */
65511 int (*remove_buf_file)(struct dentry *dentry);
65512-};
65513+} __no_const;
65514
65515 /*
65516 * CONFIG_RELAY kernel API, kernel/relay.c
65517diff --git a/include/linux/rio.h b/include/linux/rio.h
65518index a3e7842..d973ca6 100644
65519--- a/include/linux/rio.h
65520+++ b/include/linux/rio.h
65521@@ -339,7 +339,7 @@ struct rio_ops {
65522 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
65523 u64 rstart, u32 size, u32 flags);
65524 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
65525-};
65526+} __no_const;
65527
65528 #define RIO_RESOURCE_MEM 0x00000100
65529 #define RIO_RESOURCE_DOORBELL 0x00000200
65530diff --git a/include/linux/rmap.h b/include/linux/rmap.h
65531index bfe1f47..6a33ee3 100644
65532--- a/include/linux/rmap.h
65533+++ b/include/linux/rmap.h
65534@@ -134,8 +134,8 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
65535 void anon_vma_init(void); /* create anon_vma_cachep */
65536 int anon_vma_prepare(struct vm_area_struct *);
65537 void unlink_anon_vmas(struct vm_area_struct *);
65538-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
65539-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
65540+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
65541+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
65542
65543 static inline void anon_vma_merge(struct vm_area_struct *vma,
65544 struct vm_area_struct *next)
65545diff --git a/include/linux/sched.h b/include/linux/sched.h
65546index 0dd42a0..f5dc099 100644
65547--- a/include/linux/sched.h
65548+++ b/include/linux/sched.h
65549@@ -61,6 +61,7 @@ struct bio_list;
65550 struct fs_struct;
65551 struct perf_event_context;
65552 struct blk_plug;
65553+struct linux_binprm;
65554
65555 /*
65556 * List of flags we want to share for kernel threads,
65557@@ -344,10 +345,23 @@ struct user_namespace;
65558 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
65559
65560 extern int sysctl_max_map_count;
65561+extern unsigned long sysctl_heap_stack_gap;
65562
65563 #include <linux/aio.h>
65564
65565 #ifdef CONFIG_MMU
65566+
65567+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
65568+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
65569+#else
65570+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
65571+{
65572+ return 0;
65573+}
65574+#endif
65575+
65576+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
65577+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
65578 extern void arch_pick_mmap_layout(struct mm_struct *mm);
65579 extern unsigned long
65580 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
65581@@ -614,6 +628,17 @@ struct signal_struct {
65582 #ifdef CONFIG_TASKSTATS
65583 struct taskstats *stats;
65584 #endif
65585+
65586+#ifdef CONFIG_GRKERNSEC
65587+ u32 curr_ip;
65588+ u32 saved_ip;
65589+ u32 gr_saddr;
65590+ u32 gr_daddr;
65591+ u16 gr_sport;
65592+ u16 gr_dport;
65593+ u8 used_accept:1;
65594+#endif
65595+
65596 #ifdef CONFIG_AUDIT
65597 unsigned audit_tty;
65598 struct tty_audit_buf *tty_audit_buf;
65599@@ -691,6 +716,11 @@ struct user_struct {
65600 struct key *session_keyring; /* UID's default session keyring */
65601 #endif
65602
65603+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
65604+ unsigned int banned;
65605+ unsigned long ban_expires;
65606+#endif
65607+
65608 /* Hash table maintenance information */
65609 struct hlist_node uidhash_node;
65610 kuid_t uid;
65611@@ -1312,8 +1342,8 @@ struct task_struct {
65612 struct list_head thread_group;
65613
65614 struct completion *vfork_done; /* for vfork() */
65615- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
65616- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
65617+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
65618+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
65619
65620 cputime_t utime, stime, utimescaled, stimescaled;
65621 cputime_t gtime;
65622@@ -1329,11 +1359,6 @@ struct task_struct {
65623 struct task_cputime cputime_expires;
65624 struct list_head cpu_timers[3];
65625
65626-/* process credentials */
65627- const struct cred __rcu *real_cred; /* objective and real subjective task
65628- * credentials (COW) */
65629- const struct cred __rcu *cred; /* effective (overridable) subjective task
65630- * credentials (COW) */
65631 char comm[TASK_COMM_LEN]; /* executable name excluding path
65632 - access with [gs]et_task_comm (which lock
65633 it with task_lock())
65634@@ -1350,6 +1375,10 @@ struct task_struct {
65635 #endif
65636 /* CPU-specific state of this task */
65637 struct thread_struct thread;
65638+/* thread_info moved to task_struct */
65639+#ifdef CONFIG_X86
65640+ struct thread_info tinfo;
65641+#endif
65642 /* filesystem information */
65643 struct fs_struct *fs;
65644 /* open file information */
65645@@ -1423,6 +1452,10 @@ struct task_struct {
65646 gfp_t lockdep_reclaim_gfp;
65647 #endif
65648
65649+/* process credentials */
65650+ const struct cred __rcu *real_cred; /* objective and real subjective task
65651+ * credentials (COW) */
65652+
65653 /* journalling filesystem info */
65654 void *journal_info;
65655
65656@@ -1461,6 +1494,10 @@ struct task_struct {
65657 /* cg_list protected by css_set_lock and tsk->alloc_lock */
65658 struct list_head cg_list;
65659 #endif
65660+
65661+ const struct cred __rcu *cred; /* effective (overridable) subjective task
65662+ * credentials (COW) */
65663+
65664 #ifdef CONFIG_FUTEX
65665 struct robust_list_head __user *robust_list;
65666 #ifdef CONFIG_COMPAT
65667@@ -1548,8 +1585,75 @@ struct task_struct {
65668 #ifdef CONFIG_UPROBES
65669 struct uprobe_task *utask;
65670 #endif
65671+
65672+#ifdef CONFIG_GRKERNSEC
65673+ /* grsecurity */
65674+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65675+ u64 exec_id;
65676+#endif
65677+#ifdef CONFIG_GRKERNSEC_SETXID
65678+ const struct cred *delayed_cred;
65679+#endif
65680+ struct dentry *gr_chroot_dentry;
65681+ struct acl_subject_label *acl;
65682+ struct acl_role_label *role;
65683+ struct file *exec_file;
65684+ unsigned long brute_expires;
65685+ u16 acl_role_id;
65686+ /* is this the task that authenticated to the special role */
65687+ u8 acl_sp_role;
65688+ u8 is_writable;
65689+ u8 brute;
65690+ u8 gr_is_chrooted;
65691+#endif
65692+
65693 };
65694
65695+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
65696+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
65697+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
65698+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
65699+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
65700+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
65701+
65702+#ifdef CONFIG_PAX_SOFTMODE
65703+extern int pax_softmode;
65704+#endif
65705+
65706+extern int pax_check_flags(unsigned long *);
65707+
65708+/* if tsk != current then task_lock must be held on it */
65709+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
65710+static inline unsigned long pax_get_flags(struct task_struct *tsk)
65711+{
65712+ if (likely(tsk->mm))
65713+ return tsk->mm->pax_flags;
65714+ else
65715+ return 0UL;
65716+}
65717+
65718+/* if tsk != current then task_lock must be held on it */
65719+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
65720+{
65721+ if (likely(tsk->mm)) {
65722+ tsk->mm->pax_flags = flags;
65723+ return 0;
65724+ }
65725+ return -EINVAL;
65726+}
65727+#endif
65728+
65729+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
65730+extern void pax_set_initial_flags(struct linux_binprm *bprm);
65731+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
65732+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
65733+#endif
65734+
65735+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
65736+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
65737+extern void pax_report_refcount_overflow(struct pt_regs *regs);
65738+extern void check_object_size(const void *ptr, unsigned long n, bool to);
65739+
65740 /* Future-safe accessor for struct task_struct's cpus_allowed. */
65741 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
65742
65743@@ -2092,7 +2196,9 @@ void yield(void);
65744 extern struct exec_domain default_exec_domain;
65745
65746 union thread_union {
65747+#ifndef CONFIG_X86
65748 struct thread_info thread_info;
65749+#endif
65750 unsigned long stack[THREAD_SIZE/sizeof(long)];
65751 };
65752
65753@@ -2125,6 +2231,7 @@ extern struct pid_namespace init_pid_ns;
65754 */
65755
65756 extern struct task_struct *find_task_by_vpid(pid_t nr);
65757+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
65758 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
65759 struct pid_namespace *ns);
65760
65761@@ -2281,7 +2388,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
65762 extern void exit_itimers(struct signal_struct *);
65763 extern void flush_itimer_signals(void);
65764
65765-extern void do_group_exit(int);
65766+extern __noreturn void do_group_exit(int);
65767
65768 extern void daemonize(const char *, ...);
65769 extern int allow_signal(int);
65770@@ -2485,9 +2592,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
65771
65772 #endif
65773
65774-static inline int object_is_on_stack(void *obj)
65775+static inline int object_starts_on_stack(void *obj)
65776 {
65777- void *stack = task_stack_page(current);
65778+ const void *stack = task_stack_page(current);
65779
65780 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
65781 }
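
The pax_get_flags()/pax_set_flags() helpers added above read and write mm->pax_flags and, per their comments, require task_lock() whenever tsk != current. A minimal usage sketch; pax_mprotect_active() is an illustrative name, not a symbol this patch defines:

static int pax_mprotect_active(struct task_struct *tsk)
{
	unsigned long flags;

	task_lock(tsk);
	flags = pax_get_flags(tsk);	/* returns 0UL when tsk has no mm */
	task_unlock(tsk);

	return (flags & MF_PAX_MPROTECT) != 0;
}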
65782diff --git a/include/linux/security.h b/include/linux/security.h
65783index 05e88bd..5cda002 100644
65784--- a/include/linux/security.h
65785+++ b/include/linux/security.h
65786@@ -26,6 +26,7 @@
65787 #include <linux/capability.h>
65788 #include <linux/slab.h>
65789 #include <linux/err.h>
65790+#include <linux/grsecurity.h>
65791
65792 struct linux_binprm;
65793 struct cred;
65794diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
65795index 68a04a3..866e6a1 100644
65796--- a/include/linux/seq_file.h
65797+++ b/include/linux/seq_file.h
65798@@ -26,6 +26,9 @@ struct seq_file {
65799 struct mutex lock;
65800 const struct seq_operations *op;
65801 int poll_event;
65802+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65803+ u64 exec_id;
65804+#endif
65805 #ifdef CONFIG_USER_NS
65806 struct user_namespace *user_ns;
65807 #endif
65808@@ -38,6 +41,7 @@ struct seq_operations {
65809 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
65810 int (*show) (struct seq_file *m, void *v);
65811 };
65812+typedef struct seq_operations __no_const seq_operations_no_const;
65813
65814 #define SEQ_SKIP 1
65815
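
CONFIG_GRKERNSEC_PROC_MEMMAP threads a 64-bit exec_id through both task_struct (earlier hunk) and seq_file here, so a /proc map file opened before its target re-executed can be told apart from a fresh one. A minimal sketch of that comparison, with gr_proc_read_allowed() as an assumed, illustrative name:

#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
static bool gr_proc_read_allowed(const struct seq_file *m,
				 const struct task_struct *task)
{
	/* a stale reader still carries the exec_id of the old image */
	return m->exec_id == task->exec_id;
}
#endif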
65816diff --git a/include/linux/shm.h b/include/linux/shm.h
65817index bcf8a6a..4d0af77 100644
65818--- a/include/linux/shm.h
65819+++ b/include/linux/shm.h
65820@@ -21,6 +21,10 @@ struct shmid_kernel /* private to the kernel */
65821
65822 /* The task created the shm object. NULL if the task is dead. */
65823 struct task_struct *shm_creator;
65824+#ifdef CONFIG_GRKERNSEC
65825+ time_t shm_createtime;
65826+ pid_t shm_lapid;
65827+#endif
65828 };
65829
65830 /* shm_mode upper byte flags */
65831diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
65832index 6a2c34e..a1f320f 100644
65833--- a/include/linux/skbuff.h
65834+++ b/include/linux/skbuff.h
65835@@ -577,7 +577,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
65836 extern struct sk_buff *__alloc_skb(unsigned int size,
65837 gfp_t priority, int flags, int node);
65838 extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
65839-static inline struct sk_buff *alloc_skb(unsigned int size,
65840+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
65841 gfp_t priority)
65842 {
65843 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
65844@@ -687,7 +687,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
65845 */
65846 static inline int skb_queue_empty(const struct sk_buff_head *list)
65847 {
65848- return list->next == (struct sk_buff *)list;
65849+ return list->next == (const struct sk_buff *)list;
65850 }
65851
65852 /**
65853@@ -700,7 +700,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
65854 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
65855 const struct sk_buff *skb)
65856 {
65857- return skb->next == (struct sk_buff *)list;
65858+ return skb->next == (const struct sk_buff *)list;
65859 }
65860
65861 /**
65862@@ -713,7 +713,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
65863 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
65864 const struct sk_buff *skb)
65865 {
65866- return skb->prev == (struct sk_buff *)list;
65867+ return skb->prev == (const struct sk_buff *)list;
65868 }
65869
65870 /**
65871@@ -1626,7 +1626,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
65872 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
65873 */
65874 #ifndef NET_SKB_PAD
65875-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
65876+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
65877 #endif
65878
65879 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
65880@@ -2204,7 +2204,7 @@ extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
65881 int noblock, int *err);
65882 extern unsigned int datagram_poll(struct file *file, struct socket *sock,
65883 struct poll_table_struct *wait);
65884-extern int skb_copy_datagram_iovec(const struct sk_buff *from,
65885+extern int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from,
65886 int offset, struct iovec *to,
65887 int size);
65888 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
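
The __intentional_overflow() and __size_overflow() markers used here and in the following slab hunks are consumed by the size_overflow gcc plugin: __size_overflow(1) asks for overflow instrumentation on parameter 1 (the size), while __intentional_overflow() suppresses reports where wrapping is deliberate (0 appears to cover the return value; positive numbers, as in later hunks, name specific parameters). A plausible no-plugin fallback definition, assuming the shape used in this patch's compiler headers:

#ifdef SIZE_OVERFLOW_PLUGIN
#define __intentional_overflow(...)	__attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)
#endif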
65889diff --git a/include/linux/slab.h b/include/linux/slab.h
65890index 83d1a14..209e1a6 100644
65891--- a/include/linux/slab.h
65892+++ b/include/linux/slab.h
65893@@ -11,12 +11,20 @@
65894
65895 #include <linux/gfp.h>
65896 #include <linux/types.h>
65897+#include <linux/err.h>
65898
65899 /*
65900 * Flags to pass to kmem_cache_create().
65901 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
65902 */
65903 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
65904+
65905+#ifdef CONFIG_PAX_USERCOPY_SLABS
65906+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
65907+#else
65908+#define SLAB_USERCOPY 0x00000000UL
65909+#endif
65910+
65911 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
65912 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
65913 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
65914@@ -87,10 +95,13 @@
65915 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
65916 * Both make kfree a no-op.
65917 */
65918-#define ZERO_SIZE_PTR ((void *)16)
65919+#define ZERO_SIZE_PTR \
65920+({ \
65921+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
65922+ (void *)(-MAX_ERRNO-1L); \
65923+})
65924
65925-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
65926- (unsigned long)ZERO_SIZE_PTR)
65927+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
65928
65929 /*
65930 * Common fields provided in kmem_cache by all slab allocators
65931@@ -110,7 +121,7 @@ struct kmem_cache {
65932 unsigned int align; /* Alignment as calculated */
65933 unsigned long flags; /* Active flags on the slab */
65934 const char *name; /* Slab name for sysfs */
65935- int refcount; /* Use counter */
65936+ atomic_t refcount; /* Use counter */
65937 void (*ctor)(void *); /* Called on object slot creation */
65938 struct list_head list; /* List of all slab caches on the system */
65939 };
65940@@ -185,6 +196,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
65941 void kfree(const void *);
65942 void kzfree(const void *);
65943 size_t ksize(const void *);
65944+const char *check_heap_object(const void *ptr, unsigned long n);
65945+bool is_usercopy_object(const void *ptr);
65946
65947 /*
65948 * Allocator specific definitions. These are mainly used to establish optimized
65949@@ -323,7 +336,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
65950 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
65951 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
65952 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
65953-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
65954+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
65955 #define kmalloc_track_caller(size, flags) \
65956 __kmalloc_track_caller(size, flags, _RET_IP_)
65957 #else
65958@@ -343,7 +356,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
65959 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
65960 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
65961 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
65962-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
65963+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
65964 #define kmalloc_node_track_caller(size, flags, node) \
65965 __kmalloc_node_track_caller(size, flags, node, \
65966 _RET_IP_)
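
The new ZERO_SIZE_PTR sits just below the ERR_PTR() range instead of at address 16, and the rewritten ZERO_OR_NULL_PTR() exploits unsigned wraparound so a single compare catches NULL plus everything from ZERO_SIZE_PTR to the top of the address space. A runnable userspace demonstration of the arithmetic, with the BUILD_BUG_ON statement expression dropped for brevity:

#include <stdio.h>

#define MAX_ERRNO	4095L
#define ZERO_SIZE_PTR	((void *)(-MAX_ERRNO - 1L))
#define ZERO_OR_NULL_PTR(x) \
	((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)

int main(void)
{
	int obj;

	/* 0UL - 1 wraps to ULONG_MAX, so NULL matches */
	printf("NULL          -> %d\n", ZERO_OR_NULL_PTR(NULL));
	/* the sentinel itself and every error pointer above it match */
	printf("ZERO_SIZE_PTR -> %d\n", ZERO_OR_NULL_PTR(ZERO_SIZE_PTR));
	printf("ERR_PTR(-1)   -> %d\n", ZERO_OR_NULL_PTR((void *)-1L));
	/* an ordinary object pointer does not */
	printf("&obj          -> %d\n", ZERO_OR_NULL_PTR((void *)&obj));
	return 0;
}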
65967diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
65968index cc290f0..0ba60931 100644
65969--- a/include/linux/slab_def.h
65970+++ b/include/linux/slab_def.h
65971@@ -52,7 +52,7 @@ struct kmem_cache {
65972 /* 4) cache creation/removal */
65973 const char *name;
65974 struct list_head list;
65975- int refcount;
65976+ atomic_t refcount;
65977 int object_size;
65978 int align;
65979
65980@@ -68,10 +68,10 @@ struct kmem_cache {
65981 unsigned long node_allocs;
65982 unsigned long node_frees;
65983 unsigned long node_overflow;
65984- atomic_t allochit;
65985- atomic_t allocmiss;
65986- atomic_t freehit;
65987- atomic_t freemiss;
65988+ atomic_unchecked_t allochit;
65989+ atomic_unchecked_t allocmiss;
65990+ atomic_unchecked_t freehit;
65991+ atomic_unchecked_t freemiss;
65992
65993 /*
65994 * If debugging is enabled, then the allocator can add additional
65995@@ -104,11 +104,16 @@ struct cache_sizes {
65996 #ifdef CONFIG_ZONE_DMA
65997 struct kmem_cache *cs_dmacachep;
65998 #endif
65999+
66000+#ifdef CONFIG_PAX_USERCOPY_SLABS
66001+ struct kmem_cache *cs_usercopycachep;
66002+#endif
66003+
66004 };
66005 extern struct cache_sizes malloc_sizes[];
66006
66007 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
66008-void *__kmalloc(size_t size, gfp_t flags);
66009+void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
66010
66011 #ifdef CONFIG_TRACING
66012 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
66013@@ -145,6 +150,13 @@ found:
66014 cachep = malloc_sizes[i].cs_dmacachep;
66015 else
66016 #endif
66017+
66018+#ifdef CONFIG_PAX_USERCOPY_SLABS
66019+ if (flags & GFP_USERCOPY)
66020+ cachep = malloc_sizes[i].cs_usercopycachep;
66021+ else
66022+#endif
66023+
66024 cachep = malloc_sizes[i].cs_cachep;
66025
66026 ret = kmem_cache_alloc_trace(cachep, flags, size);
66027@@ -155,7 +167,7 @@ found:
66028 }
66029
66030 #ifdef CONFIG_NUMA
66031-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
66032+extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
66033 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
66034
66035 #ifdef CONFIG_TRACING
66036@@ -198,6 +210,13 @@ found:
66037 cachep = malloc_sizes[i].cs_dmacachep;
66038 else
66039 #endif
66040+
66041+#ifdef CONFIG_PAX_USERCOPY_SLABS
66042+ if (flags & GFP_USERCOPY)
66043+ cachep = malloc_sizes[i].cs_usercopycachep;
66044+ else
66045+#endif
66046+
66047 cachep = malloc_sizes[i].cs_cachep;
66048
66049 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
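
Both dispatch hunks above select the cs_usercopycachep cache when GFP_USERCOPY is set; the flag itself is added to gfp.h elsewhere in this patch. An illustrative wrapper, assuming that flag:

static inline void *kmalloc_usercopy(size_t size, gfp_t flags)
{
	/* objects destined for copy_to/from_user() come from the
	 * dedicated usercopy caches selected above */
	return kmalloc(size, flags | GFP_USERCOPY);
}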
66050diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
66051index f28e14a..7831211 100644
66052--- a/include/linux/slob_def.h
66053+++ b/include/linux/slob_def.h
66054@@ -11,7 +11,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
66055 return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
66056 }
66057
66058-void *__kmalloc_node(size_t size, gfp_t flags, int node);
66059+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
66060
66061 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
66062 {
66063@@ -31,7 +31,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
66064 return __kmalloc_node(size, flags, NUMA_NO_NODE);
66065 }
66066
66067-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
66068+static __always_inline __size_overflow(1) void *__kmalloc(size_t size, gfp_t flags)
66069 {
66070 return kmalloc(size, flags);
66071 }
66072diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
66073index df448ad..b99e7f6 100644
66074--- a/include/linux/slub_def.h
66075+++ b/include/linux/slub_def.h
66076@@ -91,7 +91,7 @@ struct kmem_cache {
66077 struct kmem_cache_order_objects max;
66078 struct kmem_cache_order_objects min;
66079 gfp_t allocflags; /* gfp flags to use on each alloc */
66080- int refcount; /* Refcount for slab cache destroy */
66081+ atomic_t refcount; /* Refcount for slab cache destroy */
66082 void (*ctor)(void *);
66083 int inuse; /* Offset to metadata */
66084 int align; /* Alignment */
66085@@ -152,7 +152,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
66086 * Sorry that the following has to be that ugly but some versions of GCC
66087 * have trouble with constant propagation and loops.
66088 */
66089-static __always_inline int kmalloc_index(size_t size)
66090+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
66091 {
66092 if (!size)
66093 return 0;
66094@@ -217,7 +217,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
66095 }
66096
66097 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
66098-void *__kmalloc(size_t size, gfp_t flags);
66099+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
66100
66101 static __always_inline void *
66102 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
66103@@ -258,7 +258,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
66104 }
66105 #endif
66106
66107-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
66108+static __always_inline __size_overflow(1) void *kmalloc_large(size_t size, gfp_t flags)
66109 {
66110 unsigned int order = get_order(size);
66111 return kmalloc_order_trace(size, flags, order);
66112@@ -283,7 +283,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
66113 }
66114
66115 #ifdef CONFIG_NUMA
66116-void *__kmalloc_node(size_t size, gfp_t flags, int node);
66117+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
66118 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
66119
66120 #ifdef CONFIG_TRACING
66121diff --git a/include/linux/sonet.h b/include/linux/sonet.h
66122index 680f9a3..f13aeb0 100644
66123--- a/include/linux/sonet.h
66124+++ b/include/linux/sonet.h
66125@@ -7,7 +7,7 @@
66126 #include <uapi/linux/sonet.h>
66127
66128 struct k_sonet_stats {
66129-#define __HANDLE_ITEM(i) atomic_t i
66130+#define __HANDLE_ITEM(i) atomic_unchecked_t i
66131 __SONET_ITEMS
66132 #undef __HANDLE_ITEM
66133 };
66134diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
66135index 34206b8..f019e06 100644
66136--- a/include/linux/sunrpc/clnt.h
66137+++ b/include/linux/sunrpc/clnt.h
66138@@ -176,9 +176,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
66139 {
66140 switch (sap->sa_family) {
66141 case AF_INET:
66142- return ntohs(((struct sockaddr_in *)sap)->sin_port);
66143+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
66144 case AF_INET6:
66145- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
66146+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
66147 }
66148 return 0;
66149 }
66150@@ -211,7 +211,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
66151 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
66152 const struct sockaddr *src)
66153 {
66154- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
66155+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
66156 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
66157
66158 dsin->sin_family = ssin->sin_family;
66159@@ -314,7 +314,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
66160 if (sa->sa_family != AF_INET6)
66161 return 0;
66162
66163- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
66164+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
66165 }
66166
66167 #endif /* __KERNEL__ */
66168diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
66169index 0b8e3e6..33e0a01 100644
66170--- a/include/linux/sunrpc/svc_rdma.h
66171+++ b/include/linux/sunrpc/svc_rdma.h
66172@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
66173 extern unsigned int svcrdma_max_requests;
66174 extern unsigned int svcrdma_max_req_size;
66175
66176-extern atomic_t rdma_stat_recv;
66177-extern atomic_t rdma_stat_read;
66178-extern atomic_t rdma_stat_write;
66179-extern atomic_t rdma_stat_sq_starve;
66180-extern atomic_t rdma_stat_rq_starve;
66181-extern atomic_t rdma_stat_rq_poll;
66182-extern atomic_t rdma_stat_rq_prod;
66183-extern atomic_t rdma_stat_sq_poll;
66184-extern atomic_t rdma_stat_sq_prod;
66185+extern atomic_unchecked_t rdma_stat_recv;
66186+extern atomic_unchecked_t rdma_stat_read;
66187+extern atomic_unchecked_t rdma_stat_write;
66188+extern atomic_unchecked_t rdma_stat_sq_starve;
66189+extern atomic_unchecked_t rdma_stat_rq_starve;
66190+extern atomic_unchecked_t rdma_stat_rq_poll;
66191+extern atomic_unchecked_t rdma_stat_rq_prod;
66192+extern atomic_unchecked_t rdma_stat_sq_poll;
66193+extern atomic_unchecked_t rdma_stat_sq_prod;
66194
66195 #define RPCRDMA_VERSION 1
66196
66197diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
66198index cd844a6..3ca3592 100644
66199--- a/include/linux/sysctl.h
66200+++ b/include/linux/sysctl.h
66201@@ -41,6 +41,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
66202
66203 extern int proc_dostring(struct ctl_table *, int,
66204 void __user *, size_t *, loff_t *);
66205+extern int proc_dostring_modpriv(struct ctl_table *, int,
66206+ void __user *, size_t *, loff_t *);
66207 extern int proc_dointvec(struct ctl_table *, int,
66208 void __user *, size_t *, loff_t *);
66209 extern int proc_dointvec_minmax(struct ctl_table *, int,
66210diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
66211index 7faf933..eb6f5e3 100644
66212--- a/include/linux/sysrq.h
66213+++ b/include/linux/sysrq.h
66214@@ -36,7 +36,7 @@ struct sysrq_key_op {
66215 char *help_msg;
66216 char *action_msg;
66217 int enable_mask;
66218-};
66219+} __do_const;
66220
66221 #ifdef CONFIG_MAGIC_SYSRQ
66222
66223diff --git a/include/linux/tty.h b/include/linux/tty.h
66224index f0b4eb4..1c4854e 100644
66225--- a/include/linux/tty.h
66226+++ b/include/linux/tty.h
66227@@ -192,7 +192,7 @@ struct tty_port {
66228 const struct tty_port_operations *ops; /* Port operations */
66229 spinlock_t lock; /* Lock protecting tty field */
66230 int blocked_open; /* Waiting to open */
66231- int count; /* Usage count */
66232+ atomic_t count; /* Usage count */
66233 wait_queue_head_t open_wait; /* Open waiters */
66234 wait_queue_head_t close_wait; /* Close waiters */
66235 wait_queue_head_t delta_msr_wait; /* Modem status change */
66236@@ -513,7 +513,7 @@ extern int tty_port_open(struct tty_port *port,
66237 struct tty_struct *tty, struct file *filp);
66238 static inline int tty_port_users(struct tty_port *port)
66239 {
66240- return port->count + port->blocked_open;
66241+ return atomic_read(&port->count) + port->blocked_open;
66242 }
66243
66244 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
66245diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
66246index dd976cf..e272742 100644
66247--- a/include/linux/tty_driver.h
66248+++ b/include/linux/tty_driver.h
66249@@ -284,7 +284,7 @@ struct tty_operations {
66250 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
66251 #endif
66252 const struct file_operations *proc_fops;
66253-};
66254+} __do_const;
66255
66256 struct tty_driver {
66257 int magic; /* magic number for this structure */
66258diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
66259index fb79dd8d..07d4773 100644
66260--- a/include/linux/tty_ldisc.h
66261+++ b/include/linux/tty_ldisc.h
66262@@ -149,7 +149,7 @@ struct tty_ldisc_ops {
66263
66264 struct module *owner;
66265
66266- int refcount;
66267+ atomic_t refcount;
66268 };
66269
66270 struct tty_ldisc {
66271diff --git a/include/linux/types.h b/include/linux/types.h
66272index 1cc0e4b..0d50edf 100644
66273--- a/include/linux/types.h
66274+++ b/include/linux/types.h
66275@@ -175,10 +175,26 @@ typedef struct {
66276 int counter;
66277 } atomic_t;
66278
66279+#ifdef CONFIG_PAX_REFCOUNT
66280+typedef struct {
66281+ int counter;
66282+} atomic_unchecked_t;
66283+#else
66284+typedef atomic_t atomic_unchecked_t;
66285+#endif
66286+
66287 #ifdef CONFIG_64BIT
66288 typedef struct {
66289 long counter;
66290 } atomic64_t;
66291+
66292+#ifdef CONFIG_PAX_REFCOUNT
66293+typedef struct {
66294+ long counter;
66295+} atomic64_unchecked_t;
66296+#else
66297+typedef atomic64_t atomic64_unchecked_t;
66298+#endif
66299 #endif
66300
66301 struct list_head {
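
atomic_unchecked_t is the escape hatch for CONFIG_PAX_REFCOUNT: checked atomic_t operations get overflow instrumentation, while the *_unchecked variants keep plain wrapping semantics for counters where overflow is harmless. A minimal sketch of the intended split, using accessors this patch adds alongside the types:

static atomic_t refcnt = ATOMIC_INIT(1);		/* lifetime: must never wrap */
static atomic_unchecked_t drops = ATOMIC_INIT(0);	/* statistic: wrapping is fine */

static void sketch(void)
{
	atomic_inc(&refcnt);		/* instrumented under PAX_REFCOUNT */
	atomic_inc_unchecked(&drops);	/* plain, uninstrumented increment */
}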
66302diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
66303index 5ca0951..ab496a5 100644
66304--- a/include/linux/uaccess.h
66305+++ b/include/linux/uaccess.h
66306@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
66307 long ret; \
66308 mm_segment_t old_fs = get_fs(); \
66309 \
66310- set_fs(KERNEL_DS); \
66311 pagefault_disable(); \
66312- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
66313- pagefault_enable(); \
66314+ set_fs(KERNEL_DS); \
66315+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
66316 set_fs(old_fs); \
66317+ pagefault_enable(); \
66318 ret; \
66319 })
66320
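
The reordering above (this looks like the probe_kernel_address() body) narrows the window in which the task runs with its address limit raised: set_fs(KERNEL_DS) now happens only after page faults are disabled, and the old limit is restored before they are re-enabled. Usage is unchanged; a sketch:

/* safely peek at a possibly-invalid kernel pointer; returns 0 on success */
static int peek(const unsigned long *addr, unsigned long *out)
{
	return probe_kernel_address(addr, *out);
}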
66321diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
66322index 99c1b4d..bb94261 100644
66323--- a/include/linux/unaligned/access_ok.h
66324+++ b/include/linux/unaligned/access_ok.h
66325@@ -6,32 +6,32 @@
66326
66327 static inline u16 get_unaligned_le16(const void *p)
66328 {
66329- return le16_to_cpup((__le16 *)p);
66330+ return le16_to_cpup((const __le16 *)p);
66331 }
66332
66333 static inline u32 get_unaligned_le32(const void *p)
66334 {
66335- return le32_to_cpup((__le32 *)p);
66336+ return le32_to_cpup((const __le32 *)p);
66337 }
66338
66339 static inline u64 get_unaligned_le64(const void *p)
66340 {
66341- return le64_to_cpup((__le64 *)p);
66342+ return le64_to_cpup((const __le64 *)p);
66343 }
66344
66345 static inline u16 get_unaligned_be16(const void *p)
66346 {
66347- return be16_to_cpup((__be16 *)p);
66348+ return be16_to_cpup((const __be16 *)p);
66349 }
66350
66351 static inline u32 get_unaligned_be32(const void *p)
66352 {
66353- return be32_to_cpup((__be32 *)p);
66354+ return be32_to_cpup((const __be32 *)p);
66355 }
66356
66357 static inline u64 get_unaligned_be64(const void *p)
66358 {
66359- return be64_to_cpup((__be64 *)p);
66360+ return be64_to_cpup((const __be64 *)p);
66361 }
66362
66363 static inline void put_unaligned_le16(u16 val, void *p)
66364diff --git a/include/linux/usb.h b/include/linux/usb.h
66365index 10278d1..e21ec3c 100644
66366--- a/include/linux/usb.h
66367+++ b/include/linux/usb.h
66368@@ -551,7 +551,7 @@ struct usb_device {
66369 int maxchild;
66370
66371 u32 quirks;
66372- atomic_t urbnum;
66373+ atomic_unchecked_t urbnum;
66374
66375 unsigned long active_duration;
66376
66377diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
66378index c5d36c6..108f4f9 100644
66379--- a/include/linux/usb/renesas_usbhs.h
66380+++ b/include/linux/usb/renesas_usbhs.h
66381@@ -39,7 +39,7 @@ enum {
66382 */
66383 struct renesas_usbhs_driver_callback {
66384 int (*notify_hotplug)(struct platform_device *pdev);
66385-};
66386+} __no_const;
66387
66388 /*
66389 * callback functions for platform
66390diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
66391index 6f8fbcf..8259001 100644
66392--- a/include/linux/vermagic.h
66393+++ b/include/linux/vermagic.h
66394@@ -25,9 +25,35 @@
66395 #define MODULE_ARCH_VERMAGIC ""
66396 #endif
66397
66398+#ifdef CONFIG_PAX_REFCOUNT
66399+#define MODULE_PAX_REFCOUNT "REFCOUNT "
66400+#else
66401+#define MODULE_PAX_REFCOUNT ""
66402+#endif
66403+
66404+#ifdef CONSTIFY_PLUGIN
66405+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
66406+#else
66407+#define MODULE_CONSTIFY_PLUGIN ""
66408+#endif
66409+
66410+#ifdef STACKLEAK_PLUGIN
66411+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
66412+#else
66413+#define MODULE_STACKLEAK_PLUGIN ""
66414+#endif
66415+
66416+#ifdef CONFIG_GRKERNSEC
66417+#define MODULE_GRSEC "GRSEC "
66418+#else
66419+#define MODULE_GRSEC ""
66420+#endif
66421+
66422 #define VERMAGIC_STRING \
66423 UTS_RELEASE " " \
66424 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
66425 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
66426- MODULE_ARCH_VERMAGIC
66427+ MODULE_ARCH_VERMAGIC \
66428+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
66429+ MODULE_GRSEC
66430
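
Since every MODULE_* fragment carries its own trailing space, a kernel built with all three plugins and grsecurity would advertise a vermagic along the lines of "3.7.4-grsec SMP mod_unload REFCOUNT CONSTIFY_PLUGIN STACKLEAK_PLUGIN GRSEC" (the exact release string here is illustrative); a module built without the matching options then fails the vermagic comparison at load time.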
66431diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
66432index 6071e91..ca6a489 100644
66433--- a/include/linux/vmalloc.h
66434+++ b/include/linux/vmalloc.h
66435@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
66436 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
66437 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
66438 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
66439+
66440+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
66441+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
66442+#endif
66443+
66444 /* bits [20..32] reserved for arch specific ioremap internals */
66445
66446 /*
66447@@ -62,7 +67,7 @@ extern void *vmalloc_32_user(unsigned long size);
66448 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
66449 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
66450 unsigned long start, unsigned long end, gfp_t gfp_mask,
66451- pgprot_t prot, int node, const void *caller);
66452+ pgprot_t prot, int node, const void *caller) __size_overflow(1);
66453 extern void vfree(const void *addr);
66454
66455 extern void *vmap(struct page **pages, unsigned int count,
66456@@ -124,8 +129,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
66457 extern void free_vm_area(struct vm_struct *area);
66458
66459 /* for /dev/kmem */
66460-extern long vread(char *buf, char *addr, unsigned long count);
66461-extern long vwrite(char *buf, char *addr, unsigned long count);
66462+extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
66463+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
66464
66465 /*
66466 * Internals. Dont't use..
66467diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
66468index 92a86b2..1d9eb3c 100644
66469--- a/include/linux/vmstat.h
66470+++ b/include/linux/vmstat.h
66471@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
66472 /*
66473 * Zone based page accounting with per cpu differentials.
66474 */
66475-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
66476+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
66477
66478 static inline void zone_page_state_add(long x, struct zone *zone,
66479 enum zone_stat_item item)
66480 {
66481- atomic_long_add(x, &zone->vm_stat[item]);
66482- atomic_long_add(x, &vm_stat[item]);
66483+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
66484+ atomic_long_add_unchecked(x, &vm_stat[item]);
66485 }
66486
66487 static inline unsigned long global_page_state(enum zone_stat_item item)
66488 {
66489- long x = atomic_long_read(&vm_stat[item]);
66490+ long x = atomic_long_read_unchecked(&vm_stat[item]);
66491 #ifdef CONFIG_SMP
66492 if (x < 0)
66493 x = 0;
66494@@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
66495 static inline unsigned long zone_page_state(struct zone *zone,
66496 enum zone_stat_item item)
66497 {
66498- long x = atomic_long_read(&zone->vm_stat[item]);
66499+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
66500 #ifdef CONFIG_SMP
66501 if (x < 0)
66502 x = 0;
66503@@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
66504 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
66505 enum zone_stat_item item)
66506 {
66507- long x = atomic_long_read(&zone->vm_stat[item]);
66508+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
66509
66510 #ifdef CONFIG_SMP
66511 int cpu;
66512@@ -218,8 +218,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
66513
66514 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
66515 {
66516- atomic_long_inc(&zone->vm_stat[item]);
66517- atomic_long_inc(&vm_stat[item]);
66518+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
66519+ atomic_long_inc_unchecked(&vm_stat[item]);
66520 }
66521
66522 static inline void __inc_zone_page_state(struct page *page,
66523@@ -230,8 +230,8 @@ static inline void __inc_zone_page_state(struct page *page,
66524
66525 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
66526 {
66527- atomic_long_dec(&zone->vm_stat[item]);
66528- atomic_long_dec(&vm_stat[item]);
66529+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
66530+ atomic_long_dec_unchecked(&vm_stat[item]);
66531 }
66532
66533 static inline void __dec_zone_page_state(struct page *page,
66534diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
66535index 95d1c91..6798cca 100644
66536--- a/include/media/v4l2-dev.h
66537+++ b/include/media/v4l2-dev.h
66538@@ -76,7 +76,7 @@ struct v4l2_file_operations {
66539 int (*mmap) (struct file *, struct vm_area_struct *);
66540 int (*open) (struct file *);
66541 int (*release) (struct file *);
66542-};
66543+} __do_const;
66544
66545 /*
66546 * Newer version of video_device, handled by videodev2.c
66547diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
66548index e48b571..7e40de4 100644
66549--- a/include/media/v4l2-ioctl.h
66550+++ b/include/media/v4l2-ioctl.h
66551@@ -282,7 +282,6 @@ struct v4l2_ioctl_ops {
66552 bool valid_prio, int cmd, void *arg);
66553 };
66554
66555-
66556 /* v4l debugging and diagnostics */
66557
66558 /* Debug bitmask flags to be used on V4L2 */
66559diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
66560index 9e5425b..8136ffc 100644
66561--- a/include/net/caif/cfctrl.h
66562+++ b/include/net/caif/cfctrl.h
66563@@ -52,7 +52,7 @@ struct cfctrl_rsp {
66564 void (*radioset_rsp)(void);
66565 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
66566 struct cflayer *client_layer);
66567-};
66568+} __no_const;
66569
66570 /* Link Setup Parameters for CAIF-Links. */
66571 struct cfctrl_link_param {
66572@@ -101,8 +101,8 @@ struct cfctrl_request_info {
66573 struct cfctrl {
66574 struct cfsrvl serv;
66575 struct cfctrl_rsp res;
66576- atomic_t req_seq_no;
66577- atomic_t rsp_seq_no;
66578+ atomic_unchecked_t req_seq_no;
66579+ atomic_unchecked_t rsp_seq_no;
66580 struct list_head list;
66581 /* Protects from simultaneous access to first_req list */
66582 spinlock_t info_list_lock;
66583diff --git a/include/net/flow.h b/include/net/flow.h
66584index 628e11b..4c475df 100644
66585--- a/include/net/flow.h
66586+++ b/include/net/flow.h
66587@@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
66588
66589 extern void flow_cache_flush(void);
66590 extern void flow_cache_flush_deferred(void);
66591-extern atomic_t flow_cache_genid;
66592+extern atomic_unchecked_t flow_cache_genid;
66593
66594 #endif
66595diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
66596index e5062c9..e709988 100644
66597--- a/include/net/gro_cells.h
66598+++ b/include/net/gro_cells.h
66599@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
66600 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
66601
66602 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
66603- atomic_long_inc(&dev->rx_dropped);
66604+ atomic_long_inc_unchecked(&dev->rx_dropped);
66605 kfree_skb(skb);
66606 return;
66607 }
66608diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
66609index 1832927..ce39aea 100644
66610--- a/include/net/inet_connection_sock.h
66611+++ b/include/net/inet_connection_sock.h
66612@@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
66613 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
66614 int (*bind_conflict)(const struct sock *sk,
66615 const struct inet_bind_bucket *tb, bool relax);
66616-};
66617+} __do_const;
66618
66619 /** inet_connection_sock - INET connection oriented sock
66620 *
66621diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
66622index 53f464d..ba76aaa 100644
66623--- a/include/net/inetpeer.h
66624+++ b/include/net/inetpeer.h
66625@@ -47,8 +47,8 @@ struct inet_peer {
66626 */
66627 union {
66628 struct {
66629- atomic_t rid; /* Frag reception counter */
66630- atomic_t ip_id_count; /* IP ID for the next packet */
66631+ atomic_unchecked_t rid; /* Frag reception counter */
66632+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
66633 };
66634 struct rcu_head rcu;
66635 struct inet_peer *gc_next;
66636@@ -182,11 +182,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
66637 more++;
66638 inet_peer_refcheck(p);
66639 do {
66640- old = atomic_read(&p->ip_id_count);
66641+ old = atomic_read_unchecked(&p->ip_id_count);
66642 new = old + more;
66643 if (!new)
66644 new = 1;
66645- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
66646+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
66647 return new;
66648 }
66649
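
inet_getid() above is a textbook case for atomic_cmpxchg_unchecked(): the IP ID generator is supposed to wrap, so REFCOUNT instrumentation must stay out of its way. A userspace rendition of the same loop, assuming C11 atomics in place of the kernel primitives:

#include <stdatomic.h>

static unsigned int get_id(atomic_uint *ctr, unsigned int more)
{
	unsigned int old = atomic_load(ctr);
	unsigned int new;

	do {
		new = old + more + 1;	/* the kernel pre-increments 'more' */
		if (!new)
			new = 1;	/* never hand out ID 0 */
	} while (!atomic_compare_exchange_weak(ctr, &old, new));
	/* on failure the weak CAS reloads 'old' for the retry */
	return new;
}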
66650diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
66651index 9497be1..5a4fafe 100644
66652--- a/include/net/ip_fib.h
66653+++ b/include/net/ip_fib.h
66654@@ -169,7 +169,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
66655
66656 #define FIB_RES_SADDR(net, res) \
66657 ((FIB_RES_NH(res).nh_saddr_genid == \
66658- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
66659+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
66660 FIB_RES_NH(res).nh_saddr : \
66661 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
66662 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
66663diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
66664index ee75ccd..2cc2b95 100644
66665--- a/include/net/ip_vs.h
66666+++ b/include/net/ip_vs.h
66667@@ -510,7 +510,7 @@ struct ip_vs_conn {
66668 struct ip_vs_conn *control; /* Master control connection */
66669 atomic_t n_control; /* Number of controlled ones */
66670 struct ip_vs_dest *dest; /* real server */
66671- atomic_t in_pkts; /* incoming packet counter */
66672+ atomic_unchecked_t in_pkts; /* incoming packet counter */
66673
66674 /* packet transmitter for different forwarding methods. If it
66675 mangles the packet, it must return NF_DROP or better NF_STOLEN,
66676@@ -648,7 +648,7 @@ struct ip_vs_dest {
66677 __be16 port; /* port number of the server */
66678 union nf_inet_addr addr; /* IP address of the server */
66679 volatile unsigned int flags; /* dest status flags */
66680- atomic_t conn_flags; /* flags to copy to conn */
66681+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
66682 atomic_t weight; /* server weight */
66683
66684 atomic_t refcnt; /* reference counter */
66685diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
66686index 80ffde3..968b0f4 100644
66687--- a/include/net/irda/ircomm_tty.h
66688+++ b/include/net/irda/ircomm_tty.h
66689@@ -35,6 +35,7 @@
66690 #include <linux/termios.h>
66691 #include <linux/timer.h>
66692 #include <linux/tty.h> /* struct tty_struct */
66693+#include <asm/local.h>
66694
66695 #include <net/irda/irias_object.h>
66696 #include <net/irda/ircomm_core.h>
66697diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
66698index cc7c197..9f2da2a 100644
66699--- a/include/net/iucv/af_iucv.h
66700+++ b/include/net/iucv/af_iucv.h
66701@@ -141,7 +141,7 @@ struct iucv_sock {
66702 struct iucv_sock_list {
66703 struct hlist_head head;
66704 rwlock_t lock;
66705- atomic_t autobind_name;
66706+ atomic_unchecked_t autobind_name;
66707 };
66708
66709 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
66710diff --git a/include/net/neighbour.h b/include/net/neighbour.h
66711index 0dab173..1b76af0 100644
66712--- a/include/net/neighbour.h
66713+++ b/include/net/neighbour.h
66714@@ -123,7 +123,7 @@ struct neigh_ops {
66715 void (*error_report)(struct neighbour *, struct sk_buff *);
66716 int (*output)(struct neighbour *, struct sk_buff *);
66717 int (*connected_output)(struct neighbour *, struct sk_buff *);
66718-};
66719+} __do_const;
66720
66721 struct pneigh_entry {
66722 struct pneigh_entry *next;
66723diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
66724index 95e6466..251016d 100644
66725--- a/include/net/net_namespace.h
66726+++ b/include/net/net_namespace.h
66727@@ -110,7 +110,7 @@ struct net {
66728 #endif
66729 struct netns_ipvs *ipvs;
66730 struct sock *diag_nlsk;
66731- atomic_t rt_genid;
66732+ atomic_unchecked_t rt_genid;
66733 };
66734
66735 /*
66736@@ -320,12 +320,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
66737
66738 static inline int rt_genid(struct net *net)
66739 {
66740- return atomic_read(&net->rt_genid);
66741+ return atomic_read_unchecked(&net->rt_genid);
66742 }
66743
66744 static inline void rt_genid_bump(struct net *net)
66745 {
66746- atomic_inc(&net->rt_genid);
66747+ atomic_inc_unchecked(&net->rt_genid);
66748 }
66749
66750 #endif /* __NET_NET_NAMESPACE_H */
66751diff --git a/include/net/netdma.h b/include/net/netdma.h
66752index 8ba8ce2..99b7fff 100644
66753--- a/include/net/netdma.h
66754+++ b/include/net/netdma.h
66755@@ -24,7 +24,7 @@
66756 #include <linux/dmaengine.h>
66757 #include <linux/skbuff.h>
66758
66759-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
66760+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
66761 struct sk_buff *skb, int offset, struct iovec *to,
66762 size_t len, struct dma_pinned_list *pinned_list);
66763
66764diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
66765index 252fd10..aa1421f 100644
66766--- a/include/net/netfilter/nf_queue.h
66767+++ b/include/net/netfilter/nf_queue.h
66768@@ -22,7 +22,7 @@ struct nf_queue_handler {
66769 int (*outfn)(struct nf_queue_entry *entry,
66770 unsigned int queuenum);
66771 char *name;
66772-};
66773+} __do_const;
66774
66775 extern int nf_register_queue_handler(u_int8_t pf,
66776 const struct nf_queue_handler *qh);
66777diff --git a/include/net/netlink.h b/include/net/netlink.h
66778index 9690b0f..87aded7 100644
66779--- a/include/net/netlink.h
66780+++ b/include/net/netlink.h
66781@@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
66782 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
66783 {
66784 if (mark)
66785- skb_trim(skb, (unsigned char *) mark - skb->data);
66786+ skb_trim(skb, (const unsigned char *) mark - skb->data);
66787 }
66788
66789 /**
66790diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
66791index 2ae2b83..dbdc85e 100644
66792--- a/include/net/netns/ipv4.h
66793+++ b/include/net/netns/ipv4.h
66794@@ -64,7 +64,7 @@ struct netns_ipv4 {
66795 kgid_t sysctl_ping_group_range[2];
66796 long sysctl_tcp_mem[3];
66797
66798- atomic_t dev_addr_genid;
66799+ atomic_unchecked_t dev_addr_genid;
66800
66801 #ifdef CONFIG_IP_MROUTE
66802 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
66803diff --git a/include/net/protocol.h b/include/net/protocol.h
66804index 929528c..c84d4f6 100644
66805--- a/include/net/protocol.h
66806+++ b/include/net/protocol.h
66807@@ -48,7 +48,7 @@ struct net_protocol {
66808 int (*gro_complete)(struct sk_buff *skb);
66809 unsigned int no_policy:1,
66810 netns_ok:1;
66811-};
66812+} __do_const;
66813
66814 #if IS_ENABLED(CONFIG_IPV6)
66815 struct inet6_protocol {
66816@@ -69,7 +69,7 @@ struct inet6_protocol {
66817 int (*gro_complete)(struct sk_buff *skb);
66818
66819 unsigned int flags; /* INET6_PROTO_xxx */
66820-};
66821+} __do_const;
66822
66823 #define INET6_PROTO_NOPOLICY 0x1
66824 #define INET6_PROTO_FINAL 0x2
66825diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
66826index 9c6414f..fbd0524 100644
66827--- a/include/net/sctp/sctp.h
66828+++ b/include/net/sctp/sctp.h
66829@@ -318,9 +318,9 @@ do { \
66830
66831 #else /* SCTP_DEBUG */
66832
66833-#define SCTP_DEBUG_PRINTK(whatever...)
66834-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
66835-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
66836+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
66837+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
66838+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
66839 #define SCTP_ENABLE_DEBUG
66840 #define SCTP_DISABLE_DEBUG
66841 #define SCTP_ASSERT(expr, str, func)
66842diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
66843index 64158aa..b65533c 100644
66844--- a/include/net/sctp/structs.h
66845+++ b/include/net/sctp/structs.h
66846@@ -496,7 +496,7 @@ struct sctp_af {
66847 int sockaddr_len;
66848 sa_family_t sa_family;
66849 struct list_head list;
66850-};
66851+} __do_const;
66852
66853 struct sctp_af *sctp_get_af_specific(sa_family_t);
66854 int sctp_register_af(struct sctp_af *);
66855@@ -516,7 +516,7 @@ struct sctp_pf {
66856 struct sctp_association *asoc);
66857 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
66858 struct sctp_af *af;
66859-};
66860+} __do_const;
66861
66862
66863 /* Structure to track chunk fragments that have been acked, but peer
66864diff --git a/include/net/sock.h b/include/net/sock.h
66865index c945fba..e162e56 100644
66866--- a/include/net/sock.h
66867+++ b/include/net/sock.h
66868@@ -304,7 +304,7 @@ struct sock {
66869 #ifdef CONFIG_RPS
66870 __u32 sk_rxhash;
66871 #endif
66872- atomic_t sk_drops;
66873+ atomic_unchecked_t sk_drops;
66874 int sk_rcvbuf;
66875
66876 struct sk_filter __rcu *sk_filter;
66877@@ -1763,7 +1763,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
66878 }
66879
66880 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
66881- char __user *from, char *to,
66882+ char __user *from, unsigned char *to,
66883 int copy, int offset)
66884 {
66885 if (skb->ip_summed == CHECKSUM_NONE) {
66886@@ -2022,7 +2022,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
66887 }
66888 }
66889
66890-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
66891+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
66892
66893 /**
66894 * sk_page_frag - return an appropriate page_frag
66895diff --git a/include/net/tcp.h b/include/net/tcp.h
66896index 4af45e3..af97861 100644
66897--- a/include/net/tcp.h
66898+++ b/include/net/tcp.h
66899@@ -531,7 +531,7 @@ extern void tcp_retransmit_timer(struct sock *sk);
66900 extern void tcp_xmit_retransmit_queue(struct sock *);
66901 extern void tcp_simple_retransmit(struct sock *);
66902 extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
66903-extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
66904+extern int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
66905
66906 extern void tcp_send_probe0(struct sock *);
66907 extern void tcp_send_partial(struct sock *);
66908@@ -702,8 +702,8 @@ struct tcp_skb_cb {
66909 struct inet6_skb_parm h6;
66910 #endif
66911 } header; /* For incoming frames */
66912- __u32 seq; /* Starting sequence number */
66913- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
66914+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
66915+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
66916 __u32 when; /* used to compute rtt's */
66917 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
66918
66919@@ -717,7 +717,7 @@ struct tcp_skb_cb {
66920
66921 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
66922 /* 1 byte hole */
66923- __u32 ack_seq; /* Sequence number ACK'd */
66924+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
66925 };
66926
66927 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
66928diff --git a/include/net/xfrm.h b/include/net/xfrm.h
66929index 63445ed..74ef61d 100644
66930--- a/include/net/xfrm.h
66931+++ b/include/net/xfrm.h
66932@@ -423,7 +423,7 @@ struct xfrm_mode {
66933 struct module *owner;
66934 unsigned int encap;
66935 int flags;
66936-};
66937+} __do_const;
66938
66939 /* Flags for xfrm_mode. */
66940 enum {
66941@@ -514,7 +514,7 @@ struct xfrm_policy {
66942 struct timer_list timer;
66943
66944 struct flow_cache_object flo;
66945- atomic_t genid;
66946+ atomic_unchecked_t genid;
66947 u32 priority;
66948 u32 index;
66949 struct xfrm_mark mark;
66950diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
66951index 1a046b1..ee0bef0 100644
66952--- a/include/rdma/iw_cm.h
66953+++ b/include/rdma/iw_cm.h
66954@@ -122,7 +122,7 @@ struct iw_cm_verbs {
66955 int backlog);
66956
66957 int (*destroy_listen)(struct iw_cm_id *cm_id);
66958-};
66959+} __no_const;
66960
66961 /**
66962 * iw_create_cm_id - Create an IW CM identifier.
66963diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
66964index 399162b..b337f1a 100644
66965--- a/include/scsi/libfc.h
66966+++ b/include/scsi/libfc.h
66967@@ -762,6 +762,7 @@ struct libfc_function_template {
66968 */
66969 void (*disc_stop_final) (struct fc_lport *);
66970 };
66971+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
66972
66973 /**
66974 * struct fc_disc - Discovery context
66975@@ -866,7 +867,7 @@ struct fc_lport {
66976 struct fc_vport *vport;
66977
66978 /* Operational Information */
66979- struct libfc_function_template tt;
66980+ libfc_function_template_no_const tt;
66981 u8 link_up;
66982 u8 qfull;
66983 enum fc_lport_state state;
66984diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
66985index 55367b0..d97bd2a 100644
66986--- a/include/scsi/scsi_device.h
66987+++ b/include/scsi/scsi_device.h
66988@@ -169,9 +169,9 @@ struct scsi_device {
66989 unsigned int max_device_blocked; /* what device_blocked counts down from */
66990 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
66991
66992- atomic_t iorequest_cnt;
66993- atomic_t iodone_cnt;
66994- atomic_t ioerr_cnt;
66995+ atomic_unchecked_t iorequest_cnt;
66996+ atomic_unchecked_t iodone_cnt;
66997+ atomic_unchecked_t ioerr_cnt;
66998
66999 struct device sdev_gendev,
67000 sdev_dev;
67001diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
67002index b797e8f..8e2c3aa 100644
67003--- a/include/scsi/scsi_transport_fc.h
67004+++ b/include/scsi/scsi_transport_fc.h
67005@@ -751,7 +751,8 @@ struct fc_function_template {
67006 unsigned long show_host_system_hostname:1;
67007
67008 unsigned long disable_target_scan:1;
67009-};
67010+} __do_const;
67011+typedef struct fc_function_template __no_const fc_function_template_no_const;
67012
67013
67014 /**
67015diff --git a/include/sound/soc.h b/include/sound/soc.h
67016index 91244a0..89ca1a7 100644
67017--- a/include/sound/soc.h
67018+++ b/include/sound/soc.h
67019@@ -769,7 +769,7 @@ struct snd_soc_codec_driver {
67020 /* probe ordering - for components with runtime dependencies */
67021 int probe_order;
67022 int remove_order;
67023-};
67024+} __do_const;
67025
67026 /* SoC platform interface */
67027 struct snd_soc_platform_driver {
67028@@ -815,7 +815,7 @@ struct snd_soc_platform_driver {
67029 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
67030 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
67031 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
67032-};
67033+} __do_const;
67034
67035 struct snd_soc_platform {
67036 const char *name;
67037diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
67038index fca8bbe..c0242ea 100644
67039--- a/include/target/target_core_base.h
67040+++ b/include/target/target_core_base.h
67041@@ -760,7 +760,7 @@ struct se_device {
67042 spinlock_t stats_lock;
67043 /* Active commands on this virtual SE device */
67044 atomic_t simple_cmds;
67045- atomic_t dev_ordered_id;
67046+ atomic_unchecked_t dev_ordered_id;
67047 atomic_t dev_ordered_sync;
67048 atomic_t dev_qf_count;
67049 struct se_obj dev_obj;
67050diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
67051new file mode 100644
67052index 0000000..fb634b7
67053--- /dev/null
67054+++ b/include/trace/events/fs.h
67055@@ -0,0 +1,53 @@
67056+#undef TRACE_SYSTEM
67057+#define TRACE_SYSTEM fs
67058+
67059+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
67060+#define _TRACE_FS_H
67061+
67062+#include <linux/fs.h>
67063+#include <linux/tracepoint.h>
67064+
67065+TRACE_EVENT(do_sys_open,
67066+
67067+ TP_PROTO(const char *filename, int flags, int mode),
67068+
67069+ TP_ARGS(filename, flags, mode),
67070+
67071+ TP_STRUCT__entry(
67072+ __string( filename, filename )
67073+ __field( int, flags )
67074+ __field( int, mode )
67075+ ),
67076+
67077+ TP_fast_assign(
67078+ __assign_str(filename, filename);
67079+ __entry->flags = flags;
67080+ __entry->mode = mode;
67081+ ),
67082+
67083+ TP_printk("\"%s\" %x %o",
67084+ __get_str(filename), __entry->flags, __entry->mode)
67085+);
67086+
67087+TRACE_EVENT(open_exec,
67088+
67089+ TP_PROTO(const char *filename),
67090+
67091+ TP_ARGS(filename),
67092+
67093+ TP_STRUCT__entry(
67094+ __string( filename, filename )
67095+ ),
67096+
67097+ TP_fast_assign(
67098+ __assign_str(filename, filename);
67099+ ),
67100+
67101+ TP_printk("\"%s\"",
67102+ __get_str(filename))
67103+);
67104+
67105+#endif /* _TRACE_FS_H */
67106+
67107+/* This part must be outside protection */
67108+#include <trace/define_trace.h>
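
The two events only become visible once some translation unit defines CREATE_TRACE_POINTS and calls them; plausible call sites, assuming the fs/ hunks elsewhere in this patch:

/* in do_sys_open():	trace_do_sys_open(tmp->name, flags, mode);
 * in open_exec():	trace_open_exec(name);
 */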
67109diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
67110index 1c09820..7f5ec79 100644
67111--- a/include/trace/events/irq.h
67112+++ b/include/trace/events/irq.h
67113@@ -36,7 +36,7 @@ struct softirq_action;
67114 */
67115 TRACE_EVENT(irq_handler_entry,
67116
67117- TP_PROTO(int irq, struct irqaction *action),
67118+ TP_PROTO(int irq, const struct irqaction *action),
67119
67120 TP_ARGS(irq, action),
67121
67122@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
67123 */
67124 TRACE_EVENT(irq_handler_exit,
67125
67126- TP_PROTO(int irq, struct irqaction *action, int ret),
67127+ TP_PROTO(int irq, const struct irqaction *action, int ret),
67128
67129 TP_ARGS(irq, action, ret),
67130
67131diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
67132index 7caf44c..23c6f27 100644
67133--- a/include/uapi/linux/a.out.h
67134+++ b/include/uapi/linux/a.out.h
67135@@ -39,6 +39,14 @@ enum machine_type {
67136 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
67137 };
67138
67139+/* Constants for the N_FLAGS field */
67140+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
67141+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
67142+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
67143+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
67144+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
67145+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
67146+
67147 #if !defined (N_MAGIC)
67148 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
67149 #endif
67150diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
67151index d876736..b36014e 100644
67152--- a/include/uapi/linux/byteorder/little_endian.h
67153+++ b/include/uapi/linux/byteorder/little_endian.h
67154@@ -42,51 +42,51 @@
67155
67156 static inline __le64 __cpu_to_le64p(const __u64 *p)
67157 {
67158- return (__force __le64)*p;
67159+ return (__force const __le64)*p;
67160 }
67161 static inline __u64 __le64_to_cpup(const __le64 *p)
67162 {
67163- return (__force __u64)*p;
67164+ return (__force const __u64)*p;
67165 }
67166 static inline __le32 __cpu_to_le32p(const __u32 *p)
67167 {
67168- return (__force __le32)*p;
67169+ return (__force const __le32)*p;
67170 }
67171 static inline __u32 __le32_to_cpup(const __le32 *p)
67172 {
67173- return (__force __u32)*p;
67174+ return (__force const __u32)*p;
67175 }
67176 static inline __le16 __cpu_to_le16p(const __u16 *p)
67177 {
67178- return (__force __le16)*p;
67179+ return (__force const __le16)*p;
67180 }
67181 static inline __u16 __le16_to_cpup(const __le16 *p)
67182 {
67183- return (__force __u16)*p;
67184+ return (__force const __u16)*p;
67185 }
67186 static inline __be64 __cpu_to_be64p(const __u64 *p)
67187 {
67188- return (__force __be64)__swab64p(p);
67189+ return (__force const __be64)__swab64p(p);
67190 }
67191 static inline __u64 __be64_to_cpup(const __be64 *p)
67192 {
67193- return __swab64p((__u64 *)p);
67194+ return __swab64p((const __u64 *)p);
67195 }
67196 static inline __be32 __cpu_to_be32p(const __u32 *p)
67197 {
67198- return (__force __be32)__swab32p(p);
67199+ return (__force const __be32)__swab32p(p);
67200 }
67201 static inline __u32 __be32_to_cpup(const __be32 *p)
67202 {
67203- return __swab32p((__u32 *)p);
67204+ return __swab32p((const __u32 *)p);
67205 }
67206 static inline __be16 __cpu_to_be16p(const __u16 *p)
67207 {
67208- return (__force __be16)__swab16p(p);
67209+ return (__force const __be16)__swab16p(p);
67210 }
67211 static inline __u16 __be16_to_cpup(const __be16 *p)
67212 {
67213- return __swab16p((__u16 *)p);
67214+ return __swab16p((const __u16 *)p);
67215 }
67216 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
67217 #define __le64_to_cpus(x) do { (void)(x); } while (0)
67218diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
67219index 126a817..d522bd1 100644
67220--- a/include/uapi/linux/elf.h
67221+++ b/include/uapi/linux/elf.h
67222@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
67223 #define PT_GNU_EH_FRAME 0x6474e550
67224
67225 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
67226+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
67227+
67228+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
67229+
67230+/* Constants for the e_flags field */
67231+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
67232+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
67233+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
67234+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
67235+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
67236+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
67237
67238 /*
67239 * Extended Numbering
67240@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
67241 #define DT_DEBUG 21
67242 #define DT_TEXTREL 22
67243 #define DT_JMPREL 23
67244+#define DT_FLAGS 30
67245+ #define DF_TEXTREL 0x00000004
67246 #define DT_ENCODING 32
67247 #define OLD_DT_LOOS 0x60000000
67248 #define DT_LOOS 0x6000000d
67249@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
67250 #define PF_W 0x2
67251 #define PF_X 0x1
67252
67253+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
67254+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
67255+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
67256+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
67257+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
67258+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
67259+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
67260+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
67261+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
67262+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
67263+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
67264+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
67265+
67266 typedef struct elf32_phdr{
67267 Elf32_Word p_type;
67268 Elf32_Off p_offset;
67269@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
67270 #define EI_OSABI 7
67271 #define EI_PAD 8
67272
67273+#define EI_PAX 14
67274+
67275 #define ELFMAG0 0x7f /* EI_MAG */
67276 #define ELFMAG1 'E'
67277 #define ELFMAG2 'L'
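The elf.h hunk reserves PT_PAX_FLAGS in the PT_LOOS range and defines the PF_*/PF_NO* bit pairs PaX stores in that program header's p_flags. A hedged sketch of how a loader-side scan might locate the entry (the constant comes from the hunk; the function and its caller-supplied phdr table are illustrative):

#include <elf.h>
#include <stdint.h>

#ifndef PT_PAX_FLAGS
#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)      /* value from the hunk */
#endif

/* return the p_flags word of the PT_PAX_FLAGS entry, or 0 if absent */
static uint32_t find_pax_flags(const Elf32_Phdr *phdr, unsigned int phnum)
{
        unsigned int i;

        for (i = 0; i < phnum; i++)
                if (phdr[i].p_type == PT_PAX_FLAGS)
                        return phdr[i].p_flags;
        return 0;
}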
67278diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
67279index aa169c4..6a2771d 100644
67280--- a/include/uapi/linux/personality.h
67281+++ b/include/uapi/linux/personality.h
67282@@ -30,6 +30,7 @@ enum {
67283 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
67284 ADDR_NO_RANDOMIZE | \
67285 ADDR_COMPAT_LAYOUT | \
67286+ ADDR_LIMIT_3GB | \
67287 MMAP_PAGE_ZERO)
67288
67289 /*
67290diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
67291index 7530e74..e714828 100644
67292--- a/include/uapi/linux/screen_info.h
67293+++ b/include/uapi/linux/screen_info.h
67294@@ -43,7 +43,8 @@ struct screen_info {
67295 __u16 pages; /* 0x32 */
67296 __u16 vesa_attributes; /* 0x34 */
67297 __u32 capabilities; /* 0x36 */
67298- __u8 _reserved[6]; /* 0x3a */
67299+ __u16 vesapm_size; /* 0x3a */
67300+ __u8 _reserved[4]; /* 0x3c */
67301 } __attribute__((packed));
67302
67303 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
67304diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
67305index 6d67213..8dab561 100644
67306--- a/include/uapi/linux/sysctl.h
67307+++ b/include/uapi/linux/sysctl.h
67308@@ -155,7 +155,11 @@ enum
67309 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
67310 };
67311
67312-
67313+#ifdef CONFIG_PAX_SOFTMODE
67314+enum {
67315+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
67316+};
67317+#endif
67318
67319 /* CTL_VM names: */
67320 enum
67321diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
67322index 26607bd..588b65f 100644
67323--- a/include/uapi/linux/xattr.h
67324+++ b/include/uapi/linux/xattr.h
67325@@ -60,5 +60,9 @@
67326 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
67327 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
67328
67329+/* User namespace */
67330+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
67331+#define XATTR_PAX_FLAGS_SUFFIX "flags"
67332+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
67333
67334 #endif /* _UAPI_LINUX_XATTR_H */
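The xattr.h hunk fixes "user.pax.flags" as the extended attribute under which per-file PaX flags can be stored. From userspace the attribute is reachable with plain getxattr(2); a short sketch (the target path is illustrative, and the letter-flag format in the comment is how tools such as setfattr conventionally write it):

#include <stdio.h>
#include <sys/types.h>
#include <sys/xattr.h>

int main(void)
{
        char buf[32];
        /* XATTR_NAME_PAX_FLAGS expands to "user.pax.flags" per the hunk */
        ssize_t len = getxattr("/usr/bin/some-binary", "user.pax.flags",
                               buf, sizeof(buf) - 1);
        if (len < 0) {
                perror("getxattr");
                return 1;
        }
        buf[len] = '\0';
        printf("pax flags: %s\n", buf);         /* e.g. "em" */
        return 0;
}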
67335diff --git a/include/video/omapdss.h b/include/video/omapdss.h
67336index 3729173..7b2df7e 100644
67337--- a/include/video/omapdss.h
67338+++ b/include/video/omapdss.h
67339@@ -323,7 +323,7 @@ struct omap_dss_board_info {
67340 int (*dsi_enable_pads)(int dsi_id, unsigned lane_mask);
67341 void (*dsi_disable_pads)(int dsi_id, unsigned lane_mask);
67342 int (*set_min_bus_tput)(struct device *dev, unsigned long r);
67343-};
67344+} __do_const;
67345
67346 /* Init with the board info */
67347 extern int omap_display_init(struct omap_dss_board_info *board_data);
67348diff --git a/include/video/udlfb.h b/include/video/udlfb.h
67349index f9466fa..f4e2b81 100644
67350--- a/include/video/udlfb.h
67351+++ b/include/video/udlfb.h
67352@@ -53,10 +53,10 @@ struct dlfb_data {
67353 u32 pseudo_palette[256];
67354 int blank_mode; /*one of FB_BLANK_ */
67355 /* blit-only rendering path metrics, exposed through sysfs */
67356- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
67357- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
67358- atomic_t bytes_sent; /* to usb, after compression including overhead */
67359- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
67360+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
67361+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
67362+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
67363+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
67364 };
67365
67366 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
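The udlfb.h change is the recurring atomic_unchecked_t pattern: statistics counters that are allowed to wrap (byte counts, cycle counts) are moved off atomic_t so PaX's REFCOUNT overflow detection does not treat their wraparound as a refcount bug. On a kernel without PAX_REFCOUNT the two types behave identically, as in this illustrative driver-style counter:

#include <linux/atomic.h>

/* Wrap-tolerant throughput counter. Under PAX_REFCOUNT this would be
 * atomic_unchecked_t / atomic_add_unchecked() so deliberate overflow
 * is not flagged; plain atomic_t shown here for a vanilla kernel. */
static atomic_t bytes_rendered = ATOMIC_INIT(0);

static void note_render(unsigned int nbytes)
{
        atomic_add(nbytes, &bytes_rendered);
}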
67367diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
67368index 0993a22..32ba2fe 100644
67369--- a/include/video/uvesafb.h
67370+++ b/include/video/uvesafb.h
67371@@ -177,6 +177,7 @@ struct uvesafb_par {
67372 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
67373 u8 pmi_setpal; /* PMI for palette changes */
67374 u16 *pmi_base; /* protected mode interface location */
67375+ u8 *pmi_code; /* protected mode code location */
67376 void *pmi_start;
67377 void *pmi_pal;
67378 u8 *vbe_state_orig; /*
67379diff --git a/init/Kconfig b/init/Kconfig
67380index 6fdd6e3..5b01610 100644
67381--- a/init/Kconfig
67382+++ b/init/Kconfig
67383@@ -925,6 +925,7 @@ endif # CGROUPS
67384
67385 config CHECKPOINT_RESTORE
67386 bool "Checkpoint/restore support" if EXPERT
67387+ depends on !GRKERNSEC
67388 default n
67389 help
67390 	  Enables additional kernel features for the sake of checkpoint/restore.
67391@@ -1016,6 +1017,8 @@ config UIDGID_CONVERTED
67392 depends on OCFS2_FS = n
67393 depends on XFS_FS = n
67394
67395+ depends on GRKERNSEC = n
67396+
67397 config UIDGID_STRICT_TYPE_CHECKS
67398 bool "Require conversions between uid/gids and their internal representation"
67399 depends on UIDGID_CONVERTED
67400@@ -1405,7 +1408,7 @@ config SLUB_DEBUG
67401
67402 config COMPAT_BRK
67403 bool "Disable heap randomization"
67404- default y
67405+ default n
67406 help
67407 Randomizing heap placement makes heap exploits harder, but it
67408 also breaks ancient binaries (including anything libc5 based).
67409@@ -1648,7 +1651,7 @@ config INIT_ALL_POSSIBLE
67410 config STOP_MACHINE
67411 bool
67412 default y
67413- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
67414+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
67415 help
67416 Need stop_machine() primitive.
67417
67418diff --git a/init/Makefile b/init/Makefile
67419index 7bc47ee..6da2dc7 100644
67420--- a/init/Makefile
67421+++ b/init/Makefile
67422@@ -2,6 +2,9 @@
67423 # Makefile for the linux kernel.
67424 #
67425
67426+ccflags-y := $(GCC_PLUGINS_CFLAGS)
67427+asflags-y := $(GCC_PLUGINS_AFLAGS)
67428+
67429 obj-y := main.o version.o mounts.o
67430 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
67431 obj-y += noinitramfs.o
67432diff --git a/init/do_mounts.c b/init/do_mounts.c
67433index f8a6642..4e5ee1b 100644
67434--- a/init/do_mounts.c
67435+++ b/init/do_mounts.c
67436@@ -336,11 +336,11 @@ static void __init get_fs_names(char *page)
67437 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
67438 {
67439 struct super_block *s;
67440- int err = sys_mount(name, "/root", fs, flags, data);
67441+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
67442 if (err)
67443 return err;
67444
67445- sys_chdir("/root");
67446+ sys_chdir((const char __force_user *)"/root");
67447 s = current->fs->pwd.dentry->d_sb;
67448 ROOT_DEV = s->s_dev;
67449 printk(KERN_INFO
67450@@ -461,18 +461,18 @@ void __init change_floppy(char *fmt, ...)
67451 va_start(args, fmt);
67452 vsprintf(buf, fmt, args);
67453 va_end(args);
67454- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
67455+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
67456 if (fd >= 0) {
67457 sys_ioctl(fd, FDEJECT, 0);
67458 sys_close(fd);
67459 }
67460 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
67461- fd = sys_open("/dev/console", O_RDWR, 0);
67462+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
67463 if (fd >= 0) {
67464 sys_ioctl(fd, TCGETS, (long)&termios);
67465 termios.c_lflag &= ~ICANON;
67466 sys_ioctl(fd, TCSETSF, (long)&termios);
67467- sys_read(fd, &c, 1);
67468+ sys_read(fd, (char __user *)&c, 1);
67469 termios.c_lflag |= ICANON;
67470 sys_ioctl(fd, TCSETSF, (long)&termios);
67471 sys_close(fd);
67472@@ -566,6 +566,6 @@ void __init prepare_namespace(void)
67473 mount_root();
67474 out:
67475 devtmpfs_mount("dev");
67476- sys_mount(".", "/", NULL, MS_MOVE, NULL);
67477- sys_chroot(".");
67478+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
67479+ sys_chroot((const char __force_user *)".");
67480 }
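All the (char __force_user *) casts in this and the following init/ hunks serve one purpose: under grsecurity's stricter sparse configuration the sys_* prototypes take __user pointers, and early-boot code that intentionally hands them kernel addresses must say so explicitly. A minimal sketch of what sparse sees (the macro bodies follow include/linux/compiler.h; __force_user itself is grsecurity's shorthand for __force __user, and the function names are illustrative):

#ifdef __CHECKER__                      /* defined while sparse runs */
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

long sys_like_chdir(const char __user *path);   /* wants a user pointer */

static void early_boot_helper(void)
{
        /* a kernel string crosses the user-pointer boundary on purpose;
         * without __force, sparse reports an address-space mismatch */
        sys_like_chdir((const char __force __user *)"/root");
}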
67481diff --git a/init/do_mounts.h b/init/do_mounts.h
67482index f5b978a..69dbfe8 100644
67483--- a/init/do_mounts.h
67484+++ b/init/do_mounts.h
67485@@ -15,15 +15,15 @@ extern int root_mountflags;
67486
67487 static inline int create_dev(char *name, dev_t dev)
67488 {
67489- sys_unlink(name);
67490- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
67491+ sys_unlink((char __force_user *)name);
67492+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
67493 }
67494
67495 #if BITS_PER_LONG == 32
67496 static inline u32 bstat(char *name)
67497 {
67498 struct stat64 stat;
67499- if (sys_stat64(name, &stat) != 0)
67500+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
67501 return 0;
67502 if (!S_ISBLK(stat.st_mode))
67503 return 0;
67504@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
67505 static inline u32 bstat(char *name)
67506 {
67507 struct stat stat;
67508- if (sys_newstat(name, &stat) != 0)
67509+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
67510 return 0;
67511 if (!S_ISBLK(stat.st_mode))
67512 return 0;
67513diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
67514index 5e4ded5..aa3cd7e 100644
67515--- a/init/do_mounts_initrd.c
67516+++ b/init/do_mounts_initrd.c
67517@@ -54,8 +54,8 @@ static void __init handle_initrd(void)
67518 create_dev("/dev/root.old", Root_RAM0);
67519 /* mount initrd on rootfs' /root */
67520 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
67521- sys_mkdir("/old", 0700);
67522- sys_chdir("/old");
67523+ sys_mkdir((const char __force_user *)"/old", 0700);
67524+ sys_chdir((const char __force_user *)"/old");
67525
67526 /*
67527 * In case that a resume from disk is carried out by linuxrc or one of
67528@@ -69,31 +69,31 @@ static void __init handle_initrd(void)
67529 current->flags &= ~PF_FREEZER_SKIP;
67530
67531 /* move initrd to rootfs' /old */
67532- sys_mount("..", ".", NULL, MS_MOVE, NULL);
67533+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
67534 /* switch root and cwd back to / of rootfs */
67535- sys_chroot("..");
67536+ sys_chroot((const char __force_user *)"..");
67537
67538 if (new_decode_dev(real_root_dev) == Root_RAM0) {
67539- sys_chdir("/old");
67540+ sys_chdir((const char __force_user *)"/old");
67541 return;
67542 }
67543
67544- sys_chdir("/");
67545+ sys_chdir((const char __force_user *)"/");
67546 ROOT_DEV = new_decode_dev(real_root_dev);
67547 mount_root();
67548
67549 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
67550- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
67551+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
67552 if (!error)
67553 printk("okay\n");
67554 else {
67555- int fd = sys_open("/dev/root.old", O_RDWR, 0);
67556+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
67557 if (error == -ENOENT)
67558 printk("/initrd does not exist. Ignored.\n");
67559 else
67560 printk("failed\n");
67561 printk(KERN_NOTICE "Unmounting old root\n");
67562- sys_umount("/old", MNT_DETACH);
67563+ sys_umount((char __force_user *)"/old", MNT_DETACH);
67564 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
67565 if (fd < 0) {
67566 error = fd;
67567@@ -116,11 +116,11 @@ int __init initrd_load(void)
67568 * mounted in the normal path.
67569 */
67570 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
67571- sys_unlink("/initrd.image");
67572+ sys_unlink((const char __force_user *)"/initrd.image");
67573 handle_initrd();
67574 return 1;
67575 }
67576 }
67577- sys_unlink("/initrd.image");
67578+ sys_unlink((const char __force_user *)"/initrd.image");
67579 return 0;
67580 }
67581diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
67582index 8cb6db5..d729f50 100644
67583--- a/init/do_mounts_md.c
67584+++ b/init/do_mounts_md.c
67585@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
67586 partitioned ? "_d" : "", minor,
67587 md_setup_args[ent].device_names);
67588
67589- fd = sys_open(name, 0, 0);
67590+ fd = sys_open((char __force_user *)name, 0, 0);
67591 if (fd < 0) {
67592 printk(KERN_ERR "md: open failed - cannot start "
67593 "array %s\n", name);
67594@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
67595 * array without it
67596 */
67597 sys_close(fd);
67598- fd = sys_open(name, 0, 0);
67599+ fd = sys_open((char __force_user *)name, 0, 0);
67600 sys_ioctl(fd, BLKRRPART, 0);
67601 }
67602 sys_close(fd);
67603@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
67604
67605 wait_for_device_probe();
67606
67607- fd = sys_open("/dev/md0", 0, 0);
67608+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
67609 if (fd >= 0) {
67610 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
67611 sys_close(fd);
67612diff --git a/init/init_task.c b/init/init_task.c
67613index 8b2f399..f0797c9 100644
67614--- a/init/init_task.c
67615+++ b/init/init_task.c
67616@@ -20,5 +20,9 @@ EXPORT_SYMBOL(init_task);
67617 * Initial thread structure. Alignment of this is handled by a special
67618 * linker map entry.
67619 */
67620+#ifdef CONFIG_X86
67621+union thread_union init_thread_union __init_task_data;
67622+#else
67623 union thread_union init_thread_union __init_task_data =
67624 { INIT_THREAD_INFO(init_task) };
67625+#endif
67626diff --git a/init/initramfs.c b/init/initramfs.c
67627index 84c6bf1..8899338 100644
67628--- a/init/initramfs.c
67629+++ b/init/initramfs.c
67630@@ -84,7 +84,7 @@ static void __init free_hash(void)
67631 }
67632 }
67633
67634-static long __init do_utime(char *filename, time_t mtime)
67635+static long __init do_utime(char __force_user *filename, time_t mtime)
67636 {
67637 struct timespec t[2];
67638
67639@@ -119,7 +119,7 @@ static void __init dir_utime(void)
67640 struct dir_entry *de, *tmp;
67641 list_for_each_entry_safe(de, tmp, &dir_list, list) {
67642 list_del(&de->list);
67643- do_utime(de->name, de->mtime);
67644+ do_utime((char __force_user *)de->name, de->mtime);
67645 kfree(de->name);
67646 kfree(de);
67647 }
67648@@ -281,7 +281,7 @@ static int __init maybe_link(void)
67649 if (nlink >= 2) {
67650 char *old = find_link(major, minor, ino, mode, collected);
67651 if (old)
67652- return (sys_link(old, collected) < 0) ? -1 : 1;
67653+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
67654 }
67655 return 0;
67656 }
67657@@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
67658 {
67659 struct stat st;
67660
67661- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
67662+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
67663 if (S_ISDIR(st.st_mode))
67664- sys_rmdir(path);
67665+ sys_rmdir((char __force_user *)path);
67666 else
67667- sys_unlink(path);
67668+ sys_unlink((char __force_user *)path);
67669 }
67670 }
67671
67672@@ -315,7 +315,7 @@ static int __init do_name(void)
67673 int openflags = O_WRONLY|O_CREAT;
67674 if (ml != 1)
67675 openflags |= O_TRUNC;
67676- wfd = sys_open(collected, openflags, mode);
67677+ wfd = sys_open((char __force_user *)collected, openflags, mode);
67678
67679 if (wfd >= 0) {
67680 sys_fchown(wfd, uid, gid);
67681@@ -327,17 +327,17 @@ static int __init do_name(void)
67682 }
67683 }
67684 } else if (S_ISDIR(mode)) {
67685- sys_mkdir(collected, mode);
67686- sys_chown(collected, uid, gid);
67687- sys_chmod(collected, mode);
67688+ sys_mkdir((char __force_user *)collected, mode);
67689+ sys_chown((char __force_user *)collected, uid, gid);
67690+ sys_chmod((char __force_user *)collected, mode);
67691 dir_add(collected, mtime);
67692 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
67693 S_ISFIFO(mode) || S_ISSOCK(mode)) {
67694 if (maybe_link() == 0) {
67695- sys_mknod(collected, mode, rdev);
67696- sys_chown(collected, uid, gid);
67697- sys_chmod(collected, mode);
67698- do_utime(collected, mtime);
67699+ sys_mknod((char __force_user *)collected, mode, rdev);
67700+ sys_chown((char __force_user *)collected, uid, gid);
67701+ sys_chmod((char __force_user *)collected, mode);
67702+ do_utime((char __force_user *)collected, mtime);
67703 }
67704 }
67705 return 0;
67706@@ -346,15 +346,15 @@ static int __init do_name(void)
67707 static int __init do_copy(void)
67708 {
67709 if (count >= body_len) {
67710- sys_write(wfd, victim, body_len);
67711+ sys_write(wfd, (char __force_user *)victim, body_len);
67712 sys_close(wfd);
67713- do_utime(vcollected, mtime);
67714+ do_utime((char __force_user *)vcollected, mtime);
67715 kfree(vcollected);
67716 eat(body_len);
67717 state = SkipIt;
67718 return 0;
67719 } else {
67720- sys_write(wfd, victim, count);
67721+ sys_write(wfd, (char __force_user *)victim, count);
67722 body_len -= count;
67723 eat(count);
67724 return 1;
67725@@ -365,9 +365,9 @@ static int __init do_symlink(void)
67726 {
67727 collected[N_ALIGN(name_len) + body_len] = '\0';
67728 clean_path(collected, 0);
67729- sys_symlink(collected + N_ALIGN(name_len), collected);
67730- sys_lchown(collected, uid, gid);
67731- do_utime(collected, mtime);
67732+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
67733+ sys_lchown((char __force_user *)collected, uid, gid);
67734+ do_utime((char __force_user *)collected, mtime);
67735 state = SkipIt;
67736 next_state = Reset;
67737 return 0;
67738diff --git a/init/main.c b/init/main.c
67739index e33e09d..b699703 100644
67740--- a/init/main.c
67741+++ b/init/main.c
67742@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
67743 extern void tc_init(void);
67744 #endif
67745
67746+extern void grsecurity_init(void);
67747+
67748 /*
67749 * Debug helper: via this flag we know that we are in 'early bootup code'
67750 * where only the boot processor is running with IRQ disabled. This means
67751@@ -149,6 +151,61 @@ static int __init set_reset_devices(char *str)
67752
67753 __setup("reset_devices", set_reset_devices);
67754
67755+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67756+int grsec_proc_gid = CONFIG_GRKERNSEC_PROC_GID;
67757+static int __init setup_grsec_proc_gid(char *str)
67758+{
67759+ grsec_proc_gid = (int)simple_strtol(str, NULL, 0);
67760+ return 1;
67761+}
67762+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
67763+#endif
67764+
67765+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
67766+extern char pax_enter_kernel_user[];
67767+extern char pax_exit_kernel_user[];
67768+extern pgdval_t clone_pgd_mask;
67769+#endif
67770+
67771+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
67772+static int __init setup_pax_nouderef(char *str)
67773+{
67774+#ifdef CONFIG_X86_32
67775+ unsigned int cpu;
67776+ struct desc_struct *gdt;
67777+
67778+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
67779+ gdt = get_cpu_gdt_table(cpu);
67780+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
67781+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
67782+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
67783+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
67784+ }
67785+ loadsegment(ds, __KERNEL_DS);
67786+ loadsegment(es, __KERNEL_DS);
67787+ loadsegment(ss, __KERNEL_DS);
67788+#else
67789+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
67790+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
67791+ clone_pgd_mask = ~(pgdval_t)0UL;
67792+#endif
67793+
67794+ return 0;
67795+}
67796+early_param("pax_nouderef", setup_pax_nouderef);
67797+#endif
67798+
67799+#ifdef CONFIG_PAX_SOFTMODE
67800+int pax_softmode;
67801+
67802+static int __init setup_pax_softmode(char *str)
67803+{
67804+ get_option(&str, &pax_softmode);
67805+ return 1;
67806+}
67807+__setup("pax_softmode=", setup_pax_softmode);
67808+#endif
67809+
67810 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
67811 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
67812 static const char *panic_later, *panic_param;
67813@@ -681,6 +738,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
67814 {
67815 int count = preempt_count();
67816 int ret;
67817+ const char *msg1 = "", *msg2 = "";
67818
67819 if (initcall_debug)
67820 ret = do_one_initcall_debug(fn);
67821@@ -693,15 +751,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
67822 sprintf(msgbuf, "error code %d ", ret);
67823
67824 if (preempt_count() != count) {
67825- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
67826+ msg1 = " preemption imbalance";
67827 preempt_count() = count;
67828 }
67829 if (irqs_disabled()) {
67830- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
67831+ msg2 = " disabled interrupts";
67832 local_irq_enable();
67833 }
67834- if (msgbuf[0]) {
67835- printk("initcall %pF returned with %s\n", fn, msgbuf);
67836+ if (msgbuf[0] || *msg1 || *msg2) {
67837+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
67838 }
67839
67840 return ret;
67841@@ -743,6 +801,10 @@ static char *initcall_level_names[] __initdata = {
67842 "late",
67843 };
67844
67845+#ifdef CONFIG_PAX_LATENT_ENTROPY
67846+u64 latent_entropy;
67847+#endif
67848+
67849 static void __init do_initcall_level(int level)
67850 {
67851 extern const struct kernel_param __start___param[], __stop___param[];
67852@@ -755,8 +817,14 @@ static void __init do_initcall_level(int level)
67853 level, level,
67854 &repair_env_string);
67855
67856- for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
67857+ for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) {
67858 do_one_initcall(*fn);
67859+
67860+#ifdef CONFIG_PAX_LATENT_ENTROPY
67861+ add_device_randomness(&latent_entropy, sizeof(latent_entropy));
67862+#endif
67863+
67864+ }
67865 }
67866
67867 static void __init do_initcalls(void)
67868@@ -790,8 +858,14 @@ static void __init do_pre_smp_initcalls(void)
67869 {
67870 initcall_t *fn;
67871
67872- for (fn = __initcall_start; fn < __initcall0_start; fn++)
67873+ for (fn = __initcall_start; fn < __initcall0_start; fn++) {
67874 do_one_initcall(*fn);
67875+
67876+#ifdef CONFIG_PAX_LATENT_ENTROPY
67877+ add_device_randomness(&latent_entropy, sizeof(latent_entropy));
67878+#endif
67879+
67880+ }
67881 }
67882
67883 static int run_init_process(const char *init_filename)
67884@@ -876,7 +950,7 @@ static void __init kernel_init_freeable(void)
67885 do_basic_setup();
67886
67887 /* Open the /dev/console on the rootfs, this should never fail */
67888- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
67889+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
67890 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
67891
67892 (void) sys_dup(0);
67893@@ -889,11 +963,13 @@ static void __init kernel_init_freeable(void)
67894 if (!ramdisk_execute_command)
67895 ramdisk_execute_command = "/init";
67896
67897- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
67898+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
67899 ramdisk_execute_command = NULL;
67900 prepare_namespace();
67901 }
67902
67903+ grsecurity_init();
67904+
67905 /*
67906 * Ok, we have completed the initial bootup, and
67907 * we're essentially up and running. Get rid of the
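The latent-entropy hunks work in two halves: the gcc plugin perturbs the global latent_entropy as instrumented functions execute, and the loops above fold the accumulator into the input pool after each initcall. add_device_randomness() is the fitting interface because it mixes bytes in without crediting entropy. A hedged sketch of the mixing half (the extern is the variable added above; the helper name is illustrative):

#include <linux/random.h>
#include <linux/types.h>

extern u64 latent_entropy;      /* updated by the latent_entropy plugin */

static void mix_latent_entropy(void)
{
        /* stirs the bytes into the pool without raising the entropy
         * estimate -- appropriate for data of uncertain quality */
        add_device_randomness(&latent_entropy, sizeof(latent_entropy));
}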
67908diff --git a/ipc/mqueue.c b/ipc/mqueue.c
67909index 71a3ca1..cc330ee 100644
67910--- a/ipc/mqueue.c
67911+++ b/ipc/mqueue.c
67912@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
67913 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
67914 info->attr.mq_msgsize);
67915
67916+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
67917 spin_lock(&mq_lock);
67918 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
67919 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
67920diff --git a/ipc/msg.c b/ipc/msg.c
67921index a71af5a..a90a110 100644
67922--- a/ipc/msg.c
67923+++ b/ipc/msg.c
67924@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
67925 return security_msg_queue_associate(msq, msgflg);
67926 }
67927
67928+static struct ipc_ops msg_ops = {
67929+ .getnew = newque,
67930+ .associate = msg_security,
67931+ .more_checks = NULL
67932+};
67933+
67934 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
67935 {
67936 struct ipc_namespace *ns;
67937- struct ipc_ops msg_ops;
67938 struct ipc_params msg_params;
67939
67940 ns = current->nsproxy->ipc_ns;
67941
67942- msg_ops.getnew = newque;
67943- msg_ops.associate = msg_security;
67944- msg_ops.more_checks = NULL;
67945-
67946 msg_params.key = key;
67947 msg_params.flg = msgflg;
67948
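This msg.c hunk, and the matching sem.c and shm.c hunks below, hoist the ipc_ops table out of the syscall's stack frame into a single file-scope object. The table is built from compile-time constants, so a per-call writable copy buys nothing, while a file-scope instance can later be const-qualified and placed in a read-only section, which is the hardening the patch is working toward. The shape of the change, reduced to essentials (all names here are illustrative stand-ins):

struct ipc_ops_like {
        int (*getnew)(int key);
        int (*associate)(int flg);
};

static int newque_like(int key)   { return key; }
static int security_like(int flg) { return flg; }

/* one immutable, file-scope table instead of a per-call stack copy;
 * the patch stops at static, but const would let it live in .rodata */
static const struct ipc_ops_like msg_ops_like = {
        .getnew    = newque_like,
        .associate = security_like,
};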
67949diff --git a/ipc/sem.c b/ipc/sem.c
67950index 58d31f1..cce7a55 100644
67951--- a/ipc/sem.c
67952+++ b/ipc/sem.c
67953@@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
67954 return 0;
67955 }
67956
67957+static struct ipc_ops sem_ops = {
67958+ .getnew = newary,
67959+ .associate = sem_security,
67960+ .more_checks = sem_more_checks
67961+};
67962+
67963 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
67964 {
67965 struct ipc_namespace *ns;
67966- struct ipc_ops sem_ops;
67967 struct ipc_params sem_params;
67968
67969 ns = current->nsproxy->ipc_ns;
67970@@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
67971 if (nsems < 0 || nsems > ns->sc_semmsl)
67972 return -EINVAL;
67973
67974- sem_ops.getnew = newary;
67975- sem_ops.associate = sem_security;
67976- sem_ops.more_checks = sem_more_checks;
67977-
67978 sem_params.key = key;
67979 sem_params.flg = semflg;
67980 sem_params.u.nsems = nsems;
67981diff --git a/ipc/shm.c b/ipc/shm.c
67982index dff40c9..9450e27 100644
67983--- a/ipc/shm.c
67984+++ b/ipc/shm.c
67985@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
67986 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
67987 #endif
67988
67989+#ifdef CONFIG_GRKERNSEC
67990+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
67991+ const time_t shm_createtime, const uid_t cuid,
67992+ const int shmid);
67993+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
67994+ const time_t shm_createtime);
67995+#endif
67996+
67997 void shm_init_ns(struct ipc_namespace *ns)
67998 {
67999 ns->shm_ctlmax = SHMMAX;
68000@@ -520,6 +528,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
68001 shp->shm_lprid = 0;
68002 shp->shm_atim = shp->shm_dtim = 0;
68003 shp->shm_ctim = get_seconds();
68004+#ifdef CONFIG_GRKERNSEC
68005+ {
68006+ struct timespec timeval;
68007+ do_posix_clock_monotonic_gettime(&timeval);
68008+
68009+ shp->shm_createtime = timeval.tv_sec;
68010+ }
68011+#endif
68012 shp->shm_segsz = size;
68013 shp->shm_nattch = 0;
68014 shp->shm_file = file;
68015@@ -571,18 +587,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
68016 return 0;
68017 }
68018
68019+static struct ipc_ops shm_ops = {
68020+ .getnew = newseg,
68021+ .associate = shm_security,
68022+ .more_checks = shm_more_checks
68023+};
68024+
68025 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
68026 {
68027 struct ipc_namespace *ns;
68028- struct ipc_ops shm_ops;
68029 struct ipc_params shm_params;
68030
68031 ns = current->nsproxy->ipc_ns;
68032
68033- shm_ops.getnew = newseg;
68034- shm_ops.associate = shm_security;
68035- shm_ops.more_checks = shm_more_checks;
68036-
68037 shm_params.key = key;
68038 shm_params.flg = shmflg;
68039 shm_params.u.size = size;
68040@@ -1003,6 +1020,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
68041 f_mode = FMODE_READ | FMODE_WRITE;
68042 }
68043 if (shmflg & SHM_EXEC) {
68044+
68045+#ifdef CONFIG_PAX_MPROTECT
68046+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
68047+ goto out;
68048+#endif
68049+
68050 prot |= PROT_EXEC;
68051 acc_mode |= S_IXUGO;
68052 }
68053@@ -1026,9 +1049,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
68054 if (err)
68055 goto out_unlock;
68056
68057+#ifdef CONFIG_GRKERNSEC
68058+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
68059+ shp->shm_perm.cuid, shmid) ||
68060+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
68061+ err = -EACCES;
68062+ goto out_unlock;
68063+ }
68064+#endif
68065+
68066 path = shp->shm_file->f_path;
68067 path_get(&path);
68068 shp->shm_nattch++;
68069+#ifdef CONFIG_GRKERNSEC
68070+ shp->shm_lapid = current->pid;
68071+#endif
68072 size = i_size_read(path.dentry->d_inode);
68073 shm_unlock(shp);
68074
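The MPROTECT hook above makes do_shmat() bail out before PROT_EXEC is ever added, so tasks running under PaX MPROTECT cannot obtain executable SysV shared memory at all. From userspace the affected operation is shmat(2) with SHM_EXEC; a short sketch of the call such a kernel would refuse (assumes the libc headers expose SHM_EXEC, as glibc's do):

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
        int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
        if (id < 0) {
                perror("shmget");
                return 1;
        }
        /* request an executable attach; under the hunk above this
         * fails whenever MPROTECT is active for the calling task */
        void *p = shmat(id, NULL, SHM_EXEC);
        if (p == (void *)-1)
                perror("shmat(SHM_EXEC)");
        else
                shmdt(p);
        shmctl(id, IPC_RMID, NULL);
        return 0;
}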
68075diff --git a/kernel/acct.c b/kernel/acct.c
68076index 051e071..15e0920 100644
68077--- a/kernel/acct.c
68078+++ b/kernel/acct.c
68079@@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
68080 */
68081 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
68082 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
68083- file->f_op->write(file, (char *)&ac,
68084+ file->f_op->write(file, (char __force_user *)&ac,
68085 sizeof(acct_t), &file->f_pos);
68086 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
68087 set_fs(fs);
68088diff --git a/kernel/audit.c b/kernel/audit.c
68089index 40414e9..c920b72 100644
68090--- a/kernel/audit.c
68091+++ b/kernel/audit.c
68092@@ -116,7 +116,7 @@ u32 audit_sig_sid = 0;
68093 3) suppressed due to audit_rate_limit
68094 4) suppressed due to audit_backlog_limit
68095 */
68096-static atomic_t audit_lost = ATOMIC_INIT(0);
68097+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
68098
68099 /* The netlink socket. */
68100 static struct sock *audit_sock;
68101@@ -238,7 +238,7 @@ void audit_log_lost(const char *message)
68102 unsigned long now;
68103 int print;
68104
68105- atomic_inc(&audit_lost);
68106+ atomic_inc_unchecked(&audit_lost);
68107
68108 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
68109
68110@@ -257,7 +257,7 @@ void audit_log_lost(const char *message)
68111 printk(KERN_WARNING
68112 "audit: audit_lost=%d audit_rate_limit=%d "
68113 "audit_backlog_limit=%d\n",
68114- atomic_read(&audit_lost),
68115+ atomic_read_unchecked(&audit_lost),
68116 audit_rate_limit,
68117 audit_backlog_limit);
68118 audit_panic(message);
68119@@ -677,7 +677,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
68120 status_set.pid = audit_pid;
68121 status_set.rate_limit = audit_rate_limit;
68122 status_set.backlog_limit = audit_backlog_limit;
68123- status_set.lost = atomic_read(&audit_lost);
68124+ status_set.lost = atomic_read_unchecked(&audit_lost);
68125 status_set.backlog = skb_queue_len(&audit_skb_queue);
68126 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
68127 &status_set, sizeof(status_set));
68128diff --git a/kernel/auditsc.c b/kernel/auditsc.c
68129index 157e989..b28b365 100644
68130--- a/kernel/auditsc.c
68131+++ b/kernel/auditsc.c
68132@@ -2352,7 +2352,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
68133 }
68134
68135 /* global counter which is incremented every time something logs in */
68136-static atomic_t session_id = ATOMIC_INIT(0);
68137+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
68138
68139 /**
68140 * audit_set_loginuid - set current task's audit_context loginuid
68141@@ -2376,7 +2376,7 @@ int audit_set_loginuid(kuid_t loginuid)
68142 return -EPERM;
68143 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
68144
68145- sessionid = atomic_inc_return(&session_id);
68146+ sessionid = atomic_inc_return_unchecked(&session_id);
68147 if (context && context->in_syscall) {
68148 struct audit_buffer *ab;
68149
68150diff --git a/kernel/capability.c b/kernel/capability.c
68151index 493d972..ea17248 100644
68152--- a/kernel/capability.c
68153+++ b/kernel/capability.c
68154@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
68155 * before modification is attempted and the application
68156 * fails.
68157 */
68158+ if (tocopy > ARRAY_SIZE(kdata))
68159+ return -EFAULT;
68160+
68161 if (copy_to_user(dataptr, kdata, tocopy
68162 * sizeof(struct __user_cap_data_struct))) {
68163 return -EFAULT;
68164@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
68165 int ret;
68166
68167 rcu_read_lock();
68168- ret = security_capable(__task_cred(t), ns, cap);
68169+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
68170+ gr_task_is_capable(t, __task_cred(t), cap);
68171 rcu_read_unlock();
68172
68173- return (ret == 0);
68174+ return ret;
68175 }
68176
68177 /**
68178@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
68179 int ret;
68180
68181 rcu_read_lock();
68182- ret = security_capable_noaudit(__task_cred(t), ns, cap);
68183+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
68184 rcu_read_unlock();
68185
68186- return (ret == 0);
68187+ return ret;
68188 }
68189
68190 /**
68191@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
68192 BUG();
68193 }
68194
68195- if (security_capable(current_cred(), ns, cap) == 0) {
68196+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
68197 current->flags |= PF_SUPERPRIV;
68198 return true;
68199 }
68200@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
68201 }
68202 EXPORT_SYMBOL(ns_capable);
68203
68204+bool ns_capable_nolog(struct user_namespace *ns, int cap)
68205+{
68206+ if (unlikely(!cap_valid(cap))) {
68207+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
68208+ BUG();
68209+ }
68210+
68211+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
68212+ current->flags |= PF_SUPERPRIV;
68213+ return true;
68214+ }
68215+ return false;
68216+}
68217+EXPORT_SYMBOL(ns_capable_nolog);
68218+
68219 /**
68220 * capable - Determine if the current task has a superior capability in effect
68221 * @cap: The capability to be tested for
68222@@ -408,6 +427,12 @@ bool capable(int cap)
68223 }
68224 EXPORT_SYMBOL(capable);
68225
68226+bool capable_nolog(int cap)
68227+{
68228+ return ns_capable_nolog(&init_user_ns, cap);
68229+}
68230+EXPORT_SYMBOL(capable_nolog);
68231+
68232 /**
68233 * nsown_capable - Check superior capability to one's own user_ns
68234 * @cap: The capability in question
68235@@ -440,3 +465,10 @@ bool inode_capable(const struct inode *inode, int cap)
68236
68237 return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
68238 }
68239+
68240+bool inode_capable_nolog(const struct inode *inode, int cap)
68241+{
68242+ struct user_namespace *ns = current_user_ns();
68243+
68244+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
68245+}
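The capability.c hunk makes every capability grant a conjunction: the LSM verdict and grsecurity's RBAC check must both pass, with the *_nolog variants reserved for internal probes that should not feed the RBAC learning/audit stream. Callers keep the standard pattern unchanged (illustrative driver-style skeleton):

#include <linux/capability.h>
#include <linux/errno.h>

static long example_admin_op(void)
{
        /* with the hunk above, success now requires both the LSM and
         * the RBAC policy to grant CAP_SYS_ADMIN to the caller */
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        return 0;
}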
68246diff --git a/kernel/cgroup.c b/kernel/cgroup.c
68247index ad99830..992d8a7 100644
68248--- a/kernel/cgroup.c
68249+++ b/kernel/cgroup.c
68250@@ -5514,7 +5514,7 @@ static int cgroup_css_links_read(struct cgroup *cont,
68251 struct css_set *cg = link->cg;
68252 struct task_struct *task;
68253 int count = 0;
68254- seq_printf(seq, "css_set %p\n", cg);
68255+ seq_printf(seq, "css_set %pK\n", cg);
68256 list_for_each_entry(task, &cg->tasks, cg_list) {
68257 if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
68258 seq_puts(seq, " ...\n");
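The cgroup.c hunk swaps %p for %pK in a debug dump; %pK consults the kptr_restrict sysctl and prints a censored value to unprivileged readers instead of a raw kernel address. The same one-character change applies anywhere a pointer leaks into a world-readable file (illustrative seq_file show routine):

#include <linux/seq_file.h>

static int show_entry(struct seq_file *m, void *v)
{
        /* %pK prints zeroes to unprivileged readers when kptr_restrict
         * is set, instead of leaking the kernel address */
        seq_printf(m, "object at %pK\n", v);
        return 0;
}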
68259diff --git a/kernel/compat.c b/kernel/compat.c
68260index c28a306..b4d0cf3 100644
68261--- a/kernel/compat.c
68262+++ b/kernel/compat.c
68263@@ -13,6 +13,7 @@
68264
68265 #include <linux/linkage.h>
68266 #include <linux/compat.h>
68267+#include <linux/module.h>
68268 #include <linux/errno.h>
68269 #include <linux/time.h>
68270 #include <linux/signal.h>
68271@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
68272 mm_segment_t oldfs;
68273 long ret;
68274
68275- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
68276+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
68277 oldfs = get_fs();
68278 set_fs(KERNEL_DS);
68279 ret = hrtimer_nanosleep_restart(restart);
68280@@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
68281 oldfs = get_fs();
68282 set_fs(KERNEL_DS);
68283 ret = hrtimer_nanosleep(&tu,
68284- rmtp ? (struct timespec __user *)&rmt : NULL,
68285+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
68286 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
68287 set_fs(oldfs);
68288
68289@@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
68290 mm_segment_t old_fs = get_fs();
68291
68292 set_fs(KERNEL_DS);
68293- ret = sys_sigpending((old_sigset_t __user *) &s);
68294+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
68295 set_fs(old_fs);
68296 if (ret == 0)
68297 ret = put_user(s, set);
68298@@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
68299 mm_segment_t old_fs = get_fs();
68300
68301 set_fs(KERNEL_DS);
68302- ret = sys_old_getrlimit(resource, &r);
68303+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
68304 set_fs(old_fs);
68305
68306 if (!ret) {
68307@@ -523,7 +524,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
68308 mm_segment_t old_fs = get_fs();
68309
68310 set_fs(KERNEL_DS);
68311- ret = sys_getrusage(who, (struct rusage __user *) &r);
68312+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
68313 set_fs(old_fs);
68314
68315 if (ret)
68316@@ -550,8 +551,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
68317 set_fs (KERNEL_DS);
68318 ret = sys_wait4(pid,
68319 (stat_addr ?
68320- (unsigned int __user *) &status : NULL),
68321- options, (struct rusage __user *) &r);
68322+ (unsigned int __force_user *) &status : NULL),
68323+ options, (struct rusage __force_user *) &r);
68324 set_fs (old_fs);
68325
68326 if (ret > 0) {
68327@@ -576,8 +577,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
68328 memset(&info, 0, sizeof(info));
68329
68330 set_fs(KERNEL_DS);
68331- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
68332- uru ? (struct rusage __user *)&ru : NULL);
68333+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
68334+ uru ? (struct rusage __force_user *)&ru : NULL);
68335 set_fs(old_fs);
68336
68337 if ((ret < 0) || (info.si_signo == 0))
68338@@ -707,8 +708,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
68339 oldfs = get_fs();
68340 set_fs(KERNEL_DS);
68341 err = sys_timer_settime(timer_id, flags,
68342- (struct itimerspec __user *) &newts,
68343- (struct itimerspec __user *) &oldts);
68344+ (struct itimerspec __force_user *) &newts,
68345+ (struct itimerspec __force_user *) &oldts);
68346 set_fs(oldfs);
68347 if (!err && old && put_compat_itimerspec(old, &oldts))
68348 return -EFAULT;
68349@@ -725,7 +726,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
68350 oldfs = get_fs();
68351 set_fs(KERNEL_DS);
68352 err = sys_timer_gettime(timer_id,
68353- (struct itimerspec __user *) &ts);
68354+ (struct itimerspec __force_user *) &ts);
68355 set_fs(oldfs);
68356 if (!err && put_compat_itimerspec(setting, &ts))
68357 return -EFAULT;
68358@@ -744,7 +745,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
68359 oldfs = get_fs();
68360 set_fs(KERNEL_DS);
68361 err = sys_clock_settime(which_clock,
68362- (struct timespec __user *) &ts);
68363+ (struct timespec __force_user *) &ts);
68364 set_fs(oldfs);
68365 return err;
68366 }
68367@@ -759,7 +760,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
68368 oldfs = get_fs();
68369 set_fs(KERNEL_DS);
68370 err = sys_clock_gettime(which_clock,
68371- (struct timespec __user *) &ts);
68372+ (struct timespec __force_user *) &ts);
68373 set_fs(oldfs);
68374 if (!err && put_compat_timespec(&ts, tp))
68375 return -EFAULT;
68376@@ -779,7 +780,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
68377
68378 oldfs = get_fs();
68379 set_fs(KERNEL_DS);
68380- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
68381+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
68382 set_fs(oldfs);
68383
68384 err = compat_put_timex(utp, &txc);
68385@@ -799,7 +800,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
68386 oldfs = get_fs();
68387 set_fs(KERNEL_DS);
68388 err = sys_clock_getres(which_clock,
68389- (struct timespec __user *) &ts);
68390+ (struct timespec __force_user *) &ts);
68391 set_fs(oldfs);
68392 if (!err && tp && put_compat_timespec(&ts, tp))
68393 return -EFAULT;
68394@@ -811,9 +812,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
68395 long err;
68396 mm_segment_t oldfs;
68397 struct timespec tu;
68398- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
68399+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
68400
68401- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
68402+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
68403 oldfs = get_fs();
68404 set_fs(KERNEL_DS);
68405 err = clock_nanosleep_restart(restart);
68406@@ -845,8 +846,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
68407 oldfs = get_fs();
68408 set_fs(KERNEL_DS);
68409 err = sys_clock_nanosleep(which_clock, flags,
68410- (struct timespec __user *) &in,
68411- (struct timespec __user *) &out);
68412+ (struct timespec __force_user *) &in,
68413+ (struct timespec __force_user *) &out);
68414 set_fs(oldfs);
68415
68416 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
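Every compat.c hunk is the same mechanical re-annotation: the compat wrappers widen 32-bit arguments into kernel buffers, call the native syscall on those buffers under set_fs(KERNEL_DS), and the casts must therefore carry __force_user (grsecurity's spelling of __force __user) to satisfy the stricter sparse rules. The underlying wrapper pattern, reduced (get_fs()/set_fs() are the era-appropriate API, since removed from mainline; the sys_like_* declaration is illustrative):

#include <linux/time.h>
#include <linux/uaccess.h>

extern long sys_like_clock_gettime(struct timespec __user *tp);

static long compat_style_wrapper(void)
{
        struct timespec ts;             /* kernel-resident buffer */
        mm_segment_t old_fs = get_fs();
        long ret;

        set_fs(KERNEL_DS);              /* user-pointer checks now accept
                                           kernel addresses */
        ret = sys_like_clock_gettime((struct timespec __force __user *)&ts);
        set_fs(old_fs);
        return ret;
}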
68417diff --git a/kernel/configs.c b/kernel/configs.c
68418index 42e8fa0..9e7406b 100644
68419--- a/kernel/configs.c
68420+++ b/kernel/configs.c
68421@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
68422 struct proc_dir_entry *entry;
68423
68424 /* create the current config file */
68425+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
68426+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
68427+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
68428+ &ikconfig_file_ops);
68429+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68430+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
68431+ &ikconfig_file_ops);
68432+#endif
68433+#else
68434 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
68435 &ikconfig_file_ops);
68436+#endif
68437+
68438 if (!entry)
68439 return -ENOMEM;
68440
68441diff --git a/kernel/cred.c b/kernel/cred.c
68442index 48cea3d..3476734 100644
68443--- a/kernel/cred.c
68444+++ b/kernel/cred.c
68445@@ -207,6 +207,16 @@ void exit_creds(struct task_struct *tsk)
68446 validate_creds(cred);
68447 alter_cred_subscribers(cred, -1);
68448 put_cred(cred);
68449+
68450+#ifdef CONFIG_GRKERNSEC_SETXID
68451+ cred = (struct cred *) tsk->delayed_cred;
68452+ if (cred != NULL) {
68453+ tsk->delayed_cred = NULL;
68454+ validate_creds(cred);
68455+ alter_cred_subscribers(cred, -1);
68456+ put_cred(cred);
68457+ }
68458+#endif
68459 }
68460
68461 /**
68462@@ -469,7 +479,7 @@ error_put:
68463 * Always returns 0 thus allowing this function to be tail-called at the end
68464 * of, say, sys_setgid().
68465 */
68466-int commit_creds(struct cred *new)
68467+static int __commit_creds(struct cred *new)
68468 {
68469 struct task_struct *task = current;
68470 const struct cred *old = task->real_cred;
68471@@ -488,6 +498,8 @@ int commit_creds(struct cred *new)
68472
68473 get_cred(new); /* we will require a ref for the subj creds too */
68474
68475+ gr_set_role_label(task, new->uid, new->gid);
68476+
68477 /* dumpability changes */
68478 if (!uid_eq(old->euid, new->euid) ||
68479 !gid_eq(old->egid, new->egid) ||
68480@@ -537,6 +549,101 @@ int commit_creds(struct cred *new)
68481 put_cred(old);
68482 return 0;
68483 }
68484+#ifdef CONFIG_GRKERNSEC_SETXID
68485+extern int set_user(struct cred *new);
68486+
68487+void gr_delayed_cred_worker(void)
68488+{
68489+ const struct cred *new = current->delayed_cred;
68490+ struct cred *ncred;
68491+
68492+ current->delayed_cred = NULL;
68493+
68494+ if (current_uid() && new != NULL) {
68495+ // from doing get_cred on it when queueing this
68496+ put_cred(new);
68497+ return;
68498+ } else if (new == NULL)
68499+ return;
68500+
68501+ ncred = prepare_creds();
68502+ if (!ncred)
68503+ goto die;
68504+ // uids
68505+ ncred->uid = new->uid;
68506+ ncred->euid = new->euid;
68507+ ncred->suid = new->suid;
68508+ ncred->fsuid = new->fsuid;
68509+ // gids
68510+ ncred->gid = new->gid;
68511+ ncred->egid = new->egid;
68512+ ncred->sgid = new->sgid;
68513+ ncred->fsgid = new->fsgid;
68514+ // groups
68515+ if (set_groups(ncred, new->group_info) < 0) {
68516+ abort_creds(ncred);
68517+ goto die;
68518+ }
68519+ // caps
68520+ ncred->securebits = new->securebits;
68521+ ncred->cap_inheritable = new->cap_inheritable;
68522+ ncred->cap_permitted = new->cap_permitted;
68523+ ncred->cap_effective = new->cap_effective;
68524+ ncred->cap_bset = new->cap_bset;
68525+
68526+ if (set_user(ncred)) {
68527+ abort_creds(ncred);
68528+ goto die;
68529+ }
68530+
68531+ // from doing get_cred on it when queueing this
68532+ put_cred(new);
68533+
68534+ __commit_creds(ncred);
68535+ return;
68536+die:
68537+ // from doing get_cred on it when queueing this
68538+ put_cred(new);
68539+ do_group_exit(SIGKILL);
68540+}
68541+#endif
68542+
68543+int commit_creds(struct cred *new)
68544+{
68545+#ifdef CONFIG_GRKERNSEC_SETXID
68546+ int ret;
68547+ int schedule_it = 0;
68548+ struct task_struct *t;
68549+
68550+ /* we won't get called with tasklist_lock held for writing
68551+ and interrupts disabled as the cred struct in that case is
68552+ init_cred
68553+ */
68554+ if (grsec_enable_setxid && !current_is_single_threaded() &&
68555+ !current_uid() && new->uid) {
68556+ schedule_it = 1;
68557+ }
68558+ ret = __commit_creds(new);
68559+ if (schedule_it) {
68560+ rcu_read_lock();
68561+ read_lock(&tasklist_lock);
68562+ for (t = next_thread(current); t != current;
68563+ t = next_thread(t)) {
68564+ if (t->delayed_cred == NULL) {
68565+ t->delayed_cred = get_cred(new);
68566+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
68567+ set_tsk_need_resched(t);
68568+ }
68569+ }
68570+ read_unlock(&tasklist_lock);
68571+ rcu_read_unlock();
68572+ }
68573+ return ret;
68574+#else
68575+ return __commit_creds(new);
68576+#endif
68577+}
68578+
68579 EXPORT_SYMBOL(commit_creds);
68580
68581 /**
68582diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
68583index 9a61738..c5c8f3a 100644
68584--- a/kernel/debug/debug_core.c
68585+++ b/kernel/debug/debug_core.c
68586@@ -122,7 +122,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
68587 */
68588 static atomic_t masters_in_kgdb;
68589 static atomic_t slaves_in_kgdb;
68590-static atomic_t kgdb_break_tasklet_var;
68591+static atomic_unchecked_t kgdb_break_tasklet_var;
68592 atomic_t kgdb_setting_breakpoint;
68593
68594 struct task_struct *kgdb_usethread;
68595@@ -132,7 +132,7 @@ int kgdb_single_step;
68596 static pid_t kgdb_sstep_pid;
68597
68598 /* to keep track of the CPU which is doing the single stepping*/
68599-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
68600+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
68601
68602 /*
68603 * If you are debugging a problem where roundup (the collection of
68604@@ -540,7 +540,7 @@ return_normal:
68605 * kernel will only try for the value of sstep_tries before
68606 * giving up and continuing on.
68607 */
68608- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
68609+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
68610 (kgdb_info[cpu].task &&
68611 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
68612 atomic_set(&kgdb_active, -1);
68613@@ -634,8 +634,8 @@ cpu_master_loop:
68614 }
68615
68616 kgdb_restore:
68617- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
68618- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
68619+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
68620+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
68621 if (kgdb_info[sstep_cpu].task)
68622 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
68623 else
68624@@ -887,18 +887,18 @@ static void kgdb_unregister_callbacks(void)
68625 static void kgdb_tasklet_bpt(unsigned long ing)
68626 {
68627 kgdb_breakpoint();
68628- atomic_set(&kgdb_break_tasklet_var, 0);
68629+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
68630 }
68631
68632 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
68633
68634 void kgdb_schedule_breakpoint(void)
68635 {
68636- if (atomic_read(&kgdb_break_tasklet_var) ||
68637+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
68638 atomic_read(&kgdb_active) != -1 ||
68639 atomic_read(&kgdb_setting_breakpoint))
68640 return;
68641- atomic_inc(&kgdb_break_tasklet_var);
68642+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
68643 tasklet_schedule(&kgdb_tasklet_breakpoint);
68644 }
68645 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
68646diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
68647index 4d5f8d5..4743f33 100644
68648--- a/kernel/debug/kdb/kdb_main.c
68649+++ b/kernel/debug/kdb/kdb_main.c
68650@@ -1972,7 +1972,7 @@ static int kdb_lsmod(int argc, const char **argv)
68651 list_for_each_entry(mod, kdb_modules, list) {
68652
68653 kdb_printf("%-20s%8u 0x%p ", mod->name,
68654- mod->core_size, (void *)mod);
68655+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
68656 #ifdef CONFIG_MODULE_UNLOAD
68657 kdb_printf("%4ld ", module_refcount(mod));
68658 #endif
68659@@ -1982,7 +1982,7 @@ static int kdb_lsmod(int argc, const char **argv)
68660 kdb_printf(" (Loading)");
68661 else
68662 kdb_printf(" (Live)");
68663- kdb_printf(" 0x%p", mod->module_core);
68664+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
68665
68666 #ifdef CONFIG_MODULE_UNLOAD
68667 {
68668diff --git a/kernel/events/core.c b/kernel/events/core.c
68669index dbccf83..8c66482 100644
68670--- a/kernel/events/core.c
68671+++ b/kernel/events/core.c
68672@@ -182,7 +182,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
68673 return 0;
68674 }
68675
68676-static atomic64_t perf_event_id;
68677+static atomic64_unchecked_t perf_event_id;
68678
68679 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
68680 enum event_type_t event_type);
68681@@ -2668,7 +2668,7 @@ static void __perf_event_read(void *info)
68682
68683 static inline u64 perf_event_count(struct perf_event *event)
68684 {
68685- return local64_read(&event->count) + atomic64_read(&event->child_count);
68686+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
68687 }
68688
68689 static u64 perf_event_read(struct perf_event *event)
68690@@ -2998,9 +2998,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
68691 mutex_lock(&event->child_mutex);
68692 total += perf_event_read(event);
68693 *enabled += event->total_time_enabled +
68694- atomic64_read(&event->child_total_time_enabled);
68695+ atomic64_read_unchecked(&event->child_total_time_enabled);
68696 *running += event->total_time_running +
68697- atomic64_read(&event->child_total_time_running);
68698+ atomic64_read_unchecked(&event->child_total_time_running);
68699
68700 list_for_each_entry(child, &event->child_list, child_list) {
68701 total += perf_event_read(child);
68702@@ -3403,10 +3403,10 @@ void perf_event_update_userpage(struct perf_event *event)
68703 userpg->offset -= local64_read(&event->hw.prev_count);
68704
68705 userpg->time_enabled = enabled +
68706- atomic64_read(&event->child_total_time_enabled);
68707+ atomic64_read_unchecked(&event->child_total_time_enabled);
68708
68709 userpg->time_running = running +
68710- atomic64_read(&event->child_total_time_running);
68711+ atomic64_read_unchecked(&event->child_total_time_running);
68712
68713 arch_perf_update_userpage(userpg, now);
68714
68715@@ -3965,11 +3965,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
68716 values[n++] = perf_event_count(event);
68717 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
68718 values[n++] = enabled +
68719- atomic64_read(&event->child_total_time_enabled);
68720+ atomic64_read_unchecked(&event->child_total_time_enabled);
68721 }
68722 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
68723 values[n++] = running +
68724- atomic64_read(&event->child_total_time_running);
68725+ atomic64_read_unchecked(&event->child_total_time_running);
68726 }
68727 if (read_format & PERF_FORMAT_ID)
68728 values[n++] = primary_event_id(event);
68729@@ -4712,12 +4712,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
68730 * need to add enough zero bytes after the string to handle
68731 * the 64bit alignment we do later.
68732 */
68733- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
68734+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
68735 if (!buf) {
68736 name = strncpy(tmp, "//enomem", sizeof(tmp));
68737 goto got_name;
68738 }
68739- name = d_path(&file->f_path, buf, PATH_MAX);
68740+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
68741 if (IS_ERR(name)) {
68742 name = strncpy(tmp, "//toolong", sizeof(tmp));
68743 goto got_name;
68744@@ -6156,7 +6156,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
68745 event->parent = parent_event;
68746
68747 event->ns = get_pid_ns(current->nsproxy->pid_ns);
68748- event->id = atomic64_inc_return(&perf_event_id);
68749+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
68750
68751 event->state = PERF_EVENT_STATE_INACTIVE;
68752
68753@@ -6774,10 +6774,10 @@ static void sync_child_event(struct perf_event *child_event,
68754 /*
68755 * Add back the child's count to the parent's count:
68756 */
68757- atomic64_add(child_val, &parent_event->child_count);
68758- atomic64_add(child_event->total_time_enabled,
68759+ atomic64_add_unchecked(child_val, &parent_event->child_count);
68760+ atomic64_add_unchecked(child_event->total_time_enabled,
68761 &parent_event->child_total_time_enabled);
68762- atomic64_add(child_event->total_time_running,
68763+ atomic64_add_unchecked(child_event->total_time_running,
68764 &parent_event->child_total_time_running);
68765
68766 /*
68767diff --git a/kernel/exit.c b/kernel/exit.c
68768index 346616c..f103b28 100644
68769--- a/kernel/exit.c
68770+++ b/kernel/exit.c
68771@@ -182,6 +182,10 @@ void release_task(struct task_struct * p)
68772 struct task_struct *leader;
68773 int zap_leader;
68774 repeat:
68775+#ifdef CONFIG_NET
68776+ gr_del_task_from_ip_table(p);
68777+#endif
68778+
68779 /* don't need to get the RCU readlock here - the process is dead and
68780 * can't be modifying its own credentials. But shut RCU-lockdep up */
68781 rcu_read_lock();
68782@@ -394,7 +398,7 @@ int allow_signal(int sig)
68783 * know it'll be handled, so that they don't get converted to
68784 * SIGKILL or just silently dropped.
68785 */
68786- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
68787+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
68788 recalc_sigpending();
68789 spin_unlock_irq(&current->sighand->siglock);
68790 return 0;
68791@@ -430,6 +434,9 @@ void daemonize(const char *name, ...)
68792 vsnprintf(current->comm, sizeof(current->comm), name, args);
68793 va_end(args);
68794
68795+ gr_put_exec_file(current);
68796+ gr_set_kernel_label(current);
68797+
68798 /*
68799 * If we were started as result of loading a module, close all of the
68800 * user space pages. We don't need them, and if we didn't close them
68801@@ -812,6 +819,8 @@ void do_exit(long code)
68802 struct task_struct *tsk = current;
68803 int group_dead;
68804
68805+ set_fs(USER_DS);
68806+
68807 profile_task_exit(tsk);
68808
68809 WARN_ON(blk_needs_flush_plug(tsk));
68810@@ -828,7 +837,6 @@ void do_exit(long code)
68811 * mm_release()->clear_child_tid() from writing to a user-controlled
68812 * kernel address.
68813 */
68814- set_fs(USER_DS);
68815
68816 ptrace_event(PTRACE_EVENT_EXIT, code);
68817
68818@@ -887,6 +895,9 @@ void do_exit(long code)
68819 tsk->exit_code = code;
68820 taskstats_exit(tsk, group_dead);
68821
68822+ gr_acl_handle_psacct(tsk, code);
68823+ gr_acl_handle_exit();
68824+
68825 exit_mm(tsk);
68826
68827 if (group_dead)
68828@@ -1007,7 +1018,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
68829 * Take down every thread in the group. This is called by fatal signals
68830 * as well as by sys_exit_group (below).
68831 */
68832-void
68833+__noreturn void
68834 do_group_exit(int exit_code)
68835 {
68836 struct signal_struct *sig = current->signal;
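Two of the exit.c changes are compiler-visible rather than behavioural: set_fs(USER_DS) is hoisted to the very top of do_exit() so nothing that runs during teardown can still operate with a kernel address limit, and do_group_exit() is annotated __noreturn. A small sketch of what the annotation buys, assuming GCC/Clang attribute syntax:

#include <stdio.h>
#include <stdlib.h>

#define __noreturn __attribute__((noreturn))

static __noreturn void die(const char *msg)
{
	fprintf(stderr, "fatal: %s\n", msg);
	exit(1);
	/* control may not fall off the end; the compiler enforces this */
}

int main(int argc, char **argv)
{
	if (argc < 2)
		die("missing argument");
	printf("arg: %s\n", argv[1]);
	return 0;
	/* code after a die() call would be flagged as unreachable */
}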
68837diff --git a/kernel/fork.c b/kernel/fork.c
68838index acc4cb6..b524cb5 100644
68839--- a/kernel/fork.c
68840+++ b/kernel/fork.c
68841@@ -318,7 +318,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
68842 *stackend = STACK_END_MAGIC; /* for overflow detection */
68843
68844 #ifdef CONFIG_CC_STACKPROTECTOR
68845- tsk->stack_canary = get_random_int();
68846+ tsk->stack_canary = pax_get_random_long();
68847 #endif
68848
68849 /*
68850@@ -344,13 +344,81 @@ free_tsk:
68851 }
68852
68853 #ifdef CONFIG_MMU
68854+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
68855+{
68856+ struct vm_area_struct *tmp;
68857+ unsigned long charge;
68858+ struct mempolicy *pol;
68859+ struct file *file;
68860+
68861+ charge = 0;
68862+ if (mpnt->vm_flags & VM_ACCOUNT) {
68863+ unsigned long len = vma_pages(mpnt);
68864+
68865+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
68866+ goto fail_nomem;
68867+ charge = len;
68868+ }
68869+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
68870+ if (!tmp)
68871+ goto fail_nomem;
68872+ *tmp = *mpnt;
68873+ tmp->vm_mm = mm;
68874+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
68875+ pol = mpol_dup(vma_policy(mpnt));
68876+ if (IS_ERR(pol))
68877+ goto fail_nomem_policy;
68878+ vma_set_policy(tmp, pol);
68879+ if (anon_vma_fork(tmp, mpnt))
68880+ goto fail_nomem_anon_vma_fork;
68881+ tmp->vm_flags &= ~VM_LOCKED;
68882+ tmp->vm_next = tmp->vm_prev = NULL;
68883+ tmp->vm_mirror = NULL;
68884+ file = tmp->vm_file;
68885+ if (file) {
68886+ struct inode *inode = file->f_path.dentry->d_inode;
68887+ struct address_space *mapping = file->f_mapping;
68888+
68889+ get_file(file);
68890+ if (tmp->vm_flags & VM_DENYWRITE)
68891+ atomic_dec(&inode->i_writecount);
68892+ mutex_lock(&mapping->i_mmap_mutex);
68893+ if (tmp->vm_flags & VM_SHARED)
68894+ mapping->i_mmap_writable++;
68895+ flush_dcache_mmap_lock(mapping);
68896+ /* insert tmp into the share list, just after mpnt */
68897+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
68898+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
68899+ else
68900+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
68901+ flush_dcache_mmap_unlock(mapping);
68902+ mutex_unlock(&mapping->i_mmap_mutex);
68903+ }
68904+
68905+ /*
68906+ * Clear hugetlb-related page reserves for children. This only
68907+ * affects MAP_PRIVATE mappings. Faults generated by the child
68908+ * are not guaranteed to succeed, even if read-only
68909+ */
68910+ if (is_vm_hugetlb_page(tmp))
68911+ reset_vma_resv_huge_pages(tmp);
68912+
68913+ return tmp;
68914+
68915+fail_nomem_anon_vma_fork:
68916+ mpol_put(pol);
68917+fail_nomem_policy:
68918+ kmem_cache_free(vm_area_cachep, tmp);
68919+fail_nomem:
68920+ vm_unacct_memory(charge);
68921+ return NULL;
68922+}
68923+
68924 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
68925 {
68926 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
68927 struct rb_node **rb_link, *rb_parent;
68928 int retval;
68929- unsigned long charge;
68930- struct mempolicy *pol;
68931
68932 down_write(&oldmm->mmap_sem);
68933 flush_cache_dup_mm(oldmm);
68934@@ -363,8 +431,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
68935 mm->locked_vm = 0;
68936 mm->mmap = NULL;
68937 mm->mmap_cache = NULL;
68938- mm->free_area_cache = oldmm->mmap_base;
68939- mm->cached_hole_size = ~0UL;
68940+ mm->free_area_cache = oldmm->free_area_cache;
68941+ mm->cached_hole_size = oldmm->cached_hole_size;
68942 mm->map_count = 0;
68943 cpumask_clear(mm_cpumask(mm));
68944 mm->mm_rb = RB_ROOT;
68945@@ -380,57 +448,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
68946
68947 prev = NULL;
68948 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
68949- struct file *file;
68950-
68951 if (mpnt->vm_flags & VM_DONTCOPY) {
68952 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
68953 -vma_pages(mpnt));
68954 continue;
68955 }
68956- charge = 0;
68957- if (mpnt->vm_flags & VM_ACCOUNT) {
68958- unsigned long len = vma_pages(mpnt);
68959-
68960- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
68961- goto fail_nomem;
68962- charge = len;
68963- }
68964- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
68965- if (!tmp)
68966- goto fail_nomem;
68967- *tmp = *mpnt;
68968- INIT_LIST_HEAD(&tmp->anon_vma_chain);
68969- pol = mpol_dup(vma_policy(mpnt));
68970- retval = PTR_ERR(pol);
68971- if (IS_ERR(pol))
68972- goto fail_nomem_policy;
68973- vma_set_policy(tmp, pol);
68974- tmp->vm_mm = mm;
68975- if (anon_vma_fork(tmp, mpnt))
68976- goto fail_nomem_anon_vma_fork;
68977- tmp->vm_flags &= ~VM_LOCKED;
68978- tmp->vm_next = tmp->vm_prev = NULL;
68979- file = tmp->vm_file;
68980- if (file) {
68981- struct inode *inode = file->f_path.dentry->d_inode;
68982- struct address_space *mapping = file->f_mapping;
68983-
68984- get_file(file);
68985- if (tmp->vm_flags & VM_DENYWRITE)
68986- atomic_dec(&inode->i_writecount);
68987- mutex_lock(&mapping->i_mmap_mutex);
68988- if (tmp->vm_flags & VM_SHARED)
68989- mapping->i_mmap_writable++;
68990- flush_dcache_mmap_lock(mapping);
68991- /* insert tmp into the share list, just after mpnt */
68992- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
68993- vma_nonlinear_insert(tmp,
68994- &mapping->i_mmap_nonlinear);
68995- else
68996- vma_interval_tree_insert_after(tmp, mpnt,
68997- &mapping->i_mmap);
68998- flush_dcache_mmap_unlock(mapping);
68999- mutex_unlock(&mapping->i_mmap_mutex);
69000+ tmp = dup_vma(mm, oldmm, mpnt);
69001+ if (!tmp) {
69002+ retval = -ENOMEM;
69003+ goto out;
69004 }
69005
69006 /*
69007@@ -462,6 +488,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
69008 if (retval)
69009 goto out;
69010 }
69011+
69012+#ifdef CONFIG_PAX_SEGMEXEC
69013+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
69014+ struct vm_area_struct *mpnt_m;
69015+
69016+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
69017+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
69018+
69019+ if (!mpnt->vm_mirror)
69020+ continue;
69021+
69022+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
69023+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
69024+ mpnt->vm_mirror = mpnt_m;
69025+ } else {
69026+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
69027+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
69028+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
69029+ mpnt->vm_mirror->vm_mirror = mpnt;
69030+ }
69031+ }
69032+ BUG_ON(mpnt_m);
69033+ }
69034+#endif
69035+
69036 /* a new mm has just been created */
69037 arch_dup_mmap(oldmm, mm);
69038 retval = 0;
69039@@ -470,14 +521,6 @@ out:
69040 flush_tlb_mm(oldmm);
69041 up_write(&oldmm->mmap_sem);
69042 return retval;
69043-fail_nomem_anon_vma_fork:
69044- mpol_put(pol);
69045-fail_nomem_policy:
69046- kmem_cache_free(vm_area_cachep, tmp);
69047-fail_nomem:
69048- retval = -ENOMEM;
69049- vm_unacct_memory(charge);
69050- goto out;
69051 }
69052
69053 static inline int mm_alloc_pgd(struct mm_struct *mm)
69054@@ -692,8 +735,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
69055 return ERR_PTR(err);
69056
69057 mm = get_task_mm(task);
69058- if (mm && mm != current->mm &&
69059- !ptrace_may_access(task, mode)) {
69060+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
69061+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
69062 mmput(mm);
69063 mm = ERR_PTR(-EACCES);
69064 }
69065@@ -912,13 +955,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
69066 spin_unlock(&fs->lock);
69067 return -EAGAIN;
69068 }
69069- fs->users++;
69070+ atomic_inc(&fs->users);
69071 spin_unlock(&fs->lock);
69072 return 0;
69073 }
69074 tsk->fs = copy_fs_struct(fs);
69075 if (!tsk->fs)
69076 return -ENOMEM;
69077+ /* Carry through gr_chroot_dentry and is_chrooted instead
69078+ of recomputing it here. Already copied when the task struct
69079+ is duplicated. This allows pivot_root to not be treated as
69080+ a chroot
69081+ */
69082+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
69083+
69084 return 0;
69085 }
69086
69087@@ -1183,6 +1233,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
69088 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
69089 #endif
69090 retval = -EAGAIN;
69091+
69092+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
69093+
69094 if (atomic_read(&p->real_cred->user->processes) >=
69095 task_rlimit(p, RLIMIT_NPROC)) {
69096 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
69097@@ -1422,6 +1475,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
69098 goto bad_fork_free_pid;
69099 }
69100
69101+ /* synchronizes with gr_set_acls()
69102+ we need to call this past the point of no return for fork()
69103+ */
69104+ gr_copy_label(p);
69105+
69106 if (clone_flags & CLONE_THREAD) {
69107 current->signal->nr_threads++;
69108 atomic_inc(&current->signal->live);
69109@@ -1505,6 +1563,8 @@ bad_fork_cleanup_count:
69110 bad_fork_free:
69111 free_task(p);
69112 fork_out:
69113+ gr_log_forkfail(retval);
69114+
69115 return ERR_PTR(retval);
69116 }
69117
69118@@ -1605,6 +1665,8 @@ long do_fork(unsigned long clone_flags,
69119 if (clone_flags & CLONE_PARENT_SETTID)
69120 put_user(nr, parent_tidptr);
69121
69122+ gr_handle_brute_check();
69123+
69124 if (clone_flags & CLONE_VFORK) {
69125 p->vfork_done = &vfork;
69126 init_completion(&vfork);
69127@@ -1714,7 +1776,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
69128 return 0;
69129
69130 /* don't need lock here; in the worst case we'll do useless copy */
69131- if (fs->users == 1)
69132+ if (atomic_read(&fs->users) == 1)
69133 return 0;
69134
69135 *new_fsp = copy_fs_struct(fs);
69136@@ -1803,7 +1865,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
69137 fs = current->fs;
69138 spin_lock(&fs->lock);
69139 current->fs = new_fs;
69140- if (--fs->users)
69141+ gr_set_chroot_entries(current, &current->fs->root);
69142+ if (atomic_dec_return(&fs->users))
69143 new_fs = NULL;
69144 else
69145 new_fs = fs;
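The fs->users conversion that runs through copy_fs(), unshare_fs() and sys_unshare() above turns a plain integer guarded by fs->lock into an atomic_t, so grsecurity's chroot bookkeeping can read and update the count without widening the lock's scope. The key idiom is atomic_dec_return(), which decrements and hands back the new value in one step; a userspace analogue:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fs_struct_like {
	_Atomic int users;
	/* ...root, pwd, lock elided... */
};

/* analogue of atomic_dec_return(): post-decrement value, atomically */
static int put_fs(struct fs_struct_like *fs)
{
	int remaining = atomic_fetch_sub(&fs->users, 1) - 1;
	if (remaining == 0)
		puts("last user dropped; safe to free");
	return remaining;
}

int main(void)
{
	struct fs_struct_like *fs = malloc(sizeof(*fs));

	atomic_init(&fs->users, 2);
	put_fs(fs);		/* one reference left */
	if (put_fs(fs) == 0)	/* mirrors "if (atomic_dec_return(...))" */
		free(fs);
	return 0;
}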
69146diff --git a/kernel/futex.c b/kernel/futex.c
69147index 19eb089..b8c65ea 100644
69148--- a/kernel/futex.c
69149+++ b/kernel/futex.c
69150@@ -54,6 +54,7 @@
69151 #include <linux/mount.h>
69152 #include <linux/pagemap.h>
69153 #include <linux/syscalls.h>
69154+#include <linux/ptrace.h>
69155 #include <linux/signal.h>
69156 #include <linux/export.h>
69157 #include <linux/magic.h>
69158@@ -239,6 +240,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
69159 struct page *page, *page_head;
69160 int err, ro = 0;
69161
69162+#ifdef CONFIG_PAX_SEGMEXEC
69163+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
69164+ return -EFAULT;
69165+#endif
69166+
69167 /*
69168 * The futex address must be "naturally" aligned.
69169 */
69170@@ -2733,6 +2739,7 @@ static int __init futex_init(void)
69171 {
69172 u32 curval;
69173 int i;
69174+ mm_segment_t oldfs;
69175
69176 /*
69177 * This will fail and we want it. Some arch implementations do
69178@@ -2744,8 +2751,11 @@ static int __init futex_init(void)
69179 * implementation, the non-functional ones will return
69180 * -ENOSYS.
69181 */
69182+ oldfs = get_fs();
69183+ set_fs(USER_DS);
69184 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
69185 futex_cmpxchg_enabled = 1;
69186+ set_fs(oldfs);
69187
69188 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
69189 plist_head_init(&futex_queues[i].chain);
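The futex_init() change exists because the feature probe deliberately runs cmpxchg_futex_value_locked() on a NULL user address and expects -EFAULT; with PaX's stricter user/kernel separation the probe must execute under an explicit USER_DS limit, hence the get_fs()/set_fs() bracket. The save/run/restore shape is the part worth keeping in mind; a schematic version (names hypothetical, the real mm_segment_t machinery is per-arch):

#include <stdio.h>

typedef enum { KERNEL_DS_LIKE, USER_DS_LIKE } seg_t;
static _Thread_local seg_t addr_limit = KERNEL_DS_LIKE;

static seg_t get_seg(void)    { return addr_limit; }
static void  set_seg(seg_t s) { addr_limit = s; }

/* stand-in for the cmpxchg probe: only "faults" with user checks on */
static int probe_futex_op(void)
{
	return addr_limit == USER_DS_LIKE ? -14 /* -EFAULT */ : 0;
}

int main(void)
{
	seg_t oldfs = get_seg();

	set_seg(USER_DS_LIKE);
	if (probe_futex_op() == -14)
		puts("futex_cmpxchg_enabled = 1");
	set_seg(oldfs);		/* restore unconditionally */
	return 0;
}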
69190diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
69191index 9b22d03..6295b62 100644
69192--- a/kernel/gcov/base.c
69193+++ b/kernel/gcov/base.c
69194@@ -102,11 +102,6 @@ void gcov_enable_events(void)
69195 }
69196
69197 #ifdef CONFIG_MODULES
69198-static inline int within(void *addr, void *start, unsigned long size)
69199-{
69200- return ((addr >= start) && (addr < start + size));
69201-}
69202-
69203 /* Update list and generate events when modules are unloaded. */
69204 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
69205 void *data)
69206@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
69207 prev = NULL;
69208 /* Remove entries located in module from linked list. */
69209 for (info = gcov_info_head; info; info = info->next) {
69210- if (within(info, mod->module_core, mod->core_size)) {
69211+ if (within_module_core_rw((unsigned long)info, mod)) {
69212 if (prev)
69213 prev->next = info->next;
69214 else
69215diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
69216index 6db7a5e..25b6648 100644
69217--- a/kernel/hrtimer.c
69218+++ b/kernel/hrtimer.c
69219@@ -1407,7 +1407,7 @@ void hrtimer_peek_ahead_timers(void)
69220 local_irq_restore(flags);
69221 }
69222
69223-static void run_hrtimer_softirq(struct softirq_action *h)
69224+static void run_hrtimer_softirq(void)
69225 {
69226 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
69227
69228diff --git a/kernel/jump_label.c b/kernel/jump_label.c
69229index 60f48fa..7f3a770 100644
69230--- a/kernel/jump_label.c
69231+++ b/kernel/jump_label.c
69232@@ -13,6 +13,7 @@
69233 #include <linux/sort.h>
69234 #include <linux/err.h>
69235 #include <linux/static_key.h>
69236+#include <linux/mm.h>
69237
69238 #ifdef HAVE_JUMP_LABEL
69239
69240@@ -50,7 +51,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
69241
69242 size = (((unsigned long)stop - (unsigned long)start)
69243 / sizeof(struct jump_entry));
69244+ pax_open_kernel();
69245 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
69246+ pax_close_kernel();
69247 }
69248
69249 static void jump_label_update(struct static_key *key, int enable);
69250@@ -357,10 +360,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
69251 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
69252 struct jump_entry *iter;
69253
69254+ pax_open_kernel();
69255 for (iter = iter_start; iter < iter_stop; iter++) {
69256 if (within_module_init(iter->code, mod))
69257 iter->code = 0;
69258 }
69259+ pax_close_kernel();
69260 }
69261
69262 static int
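Both jump_label hunks wrap writes to the jump-entry tables in pax_open_kernel()/pax_close_kernel(), the KERNEXEC primitive that briefly lifts kernel write protection so read-only data can be patched in place. A userspace analogue using mprotect() shows the bracket pattern (the real mechanism toggles CR0.WP or uses a writable alias, not mprotect):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	char *tbl = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (tbl == MAP_FAILED)
		return 1;

	strcpy(tbl, "jump table v1");
	mprotect(tbl, pagesz, PROT_READ);		/* normally read-only */

	mprotect(tbl, pagesz, PROT_READ | PROT_WRITE);	/* "pax_open_kernel" */
	strcpy(tbl, "jump table v2");			/* patch the entries */
	mprotect(tbl, pagesz, PROT_READ);		/* "pax_close_kernel" */

	printf("%s\n", tbl);
	munmap(tbl, pagesz);
	return 0;
}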
69263diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
69264index 2169fee..45c017a 100644
69265--- a/kernel/kallsyms.c
69266+++ b/kernel/kallsyms.c
69267@@ -11,6 +11,9 @@
69268 * Changed the compression method from stem compression to "table lookup"
69269 * compression (see scripts/kallsyms.c for a more complete description)
69270 */
69271+#ifdef CONFIG_GRKERNSEC_HIDESYM
69272+#define __INCLUDED_BY_HIDESYM 1
69273+#endif
69274 #include <linux/kallsyms.h>
69275 #include <linux/module.h>
69276 #include <linux/init.h>
69277@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
69278
69279 static inline int is_kernel_inittext(unsigned long addr)
69280 {
69281+ if (system_state != SYSTEM_BOOTING)
69282+ return 0;
69283+
69284 if (addr >= (unsigned long)_sinittext
69285 && addr <= (unsigned long)_einittext)
69286 return 1;
69287 return 0;
69288 }
69289
69290+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
69291+#ifdef CONFIG_MODULES
69292+static inline int is_module_text(unsigned long addr)
69293+{
69294+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
69295+ return 1;
69296+
69297+ addr = ktla_ktva(addr);
69298+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
69299+}
69300+#else
69301+static inline int is_module_text(unsigned long addr)
69302+{
69303+ return 0;
69304+}
69305+#endif
69306+#endif
69307+
69308 static inline int is_kernel_text(unsigned long addr)
69309 {
69310 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
69311@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
69312
69313 static inline int is_kernel(unsigned long addr)
69314 {
69315+
69316+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
69317+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
69318+ return 1;
69319+
69320+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
69321+#else
69322 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
69323+#endif
69324+
69325 return 1;
69326 return in_gate_area_no_mm(addr);
69327 }
69328
69329 static int is_ksym_addr(unsigned long addr)
69330 {
69331+
69332+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
69333+ if (is_module_text(addr))
69334+ return 0;
69335+#endif
69336+
69337 if (all_var)
69338 return is_kernel(addr);
69339
69340@@ -470,7 +509,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
69341
69342 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
69343 {
69344- iter->name[0] = '\0';
69345 iter->nameoff = get_symbol_offset(new_pos);
69346 iter->pos = new_pos;
69347 }
69348@@ -518,6 +556,11 @@ static int s_show(struct seq_file *m, void *p)
69349 {
69350 struct kallsym_iter *iter = m->private;
69351
69352+#ifdef CONFIG_GRKERNSEC_HIDESYM
69353+ if (current_uid())
69354+ return 0;
69355+#endif
69356+
69357 /* Some debugging symbols have no name. Ignore them. */
69358 if (!iter->name[0])
69359 return 0;
69360@@ -531,6 +574,7 @@ static int s_show(struct seq_file *m, void *p)
69361 */
69362 type = iter->exported ? toupper(iter->type) :
69363 tolower(iter->type);
69364+
69365 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
69366 type, iter->name, iter->module_name);
69367 } else
69368@@ -556,7 +600,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
69369 struct kallsym_iter *iter;
69370 int ret;
69371
69372- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
69373+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
69374 if (!iter)
69375 return -ENOMEM;
69376 reset_iter(iter, 0);
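The kallsyms changes pair a policy knob (GRKERNSEC_HIDESYM returns an empty view to non-root readers) with a small info-leak fix: reset_iter() no longer clears name[0], so the iterator is allocated with kzalloc() to guarantee the buffer starts zeroed rather than holding stale heap bytes. The hazard being closed looks like this in miniature, with calloc() standing in for kzalloc():

#include <stdio.h>
#include <stdlib.h>

struct sym_iter {
	char name[128];
	unsigned long value;
};

int main(void)
{
	/* calloc() plays the role of kzalloc(): allocation + zeroing */
	struct sym_iter *iter = calloc(1, sizeof(*iter));
	if (!iter)
		return 1;

	if (!iter->name[0])	/* safe: guaranteed '\0', not garbage */
		puts("no symbol decoded yet, skipping");

	free(iter);
	return 0;
}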
69377diff --git a/kernel/kcmp.c b/kernel/kcmp.c
69378index 30b7b22..c726387 100644
69379--- a/kernel/kcmp.c
69380+++ b/kernel/kcmp.c
69381@@ -98,6 +98,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
69382 struct task_struct *task1, *task2;
69383 int ret;
69384
69385+#ifdef CONFIG_GRKERNSEC
69386+ return -ENOSYS;
69387+#endif
69388+
69389 rcu_read_lock();
69390
69391 /*
69392diff --git a/kernel/kexec.c b/kernel/kexec.c
69393index 5e4bd78..00c5b91 100644
69394--- a/kernel/kexec.c
69395+++ b/kernel/kexec.c
69396@@ -1045,7 +1045,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
69397 unsigned long flags)
69398 {
69399 struct compat_kexec_segment in;
69400- struct kexec_segment out, __user *ksegments;
69401+ struct kexec_segment out;
69402+ struct kexec_segment __user *ksegments;
69403 unsigned long i, result;
69404
69405 /* Don't allow clients that don't understand the native
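The kexec hunk is purely declarative: `struct kexec_segment out, __user *ksegments;` becomes two declarations because qualifiers and annotations bind per declarator, and mixing a plain value with a __user-annotated pointer in one statement is exactly the kind of construct sparse and human readers misparse. The classic pitfall, in plain C:

#include <stdio.h>

int main(void)
{
	int x = 1, y = 2;

	/* looks symmetric, is not: c is an int, d is a pointer to int */
	int c, *d;
	c = x;
	d = &y;

	/* the split form says what it means, one declarator per line */
	int e;
	int *f;
	e = y;
	f = &x;

	printf("%d %d %d %d\n", c, *d, e, *f);
	return 0;
}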
69406diff --git a/kernel/kmod.c b/kernel/kmod.c
69407index 1c317e3..4a92a55 100644
69408--- a/kernel/kmod.c
69409+++ b/kernel/kmod.c
69410@@ -74,7 +74,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
69411 kfree(info->argv);
69412 }
69413
69414-static int call_modprobe(char *module_name, int wait)
69415+static int call_modprobe(char *module_name, char *module_param, int wait)
69416 {
69417 static char *envp[] = {
69418 "HOME=/",
69419@@ -83,7 +83,7 @@ static int call_modprobe(char *module_name, int wait)
69420 NULL
69421 };
69422
69423- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
69424+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
69425 if (!argv)
69426 goto out;
69427
69428@@ -95,7 +95,8 @@ static int call_modprobe(char *module_name, int wait)
69429 argv[1] = "-q";
69430 argv[2] = "--";
69431 argv[3] = module_name; /* check free_modprobe_argv() */
69432- argv[4] = NULL;
69433+ argv[4] = module_param;
69434+ argv[5] = NULL;
69435
69436 return call_usermodehelper_fns(modprobe_path, argv, envp,
69437 wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
69438@@ -120,9 +121,8 @@ out:
69439 * If module auto-loading support is disabled then this function
69440 * becomes a no-operation.
69441 */
69442-int __request_module(bool wait, const char *fmt, ...)
69443+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
69444 {
69445- va_list args;
69446 char module_name[MODULE_NAME_LEN];
69447 unsigned int max_modprobes;
69448 int ret;
69449@@ -130,9 +130,7 @@ int __request_module(bool wait, const char *fmt, ...)
69450 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
69451 static int kmod_loop_msg;
69452
69453- va_start(args, fmt);
69454- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
69455- va_end(args);
69456+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
69457 if (ret >= MODULE_NAME_LEN)
69458 return -ENAMETOOLONG;
69459
69460@@ -140,6 +138,20 @@ int __request_module(bool wait, const char *fmt, ...)
69461 if (ret)
69462 return ret;
69463
69464+#ifdef CONFIG_GRKERNSEC_MODHARDEN
69465+ if (!current_uid()) {
69466+ /* hack to work around consolekit/udisks stupidity */
69467+ read_lock(&tasklist_lock);
69468+ if (!strcmp(current->comm, "mount") &&
69469+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
69470+ read_unlock(&tasklist_lock);
69471+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
69472+ return -EPERM;
69473+ }
69474+ read_unlock(&tasklist_lock);
69475+ }
69476+#endif
69477+
69478 /* If modprobe needs a service that is in a module, we get a recursive
69479 * loop. Limit the number of running kmod threads to max_threads/2 or
69480 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
69481@@ -168,11 +180,52 @@ int __request_module(bool wait, const char *fmt, ...)
69482
69483 trace_module_request(module_name, wait, _RET_IP_);
69484
69485- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
69486+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
69487
69488 atomic_dec(&kmod_concurrent);
69489 return ret;
69490 }
69491+
69492+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
69493+{
69494+ va_list args;
69495+ int ret;
69496+
69497+ va_start(args, fmt);
69498+ ret = ____request_module(wait, module_param, fmt, args);
69499+ va_end(args);
69500+
69501+ return ret;
69502+}
69503+
69504+int __request_module(bool wait, const char *fmt, ...)
69505+{
69506+ va_list args;
69507+ int ret;
69508+
69509+#ifdef CONFIG_GRKERNSEC_MODHARDEN
69510+ if (current_uid()) {
69511+ char module_param[MODULE_NAME_LEN];
69512+
69513+ memset(module_param, 0, sizeof(module_param));
69514+
69515+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
69516+
69517+ va_start(args, fmt);
69518+ ret = ____request_module(wait, module_param, fmt, args);
69519+ va_end(args);
69520+
69521+ return ret;
69522+ }
69523+#endif
69524+
69525+ va_start(args, fmt);
69526+ ret = ____request_module(wait, NULL, fmt, args);
69527+ va_end(args);
69528+
69529+ return ret;
69530+}
69531+
69532 EXPORT_SYMBOL(__request_module);
69533 #endif /* CONFIG_MODULES */
69534
69535@@ -283,7 +336,7 @@ static int wait_for_helper(void *data)
69536 *
69537 * Thus the __user pointer cast is valid here.
69538 */
69539- sys_wait4(pid, (int __user *)&ret, 0, NULL);
69540+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
69541
69542 /*
69543 * If ret is 0, either ____call_usermodehelper failed and the
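The kmod.c rework threads an extra modprobe argument through module auto-loading: the varargs entry points become thin wrappers over a single va_list worker, so MODHARDEN can tag non-root requests with a per-uid "grsec_modharden_normal<uid>_" parameter without duplicating the formatting logic. A sketch of the wrapper shape; function names mirror the patch but the bodies are illustrative:

#include <stdarg.h>
#include <stdio.h>

#define MODULE_NAME_LEN 64

static int ____request_module(const char *module_param,
			      const char *fmt, va_list ap)
{
	char module_name[MODULE_NAME_LEN];

	if (vsnprintf(module_name, sizeof(module_name), fmt, ap)
	    >= (int)sizeof(module_name))
		return -36;	/* -ENAMETOOLONG */

	printf("modprobe -q -- %s %s\n",
	       module_name, module_param ? module_param : "");
	return 0;
}

static int __request_module(const char *module_param, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = ____request_module(module_param, fmt, args);
	va_end(args);
	return ret;
}

int main(void)
{
	/* a non-root request would carry a per-uid tag like this */
	return __request_module("grsec_modharden_normal1000_",
				"fs-%s", "ext4");
}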
69544diff --git a/kernel/kprobes.c b/kernel/kprobes.c
69545index 098f396..fe85ff1 100644
69546--- a/kernel/kprobes.c
69547+++ b/kernel/kprobes.c
69548@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
69549 * kernel image and loaded module images reside. This is required
69550 * so x86_64 can correctly handle the %rip-relative fixups.
69551 */
69552- kip->insns = module_alloc(PAGE_SIZE);
69553+ kip->insns = module_alloc_exec(PAGE_SIZE);
69554 if (!kip->insns) {
69555 kfree(kip);
69556 return NULL;
69557@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
69558 */
69559 if (!list_is_singular(&kip->list)) {
69560 list_del(&kip->list);
69561- module_free(NULL, kip->insns);
69562+ module_free_exec(NULL, kip->insns);
69563 kfree(kip);
69564 }
69565 return 1;
69566@@ -2063,7 +2063,7 @@ static int __init init_kprobes(void)
69567 {
69568 int i, err = 0;
69569 unsigned long offset = 0, size = 0;
69570- char *modname, namebuf[128];
69571+ char *modname, namebuf[KSYM_NAME_LEN];
69572 const char *symbol_name;
69573 void *addr;
69574 struct kprobe_blackpoint *kb;
69575@@ -2148,11 +2148,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
69576 kprobe_type = "k";
69577
69578 if (sym)
69579- seq_printf(pi, "%p %s %s+0x%x %s ",
69580+ seq_printf(pi, "%pK %s %s+0x%x %s ",
69581 p->addr, kprobe_type, sym, offset,
69582 (modname ? modname : " "));
69583 else
69584- seq_printf(pi, "%p %s %p ",
69585+ seq_printf(pi, "%pK %s %pK ",
69586 p->addr, kprobe_type, p->addr);
69587
69588 if (!pp)
69589@@ -2190,7 +2190,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
69590 const char *sym = NULL;
69591 unsigned int i = *(loff_t *) v;
69592 unsigned long offset = 0;
69593- char *modname, namebuf[128];
69594+ char *modname, namebuf[KSYM_NAME_LEN];
69595
69596 head = &kprobe_table[i];
69597 preempt_disable();
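Two recurring substitutions appear in the kprobes hunks: fixed `char namebuf[128]` buffers become KSYM_NAME_LEN so they track the real maximum symbol length, and `%p` becomes `%pK`, the printk specifier that censors kernel pointers for unprivileged readers (subject to kptr_restrict). A userspace stand-in for the `%pK` behaviour:

#include <stdio.h>
#include <unistd.h>

/* %pK stand-in: reveal the address only to a privileged reader */
static void print_addr(const void *p)
{
	if (geteuid() == 0)
		printf("%p\n", p);
	else
		puts("0000000000000000");	/* censored, like %pK */
}

int main(void)
{
	int probe_point = 0;

	print_addr(&probe_point);
	return 0;
}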
69598diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
69599index 4e316e1..5501eef 100644
69600--- a/kernel/ksysfs.c
69601+++ b/kernel/ksysfs.c
69602@@ -47,6 +47,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
69603 {
69604 if (count+1 > UEVENT_HELPER_PATH_LEN)
69605 return -ENOENT;
69606+ if (!capable(CAP_SYS_ADMIN))
69607+ return -EPERM;
69608 memcpy(uevent_helper, buf, count);
69609 uevent_helper[count] = '\0';
69610 if (count && uevent_helper[count-1] == '\n')
69611diff --git a/kernel/lockdep.c b/kernel/lockdep.c
69612index 7981e5b..7f2105c 100644
69613--- a/kernel/lockdep.c
69614+++ b/kernel/lockdep.c
69615@@ -590,6 +590,10 @@ static int static_obj(void *obj)
69616 end = (unsigned long) &_end,
69617 addr = (unsigned long) obj;
69618
69619+#ifdef CONFIG_PAX_KERNEXEC
69620+ start = ktla_ktva(start);
69621+#endif
69622+
69623 /*
69624 * static variable?
69625 */
69626@@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
69627 if (!static_obj(lock->key)) {
69628 debug_locks_off();
69629 printk("INFO: trying to register non-static key.\n");
69630+ printk("lock:%pS key:%pS.\n", lock, lock->key);
69631 printk("the code is fine but needs lockdep annotation.\n");
69632 printk("turning off the locking correctness validator.\n");
69633 dump_stack();
69634@@ -3078,7 +3083,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
69635 if (!class)
69636 return 0;
69637 }
69638- atomic_inc((atomic_t *)&class->ops);
69639+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
69640 if (very_verbose(class)) {
69641 printk("\nacquire class [%p] %s", class->key, class->name);
69642 if (class->name_version > 1)
69643diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
69644index 91c32a0..7b88d63 100644
69645--- a/kernel/lockdep_proc.c
69646+++ b/kernel/lockdep_proc.c
69647@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
69648
69649 static void print_name(struct seq_file *m, struct lock_class *class)
69650 {
69651- char str[128];
69652+ char str[KSYM_NAME_LEN];
69653 const char *name = class->name;
69654
69655 if (!name) {
69656@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
69657 return 0;
69658 }
69659
69660- seq_printf(m, "%p", class->key);
69661+ seq_printf(m, "%pK", class->key);
69662 #ifdef CONFIG_DEBUG_LOCKDEP
69663 seq_printf(m, " OPS:%8ld", class->ops);
69664 #endif
69665@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
69666
69667 list_for_each_entry(entry, &class->locks_after, entry) {
69668 if (entry->distance == 1) {
69669- seq_printf(m, " -> [%p] ", entry->class->key);
69670+ seq_printf(m, " -> [%pK] ", entry->class->key);
69671 print_name(m, entry->class);
69672 seq_puts(m, "\n");
69673 }
69674@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
69675 if (!class->key)
69676 continue;
69677
69678- seq_printf(m, "[%p] ", class->key);
69679+ seq_printf(m, "[%pK] ", class->key);
69680 print_name(m, class);
69681 seq_puts(m, "\n");
69682 }
69683@@ -495,7 +495,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
69684 if (!i)
69685 seq_line(m, '-', 40-namelen, namelen);
69686
69687- snprintf(ip, sizeof(ip), "[<%p>]",
69688+ snprintf(ip, sizeof(ip), "[<%pK>]",
69689 (void *)class->contention_point[i]);
69690 seq_printf(m, "%40s %14lu %29s %pS\n",
69691 name, stats->contention_point[i],
69692@@ -510,7 +510,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
69693 if (!i)
69694 seq_line(m, '-', 40-namelen, namelen);
69695
69696- snprintf(ip, sizeof(ip), "[<%p>]",
69697+ snprintf(ip, sizeof(ip), "[<%pK>]",
69698 (void *)class->contending_point[i]);
69699 seq_printf(m, "%40s %14lu %29s %pS\n",
69700 name, stats->contending_point[i],
69701diff --git a/kernel/module.c b/kernel/module.c
69702index 6e48c3a..ac2ef5b 100644
69703--- a/kernel/module.c
69704+++ b/kernel/module.c
69705@@ -59,6 +59,7 @@
69706 #include <linux/pfn.h>
69707 #include <linux/bsearch.h>
69708 #include <linux/fips.h>
69709+#include <linux/grsecurity.h>
69710 #include "module-internal.h"
69711
69712 #define CREATE_TRACE_POINTS
69713@@ -153,7 +154,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
69714
69715 /* Bounds of module allocation, for speeding __module_address.
69716 * Protected by module_mutex. */
69717-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
69718+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
69719+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
69720
69721 int register_module_notifier(struct notifier_block * nb)
69722 {
69723@@ -318,7 +320,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
69724 return true;
69725
69726 list_for_each_entry_rcu(mod, &modules, list) {
69727- struct symsearch arr[] = {
69728+ struct symsearch modarr[] = {
69729 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
69730 NOT_GPL_ONLY, false },
69731 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
69732@@ -340,7 +342,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
69733 #endif
69734 };
69735
69736- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
69737+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
69738 return true;
69739 }
69740 return false;
69741@@ -472,7 +474,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
69742 static int percpu_modalloc(struct module *mod,
69743 unsigned long size, unsigned long align)
69744 {
69745- if (align > PAGE_SIZE) {
69746+ if (align-1 >= PAGE_SIZE) {
69747 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
69748 mod->name, align, PAGE_SIZE);
69749 align = PAGE_SIZE;
69750@@ -1072,7 +1074,7 @@ struct module_attribute module_uevent =
69751 static ssize_t show_coresize(struct module_attribute *mattr,
69752 struct module_kobject *mk, char *buffer)
69753 {
69754- return sprintf(buffer, "%u\n", mk->mod->core_size);
69755+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
69756 }
69757
69758 static struct module_attribute modinfo_coresize =
69759@@ -1081,7 +1083,7 @@ static struct module_attribute modinfo_coresize =
69760 static ssize_t show_initsize(struct module_attribute *mattr,
69761 struct module_kobject *mk, char *buffer)
69762 {
69763- return sprintf(buffer, "%u\n", mk->mod->init_size);
69764+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
69765 }
69766
69767 static struct module_attribute modinfo_initsize =
69768@@ -1295,7 +1297,7 @@ resolve_symbol_wait(struct module *mod,
69769 */
69770 #ifdef CONFIG_SYSFS
69771
69772-#ifdef CONFIG_KALLSYMS
69773+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
69774 static inline bool sect_empty(const Elf_Shdr *sect)
69775 {
69776 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
69777@@ -1761,21 +1763,21 @@ static void set_section_ro_nx(void *base,
69778
69779 static void unset_module_core_ro_nx(struct module *mod)
69780 {
69781- set_page_attributes(mod->module_core + mod->core_text_size,
69782- mod->module_core + mod->core_size,
69783+ set_page_attributes(mod->module_core_rw,
69784+ mod->module_core_rw + mod->core_size_rw,
69785 set_memory_x);
69786- set_page_attributes(mod->module_core,
69787- mod->module_core + mod->core_ro_size,
69788+ set_page_attributes(mod->module_core_rx,
69789+ mod->module_core_rx + mod->core_size_rx,
69790 set_memory_rw);
69791 }
69792
69793 static void unset_module_init_ro_nx(struct module *mod)
69794 {
69795- set_page_attributes(mod->module_init + mod->init_text_size,
69796- mod->module_init + mod->init_size,
69797+ set_page_attributes(mod->module_init_rw,
69798+ mod->module_init_rw + mod->init_size_rw,
69799 set_memory_x);
69800- set_page_attributes(mod->module_init,
69801- mod->module_init + mod->init_ro_size,
69802+ set_page_attributes(mod->module_init_rx,
69803+ mod->module_init_rx + mod->init_size_rx,
69804 set_memory_rw);
69805 }
69806
69807@@ -1786,14 +1788,14 @@ void set_all_modules_text_rw(void)
69808
69809 mutex_lock(&module_mutex);
69810 list_for_each_entry_rcu(mod, &modules, list) {
69811- if ((mod->module_core) && (mod->core_text_size)) {
69812- set_page_attributes(mod->module_core,
69813- mod->module_core + mod->core_text_size,
69814+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
69815+ set_page_attributes(mod->module_core_rx,
69816+ mod->module_core_rx + mod->core_size_rx,
69817 set_memory_rw);
69818 }
69819- if ((mod->module_init) && (mod->init_text_size)) {
69820- set_page_attributes(mod->module_init,
69821- mod->module_init + mod->init_text_size,
69822+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
69823+ set_page_attributes(mod->module_init_rx,
69824+ mod->module_init_rx + mod->init_size_rx,
69825 set_memory_rw);
69826 }
69827 }
69828@@ -1807,14 +1809,14 @@ void set_all_modules_text_ro(void)
69829
69830 mutex_lock(&module_mutex);
69831 list_for_each_entry_rcu(mod, &modules, list) {
69832- if ((mod->module_core) && (mod->core_text_size)) {
69833- set_page_attributes(mod->module_core,
69834- mod->module_core + mod->core_text_size,
69835+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
69836+ set_page_attributes(mod->module_core_rx,
69837+ mod->module_core_rx + mod->core_size_rx,
69838 set_memory_ro);
69839 }
69840- if ((mod->module_init) && (mod->init_text_size)) {
69841- set_page_attributes(mod->module_init,
69842- mod->module_init + mod->init_text_size,
69843+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
69844+ set_page_attributes(mod->module_init_rx,
69845+ mod->module_init_rx + mod->init_size_rx,
69846 set_memory_ro);
69847 }
69848 }
69849@@ -1860,16 +1862,19 @@ static void free_module(struct module *mod)
69850
69851 /* This may be NULL, but that's OK */
69852 unset_module_init_ro_nx(mod);
69853- module_free(mod, mod->module_init);
69854+ module_free(mod, mod->module_init_rw);
69855+ module_free_exec(mod, mod->module_init_rx);
69856 kfree(mod->args);
69857 percpu_modfree(mod);
69858
69859 /* Free lock-classes: */
69860- lockdep_free_key_range(mod->module_core, mod->core_size);
69861+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
69862+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
69863
69864 /* Finally, free the core (containing the module structure) */
69865 unset_module_core_ro_nx(mod);
69866- module_free(mod, mod->module_core);
69867+ module_free_exec(mod, mod->module_core_rx);
69868+ module_free(mod, mod->module_core_rw);
69869
69870 #ifdef CONFIG_MPU
69871 update_protections(current->mm);
69872@@ -1939,9 +1944,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
69873 int ret = 0;
69874 const struct kernel_symbol *ksym;
69875
69876+#ifdef CONFIG_GRKERNSEC_MODHARDEN
69877+ int is_fs_load = 0;
69878+ int register_filesystem_found = 0;
69879+ char *p;
69880+
69881+ p = strstr(mod->args, "grsec_modharden_fs");
69882+ if (p) {
69883+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
69884+ /* copy \0 as well */
69885+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
69886+ is_fs_load = 1;
69887+ }
69888+#endif
69889+
69890 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
69891 const char *name = info->strtab + sym[i].st_name;
69892
69893+#ifdef CONFIG_GRKERNSEC_MODHARDEN
69894+ /* it's a real shame this will never get ripped and copied
69895+ upstream! ;(
69896+ */
69897+ if (is_fs_load && !strcmp(name, "register_filesystem"))
69898+ register_filesystem_found = 1;
69899+#endif
69900+
69901 switch (sym[i].st_shndx) {
69902 case SHN_COMMON:
69903 /* We compiled with -fno-common. These are not
69904@@ -1962,7 +1989,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
69905 ksym = resolve_symbol_wait(mod, info, name);
69906 /* Ok if resolved. */
69907 if (ksym && !IS_ERR(ksym)) {
69908+ pax_open_kernel();
69909 sym[i].st_value = ksym->value;
69910+ pax_close_kernel();
69911 break;
69912 }
69913
69914@@ -1981,11 +2010,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
69915 secbase = (unsigned long)mod_percpu(mod);
69916 else
69917 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
69918+ pax_open_kernel();
69919 sym[i].st_value += secbase;
69920+ pax_close_kernel();
69921 break;
69922 }
69923 }
69924
69925+#ifdef CONFIG_GRKERNSEC_MODHARDEN
69926+ if (is_fs_load && !register_filesystem_found) {
69927+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
69928+ ret = -EPERM;
69929+ }
69930+#endif
69931+
69932 return ret;
69933 }
69934
69935@@ -2069,22 +2107,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
69936 || s->sh_entsize != ~0UL
69937 || strstarts(sname, ".init"))
69938 continue;
69939- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
69940+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
69941+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
69942+ else
69943+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
69944 pr_debug("\t%s\n", sname);
69945 }
69946- switch (m) {
69947- case 0: /* executable */
69948- mod->core_size = debug_align(mod->core_size);
69949- mod->core_text_size = mod->core_size;
69950- break;
69951- case 1: /* RO: text and ro-data */
69952- mod->core_size = debug_align(mod->core_size);
69953- mod->core_ro_size = mod->core_size;
69954- break;
69955- case 3: /* whole core */
69956- mod->core_size = debug_align(mod->core_size);
69957- break;
69958- }
69959 }
69960
69961 pr_debug("Init section allocation order:\n");
69962@@ -2098,23 +2126,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
69963 || s->sh_entsize != ~0UL
69964 || !strstarts(sname, ".init"))
69965 continue;
69966- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
69967- | INIT_OFFSET_MASK);
69968+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
69969+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
69970+ else
69971+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
69972+ s->sh_entsize |= INIT_OFFSET_MASK;
69973 pr_debug("\t%s\n", sname);
69974 }
69975- switch (m) {
69976- case 0: /* executable */
69977- mod->init_size = debug_align(mod->init_size);
69978- mod->init_text_size = mod->init_size;
69979- break;
69980- case 1: /* RO: text and ro-data */
69981- mod->init_size = debug_align(mod->init_size);
69982- mod->init_ro_size = mod->init_size;
69983- break;
69984- case 3: /* whole init */
69985- mod->init_size = debug_align(mod->init_size);
69986- break;
69987- }
69988 }
69989 }
69990
69991@@ -2286,7 +2304,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
69992
69993 /* Put symbol section at end of init part of module. */
69994 symsect->sh_flags |= SHF_ALLOC;
69995- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
69996+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
69997 info->index.sym) | INIT_OFFSET_MASK;
69998 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
69999
70000@@ -2306,13 +2324,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
70001 }
70002
70003 /* Append room for core symbols at end of core part. */
70004- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
70005- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
70006- mod->core_size += strtab_size;
70007+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
70008+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
70009+ mod->core_size_rx += strtab_size;
70010
70011 /* Put string table section at end of init part of module. */
70012 strsect->sh_flags |= SHF_ALLOC;
70013- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
70014+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
70015 info->index.str) | INIT_OFFSET_MASK;
70016 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
70017 }
70018@@ -2330,12 +2348,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
70019 /* Make sure we get permanent strtab: don't use info->strtab. */
70020 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
70021
70022+ pax_open_kernel();
70023+
70024 /* Set types up while we still have access to sections. */
70025 for (i = 0; i < mod->num_symtab; i++)
70026 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
70027
70028- mod->core_symtab = dst = mod->module_core + info->symoffs;
70029- mod->core_strtab = s = mod->module_core + info->stroffs;
70030+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
70031+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
70032 src = mod->symtab;
70033 *s++ = 0;
70034 for (ndst = i = 0; i < mod->num_symtab; i++) {
70035@@ -2348,6 +2368,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
70036 }
70037 }
70038 mod->core_num_syms = ndst;
70039+
70040+ pax_close_kernel();
70041 }
70042 #else
70043 static inline void layout_symtab(struct module *mod, struct load_info *info)
70044@@ -2381,17 +2403,33 @@ void * __weak module_alloc(unsigned long size)
70045 return size == 0 ? NULL : vmalloc_exec(size);
70046 }
70047
70048-static void *module_alloc_update_bounds(unsigned long size)
70049+static void *module_alloc_update_bounds_rw(unsigned long size)
70050 {
70051 void *ret = module_alloc(size);
70052
70053 if (ret) {
70054 mutex_lock(&module_mutex);
70055 /* Update module bounds. */
70056- if ((unsigned long)ret < module_addr_min)
70057- module_addr_min = (unsigned long)ret;
70058- if ((unsigned long)ret + size > module_addr_max)
70059- module_addr_max = (unsigned long)ret + size;
70060+ if ((unsigned long)ret < module_addr_min_rw)
70061+ module_addr_min_rw = (unsigned long)ret;
70062+ if ((unsigned long)ret + size > module_addr_max_rw)
70063+ module_addr_max_rw = (unsigned long)ret + size;
70064+ mutex_unlock(&module_mutex);
70065+ }
70066+ return ret;
70067+}
70068+
70069+static void *module_alloc_update_bounds_rx(unsigned long size)
70070+{
70071+ void *ret = module_alloc_exec(size);
70072+
70073+ if (ret) {
70074+ mutex_lock(&module_mutex);
70075+ /* Update module bounds. */
70076+ if ((unsigned long)ret < module_addr_min_rx)
70077+ module_addr_min_rx = (unsigned long)ret;
70078+ if ((unsigned long)ret + size > module_addr_max_rx)
70079+ module_addr_max_rx = (unsigned long)ret + size;
70080 mutex_unlock(&module_mutex);
70081 }
70082 return ret;
70083@@ -2610,8 +2648,14 @@ static struct module *setup_load_info(struct load_info *info)
70084 static int check_modinfo(struct module *mod, struct load_info *info)
70085 {
70086 const char *modmagic = get_modinfo(info, "vermagic");
70087+ const char *license = get_modinfo(info, "license");
70088 int err;
70089
70090+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
70091+ if (!license || !license_is_gpl_compatible(license))
70092+ return -ENOEXEC;
70093+#endif
70094+
70095 /* This is allowed: modprobe --force will invalidate it. */
70096 if (!modmagic) {
70097 err = try_to_force_load(mod, "bad vermagic");
70098@@ -2634,7 +2678,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
70099 }
70100
70101 /* Set up license info based on the info section */
70102- set_license(mod, get_modinfo(info, "license"));
70103+ set_license(mod, license);
70104
70105 return 0;
70106 }
70107@@ -2728,7 +2772,7 @@ static int move_module(struct module *mod, struct load_info *info)
70108 void *ptr;
70109
70110 /* Do the allocs. */
70111- ptr = module_alloc_update_bounds(mod->core_size);
70112+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
70113 /*
70114 * The pointer to this block is stored in the module structure
70115 * which is inside the block. Just mark it as not being a
70116@@ -2738,23 +2782,50 @@ static int move_module(struct module *mod, struct load_info *info)
70117 if (!ptr)
70118 return -ENOMEM;
70119
70120- memset(ptr, 0, mod->core_size);
70121- mod->module_core = ptr;
70122+ memset(ptr, 0, mod->core_size_rw);
70123+ mod->module_core_rw = ptr;
70124
70125- ptr = module_alloc_update_bounds(mod->init_size);
70126+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
70127 /*
70128 * The pointer to this block is stored in the module structure
70129 * which is inside the block. This block doesn't need to be
70130 * scanned as it contains data and code that will be freed
70131 * after the module is initialized.
70132 */
70133- kmemleak_ignore(ptr);
70134- if (!ptr && mod->init_size) {
70135- module_free(mod, mod->module_core);
70136+ kmemleak_not_leak(ptr);
70137+ if (!ptr && mod->init_size_rw) {
70138+ module_free(mod, mod->module_core_rw);
70139 return -ENOMEM;
70140 }
70141- memset(ptr, 0, mod->init_size);
70142- mod->module_init = ptr;
70143+ memset(ptr, 0, mod->init_size_rw);
70144+ mod->module_init_rw = ptr;
70145+
70146+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
70147+ kmemleak_not_leak(ptr);
70148+ if (!ptr) {
70149+ module_free(mod, mod->module_init_rw);
70150+ module_free(mod, mod->module_core_rw);
70151+ return -ENOMEM;
70152+ }
70153+
70154+ pax_open_kernel();
70155+ memset(ptr, 0, mod->core_size_rx);
70156+ pax_close_kernel();
70157+ mod->module_core_rx = ptr;
70158+
70159+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
70160+ kmemleak_not_leak(ptr);
70161+ if (!ptr && mod->init_size_rx) {
70162+ module_free_exec(mod, mod->module_core_rx);
70163+ module_free(mod, mod->module_init_rw);
70164+ module_free(mod, mod->module_core_rw);
70165+ return -ENOMEM;
70166+ }
70167+
70168+ pax_open_kernel();
70169+ memset(ptr, 0, mod->init_size_rx);
70170+ pax_close_kernel();
70171+ mod->module_init_rx = ptr;
70172
70173 /* Transfer each section which specifies SHF_ALLOC */
70174 pr_debug("final section addresses:\n");
70175@@ -2765,16 +2836,45 @@ static int move_module(struct module *mod, struct load_info *info)
70176 if (!(shdr->sh_flags & SHF_ALLOC))
70177 continue;
70178
70179- if (shdr->sh_entsize & INIT_OFFSET_MASK)
70180- dest = mod->module_init
70181- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
70182- else
70183- dest = mod->module_core + shdr->sh_entsize;
70184+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
70185+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
70186+ dest = mod->module_init_rw
70187+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
70188+ else
70189+ dest = mod->module_init_rx
70190+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
70191+ } else {
70192+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
70193+ dest = mod->module_core_rw + shdr->sh_entsize;
70194+ else
70195+ dest = mod->module_core_rx + shdr->sh_entsize;
70196+ }
70197+
70198+ if (shdr->sh_type != SHT_NOBITS) {
70199+
70200+#ifdef CONFIG_PAX_KERNEXEC
70201+#ifdef CONFIG_X86_64
70202+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
70203+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
70204+#endif
70205+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
70206+ pax_open_kernel();
70207+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
70208+ pax_close_kernel();
70209+ } else
70210+#endif
70211
70212- if (shdr->sh_type != SHT_NOBITS)
70213 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
70214+ }
70215 /* Update sh_addr to point to copy in image. */
70216- shdr->sh_addr = (unsigned long)dest;
70217+
70218+#ifdef CONFIG_PAX_KERNEXEC
70219+ if (shdr->sh_flags & SHF_EXECINSTR)
70220+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
70221+ else
70222+#endif
70223+
70224+ shdr->sh_addr = (unsigned long)dest;
70225 pr_debug("\t0x%lx %s\n",
70226 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
70227 }
70228@@ -2829,12 +2929,12 @@ static void flush_module_icache(const struct module *mod)
70229 * Do it before processing of module parameters, so the module
70230 * can provide parameter accessor functions of its own.
70231 */
70232- if (mod->module_init)
70233- flush_icache_range((unsigned long)mod->module_init,
70234- (unsigned long)mod->module_init
70235- + mod->init_size);
70236- flush_icache_range((unsigned long)mod->module_core,
70237- (unsigned long)mod->module_core + mod->core_size);
70238+ if (mod->module_init_rx)
70239+ flush_icache_range((unsigned long)mod->module_init_rx,
70240+ (unsigned long)mod->module_init_rx
70241+ + mod->init_size_rx);
70242+ flush_icache_range((unsigned long)mod->module_core_rx,
70243+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
70244
70245 set_fs(old_fs);
70246 }
70247@@ -2904,8 +3004,10 @@ out:
70248 static void module_deallocate(struct module *mod, struct load_info *info)
70249 {
70250 percpu_modfree(mod);
70251- module_free(mod, mod->module_init);
70252- module_free(mod, mod->module_core);
70253+ module_free_exec(mod, mod->module_init_rx);
70254+ module_free_exec(mod, mod->module_core_rx);
70255+ module_free(mod, mod->module_init_rw);
70256+ module_free(mod, mod->module_core_rw);
70257 }
70258
70259 int __weak module_finalize(const Elf_Ehdr *hdr,
70260@@ -2918,7 +3020,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
70261 static int post_relocation(struct module *mod, const struct load_info *info)
70262 {
70263 /* Sort exception table now relocations are done. */
70264+ pax_open_kernel();
70265 sort_extable(mod->extable, mod->extable + mod->num_exentries);
70266+ pax_close_kernel();
70267
70268 /* Copy relocated percpu area over. */
70269 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
70270@@ -2989,9 +3093,38 @@ static struct module *load_module(void __user *umod,
70271 if (err)
70272 goto free_unload;
70273
70274+ /* Now copy in args */
70275+ mod->args = strndup_user(uargs, ~0UL >> 1);
70276+ if (IS_ERR(mod->args)) {
70277+ err = PTR_ERR(mod->args);
70278+ goto free_unload;
70279+ }
70280+
70281 /* Set up MODINFO_ATTR fields */
70282 setup_modinfo(mod, &info);
70283
70284+#ifdef CONFIG_GRKERNSEC_MODHARDEN
70285+ {
70286+ char *p, *p2;
70287+
70288+ if (strstr(mod->args, "grsec_modharden_netdev")) {
70289+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
70290+ err = -EPERM;
70291+ goto free_modinfo;
70292+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
70293+ p += sizeof("grsec_modharden_normal") - 1;
70294+ p2 = strstr(p, "_");
70295+ if (p2) {
70296+ *p2 = '\0';
70297+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
70298+ *p2 = '_';
70299+ }
70300+ err = -EPERM;
70301+ goto free_modinfo;
70302+ }
70303+ }
70304+#endif
70305+
70306 /* Fix up syms, so that st_value is a pointer to location. */
70307 err = simplify_symbols(mod, &info);
70308 if (err < 0)
70309@@ -3007,13 +3140,6 @@ static struct module *load_module(void __user *umod,
70310
70311 flush_module_icache(mod);
70312
70313- /* Now copy in args */
70314- mod->args = strndup_user(uargs, ~0UL >> 1);
70315- if (IS_ERR(mod->args)) {
70316- err = PTR_ERR(mod->args);
70317- goto free_arch_cleanup;
70318- }
70319-
70320 /* Mark state as coming so strong_try_module_get() ignores us. */
70321 mod->state = MODULE_STATE_COMING;
70322
70323@@ -3081,11 +3207,11 @@ again:
70324 unlock:
70325 mutex_unlock(&module_mutex);
70326 synchronize_sched();
70327- kfree(mod->args);
70328 free_arch_cleanup:
70329 module_arch_cleanup(mod);
70330 free_modinfo:
70331 free_modinfo(mod);
70332+ kfree(mod->args);
70333 free_unload:
70334 module_unload_free(mod);
70335 free_module:
70336@@ -3126,16 +3252,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
70337 MODULE_STATE_COMING, mod);
70338
70339 /* Set RO and NX regions for core */
70340- set_section_ro_nx(mod->module_core,
70341- mod->core_text_size,
70342- mod->core_ro_size,
70343- mod->core_size);
70344+ set_section_ro_nx(mod->module_core_rx,
70345+ mod->core_size_rx,
70346+ mod->core_size_rx,
70347+ mod->core_size_rx);
70348
70349 /* Set RO and NX regions for init */
70350- set_section_ro_nx(mod->module_init,
70351- mod->init_text_size,
70352- mod->init_ro_size,
70353- mod->init_size);
70354+ set_section_ro_nx(mod->module_init_rx,
70355+ mod->init_size_rx,
70356+ mod->init_size_rx,
70357+ mod->init_size_rx);
70358
70359 do_mod_ctors(mod);
70360 /* Start the module */
70361@@ -3180,11 +3306,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
70362 mod->strtab = mod->core_strtab;
70363 #endif
70364 unset_module_init_ro_nx(mod);
70365- module_free(mod, mod->module_init);
70366- mod->module_init = NULL;
70367- mod->init_size = 0;
70368- mod->init_ro_size = 0;
70369- mod->init_text_size = 0;
70370+ module_free(mod, mod->module_init_rw);
70371+ module_free_exec(mod, mod->module_init_rx);
70372+ mod->module_init_rw = NULL;
70373+ mod->module_init_rx = NULL;
70374+ mod->init_size_rw = 0;
70375+ mod->init_size_rx = 0;
70376 mutex_unlock(&module_mutex);
70377 wake_up_all(&module_wq);
70378
70379@@ -3216,10 +3343,16 @@ static const char *get_ksymbol(struct module *mod,
70380 unsigned long nextval;
70381
70382 /* At worse, next value is at end of module */
70383- if (within_module_init(addr, mod))
70384- nextval = (unsigned long)mod->module_init+mod->init_text_size;
70385+ if (within_module_init_rx(addr, mod))
70386+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
70387+ else if (within_module_init_rw(addr, mod))
70388+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
70389+ else if (within_module_core_rx(addr, mod))
70390+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
70391+ else if (within_module_core_rw(addr, mod))
70392+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
70393 else
70394- nextval = (unsigned long)mod->module_core+mod->core_text_size;
70395+ return NULL;
70396
70397 /* Scan for closest preceding symbol, and next symbol. (ELF
70398 starts real symbols at 1). */
70399@@ -3454,7 +3587,7 @@ static int m_show(struct seq_file *m, void *p)
70400 char buf[8];
70401
70402 seq_printf(m, "%s %u",
70403- mod->name, mod->init_size + mod->core_size);
70404+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
70405 print_unload_info(m, mod);
70406
70407 /* Informative for users. */
70408@@ -3463,7 +3596,7 @@ static int m_show(struct seq_file *m, void *p)
70409 mod->state == MODULE_STATE_COMING ? "Loading":
70410 "Live");
70411 /* Used by oprofile and other similar tools. */
70412- seq_printf(m, " 0x%pK", mod->module_core);
70413+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
70414
70415 /* Taints info */
70416 if (mod->taints)
70417@@ -3499,7 +3632,17 @@ static const struct file_operations proc_modules_operations = {
70418
70419 static int __init proc_modules_init(void)
70420 {
70421+#ifndef CONFIG_GRKERNSEC_HIDESYM
70422+#ifdef CONFIG_GRKERNSEC_PROC_USER
70423+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
70424+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
70425+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
70426+#else
70427 proc_create("modules", 0, NULL, &proc_modules_operations);
70428+#endif
70429+#else
70430+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
70431+#endif
70432 return 0;
70433 }
70434 module_init(proc_modules_init);
70435@@ -3558,12 +3701,12 @@ struct module *__module_address(unsigned long addr)
70436 {
70437 struct module *mod;
70438
70439- if (addr < module_addr_min || addr > module_addr_max)
70440+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
70441+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
70442 return NULL;
70443
70444 list_for_each_entry_rcu(mod, &modules, list)
70445- if (within_module_core(addr, mod)
70446- || within_module_init(addr, mod))
70447+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
70448 return mod;
70449 return NULL;
70450 }
70451@@ -3597,11 +3740,20 @@ bool is_module_text_address(unsigned long addr)
70452 */
70453 struct module *__module_text_address(unsigned long addr)
70454 {
70455- struct module *mod = __module_address(addr);
70456+ struct module *mod;
70457+
70458+#ifdef CONFIG_X86_32
70459+ addr = ktla_ktva(addr);
70460+#endif
70461+
70462+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
70463+ return NULL;
70464+
70465+ mod = __module_address(addr);
70466+
70467 if (mod) {
70468 /* Make sure it's within the text section. */
70469- if (!within(addr, mod->module_init, mod->init_text_size)
70470- && !within(addr, mod->module_core, mod->core_text_size))
70471+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
70472 mod = NULL;
70473 }
70474 return mod;
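
The module.c hunks above complete the PaX split of each module into a read-execute region (module_core_rx/module_init_rx) and a read-write region (module_core_rw/module_init_rw): set_section_ro_nx() now covers the whole rx region instead of separate text/ro/total sizes, and every address-range query has to consider both regions. A minimal sketch of the bookkeeping this implies — the struct below is a simplified stand-in, not the kernel's struct module:

    #include <stdbool.h>

    /* Simplified stand-in for the split-layout fields the patch uses. */
    struct mod_layout {
        void *core_rx;                /* executable + read-only mapping */
        void *core_rw;                /* writable data mapping */
        unsigned long size_rx;
        unsigned long size_rw;
    };

    static bool within(unsigned long addr, const void *base, unsigned long size)
    {
        return addr >= (unsigned long)base && addr < (unsigned long)base + size;
    }

    /* Text lookups consult only the rx region, as in __module_text_address(). */
    static bool within_core_rx(unsigned long addr, const struct mod_layout *m)
    {
        return within(addr, m->core_rx, m->size_rx);
    }

    /* General address lookups must accept either region, as in __module_address(). */
    static bool within_core(unsigned long addr, const struct mod_layout *m)
    {
        return within_core_rx(addr, m) || within(addr, m->core_rw, m->size_rw);
    }
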
70475diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
70476index 7e3443f..b2a1e6b 100644
70477--- a/kernel/mutex-debug.c
70478+++ b/kernel/mutex-debug.c
70479@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
70480 }
70481
70482 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
70483- struct thread_info *ti)
70484+ struct task_struct *task)
70485 {
70486 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
70487
70488 /* Mark the current thread as blocked on the lock: */
70489- ti->task->blocked_on = waiter;
70490+ task->blocked_on = waiter;
70491 }
70492
70493 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
70494- struct thread_info *ti)
70495+ struct task_struct *task)
70496 {
70497 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
70498- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
70499- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
70500- ti->task->blocked_on = NULL;
70501+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
70502+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
70503+ task->blocked_on = NULL;
70504
70505 list_del_init(&waiter->list);
70506 waiter->task = NULL;
70507diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
70508index 0799fd3..d06ae3b 100644
70509--- a/kernel/mutex-debug.h
70510+++ b/kernel/mutex-debug.h
70511@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
70512 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
70513 extern void debug_mutex_add_waiter(struct mutex *lock,
70514 struct mutex_waiter *waiter,
70515- struct thread_info *ti);
70516+ struct task_struct *task);
70517 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
70518- struct thread_info *ti);
70519+ struct task_struct *task);
70520 extern void debug_mutex_unlock(struct mutex *lock);
70521 extern void debug_mutex_init(struct mutex *lock, const char *name,
70522 struct lock_class_key *key);
70523diff --git a/kernel/mutex.c b/kernel/mutex.c
70524index a307cc9..27fd2e9 100644
70525--- a/kernel/mutex.c
70526+++ b/kernel/mutex.c
70527@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
70528 spin_lock_mutex(&lock->wait_lock, flags);
70529
70530 debug_mutex_lock_common(lock, &waiter);
70531- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
70532+ debug_mutex_add_waiter(lock, &waiter, task);
70533
70534 /* add waiting tasks to the end of the waitqueue (FIFO): */
70535 list_add_tail(&waiter.list, &lock->wait_list);
70536@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
70537 * TASK_UNINTERRUPTIBLE case.)
70538 */
70539 if (unlikely(signal_pending_state(state, task))) {
70540- mutex_remove_waiter(lock, &waiter,
70541- task_thread_info(task));
70542+ mutex_remove_waiter(lock, &waiter, task);
70543 mutex_release(&lock->dep_map, 1, ip);
70544 spin_unlock_mutex(&lock->wait_lock, flags);
70545
70546@@ -247,7 +246,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
70547 done:
70548 lock_acquired(&lock->dep_map, ip);
70549 /* got the lock - rejoice! */
70550- mutex_remove_waiter(lock, &waiter, current_thread_info());
70551+ mutex_remove_waiter(lock, &waiter, task);
70552 mutex_set_owner(lock);
70553
70554 /* set it to 0 if there are no waiters left: */
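
The three mutex hunks are a small API cleanup carried along by the patch: debug_mutex_add_waiter() and mutex_remove_waiter() take the task_struct directly instead of a thread_info, which deletes the task_thread_info(task)->task round trip at every call site (and the current_thread_info() one in the lock-acquired path). The shape of the change, with stand-in types:

    struct task_struct;

    struct waiter { struct task_struct *task; };

    /* Before: void add_waiter(struct waiter *w, struct thread_info *ti)
     *             { w->task = ti->task; }
     * After: the caller's task pointer flows straight through. */
    static void add_waiter(struct waiter *w, struct task_struct *task)
    {
        w->task = task;
    }
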
70555diff --git a/kernel/notifier.c b/kernel/notifier.c
70556index 2d5cc4c..d9ea600 100644
70557--- a/kernel/notifier.c
70558+++ b/kernel/notifier.c
70559@@ -5,6 +5,7 @@
70560 #include <linux/rcupdate.h>
70561 #include <linux/vmalloc.h>
70562 #include <linux/reboot.h>
70563+#include <linux/mm.h>
70564
70565 /*
70566 * Notifier list for kernel code which wants to be called
70567@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
70568 while ((*nl) != NULL) {
70569 if (n->priority > (*nl)->priority)
70570 break;
70571- nl = &((*nl)->next);
70572+ nl = (struct notifier_block **)&((*nl)->next);
70573 }
70574- n->next = *nl;
70575+ pax_open_kernel();
70576+ *(const void **)&n->next = *nl;
70577 rcu_assign_pointer(*nl, n);
70578+ pax_close_kernel();
70579 return 0;
70580 }
70581
70582@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
70583 return 0;
70584 if (n->priority > (*nl)->priority)
70585 break;
70586- nl = &((*nl)->next);
70587+ nl = (struct notifier_block **)&((*nl)->next);
70588 }
70589- n->next = *nl;
70590+ pax_open_kernel();
70591+ *(const void **)&n->next = *nl;
70592 rcu_assign_pointer(*nl, n);
70593+ pax_close_kernel();
70594 return 0;
70595 }
70596
70597@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
70598 {
70599 while ((*nl) != NULL) {
70600 if ((*nl) == n) {
70601+ pax_open_kernel();
70602 rcu_assign_pointer(*nl, n->next);
70603+ pax_close_kernel();
70604 return 0;
70605 }
70606- nl = &((*nl)->next);
70607+ nl = (struct notifier_block **)&((*nl)->next);
70608 }
70609 return -ENOENT;
70610 }
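
In notifier.c the chain head and the next pointers may now live in read-only memory (the notifier structures are constified elsewhere in this patch), so every write goes through a pax_open_kernel()/pax_close_kernel() window; on x86, as far as I can tell from the PaX implementation, that window is realised by toggling CR0.WP. A self-contained userspace analogy of the open/write/close pattern, with mprotect() standing in for the kernel primitives:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Userspace analogy for pax_open_kernel()/pax_close_kernel(): data that
     * normally sits read-only is made writable only for one sanctioned
     * update. Error handling omitted for brevity. */
    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        strcpy(p, "initial");
        mprotect(p, pagesz, PROT_READ);               /* "constify" the data */

        mprotect(p, pagesz, PROT_READ | PROT_WRITE);  /* pax_open_kernel()  */
        strcpy(p, "updated");                         /* the sanctioned write */
        mprotect(p, pagesz, PROT_READ);               /* pax_close_kernel() */

        printf("%s\n", p);
        return 0;
    }
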
70611diff --git a/kernel/panic.c b/kernel/panic.c
70612index e1b2822..5edc1d9 100644
70613--- a/kernel/panic.c
70614+++ b/kernel/panic.c
70615@@ -410,7 +410,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
70616 const char *board;
70617
70618 printk(KERN_WARNING "------------[ cut here ]------------\n");
70619- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
70620+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
70621 board = dmi_get_system_info(DMI_PRODUCT_NAME);
70622 if (board)
70623 printk(KERN_WARNING "Hardware name: %s\n", board);
70624@@ -465,7 +465,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
70625 */
70626 void __stack_chk_fail(void)
70627 {
70628- panic("stack-protector: Kernel stack is corrupted in: %p\n",
70629+ dump_stack();
70630+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
70631 __builtin_return_address(0));
70632 }
70633 EXPORT_SYMBOL(__stack_chk_fail);
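
panic.c: the WARN path and __stack_chk_fail() move from %pS/%p to %pA, a printk specifier added elsewhere in this patch (its exact semantics live in the lib/vsprintf.c hunks), and a stack-protector failure now calls dump_stack() before panicking so the corrupted call chain gets logged. __stack_chk_fail() is the hook GCC's -fstack-protector emits into every protected function's epilogue; a deliberately broken userspace program exercises the same mechanism:

    /* Build with: gcc -fstack-protector-all -O0 demo.c -o demo
     * The oversized strcpy() clobbers the compiler-inserted canary, so the
     * function epilogue calls __stack_chk_fail() and the runtime aborts. */
    #include <string.h>

    static void smash(const char *src)
    {
        char buf[8];
        strcpy(buf, src);                 /* unchecked copy: overwrites the canary */
    }

    int main(void)
    {
        smash("definitely longer than eight bytes");
        return 0;                         /* never reached */
    }
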
70634diff --git a/kernel/pid.c b/kernel/pid.c
70635index aebd4f5..1693c13 100644
70636--- a/kernel/pid.c
70637+++ b/kernel/pid.c
70638@@ -33,6 +33,7 @@
70639 #include <linux/rculist.h>
70640 #include <linux/bootmem.h>
70641 #include <linux/hash.h>
70642+#include <linux/security.h>
70643 #include <linux/pid_namespace.h>
70644 #include <linux/init_task.h>
70645 #include <linux/syscalls.h>
70646@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
70647
70648 int pid_max = PID_MAX_DEFAULT;
70649
70650-#define RESERVED_PIDS 300
70651+#define RESERVED_PIDS 500
70652
70653 int pid_max_min = RESERVED_PIDS + 1;
70654 int pid_max_max = PID_MAX_LIMIT;
70655@@ -420,10 +421,18 @@ EXPORT_SYMBOL(pid_task);
70656 */
70657 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
70658 {
70659+ struct task_struct *task;
70660+
70661 rcu_lockdep_assert(rcu_read_lock_held(),
70662 "find_task_by_pid_ns() needs rcu_read_lock()"
70663 " protection");
70664- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
70665+
70666+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
70667+
70668+ if (gr_pid_is_chrooted(task))
70669+ return NULL;
70670+
70671+ return task;
70672 }
70673
70674 struct task_struct *find_task_by_vpid(pid_t vnr)
70675@@ -431,6 +440,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
70676 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
70677 }
70678
70679+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
70680+{
70681+ rcu_lockdep_assert(rcu_read_lock_held(),
70682+ "find_task_by_pid_ns() needs rcu_read_lock()"
70683+ " protection");
70684+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
70685+}
70686+
70687 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
70688 {
70689 struct pid *pid;
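
pid.c: find_task_by_pid_ns() now returns NULL for tasks that gr_pid_is_chrooted() decides a chrooted caller should not see, and find_task_by_vpid_unrestricted() is introduced for the few in-kernel users that must bypass the filter (the tgkill exception in the kernel/signal.c hunks further down uses it). The RCU contract for callers is unchanged; a kernel-context sketch of it:

    #include <linux/rcupdate.h>
    #include <linux/sched.h>

    /* Caller-side contract (unchanged by the patch): look up under
     * rcu_read_lock() and pin the task before dropping the lock. */
    static struct task_struct *lookup_pinned(pid_t nr)
    {
        struct task_struct *task;

        rcu_read_lock();
        task = find_task_by_vpid(nr);   /* may be NULL for chroot-hidden tasks */
        if (task)
            get_task_struct(task);
        rcu_read_unlock();

        return task;                    /* caller must put_task_struct() later */
    }
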
70690diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
70691index 125cb67..2e5c8ad 100644
70692--- a/kernel/posix-cpu-timers.c
70693+++ b/kernel/posix-cpu-timers.c
70694@@ -6,9 +6,11 @@
70695 #include <linux/posix-timers.h>
70696 #include <linux/errno.h>
70697 #include <linux/math64.h>
70698+#include <linux/security.h>
70699 #include <asm/uaccess.h>
70700 #include <linux/kernel_stat.h>
70701 #include <trace/events/timer.h>
70702+#include <linux/random.h>
70703
70704 /*
70705 * Called after updating RLIMIT_CPU to run cpu timer and update
70706@@ -494,6 +496,8 @@ static void cleanup_timers(struct list_head *head,
70707 */
70708 void posix_cpu_timers_exit(struct task_struct *tsk)
70709 {
70710+ add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
70711+ sizeof(unsigned long long));
70712 cleanup_timers(tsk->cpu_timers,
70713 tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);
70714
70715@@ -1578,14 +1582,14 @@ struct k_clock clock_posix_cpu = {
70716
70717 static __init int init_posix_cpu_timers(void)
70718 {
70719- struct k_clock process = {
70720+ static struct k_clock process = {
70721 .clock_getres = process_cpu_clock_getres,
70722 .clock_get = process_cpu_clock_get,
70723 .timer_create = process_cpu_timer_create,
70724 .nsleep = process_cpu_nsleep,
70725 .nsleep_restart = process_cpu_nsleep_restart,
70726 };
70727- struct k_clock thread = {
70728+ static struct k_clock thread = {
70729 .clock_getres = thread_cpu_clock_getres,
70730 .clock_get = thread_cpu_clock_get,
70731 .timer_create = thread_cpu_timer_create,
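
posix-cpu-timers.c: the process/thread k_clock instances become static because, with the posix-timers.c change just below, posix_timers_register_clock() stores the caller's pointer instead of copying the struct; automatic-storage instances would leave the clock table dangling the moment the __init function returned. The hazard in miniature:

    #include <stdio.h>

    struct kclock_like { int id; };

    static struct kclock_like *registered;

    static void register_clock(struct kclock_like *kc)
    {
        registered = kc;                  /* pointer is kept; struct is NOT copied */
    }

    static void init_wrong(void)
    {
        struct kclock_like kc = { 1 };    /* automatic storage */
        register_clock(&kc);              /* dangles as soon as we return */
    }

    static void init_right(void)
    {
        static struct kclock_like kc = { 2 };  /* static storage: lives forever */
        register_clock(&kc);
    }

    int main(void)
    {
        init_right();                     /* init_wrong() would be the bug */
        printf("id=%d\n", registered->id);
        return 0;
    }
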
70732diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
70733index 69185ae..cc2847a 100644
70734--- a/kernel/posix-timers.c
70735+++ b/kernel/posix-timers.c
70736@@ -43,6 +43,7 @@
70737 #include <linux/idr.h>
70738 #include <linux/posix-clock.h>
70739 #include <linux/posix-timers.h>
70740+#include <linux/grsecurity.h>
70741 #include <linux/syscalls.h>
70742 #include <linux/wait.h>
70743 #include <linux/workqueue.h>
70744@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
70745 * which we beg off on and pass to do_sys_settimeofday().
70746 */
70747
70748-static struct k_clock posix_clocks[MAX_CLOCKS];
70749+static struct k_clock *posix_clocks[MAX_CLOCKS];
70750
70751 /*
70752 * These ones are defined below.
70753@@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
70754 */
70755 static __init int init_posix_timers(void)
70756 {
70757- struct k_clock clock_realtime = {
70758+ static struct k_clock clock_realtime = {
70759 .clock_getres = hrtimer_get_res,
70760 .clock_get = posix_clock_realtime_get,
70761 .clock_set = posix_clock_realtime_set,
70762@@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
70763 .timer_get = common_timer_get,
70764 .timer_del = common_timer_del,
70765 };
70766- struct k_clock clock_monotonic = {
70767+ static struct k_clock clock_monotonic = {
70768 .clock_getres = hrtimer_get_res,
70769 .clock_get = posix_ktime_get_ts,
70770 .nsleep = common_nsleep,
70771@@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
70772 .timer_get = common_timer_get,
70773 .timer_del = common_timer_del,
70774 };
70775- struct k_clock clock_monotonic_raw = {
70776+ static struct k_clock clock_monotonic_raw = {
70777 .clock_getres = hrtimer_get_res,
70778 .clock_get = posix_get_monotonic_raw,
70779 };
70780- struct k_clock clock_realtime_coarse = {
70781+ static struct k_clock clock_realtime_coarse = {
70782 .clock_getres = posix_get_coarse_res,
70783 .clock_get = posix_get_realtime_coarse,
70784 };
70785- struct k_clock clock_monotonic_coarse = {
70786+ static struct k_clock clock_monotonic_coarse = {
70787 .clock_getres = posix_get_coarse_res,
70788 .clock_get = posix_get_monotonic_coarse,
70789 };
70790- struct k_clock clock_boottime = {
70791+ static struct k_clock clock_boottime = {
70792 .clock_getres = hrtimer_get_res,
70793 .clock_get = posix_get_boottime,
70794 .nsleep = common_nsleep,
70795@@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
70796 return;
70797 }
70798
70799- posix_clocks[clock_id] = *new_clock;
70800+ posix_clocks[clock_id] = new_clock;
70801 }
70802 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
70803
70804@@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
70805 return (id & CLOCKFD_MASK) == CLOCKFD ?
70806 &clock_posix_dynamic : &clock_posix_cpu;
70807
70808- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
70809+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
70810 return NULL;
70811- return &posix_clocks[id];
70812+ return posix_clocks[id];
70813 }
70814
70815 static int common_timer_create(struct k_itimer *new_timer)
70816@@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
70817 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
70818 return -EFAULT;
70819
70820+ /* only the CLOCK_REALTIME clock can be set, all other clocks
70821+ have their clock_set fptr set to a nosettime dummy function
70822+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
70823+ call common_clock_set, which calls do_sys_settimeofday, which
70824+ we hook
70825+ */
70826+
70827 return kc->clock_set(which_clock, &new_tp);
70828 }
70829
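
posix-timers.c: turning posix_clocks[] from an array of structs into an array of pointers lets the k_clock definitions be static (and, under constify, read-only), but it also means an unregistered clockid is now a NULL slot rather than a zeroed struct — hence the extra !posix_clocks[id] test in clockid_to_kclock() before the clock_getres dereference. The lookup pattern:

    #include <stddef.h>

    #define NCLOCKS 16

    struct kclock_like { int (*clock_getres)(void); };

    static struct kclock_like *clocks[NCLOCKS];

    /* Both the slot and its mandatory callback must be checked now;
     * with an array of structs, only the callback could be NULL. */
    static struct kclock_like *clockid_to_kclock_like(int id)
    {
        if (id < 0 || id >= NCLOCKS || !clocks[id] || !clocks[id]->clock_getres)
            return NULL;
        return clocks[id];
    }
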
70830diff --git a/kernel/power/process.c b/kernel/power/process.c
70831index 87da817..30ddd13 100644
70832--- a/kernel/power/process.c
70833+++ b/kernel/power/process.c
70834@@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
70835 u64 elapsed_csecs64;
70836 unsigned int elapsed_csecs;
70837 bool wakeup = false;
70838+ bool timedout = false;
70839
70840 do_gettimeofday(&start);
70841
70842@@ -43,6 +44,8 @@ static int try_to_freeze_tasks(bool user_only)
70843
70844 while (true) {
70845 todo = 0;
70846+ if (time_after(jiffies, end_time))
70847+ timedout = true;
70848 read_lock(&tasklist_lock);
70849 do_each_thread(g, p) {
70850 if (p == current || !freeze_task(p))
70851@@ -58,9 +61,13 @@ static int try_to_freeze_tasks(bool user_only)
70852 * guaranteed that TASK_STOPPED/TRACED -> TASK_RUNNING
70853 * transition can't race with task state testing here.
70854 */
70855- if (!task_is_stopped_or_traced(p) &&
70856- !freezer_should_skip(p))
70857+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
70858 todo++;
70859+ if (timedout) {
70860+ printk(KERN_ERR "Task refusing to freeze:\n");
70861+ sched_show_task(p);
70862+ }
70863+ }
70864 } while_each_thread(g, p);
70865 read_unlock(&tasklist_lock);
70866
70867@@ -69,7 +76,7 @@ static int try_to_freeze_tasks(bool user_only)
70868 todo += wq_busy;
70869 }
70870
70871- if (!todo || time_after(jiffies, end_time))
70872+ if (!todo || timedout)
70873 break;
70874
70875 if (pm_wakeup_pending()) {
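
power/process.c: instead of breaking out of the freeze loop silently when the deadline passes, the timeout is latched into a bool at the top of each scan, so the very pass that detects expiry also names (via sched_show_task()) every task still refusing to freeze. The latch-then-report loop shape, reduced to a runnable toy:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        int pending[3] = { 1, 0, 1 };        /* tasks 0 and 2 never "freeze" */
        int pass = 0;

        for (;;) {
            bool timedout = (pass++ >= 3);   /* time_after(jiffies, end_time) stand-in */
            int todo = 0;

            for (int i = 0; i < 3; i++) {
                if (!pending[i])
                    continue;
                todo++;
                if (timedout)                /* report during the detecting pass */
                    printf("task %d refusing to freeze\n", i);
            }
            if (!todo || timedout)           /* same exit condition as the patch */
                break;
        }
        return 0;
    }
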
70876diff --git a/kernel/printk.c b/kernel/printk.c
70877index f8e0b5a..dda2a5c 100644
70878--- a/kernel/printk.c
70879+++ b/kernel/printk.c
70880@@ -817,6 +817,11 @@ static int check_syslog_permissions(int type, bool from_file)
70881 if (from_file && type != SYSLOG_ACTION_OPEN)
70882 return 0;
70883
70884+#ifdef CONFIG_GRKERNSEC_DMESG
70885+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
70886+ return -EPERM;
70887+#endif
70888+
70889 if (syslog_action_restricted(type)) {
70890 if (capable(CAP_SYSLOG))
70891 return 0;
70892diff --git a/kernel/profile.c b/kernel/profile.c
70893index 76b8e77..a2930e8 100644
70894--- a/kernel/profile.c
70895+++ b/kernel/profile.c
70896@@ -39,7 +39,7 @@ struct profile_hit {
70897 /* Oprofile timer tick hook */
70898 static int (*timer_hook)(struct pt_regs *) __read_mostly;
70899
70900-static atomic_t *prof_buffer;
70901+static atomic_unchecked_t *prof_buffer;
70902 static unsigned long prof_len, prof_shift;
70903
70904 int prof_on __read_mostly;
70905@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
70906 hits[i].pc = 0;
70907 continue;
70908 }
70909- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
70910+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
70911 hits[i].hits = hits[i].pc = 0;
70912 }
70913 }
70914@@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
70915 * Add the current hit(s) and flush the write-queue out
70916 * to the global buffer:
70917 */
70918- atomic_add(nr_hits, &prof_buffer[pc]);
70919+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
70920 for (i = 0; i < NR_PROFILE_HIT; ++i) {
70921- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
70922+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
70923 hits[i].pc = hits[i].hits = 0;
70924 }
70925 out:
70926@@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
70927 {
70928 unsigned long pc;
70929 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
70930- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
70931+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
70932 }
70933 #endif /* !CONFIG_SMP */
70934
70935@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
70936 return -EFAULT;
70937 buf++; p++; count--; read++;
70938 }
70939- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
70940+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
70941 if (copy_to_user(buf, (void *)pnt, count))
70942 return -EFAULT;
70943 read += count;
70944@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
70945 }
70946 #endif
70947 profile_discard_flip_buffers();
70948- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
70949+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
70950 return count;
70951 }
70952
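
profile.c: the hit counters become atomic_unchecked_t. Under PaX's REFCOUNT hardening, which this patch carries, ordinary atomic_t arithmetic is instrumented to catch overflow (aimed at reference-count wraps); the _unchecked variants opt statistical counters like these out and keep plain wraparound semantics. The same mechanical substitution repeats below in rcutorture.c, rcutree.c, rtmutex-tester.c and sched/auto_group.c. The distinction, as a hedged userspace stand-in:

    #include <limits.h>
    #include <stdio.h>

    /* Stand-in for the checked/unchecked split: a checked counter refuses
     * to wrap (the kernel instrumentation would report and saturate),
     * an unchecked one is allowed to. */
    static int add_checked(int v, int d)
    {
        if (d > 0 && v > INT_MAX - d) {
            fprintf(stderr, "overflow trapped\n");
            return INT_MAX;
        }
        return v + d;
    }

    static unsigned int add_unchecked(unsigned int v, unsigned int d)
    {
        return v + d;                     /* profiling bucket: wrap is harmless */
    }

    int main(void)
    {
        printf("%d %u\n", add_checked(INT_MAX, 1), add_unchecked(~0u, 1));
        return 0;
    }
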
70953diff --git a/kernel/ptrace.c b/kernel/ptrace.c
70954index 1f5e55d..8b8f969 100644
70955--- a/kernel/ptrace.c
70956+++ b/kernel/ptrace.c
70957@@ -280,7 +280,7 @@ static int ptrace_attach(struct task_struct *task, long request,
70958
70959 if (seize)
70960 flags |= PT_SEIZED;
70961- if (ns_capable(task_user_ns(task), CAP_SYS_PTRACE))
70962+ if (ns_capable_nolog(task_user_ns(task), CAP_SYS_PTRACE))
70963 flags |= PT_PTRACE_CAP;
70964 task->ptrace = flags;
70965
70966@@ -487,7 +487,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
70967 break;
70968 return -EIO;
70969 }
70970- if (copy_to_user(dst, buf, retval))
70971+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
70972 return -EFAULT;
70973 copied += retval;
70974 src += retval;
70975@@ -672,7 +672,7 @@ int ptrace_request(struct task_struct *child, long request,
70976 bool seized = child->ptrace & PT_SEIZED;
70977 int ret = -EIO;
70978 siginfo_t siginfo, *si;
70979- void __user *datavp = (void __user *) data;
70980+ void __user *datavp = (__force void __user *) data;
70981 unsigned long __user *datalp = datavp;
70982 unsigned long flags;
70983
70984@@ -874,14 +874,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
70985 goto out;
70986 }
70987
70988+ if (gr_handle_ptrace(child, request)) {
70989+ ret = -EPERM;
70990+ goto out_put_task_struct;
70991+ }
70992+
70993 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
70994 ret = ptrace_attach(child, request, addr, data);
70995 /*
70996 * Some architectures need to do book-keeping after
70997 * a ptrace attach.
70998 */
70999- if (!ret)
71000+ if (!ret) {
71001 arch_ptrace_attach(child);
71002+ gr_audit_ptrace(child);
71003+ }
71004 goto out_put_task_struct;
71005 }
71006
71007@@ -907,7 +914,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
71008 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
71009 if (copied != sizeof(tmp))
71010 return -EIO;
71011- return put_user(tmp, (unsigned long __user *)data);
71012+ return put_user(tmp, (__force unsigned long __user *)data);
71013 }
71014
71015 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
71016@@ -1017,14 +1024,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
71017 goto out;
71018 }
71019
71020+ if (gr_handle_ptrace(child, request)) {
71021+ ret = -EPERM;
71022+ goto out_put_task_struct;
71023+ }
71024+
71025 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
71026 ret = ptrace_attach(child, request, addr, data);
71027 /*
71028 * Some architectures need to do book-keeping after
71029 * a ptrace attach.
71030 */
71031- if (!ret)
71032+ if (!ret) {
71033 arch_ptrace_attach(child);
71034+ gr_audit_ptrace(child);
71035+ }
71036 goto out_put_task_struct;
71037 }
71038
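
ptrace.c: access_process_vm() should never report more bytes than the sizeof(buf) it was asked for, but the added retval > sizeof(buf) test in ptrace_readdata() converts any such miscount into -EFAULT instead of letting copy_to_user() read past the on-stack buffer. (The gr_handle_ptrace()/gr_audit_ptrace() hooks added to both syscall entry points are grsecurity's policy and audit points for attach.) The clamp pattern:

    #include <stdio.h>
    #include <string.h>

    /* Never copy out more than the buffer we actually own, even if a
     * lower layer miscounts -- ptrace_readdata()'s belt-and-braces check. */
    static int copy_out(char *dst, const char *buf, size_t bufsz, long filled)
    {
        if (filled < 0 || (size_t)filled > bufsz)
            return -1;                     /* -EFAULT in the kernel */
        memcpy(dst, buf, (size_t)filled);
        return 0;
    }

    int main(void)
    {
        char buf[16] = "abc", dst[16];
        printf("%d %d\n", copy_out(dst, buf, sizeof(buf), 4),
                          copy_out(dst, buf, sizeof(buf), 64));
        return 0;
    }
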
71039diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
71040index e4c6a59..c86621a 100644
71041--- a/kernel/rcutiny.c
71042+++ b/kernel/rcutiny.c
71043@@ -46,7 +46,7 @@
71044 struct rcu_ctrlblk;
71045 static void invoke_rcu_callbacks(void);
71046 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
71047-static void rcu_process_callbacks(struct softirq_action *unused);
71048+static void rcu_process_callbacks(void);
71049 static void __call_rcu(struct rcu_head *head,
71050 void (*func)(struct rcu_head *rcu),
71051 struct rcu_ctrlblk *rcp);
71052@@ -310,7 +310,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
71053 rcu_is_callbacks_kthread()));
71054 }
71055
71056-static void rcu_process_callbacks(struct softirq_action *unused)
71057+static void rcu_process_callbacks(void)
71058 {
71059 __rcu_process_callbacks(&rcu_sched_ctrlblk);
71060 __rcu_process_callbacks(&rcu_bh_ctrlblk);
71061diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
71062index 3d01902..afbf46e 100644
71063--- a/kernel/rcutiny_plugin.h
71064+++ b/kernel/rcutiny_plugin.h
71065@@ -893,7 +893,7 @@ static int rcu_kthread(void *arg)
71066 have_rcu_kthread_work = morework;
71067 local_irq_restore(flags);
71068 if (work)
71069- rcu_process_callbacks(NULL);
71070+ rcu_process_callbacks();
71071 schedule_timeout_interruptible(1); /* Leave CPU for others. */
71072 }
71073
71074diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
71075index aaa7b9f..055ff1e 100644
71076--- a/kernel/rcutorture.c
71077+++ b/kernel/rcutorture.c
71078@@ -163,12 +163,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
71079 { 0 };
71080 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
71081 { 0 };
71082-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
71083-static atomic_t n_rcu_torture_alloc;
71084-static atomic_t n_rcu_torture_alloc_fail;
71085-static atomic_t n_rcu_torture_free;
71086-static atomic_t n_rcu_torture_mberror;
71087-static atomic_t n_rcu_torture_error;
71088+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
71089+static atomic_unchecked_t n_rcu_torture_alloc;
71090+static atomic_unchecked_t n_rcu_torture_alloc_fail;
71091+static atomic_unchecked_t n_rcu_torture_free;
71092+static atomic_unchecked_t n_rcu_torture_mberror;
71093+static atomic_unchecked_t n_rcu_torture_error;
71094 static long n_rcu_torture_barrier_error;
71095 static long n_rcu_torture_boost_ktrerror;
71096 static long n_rcu_torture_boost_rterror;
71097@@ -272,11 +272,11 @@ rcu_torture_alloc(void)
71098
71099 spin_lock_bh(&rcu_torture_lock);
71100 if (list_empty(&rcu_torture_freelist)) {
71101- atomic_inc(&n_rcu_torture_alloc_fail);
71102+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
71103 spin_unlock_bh(&rcu_torture_lock);
71104 return NULL;
71105 }
71106- atomic_inc(&n_rcu_torture_alloc);
71107+ atomic_inc_unchecked(&n_rcu_torture_alloc);
71108 p = rcu_torture_freelist.next;
71109 list_del_init(p);
71110 spin_unlock_bh(&rcu_torture_lock);
71111@@ -289,7 +289,7 @@ rcu_torture_alloc(void)
71112 static void
71113 rcu_torture_free(struct rcu_torture *p)
71114 {
71115- atomic_inc(&n_rcu_torture_free);
71116+ atomic_inc_unchecked(&n_rcu_torture_free);
71117 spin_lock_bh(&rcu_torture_lock);
71118 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
71119 spin_unlock_bh(&rcu_torture_lock);
71120@@ -410,7 +410,7 @@ rcu_torture_cb(struct rcu_head *p)
71121 i = rp->rtort_pipe_count;
71122 if (i > RCU_TORTURE_PIPE_LEN)
71123 i = RCU_TORTURE_PIPE_LEN;
71124- atomic_inc(&rcu_torture_wcount[i]);
71125+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
71126 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
71127 rp->rtort_mbtest = 0;
71128 rcu_torture_free(rp);
71129@@ -459,7 +459,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
71130 i = rp->rtort_pipe_count;
71131 if (i > RCU_TORTURE_PIPE_LEN)
71132 i = RCU_TORTURE_PIPE_LEN;
71133- atomic_inc(&rcu_torture_wcount[i]);
71134+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
71135 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
71136 rp->rtort_mbtest = 0;
71137 list_del(&rp->rtort_free);
71138@@ -1002,7 +1002,7 @@ rcu_torture_writer(void *arg)
71139 i = old_rp->rtort_pipe_count;
71140 if (i > RCU_TORTURE_PIPE_LEN)
71141 i = RCU_TORTURE_PIPE_LEN;
71142- atomic_inc(&rcu_torture_wcount[i]);
71143+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
71144 old_rp->rtort_pipe_count++;
71145 cur_ops->deferred_free(old_rp);
71146 }
71147@@ -1087,7 +1087,7 @@ static void rcu_torture_timer(unsigned long unused)
71148 }
71149 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
71150 if (p->rtort_mbtest == 0)
71151- atomic_inc(&n_rcu_torture_mberror);
71152+ atomic_inc_unchecked(&n_rcu_torture_mberror);
71153 spin_lock(&rand_lock);
71154 cur_ops->read_delay(&rand);
71155 n_rcu_torture_timers++;
71156@@ -1151,7 +1151,7 @@ rcu_torture_reader(void *arg)
71157 }
71158 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
71159 if (p->rtort_mbtest == 0)
71160- atomic_inc(&n_rcu_torture_mberror);
71161+ atomic_inc_unchecked(&n_rcu_torture_mberror);
71162 cur_ops->read_delay(&rand);
71163 preempt_disable();
71164 pipe_count = p->rtort_pipe_count;
71165@@ -1210,11 +1210,11 @@ rcu_torture_printk(char *page)
71166 rcu_torture_current,
71167 rcu_torture_current_version,
71168 list_empty(&rcu_torture_freelist),
71169- atomic_read(&n_rcu_torture_alloc),
71170- atomic_read(&n_rcu_torture_alloc_fail),
71171- atomic_read(&n_rcu_torture_free));
71172+ atomic_read_unchecked(&n_rcu_torture_alloc),
71173+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
71174+ atomic_read_unchecked(&n_rcu_torture_free));
71175 cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
71176- atomic_read(&n_rcu_torture_mberror),
71177+ atomic_read_unchecked(&n_rcu_torture_mberror),
71178 n_rcu_torture_boost_ktrerror,
71179 n_rcu_torture_boost_rterror);
71180 cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
71181@@ -1233,14 +1233,14 @@ rcu_torture_printk(char *page)
71182 n_barrier_attempts,
71183 n_rcu_torture_barrier_error);
71184 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
71185- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
71186+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
71187 n_rcu_torture_barrier_error != 0 ||
71188 n_rcu_torture_boost_ktrerror != 0 ||
71189 n_rcu_torture_boost_rterror != 0 ||
71190 n_rcu_torture_boost_failure != 0 ||
71191 i > 1) {
71192 cnt += sprintf(&page[cnt], "!!! ");
71193- atomic_inc(&n_rcu_torture_error);
71194+ atomic_inc_unchecked(&n_rcu_torture_error);
71195 WARN_ON_ONCE(1);
71196 }
71197 cnt += sprintf(&page[cnt], "Reader Pipe: ");
71198@@ -1254,7 +1254,7 @@ rcu_torture_printk(char *page)
71199 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
71200 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
71201 cnt += sprintf(&page[cnt], " %d",
71202- atomic_read(&rcu_torture_wcount[i]));
71203+ atomic_read_unchecked(&rcu_torture_wcount[i]));
71204 }
71205 cnt += sprintf(&page[cnt], "\n");
71206 if (cur_ops->stats)
71207@@ -1938,7 +1938,7 @@ rcu_torture_cleanup(void)
71208
71209 if (cur_ops->cleanup)
71210 cur_ops->cleanup();
71211- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
71212+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
71213 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
71214 else if (n_online_successes != n_online_attempts ||
71215 n_offline_successes != n_offline_attempts)
71216@@ -2007,18 +2007,18 @@ rcu_torture_init(void)
71217
71218 rcu_torture_current = NULL;
71219 rcu_torture_current_version = 0;
71220- atomic_set(&n_rcu_torture_alloc, 0);
71221- atomic_set(&n_rcu_torture_alloc_fail, 0);
71222- atomic_set(&n_rcu_torture_free, 0);
71223- atomic_set(&n_rcu_torture_mberror, 0);
71224- atomic_set(&n_rcu_torture_error, 0);
71225+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
71226+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
71227+ atomic_set_unchecked(&n_rcu_torture_free, 0);
71228+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
71229+ atomic_set_unchecked(&n_rcu_torture_error, 0);
71230 n_rcu_torture_barrier_error = 0;
71231 n_rcu_torture_boost_ktrerror = 0;
71232 n_rcu_torture_boost_rterror = 0;
71233 n_rcu_torture_boost_failure = 0;
71234 n_rcu_torture_boosts = 0;
71235 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
71236- atomic_set(&rcu_torture_wcount[i], 0);
71237+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
71238 for_each_possible_cpu(cpu) {
71239 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
71240 per_cpu(rcu_torture_count, cpu)[i] = 0;
71241diff --git a/kernel/rcutree.c b/kernel/rcutree.c
71242index 2682295..0f2297e 100644
71243--- a/kernel/rcutree.c
71244+++ b/kernel/rcutree.c
71245@@ -348,9 +348,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
71246 rcu_prepare_for_idle(smp_processor_id());
71247 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
71248 smp_mb__before_atomic_inc(); /* See above. */
71249- atomic_inc(&rdtp->dynticks);
71250+ atomic_inc_unchecked(&rdtp->dynticks);
71251 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
71252- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
71253+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
71254
71255 /*
71256 * It is illegal to enter an extended quiescent state while
71257@@ -508,10 +508,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
71258 int user)
71259 {
71260 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
71261- atomic_inc(&rdtp->dynticks);
71262+ atomic_inc_unchecked(&rdtp->dynticks);
71263 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
71264 smp_mb__after_atomic_inc(); /* See above. */
71265- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
71266+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
71267 rcu_cleanup_after_idle(smp_processor_id());
71268 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
71269 if (!user && !is_idle_task(current)) {
71270@@ -670,14 +670,14 @@ void rcu_nmi_enter(void)
71271 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
71272
71273 if (rdtp->dynticks_nmi_nesting == 0 &&
71274- (atomic_read(&rdtp->dynticks) & 0x1))
71275+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
71276 return;
71277 rdtp->dynticks_nmi_nesting++;
71278 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
71279- atomic_inc(&rdtp->dynticks);
71280+ atomic_inc_unchecked(&rdtp->dynticks);
71281 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
71282 smp_mb__after_atomic_inc(); /* See above. */
71283- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
71284+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
71285 }
71286
71287 /**
71288@@ -696,9 +696,9 @@ void rcu_nmi_exit(void)
71289 return;
71290 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
71291 smp_mb__before_atomic_inc(); /* See above. */
71292- atomic_inc(&rdtp->dynticks);
71293+ atomic_inc_unchecked(&rdtp->dynticks);
71294 smp_mb__after_atomic_inc(); /* Force delay to next write. */
71295- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
71296+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
71297 }
71298
71299 /**
71300@@ -712,7 +712,7 @@ int rcu_is_cpu_idle(void)
71301 int ret;
71302
71303 preempt_disable();
71304- ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
71305+ ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
71306 preempt_enable();
71307 return ret;
71308 }
71309@@ -795,7 +795,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
71310 */
71311 static int dyntick_save_progress_counter(struct rcu_data *rdp)
71312 {
71313- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
71314+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
71315 return (rdp->dynticks_snap & 0x1) == 0;
71316 }
71317
71318@@ -810,7 +810,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
71319 unsigned int curr;
71320 unsigned int snap;
71321
71322- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
71323+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
71324 snap = (unsigned int)rdp->dynticks_snap;
71325
71326 /*
71327@@ -858,10 +858,10 @@ static int jiffies_till_stall_check(void)
71328 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
71329 */
71330 if (till_stall_check < 3) {
71331- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
71332+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
71333 till_stall_check = 3;
71334 } else if (till_stall_check > 300) {
71335- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
71336+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
71337 till_stall_check = 300;
71338 }
71339 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
71340@@ -1589,7 +1589,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
71341 rsp->qlen += rdp->qlen;
71342 rdp->n_cbs_orphaned += rdp->qlen;
71343 rdp->qlen_lazy = 0;
71344- ACCESS_ONCE(rdp->qlen) = 0;
71345+ ACCESS_ONCE_RW(rdp->qlen) = 0;
71346 }
71347
71348 /*
71349@@ -1831,7 +1831,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
71350 }
71351 smp_mb(); /* List handling before counting for rcu_barrier(). */
71352 rdp->qlen_lazy -= count_lazy;
71353- ACCESS_ONCE(rdp->qlen) -= count;
71354+ ACCESS_ONCE_RW(rdp->qlen) -= count;
71355 rdp->n_cbs_invoked += count;
71356
71357 /* Reinstate batch limit if we have worked down the excess. */
71358@@ -2024,7 +2024,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
71359 /*
71360 * Do RCU core processing for the current CPU.
71361 */
71362-static void rcu_process_callbacks(struct softirq_action *unused)
71363+static void rcu_process_callbacks(void)
71364 {
71365 struct rcu_state *rsp;
71366
71367@@ -2136,7 +2136,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
71368 local_irq_restore(flags);
71369 return;
71370 }
71371- ACCESS_ONCE(rdp->qlen)++;
71372+ ACCESS_ONCE_RW(rdp->qlen)++;
71373 if (lazy)
71374 rdp->qlen_lazy++;
71375 else
71376@@ -2250,8 +2250,8 @@ void synchronize_rcu_bh(void)
71377 }
71378 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
71379
71380-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
71381-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
71382+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
71383+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
71384
71385 static int synchronize_sched_expedited_cpu_stop(void *data)
71386 {
71387@@ -2312,7 +2312,7 @@ void synchronize_sched_expedited(void)
71388 int firstsnap, s, snap, trycount = 0;
71389
71390 /* Note that atomic_inc_return() implies full memory barrier. */
71391- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
71392+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
71393 get_online_cpus();
71394 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
71395
71396@@ -2334,7 +2334,7 @@ void synchronize_sched_expedited(void)
71397 }
71398
71399 /* Check to see if someone else did our work for us. */
71400- s = atomic_read(&sync_sched_expedited_done);
71401+ s = atomic_read_unchecked(&sync_sched_expedited_done);
71402 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
71403 smp_mb(); /* ensure test happens before caller kfree */
71404 return;
71405@@ -2349,7 +2349,7 @@ void synchronize_sched_expedited(void)
71406 * grace period works for us.
71407 */
71408 get_online_cpus();
71409- snap = atomic_read(&sync_sched_expedited_started);
71410+ snap = atomic_read_unchecked(&sync_sched_expedited_started);
71411 smp_mb(); /* ensure read is before try_stop_cpus(). */
71412 }
71413
71414@@ -2360,12 +2360,12 @@ void synchronize_sched_expedited(void)
71415 * than we did beat us to the punch.
71416 */
71417 do {
71418- s = atomic_read(&sync_sched_expedited_done);
71419+ s = atomic_read_unchecked(&sync_sched_expedited_done);
71420 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
71421 smp_mb(); /* ensure test happens before caller kfree */
71422 break;
71423 }
71424- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
71425+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
71426
71427 put_online_cpus();
71428 }
71429@@ -2539,7 +2539,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
71430 * ACCESS_ONCE() to prevent the compiler from speculating
71431 * the increment to precede the early-exit check.
71432 */
71433- ACCESS_ONCE(rsp->n_barrier_done)++;
71434+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
71435 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
71436 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
71437 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
71438@@ -2581,7 +2581,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
71439
71440 /* Increment ->n_barrier_done to prevent duplicate work. */
71441 smp_mb(); /* Keep increment after above mechanism. */
71442- ACCESS_ONCE(rsp->n_barrier_done)++;
71443+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
71444 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
71445 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
71446 smp_mb(); /* Keep increment before caller's subsequent code. */
71447@@ -2626,10 +2626,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
71448 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
71449 init_callback_list(rdp);
71450 rdp->qlen_lazy = 0;
71451- ACCESS_ONCE(rdp->qlen) = 0;
71452+ ACCESS_ONCE_RW(rdp->qlen) = 0;
71453 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
71454 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
71455- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
71456+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
71457 #ifdef CONFIG_RCU_USER_QS
71458 WARN_ON_ONCE(rdp->dynticks->in_user);
71459 #endif
71460@@ -2664,8 +2664,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
71461 rdp->blimit = blimit;
71462 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
71463 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
71464- atomic_set(&rdp->dynticks->dynticks,
71465- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
71466+ atomic_set_unchecked(&rdp->dynticks->dynticks,
71467+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
71468 rcu_prepare_for_idle_init(cpu);
71469 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
71470
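
rcutree.c: stores that previously went through ACCESS_ONCE() now use ACCESS_ONCE_RW(). In the PaX tree ACCESS_ONCE() acquires a const qualifier so nothing can be written through it by accident, and the _RW form is the explicit opt-in for stores; the definitions below are my reconstruction of that split (the authoritative ones should be in this patch's compiler.h hunks), shown with a small use:

    /* Reconstructed sketch of the PaX split; not copied from the patch. */
    #define ACCESS_ONCE(x)    (*(volatile const __typeof__(x) *)&(x))  /* loads only */
    #define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))        /* stores allowed */

    static long qlen;

    static void demo(void)
    {
        long v = ACCESS_ONCE(qlen);       /* a load compiles with either form */
        ACCESS_ONCE_RW(qlen) = v + 1;     /* a store must use the _RW form */
    }
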
71471diff --git a/kernel/rcutree.h b/kernel/rcutree.h
71472index a240f03..d469618 100644
71473--- a/kernel/rcutree.h
71474+++ b/kernel/rcutree.h
71475@@ -86,7 +86,7 @@ struct rcu_dynticks {
71476 long long dynticks_nesting; /* Track irq/process nesting level. */
71477 /* Process level is worth LLONG_MAX/2. */
71478 int dynticks_nmi_nesting; /* Track NMI nesting level. */
71479- atomic_t dynticks; /* Even value for idle, else odd. */
71480+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
71481 #ifdef CONFIG_RCU_FAST_NO_HZ
71482 int dyntick_drain; /* Prepare-for-idle state variable. */
71483 unsigned long dyntick_holdoff;
71484diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
71485index f921154..34c4873 100644
71486--- a/kernel/rcutree_plugin.h
71487+++ b/kernel/rcutree_plugin.h
71488@@ -865,7 +865,7 @@ void synchronize_rcu_expedited(void)
71489
71490 /* Clean up and exit. */
71491 smp_mb(); /* ensure expedited GP seen before counter increment. */
71492- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
71493+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
71494 unlock_mb_ret:
71495 mutex_unlock(&sync_rcu_preempt_exp_mutex);
71496 mb_ret:
71497@@ -2040,7 +2040,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
71498 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
71499 printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n",
71500 cpu, ticks_value, ticks_title,
71501- atomic_read(&rdtp->dynticks) & 0xfff,
71502+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
71503 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
71504 fast_no_hz);
71505 }
71506diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
71507index 693513b..b9f1d63 100644
71508--- a/kernel/rcutree_trace.c
71509+++ b/kernel/rcutree_trace.c
71510@@ -92,7 +92,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
71511 rdp->completed, rdp->gpnum,
71512 rdp->passed_quiesce, rdp->qs_pending);
71513 seq_printf(m, " dt=%d/%llx/%d df=%lu",
71514- atomic_read(&rdp->dynticks->dynticks),
71515+ atomic_read_unchecked(&rdp->dynticks->dynticks),
71516 rdp->dynticks->dynticks_nesting,
71517 rdp->dynticks->dynticks_nmi_nesting,
71518 rdp->dynticks_fqs);
71519@@ -154,7 +154,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
71520 rdp->completed, rdp->gpnum,
71521 rdp->passed_quiesce, rdp->qs_pending);
71522 seq_printf(m, ",%d,%llx,%d,%lu",
71523- atomic_read(&rdp->dynticks->dynticks),
71524+ atomic_read_unchecked(&rdp->dynticks->dynticks),
71525 rdp->dynticks->dynticks_nesting,
71526 rdp->dynticks->dynticks_nmi_nesting,
71527 rdp->dynticks_fqs);
71528diff --git a/kernel/resource.c b/kernel/resource.c
71529index 73f35d4..4684fc4 100644
71530--- a/kernel/resource.c
71531+++ b/kernel/resource.c
71532@@ -143,8 +143,18 @@ static const struct file_operations proc_iomem_operations = {
71533
71534 static int __init ioresources_init(void)
71535 {
71536+#ifdef CONFIG_GRKERNSEC_PROC_ADD
71537+#ifdef CONFIG_GRKERNSEC_PROC_USER
71538+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
71539+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
71540+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71541+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
71542+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
71543+#endif
71544+#else
71545 proc_create("ioports", 0, NULL, &proc_ioports_operations);
71546 proc_create("iomem", 0, NULL, &proc_iomem_operations);
71547+#endif
71548 return 0;
71549 }
71550 __initcall(ioresources_init);
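
resource.c: /proc/ioports and /proc/iomem get the same GRKERNSEC_PROC_* permission ladder used for /proc/modules earlier in this patch: 0400 (S_IRUSR) under PROC_USER, 0440 (S_IRUSR | S_IRGRP) under PROC_USERGROUP, and the stock mode 0 otherwise, which procfs treats as its world-readable default (0444). The three tiers in octal:

    #include <stdio.h>
    #include <sys/stat.h>

    int main(void)
    {
        printf("PROC_USER:      %04o\n", S_IRUSR);                       /* 0400 */
        printf("PROC_USERGROUP: %04o\n", S_IRUSR | S_IRGRP);             /* 0440 */
        printf("stock default:  %04o\n", S_IRUSR | S_IRGRP | S_IROTH);   /* 0444 */
        return 0;
    }
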
71551diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
71552index 98ec494..4241d6d 100644
71553--- a/kernel/rtmutex-tester.c
71554+++ b/kernel/rtmutex-tester.c
71555@@ -20,7 +20,7 @@
71556 #define MAX_RT_TEST_MUTEXES 8
71557
71558 static spinlock_t rttest_lock;
71559-static atomic_t rttest_event;
71560+static atomic_unchecked_t rttest_event;
71561
71562 struct test_thread_data {
71563 int opcode;
71564@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
71565
71566 case RTTEST_LOCKCONT:
71567 td->mutexes[td->opdata] = 1;
71568- td->event = atomic_add_return(1, &rttest_event);
71569+ td->event = atomic_add_return_unchecked(1, &rttest_event);
71570 return 0;
71571
71572 case RTTEST_RESET:
71573@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
71574 return 0;
71575
71576 case RTTEST_RESETEVENT:
71577- atomic_set(&rttest_event, 0);
71578+ atomic_set_unchecked(&rttest_event, 0);
71579 return 0;
71580
71581 default:
71582@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
71583 return ret;
71584
71585 td->mutexes[id] = 1;
71586- td->event = atomic_add_return(1, &rttest_event);
71587+ td->event = atomic_add_return_unchecked(1, &rttest_event);
71588 rt_mutex_lock(&mutexes[id]);
71589- td->event = atomic_add_return(1, &rttest_event);
71590+ td->event = atomic_add_return_unchecked(1, &rttest_event);
71591 td->mutexes[id] = 4;
71592 return 0;
71593
71594@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
71595 return ret;
71596
71597 td->mutexes[id] = 1;
71598- td->event = atomic_add_return(1, &rttest_event);
71599+ td->event = atomic_add_return_unchecked(1, &rttest_event);
71600 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
71601- td->event = atomic_add_return(1, &rttest_event);
71602+ td->event = atomic_add_return_unchecked(1, &rttest_event);
71603 td->mutexes[id] = ret ? 0 : 4;
71604 return ret ? -EINTR : 0;
71605
71606@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
71607 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
71608 return ret;
71609
71610- td->event = atomic_add_return(1, &rttest_event);
71611+ td->event = atomic_add_return_unchecked(1, &rttest_event);
71612 rt_mutex_unlock(&mutexes[id]);
71613- td->event = atomic_add_return(1, &rttest_event);
71614+ td->event = atomic_add_return_unchecked(1, &rttest_event);
71615 td->mutexes[id] = 0;
71616 return 0;
71617
71618@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
71619 break;
71620
71621 td->mutexes[dat] = 2;
71622- td->event = atomic_add_return(1, &rttest_event);
71623+ td->event = atomic_add_return_unchecked(1, &rttest_event);
71624 break;
71625
71626 default:
71627@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
71628 return;
71629
71630 td->mutexes[dat] = 3;
71631- td->event = atomic_add_return(1, &rttest_event);
71632+ td->event = atomic_add_return_unchecked(1, &rttest_event);
71633 break;
71634
71635 case RTTEST_LOCKNOWAIT:
71636@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
71637 return;
71638
71639 td->mutexes[dat] = 1;
71640- td->event = atomic_add_return(1, &rttest_event);
71641+ td->event = atomic_add_return_unchecked(1, &rttest_event);
71642 return;
71643
71644 default:
71645diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
71646index 15f60d0..7e50319 100644
71647--- a/kernel/sched/auto_group.c
71648+++ b/kernel/sched/auto_group.c
71649@@ -11,7 +11,7 @@
71650
71651 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
71652 static struct autogroup autogroup_default;
71653-static atomic_t autogroup_seq_nr;
71654+static atomic_unchecked_t autogroup_seq_nr;
71655
71656 void __init autogroup_init(struct task_struct *init_task)
71657 {
71658@@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
71659
71660 kref_init(&ag->kref);
71661 init_rwsem(&ag->lock);
71662- ag->id = atomic_inc_return(&autogroup_seq_nr);
71663+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
71664 ag->tg = tg;
71665 #ifdef CONFIG_RT_GROUP_SCHED
71666 /*
71667diff --git a/kernel/sched/core.c b/kernel/sched/core.c
71668index 2d8927f..f617765 100644
71669--- a/kernel/sched/core.c
71670+++ b/kernel/sched/core.c
71671@@ -3562,6 +3562,8 @@ int can_nice(const struct task_struct *p, const int nice)
71672 /* convert nice value [19,-20] to rlimit style value [1,40] */
71673 int nice_rlim = 20 - nice;
71674
71675+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
71676+
71677 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
71678 capable(CAP_SYS_NICE));
71679 }
71680@@ -3595,7 +3597,8 @@ SYSCALL_DEFINE1(nice, int, increment)
71681 if (nice > 19)
71682 nice = 19;
71683
71684- if (increment < 0 && !can_nice(current, nice))
71685+ if (increment < 0 && (!can_nice(current, nice) ||
71686+ gr_handle_chroot_nice()))
71687 return -EPERM;
71688
71689 retval = security_task_setnice(current, nice);
71690@@ -3749,6 +3752,7 @@ recheck:
71691 unsigned long rlim_rtprio =
71692 task_rlimit(p, RLIMIT_RTPRIO);
71693
71694+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
71695 /* can't set/change the rt policy */
71696 if (policy != p->policy && !rlim_rtprio)
71697 return -EPERM;
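
sched/core.c: can_nice() maps the nice range [19, -20] onto RLIMIT_NICE units [1, 40] via 20 - nice before comparing against task_rlimit(); the patch reports that value to grsecurity's learning mode through gr_learn_resource(), lets gr_handle_chroot_nice() veto priority raises from inside a chroot, and records RLIMIT_RTPRIO requests the same way in the setscheduler path. The mapping itself:

    #include <stdio.h>

    /* nice value [19 .. -20]  ->  RLIMIT_NICE units [1 .. 40], as in can_nice(). */
    int main(void)
    {
        for (int nice = 19; nice >= -20; nice -= 13)
            printf("nice %3d -> rlimit unit %2d\n", nice, 20 - nice);
        return 0;
    }
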
71698diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
71699index 6b800a1..0c36227 100644
71700--- a/kernel/sched/fair.c
71701+++ b/kernel/sched/fair.c
71702@@ -4890,7 +4890,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
71703 * run_rebalance_domains is triggered when needed from the scheduler tick.
71704 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
71705 */
71706-static void run_rebalance_domains(struct softirq_action *h)
71707+static void run_rebalance_domains(void)
71708 {
71709 int this_cpu = smp_processor_id();
71710 struct rq *this_rq = cpu_rq(this_cpu);
71711diff --git a/kernel/signal.c b/kernel/signal.c
71712index e4d4014..76cf5dd 100644
71713--- a/kernel/signal.c
71714+++ b/kernel/signal.c
71715@@ -49,12 +49,12 @@ static struct kmem_cache *sigqueue_cachep;
71716
71717 int print_fatal_signals __read_mostly;
71718
71719-static void __user *sig_handler(struct task_struct *t, int sig)
71720+static __sighandler_t sig_handler(struct task_struct *t, int sig)
71721 {
71722 return t->sighand->action[sig - 1].sa.sa_handler;
71723 }
71724
71725-static int sig_handler_ignored(void __user *handler, int sig)
71726+static int sig_handler_ignored(__sighandler_t handler, int sig)
71727 {
71728 /* Is it explicitly or implicitly ignored? */
71729 return handler == SIG_IGN ||
71730@@ -63,7 +63,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
71731
71732 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
71733 {
71734- void __user *handler;
71735+ __sighandler_t handler;
71736
71737 handler = sig_handler(t, sig);
71738
71739@@ -367,6 +367,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
71740 atomic_inc(&user->sigpending);
71741 rcu_read_unlock();
71742
71743+ if (!override_rlimit)
71744+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
71745+
71746 if (override_rlimit ||
71747 atomic_read(&user->sigpending) <=
71748 task_rlimit(t, RLIMIT_SIGPENDING)) {
71749@@ -491,7 +494,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
71750
71751 int unhandled_signal(struct task_struct *tsk, int sig)
71752 {
71753- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
71754+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
71755 if (is_global_init(tsk))
71756 return 1;
71757 if (handler != SIG_IGN && handler != SIG_DFL)
71758@@ -817,6 +820,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
71759 }
71760 }
71761
71762+ /* allow glibc communication via tgkill to other threads in our
71763+ thread group */
71764+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
71765+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
71766+ && gr_handle_signal(t, sig))
71767+ return -EPERM;
71768+
71769 return security_task_kill(t, info, sig, 0);
71770 }
71771
71772@@ -1198,7 +1208,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
71773 return send_signal(sig, info, p, 1);
71774 }
71775
71776-static int
71777+int
71778 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
71779 {
71780 return send_signal(sig, info, t, 0);
71781@@ -1235,6 +1245,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
71782 unsigned long int flags;
71783 int ret, blocked, ignored;
71784 struct k_sigaction *action;
71785+ int is_unhandled = 0;
71786
71787 spin_lock_irqsave(&t->sighand->siglock, flags);
71788 action = &t->sighand->action[sig-1];
71789@@ -1249,9 +1260,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
71790 }
71791 if (action->sa.sa_handler == SIG_DFL)
71792 t->signal->flags &= ~SIGNAL_UNKILLABLE;
71793+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
71794+ is_unhandled = 1;
71795 ret = specific_send_sig_info(sig, info, t);
71796 spin_unlock_irqrestore(&t->sighand->siglock, flags);
71797
71798+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
71799+ normal operation */
71800+ if (is_unhandled) {
71801+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
71802+ gr_handle_crash(t, sig);
71803+ }
71804+
71805 return ret;
71806 }
71807
71808@@ -1318,8 +1338,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
71809 ret = check_kill_permission(sig, info, p);
71810 rcu_read_unlock();
71811
71812- if (!ret && sig)
71813+ if (!ret && sig) {
71814 ret = do_send_sig_info(sig, info, p, true);
71815+ if (!ret)
71816+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
71817+ }
71818
71819 return ret;
71820 }
71821@@ -2864,7 +2887,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
71822 int error = -ESRCH;
71823
71824 rcu_read_lock();
71825- p = find_task_by_vpid(pid);
71826+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
71827+ /* allow glibc communication via tgkill to other threads in our
71828+ thread group */
71829+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
71830+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
71831+ p = find_task_by_vpid_unrestricted(pid);
71832+ else
71833+#endif
71834+ p = find_task_by_vpid(pid);
71835 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
71836 error = check_kill_permission(sig, info, p);
71837 /*
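
signal.c: besides retyping the handler helpers from void __user * to __sighandler_t and adding gr_log_signal()/gr_handle_crash() reporting for unhandled fatal signals, these hunks whitelist one very specific pattern from grsecurity's signal policing: glibc's NPTL implements the setuid() family by tgkill()ing a reserved real-time signal to every thread in the group so all threads switch credentials together, and both check_kill_permission() and do_send_specific() exempt exactly that case (SI_TKILL, kernel signal SIGRTMIN+1, target inside the caller's own thread group), even under CHROOT_FINDTASK. What that broadcast looks like from userspace, as an illustrative sketch (glibc issues it internally):

    #define _GNU_SOURCE
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Kernel-side SIGRTMIN is 32; glibc reserves 32/33 for itself and shows
     * applications a shifted SIGRTMIN. Kernel signal 33 is glibc's internal
     * setxid broadcast -- the one the patch whitelists. */
    #define KERNEL_SIGRTMIN 32

    static long setxid_signal_one(pid_t tid)
    {
        return syscall(SYS_tgkill, (long)getpid(), (long)tid, KERNEL_SIGRTMIN + 1);
    }
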
71838diff --git a/kernel/softirq.c b/kernel/softirq.c
71839index cc96bdc..8bb9750 100644
71840--- a/kernel/softirq.c
71841+++ b/kernel/softirq.c
71842@@ -57,7 +57,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
71843
71844 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
71845
71846-char *softirq_to_name[NR_SOFTIRQS] = {
71847+const char * const softirq_to_name[NR_SOFTIRQS] = {
71848 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
71849 "TASKLET", "SCHED", "HRTIMER", "RCU"
71850 };
71851@@ -244,7 +244,7 @@ restart:
71852 kstat_incr_softirqs_this_cpu(vec_nr);
71853
71854 trace_softirq_entry(vec_nr);
71855- h->action(h);
71856+ h->action();
71857 trace_softirq_exit(vec_nr);
71858 if (unlikely(prev_count != preempt_count())) {
71859 printk(KERN_ERR "huh, entered softirq %u %s %p"
71860@@ -391,9 +391,11 @@ void __raise_softirq_irqoff(unsigned int nr)
71861 or_softirq_pending(1UL << nr);
71862 }
71863
71864-void open_softirq(int nr, void (*action)(struct softirq_action *))
71865+void open_softirq(int nr, void (*action)(void))
71866 {
71867- softirq_vec[nr].action = action;
71868+ pax_open_kernel();
71869+ *(void **)&softirq_vec[nr].action = action;
71870+ pax_close_kernel();
71871 }
71872
71873 /*
71874@@ -447,7 +449,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
71875
71876 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
71877
71878-static void tasklet_action(struct softirq_action *a)
71879+static void tasklet_action(void)
71880 {
71881 struct tasklet_struct *list;
71882
71883@@ -482,7 +484,7 @@ static void tasklet_action(struct softirq_action *a)
71884 }
71885 }
71886
71887-static void tasklet_hi_action(struct softirq_action *a)
71888+static void tasklet_hi_action(void)
71889 {
71890 struct tasklet_struct *list;
71891
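With softirq_vec[] constified by this patch, registration writes the handler pointer through a pax_open_kernel()/pax_close_kernel() window, and handlers drop their never-used struct softirq_action * argument. A registration sketch under the changed API (my_softirq_action is a hypothetical handler):

/* Handlers now take no argument; the softirq_action pointer was unused. */
static void my_softirq_action(void)
{
    /* bottom-half work goes here */
}

static int __init my_subsys_init(void)
{
    /* open_softirq() stores the pointer via pax_open_kernel(), so the
     * constified softirq_vec[] stays read-only the rest of the time. */
    open_softirq(TASKLET_SOFTIRQ, my_softirq_action);
    return 0;
}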
71892diff --git a/kernel/srcu.c b/kernel/srcu.c
71893index 97c465e..d83f3bb 100644
71894--- a/kernel/srcu.c
71895+++ b/kernel/srcu.c
71896@@ -302,9 +302,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
71897 preempt_disable();
71898 idx = rcu_dereference_index_check(sp->completed,
71899 rcu_read_lock_sched_held()) & 0x1;
71900- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
71901+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
71902 smp_mb(); /* B */ /* Avoid leaking the critical section. */
71903- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
71904+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
71905 preempt_enable();
71906 return idx;
71907 }
71908@@ -320,7 +320,7 @@ void __srcu_read_unlock(struct srcu_struct *sp, int idx)
71909 {
71910 preempt_disable();
71911 smp_mb(); /* C */ /* Avoid leaking the critical section. */
71912- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
71913+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
71914 preempt_enable();
71915 }
71916 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
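ACCESS_ONCE_RW exists because these patches re-type ACCESS_ONCE with a const-qualified cast, turning stray writes through it into compile errors; genuine write sites, like the SRCU per-CPU counters above, must opt in explicitly. Roughly, paraphrasing the definitions the patch adds elsewhere:

#define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x)) /* reads only */
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))       /* writes allowed */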
71917diff --git a/kernel/sys.c b/kernel/sys.c
71918index e6e0ece..1f2e413 100644
71919--- a/kernel/sys.c
71920+++ b/kernel/sys.c
71921@@ -157,6 +157,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
71922 error = -EACCES;
71923 goto out;
71924 }
71925+
71926+ if (gr_handle_chroot_setpriority(p, niceval)) {
71927+ error = -EACCES;
71928+ goto out;
71929+ }
71930+
71931 no_nice = security_task_setnice(p, niceval);
71932 if (no_nice) {
71933 error = no_nice;
71934@@ -595,6 +601,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
71935 goto error;
71936 }
71937
71938+ if (gr_check_group_change(new->gid, new->egid, -1))
71939+ goto error;
71940+
71941 if (rgid != (gid_t) -1 ||
71942 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
71943 new->sgid = new->egid;
71944@@ -630,6 +639,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
71945 old = current_cred();
71946
71947 retval = -EPERM;
71948+
71949+ if (gr_check_group_change(kgid, kgid, kgid))
71950+ goto error;
71951+
71952 if (nsown_capable(CAP_SETGID))
71953 new->gid = new->egid = new->sgid = new->fsgid = kgid;
71954 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
71955@@ -647,7 +660,7 @@ error:
71956 /*
71957 * change the user struct in a credentials set to match the new UID
71958 */
71959-static int set_user(struct cred *new)
71960+int set_user(struct cred *new)
71961 {
71962 struct user_struct *new_user;
71963
71964@@ -727,6 +740,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
71965 goto error;
71966 }
71967
71968+ if (gr_check_user_change(new->uid, new->euid, -1))
71969+ goto error;
71970+
71971 if (!uid_eq(new->uid, old->uid)) {
71972 retval = set_user(new);
71973 if (retval < 0)
71974@@ -777,6 +793,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
71975 old = current_cred();
71976
71977 retval = -EPERM;
71978+
71979+ if (gr_check_crash_uid(kuid))
71980+ goto error;
71981+ if (gr_check_user_change(kuid, kuid, kuid))
71982+ goto error;
71983+
71984 if (nsown_capable(CAP_SETUID)) {
71985 new->suid = new->uid = kuid;
71986 if (!uid_eq(kuid, old->uid)) {
71987@@ -846,6 +868,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
71988 goto error;
71989 }
71990
71991+ if (gr_check_user_change(kruid, keuid, -1))
71992+ goto error;
71993+
71994 if (ruid != (uid_t) -1) {
71995 new->uid = kruid;
71996 if (!uid_eq(kruid, old->uid)) {
71997@@ -928,6 +953,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
71998 goto error;
71999 }
72000
72001+ if (gr_check_group_change(krgid, kegid, -1))
72002+ goto error;
72003+
72004 if (rgid != (gid_t) -1)
72005 new->gid = krgid;
72006 if (egid != (gid_t) -1)
72007@@ -981,6 +1009,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
72008 if (!uid_valid(kuid))
72009 return old_fsuid;
72010
72011 new = prepare_creds();
72012 if (!new)
72013 return old_fsuid;
72014+
72015+ if (gr_check_user_change(-1, -1, kuid))
72016+ goto error;
72017@@ -995,6 +1026,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
72018 }
72019 }
72020
72021+error:
72022 abort_creds(new);
72023 return old_fsuid;
72024
72025@@ -1027,12 +1059,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
72026 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
72027 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
72028 nsown_capable(CAP_SETGID)) {
72029+ if (gr_check_group_change(-1, -1, kgid))
72030+ goto error;
72031+
72032 if (!gid_eq(kgid, old->fsgid)) {
72033 new->fsgid = kgid;
72034 goto change_okay;
72035 }
72036 }
72037
72038+error:
72039 abort_creds(new);
72040 return old_fsgid;
72041
72042@@ -1340,19 +1376,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
72043 return -EFAULT;
72044
72045 down_read(&uts_sem);
72046- error = __copy_to_user(&name->sysname, &utsname()->sysname,
72047+ error = __copy_to_user(name->sysname, &utsname()->sysname,
72048 __OLD_UTS_LEN);
72049 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
72050- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
72051+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
72052 __OLD_UTS_LEN);
72053 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
72054- error |= __copy_to_user(&name->release, &utsname()->release,
72055+ error |= __copy_to_user(name->release, &utsname()->release,
72056 __OLD_UTS_LEN);
72057 error |= __put_user(0, name->release + __OLD_UTS_LEN);
72058- error |= __copy_to_user(&name->version, &utsname()->version,
72059+ error |= __copy_to_user(name->version, &utsname()->version,
72060 __OLD_UTS_LEN);
72061 error |= __put_user(0, name->version + __OLD_UTS_LEN);
72062- error |= __copy_to_user(&name->machine, &utsname()->machine,
72063+ error |= __copy_to_user(name->machine, &utsname()->machine,
72064 __OLD_UTS_LEN);
72065 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
72066 up_read(&uts_sem);
72067@@ -2026,7 +2062,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
72068 error = get_dumpable(me->mm);
72069 break;
72070 case PR_SET_DUMPABLE:
72071- if (arg2 < 0 || arg2 > 1) {
72072+ if (arg2 > 1) {
72073 error = -EINVAL;
72074 break;
72075 }
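The PR_SET_DUMPABLE hunk deletes dead code rather than weakening a check: arg2 is unsigned long, so arg2 < 0 can never be true and arg2 > 1 already rejects every out-of-range value. A compilable illustration:

#include <stdio.h>

int main(void)
{
    unsigned long arg2 = (unsigned long)-1;

    /* For unsigned types the < 0 test is constant-false (compilers
     * warn with -Wtype-limits); > 1 alone covers all bad values. */
    printf("arg2 < 0: %d\n", arg2 < 0 ? 1 : 0); /* always 0 */
    printf("arg2 > 1: %d\n", arg2 > 1 ? 1 : 0); /* 1 here */
    return 0;
}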
72076diff --git a/kernel/sysctl.c b/kernel/sysctl.c
72077index 26f65ea..df8e5ad 100644
72078--- a/kernel/sysctl.c
72079+++ b/kernel/sysctl.c
72080@@ -92,7 +92,6 @@
72081
72082
72083 #if defined(CONFIG_SYSCTL)
72084-
72085 /* External variables not in a header file. */
72086 extern int sysctl_overcommit_memory;
72087 extern int sysctl_overcommit_ratio;
72088@@ -172,10 +171,8 @@ static int proc_taint(struct ctl_table *table, int write,
72089 void __user *buffer, size_t *lenp, loff_t *ppos);
72090 #endif
72091
72092-#ifdef CONFIG_PRINTK
72093 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
72094 void __user *buffer, size_t *lenp, loff_t *ppos);
72095-#endif
72096
72097 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
72098 void __user *buffer, size_t *lenp, loff_t *ppos);
72099@@ -206,6 +203,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
72100
72101 #endif
72102
72103+extern struct ctl_table grsecurity_table[];
72104+
72105 static struct ctl_table kern_table[];
72106 static struct ctl_table vm_table[];
72107 static struct ctl_table fs_table[];
72108@@ -220,6 +219,20 @@ extern struct ctl_table epoll_table[];
72109 int sysctl_legacy_va_layout;
72110 #endif
72111
72112+#ifdef CONFIG_PAX_SOFTMODE
72113+static ctl_table pax_table[] = {
72114+ {
72115+ .procname = "softmode",
72116+ .data = &pax_softmode,
72117+ .maxlen = sizeof(unsigned int),
72118+ .mode = 0600,
72119+ .proc_handler = &proc_dointvec,
72120+ },
72121+
72122+ { }
72123+};
72124+#endif
72125+
72126 /* The default sysctl tables: */
72127
72128 static struct ctl_table sysctl_base_table[] = {
72129@@ -266,6 +279,22 @@ static int max_extfrag_threshold = 1000;
72130 #endif
72131
72132 static struct ctl_table kern_table[] = {
72133+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
72134+ {
72135+ .procname = "grsecurity",
72136+ .mode = 0500,
72137+ .child = grsecurity_table,
72138+ },
72139+#endif
72140+
72141+#ifdef CONFIG_PAX_SOFTMODE
72142+ {
72143+ .procname = "pax",
72144+ .mode = 0500,
72145+ .child = pax_table,
72146+ },
72147+#endif
72148+
72149 {
72150 .procname = "sched_child_runs_first",
72151 .data = &sysctl_sched_child_runs_first,
72152@@ -552,7 +581,7 @@ static struct ctl_table kern_table[] = {
72153 .data = &modprobe_path,
72154 .maxlen = KMOD_PATH_LEN,
72155 .mode = 0644,
72156- .proc_handler = proc_dostring,
72157+ .proc_handler = proc_dostring_modpriv,
72158 },
72159 {
72160 .procname = "modules_disabled",
72161@@ -719,16 +748,20 @@ static struct ctl_table kern_table[] = {
72162 .extra1 = &zero,
72163 .extra2 = &one,
72164 },
72165+#endif
72166 {
72167 .procname = "kptr_restrict",
72168 .data = &kptr_restrict,
72169 .maxlen = sizeof(int),
72170 .mode = 0644,
72171 .proc_handler = proc_dointvec_minmax_sysadmin,
72172+#ifdef CONFIG_GRKERNSEC_HIDESYM
72173+ .extra1 = &two,
72174+#else
72175 .extra1 = &zero,
72176+#endif
72177 .extra2 = &two,
72178 },
72179-#endif
72180 {
72181 .procname = "ngroups_max",
72182 .data = &ngroups_max,
72183@@ -1225,6 +1258,13 @@ static struct ctl_table vm_table[] = {
72184 .proc_handler = proc_dointvec_minmax,
72185 .extra1 = &zero,
72186 },
72187+ {
72188+ .procname = "heap_stack_gap",
72189+ .data = &sysctl_heap_stack_gap,
72190+ .maxlen = sizeof(sysctl_heap_stack_gap),
72191+ .mode = 0644,
72192+ .proc_handler = proc_doulongvec_minmax,
72193+ },
72194 #else
72195 {
72196 .procname = "nr_trim_pages",
72197@@ -1675,6 +1715,16 @@ int proc_dostring(struct ctl_table *table, int write,
72198 buffer, lenp, ppos);
72199 }
72200
72201+int proc_dostring_modpriv(struct ctl_table *table, int write,
72202+ void __user *buffer, size_t *lenp, loff_t *ppos)
72203+{
72204+ if (write && !capable(CAP_SYS_MODULE))
72205+ return -EPERM;
72206+
72207+ return _proc_do_string(table->data, table->maxlen, write,
72208+ buffer, lenp, ppos);
72209+}
72210+
72211 static size_t proc_skip_spaces(char **buf)
72212 {
72213 size_t ret;
72214@@ -1780,6 +1830,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
72215 len = strlen(tmp);
72216 if (len > *size)
72217 len = *size;
72218+ if (len > sizeof(tmp))
72219+ len = sizeof(tmp);
72220 if (copy_to_user(*buf, tmp, len))
72221 return -EFAULT;
72222 *size -= len;
72223@@ -1972,7 +2024,6 @@ static int proc_taint(struct ctl_table *table, int write,
72224 return err;
72225 }
72226
72227-#ifdef CONFIG_PRINTK
72228 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
72229 void __user *buffer, size_t *lenp, loff_t *ppos)
72230 {
72231@@ -1981,7 +2032,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
72232
72233 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
72234 }
72235-#endif
72236
72237 struct do_proc_dointvec_minmax_conv_param {
72238 int *min;
72239@@ -2128,8 +2178,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
72240 *i = val;
72241 } else {
72242 val = convdiv * (*i) / convmul;
72243- if (!first)
72244+ if (!first) {
72245 err = proc_put_char(&buffer, &left, '\t');
72246+ if (err)
72247+ break;
72248+ }
72249 err = proc_put_long(&buffer, &left, val, false);
72250 if (err)
72251 break;
72252@@ -2521,6 +2574,12 @@ int proc_dostring(struct ctl_table *table, int write,
72253 return -ENOSYS;
72254 }
72255
72256+int proc_dostring_modpriv(struct ctl_table *table, int write,
72257+ void __user *buffer, size_t *lenp, loff_t *ppos)
72258+{
72259+ return -ENOSYS;
72260+}
72261+
72262 int proc_dointvec(struct ctl_table *table, int write,
72263 void __user *buffer, size_t *lenp, loff_t *ppos)
72264 {
72265@@ -2577,5 +2636,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
72266 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
72267 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
72268 EXPORT_SYMBOL(proc_dostring);
72269+EXPORT_SYMBOL(proc_dostring_modpriv);
72270 EXPORT_SYMBOL(proc_doulongvec_minmax);
72271 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
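proc_dostring_modpriv(), added above, reads like proc_dostring() but rejects writes from callers lacking CAP_SYS_MODULE, which is why the modprobe_path entry switches to it. Wiring any other module-loading-sensitive string would look the same (sketch; my_helper_path and my_table are hypothetical):

static char my_helper_path[256];          /* hypothetical string sysctl */

static struct ctl_table my_table[] = {
    {
        .procname     = "my_helper_path",
        .data         = my_helper_path,
        .maxlen       = sizeof(my_helper_path),
        .mode         = 0644,
        /* reads behave like proc_dostring; writes need CAP_SYS_MODULE */
        .proc_handler = proc_dostring_modpriv,
    },
    { }
};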
72272diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
72273index 65bdcf1..21eb831 100644
72274--- a/kernel/sysctl_binary.c
72275+++ b/kernel/sysctl_binary.c
72276@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
72277 int i;
72278
72279 set_fs(KERNEL_DS);
72280- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
72281+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
72282 set_fs(old_fs);
72283 if (result < 0)
72284 goto out_kfree;
72285@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
72286 }
72287
72288 set_fs(KERNEL_DS);
72289- result = vfs_write(file, buffer, str - buffer, &pos);
72290+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
72291 set_fs(old_fs);
72292 if (result < 0)
72293 goto out_kfree;
72294@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
72295 int i;
72296
72297 set_fs(KERNEL_DS);
72298- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
72299+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
72300 set_fs(old_fs);
72301 if (result < 0)
72302 goto out_kfree;
72303@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
72304 }
72305
72306 set_fs(KERNEL_DS);
72307- result = vfs_write(file, buffer, str - buffer, &pos);
72308+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
72309 set_fs(old_fs);
72310 if (result < 0)
72311 goto out_kfree;
72312@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
72313 int i;
72314
72315 set_fs(KERNEL_DS);
72316- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
72317+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
72318 set_fs(old_fs);
72319 if (result < 0)
72320 goto out;
72321@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
72322 __le16 dnaddr;
72323
72324 set_fs(KERNEL_DS);
72325- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
72326+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
72327 set_fs(old_fs);
72328 if (result < 0)
72329 goto out;
72330@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
72331 le16_to_cpu(dnaddr) & 0x3ff);
72332
72333 set_fs(KERNEL_DS);
72334- result = vfs_write(file, buf, len, &pos);
72335+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
72336 set_fs(old_fs);
72337 if (result < 0)
72338 goto out;
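The casts through this file are annotations, not behavior changes: under the stricter user/kernel pointer separation these patches enforce, a kernel buffer handed to vfs_read()/vfs_write() inside a set_fs(KERNEL_DS) window must be marked __force_user explicitly instead of punning the address space silently. The recurring pattern, isolated as a kernel-style fragment:

mm_segment_t old_fs = get_fs();

set_fs(KERNEL_DS);                /* kernel buffers count as "user" now */
result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
set_fs(old_fs);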
72339diff --git a/kernel/taskstats.c b/kernel/taskstats.c
72340index 145bb4d..b2aa969 100644
72341--- a/kernel/taskstats.c
72342+++ b/kernel/taskstats.c
72343@@ -28,9 +28,12 @@
72344 #include <linux/fs.h>
72345 #include <linux/file.h>
72346 #include <linux/pid_namespace.h>
72347+#include <linux/grsecurity.h>
72348 #include <net/genetlink.h>
72349 #include <linux/atomic.h>
72350
72351+extern int gr_is_taskstats_denied(int pid);
72352+
72353 /*
72354 * Maximum length of a cpumask that can be specified in
72355 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
72356@@ -570,6 +573,9 @@ err:
72357
72358 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
72359 {
72360+ if (gr_is_taskstats_denied(current->pid))
72361+ return -EACCES;
72362+
72363 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
72364 return cmd_attr_register_cpumask(info);
72365 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
72366diff --git a/kernel/time.c b/kernel/time.c
72367index d226c6a..c7c0960 100644
72368--- a/kernel/time.c
72369+++ b/kernel/time.c
72370@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
72371 return error;
72372
72373 if (tz) {
72374+ /* we log in do_settimeofday(), called below, so don't log twice
72375+ */
72376+ if (!tv)
72377+ gr_log_timechange();
72378+
72379 sys_tz = *tz;
72380 update_vsyscall_tz();
72381 if (firsttime) {
72382diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
72383index f11d83b..d016d91 100644
72384--- a/kernel/time/alarmtimer.c
72385+++ b/kernel/time/alarmtimer.c
72386@@ -750,7 +750,7 @@ static int __init alarmtimer_init(void)
72387 struct platform_device *pdev;
72388 int error = 0;
72389 int i;
72390- struct k_clock alarm_clock = {
72391+ static struct k_clock alarm_clock = {
72392 .clock_getres = alarm_clock_getres,
72393 .clock_get = alarm_clock_get,
72394 .timer_create = alarm_timer_create,
72395diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
72396index f113755..ec24223 100644
72397--- a/kernel/time/tick-broadcast.c
72398+++ b/kernel/time/tick-broadcast.c
72399@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
72400 * then clear the broadcast bit.
72401 */
72402 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
72403- int cpu = smp_processor_id();
72404+ cpu = smp_processor_id();
72405
72406 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
72407 tick_broadcast_clear_oneshot(cpu);
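The tick-broadcast hunk removes a redeclaration that shadowed the enclosing cpu variable; the two happened to hold the same value, but writes to a shadowing local never reach the outer variable, which is an easy source of silent divergence. A compilable illustration:

#include <stdio.h>

int main(void)
{
    int cpu = 0;

    {
        int cpu = 7;    /* shadows the outer cpu */
        (void)cpu;      /* writes here never reach the outer one */
    }
    printf("cpu = %d\n", cpu);  /* prints 0 */
    return 0;
}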
72408diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
72409index e424970..4c7962b 100644
72410--- a/kernel/time/timekeeping.c
72411+++ b/kernel/time/timekeeping.c
72412@@ -15,6 +15,7 @@
72413 #include <linux/init.h>
72414 #include <linux/mm.h>
72415 #include <linux/sched.h>
72416+#include <linux/grsecurity.h>
72417 #include <linux/syscore_ops.h>
72418 #include <linux/clocksource.h>
72419 #include <linux/jiffies.h>
72420@@ -368,6 +369,8 @@ int do_settimeofday(const struct timespec *tv)
72421 if (!timespec_valid_strict(tv))
72422 return -EINVAL;
72423
72424+ gr_log_timechange();
72425+
72426 write_seqlock_irqsave(&tk->lock, flags);
72427
72428 timekeeping_forward_now(tk);
72429diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
72430index af5a7e9..715611a 100644
72431--- a/kernel/time/timer_list.c
72432+++ b/kernel/time/timer_list.c
72433@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
72434
72435 static void print_name_offset(struct seq_file *m, void *sym)
72436 {
72437+#ifdef CONFIG_GRKERNSEC_HIDESYM
72438+ SEQ_printf(m, "<%p>", NULL);
72439+#else
72440 char symname[KSYM_NAME_LEN];
72441
72442 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
72443 SEQ_printf(m, "<%pK>", sym);
72444 else
72445 SEQ_printf(m, "%s", symname);
72446+#endif
72447 }
72448
72449 static void
72450@@ -112,7 +116,11 @@ next_one:
72451 static void
72452 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
72453 {
72454+#ifdef CONFIG_GRKERNSEC_HIDESYM
72455+ SEQ_printf(m, " .base: %p\n", NULL);
72456+#else
72457 SEQ_printf(m, " .base: %pK\n", base);
72458+#endif
72459 SEQ_printf(m, " .index: %d\n",
72460 base->index);
72461 SEQ_printf(m, " .resolution: %Lu nsecs\n",
72462@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
72463 {
72464 struct proc_dir_entry *pe;
72465
72466+#ifdef CONFIG_GRKERNSEC_PROC_ADD
72467+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
72468+#else
72469 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
72470+#endif
72471 if (!pe)
72472 return -ENOMEM;
72473 return 0;
72474diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
72475index 0b537f2..40d6c20 100644
72476--- a/kernel/time/timer_stats.c
72477+++ b/kernel/time/timer_stats.c
72478@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
72479 static unsigned long nr_entries;
72480 static struct entry entries[MAX_ENTRIES];
72481
72482-static atomic_t overflow_count;
72483+static atomic_unchecked_t overflow_count;
72484
72485 /*
72486 * The entries are in a hash-table, for fast lookup:
72487@@ -140,7 +140,7 @@ static void reset_entries(void)
72488 nr_entries = 0;
72489 memset(entries, 0, sizeof(entries));
72490 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
72491- atomic_set(&overflow_count, 0);
72492+ atomic_set_unchecked(&overflow_count, 0);
72493 }
72494
72495 static struct entry *alloc_entry(void)
72496@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
72497 if (likely(entry))
72498 entry->count++;
72499 else
72500- atomic_inc(&overflow_count);
72501+ atomic_inc_unchecked(&overflow_count);
72502
72503 out_unlock:
72504 raw_spin_unlock_irqrestore(lock, flags);
72505@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
72506
72507 static void print_name_offset(struct seq_file *m, unsigned long addr)
72508 {
72509+#ifdef CONFIG_GRKERNSEC_HIDESYM
72510+ seq_printf(m, "<%p>", NULL);
72511+#else
72512 char symname[KSYM_NAME_LEN];
72513
72514 if (lookup_symbol_name(addr, symname) < 0)
72515- seq_printf(m, "<%p>", (void *)addr);
72516+ seq_printf(m, "<%pK>", (void *)addr);
72517 else
72518 seq_printf(m, "%s", symname);
72519+#endif
72520 }
72521
72522 static int tstats_show(struct seq_file *m, void *v)
72523@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
72524
72525 seq_puts(m, "Timer Stats Version: v0.2\n");
72526 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
72527- if (atomic_read(&overflow_count))
72528+ if (atomic_read_unchecked(&overflow_count))
72529 seq_printf(m, "Overflow: %d entries\n",
72530- atomic_read(&overflow_count));
72531+ atomic_read_unchecked(&overflow_count));
72532
72533 for (i = 0; i < nr_entries; i++) {
72534 entry = entries + i;
72535@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
72536 {
72537 struct proc_dir_entry *pe;
72538
72539+#ifdef CONFIG_GRKERNSEC_PROC_ADD
72540+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
72541+#else
72542 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
72543+#endif
72544 if (!pe)
72545 return -ENOMEM;
72546 return 0;
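atomic_unchecked_t and the *_unchecked accessors are the opt-out from PaX REFCOUNT, under which plain atomic_t operations trap on signed overflow; that is the wrong policy for pure statistics such as overflow_count, which may legitimately wrap. A sketch of the convention (kernel-style, assuming the REFCOUNT feature from this patch):

/* Reference counts keep atomic_t and gain overflow detection for free;
 * wrap-tolerant statistics switch to the unchecked variants. */
static atomic_t obj_refcount = ATOMIC_INIT(1);  /* trapped on overflow */
static atomic_unchecked_t stat_overflows;       /* allowed to wrap */

static void record_overflow(void)
{
    atomic_inc_unchecked(&stat_overflows);
}

static int overflows_seen(void)
{
    return atomic_read_unchecked(&stat_overflows);
}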
72547diff --git a/kernel/timer.c b/kernel/timer.c
72548index 367d008..46857a0 100644
72549--- a/kernel/timer.c
72550+++ b/kernel/timer.c
72551@@ -1363,7 +1363,7 @@ void update_process_times(int user_tick)
72552 /*
72553 * This function runs timers and the timer-tq in bottom half context.
72554 */
72555-static void run_timer_softirq(struct softirq_action *h)
72556+static void run_timer_softirq(void)
72557 {
72558 struct tvec_base *base = __this_cpu_read(tvec_bases);
72559
72560@@ -1772,7 +1772,7 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
72561 return NOTIFY_OK;
72562 }
72563
72564-static struct notifier_block __cpuinitdata timers_nb = {
72565+static struct notifier_block __cpuinitconst timers_nb = {
72566 .notifier_call = timer_cpu_notify,
72567 };
72568
72569diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
72570index c0bd030..62a1927 100644
72571--- a/kernel/trace/blktrace.c
72572+++ b/kernel/trace/blktrace.c
72573@@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
72574 struct blk_trace *bt = filp->private_data;
72575 char buf[16];
72576
72577- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
72578+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
72579
72580 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
72581 }
72582@@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
72583 return 1;
72584
72585 bt = buf->chan->private_data;
72586- atomic_inc(&bt->dropped);
72587+ atomic_inc_unchecked(&bt->dropped);
72588 return 0;
72589 }
72590
72591@@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
72592
72593 bt->dir = dir;
72594 bt->dev = dev;
72595- atomic_set(&bt->dropped, 0);
72596+ atomic_set_unchecked(&bt->dropped, 0);
72597
72598 ret = -EIO;
72599 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
72600diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
72601index 51b7159..18137d6 100644
72602--- a/kernel/trace/ftrace.c
72603+++ b/kernel/trace/ftrace.c
72604@@ -1874,12 +1874,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
72605 if (unlikely(ftrace_disabled))
72606 return 0;
72607
72608+ ret = ftrace_arch_code_modify_prepare();
72609+ FTRACE_WARN_ON(ret);
72610+ if (ret)
72611+ return 0;
72612+
72613 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
72614+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
72615 if (ret) {
72616 ftrace_bug(ret, ip);
72617- return 0;
72618 }
72619- return 1;
72620+ return ret ? 0 : 1;
72621 }
72622
72623 /*
72624@@ -2965,7 +2970,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
72625
72626 int
72627 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
72628- void *data)
72629+ void *data)
72630 {
72631 struct ftrace_func_probe *entry;
72632 struct ftrace_page *pg;
72633@@ -3832,8 +3837,10 @@ static int ftrace_process_locs(struct module *mod,
72634 if (!count)
72635 return 0;
72636
72637+ pax_open_kernel();
72638 sort(start, count, sizeof(*start),
72639 ftrace_cmp_ips, ftrace_swap_ips);
72640+ pax_close_kernel();
72641
72642 start_pg = ftrace_allocate_pages(count);
72643 if (!start_pg)
72644@@ -4541,8 +4548,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
72645 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
72646
72647 static int ftrace_graph_active;
72648-static struct notifier_block ftrace_suspend_notifier;
72649-
72650 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
72651 {
72652 return 0;
72653@@ -4686,6 +4691,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
72654 return NOTIFY_DONE;
72655 }
72656
72657+static struct notifier_block ftrace_suspend_notifier = {
72658+ .notifier_call = ftrace_suspend_notifier_call
72659+};
72660+
72661 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
72662 trace_func_graph_ent_t entryfunc)
72663 {
72664@@ -4699,7 +4708,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
72665 goto out;
72666 }
72667
72668- ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
72669 register_pm_notifier(&ftrace_suspend_notifier);
72670
72671 ftrace_graph_active++;
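ftrace_code_disable() now brackets ftrace_make_nop() with ftrace_arch_code_modify_prepare()/post_process(): with kernel text kept read-only (KERNEXEC), the arch hooks must open and re-close write access around the modification. A userspace analogue of that open/patch/close discipline, using mprotect() (illustrative; patch_bytes is hypothetical and assumes the patched range stays within one page):

#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* "prepare": make the page writable; patch; "post process": restore. */
static int patch_bytes(void *addr, const void *src, size_t len)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    void *page = (void *)((uintptr_t)addr & ~((uintptr_t)pagesz - 1));

    if (mprotect(page, pagesz, PROT_READ | PROT_WRITE) != 0)
        return -1;              /* prepare failed: bail before writing */
    memcpy(addr, src, len);     /* the actual code/data modification */
    return mprotect(page, pagesz, PROT_READ);
}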
72672diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
72673index 4cb5e51..e7e05d9 100644
72674--- a/kernel/trace/ring_buffer.c
72675+++ b/kernel/trace/ring_buffer.c
72676@@ -346,9 +346,9 @@ struct buffer_data_page {
72677 */
72678 struct buffer_page {
72679 struct list_head list; /* list of buffer pages */
72680- local_t write; /* index for next write */
72681+ local_unchecked_t write; /* index for next write */
72682 unsigned read; /* index for next read */
72683- local_t entries; /* entries on this page */
72684+ local_unchecked_t entries; /* entries on this page */
72685 unsigned long real_end; /* real end of data */
72686 struct buffer_data_page *page; /* Actual data page */
72687 };
72688@@ -460,8 +460,8 @@ struct ring_buffer_per_cpu {
72689 unsigned long lost_events;
72690 unsigned long last_overrun;
72691 local_t entries_bytes;
72692- local_t commit_overrun;
72693- local_t overrun;
72694+ local_unchecked_t commit_overrun;
72695+ local_unchecked_t overrun;
72696 local_t entries;
72697 local_t committing;
72698 local_t commits;
72699@@ -860,8 +860,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
72700 *
72701 * We add a counter to the write field to denote this.
72702 */
72703- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
72704- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
72705+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
72706+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
72707
72708 /*
72709 * Just make sure we have seen our old_write and synchronize
72710@@ -889,8 +889,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
72711 * cmpxchg to only update if an interrupt did not already
72712 * do it for us. If the cmpxchg fails, we don't care.
72713 */
72714- (void)local_cmpxchg(&next_page->write, old_write, val);
72715- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
72716+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
72717+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
72718
72719 /*
72720 * No need to worry about races with clearing out the commit.
72721@@ -1249,12 +1249,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
72722
72723 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
72724 {
72725- return local_read(&bpage->entries) & RB_WRITE_MASK;
72726+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
72727 }
72728
72729 static inline unsigned long rb_page_write(struct buffer_page *bpage)
72730 {
72731- return local_read(&bpage->write) & RB_WRITE_MASK;
72732+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
72733 }
72734
72735 static int
72736@@ -1349,7 +1349,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
72737 * bytes consumed in ring buffer from here.
72738 * Increment overrun to account for the lost events.
72739 */
72740- local_add(page_entries, &cpu_buffer->overrun);
72741+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
72742 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
72743 }
72744
72745@@ -1905,7 +1905,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
72746 * it is our responsibility to update
72747 * the counters.
72748 */
72749- local_add(entries, &cpu_buffer->overrun);
72750+ local_add_unchecked(entries, &cpu_buffer->overrun);
72751 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
72752
72753 /*
72754@@ -2055,7 +2055,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
72755 if (tail == BUF_PAGE_SIZE)
72756 tail_page->real_end = 0;
72757
72758- local_sub(length, &tail_page->write);
72759+ local_sub_unchecked(length, &tail_page->write);
72760 return;
72761 }
72762
72763@@ -2090,7 +2090,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
72764 rb_event_set_padding(event);
72765
72766 /* Set the write back to the previous setting */
72767- local_sub(length, &tail_page->write);
72768+ local_sub_unchecked(length, &tail_page->write);
72769 return;
72770 }
72771
72772@@ -2102,7 +2102,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
72773
72774 /* Set write to end of buffer */
72775 length = (tail + length) - BUF_PAGE_SIZE;
72776- local_sub(length, &tail_page->write);
72777+ local_sub_unchecked(length, &tail_page->write);
72778 }
72779
72780 /*
72781@@ -2128,7 +2128,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
72782 * about it.
72783 */
72784 if (unlikely(next_page == commit_page)) {
72785- local_inc(&cpu_buffer->commit_overrun);
72786+ local_inc_unchecked(&cpu_buffer->commit_overrun);
72787 goto out_reset;
72788 }
72789
72790@@ -2182,7 +2182,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
72791 cpu_buffer->tail_page) &&
72792 (cpu_buffer->commit_page ==
72793 cpu_buffer->reader_page))) {
72794- local_inc(&cpu_buffer->commit_overrun);
72795+ local_inc_unchecked(&cpu_buffer->commit_overrun);
72796 goto out_reset;
72797 }
72798 }
72799@@ -2230,7 +2230,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
72800 length += RB_LEN_TIME_EXTEND;
72801
72802 tail_page = cpu_buffer->tail_page;
72803- write = local_add_return(length, &tail_page->write);
72804+ write = local_add_return_unchecked(length, &tail_page->write);
72805
72806 /* set write to only the index of the write */
72807 write &= RB_WRITE_MASK;
72808@@ -2247,7 +2247,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
72809 kmemcheck_annotate_bitfield(event, bitfield);
72810 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
72811
72812- local_inc(&tail_page->entries);
72813+ local_inc_unchecked(&tail_page->entries);
72814
72815 /*
72816 * If this is the first commit on the page, then update
72817@@ -2280,7 +2280,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
72818
72819 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
72820 unsigned long write_mask =
72821- local_read(&bpage->write) & ~RB_WRITE_MASK;
72822+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
72823 unsigned long event_length = rb_event_length(event);
72824 /*
72825 * This is on the tail page. It is possible that
72826@@ -2290,7 +2290,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
72827 */
72828 old_index += write_mask;
72829 new_index += write_mask;
72830- index = local_cmpxchg(&bpage->write, old_index, new_index);
72831+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
72832 if (index == old_index) {
72833 /* update counters */
72834 local_sub(event_length, &cpu_buffer->entries_bytes);
72835@@ -2629,7 +2629,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
72836
72837 /* Do the likely case first */
72838 if (likely(bpage->page == (void *)addr)) {
72839- local_dec(&bpage->entries);
72840+ local_dec_unchecked(&bpage->entries);
72841 return;
72842 }
72843
72844@@ -2641,7 +2641,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
72845 start = bpage;
72846 do {
72847 if (bpage->page == (void *)addr) {
72848- local_dec(&bpage->entries);
72849+ local_dec_unchecked(&bpage->entries);
72850 return;
72851 }
72852 rb_inc_page(cpu_buffer, &bpage);
72853@@ -2923,7 +2923,7 @@ static inline unsigned long
72854 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
72855 {
72856 return local_read(&cpu_buffer->entries) -
72857- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
72858+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
72859 }
72860
72861 /**
72862@@ -3011,7 +3011,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
72863 return 0;
72864
72865 cpu_buffer = buffer->buffers[cpu];
72866- ret = local_read(&cpu_buffer->overrun);
72867+ ret = local_read_unchecked(&cpu_buffer->overrun);
72868
72869 return ret;
72870 }
72871@@ -3032,7 +3032,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
72872 return 0;
72873
72874 cpu_buffer = buffer->buffers[cpu];
72875- ret = local_read(&cpu_buffer->commit_overrun);
72876+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
72877
72878 return ret;
72879 }
72880@@ -3077,7 +3077,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
72881 /* if you care about this being correct, lock the buffer */
72882 for_each_buffer_cpu(buffer, cpu) {
72883 cpu_buffer = buffer->buffers[cpu];
72884- overruns += local_read(&cpu_buffer->overrun);
72885+ overruns += local_read_unchecked(&cpu_buffer->overrun);
72886 }
72887
72888 return overruns;
72889@@ -3253,8 +3253,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
72890 /*
72891 * Reset the reader page to size zero.
72892 */
72893- local_set(&cpu_buffer->reader_page->write, 0);
72894- local_set(&cpu_buffer->reader_page->entries, 0);
72895+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
72896+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
72897 local_set(&cpu_buffer->reader_page->page->commit, 0);
72898 cpu_buffer->reader_page->real_end = 0;
72899
72900@@ -3288,7 +3288,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
72901 * want to compare with the last_overrun.
72902 */
72903 smp_mb();
72904- overwrite = local_read(&(cpu_buffer->overrun));
72905+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
72906
72907 /*
72908 * Here's the tricky part.
72909@@ -3858,8 +3858,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
72910
72911 cpu_buffer->head_page
72912 = list_entry(cpu_buffer->pages, struct buffer_page, list);
72913- local_set(&cpu_buffer->head_page->write, 0);
72914- local_set(&cpu_buffer->head_page->entries, 0);
72915+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
72916+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
72917 local_set(&cpu_buffer->head_page->page->commit, 0);
72918
72919 cpu_buffer->head_page->read = 0;
72920@@ -3869,14 +3869,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
72921
72922 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
72923 INIT_LIST_HEAD(&cpu_buffer->new_pages);
72924- local_set(&cpu_buffer->reader_page->write, 0);
72925- local_set(&cpu_buffer->reader_page->entries, 0);
72926+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
72927+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
72928 local_set(&cpu_buffer->reader_page->page->commit, 0);
72929 cpu_buffer->reader_page->read = 0;
72930
72931- local_set(&cpu_buffer->commit_overrun, 0);
72932+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
72933 local_set(&cpu_buffer->entries_bytes, 0);
72934- local_set(&cpu_buffer->overrun, 0);
72935+ local_set_unchecked(&cpu_buffer->overrun, 0);
72936 local_set(&cpu_buffer->entries, 0);
72937 local_set(&cpu_buffer->committing, 0);
72938 local_set(&cpu_buffer->commits, 0);
72939@@ -4279,8 +4279,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
72940 rb_init_page(bpage);
72941 bpage = reader->page;
72942 reader->page = *data_page;
72943- local_set(&reader->write, 0);
72944- local_set(&reader->entries, 0);
72945+ local_set_unchecked(&reader->write, 0);
72946+ local_set_unchecked(&reader->entries, 0);
72947 reader->read = 0;
72948 *data_page = bpage;
72949
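The local_t fields converted here (write, entries, overrun, commit_overrun) are indices and statistics that wrap by design and are masked with RB_WRITE_MASK, so they move to local_unchecked_t for the same REFCOUNT reason as the earlier atomic_unchecked_t conversions. The idiom, condensed from the hunks above:

/* Indices that intentionally wrap within RB_WRITE_MASK use the
 * unchecked local_t API so PaX REFCOUNT does not trap the overflow. */
write = local_add_return_unchecked(length, &tail_page->write);
write &= RB_WRITE_MASK;
index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);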
72950diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
72951index 31e4f55..62da00f 100644
72952--- a/kernel/trace/trace.c
72953+++ b/kernel/trace/trace.c
72954@@ -4436,10 +4436,9 @@ static const struct file_operations tracing_dyn_info_fops = {
72955 };
72956 #endif
72957
72958-static struct dentry *d_tracer;
72959-
72960 struct dentry *tracing_init_dentry(void)
72961 {
72962+ static struct dentry *d_tracer;
72963 static int once;
72964
72965 if (d_tracer)
72966@@ -4459,10 +4458,9 @@ struct dentry *tracing_init_dentry(void)
72967 return d_tracer;
72968 }
72969
72970-static struct dentry *d_percpu;
72971-
72972 struct dentry *tracing_dentry_percpu(void)
72973 {
72974+ static struct dentry *d_percpu;
72975 static int once;
72976 struct dentry *d_tracer;
72977
72978diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
72979index d608d09..7eddcb1 100644
72980--- a/kernel/trace/trace_events.c
72981+++ b/kernel/trace/trace_events.c
72982@@ -1320,10 +1320,6 @@ static LIST_HEAD(ftrace_module_file_list);
72983 struct ftrace_module_file_ops {
72984 struct list_head list;
72985 struct module *mod;
72986- struct file_operations id;
72987- struct file_operations enable;
72988- struct file_operations format;
72989- struct file_operations filter;
72990 };
72991
72992 static struct ftrace_module_file_ops *
72993@@ -1344,17 +1340,12 @@ trace_create_file_ops(struct module *mod)
72994
72995 file_ops->mod = mod;
72996
72997- file_ops->id = ftrace_event_id_fops;
72998- file_ops->id.owner = mod;
72999-
73000- file_ops->enable = ftrace_enable_fops;
73001- file_ops->enable.owner = mod;
73002-
73003- file_ops->filter = ftrace_event_filter_fops;
73004- file_ops->filter.owner = mod;
73005-
73006- file_ops->format = ftrace_event_format_fops;
73007- file_ops->format.owner = mod;
73008+ pax_open_kernel();
73009+ *(void **)&mod->trace_id.owner = mod;
73010+ *(void **)&mod->trace_enable.owner = mod;
73011+ *(void **)&mod->trace_filter.owner = mod;
73012+ *(void **)&mod->trace_format.owner = mod;
73013+ pax_close_kernel();
73014
73015 list_add(&file_ops->list, &ftrace_module_file_list);
73016
73017@@ -1378,8 +1369,8 @@ static void trace_module_add_events(struct module *mod)
73018
73019 for_each_event(call, start, end) {
73020 __trace_add_event_call(*call, mod,
73021- &file_ops->id, &file_ops->enable,
73022- &file_ops->filter, &file_ops->format);
73023+ &mod->trace_id, &mod->trace_enable,
73024+ &mod->trace_filter, &mod->trace_format);
73025 }
73026 }
73027
73028diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
73029index fd3c8aa..5f324a6 100644
73030--- a/kernel/trace/trace_mmiotrace.c
73031+++ b/kernel/trace/trace_mmiotrace.c
73032@@ -24,7 +24,7 @@ struct header_iter {
73033 static struct trace_array *mmio_trace_array;
73034 static bool overrun_detected;
73035 static unsigned long prev_overruns;
73036-static atomic_t dropped_count;
73037+static atomic_unchecked_t dropped_count;
73038
73039 static void mmio_reset_data(struct trace_array *tr)
73040 {
73041@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
73042
73043 static unsigned long count_overruns(struct trace_iterator *iter)
73044 {
73045- unsigned long cnt = atomic_xchg(&dropped_count, 0);
73046+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
73047 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
73048
73049 if (over > prev_overruns)
73050@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
73051 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
73052 sizeof(*entry), 0, pc);
73053 if (!event) {
73054- atomic_inc(&dropped_count);
73055+ atomic_inc_unchecked(&dropped_count);
73056 return;
73057 }
73058 entry = ring_buffer_event_data(event);
73059@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
73060 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
73061 sizeof(*entry), 0, pc);
73062 if (!event) {
73063- atomic_inc(&dropped_count);
73064+ atomic_inc_unchecked(&dropped_count);
73065 return;
73066 }
73067 entry = ring_buffer_event_data(event);
73068diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
73069index 123b189..1e9e2a6 100644
73070--- a/kernel/trace/trace_output.c
73071+++ b/kernel/trace/trace_output.c
73072@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
73073
73074 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
73075 if (!IS_ERR(p)) {
73076- p = mangle_path(s->buffer + s->len, p, "\n");
73077+ p = mangle_path(s->buffer + s->len, p, "\n\\");
73078 if (p) {
73079 s->len = p - s->buffer;
73080 return 1;
73081@@ -824,14 +824,16 @@ int register_ftrace_event(struct trace_event *event)
73082 goto out;
73083 }
73084
73085+ pax_open_kernel();
73086 if (event->funcs->trace == NULL)
73087- event->funcs->trace = trace_nop_print;
73088+ *(void **)&event->funcs->trace = trace_nop_print;
73089 if (event->funcs->raw == NULL)
73090- event->funcs->raw = trace_nop_print;
73091+ *(void **)&event->funcs->raw = trace_nop_print;
73092 if (event->funcs->hex == NULL)
73093- event->funcs->hex = trace_nop_print;
73094+ *(void **)&event->funcs->hex = trace_nop_print;
73095 if (event->funcs->binary == NULL)
73096- event->funcs->binary = trace_nop_print;
73097+ *(void **)&event->funcs->binary = trace_nop_print;
73098+ pax_close_kernel();
73099
73100 key = event->type & (EVENT_HASHSIZE - 1);
73101
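Adding the backslash to trace_seq_path()'s escape set closes an output-spoofing gap: mangle_path() rewrites listed bytes as \ooo octal escapes, and if backslash itself is not on the list, a crafted path can smuggle text that looks like such an escape into the trace. A compilable sketch of the idea (a simplified stand-in, not the kernel's mangle_path):

#include <stdio.h>
#include <string.h>

/* Replace every byte found in `esc` with a \ooo octal escape. */
static void mangle(char *dst, const char *src, const char *esc)
{
    for (; *src; src++) {
        if (strchr(esc, *src))
            dst += sprintf(dst, "\\%03o", (unsigned char)*src);
        else
            *dst++ = *src;
    }
    *dst = '\0';
}

int main(void)
{
    char out[64];

    /* With '\\' in the set, a literal backslash in a path can no
     * longer masquerade as an escape sequence in the output. */
    mangle(out, "dir\\012/file", "\n\\");
    printf("%s\n", out); /* dir\134012/file */
    return 0;
}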
73102diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
73103index 0c1b1657..95337e9 100644
73104--- a/kernel/trace/trace_stack.c
73105+++ b/kernel/trace/trace_stack.c
73106@@ -53,7 +53,7 @@ static inline void check_stack(void)
73107 return;
73108
73109 /* we do not handle interrupt stacks yet */
73110- if (!object_is_on_stack(&this_size))
73111+ if (!object_starts_on_stack(&this_size))
73112 return;
73113
73114 local_irq_save(flags);
73115diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
73116index 28e9d6c9..50381bd 100644
73117--- a/lib/Kconfig.debug
73118+++ b/lib/Kconfig.debug
73119@@ -1278,6 +1278,7 @@ config LATENCYTOP
73120 depends on DEBUG_KERNEL
73121 depends on STACKTRACE_SUPPORT
73122 depends on PROC_FS
73123+ depends on !GRKERNSEC_HIDESYM
73124 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
73125 select KALLSYMS
73126 select KALLSYMS_ALL
73127@@ -1306,7 +1307,7 @@ config INTERVAL_TREE_TEST
73128
73129 config PROVIDE_OHCI1394_DMA_INIT
73130 bool "Remote debugging over FireWire early on boot"
73131- depends on PCI && X86
73132+ depends on PCI && X86 && !GRKERNSEC
73133 help
73134 If you want to debug problems which hang or crash the kernel early
73135 on boot and the crashing machine has a FireWire port, you can use
73136@@ -1335,7 +1336,7 @@ config PROVIDE_OHCI1394_DMA_INIT
73137
73138 config FIREWIRE_OHCI_REMOTE_DMA
73139 bool "Remote debugging over FireWire with firewire-ohci"
73140- depends on FIREWIRE_OHCI
73141+ depends on FIREWIRE_OHCI && !GRKERNSEC
73142 help
73143 This option lets you use the FireWire bus for remote debugging
73144 with help of the firewire-ohci driver. It enables unfiltered
73145diff --git a/lib/Makefile b/lib/Makefile
73146index a08b791..a3ff1eb 100644
73147--- a/lib/Makefile
73148+++ b/lib/Makefile
73149@@ -46,7 +46,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
73150
73151 obj-$(CONFIG_BTREE) += btree.o
73152 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
73153-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
73154+obj-y += list_debug.o
73155 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
73156
73157 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
73158diff --git a/lib/bitmap.c b/lib/bitmap.c
73159index 06fdfa1..97c5c7d 100644
73160--- a/lib/bitmap.c
73161+++ b/lib/bitmap.c
73162@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
73163 {
73164 int c, old_c, totaldigits, ndigits, nchunks, nbits;
73165 u32 chunk;
73166- const char __user __force *ubuf = (const char __user __force *)buf;
73167+ const char __user *ubuf = (const char __force_user *)buf;
73168
73169 bitmap_zero(maskp, nmaskbits);
73170
73171@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
73172 {
73173 if (!access_ok(VERIFY_READ, ubuf, ulen))
73174 return -EFAULT;
73175- return __bitmap_parse((const char __force *)ubuf,
73176+ return __bitmap_parse((const char __force_kernel *)ubuf,
73177 ulen, 1, maskp, nmaskbits);
73178
73179 }
73180@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
73181 {
73182 unsigned a, b;
73183 int c, old_c, totaldigits;
73184- const char __user __force *ubuf = (const char __user __force *)buf;
73185+ const char __user *ubuf = (const char __force_user *)buf;
73186 int exp_digit, in_range;
73187
73188 totaldigits = c = 0;
73189@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
73190 {
73191 if (!access_ok(VERIFY_READ, ubuf, ulen))
73192 return -EFAULT;
73193- return __bitmap_parselist((const char __force *)ubuf,
73194+ return __bitmap_parselist((const char __force_kernel *)ubuf,
73195 ulen, 1, maskp, nmaskbits);
73196 }
73197 EXPORT_SYMBOL(bitmap_parselist_user);
73198diff --git a/lib/bug.c b/lib/bug.c
73199index a28c141..2bd3d95 100644
73200--- a/lib/bug.c
73201+++ b/lib/bug.c
73202@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
73203 return BUG_TRAP_TYPE_NONE;
73204
73205 bug = find_bug(bugaddr);
73206+ if (!bug)
73207+ return BUG_TRAP_TYPE_NONE;
73208
73209 file = NULL;
73210 line = 0;
73211diff --git a/lib/debugobjects.c b/lib/debugobjects.c
73212index d11808c..dc2d6f8 100644
73213--- a/lib/debugobjects.c
73214+++ b/lib/debugobjects.c
73215@@ -287,7 +287,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
73216 if (limit > 4)
73217 return;
73218
73219- is_on_stack = object_is_on_stack(addr);
73220+ is_on_stack = object_starts_on_stack(addr);
73221 if (is_on_stack == onstack)
73222 return;
73223
73224diff --git a/lib/devres.c b/lib/devres.c
73225index 80b9c76..9e32279 100644
73226--- a/lib/devres.c
73227+++ b/lib/devres.c
73228@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
73229 void devm_iounmap(struct device *dev, void __iomem *addr)
73230 {
73231 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
73232- (void *)addr));
73233+ (void __force *)addr));
73234 iounmap(addr);
73235 }
73236 EXPORT_SYMBOL(devm_iounmap);
73237@@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
73238 {
73239 ioport_unmap(addr);
73240 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
73241- devm_ioport_map_match, (void *)addr));
73242+ devm_ioport_map_match, (void __force *)addr));
73243 }
73244 EXPORT_SYMBOL(devm_ioport_unmap);
73245
73246diff --git a/lib/dma-debug.c b/lib/dma-debug.c
73247index d84beb9..da44791 100644
73248--- a/lib/dma-debug.c
73249+++ b/lib/dma-debug.c
73250@@ -754,7 +754,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
73251
73252 void dma_debug_add_bus(struct bus_type *bus)
73253 {
73254- struct notifier_block *nb;
73255+ notifier_block_no_const *nb;
73256
73257 if (global_disable)
73258 return;
73259@@ -919,7 +919,7 @@ out:
73260
73261 static void check_for_stack(struct device *dev, void *addr)
73262 {
73263- if (object_is_on_stack(addr))
73264+ if (object_starts_on_stack(addr))
73265 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
73266 "stack [addr=%p]\n", addr);
73267 }
73268diff --git a/lib/inflate.c b/lib/inflate.c
73269index 013a761..c28f3fc 100644
73270--- a/lib/inflate.c
73271+++ b/lib/inflate.c
73272@@ -269,7 +269,7 @@ static void free(void *where)
73273 malloc_ptr = free_mem_ptr;
73274 }
73275 #else
73276-#define malloc(a) kmalloc(a, GFP_KERNEL)
73277+#define malloc(a) kmalloc((a), GFP_KERNEL)
73278 #define free(a) kfree(a)
73279 #endif
73280
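The inflate.c change only parenthesizes the macro argument: kmalloc(a, GFP_KERNEL) happens to be safe for the callers here, but kmalloc((a), ...) is the defensive idiom, since an unparenthesized macro argument can rebind against neighbouring operators. The classic demonstration:

#include <stdio.h>

#define BAD_DOUBLE(x)  (x * 2)      /* argument not parenthesized */
#define GOOD_DOUBLE(x) ((x) * 2)

int main(void)
{
    printf("%d\n", BAD_DOUBLE(1 + 2));  /* 1 + 2*2 = 5, surprise */
    printf("%d\n", GOOD_DOUBLE(1 + 2)); /* (1+2)*2 = 6 */
    return 0;
}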
73281diff --git a/lib/ioremap.c b/lib/ioremap.c
73282index 0c9216c..863bd89 100644
73283--- a/lib/ioremap.c
73284+++ b/lib/ioremap.c
73285@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
73286 unsigned long next;
73287
73288 phys_addr -= addr;
73289- pmd = pmd_alloc(&init_mm, pud, addr);
73290+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
73291 if (!pmd)
73292 return -ENOMEM;
73293 do {
73294@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
73295 unsigned long next;
73296
73297 phys_addr -= addr;
73298- pud = pud_alloc(&init_mm, pgd, addr);
73299+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
73300 if (!pud)
73301 return -ENOMEM;
73302 do {
73303diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
73304index bd2bea9..6b3c95e 100644
73305--- a/lib/is_single_threaded.c
73306+++ b/lib/is_single_threaded.c
73307@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
73308 struct task_struct *p, *t;
73309 bool ret;
73310
73311+ if (!mm)
73312+ return true;
73313+
73314 if (atomic_read(&task->signal->live) != 1)
73315 return false;
73316
73317diff --git a/lib/list_debug.c b/lib/list_debug.c
73318index c24c2f7..3fc5da0 100644
73319--- a/lib/list_debug.c
73320+++ b/lib/list_debug.c
73321@@ -11,7 +11,9 @@
73322 #include <linux/bug.h>
73323 #include <linux/kernel.h>
73324 #include <linux/rculist.h>
73325+#include <linux/mm.h>
73326
73327+#ifdef CONFIG_DEBUG_LIST
73328 /*
73329 * Insert a new entry between two known consecutive entries.
73330 *
73331@@ -19,21 +21,32 @@
73332 * the prev/next entries already!
73333 */
73334
73335-void __list_add(struct list_head *new,
73336- struct list_head *prev,
73337- struct list_head *next)
73338+static bool __list_add_debug(struct list_head *new,
73339+ struct list_head *prev,
73340+ struct list_head *next)
73341 {
73342- WARN(next->prev != prev,
73343+ if (WARN(next->prev != prev,
73344 "list_add corruption. next->prev should be "
73345 "prev (%p), but was %p. (next=%p).\n",
73346- prev, next->prev, next);
73347- WARN(prev->next != next,
73348+ prev, next->prev, next) ||
73349+ WARN(prev->next != next,
73350 "list_add corruption. prev->next should be "
73351 "next (%p), but was %p. (prev=%p).\n",
73352- next, prev->next, prev);
73353- WARN(new == prev || new == next,
73354+ next, prev->next, prev) ||
73355+ WARN(new == prev || new == next,
73356 "list_add double add: new=%p, prev=%p, next=%p.\n",
73357- new, prev, next);
73358+ new, prev, next))
73359+ return false;
73360+ return true;
73361+}
73362+
73363+void __list_add(struct list_head *new,
73364+ struct list_head *prev,
73365+ struct list_head *next)
73366+{
73367+ if (!__list_add_debug(new, prev, next))
73368+ return;
73369+
73370 next->prev = new;
73371 new->next = next;
73372 new->prev = prev;
73373@@ -41,7 +54,7 @@ void __list_add(struct list_head *new,
73374 }
73375 EXPORT_SYMBOL(__list_add);
73376
73377-void __list_del_entry(struct list_head *entry)
73378+static bool __list_del_entry_debug(struct list_head *entry)
73379 {
73380 struct list_head *prev, *next;
73381
73382@@ -60,9 +73,16 @@ void __list_del_entry(struct list_head *entry)
73383 WARN(next->prev != entry,
73384 "list_del corruption. next->prev should be %p, "
73385 "but was %p\n", entry, next->prev))
73386+ return false;
73387+ return true;
73388+}
73389+
73390+void __list_del_entry(struct list_head *entry)
73391+{
73392+ if (!__list_del_entry_debug(entry))
73393 return;
73394
73395- __list_del(prev, next);
73396+ __list_del(entry->prev, entry->next);
73397 }
73398 EXPORT_SYMBOL(__list_del_entry);
73399
73400@@ -86,15 +106,54 @@ EXPORT_SYMBOL(list_del);
73401 void __list_add_rcu(struct list_head *new,
73402 struct list_head *prev, struct list_head *next)
73403 {
73404- WARN(next->prev != prev,
73405+ if (WARN(next->prev != prev,
73406 "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
73407- prev, next->prev, next);
73408- WARN(prev->next != next,
73409+ prev, next->prev, next) ||
73410+ WARN(prev->next != next,
73411 "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
73412- next, prev->next, prev);
73413+ next, prev->next, prev))
73414+ return;
73415+
73416 new->next = next;
73417 new->prev = prev;
73418 rcu_assign_pointer(list_next_rcu(prev), new);
73419 next->prev = new;
73420 }
73421 EXPORT_SYMBOL(__list_add_rcu);
73422+#endif
73423+
73424+void pax_list_add_tail(struct list_head *new, struct list_head *head)
73425+{
73426+ struct list_head *prev, *next;
73427+
73428+ prev = head->prev;
73429+ next = head;
73430+
73431+#ifdef CONFIG_DEBUG_LIST
73432+ if (!__list_add_debug(new, prev, next))
73433+ return;
73434+#endif
73435+
73436+ pax_open_kernel();
73437+ next->prev = new;
73438+ new->next = next;
73439+ new->prev = prev;
73440+ prev->next = new;
73441+ pax_close_kernel();
73442+}
73443+EXPORT_SYMBOL(pax_list_add_tail);
73444+
73445+void pax_list_del(struct list_head *entry)
73446+{
73447+#ifdef CONFIG_DEBUG_LIST
73448+ if (!__list_del_entry_debug(entry))
73449+ return;
73450+#endif
73451+
73452+ pax_open_kernel();
73453+ __list_del(entry->prev, entry->next);
73454+ entry->next = LIST_POISON1;
73455+ entry->prev = LIST_POISON2;
73456+ pax_close_kernel();
73457+}
73458+EXPORT_SYMBOL(pax_list_del);
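pax_list_add_tail() and pax_list_del(), added above, serve list nodes that the constification plugin has placed in read-only memory: they run the same DEBUG_LIST corruption checks (when configured) but perform the pointer stores inside a pax_open_kernel() window. A usage sketch (my_list and my_node are hypothetical):

static LIST_HEAD(my_list);
static struct list_head my_node;   /* imagine this sits in read-only data */

static void attach(void)
{
    /* same splice as list_add_tail(), but written through the
     * kernel-write window so the read-only node can be linked */
    pax_list_add_tail(&my_node, &my_list);
}

static void detach(void)
{
    pax_list_del(&my_node);        /* also poisons next/prev */
}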
73459diff --git a/lib/radix-tree.c b/lib/radix-tree.c
73460index e796429..6e38f9f 100644
73461--- a/lib/radix-tree.c
73462+++ b/lib/radix-tree.c
73463@@ -92,7 +92,7 @@ struct radix_tree_preload {
73464 int nr;
73465 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
73466 };
73467-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
73468+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
73469
73470 static inline void *ptr_to_indirect(void *ptr)
73471 {
73472diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
73473index bb2b201..46abaf9 100644
73474--- a/lib/strncpy_from_user.c
73475+++ b/lib/strncpy_from_user.c
73476@@ -21,7 +21,7 @@
73477 */
73478 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
73479 {
73480- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
73481+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
73482 long res = 0;
73483
73484 /*
73485diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
73486index a28df52..3d55877 100644
73487--- a/lib/strnlen_user.c
73488+++ b/lib/strnlen_user.c
73489@@ -26,7 +26,7 @@
73490 */
73491 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
73492 {
73493- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
73494+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
73495 long align, res = 0;
73496 unsigned long c;
73497
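Both word-at-a-time hunks promote the constants to static const: WORD_AT_A_TIME_CONSTANTS expands to a struct initializer, so a plain const local is rebuilt on the stack on every call, while static const leaves a single copy in .rodata. A reduced illustration (using the generic struct's one_bits/high_bits layout and the usual has-zero-byte test; constants assume 64-bit long):

struct constants { unsigned long one_bits, high_bits; };
#define CONSTANTS { 0x0101010101010101UL, 0x8080808080808080UL }

static long has_zero_byte(unsigned long word)
{
    /* one .rodata copy, no per-call stack initialization */
    static const struct constants c = CONSTANTS;

    return (long)((word - c.one_bits) & ~word & c.high_bits);
}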
73498diff --git a/lib/vsprintf.c b/lib/vsprintf.c
73499index 39c99fe..18f060b 100644
73500--- a/lib/vsprintf.c
73501+++ b/lib/vsprintf.c
73502@@ -16,6 +16,9 @@
73503 * - scnprintf and vscnprintf
73504 */
73505
73506+#ifdef CONFIG_GRKERNSEC_HIDESYM
73507+#define __INCLUDED_BY_HIDESYM 1
73508+#endif
73509 #include <stdarg.h>
73510 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
73511 #include <linux/types.h>
73512@@ -533,7 +536,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
73513 char sym[KSYM_SYMBOL_LEN];
73514 if (ext == 'B')
73515 sprint_backtrace(sym, value);
73516- else if (ext != 'f' && ext != 's')
73517+ else if (ext != 'f' && ext != 's' && ext != 'a')
73518 sprint_symbol(sym, value);
73519 else
73520 sprint_symbol_no_offset(sym, value);
73521@@ -966,7 +969,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
73522 return number(buf, end, *(const netdev_features_t *)addr, spec);
73523 }
73524
73525+#ifdef CONFIG_GRKERNSEC_HIDESYM
73526+int kptr_restrict __read_mostly = 2;
73527+#else
73528 int kptr_restrict __read_mostly;
73529+#endif
73530
73531 /*
73532 * Show a '%p' thing. A kernel extension is that the '%p' is followed
73533@@ -980,6 +987,8 @@ int kptr_restrict __read_mostly;
73534 * - 'S' For symbolic direct pointers with offset
73535 * - 's' For symbolic direct pointers without offset
73536 * - 'B' For backtraced symbolic direct pointers with offset
73537+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
73538+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
73539 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
73540 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
73541 * - 'M' For a 6-byte MAC address, it prints the address in the
73542@@ -1035,12 +1044,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
73543
73544 if (!ptr && *fmt != 'K') {
73545 /*
73546- * Print (null) with the same width as a pointer so it makes
73547+ * Print (nil) with the same width as a pointer so it makes
73548 * tabular output look nice.
73549 */
73550 if (spec.field_width == -1)
73551 spec.field_width = default_width;
73552- return string(buf, end, "(null)", spec);
73553+ return string(buf, end, "(nil)", spec);
73554 }
73555
73556 switch (*fmt) {
73557@@ -1050,6 +1059,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
73558 /* Fallthrough */
73559 case 'S':
73560 case 's':
73561+#ifdef CONFIG_GRKERNSEC_HIDESYM
73562+ break;
73563+#else
73564+ return symbol_string(buf, end, ptr, spec, *fmt);
73565+#endif
73566+ case 'A':
73567+ case 'a':
73568 case 'B':
73569 return symbol_string(buf, end, ptr, spec, *fmt);
73570 case 'R':
73571@@ -1090,6 +1106,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
73572 va_end(va);
73573 return buf;
73574 }
73575+ case 'P':
73576+ break;
73577 case 'K':
73578 /*
73579 * %pK cannot be used in IRQ context because its test
73580@@ -1113,6 +1131,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
73581 }
73582 break;
73583 }
73584+
73585+#ifdef CONFIG_GRKERNSEC_HIDESYM
73586+	/* 'P' = pointers approved for copying to userland, as in the
73587+	   /proc/kallsyms case: we display nothing to non-root users
73588+	   and the real contents to root users.
73589+	   'K' pointers are ignored as well, since we already force them
73590+	   to NULL for non-root users above.
73591+	*/
73592+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
73593+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
73594+ dump_stack();
73595+ ptr = NULL;
73596+ }
73597+#endif
73598+
73599 spec.flags |= SMALL;
73600 if (spec.field_width == -1) {
73601 spec.field_width = default_width;
73602@@ -1831,11 +1864,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
73603 typeof(type) value; \
73604 if (sizeof(type) == 8) { \
73605 args = PTR_ALIGN(args, sizeof(u32)); \
73606- *(u32 *)&value = *(u32 *)args; \
73607- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
73608+ *(u32 *)&value = *(const u32 *)args; \
73609+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
73610 } else { \
73611 args = PTR_ALIGN(args, sizeof(type)); \
73612- value = *(typeof(type) *)args; \
73613+ value = *(const typeof(type) *)args; \
73614 } \
73615 args += sizeof(type); \
73616 value; \
73617@@ -1898,7 +1931,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
73618 case FORMAT_TYPE_STR: {
73619 const char *str_arg = args;
73620 args += strlen(str_arg) + 1;
73621- str = string(str, end, (char *)str_arg, spec);
73622+ str = string(str, end, str_arg, spec);
73623 break;
73624 }
73625
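
The vsprintf changes are the printf side of GRKERNSEC_HIDESYM: kptr_restrict defaults to 2, plain %pS/%ps symbol printing is suppressed while the new %pA/%pa stay symbolic (for root-only consumers such as kmemleak below), "(null)" becomes "(nil)", and a detector NULLs any kernel pointer about to be formatted into a usercopy-able buffer. A userspace sketch of just the kptr_restrict decision, using a hypothetical format_kernel_ptr() helper; the real %pK branch also consults CAP_SYSLOG and refuses to run in IRQ context:

    /* kptr_restrict policy applied by the %pK branch above:
     * 0 prints the raw pointer, 1 hides it from unprivileged readers,
     * 2 hides it from everyone.  format_kernel_ptr() is a made-up
     * stand-in for the kernel logic.
     */
    #include <stdio.h>

    static int kptr_restrict = 2;   /* grsec HIDESYM default, per the hunk above */

    static void format_kernel_ptr(char *buf, size_t len, const void *ptr,
                                  int reader_is_privileged)
    {
        if (kptr_restrict == 0 ||
            (kptr_restrict == 1 && reader_is_privileged))
            snprintf(buf, len, "%p", ptr);
        else
            snprintf(buf, len, "%p", (void *)0);  /* glibc prints "(nil)" */
    }

    int main(void)
    {
        char buf[32];
        int dummy;

        format_kernel_ptr(buf, sizeof(buf), &dummy, 0);
        printf("unprivileged sees: %s\n", buf);

        format_kernel_ptr(buf, sizeof(buf), &dummy, 1);
        printf("privileged sees:   %s\n", buf);   /* still hidden at level 2 */
        return 0;
    }
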
73626diff --git a/localversion-grsec b/localversion-grsec
73627new file mode 100644
73628index 0000000..7cd6065
73629--- /dev/null
73630+++ b/localversion-grsec
73631@@ -0,0 +1 @@
73632+-grsec
73633diff --git a/mm/Kconfig b/mm/Kconfig
73634index a3f8ddd..f31e92e 100644
73635--- a/mm/Kconfig
73636+++ b/mm/Kconfig
73637@@ -252,10 +252,10 @@ config KSM
73638 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
73639
73640 config DEFAULT_MMAP_MIN_ADDR
73641- int "Low address space to protect from user allocation"
73642+ int "Low address space to protect from user allocation"
73643 depends on MMU
73644- default 4096
73645- help
73646+ default 65536
73647+ help
73648 This is the portion of low virtual memory which should be protected
73649 from userspace allocation. Keeping a user from writing to low pages
73650 can help reduce the impact of kernel NULL pointer bugs.
73651@@ -286,7 +286,7 @@ config MEMORY_FAILURE
73652
73653 config HWPOISON_INJECT
73654 tristate "HWPoison pages injector"
73655- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
73656+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
73657 select PROC_PAGE_MONITOR
73658
73659 config NOMMU_INITIAL_TRIM_EXCESS
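
Raising DEFAULT_MMAP_MIN_ADDR from 4096 to 65536 widens the low region userspace may not map, so a kernel NULL-pointer dereference, even one with a large field offset, cannot land in attacker-controlled memory. The limit is visible from userspace as the vm.mmap_min_addr sysctl; a small probe (the errno observed depends on the security module in use, commonly EPERM or EACCES, and the second address is only illustrative):

    /* Probe the protected low range configured by DEFAULT_MMAP_MIN_ADDR
     * (sysctl vm.mmap_min_addr).  With the 65536 default above, the first
     * fixed mapping fails for an unprivileged process.
     */
    #include <stdio.h>
    #include <string.h>
    #include <errno.h>
    #include <sys/mman.h>

    static void try_fixed_map(unsigned long addr)
    {
        void *p = mmap((void *)addr, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
        if (p == MAP_FAILED)
            printf("mmap at 0x%lx: %s\n", addr, strerror(errno));
        else
            printf("mmap at 0x%lx: ok\n", addr);
    }

    int main(void)
    {
        try_fixed_map(0x1000);    /* below 65536: denied on this config */
        try_fixed_map(0x100000);  /* above the floor: normally allowed
                                     (MAP_FIXED probe; assumes the address
                                     is free in this process) */
        return 0;
    }
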
73660diff --git a/mm/filemap.c b/mm/filemap.c
73661index 83efee7..3f99381 100644
73662--- a/mm/filemap.c
73663+++ b/mm/filemap.c
73664@@ -1747,7 +1747,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
73665 struct address_space *mapping = file->f_mapping;
73666
73667 if (!mapping->a_ops->readpage)
73668- return -ENOEXEC;
73669+ return -ENODEV;
73670 file_accessed(file);
73671 vma->vm_ops = &generic_file_vm_ops;
73672 return 0;
73673@@ -2087,6 +2087,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
73674 *pos = i_size_read(inode);
73675
73676 if (limit != RLIM_INFINITY) {
73677+	gr_learn_resource(current, RLIMIT_FSIZE, *pos, 0);
73678 if (*pos >= limit) {
73679 send_sig(SIGXFSZ, current, 0);
73680 return -EFBIG;
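
generic_write_checks() is where an over-limit write turns into SIGXFSZ plus -EFBIG, and the added gr_learn_resource() call reports the attempted file size to grsecurity's resource-learning mode just before that decision. The same RLIMIT_FSIZE behaviour can be observed from userspace:

    /* Demonstrates the RLIMIT_FSIZE enforcement seen in
     * generic_write_checks() above: a write crossing the soft limit is
     * truncated, and a write starting past it raises SIGXFSZ (EFBIG if
     * the signal is ignored).
     */
    #include <stdio.h>
    #include <string.h>
    #include <errno.h>
    #include <signal.h>
    #include <unistd.h>
    #include <fcntl.h>
    #include <sys/resource.h>

    int main(void)
    {
        struct rlimit rl = { .rlim_cur = 4096, .rlim_max = 4096 };
        char buf[8192];
        int fd;

        signal(SIGXFSZ, SIG_IGN);           /* see EFBIG instead of dying */
        setrlimit(RLIMIT_FSIZE, &rl);

        fd = open("/tmp/fsize-demo", O_CREAT | O_WRONLY | O_TRUNC, 0600);
        memset(buf, 'x', sizeof(buf));

        ssize_t n = write(fd, buf, sizeof(buf));
        printf("wrote %zd bytes (limit 4096)\n", n);   /* truncated to 4096 */

        n = write(fd, buf, 1);              /* now starting at the limit */
        printf("second write: %zd, errno=%s\n", n, strerror(errno)); /* EFBIG */

        close(fd);
        unlink("/tmp/fsize-demo");
        return 0;
    }
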
73681diff --git a/mm/fremap.c b/mm/fremap.c
73682index a0aaf0e..20325c3 100644
73683--- a/mm/fremap.c
73684+++ b/mm/fremap.c
73685@@ -157,6 +157,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
73686 retry:
73687 vma = find_vma(mm, start);
73688
73689+#ifdef CONFIG_PAX_SEGMEXEC
73690+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
73691+ goto out;
73692+#endif
73693+
73694 /*
73695 * Make sure the vma is shared, that it supports prefaulting,
73696 * and that the remapped range is valid and fully within
73697diff --git a/mm/highmem.c b/mm/highmem.c
73698index 09fc744..3936897 100644
73699--- a/mm/highmem.c
73700+++ b/mm/highmem.c
73701@@ -138,9 +138,10 @@ static void flush_all_zero_pkmaps(void)
73702 * So no dangers, even with speculative execution.
73703 */
73704 page = pte_page(pkmap_page_table[i]);
73705+ pax_open_kernel();
73706 pte_clear(&init_mm, (unsigned long)page_address(page),
73707 &pkmap_page_table[i]);
73708-
73709+ pax_close_kernel();
73710 set_page_address(page, NULL);
73711 need_flush = 1;
73712 }
73713@@ -199,9 +200,11 @@ start:
73714 }
73715 }
73716 vaddr = PKMAP_ADDR(last_pkmap_nr);
73717+
73718+ pax_open_kernel();
73719 set_pte_at(&init_mm, vaddr,
73720 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
73721-
73722+ pax_close_kernel();
73723 pkmap_count[last_pkmap_nr] = 1;
73724 set_page_address(page, (void *)vaddr);
73725
73726diff --git a/mm/huge_memory.c b/mm/huge_memory.c
73727index 40f17c3..c1cc011 100644
73728--- a/mm/huge_memory.c
73729+++ b/mm/huge_memory.c
73730@@ -710,7 +710,7 @@ out:
73731 * run pte_offset_map on the pmd, if an huge pmd could
73732 * materialize from under us from a different thread.
73733 */
73734- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
73735+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
73736 return VM_FAULT_OOM;
73737 /* if an huge pmd materialized from under us just retry later */
73738 if (unlikely(pmd_trans_huge(*pmd)))
73739diff --git a/mm/hugetlb.c b/mm/hugetlb.c
73740index f198aca..a19a5a5 100644
73741--- a/mm/hugetlb.c
73742+++ b/mm/hugetlb.c
73743@@ -2509,6 +2509,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
73744 return 1;
73745 }
73746
73747+#ifdef CONFIG_PAX_SEGMEXEC
73748+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
73749+{
73750+ struct mm_struct *mm = vma->vm_mm;
73751+ struct vm_area_struct *vma_m;
73752+ unsigned long address_m;
73753+ pte_t *ptep_m;
73754+
73755+ vma_m = pax_find_mirror_vma(vma);
73756+ if (!vma_m)
73757+ return;
73758+
73759+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
73760+ address_m = address + SEGMEXEC_TASK_SIZE;
73761+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
73762+ get_page(page_m);
73763+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
73764+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
73765+}
73766+#endif
73767+
73768 /*
73769 * Hugetlb_cow() should be called with page lock of the original hugepage held.
73770 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
73771@@ -2627,6 +2648,11 @@ retry_avoidcopy:
73772 make_huge_pte(vma, new_page, 1));
73773 page_remove_rmap(old_page);
73774 hugepage_add_new_anon_rmap(new_page, vma, address);
73775+
73776+#ifdef CONFIG_PAX_SEGMEXEC
73777+ pax_mirror_huge_pte(vma, address, new_page);
73778+#endif
73779+
73780 /* Make the old page be freed below */
73781 new_page = old_page;
73782 }
73783@@ -2786,6 +2812,10 @@ retry:
73784 && (vma->vm_flags & VM_SHARED)));
73785 set_huge_pte_at(mm, address, ptep, new_pte);
73786
73787+#ifdef CONFIG_PAX_SEGMEXEC
73788+ pax_mirror_huge_pte(vma, address, page);
73789+#endif
73790+
73791 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
73792 /* Optimization, do the COW without a second fault */
73793 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
73794@@ -2815,6 +2845,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
73795 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
73796 struct hstate *h = hstate_vma(vma);
73797
73798+#ifdef CONFIG_PAX_SEGMEXEC
73799+ struct vm_area_struct *vma_m;
73800+#endif
73801+
73802 address &= huge_page_mask(h);
73803
73804 ptep = huge_pte_offset(mm, address);
73805@@ -2828,6 +2862,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
73806 VM_FAULT_SET_HINDEX(hstate_index(h));
73807 }
73808
73809+#ifdef CONFIG_PAX_SEGMEXEC
73810+ vma_m = pax_find_mirror_vma(vma);
73811+ if (vma_m) {
73812+ unsigned long address_m;
73813+
73814+ if (vma->vm_start > vma_m->vm_start) {
73815+ address_m = address;
73816+ address -= SEGMEXEC_TASK_SIZE;
73817+ vma = vma_m;
73818+ h = hstate_vma(vma);
73819+ } else
73820+ address_m = address + SEGMEXEC_TASK_SIZE;
73821+
73822+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
73823+ return VM_FAULT_OOM;
73824+ address_m &= HPAGE_MASK;
73825+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
73826+ }
73827+#endif
73828+
73829 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
73830 if (!ptep)
73831 return VM_FAULT_OOM;
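
Under PAX_SEGMEXEC the address space is split in half and every executable mapping has a mirror at address + SEGMEXEC_TASK_SIZE; the hugetlb_fault() hunk above normalizes a fault so the lower vma is always the one handled and the mirror address is derived from it. The kernel decides by comparing the two vmas' start addresses; the sketch below simplifies that to a raw address compare, and the 1.5 GB constant is the i386 value, used purely for illustration:

    /* Address normalization done at the top of hugetlb_fault() and
     * handle_mm_fault() above: whichever half of the split address space
     * faulted, fix up (address, address_m) so 'address' is in the lower
     * half and 'address_m' is its mirror in the upper half.
     */
    #include <stdio.h>

    #define SEGMEXEC_TASK_SIZE 0x60000000UL   /* 1.5 GB, i386 value */

    static void normalize(unsigned long fault_addr,
                          unsigned long *address, unsigned long *address_m)
    {
        if (fault_addr >= SEGMEXEC_TASK_SIZE) {   /* fault hit the mirror */
            *address   = fault_addr - SEGMEXEC_TASK_SIZE;
            *address_m = fault_addr;
        } else {                                  /* fault hit the lower vma */
            *address   = fault_addr;
            *address_m = fault_addr + SEGMEXEC_TASK_SIZE;
        }
    }

    int main(void)
    {
        unsigned long a, m;

        normalize(0x08048000UL, &a, &m);          /* classic i386 text address */
        printf("lower=0x%lx mirror=0x%lx\n", a, m);

        normalize(0x08048000UL + SEGMEXEC_TASK_SIZE, &a, &m);
        printf("lower=0x%lx mirror=0x%lx\n", a, m);   /* same pair */
        return 0;
    }
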
73832diff --git a/mm/internal.h b/mm/internal.h
73833index 3c5197d..08d0065 100644
73834--- a/mm/internal.h
73835+++ b/mm/internal.h
73836@@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
73837 * in mm/page_alloc.c
73838 */
73839 extern void __free_pages_bootmem(struct page *page, unsigned int order);
73840+extern void free_compound_page(struct page *page);
73841 extern void prep_compound_page(struct page *page, unsigned long order);
73842 #ifdef CONFIG_MEMORY_FAILURE
73843 extern bool is_free_buddy_page(struct page *page);
73844diff --git a/mm/kmemleak.c b/mm/kmemleak.c
73845index a217cc5..74c9ec0 100644
73846--- a/mm/kmemleak.c
73847+++ b/mm/kmemleak.c
73848@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
73849
73850 for (i = 0; i < object->trace_len; i++) {
73851 void *ptr = (void *)object->trace[i];
73852- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
73853+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
73854 }
73855 }
73856
73857diff --git a/mm/maccess.c b/mm/maccess.c
73858index d53adf9..03a24bf 100644
73859--- a/mm/maccess.c
73860+++ b/mm/maccess.c
73861@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
73862 set_fs(KERNEL_DS);
73863 pagefault_disable();
73864 ret = __copy_from_user_inatomic(dst,
73865- (__force const void __user *)src, size);
73866+ (const void __force_user *)src, size);
73867 pagefault_enable();
73868 set_fs(old_fs);
73869
73870@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
73871
73872 set_fs(KERNEL_DS);
73873 pagefault_disable();
73874- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
73875+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
73876 pagefault_enable();
73877 set_fs(old_fs);
73878
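
The maccess changes only reorder annotations: grsecurity's __force_user places the __force and __user qualifiers after the base type, which its stricter sparse configuration expects for these deliberate kernel-pointer-as-user-pointer casts. A sketch of how such annotations are wired up for the sparse checker, simplified from the kernel's <linux/compiler.h>; __force_user is assumed here to be the composite of the two:

    /* The __user/__force annotations used above as seen by 'sparse'
     * (which defines __CHECKER__); on a normal compile they expand to
     * nothing.  copy_from() is a stand-in for __copy_from_user_inatomic().
     */
    #include <stdio.h>
    #include <string.h>

    #ifdef __CHECKER__
    # define __user   __attribute__((noderef, address_space(1)))
    # define __force  __attribute__((force))
    #else
    # define __user
    # define __force
    #endif
    #define __force_user __force __user   /* assumed grsec composite */

    static long copy_from(void *dst, const void __user *src, size_t n)
    {
        memcpy(dst, (const void __force *)src, n);
        return 0;
    }

    int main(void)
    {
        const char kernel_buf[] = "hello";
        char out[8];

        /* Mirrors __probe_kernel_read() above: a kernel pointer is
         * deliberately treated as a user pointer, with __force telling
         * sparse the address-space change is intended. */
        copy_from(out, (const void __force_user *)kernel_buf, sizeof(kernel_buf));
        printf("%s\n", out);
        return 0;
    }
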
73879diff --git a/mm/madvise.c b/mm/madvise.c
73880index 03dfa5c..b032917 100644
73881--- a/mm/madvise.c
73882+++ b/mm/madvise.c
73883@@ -48,6 +48,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
73884 pgoff_t pgoff;
73885 unsigned long new_flags = vma->vm_flags;
73886
73887+#ifdef CONFIG_PAX_SEGMEXEC
73888+ struct vm_area_struct *vma_m;
73889+#endif
73890+
73891 switch (behavior) {
73892 case MADV_NORMAL:
73893 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
73894@@ -123,6 +127,13 @@ success:
73895 /*
73896 * vm_flags is protected by the mmap_sem held in write mode.
73897 */
73898+
73899+#ifdef CONFIG_PAX_SEGMEXEC
73900+ vma_m = pax_find_mirror_vma(vma);
73901+ if (vma_m)
73902+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
73903+#endif
73904+
73905 vma->vm_flags = new_flags;
73906
73907 out:
73908@@ -181,6 +192,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
73909 struct vm_area_struct ** prev,
73910 unsigned long start, unsigned long end)
73911 {
73912+
73913+#ifdef CONFIG_PAX_SEGMEXEC
73914+ struct vm_area_struct *vma_m;
73915+#endif
73916+
73917 *prev = vma;
73918 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
73919 return -EINVAL;
73920@@ -193,6 +209,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
73921 zap_page_range(vma, start, end - start, &details);
73922 } else
73923 zap_page_range(vma, start, end - start, NULL);
73924+
73925+#ifdef CONFIG_PAX_SEGMEXEC
73926+ vma_m = pax_find_mirror_vma(vma);
73927+ if (vma_m) {
73928+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
73929+ struct zap_details details = {
73930+ .nonlinear_vma = vma_m,
73931+ .last_index = ULONG_MAX,
73932+ };
73933+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
73934+ } else
73935+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
73936+ }
73937+#endif
73938+
73939 return 0;
73940 }
73941
73942@@ -397,6 +428,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
73943 if (end < start)
73944 goto out;
73945
73946+#ifdef CONFIG_PAX_SEGMEXEC
73947+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
73948+ if (end > SEGMEXEC_TASK_SIZE)
73949+ goto out;
73950+ } else
73951+#endif
73952+
73953+ if (end > TASK_SIZE)
73954+ goto out;
73955+
73956 error = 0;
73957 if (end == start)
73958 goto out;
73959diff --git a/mm/memory-failure.c b/mm/memory-failure.c
73960index 8b20278..05dac18 100644
73961--- a/mm/memory-failure.c
73962+++ b/mm/memory-failure.c
73963@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
73964
73965 int sysctl_memory_failure_recovery __read_mostly = 1;
73966
73967-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
73968+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
73969
73970 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
73971
73972@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
73973 pfn, t->comm, t->pid);
73974 si.si_signo = SIGBUS;
73975 si.si_errno = 0;
73976- si.si_addr = (void *)addr;
73977+ si.si_addr = (void __user *)addr;
73978 #ifdef __ARCH_SI_TRAPNO
73979 si.si_trapno = trapno;
73980 #endif
73981@@ -1040,7 +1040,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
73982 }
73983
73984 nr_pages = 1 << compound_trans_order(hpage);
73985- atomic_long_add(nr_pages, &mce_bad_pages);
73986+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
73987
73988 /*
73989 * We need/can do nothing about count=0 pages.
73990@@ -1070,7 +1070,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
73991 if (!PageHWPoison(hpage)
73992 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
73993 || (p != hpage && TestSetPageHWPoison(hpage))) {
73994- atomic_long_sub(nr_pages, &mce_bad_pages);
73995+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
73996 return 0;
73997 }
73998 set_page_hwpoison_huge_page(hpage);
73999@@ -1128,7 +1128,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
74000 }
74001 if (hwpoison_filter(p)) {
74002 if (TestClearPageHWPoison(p))
74003- atomic_long_sub(nr_pages, &mce_bad_pages);
74004+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
74005 unlock_page(hpage);
74006 put_page(hpage);
74007 return 0;
74008@@ -1323,7 +1323,7 @@ int unpoison_memory(unsigned long pfn)
74009 return 0;
74010 }
74011 if (TestClearPageHWPoison(p))
74012- atomic_long_sub(nr_pages, &mce_bad_pages);
74013+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
74014 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
74015 return 0;
74016 }
74017@@ -1337,7 +1337,7 @@ int unpoison_memory(unsigned long pfn)
74018 */
74019 if (TestClearPageHWPoison(page)) {
74020 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
74021- atomic_long_sub(nr_pages, &mce_bad_pages);
74022+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
74023 freeit = 1;
74024 if (PageHuge(page))
74025 clear_page_hwpoison_huge_page(page);
74026@@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
74027 }
74028 done:
74029 if (!PageHWPoison(hpage))
74030- atomic_long_add(1 << compound_trans_order(hpage),
74031+ atomic_long_add_unchecked(1 << compound_trans_order(hpage),
74032 &mce_bad_pages);
74033 set_page_hwpoison_huge_page(hpage);
74034 dequeue_hwpoisoned_huge_page(hpage);
74035@@ -1582,7 +1582,7 @@ int soft_offline_page(struct page *page, int flags)
74036 return ret;
74037
74038 done:
74039- atomic_long_add(1, &mce_bad_pages);
74040+ atomic_long_add_unchecked(1, &mce_bad_pages);
74041 SetPageHWPoison(page);
74042 /* keep elevated page count for bad page */
74043 return ret;
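
The atomic_long_unchecked_t conversions above follow from PaX's refcount-overflow protection: ordinary atomics are instrumented to trap on overflow (a wrapped reference count is a use-after-free primitive), while pure statistics such as mce_bad_pages opt out via the _unchecked variants and keep plain wrapping semantics. A C11 sketch of the distinction; the load-then-add check below is racy and only illustrates the policy (the real implementation detects overflow atomically and undoes it):

    /* Checked vs. unchecked atomic add, mirroring the split used above.
     * C11 atomics stand in for the kernel's arch implementations; the
     * refuse-and-report behaviour is illustrative only.
     */
    #include <stdio.h>
    #include <limits.h>
    #include <stdatomic.h>

    static void atomic_long_add_checked(long i, atomic_long *v)
    {
        long old = atomic_load(v);
        if (old > LONG_MAX - i) {                 /* would overflow: refuse */
            fprintf(stderr, "refcount overflow detected, not wrapping\n");
            return;
        }
        atomic_fetch_add(v, i);
    }

    static void atomic_long_add_unchecked(long i, atomic_long *v)
    {
        atomic_fetch_add(v, i);                   /* statistics: wrap is fine */
    }

    int main(void)
    {
        atomic_long bad_pages = LONG_MAX - 1;

        atomic_long_add_checked(8, &bad_pages);     /* detected and refused */
        atomic_long_add_unchecked(8, &bad_pages);   /* silently wraps */
        printf("counter now %ld\n", atomic_load(&bad_pages));
        return 0;
    }
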
74044diff --git a/mm/memory.c b/mm/memory.c
74045index f2973b2..fd020a7 100644
74046--- a/mm/memory.c
74047+++ b/mm/memory.c
74048@@ -431,6 +431,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
74049 free_pte_range(tlb, pmd, addr);
74050 } while (pmd++, addr = next, addr != end);
74051
74052+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
74053 start &= PUD_MASK;
74054 if (start < floor)
74055 return;
74056@@ -445,6 +446,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
74057 pmd = pmd_offset(pud, start);
74058 pud_clear(pud);
74059 pmd_free_tlb(tlb, pmd, start);
74060+#endif
74061+
74062 }
74063
74064 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
74065@@ -464,6 +467,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
74066 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
74067 } while (pud++, addr = next, addr != end);
74068
74069+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
74070 start &= PGDIR_MASK;
74071 if (start < floor)
74072 return;
74073@@ -478,6 +482,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
74074 pud = pud_offset(pgd, start);
74075 pgd_clear(pgd);
74076 pud_free_tlb(tlb, pud, start);
74077+#endif
74078+
74079 }
74080
74081 /*
74082@@ -1626,12 +1632,6 @@ no_page_table:
74083 return page;
74084 }
74085
74086-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
74087-{
74088- return stack_guard_page_start(vma, addr) ||
74089- stack_guard_page_end(vma, addr+PAGE_SIZE);
74090-}
74091-
74092 /**
74093 * __get_user_pages() - pin user pages in memory
74094 * @tsk: task_struct of target task
74095@@ -1704,10 +1704,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
74096 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
74097 i = 0;
74098
74099- do {
74100+ while (nr_pages) {
74101 struct vm_area_struct *vma;
74102
74103- vma = find_extend_vma(mm, start);
74104+ vma = find_vma(mm, start);
74105 if (!vma && in_gate_area(mm, start)) {
74106 unsigned long pg = start & PAGE_MASK;
74107 pgd_t *pgd;
74108@@ -1755,7 +1755,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
74109 goto next_page;
74110 }
74111
74112- if (!vma ||
74113+ if (!vma || start < vma->vm_start ||
74114 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
74115 !(vm_flags & vma->vm_flags))
74116 return i ? : -EFAULT;
74117@@ -1782,11 +1782,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
74118 int ret;
74119 unsigned int fault_flags = 0;
74120
74121- /* For mlock, just skip the stack guard page. */
74122- if (foll_flags & FOLL_MLOCK) {
74123- if (stack_guard_page(vma, start))
74124- goto next_page;
74125- }
74126 if (foll_flags & FOLL_WRITE)
74127 fault_flags |= FAULT_FLAG_WRITE;
74128 if (nonblocking)
74129@@ -1860,7 +1855,7 @@ next_page:
74130 start += PAGE_SIZE;
74131 nr_pages--;
74132 } while (nr_pages && start < vma->vm_end);
74133- } while (nr_pages);
74134+ }
74135 return i;
74136 }
74137 EXPORT_SYMBOL(__get_user_pages);
74138@@ -2067,6 +2062,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
74139 page_add_file_rmap(page);
74140 set_pte_at(mm, addr, pte, mk_pte(page, prot));
74141
74142+#ifdef CONFIG_PAX_SEGMEXEC
74143+ pax_mirror_file_pte(vma, addr, page, ptl);
74144+#endif
74145+
74146 retval = 0;
74147 pte_unmap_unlock(pte, ptl);
74148 return retval;
74149@@ -2111,9 +2110,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
74150 if (!page_count(page))
74151 return -EINVAL;
74152 if (!(vma->vm_flags & VM_MIXEDMAP)) {
74153+
74154+#ifdef CONFIG_PAX_SEGMEXEC
74155+ struct vm_area_struct *vma_m;
74156+#endif
74157+
74158 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
74159 BUG_ON(vma->vm_flags & VM_PFNMAP);
74160 vma->vm_flags |= VM_MIXEDMAP;
74161+
74162+#ifdef CONFIG_PAX_SEGMEXEC
74163+ vma_m = pax_find_mirror_vma(vma);
74164+ if (vma_m)
74165+ vma_m->vm_flags |= VM_MIXEDMAP;
74166+#endif
74167+
74168 }
74169 return insert_page(vma, addr, page, vma->vm_page_prot);
74170 }
74171@@ -2196,6 +2207,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
74172 unsigned long pfn)
74173 {
74174 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
74175+ BUG_ON(vma->vm_mirror);
74176
74177 if (addr < vma->vm_start || addr >= vma->vm_end)
74178 return -EFAULT;
74179@@ -2396,7 +2408,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
74180
74181 BUG_ON(pud_huge(*pud));
74182
74183- pmd = pmd_alloc(mm, pud, addr);
74184+ pmd = (mm == &init_mm) ?
74185+ pmd_alloc_kernel(mm, pud, addr) :
74186+ pmd_alloc(mm, pud, addr);
74187 if (!pmd)
74188 return -ENOMEM;
74189 do {
74190@@ -2416,7 +2430,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
74191 unsigned long next;
74192 int err;
74193
74194- pud = pud_alloc(mm, pgd, addr);
74195+ pud = (mm == &init_mm) ?
74196+ pud_alloc_kernel(mm, pgd, addr) :
74197+ pud_alloc(mm, pgd, addr);
74198 if (!pud)
74199 return -ENOMEM;
74200 do {
74201@@ -2504,6 +2520,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
74202 copy_user_highpage(dst, src, va, vma);
74203 }
74204
74205+#ifdef CONFIG_PAX_SEGMEXEC
74206+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
74207+{
74208+ struct mm_struct *mm = vma->vm_mm;
74209+ spinlock_t *ptl;
74210+ pte_t *pte, entry;
74211+
74212+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
74213+ entry = *pte;
74214+ if (!pte_present(entry)) {
74215+ if (!pte_none(entry)) {
74216+ BUG_ON(pte_file(entry));
74217+ free_swap_and_cache(pte_to_swp_entry(entry));
74218+ pte_clear_not_present_full(mm, address, pte, 0);
74219+ }
74220+ } else {
74221+ struct page *page;
74222+
74223+ flush_cache_page(vma, address, pte_pfn(entry));
74224+ entry = ptep_clear_flush(vma, address, pte);
74225+ BUG_ON(pte_dirty(entry));
74226+ page = vm_normal_page(vma, address, entry);
74227+ if (page) {
74228+ update_hiwater_rss(mm);
74229+ if (PageAnon(page))
74230+ dec_mm_counter_fast(mm, MM_ANONPAGES);
74231+ else
74232+ dec_mm_counter_fast(mm, MM_FILEPAGES);
74233+ page_remove_rmap(page);
74234+ page_cache_release(page);
74235+ }
74236+ }
74237+ pte_unmap_unlock(pte, ptl);
74238+}
74239+
74240+/* PaX: if the vma is mirrored, synchronize the mirror's PTE.
74241+ *
74242+ * The ptl of the lower mapped page is held on entry and is released neither on
74243+ * exit nor inside, so PTE state changes (swapout, mremap, munmap, etc.) stay atomic.
74244+ */
74245+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
74246+{
74247+ struct mm_struct *mm = vma->vm_mm;
74248+ unsigned long address_m;
74249+ spinlock_t *ptl_m;
74250+ struct vm_area_struct *vma_m;
74251+ pmd_t *pmd_m;
74252+ pte_t *pte_m, entry_m;
74253+
74254+ BUG_ON(!page_m || !PageAnon(page_m));
74255+
74256+ vma_m = pax_find_mirror_vma(vma);
74257+ if (!vma_m)
74258+ return;
74259+
74260+ BUG_ON(!PageLocked(page_m));
74261+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
74262+ address_m = address + SEGMEXEC_TASK_SIZE;
74263+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
74264+ pte_m = pte_offset_map(pmd_m, address_m);
74265+ ptl_m = pte_lockptr(mm, pmd_m);
74266+ if (ptl != ptl_m) {
74267+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
74268+ if (!pte_none(*pte_m))
74269+ goto out;
74270+ }
74271+
74272+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
74273+ page_cache_get(page_m);
74274+ page_add_anon_rmap(page_m, vma_m, address_m);
74275+ inc_mm_counter_fast(mm, MM_ANONPAGES);
74276+ set_pte_at(mm, address_m, pte_m, entry_m);
74277+ update_mmu_cache(vma_m, address_m, entry_m);
74278+out:
74279+ if (ptl != ptl_m)
74280+ spin_unlock(ptl_m);
74281+ pte_unmap(pte_m);
74282+ unlock_page(page_m);
74283+}
74284+
74285+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
74286+{
74287+ struct mm_struct *mm = vma->vm_mm;
74288+ unsigned long address_m;
74289+ spinlock_t *ptl_m;
74290+ struct vm_area_struct *vma_m;
74291+ pmd_t *pmd_m;
74292+ pte_t *pte_m, entry_m;
74293+
74294+ BUG_ON(!page_m || PageAnon(page_m));
74295+
74296+ vma_m = pax_find_mirror_vma(vma);
74297+ if (!vma_m)
74298+ return;
74299+
74300+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
74301+ address_m = address + SEGMEXEC_TASK_SIZE;
74302+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
74303+ pte_m = pte_offset_map(pmd_m, address_m);
74304+ ptl_m = pte_lockptr(mm, pmd_m);
74305+ if (ptl != ptl_m) {
74306+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
74307+ if (!pte_none(*pte_m))
74308+ goto out;
74309+ }
74310+
74311+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
74312+ page_cache_get(page_m);
74313+ page_add_file_rmap(page_m);
74314+ inc_mm_counter_fast(mm, MM_FILEPAGES);
74315+ set_pte_at(mm, address_m, pte_m, entry_m);
74316+ update_mmu_cache(vma_m, address_m, entry_m);
74317+out:
74318+ if (ptl != ptl_m)
74319+ spin_unlock(ptl_m);
74320+ pte_unmap(pte_m);
74321+}
74322+
74323+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
74324+{
74325+ struct mm_struct *mm = vma->vm_mm;
74326+ unsigned long address_m;
74327+ spinlock_t *ptl_m;
74328+ struct vm_area_struct *vma_m;
74329+ pmd_t *pmd_m;
74330+ pte_t *pte_m, entry_m;
74331+
74332+ vma_m = pax_find_mirror_vma(vma);
74333+ if (!vma_m)
74334+ return;
74335+
74336+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
74337+ address_m = address + SEGMEXEC_TASK_SIZE;
74338+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
74339+ pte_m = pte_offset_map(pmd_m, address_m);
74340+ ptl_m = pte_lockptr(mm, pmd_m);
74341+ if (ptl != ptl_m) {
74342+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
74343+ if (!pte_none(*pte_m))
74344+ goto out;
74345+ }
74346+
74347+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
74348+ set_pte_at(mm, address_m, pte_m, entry_m);
74349+out:
74350+ if (ptl != ptl_m)
74351+ spin_unlock(ptl_m);
74352+ pte_unmap(pte_m);
74353+}
74354+
74355+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
74356+{
74357+ struct page *page_m;
74358+ pte_t entry;
74359+
74360+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
74361+ goto out;
74362+
74363+ entry = *pte;
74364+ page_m = vm_normal_page(vma, address, entry);
74365+ if (!page_m)
74366+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
74367+ else if (PageAnon(page_m)) {
74368+ if (pax_find_mirror_vma(vma)) {
74369+ pte_unmap_unlock(pte, ptl);
74370+ lock_page(page_m);
74371+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
74372+ if (pte_same(entry, *pte))
74373+ pax_mirror_anon_pte(vma, address, page_m, ptl);
74374+ else
74375+ unlock_page(page_m);
74376+ }
74377+ } else
74378+ pax_mirror_file_pte(vma, address, page_m, ptl);
74379+
74380+out:
74381+ pte_unmap_unlock(pte, ptl);
74382+}
74383+#endif
74384+
74385 /*
74386 * This routine handles present pages, when users try to write
74387 * to a shared page. It is done by copying the page to a new address
74388@@ -2720,6 +2916,12 @@ gotten:
74389 */
74390 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
74391 if (likely(pte_same(*page_table, orig_pte))) {
74392+
74393+#ifdef CONFIG_PAX_SEGMEXEC
74394+ if (pax_find_mirror_vma(vma))
74395+ BUG_ON(!trylock_page(new_page));
74396+#endif
74397+
74398 if (old_page) {
74399 if (!PageAnon(old_page)) {
74400 dec_mm_counter_fast(mm, MM_FILEPAGES);
74401@@ -2771,6 +2973,10 @@ gotten:
74402 page_remove_rmap(old_page);
74403 }
74404
74405+#ifdef CONFIG_PAX_SEGMEXEC
74406+ pax_mirror_anon_pte(vma, address, new_page, ptl);
74407+#endif
74408+
74409 /* Free the old page.. */
74410 new_page = old_page;
74411 ret |= VM_FAULT_WRITE;
74412@@ -3051,6 +3257,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
74413 swap_free(entry);
74414 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
74415 try_to_free_swap(page);
74416+
74417+#ifdef CONFIG_PAX_SEGMEXEC
74418+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
74419+#endif
74420+
74421 unlock_page(page);
74422 if (swapcache) {
74423 /*
74424@@ -3074,6 +3285,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
74425
74426 /* No need to invalidate - it was non-present before */
74427 update_mmu_cache(vma, address, page_table);
74428+
74429+#ifdef CONFIG_PAX_SEGMEXEC
74430+ pax_mirror_anon_pte(vma, address, page, ptl);
74431+#endif
74432+
74433 unlock:
74434 pte_unmap_unlock(page_table, ptl);
74435 out:
74436@@ -3093,40 +3309,6 @@ out_release:
74437 }
74438
74439 /*
74440- * This is like a special single-page "expand_{down|up}wards()",
74441- * except we must first make sure that 'address{-|+}PAGE_SIZE'
74442- * doesn't hit another vma.
74443- */
74444-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
74445-{
74446- address &= PAGE_MASK;
74447- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
74448- struct vm_area_struct *prev = vma->vm_prev;
74449-
74450- /*
74451- * Is there a mapping abutting this one below?
74452- *
74453- * That's only ok if it's the same stack mapping
74454- * that has gotten split..
74455- */
74456- if (prev && prev->vm_end == address)
74457- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
74458-
74459- expand_downwards(vma, address - PAGE_SIZE);
74460- }
74461- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
74462- struct vm_area_struct *next = vma->vm_next;
74463-
74464- /* As VM_GROWSDOWN but s/below/above/ */
74465- if (next && next->vm_start == address + PAGE_SIZE)
74466- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
74467-
74468- expand_upwards(vma, address + PAGE_SIZE);
74469- }
74470- return 0;
74471-}
74472-
74473-/*
74474 * We enter with non-exclusive mmap_sem (to exclude vma changes,
74475 * but allow concurrent faults), and pte mapped but not yet locked.
74476 * We return with mmap_sem still held, but pte unmapped and unlocked.
74477@@ -3135,27 +3317,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
74478 unsigned long address, pte_t *page_table, pmd_t *pmd,
74479 unsigned int flags)
74480 {
74481- struct page *page;
74482+ struct page *page = NULL;
74483 spinlock_t *ptl;
74484 pte_t entry;
74485
74486- pte_unmap(page_table);
74487-
74488- /* Check if we need to add a guard page to the stack */
74489- if (check_stack_guard_page(vma, address) < 0)
74490- return VM_FAULT_SIGBUS;
74491-
74492- /* Use the zero-page for reads */
74493 if (!(flags & FAULT_FLAG_WRITE)) {
74494 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
74495 vma->vm_page_prot));
74496- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
74497+ ptl = pte_lockptr(mm, pmd);
74498+ spin_lock(ptl);
74499 if (!pte_none(*page_table))
74500 goto unlock;
74501 goto setpte;
74502 }
74503
74504 /* Allocate our own private page. */
74505+ pte_unmap(page_table);
74506+
74507 if (unlikely(anon_vma_prepare(vma)))
74508 goto oom;
74509 page = alloc_zeroed_user_highpage_movable(vma, address);
74510@@ -3174,6 +3352,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
74511 if (!pte_none(*page_table))
74512 goto release;
74513
74514+#ifdef CONFIG_PAX_SEGMEXEC
74515+ if (pax_find_mirror_vma(vma))
74516+ BUG_ON(!trylock_page(page));
74517+#endif
74518+
74519 inc_mm_counter_fast(mm, MM_ANONPAGES);
74520 page_add_new_anon_rmap(page, vma, address);
74521 setpte:
74522@@ -3181,6 +3364,12 @@ setpte:
74523
74524 /* No need to invalidate - it was non-present before */
74525 update_mmu_cache(vma, address, page_table);
74526+
74527+#ifdef CONFIG_PAX_SEGMEXEC
74528+ if (page)
74529+ pax_mirror_anon_pte(vma, address, page, ptl);
74530+#endif
74531+
74532 unlock:
74533 pte_unmap_unlock(page_table, ptl);
74534 return 0;
74535@@ -3324,6 +3513,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
74536 */
74537 /* Only go through if we didn't race with anybody else... */
74538 if (likely(pte_same(*page_table, orig_pte))) {
74539+
74540+#ifdef CONFIG_PAX_SEGMEXEC
74541+ if (anon && pax_find_mirror_vma(vma))
74542+ BUG_ON(!trylock_page(page));
74543+#endif
74544+
74545 flush_icache_page(vma, page);
74546 entry = mk_pte(page, vma->vm_page_prot);
74547 if (flags & FAULT_FLAG_WRITE)
74548@@ -3343,6 +3538,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
74549
74550 /* no need to invalidate: a not-present page won't be cached */
74551 update_mmu_cache(vma, address, page_table);
74552+
74553+#ifdef CONFIG_PAX_SEGMEXEC
74554+ if (anon)
74555+ pax_mirror_anon_pte(vma, address, page, ptl);
74556+ else
74557+ pax_mirror_file_pte(vma, address, page, ptl);
74558+#endif
74559+
74560 } else {
74561 if (cow_page)
74562 mem_cgroup_uncharge_page(cow_page);
74563@@ -3497,6 +3700,12 @@ int handle_pte_fault(struct mm_struct *mm,
74564 if (flags & FAULT_FLAG_WRITE)
74565 flush_tlb_fix_spurious_fault(vma, address);
74566 }
74567+
74568+#ifdef CONFIG_PAX_SEGMEXEC
74569+ pax_mirror_pte(vma, address, pte, pmd, ptl);
74570+ return 0;
74571+#endif
74572+
74573 unlock:
74574 pte_unmap_unlock(pte, ptl);
74575 return 0;
74576@@ -3513,6 +3722,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
74577 pmd_t *pmd;
74578 pte_t *pte;
74579
74580+#ifdef CONFIG_PAX_SEGMEXEC
74581+ struct vm_area_struct *vma_m;
74582+#endif
74583+
74584 __set_current_state(TASK_RUNNING);
74585
74586 count_vm_event(PGFAULT);
74587@@ -3524,6 +3737,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
74588 if (unlikely(is_vm_hugetlb_page(vma)))
74589 return hugetlb_fault(mm, vma, address, flags);
74590
74591+#ifdef CONFIG_PAX_SEGMEXEC
74592+ vma_m = pax_find_mirror_vma(vma);
74593+ if (vma_m) {
74594+ unsigned long address_m;
74595+ pgd_t *pgd_m;
74596+ pud_t *pud_m;
74597+ pmd_t *pmd_m;
74598+
74599+ if (vma->vm_start > vma_m->vm_start) {
74600+ address_m = address;
74601+ address -= SEGMEXEC_TASK_SIZE;
74602+ vma = vma_m;
74603+ } else
74604+ address_m = address + SEGMEXEC_TASK_SIZE;
74605+
74606+ pgd_m = pgd_offset(mm, address_m);
74607+ pud_m = pud_alloc(mm, pgd_m, address_m);
74608+ if (!pud_m)
74609+ return VM_FAULT_OOM;
74610+ pmd_m = pmd_alloc(mm, pud_m, address_m);
74611+ if (!pmd_m)
74612+ return VM_FAULT_OOM;
74613+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
74614+ return VM_FAULT_OOM;
74615+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
74616+ }
74617+#endif
74618+
74619 retry:
74620 pgd = pgd_offset(mm, address);
74621 pud = pud_alloc(mm, pgd, address);
74622@@ -3565,7 +3806,7 @@ retry:
74623 * run pte_offset_map on the pmd, if an huge pmd could
74624 * materialize from under us from a different thread.
74625 */
74626- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
74627+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
74628 return VM_FAULT_OOM;
74629 /* if an huge pmd materialized from under us just retry later */
74630 if (unlikely(pmd_trans_huge(*pmd)))
74631@@ -3602,6 +3843,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
74632 spin_unlock(&mm->page_table_lock);
74633 return 0;
74634 }
74635+
74636+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
74637+{
74638+ pud_t *new = pud_alloc_one(mm, address);
74639+ if (!new)
74640+ return -ENOMEM;
74641+
74642+ smp_wmb(); /* See comment in __pte_alloc */
74643+
74644+ spin_lock(&mm->page_table_lock);
74645+ if (pgd_present(*pgd)) /* Another has populated it */
74646+ pud_free(mm, new);
74647+ else
74648+ pgd_populate_kernel(mm, pgd, new);
74649+ spin_unlock(&mm->page_table_lock);
74650+ return 0;
74651+}
74652 #endif /* __PAGETABLE_PUD_FOLDED */
74653
74654 #ifndef __PAGETABLE_PMD_FOLDED
74655@@ -3632,6 +3890,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
74656 spin_unlock(&mm->page_table_lock);
74657 return 0;
74658 }
74659+
74660+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
74661+{
74662+ pmd_t *new = pmd_alloc_one(mm, address);
74663+ if (!new)
74664+ return -ENOMEM;
74665+
74666+ smp_wmb(); /* See comment in __pte_alloc */
74667+
74668+ spin_lock(&mm->page_table_lock);
74669+#ifndef __ARCH_HAS_4LEVEL_HACK
74670+ if (pud_present(*pud)) /* Another has populated it */
74671+ pmd_free(mm, new);
74672+ else
74673+ pud_populate_kernel(mm, pud, new);
74674+#else
74675+ if (pgd_present(*pud)) /* Another has populated it */
74676+ pmd_free(mm, new);
74677+ else
74678+ pgd_populate_kernel(mm, pud, new);
74679+#endif /* __ARCH_HAS_4LEVEL_HACK */
74680+ spin_unlock(&mm->page_table_lock);
74681+ return 0;
74682+}
74683 #endif /* __PAGETABLE_PMD_FOLDED */
74684
74685 int make_pages_present(unsigned long addr, unsigned long end)
74686@@ -3669,7 +3951,7 @@ static int __init gate_vma_init(void)
74687 gate_vma.vm_start = FIXADDR_USER_START;
74688 gate_vma.vm_end = FIXADDR_USER_END;
74689 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
74690- gate_vma.vm_page_prot = __P101;
74691+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
74692
74693 return 0;
74694 }
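
The new __pud_alloc_kernel()/__pmd_alloc_kernel() mirror their non-kernel counterparts but populate via pgd_populate_kernel()/pud_populate_kernel(), matching the apply_to_*_range() hunks earlier that pick the _kernel allocators when mm == &init_mm. The allocation itself is the kernel's usual optimistic pattern: allocate with no locks held, then either install the new table under the lock or free it if another thread won the race. A pthread sketch of that pattern, with hypothetical names:

    /* Allocate-then-check pattern of __pmd_alloc_kernel() above: allocate
     * outside the lock (pmd_alloc_one() may sleep), then install or discard
     * under page_table_lock.  In the kernel, smp_wmb() additionally orders
     * the table's zero-fill against publishing its pointer.
     */
    #include <stdio.h>
    #include <stdlib.h>
    #include <pthread.h>

    static pthread_mutex_t page_table_lock = PTHREAD_MUTEX_INITIALIZER;
    static void *slot;                      /* the pud entry being populated */

    static int table_alloc_install(void)
    {
        void *new = calloc(1, 4096);        /* pmd_alloc_one(): no locks held */
        if (!new)
            return -1;

        pthread_mutex_lock(&page_table_lock);
        if (slot)                           /* another thread populated it */
            free(new);
        else
            slot = new;                     /* pud_populate_kernel() */
        pthread_mutex_unlock(&page_table_lock);
        return 0;
    }

    static void *worker(void *arg)
    {
        (void)arg;
        table_alloc_install();
        return NULL;
    }

    int main(void)
    {
        pthread_t a, b;

        pthread_create(&a, NULL, worker, NULL);
        pthread_create(&b, NULL, worker, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("slot populated exactly once: %p\n", slot);
        return 0;
    }
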
74695diff --git a/mm/mempolicy.c b/mm/mempolicy.c
74696index 002c281..9429765 100644
74697--- a/mm/mempolicy.c
74698+++ b/mm/mempolicy.c
74699@@ -655,6 +655,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
74700 unsigned long vmstart;
74701 unsigned long vmend;
74702
74703+#ifdef CONFIG_PAX_SEGMEXEC
74704+ struct vm_area_struct *vma_m;
74705+#endif
74706+
74707 vma = find_vma(mm, start);
74708 if (!vma || vma->vm_start > start)
74709 return -EFAULT;
74710@@ -691,9 +695,20 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
74711 if (err)
74712 goto out;
74713 }
74714+
74715 err = vma_replace_policy(vma, new_pol);
74716 if (err)
74717 goto out;
74718+
74719+#ifdef CONFIG_PAX_SEGMEXEC
74720+ vma_m = pax_find_mirror_vma(vma);
74721+ if (vma_m) {
74722+ err = vma_replace_policy(vma_m, new_pol);
74723+ if (err)
74724+ goto out;
74725+ }
74726+#endif
74727+
74728 }
74729
74730 out:
74731@@ -1150,6 +1165,17 @@ static long do_mbind(unsigned long start, unsigned long len,
74732
74733 if (end < start)
74734 return -EINVAL;
74735+
74736+#ifdef CONFIG_PAX_SEGMEXEC
74737+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
74738+ if (end > SEGMEXEC_TASK_SIZE)
74739+ return -EINVAL;
74740+ } else
74741+#endif
74742+
74743+ if (end > TASK_SIZE)
74744+ return -EINVAL;
74745+
74746 if (end == start)
74747 return 0;
74748
74749@@ -1373,8 +1399,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
74750 */
74751 tcred = __task_cred(task);
74752 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
74753- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
74754- !capable(CAP_SYS_NICE)) {
74755+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
74756 rcu_read_unlock();
74757 err = -EPERM;
74758 goto out_put;
74759@@ -1405,6 +1430,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
74760 goto out;
74761 }
74762
74763+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
74764+ if (mm != current->mm &&
74765+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
74766+ mmput(mm);
74767+ err = -EPERM;
74768+ goto out;
74769+ }
74770+#endif
74771+
74772 err = do_migrate_pages(mm, old, new,
74773 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
74774
74775diff --git a/mm/migrate.c b/mm/migrate.c
74776index 77ed2d7..317d528 100644
74777--- a/mm/migrate.c
74778+++ b/mm/migrate.c
74779@@ -1350,8 +1350,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
74780 */
74781 tcred = __task_cred(task);
74782 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
74783- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
74784- !capable(CAP_SYS_NICE)) {
74785+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
74786 rcu_read_unlock();
74787 err = -EPERM;
74788 goto out;
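
The mempolicy.c and migrate.c hunks tighten the same permission check for operating on another process's pages: the cred->uid vs. tcred->uid comparison is dropped, so merely sharing a real UID with the target no longer grants access; the caller must now match the target's saved or effective identity, or hold CAP_SYS_NICE. The predicate in isolation, with uid_t standing in for the kernel's kuid_t/uid_eq():

    /* The tightened migrate_pages()/move_pages() permission check from
     * the two hunks above, lifted into a standalone function.
     */
    #include <stdio.h>
    #include <stdbool.h>
    #include <sys/types.h>

    struct caller_cred { uid_t uid, euid; };
    struct target_cred { uid_t uid, suid; };

    static bool may_migrate(const struct caller_cred *c,
                            const struct target_cred *t,
                            bool has_cap_sys_nice)
    {
        return c->euid == t->suid || c->euid == t->uid ||
               c->uid  == t->suid || has_cap_sys_nice;
        /* the removed clause was: || c->uid == t->uid */
    }

    int main(void)
    {
        /* Same real UID as the target, but no euid/suid match: the old
         * check allowed this via the dropped uid-vs-uid comparison. */
        struct caller_cred caller = { .uid = 1000, .euid = 2000 };
        struct target_cred target = { .uid = 1000, .suid = 0 };

        printf("allowed: %d\n", may_migrate(&caller, &target, false)); /* 0 */
        return 0;
    }
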
74789diff --git a/mm/mlock.c b/mm/mlock.c
74790index f0b9ce5..da8d069 100644
74791--- a/mm/mlock.c
74792+++ b/mm/mlock.c
74793@@ -13,6 +13,7 @@
74794 #include <linux/pagemap.h>
74795 #include <linux/mempolicy.h>
74796 #include <linux/syscalls.h>
74797+#include <linux/security.h>
74798 #include <linux/sched.h>
74799 #include <linux/export.h>
74800 #include <linux/rmap.h>
74801@@ -369,7 +370,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
74802 {
74803 unsigned long nstart, end, tmp;
74804 struct vm_area_struct * vma, * prev;
74805- int error;
74806+ int error = 0;
74807
74808 VM_BUG_ON(start & ~PAGE_MASK);
74809 VM_BUG_ON(len != PAGE_ALIGN(len));
74810@@ -378,6 +379,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
74811 return -EINVAL;
74812 if (end == start)
74813 return 0;
74814+ if (end > TASK_SIZE)
74815+ return -EINVAL;
74816+
74817 vma = find_vma(current->mm, start);
74818 if (!vma || vma->vm_start > start)
74819 return -ENOMEM;
74820@@ -389,6 +393,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
74821 for (nstart = start ; ; ) {
74822 vm_flags_t newflags;
74823
74824+#ifdef CONFIG_PAX_SEGMEXEC
74825+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
74826+ break;
74827+#endif
74828+
74829 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
74830
74831 newflags = vma->vm_flags | VM_LOCKED;
74832@@ -494,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
74833 lock_limit >>= PAGE_SHIFT;
74834
74835 /* check against resource limits */
74836+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
74837 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
74838 error = do_mlock(start, len, 1);
74839 up_write(&current->mm->mmap_sem);
74840@@ -517,17 +527,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
74841 static int do_mlockall(int flags)
74842 {
74843 struct vm_area_struct * vma, * prev = NULL;
74844- unsigned int def_flags = 0;
74845
74846 if (flags & MCL_FUTURE)
74847- def_flags = VM_LOCKED;
74848- current->mm->def_flags = def_flags;
74849+ current->mm->def_flags |= VM_LOCKED;
74850+ else
74851+ current->mm->def_flags &= ~VM_LOCKED;
74852 if (flags == MCL_FUTURE)
74853 goto out;
74854
74855 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
74856 vm_flags_t newflags;
74857
74858+#ifdef CONFIG_PAX_SEGMEXEC
74859+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
74860+ break;
74861+#endif
74862+
74863+ BUG_ON(vma->vm_end > TASK_SIZE);
74864 newflags = vma->vm_flags | VM_LOCKED;
74865 if (!(flags & MCL_CURRENT))
74866 newflags &= ~VM_LOCKED;
74867@@ -560,6 +576,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
74868 lock_limit >>= PAGE_SHIFT;
74869
74870 ret = -ENOMEM;
74871+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
74872 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
74873 capable(CAP_IPC_LOCK))
74874 ret = do_mlockall(flags);
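
The do_mlockall() change is a flag-handling fix: the old code assigned mm->def_flags outright, clobbering any other default flags the mm had accumulated, whereas the patched version sets or clears only VM_LOCKED. The idiom in isolation (flag values illustrative):

    /* Flag update fixed in do_mlockall() above: assignment clobbers
     * unrelated default flags, '|=' / '&= ~' touch only the bit we own.
     */
    #include <stdio.h>

    #define VM_LOCKED     0x00002000UL
    #define VM_NOHUGEPAGE 0x40000000UL     /* an unrelated def_flags bit */

    int main(void)
    {
        unsigned long def_flags = VM_NOHUGEPAGE;  /* set earlier, e.g. by prctl */

        /* old code: def_flags = (MCL_FUTURE requested) ? VM_LOCKED : 0;
         * -> VM_NOHUGEPAGE is lost either way. */
        unsigned long broken = VM_LOCKED;

        /* patched code: modify only VM_LOCKED */
        def_flags |= VM_LOCKED;            /* MCL_FUTURE set   */
        /* def_flags &= ~VM_LOCKED;           MCL_FUTURE clear */

        printf("old result: 0x%lx  new result: 0x%lx\n", broken, def_flags);
        return 0;
    }
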
74875diff --git a/mm/mmap.c b/mm/mmap.c
74876index 9a796c4..21f8e50 100644
74877--- a/mm/mmap.c
74878+++ b/mm/mmap.c
74879@@ -31,6 +31,7 @@
74880 #include <linux/audit.h>
74881 #include <linux/khugepaged.h>
74882 #include <linux/uprobes.h>
74883+#include <linux/random.h>
74884
74885 #include <asm/uaccess.h>
74886 #include <asm/cacheflush.h>
74887@@ -47,6 +48,16 @@
74888 #define arch_rebalance_pgtables(addr, len) (addr)
74889 #endif
74890
74891+static inline void verify_mm_writelocked(struct mm_struct *mm)
74892+{
74893+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
74894+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
74895+ up_read(&mm->mmap_sem);
74896+ BUG();
74897+ }
74898+#endif
74899+}
74900+
74901 static void unmap_region(struct mm_struct *mm,
74902 struct vm_area_struct *vma, struct vm_area_struct *prev,
74903 unsigned long start, unsigned long end);
74904@@ -66,22 +77,32 @@ static void unmap_region(struct mm_struct *mm,
74905 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
74906 *
74907 */
74908-pgprot_t protection_map[16] = {
74909+pgprot_t protection_map[16] __read_only = {
74910 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
74911 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
74912 };
74913
74914-pgprot_t vm_get_page_prot(unsigned long vm_flags)
74915+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
74916 {
74917- return __pgprot(pgprot_val(protection_map[vm_flags &
74918+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
74919 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
74920 pgprot_val(arch_vm_get_page_prot(vm_flags)));
74921+
74922+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
74923+ if (!(__supported_pte_mask & _PAGE_NX) &&
74924+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
74925+ (vm_flags & (VM_READ | VM_WRITE)))
74926+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
74927+#endif
74928+
74929+ return prot;
74930 }
74931 EXPORT_SYMBOL(vm_get_page_prot);
74932
74933 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
74934 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
74935 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
74936+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
74937 /*
74938 * Make sure vm_committed_as in one cacheline and not cacheline shared with
74939 * other variables. It can be updated by several CPUs frequently.
74940@@ -223,6 +244,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
74941 struct vm_area_struct *next = vma->vm_next;
74942
74943 might_sleep();
74944+ BUG_ON(vma->vm_mirror);
74945 if (vma->vm_ops && vma->vm_ops->close)
74946 vma->vm_ops->close(vma);
74947 if (vma->vm_file)
74948@@ -266,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
74949 * not page aligned -Ram Gupta
74950 */
74951 rlim = rlimit(RLIMIT_DATA);
74952+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
74953 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
74954 (mm->end_data - mm->start_data) > rlim)
74955 goto out;
74956@@ -736,6 +759,12 @@ static int
74957 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
74958 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
74959 {
74960+
74961+#ifdef CONFIG_PAX_SEGMEXEC
74962+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
74963+ return 0;
74964+#endif
74965+
74966 if (is_mergeable_vma(vma, file, vm_flags) &&
74967 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
74968 if (vma->vm_pgoff == vm_pgoff)
74969@@ -755,6 +784,12 @@ static int
74970 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
74971 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
74972 {
74973+
74974+#ifdef CONFIG_PAX_SEGMEXEC
74975+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
74976+ return 0;
74977+#endif
74978+
74979 if (is_mergeable_vma(vma, file, vm_flags) &&
74980 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
74981 pgoff_t vm_pglen;
74982@@ -797,13 +832,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
74983 struct vm_area_struct *vma_merge(struct mm_struct *mm,
74984 struct vm_area_struct *prev, unsigned long addr,
74985 unsigned long end, unsigned long vm_flags,
74986- struct anon_vma *anon_vma, struct file *file,
74987+ struct anon_vma *anon_vma, struct file *file,
74988 pgoff_t pgoff, struct mempolicy *policy)
74989 {
74990 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
74991 struct vm_area_struct *area, *next;
74992 int err;
74993
74994+#ifdef CONFIG_PAX_SEGMEXEC
74995+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
74996+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
74997+
74998+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
74999+#endif
75000+
75001 /*
75002 * We later require that vma->vm_flags == vm_flags,
75003 * so this tests vma->vm_flags & VM_SPECIAL, too.
75004@@ -819,6 +861,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
75005 if (next && next->vm_end == end) /* cases 6, 7, 8 */
75006 next = next->vm_next;
75007
75008+#ifdef CONFIG_PAX_SEGMEXEC
75009+ if (prev)
75010+ prev_m = pax_find_mirror_vma(prev);
75011+ if (area)
75012+ area_m = pax_find_mirror_vma(area);
75013+ if (next)
75014+ next_m = pax_find_mirror_vma(next);
75015+#endif
75016+
75017 /*
75018 * Can it merge with the predecessor?
75019 */
75020@@ -838,9 +889,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
75021 /* cases 1, 6 */
75022 err = vma_adjust(prev, prev->vm_start,
75023 next->vm_end, prev->vm_pgoff, NULL);
75024- } else /* cases 2, 5, 7 */
75025+
75026+#ifdef CONFIG_PAX_SEGMEXEC
75027+ if (!err && prev_m)
75028+ err = vma_adjust(prev_m, prev_m->vm_start,
75029+ next_m->vm_end, prev_m->vm_pgoff, NULL);
75030+#endif
75031+
75032+ } else { /* cases 2, 5, 7 */
75033 err = vma_adjust(prev, prev->vm_start,
75034 end, prev->vm_pgoff, NULL);
75035+
75036+#ifdef CONFIG_PAX_SEGMEXEC
75037+ if (!err && prev_m)
75038+ err = vma_adjust(prev_m, prev_m->vm_start,
75039+ end_m, prev_m->vm_pgoff, NULL);
75040+#endif
75041+
75042+ }
75043 if (err)
75044 return NULL;
75045 khugepaged_enter_vma_merge(prev);
75046@@ -854,12 +920,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
75047 mpol_equal(policy, vma_policy(next)) &&
75048 can_vma_merge_before(next, vm_flags,
75049 anon_vma, file, pgoff+pglen)) {
75050- if (prev && addr < prev->vm_end) /* case 4 */
75051+ if (prev && addr < prev->vm_end) { /* case 4 */
75052 err = vma_adjust(prev, prev->vm_start,
75053 addr, prev->vm_pgoff, NULL);
75054- else /* cases 3, 8 */
75055+
75056+#ifdef CONFIG_PAX_SEGMEXEC
75057+ if (!err && prev_m)
75058+ err = vma_adjust(prev_m, prev_m->vm_start,
75059+ addr_m, prev_m->vm_pgoff, NULL);
75060+#endif
75061+
75062+ } else { /* cases 3, 8 */
75063 err = vma_adjust(area, addr, next->vm_end,
75064 next->vm_pgoff - pglen, NULL);
75065+
75066+#ifdef CONFIG_PAX_SEGMEXEC
75067+ if (!err && area_m)
75068+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
75069+ next_m->vm_pgoff - pglen, NULL);
75070+#endif
75071+
75072+ }
75073 if (err)
75074 return NULL;
75075 khugepaged_enter_vma_merge(area);
75076@@ -968,16 +1049,13 @@ none:
75077 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
75078 struct file *file, long pages)
75079 {
75080- const unsigned long stack_flags
75081- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
75082-
75083 mm->total_vm += pages;
75084
75085 if (file) {
75086 mm->shared_vm += pages;
75087 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
75088 mm->exec_vm += pages;
75089- } else if (flags & stack_flags)
75090+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
75091 mm->stack_vm += pages;
75092 }
75093 #endif /* CONFIG_PROC_FS */
75094@@ -1013,7 +1091,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
75095 * (the exception is when the underlying filesystem is noexec
75096 * mounted, in which case we dont add PROT_EXEC.)
75097 */
75098- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
75099+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
75100 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
75101 prot |= PROT_EXEC;
75102
75103@@ -1039,7 +1117,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
75104 /* Obtain the address to map to. we verify (or select) it and ensure
75105 * that it represents a valid section of the address space.
75106 */
75107- addr = get_unmapped_area(file, addr, len, pgoff, flags);
75108+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
75109 if (addr & ~PAGE_MASK)
75110 return addr;
75111
75112@@ -1050,6 +1128,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
75113 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
75114 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
75115
75116+#ifdef CONFIG_PAX_MPROTECT
75117+ if (mm->pax_flags & MF_PAX_MPROTECT) {
75118+#ifndef CONFIG_PAX_MPROTECT_COMPAT
75119+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
75120+ gr_log_rwxmmap(file);
75121+
75122+#ifdef CONFIG_PAX_EMUPLT
75123+ vm_flags &= ~VM_EXEC;
75124+#else
75125+ return -EPERM;
75126+#endif
75127+
75128+ }
75129+
75130+ if (!(vm_flags & VM_EXEC))
75131+ vm_flags &= ~VM_MAYEXEC;
75132+#else
75133+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
75134+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
75135+#endif
75136+ else
75137+ vm_flags &= ~VM_MAYWRITE;
75138+ }
75139+#endif
75140+
75141+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
75142+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
75143+ vm_flags &= ~VM_PAGEEXEC;
75144+#endif
75145+
75146 if (flags & MAP_LOCKED)
75147 if (!can_do_mlock())
75148 return -EPERM;
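
The block above is PAX_MPROTECT's policy applied at mmap() time (non-COMPAT configuration): a request that is both writable and executable is refused, or has VM_EXEC stripped when PLT emulation is available; a mapping created non-executable also loses VM_MAYEXEC, so mprotect() cannot add execute later; and executable mappings lose VM_MAYWRITE, so they cannot later gain write. The decision as a standalone function, with illustrative flag values rather than the kernel's:

    /* PAX_MPROTECT decision from the hunk above as a pure function:
     * W|X requests are refused (or stripped of X under EMUPLT), and the
     * VM_MAY* bits are trimmed so mprotect() cannot re-add the missing
     * permission later.
     */
    #include <stdio.h>

    #define VM_WRITE    0x2UL
    #define VM_EXEC     0x4UL
    #define VM_MAYWRITE 0x20UL
    #define VM_MAYEXEC  0x40UL

    /* returns adjusted flags, or 0 to signal -EPERM */
    static unsigned long pax_filter_flags(unsigned long vm_flags, int emuplt)
    {
        if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
            if (!emuplt)
                return 0;                   /* refuse W+X outright */
            vm_flags &= ~VM_EXEC;           /* CONFIG_PAX_EMUPLT: drop X */
        }
        if (!(vm_flags & VM_EXEC))
            vm_flags &= ~VM_MAYEXEC;        /* mprotect() can't add X later */
        else
            vm_flags &= ~VM_MAYWRITE;       /* X mappings can't gain W later */
        return vm_flags;
    }

    int main(void)
    {
        printf("W+X, no emuplt: 0x%lx (0 = EPERM)\n",
               pax_filter_flags(VM_WRITE | VM_EXEC | VM_MAYWRITE | VM_MAYEXEC, 0));
        printf("R+W data:       0x%lx\n",
               pax_filter_flags(VM_WRITE | VM_MAYWRITE | VM_MAYEXEC, 0));
        return 0;
    }
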
75149@@ -1061,6 +1169,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
75150 locked += mm->locked_vm;
75151 lock_limit = rlimit(RLIMIT_MEMLOCK);
75152 lock_limit >>= PAGE_SHIFT;
75153+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
75154 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
75155 return -EAGAIN;
75156 }
75157@@ -1127,6 +1236,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
75158 }
75159 }
75160
75161+ if (!gr_acl_handle_mmap(file, prot))
75162+ return -EACCES;
75163+
75164 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
75165 }
75166
75167@@ -1203,7 +1315,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
75168 vm_flags_t vm_flags = vma->vm_flags;
75169
75170 /* If it was private or non-writable, the write bit is already clear */
75171- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
75172+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
75173 return 0;
75174
75175 /* The backer wishes to know when pages are first written to? */
75176@@ -1252,13 +1364,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
75177 unsigned long charged = 0;
75178 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
75179
75180+#ifdef CONFIG_PAX_SEGMEXEC
75181+ struct vm_area_struct *vma_m = NULL;
75182+#endif
75183+
75184+ /*
75185+ * mm->mmap_sem is required to protect against another thread
75186+ * changing the mappings in case we sleep.
75187+ */
75188+ verify_mm_writelocked(mm);
75189+
75190 /* Clear old maps */
75191 error = -ENOMEM;
75192-munmap_back:
75193 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
75194 if (do_munmap(mm, addr, len))
75195 return -ENOMEM;
75196- goto munmap_back;
75197+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
75198 }
75199
75200 /* Check against address space limit. */
75201@@ -1307,6 +1428,16 @@ munmap_back:
75202 goto unacct_error;
75203 }
75204
75205+#ifdef CONFIG_PAX_SEGMEXEC
75206+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
75207+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
75208+ if (!vma_m) {
75209+ error = -ENOMEM;
75210+ goto free_vma;
75211+ }
75212+ }
75213+#endif
75214+
75215 vma->vm_mm = mm;
75216 vma->vm_start = addr;
75217 vma->vm_end = addr + len;
75218@@ -1331,6 +1462,13 @@ munmap_back:
75219 if (error)
75220 goto unmap_and_free_vma;
75221
75222+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
75223+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
75224+ vma->vm_flags |= VM_PAGEEXEC;
75225+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
75226+ }
75227+#endif
75228+
75229 /* Can addr have changed??
75230 *
75231 * Answer: Yes, several device drivers can do it in their
75232@@ -1365,6 +1503,11 @@ munmap_back:
75233 vma_link(mm, vma, prev, rb_link, rb_parent);
75234 file = vma->vm_file;
75235
75236+#ifdef CONFIG_PAX_SEGMEXEC
75237+ if (vma_m)
75238+ BUG_ON(pax_mirror_vma(vma_m, vma));
75239+#endif
75240+
75241 /* Once vma denies write, undo our temporary denial count */
75242 if (correct_wcount)
75243 atomic_inc(&inode->i_writecount);
75244@@ -1372,6 +1515,7 @@ out:
75245 perf_event_mmap(vma);
75246
75247 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
75248+ track_exec_limit(mm, addr, addr + len, vm_flags);
75249 if (vm_flags & VM_LOCKED) {
75250 if (!mlock_vma_pages_range(vma, addr, addr + len))
75251 mm->locked_vm += (len >> PAGE_SHIFT);
75252@@ -1393,6 +1537,12 @@ unmap_and_free_vma:
75253 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
75254 charged = 0;
75255 free_vma:
75256+
75257+#ifdef CONFIG_PAX_SEGMEXEC
75258+ if (vma_m)
75259+ kmem_cache_free(vm_area_cachep, vma_m);
75260+#endif
75261+
75262 kmem_cache_free(vm_area_cachep, vma);
75263 unacct_error:
75264 if (charged)
75265@@ -1400,6 +1550,62 @@ unacct_error:
75266 return error;
75267 }
75268
75269+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
75270+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
75271+{
75272+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
75273+ return (random32() & 0xFF) << PAGE_SHIFT;
75274+
75275+ return 0;
75276+}
75277+#endif
75278+
75279+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
75280+{
75281+ if (!vma) {
75282+#ifdef CONFIG_STACK_GROWSUP
75283+ if (addr > sysctl_heap_stack_gap)
75284+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
75285+ else
75286+ vma = find_vma(current->mm, 0);
75287+ if (vma && (vma->vm_flags & VM_GROWSUP))
75288+ return false;
75289+#endif
75290+ return true;
75291+ }
75292+
75293+ if (addr + len > vma->vm_start)
75294+ return false;
75295+
75296+ if (vma->vm_flags & VM_GROWSDOWN)
75297+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
75298+#ifdef CONFIG_STACK_GROWSUP
75299+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
75300+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
75301+#endif
75302+ else if (offset)
75303+ return offset <= vma->vm_start - addr - len;
75304+
75305+ return true;
75306+}
75307+
75308+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
75309+{
75310+ if (vma->vm_start < len)
75311+ return -ENOMEM;
75312+
75313+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
75314+ if (offset <= vma->vm_start - len)
75315+ return vma->vm_start - len - offset;
75316+ else
75317+ return -ENOMEM;
75318+ }
75319+
75320+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
75321+ return vma->vm_start - len - sysctl_heap_stack_gap;
75322+ return -ENOMEM;
75323+}
75324+
75325 /* Get an address range which is currently unmapped.
75326 * For shmat() with addr=0.
75327 *
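
check_heap_stack_gap() above refuses a candidate range that would come closer than sysctl_heap_stack_gap to a downward-growing stack. A standalone model of the common case (the GROWSUP branches and the offset argument are dropped; struct vma here is a two-field stand-in):

#include <assert.h>
#include <stdbool.h>

struct vma { unsigned long start; bool grows_down; };

static bool gap_ok(const struct vma *next, unsigned long addr,
		   unsigned long len, unsigned long gap)
{
	if (!next)
		return true;                    /* nothing above us */
	if (addr + len > next->start)
		return false;                   /* would overlap outright */
	if (next->grows_down)                   /* keep a hole below stacks */
		return gap <= next->start - addr - len;
	return true;
}

int main(void)
{
	struct vma stack = { .start = 0x8000, .grows_down = true };
	assert(gap_ok(&stack, 0x4000, 0x1000, 0x1000));  /* hole large enough */
	assert(!gap_ok(&stack, 0x7000, 0x1000, 0x1000)); /* butts against stack */
	return 0;
}
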
75328@@ -1426,18 +1632,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
75329 if (flags & MAP_FIXED)
75330 return addr;
75331
75332+#ifdef CONFIG_PAX_RANDMMAP
75333+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
75334+#endif
75335+
75336 if (addr) {
75337 addr = PAGE_ALIGN(addr);
75338- vma = find_vma(mm, addr);
75339- if (TASK_SIZE - len >= addr &&
75340- (!vma || addr + len <= vma->vm_start))
75341- return addr;
75342+ if (TASK_SIZE - len >= addr) {
75343+ vma = find_vma(mm, addr);
75344+ if (check_heap_stack_gap(vma, addr, len))
75345+ return addr;
75346+ }
75347 }
75348 if (len > mm->cached_hole_size) {
75349- start_addr = addr = mm->free_area_cache;
75350+ start_addr = addr = mm->free_area_cache;
75351 } else {
75352- start_addr = addr = TASK_UNMAPPED_BASE;
75353- mm->cached_hole_size = 0;
75354+ start_addr = addr = mm->mmap_base;
75355+ mm->cached_hole_size = 0;
75356 }
75357
75358 full_search:
75359@@ -1448,34 +1659,40 @@ full_search:
75360 * Start a new search - just in case we missed
75361 * some holes.
75362 */
75363- if (start_addr != TASK_UNMAPPED_BASE) {
75364- addr = TASK_UNMAPPED_BASE;
75365- start_addr = addr;
75366+ if (start_addr != mm->mmap_base) {
75367+ start_addr = addr = mm->mmap_base;
75368 mm->cached_hole_size = 0;
75369 goto full_search;
75370 }
75371 return -ENOMEM;
75372 }
75373- if (!vma || addr + len <= vma->vm_start) {
75374- /*
75375- * Remember the place where we stopped the search:
75376- */
75377- mm->free_area_cache = addr + len;
75378- return addr;
75379- }
75380+ if (check_heap_stack_gap(vma, addr, len))
75381+ break;
75382 if (addr + mm->cached_hole_size < vma->vm_start)
75383 mm->cached_hole_size = vma->vm_start - addr;
75384 addr = vma->vm_end;
75385 }
75386+
75387+ /*
75388+ * Remember the place where we stopped the search:
75389+ */
75390+ mm->free_area_cache = addr + len;
75391+ return addr;
75392 }
75393 #endif
75394
75395 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
75396 {
75397+
75398+#ifdef CONFIG_PAX_SEGMEXEC
75399+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
75400+ return;
75401+#endif
75402+
75403 /*
75404 * Is this a new hole at the lowest possible address?
75405 */
75406- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
75407+ if (addr >= mm->mmap_base && addr < mm->free_area_cache)
75408 mm->free_area_cache = addr;
75409 }
75410
75411@@ -1491,7 +1708,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
75412 {
75413 struct vm_area_struct *vma;
75414 struct mm_struct *mm = current->mm;
75415- unsigned long addr = addr0, start_addr;
75416+ unsigned long base = mm->mmap_base, addr = addr0, start_addr;
75417
75418 /* requested length too big for entire address space */
75419 if (len > TASK_SIZE)
75420@@ -1500,13 +1717,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
75421 if (flags & MAP_FIXED)
75422 return addr;
75423
75424+#ifdef CONFIG_PAX_RANDMMAP
75425+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
75426+#endif
75427+
75428 /* requesting a specific address */
75429 if (addr) {
75430 addr = PAGE_ALIGN(addr);
75431- vma = find_vma(mm, addr);
75432- if (TASK_SIZE - len >= addr &&
75433- (!vma || addr + len <= vma->vm_start))
75434- return addr;
75435+ if (TASK_SIZE - len >= addr) {
75436+ vma = find_vma(mm, addr);
75437+ if (check_heap_stack_gap(vma, addr, len))
75438+ return addr;
75439+ }
75440 }
75441
75442 /* check if free_area_cache is useful for us */
75443@@ -1530,7 +1752,7 @@ try_again:
75444 * return with success:
75445 */
75446 vma = find_vma(mm, addr);
75447- if (!vma || addr+len <= vma->vm_start)
75448+ if (check_heap_stack_gap(vma, addr, len))
75449 /* remember the address as a hint for next time */
75450 return (mm->free_area_cache = addr);
75451
75452@@ -1539,8 +1761,8 @@ try_again:
75453 mm->cached_hole_size = vma->vm_start - addr;
75454
75455 /* try just below the current vma->vm_start */
75456- addr = vma->vm_start-len;
75457- } while (len < vma->vm_start);
75458+ addr = skip_heap_stack_gap(vma, len);
75459+ } while (!IS_ERR_VALUE(addr));
75460
75461 fail:
75462 /*
75463@@ -1563,13 +1785,21 @@ fail:
75464 * can happen with large stack limits and large mmap()
75465 * allocations.
75466 */
75467+ mm->mmap_base = TASK_UNMAPPED_BASE;
75468+
75469+#ifdef CONFIG_PAX_RANDMMAP
75470+ if (mm->pax_flags & MF_PAX_RANDMMAP)
75471+ mm->mmap_base += mm->delta_mmap;
75472+#endif
75473+
75474+ mm->free_area_cache = mm->mmap_base;
75475 mm->cached_hole_size = ~0UL;
75476- mm->free_area_cache = TASK_UNMAPPED_BASE;
75477 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
75478 /*
75479 * Restore the topdown base:
75480 */
75481- mm->free_area_cache = mm->mmap_base;
75482+ mm->mmap_base = base;
75483+ mm->free_area_cache = base;
75484 mm->cached_hole_size = ~0UL;
75485
75486 return addr;
75487@@ -1578,6 +1808,12 @@ fail:
75488
75489 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
75490 {
75491+
75492+#ifdef CONFIG_PAX_SEGMEXEC
75493+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
75494+ return;
75495+#endif
75496+
75497 /*
75498 * Is this a new hole at the highest possible address?
75499 */
75500@@ -1585,8 +1821,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
75501 mm->free_area_cache = addr;
75502
75503 /* dont allow allocations above current base */
75504- if (mm->free_area_cache > mm->mmap_base)
75505+ if (mm->free_area_cache > mm->mmap_base) {
75506 mm->free_area_cache = mm->mmap_base;
75507+ mm->cached_hole_size = ~0UL;
75508+ }
75509 }
75510
75511 unsigned long
75512@@ -1685,6 +1923,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
75513 return vma;
75514 }
75515
75516+#ifdef CONFIG_PAX_SEGMEXEC
75517+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
75518+{
75519+ struct vm_area_struct *vma_m;
75520+
75521+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
75522+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
75523+ BUG_ON(vma->vm_mirror);
75524+ return NULL;
75525+ }
75526+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
75527+ vma_m = vma->vm_mirror;
75528+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
75529+ BUG_ON(vma->vm_file != vma_m->vm_file);
75530+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
75531+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
75532+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
75533+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
75534+ return vma_m;
75535+}
75536+#endif
75537+
75538 /*
75539 * Verify that the stack growth is acceptable and
75540 * update accounting. This is shared with both the
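
pax_find_mirror_vma() above encodes the SEGMEXEC invariant: every executable vma in the lower address half has a twin at vm_start + SEGMEXEC_TASK_SIZE with the same size and file offset, and the two reference each other through vm_mirror. A userspace model of those invariants (the SEGMEXEC_TASK_SIZE value is illustrative):

#include <assert.h>
#include <stddef.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL  /* illustrative value */

struct vma {
	unsigned long start, end, pgoff;
	struct vma *mirror;
};

static struct vma *find_mirror(struct vma *v)
{
	struct vma *m = v->mirror;
	if (!m)
		return NULL;
	/* the invariants BUG_ON()'d in the patch, as plain asserts */
	assert(m->mirror == v);
	assert(m->start == v->start + SEGMEXEC_TASK_SIZE);
	assert(m->end - m->start == v->end - v->start);
	assert(m->pgoff == v->pgoff);
	return m;
}

int main(void)
{
	struct vma a = { 0x1000, 0x3000, 7, NULL };
	struct vma b = { 0x1000 + SEGMEXEC_TASK_SIZE,
			 0x3000 + SEGMEXEC_TASK_SIZE, 7, &a };
	a.mirror = &b;
	assert(find_mirror(&a) == &b);
	return 0;
}
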
75541@@ -1701,6 +1961,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
75542 return -ENOMEM;
75543
75544 /* Stack limit test */
75545+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
75546 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
75547 return -ENOMEM;
75548
75549@@ -1711,6 +1972,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
75550 locked = mm->locked_vm + grow;
75551 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
75552 limit >>= PAGE_SHIFT;
75553+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
75554 if (locked > limit && !capable(CAP_IPC_LOCK))
75555 return -ENOMEM;
75556 }
75557@@ -1740,37 +2002,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
75558 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
75559 * vma is the last one with address > vma->vm_end. Have to extend vma.
75560 */
75561+#ifndef CONFIG_IA64
75562+static
75563+#endif
75564 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
75565 {
75566 int error;
75567+ bool locknext;
75568
75569 if (!(vma->vm_flags & VM_GROWSUP))
75570 return -EFAULT;
75571
75572+ /* Also guard against wrapping around to address 0. */
75573+ if (address < PAGE_ALIGN(address+1))
75574+ address = PAGE_ALIGN(address+1);
75575+ else
75576+ return -ENOMEM;
75577+
75578 /*
75579 * We must make sure the anon_vma is allocated
75580 * so that the anon_vma locking is not a noop.
75581 */
75582 if (unlikely(anon_vma_prepare(vma)))
75583 return -ENOMEM;
75584+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
75585+ if (locknext && anon_vma_prepare(vma->vm_next))
75586+ return -ENOMEM;
75587 vma_lock_anon_vma(vma);
75588+ if (locknext)
75589+ vma_lock_anon_vma(vma->vm_next);
75590
75591 /*
75592 * vma->vm_start/vm_end cannot change under us because the caller
75593 * is required to hold the mmap_sem in read mode. We need the
75594- * anon_vma lock to serialize against concurrent expand_stacks.
75595- * Also guard against wrapping around to address 0.
75596+ * anon_vma locks to serialize against concurrent expand_stacks
75597+ * and expand_upwards.
75598 */
75599- if (address < PAGE_ALIGN(address+4))
75600- address = PAGE_ALIGN(address+4);
75601- else {
75602- vma_unlock_anon_vma(vma);
75603- return -ENOMEM;
75604- }
75605 error = 0;
75606
75607 /* Somebody else might have raced and expanded it already */
75608- if (address > vma->vm_end) {
75609+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
75610+ error = -ENOMEM;
75611+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
75612 unsigned long size, grow;
75613
75614 size = address - vma->vm_start;
75615@@ -1787,6 +2060,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
75616 }
75617 }
75618 }
75619+ if (locknext)
75620+ vma_unlock_anon_vma(vma->vm_next);
75621 vma_unlock_anon_vma(vma);
75622 khugepaged_enter_vma_merge(vma);
75623 validate_mm(vma->vm_mm);
75624@@ -1801,6 +2076,8 @@ int expand_downwards(struct vm_area_struct *vma,
75625 unsigned long address)
75626 {
75627 int error;
75628+ bool lockprev = false;
75629+ struct vm_area_struct *prev;
75630
75631 /*
75632 * We must make sure the anon_vma is allocated
75633@@ -1814,6 +2091,15 @@ int expand_downwards(struct vm_area_struct *vma,
75634 if (error)
75635 return error;
75636
75637+ prev = vma->vm_prev;
75638+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
75639+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
75640+#endif
75641+ if (lockprev && anon_vma_prepare(prev))
75642+ return -ENOMEM;
75643+ if (lockprev)
75644+ vma_lock_anon_vma(prev);
75645+
75646 vma_lock_anon_vma(vma);
75647
75648 /*
75649@@ -1823,9 +2109,17 @@ int expand_downwards(struct vm_area_struct *vma,
75650 */
75651
75652 /* Somebody else might have raced and expanded it already */
75653- if (address < vma->vm_start) {
75654+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
75655+ error = -ENOMEM;
75656+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
75657 unsigned long size, grow;
75658
75659+#ifdef CONFIG_PAX_SEGMEXEC
75660+ struct vm_area_struct *vma_m;
75661+
75662+ vma_m = pax_find_mirror_vma(vma);
75663+#endif
75664+
75665 size = vma->vm_end - address;
75666 grow = (vma->vm_start - address) >> PAGE_SHIFT;
75667
75668@@ -1837,6 +2131,17 @@ int expand_downwards(struct vm_area_struct *vma,
75669 vma->vm_start = address;
75670 vma->vm_pgoff -= grow;
75671 anon_vma_interval_tree_post_update_vma(vma);
75672+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
75673+
75674+#ifdef CONFIG_PAX_SEGMEXEC
75675+ if (vma_m) {
75676+ anon_vma_interval_tree_pre_update_vma(vma_m);
75677+ vma_m->vm_start -= grow << PAGE_SHIFT;
75678+ vma_m->vm_pgoff -= grow;
75679+ anon_vma_interval_tree_post_update_vma(vma_m);
75680+ }
75681+#endif
75682+
75683 perf_event_mmap(vma);
75684 }
75685 }
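
The expand_downwards() change above makes stack growth respect the same gap policy: the stack may not grow to within sysctl_heap_stack_gap bytes of a mapped region below it. A sketch of that guard as a pure predicate (all names invented):

#include <assert.h>
#include <stdbool.h>

static bool may_expand_down(unsigned long prev_end, bool prev_mapped,
			    unsigned long new_start, unsigned long gap)
{
	/* growing to new_start must leave at least `gap` bytes free */
	if (prev_mapped && new_start - prev_end < gap)
		return false;
	return true;
}

int main(void)
{
	/* heap ends at 0x5000, gap policy is 0x1000 */
	assert(may_expand_down(0x5000, true, 0x6000, 0x1000));
	assert(!may_expand_down(0x5000, true, 0x5800, 0x1000));
	assert(may_expand_down(0, false, 0x100, 0x1000));
	return 0;
}
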
75686@@ -1914,6 +2219,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
75687 do {
75688 long nrpages = vma_pages(vma);
75689
75690+#ifdef CONFIG_PAX_SEGMEXEC
75691+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
75692+ vma = remove_vma(vma);
75693+ continue;
75694+ }
75695+#endif
75696+
75697 if (vma->vm_flags & VM_ACCOUNT)
75698 nr_accounted += nrpages;
75699 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
75700@@ -1959,6 +2271,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
75701 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
75702 vma->vm_prev = NULL;
75703 do {
75704+
75705+#ifdef CONFIG_PAX_SEGMEXEC
75706+ if (vma->vm_mirror) {
75707+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
75708+ vma->vm_mirror->vm_mirror = NULL;
75709+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
75710+ vma->vm_mirror = NULL;
75711+ }
75712+#endif
75713+
75714 rb_erase(&vma->vm_rb, &mm->mm_rb);
75715 mm->map_count--;
75716 tail_vma = vma;
75717@@ -1987,14 +2309,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
75718 struct vm_area_struct *new;
75719 int err = -ENOMEM;
75720
75721+#ifdef CONFIG_PAX_SEGMEXEC
75722+ struct vm_area_struct *vma_m, *new_m = NULL;
75723+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
75724+#endif
75725+
75726 if (is_vm_hugetlb_page(vma) && (addr &
75727 ~(huge_page_mask(hstate_vma(vma)))))
75728 return -EINVAL;
75729
75730+#ifdef CONFIG_PAX_SEGMEXEC
75731+ vma_m = pax_find_mirror_vma(vma);
75732+#endif
75733+
75734 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
75735 if (!new)
75736 goto out_err;
75737
75738+#ifdef CONFIG_PAX_SEGMEXEC
75739+ if (vma_m) {
75740+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
75741+ if (!new_m) {
75742+ kmem_cache_free(vm_area_cachep, new);
75743+ goto out_err;
75744+ }
75745+ }
75746+#endif
75747+
75748 /* most fields are the same, copy all, and then fixup */
75749 *new = *vma;
75750
75751@@ -2007,6 +2348,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
75752 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
75753 }
75754
75755+#ifdef CONFIG_PAX_SEGMEXEC
75756+ if (vma_m) {
75757+ *new_m = *vma_m;
75758+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
75759+ new_m->vm_mirror = new;
75760+ new->vm_mirror = new_m;
75761+
75762+ if (new_below)
75763+ new_m->vm_end = addr_m;
75764+ else {
75765+ new_m->vm_start = addr_m;
75766+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
75767+ }
75768+ }
75769+#endif
75770+
75771 pol = mpol_dup(vma_policy(vma));
75772 if (IS_ERR(pol)) {
75773 err = PTR_ERR(pol);
75774@@ -2029,6 +2386,36 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
75775 else
75776 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
75777
75778+#ifdef CONFIG_PAX_SEGMEXEC
75779+ if (!err && vma_m) {
75780+ if (anon_vma_clone(new_m, vma_m))
75781+ goto out_free_mpol;
75782+
75783+ mpol_get(pol);
75784+ vma_set_policy(new_m, pol);
75785+
75786+ if (new_m->vm_file)
75787+ get_file(new_m->vm_file);
75788+
75789+ if (new_m->vm_ops && new_m->vm_ops->open)
75790+ new_m->vm_ops->open(new_m);
75791+
75792+ if (new_below)
75793+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
75794+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
75795+ else
75796+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
75797+
75798+ if (err) {
75799+ if (new_m->vm_ops && new_m->vm_ops->close)
75800+ new_m->vm_ops->close(new_m);
75801+ if (new_m->vm_file)
75802+ fput(new_m->vm_file);
75803+ mpol_put(pol);
75804+ }
75805+ }
75806+#endif
75807+
75808 /* Success. */
75809 if (!err)
75810 return 0;
75811@@ -2038,10 +2425,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
75812 new->vm_ops->close(new);
75813 if (new->vm_file)
75814 fput(new->vm_file);
75815- unlink_anon_vmas(new);
75816 out_free_mpol:
75817 mpol_put(pol);
75818 out_free_vma:
75819+
75820+#ifdef CONFIG_PAX_SEGMEXEC
75821+ if (new_m) {
75822+ unlink_anon_vmas(new_m);
75823+ kmem_cache_free(vm_area_cachep, new_m);
75824+ }
75825+#endif
75826+
75827+ unlink_anon_vmas(new);
75828 kmem_cache_free(vm_area_cachep, new);
75829 out_err:
75830 return err;
75831@@ -2054,6 +2449,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
75832 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
75833 unsigned long addr, int new_below)
75834 {
75835+
75836+#ifdef CONFIG_PAX_SEGMEXEC
75837+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
75838+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
75839+ if (mm->map_count >= sysctl_max_map_count-1)
75840+ return -ENOMEM;
75841+ } else
75842+#endif
75843+
75844 if (mm->map_count >= sysctl_max_map_count)
75845 return -ENOMEM;
75846
75847@@ -2065,11 +2469,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
75848 * work. This now handles partial unmappings.
75849 * Jeremy Fitzhardinge <jeremy@goop.org>
75850 */
75851+#ifdef CONFIG_PAX_SEGMEXEC
75852 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
75853 {
75854+ int ret = __do_munmap(mm, start, len);
75855+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
75856+ return ret;
75857+
75858+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
75859+}
75860+
75861+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
75862+#else
75863+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
75864+#endif
75865+{
75866 unsigned long end;
75867 struct vm_area_struct *vma, *prev, *last;
75868
75869+ /*
75870+ * mm->mmap_sem is required to protect against another thread
75871+ * changing the mappings in case we sleep.
75872+ */
75873+ verify_mm_writelocked(mm);
75874+
75875 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
75876 return -EINVAL;
75877
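
The SEGMEXEC do_munmap() wrapper above replays every lower-half unmap at start + SEGMEXEC_TASK_SIZE so the executable mirror is torn down with its data twin. A sketch of the control flow (unmap_range() stands in for __do_munmap(); the constant is illustrative):

#include <assert.h>
#include <stdbool.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL  /* illustrative value */

static int unmap_range(unsigned long start, unsigned long len)
{
	(void)start; (void)len;
	return 0;  /* pretend success */
}

static int segmexec_munmap(unsigned long start, unsigned long len,
			   bool segmexec)
{
	int ret = unmap_range(start, len);
	if (ret || !segmexec)
		return ret;
	/* replay the unmap on the mirrored executable half */
	return unmap_range(start + SEGMEXEC_TASK_SIZE, len);
}

int main(void)
{
	assert(segmexec_munmap(0x1000, 0x2000, true) == 0);
	return 0;
}
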
75878@@ -2144,6 +2567,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
75879 /* Fix up all other VM information */
75880 remove_vma_list(mm, vma);
75881
75882+ track_exec_limit(mm, start, end, 0UL);
75883+
75884 return 0;
75885 }
75886
75887@@ -2152,6 +2577,13 @@ int vm_munmap(unsigned long start, size_t len)
75888 int ret;
75889 struct mm_struct *mm = current->mm;
75890
75891+
75892+#ifdef CONFIG_PAX_SEGMEXEC
75893+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
75894+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
75895+ return -EINVAL;
75896+#endif
75897+
75898 down_write(&mm->mmap_sem);
75899 ret = do_munmap(mm, start, len);
75900 up_write(&mm->mmap_sem);
75901@@ -2165,16 +2597,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
75902 return vm_munmap(addr, len);
75903 }
75904
75905-static inline void verify_mm_writelocked(struct mm_struct *mm)
75906-{
75907-#ifdef CONFIG_DEBUG_VM
75908- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
75909- WARN_ON(1);
75910- up_read(&mm->mmap_sem);
75911- }
75912-#endif
75913-}
75914-
75915 /*
75916 * this is really a simplified "do_mmap". it only handles
75917 * anonymous maps. eventually we may be able to do some
75918@@ -2188,6 +2610,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
75919 struct rb_node ** rb_link, * rb_parent;
75920 pgoff_t pgoff = addr >> PAGE_SHIFT;
75921 int error;
75922+ unsigned long charged;
75923
75924 len = PAGE_ALIGN(len);
75925 if (!len)
75926@@ -2195,16 +2618,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
75927
75928 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
75929
75930+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
75931+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
75932+ flags &= ~VM_EXEC;
75933+
75934+#ifdef CONFIG_PAX_MPROTECT
75935+ if (mm->pax_flags & MF_PAX_MPROTECT)
75936+ flags &= ~VM_MAYEXEC;
75937+#endif
75938+
75939+ }
75940+#endif
75941+
75942 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
75943 if (error & ~PAGE_MASK)
75944 return error;
75945
75946+ charged = len >> PAGE_SHIFT;
75947+
75948 /*
75949 * mlock MCL_FUTURE?
75950 */
75951 if (mm->def_flags & VM_LOCKED) {
75952 unsigned long locked, lock_limit;
75953- locked = len >> PAGE_SHIFT;
75954+ locked = charged;
75955 locked += mm->locked_vm;
75956 lock_limit = rlimit(RLIMIT_MEMLOCK);
75957 lock_limit >>= PAGE_SHIFT;
75958@@ -2221,21 +2658,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
75959 /*
75960 * Clear old maps. this also does some error checking for us
75961 */
75962- munmap_back:
75963 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
75964 if (do_munmap(mm, addr, len))
75965 return -ENOMEM;
75966- goto munmap_back;
75967+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
75968 }
75969
75970 /* Check against address space limits *after* clearing old maps... */
75971- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
75972+ if (!may_expand_vm(mm, charged))
75973 return -ENOMEM;
75974
75975 if (mm->map_count > sysctl_max_map_count)
75976 return -ENOMEM;
75977
75978- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
75979+ if (security_vm_enough_memory_mm(mm, charged))
75980 return -ENOMEM;
75981
75982 /* Can we just expand an old private anonymous mapping? */
75983@@ -2249,7 +2685,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
75984 */
75985 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
75986 if (!vma) {
75987- vm_unacct_memory(len >> PAGE_SHIFT);
75988+ vm_unacct_memory(charged);
75989 return -ENOMEM;
75990 }
75991
75992@@ -2263,11 +2699,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
75993 vma_link(mm, vma, prev, rb_link, rb_parent);
75994 out:
75995 perf_event_mmap(vma);
75996- mm->total_vm += len >> PAGE_SHIFT;
75997+ mm->total_vm += charged;
75998 if (flags & VM_LOCKED) {
75999 if (!mlock_vma_pages_range(vma, addr, addr + len))
76000- mm->locked_vm += (len >> PAGE_SHIFT);
76001+ mm->locked_vm += charged;
76002 }
76003+ track_exec_limit(mm, addr, addr + len, flags);
76004 return addr;
76005 }
76006
76007@@ -2325,6 +2762,7 @@ void exit_mmap(struct mm_struct *mm)
76008 while (vma) {
76009 if (vma->vm_flags & VM_ACCOUNT)
76010 nr_accounted += vma_pages(vma);
76011+ vma->vm_mirror = NULL;
76012 vma = remove_vma(vma);
76013 }
76014 vm_unacct_memory(nr_accounted);
76015@@ -2341,6 +2779,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
76016 struct vm_area_struct *prev;
76017 struct rb_node **rb_link, *rb_parent;
76018
76019+#ifdef CONFIG_PAX_SEGMEXEC
76020+ struct vm_area_struct *vma_m = NULL;
76021+#endif
76022+
76023+ if (security_mmap_addr(vma->vm_start))
76024+ return -EPERM;
76025+
76026 /*
76027 * The vm_pgoff of a purely anonymous vma should be irrelevant
76028 * until its first write fault, when page's anon_vma and index
76029@@ -2364,7 +2809,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
76030 security_vm_enough_memory_mm(mm, vma_pages(vma)))
76031 return -ENOMEM;
76032
76033+#ifdef CONFIG_PAX_SEGMEXEC
76034+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
76035+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
76036+ if (!vma_m)
76037+ return -ENOMEM;
76038+ }
76039+#endif
76040+
76041 vma_link(mm, vma, prev, rb_link, rb_parent);
76042+
76043+#ifdef CONFIG_PAX_SEGMEXEC
76044+ if (vma_m)
76045+ BUG_ON(pax_mirror_vma(vma_m, vma));
76046+#endif
76047+
76048 return 0;
76049 }
76050
76051@@ -2384,6 +2843,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
76052 struct mempolicy *pol;
76053 bool faulted_in_anon_vma = true;
76054
76055+ BUG_ON(vma->vm_mirror);
76056+
76057 /*
76058 * If anonymous vma has not yet been faulted, update new pgoff
76059 * to match new location, to increase its chance of merging.
76060@@ -2450,6 +2911,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
76061 return NULL;
76062 }
76063
76064+#ifdef CONFIG_PAX_SEGMEXEC
76065+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
76066+{
76067+ struct vm_area_struct *prev_m;
76068+ struct rb_node **rb_link_m, *rb_parent_m;
76069+ struct mempolicy *pol_m;
76070+
76071+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
76072+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
76073+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
76074+ *vma_m = *vma;
76075+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
76076+ if (anon_vma_clone(vma_m, vma))
76077+ return -ENOMEM;
76078+ pol_m = vma_policy(vma_m);
76079+ mpol_get(pol_m);
76080+ vma_set_policy(vma_m, pol_m);
76081+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
76082+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
76083+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
76084+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
76085+ if (vma_m->vm_file)
76086+ get_file(vma_m->vm_file);
76087+ if (vma_m->vm_ops && vma_m->vm_ops->open)
76088+ vma_m->vm_ops->open(vma_m);
76089+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
76090+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
76091+ vma_m->vm_mirror = vma;
76092+ vma->vm_mirror = vma_m;
76093+ return 0;
76094+}
76095+#endif
76096+
76097 /*
76098 * Return true if the calling process may expand its vm space by the passed
76099 * number of pages
76100@@ -2461,6 +2955,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
76101
76102 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
76103
76104+#ifdef CONFIG_PAX_RANDMMAP
76105+ if (mm->pax_flags & MF_PAX_RANDMMAP)
76106+ cur -= mm->brk_gap;
76107+#endif
76108+
76109+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
76110 if (cur + npages > lim)
76111 return 0;
76112 return 1;
76113@@ -2531,6 +3031,22 @@ int install_special_mapping(struct mm_struct *mm,
76114 vma->vm_start = addr;
76115 vma->vm_end = addr + len;
76116
76117+#ifdef CONFIG_PAX_MPROTECT
76118+ if (mm->pax_flags & MF_PAX_MPROTECT) {
76119+#ifndef CONFIG_PAX_MPROTECT_COMPAT
76120+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
76121+ return -EPERM;
76122+ if (!(vm_flags & VM_EXEC))
76123+ vm_flags &= ~VM_MAYEXEC;
76124+#else
76125+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
76126+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
76127+#endif
76128+ else
76129+ vm_flags &= ~VM_MAYWRITE;
76130+ }
76131+#endif
76132+
76133 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
76134 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
76135
76136diff --git a/mm/mprotect.c b/mm/mprotect.c
76137index a409926..8b32e6d 100644
76138--- a/mm/mprotect.c
76139+++ b/mm/mprotect.c
76140@@ -23,10 +23,17 @@
76141 #include <linux/mmu_notifier.h>
76142 #include <linux/migrate.h>
76143 #include <linux/perf_event.h>
76144+
76145+#ifdef CONFIG_PAX_MPROTECT
76146+#include <linux/elf.h>
76147+#include <linux/binfmts.h>
76148+#endif
76149+
76150 #include <asm/uaccess.h>
76151 #include <asm/pgtable.h>
76152 #include <asm/cacheflush.h>
76153 #include <asm/tlbflush.h>
76154+#include <asm/mmu_context.h>
76155
76156 #ifndef pgprot_modify
76157 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
76158@@ -141,6 +148,48 @@ static void change_protection(struct vm_area_struct *vma,
76159 flush_tlb_range(vma, start, end);
76160 }
76161
76162+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
76163+/* called while holding the mmap semaphore for writing, except during stack expansion */
76164+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
76165+{
76166+ unsigned long oldlimit, newlimit = 0UL;
76167+
76168+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
76169+ return;
76170+
76171+ spin_lock(&mm->page_table_lock);
76172+ oldlimit = mm->context.user_cs_limit;
76173+ if ((prot & VM_EXEC) && oldlimit < end)
76174+ /* USER_CS limit moved up */
76175+ newlimit = end;
76176+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
76177+ /* USER_CS limit moved down */
76178+ newlimit = start;
76179+
76180+ if (newlimit) {
76181+ mm->context.user_cs_limit = newlimit;
76182+
76183+#ifdef CONFIG_SMP
76184+ wmb();
76185+ cpus_clear(mm->context.cpu_user_cs_mask);
76186+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
76187+#endif
76188+
76189+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
76190+ }
76191+ spin_unlock(&mm->page_table_lock);
76192+ if (newlimit == end) {
76193+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
76194+
76195+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
76196+ if (is_vm_hugetlb_page(vma))
76197+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
76198+ else
76199+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
76200+ }
76201+}
76202+#endif
76203+
76204 int
76205 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
76206 unsigned long start, unsigned long end, unsigned long newflags)
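
track_exec_limit() above is the heart of i386 PAGEEXEC on non-NX hardware: the USER_CS segment limit is kept at the top of the highest executable mapping, so it moves up when an executable mapping appears above it and down when exec rights are removed at the top. A pure-function model of the two limit updates:

#include <assert.h>
#include <stdbool.h>

static unsigned long new_cs_limit(unsigned long oldlimit,
				  unsigned long start, unsigned long end,
				  bool exec)
{
	if (exec && oldlimit < end)
		return end;     /* exec mapping above the limit: raise it */
	if (!exec && start < oldlimit && oldlimit <= end)
		return start;   /* exec rights dropped at the top: lower it */
	return oldlimit;        /* unchanged */
}

int main(void)
{
	assert(new_cs_limit(0x4000, 0x5000, 0x6000, true)  == 0x6000);
	assert(new_cs_limit(0x6000, 0x5000, 0x6000, false) == 0x5000);
	assert(new_cs_limit(0x8000, 0x5000, 0x6000, false) == 0x8000);
	return 0;
}
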
76207@@ -153,11 +202,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
76208 int error;
76209 int dirty_accountable = 0;
76210
76211+#ifdef CONFIG_PAX_SEGMEXEC
76212+ struct vm_area_struct *vma_m = NULL;
76213+ unsigned long start_m, end_m;
76214+
76215+ start_m = start + SEGMEXEC_TASK_SIZE;
76216+ end_m = end + SEGMEXEC_TASK_SIZE;
76217+#endif
76218+
76219 if (newflags == oldflags) {
76220 *pprev = vma;
76221 return 0;
76222 }
76223
76224+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
76225+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
76226+
76227+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
76228+ return -ENOMEM;
76229+
76230+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
76231+ return -ENOMEM;
76232+ }
76233+
76234 /*
76235 * If we make a private mapping writable we increase our commit;
76236 * but (without finer accounting) cannot reduce our commit if we
76237@@ -174,6 +241,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
76238 }
76239 }
76240
76241+#ifdef CONFIG_PAX_SEGMEXEC
76242+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
76243+ if (start != vma->vm_start) {
76244+ error = split_vma(mm, vma, start, 1);
76245+ if (error)
76246+ goto fail;
76247+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
76248+ *pprev = (*pprev)->vm_next;
76249+ }
76250+
76251+ if (end != vma->vm_end) {
76252+ error = split_vma(mm, vma, end, 0);
76253+ if (error)
76254+ goto fail;
76255+ }
76256+
76257+ if (pax_find_mirror_vma(vma)) {
76258+ error = __do_munmap(mm, start_m, end_m - start_m);
76259+ if (error)
76260+ goto fail;
76261+ } else {
76262+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
76263+ if (!vma_m) {
76264+ error = -ENOMEM;
76265+ goto fail;
76266+ }
76267+ vma->vm_flags = newflags;
76268+ error = pax_mirror_vma(vma_m, vma);
76269+ if (error) {
76270+ vma->vm_flags = oldflags;
76271+ goto fail;
76272+ }
76273+ }
76274+ }
76275+#endif
76276+
76277 /*
76278 * First try to merge with previous and/or next vma.
76279 */
76280@@ -204,9 +307,21 @@ success:
76281 * vm_flags and vm_page_prot are protected by the mmap_sem
76282 * held in write mode.
76283 */
76284+
76285+#ifdef CONFIG_PAX_SEGMEXEC
76286+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
76287+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
76288+#endif
76289+
76290 vma->vm_flags = newflags;
76291+
76292+#ifdef CONFIG_PAX_MPROTECT
76293+ if (mm->binfmt && mm->binfmt->handle_mprotect)
76294+ mm->binfmt->handle_mprotect(vma, newflags);
76295+#endif
76296+
76297 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
76298- vm_get_page_prot(newflags));
76299+ vm_get_page_prot(vma->vm_flags));
76300
76301 if (vma_wants_writenotify(vma)) {
76302 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
76303@@ -248,6 +363,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
76304 end = start + len;
76305 if (end <= start)
76306 return -ENOMEM;
76307+
76308+#ifdef CONFIG_PAX_SEGMEXEC
76309+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
76310+ if (end > SEGMEXEC_TASK_SIZE)
76311+ return -EINVAL;
76312+ } else
76313+#endif
76314+
76315+ if (end > TASK_SIZE)
76316+ return -EINVAL;
76317+
76318 if (!arch_validate_prot(prot))
76319 return -EINVAL;
76320
76321@@ -255,7 +381,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
76322 /*
76323 * Does the application expect PROT_READ to imply PROT_EXEC:
76324 */
76325- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
76326+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
76327 prot |= PROT_EXEC;
76328
76329 vm_flags = calc_vm_prot_bits(prot);
76330@@ -288,6 +414,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
76331 if (start > vma->vm_start)
76332 prev = vma;
76333
76334+#ifdef CONFIG_PAX_MPROTECT
76335+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
76336+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
76337+#endif
76338+
76339 for (nstart = start ; ; ) {
76340 unsigned long newflags;
76341
76342@@ -297,6 +428,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
76343
76344 /* newflags >> 4 shift VM_MAY% in place of VM_% */
76345 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
76346+ if (prot & (PROT_WRITE | PROT_EXEC))
76347+ gr_log_rwxmprotect(vma->vm_file);
76348+
76349+ error = -EACCES;
76350+ goto out;
76351+ }
76352+
76353+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
76354 error = -EACCES;
76355 goto out;
76356 }
76357@@ -311,6 +450,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
76358 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
76359 if (error)
76360 goto out;
76361+
76362+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
76363+
76364 nstart = tmp;
76365
76366 if (nstart < prev->vm_end)
76367diff --git a/mm/mremap.c b/mm/mremap.c
76368index 1b61c2d..1cc0e3c 100644
76369--- a/mm/mremap.c
76370+++ b/mm/mremap.c
76371@@ -125,6 +125,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
76372 continue;
76373 pte = ptep_get_and_clear(mm, old_addr, old_pte);
76374 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
76375+
76376+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
76377+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
76378+ pte = pte_exprotect(pte);
76379+#endif
76380+
76381 set_pte_at(mm, new_addr, new_pte, pte);
76382 }
76383
76384@@ -319,6 +325,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
76385 if (is_vm_hugetlb_page(vma))
76386 goto Einval;
76387
76388+#ifdef CONFIG_PAX_SEGMEXEC
76389+ if (pax_find_mirror_vma(vma))
76390+ goto Einval;
76391+#endif
76392+
76393 /* We can't remap across vm area boundaries */
76394 if (old_len > vma->vm_end - addr)
76395 goto Efault;
76396@@ -375,20 +386,25 @@ static unsigned long mremap_to(unsigned long addr,
76397 unsigned long ret = -EINVAL;
76398 unsigned long charged = 0;
76399 unsigned long map_flags;
76400+ unsigned long pax_task_size = TASK_SIZE;
76401
76402 if (new_addr & ~PAGE_MASK)
76403 goto out;
76404
76405- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
76406+#ifdef CONFIG_PAX_SEGMEXEC
76407+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
76408+ pax_task_size = SEGMEXEC_TASK_SIZE;
76409+#endif
76410+
76411+ pax_task_size -= PAGE_SIZE;
76412+
76413+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
76414 goto out;
76415
76416 /* Check if the location we're moving into overlaps the
76417 * old location at all, and fail if it does.
76418 */
76419- if ((new_addr <= addr) && (new_addr+new_len) > addr)
76420- goto out;
76421-
76422- if ((addr <= new_addr) && (addr+old_len) > new_addr)
76423+ if (addr + old_len > new_addr && new_addr + new_len > addr)
76424 goto out;
76425
76426 ret = do_munmap(mm, new_addr, new_len);
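
The mremap_to() change above folds two one-sided comparisons into the standard half-open interval-overlap test. A small exhaustive check that the two formulations agree for non-empty, non-wrapping ranges:

#include <assert.h>
#include <stdbool.h>

static bool old_check(unsigned long addr, unsigned long old_len,
		      unsigned long new_addr, unsigned long new_len)
{
	return (new_addr <= addr && new_addr + new_len > addr) ||
	       (addr <= new_addr && addr + old_len > new_addr);
}

static bool new_check(unsigned long addr, unsigned long old_len,
		      unsigned long new_addr, unsigned long new_len)
{
	return addr + old_len > new_addr && new_addr + new_len > addr;
}

int main(void)
{
	for (unsigned long a = 0; a < 8; a++)
		for (unsigned long ol = 1; ol < 8; ol++)
			for (unsigned long n = 0; n < 8; n++)
				for (unsigned long nl = 1; nl < 8; nl++)
					assert(old_check(a, ol, n, nl) ==
					       new_check(a, ol, n, nl));
	return 0;
}
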
76427@@ -456,6 +472,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
76428 struct vm_area_struct *vma;
76429 unsigned long ret = -EINVAL;
76430 unsigned long charged = 0;
76431+ unsigned long pax_task_size = TASK_SIZE;
76432
76433 down_write(&current->mm->mmap_sem);
76434
76435@@ -476,6 +493,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
76436 if (!new_len)
76437 goto out;
76438
76439+#ifdef CONFIG_PAX_SEGMEXEC
76440+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
76441+ pax_task_size = SEGMEXEC_TASK_SIZE;
76442+#endif
76443+
76444+ pax_task_size -= PAGE_SIZE;
76445+
76446+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
76447+ old_len > pax_task_size || addr > pax_task_size-old_len)
76448+ goto out;
76449+
76450 if (flags & MREMAP_FIXED) {
76451 if (flags & MREMAP_MAYMOVE)
76452 ret = mremap_to(addr, old_len, new_addr, new_len);
76453@@ -524,6 +552,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
76454 addr + new_len);
76455 }
76456 ret = addr;
76457+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
76458 goto out;
76459 }
76460 }
76461@@ -547,7 +576,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
76462 goto out;
76463 }
76464
76465+ map_flags = vma->vm_flags;
76466 ret = move_vma(vma, addr, old_len, new_len, new_addr);
76467+ if (!(ret & ~PAGE_MASK)) {
76468+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
76469+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
76470+ }
76471 }
76472 out:
76473 if (ret & ~PAGE_MASK)
76474diff --git a/mm/nommu.c b/mm/nommu.c
76475index 45131b4..c521665 100644
76476--- a/mm/nommu.c
76477+++ b/mm/nommu.c
76478@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
76479 int sysctl_overcommit_ratio = 50; /* default is 50% */
76480 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
76481 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
76482-int heap_stack_gap = 0;
76483
76484 atomic_long_t mmap_pages_allocated;
76485
76486@@ -824,15 +823,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
76487 EXPORT_SYMBOL(find_vma);
76488
76489 /*
76490- * find a VMA
76491- * - we don't extend stack VMAs under NOMMU conditions
76492- */
76493-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
76494-{
76495- return find_vma(mm, addr);
76496-}
76497-
76498-/*
76499 * expand a stack to a given address
76500 * - not supported under NOMMU conditions
76501 */
76502@@ -1540,6 +1530,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
76503
76504 /* most fields are the same, copy all, and then fixup */
76505 *new = *vma;
76506+ INIT_LIST_HEAD(&new->anon_vma_chain);
76507 *region = *vma->vm_region;
76508 new->vm_region = region;
76509
76510diff --git a/mm/page_alloc.c b/mm/page_alloc.c
76511index ceb4168..d7774f2 100644
76512--- a/mm/page_alloc.c
76513+++ b/mm/page_alloc.c
76514@@ -340,7 +340,7 @@ out:
76515 * This usage means that zero-order pages may not be compound.
76516 */
76517
76518-static void free_compound_page(struct page *page)
76519+void free_compound_page(struct page *page)
76520 {
76521 __free_pages_ok(page, compound_order(page));
76522 }
76523@@ -693,6 +693,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
76524 int i;
76525 int bad = 0;
76526
76527+#ifdef CONFIG_PAX_MEMORY_SANITIZE
76528+ unsigned long index = 1UL << order;
76529+#endif
76530+
76531 trace_mm_page_free(page, order);
76532 kmemcheck_free_shadow(page, order);
76533
76534@@ -708,6 +712,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
76535 debug_check_no_obj_freed(page_address(page),
76536 PAGE_SIZE << order);
76537 }
76538+
76539+#ifdef CONFIG_PAX_MEMORY_SANITIZE
76540+ for (; index; --index)
76541+ sanitize_highpage(page + index - 1);
76542+#endif
76543+
76544 arch_free_page(page, order);
76545 kernel_map_pages(page, 1 << order, 0);
76546
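
The PAX_MEMORY_SANITIZE hunks above scrub pages at free time, which is also why prep_new_page() may skip __GFP_ZERO pre-zeroing further down. A toy model of the free-time scrub (fixed-size pool, invented names):

#include <assert.h>
#include <string.h>

#define PAGE_SZ 64

static unsigned char page_pool[4][PAGE_SZ];

static void sanitize_free(int idx)
{
	/* scrub stale data the moment the page is freed */
	memset(page_pool[idx], 0, PAGE_SZ);
}

static unsigned char *alloc_page_zeroed(int idx)
{
	/* no memset needed here: freeing already sanitized it */
	return page_pool[idx];
}

int main(void)
{
	memset(page_pool[0], 0xAA, PAGE_SZ);  /* pretend secret data */
	sanitize_free(0);
	unsigned char *p = alloc_page_zeroed(0);
	for (int i = 0; i < PAGE_SZ; i++)
		assert(p[i] == 0);
	return 0;
}
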
76547@@ -849,8 +859,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
76548 arch_alloc_page(page, order);
76549 kernel_map_pages(page, 1 << order, 1);
76550
76551+#ifndef CONFIG_PAX_MEMORY_SANITIZE
76552 if (gfp_flags & __GFP_ZERO)
76553 prep_zero_page(page, order, gfp_flags);
76554+#endif
76555
76556 if (order && (gfp_flags & __GFP_COMP))
76557 prep_compound_page(page, order);
76558@@ -3684,7 +3696,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
76559 unsigned long pfn;
76560
76561 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
76562+#ifdef CONFIG_X86_32
76563+ /* pfn_valid_within() caused boot failures in VMware 8 on
76564+ 32bit vanilla kernels, so use the strict pfn_valid() here */
76565+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
76566+#else
76567 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
76568+#endif
76569 return 1;
76570 }
76571 return 0;
76572diff --git a/mm/percpu.c b/mm/percpu.c
76573index ddc5efb..f632d2c 100644
76574--- a/mm/percpu.c
76575+++ b/mm/percpu.c
76576@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
76577 static unsigned int pcpu_high_unit_cpu __read_mostly;
76578
76579 /* the address of the first chunk which starts with the kernel static area */
76580-void *pcpu_base_addr __read_mostly;
76581+void *pcpu_base_addr __read_only;
76582 EXPORT_SYMBOL_GPL(pcpu_base_addr);
76583
76584 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
76585diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
76586index 926b466..b23df53 100644
76587--- a/mm/process_vm_access.c
76588+++ b/mm/process_vm_access.c
76589@@ -13,6 +13,7 @@
76590 #include <linux/uio.h>
76591 #include <linux/sched.h>
76592 #include <linux/highmem.h>
76593+#include <linux/security.h>
76594 #include <linux/ptrace.h>
76595 #include <linux/slab.h>
76596 #include <linux/syscalls.h>
76597@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
76598 size_t iov_l_curr_offset = 0;
76599 ssize_t iov_len;
76600
76601+ return -ENOSYS; // PaX: until properly audited
76602+
76603 /*
76604 * Work out how many pages of struct pages we're going to need
76605 * when eventually calling get_user_pages
76606 */
76607 for (i = 0; i < riovcnt; i++) {
76608 iov_len = rvec[i].iov_len;
76609- if (iov_len > 0) {
76610- nr_pages_iov = ((unsigned long)rvec[i].iov_base
76611- + iov_len)
76612- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
76613- / PAGE_SIZE + 1;
76614- nr_pages = max(nr_pages, nr_pages_iov);
76615- }
76616+ if (iov_len <= 0)
76617+ continue;
76618+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
76619+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
76620+ nr_pages = max(nr_pages, nr_pages_iov);
76621 }
76622
76623 if (nr_pages == 0)
76624@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
76625 goto free_proc_pages;
76626 }
76627
76628+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
76629+ rc = -EPERM;
76630+ goto put_task_struct;
76631+ }
76632+
76633 mm = mm_access(task, PTRACE_MODE_ATTACH);
76634 if (!mm || IS_ERR(mm)) {
76635 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
76636diff --git a/mm/rmap.c b/mm/rmap.c
76637index 2ee1ef0..2e175ba 100644
76638--- a/mm/rmap.c
76639+++ b/mm/rmap.c
76640@@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
76641 struct anon_vma *anon_vma = vma->anon_vma;
76642 struct anon_vma_chain *avc;
76643
76644+#ifdef CONFIG_PAX_SEGMEXEC
76645+ struct anon_vma_chain *avc_m = NULL;
76646+#endif
76647+
76648 might_sleep();
76649 if (unlikely(!anon_vma)) {
76650 struct mm_struct *mm = vma->vm_mm;
76651@@ -172,6 +176,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
76652 if (!avc)
76653 goto out_enomem;
76654
76655+#ifdef CONFIG_PAX_SEGMEXEC
76656+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
76657+ if (!avc_m)
76658+ goto out_enomem_free_avc;
76659+#endif
76660+
76661 anon_vma = find_mergeable_anon_vma(vma);
76662 allocated = NULL;
76663 if (!anon_vma) {
76664@@ -185,6 +195,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
76665 /* page_table_lock to protect against threads */
76666 spin_lock(&mm->page_table_lock);
76667 if (likely(!vma->anon_vma)) {
76668+
76669+#ifdef CONFIG_PAX_SEGMEXEC
76670+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
76671+
76672+ if (vma_m) {
76673+ BUG_ON(vma_m->anon_vma);
76674+ vma_m->anon_vma = anon_vma;
76675+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
76676+ avc_m = NULL;
76677+ }
76678+#endif
76679+
76680 vma->anon_vma = anon_vma;
76681 anon_vma_chain_link(vma, avc, anon_vma);
76682 allocated = NULL;
76683@@ -195,12 +217,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
76684
76685 if (unlikely(allocated))
76686 put_anon_vma(allocated);
76687+
76688+#ifdef CONFIG_PAX_SEGMEXEC
76689+ if (unlikely(avc_m))
76690+ anon_vma_chain_free(avc_m);
76691+#endif
76692+
76693 if (unlikely(avc))
76694 anon_vma_chain_free(avc);
76695 }
76696 return 0;
76697
76698 out_enomem_free_avc:
76699+
76700+#ifdef CONFIG_PAX_SEGMEXEC
76701+ if (avc_m)
76702+ anon_vma_chain_free(avc_m);
76703+#endif
76704+
76705 anon_vma_chain_free(avc);
76706 out_enomem:
76707 return -ENOMEM;
76708@@ -236,7 +270,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
76709 * Attach the anon_vmas from src to dst.
76710 * Returns 0 on success, -ENOMEM on failure.
76711 */
76712-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
76713+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
76714 {
76715 struct anon_vma_chain *avc, *pavc;
76716 struct anon_vma *root = NULL;
76717@@ -269,7 +303,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
76718 * the corresponding VMA in the parent process is attached to.
76719 * Returns 0 on success, non-zero on failure.
76720 */
76721-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
76722+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
76723 {
76724 struct anon_vma_chain *avc;
76725 struct anon_vma *anon_vma;
76726diff --git a/mm/shmem.c b/mm/shmem.c
76727index 50c5b8f..0bc87f7 100644
76728--- a/mm/shmem.c
76729+++ b/mm/shmem.c
76730@@ -31,7 +31,7 @@
76731 #include <linux/export.h>
76732 #include <linux/swap.h>
76733
76734-static struct vfsmount *shm_mnt;
76735+struct vfsmount *shm_mnt;
76736
76737 #ifdef CONFIG_SHMEM
76738 /*
76739@@ -75,7 +75,7 @@ static struct vfsmount *shm_mnt;
76740 #define BOGO_DIRENT_SIZE 20
76741
76742 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
76743-#define SHORT_SYMLINK_LEN 128
76744+#define SHORT_SYMLINK_LEN 64
76745
76746 /*
76747 * shmem_fallocate and shmem_writepage communicate via inode->i_private
76748@@ -2112,6 +2112,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
76749 static int shmem_xattr_validate(const char *name)
76750 {
76751 struct { const char *prefix; size_t len; } arr[] = {
76752+
76753+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
76754+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
76755+#endif
76756+
76757 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
76758 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
76759 };
76760@@ -2167,6 +2172,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
76761 if (err)
76762 return err;
76763
76764+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
76765+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
76766+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
76767+ return -EOPNOTSUPP;
76768+ if (size > 8)
76769+ return -EINVAL;
76770+ }
76771+#endif
76772+
76773 return simple_xattr_set(&info->xattrs, name, value, size, flags);
76774 }
76775
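
The shmem gate above admits exactly one "user." attribute, user.pax.flags, capped at 8 bytes, so PaX flag markings can ride on tmpfs files without opening up arbitrary user xattrs. A sketch of the check (error codes modeled as small negatives):

#include <assert.h>
#include <string.h>

static int validate_user_xattr(const char *name, size_t size)
{
	if (strncmp(name, "user.", 5) == 0) {
		if (strcmp(name, "user.pax.flags") != 0)
			return -1;   /* modeled as -EOPNOTSUPP */
		if (size > 8)
			return -2;   /* modeled as -EINVAL */
	}
	return 0;
}

int main(void)
{
	assert(validate_user_xattr("user.pax.flags", 4) == 0);
	assert(validate_user_xattr("user.pax.flags", 16) == -2);
	assert(validate_user_xattr("user.foo", 1) == -1);
	return 0;
}
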
76776@@ -2466,8 +2480,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
76777 int err = -ENOMEM;
76778
76779 /* Round up to L1_CACHE_BYTES to resist false sharing */
76780- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
76781- L1_CACHE_BYTES), GFP_KERNEL);
76782+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
76783 if (!sbinfo)
76784 return -ENOMEM;
76785
76786diff --git a/mm/slab.c b/mm/slab.c
76787index 33d3363..93c6810 100644
76788--- a/mm/slab.c
76789+++ b/mm/slab.c
76790@@ -164,7 +164,7 @@ static bool pfmemalloc_active __read_mostly;
76791
76792 /* Legal flag mask for kmem_cache_create(). */
76793 #if DEBUG
76794-# define CREATE_MASK (SLAB_RED_ZONE | \
76795+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
76796 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
76797 SLAB_CACHE_DMA | \
76798 SLAB_STORE_USER | \
76799@@ -172,7 +172,7 @@ static bool pfmemalloc_active __read_mostly;
76800 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
76801 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
76802 #else
76803-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
76804+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
76805 SLAB_CACHE_DMA | \
76806 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
76807 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
76808@@ -322,7 +322,7 @@ struct kmem_list3 {
76809 * Need this for bootstrapping a per node allocator.
76810 */
76811 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
76812-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
76813+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
76814 #define CACHE_CACHE 0
76815 #define SIZE_AC MAX_NUMNODES
76816 #define SIZE_L3 (2 * MAX_NUMNODES)
76817@@ -423,10 +423,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
76818 if ((x)->max_freeable < i) \
76819 (x)->max_freeable = i; \
76820 } while (0)
76821-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
76822-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
76823-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
76824-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
76825+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
76826+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
76827+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
76828+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
76829 #else
76830 #define STATS_INC_ACTIVE(x) do { } while (0)
76831 #define STATS_DEC_ACTIVE(x) do { } while (0)
76832@@ -534,7 +534,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
76833 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
76834 */
76835 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
76836- const struct slab *slab, void *obj)
76837+ const struct slab *slab, const void *obj)
76838 {
76839 u32 offset = (obj - slab->s_mem);
76840 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
76841@@ -555,12 +555,13 @@ EXPORT_SYMBOL(malloc_sizes);
76842 struct cache_names {
76843 char *name;
76844 char *name_dma;
76845+ char *name_usercopy;
76846 };
76847
76848 static struct cache_names __initdata cache_names[] = {
76849-#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
76850+#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)", .name_usercopy = "size-" #x "(USERCOPY)" },
76851 #include <linux/kmalloc_sizes.h>
76852- {NULL,}
76853+ {NULL}
76854 #undef CACHE
76855 };
76856
76857@@ -721,6 +722,12 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
76858 if (unlikely(gfpflags & GFP_DMA))
76859 return csizep->cs_dmacachep;
76860 #endif
76861+
76862+#ifdef CONFIG_PAX_USERCOPY_SLABS
76863+ if (unlikely(gfpflags & GFP_USERCOPY))
76864+ return csizep->cs_usercopycachep;
76865+#endif
76866+
76867 return csizep->cs_cachep;
76868 }
76869
76870@@ -1676,7 +1683,7 @@ void __init kmem_cache_init(void)
76871 sizes[INDEX_AC].cs_cachep->size = sizes[INDEX_AC].cs_size;
76872 sizes[INDEX_AC].cs_cachep->object_size = sizes[INDEX_AC].cs_size;
76873 sizes[INDEX_AC].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
76874- __kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
76875+ __kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY);
76876 list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches);
76877
76878 if (INDEX_AC != INDEX_L3) {
76879@@ -1685,7 +1692,7 @@ void __init kmem_cache_init(void)
76880 sizes[INDEX_L3].cs_cachep->size = sizes[INDEX_L3].cs_size;
76881 sizes[INDEX_L3].cs_cachep->object_size = sizes[INDEX_L3].cs_size;
76882 sizes[INDEX_L3].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
76883- __kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
76884+ __kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY);
76885 list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches);
76886 }
76887
76888@@ -1705,7 +1712,7 @@ void __init kmem_cache_init(void)
76889 sizes->cs_cachep->size = sizes->cs_size;
76890 sizes->cs_cachep->object_size = sizes->cs_size;
76891 sizes->cs_cachep->align = ARCH_KMALLOC_MINALIGN;
76892- __kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
76893+ __kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY);
76894 list_add(&sizes->cs_cachep->list, &slab_caches);
76895 }
76896 #ifdef CONFIG_ZONE_DMA
76897@@ -1718,6 +1725,17 @@ void __init kmem_cache_init(void)
76898 ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC);
76899 list_add(&sizes->cs_dmacachep->list, &slab_caches);
76900 #endif
76901+
76902+#ifdef CONFIG_PAX_USERCOPY_SLABS
76903+ sizes->cs_usercopycachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
76904+ sizes->cs_usercopycachep->name = names->name_usercopy;
76905+ sizes->cs_usercopycachep->size = sizes->cs_size;
76906+ sizes->cs_usercopycachep->object_size = sizes->cs_size;
76907+ sizes->cs_usercopycachep->align = ARCH_KMALLOC_MINALIGN;
76908+ __kmem_cache_create(sizes->cs_usercopycachep, ARCH_KMALLOC_FLAGS| SLAB_PANIC|SLAB_USERCOPY);
76909+ list_add(&sizes->cs_usercopycachep->list, &slab_caches);
76910+#endif
76911+
76912 sizes++;
76913 names++;
76914 }
76915@@ -4405,10 +4423,10 @@ static int s_show(struct seq_file *m, void *p)
76916 }
76917 /* cpu stats */
76918 {
76919- unsigned long allochit = atomic_read(&cachep->allochit);
76920- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
76921- unsigned long freehit = atomic_read(&cachep->freehit);
76922- unsigned long freemiss = atomic_read(&cachep->freemiss);
76923+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
76924+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
76925+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
76926+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
76927
76928 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
76929 allochit, allocmiss, freehit, freemiss);
76930@@ -4667,13 +4685,71 @@ static int __init slab_proc_init(void)
76931 {
76932 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
76933 #ifdef CONFIG_DEBUG_SLAB_LEAK
76934- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
76935+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
76936 #endif
76937 return 0;
76938 }
76939 module_init(slab_proc_init);
76940 #endif
76941
76942+bool is_usercopy_object(const void *ptr)
76943+{
76944+ struct page *page;
76945+ struct kmem_cache *cachep;
76946+
76947+ if (ZERO_OR_NULL_PTR(ptr))
76948+ return false;
76949+
76950+ if (!slab_is_available())
76951+ return false;
76952+
76953+ if (!virt_addr_valid(ptr))
76954+ return false;
76955+
76956+ page = virt_to_head_page(ptr);
76957+
76958+ if (!PageSlab(page))
76959+ return false;
76960+
76961+ cachep = page->slab_cache;
76962+ return cachep->flags & SLAB_USERCOPY;
76963+}
76964+
76965+#ifdef CONFIG_PAX_USERCOPY
76966+const char *check_heap_object(const void *ptr, unsigned long n)
76967+{
76968+ struct page *page;
76969+ struct kmem_cache *cachep;
76970+ struct slab *slabp;
76971+ unsigned int objnr;
76972+ unsigned long offset;
76973+
76974+ if (ZERO_OR_NULL_PTR(ptr))
76975+ return "<null>";
76976+
76977+ if (!virt_addr_valid(ptr))
76978+ return NULL;
76979+
76980+ page = virt_to_head_page(ptr);
76981+
76982+ if (!PageSlab(page))
76983+ return NULL;
76984+
76985+ cachep = page->slab_cache;
76986+ if (!(cachep->flags & SLAB_USERCOPY))
76987+ return cachep->name;
76988+
76989+ slabp = page->slab_page;
76990+ objnr = obj_to_index(cachep, slabp, ptr);
76991+ BUG_ON(objnr >= cachep->num);
76992+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
76993+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
76994+ return NULL;
76995+
76996+ return cachep->name;
76997+}
76998+#endif
76999+
77000 /**
77001 * ksize - get the actual amount of memory allocated for a given object
77002 * @objp: Pointer to the object
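
The SLAB version of check_heap_object() above boils down to an interval test once the enclosing slab object has been found: the copy may start anywhere inside the object and must also end inside it. A minimal user-space model of that arithmetic (the harness and names are ours, not kernel code):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Models the final test in the hunk:
     *   offset <= cachep->object_size && n <= cachep->object_size - offset
     * obj is the start of the slab object, object_size its usable size. */
    static bool usercopy_span_ok(const char *obj, size_t object_size,
                                 const char *ptr, size_t n)
    {
            size_t offset = (size_t)(ptr - obj);

            return offset <= object_size && n <= object_size - offset;
    }

    int main(void)
    {
            char obj[64];

            printf("%d\n", usercopy_span_ok(obj, sizeof(obj), obj + 16, 48)); /* 1: ends exactly at the object end */
            printf("%d\n", usercopy_span_ok(obj, sizeof(obj), obj + 16, 49)); /* 0: one byte past the object */
            return 0;
    }
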
77003diff --git a/mm/slab_common.c b/mm/slab_common.c
77004index 069a24e6..226a310 100644
77005--- a/mm/slab_common.c
77006+++ b/mm/slab_common.c
77007@@ -127,7 +127,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
77008 err = __kmem_cache_create(s, flags);
77009 if (!err) {
77010
77011- s->refcount = 1;
77012+ atomic_set(&s->refcount, 1);
77013 list_add(&s->list, &slab_caches);
77014
77015 } else {
77016@@ -163,8 +163,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
77017 {
77018 get_online_cpus();
77019 mutex_lock(&slab_mutex);
77020- s->refcount--;
77021- if (!s->refcount) {
77022+ if (atomic_dec_and_test(&s->refcount)) {
77023 list_del(&s->list);
77024
77025 if (!__kmem_cache_shutdown(s)) {
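
The slab_common.c hunks turn kmem_cache.refcount from a plain integer into an atomic_t so the final put is a single decrement-and-test. A toy contrast of the two shapes, using C11 atomics in place of the kernel's atomic_t (an illustration of the pattern only, not of the kernel's locking):

    #include <stdatomic.h>
    #include <stdbool.h>

    static int refcount_plain = 1;            /* old shape */
    static atomic_int refcount_atomic = 1;    /* new shape */

    /* Old pattern: decrement, then re-read. Two threads can interleave
     * between the statements unless an outer lock serializes them. */
    static bool put_plain(void)
    {
            refcount_plain--;
            return refcount_plain == 0;
    }

    /* New pattern, mirroring atomic_dec_and_test(): decrement and test
     * are one indivisible step, so exactly one caller sees zero. */
    static bool put_atomic(void)
    {
            return atomic_fetch_sub(&refcount_atomic, 1) == 1;
    }

    int main(void)
    {
            return put_plain() && put_atomic() ? 0 : 1;
    }
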
77026diff --git a/mm/slob.c b/mm/slob.c
77027index 1e921c5..1ce12c2 100644
77028--- a/mm/slob.c
77029+++ b/mm/slob.c
77030@@ -159,7 +159,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
77031 /*
77032 * Return the size of a slob block.
77033 */
77034-static slobidx_t slob_units(slob_t *s)
77035+static slobidx_t slob_units(const slob_t *s)
77036 {
77037 if (s->units > 0)
77038 return s->units;
77039@@ -169,7 +169,7 @@ static slobidx_t slob_units(slob_t *s)
77040 /*
77041 * Return the next free slob block pointer after this one.
77042 */
77043-static slob_t *slob_next(slob_t *s)
77044+static slob_t *slob_next(const slob_t *s)
77045 {
77046 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
77047 slobidx_t next;
77048@@ -184,14 +184,14 @@ static slob_t *slob_next(slob_t *s)
77049 /*
77050 * Returns true if s is the last free block in its page.
77051 */
77052-static int slob_last(slob_t *s)
77053+static int slob_last(const slob_t *s)
77054 {
77055 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
77056 }
77057
77058-static void *slob_new_pages(gfp_t gfp, int order, int node)
77059+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
77060 {
77061- void *page;
77062+ struct page *page;
77063
77064 #ifdef CONFIG_NUMA
77065 if (node != NUMA_NO_NODE)
77066@@ -203,14 +203,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
77067 if (!page)
77068 return NULL;
77069
77070- return page_address(page);
77071+ __SetPageSlab(page);
77072+ return page;
77073 }
77074
77075-static void slob_free_pages(void *b, int order)
77076+static void slob_free_pages(struct page *sp, int order)
77077 {
77078 if (current->reclaim_state)
77079 current->reclaim_state->reclaimed_slab += 1 << order;
77080- free_pages((unsigned long)b, order);
77081+ __ClearPageSlab(sp);
77082+ reset_page_mapcount(sp);
77083+ sp->private = 0;
77084+ __free_pages(sp, order);
77085 }
77086
77087 /*
77088@@ -315,15 +319,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
77089
77090 /* Not enough space: must allocate a new page */
77091 if (!b) {
77092- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
77093- if (!b)
77094+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
77095+ if (!sp)
77096 return NULL;
77097- sp = virt_to_page(b);
77098- __SetPageSlab(sp);
77099+ b = page_address(sp);
77100
77101 spin_lock_irqsave(&slob_lock, flags);
77102 sp->units = SLOB_UNITS(PAGE_SIZE);
77103 sp->freelist = b;
77104+ sp->private = 0;
77105 INIT_LIST_HEAD(&sp->list);
77106 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
77107 set_slob_page_free(sp, slob_list);
77108@@ -361,9 +365,7 @@ static void slob_free(void *block, int size)
77109 if (slob_page_free(sp))
77110 clear_slob_page_free(sp);
77111 spin_unlock_irqrestore(&slob_lock, flags);
77112- __ClearPageSlab(sp);
77113- reset_page_mapcount(sp);
77114- slob_free_pages(b, 0);
77115+ slob_free_pages(sp, 0);
77116 return;
77117 }
77118
77119@@ -426,11 +428,10 @@ out:
77120 */
77121
77122 static __always_inline void *
77123-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
77124+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
77125 {
77126- unsigned int *m;
77127- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
77128- void *ret;
77129+ slob_t *m;
77130+ void *ret = NULL;
77131
77132 gfp &= gfp_allowed_mask;
77133
77134@@ -444,20 +445,23 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
77135
77136 if (!m)
77137 return NULL;
77138- *m = size;
77139+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
77140+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
77141+ m[0].units = size;
77142+ m[1].units = align;
77143 ret = (void *)m + align;
77144
77145 trace_kmalloc_node(caller, ret,
77146 size, size + align, gfp, node);
77147 } else {
77148 unsigned int order = get_order(size);
77149+ struct page *page;
77150
77151 if (likely(order))
77152 gfp |= __GFP_COMP;
77153- ret = slob_new_pages(gfp, order, node);
77154- if (ret) {
77155- struct page *page;
77156- page = virt_to_page(ret);
77157+ page = slob_new_pages(gfp, order, node);
77158+ if (page) {
77159+ ret = page_address(page);
77160 page->private = size;
77161 }
77162
77163@@ -465,7 +469,17 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
77164 size, PAGE_SIZE << order, gfp, node);
77165 }
77166
77167- kmemleak_alloc(ret, size, 1, gfp);
77168+ return ret;
77169+}
77170+
77171+static __always_inline void *
77172+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
77173+{
77174+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
77175+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
77176+
77177+ if (!ZERO_OR_NULL_PTR(ret))
77178+ kmemleak_alloc(ret, size, 1, gfp);
77179 return ret;
77180 }
77181
77182@@ -501,15 +515,91 @@ void kfree(const void *block)
77183 kmemleak_free(block);
77184
77185 sp = virt_to_page(block);
77186- if (PageSlab(sp)) {
77187+ VM_BUG_ON(!PageSlab(sp));
77188+ if (!sp->private) {
77189 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
77190- unsigned int *m = (unsigned int *)(block - align);
77191- slob_free(m, *m + align);
77192- } else
77193+ slob_t *m = (slob_t *)(block - align);
77194+ slob_free(m, m[0].units + align);
77195+ } else {
77196+ __ClearPageSlab(sp);
77197+ reset_page_mapcount(sp);
77198+ sp->private = 0;
77199 put_page(sp);
77200+ }
77201 }
77202 EXPORT_SYMBOL(kfree);
77203
77204+bool is_usercopy_object(const void *ptr)
77205+{
77206+ if (!slab_is_available())
77207+ return false;
77208+
77209+ // PAX: TODO
77210+
77211+ return false;
77212+}
77213+
77214+#ifdef CONFIG_PAX_USERCOPY
77215+const char *check_heap_object(const void *ptr, unsigned long n)
77216+{
77217+ struct page *page;
77218+ const slob_t *free;
77219+ const void *base;
77220+ unsigned long flags;
77221+
77222+ if (ZERO_OR_NULL_PTR(ptr))
77223+ return "<null>";
77224+
77225+ if (!virt_addr_valid(ptr))
77226+ return NULL;
77227+
77228+ page = virt_to_head_page(ptr);
77229+ if (!PageSlab(page))
77230+ return NULL;
77231+
77232+ if (page->private) {
77233+ base = page;
77234+ if (base <= ptr && n <= page->private - (ptr - base))
77235+ return NULL;
77236+ return "<slob>";
77237+ }
77238+
77239+ /* some tricky double walking to find the chunk */
77240+ spin_lock_irqsave(&slob_lock, flags);
77241+ base = (void *)((unsigned long)ptr & PAGE_MASK);
77242+ free = page->freelist;
77243+
77244+ while (!slob_last(free) && (void *)free <= ptr) {
77245+ base = free + slob_units(free);
77246+ free = slob_next(free);
77247+ }
77248+
77249+ while (base < (void *)free) {
77250+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
77251+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
77252+ int offset;
77253+
77254+ if (ptr < base + align)
77255+ break;
77256+
77257+ offset = ptr - base - align;
77258+ if (offset >= m) {
77259+ base += size;
77260+ continue;
77261+ }
77262+
77263+ if (n > m - offset)
77264+ break;
77265+
77266+ spin_unlock_irqrestore(&slob_lock, flags);
77267+ return NULL;
77268+ }
77269+
77270+ spin_unlock_irqrestore(&slob_lock, flags);
77271+ return "<slob>";
77272+}
77273+#endif
77274+
77275 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
77276 size_t ksize(const void *block)
77277 {
77278@@ -520,10 +610,11 @@ size_t ksize(const void *block)
77279 return 0;
77280
77281 sp = virt_to_page(block);
77282- if (PageSlab(sp)) {
77283+ VM_BUG_ON(!PageSlab(sp));
77284+ if (!sp->private) {
77285 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
77286- unsigned int *m = (unsigned int *)(block - align);
77287- return SLOB_UNITS(*m) * SLOB_UNIT;
77288+ slob_t *m = (slob_t *)(block - align);
77289+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
77290 } else
77291 return sp->private;
77292 }
77293@@ -550,23 +641,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
77294
77295 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
77296 {
77297- void *b;
77298+ void *b = NULL;
77299
77300 flags &= gfp_allowed_mask;
77301
77302 lockdep_trace_alloc(flags);
77303
77304+#ifdef CONFIG_PAX_USERCOPY_SLABS
77305+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
77306+#else
77307 if (c->size < PAGE_SIZE) {
77308 b = slob_alloc(c->size, flags, c->align, node);
77309 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
77310 SLOB_UNITS(c->size) * SLOB_UNIT,
77311 flags, node);
77312 } else {
77313- b = slob_new_pages(flags, get_order(c->size), node);
77314+ struct page *sp;
77315+
77316+ sp = slob_new_pages(flags, get_order(c->size), node);
77317+ if (sp) {
77318+ b = page_address(sp);
77319+ sp->private = c->size;
77320+ }
77321 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
77322 PAGE_SIZE << get_order(c->size),
77323 flags, node);
77324 }
77325+#endif
77326
77327 if (c->ctor)
77328 c->ctor(b);
77329@@ -578,10 +679,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
77330
77331 static void __kmem_cache_free(void *b, int size)
77332 {
77333- if (size < PAGE_SIZE)
77334+ struct page *sp;
77335+
77336+ sp = virt_to_page(b);
77337+ BUG_ON(!PageSlab(sp));
77338+ if (!sp->private)
77339 slob_free(b, size);
77340 else
77341- slob_free_pages(b, get_order(size));
77342+ slob_free_pages(sp, get_order(size));
77343 }
77344
77345 static void kmem_rcu_free(struct rcu_head *head)
77346@@ -594,17 +699,31 @@ static void kmem_rcu_free(struct rcu_head *head)
77347
77348 void kmem_cache_free(struct kmem_cache *c, void *b)
77349 {
77350+ int size = c->size;
77351+
77352+#ifdef CONFIG_PAX_USERCOPY_SLABS
77353+ if (size + c->align < PAGE_SIZE) {
77354+ size += c->align;
77355+ b -= c->align;
77356+ }
77357+#endif
77358+
77359 kmemleak_free_recursive(b, c->flags);
77360 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
77361 struct slob_rcu *slob_rcu;
77362- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
77363- slob_rcu->size = c->size;
77364+ slob_rcu = b + (size - sizeof(struct slob_rcu));
77365+ slob_rcu->size = size;
77366 call_rcu(&slob_rcu->head, kmem_rcu_free);
77367 } else {
77368- __kmem_cache_free(b, c->size);
77369+ __kmem_cache_free(b, size);
77370 }
77371
77372+#ifdef CONFIG_PAX_USERCOPY_SLABS
77373+ trace_kfree(_RET_IP_, b);
77374+#else
77375 trace_kmem_cache_free(_RET_IP_, b);
77376+#endif
77377+
77378 }
77379 EXPORT_SYMBOL(kmem_cache_free);
77380
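
The reworked SLOB kmalloc path above stores two metadata slots in front of each allocation, size in m[0].units and alignment in m[1].units, where the old code kept only a single unsigned int size. kfree() and ksize() then recover both by stepping back over the header. A much-simplified user-space model (real SLOB counts in SLOB_UNITs and shares pages between allocations; the types here are stand-ins):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { long units; } slob_t;   /* stand-in for the kernel's slob_t */

    /* allocate `size` payload bytes behind an `align`-byte header;
     * align must be at least 2 * sizeof(slob_t) to hold both slots */
    static void *toy_kmalloc(size_t size, size_t align)
    {
            slob_t *m = malloc(align + size);

            if (!m)
                    return NULL;
            m[0].units = (long)size;      /* mirrors m[0].units = size  */
            m[1].units = (long)align;     /* mirrors m[1].units = align */
            return (char *)m + align;
    }

    static void toy_kfree(void *block, size_t align)
    {
            slob_t *m = (slob_t *)((char *)block - align);

            /* kfree() in the hunk recomputes the same compile-time align,
             * then reads the size back: slob_free(m, m[0].units + align) */
            printf("freeing %ld payload bytes (align %ld)\n",
                   m[0].units, m[1].units);
            free(m);
    }

    int main(void)
    {
            size_t align = 2 * sizeof(slob_t);
            void *p = toy_kmalloc(100, align);

            if (p)
                    toy_kfree(p, align);
            return 0;
    }
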
77381diff --git a/mm/slub.c b/mm/slub.c
77382index a0d6984..e280e5d 100644
77383--- a/mm/slub.c
77384+++ b/mm/slub.c
77385@@ -201,7 +201,7 @@ struct track {
77386
77387 enum track_item { TRACK_ALLOC, TRACK_FREE };
77388
77389-#ifdef CONFIG_SYSFS
77390+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
77391 static int sysfs_slab_add(struct kmem_cache *);
77392 static int sysfs_slab_alias(struct kmem_cache *, const char *);
77393 static void sysfs_slab_remove(struct kmem_cache *);
77394@@ -521,7 +521,7 @@ static void print_track(const char *s, struct track *t)
77395 if (!t->addr)
77396 return;
77397
77398- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
77399+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
77400 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
77401 #ifdef CONFIG_STACKTRACE
77402 {
77403@@ -2623,6 +2623,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
77404
77405 page = virt_to_head_page(x);
77406
77407+ BUG_ON(!PageSlab(page));
77408+
77409 if (kmem_cache_debug(s) && page->slab != s) {
77410 pr_err("kmem_cache_free: Wrong slab cache. %s but object"
77411 " is from %s\n", page->slab->name, s->name);
77412@@ -2663,7 +2665,7 @@ static int slub_min_objects;
77413 * Merge control. If this is set then no merging of slab caches will occur.
77414 * (Could be removed. This was introduced to pacify the merge skeptics.)
77415 */
77416-static int slub_nomerge;
77417+static int slub_nomerge = 1;
77418
77419 /*
77420 * Calculate the order of allocation given an slab object size.
77421@@ -3225,6 +3227,10 @@ EXPORT_SYMBOL(kmalloc_caches);
77422 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
77423 #endif
77424
77425+#ifdef CONFIG_PAX_USERCOPY_SLABS
77426+static struct kmem_cache *kmalloc_usercopy_caches[SLUB_PAGE_SHIFT];
77427+#endif
77428+
77429 static int __init setup_slub_min_order(char *str)
77430 {
77431 get_option(&str, &slub_min_order);
77432@@ -3342,6 +3348,13 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
77433 return kmalloc_dma_caches[index];
77434
77435 #endif
77436+
77437+#ifdef CONFIG_PAX_USERCOPY_SLABS
77438+ if (flags & SLAB_USERCOPY)
77439+ return kmalloc_usercopy_caches[index];
77440+
77441+#endif
77442+
77443 return kmalloc_caches[index];
77444 }
77445
77446@@ -3410,6 +3423,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
77447 EXPORT_SYMBOL(__kmalloc_node);
77448 #endif
77449
77450+bool is_usercopy_object(const void *ptr)
77451+{
77452+ struct page *page;
77453+ struct kmem_cache *s;
77454+
77455+ if (ZERO_OR_NULL_PTR(ptr))
77456+ return false;
77457+
77458+ if (!slab_is_available())
77459+ return false;
77460+
77461+ if (!virt_addr_valid(ptr))
77462+ return false;
77463+
77464+ page = virt_to_head_page(ptr);
77465+
77466+ if (!PageSlab(page))
77467+ return false;
77468+
77469+ s = page->slab;
77470+ return s->flags & SLAB_USERCOPY;
77471+}
77472+
77473+#ifdef CONFIG_PAX_USERCOPY
77474+const char *check_heap_object(const void *ptr, unsigned long n)
77475+{
77476+ struct page *page;
77477+ struct kmem_cache *s;
77478+ unsigned long offset;
77479+
77480+ if (ZERO_OR_NULL_PTR(ptr))
77481+ return "<null>";
77482+
77483+ if (!virt_addr_valid(ptr))
77484+ return NULL;
77485+
77486+ page = virt_to_head_page(ptr);
77487+
77488+ if (!PageSlab(page))
77489+ return NULL;
77490+
77491+ s = page->slab;
77492+ if (!(s->flags & SLAB_USERCOPY))
77493+ return s->name;
77494+
77495+ offset = (ptr - page_address(page)) % s->size;
77496+ if (offset <= s->object_size && n <= s->object_size - offset)
77497+ return NULL;
77498+
77499+ return s->name;
77500+}
77501+#endif
77502+
77503 size_t ksize(const void *object)
77504 {
77505 struct page *page;
77506@@ -3684,7 +3750,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
77507 int node;
77508
77509 list_add(&s->list, &slab_caches);
77510- s->refcount = -1;
77511+ atomic_set(&s->refcount, -1);
77512
77513 for_each_node_state(node, N_NORMAL_MEMORY) {
77514 struct kmem_cache_node *n = get_node(s, node);
77515@@ -3807,17 +3873,17 @@ void __init kmem_cache_init(void)
77516
77517 /* Caches that are not of the two-to-the-power-of size */
77518 if (KMALLOC_MIN_SIZE <= 32) {
77519- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
77520+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
77521 caches++;
77522 }
77523
77524 if (KMALLOC_MIN_SIZE <= 64) {
77525- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
77526+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
77527 caches++;
77528 }
77529
77530 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
77531- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
77532+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
77533 caches++;
77534 }
77535
77536@@ -3859,6 +3925,22 @@ void __init kmem_cache_init(void)
77537 }
77538 }
77539 #endif
77540+
77541+#ifdef CONFIG_PAX_USERCOPY_SLABS
77542+ for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
77543+ struct kmem_cache *s = kmalloc_caches[i];
77544+
77545+ if (s && s->size) {
77546+ char *name = kasprintf(GFP_NOWAIT,
77547+ "usercopy-kmalloc-%d", s->object_size);
77548+
77549+ BUG_ON(!name);
77550+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(name,
77551+ s->object_size, SLAB_USERCOPY);
77552+ }
77553+ }
77554+#endif
77555+
77556 printk(KERN_INFO
77557 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
77558 " CPUs=%d, Nodes=%d\n",
77559@@ -3885,7 +3967,7 @@ static int slab_unmergeable(struct kmem_cache *s)
77560 /*
77561 * We may have set a slab to be unmergeable during bootstrap.
77562 */
77563- if (s->refcount < 0)
77564+ if (atomic_read(&s->refcount) < 0)
77565 return 1;
77566
77567 return 0;
77568@@ -3939,7 +4021,7 @@ struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
77569
77570 s = find_mergeable(size, align, flags, name, ctor);
77571 if (s) {
77572- s->refcount++;
77573+ atomic_inc(&s->refcount);
77574 /*
77575 * Adjust the object sizes so that we clear
77576 * the complete object on kzalloc.
77577@@ -3948,7 +4030,7 @@ struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
77578 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
77579
77580 if (sysfs_slab_alias(s, name)) {
77581- s->refcount--;
77582+ atomic_dec(&s->refcount);
77583 s = NULL;
77584 }
77585 }
77586@@ -4063,7 +4145,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
77587 }
77588 #endif
77589
77590-#ifdef CONFIG_SYSFS
77591+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
77592 static int count_inuse(struct page *page)
77593 {
77594 return page->inuse;
77595@@ -4450,12 +4532,12 @@ static void resiliency_test(void)
77596 validate_slab_cache(kmalloc_caches[9]);
77597 }
77598 #else
77599-#ifdef CONFIG_SYSFS
77600+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
77601 static void resiliency_test(void) {};
77602 #endif
77603 #endif
77604
77605-#ifdef CONFIG_SYSFS
77606+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
77607 enum slab_stat_type {
77608 SL_ALL, /* All slabs */
77609 SL_PARTIAL, /* Only partially allocated slabs */
77610@@ -4699,7 +4781,7 @@ SLAB_ATTR_RO(ctor);
77611
77612 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
77613 {
77614- return sprintf(buf, "%d\n", s->refcount - 1);
77615+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
77616 }
77617 SLAB_ATTR_RO(aliases);
77618
77619@@ -5261,6 +5343,7 @@ static char *create_unique_id(struct kmem_cache *s)
77620 return name;
77621 }
77622
77623+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
77624 static int sysfs_slab_add(struct kmem_cache *s)
77625 {
77626 int err;
77627@@ -5323,6 +5406,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
77628 kobject_del(&s->kobj);
77629 kobject_put(&s->kobj);
77630 }
77631+#endif
77632
77633 /*
77634 * Need to buffer aliases during bootup until sysfs becomes
77635@@ -5336,6 +5420,7 @@ struct saved_alias {
77636
77637 static struct saved_alias *alias_list;
77638
77639+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
77640 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
77641 {
77642 struct saved_alias *al;
77643@@ -5358,6 +5443,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
77644 alias_list = al;
77645 return 0;
77646 }
77647+#endif
77648
77649 static int __init slab_sysfs_init(void)
77650 {
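
SLUB's check_heap_object() above can be simpler than SLAB's because objects are packed at a fixed stride (s->size) from the page base, so the offset inside the current object is a single modulo. A condensed model (the stride vs. object_size gap reflects per-object metadata or red zones; the values below are invented):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* mirrors: offset = (ptr - page_address(page)) % s->size;
     *          offset <= s->object_size && n <= s->object_size - offset */
    static bool slub_copy_ok(const char *page_base, size_t stride,
                             size_t object_size, const char *ptr, size_t n)
    {
            size_t offset = (size_t)(ptr - page_base) % stride;

            return offset <= object_size && n <= object_size - offset;
    }

    int main(void)
    {
            char page[4096];

            /* 64-byte slots holding 56 usable bytes each */
            printf("%d\n", slub_copy_ok(page, 64, 56, page + 64 + 8, 40)); /* 1 */
            printf("%d\n", slub_copy_ok(page, 64, 56, page + 64 + 8, 56)); /* 0: crosses into the next slot */
            return 0;
    }
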
77651diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
77652index 1b7e22a..3fcd4f3 100644
77653--- a/mm/sparse-vmemmap.c
77654+++ b/mm/sparse-vmemmap.c
77655@@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
77656 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
77657 if (!p)
77658 return NULL;
77659- pud_populate(&init_mm, pud, p);
77660+ pud_populate_kernel(&init_mm, pud, p);
77661 }
77662 return pud;
77663 }
77664@@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
77665 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
77666 if (!p)
77667 return NULL;
77668- pgd_populate(&init_mm, pgd, p);
77669+ pgd_populate_kernel(&init_mm, pgd, p);
77670 }
77671 return pgd;
77672 }
77673diff --git a/mm/swap.c b/mm/swap.c
77674index 6310dc2..3662b3f 100644
77675--- a/mm/swap.c
77676+++ b/mm/swap.c
77677@@ -30,6 +30,7 @@
77678 #include <linux/backing-dev.h>
77679 #include <linux/memcontrol.h>
77680 #include <linux/gfp.h>
77681+#include <linux/hugetlb.h>
77682
77683 #include "internal.h"
77684
77685@@ -72,6 +73,8 @@ static void __put_compound_page(struct page *page)
77686
77687 __page_cache_release(page);
77688 dtor = get_compound_page_dtor(page);
77689+ if (!PageHuge(page))
77690+ BUG_ON(dtor != free_compound_page);
77691 (*dtor)(page);
77692 }
77693
77694diff --git a/mm/swapfile.c b/mm/swapfile.c
77695index f91a255..9dcac21 100644
77696--- a/mm/swapfile.c
77697+++ b/mm/swapfile.c
77698@@ -64,7 +64,7 @@ static DEFINE_MUTEX(swapon_mutex);
77699
77700 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
77701 /* Activity counter to indicate that a swapon or swapoff has occurred */
77702-static atomic_t proc_poll_event = ATOMIC_INIT(0);
77703+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
77704
77705 static inline unsigned char swap_count(unsigned char ent)
77706 {
77707@@ -1601,7 +1601,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
77708 }
77709 filp_close(swap_file, NULL);
77710 err = 0;
77711- atomic_inc(&proc_poll_event);
77712+ atomic_inc_unchecked(&proc_poll_event);
77713 wake_up_interruptible(&proc_poll_wait);
77714
77715 out_dput:
77716@@ -1618,8 +1618,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
77717
77718 poll_wait(file, &proc_poll_wait, wait);
77719
77720- if (seq->poll_event != atomic_read(&proc_poll_event)) {
77721- seq->poll_event = atomic_read(&proc_poll_event);
77722+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
77723+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
77724 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
77725 }
77726
77727@@ -1717,7 +1717,7 @@ static int swaps_open(struct inode *inode, struct file *file)
77728 return ret;
77729
77730 seq = file->private_data;
77731- seq->poll_event = atomic_read(&proc_poll_event);
77732+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
77733 return 0;
77734 }
77735
77736@@ -2059,7 +2059,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
77737 (frontswap_map) ? "FS" : "");
77738
77739 mutex_unlock(&swapon_mutex);
77740- atomic_inc(&proc_poll_event);
77741+ atomic_inc_unchecked(&proc_poll_event);
77742 wake_up_interruptible(&proc_poll_wait);
77743
77744 if (S_ISREG(inode->i_mode))
77745diff --git a/mm/util.c b/mm/util.c
77746index dc3036c..b6c7c9d 100644
77747--- a/mm/util.c
77748+++ b/mm/util.c
77749@@ -292,6 +292,12 @@ done:
77750 void arch_pick_mmap_layout(struct mm_struct *mm)
77751 {
77752 mm->mmap_base = TASK_UNMAPPED_BASE;
77753+
77754+#ifdef CONFIG_PAX_RANDMMAP
77755+ if (mm->pax_flags & MF_PAX_RANDMMAP)
77756+ mm->mmap_base += mm->delta_mmap;
77757+#endif
77758+
77759 mm->get_unmapped_area = arch_get_unmapped_area;
77760 mm->unmap_area = arch_unmap_area;
77761 }
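
The arch_pick_mmap_layout() hunk shifts the mmap base by a per-process random delta whenever the task is flagged for PaX RANDMMAP. The computation in isolation (the constants and the flag bit are placeholders; the real delta_mmap is chosen at exec time):

    #include <stdio.h>

    #define TASK_UNMAPPED_BASE 0x40000000UL   /* placeholder, arch-specific */
    #define MF_PAX_RANDMMAP    0x01UL         /* placeholder flag bit       */

    static unsigned long pick_mmap_base(unsigned long pax_flags,
                                        unsigned long delta_mmap)
    {
            unsigned long base = TASK_UNMAPPED_BASE;

            if (pax_flags & MF_PAX_RANDMMAP)
                    base += delta_mmap;   /* mirrors mm->mmap_base += mm->delta_mmap */
            return base;
    }

    int main(void)
    {
            printf("%#lx\n", pick_mmap_base(0, 0x123000UL));               /* fixed base   */
            printf("%#lx\n", pick_mmap_base(MF_PAX_RANDMMAP, 0x123000UL)); /* shifted base */
            return 0;
    }
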
77762diff --git a/mm/vmalloc.c b/mm/vmalloc.c
77763index 78e0830..bc6bbd8 100644
77764--- a/mm/vmalloc.c
77765+++ b/mm/vmalloc.c
77766@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
77767
77768 pte = pte_offset_kernel(pmd, addr);
77769 do {
77770- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
77771- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
77772+
77773+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
77774+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
77775+ BUG_ON(!pte_exec(*pte));
77776+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
77777+ continue;
77778+ }
77779+#endif
77780+
77781+ {
77782+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
77783+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
77784+ }
77785 } while (pte++, addr += PAGE_SIZE, addr != end);
77786 }
77787
77788@@ -100,16 +111,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
77789 pte = pte_alloc_kernel(pmd, addr);
77790 if (!pte)
77791 return -ENOMEM;
77792+
77793+ pax_open_kernel();
77794 do {
77795 struct page *page = pages[*nr];
77796
77797- if (WARN_ON(!pte_none(*pte)))
77798+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
77799+ if (pgprot_val(prot) & _PAGE_NX)
77800+#endif
77801+
77802+ if (!pte_none(*pte)) {
77803+ pax_close_kernel();
77804+ WARN_ON(1);
77805 return -EBUSY;
77806- if (WARN_ON(!page))
77807+ }
77808+ if (!page) {
77809+ pax_close_kernel();
77810+ WARN_ON(1);
77811 return -ENOMEM;
77812+ }
77813 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
77814 (*nr)++;
77815 } while (pte++, addr += PAGE_SIZE, addr != end);
77816+ pax_close_kernel();
77817 return 0;
77818 }
77819
77820@@ -119,7 +143,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
77821 pmd_t *pmd;
77822 unsigned long next;
77823
77824- pmd = pmd_alloc(&init_mm, pud, addr);
77825+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
77826 if (!pmd)
77827 return -ENOMEM;
77828 do {
77829@@ -136,7 +160,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
77830 pud_t *pud;
77831 unsigned long next;
77832
77833- pud = pud_alloc(&init_mm, pgd, addr);
77834+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
77835 if (!pud)
77836 return -ENOMEM;
77837 do {
77838@@ -191,11 +215,20 @@ int is_vmalloc_or_module_addr(const void *x)
77839 * and fall back on vmalloc() if that fails. Others
77840 * just put it in the vmalloc space.
77841 */
77842-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
77843+#ifdef CONFIG_MODULES
77844+#ifdef MODULES_VADDR
77845 unsigned long addr = (unsigned long)x;
77846 if (addr >= MODULES_VADDR && addr < MODULES_END)
77847 return 1;
77848 #endif
77849+
77850+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
77851+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
77852+ return 1;
77853+#endif
77854+
77855+#endif
77856+
77857 return is_vmalloc_addr(x);
77858 }
77859
77860@@ -216,8 +249,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
77861
77862 if (!pgd_none(*pgd)) {
77863 pud_t *pud = pud_offset(pgd, addr);
77864+#ifdef CONFIG_X86
77865+ if (!pud_large(*pud))
77866+#endif
77867 if (!pud_none(*pud)) {
77868 pmd_t *pmd = pmd_offset(pud, addr);
77869+#ifdef CONFIG_X86
77870+ if (!pmd_large(*pmd))
77871+#endif
77872 if (!pmd_none(*pmd)) {
77873 pte_t *ptep, pte;
77874
77875@@ -329,7 +368,7 @@ static void purge_vmap_area_lazy(void);
77876 * Allocate a region of KVA of the specified size and alignment, within the
77877 * vstart and vend.
77878 */
77879-static struct vmap_area *alloc_vmap_area(unsigned long size,
77880+static __size_overflow(1) struct vmap_area *alloc_vmap_area(unsigned long size,
77881 unsigned long align,
77882 unsigned long vstart, unsigned long vend,
77883 int node, gfp_t gfp_mask)
77884@@ -1328,6 +1367,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
77885 struct vm_struct *area;
77886
77887 BUG_ON(in_interrupt());
77888+
77889+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
77890+ if (flags & VM_KERNEXEC) {
77891+ if (start != VMALLOC_START || end != VMALLOC_END)
77892+ return NULL;
77893+ start = (unsigned long)MODULES_EXEC_VADDR;
77894+ end = (unsigned long)MODULES_EXEC_END;
77895+ }
77896+#endif
77897+
77898 if (flags & VM_IOREMAP) {
77899 int bit = fls(size);
77900
77901@@ -1568,6 +1617,11 @@ void *vmap(struct page **pages, unsigned int count,
77902 if (count > totalram_pages)
77903 return NULL;
77904
77905+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
77906+ if (!(pgprot_val(prot) & _PAGE_NX))
77907+ flags |= VM_KERNEXEC;
77908+#endif
77909+
77910 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
77911 __builtin_return_address(0));
77912 if (!area)
77913@@ -1669,6 +1723,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
77914 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
77915 goto fail;
77916
77917+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
77918+ if (!(pgprot_val(prot) & _PAGE_NX))
77919+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
77920+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
77921+ else
77922+#endif
77923+
77924 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
77925 start, end, node, gfp_mask, caller);
77926 if (!area)
77927@@ -1842,10 +1903,9 @@ EXPORT_SYMBOL(vzalloc_node);
77928 * For tight control over page level allocator and protection flags
77929 * use __vmalloc() instead.
77930 */
77931-
77932 void *vmalloc_exec(unsigned long size)
77933 {
77934- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
77935+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
77936 -1, __builtin_return_address(0));
77937 }
77938
77939@@ -2136,6 +2196,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
77940 unsigned long uaddr = vma->vm_start;
77941 unsigned long usize = vma->vm_end - vma->vm_start;
77942
77943+ BUG_ON(vma->vm_mirror);
77944+
77945 if ((PAGE_SIZE-1) & (unsigned long)addr)
77946 return -EINVAL;
77947
77948@@ -2575,7 +2637,11 @@ static int s_show(struct seq_file *m, void *p)
77949 v->addr, v->addr + v->size, v->size);
77950
77951 if (v->caller)
77952+#ifdef CONFIG_GRKERNSEC_HIDESYM
77953+ seq_printf(m, " %pK", v->caller);
77954+#else
77955 seq_printf(m, " %pS", v->caller);
77956+#endif
77957
77958 if (v->nr_pages)
77959 seq_printf(m, " pages=%d", v->nr_pages);
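
Across the vmalloc.c hunks, any request for executable kernel memory (a prot without _PAGE_NX) gains VM_KERNEXEC and is rerouted from the general vmalloc window into the module address range, so the bulk of vmalloc space can stay non-executable under KERNEXEC. The range selection, schematically (flag value and addresses are invented for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    #define VM_KERNEXEC        0x100          /* invented flag value       */
    #define VMALLOC_START      0xc0000000UL   /* placeholder region bounds */
    #define VMALLOC_END        0xf8000000UL
    #define MODULES_EXEC_VADDR 0xf8000000UL
    #define MODULES_EXEC_END   0xff000000UL

    struct range { unsigned long start, end; };

    /* mirrors the hunk: executable requests are only honored for the full
     * vmalloc window and are rerouted to the module execution range */
    static bool pick_range(unsigned long flags, struct range *r)
    {
            if (flags & VM_KERNEXEC) {
                    if (r->start != VMALLOC_START || r->end != VMALLOC_END)
                            return false;     /* the kernel code returns NULL */
                    r->start = MODULES_EXEC_VADDR;
                    r->end = MODULES_EXEC_END;
            }
            return true;
    }

    int main(void)
    {
            struct range r = { VMALLOC_START, VMALLOC_END };

            if (pick_range(VM_KERNEXEC, &r))
                    printf("%#lx-%#lx\n", r.start, r.end);
            return 0;
    }
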
77960diff --git a/mm/vmstat.c b/mm/vmstat.c
77961index c737057..a49753a 100644
77962--- a/mm/vmstat.c
77963+++ b/mm/vmstat.c
77964@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
77965 *
77966 * vm_stat contains the global counters
77967 */
77968-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
77969+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
77970 EXPORT_SYMBOL(vm_stat);
77971
77972 #ifdef CONFIG_SMP
77973@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
77974 v = p->vm_stat_diff[i];
77975 p->vm_stat_diff[i] = 0;
77976 local_irq_restore(flags);
77977- atomic_long_add(v, &zone->vm_stat[i]);
77978+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
77979 global_diff[i] += v;
77980 #ifdef CONFIG_NUMA
77981 /* 3 seconds idle till flush */
77982@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
77983
77984 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
77985 if (global_diff[i])
77986- atomic_long_add(global_diff[i], &vm_stat[i]);
77987+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
77988 }
77989
77990 void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
77991@@ -503,8 +503,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
77992 if (pset->vm_stat_diff[i]) {
77993 int v = pset->vm_stat_diff[i];
77994 pset->vm_stat_diff[i] = 0;
77995- atomic_long_add(v, &zone->vm_stat[i]);
77996- atomic_long_add(v, &vm_stat[i]);
77997+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
77998+ atomic_long_add_unchecked(v, &vm_stat[i]);
77999 }
78000 }
78001 #endif
78002@@ -1224,10 +1224,20 @@ static int __init setup_vmstat(void)
78003 start_cpu_timer(cpu);
78004 #endif
78005 #ifdef CONFIG_PROC_FS
78006- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
78007- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
78008- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
78009- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
78010+ {
78011+ mode_t gr_mode = S_IRUGO;
78012+#ifdef CONFIG_GRKERNSEC_PROC_ADD
78013+ gr_mode = S_IRUSR;
78014+#endif
78015+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
78016+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
78017+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
78018+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
78019+#else
78020+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
78021+#endif
78022+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
78023+ }
78024 #endif
78025 return 0;
78026 }
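
The atomic_long_t to atomic_long_unchecked_t swaps here (and the matching atomic_*_unchecked() calls in the atm, batman-adv and caif hunks that follow) mark counters that are pure statistics. Under PaX REFCOUNT, ordinary kernel atomics detect and stop overflows to catch reference-count bugs; the *_unchecked variants opt out, since a statistic wrapping around is harmless. A toy contrast (the saturate-and-report policy below is a stand-in for REFCOUNT's actual reaction):

    #include <limits.h>
    #include <stdio.h>

    /* refcount-style: overflow is treated as a bug */
    static int checked_inc(int v)
    {
            if (v == INT_MAX) {
                    fprintf(stderr, "refcount overflow detected\n");
                    return v;             /* saturate instead of wrapping */
            }
            return v + 1;
    }

    /* stats-style ("unchecked"): wrapping is expected and fine */
    static unsigned int unchecked_inc(unsigned int v)
    {
            return v + 1;
    }

    int main(void)
    {
            printf("%d\n", checked_inc(INT_MAX));     /* stays at INT_MAX */
            printf("%u\n", unchecked_inc(UINT_MAX));  /* wraps to 0       */
            return 0;
    }
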
78027diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
78028index ee07072..593e3fd 100644
78029--- a/net/8021q/vlan.c
78030+++ b/net/8021q/vlan.c
78031@@ -484,7 +484,7 @@ out:
78032 return NOTIFY_DONE;
78033 }
78034
78035-static struct notifier_block vlan_notifier_block __read_mostly = {
78036+static struct notifier_block vlan_notifier_block = {
78037 .notifier_call = vlan_device_event,
78038 };
78039
78040@@ -559,8 +559,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
78041 err = -EPERM;
78042 if (!capable(CAP_NET_ADMIN))
78043 break;
78044- if ((args.u.name_type >= 0) &&
78045- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
78046+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
78047 struct vlan_net *vn;
78048
78049 vn = net_generic(net, vlan_net_id);
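
Dropping `args.u.name_type >= 0` above removes a comparison that can never be false when the field is unsigned, as it appears to be in struct vlan_ioctl_args, leaving the upper-bound test as the only meaningful check:

    #include <stdio.h>

    #define VLAN_NAME_TYPE_HIGHEST 4

    int main(void)
    {
            unsigned int name_type = 2;

            /* gcc -Wtype-limits: "comparison of unsigned expression >= 0
             * is always true", so this pair of tests... */
            if (name_type >= 0 && name_type < VLAN_NAME_TYPE_HIGHEST)
                    printf("accepted (redundant lower bound)\n");

            /* ...is equivalent to the single test kept by the patch */
            if (name_type < VLAN_NAME_TYPE_HIGHEST)
                    printf("accepted\n");
            return 0;
    }
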
78050diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
78051index 02efb25..41541a9 100644
78052--- a/net/9p/trans_fd.c
78053+++ b/net/9p/trans_fd.c
78054@@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
78055 oldfs = get_fs();
78056 set_fs(get_ds());
78057 /* The cast to a user pointer is valid due to the set_fs() */
78058- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
78059+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
78060 set_fs(oldfs);
78061
78062 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
78063diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
78064index 876fbe8..8bbea9f 100644
78065--- a/net/atm/atm_misc.c
78066+++ b/net/atm/atm_misc.c
78067@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
78068 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
78069 return 1;
78070 atm_return(vcc, truesize);
78071- atomic_inc(&vcc->stats->rx_drop);
78072+ atomic_inc_unchecked(&vcc->stats->rx_drop);
78073 return 0;
78074 }
78075 EXPORT_SYMBOL(atm_charge);
78076@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
78077 }
78078 }
78079 atm_return(vcc, guess);
78080- atomic_inc(&vcc->stats->rx_drop);
78081+ atomic_inc_unchecked(&vcc->stats->rx_drop);
78082 return NULL;
78083 }
78084 EXPORT_SYMBOL(atm_alloc_charge);
78085@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
78086
78087 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
78088 {
78089-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
78090+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
78091 __SONET_ITEMS
78092 #undef __HANDLE_ITEM
78093 }
78094@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
78095
78096 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
78097 {
78098-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
78099+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
78100 __SONET_ITEMS
78101 #undef __HANDLE_ITEM
78102 }
78103diff --git a/net/atm/lec.h b/net/atm/lec.h
78104index a86aff9..3a0d6f6 100644
78105--- a/net/atm/lec.h
78106+++ b/net/atm/lec.h
78107@@ -48,7 +48,7 @@ struct lane2_ops {
78108 const u8 *tlvs, u32 sizeoftlvs);
78109 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
78110 const u8 *tlvs, u32 sizeoftlvs);
78111-};
78112+} __no_const;
78113
78114 /*
78115 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
78116diff --git a/net/atm/proc.c b/net/atm/proc.c
78117index 0d020de..011c7bb 100644
78118--- a/net/atm/proc.c
78119+++ b/net/atm/proc.c
78120@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
78121 const struct k_atm_aal_stats *stats)
78122 {
78123 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
78124- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
78125- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
78126- atomic_read(&stats->rx_drop));
78127+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
78128+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
78129+ atomic_read_unchecked(&stats->rx_drop));
78130 }
78131
78132 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
78133diff --git a/net/atm/resources.c b/net/atm/resources.c
78134index 0447d5d..3cf4728 100644
78135--- a/net/atm/resources.c
78136+++ b/net/atm/resources.c
78137@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
78138 static void copy_aal_stats(struct k_atm_aal_stats *from,
78139 struct atm_aal_stats *to)
78140 {
78141-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
78142+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
78143 __AAL_STAT_ITEMS
78144 #undef __HANDLE_ITEM
78145 }
78146@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
78147 static void subtract_aal_stats(struct k_atm_aal_stats *from,
78148 struct atm_aal_stats *to)
78149 {
78150-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
78151+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
78152 __AAL_STAT_ITEMS
78153 #undef __HANDLE_ITEM
78154 }
78155diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
78156index c6fcc76..1270d14 100644
78157--- a/net/batman-adv/bat_iv_ogm.c
78158+++ b/net/batman-adv/bat_iv_ogm.c
78159@@ -62,7 +62,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
78160
78161 /* randomize initial seqno to avoid collision */
78162 get_random_bytes(&random_seqno, sizeof(random_seqno));
78163- atomic_set(&hard_iface->seqno, random_seqno);
78164+ atomic_set_unchecked(&hard_iface->seqno, random_seqno);
78165
78166 hard_iface->packet_len = BATADV_OGM_HLEN;
78167 hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
78168@@ -608,9 +608,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
78169 batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
78170
78171 /* change sequence number to network order */
78172- seqno = (uint32_t)atomic_read(&hard_iface->seqno);
78173+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->seqno);
78174 batadv_ogm_packet->seqno = htonl(seqno);
78175- atomic_inc(&hard_iface->seqno);
78176+ atomic_inc_unchecked(&hard_iface->seqno);
78177
78178 batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
78179 batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
78180@@ -1015,7 +1015,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
78181 return;
78182
78183 /* could be changed by schedule_own_packet() */
78184- if_incoming_seqno = atomic_read(&if_incoming->seqno);
78185+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
78186
78187 if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
78188 has_directlink_flag = 1;
78189diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
78190index d112fd6..686a447 100644
78191--- a/net/batman-adv/hard-interface.c
78192+++ b/net/batman-adv/hard-interface.c
78193@@ -327,7 +327,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
78194 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
78195 dev_add_pack(&hard_iface->batman_adv_ptype);
78196
78197- atomic_set(&hard_iface->frag_seqno, 1);
78198+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
78199 batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
78200 hard_iface->net_dev->name);
78201
78202@@ -450,7 +450,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
78203 /* This can't be called via a bat_priv callback because
78204 * we have no bat_priv yet.
78205 */
78206- atomic_set(&hard_iface->seqno, 1);
78207+ atomic_set_unchecked(&hard_iface->seqno, 1);
78208 hard_iface->packet_buff = NULL;
78209
78210 return hard_iface;
78211diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
78212index ce0684a..4a0cbf1 100644
78213--- a/net/batman-adv/soft-interface.c
78214+++ b/net/batman-adv/soft-interface.c
78215@@ -234,7 +234,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
78216 primary_if->net_dev->dev_addr, ETH_ALEN);
78217
78218 /* set broadcast sequence number */
78219- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
78220+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
78221 bcast_packet->seqno = htonl(seqno);
78222
78223 batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
78224@@ -427,7 +427,7 @@ struct net_device *batadv_softif_create(const char *name)
78225 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
78226
78227 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
78228- atomic_set(&bat_priv->bcast_seqno, 1);
78229+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
78230 atomic_set(&bat_priv->tt.vn, 0);
78231 atomic_set(&bat_priv->tt.local_changes, 0);
78232 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
78233diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
78234index ac1e07a..4c846e2 100644
78235--- a/net/batman-adv/types.h
78236+++ b/net/batman-adv/types.h
78237@@ -33,8 +33,8 @@ struct batadv_hard_iface {
78238 int16_t if_num;
78239 char if_status;
78240 struct net_device *net_dev;
78241- atomic_t seqno;
78242- atomic_t frag_seqno;
78243+ atomic_unchecked_t seqno;
78244+ atomic_unchecked_t frag_seqno;
78245 unsigned char *packet_buff;
78246 int packet_len;
78247 struct kobject *hardif_obj;
78248@@ -244,7 +244,7 @@ struct batadv_priv {
78249 atomic_t orig_interval; /* uint */
78250 atomic_t hop_penalty; /* uint */
78251 atomic_t log_level; /* uint */
78252- atomic_t bcast_seqno;
78253+ atomic_unchecked_t bcast_seqno;
78254 atomic_t bcast_queue_left;
78255 atomic_t batman_queue_left;
78256 char num_ifaces;
78257diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
78258index f397232..3206a33 100644
78259--- a/net/batman-adv/unicast.c
78260+++ b/net/batman-adv/unicast.c
78261@@ -272,7 +272,7 @@ int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
78262 frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
78263 frag2->flags = large_tail;
78264
78265- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
78266+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
78267 frag1->seqno = htons(seqno - 1);
78268 frag2->seqno = htons(seqno);
78269
78270diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
78271index 07f0739..3c42e34 100644
78272--- a/net/bluetooth/hci_sock.c
78273+++ b/net/bluetooth/hci_sock.c
78274@@ -934,7 +934,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
78275 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
78276 }
78277
78278- len = min_t(unsigned int, len, sizeof(uf));
78279+ len = min((size_t)len, sizeof(uf));
78280 if (copy_from_user(&uf, optval, len)) {
78281 err = -EFAULT;
78282 break;
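
This hunk and the bluetooth setsockopt changes below replace `min_t(unsigned int, ...)` over an int length with a size_t length converted once at the top. A negative length then wraps to a huge size_t and the fixed buffer size wins the min(), and every later comparison is unsigned-vs-unsigned instead of depending on min_t's cast at each call site. The wrap in plain C:

    #include <stddef.h>
    #include <stdio.h>

    int main(void)
    {
            int optlen = -1;              /* hostile/garbage length    */
            size_t cap = 16;              /* sizeof the kernel buffer  */
            size_t len;

            /* (size_t)-1 wraps to SIZE_MAX, so the cap is selected */
            len = (size_t)optlen < cap ? (size_t)optlen : cap;
            printf("%zu\n", len);         /* prints 16 */
            return 0;
    }
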
78283diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
78284index a91239d..d7ed533 100644
78285--- a/net/bluetooth/l2cap_core.c
78286+++ b/net/bluetooth/l2cap_core.c
78287@@ -3183,8 +3183,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
78288 break;
78289
78290 case L2CAP_CONF_RFC:
78291- if (olen == sizeof(rfc))
78292- memcpy(&rfc, (void *)val, olen);
78293+ if (olen != sizeof(rfc))
78294+ break;
78295+
78296+ memcpy(&rfc, (void *)val, olen);
78297
78298 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
78299 rfc.mode != chan->mode)
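
The L2CAP_CONF_RFC hunk turns "copy the option if its length matches, then use rfc regardless" into "leave the case entirely on a mismatch", so the stack-resident rfc can no longer be consumed uninitialized. The same shape in miniature (the struct layout is invented):

    #include <stdio.h>
    #include <string.h>

    struct rfc_opts { unsigned char mode; unsigned char txwin; unsigned short mtu; };

    /* fills *out only when the option body is exactly the expected size */
    static int parse_rfc(const void *val, size_t olen, struct rfc_opts *out)
    {
            if (olen != sizeof(*out))
                    return -1;            /* mirrors the early `break` */
            memcpy(out, val, sizeof(*out));
            return 0;
    }

    int main(void)
    {
            struct rfc_opts rfc;
            unsigned char wire[sizeof(rfc)] = { 3, 8, 0xa0, 0x02 };

            printf("%d\n", parse_rfc(wire, sizeof(wire), &rfc));     /*  0: rfc usable */
            printf("%d\n", parse_rfc(wire, sizeof(wire) - 1, &rfc)); /* -1: skipped    */
            return 0;
    }
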
78300diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
78301index 083f2bf..799f9448 100644
78302--- a/net/bluetooth/l2cap_sock.c
78303+++ b/net/bluetooth/l2cap_sock.c
78304@@ -471,7 +471,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
78305 struct sock *sk = sock->sk;
78306 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
78307 struct l2cap_options opts;
78308- int len, err = 0;
78309+ int err = 0;
78310+ size_t len = optlen;
78311 u32 opt;
78312
78313 BT_DBG("sk %p", sk);
78314@@ -493,7 +494,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
78315 opts.max_tx = chan->max_tx;
78316 opts.txwin_size = chan->tx_win;
78317
78318- len = min_t(unsigned int, sizeof(opts), optlen);
78319+ len = min(sizeof(opts), len);
78320 if (copy_from_user((char *) &opts, optval, len)) {
78321 err = -EFAULT;
78322 break;
78323@@ -571,7 +572,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
78324 struct bt_security sec;
78325 struct bt_power pwr;
78326 struct l2cap_conn *conn;
78327- int len, err = 0;
78328+ int err = 0;
78329+ size_t len = optlen;
78330 u32 opt;
78331
78332 BT_DBG("sk %p", sk);
78333@@ -594,7 +596,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
78334
78335 sec.level = BT_SECURITY_LOW;
78336
78337- len = min_t(unsigned int, sizeof(sec), optlen);
78338+ len = min(sizeof(sec), len);
78339 if (copy_from_user((char *) &sec, optval, len)) {
78340 err = -EFAULT;
78341 break;
78342@@ -691,7 +693,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
78343
78344 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
78345
78346- len = min_t(unsigned int, sizeof(pwr), optlen);
78347+ len = min(sizeof(pwr), len);
78348 if (copy_from_user((char *) &pwr, optval, len)) {
78349 err = -EFAULT;
78350 break;
78351diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
78352index 868a909..d044bc3 100644
78353--- a/net/bluetooth/rfcomm/sock.c
78354+++ b/net/bluetooth/rfcomm/sock.c
78355@@ -667,7 +667,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
78356 struct sock *sk = sock->sk;
78357 struct bt_security sec;
78358 int err = 0;
78359- size_t len;
78360+ size_t len = optlen;
78361 u32 opt;
78362
78363 BT_DBG("sk %p", sk);
78364@@ -689,7 +689,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
78365
78366 sec.level = BT_SECURITY_LOW;
78367
78368- len = min_t(unsigned int, sizeof(sec), optlen);
78369+ len = min(sizeof(sec), len);
78370 if (copy_from_user((char *) &sec, optval, len)) {
78371 err = -EFAULT;
78372 break;
78373diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
78374index ccc2487..921073d 100644
78375--- a/net/bluetooth/rfcomm/tty.c
78376+++ b/net/bluetooth/rfcomm/tty.c
78377@@ -309,7 +309,7 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev)
78378 BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags));
78379
78380 spin_lock_irqsave(&dev->port.lock, flags);
78381- if (dev->port.count > 0) {
78382+ if (atomic_read(&dev->port.count) > 0) {
78383 spin_unlock_irqrestore(&dev->port.lock, flags);
78384 return;
78385 }
78386@@ -664,10 +664,10 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
78387 return -ENODEV;
78388
78389 BT_DBG("dev %p dst %s channel %d opened %d", dev, batostr(&dev->dst),
78390- dev->channel, dev->port.count);
78391+ dev->channel, atomic_read(&dev->port.count));
78392
78393 spin_lock_irqsave(&dev->port.lock, flags);
78394- if (++dev->port.count > 1) {
78395+ if (atomic_inc_return(&dev->port.count) > 1) {
78396 spin_unlock_irqrestore(&dev->port.lock, flags);
78397 return 0;
78398 }
78399@@ -732,10 +732,10 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
78400 return;
78401
78402 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
78403- dev->port.count);
78404+ atomic_read(&dev->port.count));
78405
78406 spin_lock_irqsave(&dev->port.lock, flags);
78407- if (!--dev->port.count) {
78408+ if (!atomic_dec_return(&dev->port.count)) {
78409 spin_unlock_irqrestore(&dev->port.lock, flags);
78410 if (dev->tty_dev->parent)
78411 device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
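
The rfcomm tty hunks convert dev->port.count into an atomic counter so that "am I the first open?" and "am I the last close?" are each answered by one indivisible operation. Sketched with C11 atomics (only the counter is modeled; the kernel still holds its port spinlock around the surrounding state):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int port_count;

    /* mirrors: atomic_inc_return(&dev->port.count) > 1 means not first */
    static bool open_is_first(void)
    {
            return atomic_fetch_add(&port_count, 1) + 1 == 1;
    }

    /* mirrors: !atomic_dec_return(&dev->port.count) means last close */
    static bool close_is_last(void)
    {
            return atomic_fetch_sub(&port_count, 1) - 1 == 0;
    }

    int main(void)
    {
            return open_is_first() && close_is_last() ? 0 : 1;
    }
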
78412diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
78413index 5fe2ff3..121d696 100644
78414--- a/net/bridge/netfilter/ebtables.c
78415+++ b/net/bridge/netfilter/ebtables.c
78416@@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
78417 tmp.valid_hooks = t->table->valid_hooks;
78418 }
78419 mutex_unlock(&ebt_mutex);
78420- if (copy_to_user(user, &tmp, *len) != 0){
78421+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
78422 BUGPRINT("c2u Didn't work\n");
78423 ret = -EFAULT;
78424 break;
78425@@ -2327,7 +2327,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
78426 goto out;
78427 tmp.valid_hooks = t->valid_hooks;
78428
78429- if (copy_to_user(user, &tmp, *len) != 0) {
78430+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
78431 ret = -EFAULT;
78432 break;
78433 }
78434@@ -2338,7 +2338,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
78435 tmp.entries_size = t->table->entries_size;
78436 tmp.valid_hooks = t->table->valid_hooks;
78437
78438- if (copy_to_user(user, &tmp, *len) != 0) {
78439+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
78440 ret = -EFAULT;
78441 break;
78442 }
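
Every ebtables hunk above bolts `*len > sizeof(tmp)` onto the copy_to_user() condition: *len is user-controlled, but tmp is a fixed-size kernel structure, so an oversized request has to fail instead of copying out whatever kernel memory happens to follow tmp. A user-space model of the guard (memcpy stands in for copy_to_user):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    struct reply { char name[32]; unsigned int entries; };

    /* copy at most sizeof(*src) bytes; reject larger requests instead
     * of reading past the source structure */
    static int bounded_reply(void *dst, size_t req, const struct reply *src)
    {
            if (req > sizeof(*src))
                    return -EFAULT;       /* the hunk folds this into the EFAULT path */
            memcpy(dst, src, req);        /* stands in for copy_to_user() */
            return 0;
    }

    int main(void)
    {
            struct reply r = { "filter", 7 };
            char buf[64];

            printf("%d\n", bounded_reply(buf, sizeof(r), &r));     /* 0            */
            printf("%d\n", bounded_reply(buf, sizeof(r) + 8, &r)); /* -14 (EFAULT) */
            return 0;
    }
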
78443diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
78444index 44f270f..1f5602d 100644
78445--- a/net/caif/cfctrl.c
78446+++ b/net/caif/cfctrl.c
78447@@ -10,6 +10,7 @@
78448 #include <linux/spinlock.h>
78449 #include <linux/slab.h>
78450 #include <linux/pkt_sched.h>
78451+#include <linux/sched.h>
78452 #include <net/caif/caif_layer.h>
78453 #include <net/caif/cfpkt.h>
78454 #include <net/caif/cfctrl.h>
78455@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
78456 memset(&dev_info, 0, sizeof(dev_info));
78457 dev_info.id = 0xff;
78458 cfsrvl_init(&this->serv, 0, &dev_info, false);
78459- atomic_set(&this->req_seq_no, 1);
78460- atomic_set(&this->rsp_seq_no, 1);
78461+ atomic_set_unchecked(&this->req_seq_no, 1);
78462+ atomic_set_unchecked(&this->rsp_seq_no, 1);
78463 this->serv.layer.receive = cfctrl_recv;
78464 sprintf(this->serv.layer.name, "ctrl");
78465 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
78466@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
78467 struct cfctrl_request_info *req)
78468 {
78469 spin_lock_bh(&ctrl->info_list_lock);
78470- atomic_inc(&ctrl->req_seq_no);
78471- req->sequence_no = atomic_read(&ctrl->req_seq_no);
78472+ atomic_inc_unchecked(&ctrl->req_seq_no);
78473+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
78474 list_add_tail(&req->list, &ctrl->list);
78475 spin_unlock_bh(&ctrl->info_list_lock);
78476 }
78477@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
78478 if (p != first)
78479 pr_warn("Requests are not received in order\n");
78480
78481- atomic_set(&ctrl->rsp_seq_no,
78482+ atomic_set_unchecked(&ctrl->rsp_seq_no,
78483 p->sequence_no);
78484 list_del(&p->list);
78485 goto out;
78486diff --git a/net/can/af_can.c b/net/can/af_can.c
78487index ddac1ee..3ee0a78 100644
78488--- a/net/can/af_can.c
78489+++ b/net/can/af_can.c
78490@@ -872,7 +872,7 @@ static const struct net_proto_family can_family_ops = {
78491 };
78492
78493 /* notifier block for netdevice event */
78494-static struct notifier_block can_netdev_notifier __read_mostly = {
78495+static struct notifier_block can_netdev_notifier = {
78496 .notifier_call = can_notifier,
78497 };
78498
78499diff --git a/net/can/gw.c b/net/can/gw.c
78500index 1f5c978..ef714c7 100644
78501--- a/net/can/gw.c
78502+++ b/net/can/gw.c
78503@@ -67,7 +67,6 @@ MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
78504 MODULE_ALIAS("can-gw");
78505
78506 static HLIST_HEAD(cgw_list);
78507-static struct notifier_block notifier;
78508
78509 static struct kmem_cache *cgw_cache __read_mostly;
78510
78511@@ -887,6 +886,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
78512 return err;
78513 }
78514
78515+static struct notifier_block notifier = {
78516+ .notifier_call = cgw_notifier
78517+};
78518+
78519 static __init int cgw_module_init(void)
78520 {
78521 printk(banner);
78522@@ -898,7 +901,6 @@ static __init int cgw_module_init(void)
78523 return -ENOMEM;
78524
78525 /* set notifier */
78526- notifier.notifier_call = cgw_notifier;
78527 register_netdevice_notifier(&notifier);
78528
78529 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
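The af_can and can/gw hunks serve constification: the notifier_block loses its __read_mostly placement and gains a designated initializer, so the object can live in a write-protected section and no runtime store to .notifier_call is needed. A sketch of the resulting shape:

	/* Sketch: bind the callback at build time so the notifier can
	 * be placed in read-only data; the old code assigned
	 * .notifier_call inside the init function, which requires the
	 * struct to stay writable at runtime. */
	static struct notifier_block sketch_nb = {
		.notifier_call = cgw_notifier,	/* bound once, at compile time */
	};

	static int __init sketch_init(void)
	{
		register_netdevice_notifier(&sketch_nb);
		return 0;
	}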
78530diff --git a/net/compat.c b/net/compat.c
78531index 79ae884..17c5c09 100644
78532--- a/net/compat.c
78533+++ b/net/compat.c
78534@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
78535 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
78536 __get_user(kmsg->msg_flags, &umsg->msg_flags))
78537 return -EFAULT;
78538- kmsg->msg_name = compat_ptr(tmp1);
78539- kmsg->msg_iov = compat_ptr(tmp2);
78540- kmsg->msg_control = compat_ptr(tmp3);
78541+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
78542+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
78543+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
78544 return 0;
78545 }
78546
78547@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
78548
78549 if (kern_msg->msg_namelen) {
78550 if (mode == VERIFY_READ) {
78551- int err = move_addr_to_kernel(kern_msg->msg_name,
78552+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
78553 kern_msg->msg_namelen,
78554 kern_address);
78555 if (err < 0)
78556@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
78557 kern_msg->msg_name = NULL;
78558
78559 tot_len = iov_from_user_compat_to_kern(kern_iov,
78560- (struct compat_iovec __user *)kern_msg->msg_iov,
78561+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
78562 kern_msg->msg_iovlen);
78563 if (tot_len >= 0)
78564 kern_msg->msg_iov = kern_iov;
78565@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
78566
78567 #define CMSG_COMPAT_FIRSTHDR(msg) \
78568 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
78569- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
78570+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
78571 (struct compat_cmsghdr __user *)NULL)
78572
78573 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
78574 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
78575 (ucmlen) <= (unsigned long) \
78576 ((mhdr)->msg_controllen - \
78577- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
78578+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
78579
78580 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
78581 struct compat_cmsghdr __user *cmsg, int cmsg_len)
78582 {
78583 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
78584- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
78585+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
78586 msg->msg_controllen)
78587 return NULL;
78588 return (struct compat_cmsghdr __user *)ptr;
78589@@ -219,7 +219,7 @@ Efault:
78590
78591 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
78592 {
78593- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
78594+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
78595 struct compat_cmsghdr cmhdr;
78596 struct compat_timeval ctv;
78597 struct compat_timespec cts[3];
78598@@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
78599
78600 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
78601 {
78602- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
78603+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
78604 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
78605 int fdnum = scm->fp->count;
78606 struct file **fp = scm->fp->fp;
78607@@ -363,7 +363,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
78608 return -EFAULT;
78609 old_fs = get_fs();
78610 set_fs(KERNEL_DS);
78611- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
78612+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
78613 set_fs(old_fs);
78614
78615 return err;
78616@@ -424,7 +424,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
78617 len = sizeof(ktime);
78618 old_fs = get_fs();
78619 set_fs(KERNEL_DS);
78620- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
78621+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
78622 set_fs(old_fs);
78623
78624 if (!err) {
78625@@ -567,7 +567,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
78626 case MCAST_JOIN_GROUP:
78627 case MCAST_LEAVE_GROUP:
78628 {
78629- struct compat_group_req __user *gr32 = (void *)optval;
78630+ struct compat_group_req __user *gr32 = (void __user *)optval;
78631 struct group_req __user *kgr =
78632 compat_alloc_user_space(sizeof(struct group_req));
78633 u32 interface;
78634@@ -588,7 +588,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
78635 case MCAST_BLOCK_SOURCE:
78636 case MCAST_UNBLOCK_SOURCE:
78637 {
78638- struct compat_group_source_req __user *gsr32 = (void *)optval;
78639+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
78640 struct group_source_req __user *kgsr = compat_alloc_user_space(
78641 sizeof(struct group_source_req));
78642 u32 interface;
78643@@ -609,7 +609,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
78644 }
78645 case MCAST_MSFILTER:
78646 {
78647- struct compat_group_filter __user *gf32 = (void *)optval;
78648+ struct compat_group_filter __user *gf32 = (void __user *)optval;
78649 struct group_filter __user *kgf;
78650 u32 interface, fmode, numsrc;
78651
78652@@ -647,7 +647,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
78653 char __user *optval, int __user *optlen,
78654 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
78655 {
78656- struct compat_group_filter __user *gf32 = (void *)optval;
78657+ struct compat_group_filter __user *gf32 = (void __user *)optval;
78658 struct group_filter __user *kgf;
78659 int __user *koptlen;
78660 u32 interface, fmode, numsrc;
78661@@ -796,7 +796,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
78662
78663 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
78664 return -EINVAL;
78665- if (copy_from_user(a, args, nas[call]))
78666+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
78667 return -EFAULT;
78668 a0 = a[0];
78669 a1 = a[1];
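compat_sys_socketcall() copies nas[call] bytes of the argument block onto the on-stack array a[]; the added bound makes a bad table entry (or a future table edit) fail with -EFAULT instead of overflowing the kernel stack. Sketch of the shape, with a trimmed illustrative table:

	/* Sketch of the bounded argument copy. a[] is the on-stack
	 * argument array; nas[] maps each socketcall number to the
	 * byte count of its arguments (table trimmed for illustration). */
	static long socketcall_sketch(int call, u32 __user *args)
	{
		u32 a[6];
		static const unsigned char nas[] = {
			0, 3 * sizeof(u32), 3 * sizeof(u32),	/* ... */
		};

		if (call < 1 || call >= (int)ARRAY_SIZE(nas))
			return -EINVAL;
		/* Never copy more than sizeof(a), whatever the table says. */
		if (nas[call] > sizeof(a) || copy_from_user(a, args, nas[call]))
			return -EFAULT;
		/* ... dispatch on call using a[0..] ... */
		return 0;
	}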
78670diff --git a/net/core/datagram.c b/net/core/datagram.c
78671index 0337e2b..47914a0 100644
78672--- a/net/core/datagram.c
78673+++ b/net/core/datagram.c
78674@@ -289,7 +289,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
78675 }
78676
78677 kfree_skb(skb);
78678- atomic_inc(&sk->sk_drops);
78679+ atomic_inc_unchecked(&sk->sk_drops);
78680 sk_mem_reclaim_partial(sk);
78681
78682 return err;
78683diff --git a/net/core/dev.c b/net/core/dev.c
78684index e5942bf..25998c3 100644
78685--- a/net/core/dev.c
78686+++ b/net/core/dev.c
78687@@ -1162,9 +1162,13 @@ void dev_load(struct net *net, const char *name)
78688 if (no_module && capable(CAP_NET_ADMIN))
78689 no_module = request_module("netdev-%s", name);
78690 if (no_module && capable(CAP_SYS_MODULE)) {
78691+#ifdef CONFIG_GRKERNSEC_MODHARDEN
78692+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
78693+#else
78694 if (!request_module("%s", name))
78695 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
78696 name);
78697+#endif
78698 }
78699 }
78700 EXPORT_SYMBOL(dev_load);
78701@@ -1627,7 +1631,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
78702 {
78703 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
78704 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
78705- atomic_long_inc(&dev->rx_dropped);
78706+ atomic_long_inc_unchecked(&dev->rx_dropped);
78707 kfree_skb(skb);
78708 return NET_RX_DROP;
78709 }
78710@@ -1637,7 +1641,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
78711 nf_reset(skb);
78712
78713 if (unlikely(!is_skb_forwardable(dev, skb))) {
78714- atomic_long_inc(&dev->rx_dropped);
78715+ atomic_long_inc_unchecked(&dev->rx_dropped);
78716 kfree_skb(skb);
78717 return NET_RX_DROP;
78718 }
78719@@ -2093,7 +2097,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
78720
78721 struct dev_gso_cb {
78722 void (*destructor)(struct sk_buff *skb);
78723-};
78724+} __no_const;
78725
78726 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
78727
78728@@ -2955,7 +2959,7 @@ enqueue:
78729
78730 local_irq_restore(flags);
78731
78732- atomic_long_inc(&skb->dev->rx_dropped);
78733+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
78734 kfree_skb(skb);
78735 return NET_RX_DROP;
78736 }
78737@@ -3027,7 +3031,7 @@ int netif_rx_ni(struct sk_buff *skb)
78738 }
78739 EXPORT_SYMBOL(netif_rx_ni);
78740
78741-static void net_tx_action(struct softirq_action *h)
78742+static void net_tx_action(void)
78743 {
78744 struct softnet_data *sd = &__get_cpu_var(softnet_data);
78745
78746@@ -3358,7 +3362,7 @@ ncls:
78747 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
78748 } else {
78749 drop:
78750- atomic_long_inc(&skb->dev->rx_dropped);
78751+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
78752 kfree_skb(skb);
78753 /* Jamal, now you will not able to escape explaining
78754 * me how you were going to use this. :-)
78755@@ -3944,7 +3948,7 @@ void netif_napi_del(struct napi_struct *napi)
78756 }
78757 EXPORT_SYMBOL(netif_napi_del);
78758
78759-static void net_rx_action(struct softirq_action *h)
78760+static void net_rx_action(void)
78761 {
78762 struct softnet_data *sd = &__get_cpu_var(softnet_data);
78763 unsigned long time_limit = jiffies + 2;
78764@@ -4423,8 +4427,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
78765 else
78766 seq_printf(seq, "%04x", ntohs(pt->type));
78767
78768+#ifdef CONFIG_GRKERNSEC_HIDESYM
78769+ seq_printf(seq, " %-8s %p\n",
78770+ pt->dev ? pt->dev->name : "", NULL);
78771+#else
78772 seq_printf(seq, " %-8s %pF\n",
78773 pt->dev ? pt->dev->name : "", pt->func);
78774+#endif
78775 }
78776
78777 return 0;
78778@@ -5987,7 +5996,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
78779 } else {
78780 netdev_stats_to_stats64(storage, &dev->stats);
78781 }
78782- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
78783+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
78784 return storage;
78785 }
78786 EXPORT_SYMBOL(dev_get_stats);
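Three independent changes land in net/core/dev.c: under MODHARDEN, automatic module loading for unknown interface names is routed through the grsec-mediated ___request_module() path; dev->rx_dropped becomes an unchecked (wrap-allowed) counter like the other statistics; and under HIDESYM the /proc/net/ptype dump prints a NULL pointer instead of %pF-resolving the packet handler, so kernel text addresses are not disclosed. Sketch of the HIDESYM branch only:

	/* Sketch: suppress kernel text pointers in a seq_file dump
	 * under CONFIG_GRKERNSEC_HIDESYM (added by this patch). */
	static void show_handler_sketch(struct seq_file *seq,
					const char *name, void *func)
	{
	#ifdef CONFIG_GRKERNSEC_HIDESYM
		seq_printf(seq, " %-8s %p\n", name, NULL);	/* nothing to leak */
	#else
		seq_printf(seq, " %-8s %pF\n", name, func);	/* symbol+offset */
	#endif
	}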
78787diff --git a/net/core/flow.c b/net/core/flow.c
78788index e318c7e..168b1d0 100644
78789--- a/net/core/flow.c
78790+++ b/net/core/flow.c
78791@@ -61,7 +61,7 @@ struct flow_cache {
78792 struct timer_list rnd_timer;
78793 };
78794
78795-atomic_t flow_cache_genid = ATOMIC_INIT(0);
78796+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
78797 EXPORT_SYMBOL(flow_cache_genid);
78798 static struct flow_cache flow_cache_global;
78799 static struct kmem_cache *flow_cachep __read_mostly;
78800@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
78801
78802 static int flow_entry_valid(struct flow_cache_entry *fle)
78803 {
78804- if (atomic_read(&flow_cache_genid) != fle->genid)
78805+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
78806 return 0;
78807 if (fle->object && !fle->object->ops->check(fle->object))
78808 return 0;
78809@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
78810 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
78811 fcp->hash_count++;
78812 }
78813- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
78814+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
78815 flo = fle->object;
78816 if (!flo)
78817 goto ret_object;
78818@@ -280,7 +280,7 @@ nocache:
78819 }
78820 flo = resolver(net, key, family, dir, flo, ctx);
78821 if (fle) {
78822- fle->genid = atomic_read(&flow_cache_genid);
78823+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
78824 if (!IS_ERR(flo))
78825 fle->object = flo;
78826 else
78827diff --git a/net/core/iovec.c b/net/core/iovec.c
78828index 7e7aeb0..2a998cb 100644
78829--- a/net/core/iovec.c
78830+++ b/net/core/iovec.c
78831@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
78832 if (m->msg_namelen) {
78833 if (mode == VERIFY_READ) {
78834 void __user *namep;
78835- namep = (void __user __force *) m->msg_name;
78836+ namep = (void __force_user *) m->msg_name;
78837 err = move_addr_to_kernel(namep, m->msg_namelen,
78838 address);
78839 if (err < 0)
78840@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
78841 }
78842
78843 size = m->msg_iovlen * sizeof(struct iovec);
78844- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
78845+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
78846 return -EFAULT;
78847
78848 m->msg_iov = iov;
78849diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
78850index fad649a..f2fdac4 100644
78851--- a/net/core/rtnetlink.c
78852+++ b/net/core/rtnetlink.c
78853@@ -198,14 +198,16 @@ int __rtnl_register(int protocol, int msgtype,
78854 rtnl_msg_handlers[protocol] = tab;
78855 }
78856
78857+ pax_open_kernel();
78858 if (doit)
78859- tab[msgindex].doit = doit;
78860+ *(void **)&tab[msgindex].doit = doit;
78861
78862 if (dumpit)
78863- tab[msgindex].dumpit = dumpit;
78864+ *(void **)&tab[msgindex].dumpit = dumpit;
78865
78866 if (calcit)
78867- tab[msgindex].calcit = calcit;
78868+ *(void **)&tab[msgindex].calcit = calcit;
78869+ pax_close_kernel();
78870
78871 return 0;
78872 }
78873@@ -248,8 +250,10 @@ int rtnl_unregister(int protocol, int msgtype)
78874 if (rtnl_msg_handlers[protocol] == NULL)
78875 return -ENOENT;
78876
78877- rtnl_msg_handlers[protocol][msgindex].doit = NULL;
78878- rtnl_msg_handlers[protocol][msgindex].dumpit = NULL;
78879+ pax_open_kernel();
78880+ *(void **)&rtnl_msg_handlers[protocol][msgindex].doit = NULL;
78881+ *(void **)&rtnl_msg_handlers[protocol][msgindex].dumpit = NULL;
78882+ pax_close_kernel();
78883
78884 return 0;
78885 }
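With the rtnl handler tables constified, plain stores to tab[msgindex].doit would fault; pax_open_kernel()/pax_close_kernel() open a temporary write window (on x86, by toggling CR0.WP) and the *(void **)& casts strip the const that the PaX constify plugin applies to the ops structure. Sketch, assuming those PaX primitives:

	/* Sketch of registration into a normally read-only handler
	 * table; pax_open_kernel()/pax_close_kernel() come from this
	 * patch, and 'doit' would be constified in the real tree. */
	struct handler_sketch {
		int (*doit)(void);
	};

	static void set_doit(struct handler_sketch *slot, int (*doit)(void))
	{
		pax_open_kernel();			/* lift write protection */
		*(void **)&slot->doit = (void *)doit;	/* cast defeats const */
		pax_close_kernel();			/* restore protection */
	}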
78886diff --git a/net/core/scm.c b/net/core/scm.c
78887index ab57084..0190c8f 100644
78888--- a/net/core/scm.c
78889+++ b/net/core/scm.c
78890@@ -223,7 +223,7 @@ EXPORT_SYMBOL(__scm_send);
78891 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
78892 {
78893 struct cmsghdr __user *cm
78894- = (__force struct cmsghdr __user *)msg->msg_control;
78895+ = (struct cmsghdr __force_user *)msg->msg_control;
78896 struct cmsghdr cmhdr;
78897 int cmlen = CMSG_LEN(len);
78898 int err;
78899@@ -246,7 +246,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
78900 err = -EFAULT;
78901 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
78902 goto out;
78903- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
78904+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
78905 goto out;
78906 cmlen = CMSG_SPACE(len);
78907 if (msg->msg_controllen < cmlen)
78908@@ -262,7 +262,7 @@ EXPORT_SYMBOL(put_cmsg);
78909 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
78910 {
78911 struct cmsghdr __user *cm
78912- = (__force struct cmsghdr __user*)msg->msg_control;
78913+ = (struct cmsghdr __force_user *)msg->msg_control;
78914
78915 int fdmax = 0;
78916 int fdnum = scm->fp->count;
78917@@ -282,7 +282,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
78918 if (fdnum < fdmax)
78919 fdmax = fdnum;
78920
78921- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
78922+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
78923 i++, cmfptr++)
78924 {
78925 struct socket *sock;
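The __force_user/__force_kernel annotations running through net/compat.c, net/core/iovec.c and net/core/scm.c exist for sparse: under grsecurity's stricter address-space checking, every spot where a pointer legitimately crosses the __user/__kernel boundary (compat shims, set_fs(KERNEL_DS) sections) is marked explicitly instead of relying on a bare __force. Sketch of the canonical KERNEL_DS case, mirroring the do_set_sock_timeout() hunk above:

	/* Sketch of an annotated cross-address-space call. Inside a
	 * set_fs(KERNEL_DS) window a kernel buffer is valid where a
	 * __user pointer is expected; __force_user documents that. */
	static int set_timeout_sketch(struct socket *sock, int level,
				      int optname, struct timeval ktime)
	{
		mm_segment_t oldfs = get_fs();
		int err;

		set_fs(KERNEL_DS);
		err = sock_setsockopt(sock, level, optname,
				      (char __force_user *)&ktime, sizeof(ktime));
		set_fs(oldfs);
		return err;
	}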
78926diff --git a/net/core/sock.c b/net/core/sock.c
78927index 8a146cf..ee08914d 100644
78928--- a/net/core/sock.c
78929+++ b/net/core/sock.c
78930@@ -388,7 +388,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
78931 struct sk_buff_head *list = &sk->sk_receive_queue;
78932
78933 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
78934- atomic_inc(&sk->sk_drops);
78935+ atomic_inc_unchecked(&sk->sk_drops);
78936 trace_sock_rcvqueue_full(sk, skb);
78937 return -ENOMEM;
78938 }
78939@@ -398,7 +398,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
78940 return err;
78941
78942 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
78943- atomic_inc(&sk->sk_drops);
78944+ atomic_inc_unchecked(&sk->sk_drops);
78945 return -ENOBUFS;
78946 }
78947
78948@@ -418,7 +418,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
78949 skb_dst_force(skb);
78950
78951 spin_lock_irqsave(&list->lock, flags);
78952- skb->dropcount = atomic_read(&sk->sk_drops);
78953+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
78954 __skb_queue_tail(list, skb);
78955 spin_unlock_irqrestore(&list->lock, flags);
78956
78957@@ -438,7 +438,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
78958 skb->dev = NULL;
78959
78960 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
78961- atomic_inc(&sk->sk_drops);
78962+ atomic_inc_unchecked(&sk->sk_drops);
78963 goto discard_and_relse;
78964 }
78965 if (nested)
78966@@ -456,7 +456,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
78967 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
78968 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
78969 bh_unlock_sock(sk);
78970- atomic_inc(&sk->sk_drops);
78971+ atomic_inc_unchecked(&sk->sk_drops);
78972 goto discard_and_relse;
78973 }
78974
78975@@ -875,12 +875,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
78976 struct timeval tm;
78977 } v;
78978
78979- int lv = sizeof(int);
78980- int len;
78981+ unsigned int lv = sizeof(int);
78982+ unsigned int len;
78983
78984 if (get_user(len, optlen))
78985 return -EFAULT;
78986- if (len < 0)
78987+ if (len > INT_MAX)
78988 return -EINVAL;
78989
78990 memset(&v, 0, sizeof(v));
78991@@ -1028,11 +1028,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
78992
78993 case SO_PEERNAME:
78994 {
78995- char address[128];
78996+ char address[_K_SS_MAXSIZE];
78997
78998 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
78999 return -ENOTCONN;
79000- if (lv < len)
79001+ if (lv < len || sizeof address < len)
79002 return -EINVAL;
79003 if (copy_to_user(optval, address, len))
79004 return -EFAULT;
79005@@ -1080,7 +1080,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
79006
79007 if (len > lv)
79008 len = lv;
79009- if (copy_to_user(optval, &v, len))
79010+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
79011 return -EFAULT;
79012 lenout:
79013 if (put_user(len, optlen))
79014@@ -2212,7 +2212,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
79015 */
79016 smp_wmb();
79017 atomic_set(&sk->sk_refcnt, 1);
79018- atomic_set(&sk->sk_drops, 0);
79019+ atomic_set_unchecked(&sk->sk_drops, 0);
79020 }
79021 EXPORT_SYMBOL(sock_init_data);
79022
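sock_getsockopt() previously held the user-supplied length in a signed int, so later mixed signed/unsigned comparisons could misbehave for negative lengths. Making lv/len unsigned and bounding them (len > INT_MAX rejected, the SO_PEERNAME buffer sized to _K_SS_MAXSIZE and checked, the final copy bounded by sizeof(v)) keeps every comparison in one domain. Sketch of the length discipline around SO_PEERNAME:

	/* Sketch of the unsigned-length discipline in sock_getsockopt();
	 * 'len' arrives from userspace via optlen and is untrusted. */
	static int peername_sketch(struct socket *sock, char __user *optval,
				   int __user *optlen)
	{
		char address[_K_SS_MAXSIZE];	/* sockaddr_storage-sized */
		unsigned int lv = sizeof(address);
		unsigned int len;

		if (get_user(len, optlen))
			return -EFAULT;
		if (len > INT_MAX)		/* replaces the signed "len < 0" */
			return -EINVAL;
		memset(address, 0, sizeof(address));
		if (sock->ops->getname(sock, (struct sockaddr *)address,
				       (int *)&lv, 2))
			return -ENOTCONN;
		if (lv < len || sizeof(address) < len)	/* both bounds enforced */
			return -EINVAL;
		return copy_to_user(optval, address, len) ? -EFAULT : 0;
	}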
79023diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
79024index 602cd63..05c6c60 100644
79025--- a/net/core/sock_diag.c
79026+++ b/net/core/sock_diag.c
79027@@ -15,20 +15,27 @@ static DEFINE_MUTEX(sock_diag_table_mutex);
79028
79029 int sock_diag_check_cookie(void *sk, __u32 *cookie)
79030 {
79031+#ifndef CONFIG_GRKERNSEC_HIDESYM
79032 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
79033 cookie[1] != INET_DIAG_NOCOOKIE) &&
79034 ((u32)(unsigned long)sk != cookie[0] ||
79035 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
79036 return -ESTALE;
79037 else
79038+#endif
79039 return 0;
79040 }
79041 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
79042
79043 void sock_diag_save_cookie(void *sk, __u32 *cookie)
79044 {
79045+#ifdef CONFIG_GRKERNSEC_HIDESYM
79046+ cookie[0] = 0;
79047+ cookie[1] = 0;
79048+#else
79049 cookie[0] = (u32)(unsigned long)sk;
79050 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
79051+#endif
79052 }
79053 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
79054
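sock_diag cookies normally encode the socket's kernel address, which is an infoleak under HIDESYM; the patch writes zero cookies on save and skips the staleness comparison on check, trading stale-handle detection for not handing heap addresses to userspace via the diag interface. Sketch of the save side:

	/* Sketch: keep kernel heap addresses out of diag cookies under
	 * HIDESYM; the staleness check is sacrificed deliberately. */
	static void save_cookie_sketch(void *sk, __u32 *cookie)
	{
	#ifdef CONFIG_GRKERNSEC_HIDESYM
		cookie[0] = 0;			/* nothing address-derived */
		cookie[1] = 0;
	#else
		cookie[0] = (__u32)(unsigned long)sk;
		cookie[1] = (__u32)(((unsigned long)sk >> 31) >> 1);
	#endif
	}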
79055diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
79056index a55eecc..dd8428c 100644
79057--- a/net/decnet/sysctl_net_decnet.c
79058+++ b/net/decnet/sysctl_net_decnet.c
79059@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
79060
79061 if (len > *lenp) len = *lenp;
79062
79063- if (copy_to_user(buffer, addr, len))
79064+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
79065 return -EFAULT;
79066
79067 *lenp = len;
79068@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
79069
79070 if (len > *lenp) len = *lenp;
79071
79072- if (copy_to_user(buffer, devname, len))
79073+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
79074 return -EFAULT;
79075
79076 *lenp = len;
79077diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
79078index 2a6abc1..c379ba7 100644
79079--- a/net/ipv4/devinet.c
79080+++ b/net/ipv4/devinet.c
79081@@ -822,9 +822,9 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
79082 if (!ifa) {
79083 ret = -ENOBUFS;
79084 ifa = inet_alloc_ifa();
79085+ if (!ifa)
79086+ break;
79087 INIT_HLIST_NODE(&ifa->hash);
79088- if (!ifa)
79089- break;
79090 if (colon)
79091 memcpy(ifa->ifa_label, ifr.ifr_name, IFNAMSIZ);
79092 else
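The devinet.c hunk is a straight bug fix folded into the patch: INIT_HLIST_NODE() ran on the result of inet_alloc_ifa() before the NULL check, so an allocation failure meant a NULL dereference. The fix is pure reordering:

	/* Sketch: test an allocation before initializing the object. */
	struct in_ifaddr *ifa = inet_alloc_ifa();

	if (!ifa)
		return -ENOBUFS;	/* bail before any dereference */
	INIT_HLIST_NODE(&ifa->hash);	/* now known non-NULL */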
79093diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
79094index 825c608..750ff29 100644
79095--- a/net/ipv4/fib_frontend.c
79096+++ b/net/ipv4/fib_frontend.c
79097@@ -1020,12 +1020,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
79098 #ifdef CONFIG_IP_ROUTE_MULTIPATH
79099 fib_sync_up(dev);
79100 #endif
79101- atomic_inc(&net->ipv4.dev_addr_genid);
79102+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
79103 rt_cache_flush(dev_net(dev));
79104 break;
79105 case NETDEV_DOWN:
79106 fib_del_ifaddr(ifa, NULL);
79107- atomic_inc(&net->ipv4.dev_addr_genid);
79108+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
79109 if (ifa->ifa_dev->ifa_list == NULL) {
79110 /* Last address was deleted from this interface.
79111 * Disable IP.
79112@@ -1061,7 +1061,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
79113 #ifdef CONFIG_IP_ROUTE_MULTIPATH
79114 fib_sync_up(dev);
79115 #endif
79116- atomic_inc(&net->ipv4.dev_addr_genid);
79117+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
79118 rt_cache_flush(net);
79119 break;
79120 case NETDEV_DOWN:
79121diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
79122index 71b125c..f4c70b0 100644
79123--- a/net/ipv4/fib_semantics.c
79124+++ b/net/ipv4/fib_semantics.c
79125@@ -767,7 +767,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
79126 nh->nh_saddr = inet_select_addr(nh->nh_dev,
79127 nh->nh_gw,
79128 nh->nh_parent->fib_scope);
79129- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
79130+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
79131
79132 return nh->nh_saddr;
79133 }
79134diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
79135index 7880af9..70f92a3 100644
79136--- a/net/ipv4/inet_hashtables.c
79137+++ b/net/ipv4/inet_hashtables.c
79138@@ -18,12 +18,15 @@
79139 #include <linux/sched.h>
79140 #include <linux/slab.h>
79141 #include <linux/wait.h>
79142+#include <linux/security.h>
79143
79144 #include <net/inet_connection_sock.h>
79145 #include <net/inet_hashtables.h>
79146 #include <net/secure_seq.h>
79147 #include <net/ip.h>
79148
79149+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
79150+
79151 /*
79152 * Allocate and initialize a new local port bind bucket.
79153 * The bindhash mutex for snum's hash chain must be held here.
79154@@ -530,6 +533,8 @@ ok:
79155 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
79156 spin_unlock(&head->lock);
79157
79158+ gr_update_task_in_ip_table(current, inet_sk(sk));
79159+
79160 if (tw) {
79161 inet_twsk_deschedule(tw, death_row);
79162 while (twrefcnt) {
79163diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
79164index 000e3d2..5472da3 100644
79165--- a/net/ipv4/inetpeer.c
79166+++ b/net/ipv4/inetpeer.c
79167@@ -503,8 +503,8 @@ relookup:
79168 if (p) {
79169 p->daddr = *daddr;
79170 atomic_set(&p->refcnt, 1);
79171- atomic_set(&p->rid, 0);
79172- atomic_set(&p->ip_id_count,
79173+ atomic_set_unchecked(&p->rid, 0);
79174+ atomic_set_unchecked(&p->ip_id_count,
79175 (daddr->family == AF_INET) ?
79176 secure_ip_id(daddr->addr.a4) :
79177 secure_ipv6_id(daddr->addr.a6));
79178diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
79179index 8d5cc75..821fd11 100644
79180--- a/net/ipv4/ip_fragment.c
79181+++ b/net/ipv4/ip_fragment.c
79182@@ -322,7 +322,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
79183 return 0;
79184
79185 start = qp->rid;
79186- end = atomic_inc_return(&peer->rid);
79187+ end = atomic_inc_return_unchecked(&peer->rid);
79188 qp->rid = end;
79189
79190 rc = qp->q.fragments && (end - start) > max;
79191diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
79192index 14bbfcf..644f472 100644
79193--- a/net/ipv4/ip_sockglue.c
79194+++ b/net/ipv4/ip_sockglue.c
79195@@ -1151,7 +1151,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
79196 len = min_t(unsigned int, len, opt->optlen);
79197 if (put_user(len, optlen))
79198 return -EFAULT;
79199- if (copy_to_user(optval, opt->__data, len))
79200+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
79201+ copy_to_user(optval, opt->__data, len))
79202 return -EFAULT;
79203 return 0;
79204 }
79205@@ -1282,7 +1283,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
79206 if (sk->sk_type != SOCK_STREAM)
79207 return -ENOPROTOOPT;
79208
79209- msg.msg_control = optval;
79210+ msg.msg_control = (void __force_kernel *)optval;
79211 msg.msg_controllen = len;
79212 msg.msg_flags = flags;
79213
79214diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
79215index 798358b..73570b7 100644
79216--- a/net/ipv4/ipconfig.c
79217+++ b/net/ipv4/ipconfig.c
79218@@ -321,7 +321,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
79219
79220 mm_segment_t oldfs = get_fs();
79221 set_fs(get_ds());
79222- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
79223+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
79224 set_fs(oldfs);
79225 return res;
79226 }
79227@@ -332,7 +332,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
79228
79229 mm_segment_t oldfs = get_fs();
79230 set_fs(get_ds());
79231- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
79232+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
79233 set_fs(oldfs);
79234 return res;
79235 }
79236@@ -343,7 +343,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
79237
79238 mm_segment_t oldfs = get_fs();
79239 set_fs(get_ds());
79240- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
79241+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
79242 set_fs(oldfs);
79243 return res;
79244 }
79245diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
79246index 97e61ea..cac1bbb 100644
79247--- a/net/ipv4/netfilter/arp_tables.c
79248+++ b/net/ipv4/netfilter/arp_tables.c
79249@@ -879,14 +879,14 @@ static int compat_table_info(const struct xt_table_info *info,
79250 #endif
79251
79252 static int get_info(struct net *net, void __user *user,
79253- const int *len, int compat)
79254+ int len, int compat)
79255 {
79256 char name[XT_TABLE_MAXNAMELEN];
79257 struct xt_table *t;
79258 int ret;
79259
79260- if (*len != sizeof(struct arpt_getinfo)) {
79261- duprintf("length %u != %Zu\n", *len,
79262+ if (len != sizeof(struct arpt_getinfo)) {
79263+ duprintf("length %u != %Zu\n", len,
79264 sizeof(struct arpt_getinfo));
79265 return -EINVAL;
79266 }
79267@@ -923,7 +923,7 @@ static int get_info(struct net *net, void __user *user,
79268 info.size = private->size;
79269 strcpy(info.name, name);
79270
79271- if (copy_to_user(user, &info, *len) != 0)
79272+ if (copy_to_user(user, &info, len) != 0)
79273 ret = -EFAULT;
79274 else
79275 ret = 0;
79276@@ -1682,7 +1682,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
79277
79278 switch (cmd) {
79279 case ARPT_SO_GET_INFO:
79280- ret = get_info(sock_net(sk), user, len, 1);
79281+ ret = get_info(sock_net(sk), user, *len, 1);
79282 break;
79283 case ARPT_SO_GET_ENTRIES:
79284 ret = compat_get_entries(sock_net(sk), user, len);
79285@@ -1727,7 +1727,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
79286
79287 switch (cmd) {
79288 case ARPT_SO_GET_INFO:
79289- ret = get_info(sock_net(sk), user, len, 0);
79290+ ret = get_info(sock_net(sk), user, *len, 0);
79291 break;
79292
79293 case ARPT_SO_GET_ENTRIES:
79294diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
79295index 170b1fd..6105b91 100644
79296--- a/net/ipv4/netfilter/ip_tables.c
79297+++ b/net/ipv4/netfilter/ip_tables.c
79298@@ -1068,14 +1068,14 @@ static int compat_table_info(const struct xt_table_info *info,
79299 #endif
79300
79301 static int get_info(struct net *net, void __user *user,
79302- const int *len, int compat)
79303+ int len, int compat)
79304 {
79305 char name[XT_TABLE_MAXNAMELEN];
79306 struct xt_table *t;
79307 int ret;
79308
79309- if (*len != sizeof(struct ipt_getinfo)) {
79310- duprintf("length %u != %zu\n", *len,
79311+ if (len != sizeof(struct ipt_getinfo)) {
79312+ duprintf("length %u != %zu\n", len,
79313 sizeof(struct ipt_getinfo));
79314 return -EINVAL;
79315 }
79316@@ -1112,7 +1112,7 @@ static int get_info(struct net *net, void __user *user,
79317 info.size = private->size;
79318 strcpy(info.name, name);
79319
79320- if (copy_to_user(user, &info, *len) != 0)
79321+ if (copy_to_user(user, &info, len) != 0)
79322 ret = -EFAULT;
79323 else
79324 ret = 0;
79325@@ -1966,7 +1966,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
79326
79327 switch (cmd) {
79328 case IPT_SO_GET_INFO:
79329- ret = get_info(sock_net(sk), user, len, 1);
79330+ ret = get_info(sock_net(sk), user, *len, 1);
79331 break;
79332 case IPT_SO_GET_ENTRIES:
79333 ret = compat_get_entries(sock_net(sk), user, len);
79334@@ -2013,7 +2013,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
79335
79336 switch (cmd) {
79337 case IPT_SO_GET_INFO:
79338- ret = get_info(sock_net(sk), user, len, 0);
79339+ ret = get_info(sock_net(sk), user, *len, 0);
79340 break;
79341
79342 case IPT_SO_GET_ENTRIES:
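get_info() in arp_tables and ip_tables (and ip6_tables below) used to take `const int *len` aimed at memory it re-read several times; passing the length by value means the caller dereferences it exactly once (get_info(..., *len, ...)), eliminating the double-fetch window between the `len != sizeof(...)` validation and the copy_to_user() that later uses it. Sketch:

	/* Sketch of the pass-by-value fix: the length is snapshotted
	 * once at the call site and every later use sees that same
	 * snapshot -- no re-fetch from caller-visible memory. */
	static int get_info_sketch(void __user *user, int len)
	{
		struct ipt_getinfo info;

		if (len != sizeof(info))	/* validate the snapshot */
			return -EINVAL;
		memset(&info, 0, sizeof(info));
		/* ... fill info from the table ... */
		if (copy_to_user(user, &info, len))	/* same snapshot */
			return -EFAULT;
		return 0;
	}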
79343diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
79344index 8f3d054..c58d05d 100644
79345--- a/net/ipv4/ping.c
79346+++ b/net/ipv4/ping.c
79347@@ -843,7 +843,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
79348 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
79349 0, sock_i_ino(sp),
79350 atomic_read(&sp->sk_refcnt), sp,
79351- atomic_read(&sp->sk_drops), len);
79352+ atomic_read_unchecked(&sp->sk_drops), len);
79353 }
79354
79355 static int ping_seq_show(struct seq_file *seq, void *v)
79356diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
79357index 73d1e4d..3af0e8f 100644
79358--- a/net/ipv4/raw.c
79359+++ b/net/ipv4/raw.c
79360@@ -311,7 +311,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
79361 int raw_rcv(struct sock *sk, struct sk_buff *skb)
79362 {
79363 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
79364- atomic_inc(&sk->sk_drops);
79365+ atomic_inc_unchecked(&sk->sk_drops);
79366 kfree_skb(skb);
79367 return NET_RX_DROP;
79368 }
79369@@ -747,16 +747,20 @@ static int raw_init(struct sock *sk)
79370
79371 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
79372 {
79373+ struct icmp_filter filter;
79374+
79375 if (optlen > sizeof(struct icmp_filter))
79376 optlen = sizeof(struct icmp_filter);
79377- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
79378+ if (copy_from_user(&filter, optval, optlen))
79379 return -EFAULT;
79380+ raw_sk(sk)->filter = filter;
79381 return 0;
79382 }
79383
79384 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
79385 {
79386 int len, ret = -EFAULT;
79387+ struct icmp_filter filter;
79388
79389 if (get_user(len, optlen))
79390 goto out;
79391@@ -766,8 +770,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
79392 if (len > sizeof(struct icmp_filter))
79393 len = sizeof(struct icmp_filter);
79394 ret = -EFAULT;
79395- if (put_user(len, optlen) ||
79396- copy_to_user(optval, &raw_sk(sk)->filter, len))
79397+ filter = raw_sk(sk)->filter;
79398+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
79399 goto out;
79400 ret = 0;
79401 out: return ret;
79402@@ -997,7 +1001,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
79403 0, 0L, 0,
79404 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
79405 0, sock_i_ino(sp),
79406- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
79407+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
79408 }
79409
79410 static int raw_seq_show(struct seq_file *seq, void *v)
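raw_seticmpfilter()/raw_geticmpfilter() now bounce the ICMP filter through a stack-local struct: copy_from_user() can no longer leave raw_sk(sk)->filter half-written on fault, and the get side copies out of a local snapshot with an explicit len bound rather than reading the live socket field. Sketch of the set side:

	/* Sketch: stage the user copy in a stack local so socket state
	 * is only updated by one whole-struct assignment. */
	static int seticmpfilter_sketch(struct sock *sk, char __user *optval,
					int optlen)
	{
		struct icmp_filter filter;

		if (optlen > sizeof(filter))
			optlen = sizeof(filter);
		if (copy_from_user(&filter, optval, optlen))
			return -EFAULT;		/* socket state untouched */
		raw_sk(sk)->filter = filter;	/* publish complete value */
		return 0;
	}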
79411diff --git a/net/ipv4/route.c b/net/ipv4/route.c
79412index df25142..e92a82a 100644
79413--- a/net/ipv4/route.c
79414+++ b/net/ipv4/route.c
79415@@ -2529,7 +2529,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
79416
79417 static __net_init int rt_genid_init(struct net *net)
79418 {
79419- atomic_set(&net->rt_genid, 0);
79420+ atomic_set_unchecked(&net->rt_genid, 0);
79421 get_random_bytes(&net->ipv4.dev_addr_genid,
79422 sizeof(net->ipv4.dev_addr_genid));
79423 return 0;
79424diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
79425index 181fc82..cc95f8c 100644
79426--- a/net/ipv4/tcp_input.c
79427+++ b/net/ipv4/tcp_input.c
79428@@ -4704,7 +4704,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
79429 * simplifies code)
79430 */
79431 static void
79432-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
79433+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
79434 struct sk_buff *head, struct sk_buff *tail,
79435 u32 start, u32 end)
79436 {
79437@@ -5536,6 +5536,9 @@ slow_path:
79438 if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
79439 goto csum_error;
79440
79441+ if (!th->ack)
79442+ goto discard;
79443+
79444 /*
79445 * Standard slow path.
79446 */
79447@@ -5544,7 +5547,7 @@ slow_path:
79448 return 0;
79449
79450 step5:
79451- if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
79452+ if (tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
79453 goto discard;
79454
79455 /* ts_recent update must be made after we are sure that the packet
79456@@ -5836,6 +5839,7 @@ discard:
79457 tcp_paws_reject(&tp->rx_opt, 0))
79458 goto discard_and_undo;
79459
79460+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
79461 if (th->syn) {
79462 /* We see SYN without ACK. It is attempt of
79463 * simultaneous connect with crossed SYNs.
79464@@ -5886,6 +5890,7 @@ discard:
79465 goto discard;
79466 #endif
79467 }
79468+#endif
79469 /* "fifth, if neither of the SYN or RST bits is set then
79470 * drop the segment and return."
79471 */
79472@@ -5930,7 +5935,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
79473 goto discard;
79474
79475 if (th->syn) {
79476- if (th->fin)
79477+ if (th->fin || th->urg || th->psh)
79478 goto discard;
79479 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
79480 return 1;
79481@@ -5977,11 +5982,15 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
79482 if (tcp_check_req(sk, skb, req, NULL, true) == NULL)
79483 goto discard;
79484 }
79485+
79486+ if (!th->ack)
79487+ goto discard;
79488+
79489 if (!tcp_validate_incoming(sk, skb, th, 0))
79490 return 0;
79491
79492 /* step 5: check the ACK field */
79493- if (th->ack) {
79494+ if (true) {
79495 int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
79496
79497 switch (sk->sk_state) {
79498@@ -6131,8 +6140,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
79499 }
79500 break;
79501 }
79502- } else
79503- goto discard;
79504+ }
79505
79506 /* ts_recent update must be made after we are sure that the packet
79507 * is in window.
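Two things happen in tcp_input.c. The th->ack check is hoisted: segments without ACK are discarded before tcp_validate_incoming() and step 5, so the step-5 body can run unconditionally and the old trailing `else goto discard;` disappears. And GRKERNSEC_NO_SIMULT_CONNECT compiles out RFC 793 simultaneous-open handling, which grsecurity regards as needless attack surface. Control-flow sketch of the hoist:

	/* Sketch of the hoisted ACK test in tcp_rcv_state_process():
	 * before, step 5 was "if (th->ack) { ... } else goto discard;";
	 * after, non-ACK segments never reach step 5 at all. */
	if (!th->ack)
		goto discard;			/* rejected up front */

	if (!tcp_validate_incoming(sk, skb, th, 0))
		return 0;

	/* step 5: th->ack is guaranteed set here */
	acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;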
79508diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
79509index bc3cb46..815ccd6 100644
79510--- a/net/ipv4/tcp_ipv4.c
79511+++ b/net/ipv4/tcp_ipv4.c
79512@@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
79513 EXPORT_SYMBOL(sysctl_tcp_low_latency);
79514
79515
79516+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79517+extern int grsec_enable_blackhole;
79518+#endif
79519+
79520 #ifdef CONFIG_TCP_MD5SIG
79521 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
79522 __be32 daddr, __be32 saddr, const struct tcphdr *th);
79523@@ -1899,6 +1903,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
79524 return 0;
79525
79526 reset:
79527+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79528+ if (!grsec_enable_blackhole)
79529+#endif
79530 tcp_v4_send_reset(rsk, skb);
79531 discard:
79532 kfree_skb(skb);
79533@@ -1999,12 +2006,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
79534 TCP_SKB_CB(skb)->sacked = 0;
79535
79536 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
79537- if (!sk)
79538+ if (!sk) {
79539+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79540+ ret = 1;
79541+#endif
79542 goto no_tcp_socket;
79543-
79544+ }
79545 process:
79546- if (sk->sk_state == TCP_TIME_WAIT)
79547+ if (sk->sk_state == TCP_TIME_WAIT) {
79548+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79549+ ret = 2;
79550+#endif
79551 goto do_time_wait;
79552+ }
79553
79554 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
79555 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
79556@@ -2055,6 +2069,10 @@ no_tcp_socket:
79557 bad_packet:
79558 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
79559 } else {
79560+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79561+ if (!grsec_enable_blackhole || (ret == 1 &&
79562+ (skb->dev->flags & IFF_LOOPBACK)))
79563+#endif
79564 tcp_v4_send_reset(NULL, skb);
79565 }
79566
79567diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
79568index a7302d9..e3ec754 100644
79569--- a/net/ipv4/tcp_minisocks.c
79570+++ b/net/ipv4/tcp_minisocks.c
79571@@ -27,6 +27,10 @@
79572 #include <net/inet_common.h>
79573 #include <net/xfrm.h>
79574
79575+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79576+extern int grsec_enable_blackhole;
79577+#endif
79578+
79579 int sysctl_tcp_syncookies __read_mostly = 1;
79580 EXPORT_SYMBOL(sysctl_tcp_syncookies);
79581
79582@@ -742,7 +746,10 @@ embryonic_reset:
79583 * avoid becoming vulnerable to outside attack aiming at
79584 * resetting legit local connections.
79585 */
79586- req->rsk_ops->send_reset(sk, skb);
79587+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79588+ if (!grsec_enable_blackhole)
79589+#endif
79590+ req->rsk_ops->send_reset(sk, skb);
79591 } else if (fastopen) { /* received a valid RST pkt */
79592 reqsk_fastopen_remove(sk, req, true);
79593 tcp_reset(sk);
79594diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
79595index 4526fe6..1a34e43 100644
79596--- a/net/ipv4/tcp_probe.c
79597+++ b/net/ipv4/tcp_probe.c
79598@@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
79599 if (cnt + width >= len)
79600 break;
79601
79602- if (copy_to_user(buf + cnt, tbuf, width))
79603+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
79604 return -EFAULT;
79605 cnt += width;
79606 }
79607diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
79608index d47c1b4..b0584de 100644
79609--- a/net/ipv4/tcp_timer.c
79610+++ b/net/ipv4/tcp_timer.c
79611@@ -22,6 +22,10 @@
79612 #include <linux/gfp.h>
79613 #include <net/tcp.h>
79614
79615+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79616+extern int grsec_lastack_retries;
79617+#endif
79618+
79619 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
79620 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
79621 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
79622@@ -185,6 +189,13 @@ static int tcp_write_timeout(struct sock *sk)
79623 }
79624 }
79625
79626+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79627+ if ((sk->sk_state == TCP_LAST_ACK) &&
79628+ (grsec_lastack_retries > 0) &&
79629+ (grsec_lastack_retries < retry_until))
79630+ retry_until = grsec_lastack_retries;
79631+#endif
79632+
79633 if (retransmits_timed_out(sk, retry_until,
79634 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
79635 /* Has it gone just too far? */
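With blackholing enabled, the peer of a socket stuck in LAST-ACK never answers the final ACK, so the full retransmission budget is wasted; grsec_lastack_retries (a sysctl the grsecurity patch adds) caps retry_until for exactly that state, and only when the sysctl is positive and tighter than the existing budget:

	/* Sketch: clamp the LAST-ACK retransmit budget; retry_until
	 * was computed from the normal retries sysctls just above. */
	if (sk->sk_state == TCP_LAST_ACK &&
	    grsec_lastack_retries > 0 &&
	    grsec_lastack_retries < retry_until)
		retry_until = grsec_lastack_retries;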
79636diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
79637index 79c8dbe..aceb1b6 100644
79638--- a/net/ipv4/udp.c
79639+++ b/net/ipv4/udp.c
79640@@ -87,6 +87,7 @@
79641 #include <linux/types.h>
79642 #include <linux/fcntl.h>
79643 #include <linux/module.h>
79644+#include <linux/security.h>
79645 #include <linux/socket.h>
79646 #include <linux/sockios.h>
79647 #include <linux/igmp.h>
79648@@ -111,6 +112,10 @@
79649 #include <trace/events/skb.h>
79650 #include "udp_impl.h"
79651
79652+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79653+extern int grsec_enable_blackhole;
79654+#endif
79655+
79656 struct udp_table udp_table __read_mostly;
79657 EXPORT_SYMBOL(udp_table);
79658
79659@@ -569,6 +574,9 @@ found:
79660 return s;
79661 }
79662
79663+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
79664+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
79665+
79666 /*
79667 * This routine is called by the ICMP module when it gets some
79668 * sort of error condition. If err < 0 then the socket should
79669@@ -864,9 +872,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
79670 dport = usin->sin_port;
79671 if (dport == 0)
79672 return -EINVAL;
79673+
79674+ err = gr_search_udp_sendmsg(sk, usin);
79675+ if (err)
79676+ return err;
79677 } else {
79678 if (sk->sk_state != TCP_ESTABLISHED)
79679 return -EDESTADDRREQ;
79680+
79681+ err = gr_search_udp_sendmsg(sk, NULL);
79682+ if (err)
79683+ return err;
79684+
79685 daddr = inet->inet_daddr;
79686 dport = inet->inet_dport;
79687 /* Open fast path for connected socket.
79688@@ -1108,7 +1125,7 @@ static unsigned int first_packet_length(struct sock *sk)
79689 udp_lib_checksum_complete(skb)) {
79690 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
79691 IS_UDPLITE(sk));
79692- atomic_inc(&sk->sk_drops);
79693+ atomic_inc_unchecked(&sk->sk_drops);
79694 __skb_unlink(skb, rcvq);
79695 __skb_queue_tail(&list_kill, skb);
79696 }
79697@@ -1194,6 +1211,10 @@ try_again:
79698 if (!skb)
79699 goto out;
79700
79701+ err = gr_search_udp_recvmsg(sk, skb);
79702+ if (err)
79703+ goto out_free;
79704+
79705 ulen = skb->len - sizeof(struct udphdr);
79706 copied = len;
79707 if (copied > ulen)
79708@@ -1227,7 +1248,7 @@ try_again:
79709 if (unlikely(err)) {
79710 trace_kfree_skb(skb, udp_recvmsg);
79711 if (!peeked) {
79712- atomic_inc(&sk->sk_drops);
79713+ atomic_inc_unchecked(&sk->sk_drops);
79714 UDP_INC_STATS_USER(sock_net(sk),
79715 UDP_MIB_INERRORS, is_udplite);
79716 }
79717@@ -1510,7 +1531,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
79718
79719 drop:
79720 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
79721- atomic_inc(&sk->sk_drops);
79722+ atomic_inc_unchecked(&sk->sk_drops);
79723 kfree_skb(skb);
79724 return -1;
79725 }
79726@@ -1529,7 +1550,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
79727 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
79728
79729 if (!skb1) {
79730- atomic_inc(&sk->sk_drops);
79731+ atomic_inc_unchecked(&sk->sk_drops);
79732 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
79733 IS_UDPLITE(sk));
79734 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
79735@@ -1698,6 +1719,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
79736 goto csum_error;
79737
79738 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
79739+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79740+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
79741+#endif
79742 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
79743
79744 /*
79745@@ -2119,7 +2143,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
79746 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
79747 0, sock_i_ino(sp),
79748 atomic_read(&sp->sk_refcnt), sp,
79749- atomic_read(&sp->sk_drops), len);
79750+ atomic_read_unchecked(&sp->sk_drops), len);
79751 }
79752
79753 int udp4_seq_show(struct seq_file *seq, void *v)
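udp.c gains two grsecurity integrations: gr_search_udp_sendmsg()/gr_search_udp_recvmsg() let the RBAC engine veto individual UDP sends and receives before any work is done, and the BLACKHOLE guard suppresses the ICMP port-unreachable for traffic to closed ports except on loopback, so local diagnostics keep working while remote port scans see nothing. Sketch of the guard:

	/* Sketch of the BLACKHOLE guard; grsec_enable_blackhole is the
	 * runtime sysctl this patch adds. Loopback stays exempt. */
	#ifdef CONFIG_GRKERNSEC_BLACKHOLE
		if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
	#endif
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);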
79754diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
79755index 0424e4e..308dd43 100644
79756--- a/net/ipv6/addrconf.c
79757+++ b/net/ipv6/addrconf.c
79758@@ -2121,7 +2121,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
79759 p.iph.ihl = 5;
79760 p.iph.protocol = IPPROTO_IPV6;
79761 p.iph.ttl = 64;
79762- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
79763+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
79764
79765 if (ops->ndo_do_ioctl) {
79766 mm_segment_t oldfs = get_fs();
79767diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
79768index d5cb3c4..b3e38d0 100644
79769--- a/net/ipv6/ip6_gre.c
79770+++ b/net/ipv6/ip6_gre.c
79771@@ -1353,7 +1353,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
79772 }
79773
79774
79775-static struct inet6_protocol ip6gre_protocol __read_mostly = {
79776+static struct inet6_protocol ip6gre_protocol = {
79777 .handler = ip6gre_rcv,
79778 .err_handler = ip6gre_err,
79779 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
79780diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
79781index e02faed..9780f28 100644
79782--- a/net/ipv6/ipv6_sockglue.c
79783+++ b/net/ipv6/ipv6_sockglue.c
79784@@ -990,7 +990,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
79785 if (sk->sk_type != SOCK_STREAM)
79786 return -ENOPROTOOPT;
79787
79788- msg.msg_control = optval;
79789+ msg.msg_control = (void __force_kernel *)optval;
79790 msg.msg_controllen = len;
79791 msg.msg_flags = flags;
79792
79793diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
79794index d7cb045..8c0ded6 100644
79795--- a/net/ipv6/netfilter/ip6_tables.c
79796+++ b/net/ipv6/netfilter/ip6_tables.c
79797@@ -1078,14 +1078,14 @@ static int compat_table_info(const struct xt_table_info *info,
79798 #endif
79799
79800 static int get_info(struct net *net, void __user *user,
79801- const int *len, int compat)
79802+ int len, int compat)
79803 {
79804 char name[XT_TABLE_MAXNAMELEN];
79805 struct xt_table *t;
79806 int ret;
79807
79808- if (*len != sizeof(struct ip6t_getinfo)) {
79809- duprintf("length %u != %zu\n", *len,
79810+ if (len != sizeof(struct ip6t_getinfo)) {
79811+ duprintf("length %u != %zu\n", len,
79812 sizeof(struct ip6t_getinfo));
79813 return -EINVAL;
79814 }
79815@@ -1122,7 +1122,7 @@ static int get_info(struct net *net, void __user *user,
79816 info.size = private->size;
79817 strcpy(info.name, name);
79818
79819- if (copy_to_user(user, &info, *len) != 0)
79820+ if (copy_to_user(user, &info, len) != 0)
79821 ret = -EFAULT;
79822 else
79823 ret = 0;
79824@@ -1976,7 +1976,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
79825
79826 switch (cmd) {
79827 case IP6T_SO_GET_INFO:
79828- ret = get_info(sock_net(sk), user, len, 1);
79829+ ret = get_info(sock_net(sk), user, *len, 1);
79830 break;
79831 case IP6T_SO_GET_ENTRIES:
79832 ret = compat_get_entries(sock_net(sk), user, len);
79833@@ -2023,7 +2023,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
79834
79835 switch (cmd) {
79836 case IP6T_SO_GET_INFO:
79837- ret = get_info(sock_net(sk), user, len, 0);
79838+ ret = get_info(sock_net(sk), user, *len, 0);
79839 break;
79840
79841 case IP6T_SO_GET_ENTRIES:
79842diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
79843index d8e95c7..81422bc 100644
79844--- a/net/ipv6/raw.c
79845+++ b/net/ipv6/raw.c
79846@@ -379,7 +379,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
79847 {
79848 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
79849 skb_checksum_complete(skb)) {
79850- atomic_inc(&sk->sk_drops);
79851+ atomic_inc_unchecked(&sk->sk_drops);
79852 kfree_skb(skb);
79853 return NET_RX_DROP;
79854 }
79855@@ -407,7 +407,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
79856 struct raw6_sock *rp = raw6_sk(sk);
79857
79858 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
79859- atomic_inc(&sk->sk_drops);
79860+ atomic_inc_unchecked(&sk->sk_drops);
79861 kfree_skb(skb);
79862 return NET_RX_DROP;
79863 }
79864@@ -431,7 +431,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
79865
79866 if (inet->hdrincl) {
79867 if (skb_checksum_complete(skb)) {
79868- atomic_inc(&sk->sk_drops);
79869+ atomic_inc_unchecked(&sk->sk_drops);
79870 kfree_skb(skb);
79871 return NET_RX_DROP;
79872 }
79873@@ -604,7 +604,7 @@ out:
79874 return err;
79875 }
79876
79877-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
79878+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
79879 struct flowi6 *fl6, struct dst_entry **dstp,
79880 unsigned int flags)
79881 {
79882@@ -916,12 +916,15 @@ do_confirm:
79883 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
79884 char __user *optval, int optlen)
79885 {
79886+ struct icmp6_filter filter;
79887+
79888 switch (optname) {
79889 case ICMPV6_FILTER:
79890 if (optlen > sizeof(struct icmp6_filter))
79891 optlen = sizeof(struct icmp6_filter);
79892- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
79893+ if (copy_from_user(&filter, optval, optlen))
79894 return -EFAULT;
79895+ raw6_sk(sk)->filter = filter;
79896 return 0;
79897 default:
79898 return -ENOPROTOOPT;
79899@@ -934,6 +937,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
79900 char __user *optval, int __user *optlen)
79901 {
79902 int len;
79903+ struct icmp6_filter filter;
79904
79905 switch (optname) {
79906 case ICMPV6_FILTER:
79907@@ -945,7 +949,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
79908 len = sizeof(struct icmp6_filter);
79909 if (put_user(len, optlen))
79910 return -EFAULT;
79911- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
79912+ filter = raw6_sk(sk)->filter;
79913+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
79914 return -EFAULT;
79915 return 0;
79916 default:
79917@@ -1253,7 +1258,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
79918 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
79919 0,
79920 sock_i_ino(sp),
79921- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
79922+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
79923 }
79924
79925 static int raw6_seq_show(struct seq_file *seq, void *v)
79926diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
79927index 73f2a6b..f8049a1 100644
79928--- a/net/ipv6/tcp_ipv6.c
79929+++ b/net/ipv6/tcp_ipv6.c
79930@@ -106,6 +106,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
79931 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
79932 }
79933
79934+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79935+extern int grsec_enable_blackhole;
79936+#endif
79937+
79938 static void tcp_v6_hash(struct sock *sk)
79939 {
79940 if (sk->sk_state != TCP_CLOSE) {
79941@@ -1525,6 +1529,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
79942 return 0;
79943
79944 reset:
79945+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79946+ if (!grsec_enable_blackhole)
79947+#endif
79948 tcp_v6_send_reset(sk, skb);
79949 discard:
79950 if (opt_skb)
79951@@ -1606,12 +1613,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
79952 TCP_SKB_CB(skb)->sacked = 0;
79953
79954 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
79955- if (!sk)
79956+ if (!sk) {
79957+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79958+ ret = 1;
79959+#endif
79960 goto no_tcp_socket;
79961+ }
79962
79963 process:
79964- if (sk->sk_state == TCP_TIME_WAIT)
79965+ if (sk->sk_state == TCP_TIME_WAIT) {
79966+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79967+ ret = 2;
79968+#endif
79969 goto do_time_wait;
79970+ }
79971
79972 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
79973 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
79974@@ -1660,6 +1675,10 @@ no_tcp_socket:
79975 bad_packet:
79976 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
79977 } else {
79978+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79979+ if (!grsec_enable_blackhole || (ret == 1 &&
79980+ (skb->dev->flags & IFF_LOOPBACK)))
79981+#endif
79982 tcp_v6_send_reset(NULL, skb);
79983 }
79984
79985diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
79986index fc99972..69397e8 100644
79987--- a/net/ipv6/udp.c
79988+++ b/net/ipv6/udp.c
79989@@ -51,6 +51,10 @@
79990 #include <trace/events/skb.h>
79991 #include "udp_impl.h"
79992
79993+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79994+extern int grsec_enable_blackhole;
79995+#endif
79996+
79997 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
79998 {
79999 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
80000@@ -395,7 +399,7 @@ try_again:
80001 if (unlikely(err)) {
80002 trace_kfree_skb(skb, udpv6_recvmsg);
80003 if (!peeked) {
80004- atomic_inc(&sk->sk_drops);
80005+ atomic_inc_unchecked(&sk->sk_drops);
80006 if (is_udp4)
80007 UDP_INC_STATS_USER(sock_net(sk),
80008 UDP_MIB_INERRORS,
80009@@ -633,7 +637,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
80010 return rc;
80011 drop:
80012 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
80013- atomic_inc(&sk->sk_drops);
80014+ atomic_inc_unchecked(&sk->sk_drops);
80015 kfree_skb(skb);
80016 return -1;
80017 }
80018@@ -691,7 +695,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
80019 if (likely(skb1 == NULL))
80020 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
80021 if (!skb1) {
80022- atomic_inc(&sk->sk_drops);
80023+ atomic_inc_unchecked(&sk->sk_drops);
80024 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
80025 IS_UDPLITE(sk));
80026 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
80027@@ -862,6 +866,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
80028 goto discard;
80029
80030 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
80031+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80032+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
80033+#endif
80034 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
80035
80036 kfree_skb(skb);
80037@@ -1473,7 +1480,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
80038 0,
80039 sock_i_ino(sp),
80040 atomic_read(&sp->sk_refcnt), sp,
80041- atomic_read(&sp->sk_drops));
80042+ atomic_read_unchecked(&sp->sk_drops));
80043 }
80044
80045 int udp6_seq_show(struct seq_file *seq, void *v)
80046diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
80047index 496ce2c..f79fac8 100644
80048--- a/net/irda/ircomm/ircomm_tty.c
80049+++ b/net/irda/ircomm/ircomm_tty.c
80050@@ -311,12 +311,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
80051 add_wait_queue(&port->open_wait, &wait);
80052
80053 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
80054- __FILE__, __LINE__, tty->driver->name, port->count);
80055+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
80056
80057 spin_lock_irqsave(&port->lock, flags);
80058 if (!tty_hung_up_p(filp)) {
80059 extra_count = 1;
80060- port->count--;
80061+ atomic_dec(&port->count);
80062 }
80063 spin_unlock_irqrestore(&port->lock, flags);
80064 port->blocked_open++;
80065@@ -352,7 +352,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
80066 }
80067
80068 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
80069- __FILE__, __LINE__, tty->driver->name, port->count);
80070+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
80071
80072 schedule();
80073 }
80074@@ -363,13 +363,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
80075 if (extra_count) {
80076 /* ++ is not atomic, so this should be protected - Jean II */
80077 spin_lock_irqsave(&port->lock, flags);
80078- port->count++;
80079+ atomic_inc(&port->count);
80080 spin_unlock_irqrestore(&port->lock, flags);
80081 }
80082 port->blocked_open--;
80083
80084 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
80085- __FILE__, __LINE__, tty->driver->name, port->count);
80086+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
80087
80088 if (!retval)
80089 port->flags |= ASYNC_NORMAL_ACTIVE;
80090@@ -443,12 +443,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
80091
80092 /* ++ is not atomic, so this should be protected - Jean II */
80093 spin_lock_irqsave(&self->port.lock, flags);
80094- self->port.count++;
80095+ atomic_inc(&self->port.count);
80096 spin_unlock_irqrestore(&self->port.lock, flags);
80097 tty_port_tty_set(&self->port, tty);
80098
80099 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
80100- self->line, self->port.count);
80101+ self->line, atomic_read(&self->port.count));
80102
80103 /* Not really used by us, but lets do it anyway */
80104 tty->low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
80105@@ -985,7 +985,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
80106 tty_kref_put(port->tty);
80107 }
80108 port->tty = NULL;
80109- port->count = 0;
80110+ atomic_set(&port->count, 0);
80111 spin_unlock_irqrestore(&port->lock, flags);
80112
80113 wake_up_interruptible(&port->open_wait);
80114@@ -1342,7 +1342,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
80115 seq_putc(m, '\n');
80116
80117 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
80118- seq_printf(m, "Open count: %d\n", self->port.count);
80119+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
80120 seq_printf(m, "Max data size: %d\n", self->max_data_size);
80121 seq_printf(m, "Max header size: %d\n", self->max_header_size);
80122
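[Editor's sketch] In ircomm_tty.c the per-port open count is written under the port lock in some paths but read and cleared elsewhere, so the patch converts tty_port.count to atomic_t wholesale: bare ++/-- become atomic_inc()/atomic_dec(), and every read goes through atomic_read(). The transformation pattern, as a hypothetical before/after fragment rather than the literal driver code:

    /* Before: plain int, only safe while port->lock is held everywhere. */
    static void open_count_locked(struct tty_port *port, unsigned long flags)
    {
            spin_lock_irqsave(&port->lock, flags);
            /* port->count--; */
            spin_unlock_irqrestore(&port->lock, flags);
    }

    /* After: atomic_t, safe even on paths that skip the lock. */
    static void open_count_atomic(struct tty_port *port)
    {
            atomic_dec(&port->count);
            pr_debug("open_count=%d\n", atomic_read(&port->count));
    }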
80123diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
80124index cd6f7a9..e63fe89 100644
80125--- a/net/iucv/af_iucv.c
80126+++ b/net/iucv/af_iucv.c
80127@@ -782,10 +782,10 @@ static int iucv_sock_autobind(struct sock *sk)
80128
80129 write_lock_bh(&iucv_sk_list.lock);
80130
80131- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
80132+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
80133 while (__iucv_get_sock_by_name(name)) {
80134 sprintf(name, "%08x",
80135- atomic_inc_return(&iucv_sk_list.autobind_name));
80136+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
80137 }
80138
80139 write_unlock_bh(&iucv_sk_list.lock);
80140diff --git a/net/key/af_key.c b/net/key/af_key.c
80141index 08897a3..0b812ab 100644
80142--- a/net/key/af_key.c
80143+++ b/net/key/af_key.c
80144@@ -3019,10 +3019,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
80145 static u32 get_acqseq(void)
80146 {
80147 u32 res;
80148- static atomic_t acqseq;
80149+ static atomic_unchecked_t acqseq;
80150
80151 do {
80152- res = atomic_inc_return(&acqseq);
80153+ res = atomic_inc_return_unchecked(&acqseq);
80154 } while (!res);
80155 return res;
80156 }
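[Editor's sketch] get_acqseq() deliberately relies on wrap-around: it spins until the increment yields a non-zero value. Under PAX_REFCOUNT a checked atomic would trap on that intentional wrap, which is why the counter must be atomic_unchecked_t. Restated as a standalone sketch:

    /* Sketch: a wrapping sequence generator that never returns 0.
     * The wrap from 0xffffffff back to 0 is intentional, hence _unchecked. */
    static u32 next_seq(void)
    {
            static atomic_unchecked_t seq = ATOMIC_INIT(0);
            u32 res;

            do {
                    res = atomic_inc_return_unchecked(&seq);
            } while (!res);         /* skip the single zero value per wrap */
            return res;
    }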
80157diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
80158index 7371f67..9897314 100644
80159--- a/net/mac80211/cfg.c
80160+++ b/net/mac80211/cfg.c
80161@@ -2594,7 +2594,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
80162 else
80163 local->probe_req_reg--;
80164
80165- if (!local->open_count)
80166+ if (!local_read(&local->open_count))
80167 break;
80168
80169 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
80170diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
80171index 3da215c..497a6e3 100644
80172--- a/net/mac80211/ieee80211_i.h
80173+++ b/net/mac80211/ieee80211_i.h
80174@@ -28,6 +28,7 @@
80175 #include <net/ieee80211_radiotap.h>
80176 #include <net/cfg80211.h>
80177 #include <net/mac80211.h>
80178+#include <asm/local.h>
80179 #include "key.h"
80180 #include "sta_info.h"
80181 #include "debug.h"
80182@@ -852,7 +853,7 @@ struct ieee80211_local {
80183 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
80184 spinlock_t queue_stop_reason_lock;
80185
80186- int open_count;
80187+ local_t open_count;
80188 int monitors, cooked_mntrs;
80189 /* number of interfaces with corresponding FIF_ flags */
80190 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
80191diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
80192index 0f5af91..4dba9e7 100644
80193--- a/net/mac80211/iface.c
80194+++ b/net/mac80211/iface.c
80195@@ -465,7 +465,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
80196 break;
80197 }
80198
80199- if (local->open_count == 0) {
80200+ if (local_read(&local->open_count) == 0) {
80201 res = drv_start(local);
80202 if (res)
80203 goto err_del_bss;
80204@@ -508,7 +508,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
80205 break;
80206 }
80207
80208- if (local->monitors == 0 && local->open_count == 0) {
80209+ if (local->monitors == 0 && local_read(&local->open_count) == 0) {
80210 res = ieee80211_add_virtual_monitor(local);
80211 if (res)
80212 goto err_stop;
80213@@ -616,7 +616,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
80214 mutex_unlock(&local->mtx);
80215
80216 if (coming_up)
80217- local->open_count++;
80218+ local_inc(&local->open_count);
80219
80220 if (hw_reconf_flags)
80221 ieee80211_hw_config(local, hw_reconf_flags);
80222@@ -630,7 +630,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
80223 err_del_interface:
80224 drv_remove_interface(local, sdata);
80225 err_stop:
80226- if (!local->open_count)
80227+ if (!local_read(&local->open_count))
80228 drv_stop(local);
80229 err_del_bss:
80230 sdata->bss = NULL;
80231@@ -762,7 +762,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
80232 }
80233
80234 if (going_down)
80235- local->open_count--;
80236+ local_dec(&local->open_count);
80237
80238 switch (sdata->vif.type) {
80239 case NL80211_IFTYPE_AP_VLAN:
80240@@ -818,7 +818,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
80241
80242 ieee80211_recalc_ps(local, -1);
80243
80244- if (local->open_count == 0) {
80245+ if (local_read(&local->open_count) == 0) {
80246 if (local->ops->napi_poll)
80247 napi_disable(&local->napi);
80248 ieee80211_clear_tx_pending(local);
80249@@ -850,7 +850,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
80250 }
80251 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
80252
80253- if (local->monitors == local->open_count && local->monitors > 0)
80254+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
80255 ieee80211_add_virtual_monitor(local);
80256 }
80257
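[Editor's sketch] mac80211's open_count is bumped on interface open/close but consulted from several other contexts, so the patch converts it from int to local_t (hence the new <asm/local.h> include in ieee80211_i.h), accessed via local_read()/local_inc()/local_dec(). The accessor pattern after conversion, in an illustrative fragment (the real counter lives inside struct ieee80211_local, not at file scope):

    #include <asm/local.h>

    static local_t open_count = LOCAL_INIT(0);

    static int hw_start_if_first(void)
    {
            if (local_read(&open_count) == 0) {
                    /* first interface coming up: start the hardware */
            }
            local_inc(&open_count);
            return 0;
    }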
80258diff --git a/net/mac80211/main.c b/net/mac80211/main.c
80259index f57f597..e0a7c03 100644
80260--- a/net/mac80211/main.c
80261+++ b/net/mac80211/main.c
80262@@ -164,7 +164,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
80263 local->hw.conf.power_level = power;
80264 }
80265
80266- if (changed && local->open_count) {
80267+ if (changed && local_read(&local->open_count)) {
80268 ret = drv_config(local, changed);
80269 /*
80270 * Goal:
80271diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
80272index 5c572e7..ecf75ce 100644
80273--- a/net/mac80211/pm.c
80274+++ b/net/mac80211/pm.c
80275@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
80276 struct ieee80211_sub_if_data *sdata;
80277 struct sta_info *sta;
80278
80279- if (!local->open_count)
80280+ if (!local_read(&local->open_count))
80281 goto suspend;
80282
80283 ieee80211_scan_cancel(local);
80284@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
80285 cancel_work_sync(&local->dynamic_ps_enable_work);
80286 del_timer_sync(&local->dynamic_ps_timer);
80287
80288- local->wowlan = wowlan && local->open_count;
80289+ local->wowlan = wowlan && local_read(&local->open_count);
80290 if (local->wowlan) {
80291 int err = drv_suspend(local, wowlan);
80292 if (err < 0) {
80293@@ -143,7 +143,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
80294 drv_remove_interface(local, sdata);
80295
80296 /* stop hardware - this must stop RX */
80297- if (local->open_count)
80298+ if (local_read(&local->open_count))
80299 ieee80211_stop_device(local);
80300
80301 suspend:
80302diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
80303index 3313c11..bec9f17 100644
80304--- a/net/mac80211/rate.c
80305+++ b/net/mac80211/rate.c
80306@@ -494,7 +494,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
80307
80308 ASSERT_RTNL();
80309
80310- if (local->open_count)
80311+ if (local_read(&local->open_count))
80312 return -EBUSY;
80313
80314 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
80315diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
80316index c97a065..ff61928 100644
80317--- a/net/mac80211/rc80211_pid_debugfs.c
80318+++ b/net/mac80211/rc80211_pid_debugfs.c
80319@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
80320
80321 spin_unlock_irqrestore(&events->lock, status);
80322
80323- if (copy_to_user(buf, pb, p))
80324+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
80325 return -EFAULT;
80326
80327 return p;
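[Editor's sketch] The rc80211_pid_debugfs hunk adds a defensive bounds check: p is computed under the events lock, and if it ever exceeded the on-stack buffer pb, copy_to_user() would read past it. Rejecting an oversized length before copying is the generic pattern; pb/p below mirror the names in the hunk, with the buffer size passed explicitly since the sketch has no array in scope:

    /* Sketch: bound a length derived from shared state before copy_to_user(). */
    static ssize_t bounded_read(char __user *buf, const char *pb,
                                size_t pb_size, size_t p)
    {
            if (p > pb_size || copy_to_user(buf, pb, p))
                    return -EFAULT;
            return p;
    }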
80328diff --git a/net/mac80211/util.c b/net/mac80211/util.c
80329index 0151ae3..26709d3 100644
80330--- a/net/mac80211/util.c
80331+++ b/net/mac80211/util.c
80332@@ -1332,7 +1332,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
80333 }
80334 #endif
80335 /* everything else happens only if HW was up & running */
80336- if (!local->open_count)
80337+ if (!local_read(&local->open_count))
80338 goto wake_up;
80339
80340 /*
80341diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
80342index fefa514..0755f23 100644
80343--- a/net/netfilter/Kconfig
80344+++ b/net/netfilter/Kconfig
80345@@ -929,6 +929,16 @@ config NETFILTER_XT_MATCH_ESP
80346
80347 To compile it as a module, choose M here. If unsure, say N.
80348
80349+config NETFILTER_XT_MATCH_GRADM
80350+ tristate '"gradm" match support'
80351+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
80352+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
80353+ ---help---
80354+ The gradm match allows matching on whether grsecurity RBAC is enabled.
80355+ It is useful when iptables rules are applied early during boot to
80356+ prevent connections to the machine (except from a trusted host)
80357+ while the RBAC system is disabled.
80358+
80359 config NETFILTER_XT_MATCH_HASHLIMIT
80360 tristate '"hashlimit" match support'
80361 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
80362diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
80363index 3259697..54d5393 100644
80364--- a/net/netfilter/Makefile
80365+++ b/net/netfilter/Makefile
80366@@ -109,6 +109,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
80367 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
80368 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
80369 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
80370+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
80371 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
80372 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
80373 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
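[Editor's sketch] The xt_gradm module wired up here (its source appears further below, truncated in this excerpt) is a minimal xtables match. Going only by the Kconfig description, its core plausibly reduces to testing whether RBAC is enabled and honouring an invert flag; the struct name and its fields below are assumptions for illustration, not the literal module:

    /* Hypothetical sketch of a gradm-style xtables match callback.
     * xt_gradm_mtinfo and its invflags field are assumed names. */
    struct xt_gradm_mtinfo {
            __u16 flags;
            __u16 invflags;
    };

    static bool gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
    {
            const struct xt_gradm_mtinfo *info = par->matchinfo;
            bool rbac_on = gr_acl_is_enabled();     /* grsecurity RBAC state */

            return rbac_on ^ !!info->invflags;      /* match, optionally inverted */
    }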
80374diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
80375index 1548df9..98ad9b4 100644
80376--- a/net/netfilter/ipvs/ip_vs_conn.c
80377+++ b/net/netfilter/ipvs/ip_vs_conn.c
80378@@ -557,7 +557,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
80379 /* Increase the refcnt counter of the dest */
80380 atomic_inc(&dest->refcnt);
80381
80382- conn_flags = atomic_read(&dest->conn_flags);
80383+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
80384 if (cp->protocol != IPPROTO_UDP)
80385 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
80386 flags = cp->flags;
80387@@ -902,7 +902,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
80388 atomic_set(&cp->refcnt, 1);
80389
80390 atomic_set(&cp->n_control, 0);
80391- atomic_set(&cp->in_pkts, 0);
80392+ atomic_set_unchecked(&cp->in_pkts, 0);
80393
80394 atomic_inc(&ipvs->conn_count);
80395 if (flags & IP_VS_CONN_F_NO_CPORT)
80396@@ -1183,7 +1183,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
80397
80398 /* Don't drop the entry if its number of incoming packets is not
80399 located in [0, 8] */
80400- i = atomic_read(&cp->in_pkts);
80401+ i = atomic_read_unchecked(&cp->in_pkts);
80402 if (i > 8 || i < 0) return 0;
80403
80404 if (!todrop_rate[i]) return 0;
80405diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
80406index 58918e2..4d177a9 100644
80407--- a/net/netfilter/ipvs/ip_vs_core.c
80408+++ b/net/netfilter/ipvs/ip_vs_core.c
80409@@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
80410 ret = cp->packet_xmit(skb, cp, pd->pp);
80411 /* do not touch skb anymore */
80412
80413- atomic_inc(&cp->in_pkts);
80414+ atomic_inc_unchecked(&cp->in_pkts);
80415 ip_vs_conn_put(cp);
80416 return ret;
80417 }
80418@@ -1681,7 +1681,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
80419 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
80420 pkts = sysctl_sync_threshold(ipvs);
80421 else
80422- pkts = atomic_add_return(1, &cp->in_pkts);
80423+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
80424
80425 if (ipvs->sync_state & IP_VS_STATE_MASTER)
80426 ip_vs_sync_conn(net, cp, pkts);
80427diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
80428index c4ee437..a774a74 100644
80429--- a/net/netfilter/ipvs/ip_vs_ctl.c
80430+++ b/net/netfilter/ipvs/ip_vs_ctl.c
80431@@ -787,7 +787,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
80432 ip_vs_rs_hash(ipvs, dest);
80433 write_unlock_bh(&ipvs->rs_lock);
80434 }
80435- atomic_set(&dest->conn_flags, conn_flags);
80436+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
80437
80438 /* bind the service */
80439 if (!dest->svc) {
80440@@ -2081,7 +2081,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
80441 " %-7s %-6d %-10d %-10d\n",
80442 &dest->addr.in6,
80443 ntohs(dest->port),
80444- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
80445+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
80446 atomic_read(&dest->weight),
80447 atomic_read(&dest->activeconns),
80448 atomic_read(&dest->inactconns));
80449@@ -2092,7 +2092,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
80450 "%-7s %-6d %-10d %-10d\n",
80451 ntohl(dest->addr.ip),
80452 ntohs(dest->port),
80453- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
80454+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
80455 atomic_read(&dest->weight),
80456 atomic_read(&dest->activeconns),
80457 atomic_read(&dest->inactconns));
80458@@ -2562,7 +2562,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
80459
80460 entry.addr = dest->addr.ip;
80461 entry.port = dest->port;
80462- entry.conn_flags = atomic_read(&dest->conn_flags);
80463+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
80464 entry.weight = atomic_read(&dest->weight);
80465 entry.u_threshold = dest->u_threshold;
80466 entry.l_threshold = dest->l_threshold;
80467@@ -3098,7 +3098,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
80468 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
80469 nla_put_u16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
80470 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
80471- (atomic_read(&dest->conn_flags) &
80472+ (atomic_read_unchecked(&dest->conn_flags) &
80473 IP_VS_CONN_F_FWD_MASK)) ||
80474 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
80475 atomic_read(&dest->weight)) ||
80476diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
80477index effa10c..9058928 100644
80478--- a/net/netfilter/ipvs/ip_vs_sync.c
80479+++ b/net/netfilter/ipvs/ip_vs_sync.c
80480@@ -596,7 +596,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
80481 cp = cp->control;
80482 if (cp) {
80483 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
80484- pkts = atomic_add_return(1, &cp->in_pkts);
80485+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
80486 else
80487 pkts = sysctl_sync_threshold(ipvs);
80488 ip_vs_sync_conn(net, cp->control, pkts);
80489@@ -758,7 +758,7 @@ control:
80490 if (!cp)
80491 return;
80492 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
80493- pkts = atomic_add_return(1, &cp->in_pkts);
80494+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
80495 else
80496 pkts = sysctl_sync_threshold(ipvs);
80497 goto sloop;
80498@@ -885,7 +885,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
80499
80500 if (opt)
80501 memcpy(&cp->in_seq, opt, sizeof(*opt));
80502- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
80503+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
80504 cp->state = state;
80505 cp->old_state = cp->state;
80506 /*
80507diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
80508index cc4c809..50f8fe5 100644
80509--- a/net/netfilter/ipvs/ip_vs_xmit.c
80510+++ b/net/netfilter/ipvs/ip_vs_xmit.c
80511@@ -1202,7 +1202,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
80512 else
80513 rc = NF_ACCEPT;
80514 /* do not touch skb anymore */
80515- atomic_inc(&cp->in_pkts);
80516+ atomic_inc_unchecked(&cp->in_pkts);
80517 goto out;
80518 }
80519
80520@@ -1323,7 +1323,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
80521 else
80522 rc = NF_ACCEPT;
80523 /* do not touch skb anymore */
80524- atomic_inc(&cp->in_pkts);
80525+ atomic_inc_unchecked(&cp->in_pkts);
80526 goto out;
80527 }
80528
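[Editor's sketch] Throughout IPVS, in_pkts and conn_flags are telemetry: in_pkts is compared against the sync threshold and sampled into [0, 8], so wrap-around is tolerated by design, and every reader and writer moves to the _unchecked variants rather than gaining overflow detection. The threshold decision, condensed into an illustrative policy (not the exact IPVS logic):

    /* Sketch: sync decision on a freely wrapping per-connection counter. */
    static bool should_sync(struct ip_vs_conn *cp, int sync_threshold)
    {
            int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);

            return pkts % sync_threshold == 0;      /* illustrative policy only */
    }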
80529diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
80530index 0f241be..2c9be6d 100644
80531--- a/net/netfilter/nf_conntrack_core.c
80532+++ b/net/netfilter/nf_conntrack_core.c
80533@@ -1532,6 +1532,10 @@ err_extend:
80534 #define UNCONFIRMED_NULLS_VAL ((1<<30)+0)
80535 #define DYING_NULLS_VAL ((1<<30)+1)
80536
80537+#ifdef CONFIG_GRKERNSEC_HIDESYM
80538+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
80539+#endif
80540+
80541 static int nf_conntrack_init_net(struct net *net)
80542 {
80543 int ret;
80544@@ -1545,7 +1549,11 @@ static int nf_conntrack_init_net(struct net *net)
80545 goto err_stat;
80546 }
80547
80548+#ifdef CONFIG_GRKERNSEC_HIDESYM
80549+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
80550+#else
80551 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
80552+#endif
80553 if (!net->ct.slabname) {
80554 ret = -ENOMEM;
80555 goto err_slabname;
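[Editor's sketch] Under HIDESYM, formatting the struct net pointer with %p into the slab cache name would leak a kernel address into globally readable /proc/slabinfo, so the patch substitutes a monotonically increasing id. The two naming schemes side by side (simplified; the cast avoids the int-vs-%08lx mismatch):

    static atomic_unchecked_t cache_id = ATOMIC_INIT(0);

    static char *ct_slabname(struct net *net)
    {
    #ifdef CONFIG_GRKERNSEC_HIDESYM
            /* opaque sequential id: reveals nothing about kernel layout */
            return kasprintf(GFP_KERNEL, "nf_conntrack_%08lx",
                             (unsigned long)atomic_inc_return_unchecked(&cache_id));
    #else
            return kasprintf(GFP_KERNEL, "nf_conntrack_%p", net); /* leaks address */
    #endif
    }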
80556diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
80557index 9f199f2..719ad23 100644
80558--- a/net/netfilter/nfnetlink_log.c
80559+++ b/net/netfilter/nfnetlink_log.c
80560@@ -71,7 +71,7 @@ struct nfulnl_instance {
80561 };
80562
80563 static DEFINE_SPINLOCK(instances_lock);
80564-static atomic_t global_seq;
80565+static atomic_unchecked_t global_seq;
80566
80567 #define INSTANCE_BUCKETS 16
80568 static struct hlist_head instance_table[INSTANCE_BUCKETS];
80569@@ -527,7 +527,7 @@ __build_packet_message(struct nfulnl_instance *inst,
80570 /* global sequence number */
80571 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
80572 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
80573- htonl(atomic_inc_return(&global_seq))))
80574+ htonl(atomic_inc_return_unchecked(&global_seq))))
80575 goto nla_put_failure;
80576
80577 if (data_len) {
80578diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
80579new file mode 100644
80580index 0000000..c566332
80581--- /dev/null
80582+++ b/net/netfilter/xt_gradm.c
80583@@ -0,0 +1,51 @@
80584+/*
80585+ * gradm match for netfilter
80586