grsecurity-2.9.1-3.7.4-201301252226.patch
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index 74c25c8..deadba2 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -48,14 +51,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -69,6 +75,7 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52@@ -80,6 +87,7 @@ aic7*seq.h*
53 aicasm
54 aicdb.h*
55 altivec*.c
56+ashldi3.S
57 asm-offsets.h
58 asm_offsets.h
59 autoconf.h*
60@@ -92,19 +100,24 @@ bounds.h
61 bsetup
62 btfixupprep
63 build
64+builtin-policy.h
65 bvmlinux
66 bzImage*
67 capability_names.h
68 capflags.c
69 classlist.h*
70+clut_vga16.c
71+common-cmds.h
72 comp*.log
73 compile.h*
74 conf
75 config
76 config-*
77 config_data.h*
78+config.c
79 config.mak
80 config.mak.autogen
81+config.tmp
82 conmakehash
83 consolemap_deftbl.c*
84 cpustr.h
85@@ -115,9 +128,11 @@ devlist.h*
86 dnotify_test
87 docproc
88 dslm
89+dtc-lexer.lex.c
90 elf2ecoff
91 elfconfig.h*
92 evergreen_reg_safe.h
93+exception_policy.conf
94 fixdep
95 flask.h
96 fore200e_mkfirm
97@@ -125,12 +140,15 @@ fore200e_pca_fw.c*
98 gconf
99 gconf.glade.h
100 gen-devlist
101+gen-kdb_cmds.c
102 gen_crc32table
103 gen_init_cpio
104 generated
105 genheaders
106 genksyms
107 *_gray256.c
108+hash
109+hid-example
110 hpet_example
111 hugepage-mmap
112 hugepage-shm
113@@ -145,14 +163,14 @@ int32.c
114 int4.c
115 int8.c
116 kallsyms
117-kconfig
118+kern_constants.h
119 keywords.c
120 ksym.c*
121 ksym.h*
122 kxgettext
123 lex.c
124 lex.*.c
125-linux
126+lib1funcs.S
127 logo_*.c
128 logo_*_clut224.c
129 logo_*_mono.c
130@@ -162,14 +180,15 @@ mach-types.h
131 machtypes.h
132 map
133 map_hugetlb
134-media
135 mconf
136+mdp
137 miboot*
138 mk_elfconfig
139 mkboot
140 mkbugboot
141 mkcpustr
142 mkdep
143+mkpiggy
144 mkprep
145 mkregtable
146 mktables
147@@ -186,6 +205,8 @@ oui.c*
148 page-types
149 parse.c
150 parse.h
151+parse-events*
152+pasyms.h
153 patches*
154 pca200e.bin
155 pca200e_ecd.bin2
156@@ -195,6 +216,7 @@ perf-archive
157 piggyback
158 piggy.gzip
159 piggy.S
160+pmu-*
161 pnmtologo
162 ppc_defs.h*
163 pss_boot.h
164@@ -204,7 +226,10 @@ r200_reg_safe.h
165 r300_reg_safe.h
166 r420_reg_safe.h
167 r600_reg_safe.h
168+realmode.lds
169+realmode.relocs
170 recordmcount
171+regdb.c
172 relocs
173 rlim_names.h
174 rn50_reg_safe.h
175@@ -214,8 +239,11 @@ series
176 setup
177 setup.bin
178 setup.elf
179+size_overflow_hash.h
180 sImage
181+slabinfo
182 sm_tbl*
183+sortextable
184 split-include
185 syscalltab.h
186 tables.c
187@@ -225,6 +253,7 @@ tftpboot.img
188 timeconst.h
189 times.h*
190 trix_boot.h
191+user_constants.h
192 utsrelease.h*
193 vdso-syms.lds
194 vdso.lds
195@@ -236,13 +265,17 @@ vdso32.lds
196 vdso32.so.dbg
197 vdso64.lds
198 vdso64.so.dbg
199+vdsox32.lds
200+vdsox32-syms.lds
201 version.h*
202 vmImage
203 vmlinux
204 vmlinux-*
205 vmlinux.aout
206 vmlinux.bin.all
207+vmlinux.bin.bz2
208 vmlinux.lds
209+vmlinux.relocs
210 vmlinuz
211 voffset.h
212 vsyscall.lds
213@@ -250,9 +283,11 @@ vsyscall_32.lds
214 wanxlfw.inc
215 uImage
216 unifdef
217+utsrelease.h
218 wakeup.bin
219 wakeup.elf
220 wakeup.lds
221 zImage*
222 zconf.hash.c
223+zconf.lex.c
224 zoffset.h
225diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
226index 9776f06..18b1856 100644
227--- a/Documentation/kernel-parameters.txt
228+++ b/Documentation/kernel-parameters.txt
229@@ -905,6 +905,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
230 gpt [EFI] Forces disk with valid GPT signature but
231 invalid Protective MBR to be treated as GPT.
232
233+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
234+ ignore grsecurity's /proc restrictions
235+
236 hashdist= [KNL,NUMA] Large hashes allocated during boot
237 are distributed across NUMA nodes. Defaults on
238 for 64-bit NUMA, off otherwise.
239@@ -2082,6 +2085,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
240 the specified number of seconds. This is to be used if
241 your oopses keep scrolling off the screen.
242
243+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
244+ virtualization environments that don't cope well with the
245+ expand down segment used by UDEREF on X86-32 or the frequent
246+ page table updates on X86-64.
247+
248+	pax_softmode=	0/1 to disable/enable PaX softmode at boot.
249+
250 pcbit= [HW,ISDN]
251
252 pcd. [PARIDE]
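The grsec_proc_gid= parameter documented above is consumed at boot. As a rough sketch of the usual kernel-side pattern for such an integer parameter (a fragment, not standalone; the variable and function names here are hypothetical, not the grsecurity implementation):

/* Hypothetical sketch: the common __setup() pattern for an integer
 * boot parameter like grsec_proc_gid=.  Not the patch's actual code. */
static int grsec_proc_gid __read_mostly;

static int __init setup_grsec_proc_gid(char *str)
{
        grsec_proc_gid = (int)simple_strtol(str, NULL, 0);
        return 1;       /* parameter handled */
}
__setup("grsec_proc_gid=", setup_grsec_proc_gid);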
253diff --git a/Makefile b/Makefile
254index f9196bc..63b33e4 100644
255--- a/Makefile
256+++ b/Makefile
257@@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
258
259 HOSTCC = gcc
260 HOSTCXX = g++
261-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
262-HOSTCXXFLAGS = -O2
263+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
264+HOSTCFLAGS  += $(call cc-option, -Wno-empty-body)
265+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
266
267 # Decide whether to build built-in, modular, or both.
268 # Normally, just do built-in.
269@@ -414,8 +415,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
270 # Rules shared between *config targets and build targets
271
272 # Basic helpers built in scripts/
273-PHONY += scripts_basic
274-scripts_basic:
275+PHONY += scripts_basic gcc-plugins
276+scripts_basic: gcc-plugins
277 $(Q)$(MAKE) $(build)=scripts/basic
278 $(Q)rm -f .tmp_quiet_recordmcount
279
280@@ -575,6 +576,60 @@ else
281 KBUILD_CFLAGS += -O2
282 endif
283
284+ifndef DISABLE_PAX_PLUGINS
285+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
286+ifneq ($(PLUGINCC),)
287+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
288+ifndef CONFIG_UML
289+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
290+endif
291+endif
292+ifdef CONFIG_PAX_MEMORY_STACKLEAK
293+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
294+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
295+endif
296+ifdef CONFIG_KALLOCSTAT_PLUGIN
297+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
298+endif
299+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
300+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
301+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
302+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
303+endif
304+ifdef CONFIG_CHECKER_PLUGIN
305+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
306+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
307+endif
308+endif
309+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
310+ifdef CONFIG_PAX_SIZE_OVERFLOW
311+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
312+endif
313+ifdef CONFIG_PAX_LATENT_ENTROPY
314+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
315+endif
316+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
317+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
318+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS)
319+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
320+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN
321+ifeq ($(KBUILD_EXTMOD),)
322+gcc-plugins:
323+ $(Q)$(MAKE) $(build)=tools/gcc
324+else
325+gcc-plugins: ;
326+endif
327+else
328+gcc-plugins:
329+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
330+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
331+else
332+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
333+endif
334+ $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
335+endif
336+endif
337+
338 include $(srctree)/arch/$(SRCARCH)/Makefile
339
340 ifdef CONFIG_READABLE_ASM
341@@ -731,7 +786,7 @@ export mod_sign_cmd
342
343
344 ifeq ($(KBUILD_EXTMOD),)
345-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
346+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
347
348 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
349 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
350@@ -778,6 +833,8 @@ endif
351
352 # The actual objects are generated when descending,
353 # make sure no implicit rule kicks in
354+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
355+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
356 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
357
358 # Handle descending into subdirectories listed in $(vmlinux-dirs)
359@@ -787,7 +844,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
360 # Error messages still appears in the original language
361
362 PHONY += $(vmlinux-dirs)
363-$(vmlinux-dirs): prepare scripts
364+$(vmlinux-dirs): gcc-plugins prepare scripts
365 $(Q)$(MAKE) $(build)=$@
366
367 # Store (new) KERNELRELASE string in include/config/kernel.release
368@@ -831,6 +888,7 @@ prepare0: archprepare FORCE
369 $(Q)$(MAKE) $(build)=.
370
371 # All the preparing..
372+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
373 prepare: prepare0
374
375 # Generate some files
376@@ -938,6 +996,8 @@ all: modules
377 # using awk while concatenating to the final file.
378
379 PHONY += modules
380+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
381+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
382 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
383 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
384 @$(kecho) ' Building modules, stage 2.';
385@@ -953,7 +1013,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
386
387 # Target to prepare building external modules
388 PHONY += modules_prepare
389-modules_prepare: prepare scripts
390+modules_prepare: gcc-plugins prepare scripts
391
392 # Target to install modules
393 PHONY += modules_install
394@@ -1013,7 +1073,7 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
395 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
396 signing_key.priv signing_key.x509 x509.genkey \
397 extra_certificates signing_key.x509.keyid \
398- signing_key.x509.signer
399+ signing_key.x509.signer tools/gcc/size_overflow_hash.h
400
401 # clean - Delete most, but leave enough to build external modules
402 #
403@@ -1053,6 +1113,7 @@ distclean: mrproper
404 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
405 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
406 -o -name '.*.rej' \
407+ -o -name '.*.rej' -o -name '*.so' \
408 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
409 -type f -print | xargs rm -f
410
411@@ -1213,6 +1274,8 @@ PHONY += $(module-dirs) modules
412 $(module-dirs): crmodverdir $(objtree)/Module.symvers
413 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
414
415+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
416+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
417 modules: $(module-dirs)
418 @$(kecho) ' Building modules, stage 2.';
419 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
420@@ -1349,17 +1412,21 @@ else
421 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
422 endif
423
424-%.s: %.c prepare scripts FORCE
425+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
426+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
427+%.s: %.c gcc-plugins prepare scripts FORCE
428 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
429 %.i: %.c prepare scripts FORCE
430 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
431-%.o: %.c prepare scripts FORCE
432+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
433+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
434+%.o: %.c gcc-plugins prepare scripts FORCE
435 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
436 %.lst: %.c prepare scripts FORCE
437 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
438-%.s: %.S prepare scripts FORCE
439+%.s: %.S gcc-plugins prepare scripts FORCE
440 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
441-%.o: %.S prepare scripts FORCE
442+%.o: %.S gcc-plugins prepare scripts FORCE
443 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
444 %.symtypes: %.c prepare scripts FORCE
445 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
446@@ -1369,11 +1436,15 @@ endif
447 $(cmd_crmodverdir)
448 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
449 $(build)=$(build-dir)
450-%/: prepare scripts FORCE
451+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
452+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
453+%/: gcc-plugins prepare scripts FORCE
454 $(cmd_crmodverdir)
455 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
456 $(build)=$(build-dir)
457-%.ko: prepare scripts FORCE
458+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
459+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
460+%.ko: gcc-plugins prepare scripts FORCE
461 $(cmd_crmodverdir)
462 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
463 $(build)=$(build-dir) $(@:.ko=.o)
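The gcc-plugins machinery added above hinges on scripts/gcc-plugin.sh finding a compiler (PLUGINCC) that can build GCC plugins. For orientation, a minimal plugin of the kind such a compiler must be able to build looks roughly like this (a sketch, assuming the plugin headers from the gcc-<ver>-plugin-dev package mentioned in the error text are installed):

/* Minimal GCC plugin skeleton, similar in spirit to what the
 * PLUGINCC probe has to be able to compile.  Registers no passes. */
#include "gcc-plugin.h"
#include "plugin-version.h"

int plugin_is_GPL_compatible;   /* required, or GCC refuses to load it */

int plugin_init(struct plugin_name_args *plugin_info,
                struct plugin_gcc_version *version)
{
        if (!plugin_default_version_check(version, &gcc_version))
                return 1;       /* built against a different GCC */
        return 0;               /* no-op plugin */
}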
464diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
465index c2cbe4f..f7264b4 100644
466--- a/arch/alpha/include/asm/atomic.h
467+++ b/arch/alpha/include/asm/atomic.h
468@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
469 #define atomic_dec(v) atomic_sub(1,(v))
470 #define atomic64_dec(v) atomic64_sub(1,(v))
471
472+#define atomic64_read_unchecked(v) atomic64_read(v)
473+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
474+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
475+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
476+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
477+#define atomic64_inc_unchecked(v) atomic64_inc(v)
478+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
479+#define atomic64_dec_unchecked(v) atomic64_dec(v)
480+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
481+
482 #define smp_mb__before_atomic_dec() smp_mb()
483 #define smp_mb__after_atomic_dec() smp_mb()
484 #define smp_mb__before_atomic_inc() smp_mb()
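The *_unchecked aliases above exist so code shared with PAX_REFCOUNT architectures still compiles on alpha, where the checking is absent. A condensed sketch of the type-level pattern (mirroring the atomic64_unchecked_t typedef this patch adds for ARM further down):

typedef struct { int counter; } atomic_t;

#ifdef CONFIG_PAX_REFCOUNT
/* Distinct type: mixing checked and unchecked counters becomes a
 * compile error on architectures that implement the checking. */
typedef struct { int counter; } atomic_unchecked_t;
#else
typedef atomic_t atomic_unchecked_t;    /* plain alias, as on alpha */
#endif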
485diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
486index ad368a9..fbe0f25 100644
487--- a/arch/alpha/include/asm/cache.h
488+++ b/arch/alpha/include/asm/cache.h
489@@ -4,19 +4,19 @@
490 #ifndef __ARCH_ALPHA_CACHE_H
491 #define __ARCH_ALPHA_CACHE_H
492
493+#include <linux/const.h>
494
495 /* Bytes per L1 (data) cache line. */
496 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
497-# define L1_CACHE_BYTES 64
498 # define L1_CACHE_SHIFT 6
499 #else
500 /* Both EV4 and EV5 are write-through, read-allocate,
501 direct-mapped, physical.
502 */
503-# define L1_CACHE_BYTES 32
504 # define L1_CACHE_SHIFT 5
505 #endif
506
507+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
508 #define SMP_CACHE_BYTES L1_CACHE_BYTES
509
510 #endif
511diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
512index 968d999..d36b2df 100644
513--- a/arch/alpha/include/asm/elf.h
514+++ b/arch/alpha/include/asm/elf.h
515@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
516
517 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
518
519+#ifdef CONFIG_PAX_ASLR
520+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
521+
522+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
523+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
524+#endif
525+
526 /* $0 is set by ld.so to a pointer to a function which might be
527 registered using atexit. This provides a mean for the dynamic
528 linker to call DT_FINI functions for shared libraries that have
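PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN are bit counts: they bound how many random bits PaX mixes into the mmap and stack bases. A user-space sketch of the arithmetic (assuming alpha's 8 KB pages; illustrative, not the in-kernel code):

#include <stdio.h>
#include <stdlib.h>

#define ALPHA_PAGE_SHIFT 13             /* 8 KB pages on alpha */

/* delta_bits would be PAX_DELTA_MMAP_LEN: 28 on 64-bit, 14 with
 * ADDR_LIMIT_32BIT, per the definitions above. */
static unsigned long rand_delta(unsigned int delta_bits)
{
        unsigned long mask = (1UL << delta_bits) - 1;
        return ((unsigned long)random() & mask) << ALPHA_PAGE_SHIFT;
}

int main(void)
{
        srandom(1);
        printf("mmap delta: %#lx\n", rand_delta(28));
        return 0;
}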
529diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
530index bc2a0da..8ad11ee 100644
531--- a/arch/alpha/include/asm/pgalloc.h
532+++ b/arch/alpha/include/asm/pgalloc.h
533@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
534 pgd_set(pgd, pmd);
535 }
536
537+static inline void
538+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
539+{
540+ pgd_populate(mm, pgd, pmd);
541+}
542+
543 extern pgd_t *pgd_alloc(struct mm_struct *mm);
544
545 static inline void
546diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
547index 81a4342..348b927 100644
548--- a/arch/alpha/include/asm/pgtable.h
549+++ b/arch/alpha/include/asm/pgtable.h
550@@ -102,6 +102,17 @@ struct vm_area_struct;
551 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
552 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
553 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
554+
555+#ifdef CONFIG_PAX_PAGEEXEC
556+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
557+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
558+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
559+#else
560+# define PAGE_SHARED_NOEXEC PAGE_SHARED
561+# define PAGE_COPY_NOEXEC PAGE_COPY
562+# define PAGE_READONLY_NOEXEC PAGE_READONLY
563+#endif
564+
565 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
566
567 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
568diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
569index 2fd00b7..cfd5069 100644
570--- a/arch/alpha/kernel/module.c
571+++ b/arch/alpha/kernel/module.c
572@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
573
574 /* The small sections were sorted to the end of the segment.
575 The following should definitely cover them. */
576- gp = (u64)me->module_core + me->core_size - 0x8000;
577+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
578 got = sechdrs[me->arch.gotsecindex].sh_addr;
579
580 for (i = 0; i < n; i++) {
581diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
582index 14db93e..47bed62 100644
583--- a/arch/alpha/kernel/osf_sys.c
584+++ b/arch/alpha/kernel/osf_sys.c
585@@ -1295,16 +1295,16 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
586 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
587
588 static unsigned long
589-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
590- unsigned long limit)
591+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
592+ unsigned long limit, unsigned long flags)
593 {
594 struct vm_area_struct *vma = find_vma(current->mm, addr);
595-
596+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
597 while (1) {
598 /* At this point: (!vma || addr < vma->vm_end). */
599 if (limit - len < addr)
600 return -ENOMEM;
601- if (!vma || addr + len <= vma->vm_start)
602+ if (check_heap_stack_gap(vma, addr, len, offset))
603 return addr;
604 addr = vma->vm_end;
605 vma = vma->vm_next;
606@@ -1340,20 +1340,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
607 merely specific addresses, but regions of memory -- perhaps
608 this feature should be incorporated into all ports? */
609
610+#ifdef CONFIG_PAX_RANDMMAP
611+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
612+#endif
613+
614 if (addr) {
615- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
616+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
617 if (addr != (unsigned long) -ENOMEM)
618 return addr;
619 }
620
621 /* Next, try allocating at TASK_UNMAPPED_BASE. */
622- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
623- len, limit);
624+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
625+
626 if (addr != (unsigned long) -ENOMEM)
627 return addr;
628
629 /* Finally, try allocating in low memory. */
630- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
631+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
632
633 return addr;
634 }
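check_heap_stack_gap() replaces the open-coded addr + len <= vma->vm_start test, so a candidate range must also clear the randomized guard gap returned by gr_rand_threadstack_offset(). A simplified stand-in for the idea (the real helper, defined elsewhere in the patch, additionally special-cases stack-grows-down mappings):

#include <stdio.h>

struct vma { unsigned long vm_start, vm_end; };

/* Accept [addr, addr+len) only if it also leaves "offset" bytes of
 * slack before the next mapping. */
static int gap_ok(const struct vma *next, unsigned long addr,
                  unsigned long len, unsigned long offset)
{
        if (!next)
                return 1;               /* nothing mapped above */
        return addr + len + offset <= next->vm_start;
}

int main(void)
{
        struct vma next = { 0x20000, 0x30000 };
        printf("%d %d\n",
               gap_ok(&next, 0x10000, 0x8000, 0x1000),    /* 1: fits */
               gap_ok(&next, 0x10000, 0x10000, 0x1000));  /* 0: no gap */
        return 0;
}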
635diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
636index 0c4132d..88f0d53 100644
637--- a/arch/alpha/mm/fault.c
638+++ b/arch/alpha/mm/fault.c
639@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
640 __reload_thread(pcb);
641 }
642
643+#ifdef CONFIG_PAX_PAGEEXEC
644+/*
645+ * PaX: decide what to do with offenders (regs->pc = fault address)
646+ *
647+ * returns 1 when task should be killed
648+ * 2 when patched PLT trampoline was detected
649+ * 3 when unpatched PLT trampoline was detected
650+ */
651+static int pax_handle_fetch_fault(struct pt_regs *regs)
652+{
653+
654+#ifdef CONFIG_PAX_EMUPLT
655+ int err;
656+
657+ do { /* PaX: patched PLT emulation #1 */
658+ unsigned int ldah, ldq, jmp;
659+
660+ err = get_user(ldah, (unsigned int *)regs->pc);
661+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
662+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
663+
664+ if (err)
665+ break;
666+
667+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
668+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
669+ jmp == 0x6BFB0000U)
670+ {
671+ unsigned long r27, addr;
672+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
673+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
674+
675+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
676+ err = get_user(r27, (unsigned long *)addr);
677+ if (err)
678+ break;
679+
680+ regs->r27 = r27;
681+ regs->pc = r27;
682+ return 2;
683+ }
684+ } while (0);
685+
686+ do { /* PaX: patched PLT emulation #2 */
687+ unsigned int ldah, lda, br;
688+
689+ err = get_user(ldah, (unsigned int *)regs->pc);
690+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
691+ err |= get_user(br, (unsigned int *)(regs->pc+8));
692+
693+ if (err)
694+ break;
695+
696+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
697+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
698+ (br & 0xFFE00000U) == 0xC3E00000U)
699+ {
700+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
701+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
702+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
703+
704+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
705+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
706+ return 2;
707+ }
708+ } while (0);
709+
710+ do { /* PaX: unpatched PLT emulation */
711+ unsigned int br;
712+
713+ err = get_user(br, (unsigned int *)regs->pc);
714+
715+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
716+ unsigned int br2, ldq, nop, jmp;
717+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
718+
719+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
720+ err = get_user(br2, (unsigned int *)addr);
721+ err |= get_user(ldq, (unsigned int *)(addr+4));
722+ err |= get_user(nop, (unsigned int *)(addr+8));
723+ err |= get_user(jmp, (unsigned int *)(addr+12));
724+ err |= get_user(resolver, (unsigned long *)(addr+16));
725+
726+ if (err)
727+ break;
728+
729+ if (br2 == 0xC3600000U &&
730+ ldq == 0xA77B000CU &&
731+ nop == 0x47FF041FU &&
732+ jmp == 0x6B7B0000U)
733+ {
734+ regs->r28 = regs->pc+4;
735+ regs->r27 = addr+16;
736+ regs->pc = resolver;
737+ return 3;
738+ }
739+ }
740+ } while (0);
741+#endif
742+
743+ return 1;
744+}
745+
746+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
747+{
748+ unsigned long i;
749+
750+ printk(KERN_ERR "PAX: bytes at PC: ");
751+ for (i = 0; i < 5; i++) {
752+ unsigned int c;
753+ if (get_user(c, (unsigned int *)pc+i))
754+ printk(KERN_CONT "???????? ");
755+ else
756+ printk(KERN_CONT "%08x ", c);
757+ }
758+ printk("\n");
759+}
760+#endif
761
762 /*
763 * This routine handles page faults. It determines the address,
764@@ -133,8 +251,29 @@ retry:
765 good_area:
766 si_code = SEGV_ACCERR;
767 if (cause < 0) {
768- if (!(vma->vm_flags & VM_EXEC))
769+ if (!(vma->vm_flags & VM_EXEC)) {
770+
771+#ifdef CONFIG_PAX_PAGEEXEC
772+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
773+ goto bad_area;
774+
775+ up_read(&mm->mmap_sem);
776+ switch (pax_handle_fetch_fault(regs)) {
777+
778+#ifdef CONFIG_PAX_EMUPLT
779+ case 2:
780+ case 3:
781+ return;
782+#endif
783+
784+ }
785+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
786+ do_group_exit(SIGKILL);
787+#else
788 goto bad_area;
789+#endif
790+
791+ }
792 } else if (!cause) {
793 /* Allow reads even for write-only mappings */
794 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
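The PLT emulation above recovers signed 16-bit displacements from ldah/lda instruction words with an xor/add idiom such as ((addrl ^ 0x8000UL) + 0x8000UL). The same sign-extension trick in standalone form:

#include <stdio.h>

/* Sign-extend the low 16 bits of an instruction word: xor flips the
 * sign bit into a carry-free position, the subtract restores it. */
static long sext16(unsigned long insn)
{
        unsigned long lo = insn & 0xffffUL;     /* 16-bit immediate */
        return (long)((lo ^ 0x8000UL) - 0x8000UL);
}

int main(void)
{
        printf("%ld %ld\n", sext16(0xa77b0010), sext16(0xa77bfff0));
        return 0;                               /* prints: 16 -16 */
}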
795diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
796index c79f61f..9ac0642 100644
797--- a/arch/arm/include/asm/atomic.h
798+++ b/arch/arm/include/asm/atomic.h
799@@ -17,17 +17,35 @@
800 #include <asm/barrier.h>
801 #include <asm/cmpxchg.h>
802
803+#ifdef CONFIG_GENERIC_ATOMIC64
804+#include <asm-generic/atomic64.h>
805+#endif
806+
807 #define ATOMIC_INIT(i) { (i) }
808
809 #ifdef __KERNEL__
810
811+#define _ASM_EXTABLE(from, to) \
812+" .pushsection __ex_table,\"a\"\n"\
813+" .align 3\n" \
814+" .long " #from ", " #to"\n" \
815+" .popsection"
816+
817 /*
818 * On ARM, ordinary assignment (str instruction) doesn't clear the local
819 * strex/ldrex monitor on some implementations. The reason we can use it for
820 * atomic_set() is the clrex or dummy strex done on every exception return.
821 */
822 #define atomic_read(v) (*(volatile int *)&(v)->counter)
823+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
824+{
825+ return v->counter;
826+}
827 #define atomic_set(v,i) (((v)->counter) = (i))
828+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
829+{
830+ v->counter = i;
831+}
832
833 #if __LINUX_ARM_ARCH__ >= 6
834
835@@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
836 int result;
837
838 __asm__ __volatile__("@ atomic_add\n"
839+"1: ldrex %1, [%3]\n"
840+" adds %0, %1, %4\n"
841+
842+#ifdef CONFIG_PAX_REFCOUNT
843+" bvc 3f\n"
844+"2: bkpt 0xf103\n"
845+"3:\n"
846+#endif
847+
848+" strex %1, %0, [%3]\n"
849+" teq %1, #0\n"
850+" bne 1b"
851+
852+#ifdef CONFIG_PAX_REFCOUNT
853+"\n4:\n"
854+ _ASM_EXTABLE(2b, 4b)
855+#endif
856+
857+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
858+ : "r" (&v->counter), "Ir" (i)
859+ : "cc");
860+}
861+
862+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
863+{
864+ unsigned long tmp;
865+ int result;
866+
867+ __asm__ __volatile__("@ atomic_add_unchecked\n"
868 "1: ldrex %0, [%3]\n"
869 " add %0, %0, %4\n"
870 " strex %1, %0, [%3]\n"
871@@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
872 smp_mb();
873
874 __asm__ __volatile__("@ atomic_add_return\n"
875+"1: ldrex %1, [%3]\n"
876+" adds %0, %1, %4\n"
877+
878+#ifdef CONFIG_PAX_REFCOUNT
879+" bvc 3f\n"
880+" mov %0, %1\n"
881+"2: bkpt 0xf103\n"
882+"3:\n"
883+#endif
884+
885+" strex %1, %0, [%3]\n"
886+" teq %1, #0\n"
887+" bne 1b"
888+
889+#ifdef CONFIG_PAX_REFCOUNT
890+"\n4:\n"
891+ _ASM_EXTABLE(2b, 4b)
892+#endif
893+
894+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
895+ : "r" (&v->counter), "Ir" (i)
896+ : "cc");
897+
898+ smp_mb();
899+
900+ return result;
901+}
902+
903+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
904+{
905+ unsigned long tmp;
906+ int result;
907+
908+ smp_mb();
909+
910+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
911 "1: ldrex %0, [%3]\n"
912 " add %0, %0, %4\n"
913 " strex %1, %0, [%3]\n"
914@@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
915 int result;
916
917 __asm__ __volatile__("@ atomic_sub\n"
918+"1: ldrex %1, [%3]\n"
919+" subs %0, %1, %4\n"
920+
921+#ifdef CONFIG_PAX_REFCOUNT
922+" bvc 3f\n"
923+"2: bkpt 0xf103\n"
924+"3:\n"
925+#endif
926+
927+" strex %1, %0, [%3]\n"
928+" teq %1, #0\n"
929+" bne 1b"
930+
931+#ifdef CONFIG_PAX_REFCOUNT
932+"\n4:\n"
933+ _ASM_EXTABLE(2b, 4b)
934+#endif
935+
936+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
937+ : "r" (&v->counter), "Ir" (i)
938+ : "cc");
939+}
940+
941+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
942+{
943+ unsigned long tmp;
944+ int result;
945+
946+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
947 "1: ldrex %0, [%3]\n"
948 " sub %0, %0, %4\n"
949 " strex %1, %0, [%3]\n"
950@@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
951 smp_mb();
952
953 __asm__ __volatile__("@ atomic_sub_return\n"
954-"1: ldrex %0, [%3]\n"
955-" sub %0, %0, %4\n"
956+"1: ldrex %1, [%3]\n"
957+" subs %0, %1, %4\n"
958+
959+#ifdef CONFIG_PAX_REFCOUNT
960+" bvc 3f\n"
961+" mov %0, %1\n"
962+"2: bkpt 0xf103\n"
963+"3:\n"
964+#endif
965+
966 " strex %1, %0, [%3]\n"
967 " teq %1, #0\n"
968 " bne 1b"
969+
970+#ifdef CONFIG_PAX_REFCOUNT
971+"\n4:\n"
972+ _ASM_EXTABLE(2b, 4b)
973+#endif
974+
975 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
976 : "r" (&v->counter), "Ir" (i)
977 : "cc");
978@@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
979 return oldval;
980 }
981
982+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
983+{
984+ unsigned long oldval, res;
985+
986+ smp_mb();
987+
988+ do {
989+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
990+ "ldrex %1, [%3]\n"
991+ "mov %0, #0\n"
992+ "teq %1, %4\n"
993+ "strexeq %0, %5, [%3]\n"
994+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
995+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
996+ : "cc");
997+ } while (res);
998+
999+ smp_mb();
1000+
1001+ return oldval;
1002+}
1003+
1004 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1005 {
1006 unsigned long tmp, tmp2;
1007@@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
1008
1009 return val;
1010 }
1011+
1012+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1013+{
1014+ return atomic_add_return(i, v);
1015+}
1016+
1017 #define atomic_add(i, v) (void) atomic_add_return(i, v)
1018+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1019+{
1020+ (void) atomic_add_return(i, v);
1021+}
1022
1023 static inline int atomic_sub_return(int i, atomic_t *v)
1024 {
1025@@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1026 return val;
1027 }
1028 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
1029+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1030+{
1031+ (void) atomic_sub_return(i, v);
1032+}
1033
1034 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1035 {
1036@@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1037 return ret;
1038 }
1039
1040+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1041+{
1042+ return atomic_cmpxchg(v, old, new);
1043+}
1044+
1045 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1046 {
1047 unsigned long flags;
1048@@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1049 #endif /* __LINUX_ARM_ARCH__ */
1050
1051 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1052+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1053+{
1054+ return xchg(&v->counter, new);
1055+}
1056
1057 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1058 {
1059@@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1060 }
1061
1062 #define atomic_inc(v) atomic_add(1, v)
1063+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1064+{
1065+ atomic_add_unchecked(1, v);
1066+}
1067 #define atomic_dec(v) atomic_sub(1, v)
1068+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1069+{
1070+ atomic_sub_unchecked(1, v);
1071+}
1072
1073 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1074+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1075+{
1076+ return atomic_add_return_unchecked(1, v) == 0;
1077+}
1078 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1079 #define atomic_inc_return(v) (atomic_add_return(1, v))
1080+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1081+{
1082+ return atomic_add_return_unchecked(1, v);
1083+}
1084 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1085 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1086
1087@@ -241,6 +428,14 @@ typedef struct {
1088 u64 __aligned(8) counter;
1089 } atomic64_t;
1090
1091+#ifdef CONFIG_PAX_REFCOUNT
1092+typedef struct {
1093+ u64 __aligned(8) counter;
1094+} atomic64_unchecked_t;
1095+#else
1096+typedef atomic64_t atomic64_unchecked_t;
1097+#endif
1098+
1099 #define ATOMIC64_INIT(i) { (i) }
1100
1101 static inline u64 atomic64_read(const atomic64_t *v)
1102@@ -256,6 +451,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
1103 return result;
1104 }
1105
1106+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1107+{
1108+ u64 result;
1109+
1110+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1111+" ldrexd %0, %H0, [%1]"
1112+ : "=&r" (result)
1113+ : "r" (&v->counter), "Qo" (v->counter)
1114+ );
1115+
1116+ return result;
1117+}
1118+
1119 static inline void atomic64_set(atomic64_t *v, u64 i)
1120 {
1121 u64 tmp;
1122@@ -270,6 +478,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1123 : "cc");
1124 }
1125
1126+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1127+{
1128+ u64 tmp;
1129+
1130+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1131+"1: ldrexd %0, %H0, [%2]\n"
1132+" strexd %0, %3, %H3, [%2]\n"
1133+" teq %0, #0\n"
1134+" bne 1b"
1135+ : "=&r" (tmp), "=Qo" (v->counter)
1136+ : "r" (&v->counter), "r" (i)
1137+ : "cc");
1138+}
1139+
1140 static inline void atomic64_add(u64 i, atomic64_t *v)
1141 {
1142 u64 result;
1143@@ -278,6 +500,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1144 __asm__ __volatile__("@ atomic64_add\n"
1145 "1: ldrexd %0, %H0, [%3]\n"
1146 " adds %0, %0, %4\n"
1147+" adcs %H0, %H0, %H4\n"
1148+
1149+#ifdef CONFIG_PAX_REFCOUNT
1150+" bvc 3f\n"
1151+"2: bkpt 0xf103\n"
1152+"3:\n"
1153+#endif
1154+
1155+" strexd %1, %0, %H0, [%3]\n"
1156+" teq %1, #0\n"
1157+" bne 1b"
1158+
1159+#ifdef CONFIG_PAX_REFCOUNT
1160+"\n4:\n"
1161+ _ASM_EXTABLE(2b, 4b)
1162+#endif
1163+
1164+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1165+ : "r" (&v->counter), "r" (i)
1166+ : "cc");
1167+}
1168+
1169+static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1170+{
1171+ u64 result;
1172+ unsigned long tmp;
1173+
1174+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1175+"1: ldrexd %0, %H0, [%3]\n"
1176+" adds %0, %0, %4\n"
1177 " adc %H0, %H0, %H4\n"
1178 " strexd %1, %0, %H0, [%3]\n"
1179 " teq %1, #0\n"
1180@@ -289,12 +541,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1181
1182 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1183 {
1184- u64 result;
1185- unsigned long tmp;
1186+ u64 result, tmp;
1187
1188 smp_mb();
1189
1190 __asm__ __volatile__("@ atomic64_add_return\n"
1191+"1: ldrexd %1, %H1, [%3]\n"
1192+" adds %0, %1, %4\n"
1193+" adcs %H0, %H1, %H4\n"
1194+
1195+#ifdef CONFIG_PAX_REFCOUNT
1196+" bvc 3f\n"
1197+" mov %0, %1\n"
1198+" mov %H0, %H1\n"
1199+"2: bkpt 0xf103\n"
1200+"3:\n"
1201+#endif
1202+
1203+" strexd %1, %0, %H0, [%3]\n"
1204+" teq %1, #0\n"
1205+" bne 1b"
1206+
1207+#ifdef CONFIG_PAX_REFCOUNT
1208+"\n4:\n"
1209+ _ASM_EXTABLE(2b, 4b)
1210+#endif
1211+
1212+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1213+ : "r" (&v->counter), "r" (i)
1214+ : "cc");
1215+
1216+ smp_mb();
1217+
1218+ return result;
1219+}
1220+
1221+static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1222+{
1223+ u64 result;
1224+ unsigned long tmp;
1225+
1226+ smp_mb();
1227+
1228+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1229 "1: ldrexd %0, %H0, [%3]\n"
1230 " adds %0, %0, %4\n"
1231 " adc %H0, %H0, %H4\n"
1232@@ -318,6 +607,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1233 __asm__ __volatile__("@ atomic64_sub\n"
1234 "1: ldrexd %0, %H0, [%3]\n"
1235 " subs %0, %0, %4\n"
1236+" sbcs %H0, %H0, %H4\n"
1237+
1238+#ifdef CONFIG_PAX_REFCOUNT
1239+" bvc 3f\n"
1240+"2: bkpt 0xf103\n"
1241+"3:\n"
1242+#endif
1243+
1244+" strexd %1, %0, %H0, [%3]\n"
1245+" teq %1, #0\n"
1246+" bne 1b"
1247+
1248+#ifdef CONFIG_PAX_REFCOUNT
1249+"\n4:\n"
1250+ _ASM_EXTABLE(2b, 4b)
1251+#endif
1252+
1253+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1254+ : "r" (&v->counter), "r" (i)
1255+ : "cc");
1256+}
1257+
1258+static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1259+{
1260+ u64 result;
1261+ unsigned long tmp;
1262+
1263+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1264+"1: ldrexd %0, %H0, [%3]\n"
1265+" subs %0, %0, %4\n"
1266 " sbc %H0, %H0, %H4\n"
1267 " strexd %1, %0, %H0, [%3]\n"
1268 " teq %1, #0\n"
1269@@ -329,18 +648,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1270
1271 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1272 {
1273- u64 result;
1274- unsigned long tmp;
1275+ u64 result, tmp;
1276
1277 smp_mb();
1278
1279 __asm__ __volatile__("@ atomic64_sub_return\n"
1280-"1: ldrexd %0, %H0, [%3]\n"
1281-" subs %0, %0, %4\n"
1282-" sbc %H0, %H0, %H4\n"
1283+"1: ldrexd %1, %H1, [%3]\n"
1284+" subs %0, %1, %4\n"
1285+" sbcs %H0, %H1, %H4\n"
1286+
1287+#ifdef CONFIG_PAX_REFCOUNT
1288+" bvc 3f\n"
1289+" mov %0, %1\n"
1290+" mov %H0, %H1\n"
1291+"2: bkpt 0xf103\n"
1292+"3:\n"
1293+#endif
1294+
1295 " strexd %1, %0, %H0, [%3]\n"
1296 " teq %1, #0\n"
1297 " bne 1b"
1298+
1299+#ifdef CONFIG_PAX_REFCOUNT
1300+"\n4:\n"
1301+ _ASM_EXTABLE(2b, 4b)
1302+#endif
1303+
1304 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1305 : "r" (&v->counter), "r" (i)
1306 : "cc");
1307@@ -374,6 +707,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1308 return oldval;
1309 }
1310
1311+static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1312+{
1313+ u64 oldval;
1314+ unsigned long res;
1315+
1316+ smp_mb();
1317+
1318+ do {
1319+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1320+ "ldrexd %1, %H1, [%3]\n"
1321+ "mov %0, #0\n"
1322+ "teq %1, %4\n"
1323+ "teqeq %H1, %H4\n"
1324+ "strexdeq %0, %5, %H5, [%3]"
1325+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1326+ : "r" (&ptr->counter), "r" (old), "r" (new)
1327+ : "cc");
1328+ } while (res);
1329+
1330+ smp_mb();
1331+
1332+ return oldval;
1333+}
1334+
1335 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1336 {
1337 u64 result;
1338@@ -397,21 +754,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1339
1340 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1341 {
1342- u64 result;
1343- unsigned long tmp;
1344+ u64 result, tmp;
1345
1346 smp_mb();
1347
1348 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1349-"1: ldrexd %0, %H0, [%3]\n"
1350-" subs %0, %0, #1\n"
1351-" sbc %H0, %H0, #0\n"
1352+"1: ldrexd %1, %H1, [%3]\n"
1353+" subs %0, %1, #1\n"
1354+" sbcs %H0, %H1, #0\n"
1355+
1356+#ifdef CONFIG_PAX_REFCOUNT
1357+" bvc 3f\n"
1358+" mov %0, %1\n"
1359+" mov %H0, %H1\n"
1360+"2: bkpt 0xf103\n"
1361+"3:\n"
1362+#endif
1363+
1364 " teq %H0, #0\n"
1365-" bmi 2f\n"
1366+" bmi 4f\n"
1367 " strexd %1, %0, %H0, [%3]\n"
1368 " teq %1, #0\n"
1369 " bne 1b\n"
1370-"2:"
1371+"4:\n"
1372+
1373+#ifdef CONFIG_PAX_REFCOUNT
1374+ _ASM_EXTABLE(2b, 4b)
1375+#endif
1376+
1377 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1378 : "r" (&v->counter)
1379 : "cc");
1380@@ -434,13 +804,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1381 " teq %0, %5\n"
1382 " teqeq %H0, %H5\n"
1383 " moveq %1, #0\n"
1384-" beq 2f\n"
1385+" beq 4f\n"
1386 " adds %0, %0, %6\n"
1387-" adc %H0, %H0, %H6\n"
1388+" adcs %H0, %H0, %H6\n"
1389+
1390+#ifdef CONFIG_PAX_REFCOUNT
1391+" bvc 3f\n"
1392+"2: bkpt 0xf103\n"
1393+"3:\n"
1394+#endif
1395+
1396 " strexd %2, %0, %H0, [%4]\n"
1397 " teq %2, #0\n"
1398 " bne 1b\n"
1399-"2:"
1400+"4:\n"
1401+
1402+#ifdef CONFIG_PAX_REFCOUNT
1403+ _ASM_EXTABLE(2b, 4b)
1404+#endif
1405+
1406 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1407 : "r" (&v->counter), "r" (u), "r" (a)
1408 : "cc");
1409@@ -453,10 +835,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1410
1411 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1412 #define atomic64_inc(v) atomic64_add(1LL, (v))
1413+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1414 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1415+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1416 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1417 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1418 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1419+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1420 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1421 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1422 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
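The adds/bvc/bkpt sequences added throughout this file implement PAX_REFCOUNT: a checked atomic op traps on signed overflow instead of wrapping, and the __ex_table entry routes the trap to a fixup. The semantics, expressed as a portable user-space sketch (not the kernel code, and without the ldrex/strex atomicity):

#include <stdio.h>
#include <limits.h>

static int atomic_add_return_checked(int i, int *v)
{
        int res;

        if (__builtin_add_overflow(*v, i, &res))
                __builtin_trap();   /* kernel: bkpt 0xf103, handled via
                                       the __ex_table fixup entry */
        return *v = res;
}

int main(void)
{
        int c = INT_MAX - 1;
        printf("%d\n", atomic_add_return_checked(1, &c)); /* INT_MAX */
        atomic_add_return_checked(1, &c);                 /* traps */
        return 0;
}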
1423diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1424index 75fe66b..ba3dee4 100644
1425--- a/arch/arm/include/asm/cache.h
1426+++ b/arch/arm/include/asm/cache.h
1427@@ -4,8 +4,10 @@
1428 #ifndef __ASMARM_CACHE_H
1429 #define __ASMARM_CACHE_H
1430
1431+#include <linux/const.h>
1432+
1433 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1434-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1435+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1436
1437 /*
1438 * Memory returned by kmalloc() may be used for DMA, so we must make
1439@@ -24,5 +26,6 @@
1440 #endif
1441
1442 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1443+#define __read_only __attribute__ ((__section__(".data..read_only")))
1444
1445 #endif
1446diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1447index e1489c5..d418304 100644
1448--- a/arch/arm/include/asm/cacheflush.h
1449+++ b/arch/arm/include/asm/cacheflush.h
1450@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1451 void (*dma_unmap_area)(const void *, size_t, int);
1452
1453 void (*dma_flush_range)(const void *, const void *);
1454-};
1455+} __no_const;
1456
1457 /*
1458 * Select the calling method
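__no_const marks function-pointer structs that must remain writable (cpu_cache_fns is filled in at boot) so the constify plugin leaves them alone, while __do_const forces the const treatment. The annotations expand to plugin-defined attributes, roughly like this (a sketch of the pattern the patch adds to compiler.h):

#ifdef CONSTIFY_PLUGIN
#define __no_const __attribute__((no_const))   /* opt out of constify */
#define __do_const __attribute__((do_const))   /* force constify */
#else
#define __no_const
#define __do_const
#endif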
1459diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1460index 7eb18c1..e38b6d2 100644
1461--- a/arch/arm/include/asm/cmpxchg.h
1462+++ b/arch/arm/include/asm/cmpxchg.h
1463@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1464
1465 #define xchg(ptr,x) \
1466 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1467+#define xchg_unchecked(ptr,x) \
1468+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1469
1470 #include <asm-generic/cmpxchg-local.h>
1471
1472diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
1473index ab98fdd..6b19938 100644
1474--- a/arch/arm/include/asm/delay.h
1475+++ b/arch/arm/include/asm/delay.h
1476@@ -24,9 +24,9 @@ extern struct arm_delay_ops {
1477 void (*delay)(unsigned long);
1478 void (*const_udelay)(unsigned long);
1479 void (*udelay)(unsigned long);
1480-} arm_delay_ops;
1481+} *arm_delay_ops;
1482
1483-#define __delay(n) arm_delay_ops.delay(n)
1484+#define __delay(n) arm_delay_ops->delay(n)
1485
1486 /*
1487 * This function intentionally does not exist; if you see references to
1488@@ -47,8 +47,8 @@ extern void __bad_udelay(void);
1489 * first constant multiplications gets optimized away if the delay is
1490 * a constant)
1491 */
1492-#define __udelay(n) arm_delay_ops.udelay(n)
1493-#define __const_udelay(n) arm_delay_ops.const_udelay(n)
1494+#define __udelay(n) arm_delay_ops->udelay(n)
1495+#define __const_udelay(n) arm_delay_ops->const_udelay(n)
1496
1497 #define udelay(n) \
1498 (__builtin_constant_p(n) ? \
1499diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1500index 38050b1..9d90e8b 100644
1501--- a/arch/arm/include/asm/elf.h
1502+++ b/arch/arm/include/asm/elf.h
1503@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1504 the loader. We need to make sure that it is out of the way of the program
1505 that it will "exec", and that there is sufficient room for the brk. */
1506
1507-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1508+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1509+
1510+#ifdef CONFIG_PAX_ASLR
1511+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1512+
1513+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1514+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1515+#endif
1516
1517 /* When the program starts, a1 contains a pointer to a function to be
1518 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1519@@ -126,8 +133,4 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1520 extern void elf_set_personality(const struct elf32_hdr *);
1521 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1522
1523-struct mm_struct;
1524-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1525-#define arch_randomize_brk arch_randomize_brk
1526-
1527 #endif
1528diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1529index 83eb2f7..ed77159 100644
1530--- a/arch/arm/include/asm/kmap_types.h
1531+++ b/arch/arm/include/asm/kmap_types.h
1532@@ -4,6 +4,6 @@
1533 /*
1534 * This is the "bare minimum". AIO seems to require this.
1535 */
1536-#define KM_TYPE_NR 16
1537+#define KM_TYPE_NR 17
1538
1539 #endif
1540diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1541index 195ac2f..2272f0d 100644
1542--- a/arch/arm/include/asm/mach/map.h
1543+++ b/arch/arm/include/asm/mach/map.h
1544@@ -34,6 +34,9 @@ struct map_desc {
1545 #define MT_MEMORY_ITCM 13
1546 #define MT_MEMORY_SO 14
1547 #define MT_MEMORY_DMA_READY 15
1548+#define MT_MEMORY_R 16
1549+#define MT_MEMORY_RW 17
1550+#define MT_MEMORY_RX 18
1551
1552 #ifdef CONFIG_MMU
1553 extern void iotable_init(struct map_desc *, int);
1554diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1555index 53426c6..c7baff3 100644
1556--- a/arch/arm/include/asm/outercache.h
1557+++ b/arch/arm/include/asm/outercache.h
1558@@ -35,7 +35,7 @@ struct outer_cache_fns {
1559 #endif
1560 void (*set_debug)(unsigned long);
1561 void (*resume)(void);
1562-};
1563+} __no_const;
1564
1565 #ifdef CONFIG_OUTER_CACHE
1566
1567diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1568index 812a494..71fc0b6 100644
1569--- a/arch/arm/include/asm/page.h
1570+++ b/arch/arm/include/asm/page.h
1571@@ -114,7 +114,7 @@ struct cpu_user_fns {
1572 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1573 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1574 unsigned long vaddr, struct vm_area_struct *vma);
1575-};
1576+} __no_const;
1577
1578 #ifdef MULTI_USER
1579 extern struct cpu_user_fns cpu_user;
1580diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1581index 943504f..1ad2de8 100644
1582--- a/arch/arm/include/asm/pgalloc.h
1583+++ b/arch/arm/include/asm/pgalloc.h
1584@@ -22,7 +22,7 @@
1585
1586 #ifdef CONFIG_MMU
1587
1588-#define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
1589+#define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_PXNTABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
1590 #define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))
1591
1592 #ifdef CONFIG_ARM_LPAE
1593@@ -43,6 +43,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1594 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1595 }
1596
1597+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1598+{
1599+ pud_populate(mm, pud, pmd);
1600+}
1601+
1602 #else /* !CONFIG_ARM_LPAE */
1603
1604 /*
1605@@ -51,6 +56,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1606 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1607 #define pmd_free(mm, pmd) do { } while (0)
1608 #define pud_populate(mm,pmd,pte) BUG()
1609+#define pud_populate_kernel(mm,pmd,pte) BUG()
1610
1611 #endif /* CONFIG_ARM_LPAE */
1612
1613@@ -126,6 +132,16 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1614 __free_page(pte);
1615 }
1616
1617+static inline void __pmd_update(pmd_t *pmdp, pmdval_t prot)
1618+{
1619+ pmdval_t pmdval = pmd_val(*pmdp) | prot;
1620+ pmdp[0] = __pmd(pmdval);
1621+#ifndef CONFIG_ARM_LPAE
1622+ pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
1623+#endif
1624+ flush_pmd_entry(pmdp);
1625+}
1626+
1627 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1628 pmdval_t prot)
1629 {
1630diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1631index 5cfba15..d437dc2 100644
1632--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1633+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1634@@ -20,12 +20,15 @@
1635 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
1636 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
1637 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
1638+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* PXN */
1639 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
1640 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
1641 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
1642+
1643 /*
1644 * - section
1645 */
1646+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0)
1647 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1648 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1649 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1650@@ -37,6 +40,7 @@
1651 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1652 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1653 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1654+#define PMD_SECT_AP_RDONLY (_AT(pmdval_t, 0))
1655
1656 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1657 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1658diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
1659index d795282..d82ff13 100644
1660--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
1661+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
1662@@ -32,6 +32,7 @@
1663 #define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0)
1664 #define PMD_BIT4 (_AT(pmdval_t, 0))
1665 #define PMD_DOMAIN(x) (_AT(pmdval_t, 0))
1666+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 59) /* PXNTable */
1667
1668 /*
1669 * - section
1670@@ -41,9 +42,11 @@
1671 #define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
1672 #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
1673 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
1674+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53)
1675 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 54)
1676 #define PMD_SECT_AP_WRITE (_AT(pmdval_t, 0))
1677 #define PMD_SECT_AP_READ (_AT(pmdval_t, 0))
1678+#define PMD_SECT_AP_RDONLY (_AT(pmdval_t, 1) << 7)
1679 #define PMD_SECT_TEX(x) (_AT(pmdval_t, 0))
1680
1681 /*
1682@@ -66,6 +69,7 @@
1683 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1684 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
1685 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
1686+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1687 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
1688
1689 /*
1690diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1691index b249035..4ab204b 100644
1692--- a/arch/arm/include/asm/pgtable-3level.h
1693+++ b/arch/arm/include/asm/pgtable-3level.h
1694@@ -73,6 +73,7 @@
1695 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
1696 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1697 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1698+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1699 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1700 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
1701 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
1702@@ -80,6 +81,7 @@
1703 /*
1704 * To be used in assembly code with the upper page attributes.
1705 */
1706+#define L_PTE_PXN_HIGH (1 << (53 - 32))
1707 #define L_PTE_XN_HIGH (1 << (54 - 32))
1708 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1709
1710diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1711index 08c1231..6a2d6b0 100644
1712--- a/arch/arm/include/asm/pgtable.h
1713+++ b/arch/arm/include/asm/pgtable.h
1714@@ -30,6 +30,9 @@
1715 #include <asm/pgtable-2level.h>
1716 #endif
1717
1718+#define ktla_ktva(addr) (addr)
1719+#define ktva_ktla(addr) (addr)
1720+
1721 /*
1722 * Just any arbitrary offset to the start of the vmalloc VM area: the
1723 * current 8MB value just means that there will be a 8MB "hole" after the
1724@@ -53,6 +56,17 @@ extern void __pgd_error(const char *file, int line, pgd_t);
1725 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
1726 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
1727
1728+#define __HAVE_ARCH_PAX_OPEN_KERNEL
1729+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
1730+
1731+#ifdef CONFIG_PAX_KERNEXEC
1732+static inline unsigned long pax_open_kernel(void) { return 0; /* TODO */ }
1733+static inline unsigned long pax_close_kernel(void) { return 0; /* TODO */ }
1734+#else
1735+static inline unsigned long pax_open_kernel(void) { return 0; }
1736+static inline unsigned long pax_close_kernel(void) { return 0; }
1737+#endif
1738+
1739 /*
1740 * This is the lowest virtual address we can permit any user space
1741 * mapping to be mapped at. This is particularly important for
1742@@ -73,23 +87,23 @@ extern pgprot_t pgprot_kernel;
1743
1744 #define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))
1745
1746-#define PAGE_NONE _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY)
1747-#define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
1748-#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER)
1749-#define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
1750-#define PAGE_COPY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
1751-#define PAGE_READONLY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
1752-#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
1753+#define PAGE_NONE _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_PXN | L_PTE_RDONLY)
1754+#define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN | L_PTE_PXN)
1755+#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_PXN)
1756+#define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN | L_PTE_PXN)
1757+#define PAGE_COPY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_PXN)
1758+#define PAGE_READONLY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN | L_PTE_PXN)
1759+#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_PXN)
1760 #define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN)
1761 #define PAGE_KERNEL_EXEC pgprot_kernel
1762
1763-#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN)
1764-#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
1765-#define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER)
1766-#define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
1767-#define __PAGE_COPY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
1768-#define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
1769-#define __PAGE_READONLY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
1770+#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_PXN)
1771+#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN | L_PTE_PXN)
1772+#define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_PXN)
1773+#define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN | L_PTE_PXN)
1774+#define __PAGE_COPY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_PXN)
1775+#define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN | L_PTE_PXN)
1776+#define __PAGE_READONLY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_PXN)
1777
1778 #define __pgprot_modify(prot,mask,bits) \
1779 __pgprot((pgprot_val(prot) & ~(mask)) | (bits))
1780@@ -242,7 +256,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
1781
1782 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
1783 {
1784- const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER;
1785+ const pteval_t mask = L_PTE_XN | L_PTE_PXN | L_PTE_RDONLY | L_PTE_USER;
1786 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
1787 return pte;
1788 }
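
The pax_open_kernel()/pax_close_kernel() pair declared here is the KERNEXEC hook for temporarily lifting write protection on kernel data. On ARM both are still stubs at this point (note the TODO), so the hunk mainly reserves the interface that generic grsecurity code already calls. A minimal sketch of the calling pattern on architectures where KERNEXEC is implemented; the variable name and the __read_only placement are illustrative, not from this patch:

static int policy_enabled __read_only;  /* sealed read-only after init */

static void set_policy(int on)
{
        pax_open_kernel();      /* lift write protection */
        policy_enabled = on;
        pax_close_kernel();     /* restore it */
}
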
1789diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
1790index f3628fb..a0672dd 100644
1791--- a/arch/arm/include/asm/proc-fns.h
1792+++ b/arch/arm/include/asm/proc-fns.h
1793@@ -75,7 +75,7 @@ extern struct processor {
1794 unsigned int suspend_size;
1795 void (*do_suspend)(void *);
1796 void (*do_resume)(void *);
1797-} processor;
1798+} __do_const processor;
1799
1800 #ifndef MULTI_CPU
1801 extern void cpu_proc_init(void);
1802diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
1803index 2e3be16..4dc90fc 100644
1804--- a/arch/arm/include/asm/smp.h
1805+++ b/arch/arm/include/asm/smp.h
1806@@ -106,7 +106,7 @@ struct smp_operations {
1807 int (*cpu_disable)(unsigned int cpu);
1808 #endif
1809 #endif
1810-};
1811+} __no_const;
1812
1813 /*
1814 * set platform specific SMP operations
1815diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
1816index 8477b4c..801a6a9 100644
1817--- a/arch/arm/include/asm/thread_info.h
1818+++ b/arch/arm/include/asm/thread_info.h
1819@@ -151,6 +151,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
1820 #define TIF_SYSCALL_TRACE 8
1821 #define TIF_SYSCALL_AUDIT 9
1822 #define TIF_SYSCALL_TRACEPOINT 10
1823+
1824+/* within 8 bits of TIF_SYSCALL_TRACE
1825+ * to meet flexible second operand requirements
1826+ */
1827+#define TIF_GRSEC_SETXID 11
1828+
1829 #define TIF_USING_IWMMXT 17
1830 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
1831 #define TIF_RESTORE_SIGMASK 20
1832@@ -165,9 +171,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
1833 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
1834 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
1835 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
1836+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
1837
1838 /* Checks for any syscall work in entry-common.S */
1839-#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
1840+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | \
1841+ _TIF_GRSEC_SETXID)
1842
1843 /*
1844 * Change these and you break ASM code in entry-common.S
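
The "flexible second operand" the new comment refers to is the ARM data-processing immediate: an 8-bit value rotated right by an even amount. Keeping TIF_GRSEC_SETXID at bit 11, within 8 bits of TIF_SYSCALL_TRACE at bit 8, keeps the combined _TIF_SYSCALL_WORK mask (now 0xf00) encodable as one such immediate in entry-common.S; the same limit is presumably why the pgtbl macro in the head.S hunk further down is split into mov/sub/add, since the combined TEXT_OFFSET - PG_DIR_SIZE constant is no longer guaranteed to be encodable. A standalone sketch of the encodability test (not from the patch):

#include <stdio.h>
#include <stdint.h>

/* nonzero if v fits an ARM "flexible second operand":
 * an 8-bit value rotated right by an even amount */
static int arm_encodable_imm(uint32_t v)
{
        for (int rot = 0; rot < 32; rot += 2) {
                uint32_t r = rot ? (v << rot) | (v >> (32 - rot)) : v;
                if (r <= 0xffu)
                        return 1;
        }
        return 0;
}

int main(void)
{
        printf("0x00000f00: %d\n", arm_encodable_imm(0x00000f00)); /* 1 */
        printf("0x00020700: %d\n", arm_encodable_imm(0x00020700)); /* 0 */
        return 0;
}
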
1845diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
1846index 7e1f760..1af891c 100644
1847--- a/arch/arm/include/asm/uaccess.h
1848+++ b/arch/arm/include/asm/uaccess.h
1849@@ -22,6 +22,8 @@
1850 #define VERIFY_READ 0
1851 #define VERIFY_WRITE 1
1852
1853+extern void check_object_size(const void *ptr, unsigned long n, bool to);
1854+
1855 /*
1856 * The exception table consists of pairs of addresses: the first is the
1857 * address of an instruction that is allowed to fault, and the second is
1858@@ -418,8 +420,23 @@ do { \
1859
1860
1861 #ifdef CONFIG_MMU
1862-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
1863-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
1864+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
1865+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
1866+
1867+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
1868+{
1869+ if (!__builtin_constant_p(n))
1870+ check_object_size(to, n, false);
1871+ return ___copy_from_user(to, from, n);
1872+}
1873+
1874+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
1875+{
1876+ if (!__builtin_constant_p(n))
1877+ check_object_size(from, n, true);
1878+ return ___copy_to_user(to, from, n);
1879+}
1880+
1881 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
1882 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1883 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
1884@@ -431,6 +448,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
1885
1886 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1887 {
1888+ if ((long)n < 0)
1889+ return n;
1890+
1891 if (access_ok(VERIFY_READ, from, n))
1892 n = __copy_from_user(to, from, n);
1893 else /* security hole - plug it */
1894@@ -440,6 +460,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
1895
1896 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1897 {
1898+ if ((long)n < 0)
1899+ return n;
1900+
1901 if (access_ok(VERIFY_WRITE, to, n))
1902 n = __copy_to_user(to, from, n);
1903 return n;
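
Two independent guards are added to the copy helpers. The (long)n < 0 test in copy_{from,to}_user rejects lengths with the high bit set, typically the result of an unchecked subtraction that went negative, before anything is copied; the full count comes back as "bytes not copied". Separately, __copy_{from,to}_user now route through check_object_size(), the PAX_USERCOPY entry point, whenever n is not a compile-time constant: constant-size copies can be validated statically, so only runtime-sized ones pay for the slab/stack bounds check. A hedged usage sketch (demo_read and kbuf are illustrative names; kernel-style fragment assuming the usual fs/uaccess headers):

static ssize_t demo_read(struct file *file, char __user *buf,
                         size_t n, loff_t *ppos)
{
        char kbuf[64];

        if (n > sizeof(kbuf))
                n = sizeof(kbuf);
        memset(kbuf, 0, n);
        /* n is runtime-sized, so the patched path calls
         * check_object_size(kbuf, n, true) before copying out */
        if (copy_to_user(buf, kbuf, n))
                return -EFAULT;
        return n;
}
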
1904diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
1905index 96ee092..37f1844 100644
1906--- a/arch/arm/include/uapi/asm/ptrace.h
1907+++ b/arch/arm/include/uapi/asm/ptrace.h
1908@@ -73,7 +73,7 @@
1909 * ARMv7 groups of PSR bits
1910 */
1911 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
1912-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
1913+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
1914 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
1915 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
1916
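
The PSR_ISET_MASK change is a genuine bug fix rather than a hardening tweak: the ISA-state bits in the CPSR are J at bit 24 (0x01000000) and T at bit 5 (0x20), while the old value ORed in bit 4 (0x10), which is not a state bit at all. A quick sanity check against the PSR_J_BIT/PSR_T_BIT values defined in this same header:

#include <assert.h>

#define PSR_T_BIT       0x00000020
#define PSR_J_BIT       0x01000000

int main(void)
{
        assert((PSR_J_BIT | PSR_T_BIT) == 0x01000020);  /* new mask */
        return 0;
}
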
1917diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
1918index 60d3b73..9168db0 100644
1919--- a/arch/arm/kernel/armksyms.c
1920+++ b/arch/arm/kernel/armksyms.c
1921@@ -89,8 +89,8 @@ EXPORT_SYMBOL(__memzero);
1922 #ifdef CONFIG_MMU
1923 EXPORT_SYMBOL(copy_page);
1924
1925-EXPORT_SYMBOL(__copy_from_user);
1926-EXPORT_SYMBOL(__copy_to_user);
1927+EXPORT_SYMBOL(___copy_from_user);
1928+EXPORT_SYMBOL(___copy_to_user);
1929 EXPORT_SYMBOL(__clear_user);
1930
1931 EXPORT_SYMBOL(__get_user_1);
1932diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
1933index 4eee351..e247728 100644
1934--- a/arch/arm/kernel/head.S
1935+++ b/arch/arm/kernel/head.S
1936@@ -52,7 +52,9 @@
1937 .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
1938
1939 .macro pgtbl, rd, phys
1940- add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
1941+ mov \rd, #TEXT_OFFSET
1942+ sub \rd, #PG_DIR_SIZE
1943+ add \rd, \rd, \phys
1944 .endm
1945
1946 /*
1947diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
1948index 1e9be5d..b9a75e1 100644
1949--- a/arch/arm/kernel/module.c
1950+++ b/arch/arm/kernel/module.c
1951@@ -37,12 +37,35 @@
1952 #endif
1953
1954 #ifdef CONFIG_MMU
1955-void *module_alloc(unsigned long size)
1956+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
1957 {
1958 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
1959- GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
1960+ GFP_KERNEL, prot, -1,
1961 __builtin_return_address(0));
1962 }
1963+
1964+void *module_alloc(unsigned long size)
1965+{
1966+
1967+#ifdef CONFIG_PAX_KERNEXEC
1968+ return __module_alloc(size, PAGE_KERNEL);
1969+#else
1970+ return __module_alloc(size, PAGE_KERNEL_EXEC);
1971+#endif
1972+
1973+}
1974+
1975+#ifdef CONFIG_PAX_KERNEXEC
1976+void module_free_exec(struct module *mod, void *module_region)
1977+{
1978+ module_free(mod, module_region);
1979+}
1980+
1981+void *module_alloc_exec(unsigned long size)
1982+{
1983+ return __module_alloc(size, PAGE_KERNEL_EXEC);
1984+}
1985+#endif
1986 #endif
1987
1988 int
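
Under KERNEXEC, plain module_alloc() now hands back non-executable (PAGE_KERNEL) memory and the executable allocation moves to module_alloc_exec(); the generic module loader, patched elsewhere in this grsecurity patch, splits each module into separate mappings along these lines. Roughly, with the field names the patched loader uses (see the ia64 module.c hunk below):

/* sketch: code sections land in an executable mapping,
 * data sections in a writable, non-executable one */
mod->module_core_rx = module_alloc_exec(mod->core_size_rx);
mod->module_core_rw = module_alloc(mod->core_size_rw);
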
1989diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
1990index 90084a6..bf4bcfb 100644
1991--- a/arch/arm/kernel/process.c
1992+++ b/arch/arm/kernel/process.c
1993@@ -28,7 +28,6 @@
1994 #include <linux/tick.h>
1995 #include <linux/utsname.h>
1996 #include <linux/uaccess.h>
1997-#include <linux/random.h>
1998 #include <linux/hw_breakpoint.h>
1999 #include <linux/cpuidle.h>
2000 #include <linux/leds.h>
2001@@ -256,9 +255,10 @@ void machine_power_off(void)
2002 machine_shutdown();
2003 if (pm_power_off)
2004 pm_power_off();
2005+ BUG();
2006 }
2007
2008-void machine_restart(char *cmd)
2009+__noreturn void machine_restart(char *cmd)
2010 {
2011 machine_shutdown();
2012
2013@@ -451,12 +451,6 @@ unsigned long get_wchan(struct task_struct *p)
2014 return 0;
2015 }
2016
2017-unsigned long arch_randomize_brk(struct mm_struct *mm)
2018-{
2019- unsigned long range_end = mm->brk + 0x02000000;
2020- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2021-}
2022-
2023 #ifdef CONFIG_MMU
2024 /*
2025 * The vectors page is always readable from user space for the
2026diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
2027index 739db3a..7f4a272 100644
2028--- a/arch/arm/kernel/ptrace.c
2029+++ b/arch/arm/kernel/ptrace.c
2030@@ -916,6 +916,10 @@ enum ptrace_syscall_dir {
2031 PTRACE_SYSCALL_EXIT,
2032 };
2033
2034+#ifdef CONFIG_GRKERNSEC_SETXID
2035+extern void gr_delayed_cred_worker(void);
2036+#endif
2037+
2038 static int ptrace_syscall_trace(struct pt_regs *regs, int scno,
2039 enum ptrace_syscall_dir dir)
2040 {
2041@@ -923,6 +927,11 @@ static int ptrace_syscall_trace(struct pt_regs *regs, int scno,
2042
2043 current_thread_info()->syscall = scno;
2044
2045+#ifdef CONFIG_GRKERNSEC_SETXID
2046+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
2047+ gr_delayed_cred_worker();
2048+#endif
2049+
2050 if (!test_thread_flag(TIF_SYSCALL_TRACE))
2051 return scno;
2052
2053diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
2054index da1d1aa..5aaf182 100644
2055--- a/arch/arm/kernel/setup.c
2056+++ b/arch/arm/kernel/setup.c
2057@@ -99,19 +99,19 @@ EXPORT_SYMBOL(elf_hwcap);
2058
2059
2060 #ifdef MULTI_CPU
2061-struct processor processor __read_mostly;
2062+struct processor processor;
2063 #endif
2064 #ifdef MULTI_TLB
2065-struct cpu_tlb_fns cpu_tlb __read_mostly;
2066+struct cpu_tlb_fns cpu_tlb __read_only;
2067 #endif
2068 #ifdef MULTI_USER
2069-struct cpu_user_fns cpu_user __read_mostly;
2070+struct cpu_user_fns cpu_user __read_only;
2071 #endif
2072 #ifdef MULTI_CACHE
2073-struct cpu_cache_fns cpu_cache __read_mostly;
2074+struct cpu_cache_fns cpu_cache __read_only;
2075 #endif
2076 #ifdef CONFIG_OUTER_CACHE
2077-struct outer_cache_fns outer_cache __read_mostly;
2078+struct outer_cache_fns outer_cache __read_only;
2079 EXPORT_SYMBOL(outer_cache);
2080 #endif
2081
2082@@ -455,7 +455,7 @@ static void __init setup_processor(void)
2083 __cpu_architecture = __get_cpu_architecture();
2084
2085 #ifdef MULTI_CPU
2086- processor = *list->proc;
2087+ memcpy((void *)&processor, list->proc, sizeof processor);
2088 #endif
2089 #ifdef MULTI_TLB
2090 cpu_tlb = *list->tlb;
2091diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
2092index fbc8b26..000ded0 100644
2093--- a/arch/arm/kernel/smp.c
2094+++ b/arch/arm/kernel/smp.c
2095@@ -70,7 +70,7 @@ enum ipi_msg_type {
2096
2097 static DECLARE_COMPLETION(cpu_running);
2098
2099-static struct smp_operations smp_ops;
2100+static struct smp_operations smp_ops __read_only;
2101
2102 void __init smp_set_ops(struct smp_operations *ops)
2103 {
2104diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
2105index b0179b8..b54c6c1 100644
2106--- a/arch/arm/kernel/traps.c
2107+++ b/arch/arm/kernel/traps.c
2108@@ -266,6 +266,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2109 static int die_owner = -1;
2110 static unsigned int die_nest_count;
2111
2112+extern void gr_handle_kernel_exploit(void);
2113+
2114 static unsigned long oops_begin(void)
2115 {
2116 int cpu;
2117@@ -308,6 +310,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
2118 panic("Fatal exception in interrupt");
2119 if (panic_on_oops)
2120 panic("Fatal exception");
2121+
2122+ gr_handle_kernel_exploit();
2123+
2124 if (signr)
2125 do_exit(signr);
2126 }
2127diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
2128index 36ff15b..75d9e9d 100644
2129--- a/arch/arm/kernel/vmlinux.lds.S
2130+++ b/arch/arm/kernel/vmlinux.lds.S
2131@@ -8,7 +8,11 @@
2132 #include <asm/thread_info.h>
2133 #include <asm/memory.h>
2134 #include <asm/page.h>
2135-
2136+
2137+#ifdef CONFIG_PAX_KERNEXEC
2138+#include <asm/pgtable.h>
2139+#endif
2140+
2141 #define PROC_INFO \
2142 . = ALIGN(4); \
2143 VMLINUX_SYMBOL(__proc_info_begin) = .; \
2144@@ -90,6 +94,11 @@ SECTIONS
2145 _text = .;
2146 HEAD_TEXT
2147 }
2148+
2149+#ifdef CONFIG_PAX_KERNEXEC
2150+ . = ALIGN(1<<SECTION_SHIFT);
2151+#endif
2152+
2153 .text : { /* Real text segment */
2154 _stext = .; /* Text and read-only data */
2155 __exception_text_start = .;
2156@@ -133,6 +142,10 @@ SECTIONS
2157
2158 _etext = .; /* End of text and rodata section */
2159
2160+#ifdef CONFIG_PAX_KERNEXEC
2161+ . = ALIGN(1<<SECTION_SHIFT);
2162+#endif
2163+
2164 #ifndef CONFIG_XIP_KERNEL
2165 . = ALIGN(PAGE_SIZE);
2166 __init_begin = .;
2167@@ -192,6 +205,11 @@ SECTIONS
2168 . = PAGE_OFFSET + TEXT_OFFSET;
2169 #else
2170 __init_end = .;
2171+
2172+#ifdef CONFIG_PAX_KERNEXEC
2173+ . = ALIGN(1<<SECTION_SHIFT);
2174+#endif
2175+
2176 . = ALIGN(THREAD_SIZE);
2177 __data_loc = .;
2178 #endif
2179diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
2180index 66a477a..bee61d3 100644
2181--- a/arch/arm/lib/copy_from_user.S
2182+++ b/arch/arm/lib/copy_from_user.S
2183@@ -16,7 +16,7 @@
2184 /*
2185 * Prototype:
2186 *
2187- * size_t __copy_from_user(void *to, const void *from, size_t n)
2188+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
2189 *
2190 * Purpose:
2191 *
2192@@ -84,11 +84,11 @@
2193
2194 .text
2195
2196-ENTRY(__copy_from_user)
2197+ENTRY(___copy_from_user)
2198
2199 #include "copy_template.S"
2200
2201-ENDPROC(__copy_from_user)
2202+ENDPROC(___copy_from_user)
2203
2204 .pushsection .fixup,"ax"
2205 .align 0
2206diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
2207index 6ee2f67..d1cce76 100644
2208--- a/arch/arm/lib/copy_page.S
2209+++ b/arch/arm/lib/copy_page.S
2210@@ -10,6 +10,7 @@
2211 * ASM optimised string functions
2212 */
2213 #include <linux/linkage.h>
2214+#include <linux/const.h>
2215 #include <asm/assembler.h>
2216 #include <asm/asm-offsets.h>
2217 #include <asm/cache.h>
2218diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
2219index d066df6..df28194 100644
2220--- a/arch/arm/lib/copy_to_user.S
2221+++ b/arch/arm/lib/copy_to_user.S
2222@@ -16,7 +16,7 @@
2223 /*
2224 * Prototype:
2225 *
2226- * size_t __copy_to_user(void *to, const void *from, size_t n)
2227+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
2228 *
2229 * Purpose:
2230 *
2231@@ -88,11 +88,11 @@
2232 .text
2233
2234 ENTRY(__copy_to_user_std)
2235-WEAK(__copy_to_user)
2236+WEAK(___copy_to_user)
2237
2238 #include "copy_template.S"
2239
2240-ENDPROC(__copy_to_user)
2241+ENDPROC(___copy_to_user)
2242 ENDPROC(__copy_to_user_std)
2243
2244 .pushsection .fixup,"ax"
2245diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
2246index 0dc5385..45833ef 100644
2247--- a/arch/arm/lib/delay.c
2248+++ b/arch/arm/lib/delay.c
2249@@ -28,12 +28,14 @@
2250 /*
2251 * Default to the loop-based delay implementation.
2252 */
2253-struct arm_delay_ops arm_delay_ops = {
2254+static struct arm_delay_ops arm_loop_delay_ops = {
2255 .delay = __loop_delay,
2256 .const_udelay = __loop_const_udelay,
2257 .udelay = __loop_udelay,
2258 };
2259
2260+struct arm_delay_ops *arm_delay_ops __read_only = &arm_loop_delay_ops;
2261+
2262 static const struct delay_timer *delay_timer;
2263 static bool delay_calibrated;
2264
2265@@ -67,6 +69,12 @@ static void __timer_udelay(unsigned long usecs)
2266 __timer_const_udelay(usecs * UDELAY_MULT);
2267 }
2268
2269+static struct arm_delay_ops arm_timer_delay_ops = {
2270+ .delay = __timer_delay,
2271+ .const_udelay = __timer_const_udelay,
2272+ .udelay = __timer_udelay,
2273+};
2274+
2275 void __init register_current_timer_delay(const struct delay_timer *timer)
2276 {
2277 if (!delay_calibrated) {
2278@@ -74,9 +82,7 @@ void __init register_current_timer_delay(const struct delay_timer *timer)
2279 delay_timer = timer;
2280 lpj_fine = timer->freq / HZ;
2281 loops_per_jiffy = lpj_fine;
2282- arm_delay_ops.delay = __timer_delay;
2283- arm_delay_ops.const_udelay = __timer_const_udelay;
2284- arm_delay_ops.udelay = __timer_udelay;
2285+ arm_delay_ops = &arm_timer_delay_ops;
2286 delay_calibrated = true;
2287 } else {
2288 pr_info("Ignoring duplicate/late registration of read_current_timer delay\n");
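
The rework here is a recurring grsecurity pattern: a global ops structure whose function pointers were patched at runtime becomes a set of fully initialized static instances selected through a single pointer, so the function pointers themselves never sit in writable memory and the selector can be marked __read_only, as arm_delay_ops is above. The same idea reduced to its essentials (all names illustrative):

struct ops {
        void (*run)(void);
};

static void run_loop(void)  { /* ... */ }
static void run_timer(void) { /* ... */ }

/* one immutable instance per backend */
static struct ops loop_ops  = { .run = run_loop  };
static struct ops timer_ops = { .run = run_timer };

/* switching backends is one pointer store, not several
 * function-pointer writes into a writable struct */
static struct ops *cur_ops = &loop_ops;

static void select_timer_backend(void)
{
        cur_ops = &timer_ops;
}

The clk_gate_fn_ops change in the mach-kirkwood hunk just below applies the same treatment.
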
2289diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
2290index 025f742..8432b08 100644
2291--- a/arch/arm/lib/uaccess_with_memcpy.c
2292+++ b/arch/arm/lib/uaccess_with_memcpy.c
2293@@ -104,7 +104,7 @@ out:
2294 }
2295
2296 unsigned long
2297-__copy_to_user(void __user *to, const void *from, unsigned long n)
2298+___copy_to_user(void __user *to, const void *from, unsigned long n)
2299 {
2300 /*
2301 * This test is stubbed out of the main function above to keep
2302diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
2303index 2c6c218..2b87c2d 100644
2304--- a/arch/arm/mach-kirkwood/common.c
2305+++ b/arch/arm/mach-kirkwood/common.c
2306@@ -150,7 +150,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
2307 clk_gate_ops.disable(hw);
2308 }
2309
2310-static struct clk_ops clk_gate_fn_ops;
2311+static int clk_gate_fn_is_enabled(struct clk_hw *hw)
2312+{
2313+ return clk_gate_ops.is_enabled(hw);
2314+}
2315+
2316+static struct clk_ops clk_gate_fn_ops = {
2317+ .enable = clk_gate_fn_enable,
2318+ .disable = clk_gate_fn_disable,
2319+ .is_enabled = clk_gate_fn_is_enabled,
2320+};
2321
2322 static struct clk __init *clk_register_gate_fn(struct device *dev,
2323 const char *name,
2324@@ -184,14 +193,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
2325 gate_fn->fn_en = fn_en;
2326 gate_fn->fn_dis = fn_dis;
2327
2328- /* ops is the gate ops, but with our enable/disable functions */
2329- if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
2330- clk_gate_fn_ops.disable != clk_gate_fn_disable) {
2331- clk_gate_fn_ops = clk_gate_ops;
2332- clk_gate_fn_ops.enable = clk_gate_fn_enable;
2333- clk_gate_fn_ops.disable = clk_gate_fn_disable;
2334- }
2335-
2336 clk = clk_register(dev, &gate_fn->gate.hw);
2337
2338 if (IS_ERR(clk))
2339diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
2340index d95f727..12f10dd 100644
2341--- a/arch/arm/mach-omap2/board-n8x0.c
2342+++ b/arch/arm/mach-omap2/board-n8x0.c
2343@@ -589,7 +589,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
2344 }
2345 #endif
2346
2347-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
2348+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
2349 .late_init = n8x0_menelaus_late_init,
2350 };
2351
2352diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
2353index 87cc6d0..fd4f248 100644
2354--- a/arch/arm/mach-omap2/omap_hwmod.c
2355+++ b/arch/arm/mach-omap2/omap_hwmod.c
2356@@ -189,10 +189,10 @@ struct omap_hwmod_soc_ops {
2357 int (*is_hardreset_asserted)(struct omap_hwmod *oh,
2358 struct omap_hwmod_rst_info *ohri);
2359 int (*init_clkdm)(struct omap_hwmod *oh);
2360-};
2361+} __no_const;
2362
2363 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
2364-static struct omap_hwmod_soc_ops soc_ops;
2365+static struct omap_hwmod_soc_ops soc_ops __read_only;
2366
2367 /* omap_hwmod_list contains all registered struct omap_hwmods */
2368 static LIST_HEAD(omap_hwmod_list);
2369diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
2370index 5dbf13f..9be36fd 100644
2371--- a/arch/arm/mm/fault.c
2372+++ b/arch/arm/mm/fault.c
2373@@ -25,6 +25,7 @@
2374 #include <asm/system_misc.h>
2375 #include <asm/system_info.h>
2376 #include <asm/tlbflush.h>
2377+#include <asm/sections.h>
2378
2379 #include "fault.h"
2380
2381@@ -138,6 +139,19 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
2382 if (fixup_exception(regs))
2383 return;
2384
2385+#ifdef CONFIG_PAX_KERNEXEC
2386+ if (fsr & FSR_WRITE) {
2387+ if (((unsigned long)_stext <= addr && addr < init_mm.end_code) || (MODULES_VADDR <= addr && addr < MODULES_END)) {
2388+ if (current->signal->curr_ip)
2389+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
2390+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
2391+ else
2392+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
2393+ current->comm, task_pid_nr(current), current_uid(), current_euid());
2394+ }
2395+ }
2396+#endif
2397+
2398 /*
2399 * No handler, we'll have to terminate things with extreme prejudice.
2400 */
2401@@ -174,6 +188,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
2402 }
2403 #endif
2404
2405+#ifdef CONFIG_PAX_PAGEEXEC
2406+ if (fsr & FSR_LNX_PF) {
2407+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
2408+ do_group_exit(SIGKILL);
2409+ }
2410+#endif
2411+
2412 tsk->thread.address = addr;
2413 tsk->thread.error_code = fsr;
2414 tsk->thread.trap_no = 14;
2415@@ -398,6 +419,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
2416 }
2417 #endif /* CONFIG_MMU */
2418
2419+#ifdef CONFIG_PAX_PAGEEXEC
2420+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2421+{
2422+ long i;
2423+
2424+ printk(KERN_ERR "PAX: bytes at PC: ");
2425+ for (i = 0; i < 20; i++) {
2426+ unsigned char c;
2427+ if (get_user(c, (__force unsigned char __user *)pc+i))
2428+ printk(KERN_CONT "?? ");
2429+ else
2430+ printk(KERN_CONT "%02x ", c);
2431+ }
2432+ printk("\n");
2433+
2434+ printk(KERN_ERR "PAX: bytes at SP-4: ");
2435+ for (i = -1; i < 20; i++) {
2436+ unsigned long c;
2437+ if (get_user(c, (__force unsigned long __user *)sp+i))
2438+ printk(KERN_CONT "???????? ");
2439+ else
2440+ printk(KERN_CONT "%08lx ", c);
2441+ }
2442+ printk("\n");
2443+}
2444+#endif
2445+
2446 /*
2447 * First Level Translation Fault Handler
2448 *
2449@@ -575,12 +623,41 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
2450 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
2451 struct siginfo info;
2452
2453+#ifdef CONFIG_PAX_KERNEXEC
2454+ if (!user_mode(regs) && is_xn_fault(ifsr)) {
2455+ if (current->signal->curr_ip)
2456+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n",
2457+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid(),
2458+ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
2459+ else
2460+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n",
2461+ current->comm, task_pid_nr(current), current_uid(), current_euid(),
2462+ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
2463+ goto die;
2464+ }
2465+#endif
2466+
2467+#ifdef CONFIG_PAX_REFCOUNT
2468+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
2469+ unsigned int bkpt;
2470+
2471+ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
2472+ current->thread.error_code = ifsr;
2473+ current->thread.trap_no = 0;
2474+ pax_report_refcount_overflow(regs);
2475+ fixup_exception(regs);
2476+ return;
2477+ }
2478+ }
2479+#endif
2480+
2481 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
2482 return;
2483
2484 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
2485 inf->name, ifsr, addr);
2486
2487+die:
2488 info.si_signo = inf->sig;
2489 info.si_errno = 0;
2490 info.si_code = inf->code;
2491diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
2492index cf08bdf..f1a0383 100644
2493--- a/arch/arm/mm/fault.h
2494+++ b/arch/arm/mm/fault.h
2495@@ -3,6 +3,7 @@
2496
2497 /*
2498 * Fault status register encodings. We steal bit 31 for our own purposes.
2499+ * Set when the FSR value is from an instruction fault.
2500 */
2501 #define FSR_LNX_PF (1 << 31)
2502 #define FSR_WRITE (1 << 11)
2503@@ -22,6 +23,12 @@ static inline int fsr_fs(unsigned int fsr)
2504 }
2505 #endif
2506
2507+/* valid for LPAE and !LPAE */
2508+static inline int is_xn_fault(unsigned int fsr)
2509+{
2510+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
2511+}
2512+
2513 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
2514 unsigned long search_exception_table(unsigned long addr);
2515
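
is_xn_fault() matches fault-status codes of the form 0b0011xx, i.e. 0x0c-0x0f: on LPAE these are the level 0-3 permission faults, and on the short-descriptor format the set covers the section/page permission faults raised when the CPU fetches from XN memory, which is what the KERNEXEC branch in do_PrefetchAbort() keys on. A trivial standalone enumeration of the mask:

#include <stdio.h>

int main(void)
{
        for (unsigned int fs = 0; fs < 0x40; fs++)
                if ((fs & 0x3c) == 0xc)
                        printf("fs=0x%02x matches\n", fs);
        return 0;       /* prints 0x0c, 0x0d, 0x0e, 0x0f */
}
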
2516diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
2517index ad722f1..46b670e 100644
2518--- a/arch/arm/mm/init.c
2519+++ b/arch/arm/mm/init.c
2520@@ -734,9 +734,43 @@ void __init mem_init(void)
2521
2522 void free_initmem(void)
2523 {
2524+
2525+#ifdef CONFIG_PAX_KERNEXEC
2526+ unsigned long addr;
2527+ pgd_t *pgd;
2528+ pud_t *pud;
2529+ pmd_t *pmd;
2530+#endif
2531+
2532 #ifdef CONFIG_HAVE_TCM
2533 extern char __tcm_start, __tcm_end;
2534+#endif
2535
2536+#ifdef CONFIG_PAX_KERNEXEC
2537+	/* make page tables, etc. before .text NX */
2538+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += PMD_SIZE) {
2539+ pgd = pgd_offset_k(addr);
2540+ pud = pud_offset(pgd, addr);
2541+ pmd = pmd_offset(pud, addr);
2542+ __pmd_update(pmd, PMD_SECT_XN);
2543+ }
2544+ /* make init NX */
2545+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += PMD_SIZE) {
2546+ pgd = pgd_offset_k(addr);
2547+ pud = pud_offset(pgd, addr);
2548+ pmd = pmd_offset(pud, addr);
2549+ __pmd_update(pmd, PMD_SECT_XN);
2550+ }
2551+ /* make kernel code/rodata read-only */
2552+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += PMD_SIZE) {
2553+ pgd = pgd_offset_k(addr);
2554+ pud = pud_offset(pgd, addr);
2555+ pmd = pmd_offset(pud, addr);
2556+ __pmd_update(pmd, PMD_SECT_AP_RDONLY);
2557+ }
2558+#endif
2559+
2560+#ifdef CONFIG_HAVE_TCM
2561 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
2562 totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
2563 __phys_to_pfn(__pa(&__tcm_end)),
2564diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
2565index ce8cb19..b9fe4d7 100644
2566--- a/arch/arm/mm/mmap.c
2567+++ b/arch/arm/mm/mmap.c
2568@@ -72,6 +72,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
2569 unsigned long start_addr;
2570 int do_align = 0;
2571 int aliasing = cache_is_vipt_aliasing();
2572+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
2573
2574 /*
2575 * We only need to do colour alignment if either the I or D
2576@@ -93,6 +94,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
2577 if (len > TASK_SIZE)
2578 return -ENOMEM;
2579
2580+#ifdef CONFIG_PAX_RANDMMAP
2581+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
2582+#endif
2583+
2584 if (addr) {
2585 if (do_align)
2586 addr = COLOUR_ALIGN(addr, pgoff);
2587@@ -100,15 +105,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
2588 addr = PAGE_ALIGN(addr);
2589
2590 vma = find_vma(mm, addr);
2591- if (TASK_SIZE - len >= addr &&
2592- (!vma || addr + len <= vma->vm_start))
2593+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
2594 return addr;
2595 }
2596 if (len > mm->cached_hole_size) {
2597- start_addr = addr = mm->free_area_cache;
2598+ start_addr = addr = mm->free_area_cache;
2599 } else {
2600- start_addr = addr = mm->mmap_base;
2601- mm->cached_hole_size = 0;
2602+ start_addr = addr = mm->mmap_base;
2603+ mm->cached_hole_size = 0;
2604 }
2605
2606 full_search:
2607@@ -124,14 +128,14 @@ full_search:
2608 * Start a new search - just in case we missed
2609 * some holes.
2610 */
2611- if (start_addr != TASK_UNMAPPED_BASE) {
2612- start_addr = addr = TASK_UNMAPPED_BASE;
2613+ if (start_addr != mm->mmap_base) {
2614+ start_addr = addr = mm->mmap_base;
2615 mm->cached_hole_size = 0;
2616 goto full_search;
2617 }
2618 return -ENOMEM;
2619 }
2620- if (!vma || addr + len <= vma->vm_start) {
2621+ if (check_heap_stack_gap(vma, addr, len, offset)) {
2622 /*
2623 * Remember the place where we stopped the search:
2624 */
2625@@ -156,6 +160,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
2626 unsigned long addr = addr0;
2627 int do_align = 0;
2628 int aliasing = cache_is_vipt_aliasing();
2629+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
2630
2631 /*
2632 * We only need to do colour alignment if either the I or D
2633@@ -175,6 +180,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
2634 return addr;
2635 }
2636
2637+#ifdef CONFIG_PAX_RANDMMAP
2638+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
2639+#endif
2640+
2641 /* requesting a specific address */
2642 if (addr) {
2643 if (do_align)
2644@@ -182,8 +191,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
2645 else
2646 addr = PAGE_ALIGN(addr);
2647 vma = find_vma(mm, addr);
2648- if (TASK_SIZE - len >= addr &&
2649- (!vma || addr + len <= vma->vm_start))
2650+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
2651 return addr;
2652 }
2653
2654@@ -203,7 +211,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
2655 /* make sure it can fit in the remaining address space */
2656 if (addr > len) {
2657 vma = find_vma(mm, addr-len);
2658- if (!vma || addr <= vma->vm_start)
2659+ if (check_heap_stack_gap(vma, addr - len, len, offset))
2660 /* remember the address as a hint for next time */
2661 return (mm->free_area_cache = addr-len);
2662 }
2663@@ -212,17 +220,17 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
2664 goto bottomup;
2665
2666 addr = mm->mmap_base - len;
2667- if (do_align)
2668- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2669
2670 do {
2671+ if (do_align)
2672+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2673 /*
2674 * Lookup failure means no vma is above this address,
2675 * else if new region fits below vma->vm_start,
2676 * return with success:
2677 */
2678 vma = find_vma(mm, addr);
2679- if (!vma || addr+len <= vma->vm_start)
2680+ if (check_heap_stack_gap(vma, addr, len, offset))
2681 /* remember the address as a hint for next time */
2682 return (mm->free_area_cache = addr);
2683
2684@@ -231,10 +239,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
2685 mm->cached_hole_size = vma->vm_start - addr;
2686
2687 /* try just below the current vma->vm_start */
2688- addr = vma->vm_start - len;
2689- if (do_align)
2690- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2691- } while (len < vma->vm_start);
2692+ addr = skip_heap_stack_gap(vma, len, offset);
2693+ } while (!IS_ERR_VALUE(addr));
2694
2695 bottomup:
2696 /*
2697@@ -266,10 +272,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2698
2699 if (mmap_is_legacy()) {
2700 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
2701+
2702+#ifdef CONFIG_PAX_RANDMMAP
2703+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2704+ mm->mmap_base += mm->delta_mmap;
2705+#endif
2706+
2707 mm->get_unmapped_area = arch_get_unmapped_area;
2708 mm->unmap_area = arch_unmap_area;
2709 } else {
2710 mm->mmap_base = mmap_base(random_factor);
2711+
2712+#ifdef CONFIG_PAX_RANDMMAP
2713+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2714+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2715+#endif
2716+
2717 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2718 mm->unmap_area = arch_unmap_area_topdown;
2719 }
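
All of the open-coded "!vma || addr + len <= vma->vm_start" tests become check_heap_stack_gap(), which additionally demands a configurable gap (plus, for thread stacks, the random offset computed by gr_rand_threadstack_offset()) between a candidate mapping and a stack growing toward it. The helper itself is added elsewhere in this patch; a simplified sketch of its shape, under the assumption that only VM_GROWSDOWN neighbours need the extra slack and treating the gap as a byte count:

static bool check_heap_stack_gap(const struct vm_area_struct *vma,
                                 unsigned long addr, unsigned long len,
                                 unsigned long offset)
{
        if (!vma)
                return true;
        /* the old test: the new region must end below the next VMA */
        if (!(vma->vm_flags & VM_GROWSDOWN))
                return addr + len <= vma->vm_start;
        /* stack above: also keep the configured gap plus the
         * per-thread random offset between region and stack */
        return addr + len + sysctl_heap_stack_gap + offset <= vma->vm_start;
}
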
2720diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
2721index 99b47b9..ede7824 100644
2722--- a/arch/arm/mm/mmu.c
2723+++ b/arch/arm/mm/mmu.c
2724@@ -227,16 +227,16 @@ static struct mem_type mem_types[] = {
2725 [MT_UNCACHED] = {
2726 .prot_pte = PROT_PTE_DEVICE,
2727 .prot_l1 = PMD_TYPE_TABLE,
2728- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
2729+ .prot_sect = PROT_SECT_DEVICE | PMD_SECT_XN,
2730 .domain = DOMAIN_IO,
2731 },
2732 [MT_CACHECLEAN] = {
2733- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
2734+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_AP_RDONLY,
2735 .domain = DOMAIN_KERNEL,
2736 },
2737 #ifndef CONFIG_ARM_LPAE
2738 [MT_MINICLEAN] = {
2739- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
2740+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE | PMD_SECT_AP_RDONLY,
2741 .domain = DOMAIN_KERNEL,
2742 },
2743 #endif
2744@@ -258,8 +258,26 @@ static struct mem_type mem_types[] = {
2745 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
2746 .domain = DOMAIN_KERNEL,
2747 },
2748+ [MT_MEMORY_R] = {
2749+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_RDONLY | L_PTE_XN,
2750+ .prot_l1 = PMD_TYPE_TABLE,
2751+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_RDONLY | PMD_SECT_XN,
2752+ .domain = DOMAIN_KERNEL,
2753+ },
2754+ [MT_MEMORY_RW] = {
2755+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN,
2756+ .prot_l1 = PMD_TYPE_TABLE,
2757+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_XN,
2758+ .domain = DOMAIN_KERNEL,
2759+ },
2760+ [MT_MEMORY_RX] = {
2761+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_RDONLY,
2762+ .prot_l1 = PMD_TYPE_TABLE,
2763+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_RDONLY,
2764+ .domain = DOMAIN_KERNEL,
2765+ },
2766 [MT_ROM] = {
2767- .prot_sect = PMD_TYPE_SECT,
2768+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_RDONLY,
2769 .domain = DOMAIN_KERNEL,
2770 },
2771 [MT_MEMORY_NONCACHED] = {
2772@@ -273,7 +291,7 @@ static struct mem_type mem_types[] = {
2773 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
2774 L_PTE_XN,
2775 .prot_l1 = PMD_TYPE_TABLE,
2776- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
2777+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_AP_RDONLY,
2778 .domain = DOMAIN_KERNEL,
2779 },
2780 [MT_MEMORY_ITCM] = {
2781@@ -432,6 +450,8 @@ static void __init build_mem_type_table(void)
2782 * from SVC mode and no access from userspace.
2783 */
2784 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
2785+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
2786+ mem_types[MT_MEMORY_R].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
2787 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
2788 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
2789 #endif
2790@@ -450,6 +470,12 @@ static void __init build_mem_type_table(void)
2791 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
2792 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
2793 mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
2794+ mem_types[MT_MEMORY_R].prot_sect |= PMD_SECT_S;
2795+ mem_types[MT_MEMORY_R].prot_pte |= L_PTE_SHARED;
2796+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
2797+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
2798+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
2799+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
2800 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
2801 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
2802 mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
2803@@ -503,6 +529,12 @@ static void __init build_mem_type_table(void)
2804 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
2805 mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
2806 mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
2807+ mem_types[MT_MEMORY_R].prot_sect |= ecc_mask | cp->pmd;
2808+ mem_types[MT_MEMORY_R].prot_pte |= kern_pgprot;
2809+ mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
2810+ mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
2811+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
2812+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
2813 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
2814 mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
2815 mem_types[MT_ROM].prot_sect |= cp->pmd;
2816@@ -1198,8 +1230,37 @@ static void __init map_lowmem(void)
2817 map.pfn = __phys_to_pfn(start);
2818 map.virtual = __phys_to_virt(start);
2819 map.length = end - start;
2820- map.type = MT_MEMORY;
2821
2822+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
2823+ struct map_desc kernel;
2824+ struct map_desc initmap;
2825+
2826+ /* when freeing initmem we will make this RW */
2827+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
2828+ initmap.virtual = (unsigned long)__init_begin;
2829+ initmap.length = _sdata - __init_begin;
2830+ initmap.type = MT_MEMORY;
2831+ create_mapping(&initmap);
2832+
2833+ /* when freeing initmem we will make this RX */
2834+ kernel.pfn = __phys_to_pfn(__pa(_stext));
2835+ kernel.virtual = (unsigned long)_stext;
2836+ kernel.length = __init_begin - _stext;
2837+ kernel.type = MT_MEMORY;
2838+ create_mapping(&kernel);
2839+
2840+ if (map.virtual < (unsigned long)_stext) {
2841+ map.length = (unsigned long)_stext - map.virtual;
2842+ map.type = MT_MEMORY;
2843+ create_mapping(&map);
2844+ }
2845+
2846+ map.pfn = __phys_to_pfn(__pa(_sdata));
2847+ map.virtual = (unsigned long)_sdata;
2848+ map.length = end - __pa(_sdata);
2849+ }
2850+
2851+ map.type = MT_MEMORY_RW;
2852 create_mapping(&map);
2853 }
2854 }
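
The net effect of the three new memory types, reading off the prot bits set above (shareability and ecc adjustments later in the hunk aside):

	type		effective permissions
	MT_MEMORY_R	read-only, non-executable
	MT_MEMORY_RW	read-write, non-executable
	MT_MEMORY_RX	read-only, executable

map_lowmem() then maps the kernel image itself as plain MT_MEMORY so early boot can still run and relocate it, maps everything from _sdata up as MT_MEMORY_RW, and the free_initmem() changes in the mm/init.c hunk above tighten .text/.rodata to executable/read-only once init memory is released.
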
2855diff --git a/arch/arm/plat-orion/include/plat/addr-map.h b/arch/arm/plat-orion/include/plat/addr-map.h
2856index ec63e4a..62aa5f1d 100644
2857--- a/arch/arm/plat-orion/include/plat/addr-map.h
2858+++ b/arch/arm/plat-orion/include/plat/addr-map.h
2859@@ -26,7 +26,7 @@ struct orion_addr_map_cfg {
2860 value in bridge_virt_base */
2861 void __iomem *(*win_cfg_base) (const struct orion_addr_map_cfg *cfg,
2862 const int win);
2863-};
2864+} __no_const;
2865
2866 /*
2867 * Information needed to setup one address mapping.
2868diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
2869index f5144cd..71f6d1f 100644
2870--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
2871+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
2872@@ -47,7 +47,7 @@ struct samsung_dma_ops {
2873 int (*started)(unsigned ch);
2874 int (*flush)(unsigned ch);
2875 int (*stop)(unsigned ch);
2876-};
2877+} __no_const;
2878
2879 extern void *samsung_dmadev_get_ops(void);
2880 extern void *s3c_dma_get_ops(void);
2881diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
2882index c3a58a1..78fbf54 100644
2883--- a/arch/avr32/include/asm/cache.h
2884+++ b/arch/avr32/include/asm/cache.h
2885@@ -1,8 +1,10 @@
2886 #ifndef __ASM_AVR32_CACHE_H
2887 #define __ASM_AVR32_CACHE_H
2888
2889+#include <linux/const.h>
2890+
2891 #define L1_CACHE_SHIFT 5
2892-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2893+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2894
2895 /*
2896 * Memory returned by kmalloc() may be used for DMA, so we must make
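
All of the cache.h hunks in this patch make the same change: derive L1_CACHE_BYTES from the shift via _AC(1,UL), so the constant has unsigned long type in C instead of plain int while remaining usable from assembly, where a UL suffix would not parse. For reference, _AC() as defined in linux/const.h:

#ifdef __ASSEMBLY__
#define _AC(X,Y)        X
#else
#define __AC(X,Y)       (X##Y)
#define _AC(X,Y)        __AC(X,Y)
#endif
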
2897diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
2898index e2c3287..6c4f98c 100644
2899--- a/arch/avr32/include/asm/elf.h
2900+++ b/arch/avr32/include/asm/elf.h
2901@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
2902 the loader. We need to make sure that it is out of the way of the program
2903 that it will "exec", and that there is sufficient room for the brk. */
2904
2905-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
2906+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2907
2908+#ifdef CONFIG_PAX_ASLR
2909+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
2910+
2911+#define PAX_DELTA_MMAP_LEN 15
2912+#define PAX_DELTA_STACK_LEN 15
2913+#endif
2914
2915 /* This yields a mask that user programs can use to figure out what
2916 instruction set this CPU supports. This could be done in user space,
2917diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
2918index 479330b..53717a8 100644
2919--- a/arch/avr32/include/asm/kmap_types.h
2920+++ b/arch/avr32/include/asm/kmap_types.h
2921@@ -2,9 +2,9 @@
2922 #define __ASM_AVR32_KMAP_TYPES_H
2923
2924 #ifdef CONFIG_DEBUG_HIGHMEM
2925-# define KM_TYPE_NR 29
2926+# define KM_TYPE_NR 30
2927 #else
2928-# define KM_TYPE_NR 14
2929+# define KM_TYPE_NR 15
2930 #endif
2931
2932 #endif /* __ASM_AVR32_KMAP_TYPES_H */
2933diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
2934index b2f2d2d..d1c85cb 100644
2935--- a/arch/avr32/mm/fault.c
2936+++ b/arch/avr32/mm/fault.c
2937@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
2938
2939 int exception_trace = 1;
2940
2941+#ifdef CONFIG_PAX_PAGEEXEC
2942+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2943+{
2944+ unsigned long i;
2945+
2946+ printk(KERN_ERR "PAX: bytes at PC: ");
2947+ for (i = 0; i < 20; i++) {
2948+ unsigned char c;
2949+			printk(KERN_CONT "?? ");
2950+ printk(KERN_CONT "???????? ");
2951+ else
2952+ printk(KERN_CONT "%02x ", c);
2953+ }
2954+ printk("\n");
2955+}
2956+#endif
2957+
2958 /*
2959 * This routine handles page faults. It determines the address and the
2960 * problem, and then passes it off to one of the appropriate routines.
2961@@ -174,6 +191,16 @@ bad_area:
2962 up_read(&mm->mmap_sem);
2963
2964 if (user_mode(regs)) {
2965+
2966+#ifdef CONFIG_PAX_PAGEEXEC
2967+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2968+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
2969+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
2970+ do_group_exit(SIGKILL);
2971+ }
2972+ }
2973+#endif
2974+
2975 if (exception_trace && printk_ratelimit())
2976 printk("%s%s[%d]: segfault at %08lx pc %08lx "
2977 "sp %08lx ecr %lu\n",
2978diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
2979index 568885a..f8008df 100644
2980--- a/arch/blackfin/include/asm/cache.h
2981+++ b/arch/blackfin/include/asm/cache.h
2982@@ -7,6 +7,7 @@
2983 #ifndef __ARCH_BLACKFIN_CACHE_H
2984 #define __ARCH_BLACKFIN_CACHE_H
2985
2986+#include <linux/const.h>
2987 #include <linux/linkage.h> /* for asmlinkage */
2988
2989 /*
2990@@ -14,7 +15,7 @@
2991 * Blackfin loads 32 bytes for cache
2992 */
2993 #define L1_CACHE_SHIFT 5
2994-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2995+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2996 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2997
2998 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2999diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
3000index aea2718..3639a60 100644
3001--- a/arch/cris/include/arch-v10/arch/cache.h
3002+++ b/arch/cris/include/arch-v10/arch/cache.h
3003@@ -1,8 +1,9 @@
3004 #ifndef _ASM_ARCH_CACHE_H
3005 #define _ASM_ARCH_CACHE_H
3006
3007+#include <linux/const.h>
3008 /* Etrax 100LX have 32-byte cache-lines. */
3009-#define L1_CACHE_BYTES 32
3010 #define L1_CACHE_SHIFT 5
3011+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3012
3013 #endif /* _ASM_ARCH_CACHE_H */
3014diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
3015index 7caf25d..ee65ac5 100644
3016--- a/arch/cris/include/arch-v32/arch/cache.h
3017+++ b/arch/cris/include/arch-v32/arch/cache.h
3018@@ -1,11 +1,12 @@
3019 #ifndef _ASM_CRIS_ARCH_CACHE_H
3020 #define _ASM_CRIS_ARCH_CACHE_H
3021
3022+#include <linux/const.h>
3023 #include <arch/hwregs/dma.h>
3024
3025 /* A cache-line is 32 bytes. */
3026-#define L1_CACHE_BYTES 32
3027 #define L1_CACHE_SHIFT 5
3028+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3029
3030 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
3031
3032diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
3033index b86329d..6709906 100644
3034--- a/arch/frv/include/asm/atomic.h
3035+++ b/arch/frv/include/asm/atomic.h
3036@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
3037 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
3038 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
3039
3040+#define atomic64_read_unchecked(v) atomic64_read(v)
3041+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3042+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3043+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3044+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3045+#define atomic64_inc_unchecked(v) atomic64_inc(v)
3046+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3047+#define atomic64_dec_unchecked(v) atomic64_dec(v)
3048+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3049+
3050 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
3051 {
3052 int c, old;
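
The *_unchecked aliases exist for PAX_REFCOUNT: on instrumented architectures the regular atomic ops trap on overflow, treating a wrapped reference count as an exploit attempt, while the _unchecked variants opt out for counters where wrapping is harmless. frv carries no such instrumentation, so here they simply alias the normal ops and keep common code that uses the _unchecked API compiling. Usage sketch (obj and stats are illustrative names):

/* reference count: overflow is a bug worth trapping on */
atomic_inc(&obj->refcnt);

/* byte counter: wrapping is benign, skip the overflow check */
atomic64_add_unchecked(skb->len, &stats->rx_bytes);
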
3053diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
3054index 2797163..c2a401d 100644
3055--- a/arch/frv/include/asm/cache.h
3056+++ b/arch/frv/include/asm/cache.h
3057@@ -12,10 +12,11 @@
3058 #ifndef __ASM_CACHE_H
3059 #define __ASM_CACHE_H
3060
3061+#include <linux/const.h>
3062
3063 /* bytes per L1 cache line */
3064 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
3065-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3066+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3067
3068 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
3069 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
3070diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
3071index 43901f2..0d8b865 100644
3072--- a/arch/frv/include/asm/kmap_types.h
3073+++ b/arch/frv/include/asm/kmap_types.h
3074@@ -2,6 +2,6 @@
3075 #ifndef _ASM_KMAP_TYPES_H
3076 #define _ASM_KMAP_TYPES_H
3077
3078-#define KM_TYPE_NR 17
3079+#define KM_TYPE_NR 18
3080
3081 #endif
3082diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
3083index 385fd30..3aaf4fe 100644
3084--- a/arch/frv/mm/elf-fdpic.c
3085+++ b/arch/frv/mm/elf-fdpic.c
3086@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3087 {
3088 struct vm_area_struct *vma;
3089 unsigned long limit;
3090+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
3091
3092 if (len > TASK_SIZE)
3093 return -ENOMEM;
3094@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3095 if (addr) {
3096 addr = PAGE_ALIGN(addr);
3097 vma = find_vma(current->mm, addr);
3098- if (TASK_SIZE - len >= addr &&
3099- (!vma || addr + len <= vma->vm_start))
3100+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
3101 goto success;
3102 }
3103
3104@@ -89,7 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3105 for (; vma; vma = vma->vm_next) {
3106 if (addr > limit)
3107 break;
3108- if (addr + len <= vma->vm_start)
3109+ if (check_heap_stack_gap(vma, addr, len, offset))
3110 goto success;
3111 addr = vma->vm_end;
3112 }
3113@@ -104,7 +104,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3114 for (; vma; vma = vma->vm_next) {
3115 if (addr > limit)
3116 break;
3117- if (addr + len <= vma->vm_start)
3118+ if (check_heap_stack_gap(vma, addr, len, offset))
3119 goto success;
3120 addr = vma->vm_end;
3121 }
3122diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
3123index f4ca594..adc72fd6 100644
3124--- a/arch/hexagon/include/asm/cache.h
3125+++ b/arch/hexagon/include/asm/cache.h
3126@@ -21,9 +21,11 @@
3127 #ifndef __ASM_CACHE_H
3128 #define __ASM_CACHE_H
3129
3130+#include <linux/const.h>
3131+
3132 /* Bytes per L1 cache line */
3133-#define L1_CACHE_SHIFT (5)
3134-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3135+#define L1_CACHE_SHIFT 5
3136+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3137
3138 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
3139 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
3140diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
3141index 6e6fe18..a6ae668 100644
3142--- a/arch/ia64/include/asm/atomic.h
3143+++ b/arch/ia64/include/asm/atomic.h
3144@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
3145 #define atomic64_inc(v) atomic64_add(1, (v))
3146 #define atomic64_dec(v) atomic64_sub(1, (v))
3147
3148+#define atomic64_read_unchecked(v) atomic64_read(v)
3149+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3150+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3151+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3152+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3153+#define atomic64_inc_unchecked(v) atomic64_inc(v)
3154+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3155+#define atomic64_dec_unchecked(v) atomic64_dec(v)
3156+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3157+
3158 /* Atomic operations are already serializing */
3159 #define smp_mb__before_atomic_dec() barrier()
3160 #define smp_mb__after_atomic_dec() barrier()
3161diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
3162index 988254a..e1ee885 100644
3163--- a/arch/ia64/include/asm/cache.h
3164+++ b/arch/ia64/include/asm/cache.h
3165@@ -1,6 +1,7 @@
3166 #ifndef _ASM_IA64_CACHE_H
3167 #define _ASM_IA64_CACHE_H
3168
3169+#include <linux/const.h>
3170
3171 /*
3172 * Copyright (C) 1998-2000 Hewlett-Packard Co
3173@@ -9,7 +10,7 @@
3174
3175 /* Bytes per L1 (data) cache line. */
3176 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
3177-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3178+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3179
3180 #ifdef CONFIG_SMP
3181 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
3182diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
3183index b5298eb..67c6e62 100644
3184--- a/arch/ia64/include/asm/elf.h
3185+++ b/arch/ia64/include/asm/elf.h
3186@@ -42,6 +42,13 @@
3187 */
3188 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
3189
3190+#ifdef CONFIG_PAX_ASLR
3191+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
3192+
3193+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
3194+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
3195+#endif
3196+
3197 #define PT_IA_64_UNWIND 0x70000001
3198
3199 /* IA-64 relocations: */
3200diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
3201index 96a8d92..617a1cf 100644
3202--- a/arch/ia64/include/asm/pgalloc.h
3203+++ b/arch/ia64/include/asm/pgalloc.h
3204@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
3205 pgd_val(*pgd_entry) = __pa(pud);
3206 }
3207
3208+static inline void
3209+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
3210+{
3211+ pgd_populate(mm, pgd_entry, pud);
3212+}
3213+
3214 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
3215 {
3216 return quicklist_alloc(0, GFP_KERNEL, NULL);
3217@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
3218 pud_val(*pud_entry) = __pa(pmd);
3219 }
3220
3221+static inline void
3222+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
3223+{
3224+ pud_populate(mm, pud_entry, pmd);
3225+}
3226+
3227 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
3228 {
3229 return quicklist_alloc(0, GFP_KERNEL, NULL);
3230diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
3231index 815810c..d60bd4c 100644
3232--- a/arch/ia64/include/asm/pgtable.h
3233+++ b/arch/ia64/include/asm/pgtable.h
3234@@ -12,7 +12,7 @@
3235 * David Mosberger-Tang <davidm@hpl.hp.com>
3236 */
3237
3238-
3239+#include <linux/const.h>
3240 #include <asm/mman.h>
3241 #include <asm/page.h>
3242 #include <asm/processor.h>
3243@@ -142,6 +142,17 @@
3244 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
3245 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
3246 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
3247+
3248+#ifdef CONFIG_PAX_PAGEEXEC
3249+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
3250+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
3251+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
3252+#else
3253+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3254+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3255+# define PAGE_COPY_NOEXEC PAGE_COPY
3256+#endif
3257+
3258 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
3259 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
3260 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
3261diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
3262index 54ff557..70c88b7 100644
3263--- a/arch/ia64/include/asm/spinlock.h
3264+++ b/arch/ia64/include/asm/spinlock.h
3265@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
3266 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
3267
3268 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
3269- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
3270+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
3271 }
3272
3273 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
3274diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
3275index 449c8c0..50cdf87 100644
3276--- a/arch/ia64/include/asm/uaccess.h
3277+++ b/arch/ia64/include/asm/uaccess.h
3278@@ -42,6 +42,8 @@
3279 #include <asm/pgtable.h>
3280 #include <asm/io.h>
3281
3282+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3283+
3284 /*
3285 * For historical reasons, the following macros are grossly misnamed:
3286 */
3287@@ -240,12 +242,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
3288 static inline unsigned long
3289 __copy_to_user (void __user *to, const void *from, unsigned long count)
3290 {
3291+ if (count > INT_MAX)
3292+ return count;
3293+
3294+ if (!__builtin_constant_p(count))
3295+ check_object_size(from, count, true);
3296+
3297 return __copy_user(to, (__force void __user *) from, count);
3298 }
3299
3300 static inline unsigned long
3301 __copy_from_user (void *to, const void __user *from, unsigned long count)
3302 {
3303+ if (count > INT_MAX)
3304+ return count;
3305+
3306+ if (!__builtin_constant_p(count))
3307+ check_object_size(to, count, false);
3308+
3309 return __copy_user((__force void __user *) to, from, count);
3310 }
3311
3312@@ -255,10 +269,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
3313 ({ \
3314 void __user *__cu_to = (to); \
3315 const void *__cu_from = (from); \
3316- long __cu_len = (n); \
3317+ unsigned long __cu_len = (n); \
3318 \
3319- if (__access_ok(__cu_to, __cu_len, get_fs())) \
3320+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
3321+ if (!__builtin_constant_p(n)) \
3322+ check_object_size(__cu_from, __cu_len, true); \
3323 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
3324+ } \
3325 __cu_len; \
3326 })
3327
3328@@ -266,11 +283,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
3329 ({ \
3330 void *__cu_to = (to); \
3331 const void __user *__cu_from = (from); \
3332- long __cu_len = (n); \
3333+ unsigned long __cu_len = (n); \
3334 \
3335 __chk_user_ptr(__cu_from); \
3336- if (__access_ok(__cu_from, __cu_len, get_fs())) \
3337+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
3338+ if (!__builtin_constant_p(n)) \
3339+ check_object_size(__cu_to, __cu_len, false); \
3340 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
3341+ } \
3342 __cu_len; \
3343 })
3344
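
The count > INT_MAX tests are ia64's version of the (long)n < 0 guard added to ARM's uaccess.h earlier in this patch: a length whose high bits are set, classically an underflowed subtraction, is bounced back unconsumed instead of reaching __copy_user(). The failure mode being defended against, schematically (names illustrative):

size_t n = hdr_len - body_len;  /* underflows if body_len > hdr_len */

/* with n close to SIZE_MAX the guard returns n immediately, so the
 * caller sees "nothing copied" rather than a wild kernel copy */
if (copy_to_user(ubuf, kbuf, n))
        return -EFAULT;
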
3345diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
3346index 24603be..948052d 100644
3347--- a/arch/ia64/kernel/module.c
3348+++ b/arch/ia64/kernel/module.c
3349@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
3350 void
3351 module_free (struct module *mod, void *module_region)
3352 {
3353- if (mod && mod->arch.init_unw_table &&
3354- module_region == mod->module_init) {
3355+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
3356 unw_remove_unwind_table(mod->arch.init_unw_table);
3357 mod->arch.init_unw_table = NULL;
3358 }
3359@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
3360 }
3361
3362 static inline int
3363+in_init_rx (const struct module *mod, uint64_t addr)
3364+{
3365+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
3366+}
3367+
3368+static inline int
3369+in_init_rw (const struct module *mod, uint64_t addr)
3370+{
3371+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
3372+}
3373+
3374+static inline int
3375 in_init (const struct module *mod, uint64_t addr)
3376 {
3377- return addr - (uint64_t) mod->module_init < mod->init_size;
3378+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
3379+}
3380+
3381+static inline int
3382+in_core_rx (const struct module *mod, uint64_t addr)
3383+{
3384+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
3385+}
3386+
3387+static inline int
3388+in_core_rw (const struct module *mod, uint64_t addr)
3389+{
3390+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
3391 }
3392
3393 static inline int
3394 in_core (const struct module *mod, uint64_t addr)
3395 {
3396- return addr - (uint64_t) mod->module_core < mod->core_size;
3397+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
3398 }
3399
3400 static inline int
3401@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
3402 break;
3403
3404 case RV_BDREL:
3405- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
3406+ if (in_init_rx(mod, val))
3407+ val -= (uint64_t) mod->module_init_rx;
3408+ else if (in_init_rw(mod, val))
3409+ val -= (uint64_t) mod->module_init_rw;
3410+ else if (in_core_rx(mod, val))
3411+ val -= (uint64_t) mod->module_core_rx;
3412+ else if (in_core_rw(mod, val))
3413+ val -= (uint64_t) mod->module_core_rw;
3414 break;
3415
3416 case RV_LTV:
3417@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
3418 * addresses have been selected...
3419 */
3420 uint64_t gp;
3421- if (mod->core_size > MAX_LTOFF)
3422+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
3423 /*
3424 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
3425 * at the end of the module.
3426 */
3427- gp = mod->core_size - MAX_LTOFF / 2;
3428+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
3429 else
3430- gp = mod->core_size / 2;
3431- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
3432+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
3433+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
3434 mod->arch.gp = gp;
3435 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
3436 }
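All the new in_init_rx()/in_init_rw()/in_core_rx()/in_core_rw() helpers use the same `addr - base < size` form. Because the arithmetic is unsigned, an address below `base` wraps around to a huge value and fails the comparison, so a single compare covers both bounds. Runnable illustration:

#include <stdint.h>
#include <stdio.h>

/* single-compare containment test: unsigned wraparound makes the
 * lower-bound check implicit */
static int in_range(uint64_t addr, uint64_t base, uint64_t size)
{
	return addr - base < size;
}

int main(void)
{
	printf("%d\n", in_range(0x1010, 0x1000, 0x100)); /* 1: inside       */
	printf("%d\n", in_range(0x0ff0, 0x1000, 0x100)); /* 0: below base   */
	printf("%d\n", in_range(0x1100, 0x1000, 0x100)); /* 0: one past end */
	return 0;
}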
3437diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
3438index d9439ef..d0cac6b 100644
3439--- a/arch/ia64/kernel/sys_ia64.c
3440+++ b/arch/ia64/kernel/sys_ia64.c
3441@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
3442 unsigned long start_addr, align_mask = PAGE_SIZE - 1;
3443 struct mm_struct *mm = current->mm;
3444 struct vm_area_struct *vma;
3445+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
3446
3447 if (len > RGN_MAP_LIMIT)
3448 return -ENOMEM;
3449@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
3450 if (REGION_NUMBER(addr) == RGN_HPAGE)
3451 addr = 0;
3452 #endif
3453+
3454+#ifdef CONFIG_PAX_RANDMMAP
3455+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3456+ addr = mm->free_area_cache;
3457+ else
3458+#endif
3459+
3460 if (!addr)
3461 addr = mm->free_area_cache;
3462
3463@@ -61,14 +69,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
3464 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
3465 /* At this point: (!vma || addr < vma->vm_end). */
3466 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
3467- if (start_addr != TASK_UNMAPPED_BASE) {
3468+ if (start_addr != mm->mmap_base) {
3469 /* Start a new search --- just in case we missed some holes. */
3470- addr = TASK_UNMAPPED_BASE;
3471+ addr = mm->mmap_base;
3472 goto full_search;
3473 }
3474 return -ENOMEM;
3475 }
3476- if (!vma || addr + len <= vma->vm_start) {
3477+ if (check_heap_stack_gap(vma, addr, len, offset)) {
3478 /* Remember the address where we stopped this search: */
3479 mm->free_area_cache = addr + len;
3480 return addr;
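check_heap_stack_gap() replaces the stock "!vma || addr + len <= vma->vm_start" test throughout these arch_get_unmapped_area() hunks, with gr_rand_threadstack_offset() feeding it a per-mapping random offset for thread stacks. My reading of the predicate, as a runnable toy with made-up field names and an assumed 64KiB default gap:

#include <stdio.h>

struct vma_toy { unsigned long vm_start; int grows_down; };

static unsigned long heap_stack_gap = 64 * 1024;   /* assumed sysctl default */

/* 1 = the [addr, addr+len) candidate is acceptable below this VMA */
static int gap_ok(const struct vma_toy *vma, unsigned long addr,
		  unsigned long len, unsigned long offset)
{
	if (!vma)
		return 1;                       /* nothing above: fits */
	if (addr + len > vma->vm_start)
		return 0;                       /* plain overlap */
	if (vma->grows_down)                    /* stack above: keep a gap */
		return vma->vm_start - (addr + len) >= heap_stack_gap + offset;
	return 1;
}

int main(void)
{
	struct vma_toy stack = { 0x100000, 1 };
	printf("%d\n", gap_ok(&stack, 0xE0000, 0x1000, 0));  /* 1: gap kept  */
	printf("%d\n", gap_ok(&stack, 0xFB000, 0x1000, 0));  /* 0: too close */
	return 0;
}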
3481diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
3482index 0ccb28f..8992469 100644
3483--- a/arch/ia64/kernel/vmlinux.lds.S
3484+++ b/arch/ia64/kernel/vmlinux.lds.S
3485@@ -198,7 +198,7 @@ SECTIONS {
3486 /* Per-cpu data: */
3487 . = ALIGN(PERCPU_PAGE_SIZE);
3488 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
3489- __phys_per_cpu_start = __per_cpu_load;
3490+ __phys_per_cpu_start = per_cpu_load;
3491 /*
3492 * ensure percpu data fits
3493 * into percpu page size
3494diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
3495index 6cf0341..d352594 100644
3496--- a/arch/ia64/mm/fault.c
3497+++ b/arch/ia64/mm/fault.c
3498@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
3499 return pte_present(pte);
3500 }
3501
3502+#ifdef CONFIG_PAX_PAGEEXEC
3503+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3504+{
3505+ unsigned long i;
3506+
3507+ printk(KERN_ERR "PAX: bytes at PC: ");
3508+ for (i = 0; i < 8; i++) {
3509+ unsigned int c;
3510+ if (get_user(c, (unsigned int *)pc+i))
3511+ printk(KERN_CONT "???????? ");
3512+ else
3513+ printk(KERN_CONT "%08x ", c);
3514+ }
3515+ printk("\n");
3516+}
3517+#endif
3518+
3519 # define VM_READ_BIT 0
3520 # define VM_WRITE_BIT 1
3521 # define VM_EXEC_BIT 2
3522@@ -149,8 +166,21 @@ retry:
3523 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
3524 goto bad_area;
3525
3526- if ((vma->vm_flags & mask) != mask)
3527+ if ((vma->vm_flags & mask) != mask) {
3528+
3529+#ifdef CONFIG_PAX_PAGEEXEC
3530+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
3531+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
3532+ goto bad_area;
3533+
3534+ up_read(&mm->mmap_sem);
3535+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
3536+ do_group_exit(SIGKILL);
3537+ }
3538+#endif
3539+
3540 goto bad_area;
3541+ }
3542
3543 /*
3544 * If for any reason at all we couldn't handle the fault, make
3545diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
3546index 5ca674b..127c3cb 100644
3547--- a/arch/ia64/mm/hugetlbpage.c
3548+++ b/arch/ia64/mm/hugetlbpage.c
3549@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
3550 unsigned long pgoff, unsigned long flags)
3551 {
3552 struct vm_area_struct *vmm;
3553+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
3554
3555 if (len > RGN_MAP_LIMIT)
3556 return -ENOMEM;
3557@@ -171,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
3558 /* At this point: (!vmm || addr < vmm->vm_end). */
3559 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
3560 return -ENOMEM;
3561- if (!vmm || (addr + len) <= vmm->vm_start)
3562+ if (check_heap_stack_gap(vmm, addr, len, offset))
3563 return addr;
3564 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
3565 }
3566diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
3567index 082e383..fb7be80 100644
3568--- a/arch/ia64/mm/init.c
3569+++ b/arch/ia64/mm/init.c
3570@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
3571 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
3572 vma->vm_end = vma->vm_start + PAGE_SIZE;
3573 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
3574+
3575+#ifdef CONFIG_PAX_PAGEEXEC
3576+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
3577+ vma->vm_flags &= ~VM_EXEC;
3578+
3579+#ifdef CONFIG_PAX_MPROTECT
3580+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
3581+ vma->vm_flags &= ~VM_MAYEXEC;
3582+#endif
3583+
3584+ }
3585+#endif
3586+
3587 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
3588 down_write(&current->mm->mmap_sem);
3589 if (insert_vm_struct(current->mm, vma)) {
3590diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
3591index 40b3ee9..8c2c112 100644
3592--- a/arch/m32r/include/asm/cache.h
3593+++ b/arch/m32r/include/asm/cache.h
3594@@ -1,8 +1,10 @@
3595 #ifndef _ASM_M32R_CACHE_H
3596 #define _ASM_M32R_CACHE_H
3597
3598+#include <linux/const.h>
3599+
3600 /* L1 cache line size */
3601 #define L1_CACHE_SHIFT 4
3602-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3603+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3604
3605 #endif /* _ASM_M32R_CACHE_H */
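Every cache.h change in this stretch is the same one-liner: pull in <linux/const.h> and derive L1_CACHE_BYTES as (_AC(1,UL) << L1_CACHE_SHIFT). The point of _AC() is that the constant stays usable from assembly, where a UL suffix is a syntax error, while C code gets an unsigned long, so masks like ~(L1_CACHE_BYTES - 1) keep their full width on 64-bit. The definition, paraphrased from include/linux/const.h of this kernel era:

#ifdef __ASSEMBLY__
#define _AC(X,Y)	X		/* asm sees: 1 */
#else
#define __AC(X,Y)	(X##Y)
#define _AC(X,Y)	__AC(X,Y)	/* C sees: (1UL) */
#endif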
3606diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
3607index 82abd15..d95ae5d 100644
3608--- a/arch/m32r/lib/usercopy.c
3609+++ b/arch/m32r/lib/usercopy.c
3610@@ -14,6 +14,9 @@
3611 unsigned long
3612 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
3613 {
3614+ if ((long)n < 0)
3615+ return n;
3616+
3617 prefetch(from);
3618 if (access_ok(VERIFY_WRITE, to, n))
3619 __copy_user(to,from,n);
3620@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
3621 unsigned long
3622 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
3623 {
3624+ if ((long)n < 0)
3625+ return n;
3626+
3627 prefetchw(to);
3628 if (access_ok(VERIFY_READ, from, n))
3629 __copy_user_zeroing(to,from,n);
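m32r takes a different tack from ia64 above: instead of capping the count at INT_MAX it rejects any n whose top bit is set via (long)n < 0. Both guards target the same failure mode, a negative length sign-extended into an unsigned count; a two-line demonstration:

#include <stdio.h>

int main(void)
{
	int err = -14;             /* e.g. -EFAULT leaking into a length */
	unsigned long n = err;     /* implicit conversion sign-extends   */

	printf("n = %lu\n", n);                    /* enormous */
	printf("(long)n < 0: %d\n", (long)n < 0);  /* 1: the guard fires */
	return 0;
}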
3630diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
3631index 0395c51..5f26031 100644
3632--- a/arch/m68k/include/asm/cache.h
3633+++ b/arch/m68k/include/asm/cache.h
3634@@ -4,9 +4,11 @@
3635 #ifndef __ARCH_M68K_CACHE_H
3636 #define __ARCH_M68K_CACHE_H
3637
3638+#include <linux/const.h>
3639+
3640 /* bytes per L1 cache line */
3641 #define L1_CACHE_SHIFT 4
3642-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
3643+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3644
3645 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
3646
3647diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
3648index 4efe96a..60e8699 100644
3649--- a/arch/microblaze/include/asm/cache.h
3650+++ b/arch/microblaze/include/asm/cache.h
3651@@ -13,11 +13,12 @@
3652 #ifndef _ASM_MICROBLAZE_CACHE_H
3653 #define _ASM_MICROBLAZE_CACHE_H
3654
3655+#include <linux/const.h>
3656 #include <asm/registers.h>
3657
3658 #define L1_CACHE_SHIFT 5
3659 /* word-granular cache in microblaze */
3660-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3661+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3662
3663 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3664
3665diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
3666index 01cc6ba..bcb7a5d 100644
3667--- a/arch/mips/include/asm/atomic.h
3668+++ b/arch/mips/include/asm/atomic.h
3669@@ -21,6 +21,10 @@
3670 #include <asm/cmpxchg.h>
3671 #include <asm/war.h>
3672
3673+#ifdef CONFIG_GENERIC_ATOMIC64
3674+#include <asm-generic/atomic64.h>
3675+#endif
3676+
3677 #define ATOMIC_INIT(i) { (i) }
3678
3679 /*
3680@@ -759,6 +763,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3681 */
3682 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
3683
3684+#define atomic64_read_unchecked(v) atomic64_read(v)
3685+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3686+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3687+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3688+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3689+#define atomic64_inc_unchecked(v) atomic64_inc(v)
3690+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3691+#define atomic64_dec_unchecked(v) atomic64_dec(v)
3692+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3693+
3694 #endif /* CONFIG_64BIT */
3695
3696 /*
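These *_unchecked aliases are the opt-out half of PaX's overflow-checked atomics: on architectures where the feature is implemented, the plain ops trap on signed wrap (catching refcount overflows), and call sites that legitimately wrap use the _unchecked names instead. MIPS gets no checking here, so the aliases simply collapse to the plain ops. A userspace sketch of the intended distinction, using a GCC 5+ builtin where PaX uses inline asm:

#include <limits.h>
#include <stdio.h>

static int atomic_add_checked(int *v, int i)
{
	int sum;

	if (__builtin_add_overflow(*v, i, &sum)) {
		fprintf(stderr, "refcount overflow, value saturated\n");
		return *v;                     /* real code raises a trap too */
	}
	return *v = sum;
}

static unsigned int atomic_add_unchecked(unsigned int *v, unsigned int i)
{
	return *v += i;                        /* plain wrap-around */
}

int main(void)
{
	int a = INT_MAX;
	unsigned int b = UINT_MAX;

	atomic_add_checked(&a, 1);             /* detected, a stays INT_MAX */
	atomic_add_unchecked(&b, 1);           /* silently wraps to 0 */
	printf("a=%d b=%u\n", a, b);
	return 0;
}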
3697diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
3698index b4db69f..8f3b093 100644
3699--- a/arch/mips/include/asm/cache.h
3700+++ b/arch/mips/include/asm/cache.h
3701@@ -9,10 +9,11 @@
3702 #ifndef _ASM_CACHE_H
3703 #define _ASM_CACHE_H
3704
3705+#include <linux/const.h>
3706 #include <kmalloc.h>
3707
3708 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
3709-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3710+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3711
3712 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
3713 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3714diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
3715index 455c0ac..ad65fbe 100644
3716--- a/arch/mips/include/asm/elf.h
3717+++ b/arch/mips/include/asm/elf.h
3718@@ -372,13 +372,16 @@ extern const char *__elf_platform;
3719 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
3720 #endif
3721
3722+#ifdef CONFIG_PAX_ASLR
3723+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
3724+
3725+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
3726+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
3727+#endif
3728+
3729 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
3730 struct linux_binprm;
3731 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
3732 int uses_interp);
3733
3734-struct mm_struct;
3735-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3736-#define arch_randomize_brk arch_randomize_brk
3737-
3738 #endif /* _ASM_ELF_H */
3739diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
3740index c1f6afa..38cc6e9 100644
3741--- a/arch/mips/include/asm/exec.h
3742+++ b/arch/mips/include/asm/exec.h
3743@@ -12,6 +12,6 @@
3744 #ifndef _ASM_EXEC_H
3745 #define _ASM_EXEC_H
3746
3747-extern unsigned long arch_align_stack(unsigned long sp);
3748+#define arch_align_stack(x) ((x) & ~0xfUL)
3749
3750 #endif /* _ASM_EXEC_H */
3751diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
3752index da9bd7d..91aa7ab 100644
3753--- a/arch/mips/include/asm/page.h
3754+++ b/arch/mips/include/asm/page.h
3755@@ -98,7 +98,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
3756 #ifdef CONFIG_CPU_MIPS32
3757 typedef struct { unsigned long pte_low, pte_high; } pte_t;
3758 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
3759- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
3760+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
3761 #else
3762 typedef struct { unsigned long long pte; } pte_t;
3763 #define pte_val(x) ((x).pte)
3764diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
3765index 881d18b..cea38bc 100644
3766--- a/arch/mips/include/asm/pgalloc.h
3767+++ b/arch/mips/include/asm/pgalloc.h
3768@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3769 {
3770 set_pud(pud, __pud((unsigned long)pmd));
3771 }
3772+
3773+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3774+{
3775+ pud_populate(mm, pud, pmd);
3776+}
3777 #endif
3778
3779 /*
3780diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
3781index 18806a5..141ffcf 100644
3782--- a/arch/mips/include/asm/thread_info.h
3783+++ b/arch/mips/include/asm/thread_info.h
3784@@ -110,6 +110,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
3785 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
3786 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
3787 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
3788+/* li takes a 32bit immediate */
3789+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
3790 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
3791
3792 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
3793@@ -125,15 +127,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
3794 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
3795 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
3796 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
3797+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
3798+
3799+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
3800
3801 /* work to do in syscall_trace_leave() */
3802-#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
3803+#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
3804
3805 /* work to do on interrupt/exception return */
3806 #define _TIF_WORK_MASK \
3807 (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)
3808 /* work to do on any return to u-space */
3809-#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT)
3810+#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT | _TIF_GRSEC_SETXID)
3811
3812 #endif /* __KERNEL__ */
3813
3814diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
3815index 9fdd8bc..4bd7f1a 100644
3816--- a/arch/mips/kernel/binfmt_elfn32.c
3817+++ b/arch/mips/kernel/binfmt_elfn32.c
3818@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
3819 #undef ELF_ET_DYN_BASE
3820 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
3821
3822+#ifdef CONFIG_PAX_ASLR
3823+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
3824+
3825+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
3826+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
3827+#endif
3828+
3829 #include <asm/processor.h>
3830 #include <linux/module.h>
3831 #include <linux/elfcore.h>
3832diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
3833index ff44823..97f8906 100644
3834--- a/arch/mips/kernel/binfmt_elfo32.c
3835+++ b/arch/mips/kernel/binfmt_elfo32.c
3836@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
3837 #undef ELF_ET_DYN_BASE
3838 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
3839
3840+#ifdef CONFIG_PAX_ASLR
3841+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
3842+
3843+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
3844+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
3845+#endif
3846+
3847 #include <asm/processor.h>
3848
3849 /*
3850diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
3851index 69b17a9..9db82f9 100644
3852--- a/arch/mips/kernel/process.c
3853+++ b/arch/mips/kernel/process.c
3854@@ -478,15 +478,3 @@ unsigned long get_wchan(struct task_struct *task)
3855 out:
3856 return pc;
3857 }
3858-
3859-/*
3860- * Don't forget that the stack pointer must be aligned on a 8 bytes
3861- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
3862- */
3863-unsigned long arch_align_stack(unsigned long sp)
3864-{
3865- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3866- sp -= get_random_int() & ~PAGE_MASK;
3867-
3868- return sp & ALMASK;
3869-}
3870diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
3871index 4812c6d..2069554 100644
3872--- a/arch/mips/kernel/ptrace.c
3873+++ b/arch/mips/kernel/ptrace.c
3874@@ -528,6 +528,10 @@ static inline int audit_arch(void)
3875 return arch;
3876 }
3877
3878+#ifdef CONFIG_GRKERNSEC_SETXID
3879+extern void gr_delayed_cred_worker(void);
3880+#endif
3881+
3882 /*
3883 * Notification of system call entry/exit
3884 * - triggered by current->work.syscall_trace
3885@@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
3886 /* do the secure computing check first */
3887 secure_computing_strict(regs->regs[2]);
3888
3889+#ifdef CONFIG_GRKERNSEC_SETXID
3890+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3891+ gr_delayed_cred_worker();
3892+#endif
3893+
3894 if (!(current->ptrace & PT_PTRACED))
3895 goto out;
3896
3897diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
3898index 374f66e..1c882a0 100644
3899--- a/arch/mips/kernel/scall32-o32.S
3900+++ b/arch/mips/kernel/scall32-o32.S
3901@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
3902
3903 stack_done:
3904 lw t0, TI_FLAGS($28) # syscall tracing enabled?
3905- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3906+ li t1, _TIF_SYSCALL_WORK
3907 and t0, t1
3908 bnez t0, syscall_trace_entry # -> yes
3909
3910diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
3911index 169de6a..f594a89 100644
3912--- a/arch/mips/kernel/scall64-64.S
3913+++ b/arch/mips/kernel/scall64-64.S
3914@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
3915
3916 sd a3, PT_R26(sp) # save a3 for syscall restarting
3917
3918- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3919+ li t1, _TIF_SYSCALL_WORK
3920 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3921 and t0, t1, t0
3922 bnez t0, syscall_trace_entry
3923diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
3924index 86ec03f..1235baf 100644
3925--- a/arch/mips/kernel/scall64-n32.S
3926+++ b/arch/mips/kernel/scall64-n32.S
3927@@ -53,7 +53,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
3928
3929 sd a3, PT_R26(sp) # save a3 for syscall restarting
3930
3931- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3932+ li t1, _TIF_SYSCALL_WORK
3933 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3934 and t0, t1, t0
3935 bnez t0, n32_syscall_trace_entry
3936diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
3937index 53c2d72..3734584 100644
3938--- a/arch/mips/kernel/scall64-o32.S
3939+++ b/arch/mips/kernel/scall64-o32.S
3940@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
3941 PTR 4b, bad_stack
3942 .previous
3943
3944- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3945+ li t1, _TIF_SYSCALL_WORK
3946 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3947 and t0, t1, t0
3948 bnez t0, trace_a_syscall
3949diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
3950index ddcec1e..c7f983e 100644
3951--- a/arch/mips/mm/fault.c
3952+++ b/arch/mips/mm/fault.c
3953@@ -27,6 +27,23 @@
3954 #include <asm/highmem.h> /* For VMALLOC_END */
3955 #include <linux/kdebug.h>
3956
3957+#ifdef CONFIG_PAX_PAGEEXEC
3958+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3959+{
3960+ unsigned long i;
3961+
3962+ printk(KERN_ERR "PAX: bytes at PC: ");
3963+ for (i = 0; i < 5; i++) {
3964+ unsigned int c;
3965+ if (get_user(c, (unsigned int *)pc+i))
3966+ printk(KERN_CONT "???????? ");
3967+ else
3968+ printk(KERN_CONT "%08x ", c);
3969+ }
3970+ printk("\n");
3971+}
3972+#endif
3973+
3974 /*
3975 * This routine handles page faults. It determines the address,
3976 * and the problem, and then passes it off to one of the appropriate
3977diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
3978index 302d779..ee9ffb5 100644
3979--- a/arch/mips/mm/mmap.c
3980+++ b/arch/mips/mm/mmap.c
3981@@ -71,6 +71,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3982 struct vm_area_struct *vma;
3983 unsigned long addr = addr0;
3984 int do_color_align;
3985+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
3986
3987 if (unlikely(len > TASK_SIZE))
3988 return -ENOMEM;
3989@@ -95,6 +96,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3990 do_color_align = 1;
3991
3992 /* requesting a specific address */
3993+
3994+#ifdef CONFIG_PAX_RANDMMAP
3995+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
3996+#endif
3997+
3998 if (addr) {
3999 if (do_color_align)
4000 addr = COLOUR_ALIGN(addr, pgoff);
4001@@ -102,8 +108,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
4002 addr = PAGE_ALIGN(addr);
4003
4004 vma = find_vma(mm, addr);
4005- if (TASK_SIZE - len >= addr &&
4006- (!vma || addr + len <= vma->vm_start))
4007+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4008 return addr;
4009 }
4010
4011@@ -118,7 +123,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
4012 /* At this point: (!vma || addr < vma->vm_end). */
4013 if (TASK_SIZE - len < addr)
4014 return -ENOMEM;
4015- if (!vma || addr + len <= vma->vm_start)
4016+ if (check_heap_stack_gap(vma, addr, len, offset))
4017 return addr;
4018 addr = vma->vm_end;
4019 if (do_color_align)
4020@@ -145,7 +150,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
4021 /* make sure it can fit in the remaining address space */
4022 if (likely(addr > len)) {
4023 vma = find_vma(mm, addr - len);
4024- if (!vma || addr <= vma->vm_start) {
4025+ if (check_heap_stack_gap(vma, addr - len, len, offset)) {
4026 /* cache the address as a hint for next time */
4027 return mm->free_area_cache = addr - len;
4028 }
4029@@ -155,17 +160,17 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
4030 goto bottomup;
4031
4032 addr = mm->mmap_base - len;
4033- if (do_color_align)
4034- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4035
4036 do {
4037+ if (do_color_align)
4038+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4039 /*
4040 * Lookup failure means no vma is above this address,
4041 * else if new region fits below vma->vm_start,
4042 * return with success:
4043 */
4044 vma = find_vma(mm, addr);
4045- if (likely(!vma || addr + len <= vma->vm_start)) {
4046+ if (check_heap_stack_gap(vma, addr, len, offset)) {
4047 /* cache the address as a hint for next time */
4048 return mm->free_area_cache = addr;
4049 }
4050@@ -175,10 +180,8 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
4051 mm->cached_hole_size = vma->vm_start - addr;
4052
4053 /* try just below the current vma->vm_start */
4054- addr = vma->vm_start - len;
4055- if (do_color_align)
4056- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4057- } while (likely(len < vma->vm_start));
4058+ addr = skip_heap_stack_gap(vma, len);
4059+ } while (!IS_ERR_VALUE(addr));
4060
4061 bottomup:
4062 /*
4063@@ -242,30 +245,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4064 mm->unmap_area = arch_unmap_area_topdown;
4065 }
4066 }
4067-
4068-static inline unsigned long brk_rnd(void)
4069-{
4070- unsigned long rnd = get_random_int();
4071-
4072- rnd = rnd << PAGE_SHIFT;
4073- /* 8MB for 32bit, 256MB for 64bit */
4074- if (TASK_IS_32BIT_ADDR)
4075- rnd = rnd & 0x7ffffful;
4076- else
4077- rnd = rnd & 0xffffffful;
4078-
4079- return rnd;
4080-}
4081-
4082-unsigned long arch_randomize_brk(struct mm_struct *mm)
4083-{
4084- unsigned long base = mm->brk;
4085- unsigned long ret;
4086-
4087- ret = PAGE_ALIGN(base + brk_rnd());
4088-
4089- if (ret < mm->brk)
4090- return mm->brk;
4091-
4092- return ret;
4093-}
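Three things change in the top-down branch above: colour alignment moves inside the retry loop so every candidate address is re-aligned, the "slide just below vm_start" step becomes skip_heap_stack_gap() so the slide also clears the enforced gap, and exhaustion is signalled through an error-valued address tested with IS_ERR_VALUE(). (The deleted arch_randomize_brk()/brk_rnd() pair is, as I read the wider patch, superseded by PAX_RANDMMAP's centralized randomization rather than simply dropped.) A toy of the slide step with made-up names:

#include <errno.h>
#include <stdio.h>

struct vma_toy { unsigned long vm_start; int grows_down; };

static unsigned long gap = 64 * 1024;	/* assumed heap/stack gap */

/* highest base so base+len (plus the gap for stacks) stays below vm_start */
static unsigned long skip_gap_toy(const struct vma_toy *vma, unsigned long len)
{
	unsigned long need = len + (vma->grows_down ? gap : 0);

	if (vma->vm_start < need)
		return (unsigned long)-ENOMEM;	/* lands in IS_ERR_VALUE() range */
	return vma->vm_start - need;
}

int main(void)
{
	struct vma_toy stack = { 0x100000, 1 };
	printf("0x%lx\n", skip_gap_toy(&stack, 0x1000));	/* 0xef000 */
	return 0;
}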
4094diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
4095index 967d144..db12197 100644
4096--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
4097+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
4098@@ -11,12 +11,14 @@
4099 #ifndef _ASM_PROC_CACHE_H
4100 #define _ASM_PROC_CACHE_H
4101
4102+#include <linux/const.h>
4103+
4104 /* L1 cache */
4105
4106 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
4107 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
4108-#define L1_CACHE_BYTES 16 /* bytes per entry */
4109 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
4110+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
4111 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
4112
4113 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
4114diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
4115index bcb5df2..84fabd2 100644
4116--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
4117+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
4118@@ -16,13 +16,15 @@
4119 #ifndef _ASM_PROC_CACHE_H
4120 #define _ASM_PROC_CACHE_H
4121
4122+#include <linux/const.h>
4123+
4124 /*
4125 * L1 cache
4126 */
4127 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
4128 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
4129-#define L1_CACHE_BYTES 32 /* bytes per entry */
4130 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
4131+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
4132 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
4133
4134 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
4135diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
4136index 4ce7a01..449202a 100644
4137--- a/arch/openrisc/include/asm/cache.h
4138+++ b/arch/openrisc/include/asm/cache.h
4139@@ -19,11 +19,13 @@
4140 #ifndef __ASM_OPENRISC_CACHE_H
4141 #define __ASM_OPENRISC_CACHE_H
4142
4143+#include <linux/const.h>
4144+
4145 /* FIXME: How can we replace these with values from the CPU...
4146 * they shouldn't be hard-coded!
4147 */
4148
4149-#define L1_CACHE_BYTES 16
4150 #define L1_CACHE_SHIFT 4
4151+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4152
4153 #endif /* __ASM_OPENRISC_CACHE_H */
4154diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
4155index af9cf30..2aae9b2 100644
4156--- a/arch/parisc/include/asm/atomic.h
4157+++ b/arch/parisc/include/asm/atomic.h
4158@@ -229,6 +229,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
4159
4160 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4161
4162+#define atomic64_read_unchecked(v) atomic64_read(v)
4163+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4164+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4165+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4166+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4167+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4168+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4169+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4170+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4171+
4172 #endif /* !CONFIG_64BIT */
4173
4174
4175diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
4176index 47f11c7..3420df2 100644
4177--- a/arch/parisc/include/asm/cache.h
4178+++ b/arch/parisc/include/asm/cache.h
4179@@ -5,6 +5,7 @@
4180 #ifndef __ARCH_PARISC_CACHE_H
4181 #define __ARCH_PARISC_CACHE_H
4182
4183+#include <linux/const.h>
4184
4185 /*
4186 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
4187@@ -15,13 +16,13 @@
4188 * just ruin performance.
4189 */
4190 #ifdef CONFIG_PA20
4191-#define L1_CACHE_BYTES 64
4192 #define L1_CACHE_SHIFT 6
4193 #else
4194-#define L1_CACHE_BYTES 32
4195 #define L1_CACHE_SHIFT 5
4196 #endif
4197
4198+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4199+
4200 #ifndef __ASSEMBLY__
4201
4202 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4203diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
4204index 19f6cb1..6c78cf2 100644
4205--- a/arch/parisc/include/asm/elf.h
4206+++ b/arch/parisc/include/asm/elf.h
4207@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
4208
4209 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
4210
4211+#ifdef CONFIG_PAX_ASLR
4212+#define PAX_ELF_ET_DYN_BASE 0x10000UL
4213+
4214+#define PAX_DELTA_MMAP_LEN 16
4215+#define PAX_DELTA_STACK_LEN 16
4216+#endif
4217+
4218 /* This yields a mask that user programs can use to figure out what
4219 instruction set this CPU supports. This could be done in user space,
4220 but it's not easy, and we've already done it here. */
4221diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
4222index fc987a1..6e068ef 100644
4223--- a/arch/parisc/include/asm/pgalloc.h
4224+++ b/arch/parisc/include/asm/pgalloc.h
4225@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
4226 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
4227 }
4228
4229+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
4230+{
4231+ pgd_populate(mm, pgd, pmd);
4232+}
4233+
4234 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
4235 {
4236 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
4237@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
4238 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
4239 #define pmd_free(mm, x) do { } while (0)
4240 #define pgd_populate(mm, pmd, pte) BUG()
4241+#define pgd_populate_kernel(mm, pmd, pte) BUG()
4242
4243 #endif
4244
4245diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
4246index ee99f23..802b0a1 100644
4247--- a/arch/parisc/include/asm/pgtable.h
4248+++ b/arch/parisc/include/asm/pgtable.h
4249@@ -212,6 +212,17 @@ struct vm_area_struct;
4250 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
4251 #define PAGE_COPY PAGE_EXECREAD
4252 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
4253+
4254+#ifdef CONFIG_PAX_PAGEEXEC
4255+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
4256+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
4257+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
4258+#else
4259+# define PAGE_SHARED_NOEXEC PAGE_SHARED
4260+# define PAGE_COPY_NOEXEC PAGE_COPY
4261+# define PAGE_READONLY_NOEXEC PAGE_READONLY
4262+#endif
4263+
4264 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
4265 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
4266 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
4267diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
4268index 4ba2c93..f5e3974 100644
4269--- a/arch/parisc/include/asm/uaccess.h
4270+++ b/arch/parisc/include/asm/uaccess.h
4271@@ -251,10 +251,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
4272 const void __user *from,
4273 unsigned long n)
4274 {
4275- int sz = __compiletime_object_size(to);
4276+ size_t sz = __compiletime_object_size(to);
4277 int ret = -EFAULT;
4278
4279- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
4280+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
4281 ret = __copy_from_user(to, from, n);
4282 else
4283 copy_from_user_overflow();
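The parisc copy_from_user() tweak above is purely about the type of the compile-time size: __compiletime_object_size() reports "unknown" as -1, and the patch keeps that sentinel but moves the whole comparison into the unsigned domain (size_t and (size_t)-1). What it sidesteps is the silent signed-to-unsigned promotion in mixed comparisons:

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	int sz = -1;               /* old sentinel type */
	unsigned long n = 16;
	size_t szu = (size_t)-1;   /* new sentinel, same bit pattern */

	printf("sz >= n: %d\n", sz >= n);  /* 1: -1 promoted to ULONG_MAX */
	printf("szu == (size_t)-1: %d\n", szu == (size_t)-1);  /* explicit */
	return 0;
}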
4284diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
4285index 5e34ccf..672bc9c 100644
4286--- a/arch/parisc/kernel/module.c
4287+++ b/arch/parisc/kernel/module.c
4288@@ -98,16 +98,38 @@
4289
4290 /* three functions to determine where in the module core
4291 * or init pieces the location is */
4292+static inline int in_init_rx(struct module *me, void *loc)
4293+{
4294+ return (loc >= me->module_init_rx &&
4295+ loc < (me->module_init_rx + me->init_size_rx));
4296+}
4297+
4298+static inline int in_init_rw(struct module *me, void *loc)
4299+{
4300+ return (loc >= me->module_init_rw &&
4301+ loc < (me->module_init_rw + me->init_size_rw));
4302+}
4303+
4304 static inline int in_init(struct module *me, void *loc)
4305 {
4306- return (loc >= me->module_init &&
4307- loc <= (me->module_init + me->init_size));
4308+ return in_init_rx(me, loc) || in_init_rw(me, loc);
4309+}
4310+
4311+static inline int in_core_rx(struct module *me, void *loc)
4312+{
4313+ return (loc >= me->module_core_rx &&
4314+ loc < (me->module_core_rx + me->core_size_rx));
4315+}
4316+
4317+static inline int in_core_rw(struct module *me, void *loc)
4318+{
4319+ return (loc >= me->module_core_rw &&
4320+ loc < (me->module_core_rw + me->core_size_rw));
4321 }
4322
4323 static inline int in_core(struct module *me, void *loc)
4324 {
4325- return (loc >= me->module_core &&
4326- loc <= (me->module_core + me->core_size));
4327+ return in_core_rx(me, loc) || in_core_rw(me, loc);
4328 }
4329
4330 static inline int in_local(struct module *me, void *loc)
4331@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
4332 }
4333
4334 /* align things a bit */
4335- me->core_size = ALIGN(me->core_size, 16);
4336- me->arch.got_offset = me->core_size;
4337- me->core_size += gots * sizeof(struct got_entry);
4338+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
4339+ me->arch.got_offset = me->core_size_rw;
4340+ me->core_size_rw += gots * sizeof(struct got_entry);
4341
4342- me->core_size = ALIGN(me->core_size, 16);
4343- me->arch.fdesc_offset = me->core_size;
4344- me->core_size += fdescs * sizeof(Elf_Fdesc);
4345+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
4346+ me->arch.fdesc_offset = me->core_size_rw;
4347+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
4348
4349 me->arch.got_max = gots;
4350 me->arch.fdesc_max = fdescs;
4351@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
4352
4353 BUG_ON(value == 0);
4354
4355- got = me->module_core + me->arch.got_offset;
4356+ got = me->module_core_rw + me->arch.got_offset;
4357 for (i = 0; got[i].addr; i++)
4358 if (got[i].addr == value)
4359 goto out;
4360@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
4361 #ifdef CONFIG_64BIT
4362 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
4363 {
4364- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
4365+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
4366
4367 if (!value) {
4368 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
4369@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
4370
4371 /* Create new one */
4372 fdesc->addr = value;
4373- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
4374+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
4375 return (Elf_Addr)fdesc;
4376 }
4377 #endif /* CONFIG_64BIT */
4378@@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
4379
4380 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
4381 end = table + sechdrs[me->arch.unwind_section].sh_size;
4382- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
4383+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
4384
4385 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
4386 me->arch.unwind_section, table, end, gp);
4387diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
4388index f76c108..92bad82 100644
4389--- a/arch/parisc/kernel/sys_parisc.c
4390+++ b/arch/parisc/kernel/sys_parisc.c
4391@@ -33,9 +33,11 @@
4392 #include <linux/utsname.h>
4393 #include <linux/personality.h>
4394
4395-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
4396+static unsigned long get_unshared_area(struct file *filp, unsigned long addr, unsigned long len,
4397+ unsigned long flags)
4398 {
4399 struct vm_area_struct *vma;
4400+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
4401
4402 addr = PAGE_ALIGN(addr);
4403
4404@@ -43,7 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
4405 /* At this point: (!vma || addr < vma->vm_end). */
4406 if (TASK_SIZE - len < addr)
4407 return -ENOMEM;
4408- if (!vma || addr + len <= vma->vm_start)
4409+ if (check_heap_stack_gap(vma, addr, len, offset))
4410 return addr;
4411 addr = vma->vm_end;
4412 }
4413@@ -67,11 +69,12 @@ static int get_offset(struct address_space *mapping)
4414 return offset & 0x3FF000;
4415 }
4416
4417-static unsigned long get_shared_area(struct address_space *mapping,
4418- unsigned long addr, unsigned long len, unsigned long pgoff)
4419+static unsigned long get_shared_area(struct file *filp, struct address_space *mapping,
4420+ unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
4421 {
4422 struct vm_area_struct *vma;
4423 int offset = mapping ? get_offset(mapping) : 0;
4424+ unsigned long rand_offset = gr_rand_threadstack_offset(current->mm, filp, flags);
4425
4426 offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;
4427
4428@@ -81,7 +84,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
4429 /* At this point: (!vma || addr < vma->vm_end). */
4430 if (TASK_SIZE - len < addr)
4431 return -ENOMEM;
4432- if (!vma || addr + len <= vma->vm_start)
4433+ if (check_heap_stack_gap(vma, addr, len, rand_offset))
4434 return addr;
4435 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
4436 if (addr < vma->vm_end) /* handle wraparound */
4437@@ -100,14 +103,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4438 if (flags & MAP_FIXED)
4439 return addr;
4440 if (!addr)
4441- addr = TASK_UNMAPPED_BASE;
4442+ addr = current->mm->mmap_base;
4443
4444 if (filp) {
4445- addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
4446+ addr = get_shared_area(filp, filp->f_mapping, addr, len, pgoff, flags);
4447 } else if(flags & MAP_SHARED) {
4448- addr = get_shared_area(NULL, addr, len, pgoff);
4449+ addr = get_shared_area(filp, NULL, addr, len, pgoff, flags);
4450 } else {
4451- addr = get_unshared_area(addr, len);
4452+ addr = get_unshared_area(filp, addr, len, flags);
4453 }
4454 return addr;
4455 }
4456diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
4457index 45ba99f..8e22c33 100644
4458--- a/arch/parisc/kernel/traps.c
4459+++ b/arch/parisc/kernel/traps.c
4460@@ -732,9 +732,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
4461
4462 down_read(&current->mm->mmap_sem);
4463 vma = find_vma(current->mm,regs->iaoq[0]);
4464- if (vma && (regs->iaoq[0] >= vma->vm_start)
4465- && (vma->vm_flags & VM_EXEC)) {
4466-
4467+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
4468 fault_address = regs->iaoq[0];
4469 fault_space = regs->iasq[0];
4470
4471diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
4472index 18162ce..94de376 100644
4473--- a/arch/parisc/mm/fault.c
4474+++ b/arch/parisc/mm/fault.c
4475@@ -15,6 +15,7 @@
4476 #include <linux/sched.h>
4477 #include <linux/interrupt.h>
4478 #include <linux/module.h>
4479+#include <linux/unistd.h>
4480
4481 #include <asm/uaccess.h>
4482 #include <asm/traps.h>
4483@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
4484 static unsigned long
4485 parisc_acctyp(unsigned long code, unsigned int inst)
4486 {
4487- if (code == 6 || code == 16)
4488+ if (code == 6 || code == 7 || code == 16)
4489 return VM_EXEC;
4490
4491 switch (inst & 0xf0000000) {
4492@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
4493 }
4494 #endif
4495
4496+#ifdef CONFIG_PAX_PAGEEXEC
4497+/*
4498+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
4499+ *
4500+ * returns 1 when task should be killed
4501+ * 2 when rt_sigreturn trampoline was detected
4502+ * 3 when unpatched PLT trampoline was detected
4503+ */
4504+static int pax_handle_fetch_fault(struct pt_regs *regs)
4505+{
4506+
4507+#ifdef CONFIG_PAX_EMUPLT
4508+ int err;
4509+
4510+ do { /* PaX: unpatched PLT emulation */
4511+ unsigned int bl, depwi;
4512+
4513+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
4514+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
4515+
4516+ if (err)
4517+ break;
4518+
4519+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
4520+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
4521+
4522+ err = get_user(ldw, (unsigned int *)addr);
4523+ err |= get_user(bv, (unsigned int *)(addr+4));
4524+ err |= get_user(ldw2, (unsigned int *)(addr+8));
4525+
4526+ if (err)
4527+ break;
4528+
4529+ if (ldw == 0x0E801096U &&
4530+ bv == 0xEAC0C000U &&
4531+ ldw2 == 0x0E881095U)
4532+ {
4533+ unsigned int resolver, map;
4534+
4535+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
4536+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
4537+ if (err)
4538+ break;
4539+
4540+ regs->gr[20] = instruction_pointer(regs)+8;
4541+ regs->gr[21] = map;
4542+ regs->gr[22] = resolver;
4543+ regs->iaoq[0] = resolver | 3UL;
4544+ regs->iaoq[1] = regs->iaoq[0] + 4;
4545+ return 3;
4546+ }
4547+ }
4548+ } while (0);
4549+#endif
4550+
4551+#ifdef CONFIG_PAX_EMUTRAMP
4552+
4553+#ifndef CONFIG_PAX_EMUSIGRT
4554+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
4555+ return 1;
4556+#endif
4557+
4558+ do { /* PaX: rt_sigreturn emulation */
4559+ unsigned int ldi1, ldi2, bel, nop;
4560+
4561+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
4562+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
4563+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
4564+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
4565+
4566+ if (err)
4567+ break;
4568+
4569+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
4570+ ldi2 == 0x3414015AU &&
4571+ bel == 0xE4008200U &&
4572+ nop == 0x08000240U)
4573+ {
4574+ regs->gr[25] = (ldi1 & 2) >> 1;
4575+ regs->gr[20] = __NR_rt_sigreturn;
4576+ regs->gr[31] = regs->iaoq[1] + 16;
4577+ regs->sr[0] = regs->iasq[1];
4578+ regs->iaoq[0] = 0x100UL;
4579+ regs->iaoq[1] = regs->iaoq[0] + 4;
4580+ regs->iasq[0] = regs->sr[2];
4581+ regs->iasq[1] = regs->sr[2];
4582+ return 2;
4583+ }
4584+ } while (0);
4585+#endif
4586+
4587+ return 1;
4588+}
4589+
4590+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4591+{
4592+ unsigned long i;
4593+
4594+ printk(KERN_ERR "PAX: bytes at PC: ");
4595+ for (i = 0; i < 5; i++) {
4596+ unsigned int c;
4597+ if (get_user(c, (unsigned int *)pc+i))
4598+ printk(KERN_CONT "???????? ");
4599+ else
4600+ printk(KERN_CONT "%08x ", c);
4601+ }
4602+ printk("\n");
4603+}
4604+#endif
4605+
4606 int fixup_exception(struct pt_regs *regs)
4607 {
4608 const struct exception_table_entry *fix;
4609@@ -192,8 +303,33 @@ good_area:
4610
4611 acc_type = parisc_acctyp(code,regs->iir);
4612
4613- if ((vma->vm_flags & acc_type) != acc_type)
4614+ if ((vma->vm_flags & acc_type) != acc_type) {
4615+
4616+#ifdef CONFIG_PAX_PAGEEXEC
4617+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
4618+ (address & ~3UL) == instruction_pointer(regs))
4619+ {
4620+ up_read(&mm->mmap_sem);
4621+ switch (pax_handle_fetch_fault(regs)) {
4622+
4623+#ifdef CONFIG_PAX_EMUPLT
4624+ case 3:
4625+ return;
4626+#endif
4627+
4628+#ifdef CONFIG_PAX_EMUTRAMP
4629+ case 2:
4630+ return;
4631+#endif
4632+
4633+ }
4634+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
4635+ do_group_exit(SIGKILL);
4636+ }
4637+#endif
4638+
4639 goto bad_area;
4640+ }
4641
4642 /*
4643 * If for any reason at all we couldn't handle the fault, make
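Both emulation blocks above follow one pattern: on an instruction-fetch fault in a non-executable mapping, fetch the words at the fault PC with get_user(), compare them against the fixed encodings the toolchain emits for the unpatched PLT stub or the rt_sigreturn trampoline, and, on a match, perform the trampoline's effect inside the kernel instead of executing writable memory. The match itself is plain word comparison; the constants below are copied verbatim from the EMUSIGRT hunk:

#include <stdint.h>
#include <stdio.h>

static int looks_like_rt_sigreturn(const uint32_t w[4])
{
	return (w[0] == 0x34190000u || w[0] == 0x34190002u) &&
		w[1] == 0x3414015Au &&
		w[2] == 0xE4008200u &&
		w[3] == 0x08000240u;
}

int main(void)
{
	uint32_t tramp[4] = { 0x34190002u, 0x3414015Au, 0xE4008200u, 0x08000240u };
	printf("%d\n", looks_like_rt_sigreturn(tramp));	/* 1 */
	return 0;
}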
4644diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
4645index e3b1d41..8e81edf 100644
4646--- a/arch/powerpc/include/asm/atomic.h
4647+++ b/arch/powerpc/include/asm/atomic.h
4648@@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
4649 return t1;
4650 }
4651
4652+#define atomic64_read_unchecked(v) atomic64_read(v)
4653+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4654+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4655+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4656+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4657+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4658+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4659+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4660+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4661+
4662 #endif /* __powerpc64__ */
4663
4664 #endif /* __KERNEL__ */
4665diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
4666index 9e495c9..b6878e5 100644
4667--- a/arch/powerpc/include/asm/cache.h
4668+++ b/arch/powerpc/include/asm/cache.h
4669@@ -3,6 +3,7 @@
4670
4671 #ifdef __KERNEL__
4672
4673+#include <linux/const.h>
4674
4675 /* bytes per L1 cache line */
4676 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
4677@@ -22,7 +23,7 @@
4678 #define L1_CACHE_SHIFT 7
4679 #endif
4680
4681-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4682+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4683
4684 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4685
4686diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
4687index 6abf0a1..459d0f1 100644
4688--- a/arch/powerpc/include/asm/elf.h
4689+++ b/arch/powerpc/include/asm/elf.h
4690@@ -28,8 +28,19 @@
4691 the loader. We need to make sure that it is out of the way of the program
4692 that it will "exec", and that there is sufficient room for the brk. */
4693
4694-extern unsigned long randomize_et_dyn(unsigned long base);
4695-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
4696+#define ELF_ET_DYN_BASE (0x20000000)
4697+
4698+#ifdef CONFIG_PAX_ASLR
4699+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
4700+
4701+#ifdef __powerpc64__
4702+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
4703+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
4704+#else
4705+#define PAX_DELTA_MMAP_LEN 15
4706+#define PAX_DELTA_STACK_LEN 15
4707+#endif
4708+#endif
4709
4710 /*
4711 * Our registers are always unsigned longs, whether we're a 32 bit
4712@@ -124,10 +135,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
4713 (0x7ff >> (PAGE_SHIFT - 12)) : \
4714 (0x3ffff >> (PAGE_SHIFT - 12)))
4715
4716-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
4717-#define arch_randomize_brk arch_randomize_brk
4718-
4719-
4720 #ifdef CONFIG_SPU_BASE
4721 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
4722 #define NT_SPU 1
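PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN are counts of entropy bits, not offsets; my understanding is that fs/binfmt_elf.c (patched elsewhere in this file) masks a random long down to that many bits and shifts it up by PAGE_SHIFT to displace the mmap and stack bases. A runnable toy of that computation, with rand() standing in for the kernel's entropy source:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12
#define PAX_DELTA_MMAP_LEN 28	/* the 64-bit powerpc value from the hunk */

int main(void)
{
	unsigned long delta = ((unsigned long)rand() &
			       ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;

	printf("mmap base displaced by 0x%lx (%d bits of entropy)\n",
	       delta, PAX_DELTA_MMAP_LEN);
	return 0;
}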
4723diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
4724index 8196e9c..d83a9f3 100644
4725--- a/arch/powerpc/include/asm/exec.h
4726+++ b/arch/powerpc/include/asm/exec.h
4727@@ -4,6 +4,6 @@
4728 #ifndef _ASM_POWERPC_EXEC_H
4729 #define _ASM_POWERPC_EXEC_H
4730
4731-extern unsigned long arch_align_stack(unsigned long sp);
4732+#define arch_align_stack(x) ((x) & ~0xfUL)
4733
4734 #endif /* _ASM_POWERPC_EXEC_H */
4735diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
4736index 5acabbd..7ea14fa 100644
4737--- a/arch/powerpc/include/asm/kmap_types.h
4738+++ b/arch/powerpc/include/asm/kmap_types.h
4739@@ -10,7 +10,7 @@
4740 * 2 of the License, or (at your option) any later version.
4741 */
4742
4743-#define KM_TYPE_NR 16
4744+#define KM_TYPE_NR 17
4745
4746 #endif /* __KERNEL__ */
4747 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
4748diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
4749index 8565c25..2865190 100644
4750--- a/arch/powerpc/include/asm/mman.h
4751+++ b/arch/powerpc/include/asm/mman.h
4752@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
4753 }
4754 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
4755
4756-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
4757+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
4758 {
4759 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
4760 }
4761diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
4762index f072e97..b436dee 100644
4763--- a/arch/powerpc/include/asm/page.h
4764+++ b/arch/powerpc/include/asm/page.h
4765@@ -220,8 +220,9 @@ extern long long virt_phys_offset;
4766 * and needs to be executable. This means the whole heap ends
4767 * up being executable.
4768 */
4769-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
4770- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
4771+#define VM_DATA_DEFAULT_FLAGS32 \
4772+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
4773+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
4774
4775 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
4776 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
4777@@ -249,6 +250,9 @@ extern long long virt_phys_offset;
4778 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
4779 #endif
4780
4781+#define ktla_ktva(addr) (addr)
4782+#define ktva_ktla(addr) (addr)
4783+
4784 /*
4785 * Use the top bit of the higher-level page table entries to indicate whether
4786 * the entries we point to contain hugepages. This works because we know that
4787diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
4788index cd915d6..c10cee8 100644
4789--- a/arch/powerpc/include/asm/page_64.h
4790+++ b/arch/powerpc/include/asm/page_64.h
4791@@ -154,15 +154,18 @@ do { \
4792 * stack by default, so in the absence of a PT_GNU_STACK program header
4793 * we turn execute permission off.
4794 */
4795-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
4796- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
4797+#define VM_STACK_DEFAULT_FLAGS32 \
4798+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
4799+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
4800
4801 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
4802 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
4803
4804+#ifndef CONFIG_PAX_PAGEEXEC
4805 #define VM_STACK_DEFAULT_FLAGS \
4806 (is_32bit_task() ? \
4807 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
4808+#endif
4809
4810 #include <asm-generic/getorder.h>
4811
4812diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
4813index 292725c..f87ae14 100644
4814--- a/arch/powerpc/include/asm/pgalloc-64.h
4815+++ b/arch/powerpc/include/asm/pgalloc-64.h
4816@@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
4817 #ifndef CONFIG_PPC_64K_PAGES
4818
4819 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
4820+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
4821
4822 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
4823 {
4824@@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4825 pud_set(pud, (unsigned long)pmd);
4826 }
4827
4828+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4829+{
4830+ pud_populate(mm, pud, pmd);
4831+}
4832+
4833 #define pmd_populate(mm, pmd, pte_page) \
4834 pmd_populate_kernel(mm, pmd, page_address(pte_page))
4835 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
4836@@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4837 #else /* CONFIG_PPC_64K_PAGES */
4838
4839 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
4840+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
4841
4842 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
4843 pte_t *pte)
4844diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
4845index a9cbd3b..3b67efa 100644
4846--- a/arch/powerpc/include/asm/pgtable.h
4847+++ b/arch/powerpc/include/asm/pgtable.h
4848@@ -2,6 +2,7 @@
4849 #define _ASM_POWERPC_PGTABLE_H
4850 #ifdef __KERNEL__
4851
4852+#include <linux/const.h>
4853 #ifndef __ASSEMBLY__
4854 #include <asm/processor.h> /* For TASK_SIZE */
4855 #include <asm/mmu.h>
4856diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
4857index 4aad413..85d86bf 100644
4858--- a/arch/powerpc/include/asm/pte-hash32.h
4859+++ b/arch/powerpc/include/asm/pte-hash32.h
4860@@ -21,6 +21,7 @@
4861 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
4862 #define _PAGE_USER 0x004 /* usermode access allowed */
4863 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
4864+#define _PAGE_EXEC _PAGE_GUARDED
4865 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
4866 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
4867 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
4868diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
4869index d24c141..b60696e 100644
4870--- a/arch/powerpc/include/asm/reg.h
4871+++ b/arch/powerpc/include/asm/reg.h
4872@@ -215,6 +215,7 @@
4873 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
4874 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
4875 #define DSISR_NOHPTE 0x40000000 /* no translation found */
4876+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
4877 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
4878 #define DSISR_ISSTORE 0x02000000 /* access was a store */
4879 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
4880diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
4881index 406b7b9..af63426 100644
4882--- a/arch/powerpc/include/asm/thread_info.h
4883+++ b/arch/powerpc/include/asm/thread_info.h
4884@@ -97,7 +97,6 @@ static inline struct thread_info *current_thread_info(void)
4885 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
4886 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
4887 #define TIF_SINGLESTEP 8 /* singlestepping active */
4888-#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
4889 #define TIF_SECCOMP 10 /* secure computing */
4890 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
4891 #define TIF_NOERROR 12 /* Force successful syscall return */
4892@@ -106,6 +105,9 @@ static inline struct thread_info *current_thread_info(void)
4893 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
4894 #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
4895 for stack store? */
4896+#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
4897+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
4898+#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
4899
4900 /* as above, but as bit values */
4901 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
4902@@ -124,8 +126,10 @@ static inline struct thread_info *current_thread_info(void)
4903 #define _TIF_UPROBE (1<<TIF_UPROBE)
4904 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
4905 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
4906+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
4907 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
4908- _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
4909+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
4910+ _TIF_GRSEC_SETXID)
4911
4912 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
4913 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
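
TIF_MEMDIE moves from bit 9 to bit 17 so that bit 9 can hold TIF_GRSEC_SETXID, which must join _TIF_SYSCALL_T_OR_A: powerpc assembly tests that mask with andi., whose immediate is an unsigned 16-bit field, hence the comment's constraint that every flag in an asm-visible mask live in bits 0-15. A compile-time check of the invariant (bit positions abridged from this header; TIF_SYSCALL_TRACE is bit 0 there):

    /* C11 sketch: the syscall-entry mask must fit andi.'s immediate. */
    #define TIF_SYSCALL_TRACE      0
    #define TIF_SYSCALL_AUDIT      7
    #define TIF_GRSEC_SETXID       9
    #define TIF_SECCOMP            10
    #define TIF_SYSCALL_TRACEPOINT 15

    #define _TIF_SYSCALL_T_OR_A ((1 << TIF_SYSCALL_TRACE)  | \
                                 (1 << TIF_SYSCALL_AUDIT)  | \
                                 (1 << TIF_SECCOMP)        | \
                                 (1 << TIF_SYSCALL_TRACEPOINT) | \
                                 (1 << TIF_GRSEC_SETXID))

    _Static_assert((_TIF_SYSCALL_T_OR_A & ~0xffff) == 0,
                   "mask must be encodable as an andi. immediate");
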
4914diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
4915index 4db4959..335e00c 100644
4916--- a/arch/powerpc/include/asm/uaccess.h
4917+++ b/arch/powerpc/include/asm/uaccess.h
4918@@ -13,6 +13,8 @@
4919 #define VERIFY_READ 0
4920 #define VERIFY_WRITE 1
4921
4922+extern void check_object_size(const void *ptr, unsigned long n, bool to);
4923+
4924 /*
4925 * The fs value determines whether argument validity checking should be
4926 * performed or not. If get_fs() == USER_DS, checking is performed, with
4927@@ -318,52 +320,6 @@ do { \
4928 extern unsigned long __copy_tofrom_user(void __user *to,
4929 const void __user *from, unsigned long size);
4930
4931-#ifndef __powerpc64__
4932-
4933-static inline unsigned long copy_from_user(void *to,
4934- const void __user *from, unsigned long n)
4935-{
4936- unsigned long over;
4937-
4938- if (access_ok(VERIFY_READ, from, n))
4939- return __copy_tofrom_user((__force void __user *)to, from, n);
4940- if ((unsigned long)from < TASK_SIZE) {
4941- over = (unsigned long)from + n - TASK_SIZE;
4942- return __copy_tofrom_user((__force void __user *)to, from,
4943- n - over) + over;
4944- }
4945- return n;
4946-}
4947-
4948-static inline unsigned long copy_to_user(void __user *to,
4949- const void *from, unsigned long n)
4950-{
4951- unsigned long over;
4952-
4953- if (access_ok(VERIFY_WRITE, to, n))
4954- return __copy_tofrom_user(to, (__force void __user *)from, n);
4955- if ((unsigned long)to < TASK_SIZE) {
4956- over = (unsigned long)to + n - TASK_SIZE;
4957- return __copy_tofrom_user(to, (__force void __user *)from,
4958- n - over) + over;
4959- }
4960- return n;
4961-}
4962-
4963-#else /* __powerpc64__ */
4964-
4965-#define __copy_in_user(to, from, size) \
4966- __copy_tofrom_user((to), (from), (size))
4967-
4968-extern unsigned long copy_from_user(void *to, const void __user *from,
4969- unsigned long n);
4970-extern unsigned long copy_to_user(void __user *to, const void *from,
4971- unsigned long n);
4972-extern unsigned long copy_in_user(void __user *to, const void __user *from,
4973- unsigned long n);
4974-
4975-#endif /* __powerpc64__ */
4976-
4977 static inline unsigned long __copy_from_user_inatomic(void *to,
4978 const void __user *from, unsigned long n)
4979 {
4980@@ -387,6 +343,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
4981 if (ret == 0)
4982 return 0;
4983 }
4984+
4985+ if (!__builtin_constant_p(n))
4986+ check_object_size(to, n, false);
4987+
4988 return __copy_tofrom_user((__force void __user *)to, from, n);
4989 }
4990
4991@@ -413,6 +373,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
4992 if (ret == 0)
4993 return 0;
4994 }
4995+
4996+ if (!__builtin_constant_p(n))
4997+ check_object_size(from, n, true);
4998+
4999 return __copy_tofrom_user(to, (__force const void __user *)from, n);
5000 }
5001
5002@@ -430,6 +394,92 @@ static inline unsigned long __copy_to_user(void __user *to,
5003 return __copy_to_user_inatomic(to, from, size);
5004 }
5005
5006+#ifndef __powerpc64__
5007+
5008+static inline unsigned long __must_check copy_from_user(void *to,
5009+ const void __user *from, unsigned long n)
5010+{
5011+ unsigned long over;
5012+
5013+ if ((long)n < 0)
5014+ return n;
5015+
5016+ if (access_ok(VERIFY_READ, from, n)) {
5017+ if (!__builtin_constant_p(n))
5018+ check_object_size(to, n, false);
5019+ return __copy_tofrom_user((__force void __user *)to, from, n);
5020+ }
5021+ if ((unsigned long)from < TASK_SIZE) {
5022+ over = (unsigned long)from + n - TASK_SIZE;
5023+ if (!__builtin_constant_p(n - over))
5024+ check_object_size(to, n - over, false);
5025+ return __copy_tofrom_user((__force void __user *)to, from,
5026+ n - over) + over;
5027+ }
5028+ return n;
5029+}
5030+
5031+static inline unsigned long __must_check copy_to_user(void __user *to,
5032+ const void *from, unsigned long n)
5033+{
5034+ unsigned long over;
5035+
5036+ if ((long)n < 0)
5037+ return n;
5038+
5039+ if (access_ok(VERIFY_WRITE, to, n)) {
5040+ if (!__builtin_constant_p(n))
5041+ check_object_size(from, n, true);
5042+ return __copy_tofrom_user(to, (__force void __user *)from, n);
5043+ }
5044+ if ((unsigned long)to < TASK_SIZE) {
5045+ over = (unsigned long)to + n - TASK_SIZE;
5046+ if (!__builtin_constant_p(n))
5047+ check_object_size(from, n - over, true);
5048+ return __copy_tofrom_user(to, (__force void __user *)from,
5049+ n - over) + over;
5050+ }
5051+ return n;
5052+}
5053+
5054+#else /* __powerpc64__ */
5055+
5056+#define __copy_in_user(to, from, size) \
5057+ __copy_tofrom_user((to), (from), (size))
5058+
5059+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
5060+{
5061+ if ((long)n < 0 || n > INT_MAX)
5062+ return n;
5063+
5064+ if (!__builtin_constant_p(n))
5065+ check_object_size(to, n, false);
5066+
5067+ if (likely(access_ok(VERIFY_READ, from, n)))
5068+ n = __copy_from_user(to, from, n);
5069+ else
5070+ memset(to, 0, n);
5071+ return n;
5072+}
5073+
5074+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
5075+{
5076+ if ((long)n < 0 || n > INT_MAX)
5077+ return n;
5078+
5079+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
5080+ if (!__builtin_constant_p(n))
5081+ check_object_size(from, n, true);
5082+ n = __copy_to_user(to, from, n);
5083+ }
5084+ return n;
5085+}
5086+
5087+extern unsigned long copy_in_user(void __user *to, const void __user *from,
5088+ unsigned long n);
5089+
5090+#endif /* __powerpc64__ */
5091+
5092 extern unsigned long __clear_user(void __user *addr, unsigned long size);
5093
5094 static inline unsigned long clear_user(void __user *addr, unsigned long size)
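
The net effect of this uaccess.h rework: copy_to_user()/copy_from_user() become inline on both 32- and 64-bit, gain a sign check that rejects "negative" lengths (the usual symptom of unchecked user-controlled size arithmetic), and feed every non-constant size through check_object_size(), the PAX_USERCOPY hook that compares the kernel buffer's slab or stack bounds against n. A userspace model of the 64-bit read path, with the kernel facilities stubbed out (the stubs and main() are illustrative, not part of the patch):

    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Stubs standing in for kernel facilities. */
    static bool access_ok_read(const void *from, unsigned long n)
    { (void)from; (void)n; return true; }

    static void check_object_size(const void *ptr, unsigned long n, bool to_user)
    {
        /* PAX_USERCOPY would verify [ptr, ptr+n) sits inside a single
         * slab object or the current stack frame. */
        (void)ptr; (void)n; (void)to_user;
    }

    static unsigned long copy_from_user_model(void *to, const void *from,
                                              unsigned long n)
    {
        if ((long)n < 0 || n > INT_MAX)   /* reject huge/overflowed sizes */
            return n;
        if (!__builtin_constant_p(n))     /* constant sizes checked at build time */
            check_object_size(to, n, false);
        if (access_ok_read(from, n)) {
            memcpy(to, from, n);          /* stands in for __copy_from_user() */
            return 0;
        }
        memset(to, 0, n);                 /* never leak uninitialised memory */
        return n;
    }

    int main(void)
    {
        char dst[8], src[8] = "abcdefg";
        printf("%lu\n", copy_from_user_model(dst, src, sizeof(dst)));       /* 0 */
        printf("%lu\n", copy_from_user_model(dst, src, (unsigned long)-4)); /* rejected */
        return 0;
    }
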
5095diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
5096index 4684e33..acc4d19e 100644
5097--- a/arch/powerpc/kernel/exceptions-64e.S
5098+++ b/arch/powerpc/kernel/exceptions-64e.S
5099@@ -715,6 +715,7 @@ storage_fault_common:
5100 std r14,_DAR(r1)
5101 std r15,_DSISR(r1)
5102 addi r3,r1,STACK_FRAME_OVERHEAD
5103+ bl .save_nvgprs
5104 mr r4,r14
5105 mr r5,r15
5106 ld r14,PACA_EXGEN+EX_R14(r13)
5107@@ -723,8 +724,7 @@ storage_fault_common:
5108 cmpdi r3,0
5109 bne- 1f
5110 b .ret_from_except_lite
5111-1: bl .save_nvgprs
5112- mr r5,r3
5113+1: mr r5,r3
5114 addi r3,r1,STACK_FRAME_OVERHEAD
5115 ld r4,_DAR(r1)
5116 bl .bad_page_fault
5117diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
5118index 10b658a..e542888 100644
5119--- a/arch/powerpc/kernel/exceptions-64s.S
5120+++ b/arch/powerpc/kernel/exceptions-64s.S
5121@@ -1013,10 +1013,10 @@ handle_page_fault:
5122 11: ld r4,_DAR(r1)
5123 ld r5,_DSISR(r1)
5124 addi r3,r1,STACK_FRAME_OVERHEAD
5125+ bl .save_nvgprs
5126 bl .do_page_fault
5127 cmpdi r3,0
5128 beq+ 12f
5129- bl .save_nvgprs
5130 mr r5,r3
5131 addi r3,r1,STACK_FRAME_OVERHEAD
5132 lwz r4,_DAR(r1)
5133diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
5134index 2e3200c..72095ce 100644
5135--- a/arch/powerpc/kernel/module_32.c
5136+++ b/arch/powerpc/kernel/module_32.c
5137@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
5138 me->arch.core_plt_section = i;
5139 }
5140 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
5141- printk("Module doesn't contain .plt or .init.plt sections.\n");
5142+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
5143 return -ENOEXEC;
5144 }
5145
5146@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
5147
5148 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
5149 /* Init, or core PLT? */
5150- if (location >= mod->module_core
5151- && location < mod->module_core + mod->core_size)
5152+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
5153+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
5154 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
5155- else
5156+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
5157+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
5158 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
5159+ else {
5160+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
5161+ return ~0UL;
5162+ }
5163
5164 /* Find this entry, or if that fails, the next avail. entry */
5165 while (entry->jump[0]) {
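
With grsecurity's module layout the single module_core/module_init regions are split into RX (text, PLT) and RW (data) halves, so do_plt_call() must test four ranges instead of two, and an address matching none of them is now a logged error instead of silently falling through to the init PLT. A condensed model of the classification (the module_*_rx/_rw fields come from the patch's reworked struct module; the helper names are illustrative):

    enum plt_region { PLT_CORE, PLT_INIT, PLT_BAD };

    struct module_layout_model {
        void *core_rx, *core_rw, *init_rx, *init_rw;
        unsigned long core_size_rx, core_size_rw;
        unsigned long init_size_rx, init_size_rw;
    };

    static int in_range(const void *p, const void *base, unsigned long size)
    {
        return p >= base && p < (const void *)((const char *)base + size);
    }

    static enum plt_region classify(const struct module_layout_model *m,
                                    const void *loc)
    {
        if (in_range(loc, m->core_rx, m->core_size_rx) ||
            in_range(loc, m->core_rw, m->core_size_rw))
            return PLT_CORE;               /* use arch.core_plt_section */
        if (in_range(loc, m->init_rx, m->init_size_rx) ||
            in_range(loc, m->init_rw, m->init_size_rw))
            return PLT_INIT;               /* use arch.init_plt_section */
        return PLT_BAD;                    /* invalid R_PPC_REL24 entry */
    }
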
5166diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
5167index ba48233..16ac31d 100644
5168--- a/arch/powerpc/kernel/process.c
5169+++ b/arch/powerpc/kernel/process.c
5170@@ -680,8 +680,8 @@ void show_regs(struct pt_regs * regs)
5171 * Lookup NIP late so we have the best change of getting the
5172 * above info out without failing
5173 */
5174- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
5175- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
5176+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
5177+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
5178 #endif
5179 show_stack(current, (unsigned long *) regs->gpr[1]);
5180 if (!user_mode(regs))
5181@@ -1175,10 +1175,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
5182 newsp = stack[0];
5183 ip = stack[STACK_FRAME_LR_SAVE];
5184 if (!firstframe || ip != lr) {
5185- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
5186+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
5187 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5188 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
5189- printk(" (%pS)",
5190+ printk(" (%pA)",
5191 (void *)current->ret_stack[curr_frame].ret);
5192 curr_frame--;
5193 }
5194@@ -1198,7 +1198,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
5195 struct pt_regs *regs = (struct pt_regs *)
5196 (sp + STACK_FRAME_OVERHEAD);
5197 lr = regs->link;
5198- printk("--- Exception: %lx at %pS\n LR = %pS\n",
5199+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
5200 regs->trap, (void *)regs->nip, (void *)lr);
5201 firstframe = 1;
5202 }
5203@@ -1240,58 +1240,3 @@ void __ppc64_runlatch_off(void)
5204 mtspr(SPRN_CTRLT, ctrl);
5205 }
5206 #endif /* CONFIG_PPC64 */
5207-
5208-unsigned long arch_align_stack(unsigned long sp)
5209-{
5210- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5211- sp -= get_random_int() & ~PAGE_MASK;
5212- return sp & ~0xf;
5213-}
5214-
5215-static inline unsigned long brk_rnd(void)
5216-{
5217- unsigned long rnd = 0;
5218-
5219- /* 8MB for 32bit, 1GB for 64bit */
5220- if (is_32bit_task())
5221- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
5222- else
5223- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
5224-
5225- return rnd << PAGE_SHIFT;
5226-}
5227-
5228-unsigned long arch_randomize_brk(struct mm_struct *mm)
5229-{
5230- unsigned long base = mm->brk;
5231- unsigned long ret;
5232-
5233-#ifdef CONFIG_PPC_STD_MMU_64
5234- /*
5235- * If we are using 1TB segments and we are allowed to randomise
5236- * the heap, we can put it above 1TB so it is backed by a 1TB
5237- * segment. Otherwise the heap will be in the bottom 1TB
5238- * which always uses 256MB segments and this may result in a
5239- * performance penalty.
5240- */
5241- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
5242- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
5243-#endif
5244-
5245- ret = PAGE_ALIGN(base + brk_rnd());
5246-
5247- if (ret < mm->brk)
5248- return mm->brk;
5249-
5250- return ret;
5251-}
5252-
5253-unsigned long randomize_et_dyn(unsigned long base)
5254-{
5255- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
5256-
5257- if (ret < base)
5258- return base;
5259-
5260- return ret;
5261-}
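
arch_align_stack(), brk_rnd(), arch_randomize_brk() and randomize_et_dyn() are deleted here (the same removal recurs for s390 and score below), presumably because PaX's RANDMMAP/RANDUSTACK provide the randomization centrally and the weaker in-arch variants would only duplicate it. For scale, the removed brk_rnd() drew from an 8 MB window on 32-bit and a 1 GB window on 64-bit tasks; with 4 KB pages that is 2^(23-12) = 2048 and 2^(30-12) = 262144 page-aligned positions (11 and 18 bits of entropy). A quick check of that arithmetic:

    #include <stdio.h>

    int main(void)
    {
        const unsigned page_shift = 12;               /* 4 KB pages */
        unsigned long r32 = 1UL << (23 - page_shift); /* 8 MB window */
        unsigned long r64 = 1UL << (30 - page_shift); /* 1 GB window */

        printf("32-bit brk slots: %lu (%u bits)\n", r32, 23 - page_shift);
        printf("64-bit brk slots: %lu (%u bits)\n", r64, 30 - page_shift);
        return 0;
    }
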
5262diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
5263index 79d8e56..38ffcbb 100644
5264--- a/arch/powerpc/kernel/ptrace.c
5265+++ b/arch/powerpc/kernel/ptrace.c
5266@@ -1663,6 +1663,10 @@ long arch_ptrace(struct task_struct *child, long request,
5267 return ret;
5268 }
5269
5270+#ifdef CONFIG_GRKERNSEC_SETXID
5271+extern void gr_delayed_cred_worker(void);
5272+#endif
5273+
5274 /*
5275 * We must return the syscall number to actually look up in the table.
5276 * This can be -1L to skip running any syscall at all.
5277@@ -1673,6 +1677,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
5278
5279 secure_computing_strict(regs->gpr[0]);
5280
5281+#ifdef CONFIG_GRKERNSEC_SETXID
5282+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5283+ gr_delayed_cred_worker();
5284+#endif
5285+
5286 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
5287 tracehook_report_syscall_entry(regs))
5288 /*
5289@@ -1707,6 +1716,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
5290 {
5291 int step;
5292
5293+#ifdef CONFIG_GRKERNSEC_SETXID
5294+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5295+ gr_delayed_cred_worker();
5296+#endif
5297+
5298 audit_syscall_exit(regs);
5299
5300 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
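
The GRKERNSEC_SETXID hook addresses a weakness in how setuid() reaches all threads: the C library propagates the change with an internal signal protocol that can be interfered with from inside the process. Here the setter instead flags every thread with TIF_GRSEC_SETXID, and each thread applies the pending credentials itself at its next syscall entry or exit, where userspace cannot intercept it. A userspace model of the consumer side (gr_delayed_cred_worker()'s body lives elsewhere in the patch; atomicity of the flag is elided here):

    #include <stdbool.h>
    #include <stdio.h>

    static bool tif_grsec_setxid;          /* per-thread flag in the kernel */

    static void gr_delayed_cred_worker_model(void)
    {
        printf("applying pending uid/gid change for this thread\n");
    }

    static void syscall_boundary_model(void)
    {
        if (tif_grsec_setxid) {            /* test_and_clear_thread_flag() */
            tif_grsec_setxid = false;      /* ...so the update runs once */
            gr_delayed_cred_worker_model();
        }
    }

    int main(void)
    {
        tif_grsec_setxid = true;           /* setter thread flagged us */
        syscall_boundary_model();          /* entry: update applied */
        syscall_boundary_model();          /* exit: nothing left to do */
        return 0;
    }
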
5301diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
5302index 804e323..79181c1 100644
5303--- a/arch/powerpc/kernel/signal_32.c
5304+++ b/arch/powerpc/kernel/signal_32.c
5305@@ -851,7 +851,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
5306 /* Save user registers on the stack */
5307 frame = &rt_sf->uc.uc_mcontext;
5308 addr = frame;
5309- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
5310+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
5311 if (save_user_regs(regs, frame, 0, 1))
5312 goto badframe;
5313 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
5314diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
5315index d183f87..1867f1a 100644
5316--- a/arch/powerpc/kernel/signal_64.c
5317+++ b/arch/powerpc/kernel/signal_64.c
5318@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
5319 current->thread.fpscr.val = 0;
5320
5321 /* Set up to return from userspace. */
5322- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
5323+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
5324 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
5325 } else {
5326 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
5327diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
5328index 3251840..3f7c77a 100644
5329--- a/arch/powerpc/kernel/traps.c
5330+++ b/arch/powerpc/kernel/traps.c
5331@@ -133,6 +133,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
5332 return flags;
5333 }
5334
5335+extern void gr_handle_kernel_exploit(void);
5336+
5337 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
5338 int signr)
5339 {
5340@@ -182,6 +184,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
5341 panic("Fatal exception in interrupt");
5342 if (panic_on_oops)
5343 panic("Fatal exception");
5344+
5345+ gr_handle_kernel_exploit();
5346+
5347 do_exit(signr);
5348 }
5349
5350diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
5351index 1b2076f..835e4be 100644
5352--- a/arch/powerpc/kernel/vdso.c
5353+++ b/arch/powerpc/kernel/vdso.c
5354@@ -34,6 +34,7 @@
5355 #include <asm/firmware.h>
5356 #include <asm/vdso.h>
5357 #include <asm/vdso_datapage.h>
5358+#include <asm/mman.h>
5359
5360 #include "setup.h"
5361
5362@@ -218,7 +219,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
5363 vdso_base = VDSO32_MBASE;
5364 #endif
5365
5366- current->mm->context.vdso_base = 0;
5367+ current->mm->context.vdso_base = ~0UL;
5368
5369 /* vDSO has a problem and was disabled, just don't "enable" it for the
5370 * process
5371@@ -238,7 +239,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
5372 vdso_base = get_unmapped_area(NULL, vdso_base,
5373 (vdso_pages << PAGE_SHIFT) +
5374 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
5375- 0, 0);
5376+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
5377 if (IS_ERR_VALUE(vdso_base)) {
5378 rc = vdso_base;
5379 goto fail_mmapsem;
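
Three hunks cooperate here: vdso.c initialises mm->context.vdso_base to ~0UL instead of 0, and both signal-frame paths test against ~0UL instead of truthiness. The apparent rationale: 0 is both a conceivable mapping address and the one an attacker would most like the signal-trampoline arithmetic to use, while ~0UL can never be a valid mapping base, making it an unambiguous "no vDSO" sentinel; the vDSO mapping request also now passes MAP_PRIVATE | MAP_EXECUTABLE so a PaX-aware get_unmapped_area() treats it as an executable mapping. The sentinel pattern, as a sketch:

    #define VDSO_UNMAPPED (~0UL)

    /* Mirrors the signal_32/signal_64 checks above: only use the vDSO
     * trampoline when the vDSO was actually mapped. */
    static unsigned long sigtramp_addr_model(unsigned long vdso_base,
                                             unsigned long tramp_off)
    {
        if (vdso_base != VDSO_UNMAPPED)
            return vdso_base + tramp_off;  /* return via vDSO trampoline */
        return 0;                          /* caller builds an on-stack one */
    }
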
5380diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
5381index 5eea6f3..5d10396 100644
5382--- a/arch/powerpc/lib/usercopy_64.c
5383+++ b/arch/powerpc/lib/usercopy_64.c
5384@@ -9,22 +9,6 @@
5385 #include <linux/module.h>
5386 #include <asm/uaccess.h>
5387
5388-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
5389-{
5390- if (likely(access_ok(VERIFY_READ, from, n)))
5391- n = __copy_from_user(to, from, n);
5392- else
5393- memset(to, 0, n);
5394- return n;
5395-}
5396-
5397-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5398-{
5399- if (likely(access_ok(VERIFY_WRITE, to, n)))
5400- n = __copy_to_user(to, from, n);
5401- return n;
5402-}
5403-
5404 unsigned long copy_in_user(void __user *to, const void __user *from,
5405 unsigned long n)
5406 {
5407@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
5408 return n;
5409 }
5410
5411-EXPORT_SYMBOL(copy_from_user);
5412-EXPORT_SYMBOL(copy_to_user);
5413 EXPORT_SYMBOL(copy_in_user);
5414
5415diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
5416index 0a6b283..7674925 100644
5417--- a/arch/powerpc/mm/fault.c
5418+++ b/arch/powerpc/mm/fault.c
5419@@ -32,6 +32,10 @@
5420 #include <linux/perf_event.h>
5421 #include <linux/magic.h>
5422 #include <linux/ratelimit.h>
5423+#include <linux/slab.h>
5424+#include <linux/pagemap.h>
5425+#include <linux/compiler.h>
5426+#include <linux/unistd.h>
5427
5428 #include <asm/firmware.h>
5429 #include <asm/page.h>
5430@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
5431 }
5432 #endif
5433
5434+#ifdef CONFIG_PAX_PAGEEXEC
5435+/*
5436+ * PaX: decide what to do with offenders (regs->nip = fault address)
5437+ *
5438+ * returns 1 when task should be killed
5439+ */
5440+static int pax_handle_fetch_fault(struct pt_regs *regs)
5441+{
5442+ return 1;
5443+}
5444+
5445+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5446+{
5447+ unsigned long i;
5448+
5449+ printk(KERN_ERR "PAX: bytes at PC: ");
5450+ for (i = 0; i < 5; i++) {
5451+ unsigned int c;
5452+ if (get_user(c, (unsigned int __user *)pc+i))
5453+ printk(KERN_CONT "???????? ");
5454+ else
5455+ printk(KERN_CONT "%08x ", c);
5456+ }
5457+ printk("\n");
5458+}
5459+#endif
5460+
5461 /*
5462 * Check whether the instruction at regs->nip is a store using
5463 * an update addressing form which will update r1.
5464@@ -216,7 +247,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
5465 * indicate errors in DSISR but can validly be set in SRR1.
5466 */
5467 if (trap == 0x400)
5468- error_code &= 0x48200000;
5469+ error_code &= 0x58200000;
5470 else
5471 is_write = error_code & DSISR_ISSTORE;
5472 #else
5473@@ -367,7 +398,7 @@ good_area:
5474 * "undefined". Of those that can be set, this is the only
5475 * one which seems bad.
5476 */
5477- if (error_code & 0x10000000)
5478+ if (error_code & DSISR_GUARDED)
5479 /* Guarded storage error. */
5480 goto bad_area;
5481 #endif /* CONFIG_8xx */
5482@@ -382,7 +413,7 @@ good_area:
5483 * processors use the same I/D cache coherency mechanism
5484 * as embedded.
5485 */
5486- if (error_code & DSISR_PROTFAULT)
5487+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
5488 goto bad_area;
5489 #endif /* CONFIG_PPC_STD_MMU */
5490
5491@@ -465,6 +496,23 @@ bad_area:
5492 bad_area_nosemaphore:
5493 /* User mode accesses cause a SIGSEGV */
5494 if (user_mode(regs)) {
5495+
5496+#ifdef CONFIG_PAX_PAGEEXEC
5497+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
5498+#ifdef CONFIG_PPC_STD_MMU
5499+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
5500+#else
5501+ if (is_exec && regs->nip == address) {
5502+#endif
5503+ switch (pax_handle_fetch_fault(regs)) {
5504+ }
5505+
5506+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
5507+ do_group_exit(SIGKILL);
5508+ }
5509+ }
5510+#endif
5511+
5512 _exception(SIGSEGV, regs, code, address);
5513 return 0;
5514 }
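
These fault.c changes are the consumer side of DSISR_GUARDED: the SRR1 mask for instruction faults grows from 0x48200000 to 0x58200000 so the guarded-storage bit survives into error_code, guarded faults join protection faults on the bad_area path, and a user-mode execute fault on a PAGEEXEC task is reported and killed with SIGKILL. Since pax_handle_fetch_fault() always returns 1 on powerpc, the empty switch always falls through to the report-and-kill path; on architectures where PaX can emulate known trampolines the switch gains cases. pax_report_insns() logs the five 32-bit words at the faulting PC; a userspace rendering of that loop (reading a local buffer instead of using get_user()):

    #include <stdio.h>
    #include <string.h>

    /* Print five instruction words starting at pc, word-strided like
     * the (unsigned int __user *)pc + i walk in the hunk above. */
    static void report_insns_model(const unsigned char *pc)
    {
        printf("PAX: bytes at PC: ");
        for (unsigned long i = 0; i < 5; i++) {
            unsigned int c;
            memcpy(&c, pc + i * sizeof(c), sizeof(c)); /* get_user() stand-in */
            printf("%08x ", c);
        }
        printf("\n");
    }

    int main(void)
    {
        unsigned char code[20];
        for (int i = 0; i < 20; i++)
            code[i] = (unsigned char)i;    /* sample bytes */
        report_insns_model(code);
        return 0;
    }
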
5515diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
5516index 67a42ed..1c7210c 100644
5517--- a/arch/powerpc/mm/mmap_64.c
5518+++ b/arch/powerpc/mm/mmap_64.c
5519@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5520 */
5521 if (mmap_is_legacy()) {
5522 mm->mmap_base = TASK_UNMAPPED_BASE;
5523+
5524+#ifdef CONFIG_PAX_RANDMMAP
5525+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5526+ mm->mmap_base += mm->delta_mmap;
5527+#endif
5528+
5529 mm->get_unmapped_area = arch_get_unmapped_area;
5530 mm->unmap_area = arch_unmap_area;
5531 } else {
5532 mm->mmap_base = mmap_base();
5533+
5534+#ifdef CONFIG_PAX_RANDMMAP
5535+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5536+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5537+#endif
5538+
5539 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5540 mm->unmap_area = arch_unmap_area_topdown;
5541 }
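
Under PAX_RANDMMAP each mm carries per-exec random deltas: the legacy (bottom-up) layout shifts TASK_UNMAPPED_BASE up by delta_mmap, while the top-down layout lowers mmap_base by delta_mmap + delta_stack so randomized mappings stay clear of the randomized stack above them. The delta fields are defined by other parts of the patch; the values below are made up purely to show the arithmetic:

    #include <stdio.h>

    int main(void)
    {
        unsigned long task_unmapped_base = 0x0000100000000000UL; /* illustrative */
        unsigned long top_down_base      = 0x00007ffff0000000UL; /* illustrative */
        unsigned long delta_mmap  = 0x3f8000UL;  /* made-up random delta */
        unsigned long delta_stack = 0x7f000UL;   /* made-up random delta */

        printf("legacy  base: %#lx\n", task_unmapped_base + delta_mmap);
        printf("topdown base: %#lx\n",
               top_down_base - (delta_mmap + delta_stack));
        return 0;
    }
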
5542diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
5543index 5829d2a..af84242 100644
5544--- a/arch/powerpc/mm/slice.c
5545+++ b/arch/powerpc/mm/slice.c
5546@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
5547 if ((mm->task_size - len) < addr)
5548 return 0;
5549 vma = find_vma(mm, addr);
5550- return (!vma || (addr + len) <= vma->vm_start);
5551+ return check_heap_stack_gap(vma, addr, len, 0);
5552 }
5553
5554 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
5555@@ -272,7 +272,7 @@ full_search:
5556 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
5557 continue;
5558 }
5559- if (!vma || addr + len <= vma->vm_start) {
5560+ if (check_heap_stack_gap(vma, addr, len, 0)) {
5561 /*
5562 * Remember the place where we stopped the search:
5563 */
5564@@ -329,10 +329,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
5565 }
5566 }
5567
5568- addr = mm->mmap_base;
5569- while (addr > len) {
5570+ if (mm->mmap_base < len)
5571+ addr = -ENOMEM;
5572+ else
5573+ addr = mm->mmap_base - len;
5574+
5575+ while (!IS_ERR_VALUE(addr)) {
5576 /* Go down by chunk size */
5577- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
5578+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
5579
5580 /* Check for hit with different page size */
5581 mask = slice_range_to_mask(addr, len);
5582@@ -352,7 +356,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
5583 * return with success:
5584 */
5585 vma = find_vma(mm, addr);
5586- if (!vma || (addr + len) <= vma->vm_start) {
5587+ if (check_heap_stack_gap(vma, addr, len, 0)) {
5588 /* remember the address as a hint for next time */
5589 if (use_cache)
5590 mm->free_area_cache = addr;
5591@@ -364,7 +368,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
5592 mm->cached_hole_size = vma->vm_start - addr;
5593
5594 /* try just below the current vma->vm_start */
5595- addr = vma->vm_start;
5596+ addr = skip_heap_stack_gap(vma, len, 0);
5597 }
5598
5599 /*
5600@@ -442,6 +446,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
5601 if (fixed && addr > (mm->task_size - len))
5602 return -EINVAL;
5603
5604+#ifdef CONFIG_PAX_RANDMMAP
5605+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
5606+ addr = 0;
5607+#endif
5608+
5609 /* If hint, make sure it matches our alignment restrictions */
5610 if (!fixed && addr) {
5611 addr = _ALIGN_UP(addr, 1ul << pshift);
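
slice.c swaps every open-coded "!vma || addr + len <= vma->vm_start" test for check_heap_stack_gap(), steps the top-down walk with skip_heap_stack_gap() until it yields an error value, and, under RANDMMAP, discards non-MAP_FIXED address hints (addr = 0) so callers cannot sidestep randomized placement. The helper itself is defined elsewhere in the patch, so the sketch below is an assumed model of what it enforces (it is believed to also consult a sysctl), not the implementation:

    /* Assumed semantics only: the candidate range must end before the
     * next vma, and if that vma grows down (a stack), an extra guard
     * gap plus the per-thread offset must separate them. */
    struct vma_model { unsigned long vm_start; unsigned long vm_flags; };
    #define VMM_GROWSDOWN 0x1UL

    static unsigned long heap_stack_gap = 64UL * 1024;  /* illustrative size */

    static int check_heap_stack_gap_model(const struct vma_model *vma,
                                          unsigned long addr, unsigned long len,
                                          unsigned long offset)
    {
        if (!vma)
            return 1;                      /* nothing above us: fits */
        if (addr + len > vma->vm_start)
            return 0;                      /* overlaps the next vma */
        if (vma->vm_flags & VMM_GROWSDOWN) /* keep clear of stacks */
            return addr + len + heap_stack_gap + offset <= vma->vm_start;
        return 1;
    }
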
5612diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
5613index c797832..ce575c8 100644
5614--- a/arch/s390/include/asm/atomic.h
5615+++ b/arch/s390/include/asm/atomic.h
5616@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
5617 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
5618 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5619
5620+#define atomic64_read_unchecked(v) atomic64_read(v)
5621+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5622+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5623+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5624+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5625+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5626+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5627+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5628+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5629+
5630 #define smp_mb__before_atomic_dec() smp_mb()
5631 #define smp_mb__after_atomic_dec() smp_mb()
5632 #define smp_mb__before_atomic_inc() smp_mb()
5633diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
5634index 4d7ccac..d03d0ad 100644
5635--- a/arch/s390/include/asm/cache.h
5636+++ b/arch/s390/include/asm/cache.h
5637@@ -9,8 +9,10 @@
5638 #ifndef __ARCH_S390_CACHE_H
5639 #define __ARCH_S390_CACHE_H
5640
5641-#define L1_CACHE_BYTES 256
5642+#include <linux/const.h>
5643+
5644 #define L1_CACHE_SHIFT 8
5645+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5646 #define NET_SKB_PAD 32
5647
5648 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
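
This L1_CACHE_BYTES rewrite recurs for score, sh and sparc below: the literal byte count is replaced by (_AC(1,UL) << L1_CACHE_SHIFT) so the same definition expands to a UL-typed constant in C but to a plain integer in assembly, where the UL suffix would not parse. _AC comes from linux/const.h and works like this (abridged from that header):

    /* Paste the suffix only when compiling C; drop it for the assembler. */
    #ifdef __ASSEMBLY__
    #define _AC(X, Y)   X
    #else
    #define __AC(X, Y)  (X##Y)
    #define _AC(X, Y)   __AC(X, Y)
    #endif

    #define L1_CACHE_SHIFT 8
    #define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)

    _Static_assert(L1_CACHE_BYTES == 256, "256-byte lines on s390");
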
5649diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
5650index 178ff96..8c93bd1 100644
5651--- a/arch/s390/include/asm/elf.h
5652+++ b/arch/s390/include/asm/elf.h
5653@@ -160,8 +160,14 @@ extern unsigned int vdso_enabled;
5654 the loader. We need to make sure that it is out of the way of the program
5655 that it will "exec", and that there is sufficient room for the brk. */
5656
5657-extern unsigned long randomize_et_dyn(unsigned long base);
5658-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
5659+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
5660+
5661+#ifdef CONFIG_PAX_ASLR
5662+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
5663+
5664+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
5665+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
5666+#endif
5667
5668 /* This yields a mask that user programs can use to figure out what
5669 instruction set this CPU supports. */
5670@@ -210,9 +216,6 @@ struct linux_binprm;
5671 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
5672 int arch_setup_additional_pages(struct linux_binprm *, int);
5673
5674-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
5675-#define arch_randomize_brk arch_randomize_brk
5676-
5677 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
5678
5679 #endif
5680diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
5681index c4a93d6..4d2a9b4 100644
5682--- a/arch/s390/include/asm/exec.h
5683+++ b/arch/s390/include/asm/exec.h
5684@@ -7,6 +7,6 @@
5685 #ifndef __ASM_EXEC_H
5686 #define __ASM_EXEC_H
5687
5688-extern unsigned long arch_align_stack(unsigned long sp);
5689+#define arch_align_stack(x) ((x) & ~0xfUL)
5690
5691 #endif /* __ASM_EXEC_H */
5692diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
5693index 34268df..ea97318 100644
5694--- a/arch/s390/include/asm/uaccess.h
5695+++ b/arch/s390/include/asm/uaccess.h
5696@@ -252,6 +252,10 @@ static inline unsigned long __must_check
5697 copy_to_user(void __user *to, const void *from, unsigned long n)
5698 {
5699 might_fault();
5700+
5701+ if ((long)n < 0)
5702+ return n;
5703+
5704 if (access_ok(VERIFY_WRITE, to, n))
5705 n = __copy_to_user(to, from, n);
5706 return n;
5707@@ -277,6 +281,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
5708 static inline unsigned long __must_check
5709 __copy_from_user(void *to, const void __user *from, unsigned long n)
5710 {
5711+ if ((long)n < 0)
5712+ return n;
5713+
5714 if (__builtin_constant_p(n) && (n <= 256))
5715 return uaccess.copy_from_user_small(n, from, to);
5716 else
5717@@ -308,10 +315,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
5718 static inline unsigned long __must_check
5719 copy_from_user(void *to, const void __user *from, unsigned long n)
5720 {
5721- unsigned int sz = __compiletime_object_size(to);
5722+ size_t sz = __compiletime_object_size(to);
5723
5724 might_fault();
5725- if (unlikely(sz != -1 && sz < n)) {
5726+
5727+ if ((long)n < 0)
5728+ return n;
5729+
5730+ if (unlikely(sz != (size_t)-1 && sz < n)) {
5731 copy_from_user_overflow();
5732 return n;
5733 }
5734diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
5735index 4610dea..cf0af21 100644
5736--- a/arch/s390/kernel/module.c
5737+++ b/arch/s390/kernel/module.c
5738@@ -171,11 +171,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
5739
5740 /* Increase core size by size of got & plt and set start
5741 offsets for got and plt. */
5742- me->core_size = ALIGN(me->core_size, 4);
5743- me->arch.got_offset = me->core_size;
5744- me->core_size += me->arch.got_size;
5745- me->arch.plt_offset = me->core_size;
5746- me->core_size += me->arch.plt_size;
5747+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
5748+ me->arch.got_offset = me->core_size_rw;
5749+ me->core_size_rw += me->arch.got_size;
5750+ me->arch.plt_offset = me->core_size_rx;
5751+ me->core_size_rx += me->arch.plt_size;
5752 return 0;
5753 }
5754
5755@@ -252,7 +252,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
5756 if (info->got_initialized == 0) {
5757 Elf_Addr *gotent;
5758
5759- gotent = me->module_core + me->arch.got_offset +
5760+ gotent = me->module_core_rw + me->arch.got_offset +
5761 info->got_offset;
5762 *gotent = val;
5763 info->got_initialized = 1;
5764@@ -276,7 +276,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
5765 else if (r_type == R_390_GOTENT ||
5766 r_type == R_390_GOTPLTENT)
5767 *(unsigned int *) loc =
5768- (val + (Elf_Addr) me->module_core - loc) >> 1;
5769+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
5770 else if (r_type == R_390_GOT64 ||
5771 r_type == R_390_GOTPLT64)
5772 *(unsigned long *) loc = val;
5773@@ -290,7 +290,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
5774 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
5775 if (info->plt_initialized == 0) {
5776 unsigned int *ip;
5777- ip = me->module_core + me->arch.plt_offset +
5778+ ip = me->module_core_rx + me->arch.plt_offset +
5779 info->plt_offset;
5780 #ifndef CONFIG_64BIT
5781 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
5782@@ -315,7 +315,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
5783 val - loc + 0xffffUL < 0x1ffffeUL) ||
5784 (r_type == R_390_PLT32DBL &&
5785 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
5786- val = (Elf_Addr) me->module_core +
5787+ val = (Elf_Addr) me->module_core_rx +
5788 me->arch.plt_offset +
5789 info->plt_offset;
5790 val += rela->r_addend - loc;
5791@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
5792 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
5793 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
5794 val = val + rela->r_addend -
5795- ((Elf_Addr) me->module_core + me->arch.got_offset);
5796+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
5797 if (r_type == R_390_GOTOFF16)
5798 *(unsigned short *) loc = val;
5799 else if (r_type == R_390_GOTOFF32)
5800@@ -347,7 +347,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
5801 break;
5802 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
5803 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
5804- val = (Elf_Addr) me->module_core + me->arch.got_offset +
5805+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
5806 rela->r_addend - loc;
5807 if (r_type == R_390_GOTPC)
5808 *(unsigned int *) loc = val;
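
The same RX/RW split as module_32.c on powerpc: the GOT holds runtime-relocated pointers and must stay writable, so it is carved out of core_size_rw, while the PLT is pure code and lands in core_size_rx; every relocation that previously offset from module_core now picks the matching half. A sketch of the layout accounting (field names follow the hunk; ALIGN_UP mirrors the kernel's ALIGN for power-of-two alignments):

    #include <stdio.h>

    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    struct mod_sizes { unsigned long core_size_rx, core_size_rw; };

    /* Reserve the GOT in the RW half and the PLT in the RX half,
     * mirroring module_frob_arch_sections() above. */
    static void reserve_got_plt(struct mod_sizes *m,
                                unsigned long got_size, unsigned long plt_size,
                                unsigned long *got_off, unsigned long *plt_off)
    {
        m->core_size_rw = ALIGN_UP(m->core_size_rw, 4);
        *got_off = m->core_size_rw;          /* GOT: writable data */
        m->core_size_rw += got_size;

        *plt_off = m->core_size_rx;          /* PLT: executable stubs */
        m->core_size_rx += plt_size;
    }

    int main(void)
    {
        struct mod_sizes m = { .core_size_rx = 4096, .core_size_rw = 4094 };
        unsigned long got, plt;

        reserve_got_plt(&m, 64, 128, &got, &plt);
        printf("got@rw+%lu plt@rx+%lu\n", got, plt); /* got@rw+4096 plt@rx+4096 */
        return 0;
    }
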
5809diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
5810index cd31ad4..201c5a3 100644
5811--- a/arch/s390/kernel/process.c
5812+++ b/arch/s390/kernel/process.c
5813@@ -283,39 +283,3 @@ unsigned long get_wchan(struct task_struct *p)
5814 }
5815 return 0;
5816 }
5817-
5818-unsigned long arch_align_stack(unsigned long sp)
5819-{
5820- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5821- sp -= get_random_int() & ~PAGE_MASK;
5822- return sp & ~0xf;
5823-}
5824-
5825-static inline unsigned long brk_rnd(void)
5826-{
5827- /* 8MB for 32bit, 1GB for 64bit */
5828- if (is_32bit_task())
5829- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
5830- else
5831- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
5832-}
5833-
5834-unsigned long arch_randomize_brk(struct mm_struct *mm)
5835-{
5836- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
5837-
5838- if (ret < mm->brk)
5839- return mm->brk;
5840- return ret;
5841-}
5842-
5843-unsigned long randomize_et_dyn(unsigned long base)
5844-{
5845- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
5846-
5847- if (!(current->flags & PF_RANDOMIZE))
5848- return base;
5849- if (ret < base)
5850- return base;
5851- return ret;
5852-}
5853diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
5854index c59a5ef..3fae59c 100644
5855--- a/arch/s390/mm/mmap.c
5856+++ b/arch/s390/mm/mmap.c
5857@@ -90,10 +90,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5858 */
5859 if (mmap_is_legacy()) {
5860 mm->mmap_base = TASK_UNMAPPED_BASE;
5861+
5862+#ifdef CONFIG_PAX_RANDMMAP
5863+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5864+ mm->mmap_base += mm->delta_mmap;
5865+#endif
5866+
5867 mm->get_unmapped_area = arch_get_unmapped_area;
5868 mm->unmap_area = arch_unmap_area;
5869 } else {
5870 mm->mmap_base = mmap_base();
5871+
5872+#ifdef CONFIG_PAX_RANDMMAP
5873+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5874+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5875+#endif
5876+
5877 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5878 mm->unmap_area = arch_unmap_area_topdown;
5879 }
5880@@ -172,10 +184,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5881 */
5882 if (mmap_is_legacy()) {
5883 mm->mmap_base = TASK_UNMAPPED_BASE;
5884+
5885+#ifdef CONFIG_PAX_RANDMMAP
5886+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5887+ mm->mmap_base += mm->delta_mmap;
5888+#endif
5889+
5890 mm->get_unmapped_area = s390_get_unmapped_area;
5891 mm->unmap_area = arch_unmap_area;
5892 } else {
5893 mm->mmap_base = mmap_base();
5894+
5895+#ifdef CONFIG_PAX_RANDMMAP
5896+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5897+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5898+#endif
5899+
5900 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
5901 mm->unmap_area = arch_unmap_area_topdown;
5902 }
5903diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
5904index ae3d59f..f65f075 100644
5905--- a/arch/score/include/asm/cache.h
5906+++ b/arch/score/include/asm/cache.h
5907@@ -1,7 +1,9 @@
5908 #ifndef _ASM_SCORE_CACHE_H
5909 #define _ASM_SCORE_CACHE_H
5910
5911+#include <linux/const.h>
5912+
5913 #define L1_CACHE_SHIFT 4
5914-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5915+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5916
5917 #endif /* _ASM_SCORE_CACHE_H */
5918diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
5919index f9f3cd5..58ff438 100644
5920--- a/arch/score/include/asm/exec.h
5921+++ b/arch/score/include/asm/exec.h
5922@@ -1,6 +1,6 @@
5923 #ifndef _ASM_SCORE_EXEC_H
5924 #define _ASM_SCORE_EXEC_H
5925
5926-extern unsigned long arch_align_stack(unsigned long sp);
5927+#define arch_align_stack(x) (x)
5928
5929 #endif /* _ASM_SCORE_EXEC_H */
5930diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
5931index 637970c..0b6556b 100644
5932--- a/arch/score/kernel/process.c
5933+++ b/arch/score/kernel/process.c
5934@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
5935
5936 return task_pt_regs(task)->cp0_epc;
5937 }
5938-
5939-unsigned long arch_align_stack(unsigned long sp)
5940-{
5941- return sp;
5942-}
5943diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
5944index ef9e555..331bd29 100644
5945--- a/arch/sh/include/asm/cache.h
5946+++ b/arch/sh/include/asm/cache.h
5947@@ -9,10 +9,11 @@
5948 #define __ASM_SH_CACHE_H
5949 #ifdef __KERNEL__
5950
5951+#include <linux/const.h>
5952 #include <linux/init.h>
5953 #include <cpu/cache.h>
5954
5955-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5956+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5957
5958 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
5959
5960diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
5961index afeb710..e8366ef 100644
5962--- a/arch/sh/mm/mmap.c
5963+++ b/arch/sh/mm/mmap.c
5964@@ -49,6 +49,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
5965 struct vm_area_struct *vma;
5966 unsigned long start_addr;
5967 int do_colour_align;
5968+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5969
5970 if (flags & MAP_FIXED) {
5971 /* We do not accept a shared mapping if it would violate
5972@@ -74,8 +75,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
5973 addr = PAGE_ALIGN(addr);
5974
5975 vma = find_vma(mm, addr);
5976- if (TASK_SIZE - len >= addr &&
5977- (!vma || addr + len <= vma->vm_start))
5978+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5979 return addr;
5980 }
5981
5982@@ -106,7 +106,7 @@ full_search:
5983 }
5984 return -ENOMEM;
5985 }
5986- if (likely(!vma || addr + len <= vma->vm_start)) {
5987+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
5988 /*
5989 * Remember the place where we stopped the search:
5990 */
5991@@ -131,6 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5992 struct mm_struct *mm = current->mm;
5993 unsigned long addr = addr0;
5994 int do_colour_align;
5995+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5996
5997 if (flags & MAP_FIXED) {
5998 /* We do not accept a shared mapping if it would violate
5999@@ -157,8 +158,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6000 addr = PAGE_ALIGN(addr);
6001
6002 vma = find_vma(mm, addr);
6003- if (TASK_SIZE - len >= addr &&
6004- (!vma || addr + len <= vma->vm_start))
6005+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
6006 return addr;
6007 }
6008
6009@@ -179,7 +179,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6010 /* make sure it can fit in the remaining address space */
6011 if (likely(addr > len)) {
6012 vma = find_vma(mm, addr-len);
6013- if (!vma || addr <= vma->vm_start) {
6014+ if (check_heap_stack_gap(vma, addr - len, len, offset)) {
6015 /* remember the address as a hint for next time */
6016 return (mm->free_area_cache = addr-len);
6017 }
6018@@ -188,18 +188,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6019 if (unlikely(mm->mmap_base < len))
6020 goto bottomup;
6021
6022- addr = mm->mmap_base-len;
6023- if (do_colour_align)
6024- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6025+ addr = mm->mmap_base - len;
6026
6027 do {
6028+ if (do_colour_align)
6029+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6030 /*
6031 * Lookup failure means no vma is above this address,
6032 * else if new region fits below vma->vm_start,
6033 * return with success:
6034 */
6035 vma = find_vma(mm, addr);
6036- if (likely(!vma || addr+len <= vma->vm_start)) {
6037+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
6038 /* remember the address as a hint for next time */
6039 return (mm->free_area_cache = addr);
6040 }
6041@@ -209,10 +209,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6042 mm->cached_hole_size = vma->vm_start - addr;
6043
6044 /* try just below the current vma->vm_start */
6045- addr = vma->vm_start-len;
6046- if (do_colour_align)
6047- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6048- } while (likely(len < vma->vm_start));
6049+ addr = skip_heap_stack_gap(vma, len, offset);
6050+ } while (!IS_ERR_VALUE(addr));
6051
6052 bottomup:
6053 /*
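
The sh top-down search is reworked the same way as slice.c above: the gap offset comes from gr_rand_threadstack_offset(), grsecurity's per-thread stack-offset randomization; colour alignment moves inside the loop so candidates produced by skip_heap_stack_gap() are re-aligned; and termination becomes an explicit error-valued address rather than a length comparison. A toy skeleton of the reworked loop (the three helpers are stand-ins with assumed behaviour):

    #include <errno.h>
    #include <stdio.h>

    #define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-4095)

    /* Toy stand-ins for COLOUR_ALIGN_DOWN, find_vma() and
     * skip_heap_stack_gap(). */
    static unsigned long align_down(unsigned long a) { return a & ~0xffffUL; }

    static int range_is_free(unsigned long a, unsigned long l)
    {
        (void)l;
        return a < 0x30000000UL;           /* pretend everything above is taken */
    }

    static unsigned long skip_gap(unsigned long a, unsigned long l)
    {
        if (a < l + 0x10000UL)
            return (unsigned long)-ENOMEM; /* search underflowed: give up */
        return a - 0x10000UL;              /* drop below the blocking vma */
    }

    static unsigned long topdown_search(unsigned long base, unsigned long len)
    {
        unsigned long addr;

        if (base < len)
            return (unsigned long)-ENOMEM;
        addr = base - len;

        while (!IS_ERR_VALUE(addr)) {
            addr = align_down(addr);       /* re-align every candidate */
            if (range_is_free(addr, len))
                return addr;               /* found: becomes free_area_cache */
            addr = skip_gap(addr, len);
        }
        return (unsigned long)-ENOMEM;     /* caller falls back to bottomup */
    }

    int main(void)
    {
        printf("%#lx\n", topdown_search(0x40000000UL, 0x10000UL));
        return 0;
    }
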
6054diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
6055index be56a24..443328f 100644
6056--- a/arch/sparc/include/asm/atomic_64.h
6057+++ b/arch/sparc/include/asm/atomic_64.h
6058@@ -14,18 +14,40 @@
6059 #define ATOMIC64_INIT(i) { (i) }
6060
6061 #define atomic_read(v) (*(volatile int *)&(v)->counter)
6062+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6063+{
6064+ return v->counter;
6065+}
6066 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
6067+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6068+{
6069+ return v->counter;
6070+}
6071
6072 #define atomic_set(v, i) (((v)->counter) = i)
6073+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6074+{
6075+ v->counter = i;
6076+}
6077 #define atomic64_set(v, i) (((v)->counter) = i)
6078+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6079+{
6080+ v->counter = i;
6081+}
6082
6083 extern void atomic_add(int, atomic_t *);
6084+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
6085 extern void atomic64_add(long, atomic64_t *);
6086+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
6087 extern void atomic_sub(int, atomic_t *);
6088+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
6089 extern void atomic64_sub(long, atomic64_t *);
6090+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
6091
6092 extern int atomic_add_ret(int, atomic_t *);
6093+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
6094 extern long atomic64_add_ret(long, atomic64_t *);
6095+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
6096 extern int atomic_sub_ret(int, atomic_t *);
6097 extern long atomic64_sub_ret(long, atomic64_t *);
6098
6099@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
6100 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
6101
6102 #define atomic_inc_return(v) atomic_add_ret(1, v)
6103+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6104+{
6105+ return atomic_add_ret_unchecked(1, v);
6106+}
6107 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
6108+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6109+{
6110+ return atomic64_add_ret_unchecked(1, v);
6111+}
6112
6113 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
6114 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
6115
6116 #define atomic_add_return(i, v) atomic_add_ret(i, v)
6117+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6118+{
6119+ return atomic_add_ret_unchecked(i, v);
6120+}
6121 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
6122+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6123+{
6124+ return atomic64_add_ret_unchecked(i, v);
6125+}
6126
6127 /*
6128 * atomic_inc_and_test - increment and test
6129@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
6130 * other cases.
6131 */
6132 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
6133+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6134+{
6135+ return atomic_inc_return_unchecked(v) == 0;
6136+}
6137 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6138
6139 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
6140@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
6141 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
6142
6143 #define atomic_inc(v) atomic_add(1, v)
6144+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6145+{
6146+ atomic_add_unchecked(1, v);
6147+}
6148 #define atomic64_inc(v) atomic64_add(1, v)
6149+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6150+{
6151+ atomic64_add_unchecked(1, v);
6152+}
6153
6154 #define atomic_dec(v) atomic_sub(1, v)
6155+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6156+{
6157+ atomic_sub_unchecked(1, v);
6158+}
6159 #define atomic64_dec(v) atomic64_sub(1, v)
6160+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6161+{
6162+ atomic64_sub_unchecked(1, v);
6163+}
6164
6165 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
6166 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
6167
6168 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
6169+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6170+{
6171+ return cmpxchg(&v->counter, old, new);
6172+}
6173 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
6174+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6175+{
6176+ return xchg(&v->counter, new);
6177+}
6178
6179 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
6180 {
6181- int c, old;
6182+ int c, old, new;
6183 c = atomic_read(v);
6184 for (;;) {
6185- if (unlikely(c == (u)))
6186+ if (unlikely(c == u))
6187 break;
6188- old = atomic_cmpxchg((v), c, c + (a));
6189+
6190+ asm volatile("addcc %2, %0, %0\n"
6191+
6192+#ifdef CONFIG_PAX_REFCOUNT
6193+ "tvs %%icc, 6\n"
6194+#endif
6195+
6196+ : "=r" (new)
6197+ : "0" (c), "ir" (a)
6198+ : "cc");
6199+
6200+ old = atomic_cmpxchg(v, c, new);
6201 if (likely(old == c))
6202 break;
6203 c = old;
6204@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
6205 #define atomic64_cmpxchg(v, o, n) \
6206 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6207 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
6208+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6209+{
6210+ return xchg(&v->counter, new);
6211+}
6212
6213 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
6214 {
6215- long c, old;
6216+ long c, old, new;
6217 c = atomic64_read(v);
6218 for (;;) {
6219- if (unlikely(c == (u)))
6220+ if (unlikely(c == u))
6221 break;
6222- old = atomic64_cmpxchg((v), c, c + (a));
6223+
6224+ asm volatile("addcc %2, %0, %0\n"
6225+
6226+#ifdef CONFIG_PAX_REFCOUNT
6227+ "tvs %%xcc, 6\n"
6228+#endif
6229+
6230+ : "=r" (new)
6231+ : "0" (c), "ir" (a)
6232+ : "cc");
6233+
6234+ old = atomic64_cmpxchg(v, c, new);
6235 if (likely(old == c))
6236 break;
6237 c = old;
6238 }
6239- return c != (u);
6240+ return c != u;
6241 }
6242
6243 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
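
The sparc atomics grow two things: *_unchecked variants that keep plain wrapping semantics for counters where overflow is harmless (statistics and the like), and, under PAX_REFCOUNT, overflow detection on the checked variants, here by replacing add with addcc and trapping via "tvs %icc, 6" when the overflow condition code is set, so a reference-count overflow becomes a kernel trap instead of a silent wrap to an exploitable value. A portable, non-atomic analogy using compiler builtins (the kernel relies on the inline asm above; this only illustrates the checked/unchecked split):

    #include <stdio.h>

    /* Checked add: trap on signed overflow, like addcc + tvs above.
     * Atomicity is deliberately omitted; the point is the check. */
    static int atomic_add_return_model(int i, int *v)
    {
        int nv;
        if (__builtin_add_overflow(*v, i, &nv))
            __builtin_trap();       /* PAX_REFCOUNT: overflow is fatal */
        *v = nv;
        return nv;
    }

    /* Unchecked add: deliberate wraparound stays allowed. */
    static int atomic_add_return_unchecked_model(int i, int *v)
    {
        *v += i;                    /* may wrap; fine for pure counters */
        return *v;
    }

    int main(void)
    {
        int a = 5;
        printf("%d\n", atomic_add_return_model(3, &a));            /* 8 */
        printf("%d\n", atomic_add_return_unchecked_model(-1, &a)); /* 7 */
        return 0;
    }
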
6244diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
6245index 5bb6991..5c2132e 100644
6246--- a/arch/sparc/include/asm/cache.h
6247+++ b/arch/sparc/include/asm/cache.h
6248@@ -7,10 +7,12 @@
6249 #ifndef _SPARC_CACHE_H
6250 #define _SPARC_CACHE_H
6251
6252+#include <linux/const.h>
6253+
6254 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
6255
6256 #define L1_CACHE_SHIFT 5
6257-#define L1_CACHE_BYTES 32
6258+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6259
6260 #ifdef CONFIG_SPARC32
6261 #define SMP_CACHE_BYTES_SHIFT 5
6262diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
6263index ac74a2c..a9e58af 100644
6264--- a/arch/sparc/include/asm/elf_32.h
6265+++ b/arch/sparc/include/asm/elf_32.h
6266@@ -114,6 +114,13 @@ typedef struct {
6267
6268 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
6269
6270+#ifdef CONFIG_PAX_ASLR
6271+#define PAX_ELF_ET_DYN_BASE 0x10000UL
6272+
6273+#define PAX_DELTA_MMAP_LEN 16
6274+#define PAX_DELTA_STACK_LEN 16
6275+#endif
6276+
6277 /* This yields a mask that user programs can use to figure out what
6278 instruction set this cpu supports. This can NOT be done in userspace
6279 on Sparc. */
6280diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
6281index 370ca1e..d4f4a98 100644
6282--- a/arch/sparc/include/asm/elf_64.h
6283+++ b/arch/sparc/include/asm/elf_64.h
6284@@ -189,6 +189,13 @@ typedef struct {
6285 #define ELF_ET_DYN_BASE 0x0000010000000000UL
6286 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
6287
6288+#ifdef CONFIG_PAX_ASLR
6289+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
6290+
6291+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
6292+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
6293+#endif
6294+
6295 extern unsigned long sparc64_elf_hwcap;
6296 #define ELF_HWCAP sparc64_elf_hwcap
6297
6298diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
6299index 9b1c36d..209298b 100644
6300--- a/arch/sparc/include/asm/pgalloc_32.h
6301+++ b/arch/sparc/include/asm/pgalloc_32.h
6302@@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
6303 }
6304
6305 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
6306+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
6307
6308 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
6309 unsigned long address)
6310diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
6311index bcfe063..b333142 100644
6312--- a/arch/sparc/include/asm/pgalloc_64.h
6313+++ b/arch/sparc/include/asm/pgalloc_64.h
6314@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
6315 }
6316
6317 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
6318+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
6319
6320 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
6321 {
6322diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
6323index 6fc1348..390c50a 100644
6324--- a/arch/sparc/include/asm/pgtable_32.h
6325+++ b/arch/sparc/include/asm/pgtable_32.h
6326@@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
6327 #define PAGE_SHARED SRMMU_PAGE_SHARED
6328 #define PAGE_COPY SRMMU_PAGE_COPY
6329 #define PAGE_READONLY SRMMU_PAGE_RDONLY
6330+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
6331+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
6332+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
6333 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
6334
6335 /* Top-level page directory - dummy used by init-mm.
6336@@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd;
6337
6338 /* xwr */
6339 #define __P000 PAGE_NONE
6340-#define __P001 PAGE_READONLY
6341-#define __P010 PAGE_COPY
6342-#define __P011 PAGE_COPY
6343+#define __P001 PAGE_READONLY_NOEXEC
6344+#define __P010 PAGE_COPY_NOEXEC
6345+#define __P011 PAGE_COPY_NOEXEC
6346 #define __P100 PAGE_READONLY
6347 #define __P101 PAGE_READONLY
6348 #define __P110 PAGE_COPY
6349 #define __P111 PAGE_COPY
6350
6351 #define __S000 PAGE_NONE
6352-#define __S001 PAGE_READONLY
6353-#define __S010 PAGE_SHARED
6354-#define __S011 PAGE_SHARED
6355+#define __S001 PAGE_READONLY_NOEXEC
6356+#define __S010 PAGE_SHARED_NOEXEC
6357+#define __S011 PAGE_SHARED_NOEXEC
6358 #define __S100 PAGE_READONLY
6359 #define __S101 PAGE_READONLY
6360 #define __S110 PAGE_SHARED
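
The sparc32 protection maps gain _NOEXEC variants (defined in the pgtsrmmu.h hunk just below, without SRMMU_EXEC), so any private or shared mapping requested without PROT_EXEC now yields a non-executable pgprot. The __P/__S digits read x-w-r, matching the index mm builds from VM_EXEC (4), VM_WRITE (2) and VM_READ (1). A tiny demo of the indexing (the string table stands in for the pgprot values):

    #include <stdio.h>

    /* Mirror of the __P table above; index bits are x(4) w(2) r(1). */
    static const char *p_map[8] = {
        "NONE",            /* __P000 */
        "READONLY_NOEXEC", /* __P001 */
        "COPY_NOEXEC",     /* __P010 */
        "COPY_NOEXEC",     /* __P011 */
        "READONLY",        /* __P100: exec implies fetchable */
        "READONLY",        /* __P101 */
        "COPY",            /* __P110 */
        "COPY",            /* __P111 */
    };

    int main(void)
    {
        unsigned r = 1, w = 0, x = 0;                   /* PROT_READ only */
        printf("%s\n", p_map[(x << 2) | (w << 1) | r]); /* READONLY_NOEXEC */
        return 0;
    }
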
6361diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
6362index 79da178..c2eede8 100644
6363--- a/arch/sparc/include/asm/pgtsrmmu.h
6364+++ b/arch/sparc/include/asm/pgtsrmmu.h
6365@@ -115,6 +115,11 @@
6366 SRMMU_EXEC | SRMMU_REF)
6367 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
6368 SRMMU_EXEC | SRMMU_REF)
6369+
6370+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
6371+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
6372+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
6373+
6374 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
6375 SRMMU_DIRTY | SRMMU_REF)
6376
6377diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
6378index 9689176..63c18ea 100644
6379--- a/arch/sparc/include/asm/spinlock_64.h
6380+++ b/arch/sparc/include/asm/spinlock_64.h
6381@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
6382
6383 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
6384
6385-static void inline arch_read_lock(arch_rwlock_t *lock)
6386+static inline void arch_read_lock(arch_rwlock_t *lock)
6387 {
6388 unsigned long tmp1, tmp2;
6389
6390 __asm__ __volatile__ (
6391 "1: ldsw [%2], %0\n"
6392 " brlz,pn %0, 2f\n"
6393-"4: add %0, 1, %1\n"
6394+"4: addcc %0, 1, %1\n"
6395+
6396+#ifdef CONFIG_PAX_REFCOUNT
6397+" tvs %%icc, 6\n"
6398+#endif
6399+
6400 " cas [%2], %0, %1\n"
6401 " cmp %0, %1\n"
6402 " bne,pn %%icc, 1b\n"
6403@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
6404 " .previous"
6405 : "=&r" (tmp1), "=&r" (tmp2)
6406 : "r" (lock)
6407- : "memory");
6408+ : "memory", "cc");
6409 }
6410
6411-static int inline arch_read_trylock(arch_rwlock_t *lock)
6412+static inline int arch_read_trylock(arch_rwlock_t *lock)
6413 {
6414 int tmp1, tmp2;
6415
6416@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
6417 "1: ldsw [%2], %0\n"
6418 " brlz,a,pn %0, 2f\n"
6419 " mov 0, %0\n"
6420-" add %0, 1, %1\n"
6421+" addcc %0, 1, %1\n"
6422+
6423+#ifdef CONFIG_PAX_REFCOUNT
6424+" tvs %%icc, 6\n"
6425+#endif
6426+
6427 " cas [%2], %0, %1\n"
6428 " cmp %0, %1\n"
6429 " bne,pn %%icc, 1b\n"
6430@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
6431 return tmp1;
6432 }
6433
6434-static void inline arch_read_unlock(arch_rwlock_t *lock)
6435+static inline void arch_read_unlock(arch_rwlock_t *lock)
6436 {
6437 unsigned long tmp1, tmp2;
6438
6439 __asm__ __volatile__(
6440 "1: lduw [%2], %0\n"
6441-" sub %0, 1, %1\n"
6442+" subcc %0, 1, %1\n"
6443+
6444+#ifdef CONFIG_PAX_REFCOUNT
6445+" tvs %%icc, 6\n"
6446+#endif
6447+
6448 " cas [%2], %0, %1\n"
6449 " cmp %0, %1\n"
6450 " bne,pn %%xcc, 1b\n"
6451@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
6452 : "memory");
6453 }
6454
6455-static void inline arch_write_lock(arch_rwlock_t *lock)
6456+static inline void arch_write_lock(arch_rwlock_t *lock)
6457 {
6458 unsigned long mask, tmp1, tmp2;
6459
6460@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
6461 : "memory");
6462 }
6463
6464-static void inline arch_write_unlock(arch_rwlock_t *lock)
6465+static inline void arch_write_unlock(arch_rwlock_t *lock)
6466 {
6467 __asm__ __volatile__(
6468 " stw %%g0, [%0]"
6469@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
6470 : "memory");
6471 }
6472
6473-static int inline arch_write_trylock(arch_rwlock_t *lock)
6474+static inline int arch_write_trylock(arch_rwlock_t *lock)
6475 {
6476 unsigned long mask, tmp1, tmp2, result;
6477
6478diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
6479index 25849ae..924c54b 100644
6480--- a/arch/sparc/include/asm/thread_info_32.h
6481+++ b/arch/sparc/include/asm/thread_info_32.h
6482@@ -49,6 +49,8 @@ struct thread_info {
6483 unsigned long w_saved;
6484
6485 struct restart_block restart_block;
6486+
6487+ unsigned long lowest_stack;
6488 };
6489
6490 /*
6491diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
6492index a3fe4dc..cae132a 100644
6493--- a/arch/sparc/include/asm/thread_info_64.h
6494+++ b/arch/sparc/include/asm/thread_info_64.h
6495@@ -63,6 +63,8 @@ struct thread_info {
6496 struct pt_regs *kern_una_regs;
6497 unsigned int kern_una_insn;
6498
6499+ unsigned long lowest_stack;
6500+
6501 unsigned long fpregs[0] __attribute__ ((aligned(64)));
6502 };
6503
6504@@ -193,10 +195,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
6505 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
6506 /* flag bit 6 is available */
6507 #define TIF_32BIT 7 /* 32-bit binary */
6508-/* flag bit 8 is available */
6509+#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
6510 #define TIF_SECCOMP 9 /* secure computing */
6511 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
6512 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
6513+
6514 /* NOTE: Thread flags >= 12 should be ones we have no interest
6515 * in using in assembly, else we can't use the mask as
6516 * an immediate value in instructions such as andcc.
6517@@ -215,12 +218,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
6518 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
6519 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6520 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
6521+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6522
6523 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
6524 _TIF_DO_NOTIFY_RESUME_MASK | \
6525 _TIF_NEED_RESCHED)
6526 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
6527
6528+#define _TIF_WORK_SYSCALL \
6529+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
6530+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6531+
6532+
6533 /*
6534 * Thread-synchronous status.
6535 *
6536diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
6537index 0167d26..9acd8ed 100644
6538--- a/arch/sparc/include/asm/uaccess.h
6539+++ b/arch/sparc/include/asm/uaccess.h
6540@@ -1,5 +1,13 @@
6541 #ifndef ___ASM_SPARC_UACCESS_H
6542 #define ___ASM_SPARC_UACCESS_H
6543+
6544+#ifdef __KERNEL__
6545+#ifndef __ASSEMBLY__
6546+#include <linux/types.h>
6547+extern void check_object_size(const void *ptr, unsigned long n, bool to);
6548+#endif
6549+#endif
6550+
6551 #if defined(__sparc__) && defined(__arch64__)
6552 #include <asm/uaccess_64.h>
6553 #else
6554diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
6555index 53a28dd..50c38c3 100644
6556--- a/arch/sparc/include/asm/uaccess_32.h
6557+++ b/arch/sparc/include/asm/uaccess_32.h
6558@@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
6559
6560 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
6561 {
6562- if (n && __access_ok((unsigned long) to, n))
6563+ if ((long)n < 0)
6564+ return n;
6565+
6566+ if (n && __access_ok((unsigned long) to, n)) {
6567+ if (!__builtin_constant_p(n))
6568+ check_object_size(from, n, true);
6569 return __copy_user(to, (__force void __user *) from, n);
6570- else
6571+ } else
6572 return n;
6573 }
6574
6575 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
6576 {
6577+ if ((long)n < 0)
6578+ return n;
6579+
6580+ if (!__builtin_constant_p(n))
6581+ check_object_size(from, n, true);
6582+
6583 return __copy_user(to, (__force void __user *) from, n);
6584 }
6585
6586 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
6587 {
6588- if (n && __access_ok((unsigned long) from, n))
6589+ if ((long)n < 0)
6590+ return n;
6591+
6592+ if (n && __access_ok((unsigned long) from, n)) {
6593+ if (!__builtin_constant_p(n))
6594+ check_object_size(to, n, false);
6595 return __copy_user((__force void __user *) to, from, n);
6596- else
6597+ } else
6598 return n;
6599 }
6600
6601 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
6602 {
6603+ if ((long)n < 0)
6604+ return n;
6605+
6606 return __copy_user((__force void __user *) to, from, n);
6607 }
6608
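
The four copy helpers above now share one guard pattern: a size with the sign bit set (typically an underflowed length computation) is rejected outright, and non-constant sizes are routed through check_object_size() so the hardened-usercopy logic can compare the copy length against the bounds of the source or destination object. A compressed sketch of the pattern, with pax_do_copy() as a placeholder for __copy_user(), not a real symbol:

static inline unsigned long
guarded_copy(void *dst, const void *src, unsigned long n, bool to_user)
{
	if ((long)n < 0)		/* sign bit set: underflowed length */
		return n;		/* report all n bytes as uncopied */
	if (!__builtin_constant_p(n))	/* compile-time sizes are trusted */
		check_object_size(to_user ? src : dst, n, to_user);
	return pax_do_copy(dst, src, n);
}

The 64-bit variants below add a size > INT_MAX clamp on top of the same check.
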
6609diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
6610index 73083e1..2bc62a6 100644
6611--- a/arch/sparc/include/asm/uaccess_64.h
6612+++ b/arch/sparc/include/asm/uaccess_64.h
6613@@ -10,6 +10,7 @@
6614 #include <linux/compiler.h>
6615 #include <linux/string.h>
6616 #include <linux/thread_info.h>
6617+#include <linux/kernel.h>
6618 #include <asm/asi.h>
6619 #include <asm/spitfire.h>
6620 #include <asm-generic/uaccess-unaligned.h>
6621@@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
6622 static inline unsigned long __must_check
6623 copy_from_user(void *to, const void __user *from, unsigned long size)
6624 {
6625- unsigned long ret = ___copy_from_user(to, from, size);
6626+ unsigned long ret;
6627
6628+ if ((long)size < 0 || size > INT_MAX)
6629+ return size;
6630+
6631+ if (!__builtin_constant_p(size))
6632+ check_object_size(to, size, false);
6633+
6634+ ret = ___copy_from_user(to, from, size);
6635 if (unlikely(ret))
6636 ret = copy_from_user_fixup(to, from, size);
6637
6638@@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
6639 static inline unsigned long __must_check
6640 copy_to_user(void __user *to, const void *from, unsigned long size)
6641 {
6642- unsigned long ret = ___copy_to_user(to, from, size);
6643+ unsigned long ret;
6644
6645+ if ((long)size < 0 || size > INT_MAX)
6646+ return size;
6647+
6648+ if (!__builtin_constant_p(size))
6649+ check_object_size(from, size, true);
6650+
6651+ ret = ___copy_to_user(to, from, size);
6652 if (unlikely(ret))
6653 ret = copy_to_user_fixup(to, from, size);
6654 return ret;
6655diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
6656index 6cf591b..b49e65a 100644
6657--- a/arch/sparc/kernel/Makefile
6658+++ b/arch/sparc/kernel/Makefile
6659@@ -3,7 +3,7 @@
6660 #
6661
6662 asflags-y := -ansi
6663-ccflags-y := -Werror
6664+#ccflags-y := -Werror
6665
6666 extra-y := head_$(BITS).o
6667
6668diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
6669index 487bffb..955a925 100644
6670--- a/arch/sparc/kernel/process_32.c
6671+++ b/arch/sparc/kernel/process_32.c
6672@@ -126,14 +126,14 @@ void show_regs(struct pt_regs *r)
6673
6674 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
6675 r->psr, r->pc, r->npc, r->y, print_tainted());
6676- printk("PC: <%pS>\n", (void *) r->pc);
6677+ printk("PC: <%pA>\n", (void *) r->pc);
6678 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
6679 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
6680 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
6681 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
6682 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
6683 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
6684- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
6685+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
6686
6687 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
6688 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
6689@@ -168,7 +168,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
6690 rw = (struct reg_window32 *) fp;
6691 pc = rw->ins[7];
6692 printk("[%08lx : ", pc);
6693- printk("%pS ] ", (void *) pc);
6694+ printk("%pA ] ", (void *) pc);
6695 fp = rw->ins[6];
6696 } while (++count < 16);
6697 printk("\n");
6698diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
6699index c6e0c29..052832b 100644
6700--- a/arch/sparc/kernel/process_64.c
6701+++ b/arch/sparc/kernel/process_64.c
6702@@ -181,14 +181,14 @@ static void show_regwindow(struct pt_regs *regs)
6703 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
6704 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
6705 if (regs->tstate & TSTATE_PRIV)
6706- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
6707+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
6708 }
6709
6710 void show_regs(struct pt_regs *regs)
6711 {
6712 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
6713 regs->tpc, regs->tnpc, regs->y, print_tainted());
6714- printk("TPC: <%pS>\n", (void *) regs->tpc);
6715+ printk("TPC: <%pA>\n", (void *) regs->tpc);
6716 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
6717 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
6718 regs->u_regs[3]);
6719@@ -201,7 +201,7 @@ void show_regs(struct pt_regs *regs)
6720 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
6721 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
6722 regs->u_regs[15]);
6723- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
6724+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
6725 show_regwindow(regs);
6726 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
6727 }
6728@@ -290,7 +290,7 @@ void arch_trigger_all_cpu_backtrace(void)
6729 ((tp && tp->task) ? tp->task->pid : -1));
6730
6731 if (gp->tstate & TSTATE_PRIV) {
6732- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
6733+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
6734 (void *) gp->tpc,
6735 (void *) gp->o7,
6736 (void *) gp->i7,
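
Throughout these register and stack dumps, %pS becomes %pA. %pA is a grsecurity-specific vsprintf extension added elsewhere in this patch; the usual intent — an assumption here, since the vsprintf hunk is not in view — is a symbol formatter that cooperates with GRKERNSEC_HIDESYM so oops output stops leaking kernel text addresses. A hypothetical sketch of such a formatter (all names illustrative; only %pS is a real kernel format):

static bool hide_kernel_symbols;	/* stands in for a HIDESYM policy bit */

static const char *fmt_kernel_sym(unsigned long addr, char *buf, size_t len)
{
	if (hide_kernel_symbols)
		snprintf(buf, len, "<hidden>");
	else
		snprintf(buf, len, "%pS", (void *)addr);
	return buf;
}
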
6737diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
6738index 7ff45e4..a58f271 100644
6739--- a/arch/sparc/kernel/ptrace_64.c
6740+++ b/arch/sparc/kernel/ptrace_64.c
6741@@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
6742 return ret;
6743 }
6744
6745+#ifdef CONFIG_GRKERNSEC_SETXID
6746+extern void gr_delayed_cred_worker(void);
6747+#endif
6748+
6749 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
6750 {
6751 int ret = 0;
6752@@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
6753 /* do the secure computing check first */
6754 secure_computing_strict(regs->u_regs[UREG_G1]);
6755
6756+#ifdef CONFIG_GRKERNSEC_SETXID
6757+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6758+ gr_delayed_cred_worker();
6759+#endif
6760+
6761 if (test_thread_flag(TIF_SYSCALL_TRACE))
6762 ret = tracehook_report_syscall_entry(regs);
6763
6764@@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
6765
6766 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
6767 {
6768+#ifdef CONFIG_GRKERNSEC_SETXID
6769+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6770+ gr_delayed_cred_worker();
6771+#endif
6772+
6773 audit_syscall_exit(regs);
6774
6775 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
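
TIF_GRSEC_SETXID (defined in thread_info_64.h above) implements deferred credential propagation: when one thread of a process calls set*id(), every sibling is flagged, and each thread applies the new credentials at its own next syscall entry or exit via gr_delayed_cred_worker(). A sketch of the flagging half of that handshake — the worker itself lives in the grsecurity/ hunks elsewhere in this patch, and the caller is assumed to hold the appropriate task-list protection:

static void flag_all_threads_for_cred_update(struct task_struct *leader)
{
	struct task_struct *t = leader;

	do {
		set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
	} while_each_thread(leader, t);
}
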
6776diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
6777index 0c9b31b..55a8ba6 100644
6778--- a/arch/sparc/kernel/sys_sparc_32.c
6779+++ b/arch/sparc/kernel/sys_sparc_32.c
6780@@ -39,6 +39,7 @@ asmlinkage unsigned long sys_getpagesize(void)
6781 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
6782 {
6783 struct vm_area_struct * vmm;
6784+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
6785
6786 if (flags & MAP_FIXED) {
6787 /* We do not accept a shared mapping if it would violate
6788@@ -54,7 +55,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6789 if (len > TASK_SIZE - PAGE_SIZE)
6790 return -ENOMEM;
6791 if (!addr)
6792- addr = TASK_UNMAPPED_BASE;
6793+ addr = current->mm->mmap_base;
6794
6795 if (flags & MAP_SHARED)
6796 addr = COLOUR_ALIGN(addr);
6797@@ -65,7 +66,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6798 /* At this point: (!vmm || addr < vmm->vm_end). */
6799 if (TASK_SIZE - PAGE_SIZE - len < addr)
6800 return -ENOMEM;
6801- if (!vmm || addr + len <= vmm->vm_start)
6802+ if (check_heap_stack_gap(vmm, addr, len, offset))
6803 return addr;
6804 addr = vmm->vm_end;
6805 if (flags & MAP_SHARED)
6806diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
6807index 878ef3d..609e41f 100644
6808--- a/arch/sparc/kernel/sys_sparc_64.c
6809+++ b/arch/sparc/kernel/sys_sparc_64.c
6810@@ -102,12 +102,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6811 unsigned long task_size = TASK_SIZE;
6812 unsigned long start_addr;
6813 int do_color_align;
6814+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
6815
6816 if (flags & MAP_FIXED) {
6817 /* We do not accept a shared mapping if it would violate
6818 * cache aliasing constraints.
6819 */
6820- if ((flags & MAP_SHARED) &&
6821+ if ((filp || (flags & MAP_SHARED)) &&
6822 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
6823 return -EINVAL;
6824 return addr;
6825@@ -122,6 +123,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6826 if (filp || (flags & MAP_SHARED))
6827 do_color_align = 1;
6828
6829+#ifdef CONFIG_PAX_RANDMMAP
6830+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
6831+#endif
6832+
6833 if (addr) {
6834 if (do_color_align)
6835 addr = COLOUR_ALIGN(addr, pgoff);
6836@@ -129,15 +134,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6837 addr = PAGE_ALIGN(addr);
6838
6839 vma = find_vma(mm, addr);
6840- if (task_size - len >= addr &&
6841- (!vma || addr + len <= vma->vm_start))
6842+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
6843 return addr;
6844 }
6845
6846 if (len > mm->cached_hole_size) {
6847- start_addr = addr = mm->free_area_cache;
6848+ start_addr = addr = mm->free_area_cache;
6849 } else {
6850- start_addr = addr = TASK_UNMAPPED_BASE;
6851+ start_addr = addr = mm->mmap_base;
6852 mm->cached_hole_size = 0;
6853 }
6854
6855@@ -157,14 +161,14 @@ full_search:
6856 vma = find_vma(mm, VA_EXCLUDE_END);
6857 }
6858 if (unlikely(task_size < addr)) {
6859- if (start_addr != TASK_UNMAPPED_BASE) {
6860- start_addr = addr = TASK_UNMAPPED_BASE;
6861+ if (start_addr != mm->mmap_base) {
6862+ start_addr = addr = mm->mmap_base;
6863 mm->cached_hole_size = 0;
6864 goto full_search;
6865 }
6866 return -ENOMEM;
6867 }
6868- if (likely(!vma || addr + len <= vma->vm_start)) {
6869+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
6870 /*
6871 * Remember the place where we stopped the search:
6872 */
6873@@ -190,6 +194,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6874 unsigned long task_size = STACK_TOP32;
6875 unsigned long addr = addr0;
6876 int do_color_align;
6877+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
6878
6879 /* This should only ever run for 32-bit processes. */
6880 BUG_ON(!test_thread_flag(TIF_32BIT));
6881@@ -198,7 +203,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6882 /* We do not accept a shared mapping if it would violate
6883 * cache aliasing constraints.
6884 */
6885- if ((flags & MAP_SHARED) &&
6886+ if ((filp || (flags & MAP_SHARED)) &&
6887 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
6888 return -EINVAL;
6889 return addr;
6890@@ -219,8 +224,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6891 addr = PAGE_ALIGN(addr);
6892
6893 vma = find_vma(mm, addr);
6894- if (task_size - len >= addr &&
6895- (!vma || addr + len <= vma->vm_start))
6896+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
6897 return addr;
6898 }
6899
6900@@ -241,7 +245,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6901 /* make sure it can fit in the remaining address space */
6902 if (likely(addr > len)) {
6903 vma = find_vma(mm, addr-len);
6904- if (!vma || addr <= vma->vm_start) {
6905+ if (check_heap_stack_gap(vma, addr - len, len, offset)) {
6906 /* remember the address as a hint for next time */
6907 return (mm->free_area_cache = addr-len);
6908 }
6909@@ -250,18 +254,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6910 if (unlikely(mm->mmap_base < len))
6911 goto bottomup;
6912
6913- addr = mm->mmap_base-len;
6914- if (do_color_align)
6915- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6916+ addr = mm->mmap_base - len;
6917
6918 do {
6919+ if (do_color_align)
6920+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6921 /*
6922 * Lookup failure means no vma is above this address,
6923 * else if new region fits below vma->vm_start,
6924 * return with success:
6925 */
6926 vma = find_vma(mm, addr);
6927- if (likely(!vma || addr+len <= vma->vm_start)) {
6928+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
6929 /* remember the address as a hint for next time */
6930 return (mm->free_area_cache = addr);
6931 }
6932@@ -271,10 +275,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6933 mm->cached_hole_size = vma->vm_start - addr;
6934
6935 /* try just below the current vma->vm_start */
6936- addr = vma->vm_start-len;
6937- if (do_color_align)
6938- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6939- } while (likely(len < vma->vm_start));
6940+ addr = skip_heap_stack_gap(vma, len, offset);
6941+ } while (!IS_ERR_VALUE(addr));
6942
6943 bottomup:
6944 /*
6945@@ -373,6 +375,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6946 gap == RLIM_INFINITY ||
6947 sysctl_legacy_va_layout) {
6948 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
6949+
6950+#ifdef CONFIG_PAX_RANDMMAP
6951+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6952+ mm->mmap_base += mm->delta_mmap;
6953+#endif
6954+
6955 mm->get_unmapped_area = arch_get_unmapped_area;
6956 mm->unmap_area = arch_unmap_area;
6957 } else {
6958@@ -385,6 +393,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6959 gap = (task_size / 6 * 5);
6960
6961 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
6962+
6963+#ifdef CONFIG_PAX_RANDMMAP
6964+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6965+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
6966+#endif
6967+
6968 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
6969 mm->unmap_area = arch_unmap_area_topdown;
6970 }
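
check_heap_stack_gap() replaces the bare addr + len <= vma->vm_start tests above; it additionally enforces a gap below stack-like mappings, sized by the gr_rand_threadstack_offset() value threaded through each caller. The helper is defined in the mm hunks elsewhere in this patch; a conceptual sketch of the policy, with stack_guard_gap() as a hypothetical placeholder:

static inline bool gap_ok(const struct vm_area_struct *vma,
			  unsigned long addr, unsigned long len,
			  unsigned long offset)
{
	if (!vma)
		return true;			/* nothing above: fits */
	if (addr + len > vma->vm_start)
		return false;			/* overlaps next mapping */
	if (vma->vm_flags & VM_GROWSDOWN)	/* keep slack under stacks */
		return addr + len + stack_guard_gap(offset) <= vma->vm_start;
	return true;
}
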
6971diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
6972index bf23477..b7425a6 100644
6973--- a/arch/sparc/kernel/syscalls.S
6974+++ b/arch/sparc/kernel/syscalls.S
6975@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
6976 #endif
6977 .align 32
6978 1: ldx [%g6 + TI_FLAGS], %l5
6979- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6980+ andcc %l5, _TIF_WORK_SYSCALL, %g0
6981 be,pt %icc, rtrap
6982 nop
6983 call syscall_trace_leave
6984@@ -189,7 +189,7 @@ linux_sparc_syscall32:
6985
6986 srl %i5, 0, %o5 ! IEU1
6987 srl %i2, 0, %o2 ! IEU0 Group
6988- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6989+ andcc %l0, _TIF_WORK_SYSCALL, %g0
6990 bne,pn %icc, linux_syscall_trace32 ! CTI
6991 mov %i0, %l5 ! IEU1
6992 call %l7 ! CTI Group brk forced
6993@@ -212,7 +212,7 @@ linux_sparc_syscall:
6994
6995 mov %i3, %o3 ! IEU1
6996 mov %i4, %o4 ! IEU0 Group
6997- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6998+ andcc %l0, _TIF_WORK_SYSCALL, %g0
6999 bne,pn %icc, linux_syscall_trace ! CTI Group
7000 mov %i0, %l5 ! IEU0
7001 2: call %l7 ! CTI Group brk forced
7002@@ -228,7 +228,7 @@ ret_sys_call:
7003
7004 cmp %o0, -ERESTART_RESTARTBLOCK
7005 bgeu,pn %xcc, 1f
7006- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
7007+ andcc %l0, _TIF_WORK_SYSCALL, %g0
7008 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
7009
7010 2:
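
Folding the four-flag test into _TIF_WORK_SYSCALL keeps these assembly paths in sync with the new TIF_GRSEC_SETXID bit automatically. It works only because every flag in the mask sits below bit 12: andcc's immediate operand is a 13-bit sign-extended field (simm13), exactly the constraint the comment in thread_info_64.h warns about. A compile-time guard one could add (SIMM13_MAX here is illustrative):

#define SIMM13_MAX 0xfffUL	/* largest non-negative simm13 */

static inline void check_syscall_work_mask(void)
{
	BUILD_BUG_ON(_TIF_WORK_SYSCALL & ~SIMM13_MAX);
}
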
7011diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
7012index a5785ea..405c5f7 100644
7013--- a/arch/sparc/kernel/traps_32.c
7014+++ b/arch/sparc/kernel/traps_32.c
7015@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
7016 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
7017 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
7018
7019+extern void gr_handle_kernel_exploit(void);
7020+
7021 void die_if_kernel(char *str, struct pt_regs *regs)
7022 {
7023 static int die_counter;
7024@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
7025 count++ < 30 &&
7026 (((unsigned long) rw) >= PAGE_OFFSET) &&
7027 !(((unsigned long) rw) & 0x7)) {
7028- printk("Caller[%08lx]: %pS\n", rw->ins[7],
7029+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
7030 (void *) rw->ins[7]);
7031 rw = (struct reg_window32 *)rw->ins[6];
7032 }
7033 }
7034 printk("Instruction DUMP:");
7035 instruction_dump ((unsigned long *) regs->pc);
7036- if(regs->psr & PSR_PS)
7037+ if(regs->psr & PSR_PS) {
7038+ gr_handle_kernel_exploit();
7039 do_exit(SIGKILL);
7040+ }
7041 do_exit(SIGSEGV);
7042 }
7043
7044diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
7045index b66a779..8e8d66c 100644
7046--- a/arch/sparc/kernel/traps_64.c
7047+++ b/arch/sparc/kernel/traps_64.c
7048@@ -76,7 +76,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
7049 i + 1,
7050 p->trapstack[i].tstate, p->trapstack[i].tpc,
7051 p->trapstack[i].tnpc, p->trapstack[i].tt);
7052- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
7053+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
7054 }
7055 }
7056
7057@@ -96,6 +96,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
7058
7059 lvl -= 0x100;
7060 if (regs->tstate & TSTATE_PRIV) {
7061+
7062+#ifdef CONFIG_PAX_REFCOUNT
7063+ if (lvl == 6)
7064+ pax_report_refcount_overflow(regs);
7065+#endif
7066+
7067 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
7068 die_if_kernel(buffer, regs);
7069 }
7070@@ -114,11 +120,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
7071 void bad_trap_tl1(struct pt_regs *regs, long lvl)
7072 {
7073 char buffer[32];
7074-
7075+
7076 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
7077 0, lvl, SIGTRAP) == NOTIFY_STOP)
7078 return;
7079
7080+#ifdef CONFIG_PAX_REFCOUNT
7081+ if (lvl == 6)
7082+ pax_report_refcount_overflow(regs);
7083+#endif
7084+
7085 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
7086
7087 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
7088@@ -1142,7 +1153,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
7089 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
7090 printk("%s" "ERROR(%d): ",
7091 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
7092- printk("TPC<%pS>\n", (void *) regs->tpc);
7093+ printk("TPC<%pA>\n", (void *) regs->tpc);
7094 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
7095 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
7096 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
7097@@ -1749,7 +1760,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
7098 smp_processor_id(),
7099 (type & 0x1) ? 'I' : 'D',
7100 regs->tpc);
7101- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
7102+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
7103 panic("Irrecoverable Cheetah+ parity error.");
7104 }
7105
7106@@ -1757,7 +1768,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
7107 smp_processor_id(),
7108 (type & 0x1) ? 'I' : 'D',
7109 regs->tpc);
7110- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
7111+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
7112 }
7113
7114 struct sun4v_error_entry {
7115@@ -2104,9 +2115,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
7116
7117 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
7118 regs->tpc, tl);
7119- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
7120+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
7121 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
7122- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
7123+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
7124 (void *) regs->u_regs[UREG_I7]);
7125 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
7126 "pte[%lx] error[%lx]\n",
7127@@ -2128,9 +2139,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
7128
7129 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
7130 regs->tpc, tl);
7131- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
7132+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
7133 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
7134- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
7135+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
7136 (void *) regs->u_regs[UREG_I7]);
7137 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
7138 "pte[%lx] error[%lx]\n",
7139@@ -2336,13 +2347,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
7140 fp = (unsigned long)sf->fp + STACK_BIAS;
7141 }
7142
7143- printk(" [%016lx] %pS\n", pc, (void *) pc);
7144+ printk(" [%016lx] %pA\n", pc, (void *) pc);
7145 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
7146 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
7147 int index = tsk->curr_ret_stack;
7148 if (tsk->ret_stack && index >= graph) {
7149 pc = tsk->ret_stack[index - graph].ret;
7150- printk(" [%016lx] %pS\n", pc, (void *) pc);
7151+ printk(" [%016lx] %pA\n", pc, (void *) pc);
7152 graph++;
7153 }
7154 }
7155@@ -2367,6 +2378,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
7156 return (struct reg_window *) (fp + STACK_BIAS);
7157 }
7158
7159+extern void gr_handle_kernel_exploit(void);
7160+
7161 void die_if_kernel(char *str, struct pt_regs *regs)
7162 {
7163 static int die_counter;
7164@@ -2395,7 +2408,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
7165 while (rw &&
7166 count++ < 30 &&
7167 kstack_valid(tp, (unsigned long) rw)) {
7168- printk("Caller[%016lx]: %pS\n", rw->ins[7],
7169+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
7170 (void *) rw->ins[7]);
7171
7172 rw = kernel_stack_up(rw);
7173@@ -2408,8 +2421,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
7174 }
7175 user_instruction_dump ((unsigned int __user *) regs->tpc);
7176 }
7177- if (regs->tstate & TSTATE_PRIV)
7178+ if (regs->tstate & TSTATE_PRIV) {
7179+ gr_handle_kernel_exploit();
7180 do_exit(SIGKILL);
7181+ }
7182 do_exit(SIGSEGV);
7183 }
7184 EXPORT_SYMBOL(die_if_kernel);
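
This is where the tvs %icc, 6 instrumentation lands: a software trap n enters the kernel with trap type 0x100 + n, bad_trap() subtracts 0x100, so a privileged-mode trap with lvl == 6 can only be a tripped PAX_REFCOUNT check and is routed to pax_report_refcount_overflow(). The same hunks also hook die_if_kernel(): a kernel-mode death now calls gr_handle_kernel_exploit() first, whose policy (implemented in the grsecurity/ hunks, not shown here) is to punish the triggering user rather than just oops. The trap-number arithmetic in isolation, a sketch of the dispatch only:

void refcount_trap_dispatch(struct pt_regs *regs, long lvl)
{
	lvl -= 0x100;			/* software traps start at tt 0x100 */
	if ((regs->tstate & TSTATE_PRIV) && lvl == 6)
		pax_report_refcount_overflow(regs);
}
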
7185diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
7186index 8201c25e..072a2a7 100644
7187--- a/arch/sparc/kernel/unaligned_64.c
7188+++ b/arch/sparc/kernel/unaligned_64.c
7189@@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
7190 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
7191
7192 if (__ratelimit(&ratelimit)) {
7193- printk("Kernel unaligned access at TPC[%lx] %pS\n",
7194+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
7195 regs->tpc, (void *) regs->tpc);
7196 }
7197 }
7198diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
7199index 8410065f2..4fd4ca22 100644
7200--- a/arch/sparc/lib/Makefile
7201+++ b/arch/sparc/lib/Makefile
7202@@ -2,7 +2,7 @@
7203 #
7204
7205 asflags-y := -ansi -DST_DIV0=0x02
7206-ccflags-y := -Werror
7207+#ccflags-y := -Werror
7208
7209 lib-$(CONFIG_SPARC32) += ashrdi3.o
7210 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
7211diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
7212index 85c233d..68500e0 100644
7213--- a/arch/sparc/lib/atomic_64.S
7214+++ b/arch/sparc/lib/atomic_64.S
7215@@ -17,7 +17,12 @@
7216 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
7217 BACKOFF_SETUP(%o2)
7218 1: lduw [%o1], %g1
7219- add %g1, %o0, %g7
7220+ addcc %g1, %o0, %g7
7221+
7222+#ifdef CONFIG_PAX_REFCOUNT
7223+ tvs %icc, 6
7224+#endif
7225+
7226 cas [%o1], %g1, %g7
7227 cmp %g1, %g7
7228 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
7229@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
7230 2: BACKOFF_SPIN(%o2, %o3, 1b)
7231 ENDPROC(atomic_add)
7232
7233+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
7234+ BACKOFF_SETUP(%o2)
7235+1: lduw [%o1], %g1
7236+ add %g1, %o0, %g7
7237+ cas [%o1], %g1, %g7
7238+ cmp %g1, %g7
7239+ bne,pn %icc, 2f
7240+ nop
7241+ retl
7242+ nop
7243+2: BACKOFF_SPIN(%o2, %o3, 1b)
7244+ENDPROC(atomic_add_unchecked)
7245+
7246 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
7247 BACKOFF_SETUP(%o2)
7248 1: lduw [%o1], %g1
7249- sub %g1, %o0, %g7
7250+ subcc %g1, %o0, %g7
7251+
7252+#ifdef CONFIG_PAX_REFCOUNT
7253+ tvs %icc, 6
7254+#endif
7255+
7256 cas [%o1], %g1, %g7
7257 cmp %g1, %g7
7258 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
7259@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
7260 2: BACKOFF_SPIN(%o2, %o3, 1b)
7261 ENDPROC(atomic_sub)
7262
7263+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
7264+ BACKOFF_SETUP(%o2)
7265+1: lduw [%o1], %g1
7266+ sub %g1, %o0, %g7
7267+ cas [%o1], %g1, %g7
7268+ cmp %g1, %g7
7269+ bne,pn %icc, 2f
7270+ nop
7271+ retl
7272+ nop
7273+2: BACKOFF_SPIN(%o2, %o3, 1b)
7274+ENDPROC(atomic_sub_unchecked)
7275+
7276 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
7277 BACKOFF_SETUP(%o2)
7278 1: lduw [%o1], %g1
7279- add %g1, %o0, %g7
7280+ addcc %g1, %o0, %g7
7281+
7282+#ifdef CONFIG_PAX_REFCOUNT
7283+ tvs %icc, 6
7284+#endif
7285+
7286 cas [%o1], %g1, %g7
7287 cmp %g1, %g7
7288 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
7289@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
7290 2: BACKOFF_SPIN(%o2, %o3, 1b)
7291 ENDPROC(atomic_add_ret)
7292
7293+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
7294+ BACKOFF_SETUP(%o2)
7295+1: lduw [%o1], %g1
7296+ addcc %g1, %o0, %g7
7297+ cas [%o1], %g1, %g7
7298+ cmp %g1, %g7
7299+ bne,pn %icc, 2f
7300+ add %g7, %o0, %g7
7301+ sra %g7, 0, %o0
7302+ retl
7303+ nop
7304+2: BACKOFF_SPIN(%o2, %o3, 1b)
7305+ENDPROC(atomic_add_ret_unchecked)
7306+
7307 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
7308 BACKOFF_SETUP(%o2)
7309 1: lduw [%o1], %g1
7310- sub %g1, %o0, %g7
7311+ subcc %g1, %o0, %g7
7312+
7313+#ifdef CONFIG_PAX_REFCOUNT
7314+ tvs %icc, 6
7315+#endif
7316+
7317 cas [%o1], %g1, %g7
7318 cmp %g1, %g7
7319 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
7320@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
7321 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
7322 BACKOFF_SETUP(%o2)
7323 1: ldx [%o1], %g1
7324- add %g1, %o0, %g7
7325+ addcc %g1, %o0, %g7
7326+
7327+#ifdef CONFIG_PAX_REFCOUNT
7328+ tvs %xcc, 6
7329+#endif
7330+
7331 casx [%o1], %g1, %g7
7332 cmp %g1, %g7
7333 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
7334@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
7335 2: BACKOFF_SPIN(%o2, %o3, 1b)
7336 ENDPROC(atomic64_add)
7337
7338+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
7339+ BACKOFF_SETUP(%o2)
7340+1: ldx [%o1], %g1
7341+ addcc %g1, %o0, %g7
7342+ casx [%o1], %g1, %g7
7343+ cmp %g1, %g7
7344+ bne,pn %xcc, 2f
7345+ nop
7346+ retl
7347+ nop
7348+2: BACKOFF_SPIN(%o2, %o3, 1b)
7349+ENDPROC(atomic64_add_unchecked)
7350+
7351 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
7352 BACKOFF_SETUP(%o2)
7353 1: ldx [%o1], %g1
7354- sub %g1, %o0, %g7
7355+ subcc %g1, %o0, %g7
7356+
7357+#ifdef CONFIG_PAX_REFCOUNT
7358+ tvs %xcc, 6
7359+#endif
7360+
7361 casx [%o1], %g1, %g7
7362 cmp %g1, %g7
7363 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
7364@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
7365 2: BACKOFF_SPIN(%o2, %o3, 1b)
7366 ENDPROC(atomic64_sub)
7367
7368+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
7369+ BACKOFF_SETUP(%o2)
7370+1: ldx [%o1], %g1
7371+ subcc %g1, %o0, %g7
7372+ casx [%o1], %g1, %g7
7373+ cmp %g1, %g7
7374+ bne,pn %xcc, 2f
7375+ nop
7376+ retl
7377+ nop
7378+2: BACKOFF_SPIN(%o2, %o3, 1b)
7379+ENDPROC(atomic64_sub_unchecked)
7380+
7381 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
7382 BACKOFF_SETUP(%o2)
7383 1: ldx [%o1], %g1
7384- add %g1, %o0, %g7
7385+ addcc %g1, %o0, %g7
7386+
7387+#ifdef CONFIG_PAX_REFCOUNT
7388+ tvs %xcc, 6
7389+#endif
7390+
7391 casx [%o1], %g1, %g7
7392 cmp %g1, %g7
7393 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
7394@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
7395 2: BACKOFF_SPIN(%o2, %o3, 1b)
7396 ENDPROC(atomic64_add_ret)
7397
7398+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
7399+ BACKOFF_SETUP(%o2)
7400+1: ldx [%o1], %g1
7401+ addcc %g1, %o0, %g7
7402+ casx [%o1], %g1, %g7
7403+ cmp %g1, %g7
7404+ bne,pn %xcc, 2f
7405+ add %g7, %o0, %g7
7406+ mov %g7, %o0
7407+ retl
7408+ nop
7409+2: BACKOFF_SPIN(%o2, %o3, 1b)
7410+ENDPROC(atomic64_add_ret_unchecked)
7411+
7412 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
7413 BACKOFF_SETUP(%o2)
7414 1: ldx [%o1], %g1
7415- sub %g1, %o0, %g7
7416+ subcc %g1, %o0, %g7
7417+
7418+#ifdef CONFIG_PAX_REFCOUNT
7419+ tvs %xcc, 6
7420+#endif
7421+
7422 casx [%o1], %g1, %g7
7423 cmp %g1, %g7
7424 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
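
With the plain atomics now trapping on overflow, counters that are allowed to wrap (statistics and the like) need an escape hatch: the *_unchecked entry points above keep the original non-trapping add/sub, and are paired with an atomic_unchecked_t type introduced elsewhere in this patch so checked and unchecked users cannot be mixed silently. A sketch of the C-side pairing, with prototypes abbreviated for illustration:

typedef struct { int counter; } atomic_unchecked_t;

extern void atomic_add(int, atomic_t *);			/* addcc + tvs */
extern void atomic_add_unchecked(int, atomic_unchecked_t *);	/* plain add */

static inline void stats_inc(atomic_unchecked_t *v)
{
	atomic_add_unchecked(1, v);	/* wrapping is fine here */
}
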
7425diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
7426index 0c4e35e..745d3e4 100644
7427--- a/arch/sparc/lib/ksyms.c
7428+++ b/arch/sparc/lib/ksyms.c
7429@@ -109,12 +109,18 @@ EXPORT_SYMBOL(__downgrade_write);
7430
7431 /* Atomic counter implementation. */
7432 EXPORT_SYMBOL(atomic_add);
7433+EXPORT_SYMBOL(atomic_add_unchecked);
7434 EXPORT_SYMBOL(atomic_add_ret);
7435+EXPORT_SYMBOL(atomic_add_ret_unchecked);
7436 EXPORT_SYMBOL(atomic_sub);
7437+EXPORT_SYMBOL(atomic_sub_unchecked);
7438 EXPORT_SYMBOL(atomic_sub_ret);
7439 EXPORT_SYMBOL(atomic64_add);
7440+EXPORT_SYMBOL(atomic64_add_unchecked);
7441 EXPORT_SYMBOL(atomic64_add_ret);
7442+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
7443 EXPORT_SYMBOL(atomic64_sub);
7444+EXPORT_SYMBOL(atomic64_sub_unchecked);
7445 EXPORT_SYMBOL(atomic64_sub_ret);
7446 EXPORT_SYMBOL(atomic64_dec_if_positive);
7447
7448diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
7449index 30c3ecc..736f015 100644
7450--- a/arch/sparc/mm/Makefile
7451+++ b/arch/sparc/mm/Makefile
7452@@ -2,7 +2,7 @@
7453 #
7454
7455 asflags-y := -ansi
7456-ccflags-y := -Werror
7457+#ccflags-y := -Werror
7458
7459 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
7460 obj-y += fault_$(BITS).o
7461diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
7462index e98bfda..ea8d221 100644
7463--- a/arch/sparc/mm/fault_32.c
7464+++ b/arch/sparc/mm/fault_32.c
7465@@ -21,6 +21,9 @@
7466 #include <linux/perf_event.h>
7467 #include <linux/interrupt.h>
7468 #include <linux/kdebug.h>
7469+#include <linux/slab.h>
7470+#include <linux/pagemap.h>
7471+#include <linux/compiler.h>
7472
7473 #include <asm/page.h>
7474 #include <asm/pgtable.h>
7475@@ -159,6 +162,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
7476 return safe_compute_effective_address(regs, insn);
7477 }
7478
7479+#ifdef CONFIG_PAX_PAGEEXEC
7480+#ifdef CONFIG_PAX_DLRESOLVE
7481+static void pax_emuplt_close(struct vm_area_struct *vma)
7482+{
7483+ vma->vm_mm->call_dl_resolve = 0UL;
7484+}
7485+
7486+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
7487+{
7488+ unsigned int *kaddr;
7489+
7490+ vmf->page = alloc_page(GFP_HIGHUSER);
7491+ if (!vmf->page)
7492+ return VM_FAULT_OOM;
7493+
7494+ kaddr = kmap(vmf->page);
7495+ memset(kaddr, 0, PAGE_SIZE);
7496+ kaddr[0] = 0x9DE3BFA8U; /* save */
7497+ flush_dcache_page(vmf->page);
7498+ kunmap(vmf->page);
7499+ return VM_FAULT_MAJOR;
7500+}
7501+
7502+static const struct vm_operations_struct pax_vm_ops = {
7503+ .close = pax_emuplt_close,
7504+ .fault = pax_emuplt_fault
7505+};
7506+
7507+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
7508+{
7509+ int ret;
7510+
7511+ INIT_LIST_HEAD(&vma->anon_vma_chain);
7512+ vma->vm_mm = current->mm;
7513+ vma->vm_start = addr;
7514+ vma->vm_end = addr + PAGE_SIZE;
7515+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
7516+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
7517+ vma->vm_ops = &pax_vm_ops;
7518+
7519+ ret = insert_vm_struct(current->mm, vma);
7520+ if (ret)
7521+ return ret;
7522+
7523+ ++current->mm->total_vm;
7524+ return 0;
7525+}
7526+#endif
7527+
7528+/*
7529+ * PaX: decide what to do with offenders (regs->pc = fault address)
7530+ *
7531+ * returns 1 when task should be killed
7532+ * 2 when patched PLT trampoline was detected
7533+ * 3 when unpatched PLT trampoline was detected
7534+ */
7535+static int pax_handle_fetch_fault(struct pt_regs *regs)
7536+{
7537+
7538+#ifdef CONFIG_PAX_EMUPLT
7539+ int err;
7540+
7541+ do { /* PaX: patched PLT emulation #1 */
7542+ unsigned int sethi1, sethi2, jmpl;
7543+
7544+ err = get_user(sethi1, (unsigned int *)regs->pc);
7545+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
7546+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
7547+
7548+ if (err)
7549+ break;
7550+
7551+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
7552+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
7553+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
7554+ {
7555+ unsigned int addr;
7556+
7557+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
7558+ addr = regs->u_regs[UREG_G1];
7559+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
7560+ regs->pc = addr;
7561+ regs->npc = addr+4;
7562+ return 2;
7563+ }
7564+ } while (0);
7565+
7566+ do { /* PaX: patched PLT emulation #2 */
7567+ unsigned int ba;
7568+
7569+ err = get_user(ba, (unsigned int *)regs->pc);
7570+
7571+ if (err)
7572+ break;
7573+
7574+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
7575+ unsigned int addr;
7576+
7577+ if ((ba & 0xFFC00000U) == 0x30800000U)
7578+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
7579+ else
7580+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
7581+ regs->pc = addr;
7582+ regs->npc = addr+4;
7583+ return 2;
7584+ }
7585+ } while (0);
7586+
7587+ do { /* PaX: patched PLT emulation #3 */
7588+ unsigned int sethi, bajmpl, nop;
7589+
7590+ err = get_user(sethi, (unsigned int *)regs->pc);
7591+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
7592+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
7593+
7594+ if (err)
7595+ break;
7596+
7597+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7598+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
7599+ nop == 0x01000000U)
7600+ {
7601+ unsigned int addr;
7602+
7603+ addr = (sethi & 0x003FFFFFU) << 10;
7604+ regs->u_regs[UREG_G1] = addr;
7605+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
7606+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
7607+ else
7608+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
7609+ regs->pc = addr;
7610+ regs->npc = addr+4;
7611+ return 2;
7612+ }
7613+ } while (0);
7614+
7615+ do { /* PaX: unpatched PLT emulation step 1 */
7616+ unsigned int sethi, ba, nop;
7617+
7618+ err = get_user(sethi, (unsigned int *)regs->pc);
7619+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
7620+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
7621+
7622+ if (err)
7623+ break;
7624+
7625+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7626+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
7627+ nop == 0x01000000U)
7628+ {
7629+ unsigned int addr, save, call;
7630+
7631+ if ((ba & 0xFFC00000U) == 0x30800000U)
7632+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
7633+ else
7634+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
7635+
7636+ err = get_user(save, (unsigned int *)addr);
7637+ err |= get_user(call, (unsigned int *)(addr+4));
7638+ err |= get_user(nop, (unsigned int *)(addr+8));
7639+ if (err)
7640+ break;
7641+
7642+#ifdef CONFIG_PAX_DLRESOLVE
7643+ if (save == 0x9DE3BFA8U &&
7644+ (call & 0xC0000000U) == 0x40000000U &&
7645+ nop == 0x01000000U)
7646+ {
7647+ struct vm_area_struct *vma;
7648+ unsigned long call_dl_resolve;
7649+
7650+ down_read(&current->mm->mmap_sem);
7651+ call_dl_resolve = current->mm->call_dl_resolve;
7652+ up_read(&current->mm->mmap_sem);
7653+ if (likely(call_dl_resolve))
7654+ goto emulate;
7655+
7656+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
7657+
7658+ down_write(&current->mm->mmap_sem);
7659+ if (current->mm->call_dl_resolve) {
7660+ call_dl_resolve = current->mm->call_dl_resolve;
7661+ up_write(&current->mm->mmap_sem);
7662+ if (vma)
7663+ kmem_cache_free(vm_area_cachep, vma);
7664+ goto emulate;
7665+ }
7666+
7667+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
7668+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
7669+ up_write(&current->mm->mmap_sem);
7670+ if (vma)
7671+ kmem_cache_free(vm_area_cachep, vma);
7672+ return 1;
7673+ }
7674+
7675+ if (pax_insert_vma(vma, call_dl_resolve)) {
7676+ up_write(&current->mm->mmap_sem);
7677+ kmem_cache_free(vm_area_cachep, vma);
7678+ return 1;
7679+ }
7680+
7681+ current->mm->call_dl_resolve = call_dl_resolve;
7682+ up_write(&current->mm->mmap_sem);
7683+
7684+emulate:
7685+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7686+ regs->pc = call_dl_resolve;
7687+ regs->npc = addr+4;
7688+ return 3;
7689+ }
7690+#endif
7691+
7692+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
7693+ if ((save & 0xFFC00000U) == 0x05000000U &&
7694+ (call & 0xFFFFE000U) == 0x85C0A000U &&
7695+ nop == 0x01000000U)
7696+ {
7697+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7698+ regs->u_regs[UREG_G2] = addr + 4;
7699+ addr = (save & 0x003FFFFFU) << 10;
7700+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
7701+ regs->pc = addr;
7702+ regs->npc = addr+4;
7703+ return 3;
7704+ }
7705+ }
7706+ } while (0);
7707+
7708+ do { /* PaX: unpatched PLT emulation step 2 */
7709+ unsigned int save, call, nop;
7710+
7711+ err = get_user(save, (unsigned int *)(regs->pc-4));
7712+ err |= get_user(call, (unsigned int *)regs->pc);
7713+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
7714+ if (err)
7715+ break;
7716+
7717+ if (save == 0x9DE3BFA8U &&
7718+ (call & 0xC0000000U) == 0x40000000U &&
7719+ nop == 0x01000000U)
7720+ {
7721+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
7722+
7723+ regs->u_regs[UREG_RETPC] = regs->pc;
7724+ regs->pc = dl_resolve;
7725+ regs->npc = dl_resolve+4;
7726+ return 3;
7727+ }
7728+ } while (0);
7729+#endif
7730+
7731+ return 1;
7732+}
7733+
7734+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7735+{
7736+ unsigned long i;
7737+
7738+ printk(KERN_ERR "PAX: bytes at PC: ");
7739+ for (i = 0; i < 8; i++) {
7740+ unsigned int c;
7741+ if (get_user(c, (unsigned int *)pc+i))
7742+ printk(KERN_CONT "???????? ");
7743+ else
7744+ printk(KERN_CONT "%08x ", c);
7745+ }
7746+ printk("\n");
7747+}
7748+#endif
7749+
7750 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
7751 int text_fault)
7752 {
7753@@ -230,6 +504,24 @@ good_area:
7754 if (!(vma->vm_flags & VM_WRITE))
7755 goto bad_area;
7756 } else {
7757+
7758+#ifdef CONFIG_PAX_PAGEEXEC
7759+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
7760+ up_read(&mm->mmap_sem);
7761+ switch (pax_handle_fetch_fault(regs)) {
7762+
7763+#ifdef CONFIG_PAX_EMUPLT
7764+ case 2:
7765+ case 3:
7766+ return;
7767+#endif
7768+
7769+ }
7770+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
7771+ do_group_exit(SIGKILL);
7772+ }
7773+#endif
7774+
7775 /* Allow reads even for write-only mappings */
7776 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
7777 goto bad_area;
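
All of the PLT-emulation matching above works on raw SPARC V8 encodings: sethi carries a 22-bit immediate destined for bits 31:10 of a register, and jmpl-style displacements are 13-bit sign-extended, which the patch reconstructs with its or/xor/add idiom. The same decoding, written out plainly as equivalent helpers:

static inline unsigned int sethi_imm22(unsigned int insn)
{
	return (insn & 0x003FFFFFU) << 10;	/* imm22 -> bits 31..10 */
}

static inline int simm13(unsigned int insn)
{
	int v = insn & 0x1FFF;			/* low 13 bits */

	return (v ^ 0x1000) - 0x1000;		/* sign-extend bit 12 */
}
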
7778diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
7779index 097aee7..5ca6697 100644
7780--- a/arch/sparc/mm/fault_64.c
7781+++ b/arch/sparc/mm/fault_64.c
7782@@ -21,6 +21,9 @@
7783 #include <linux/kprobes.h>
7784 #include <linux/kdebug.h>
7785 #include <linux/percpu.h>
7786+#include <linux/slab.h>
7787+#include <linux/pagemap.h>
7788+#include <linux/compiler.h>
7789
7790 #include <asm/page.h>
7791 #include <asm/pgtable.h>
7792@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
7793 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
7794 regs->tpc);
7795 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
7796- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
7797+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
7798 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
7799 dump_stack();
7800 unhandled_fault(regs->tpc, current, regs);
7801@@ -270,6 +273,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
7802 show_regs(regs);
7803 }
7804
7805+#ifdef CONFIG_PAX_PAGEEXEC
7806+#ifdef CONFIG_PAX_DLRESOLVE
7807+static void pax_emuplt_close(struct vm_area_struct *vma)
7808+{
7809+ vma->vm_mm->call_dl_resolve = 0UL;
7810+}
7811+
7812+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
7813+{
7814+ unsigned int *kaddr;
7815+
7816+ vmf->page = alloc_page(GFP_HIGHUSER);
7817+ if (!vmf->page)
7818+ return VM_FAULT_OOM;
7819+
7820+ kaddr = kmap(vmf->page);
7821+ memset(kaddr, 0, PAGE_SIZE);
7822+ kaddr[0] = 0x9DE3BFA8U; /* save */
7823+ flush_dcache_page(vmf->page);
7824+ kunmap(vmf->page);
7825+ return VM_FAULT_MAJOR;
7826+}
7827+
7828+static const struct vm_operations_struct pax_vm_ops = {
7829+ .close = pax_emuplt_close,
7830+ .fault = pax_emuplt_fault
7831+};
7832+
7833+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
7834+{
7835+ int ret;
7836+
7837+ INIT_LIST_HEAD(&vma->anon_vma_chain);
7838+ vma->vm_mm = current->mm;
7839+ vma->vm_start = addr;
7840+ vma->vm_end = addr + PAGE_SIZE;
7841+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
7842+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
7843+ vma->vm_ops = &pax_vm_ops;
7844+
7845+ ret = insert_vm_struct(current->mm, vma);
7846+ if (ret)
7847+ return ret;
7848+
7849+ ++current->mm->total_vm;
7850+ return 0;
7851+}
7852+#endif
7853+
7854+/*
7855+ * PaX: decide what to do with offenders (regs->tpc = fault address)
7856+ *
7857+ * returns 1 when task should be killed
7858+ * 2 when patched PLT trampoline was detected
7859+ * 3 when unpatched PLT trampoline was detected
7860+ */
7861+static int pax_handle_fetch_fault(struct pt_regs *regs)
7862+{
7863+
7864+#ifdef CONFIG_PAX_EMUPLT
7865+ int err;
7866+
7867+ do { /* PaX: patched PLT emulation #1 */
7868+ unsigned int sethi1, sethi2, jmpl;
7869+
7870+ err = get_user(sethi1, (unsigned int *)regs->tpc);
7871+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
7872+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
7873+
7874+ if (err)
7875+ break;
7876+
7877+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
7878+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
7879+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
7880+ {
7881+ unsigned long addr;
7882+
7883+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
7884+ addr = regs->u_regs[UREG_G1];
7885+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7886+
7887+ if (test_thread_flag(TIF_32BIT))
7888+ addr &= 0xFFFFFFFFUL;
7889+
7890+ regs->tpc = addr;
7891+ regs->tnpc = addr+4;
7892+ return 2;
7893+ }
7894+ } while (0);
7895+
7896+ do { /* PaX: patched PLT emulation #2 */
7897+ unsigned int ba;
7898+
7899+ err = get_user(ba, (unsigned int *)regs->tpc);
7900+
7901+ if (err)
7902+ break;
7903+
7904+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
7905+ unsigned long addr;
7906+
7907+ if ((ba & 0xFFC00000U) == 0x30800000U)
7908+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
7909+ else
7910+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7911+
7912+ if (test_thread_flag(TIF_32BIT))
7913+ addr &= 0xFFFFFFFFUL;
7914+
7915+ regs->tpc = addr;
7916+ regs->tnpc = addr+4;
7917+ return 2;
7918+ }
7919+ } while (0);
7920+
7921+ do { /* PaX: patched PLT emulation #3 */
7922+ unsigned int sethi, bajmpl, nop;
7923+
7924+ err = get_user(sethi, (unsigned int *)regs->tpc);
7925+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
7926+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7927+
7928+ if (err)
7929+ break;
7930+
7931+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7932+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
7933+ nop == 0x01000000U)
7934+ {
7935+ unsigned long addr;
7936+
7937+ addr = (sethi & 0x003FFFFFU) << 10;
7938+ regs->u_regs[UREG_G1] = addr;
7939+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
7940+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7941+ else
7942+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7943+
7944+ if (test_thread_flag(TIF_32BIT))
7945+ addr &= 0xFFFFFFFFUL;
7946+
7947+ regs->tpc = addr;
7948+ regs->tnpc = addr+4;
7949+ return 2;
7950+ }
7951+ } while (0);
7952+
7953+ do { /* PaX: patched PLT emulation #4 */
7954+ unsigned int sethi, mov1, call, mov2;
7955+
7956+ err = get_user(sethi, (unsigned int *)regs->tpc);
7957+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
7958+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
7959+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
7960+
7961+ if (err)
7962+ break;
7963+
7964+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7965+ mov1 == 0x8210000FU &&
7966+ (call & 0xC0000000U) == 0x40000000U &&
7967+ mov2 == 0x9E100001U)
7968+ {
7969+ unsigned long addr;
7970+
7971+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
7972+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7973+
7974+ if (test_thread_flag(TIF_32BIT))
7975+ addr &= 0xFFFFFFFFUL;
7976+
7977+ regs->tpc = addr;
7978+ regs->tnpc = addr+4;
7979+ return 2;
7980+ }
7981+ } while (0);
7982+
7983+ do { /* PaX: patched PLT emulation #5 */
7984+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
7985+
7986+ err = get_user(sethi, (unsigned int *)regs->tpc);
7987+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7988+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7989+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
7990+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
7991+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
7992+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
7993+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
7994+
7995+ if (err)
7996+ break;
7997+
7998+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7999+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
8000+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
8001+ (or1 & 0xFFFFE000U) == 0x82106000U &&
8002+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
8003+ sllx == 0x83287020U &&
8004+ jmpl == 0x81C04005U &&
8005+ nop == 0x01000000U)
8006+ {
8007+ unsigned long addr;
8008+
8009+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
8010+ regs->u_regs[UREG_G1] <<= 32;
8011+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
8012+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
8013+ regs->tpc = addr;
8014+ regs->tnpc = addr+4;
8015+ return 2;
8016+ }
8017+ } while (0);
8018+
8019+ do { /* PaX: patched PLT emulation #6 */
8020+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
8021+
8022+ err = get_user(sethi, (unsigned int *)regs->tpc);
8023+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
8024+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
8025+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
8026+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
8027+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
8028+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
8029+
8030+ if (err)
8031+ break;
8032+
8033+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
8034+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
8035+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
8036+ sllx == 0x83287020U &&
8037+ (or & 0xFFFFE000U) == 0x8A116000U &&
8038+ jmpl == 0x81C04005U &&
8039+ nop == 0x01000000U)
8040+ {
8041+ unsigned long addr;
8042+
8043+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
8044+ regs->u_regs[UREG_G1] <<= 32;
8045+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
8046+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
8047+ regs->tpc = addr;
8048+ regs->tnpc = addr+4;
8049+ return 2;
8050+ }
8051+ } while (0);
8052+
8053+ do { /* PaX: unpatched PLT emulation step 1 */
8054+ unsigned int sethi, ba, nop;
8055+
8056+ err = get_user(sethi, (unsigned int *)regs->tpc);
8057+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
8058+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
8059+
8060+ if (err)
8061+ break;
8062+
8063+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
8064+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
8065+ nop == 0x01000000U)
8066+ {
8067+ unsigned long addr;
8068+ unsigned int save, call;
8069+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
8070+
8071+ if ((ba & 0xFFC00000U) == 0x30800000U)
8072+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
8073+ else
8074+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
8075+
8076+ if (test_thread_flag(TIF_32BIT))
8077+ addr &= 0xFFFFFFFFUL;
8078+
8079+ err = get_user(save, (unsigned int *)addr);
8080+ err |= get_user(call, (unsigned int *)(addr+4));
8081+ err |= get_user(nop, (unsigned int *)(addr+8));
8082+ if (err)
8083+ break;
8084+
8085+#ifdef CONFIG_PAX_DLRESOLVE
8086+ if (save == 0x9DE3BFA8U &&
8087+ (call & 0xC0000000U) == 0x40000000U &&
8088+ nop == 0x01000000U)
8089+ {
8090+ struct vm_area_struct *vma;
8091+ unsigned long call_dl_resolve;
8092+
8093+ down_read(&current->mm->mmap_sem);
8094+ call_dl_resolve = current->mm->call_dl_resolve;
8095+ up_read(&current->mm->mmap_sem);
8096+ if (likely(call_dl_resolve))
8097+ goto emulate;
8098+
8099+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
8100+
8101+ down_write(&current->mm->mmap_sem);
8102+ if (current->mm->call_dl_resolve) {
8103+ call_dl_resolve = current->mm->call_dl_resolve;
8104+ up_write(&current->mm->mmap_sem);
8105+ if (vma)
8106+ kmem_cache_free(vm_area_cachep, vma);
8107+ goto emulate;
8108+ }
8109+
8110+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
8111+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
8112+ up_write(&current->mm->mmap_sem);
8113+ if (vma)
8114+ kmem_cache_free(vm_area_cachep, vma);
8115+ return 1;
8116+ }
8117+
8118+ if (pax_insert_vma(vma, call_dl_resolve)) {
8119+ up_write(&current->mm->mmap_sem);
8120+ kmem_cache_free(vm_area_cachep, vma);
8121+ return 1;
8122+ }
8123+
8124+ current->mm->call_dl_resolve = call_dl_resolve;
8125+ up_write(&current->mm->mmap_sem);
8126+
8127+emulate:
8128+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
8129+ regs->tpc = call_dl_resolve;
8130+ regs->tnpc = addr+4;
8131+ return 3;
8132+ }
8133+#endif
8134+
8135+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
8136+ if ((save & 0xFFC00000U) == 0x05000000U &&
8137+ (call & 0xFFFFE000U) == 0x85C0A000U &&
8138+ nop == 0x01000000U)
8139+ {
8140+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
8141+ regs->u_regs[UREG_G2] = addr + 4;
8142+ addr = (save & 0x003FFFFFU) << 10;
8143+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
8144+
8145+ if (test_thread_flag(TIF_32BIT))
8146+ addr &= 0xFFFFFFFFUL;
8147+
8148+ regs->tpc = addr;
8149+ regs->tnpc = addr+4;
8150+ return 3;
8151+ }
8152+
8153+ /* PaX: 64-bit PLT stub */
8154+ err = get_user(sethi1, (unsigned int *)addr);
8155+ err |= get_user(sethi2, (unsigned int *)(addr+4));
8156+ err |= get_user(or1, (unsigned int *)(addr+8));
8157+ err |= get_user(or2, (unsigned int *)(addr+12));
8158+ err |= get_user(sllx, (unsigned int *)(addr+16));
8159+ err |= get_user(add, (unsigned int *)(addr+20));
8160+ err |= get_user(jmpl, (unsigned int *)(addr+24));
8161+ err |= get_user(nop, (unsigned int *)(addr+28));
8162+ if (err)
8163+ break;
8164+
8165+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
8166+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
8167+ (or1 & 0xFFFFE000U) == 0x88112000U &&
8168+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
8169+ sllx == 0x89293020U &&
8170+ add == 0x8A010005U &&
8171+ jmpl == 0x89C14000U &&
8172+ nop == 0x01000000U)
8173+ {
8174+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
8175+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
8176+ regs->u_regs[UREG_G4] <<= 32;
8177+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
8178+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
8179+ regs->u_regs[UREG_G4] = addr + 24;
8180+ addr = regs->u_regs[UREG_G5];
8181+ regs->tpc = addr;
8182+ regs->tnpc = addr+4;
8183+ return 3;
8184+ }
8185+ }
8186+ } while (0);
8187+
8188+#ifdef CONFIG_PAX_DLRESOLVE
8189+ do { /* PaX: unpatched PLT emulation step 2 */
8190+ unsigned int save, call, nop;
8191+
8192+ err = get_user(save, (unsigned int *)(regs->tpc-4));
8193+ err |= get_user(call, (unsigned int *)regs->tpc);
8194+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
8195+ if (err)
8196+ break;
8197+
8198+ if (save == 0x9DE3BFA8U &&
8199+ (call & 0xC0000000U) == 0x40000000U &&
8200+ nop == 0x01000000U)
8201+ {
8202+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
8203+
8204+ if (test_thread_flag(TIF_32BIT))
8205+ dl_resolve &= 0xFFFFFFFFUL;
8206+
8207+ regs->u_regs[UREG_RETPC] = regs->tpc;
8208+ regs->tpc = dl_resolve;
8209+ regs->tnpc = dl_resolve+4;
8210+ return 3;
8211+ }
8212+ } while (0);
8213+#endif
8214+
8215+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
8216+ unsigned int sethi, ba, nop;
8217+
8218+ err = get_user(sethi, (unsigned int *)regs->tpc);
8219+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
8220+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
8221+
8222+ if (err)
8223+ break;
8224+
8225+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
8226+ (ba & 0xFFF00000U) == 0x30600000U &&
8227+ nop == 0x01000000U)
8228+ {
8229+ unsigned long addr;
8230+
8231+ addr = (sethi & 0x003FFFFFU) << 10;
8232+ regs->u_regs[UREG_G1] = addr;
8233+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
8234+
8235+ if (test_thread_flag(TIF_32BIT))
8236+ addr &= 0xFFFFFFFFUL;
8237+
8238+ regs->tpc = addr;
8239+ regs->tnpc = addr+4;
8240+ return 2;
8241+ }
8242+ } while (0);
8243+
8244+#endif
8245+
8246+ return 1;
8247+}
8248+
8249+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
8250+{
8251+ unsigned long i;
8252+
8253+ printk(KERN_ERR "PAX: bytes at PC: ");
8254+ for (i = 0; i < 8; i++) {
8255+ unsigned int c;
8256+ if (get_user(c, (unsigned int *)pc+i))
8257+ printk(KERN_CONT "???????? ");
8258+ else
8259+ printk(KERN_CONT "%08x ", c);
8260+ }
8261+ printk("\n");
8262+}
8263+#endif
8264+
8265 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
8266 {
8267 struct mm_struct *mm = current->mm;
8268@@ -341,6 +804,29 @@ retry:
8269 if (!vma)
8270 goto bad_area;
8271
8272+#ifdef CONFIG_PAX_PAGEEXEC
8273+ /* PaX: detect ITLB misses on non-exec pages */
8274+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
8275+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
8276+ {
8277+ if (address != regs->tpc)
8278+ goto good_area;
8279+
8280+ up_read(&mm->mmap_sem);
8281+ switch (pax_handle_fetch_fault(regs)) {
8282+
8283+#ifdef CONFIG_PAX_EMUPLT
8284+ case 2:
8285+ case 3:
8286+ return;
8287+#endif
8288+
8289+ }
8290+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
8291+ do_group_exit(SIGKILL);
8292+ }
8293+#endif
8294+
8295 /* Pure DTLB misses do not tell us whether the fault causing
8296 * load/store/atomic was a write or not, it only says that there
8297 * was no match. So in such a case we (carefully) read the
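
The PLT-emulation code above repeatedly decodes SPARC immediates with the idiom (((insn | M) ^ S) + S) << 2: OR-ing in M forces every bit above the displacement field to one, XOR-ing and re-adding the field's sign bit S sign-extends it (positive values carry out and clear the forced ones), and the final shift turns an instruction-word count into a byte offset. The same idiom appears for the 19-bit and 22-bit branch forms and the 30-bit call form. A minimal standalone sketch of the 19-bit case; the function name and test values are mine, not the patch's:

#include <stdint.h>
#include <stdio.h>

/* Sign-extend the 19-bit displacement of a SPARC branch word and convert
 * it from instruction words to bytes, mirroring
 *   (((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2
 */
static uint64_t branch_offset(uint32_t ba)
{
	uint64_t d = ba | 0xFFFFFFFFFFF80000UL;	/* force bits 19..63 to 1 */
	d = (d ^ 0x00040000UL) + 0x00040000UL;	/* negative fields keep the ones
						 * above bit 18; positive fields
						 * carry out and clear them */
	return d << 2;				/* words -> bytes */
}

int main(void)
{
	printf("%lld\n", (long long)branch_offset(0x0007FFFF));	/* -4: back one insn */
	printf("%lld\n", (long long)branch_offset(0x00000001));	/* +4: forward one insn */
	return 0;
}
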
8298diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
8299index f76f83d..ee0d859 100644
8300--- a/arch/sparc/mm/hugetlbpage.c
8301+++ b/arch/sparc/mm/hugetlbpage.c
8302@@ -34,6 +34,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
8303 struct vm_area_struct * vma;
8304 unsigned long task_size = TASK_SIZE;
8305 unsigned long start_addr;
8306+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8307
8308 if (test_thread_flag(TIF_32BIT))
8309 task_size = STACK_TOP32;
8310@@ -67,7 +68,7 @@ full_search:
8311 }
8312 return -ENOMEM;
8313 }
8314- if (likely(!vma || addr + len <= vma->vm_start)) {
8315+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
8316 /*
8317 * Remember the place where we stopped the search:
8318 */
8319@@ -90,6 +91,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8320 struct vm_area_struct *vma;
8321 struct mm_struct *mm = current->mm;
8322 unsigned long addr = addr0;
8323+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8324
8325 /* This should only ever run for 32-bit processes. */
8326 BUG_ON(!test_thread_flag(TIF_32BIT));
8327@@ -106,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8328 /* make sure it can fit in the remaining address space */
8329 if (likely(addr > len)) {
8330 vma = find_vma(mm, addr-len);
8331- if (!vma || addr <= vma->vm_start) {
8332+ if (check_heap_stack_gap(vma, addr - len, len, offset)) {
8333 /* remember the address as a hint for next time */
8334 return (mm->free_area_cache = addr-len);
8335 }
8336@@ -115,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8337 if (unlikely(mm->mmap_base < len))
8338 goto bottomup;
8339
8340- addr = (mm->mmap_base-len) & HPAGE_MASK;
8341+ addr = mm->mmap_base - len;
8342
8343 do {
8344+ addr &= HPAGE_MASK;
8345 /*
8346 * Lookup failure means no vma is above this address,
8347 * else if new region fits below vma->vm_start,
8348 * return with success:
8349 */
8350 vma = find_vma(mm, addr);
8351- if (likely(!vma || addr+len <= vma->vm_start)) {
8352+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
8353 /* remember the address as a hint for next time */
8354 return (mm->free_area_cache = addr);
8355 }
8356@@ -134,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8357 mm->cached_hole_size = vma->vm_start - addr;
8358
8359 /* try just below the current vma->vm_start */
8360- addr = (vma->vm_start-len) & HPAGE_MASK;
8361- } while (likely(len < vma->vm_start));
8362+ addr = skip_heap_stack_gap(vma, len, offset);
8363+ } while (!IS_ERR_VALUE(addr));
8364
8365 bottomup:
8366 /*
8367@@ -163,6 +166,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
8368 struct mm_struct *mm = current->mm;
8369 struct vm_area_struct *vma;
8370 unsigned long task_size = TASK_SIZE;
8371+	unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
8372
8373 if (test_thread_flag(TIF_32BIT))
8374 task_size = STACK_TOP32;
8375@@ -181,8 +185,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
8376 if (addr) {
8377 addr = ALIGN(addr, HPAGE_SIZE);
8378 vma = find_vma(mm, addr);
8379- if (task_size - len >= addr &&
8380- (!vma || addr + len <= vma->vm_start))
8381+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8382 return addr;
8383 }
8384 if (mm->get_unmapped_area == arch_get_unmapped_area)
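
Throughout the hugetlb hunks above, the open-coded test "!vma || addr + len <= vma->vm_start" is replaced by check_heap_stack_gap(), which also enforces a guard gap (plus the per-thread offset from gr_rand_threadstack_offset()) below stack VMAs, and the topdown loop is restructured around skip_heap_stack_gap() with IS_ERR_VALUE() as its termination test. The following is a sketch of the shape of the check under assumed flag and gap values, not the patch's exact definition:

#include <stdbool.h>
#include <stdio.h>

struct vma_sketch {
	unsigned long vm_start;
	unsigned long vm_flags;
};

#define VM_GROWSDOWN_SKETCH	0x0100UL	/* assumed flag value */
#define GUARD_GAP_SKETCH	(64UL * 4096)	/* assumed guard size */

static bool check_heap_stack_gap_sketch(const struct vma_sketch *vma,
					unsigned long addr, unsigned long len,
					unsigned long offset)
{
	if (!vma)
		return true;			/* nothing mapped above */
	if (addr + len > vma->vm_start)
		return false;			/* overlaps the next VMA */
	if (vma->vm_flags & VM_GROWSDOWN_SKETCH) /* stack above: keep a gap */
		return addr + len + GUARD_GAP_SKETCH + offset <= vma->vm_start;
	return true;
}

int main(void)
{
	struct vma_sketch stack = { 0x7f0000000000UL, VM_GROWSDOWN_SKETCH };

	/* fits well below the guard gap vs. butts right up against the stack */
	printf("%d %d\n",
	       check_heap_stack_gap_sketch(&stack, 0x7e0000000000UL, 4096, 0),
	       check_heap_stack_gap_sketch(&stack, stack.vm_start - 4096, 4096, 0));
	return 0;
}
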
8385diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
8386index f4500c6..889656c 100644
8387--- a/arch/tile/include/asm/atomic_64.h
8388+++ b/arch/tile/include/asm/atomic_64.h
8389@@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
8390
8391 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
8392
8393+#define atomic64_read_unchecked(v) atomic64_read(v)
8394+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
8395+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
8396+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
8397+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
8398+#define atomic64_inc_unchecked(v) atomic64_inc(v)
8399+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
8400+#define atomic64_dec_unchecked(v) atomic64_dec(v)
8401+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
8402+
8403 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
8404 #define smp_mb__before_atomic_dec() smp_mb()
8405 #define smp_mb__after_atomic_dec() smp_mb()
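
The *_unchecked aliases above exist because PAX_REFCOUNT instruments the regular atomics with overflow detection on the architectures that implement it; tile does not, so the unchecked names are simply defined to the plain operations and arch-independent code can still be written against the checked/unchecked split. The aliasing pattern in standalone form (plain arithmetic stands in for the hardware atomic):

#include <stdio.h>

typedef long long atomic64_sketch_t;	/* stand-in for atomic64_t */

static void atomic64_add(long long a, atomic64_sketch_t *v)
{
	*v += a;	/* the real op is a hardware atomic, of course */
}

/* tile has no overflow-checked variant, so unchecked == plain: */
#define atomic64_add_unchecked(a, v)	atomic64_add((a), (v))

int main(void)
{
	atomic64_sketch_t counter = 0;

	atomic64_add_unchecked(5, &counter);
	printf("%lld\n", counter);	/* 5 */
	return 0;
}
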
8406diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
8407index a9a5299..0fce79e 100644
8408--- a/arch/tile/include/asm/cache.h
8409+++ b/arch/tile/include/asm/cache.h
8410@@ -15,11 +15,12 @@
8411 #ifndef _ASM_TILE_CACHE_H
8412 #define _ASM_TILE_CACHE_H
8413
8414+#include <linux/const.h>
8415 #include <arch/chip.h>
8416
8417 /* bytes per L1 data cache line */
8418 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
8419-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8420+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8421
8422 /* bytes per L2 cache line */
8423 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
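
Wrapping the shifted 1 in _AC(1,UL) gives L1_CACHE_BYTES the type unsigned long in C (so alignment arithmetic on addresses does not go through int), while still expanding to a bare number when the header is pulled into assembly, where a "UL" suffix would be a syntax error. The same change recurs below for um and unicore32. What <linux/const.h> provides, paraphrased, plus a quick check:

#include <stdio.h>

/* In C the suffix is token-pasted on, giving the constant the right
 * type; under __ASSEMBLY__ it expands to the bare number. */
#ifdef __ASSEMBLY__
#define _AC(X, Y)	X
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)
#endif

#define L1_CACHE_SHIFT	6
#define L1_CACHE_BYTES	(_AC(1, UL) << L1_CACHE_SHIFT)

int main(void)
{
	/* prints "8 64" on LP64: a full unsigned long, not an int */
	printf("%zu %lu\n", sizeof(L1_CACHE_BYTES), L1_CACHE_BYTES);
	return 0;
}
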
8424diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
8425index 9ab078a..d6635c2 100644
8426--- a/arch/tile/include/asm/uaccess.h
8427+++ b/arch/tile/include/asm/uaccess.h
8428@@ -403,9 +403,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
8429 const void __user *from,
8430 unsigned long n)
8431 {
8432- int sz = __compiletime_object_size(to);
8433+ size_t sz = __compiletime_object_size(to);
8434
8435- if (likely(sz == -1 || sz >= n))
8436+ if (likely(sz == (size_t)-1 || sz >= n))
8437 n = _copy_from_user(to, from, n);
8438 else
8439 copy_from_user_overflow();
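
__compiletime_object_size() wraps gcc's __builtin_object_size(), which yields (size_t)-1 when the size of the destination is unknown. Keeping the result in a plain int, as the old code did, makes "sz >= n" a signed/unsigned comparison; the hunk above keeps the whole check in size_t and spells the sentinel as (size_t)-1. A small demonstration; whether the size is known can depend on optimization level:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	char buf[16];
	size_t sz = __builtin_object_size(buf, 0);	/* what the kernel macro wraps */

	/* Under optimization this is 16; for pointers the compiler cannot
	 * track (or without optimization) it is the (size_t)-1 "unknown"
	 * sentinel that the rewritten check compares against. */
	if (sz == (size_t)-1)
		printf("size unknown\n");
	else
		printf("size %zu\n", sz);
	return 0;
}
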
8440diff --git a/arch/um/Makefile b/arch/um/Makefile
8441index 133f7de..1d6f2f1 100644
8442--- a/arch/um/Makefile
8443+++ b/arch/um/Makefile
8444@@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
8445 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
8446 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
8447
8448+ifdef CONSTIFY_PLUGIN
8449+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
8450+endif
8451+
8452 #This will adjust *FLAGS accordingly to the platform.
8453 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
8454
8455diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
8456index 19e1bdd..3665b77 100644
8457--- a/arch/um/include/asm/cache.h
8458+++ b/arch/um/include/asm/cache.h
8459@@ -1,6 +1,7 @@
8460 #ifndef __UM_CACHE_H
8461 #define __UM_CACHE_H
8462
8463+#include <linux/const.h>
8464
8465 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
8466 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8467@@ -12,6 +13,6 @@
8468 # define L1_CACHE_SHIFT 5
8469 #endif
8470
8471-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8472+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8473
8474 #endif
8475diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
8476index 2e0a6b1..a64d0f5 100644
8477--- a/arch/um/include/asm/kmap_types.h
8478+++ b/arch/um/include/asm/kmap_types.h
8479@@ -8,6 +8,6 @@
8480
8481 /* No more #include "asm/arch/kmap_types.h" ! */
8482
8483-#define KM_TYPE_NR 14
8484+#define KM_TYPE_NR 15
8485
8486 #endif
8487diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
8488index 5ff53d9..5850cdf 100644
8489--- a/arch/um/include/asm/page.h
8490+++ b/arch/um/include/asm/page.h
8491@@ -14,6 +14,9 @@
8492 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
8493 #define PAGE_MASK (~(PAGE_SIZE-1))
8494
8495+#define ktla_ktva(addr) (addr)
8496+#define ktva_ktla(addr) (addr)
8497+
8498 #ifndef __ASSEMBLY__
8499
8500 struct page;
8501diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
8502index 0032f92..cd151e0 100644
8503--- a/arch/um/include/asm/pgtable-3level.h
8504+++ b/arch/um/include/asm/pgtable-3level.h
8505@@ -58,6 +58,7 @@
8506 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
8507 #define pud_populate(mm, pud, pmd) \
8508 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
8509+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
8510
8511 #ifdef CONFIG_64BIT
8512 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
8513diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
8514index b6d699c..df7ac1d 100644
8515--- a/arch/um/kernel/process.c
8516+++ b/arch/um/kernel/process.c
8517@@ -387,22 +387,6 @@ int singlestepping(void * t)
8518 return 2;
8519 }
8520
8521-/*
8522- * Only x86 and x86_64 have an arch_align_stack().
8523- * All other arches have "#define arch_align_stack(x) (x)"
8524- * in their asm/system.h
8525- * As this is included in UML from asm-um/system-generic.h,
8526- * we can use it to behave as the subarch does.
8527- */
8528-#ifndef arch_align_stack
8529-unsigned long arch_align_stack(unsigned long sp)
8530-{
8531- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
8532- sp -= get_random_int() % 8192;
8533- return sp & ~0xf;
8534-}
8535-#endif
8536-
8537 unsigned long get_wchan(struct task_struct *p)
8538 {
8539 unsigned long stack_page, sp, ip;
8540diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
8541index ad8f795..2c7eec6 100644
8542--- a/arch/unicore32/include/asm/cache.h
8543+++ b/arch/unicore32/include/asm/cache.h
8544@@ -12,8 +12,10 @@
8545 #ifndef __UNICORE_CACHE_H__
8546 #define __UNICORE_CACHE_H__
8547
8548-#define L1_CACHE_SHIFT (5)
8549-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8550+#include <linux/const.h>
8551+
8552+#define L1_CACHE_SHIFT 5
8553+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8554
8555 /*
8556 * Memory returned by kmalloc() may be used for DMA, so we must make
8557diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
8558index 46c3bff..c2286e7 100644
8559--- a/arch/x86/Kconfig
8560+++ b/arch/x86/Kconfig
8561@@ -241,7 +241,7 @@ config X86_HT
8562
8563 config X86_32_LAZY_GS
8564 def_bool y
8565- depends on X86_32 && !CC_STACKPROTECTOR
8566+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
8567
8568 config ARCH_HWEIGHT_CFLAGS
8569 string
8570@@ -1056,7 +1056,7 @@ choice
8571
8572 config NOHIGHMEM
8573 bool "off"
8574- depends on !X86_NUMAQ
8575+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
8576 ---help---
8577 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
8578 However, the address space of 32-bit x86 processors is only 4
8579@@ -1093,7 +1093,7 @@ config NOHIGHMEM
8580
8581 config HIGHMEM4G
8582 bool "4GB"
8583- depends on !X86_NUMAQ
8584+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
8585 ---help---
8586 Select this if you have a 32-bit processor and between 1 and 4
8587 gigabytes of physical RAM.
8588@@ -1147,7 +1147,7 @@ config PAGE_OFFSET
8589 hex
8590 default 0xB0000000 if VMSPLIT_3G_OPT
8591 default 0x80000000 if VMSPLIT_2G
8592- default 0x78000000 if VMSPLIT_2G_OPT
8593+ default 0x70000000 if VMSPLIT_2G_OPT
8594 default 0x40000000 if VMSPLIT_1G
8595 default 0xC0000000
8596 depends on X86_32
8597@@ -1548,6 +1548,7 @@ config SECCOMP
8598
8599 config CC_STACKPROTECTOR
8600 bool "Enable -fstack-protector buffer overflow detection"
8601+ depends on X86_64 || !PAX_MEMORY_UDEREF
8602 ---help---
8603 This option turns on the -fstack-protector GCC feature. This
8604 feature puts, at the beginning of functions, a canary value on
8605@@ -1605,6 +1606,7 @@ config KEXEC_JUMP
8606 config PHYSICAL_START
8607 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
8608 default "0x1000000"
8609+ range 0x400000 0x40000000
8610 ---help---
8611 This gives the physical address where the kernel is loaded.
8612
8613@@ -1668,6 +1670,7 @@ config X86_NEED_RELOCS
8614 config PHYSICAL_ALIGN
8615 hex "Alignment value to which kernel should be aligned" if X86_32
8616 default "0x1000000"
8617+ range 0x400000 0x1000000 if PAX_KERNEXEC
8618 range 0x2000 0x1000000
8619 ---help---
8620 This value puts the alignment restrictions on physical address
8621@@ -1699,9 +1702,10 @@ config HOTPLUG_CPU
8622 Say N if you want to disable CPU hotplug.
8623
8624 config COMPAT_VDSO
8625- def_bool y
8626+ def_bool n
8627 prompt "Compat VDSO support"
8628 depends on X86_32 || IA32_EMULATION
8629+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
8630 ---help---
8631 Map the 32-bit VDSO to the predictable old-style address too.
8632
8633diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
8634index f3b86d0..17fd30f 100644
8635--- a/arch/x86/Kconfig.cpu
8636+++ b/arch/x86/Kconfig.cpu
8637@@ -335,7 +335,7 @@ config X86_PPRO_FENCE
8638
8639 config X86_F00F_BUG
8640 def_bool y
8641- depends on M586MMX || M586TSC || M586 || M486 || M386
8642+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
8643
8644 config X86_INVD_BUG
8645 def_bool y
8646@@ -359,7 +359,7 @@ config X86_POPAD_OK
8647
8648 config X86_ALIGNMENT_16
8649 def_bool y
8650- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
8651+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
8652
8653 config X86_INTEL_USERCOPY
8654 def_bool y
8655@@ -405,7 +405,7 @@ config X86_CMPXCHG64
8656 # generates cmov.
8657 config X86_CMOV
8658 def_bool y
8659- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
8660+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
8661
8662 config X86_MINIMUM_CPU_FAMILY
8663 int
8664diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
8665index b322f12..652d0d9 100644
8666--- a/arch/x86/Kconfig.debug
8667+++ b/arch/x86/Kconfig.debug
8668@@ -84,7 +84,7 @@ config X86_PTDUMP
8669 config DEBUG_RODATA
8670 bool "Write protect kernel read-only data structures"
8671 default y
8672- depends on DEBUG_KERNEL
8673+ depends on DEBUG_KERNEL && BROKEN
8674 ---help---
8675 Mark the kernel read-only data as write-protected in the pagetables,
8676 in order to catch accidental (and incorrect) writes to such const
8677@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
8678
8679 config DEBUG_SET_MODULE_RONX
8680 bool "Set loadable kernel module data as NX and text as RO"
8681- depends on MODULES
8682+ depends on MODULES && BROKEN
8683 ---help---
8684 This option helps catch unintended modifications to loadable
8685 kernel module's text and read-only data. It also prevents execution
8686@@ -294,7 +294,7 @@ config OPTIMIZE_INLINING
8687
8688 config DEBUG_STRICT_USER_COPY_CHECKS
8689 bool "Strict copy size checks"
8690- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
8691+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
8692 ---help---
8693 Enabling this option turns a certain set of sanity checks for user
8694 copy operations into compile time failures.
8695diff --git a/arch/x86/Makefile b/arch/x86/Makefile
8696index 05afcca..b6ecb51 100644
8697--- a/arch/x86/Makefile
8698+++ b/arch/x86/Makefile
8699@@ -50,6 +50,7 @@ else
8700 UTS_MACHINE := x86_64
8701 CHECKFLAGS += -D__x86_64__ -m64
8702
8703+ biarch := $(call cc-option,-m64)
8704 KBUILD_AFLAGS += -m64
8705 KBUILD_CFLAGS += -m64
8706
8707@@ -229,3 +230,12 @@ define archhelp
8708 echo ' FDARGS="..." arguments for the booted kernel'
8709 echo ' FDINITRD=file initrd for the booted kernel'
8710 endef
8711+
8712+define OLD_LD
8713+
8714+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
8715+*** Please upgrade your binutils to 2.18 or newer
8716+endef
8717+
8718+archprepare:
8719+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
8720diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
8721index ccce0ed..fd9da25 100644
8722--- a/arch/x86/boot/Makefile
8723+++ b/arch/x86/boot/Makefile
8724@@ -65,6 +65,9 @@ KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
8725 $(call cc-option, -fno-stack-protector) \
8726 $(call cc-option, -mpreferred-stack-boundary=2)
8727 KBUILD_CFLAGS += $(call cc-option, -m32)
8728+ifdef CONSTIFY_PLUGIN
8729+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
8730+endif
8731 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
8732 GCOV_PROFILE := n
8733
8734diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
8735index 878e4b9..20537ab 100644
8736--- a/arch/x86/boot/bitops.h
8737+++ b/arch/x86/boot/bitops.h
8738@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
8739 u8 v;
8740 const u32 *p = (const u32 *)addr;
8741
8742- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
8743+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
8744 return v;
8745 }
8746
8747@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
8748
8749 static inline void set_bit(int nr, void *addr)
8750 {
8751- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
8752+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
8753 }
8754
8755 #endif /* BOOT_BITOPS_H */
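
The boot-code hunks above (and the cpucheck.c ones further down) add volatile to asm statements that read or write machine state the compiler cannot model: flags, control registers, MSRs, cpuid. Without volatile, gcc treats an asm with outputs as a pure function of its inputs and may fold two identical ones into a single execution. rdtsc makes the difference observable; an x86-only sketch:

#include <stdint.h>
#include <stdio.h>

static uint64_t rdtsc_pure(void)
{
	uint32_t lo, hi;
	asm("rdtsc" : "=a" (lo), "=d" (hi));		/* no volatile: CSE fodder */
	return ((uint64_t)hi << 32) | lo;
}

static uint64_t rdtsc_volatile(void)
{
	uint32_t lo, hi;
	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));	/* always re-executed */
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	uint64_t a = rdtsc_pure();
	uint64_t b = rdtsc_pure();	/* with -O2, often folded into 'a' */
	uint64_t c = rdtsc_volatile();
	uint64_t d = rdtsc_volatile();	/* always a distinct second read */

	printf("pure delta:     %llu\n", (unsigned long long)(b - a));
	printf("volatile delta: %llu\n", (unsigned long long)(d - c));
	return 0;
}
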
8756diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
8757index 18997e5..83d9c67 100644
8758--- a/arch/x86/boot/boot.h
8759+++ b/arch/x86/boot/boot.h
8760@@ -85,7 +85,7 @@ static inline void io_delay(void)
8761 static inline u16 ds(void)
8762 {
8763 u16 seg;
8764- asm("movw %%ds,%0" : "=rm" (seg));
8765+ asm volatile("movw %%ds,%0" : "=rm" (seg));
8766 return seg;
8767 }
8768
8769@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
8770 static inline int memcmp(const void *s1, const void *s2, size_t len)
8771 {
8772 u8 diff;
8773- asm("repe; cmpsb; setnz %0"
8774+ asm volatile("repe; cmpsb; setnz %0"
8775 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
8776 return diff;
8777 }
8778diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
8779index 8a84501..b2d165f 100644
8780--- a/arch/x86/boot/compressed/Makefile
8781+++ b/arch/x86/boot/compressed/Makefile
8782@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
8783 KBUILD_CFLAGS += $(cflags-y)
8784 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
8785 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
8786+ifdef CONSTIFY_PLUGIN
8787+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
8788+endif
8789
8790 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
8791 GCOV_PROFILE := n
8792diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
8793index e87b0ca..2bce457 100644
8794--- a/arch/x86/boot/compressed/eboot.c
8795+++ b/arch/x86/boot/compressed/eboot.c
8796@@ -144,7 +144,6 @@ again:
8797 *addr = max_addr;
8798 }
8799
8800-free_pool:
8801 efi_call_phys1(sys_table->boottime->free_pool, map);
8802
8803 fail:
8804@@ -208,7 +207,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
8805 if (i == map_size / desc_size)
8806 status = EFI_NOT_FOUND;
8807
8808-free_pool:
8809 efi_call_phys1(sys_table->boottime->free_pool, map);
8810 fail:
8811 return status;
8812diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
8813index aa4aaf1..6656f2f 100644
8814--- a/arch/x86/boot/compressed/head_32.S
8815+++ b/arch/x86/boot/compressed/head_32.S
8816@@ -116,7 +116,7 @@ preferred_addr:
8817 notl %eax
8818 andl %eax, %ebx
8819 #else
8820- movl $LOAD_PHYSICAL_ADDR, %ebx
8821+ movl $____LOAD_PHYSICAL_ADDR, %ebx
8822 #endif
8823
8824 /* Target address to relocate to for decompression */
8825@@ -202,7 +202,7 @@ relocated:
8826 * and where it was actually loaded.
8827 */
8828 movl %ebp, %ebx
8829- subl $LOAD_PHYSICAL_ADDR, %ebx
8830+ subl $____LOAD_PHYSICAL_ADDR, %ebx
8831 jz 2f /* Nothing to be done if loaded at compiled addr. */
8832 /*
8833 * Process relocations.
8834@@ -210,8 +210,7 @@ relocated:
8835
8836 1: subl $4, %edi
8837 movl (%edi), %ecx
8838- testl %ecx, %ecx
8839- jz 2f
8840+ jecxz 2f
8841 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
8842 jmp 1b
8843 2:
8844diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
8845index 2c4b171..e1fa5b1 100644
8846--- a/arch/x86/boot/compressed/head_64.S
8847+++ b/arch/x86/boot/compressed/head_64.S
8848@@ -91,7 +91,7 @@ ENTRY(startup_32)
8849 notl %eax
8850 andl %eax, %ebx
8851 #else
8852- movl $LOAD_PHYSICAL_ADDR, %ebx
8853+ movl $____LOAD_PHYSICAL_ADDR, %ebx
8854 #endif
8855
8856 /* Target address to relocate to for decompression */
8857@@ -273,7 +273,7 @@ preferred_addr:
8858 notq %rax
8859 andq %rax, %rbp
8860 #else
8861- movq $LOAD_PHYSICAL_ADDR, %rbp
8862+ movq $____LOAD_PHYSICAL_ADDR, %rbp
8863 #endif
8864
8865 /* Target address to relocate to for decompression */
8866diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
8867index 88f7ff6..ed695dd 100644
8868--- a/arch/x86/boot/compressed/misc.c
8869+++ b/arch/x86/boot/compressed/misc.c
8870@@ -303,7 +303,7 @@ static void parse_elf(void *output)
8871 case PT_LOAD:
8872 #ifdef CONFIG_RELOCATABLE
8873 dest = output;
8874- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
8875+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
8876 #else
8877 dest = (void *)(phdr->p_paddr);
8878 #endif
8879@@ -352,7 +352,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
8880 error("Destination address too large");
8881 #endif
8882 #ifndef CONFIG_RELOCATABLE
8883- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
8884+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
8885 error("Wrong destination address");
8886 #endif
8887
8888diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
8889index 4d3ff03..e4972ff 100644
8890--- a/arch/x86/boot/cpucheck.c
8891+++ b/arch/x86/boot/cpucheck.c
8892@@ -74,7 +74,7 @@ static int has_fpu(void)
8893 u16 fcw = -1, fsw = -1;
8894 u32 cr0;
8895
8896- asm("movl %%cr0,%0" : "=r" (cr0));
8897+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
8898 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
8899 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
8900 asm volatile("movl %0,%%cr0" : : "r" (cr0));
8901@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
8902 {
8903 u32 f0, f1;
8904
8905- asm("pushfl ; "
8906+ asm volatile("pushfl ; "
8907 "pushfl ; "
8908 "popl %0 ; "
8909 "movl %0,%1 ; "
8910@@ -115,7 +115,7 @@ static void get_flags(void)
8911 set_bit(X86_FEATURE_FPU, cpu.flags);
8912
8913 if (has_eflag(X86_EFLAGS_ID)) {
8914- asm("cpuid"
8915+ asm volatile("cpuid"
8916 : "=a" (max_intel_level),
8917 "=b" (cpu_vendor[0]),
8918 "=d" (cpu_vendor[1]),
8919@@ -124,7 +124,7 @@ static void get_flags(void)
8920
8921 if (max_intel_level >= 0x00000001 &&
8922 max_intel_level <= 0x0000ffff) {
8923- asm("cpuid"
8924+ asm volatile("cpuid"
8925 : "=a" (tfms),
8926 "=c" (cpu.flags[4]),
8927 "=d" (cpu.flags[0])
8928@@ -136,7 +136,7 @@ static void get_flags(void)
8929 cpu.model += ((tfms >> 16) & 0xf) << 4;
8930 }
8931
8932- asm("cpuid"
8933+ asm volatile("cpuid"
8934 : "=a" (max_amd_level)
8935 : "a" (0x80000000)
8936 : "ebx", "ecx", "edx");
8937@@ -144,7 +144,7 @@ static void get_flags(void)
8938 if (max_amd_level >= 0x80000001 &&
8939 max_amd_level <= 0x8000ffff) {
8940 u32 eax = 0x80000001;
8941- asm("cpuid"
8942+ asm volatile("cpuid"
8943 : "+a" (eax),
8944 "=c" (cpu.flags[6]),
8945 "=d" (cpu.flags[1])
8946@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8947 u32 ecx = MSR_K7_HWCR;
8948 u32 eax, edx;
8949
8950- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8951+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8952 eax &= ~(1 << 15);
8953- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8954+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8955
8956 get_flags(); /* Make sure it really did something */
8957 err = check_flags();
8958@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8959 u32 ecx = MSR_VIA_FCR;
8960 u32 eax, edx;
8961
8962- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8963+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8964 eax |= (1<<1)|(1<<7);
8965- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8966+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8967
8968 set_bit(X86_FEATURE_CX8, cpu.flags);
8969 err = check_flags();
8970@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8971 u32 eax, edx;
8972 u32 level = 1;
8973
8974- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8975- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8976- asm("cpuid"
8977+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8978+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8979+ asm volatile("cpuid"
8980 : "+a" (level), "=d" (cpu.flags[0])
8981 : : "ecx", "ebx");
8982- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8983+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8984
8985 err = check_flags();
8986 }
8987diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
8988index 8c132a6..13e5c96 100644
8989--- a/arch/x86/boot/header.S
8990+++ b/arch/x86/boot/header.S
8991@@ -387,10 +387,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
8992 # single linked list of
8993 # struct setup_data
8994
8995-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
8996+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
8997
8998 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
8999+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
9000+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
9001+#else
9002 #define VO_INIT_SIZE (VO__end - VO__text)
9003+#endif
9004 #if ZO_INIT_SIZE > VO_INIT_SIZE
9005 #define INIT_SIZE ZO_INIT_SIZE
9006 #else
9007diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
9008index db75d07..8e6d0af 100644
9009--- a/arch/x86/boot/memory.c
9010+++ b/arch/x86/boot/memory.c
9011@@ -19,7 +19,7 @@
9012
9013 static int detect_memory_e820(void)
9014 {
9015- int count = 0;
9016+ unsigned int count = 0;
9017 struct biosregs ireg, oreg;
9018 struct e820entry *desc = boot_params.e820_map;
9019 static struct e820entry buf; /* static so it is zeroed */
9020diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
9021index 11e8c6e..fdbb1ed 100644
9022--- a/arch/x86/boot/video-vesa.c
9023+++ b/arch/x86/boot/video-vesa.c
9024@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
9025
9026 boot_params.screen_info.vesapm_seg = oreg.es;
9027 boot_params.screen_info.vesapm_off = oreg.di;
9028+ boot_params.screen_info.vesapm_size = oreg.cx;
9029 }
9030
9031 /*
9032diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
9033index 43eda28..5ab5fdb 100644
9034--- a/arch/x86/boot/video.c
9035+++ b/arch/x86/boot/video.c
9036@@ -96,7 +96,7 @@ static void store_mode_params(void)
9037 static unsigned int get_entry(void)
9038 {
9039 char entry_buf[4];
9040- int i, len = 0;
9041+ unsigned int i, len = 0;
9042 int key;
9043 unsigned int v;
9044
9045diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
9046index 5b577d5..3c1fed4 100644
9047--- a/arch/x86/crypto/aes-x86_64-asm_64.S
9048+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
9049@@ -8,6 +8,8 @@
9050 * including this sentence is retained in full.
9051 */
9052
9053+#include <asm/alternative-asm.h>
9054+
9055 .extern crypto_ft_tab
9056 .extern crypto_it_tab
9057 .extern crypto_fl_tab
9058@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
9059 je B192; \
9060 leaq 32(r9),r9;
9061
9062+#define ret pax_force_retaddr 0, 1; ret
9063+
9064 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
9065 movq r1,r2; \
9066 movq r3,r4; \
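
Each added pax_force_retaddr sits immediately before a ret and rewrites the saved return address on the stack so it stays in the kernel half of the address space: a return address overwritten with a userland pointer becomes non-canonical and faults instead of being executed. The same macro recurs through all the crypto asm files that follow. A userspace model of the idea only; the macro's exact expansion is configuration-dependent:

#include <stdint.h>
#include <stdio.h>

static uint64_t force_kernel_retaddr(uint64_t retaddr)
{
	return retaddr | (1ULL << 63);	/* akin to: btsq $63,(%rsp) */
}

int main(void)
{
	uint64_t kern = 0xffffffff81000000ULL;	/* bit 63 already set: unchanged */
	uint64_t user = 0x00007f0000001000ULL;	/* becomes non-canonical */

	printf("%#llx\n", (unsigned long long)force_kernel_retaddr(kern));
	printf("%#llx\n", (unsigned long long)force_kernel_retaddr(user));
	return 0;
}
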
9067diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
9068index 3470624..201259d 100644
9069--- a/arch/x86/crypto/aesni-intel_asm.S
9070+++ b/arch/x86/crypto/aesni-intel_asm.S
9071@@ -31,6 +31,7 @@
9072
9073 #include <linux/linkage.h>
9074 #include <asm/inst.h>
9075+#include <asm/alternative-asm.h>
9076
9077 #ifdef __x86_64__
9078 .data
9079@@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
9080 pop %r14
9081 pop %r13
9082 pop %r12
9083+ pax_force_retaddr 0, 1
9084 ret
9085+ENDPROC(aesni_gcm_dec)
9086
9087
9088 /*****************************************************************************
9089@@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
9090 pop %r14
9091 pop %r13
9092 pop %r12
9093+ pax_force_retaddr 0, 1
9094 ret
9095+ENDPROC(aesni_gcm_enc)
9096
9097 #endif
9098
9099@@ -1714,6 +1719,7 @@ _key_expansion_256a:
9100 pxor %xmm1, %xmm0
9101 movaps %xmm0, (TKEYP)
9102 add $0x10, TKEYP
9103+ pax_force_retaddr_bts
9104 ret
9105
9106 .align 4
9107@@ -1738,6 +1744,7 @@ _key_expansion_192a:
9108 shufps $0b01001110, %xmm2, %xmm1
9109 movaps %xmm1, 0x10(TKEYP)
9110 add $0x20, TKEYP
9111+ pax_force_retaddr_bts
9112 ret
9113
9114 .align 4
9115@@ -1757,6 +1764,7 @@ _key_expansion_192b:
9116
9117 movaps %xmm0, (TKEYP)
9118 add $0x10, TKEYP
9119+ pax_force_retaddr_bts
9120 ret
9121
9122 .align 4
9123@@ -1769,6 +1777,7 @@ _key_expansion_256b:
9124 pxor %xmm1, %xmm2
9125 movaps %xmm2, (TKEYP)
9126 add $0x10, TKEYP
9127+ pax_force_retaddr_bts
9128 ret
9129
9130 /*
9131@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
9132 #ifndef __x86_64__
9133 popl KEYP
9134 #endif
9135+ pax_force_retaddr 0, 1
9136 ret
9137+ENDPROC(aesni_set_key)
9138
9139 /*
9140 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
9141@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
9142 popl KLEN
9143 popl KEYP
9144 #endif
9145+ pax_force_retaddr 0, 1
9146 ret
9147+ENDPROC(aesni_enc)
9148
9149 /*
9150 * _aesni_enc1: internal ABI
9151@@ -1959,6 +1972,7 @@ _aesni_enc1:
9152 AESENC KEY STATE
9153 movaps 0x70(TKEYP), KEY
9154 AESENCLAST KEY STATE
9155+ pax_force_retaddr_bts
9156 ret
9157
9158 /*
9159@@ -2067,6 +2081,7 @@ _aesni_enc4:
9160 AESENCLAST KEY STATE2
9161 AESENCLAST KEY STATE3
9162 AESENCLAST KEY STATE4
9163+ pax_force_retaddr_bts
9164 ret
9165
9166 /*
9167@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
9168 popl KLEN
9169 popl KEYP
9170 #endif
9171+ pax_force_retaddr 0, 1
9172 ret
9173+ENDPROC(aesni_dec)
9174
9175 /*
9176 * _aesni_dec1: internal ABI
9177@@ -2146,6 +2163,7 @@ _aesni_dec1:
9178 AESDEC KEY STATE
9179 movaps 0x70(TKEYP), KEY
9180 AESDECLAST KEY STATE
9181+ pax_force_retaddr_bts
9182 ret
9183
9184 /*
9185@@ -2254,6 +2272,7 @@ _aesni_dec4:
9186 AESDECLAST KEY STATE2
9187 AESDECLAST KEY STATE3
9188 AESDECLAST KEY STATE4
9189+ pax_force_retaddr_bts
9190 ret
9191
9192 /*
9193@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
9194 popl KEYP
9195 popl LEN
9196 #endif
9197+ pax_force_retaddr 0, 1
9198 ret
9199+ENDPROC(aesni_ecb_enc)
9200
9201 /*
9202 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
9203@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
9204 popl KEYP
9205 popl LEN
9206 #endif
9207+ pax_force_retaddr 0, 1
9208 ret
9209+ENDPROC(aesni_ecb_dec)
9210
9211 /*
9212 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
9213@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
9214 popl LEN
9215 popl IVP
9216 #endif
9217+ pax_force_retaddr 0, 1
9218 ret
9219+ENDPROC(aesni_cbc_enc)
9220
9221 /*
9222 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
9223@@ -2500,7 +2525,9 @@ ENTRY(aesni_cbc_dec)
9224 popl LEN
9225 popl IVP
9226 #endif
9227+ pax_force_retaddr 0, 1
9228 ret
9229+ENDPROC(aesni_cbc_dec)
9230
9231 #ifdef __x86_64__
9232 .align 16
9233@@ -2526,6 +2553,7 @@ _aesni_inc_init:
9234 mov $1, TCTR_LOW
9235 MOVQ_R64_XMM TCTR_LOW INC
9236 MOVQ_R64_XMM CTR TCTR_LOW
9237+ pax_force_retaddr_bts
9238 ret
9239
9240 /*
9241@@ -2554,6 +2582,7 @@ _aesni_inc:
9242 .Linc_low:
9243 movaps CTR, IV
9244 PSHUFB_XMM BSWAP_MASK IV
9245+ pax_force_retaddr_bts
9246 ret
9247
9248 /*
9249@@ -2614,5 +2643,7 @@ ENTRY(aesni_ctr_enc)
9250 .Lctr_enc_ret:
9251 movups IV, (IVP)
9252 .Lctr_enc_just_ret:
9253+ pax_force_retaddr 0, 1
9254 ret
9255+ENDPROC(aesni_ctr_enc)
9256 #endif
9257diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
9258index 391d245..67f35c2 100644
9259--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
9260+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
9261@@ -20,6 +20,8 @@
9262 *
9263 */
9264
9265+#include <asm/alternative-asm.h>
9266+
9267 .file "blowfish-x86_64-asm.S"
9268 .text
9269
9270@@ -151,9 +153,11 @@ __blowfish_enc_blk:
9271 jnz __enc_xor;
9272
9273 write_block();
9274+ pax_force_retaddr 0, 1
9275 ret;
9276 __enc_xor:
9277 xor_block();
9278+ pax_force_retaddr 0, 1
9279 ret;
9280
9281 .align 8
9282@@ -188,6 +192,7 @@ blowfish_dec_blk:
9283
9284 movq %r11, %rbp;
9285
9286+ pax_force_retaddr 0, 1
9287 ret;
9288
9289 /**********************************************************************
9290@@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
9291
9292 popq %rbx;
9293 popq %rbp;
9294+ pax_force_retaddr 0, 1
9295 ret;
9296
9297 __enc_xor4:
9298@@ -349,6 +355,7 @@ __enc_xor4:
9299
9300 popq %rbx;
9301 popq %rbp;
9302+ pax_force_retaddr 0, 1
9303 ret;
9304
9305 .align 8
9306@@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
9307 popq %rbx;
9308 popq %rbp;
9309
9310+ pax_force_retaddr 0, 1
9311 ret;
9312
9313diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
9314index 0b33743..7a56206 100644
9315--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
9316+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
9317@@ -20,6 +20,8 @@
9318 *
9319 */
9320
9321+#include <asm/alternative-asm.h>
9322+
9323 .file "camellia-x86_64-asm_64.S"
9324 .text
9325
9326@@ -229,12 +231,14 @@ __enc_done:
9327 enc_outunpack(mov, RT1);
9328
9329 movq RRBP, %rbp;
9330+ pax_force_retaddr 0, 1
9331 ret;
9332
9333 __enc_xor:
9334 enc_outunpack(xor, RT1);
9335
9336 movq RRBP, %rbp;
9337+ pax_force_retaddr 0, 1
9338 ret;
9339
9340 .global camellia_dec_blk;
9341@@ -275,6 +279,7 @@ __dec_rounds16:
9342 dec_outunpack();
9343
9344 movq RRBP, %rbp;
9345+ pax_force_retaddr 0, 1
9346 ret;
9347
9348 /**********************************************************************
9349@@ -468,6 +473,7 @@ __enc2_done:
9350
9351 movq RRBP, %rbp;
9352 popq %rbx;
9353+ pax_force_retaddr 0, 1
9354 ret;
9355
9356 __enc2_xor:
9357@@ -475,6 +481,7 @@ __enc2_xor:
9358
9359 movq RRBP, %rbp;
9360 popq %rbx;
9361+ pax_force_retaddr 0, 1
9362 ret;
9363
9364 .global camellia_dec_blk_2way;
9365@@ -517,4 +524,5 @@ __dec2_rounds16:
9366
9367 movq RRBP, %rbp;
9368 movq RXOR, %rbx;
9369+ pax_force_retaddr 0, 1
9370 ret;
9371diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
9372index a41a3aa..bdf5753 100644
9373--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
9374+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
9375@@ -23,6 +23,8 @@
9376 *
9377 */
9378
9379+#include <asm/alternative-asm.h>
9380+
9381 .file "cast5-avx-x86_64-asm_64.S"
9382
9383 .extern cast5_s1
9384@@ -293,6 +295,7 @@ __skip_enc:
9385 leaq 3*(2*4*4)(%r11), %rax;
9386 outunpack_blocks(%rax, RR4, RL4, RTMP, RX, RKM);
9387
9388+ pax_force_retaddr 0, 1
9389 ret;
9390
9391 __enc_xor16:
9392@@ -303,6 +306,7 @@ __enc_xor16:
9393 leaq 3*(2*4*4)(%r11), %rax;
9394 outunpack_xor_blocks(%rax, RR4, RL4, RTMP, RX, RKM);
9395
9396+ pax_force_retaddr 0, 1
9397 ret;
9398
9399 .align 16
9400@@ -369,6 +373,7 @@ __dec_tail:
9401 leaq 3*(2*4*4)(%r11), %rax;
9402 outunpack_blocks(%rax, RR4, RL4, RTMP, RX, RKM);
9403
9404+ pax_force_retaddr 0, 1
9405 ret;
9406
9407 __skip_dec:
9408diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
9409index 218d283..819e6da 100644
9410--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
9411+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
9412@@ -23,6 +23,8 @@
9413 *
9414 */
9415
9416+#include <asm/alternative-asm.h>
9417+
9418 .file "cast6-avx-x86_64-asm_64.S"
9419
9420 .extern cast6_s1
9421@@ -324,12 +326,14 @@ __cast6_enc_blk_8way:
9422 outunpack_blocks(%r11, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
9423 outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
9424
9425+ pax_force_retaddr 0, 1
9426 ret;
9427
9428 __enc_xor8:
9429 outunpack_xor_blocks(%r11, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
9430 outunpack_xor_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
9431
9432+ pax_force_retaddr 0, 1
9433 ret;
9434
9435 .align 16
9436@@ -380,4 +384,5 @@ cast6_dec_blk_8way:
9437 outunpack_blocks(%r11, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
9438 outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
9439
9440+ pax_force_retaddr 0, 1
9441 ret;
9442diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
9443index 6214a9b..1f4fc9a 100644
9444--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
9445+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
9446@@ -1,3 +1,5 @@
9447+#include <asm/alternative-asm.h>
9448+
9449 # enter ECRYPT_encrypt_bytes
9450 .text
9451 .p2align 5
9452@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
9453 add %r11,%rsp
9454 mov %rdi,%rax
9455 mov %rsi,%rdx
9456+ pax_force_retaddr 0, 1
9457 ret
9458 # bytesatleast65:
9459 ._bytesatleast65:
9460@@ -891,6 +894,7 @@ ECRYPT_keysetup:
9461 add %r11,%rsp
9462 mov %rdi,%rax
9463 mov %rsi,%rdx
9464+ pax_force_retaddr
9465 ret
9466 # enter ECRYPT_ivsetup
9467 .text
9468@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
9469 add %r11,%rsp
9470 mov %rdi,%rax
9471 mov %rsi,%rdx
9472+ pax_force_retaddr
9473 ret
9474diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
9475index 504106b..4e50951 100644
9476--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
9477+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
9478@@ -24,6 +24,8 @@
9479 *
9480 */
9481
9482+#include <asm/alternative-asm.h>
9483+
9484 .file "serpent-avx-x86_64-asm_64.S"
9485 .text
9486
9487@@ -638,12 +640,14 @@ __serpent_enc_blk_8way_avx:
9488 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
9489 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
9490
9491+ pax_force_retaddr
9492 ret;
9493
9494 __enc_xor8:
9495 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
9496 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
9497
9498+ pax_force_retaddr
9499 ret;
9500
9501 .align 8
9502@@ -701,4 +705,5 @@ serpent_dec_blk_8way_avx:
9503 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
9504 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
9505
9506+ pax_force_retaddr
9507 ret;
9508diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
9509index 3ee1ff0..cbc568b 100644
9510--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
9511+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
9512@@ -24,6 +24,8 @@
9513 *
9514 */
9515
9516+#include <asm/alternative-asm.h>
9517+
9518 .file "serpent-sse2-x86_64-asm_64.S"
9519 .text
9520
9521@@ -692,12 +694,14 @@ __serpent_enc_blk_8way:
9522 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
9523 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
9524
9525+ pax_force_retaddr
9526 ret;
9527
9528 __enc_xor8:
9529 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
9530 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
9531
9532+ pax_force_retaddr
9533 ret;
9534
9535 .align 8
9536@@ -755,4 +759,5 @@ serpent_dec_blk_8way:
9537 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
9538 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
9539
9540+ pax_force_retaddr
9541 ret;
9542diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
9543index 49d6987..df66bd4 100644
9544--- a/arch/x86/crypto/sha1_ssse3_asm.S
9545+++ b/arch/x86/crypto/sha1_ssse3_asm.S
9546@@ -28,6 +28,8 @@
9547 * (at your option) any later version.
9548 */
9549
9550+#include <asm/alternative-asm.h>
9551+
9552 #define CTX %rdi // arg1
9553 #define BUF %rsi // arg2
9554 #define CNT %rdx // arg3
9555@@ -104,6 +106,7 @@
9556 pop %r12
9557 pop %rbp
9558 pop %rbx
9559+ pax_force_retaddr 0, 1
9560 ret
9561
9562 .size \name, .-\name
9563diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
9564index 1585abb..4a9af16 100644
9565--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
9566+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
9567@@ -23,6 +23,8 @@
9568 *
9569 */
9570
9571+#include <asm/alternative-asm.h>
9572+
9573 .file "twofish-avx-x86_64-asm_64.S"
9574 .text
9575
9576@@ -303,12 +305,14 @@ __twofish_enc_blk_8way:
9577 outunpack_blocks(%r11, RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
9578 outunpack_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
9579
9580+ pax_force_retaddr
9581 ret;
9582
9583 __enc_xor8:
9584 outunpack_xor_blocks(%r11, RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
9585 outunpack_xor_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
9586
9587+ pax_force_retaddr
9588 ret;
9589
9590 .align 8
9591@@ -354,4 +358,5 @@ twofish_dec_blk_8way:
9592 outunpack_blocks(%r11, RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
9593 outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
9594
9595+ pax_force_retaddr
9596 ret;
9597diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
9598index 5b012a2..36d5364 100644
9599--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
9600+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
9601@@ -20,6 +20,8 @@
9602 *
9603 */
9604
9605+#include <asm/alternative-asm.h>
9606+
9607 .file "twofish-x86_64-asm-3way.S"
9608 .text
9609
9610@@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
9611 popq %r13;
9612 popq %r14;
9613 popq %r15;
9614+ pax_force_retaddr 0, 1
9615 ret;
9616
9617 __enc_xor3:
9618@@ -271,6 +274,7 @@ __enc_xor3:
9619 popq %r13;
9620 popq %r14;
9621 popq %r15;
9622+ pax_force_retaddr 0, 1
9623 ret;
9624
9625 .global twofish_dec_blk_3way
9626@@ -312,5 +316,6 @@ twofish_dec_blk_3way:
9627 popq %r13;
9628 popq %r14;
9629 popq %r15;
9630+ pax_force_retaddr 0, 1
9631 ret;
9632
9633diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
9634index 7bcf3fc..f53832f 100644
9635--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
9636+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
9637@@ -21,6 +21,7 @@
9638 .text
9639
9640 #include <asm/asm-offsets.h>
9641+#include <asm/alternative-asm.h>
9642
9643 #define a_offset 0
9644 #define b_offset 4
9645@@ -268,6 +269,7 @@ twofish_enc_blk:
9646
9647 popq R1
9648 movq $1,%rax
9649+ pax_force_retaddr 0, 1
9650 ret
9651
9652 twofish_dec_blk:
9653@@ -319,4 +321,5 @@ twofish_dec_blk:
9654
9655 popq R1
9656 movq $1,%rax
9657+ pax_force_retaddr 0, 1
9658 ret
9659diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
9660index 07b3a68..bd2a388 100644
9661--- a/arch/x86/ia32/ia32_aout.c
9662+++ b/arch/x86/ia32/ia32_aout.c
9663@@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
9664 unsigned long dump_start, dump_size;
9665 struct user32 dump;
9666
9667+ memset(&dump, 0, sizeof(dump));
9668+
9669 fs = get_fs();
9670 set_fs(KERNEL_DS);
9671 has_dumped = 1;
9672diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
9673index efc6a95..95abfe2 100644
9674--- a/arch/x86/ia32/ia32_signal.c
9675+++ b/arch/x86/ia32/ia32_signal.c
9676@@ -163,8 +163,8 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
9677 }
9678 seg = get_fs();
9679 set_fs(KERNEL_DS);
9680- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
9681- (stack_t __force __user *) &uoss, regs->sp);
9682+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
9683+ (stack_t __force_user *) &uoss, regs->sp);
9684 set_fs(seg);
9685 if (ret >= 0 && uoss_ptr) {
9686 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
9687@@ -396,7 +396,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
9688 sp -= frame_size;
9689 /* Align the stack pointer according to the i386 ABI,
9690 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
9691- sp = ((sp + 4) & -16ul) - 4;
9692+ sp = ((sp - 12) & -16ul) - 4;
9693 return (void __user *) sp;
9694 }
9695
9696@@ -454,7 +454,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
9697 * These are actually not used anymore, but left because some
9698 * gdb versions depend on them as a marker.
9699 */
9700- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
9701+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
9702 } put_user_catch(err);
9703
9704 if (err)
9705@@ -496,7 +496,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
9706 0xb8,
9707 __NR_ia32_rt_sigreturn,
9708 0x80cd,
9709- 0,
9710+ 0
9711 };
9712
9713 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
9714@@ -522,16 +522,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
9715
9716 if (ka->sa.sa_flags & SA_RESTORER)
9717 restorer = ka->sa.sa_restorer;
9718+ else if (current->mm->context.vdso)
9719+ /* Return stub is in 32bit vsyscall page */
9720+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
9721 else
9722- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
9723- rt_sigreturn);
9724+ restorer = &frame->retcode;
9725 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
9726
9727 /*
9728 * Not actually used anymore, but left because some gdb
9729 * versions need it.
9730 */
9731- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
9732+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
9733 } put_user_catch(err);
9734
9735 err |= copy_siginfo_to_user32(&frame->info, info);
9736diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
9737index 076745f..ae8f6cb 100644
9738--- a/arch/x86/ia32/ia32entry.S
9739+++ b/arch/x86/ia32/ia32entry.S
9740@@ -15,8 +15,10 @@
9741 #include <asm/irqflags.h>
9742 #include <asm/asm.h>
9743 #include <asm/smap.h>
9744+#include <asm/pgtable.h>
9745 #include <linux/linkage.h>
9746 #include <linux/err.h>
9747+#include <asm/alternative-asm.h>
9748
9749 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
9750 #include <linux/elf-em.h>
9751@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
9752 ENDPROC(native_irq_enable_sysexit)
9753 #endif
9754
9755+ .macro pax_enter_kernel_user
9756+ pax_set_fptr_mask
9757+#ifdef CONFIG_PAX_MEMORY_UDEREF
9758+ call pax_enter_kernel_user
9759+#endif
9760+ .endm
9761+
9762+ .macro pax_exit_kernel_user
9763+#ifdef CONFIG_PAX_MEMORY_UDEREF
9764+ call pax_exit_kernel_user
9765+#endif
9766+#ifdef CONFIG_PAX_RANDKSTACK
9767+ pushq %rax
9768+ pushq %r11
9769+ call pax_randomize_kstack
9770+ popq %r11
9771+ popq %rax
9772+#endif
9773+ .endm
9774+
9775+.macro pax_erase_kstack
9776+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
9777+ call pax_erase_kstack
9778+#endif
9779+.endm
9780+
9781 /*
9782 * 32bit SYSENTER instruction entry.
9783 *
9784@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
9785 CFI_REGISTER rsp,rbp
9786 SWAPGS_UNSAFE_STACK
9787 movq PER_CPU_VAR(kernel_stack), %rsp
9788- addq $(KERNEL_STACK_OFFSET),%rsp
9789- /*
9790- * No need to follow this irqs on/off section: the syscall
9791- * disabled irqs, here we enable it straight after entry:
9792- */
9793- ENABLE_INTERRUPTS(CLBR_NONE)
9794 movl %ebp,%ebp /* zero extension */
9795 pushq_cfi $__USER32_DS
9796 /*CFI_REL_OFFSET ss,0*/
9797@@ -135,24 +157,44 @@ ENTRY(ia32_sysenter_target)
9798 CFI_REL_OFFSET rsp,0
9799 pushfq_cfi
9800 /*CFI_REL_OFFSET rflags,0*/
9801- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
9802- CFI_REGISTER rip,r10
9803+ orl $X86_EFLAGS_IF,(%rsp)
9804+ GET_THREAD_INFO(%r11)
9805+ movl TI_sysenter_return(%r11), %r11d
9806+ CFI_REGISTER rip,r11
9807 pushq_cfi $__USER32_CS
9808 /*CFI_REL_OFFSET cs,0*/
9809 movl %eax, %eax
9810- pushq_cfi %r10
9811+ pushq_cfi %r11
9812 CFI_REL_OFFSET rip,0
9813 pushq_cfi %rax
9814 cld
9815 SAVE_ARGS 0,1,0
9816+ pax_enter_kernel_user
9817+
9818+#ifdef CONFIG_PAX_RANDKSTACK
9819+ pax_erase_kstack
9820+#endif
9821+
9822+ /*
9823+ * No need to follow this irqs on/off section: the syscall
9824+ * disabled irqs, here we enable it straight after entry:
9825+ */
9826+ ENABLE_INTERRUPTS(CLBR_NONE)
9827 /* no need to do an access_ok check here because rbp has been
9828 32bit zero extended */
9829+
9830+#ifdef CONFIG_PAX_MEMORY_UDEREF
9831+ mov $PAX_USER_SHADOW_BASE,%r11
9832+ add %r11,%rbp
9833+#endif
9834+
9835 ASM_STAC
9836 1: movl (%rbp),%ebp
9837 _ASM_EXTABLE(1b,ia32_badarg)
9838 ASM_CLAC
9839- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9840- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9841+ GET_THREAD_INFO(%r11)
9842+ orl $TS_COMPAT,TI_status(%r11)
9843+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
9844 CFI_REMEMBER_STATE
9845 jnz sysenter_tracesys
9846 cmpq $(IA32_NR_syscalls-1),%rax
9847@@ -162,12 +204,15 @@ sysenter_do_call:
9848 sysenter_dispatch:
9849 call *ia32_sys_call_table(,%rax,8)
9850 movq %rax,RAX-ARGOFFSET(%rsp)
9851+ GET_THREAD_INFO(%r11)
9852 DISABLE_INTERRUPTS(CLBR_NONE)
9853 TRACE_IRQS_OFF
9854- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9855+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
9856 jnz sysexit_audit
9857 sysexit_from_sys_call:
9858- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9859+ pax_exit_kernel_user
9860+ pax_erase_kstack
9861+ andl $~TS_COMPAT,TI_status(%r11)
9862 /* clear IF, that popfq doesn't enable interrupts early */
9863 andl $~0x200,EFLAGS-R11(%rsp)
9864 movl RIP-R11(%rsp),%edx /* User %eip */
9865@@ -193,6 +238,9 @@ sysexit_from_sys_call:
9866 movl %eax,%esi /* 2nd arg: syscall number */
9867 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
9868 call __audit_syscall_entry
9869+
9870+ pax_erase_kstack
9871+
9872 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
9873 cmpq $(IA32_NR_syscalls-1),%rax
9874 ja ia32_badsys
9875@@ -204,7 +252,7 @@ sysexit_from_sys_call:
9876 .endm
9877
9878 .macro auditsys_exit exit
9879- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9880+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
9881 jnz ia32_ret_from_sys_call
9882 TRACE_IRQS_ON
9883 sti
9884@@ -215,11 +263,12 @@ sysexit_from_sys_call:
9885 1: setbe %al /* 1 if error, 0 if not */
9886 movzbl %al,%edi /* zero-extend that into %edi */
9887 call __audit_syscall_exit
9888+ GET_THREAD_INFO(%r11)
9889 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
9890 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
9891 cli
9892 TRACE_IRQS_OFF
9893- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9894+ testl %edi,TI_flags(%r11)
9895 jz \exit
9896 CLEAR_RREGS -ARGOFFSET
9897 jmp int_with_check
9898@@ -237,7 +286,7 @@ sysexit_audit:
9899
9900 sysenter_tracesys:
9901 #ifdef CONFIG_AUDITSYSCALL
9902- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9903+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
9904 jz sysenter_auditsys
9905 #endif
9906 SAVE_REST
9907@@ -249,6 +298,9 @@ sysenter_tracesys:
9908 RESTORE_REST
9909 cmpq $(IA32_NR_syscalls-1),%rax
9910 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
9911+
9912+ pax_erase_kstack
9913+
9914 jmp sysenter_do_call
9915 CFI_ENDPROC
9916 ENDPROC(ia32_sysenter_target)
9917@@ -276,19 +328,25 @@ ENDPROC(ia32_sysenter_target)
9918 ENTRY(ia32_cstar_target)
9919 CFI_STARTPROC32 simple
9920 CFI_SIGNAL_FRAME
9921- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
9922+ CFI_DEF_CFA rsp,0
9923 CFI_REGISTER rip,rcx
9924 /*CFI_REGISTER rflags,r11*/
9925 SWAPGS_UNSAFE_STACK
9926 movl %esp,%r8d
9927 CFI_REGISTER rsp,r8
9928 movq PER_CPU_VAR(kernel_stack),%rsp
9929+ SAVE_ARGS 8*6,0,0
9930+ pax_enter_kernel_user
9931+
9932+#ifdef CONFIG_PAX_RANDKSTACK
9933+ pax_erase_kstack
9934+#endif
9935+
9936 /*
9937 * No need to follow this irqs on/off section: the syscall
9938 * disabled irqs and here we enable it straight after entry:
9939 */
9940 ENABLE_INTERRUPTS(CLBR_NONE)
9941- SAVE_ARGS 8,0,0
9942 movl %eax,%eax /* zero extension */
9943 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
9944 movq %rcx,RIP-ARGOFFSET(%rsp)
9945@@ -304,12 +362,19 @@ ENTRY(ia32_cstar_target)
9946 /* no need to do an access_ok check here because r8 has been
9947 32bit zero extended */
9948 /* hardware stack frame is complete now */
9949+
9950+#ifdef CONFIG_PAX_MEMORY_UDEREF
9951+ mov $PAX_USER_SHADOW_BASE,%r11
9952+ add %r11,%r8
9953+#endif
9954+
9955 ASM_STAC
9956 1: movl (%r8),%r9d
9957 _ASM_EXTABLE(1b,ia32_badarg)
9958 ASM_CLAC
9959- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9960- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9961+ GET_THREAD_INFO(%r11)
9962+ orl $TS_COMPAT,TI_status(%r11)
9963+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
9964 CFI_REMEMBER_STATE
9965 jnz cstar_tracesys
9966 cmpq $IA32_NR_syscalls-1,%rax
9967@@ -319,12 +384,15 @@ cstar_do_call:
9968 cstar_dispatch:
9969 call *ia32_sys_call_table(,%rax,8)
9970 movq %rax,RAX-ARGOFFSET(%rsp)
9971+ GET_THREAD_INFO(%r11)
9972 DISABLE_INTERRUPTS(CLBR_NONE)
9973 TRACE_IRQS_OFF
9974- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9975+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
9976 jnz sysretl_audit
9977 sysretl_from_sys_call:
9978- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9979+ pax_exit_kernel_user
9980+ pax_erase_kstack
9981+ andl $~TS_COMPAT,TI_status(%r11)
9982 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
9983 movl RIP-ARGOFFSET(%rsp),%ecx
9984 CFI_REGISTER rip,rcx
9985@@ -352,7 +420,7 @@ sysretl_audit:
9986
9987 cstar_tracesys:
9988 #ifdef CONFIG_AUDITSYSCALL
9989- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9990+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
9991 jz cstar_auditsys
9992 #endif
9993 xchgl %r9d,%ebp
9994@@ -366,6 +434,9 @@ cstar_tracesys:
9995 xchgl %ebp,%r9d
9996 cmpq $(IA32_NR_syscalls-1),%rax
9997 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
9998+
9999+ pax_erase_kstack
10000+
10001 jmp cstar_do_call
10002 END(ia32_cstar_target)
10003
10004@@ -407,19 +478,26 @@ ENTRY(ia32_syscall)
10005 CFI_REL_OFFSET rip,RIP-RIP
10006 PARAVIRT_ADJUST_EXCEPTION_FRAME
10007 SWAPGS
10008- /*
10009- * No need to follow this irqs on/off section: the syscall
10010- * disabled irqs and here we enable it straight after entry:
10011- */
10012- ENABLE_INTERRUPTS(CLBR_NONE)
10013 movl %eax,%eax
10014 pushq_cfi %rax
10015 cld
10016 /* note the registers are not zero extended to the sf.
10017 this could be a problem. */
10018 SAVE_ARGS 0,1,0
10019- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
10020- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
10021+ pax_enter_kernel_user
10022+
10023+#ifdef CONFIG_PAX_RANDKSTACK
10024+ pax_erase_kstack
10025+#endif
10026+
10027+ /*
10028+ * No need to follow this irqs on/off section: the syscall
10029+ * disabled irqs and here we enable it straight after entry:
10030+ */
10031+ ENABLE_INTERRUPTS(CLBR_NONE)
10032+ GET_THREAD_INFO(%r11)
10033+ orl $TS_COMPAT,TI_status(%r11)
10034+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
10035 jnz ia32_tracesys
10036 cmpq $(IA32_NR_syscalls-1),%rax
10037 ja ia32_badsys
10038@@ -442,6 +520,9 @@ ia32_tracesys:
10039 RESTORE_REST
10040 cmpq $(IA32_NR_syscalls-1),%rax
10041 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
10042+
10043+ pax_erase_kstack
10044+
10045 jmp ia32_do_call
10046 END(ia32_syscall)
10047
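The ia32entry.S hunks above share one theme: every TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) access becomes TI_flags(%r11) after an explicit GET_THREAD_INFO(%r11), because PAX_RANDKSTACK randomizes the kernel stack pointer and any fixed %rsp-to-thread_info offset stops being valid; pax_erase_kstack additionally scrubs the used part of the kernel stack on the way out to limit info leaks. A loose C analogue of the old fixed-relationship lookup (the classic mask trick, simplified; THREAD_SIZE_SK is an illustrative constant, not the kernel's macro):

    #define THREAD_SIZE_SK (8 * 1024UL)   /* illustrative stack size */
    struct thread_info;

    /* thread_info sits at a fixed spot in the stack area, so masking
     * the stack pointer finds it -- exactly the assumption RANDKSTACK
     * breaks, hence the reload into %r11 in the hunks above */
    static inline struct thread_info *ti_from_sp(unsigned long sp)
    {
            return (struct thread_info *)(sp & ~(THREAD_SIZE_SK - 1));
    }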
10048diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
10049index 86d68d1..f9960fe 100644
10050--- a/arch/x86/ia32/sys_ia32.c
10051+++ b/arch/x86/ia32/sys_ia32.c
10052@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
10053 */
10054 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
10055 {
10056- typeof(ubuf->st_uid) uid = 0;
10057- typeof(ubuf->st_gid) gid = 0;
10058+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
10059+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
10060 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
10061 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
10062 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
10063@@ -303,7 +303,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
10064 mm_segment_t old_fs = get_fs();
10065
10066 set_fs(KERNEL_DS);
10067- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
10068+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
10069 set_fs(old_fs);
10070 if (put_compat_timespec(&t, interval))
10071 return -EFAULT;
10072@@ -319,7 +319,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
10073 mm_segment_t old_fs = get_fs();
10074
10075 set_fs(KERNEL_DS);
10076- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
10077+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
10078 set_fs(old_fs);
10079 if (!ret) {
10080 switch (_NSIG_WORDS) {
10081@@ -344,7 +344,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
10082 if (copy_siginfo_from_user32(&info, uinfo))
10083 return -EFAULT;
10084 set_fs(KERNEL_DS);
10085- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
10086+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
10087 set_fs(old_fs);
10088 return ret;
10089 }
10090@@ -376,7 +376,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
10091 return -EFAULT;
10092
10093 set_fs(KERNEL_DS);
10094- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
10095+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
10096 count);
10097 set_fs(old_fs);
10098
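Two ideas in the sys_ia32.c hunks: cp_stat64's typeof now runs on a null struct pointer instead of the __user pointer ubuf (typeof never evaluates its operand, so this sidesteps checker complaints about dereferencing a user pointer), and the set_fs(KERNEL_DS) call sites cast with __force_user, PaX's sparse annotation for deliberately passing a kernel pointer where a user pointer is expected. A minimal sketch of the null-pointer typeof idiom (member_type is an illustrative name, not from the patch):

    /* no memory access happens: typeof only inspects the expression's
     * type, the same way offsetof() uses a null pointer */
    #define member_type(T, m) typeof(((T *)0)->m)

    struct stat64_like { unsigned int st_uid; };
    static member_type(struct stat64_like, st_uid) uid;  /* an unsigned int */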
10099diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
10100index 372231c..a5aa1a1 100644
10101--- a/arch/x86/include/asm/alternative-asm.h
10102+++ b/arch/x86/include/asm/alternative-asm.h
10103@@ -18,6 +18,45 @@
10104 .endm
10105 #endif
10106
10107+#ifdef KERNEXEC_PLUGIN
10108+ .macro pax_force_retaddr_bts rip=0
10109+ btsq $63,\rip(%rsp)
10110+ .endm
10111+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
10112+ .macro pax_force_retaddr rip=0, reload=0
10113+ btsq $63,\rip(%rsp)
10114+ .endm
10115+ .macro pax_force_fptr ptr
10116+ btsq $63,\ptr
10117+ .endm
10118+ .macro pax_set_fptr_mask
10119+ .endm
10120+#endif
10121+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
10122+ .macro pax_force_retaddr rip=0, reload=0
10123+ .if \reload
10124+ pax_set_fptr_mask
10125+ .endif
10126+ orq %r10,\rip(%rsp)
10127+ .endm
10128+ .macro pax_force_fptr ptr
10129+ orq %r10,\ptr
10130+ .endm
10131+ .macro pax_set_fptr_mask
10132+ movabs $0x8000000000000000,%r10
10133+ .endm
10134+#endif
10135+#else
10136+ .macro pax_force_retaddr rip=0, reload=0
10137+ .endm
10138+ .macro pax_force_fptr ptr
10139+ .endm
10140+ .macro pax_force_retaddr_bts rip=0
10141+ .endm
10142+ .macro pax_set_fptr_mask
10143+ .endm
10144+#endif
10145+
10146 .macro altinstruction_entry orig alt feature orig_len alt_len
10147 .long \orig - .
10148 .long \alt - .
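The pax_force_retaddr/pax_force_fptr macros added above implement KERNEXEC's return-address and function-pointer hardening: btsq $63 (or orq with the mask preloaded into %r10 by pax_set_fptr_mask) forces the top address bit on, so a corrupted return address or function pointer can only resolve into the kernel half of the address space, never into attacker-controlled userland. Numerically, per the BTS variant:

    /* what "btsq $63,\rip(%rsp)" does to a saved return address: kernel
     * addresses already have bit 63 set and pass through unchanged,
     * while user addresses are forced out of reach */
    static inline unsigned long force_retaddr_sketch(unsigned long ret)
    {
            return ret | (1UL << 63);
    }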
10149diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
10150index 58ed6d9..f1cbe58 100644
10151--- a/arch/x86/include/asm/alternative.h
10152+++ b/arch/x86/include/asm/alternative.h
10153@@ -105,7 +105,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
10154 ".pushsection .discard,\"aw\",@progbits\n" \
10155 DISCARD_ENTRY(1) \
10156 ".popsection\n" \
10157- ".pushsection .altinstr_replacement, \"ax\"\n" \
10158+ ".pushsection .altinstr_replacement, \"a\"\n" \
10159 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
10160 ".popsection"
10161
10162@@ -119,7 +119,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
10163 DISCARD_ENTRY(1) \
10164 DISCARD_ENTRY(2) \
10165 ".popsection\n" \
10166- ".pushsection .altinstr_replacement, \"ax\"\n" \
10167+ ".pushsection .altinstr_replacement, \"a\"\n" \
10168 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
10169 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
10170 ".popsection"
10171diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
10172index 3388034..ba52312 100644
10173--- a/arch/x86/include/asm/apic.h
10174+++ b/arch/x86/include/asm/apic.h
10175@@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
10176
10177 #ifdef CONFIG_X86_LOCAL_APIC
10178
10179-extern unsigned int apic_verbosity;
10180+extern int apic_verbosity;
10181 extern int local_apic_timer_c2_ok;
10182
10183 extern int disable_apic;
10184@@ -391,7 +391,7 @@ struct apic {
10185 */
10186 int (*x86_32_numa_cpu_node)(int cpu);
10187 #endif
10188-};
10189+} __do_const;
10190
10191 /*
10192 * Pointer to the local APIC driver in use on this system (there's
10193diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
10194index 20370c6..a2eb9b0 100644
10195--- a/arch/x86/include/asm/apm.h
10196+++ b/arch/x86/include/asm/apm.h
10197@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
10198 __asm__ __volatile__(APM_DO_ZERO_SEGS
10199 "pushl %%edi\n\t"
10200 "pushl %%ebp\n\t"
10201- "lcall *%%cs:apm_bios_entry\n\t"
10202+ "lcall *%%ss:apm_bios_entry\n\t"
10203 "setc %%al\n\t"
10204 "popl %%ebp\n\t"
10205 "popl %%edi\n\t"
10206@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
10207 __asm__ __volatile__(APM_DO_ZERO_SEGS
10208 "pushl %%edi\n\t"
10209 "pushl %%ebp\n\t"
10210- "lcall *%%cs:apm_bios_entry\n\t"
10211+ "lcall *%%ss:apm_bios_entry\n\t"
10212 "setc %%bl\n\t"
10213 "popl %%ebp\n\t"
10214 "popl %%edi\n\t"
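Both APM thunks switch the far-call operand fetch from %cs:apm_bios_entry to %ss:apm_bios_entry. Segment-relative accesses resolve to segment base plus offset, and under PaX KERNEXEC on i386 the kernel code segment no longer has a zero base, so reading the entry descriptor through %cs would hit the wrong linear address; %ss still describes the flat kernel data view. In miniature:

    /* hedged sketch: same offset, different linear address once the
     * code segment's base becomes non-zero under KERNEXEC */
    static unsigned long to_linear(unsigned long seg_base, unsigned long off)
    {
            return seg_base + off;
    }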
10215diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
10216index b6c3b82..b4c077a 100644
10217--- a/arch/x86/include/asm/atomic.h
10218+++ b/arch/x86/include/asm/atomic.h
10219@@ -22,7 +22,18 @@
10220 */
10221 static inline int atomic_read(const atomic_t *v)
10222 {
10223- return (*(volatile int *)&(v)->counter);
10224+ return (*(volatile const int *)&(v)->counter);
10225+}
10226+
10227+/**
10228+ * atomic_read_unchecked - read atomic variable
10229+ * @v: pointer of type atomic_unchecked_t
10230+ *
10231+ * Atomically reads the value of @v.
10232+ */
10233+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
10234+{
10235+ return (*(volatile const int *)&(v)->counter);
10236 }
10237
10238 /**
10239@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
10240 }
10241
10242 /**
10243+ * atomic_set_unchecked - set atomic variable
10244+ * @v: pointer of type atomic_unchecked_t
10245+ * @i: required value
10246+ *
10247+ * Atomically sets the value of @v to @i.
10248+ */
10249+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
10250+{
10251+ v->counter = i;
10252+}
10253+
10254+/**
10255 * atomic_add - add integer to atomic variable
10256 * @i: integer value to add
10257 * @v: pointer of type atomic_t
10258@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
10259 */
10260 static inline void atomic_add(int i, atomic_t *v)
10261 {
10262- asm volatile(LOCK_PREFIX "addl %1,%0"
10263+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
10264+
10265+#ifdef CONFIG_PAX_REFCOUNT
10266+ "jno 0f\n"
10267+ LOCK_PREFIX "subl %1,%0\n"
10268+ "int $4\n0:\n"
10269+ _ASM_EXTABLE(0b, 0b)
10270+#endif
10271+
10272+ : "+m" (v->counter)
10273+ : "ir" (i));
10274+}
10275+
10276+/**
10277+ * atomic_add_unchecked - add integer to atomic variable
10278+ * @i: integer value to add
10279+ * @v: pointer of type atomic_unchecked_t
10280+ *
10281+ * Atomically adds @i to @v.
10282+ */
10283+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
10284+{
10285+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
10286 : "+m" (v->counter)
10287 : "ir" (i));
10288 }
10289@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
10290 */
10291 static inline void atomic_sub(int i, atomic_t *v)
10292 {
10293- asm volatile(LOCK_PREFIX "subl %1,%0"
10294+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
10295+
10296+#ifdef CONFIG_PAX_REFCOUNT
10297+ "jno 0f\n"
10298+ LOCK_PREFIX "addl %1,%0\n"
10299+ "int $4\n0:\n"
10300+ _ASM_EXTABLE(0b, 0b)
10301+#endif
10302+
10303+ : "+m" (v->counter)
10304+ : "ir" (i));
10305+}
10306+
10307+/**
10308+ * atomic_sub_unchecked - subtract integer from atomic variable
10309+ * @i: integer value to subtract
10310+ * @v: pointer of type atomic_unchecked_t
10311+ *
10312+ * Atomically subtracts @i from @v.
10313+ */
10314+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
10315+{
10316+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
10317 : "+m" (v->counter)
10318 : "ir" (i));
10319 }
10320@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
10321 {
10322 unsigned char c;
10323
10324- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
10325+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
10326+
10327+#ifdef CONFIG_PAX_REFCOUNT
10328+ "jno 0f\n"
10329+ LOCK_PREFIX "addl %2,%0\n"
10330+ "int $4\n0:\n"
10331+ _ASM_EXTABLE(0b, 0b)
10332+#endif
10333+
10334+ "sete %1\n"
10335 : "+m" (v->counter), "=qm" (c)
10336 : "ir" (i) : "memory");
10337 return c;
10338@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
10339 */
10340 static inline void atomic_inc(atomic_t *v)
10341 {
10342- asm volatile(LOCK_PREFIX "incl %0"
10343+ asm volatile(LOCK_PREFIX "incl %0\n"
10344+
10345+#ifdef CONFIG_PAX_REFCOUNT
10346+ "jno 0f\n"
10347+ LOCK_PREFIX "decl %0\n"
10348+ "int $4\n0:\n"
10349+ _ASM_EXTABLE(0b, 0b)
10350+#endif
10351+
10352+ : "+m" (v->counter));
10353+}
10354+
10355+/**
10356+ * atomic_inc_unchecked - increment atomic variable
10357+ * @v: pointer of type atomic_unchecked_t
10358+ *
10359+ * Atomically increments @v by 1.
10360+ */
10361+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
10362+{
10363+ asm volatile(LOCK_PREFIX "incl %0\n"
10364 : "+m" (v->counter));
10365 }
10366
10367@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
10368 */
10369 static inline void atomic_dec(atomic_t *v)
10370 {
10371- asm volatile(LOCK_PREFIX "decl %0"
10372+ asm volatile(LOCK_PREFIX "decl %0\n"
10373+
10374+#ifdef CONFIG_PAX_REFCOUNT
10375+ "jno 0f\n"
10376+ LOCK_PREFIX "incl %0\n"
10377+ "int $4\n0:\n"
10378+ _ASM_EXTABLE(0b, 0b)
10379+#endif
10380+
10381+ : "+m" (v->counter));
10382+}
10383+
10384+/**
10385+ * atomic_dec_unchecked - decrement atomic variable
10386+ * @v: pointer of type atomic_unchecked_t
10387+ *
10388+ * Atomically decrements @v by 1.
10389+ */
10390+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
10391+{
10392+ asm volatile(LOCK_PREFIX "decl %0\n"
10393 : "+m" (v->counter));
10394 }
10395
10396@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
10397 {
10398 unsigned char c;
10399
10400- asm volatile(LOCK_PREFIX "decl %0; sete %1"
10401+ asm volatile(LOCK_PREFIX "decl %0\n"
10402+
10403+#ifdef CONFIG_PAX_REFCOUNT
10404+ "jno 0f\n"
10405+ LOCK_PREFIX "incl %0\n"
10406+ "int $4\n0:\n"
10407+ _ASM_EXTABLE(0b, 0b)
10408+#endif
10409+
10410+ "sete %1\n"
10411 : "+m" (v->counter), "=qm" (c)
10412 : : "memory");
10413 return c != 0;
10414@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
10415 {
10416 unsigned char c;
10417
10418- asm volatile(LOCK_PREFIX "incl %0; sete %1"
10419+ asm volatile(LOCK_PREFIX "incl %0\n"
10420+
10421+#ifdef CONFIG_PAX_REFCOUNT
10422+ "jno 0f\n"
10423+ LOCK_PREFIX "decl %0\n"
10424+ "int $4\n0:\n"
10425+ _ASM_EXTABLE(0b, 0b)
10426+#endif
10427+
10428+ "sete %1\n"
10429+ : "+m" (v->counter), "=qm" (c)
10430+ : : "memory");
10431+ return c != 0;
10432+}
10433+
10434+/**
10435+ * atomic_inc_and_test_unchecked - increment and test
10436+ * @v: pointer of type atomic_unchecked_t
10437+ *
10438+ * Atomically increments @v by 1
10439+ * and returns true if the result is zero, or false for all
10440+ * other cases.
10441+ */
10442+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
10443+{
10444+ unsigned char c;
10445+
10446+ asm volatile(LOCK_PREFIX "incl %0\n"
10447+ "sete %1\n"
10448 : "+m" (v->counter), "=qm" (c)
10449 : : "memory");
10450 return c != 0;
10451@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
10452 {
10453 unsigned char c;
10454
10455- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
10456+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
10457+
10458+#ifdef CONFIG_PAX_REFCOUNT
10459+ "jno 0f\n"
10460+ LOCK_PREFIX "subl %2,%0\n"
10461+ "int $4\n0:\n"
10462+ _ASM_EXTABLE(0b, 0b)
10463+#endif
10464+
10465+ "sets %1\n"
10466 : "+m" (v->counter), "=qm" (c)
10467 : "ir" (i) : "memory");
10468 return c;
10469@@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
10470 goto no_xadd;
10471 #endif
10472 /* Modern 486+ processor */
10473- return i + xadd(&v->counter, i);
10474+ return i + xadd_check_overflow(&v->counter, i);
10475
10476 #ifdef CONFIG_M386
10477 no_xadd: /* Legacy 386 processor */
10478@@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
10479 }
10480
10481 /**
10482+ * atomic_add_return_unchecked - add integer and return
10483+ * @i: integer value to add
10484+ * @v: pointer of type atomic_unchecked_t
10485+ *
10486+ * Atomically adds @i to @v and returns @i + @v
10487+ */
10488+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
10489+{
10490+#ifdef CONFIG_M386
10491+ int __i;
10492+ unsigned long flags;
10493+ if (unlikely(boot_cpu_data.x86 <= 3))
10494+ goto no_xadd;
10495+#endif
10496+ /* Modern 486+ processor */
10497+ return i + xadd(&v->counter, i);
10498+
10499+#ifdef CONFIG_M386
10500+no_xadd: /* Legacy 386 processor */
10501+ raw_local_irq_save(flags);
10502+ __i = atomic_read_unchecked(v);
10503+ atomic_set_unchecked(v, i + __i);
10504+ raw_local_irq_restore(flags);
10505+ return i + __i;
10506+#endif
10507+}
10508+
10509+/**
10510 * atomic_sub_return - subtract integer and return
10511 * @v: pointer of type atomic_t
10512 * @i: integer value to subtract
10513@@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
10514 }
10515
10516 #define atomic_inc_return(v) (atomic_add_return(1, v))
10517+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
10518+{
10519+ return atomic_add_return_unchecked(1, v);
10520+}
10521 #define atomic_dec_return(v) (atomic_sub_return(1, v))
10522
10523 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
10524@@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
10525 return cmpxchg(&v->counter, old, new);
10526 }
10527
10528+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
10529+{
10530+ return cmpxchg(&v->counter, old, new);
10531+}
10532+
10533 static inline int atomic_xchg(atomic_t *v, int new)
10534 {
10535 return xchg(&v->counter, new);
10536 }
10537
10538+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
10539+{
10540+ return xchg(&v->counter, new);
10541+}
10542+
10543 /**
10544 * __atomic_add_unless - add unless the number is already a given value
10545 * @v: pointer of type atomic_t
10546@@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
10547 */
10548 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10549 {
10550- int c, old;
10551+ int c, old, new;
10552 c = atomic_read(v);
10553 for (;;) {
10554- if (unlikely(c == (u)))
10555+ if (unlikely(c == u))
10556 break;
10557- old = atomic_cmpxchg((v), c, c + (a));
10558+
10559+ asm volatile("addl %2,%0\n"
10560+
10561+#ifdef CONFIG_PAX_REFCOUNT
10562+ "jno 0f\n"
10563+ "subl %2,%0\n"
10564+ "int $4\n0:\n"
10565+ _ASM_EXTABLE(0b, 0b)
10566+#endif
10567+
10568+ : "=r" (new)
10569+ : "0" (c), "ir" (a));
10570+
10571+ old = atomic_cmpxchg(v, c, new);
10572 if (likely(old == c))
10573 break;
10574 c = old;
10575@@ -241,6 +458,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10576 }
10577
10578 /**
10579+ * atomic_inc_not_zero_hint - increment if not null
10580+ * @v: pointer of type atomic_t
10581+ * @hint: probable value of the atomic before the increment
10582+ *
10583+ * This version of atomic_inc_not_zero() gives a hint of probable
10584+ * value of the atomic. This helps the processor avoid reading the
10585+ * memory before doing the atomic read/modify/write cycle, lowering
10586+ * the number of bus transactions on some arches.
10587+ *
10588+ * Returns: 0 if increment was not done, 1 otherwise.
10589+ */
10590+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
10591+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
10592+{
10593+ int val, c = hint, new;
10594+
10595+ /* sanity test, should be removed by compiler if hint is a constant */
10596+ if (!hint)
10597+ return __atomic_add_unless(v, 1, 0);
10598+
10599+ do {
10600+ asm volatile("incl %0\n"
10601+
10602+#ifdef CONFIG_PAX_REFCOUNT
10603+ "jno 0f\n"
10604+ "decl %0\n"
10605+ "int $4\n0:\n"
10606+ _ASM_EXTABLE(0b, 0b)
10607+#endif
10608+
10609+ : "=r" (new)
10610+ : "0" (c));
10611+
10612+ val = atomic_cmpxchg(v, c, new);
10613+ if (val == c)
10614+ return 1;
10615+ c = val;
10616+ } while (c);
10617+
10618+ return 0;
10619+}
10620+
10621+/**
10622 * atomic_inc_short - increment of a short integer
10623 * @v: pointer to type int
10624 *
10625@@ -269,14 +529,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
10626 #endif
10627
10628 /* These are x86-specific, used by some header files */
10629-#define atomic_clear_mask(mask, addr) \
10630- asm volatile(LOCK_PREFIX "andl %0,%1" \
10631- : : "r" (~(mask)), "m" (*(addr)) : "memory")
10632+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
10633+{
10634+ asm volatile(LOCK_PREFIX "andl %1,%0"
10635+ : "+m" (v->counter)
10636+ : "r" (~(mask))
10637+ : "memory");
10638+}
10639
10640-#define atomic_set_mask(mask, addr) \
10641- asm volatile(LOCK_PREFIX "orl %0,%1" \
10642- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
10643- : "memory")
10644+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
10645+{
10646+ asm volatile(LOCK_PREFIX "andl %1,%0"
10647+ : "+m" (v->counter)
10648+ : "r" (~(mask))
10649+ : "memory");
10650+}
10651+
10652+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
10653+{
10654+ asm volatile(LOCK_PREFIX "orl %1,%0"
10655+ : "+m" (v->counter)
10656+ : "r" (mask)
10657+ : "memory");
10658+}
10659+
10660+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
10661+{
10662+ asm volatile(LOCK_PREFIX "orl %1,%0"
10663+ : "+m" (v->counter)
10664+ : "r" (mask)
10665+ : "memory");
10666+}
10667
10668 /* Atomic operations are already serializing on x86 */
10669 #define smp_mb__before_atomic_dec() barrier()
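The PAX_REFCOUNT pattern repeated throughout this header is: perform the locked operation, jno past the fixup when the signed result did not overflow, otherwise undo the operation and execute int $4 to raise the overflow exception (the _ASM_EXTABLE(0b, 0b) entry lets the trap handler resume right after it). The *_unchecked variants on atomic_unchecked_t opt legitimate wrapping counters out of the check, and atomic_add_return is routed through xadd_check_overflow (defined in cmpxchg.h below) for the same reason. A minimal userspace sketch of the checked increment, without the kernel-only exception-table plumbing:

    /* sketch only: in-kernel the #OF trap is caught and handled; a
     * plain userspace build would simply die at the int $4 */
    static inline void checked_inc(int *counter)
    {
            asm volatile("lock incl %0\n"
                         "jno 0f\n"        /* no signed overflow: done */
                         "lock decl %0\n"  /* overflow: undo the inc */
                         "int $4\n"        /* raise the overflow trap */
                         "0:\n"
                         : "+m" (*counter));
    }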
10670diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
10671index b154de7..aadebd8 100644
10672--- a/arch/x86/include/asm/atomic64_32.h
10673+++ b/arch/x86/include/asm/atomic64_32.h
10674@@ -12,6 +12,14 @@ typedef struct {
10675 u64 __aligned(8) counter;
10676 } atomic64_t;
10677
10678+#ifdef CONFIG_PAX_REFCOUNT
10679+typedef struct {
10680+ u64 __aligned(8) counter;
10681+} atomic64_unchecked_t;
10682+#else
10683+typedef atomic64_t atomic64_unchecked_t;
10684+#endif
10685+
10686 #define ATOMIC64_INIT(val) { (val) }
10687
10688 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
10689@@ -37,21 +45,31 @@ typedef struct {
10690 ATOMIC64_DECL_ONE(sym##_386)
10691
10692 ATOMIC64_DECL_ONE(add_386);
10693+ATOMIC64_DECL_ONE(add_unchecked_386);
10694 ATOMIC64_DECL_ONE(sub_386);
10695+ATOMIC64_DECL_ONE(sub_unchecked_386);
10696 ATOMIC64_DECL_ONE(inc_386);
10697+ATOMIC64_DECL_ONE(inc_unchecked_386);
10698 ATOMIC64_DECL_ONE(dec_386);
10699+ATOMIC64_DECL_ONE(dec_unchecked_386);
10700 #endif
10701
10702 #define alternative_atomic64(f, out, in...) \
10703 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
10704
10705 ATOMIC64_DECL(read);
10706+ATOMIC64_DECL(read_unchecked);
10707 ATOMIC64_DECL(set);
10708+ATOMIC64_DECL(set_unchecked);
10709 ATOMIC64_DECL(xchg);
10710 ATOMIC64_DECL(add_return);
10711+ATOMIC64_DECL(add_return_unchecked);
10712 ATOMIC64_DECL(sub_return);
10713+ATOMIC64_DECL(sub_return_unchecked);
10714 ATOMIC64_DECL(inc_return);
10715+ATOMIC64_DECL(inc_return_unchecked);
10716 ATOMIC64_DECL(dec_return);
10717+ATOMIC64_DECL(dec_return_unchecked);
10718 ATOMIC64_DECL(dec_if_positive);
10719 ATOMIC64_DECL(inc_not_zero);
10720 ATOMIC64_DECL(add_unless);
10721@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
10722 }
10723
10724 /**
10725+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
10726+ * @v: pointer to type atomic64_unchecked_t
10727+ * @o: expected value
10728+ * @n: new value
10729+ *
10730+ * Atomically sets @v to @n if it was equal to @o and returns
10731+ * the old value.
10732+ */
10733+
10734+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
10735+{
10736+ return cmpxchg64(&v->counter, o, n);
10737+}
10738+
10739+/**
10740 * atomic64_xchg - xchg atomic64 variable
10741 * @v: pointer to type atomic64_t
10742 * @n: value to assign
10743@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
10744 }
10745
10746 /**
10747+ * atomic64_set_unchecked - set atomic64 variable
10748+ * @v: pointer to type atomic64_unchecked_t
10749+ * @n: value to assign
10750+ *
10751+ * Atomically sets the value of @v to @n.
10752+ */
10753+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
10754+{
10755+ unsigned high = (unsigned)(i >> 32);
10756+ unsigned low = (unsigned)i;
10757+ alternative_atomic64(set, /* no output */,
10758+ "S" (v), "b" (low), "c" (high)
10759+ : "eax", "edx", "memory");
10760+}
10761+
10762+/**
10763 * atomic64_read - read atomic64 variable
10764 * @v: pointer to type atomic64_t
10765 *
10766@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
10767 }
10768
10769 /**
10770+ * atomic64_read_unchecked - read atomic64 variable
10771+ * @v: pointer to type atomic64_unchecked_t
10772+ *
10773+ * Atomically reads the value of @v and returns it.
10774+ */
10775+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
10776+{
10777+ long long r;
10778+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
10779+ return r;
10780+ }
10781+}
10782+/**
10783 * atomic64_add_return - add and return
10784 * @i: integer value to add
10785 * @v: pointer to type atomic64_t
10786@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
10787 return i;
10788 }
10789
10790+/**
10791+ * atomic64_add_return_unchecked - add and return
10792+ * @i: integer value to add
10793+ * @v: pointer to type atomic64_unchecked_t
10794+ *
10795+ * Atomically adds @i to @v and returns @i + *@v
10796+ */
10797+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
10798+{
10799+ alternative_atomic64(add_return_unchecked,
10800+ ASM_OUTPUT2("+A" (i), "+c" (v)),
10801+ ASM_NO_INPUT_CLOBBER("memory"));
10802+ return i;
10803+}
10804+
10805 /*
10806 * Other variants with different arithmetic operators:
10807 */
10808@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
10809 return a;
10810 }
10811
10812+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
10813+{
10814+ long long a;
10815+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
10816+ "S" (v) : "memory", "ecx");
10817+ return a;
10818+}
10819+
10820 static inline long long atomic64_dec_return(atomic64_t *v)
10821 {
10822 long long a;
10823@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
10824 }
10825
10826 /**
10827+ * atomic64_add_unchecked - add integer to atomic64 variable
10828+ * @i: integer value to add
10829+ * @v: pointer to type atomic64_unchecked_t
10830+ *
10831+ * Atomically adds @i to @v.
10832+ */
10833+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
10834+{
10835+ __alternative_atomic64(add_unchecked, add_return_unchecked,
10836+ ASM_OUTPUT2("+A" (i), "+c" (v)),
10837+ ASM_NO_INPUT_CLOBBER("memory"));
10838+ return i;
10839+}
10840+
10841+/**
10842 * atomic64_sub - subtract the atomic64 variable
10843 * @i: integer value to subtract
10844 * @v: pointer to type atomic64_t
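On 32-bit x86 the atomic64 operations live out of line and are reached through alternative_atomic64(), so the _unchecked family only needs matching declarations here; the implementations are added elsewhere in the patch under arch/x86/lib. Per the __ATOMIC64_DECL/ATOMIC64_DECL_ONE helpers visible above, a declaration such as ATOMIC64_DECL(read_unchecked) should expand roughly to the following (my reading; the _cx8 suffix names the cmpxchg8b-based implementation):

    void atomic64_read_unchecked_cx8(atomic64_t *, ...);
    /* plus a _386 fallback variant when CONFIG_X86_CMPXCHG64 is unset */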
10845diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
10846index 0e1cbfc..5623683 100644
10847--- a/arch/x86/include/asm/atomic64_64.h
10848+++ b/arch/x86/include/asm/atomic64_64.h
10849@@ -18,7 +18,19 @@
10850 */
10851 static inline long atomic64_read(const atomic64_t *v)
10852 {
10853- return (*(volatile long *)&(v)->counter);
10854+ return (*(volatile const long *)&(v)->counter);
10855+}
10856+
10857+/**
10858+ * atomic64_read_unchecked - read atomic64 variable
10859+ * @v: pointer of type atomic64_unchecked_t
10860+ *
10861+ * Atomically reads the value of @v.
10862+ * Doesn't imply a read memory barrier.
10863+ */
10864+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
10865+{
10866+ return (*(volatile const long *)&(v)->counter);
10867 }
10868
10869 /**
10870@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
10871 }
10872
10873 /**
10874+ * atomic64_set_unchecked - set atomic64 variable
10875+ * @v: pointer to type atomic64_unchecked_t
10876+ * @i: required value
10877+ *
10878+ * Atomically sets the value of @v to @i.
10879+ */
10880+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
10881+{
10882+ v->counter = i;
10883+}
10884+
10885+/**
10886 * atomic64_add - add integer to atomic64 variable
10887 * @i: integer value to add
10888 * @v: pointer to type atomic64_t
10889@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
10890 */
10891 static inline void atomic64_add(long i, atomic64_t *v)
10892 {
10893+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
10894+
10895+#ifdef CONFIG_PAX_REFCOUNT
10896+ "jno 0f\n"
10897+ LOCK_PREFIX "subq %1,%0\n"
10898+ "int $4\n0:\n"
10899+ _ASM_EXTABLE(0b, 0b)
10900+#endif
10901+
10902+ : "=m" (v->counter)
10903+ : "er" (i), "m" (v->counter));
10904+}
10905+
10906+/**
10907+ * atomic64_add_unchecked - add integer to atomic64 variable
10908+ * @i: integer value to add
10909+ * @v: pointer to type atomic64_unchecked_t
10910+ *
10911+ * Atomically adds @i to @v.
10912+ */
10913+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
10914+{
10915 asm volatile(LOCK_PREFIX "addq %1,%0"
10916 : "=m" (v->counter)
10917 : "er" (i), "m" (v->counter));
10918@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
10919 */
10920 static inline void atomic64_sub(long i, atomic64_t *v)
10921 {
10922- asm volatile(LOCK_PREFIX "subq %1,%0"
10923+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
10924+
10925+#ifdef CONFIG_PAX_REFCOUNT
10926+ "jno 0f\n"
10927+ LOCK_PREFIX "addq %1,%0\n"
10928+ "int $4\n0:\n"
10929+ _ASM_EXTABLE(0b, 0b)
10930+#endif
10931+
10932+ : "=m" (v->counter)
10933+ : "er" (i), "m" (v->counter));
10934+}
10935+
10936+/**
10937+ * atomic64_sub_unchecked - subtract the atomic64 variable
10938+ * @i: integer value to subtract
10939+ * @v: pointer to type atomic64_unchecked_t
10940+ *
10941+ * Atomically subtracts @i from @v.
10942+ */
10943+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
10944+{
10945+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
10946 : "=m" (v->counter)
10947 : "er" (i), "m" (v->counter));
10948 }
10949@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
10950 {
10951 unsigned char c;
10952
10953- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
10954+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
10955+
10956+#ifdef CONFIG_PAX_REFCOUNT
10957+ "jno 0f\n"
10958+ LOCK_PREFIX "addq %2,%0\n"
10959+ "int $4\n0:\n"
10960+ _ASM_EXTABLE(0b, 0b)
10961+#endif
10962+
10963+ "sete %1\n"
10964 : "=m" (v->counter), "=qm" (c)
10965 : "er" (i), "m" (v->counter) : "memory");
10966 return c;
10967@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
10968 */
10969 static inline void atomic64_inc(atomic64_t *v)
10970 {
10971+ asm volatile(LOCK_PREFIX "incq %0\n"
10972+
10973+#ifdef CONFIG_PAX_REFCOUNT
10974+ "jno 0f\n"
10975+ LOCK_PREFIX "decq %0\n"
10976+ "int $4\n0:\n"
10977+ _ASM_EXTABLE(0b, 0b)
10978+#endif
10979+
10980+ : "=m" (v->counter)
10981+ : "m" (v->counter));
10982+}
10983+
10984+/**
10985+ * atomic64_inc_unchecked - increment atomic64 variable
10986+ * @v: pointer to type atomic64_unchecked_t
10987+ *
10988+ * Atomically increments @v by 1.
10989+ */
10990+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
10991+{
10992 asm volatile(LOCK_PREFIX "incq %0"
10993 : "=m" (v->counter)
10994 : "m" (v->counter));
10995@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
10996 */
10997 static inline void atomic64_dec(atomic64_t *v)
10998 {
10999- asm volatile(LOCK_PREFIX "decq %0"
11000+ asm volatile(LOCK_PREFIX "decq %0\n"
11001+
11002+#ifdef CONFIG_PAX_REFCOUNT
11003+ "jno 0f\n"
11004+ LOCK_PREFIX "incq %0\n"
11005+ "int $4\n0:\n"
11006+ _ASM_EXTABLE(0b, 0b)
11007+#endif
11008+
11009+ : "=m" (v->counter)
11010+ : "m" (v->counter));
11011+}
11012+
11013+/**
11014+ * atomic64_dec_unchecked - decrement atomic64 variable
11015+ * @v: pointer to type atomic64_unchecked_t
11016+ *
11017+ * Atomically decrements @v by 1.
11018+ */
11019+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
11020+{
11021+ asm volatile(LOCK_PREFIX "decq %0\n"
11022 : "=m" (v->counter)
11023 : "m" (v->counter));
11024 }
11025@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
11026 {
11027 unsigned char c;
11028
11029- asm volatile(LOCK_PREFIX "decq %0; sete %1"
11030+ asm volatile(LOCK_PREFIX "decq %0\n"
11031+
11032+#ifdef CONFIG_PAX_REFCOUNT
11033+ "jno 0f\n"
11034+ LOCK_PREFIX "incq %0\n"
11035+ "int $4\n0:\n"
11036+ _ASM_EXTABLE(0b, 0b)
11037+#endif
11038+
11039+ "sete %1\n"
11040 : "=m" (v->counter), "=qm" (c)
11041 : "m" (v->counter) : "memory");
11042 return c != 0;
11043@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
11044 {
11045 unsigned char c;
11046
11047- asm volatile(LOCK_PREFIX "incq %0; sete %1"
11048+ asm volatile(LOCK_PREFIX "incq %0\n"
11049+
11050+#ifdef CONFIG_PAX_REFCOUNT
11051+ "jno 0f\n"
11052+ LOCK_PREFIX "decq %0\n"
11053+ "int $4\n0:\n"
11054+ _ASM_EXTABLE(0b, 0b)
11055+#endif
11056+
11057+ "sete %1\n"
11058 : "=m" (v->counter), "=qm" (c)
11059 : "m" (v->counter) : "memory");
11060 return c != 0;
11061@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
11062 {
11063 unsigned char c;
11064
11065- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
11066+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
11067+
11068+#ifdef CONFIG_PAX_REFCOUNT
11069+ "jno 0f\n"
11070+ LOCK_PREFIX "subq %2,%0\n"
11071+ "int $4\n0:\n"
11072+ _ASM_EXTABLE(0b, 0b)
11073+#endif
11074+
11075+ "sets %1\n"
11076 : "=m" (v->counter), "=qm" (c)
11077 : "er" (i), "m" (v->counter) : "memory");
11078 return c;
11079@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
11080 */
11081 static inline long atomic64_add_return(long i, atomic64_t *v)
11082 {
11083+ return i + xadd_check_overflow(&v->counter, i);
11084+}
11085+
11086+/**
11087+ * atomic64_add_return_unchecked - add and return
11088+ * @i: integer value to add
11089+ * @v: pointer to type atomic64_unchecked_t
11090+ *
11091+ * Atomically adds @i to @v and returns @i + @v
11092+ */
11093+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
11094+{
11095 return i + xadd(&v->counter, i);
11096 }
11097
11098@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
11099 }
11100
11101 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
11102+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
11103+{
11104+ return atomic64_add_return_unchecked(1, v);
11105+}
11106 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
11107
11108 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
11109@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
11110 return cmpxchg(&v->counter, old, new);
11111 }
11112
11113+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
11114+{
11115+ return cmpxchg(&v->counter, old, new);
11116+}
11117+
11118 static inline long atomic64_xchg(atomic64_t *v, long new)
11119 {
11120 return xchg(&v->counter, new);
11121@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
11122 */
11123 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
11124 {
11125- long c, old;
11126+ long c, old, new;
11127 c = atomic64_read(v);
11128 for (;;) {
11129- if (unlikely(c == (u)))
11130+ if (unlikely(c == u))
11131 break;
11132- old = atomic64_cmpxchg((v), c, c + (a));
11133+
11134+ asm volatile("add %2,%0\n"
11135+
11136+#ifdef CONFIG_PAX_REFCOUNT
11137+ "jno 0f\n"
11138+ "sub %2,%0\n"
11139+ "int $4\n0:\n"
11140+ _ASM_EXTABLE(0b, 0b)
11141+#endif
11142+
11143+ : "=r" (new)
11144+ : "0" (c), "ir" (a));
11145+
11146+ old = atomic64_cmpxchg(v, c, new);
11147 if (likely(old == c))
11148 break;
11149 c = old;
11150 }
11151- return c != (u);
11152+ return c != u;
11153 }
11154
11155 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
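atomic64_add_unless keeps its lock-free shape but now computes the candidate value with an overflow-checked add before racing it in with cmpxchg (the stray parentheses around u and a are dropped along the way). The control flow in plain C11, for orientation only; the overflow check is elided, so this is the loop's shape rather than the patched code:

    #include <stdatomic.h>
    #include <stdbool.h>

    static bool add_unless(atomic_long *v, long a, long u)
    {
            long c = atomic_load(v);
            while (c != u) {
                    /* the patch computes c + a via a jno/int $4 checked
                     * add; plain addition stands in for it here */
                    if (atomic_compare_exchange_weak(v, &c, c + a))
                            return true;    /* we installed c + a */
                    /* CAS failure reloaded c; retry */
            }
            return false;                   /* hit the forbidden value u */
    }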
11156diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
11157index 6dfd019..0c6699f 100644
11158--- a/arch/x86/include/asm/bitops.h
11159+++ b/arch/x86/include/asm/bitops.h
11160@@ -40,7 +40,7 @@
11161 * a mask operation on a byte.
11162 */
11163 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
11164-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
11165+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
11166 #define CONST_MASK(nr) (1 << ((nr) & 7))
11167
11168 /**
11169diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
11170index b13fe63..0dab13a 100644
11171--- a/arch/x86/include/asm/boot.h
11172+++ b/arch/x86/include/asm/boot.h
11173@@ -11,10 +11,15 @@
11174 #include <asm/pgtable_types.h>
11175
11176 /* Physical address where kernel should be loaded. */
11177-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
11178+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
11179 + (CONFIG_PHYSICAL_ALIGN - 1)) \
11180 & ~(CONFIG_PHYSICAL_ALIGN - 1))
11181
11182+#ifndef __ASSEMBLY__
11183+extern unsigned char __LOAD_PHYSICAL_ADDR[];
11184+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
11185+#endif
11186+
11187 /* Minimum kernel alignment, as a power of two */
11188 #ifdef CONFIG_X86_64
11189 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
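LOAD_PHYSICAL_ADDR stops being a compile-time constant for C code: it becomes the address of a linker-provided symbol, which lets the PaX relocation machinery adjust it without recompiling every user (the raw constant survives as ____LOAD_PHYSICAL_ADDR for assembly and linker scripts). The idiom in general form:

    /* a sizeless extern array has no storage of its own; only its
     * address is meaningful, and the linker script supplies it, e.g.
     * "_my_marker = 0x1000000;" (illustrative symbol name) */
    extern unsigned char _my_marker[];
    unsigned long marker_addr = (unsigned long)_my_marker;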
11190diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
11191index 48f99f1..d78ebf9 100644
11192--- a/arch/x86/include/asm/cache.h
11193+++ b/arch/x86/include/asm/cache.h
11194@@ -5,12 +5,13 @@
11195
11196 /* L1 cache line size */
11197 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
11198-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
11199+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
11200
11201 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
11202+#define __read_only __attribute__((__section__(".data..read_only")))
11203
11204 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
11205-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
11206+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
11207
11208 #ifdef CONFIG_X86_VSMP
11209 #ifdef CONFIG_SMP
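Two small changes with outsized effect in cache.h: L1_CACHE_BYTES and INTERNODE_CACHE_BYTES gain _AC(1,UL) so they are unsigned long in C yet still usable from assembly, and __read_only is introduced to park write-once data in a page-protected .data..read_only section. _AC() comes from include/linux/const.h:

    #ifdef __ASSEMBLY__
    #define _AC(X,Y)        X            /* suffix dropped for gas */
    #else
    #define __AC(X,Y)       (X##Y)
    #define _AC(X,Y)        __AC(X,Y)    /* yields 1UL in C code */
    #endif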
11210diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
11211index 9863ee3..4a1f8e1 100644
11212--- a/arch/x86/include/asm/cacheflush.h
11213+++ b/arch/x86/include/asm/cacheflush.h
11214@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
11215 unsigned long pg_flags = pg->flags & _PGMT_MASK;
11216
11217 if (pg_flags == _PGMT_DEFAULT)
11218- return -1;
11219+ return ~0UL;
11220 else if (pg_flags == _PGMT_WC)
11221 return _PAGE_CACHE_WC;
11222 else if (pg_flags == _PGMT_UC_MINUS)
11223diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
11224index 46fc474..b02b0f9 100644
11225--- a/arch/x86/include/asm/checksum_32.h
11226+++ b/arch/x86/include/asm/checksum_32.h
11227@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
11228 int len, __wsum sum,
11229 int *src_err_ptr, int *dst_err_ptr);
11230
11231+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
11232+ int len, __wsum sum,
11233+ int *src_err_ptr, int *dst_err_ptr);
11234+
11235+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
11236+ int len, __wsum sum,
11237+ int *src_err_ptr, int *dst_err_ptr);
11238+
11239 /*
11240 * Note: when you get a NULL pointer exception here this means someone
11241 * passed in an incorrect kernel address to one of these functions.
11242@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
11243 int *err_ptr)
11244 {
11245 might_sleep();
11246- return csum_partial_copy_generic((__force void *)src, dst,
11247+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
11248 len, sum, err_ptr, NULL);
11249 }
11250
11251@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
11252 {
11253 might_sleep();
11254 if (access_ok(VERIFY_WRITE, dst, len))
11255- return csum_partial_copy_generic(src, (__force void *)dst,
11256+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
11257 len, sum, NULL, err_ptr);
11258
11259 if (len)
11260diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
11261index 8d871ea..c1a0dc9 100644
11262--- a/arch/x86/include/asm/cmpxchg.h
11263+++ b/arch/x86/include/asm/cmpxchg.h
11264@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
11265 __compiletime_error("Bad argument size for cmpxchg");
11266 extern void __xadd_wrong_size(void)
11267 __compiletime_error("Bad argument size for xadd");
11268+extern void __xadd_check_overflow_wrong_size(void)
11269+ __compiletime_error("Bad argument size for xadd_check_overflow");
11270 extern void __add_wrong_size(void)
11271 __compiletime_error("Bad argument size for add");
11272+extern void __add_check_overflow_wrong_size(void)
11273+ __compiletime_error("Bad argument size for add_check_overflow");
11274
11275 /*
11276 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
11277@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
11278 __ret; \
11279 })
11280
11281+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
11282+ ({ \
11283+ __typeof__ (*(ptr)) __ret = (arg); \
11284+ switch (sizeof(*(ptr))) { \
11285+ case __X86_CASE_L: \
11286+ asm volatile (lock #op "l %0, %1\n" \
11287+ "jno 0f\n" \
11288+ "mov %0,%1\n" \
11289+ "int $4\n0:\n" \
11290+ _ASM_EXTABLE(0b, 0b) \
11291+ : "+r" (__ret), "+m" (*(ptr)) \
11292+ : : "memory", "cc"); \
11293+ break; \
11294+ case __X86_CASE_Q: \
11295+ asm volatile (lock #op "q %q0, %1\n" \
11296+ "jno 0f\n" \
11297+ "mov %0,%1\n" \
11298+ "int $4\n0:\n" \
11299+ _ASM_EXTABLE(0b, 0b) \
11300+ : "+r" (__ret), "+m" (*(ptr)) \
11301+ : : "memory", "cc"); \
11302+ break; \
11303+ default: \
11304+ __ ## op ## _check_overflow_wrong_size(); \
11305+ } \
11306+ __ret; \
11307+ })
11308+
11309 /*
11310 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
11311 * Since this is generally used to protect other memory information, we
11312@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
11313 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
11314 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
11315
11316+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
11317+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
11318+
11319 #define __add(ptr, inc, lock) \
11320 ({ \
11321 __typeof__ (*(ptr)) __ret = (inc); \
11322diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
11323index 8c297aa..7a90f03 100644
11324--- a/arch/x86/include/asm/cpufeature.h
11325+++ b/arch/x86/include/asm/cpufeature.h
11326@@ -205,7 +205,7 @@
11327 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
11328 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
11329 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
11330-#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
11331+#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
11332 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
11333 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
11334 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
11335@@ -379,7 +379,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
11336 ".section .discard,\"aw\",@progbits\n"
11337 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
11338 ".previous\n"
11339- ".section .altinstr_replacement,\"ax\"\n"
11340+ ".section .altinstr_replacement,\"a\"\n"
11341 "3: movb $1,%0\n"
11342 "4:\n"
11343 ".previous\n"
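Here, as in alternative.h earlier, the .altinstr_replacement section drops its executable flag ("ax" becomes "a"). The replacement instructions are never executed where they sit; at boot they are copied over the original instructions, so the stash only needs to be readable, and leaving it non-executable fits KERNEXEC's strict W^X policy. Roughly (names follow arch/x86/kernel/alternative.c, heavily simplified):

    #include <string.h>

    /* the replacement bytes are a data source for the copy, nothing
     * more -- hence SHF_ALLOC without SHF_EXECINSTR suffices */
    static void apply_one_alternative(unsigned char *instr,
                                      const unsigned char *replacement,
                                      unsigned int len)
    {
            memcpy(instr, replacement, len);
    }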
11344diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
11345index 8bf1c06..f723dfd 100644
11346--- a/arch/x86/include/asm/desc.h
11347+++ b/arch/x86/include/asm/desc.h
11348@@ -4,6 +4,7 @@
11349 #include <asm/desc_defs.h>
11350 #include <asm/ldt.h>
11351 #include <asm/mmu.h>
11352+#include <asm/pgtable.h>
11353
11354 #include <linux/smp.h>
11355 #include <linux/percpu.h>
11356@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
11357
11358 desc->type = (info->read_exec_only ^ 1) << 1;
11359 desc->type |= info->contents << 2;
11360+ desc->type |= info->seg_not_present ^ 1;
11361
11362 desc->s = 1;
11363 desc->dpl = 0x3;
11364@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
11365 }
11366
11367 extern struct desc_ptr idt_descr;
11368-extern gate_desc idt_table[];
11369 extern struct desc_ptr nmi_idt_descr;
11370-extern gate_desc nmi_idt_table[];
11371-
11372-struct gdt_page {
11373- struct desc_struct gdt[GDT_ENTRIES];
11374-} __attribute__((aligned(PAGE_SIZE)));
11375-
11376-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
11377+extern gate_desc idt_table[256];
11378+extern gate_desc nmi_idt_table[256];
11379
11380+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
11381 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
11382 {
11383- return per_cpu(gdt_page, cpu).gdt;
11384+ return cpu_gdt_table[cpu];
11385 }
11386
11387 #ifdef CONFIG_X86_64
11388@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
11389 unsigned long base, unsigned dpl, unsigned flags,
11390 unsigned short seg)
11391 {
11392- gate->a = (seg << 16) | (base & 0xffff);
11393- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
11394+ gate->gate.offset_low = base;
11395+ gate->gate.seg = seg;
11396+ gate->gate.reserved = 0;
11397+ gate->gate.type = type;
11398+ gate->gate.s = 0;
11399+ gate->gate.dpl = dpl;
11400+ gate->gate.p = 1;
11401+ gate->gate.offset_high = base >> 16;
11402 }
11403
11404 #endif
11405@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
11406
11407 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
11408 {
11409+ pax_open_kernel();
11410 memcpy(&idt[entry], gate, sizeof(*gate));
11411+ pax_close_kernel();
11412 }
11413
11414 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
11415 {
11416+ pax_open_kernel();
11417 memcpy(&ldt[entry], desc, 8);
11418+ pax_close_kernel();
11419 }
11420
11421 static inline void
11422@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
11423 default: size = sizeof(*gdt); break;
11424 }
11425
11426+ pax_open_kernel();
11427 memcpy(&gdt[entry], desc, size);
11428+ pax_close_kernel();
11429 }
11430
11431 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
11432@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
11433
11434 static inline void native_load_tr_desc(void)
11435 {
11436+ pax_open_kernel();
11437 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
11438+ pax_close_kernel();
11439 }
11440
11441 static inline void native_load_gdt(const struct desc_ptr *dtr)
11442@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
11443 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
11444 unsigned int i;
11445
11446+ pax_open_kernel();
11447 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
11448 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
11449+ pax_close_kernel();
11450 }
11451
11452 #define _LDT_empty(info) \
11453@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
11454 }
11455
11456 #ifdef CONFIG_X86_64
11457-static inline void set_nmi_gate(int gate, void *addr)
11458+static inline void set_nmi_gate(int gate, const void *addr)
11459 {
11460 gate_desc s;
11461
11462@@ -320,7 +333,7 @@ static inline void set_nmi_gate(int gate, void *addr)
11463 }
11464 #endif
11465
11466-static inline void _set_gate(int gate, unsigned type, void *addr,
11467+static inline void _set_gate(int gate, unsigned type, const void *addr,
11468 unsigned dpl, unsigned ist, unsigned seg)
11469 {
11470 gate_desc s;
11471@@ -339,7 +352,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
11472 * Pentium F0 0F bugfix can have resulted in the mapped
11473 * IDT being write-protected.
11474 */
11475-static inline void set_intr_gate(unsigned int n, void *addr)
11476+static inline void set_intr_gate(unsigned int n, const void *addr)
11477 {
11478 BUG_ON((unsigned)n > 0xFF);
11479 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
11480@@ -369,19 +382,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
11481 /*
11482 * This routine sets up an interrupt gate at directory privilege level 3.
11483 */
11484-static inline void set_system_intr_gate(unsigned int n, void *addr)
11485+static inline void set_system_intr_gate(unsigned int n, const void *addr)
11486 {
11487 BUG_ON((unsigned)n > 0xFF);
11488 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
11489 }
11490
11491-static inline void set_system_trap_gate(unsigned int n, void *addr)
11492+static inline void set_system_trap_gate(unsigned int n, const void *addr)
11493 {
11494 BUG_ON((unsigned)n > 0xFF);
11495 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
11496 }
11497
11498-static inline void set_trap_gate(unsigned int n, void *addr)
11499+static inline void set_trap_gate(unsigned int n, const void *addr)
11500 {
11501 BUG_ON((unsigned)n > 0xFF);
11502 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
11503@@ -390,19 +403,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
11504 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
11505 {
11506 BUG_ON((unsigned)n > 0xFF);
11507- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
11508+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
11509 }
11510
11511-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
11512+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
11513 {
11514 BUG_ON((unsigned)n > 0xFF);
11515 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
11516 }
11517
11518-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
11519+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
11520 {
11521 BUG_ON((unsigned)n > 0xFF);
11522 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
11523 }
11524
11525+#ifdef CONFIG_X86_32
11526+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
11527+{
11528+ struct desc_struct d;
11529+
11530+ if (likely(limit))
11531+ limit = (limit - 1UL) >> PAGE_SHIFT;
11532+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
11533+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
11534+}
11535+#endif
11536+
11537 #endif /* _ASM_X86_DESC_H */
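The descriptor-table helpers are bracketed with pax_open_kernel()/pax_close_kernel() because KERNEXEC maps the GDT and IDT read-only; writes must briefly lift the protection. pack_gate also moves from hand-packed a/b words to the named gate bitfields introduced in desc_defs.h below, and set_user_cs (SEGMEXEC) rewrites the user code segment's base and limit per CPU. A conceptual sketch of the open/close pair on native hardware (not PaX's exact code):

    /* supervisor writes honor read-only PTEs only while CR0.WP is set,
     * so clearing it briefly makes the R/O tables writable from ring 0 */
    static inline unsigned long open_kernel_sketch(void)
    {
            unsigned long cr0 = read_cr0();
            write_cr0(cr0 & ~X86_CR0_WP);
            return cr0;             /* handed back to the closing half */
    }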
11538diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
11539index 278441f..b95a174 100644
11540--- a/arch/x86/include/asm/desc_defs.h
11541+++ b/arch/x86/include/asm/desc_defs.h
11542@@ -31,6 +31,12 @@ struct desc_struct {
11543 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
11544 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
11545 };
11546+ struct {
11547+ u16 offset_low;
11548+ u16 seg;
11549+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
11550+ unsigned offset_high: 16;
11551+ } gate;
11552 };
11553 } __attribute__((packed));
11554
11555diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
11556index 3778256..c5d4fce 100644
11557--- a/arch/x86/include/asm/e820.h
11558+++ b/arch/x86/include/asm/e820.h
11559@@ -69,7 +69,7 @@ struct e820map {
11560 #define ISA_START_ADDRESS 0xa0000
11561 #define ISA_END_ADDRESS 0x100000
11562
11563-#define BIOS_BEGIN 0x000a0000
11564+#define BIOS_BEGIN 0x000c0000
11565 #define BIOS_END 0x00100000
11566
11567 #define BIOS_ROM_BASE 0xffe00000
11568diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
11569index 5939f44..f8845f6 100644
11570--- a/arch/x86/include/asm/elf.h
11571+++ b/arch/x86/include/asm/elf.h
11572@@ -243,7 +243,25 @@ extern int force_personality32;
11573 the loader. We need to make sure that it is out of the way of the program
11574 that it will "exec", and that there is sufficient room for the brk. */
11575
11576+#ifdef CONFIG_PAX_SEGMEXEC
11577+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
11578+#else
11579 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
11580+#endif
11581+
11582+#ifdef CONFIG_PAX_ASLR
11583+#ifdef CONFIG_X86_32
11584+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
11585+
11586+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
11587+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
11588+#else
11589+#define PAX_ELF_ET_DYN_BASE 0x400000UL
11590+
11591+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
11592+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
11593+#endif
11594+#endif
11595
11596 /* This yields a mask that user programs can use to figure out what
11597 instruction set this CPU supports. This could be done in user space,
11598@@ -296,16 +314,12 @@ do { \
11599
11600 #define ARCH_DLINFO \
11601 do { \
11602- if (vdso_enabled) \
11603- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
11604- (unsigned long)current->mm->context.vdso); \
11605+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
11606 } while (0)
11607
11608 #define ARCH_DLINFO_X32 \
11609 do { \
11610- if (vdso_enabled) \
11611- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
11612- (unsigned long)current->mm->context.vdso); \
11613+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
11614 } while (0)
11615
11616 #define AT_SYSINFO 32
11617@@ -320,7 +334,7 @@ else \
11618
11619 #endif /* !CONFIG_X86_32 */
11620
11621-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
11622+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
11623
11624 #define VDSO_ENTRY \
11625 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
11626@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
11627 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
11628 #define compat_arch_setup_additional_pages syscall32_setup_pages
11629
11630-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
11631-#define arch_randomize_brk arch_randomize_brk
11632-
11633 /*
11634 * True on X86_32 or when emulating IA32 on X86_64
11635 */
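The PAX_DELTA_*_LEN values are randomization widths in bits, applied at page granularity; ELF_ET_DYN_BASE shrinks under SEGMEXEC because the usable address space is halved, and the now-unconditional AT_SYSINFO_EHDR entry reflects the vdso being treated as always present. Illustrative arithmetic for the 64-bit non-compat case (rand_bits() is a hypothetical stand-in, not a kernel API):

    /* TASK_SIZE_MAX_SHIFT(47) - PAGE_SHIFT(12) - 3 = 32 bits of mmap
     * randomization, shifted back up to page granularity */
    static unsigned long randomize_base(unsigned long base, unsigned int bits)
    {
            extern unsigned long rand_bits(unsigned int);   /* hypothetical */
            return base + (rand_bits(bits) << 12);
    }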
11636diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
11637index 75ce3f4..882e801 100644
11638--- a/arch/x86/include/asm/emergency-restart.h
11639+++ b/arch/x86/include/asm/emergency-restart.h
11640@@ -13,6 +13,6 @@ enum reboot_type {
11641
11642 extern enum reboot_type reboot_type;
11643
11644-extern void machine_emergency_restart(void);
11645+extern void machine_emergency_restart(void) __noreturn;
11646
11647 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
11648diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
11649index 41ab26e..a88c9e6 100644
11650--- a/arch/x86/include/asm/fpu-internal.h
11651+++ b/arch/x86/include/asm/fpu-internal.h
11652@@ -126,7 +126,9 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
11653 ({ \
11654 int err; \
11655 asm volatile(ASM_STAC "\n" \
11656- "1:" #insn "\n\t" \
11657+ "1:" \
11658+ __copyuser_seg \
11659+ #insn "\n\t" \
11660 "2: " ASM_CLAC "\n" \
11661 ".section .fixup,\"ax\"\n" \
11662 "3: movl $-1,%[err]\n" \
11663@@ -299,7 +301,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
11664 "emms\n\t" /* clear stack tags */
11665 "fildl %P[addr]", /* set F?P to defined value */
11666 X86_FEATURE_FXSAVE_LEAK,
11667- [addr] "m" (tsk->thread.fpu.has_fpu));
11668+ [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
11669
11670 return fpu_restore_checking(&tsk->thread.fpu);
11671 }
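user_insn()'s access gains a __copyuser_seg prefix, and the FXSAVE-leak workaround switches its dummy fildl operand from a task field to the per-CPU init_tss stack pointer, a location that is always safely mapped. __copyuser_seg is defined elsewhere in the patch; my assumption of its shape, for orientation only:

    /* assumed definition (PaX uaccess.h, not shown in this excerpt):
     * under UDEREF on i386 userland lives behind its own segment, so
     * user accesses carry an explicit %gs override prefix */
    #if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
    #define __copyuser_seg "gs;"
    #else
    #define __copyuser_seg
    #endif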
11672diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
11673index f373046..02653e2 100644
11674--- a/arch/x86/include/asm/futex.h
11675+++ b/arch/x86/include/asm/futex.h
11676@@ -12,6 +12,7 @@
11677 #include <asm/smap.h>
11678
11679 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
11680+ typecheck(u32 __user *, uaddr); \
11681 asm volatile("\t" ASM_STAC "\n" \
11682 "1:\t" insn "\n" \
11683 "2:\t" ASM_CLAC "\n" \
11684@@ -20,15 +21,16 @@
11685 "\tjmp\t2b\n" \
11686 "\t.previous\n" \
11687 _ASM_EXTABLE(1b, 3b) \
11688- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
11689+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
11690 : "i" (-EFAULT), "0" (oparg), "1" (0))
11691
11692 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
11693+ typecheck(u32 __user *, uaddr); \
11694 asm volatile("\t" ASM_STAC "\n" \
11695 "1:\tmovl %2, %0\n" \
11696 "\tmovl\t%0, %3\n" \
11697 "\t" insn "\n" \
11698- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
11699+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
11700 "\tjnz\t1b\n" \
11701 "3:\t" ASM_CLAC "\n" \
11702 "\t.section .fixup,\"ax\"\n" \
11703@@ -38,7 +40,7 @@
11704 _ASM_EXTABLE(1b, 4b) \
11705 _ASM_EXTABLE(2b, 4b) \
11706 : "=&a" (oldval), "=&r" (ret), \
11707- "+m" (*uaddr), "=&r" (tem) \
11708+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
11709 : "r" (oparg), "i" (-EFAULT), "1" (0))
11710
11711 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
11712@@ -65,10 +67,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
11713
11714 switch (op) {
11715 case FUTEX_OP_SET:
11716- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
11717+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
11718 break;
11719 case FUTEX_OP_ADD:
11720- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
11721+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
11722 uaddr, oparg);
11723 break;
11724 case FUTEX_OP_OR:
11725@@ -128,14 +130,14 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
11726 return -EFAULT;
11727
11728 asm volatile("\t" ASM_STAC "\n"
11729- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
11730+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
11731 "2:\t" ASM_CLAC "\n"
11732 "\t.section .fixup, \"ax\"\n"
11733 "3:\tmov %3, %0\n"
11734 "\tjmp 2b\n"
11735 "\t.previous\n"
11736 _ASM_EXTABLE(1b, 3b)
11737- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
11738+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
11739 : "i" (-EFAULT), "r" (newval), "1" (oldval)
11740 : "memory"
11741 );
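
The added typecheck() lines make the futex macros warn (and, with -Werror, fail to build) when uaddr is not a u32 __user *, closing the door on accidentally feeding them some other pointer; the ____m() wrapper then adapts the user pointer for the "+m" memory operand so UDEREF kernels access it through the correct user mapping. A userspace replica of the kernel's typecheck() (the macro body is the one from include/linux/typecheck.h) showing the mechanism:

#include <stdint.h>
#include <stdio.h>

#define typecheck(type, x)            \
({  type __dummy;                     \
    typeof(x) __dummy2;               \
    (void)(&__dummy == &__dummy2);    \
    1;                                \
})

int main(void)
{
    uint32_t word = 42;
    uint64_t wide = 42;
    uint32_t *uaddr = &word;

    typecheck(uint32_t *, uaddr);  /* compiles cleanly */
    (void)wide;
    /* typecheck(uint32_t *, &wide); -- warns: comparison of distinct
     * pointer types, which the kernel build treats as an error */
    printf("uaddr ok: %u\n", *uaddr);
    return 0;
}
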
11742diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
11743index eb92a6e..b98b2f4 100644
11744--- a/arch/x86/include/asm/hw_irq.h
11745+++ b/arch/x86/include/asm/hw_irq.h
11746@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
11747 extern void enable_IO_APIC(void);
11748
11749 /* Statistics */
11750-extern atomic_t irq_err_count;
11751-extern atomic_t irq_mis_count;
11752+extern atomic_unchecked_t irq_err_count;
11753+extern atomic_unchecked_t irq_mis_count;
11754
11755 /* EISA */
11756 extern void eisa_set_level_irq(unsigned int irq);
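
irq_err_count and irq_mis_count are pure statistics, so the hunk retypes them as atomic_unchecked_t: under PAX_REFCOUNT the ordinary atomic_t traps on signed overflow, and these counters must be allowed to wrap instead. A small demo of the wrap-is-fine semantics the unchecked type preserves (the type and helper here are simplified stand-ins for the grsecurity definitions):

#include <limits.h>
#include <stdio.h>

typedef struct { unsigned int counter; } atomic_unchecked_t;

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
    __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

int main(void)
{
    atomic_unchecked_t errs = { UINT_MAX };
    atomic_inc_unchecked(&errs);   /* wraps to 0, no trap -- the point */
    printf("irq_err_count after wrap: %u\n", errs.counter);
    return 0;
}
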
11757diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
11758index d8e8eef..15b1179 100644
11759--- a/arch/x86/include/asm/io.h
11760+++ b/arch/x86/include/asm/io.h
11761@@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
11762 return ioremap_nocache(offset, size);
11763 }
11764
11765-extern void iounmap(volatile void __iomem *addr);
11766+extern void iounmap(const volatile void __iomem *addr);
11767
11768 extern void set_iounmap_nonlazy(void);
11769
11770@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
11771
11772 #include <linux/vmalloc.h>
11773
11774+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
11775+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
11776+{
11777+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
11778+}
11779+
11780+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
11781+{
11782+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
11783+}
11784+
11785 /*
11786 * Convert a virtual cached pointer to an uncached pointer
11787 */
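
The two new helpers give x86 a real ARCH_HAS_VALID_PHYS_ADDR_RANGE implementation: /dev/mem-style accesses are rejected once they would run past the last page frame the CPU can physically address (boot_cpu_data.x86_phys_bits). A hedged userspace restatement of the bound, with the phys-bits value passed in rather than read from the boot CPU data:

#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int valid_phys_addr_range(unsigned long addr, size_t count,
                                 int x86_phys_bits)
{
    unsigned long long max_pfn = 1ULL << (x86_phys_bits - PAGE_SHIFT);
    return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < max_pfn;
}

int main(void)
{
    printf("%d\n", valid_phys_addr_range(0x1000, 4096, 36));       /* 1 */
    printf("%d\n", valid_phys_addr_range(~0UL - 8192, 4096, 36));  /* 0 */
    return 0;
}
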
11788diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
11789index bba3cf8..06bc8da 100644
11790--- a/arch/x86/include/asm/irqflags.h
11791+++ b/arch/x86/include/asm/irqflags.h
11792@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
11793 sti; \
11794 sysexit
11795
11796+#define GET_CR0_INTO_RDI mov %cr0, %rdi
11797+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
11798+#define GET_CR3_INTO_RDI mov %cr3, %rdi
11799+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
11800+
11801 #else
11802 #define INTERRUPT_RETURN iret
11803 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
11804diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
11805index d3ddd17..c9fb0cc 100644
11806--- a/arch/x86/include/asm/kprobes.h
11807+++ b/arch/x86/include/asm/kprobes.h
11808@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
11809 #define RELATIVEJUMP_SIZE 5
11810 #define RELATIVECALL_OPCODE 0xe8
11811 #define RELATIVE_ADDR_SIZE 4
11812-#define MAX_STACK_SIZE 64
11813-#define MIN_STACK_SIZE(ADDR) \
11814- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
11815- THREAD_SIZE - (unsigned long)(ADDR))) \
11816- ? (MAX_STACK_SIZE) \
11817- : (((unsigned long)current_thread_info()) + \
11818- THREAD_SIZE - (unsigned long)(ADDR)))
11819+#define MAX_STACK_SIZE 64UL
11820+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
11821
11822 #define flush_insn_slot(p) do { } while (0)
11823
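
MIN_STACK_SIZE collapses from a hand-rolled ternary against current_thread_info() into min() against thread.sp0; either way it clamps how many bytes the kprobes trampoline copies to whatever actually lies between ADDR and the top of the kernel stack. A small check of the rewritten form (sp0 is a made-up stack-top address):

#include <stdio.h>

#define MAX_STACK_SIZE 64UL
#define min(a, b) ((a) < (b) ? (a) : (b))

static unsigned long min_stack_size(unsigned long sp0, unsigned long addr)
{
    return min(MAX_STACK_SIZE, sp0 - addr);
}

int main(void)
{
    unsigned long sp0 = 0xffff880000010000UL;     /* assumed stack top */
    printf("%lu\n", min_stack_size(sp0, sp0 - 32));    /* 32 */
    printf("%lu\n", min_stack_size(sp0, sp0 - 4096));  /* 64 */
    return 0;
}
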
11824diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
11825index b2e11f4..f293e2e 100644
11826--- a/arch/x86/include/asm/kvm_host.h
11827+++ b/arch/x86/include/asm/kvm_host.h
11828@@ -707,7 +707,7 @@ struct kvm_x86_ops {
11829 int (*check_intercept)(struct kvm_vcpu *vcpu,
11830 struct x86_instruction_info *info,
11831 enum x86_intercept_stage stage);
11832-};
11833+} __do_const;
11834
11835 struct kvm_arch_async_pf {
11836 u32 token;
11837diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
11838index c8bed0d..85c03fd 100644
11839--- a/arch/x86/include/asm/local.h
11840+++ b/arch/x86/include/asm/local.h
11841@@ -10,33 +10,97 @@ typedef struct {
11842 atomic_long_t a;
11843 } local_t;
11844
11845+typedef struct {
11846+ atomic_long_unchecked_t a;
11847+} local_unchecked_t;
11848+
11849 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
11850
11851 #define local_read(l) atomic_long_read(&(l)->a)
11852+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
11853 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
11854+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
11855
11856 static inline void local_inc(local_t *l)
11857 {
11858- asm volatile(_ASM_INC "%0"
11859+ asm volatile(_ASM_INC "%0\n"
11860+
11861+#ifdef CONFIG_PAX_REFCOUNT
11862+ "jno 0f\n"
11863+ _ASM_DEC "%0\n"
11864+ "int $4\n0:\n"
11865+ _ASM_EXTABLE(0b, 0b)
11866+#endif
11867+
11868+ : "+m" (l->a.counter));
11869+}
11870+
11871+static inline void local_inc_unchecked(local_unchecked_t *l)
11872+{
11873+ asm volatile(_ASM_INC "%0\n"
11874 : "+m" (l->a.counter));
11875 }
11876
11877 static inline void local_dec(local_t *l)
11878 {
11879- asm volatile(_ASM_DEC "%0"
11880+ asm volatile(_ASM_DEC "%0\n"
11881+
11882+#ifdef CONFIG_PAX_REFCOUNT
11883+ "jno 0f\n"
11884+ _ASM_INC "%0\n"
11885+ "int $4\n0:\n"
11886+ _ASM_EXTABLE(0b, 0b)
11887+#endif
11888+
11889+ : "+m" (l->a.counter));
11890+}
11891+
11892+static inline void local_dec_unchecked(local_unchecked_t *l)
11893+{
11894+ asm volatile(_ASM_DEC "%0\n"
11895 : "+m" (l->a.counter));
11896 }
11897
11898 static inline void local_add(long i, local_t *l)
11899 {
11900- asm volatile(_ASM_ADD "%1,%0"
11901+ asm volatile(_ASM_ADD "%1,%0\n"
11902+
11903+#ifdef CONFIG_PAX_REFCOUNT
11904+ "jno 0f\n"
11905+ _ASM_SUB "%1,%0\n"
11906+ "int $4\n0:\n"
11907+ _ASM_EXTABLE(0b, 0b)
11908+#endif
11909+
11910+ : "+m" (l->a.counter)
11911+ : "ir" (i));
11912+}
11913+
11914+static inline void local_add_unchecked(long i, local_unchecked_t *l)
11915+{
11916+ asm volatile(_ASM_ADD "%1,%0\n"
11917 : "+m" (l->a.counter)
11918 : "ir" (i));
11919 }
11920
11921 static inline void local_sub(long i, local_t *l)
11922 {
11923- asm volatile(_ASM_SUB "%1,%0"
11924+ asm volatile(_ASM_SUB "%1,%0\n"
11925+
11926+#ifdef CONFIG_PAX_REFCOUNT
11927+ "jno 0f\n"
11928+ _ASM_ADD "%1,%0\n"
11929+ "int $4\n0:\n"
11930+ _ASM_EXTABLE(0b, 0b)
11931+#endif
11932+
11933+ : "+m" (l->a.counter)
11934+ : "ir" (i));
11935+}
11936+
11937+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
11938+{
11939+ asm volatile(_ASM_SUB "%1,%0\n"
11940 : "+m" (l->a.counter)
11941 : "ir" (i));
11942 }
11943@@ -54,7 +118,16 @@ static inline int local_sub_and_test(long i, local_t *l)
11944 {
11945 unsigned char c;
11946
11947- asm volatile(_ASM_SUB "%2,%0; sete %1"
11948+ asm volatile(_ASM_SUB "%2,%0\n"
11949+
11950+#ifdef CONFIG_PAX_REFCOUNT
11951+ "jno 0f\n"
11952+ _ASM_ADD "%2,%0\n"
11953+ "int $4\n0:\n"
11954+ _ASM_EXTABLE(0b, 0b)
11955+#endif
11956+
11957+ "sete %1\n"
11958 : "+m" (l->a.counter), "=qm" (c)
11959 : "ir" (i) : "memory");
11960 return c;
11961@@ -72,7 +145,16 @@ static inline int local_dec_and_test(local_t *l)
11962 {
11963 unsigned char c;
11964
11965- asm volatile(_ASM_DEC "%0; sete %1"
11966+ asm volatile(_ASM_DEC "%0\n"
11967+
11968+#ifdef CONFIG_PAX_REFCOUNT
11969+ "jno 0f\n"
11970+ _ASM_INC "%0\n"
11971+ "int $4\n0:\n"
11972+ _ASM_EXTABLE(0b, 0b)
11973+#endif
11974+
11975+ "sete %1\n"
11976 : "+m" (l->a.counter), "=qm" (c)
11977 : : "memory");
11978 return c != 0;
11979@@ -90,7 +172,16 @@ static inline int local_inc_and_test(local_t *l)
11980 {
11981 unsigned char c;
11982
11983- asm volatile(_ASM_INC "%0; sete %1"
11984+ asm volatile(_ASM_INC "%0\n"
11985+
11986+#ifdef CONFIG_PAX_REFCOUNT
11987+ "jno 0f\n"
11988+ _ASM_DEC "%0\n"
11989+ "int $4\n0:\n"
11990+ _ASM_EXTABLE(0b, 0b)
11991+#endif
11992+
11993+ "sete %1\n"
11994 : "+m" (l->a.counter), "=qm" (c)
11995 : : "memory");
11996 return c != 0;
11997@@ -109,7 +200,16 @@ static inline int local_add_negative(long i, local_t *l)
11998 {
11999 unsigned char c;
12000
12001- asm volatile(_ASM_ADD "%2,%0; sets %1"
12002+ asm volatile(_ASM_ADD "%2,%0\n"
12003+
12004+#ifdef CONFIG_PAX_REFCOUNT
12005+ "jno 0f\n"
12006+ _ASM_SUB "%2,%0\n"
12007+ "int $4\n0:\n"
12008+ _ASM_EXTABLE(0b, 0b)
12009+#endif
12010+
12011+ "sets %1\n"
12012 : "+m" (l->a.counter), "=qm" (c)
12013 : "ir" (i) : "memory");
12014 return c;
12015@@ -132,7 +232,15 @@ static inline long local_add_return(long i, local_t *l)
12016 #endif
12017 /* Modern 486+ processor */
12018 __i = i;
12019- asm volatile(_ASM_XADD "%0, %1;"
12020+ asm volatile(_ASM_XADD "%0, %1\n"
12021+
12022+#ifdef CONFIG_PAX_REFCOUNT
12023+ "jno 0f\n"
12024+ _ASM_MOV "%0,%1\n"
12025+ "int $4\n0:\n"
12026+ _ASM_EXTABLE(0b, 0b)
12027+#endif
12028+
12029 : "+r" (i), "+m" (l->a.counter)
12030 : : "memory");
12031 return i + __i;
12032@@ -147,6 +255,38 @@ no_xadd: /* Legacy 386 processor */
12033 #endif
12034 }
12035
12036+/**
12037+ * local_add_return_unchecked - add and return
12038+ * @i: integer value to add
12039+ * @l: pointer to type local_unchecked_t
12040+ *
12041+ * Atomically adds @i to @l and returns @i + @l
12042+ */
12043+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
12044+{
12045+ long __i;
12046+#ifdef CONFIG_M386
12047+ unsigned long flags;
12048+ if (unlikely(boot_cpu_data.x86 <= 3))
12049+ goto no_xadd;
12050+#endif
12051+ /* Modern 486+ processor */
12052+ __i = i;
12053+ asm volatile(_ASM_XADD "%0, %1\n"
12054+ : "+r" (i), "+m" (l->a.counter)
12055+ : : "memory");
12056+ return i + __i;
12057+
12058+#ifdef CONFIG_M386
12059+no_xadd: /* Legacy 386 processor */
12060+ local_irq_save(flags);
12061+ __i = local_read_unchecked(l);
12062+ local_set_unchecked(l, i + __i);
12063+ local_irq_restore(flags);
12064+ return i + __i;
12065+#endif
12066+}
12067+
12068 static inline long local_sub_return(long i, local_t *l)
12069 {
12070 return local_add_return(-i, l);
12071@@ -157,6 +297,8 @@ static inline long local_sub_return(long i, local_t *l)
12072
12073 #define local_cmpxchg(l, o, n) \
12074 (cmpxchg_local(&((l)->a.counter), (o), (n)))
12075+#define local_cmpxchg_unchecked(l, o, n) \
12076+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
12077 /* Always has a lock prefix */
12078 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
12079
12080diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
12081index 593e51d..fa69c9a 100644
12082--- a/arch/x86/include/asm/mman.h
12083+++ b/arch/x86/include/asm/mman.h
12084@@ -5,4 +5,14 @@
12085
12086 #include <asm-generic/mman.h>
12087
12088+#ifdef __KERNEL__
12089+#ifndef __ASSEMBLY__
12090+#ifdef CONFIG_X86_32
12091+#define arch_mmap_check i386_mmap_check
12092+int i386_mmap_check(unsigned long addr, unsigned long len,
12093+ unsigned long flags);
12094+#endif
12095+#endif
12096+#endif
12097+
12098 #endif /* _ASM_X86_MMAN_H */
12099diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
12100index 5f55e69..e20bfb1 100644
12101--- a/arch/x86/include/asm/mmu.h
12102+++ b/arch/x86/include/asm/mmu.h
12103@@ -9,7 +9,7 @@
12104 * we put the segment information here.
12105 */
12106 typedef struct {
12107- void *ldt;
12108+ struct desc_struct *ldt;
12109 int size;
12110
12111 #ifdef CONFIG_X86_64
12112@@ -18,7 +18,19 @@ typedef struct {
12113 #endif
12114
12115 struct mutex lock;
12116- void *vdso;
12117+ unsigned long vdso;
12118+
12119+#ifdef CONFIG_X86_32
12120+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
12121+ unsigned long user_cs_base;
12122+ unsigned long user_cs_limit;
12123+
12124+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
12125+ cpumask_t cpu_user_cs_mask;
12126+#endif
12127+
12128+#endif
12129+#endif
12130 } mm_context_t;
12131
12132 #ifdef CONFIG_SMP
12133diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
12134index cdbf367..adb37ac 100644
12135--- a/arch/x86/include/asm/mmu_context.h
12136+++ b/arch/x86/include/asm/mmu_context.h
12137@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
12138
12139 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
12140 {
12141+
12142+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12143+ unsigned int i;
12144+ pgd_t *pgd;
12145+
12146+ pax_open_kernel();
12147+ pgd = get_cpu_pgd(smp_processor_id());
12148+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
12149+ set_pgd_batched(pgd+i, native_make_pgd(0));
12150+ pax_close_kernel();
12151+#endif
12152+
12153 #ifdef CONFIG_SMP
12154 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
12155 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
12156@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
12157 struct task_struct *tsk)
12158 {
12159 unsigned cpu = smp_processor_id();
12160+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
12161+ int tlbstate = TLBSTATE_OK;
12162+#endif
12163
12164 if (likely(prev != next)) {
12165 #ifdef CONFIG_SMP
12166+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
12167+ tlbstate = this_cpu_read(cpu_tlbstate.state);
12168+#endif
12169 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
12170 this_cpu_write(cpu_tlbstate.active_mm, next);
12171 #endif
12172 cpumask_set_cpu(cpu, mm_cpumask(next));
12173
12174 /* Re-load page tables */
12175+#ifdef CONFIG_PAX_PER_CPU_PGD
12176+ pax_open_kernel();
12177+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
12178+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
12179+ pax_close_kernel();
12180+ load_cr3(get_cpu_pgd(cpu));
12181+#else
12182 load_cr3(next->pgd);
12183+#endif
12184
12185 /* stop flush ipis for the previous mm */
12186 cpumask_clear_cpu(cpu, mm_cpumask(prev));
12187@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
12188 */
12189 if (unlikely(prev->context.ldt != next->context.ldt))
12190 load_LDT_nolock(&next->context);
12191- }
12192+
12193+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
12194+ if (!(__supported_pte_mask & _PAGE_NX)) {
12195+ smp_mb__before_clear_bit();
12196+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
12197+ smp_mb__after_clear_bit();
12198+ cpu_set(cpu, next->context.cpu_user_cs_mask);
12199+ }
12200+#endif
12201+
12202+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
12203+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
12204+ prev->context.user_cs_limit != next->context.user_cs_limit))
12205+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
12206 #ifdef CONFIG_SMP
12207+ else if (unlikely(tlbstate != TLBSTATE_OK))
12208+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
12209+#endif
12210+#endif
12211+
12212+ }
12213 else {
12214+
12215+#ifdef CONFIG_PAX_PER_CPU_PGD
12216+ pax_open_kernel();
12217+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
12218+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
12219+ pax_close_kernel();
12220+ load_cr3(get_cpu_pgd(cpu));
12221+#endif
12222+
12223+#ifdef CONFIG_SMP
12224 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
12225 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
12226
12227@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
12228 * tlb flush IPI delivery. We must reload CR3
12229 * to make sure to use no freed page tables.
12230 */
12231+
12232+#ifndef CONFIG_PAX_PER_CPU_PGD
12233 load_cr3(next->pgd);
12234+#endif
12235+
12236 load_LDT_nolock(&next->context);
12237+
12238+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
12239+ if (!(__supported_pte_mask & _PAGE_NX))
12240+ cpu_set(cpu, next->context.cpu_user_cs_mask);
12241+#endif
12242+
12243+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
12244+#ifdef CONFIG_PAX_PAGEEXEC
12245+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
12246+#endif
12247+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
12248+#endif
12249+
12250 }
12251+#endif
12252 }
12253-#endif
12254 }
12255
12256 #define activate_mm(prev, next) \
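
With PAX_PER_CPU_PGD, CR3 never points at a task's own page directory: on every switch the user half of next->pgd is copied into the CPU's private PGD (and, for UDEREF, mirrored into a shadow range), so the kernel half stays under per-CPU control. A much-simplified sketch of the copy step; PTRS_PER_PGD, USER_PGD_PTRS, NR_CPUS and the pgd_t layout are stand-ins:

#include <string.h>

#define PTRS_PER_PGD  512
#define USER_PGD_PTRS 256               /* assumed: lower half is userland */

typedef struct { unsigned long pgd; } pgd_t;

static pgd_t cpu_pgd[8][PTRS_PER_PGD];  /* one private PGD per CPU */

static void clone_user_pgds(pgd_t *dst, const pgd_t *src)
{
    memcpy(dst, src, USER_PGD_PTRS * sizeof(pgd_t));
}

static void switch_mm_sketch(unsigned cpu, pgd_t *next_pgd)
{
    clone_user_pgds(cpu_pgd[cpu], next_pgd);
    /* load_cr3(get_cpu_pgd(cpu)) in the real code; kernel half stays put */
}

int main(void)
{
    pgd_t next[PTRS_PER_PGD] = { { 0x1003 } };
    switch_mm_sketch(0, next);
    return cpu_pgd[0][0].pgd == 0x1003 ? 0 : 1;
}
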
12257diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
12258index 9eae775..c914fea 100644
12259--- a/arch/x86/include/asm/module.h
12260+++ b/arch/x86/include/asm/module.h
12261@@ -5,6 +5,7 @@
12262
12263 #ifdef CONFIG_X86_64
12264 /* X86_64 does not define MODULE_PROC_FAMILY */
12265+#define MODULE_PROC_FAMILY ""
12266 #elif defined CONFIG_M386
12267 #define MODULE_PROC_FAMILY "386 "
12268 #elif defined CONFIG_M486
12269@@ -59,8 +60,20 @@
12270 #error unknown processor family
12271 #endif
12272
12273-#ifdef CONFIG_X86_32
12274-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
12275+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
12276+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
12277+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
12278+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
12279+#else
12280+#define MODULE_PAX_KERNEXEC ""
12281 #endif
12282
12283+#ifdef CONFIG_PAX_MEMORY_UDEREF
12284+#define MODULE_PAX_UDEREF "UDEREF "
12285+#else
12286+#define MODULE_PAX_UDEREF ""
12287+#endif
12288+
12289+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
12290+
12291 #endif /* _ASM_X86_MODULE_H */
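
Folding the KERNEXEC and UDEREF markers into MODULE_ARCH_VERMAGIC means a module built against a differently configured kernel fails the vermagic comparison at load time instead of misbehaving later. What the concatenation produces for one assumed configuration (686 + KERNEXEC_BTS + UDEREF):

#include <stdio.h>

#define MODULE_PROC_FAMILY  "686 "
#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
#define MODULE_PAX_UDEREF   "UDEREF "
#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF

int main(void)
{
    /* A module whose magic string differs is rejected at load time. */
    printf("vermagic suffix: \"%s\"\n", MODULE_ARCH_VERMAGIC);
    return 0;
}
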
12292diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
12293index 320f7bb..e89f8f8 100644
12294--- a/arch/x86/include/asm/page_64_types.h
12295+++ b/arch/x86/include/asm/page_64_types.h
12296@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
12297
12298 /* duplicated to the one in bootmem.h */
12299 extern unsigned long max_pfn;
12300-extern unsigned long phys_base;
12301+extern const unsigned long phys_base;
12302
12303 extern unsigned long __phys_addr(unsigned long);
12304 #define __phys_reloc_hide(x) (x)
12305diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
12306index a0facf3..c017b15 100644
12307--- a/arch/x86/include/asm/paravirt.h
12308+++ b/arch/x86/include/asm/paravirt.h
12309@@ -632,6 +632,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
12310 val);
12311 }
12312
12313+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
12314+{
12315+ pgdval_t val = native_pgd_val(pgd);
12316+
12317+ if (sizeof(pgdval_t) > sizeof(long))
12318+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
12319+ val, (u64)val >> 32);
12320+ else
12321+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
12322+ val);
12323+}
12324+
12325 static inline void pgd_clear(pgd_t *pgdp)
12326 {
12327 set_pgd(pgdp, __pgd(0));
12328@@ -713,6 +725,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
12329 pv_mmu_ops.set_fixmap(idx, phys, flags);
12330 }
12331
12332+#ifdef CONFIG_PAX_KERNEXEC
12333+static inline unsigned long pax_open_kernel(void)
12334+{
12335+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
12336+}
12337+
12338+static inline unsigned long pax_close_kernel(void)
12339+{
12340+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
12341+}
12342+#else
12343+static inline unsigned long pax_open_kernel(void) { return 0; }
12344+static inline unsigned long pax_close_kernel(void) { return 0; }
12345+#endif
12346+
12347 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
12348
12349 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
12350@@ -929,7 +956,7 @@ extern void default_banner(void);
12351
12352 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
12353 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
12354-#define PARA_INDIRECT(addr) *%cs:addr
12355+#define PARA_INDIRECT(addr) *%ss:addr
12356 #endif
12357
12358 #define INTERRUPT_RETURN \
12359@@ -1004,6 +1031,21 @@ extern void default_banner(void);
12360 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
12361 CLBR_NONE, \
12362 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
12363+
12364+#define GET_CR0_INTO_RDI \
12365+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
12366+ mov %rax,%rdi
12367+
12368+#define SET_RDI_INTO_CR0 \
12369+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
12370+
12371+#define GET_CR3_INTO_RDI \
12372+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
12373+ mov %rax,%rdi
12374+
12375+#define SET_RDI_INTO_CR3 \
12376+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
12377+
12378 #endif /* CONFIG_X86_32 */
12379
12380 #endif /* __ASSEMBLY__ */
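
On paravirt kernels pax_open_kernel()/pax_close_kernel() become PVOP calls through pv_mmu_ops, so a hypervisor backend can substitute its own way of making kernel text writable; the GET_CR*/SET_CR* macros likewise get paravirt-safe spellings for the 64-bit assembly paths. A loose function-pointer analogy for the dispatch (names and table shape are illustrative only):

#include <stdio.h>

struct pv_mmu_ops_sketch {
    unsigned long (*pax_open_kernel)(void);
    unsigned long (*pax_close_kernel)(void);
};

static unsigned long native_open(void)  { puts("WP off"); return 0; }
static unsigned long native_close(void) { puts("WP on");  return 0; }

static struct pv_mmu_ops_sketch pv_mmu_ops = { native_open, native_close };

int main(void)
{
    pv_mmu_ops.pax_open_kernel();    /* PVOP_CALL0(...) in the patch */
    /* ... patch read-only kernel data here ... */
    pv_mmu_ops.pax_close_kernel();
    return 0;
}
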
12381diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
12382index 142236e..57cf5ea 100644
12383--- a/arch/x86/include/asm/paravirt_types.h
12384+++ b/arch/x86/include/asm/paravirt_types.h
12385@@ -312,6 +312,7 @@ struct pv_mmu_ops {
12386 struct paravirt_callee_save make_pud;
12387
12388 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
12389+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
12390 #endif /* PAGETABLE_LEVELS == 4 */
12391 #endif /* PAGETABLE_LEVELS >= 3 */
12392
12393@@ -323,6 +324,12 @@ struct pv_mmu_ops {
12394 an mfn. We can tell which is which from the index. */
12395 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
12396 phys_addr_t phys, pgprot_t flags);
12397+
12398+#ifdef CONFIG_PAX_KERNEXEC
12399+ unsigned long (*pax_open_kernel)(void);
12400+ unsigned long (*pax_close_kernel)(void);
12401+#endif
12402+
12403 };
12404
12405 struct arch_spinlock;
12406diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
12407index b4389a4..7024269 100644
12408--- a/arch/x86/include/asm/pgalloc.h
12409+++ b/arch/x86/include/asm/pgalloc.h
12410@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
12411 pmd_t *pmd, pte_t *pte)
12412 {
12413 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
12414+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
12415+}
12416+
12417+static inline void pmd_populate_user(struct mm_struct *mm,
12418+ pmd_t *pmd, pte_t *pte)
12419+{
12420+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
12421 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
12422 }
12423
12424@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
12425
12426 #ifdef CONFIG_X86_PAE
12427 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
12428+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
12429+{
12430+ pud_populate(mm, pudp, pmd);
12431+}
12432 #else /* !CONFIG_X86_PAE */
12433 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
12434 {
12435 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
12436 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
12437 }
12438+
12439+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
12440+{
12441+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
12442+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
12443+}
12444 #endif /* CONFIG_X86_PAE */
12445
12446 #if PAGETABLE_LEVELS > 3
12447@@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
12448 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
12449 }
12450
12451+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
12452+{
12453+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
12454+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
12455+}
12456+
12457 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
12458 {
12459 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
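
The populate helpers are split so kernel page tables are wired up with _KERNPG_TABLE while only user tables keep _PAGE_TABLE; the sole difference is the _PAGE_USER bit, which keeps kernel mappings out of CPL 3 page walks. The flag arithmetic, using the standard x86 PTE bit values:

#include <stdio.h>

#define _PAGE_PRESENT  0x001
#define _PAGE_RW       0x002
#define _PAGE_USER     0x004
#define _PAGE_ACCESSED 0x020
#define _PAGE_DIRTY    0x040
#define _PAGE_TABLE    (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
                        _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE  (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)

int main(void)
{
    printf("_PAGE_TABLE   = %#x\n", _PAGE_TABLE);    /* 0x67, user-visible */
    printf("_KERNPG_TABLE = %#x\n", _KERNPG_TABLE);  /* 0x63, kernel-only */
    return 0;
}
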
12460diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
12461index f2b489c..4f7e2e5 100644
12462--- a/arch/x86/include/asm/pgtable-2level.h
12463+++ b/arch/x86/include/asm/pgtable-2level.h
12464@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
12465
12466 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
12467 {
12468+ pax_open_kernel();
12469 *pmdp = pmd;
12470+ pax_close_kernel();
12471 }
12472
12473 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
12474diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
12475index 4cc9f2b..5fd9226 100644
12476--- a/arch/x86/include/asm/pgtable-3level.h
12477+++ b/arch/x86/include/asm/pgtable-3level.h
12478@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
12479
12480 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
12481 {
12482+ pax_open_kernel();
12483 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
12484+ pax_close_kernel();
12485 }
12486
12487 static inline void native_set_pud(pud_t *pudp, pud_t pud)
12488 {
12489+ pax_open_kernel();
12490 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
12491+ pax_close_kernel();
12492 }
12493
12494 /*
12495diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
12496index a1f780d..5f38ced4 100644
12497--- a/arch/x86/include/asm/pgtable.h
12498+++ b/arch/x86/include/asm/pgtable.h
12499@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
12500
12501 #ifndef __PAGETABLE_PUD_FOLDED
12502 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
12503+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
12504 #define pgd_clear(pgd) native_pgd_clear(pgd)
12505 #endif
12506
12507@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
12508
12509 #define arch_end_context_switch(prev) do {} while(0)
12510
12511+#define pax_open_kernel() native_pax_open_kernel()
12512+#define pax_close_kernel() native_pax_close_kernel()
12513 #endif /* CONFIG_PARAVIRT */
12514
12515+#define __HAVE_ARCH_PAX_OPEN_KERNEL
12516+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
12517+
12518+#ifdef CONFIG_PAX_KERNEXEC
12519+static inline unsigned long native_pax_open_kernel(void)
12520+{
12521+ unsigned long cr0;
12522+
12523+ preempt_disable();
12524+ barrier();
12525+ cr0 = read_cr0() ^ X86_CR0_WP;
12526+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
12527+ write_cr0(cr0);
12528+ return cr0 ^ X86_CR0_WP;
12529+}
12530+
12531+static inline unsigned long native_pax_close_kernel(void)
12532+{
12533+ unsigned long cr0;
12534+
12535+ cr0 = read_cr0() ^ X86_CR0_WP;
12536+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
12537+ write_cr0(cr0);
12538+ barrier();
12539+ preempt_enable_no_resched();
12540+ return cr0 ^ X86_CR0_WP;
12541+}
12542+#else
12543+static inline unsigned long native_pax_open_kernel(void) { return 0; }
12544+static inline unsigned long native_pax_close_kernel(void) { return 0; }
12545+#endif
12546+
12547 /*
12548 * The following only work if pte_present() is true.
12549 * Undefined behaviour if not..
12550 */
12551+static inline int pte_user(pte_t pte)
12552+{
12553+ return pte_val(pte) & _PAGE_USER;
12554+}
12555+
12556 static inline int pte_dirty(pte_t pte)
12557 {
12558 return pte_flags(pte) & _PAGE_DIRTY;
12559@@ -195,9 +235,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
12560 return pte_clear_flags(pte, _PAGE_RW);
12561 }
12562
12563+static inline pte_t pte_mkread(pte_t pte)
12564+{
12565+ return __pte(pte_val(pte) | _PAGE_USER);
12566+}
12567+
12568 static inline pte_t pte_mkexec(pte_t pte)
12569 {
12570- return pte_clear_flags(pte, _PAGE_NX);
12571+#ifdef CONFIG_X86_PAE
12572+ if (__supported_pte_mask & _PAGE_NX)
12573+ return pte_clear_flags(pte, _PAGE_NX);
12574+ else
12575+#endif
12576+ return pte_set_flags(pte, _PAGE_USER);
12577+}
12578+
12579+static inline pte_t pte_exprotect(pte_t pte)
12580+{
12581+#ifdef CONFIG_X86_PAE
12582+ if (__supported_pte_mask & _PAGE_NX)
12583+ return pte_set_flags(pte, _PAGE_NX);
12584+ else
12585+#endif
12586+ return pte_clear_flags(pte, _PAGE_USER);
12587 }
12588
12589 static inline pte_t pte_mkdirty(pte_t pte)
12590@@ -389,6 +449,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
12591 #endif
12592
12593 #ifndef __ASSEMBLY__
12594+
12595+#ifdef CONFIG_PAX_PER_CPU_PGD
12596+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
12597+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
12598+{
12599+ return cpu_pgd[cpu];
12600+}
12601+#endif
12602+
12603 #include <linux/mm_types.h>
12604
12605 static inline int pte_none(pte_t pte)
12606@@ -565,7 +634,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
12607
12608 static inline int pgd_bad(pgd_t pgd)
12609 {
12610- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
12611+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
12612 }
12613
12614 static inline int pgd_none(pgd_t pgd)
12615@@ -588,7 +657,12 @@ static inline int pgd_none(pgd_t pgd)
12616 * pgd_offset() returns a (pgd_t *)
12617 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
12618 */
12619-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
12620+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
12621+
12622+#ifdef CONFIG_PAX_PER_CPU_PGD
12623+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
12624+#endif
12625+
12626 /*
12627 * a shortcut which implies the use of the kernel's pgd, instead
12628 * of a process's
12629@@ -599,6 +673,20 @@ static inline int pgd_none(pgd_t pgd)
12630 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
12631 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
12632
12633+#ifdef CONFIG_X86_32
12634+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
12635+#else
12636+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
12637+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
12638+
12639+#ifdef CONFIG_PAX_MEMORY_UDEREF
12640+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
12641+#else
12642+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
12643+#endif
12644+
12645+#endif
12646+
12647 #ifndef __ASSEMBLY__
12648
12649 extern int direct_gbpages;
12650@@ -763,11 +851,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
12651 * dst and src can be on the same page, but the range must not overlap,
12652 * and must not cross a page boundary.
12653 */
12654-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
12655+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
12656 {
12657- memcpy(dst, src, count * sizeof(pgd_t));
12658+ pax_open_kernel();
12659+ while (count--)
12660+ *dst++ = *src++;
12661+ pax_close_kernel();
12662 }
12663
12664+#ifdef CONFIG_PAX_PER_CPU_PGD
12665+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
12666+#endif
12667+
12668+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12669+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
12670+#else
12671+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
12672+#endif
12673
12674 #include <asm-generic/pgtable.h>
12675 #endif /* __ASSEMBLY__ */
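
native_pax_open_kernel()/native_pax_close_kernel() implement the KERNEXEC write window by flipping CR0.WP with preemption disabled, so only the current CPU can write to read-only kernel pages, and only between the paired calls. Userspace has no CR0 access, so here is a hedged mprotect() analogy for the open/write/close discipline:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return 1;
    strcpy(p, "immutable");
    mprotect(p, pagesz, PROT_READ);               /* normal state: read-only */

    mprotect(p, pagesz, PROT_READ | PROT_WRITE);  /* pax_open_kernel() */
    p[0] = 'I';                                   /* the one sanctioned write */
    mprotect(p, pagesz, PROT_READ);               /* pax_close_kernel() */

    puts(p);
    return 0;
}
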
12676diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
12677index 8faa215..a8a17ea 100644
12678--- a/arch/x86/include/asm/pgtable_32.h
12679+++ b/arch/x86/include/asm/pgtable_32.h
12680@@ -25,9 +25,6 @@
12681 struct mm_struct;
12682 struct vm_area_struct;
12683
12684-extern pgd_t swapper_pg_dir[1024];
12685-extern pgd_t initial_page_table[1024];
12686-
12687 static inline void pgtable_cache_init(void) { }
12688 static inline void check_pgt_cache(void) { }
12689 void paging_init(void);
12690@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
12691 # include <asm/pgtable-2level.h>
12692 #endif
12693
12694+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
12695+extern pgd_t initial_page_table[PTRS_PER_PGD];
12696+#ifdef CONFIG_X86_PAE
12697+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
12698+#endif
12699+
12700 #if defined(CONFIG_HIGHPTE)
12701 #define pte_offset_map(dir, address) \
12702 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
12703@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
12704 /* Clear a kernel PTE and flush it from the TLB */
12705 #define kpte_clear_flush(ptep, vaddr) \
12706 do { \
12707+ pax_open_kernel(); \
12708 pte_clear(&init_mm, (vaddr), (ptep)); \
12709+ pax_close_kernel(); \
12710 __flush_tlb_one((vaddr)); \
12711 } while (0)
12712
12713@@ -75,6 +80,9 @@ do { \
12714
12715 #endif /* !__ASSEMBLY__ */
12716
12717+#define HAVE_ARCH_UNMAPPED_AREA
12718+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
12719+
12720 /*
12721 * kern_addr_valid() is (1) for FLATMEM and (0) for
12722 * SPARSEMEM and DISCONTIGMEM
12723diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
12724index ed5903b..c7fe163 100644
12725--- a/arch/x86/include/asm/pgtable_32_types.h
12726+++ b/arch/x86/include/asm/pgtable_32_types.h
12727@@ -8,7 +8,7 @@
12728 */
12729 #ifdef CONFIG_X86_PAE
12730 # include <asm/pgtable-3level_types.h>
12731-# define PMD_SIZE (1UL << PMD_SHIFT)
12732+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
12733 # define PMD_MASK (~(PMD_SIZE - 1))
12734 #else
12735 # include <asm/pgtable-2level_types.h>
12736@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
12737 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
12738 #endif
12739
12740+#ifdef CONFIG_PAX_KERNEXEC
12741+#ifndef __ASSEMBLY__
12742+extern unsigned char MODULES_EXEC_VADDR[];
12743+extern unsigned char MODULES_EXEC_END[];
12744+#endif
12745+#include <asm/boot.h>
12746+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
12747+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
12748+#else
12749+#define ktla_ktva(addr) (addr)
12750+#define ktva_ktla(addr) (addr)
12751+#endif
12752+
12753 #define MODULES_VADDR VMALLOC_START
12754 #define MODULES_END VMALLOC_END
12755 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
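
Under KERNEXEC on i386 the kernel text gets two addresses, and ktla_ktva()/ktva_ktla() convert between the linear alias and the executable virtual address. A quick round-trip check, with the common defaults LOAD_PHYSICAL_ADDR = 0x1000000 and PAGE_OFFSET = 0xc0000000 assumed (both are Kconfig choices); the two macros must be exact inverses:

#include <assert.h>
#include <stdio.h>

#define LOAD_PHYSICAL_ADDR 0x1000000UL
#define PAGE_OFFSET        0xc0000000UL
#define ktla_ktva(addr) ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
#define ktva_ktla(addr) ((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)

int main(void)
{
    unsigned long text = 0x00100000UL;   /* hypothetical linear alias */
    assert(ktva_ktla(ktla_ktva(text)) == text);
    printf("ktva: %#lx\n", ktla_ktva(text));
    return 0;
}
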
12756diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
12757index 47356f9..deb94a2 100644
12758--- a/arch/x86/include/asm/pgtable_64.h
12759+++ b/arch/x86/include/asm/pgtable_64.h
12760@@ -16,10 +16,14 @@
12761
12762 extern pud_t level3_kernel_pgt[512];
12763 extern pud_t level3_ident_pgt[512];
12764+extern pud_t level3_vmalloc_start_pgt[512];
12765+extern pud_t level3_vmalloc_end_pgt[512];
12766+extern pud_t level3_vmemmap_pgt[512];
12767+extern pud_t level2_vmemmap_pgt[512];
12768 extern pmd_t level2_kernel_pgt[512];
12769 extern pmd_t level2_fixmap_pgt[512];
12770-extern pmd_t level2_ident_pgt[512];
12771-extern pgd_t init_level4_pgt[];
12772+extern pmd_t level2_ident_pgt[512*2];
12773+extern pgd_t init_level4_pgt[512];
12774
12775 #define swapper_pg_dir init_level4_pgt
12776
12777@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
12778
12779 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
12780 {
12781+ pax_open_kernel();
12782 *pmdp = pmd;
12783+ pax_close_kernel();
12784 }
12785
12786 static inline void native_pmd_clear(pmd_t *pmd)
12787@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
12788
12789 static inline void native_set_pud(pud_t *pudp, pud_t pud)
12790 {
12791+ pax_open_kernel();
12792 *pudp = pud;
12793+ pax_close_kernel();
12794 }
12795
12796 static inline void native_pud_clear(pud_t *pud)
12797@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
12798
12799 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
12800 {
12801+ pax_open_kernel();
12802+ *pgdp = pgd;
12803+ pax_close_kernel();
12804+}
12805+
12806+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
12807+{
12808 *pgdp = pgd;
12809 }
12810
12811diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
12812index 766ea16..5b96cb3 100644
12813--- a/arch/x86/include/asm/pgtable_64_types.h
12814+++ b/arch/x86/include/asm/pgtable_64_types.h
12815@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
12816 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
12817 #define MODULES_END _AC(0xffffffffff000000, UL)
12818 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
12819+#define MODULES_EXEC_VADDR MODULES_VADDR
12820+#define MODULES_EXEC_END MODULES_END
12821+
12822+#define ktla_ktva(addr) (addr)
12823+#define ktva_ktla(addr) (addr)
12824
12825 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
12826diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
12827index ec8a1fc..7ccb593 100644
12828--- a/arch/x86/include/asm/pgtable_types.h
12829+++ b/arch/x86/include/asm/pgtable_types.h
12830@@ -16,13 +16,12 @@
12831 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
12832 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
12833 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
12834-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
12835+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
12836 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
12837 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
12838 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
12839-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
12840-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
12841-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
12842+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
12843+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
12844 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
12845
12846 /* If _PAGE_BIT_PRESENT is clear, we use these: */
12847@@ -40,7 +39,6 @@
12848 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
12849 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
12850 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
12851-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
12852 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
12853 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
12854 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
12855@@ -57,8 +55,10 @@
12856
12857 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
12858 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
12859-#else
12860+#elif defined(CONFIG_KMEMCHECK)
12861 #define _PAGE_NX (_AT(pteval_t, 0))
12862+#else
12863+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
12864 #endif
12865
12866 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
12867@@ -96,6 +96,9 @@
12868 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
12869 _PAGE_ACCESSED)
12870
12871+#define PAGE_READONLY_NOEXEC PAGE_READONLY
12872+#define PAGE_SHARED_NOEXEC PAGE_SHARED
12873+
12874 #define __PAGE_KERNEL_EXEC \
12875 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
12876 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
12877@@ -106,7 +109,7 @@
12878 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
12879 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
12880 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
12881-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
12882+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
12883 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
12884 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
12885 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
12886@@ -168,8 +171,8 @@
12887 * bits are combined, this will alow user to access the high address mapped
12888 * VDSO in the presence of CONFIG_COMPAT_VDSO
12889 */
12890-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
12891-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
12892+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
12893+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
12894 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
12895 #endif
12896
12897@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
12898 {
12899 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
12900 }
12901+#endif
12902
12903+#if PAGETABLE_LEVELS == 3
12904+#include <asm-generic/pgtable-nopud.h>
12905+#endif
12906+
12907+#if PAGETABLE_LEVELS == 2
12908+#include <asm-generic/pgtable-nopmd.h>
12909+#endif
12910+
12911+#ifndef __ASSEMBLY__
12912 #if PAGETABLE_LEVELS > 3
12913 typedef struct { pudval_t pud; } pud_t;
12914
12915@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
12916 return pud.pud;
12917 }
12918 #else
12919-#include <asm-generic/pgtable-nopud.h>
12920-
12921 static inline pudval_t native_pud_val(pud_t pud)
12922 {
12923 return native_pgd_val(pud.pgd);
12924@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
12925 return pmd.pmd;
12926 }
12927 #else
12928-#include <asm-generic/pgtable-nopmd.h>
12929-
12930 static inline pmdval_t native_pmd_val(pmd_t pmd)
12931 {
12932 return native_pgd_val(pmd.pud.pgd);
12933@@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
12934
12935 extern pteval_t __supported_pte_mask;
12936 extern void set_nx(void);
12937-extern int nx_enabled;
12938
12939 #define pgprot_writecombine pgprot_writecombine
12940 extern pgprot_t pgprot_writecombine(pgprot_t prot);
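
The last #else branch in the _PAGE_NX hunk is the interesting one: on 32-bit non-PAE there is no hardware NX bit, so _PAGE_NX is aliased to the software bit 11 (_PAGE_BIT_HIDDEN), letting PaX tag non-executable pages and enforce them through PAGEEXEC/SEGMEXEC emulation. The resulting soft flag:

#include <stdio.h>

typedef unsigned long pteval_t;

#define _PAGE_BIT_HIDDEN 11
#define _PAGE_NX ((pteval_t)1 << _PAGE_BIT_HIDDEN)

int main(void)
{
    pteval_t pte = 0x067;    /* PRESENT+RW+USER+ACCESSED+DIRTY */
    pte |= _PAGE_NX;         /* software no-exec mark, no hardware NX needed */
    printf("pte=%#lx nx=%d\n", pte, !!(pte & _PAGE_NX));
    return 0;
}
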
12941diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
12942index ad1fc85..0b15fe1 100644
12943--- a/arch/x86/include/asm/processor.h
12944+++ b/arch/x86/include/asm/processor.h
12945@@ -289,7 +289,7 @@ struct tss_struct {
12946
12947 } ____cacheline_aligned;
12948
12949-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
12950+extern struct tss_struct init_tss[NR_CPUS];
12951
12952 /*
12953 * Save the original ist values for checking stack pointers during debugging
12954@@ -818,11 +818,18 @@ static inline void spin_lock_prefetch(const void *x)
12955 */
12956 #define TASK_SIZE PAGE_OFFSET
12957 #define TASK_SIZE_MAX TASK_SIZE
12958+
12959+#ifdef CONFIG_PAX_SEGMEXEC
12960+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
12961+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
12962+#else
12963 #define STACK_TOP TASK_SIZE
12964-#define STACK_TOP_MAX STACK_TOP
12965+#endif
12966+
12967+#define STACK_TOP_MAX TASK_SIZE
12968
12969 #define INIT_THREAD { \
12970- .sp0 = sizeof(init_stack) + (long)&init_stack, \
12971+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
12972 .vm86_info = NULL, \
12973 .sysenter_cs = __KERNEL_CS, \
12974 .io_bitmap_ptr = NULL, \
12975@@ -836,7 +843,7 @@ static inline void spin_lock_prefetch(const void *x)
12976 */
12977 #define INIT_TSS { \
12978 .x86_tss = { \
12979- .sp0 = sizeof(init_stack) + (long)&init_stack, \
12980+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
12981 .ss0 = __KERNEL_DS, \
12982 .ss1 = __KERNEL_CS, \
12983 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
12984@@ -847,11 +854,7 @@ static inline void spin_lock_prefetch(const void *x)
12985 extern unsigned long thread_saved_pc(struct task_struct *tsk);
12986
12987 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
12988-#define KSTK_TOP(info) \
12989-({ \
12990- unsigned long *__ptr = (unsigned long *)(info); \
12991- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
12992-})
12993+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
12994
12995 /*
12996 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
12997@@ -866,7 +869,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
12998 #define task_pt_regs(task) \
12999 ({ \
13000 struct pt_regs *__regs__; \
13001- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
13002+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
13003 __regs__ - 1; \
13004 })
13005
13006@@ -876,13 +879,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
13007 /*
13008 * User space process size. 47bits minus one guard page.
13009 */
13010-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
13011+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
13012
13013 /* This decides where the kernel will search for a free chunk of vm
13014 * space during mmap's.
13015 */
13016 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
13017- 0xc0000000 : 0xFFFFe000)
13018+ 0xc0000000 : 0xFFFFf000)
13019
13020 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
13021 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
13022@@ -893,11 +896,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
13023 #define STACK_TOP_MAX TASK_SIZE_MAX
13024
13025 #define INIT_THREAD { \
13026- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
13027+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
13028 }
13029
13030 #define INIT_TSS { \
13031- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
13032+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
13033 }
13034
13035 /*
13036@@ -925,6 +928,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
13037 */
13038 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
13039
13040+#ifdef CONFIG_PAX_SEGMEXEC
13041+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
13042+#endif
13043+
13044 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
13045
13046 /* Get/set a process' ability to use the timestamp counter instruction */
13047@@ -985,12 +992,12 @@ extern bool cpu_has_amd_erratum(const int *);
13048 #define cpu_has_amd_erratum(x) (false)
13049 #endif /* CONFIG_CPU_SUP_AMD */
13050
13051-extern unsigned long arch_align_stack(unsigned long sp);
13052+#define arch_align_stack(x) ((x) & ~0xfUL)
13053 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
13054
13055 void default_idle(void);
13056 bool set_pm_idle_to_default(void);
13057
13058-void stop_this_cpu(void *dummy);
13059+void stop_this_cpu(void *dummy) __noreturn;
13060
13061 #endif /* _ASM_X86_PROCESSOR_H */
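
Among the processor.h changes, arch_align_stack() stops being an out-of-line function with a random offset and becomes a plain 16-byte alignment mask, since PaX supplies its own stack randomization; sp0 also reserves 8/16 bytes at the stack top so task_pt_regs() can be derived from thread.sp0 directly. The mask in action:

#include <stdio.h>

#define arch_align_stack(x) ((x) & ~0xfUL)

int main(void)
{
    unsigned long sp = 0x7fffffffe12fUL;
    printf("%#lx -> %#lx\n", sp, arch_align_stack(sp));  /* ...e120 */
    return 0;
}
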
13062diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
13063index 19f16eb..b50624b 100644
13064--- a/arch/x86/include/asm/ptrace.h
13065+++ b/arch/x86/include/asm/ptrace.h
13066@@ -155,28 +155,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
13067 }
13068
13069 /*
13070- * user_mode_vm(regs) determines whether a register set came from user mode.
13071+ * user_mode(regs) determines whether a register set came from user mode.
13072 * This is true if V8086 mode was enabled OR if the register set was from
13073 * protected mode with RPL-3 CS value. This tricky test checks that with
13074 * one comparison. Many places in the kernel can bypass this full check
13075- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
13076+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
13077+ * be used.
13078 */
13079-static inline int user_mode(struct pt_regs *regs)
13080+static inline int user_mode_novm(struct pt_regs *regs)
13081 {
13082 #ifdef CONFIG_X86_32
13083 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
13084 #else
13085- return !!(regs->cs & 3);
13086+ return !!(regs->cs & SEGMENT_RPL_MASK);
13087 #endif
13088 }
13089
13090-static inline int user_mode_vm(struct pt_regs *regs)
13091+static inline int user_mode(struct pt_regs *regs)
13092 {
13093 #ifdef CONFIG_X86_32
13094 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
13095 USER_RPL;
13096 #else
13097- return user_mode(regs);
13098+ return user_mode_novm(regs);
13099 #endif
13100 }
13101
13102@@ -192,15 +193,16 @@ static inline int v8086_mode(struct pt_regs *regs)
13103 #ifdef CONFIG_X86_64
13104 static inline bool user_64bit_mode(struct pt_regs *regs)
13105 {
13106+ unsigned long cs = regs->cs & 0xffff;
13107 #ifndef CONFIG_PARAVIRT
13108 /*
13109 * On non-paravirt systems, this is the only long mode CPL 3
13110 * selector. We do not allow long mode selectors in the LDT.
13111 */
13112- return regs->cs == __USER_CS;
13113+ return cs == __USER_CS;
13114 #else
13115 /* Headers are too twisted for this to go in paravirt.h. */
13116- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
13117+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
13118 #endif
13119 }
13120 #endif
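
The rename swaps the safe and unsafe spellings: user_mode() now performs the full V8086-aware test and the RPL-only fast path becomes user_mode_novm(), so code that forgets about vm86 gets the safe behaviour by default. The core selector test, restated for the 32-bit case:

#include <stdio.h>

#define SEGMENT_RPL_MASK 0x3
#define USER_RPL         0x3

static int user_mode_novm(unsigned long cs)
{
    return (cs & SEGMENT_RPL_MASK) == USER_RPL;
}

int main(void)
{
    printf("kernel cs 0x10 -> %d\n", user_mode_novm(0x10));  /* 0 */
    printf("user   cs 0x33 -> %d\n", user_mode_novm(0x33));  /* 1 */
    return 0;
}
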
13121diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
13122index fe1ec5b..dc5c3fe 100644
13123--- a/arch/x86/include/asm/realmode.h
13124+++ b/arch/x86/include/asm/realmode.h
13125@@ -22,16 +22,14 @@ struct real_mode_header {
13126 #endif
13127 /* APM/BIOS reboot */
13128 u32 machine_real_restart_asm;
13129-#ifdef CONFIG_X86_64
13130 u32 machine_real_restart_seg;
13131-#endif
13132 };
13133
13134 /* This must match data at trampoline_32/64.S */
13135 struct trampoline_header {
13136 #ifdef CONFIG_X86_32
13137 u32 start;
13138- u16 gdt_pad;
13139+ u16 boot_cs;
13140 u16 gdt_limit;
13141 u32 gdt_base;
13142 #else
13143diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
13144index a82c4f1..f9c9696 100644
13145--- a/arch/x86/include/asm/reboot.h
13146+++ b/arch/x86/include/asm/reboot.h
13147@@ -6,12 +6,12 @@
13148 struct pt_regs;
13149
13150 struct machine_ops {
13151- void (*restart)(char *cmd);
13152- void (*halt)(void);
13153- void (*power_off)(void);
13154+ void (* __noreturn restart)(char *cmd);
13155+ void (* __noreturn halt)(void);
13156+ void (* __noreturn power_off)(void);
13157 void (*shutdown)(void);
13158 void (*crash_shutdown)(struct pt_regs *);
13159- void (*emergency_restart)(void);
13160+ void (* __noreturn emergency_restart)(void);
13161 };
13162
13163 extern struct machine_ops machine_ops;
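
Marking the restart/halt/power_off members __noreturn (like machine_emergency_restart() earlier) lets the compiler both check that the implementations really never return and discard unreachable code after the calls. A tiny demonstration of the attribute's contract:

#include <stdio.h>
#include <stdlib.h>

static void machine_halt_sketch(void) __attribute__((__noreturn__));

static void machine_halt_sketch(void)
{
    puts("halting");
    exit(0);   /* never returns, satisfying the attribute */
}

int main(void)
{
    machine_halt_sketch();
    /* unreachable -- and the compiler knows it */
}
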
13164diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
13165index 2dbe4a7..ce1db00 100644
13166--- a/arch/x86/include/asm/rwsem.h
13167+++ b/arch/x86/include/asm/rwsem.h
13168@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
13169 {
13170 asm volatile("# beginning down_read\n\t"
13171 LOCK_PREFIX _ASM_INC "(%1)\n\t"
13172+
13173+#ifdef CONFIG_PAX_REFCOUNT
13174+ "jno 0f\n"
13175+ LOCK_PREFIX _ASM_DEC "(%1)\n"
13176+ "int $4\n0:\n"
13177+ _ASM_EXTABLE(0b, 0b)
13178+#endif
13179+
13180 /* adds 0x00000001 */
13181 " jns 1f\n"
13182 " call call_rwsem_down_read_failed\n"
13183@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
13184 "1:\n\t"
13185 " mov %1,%2\n\t"
13186 " add %3,%2\n\t"
13187+
13188+#ifdef CONFIG_PAX_REFCOUNT
13189+ "jno 0f\n"
13190+ "sub %3,%2\n"
13191+ "int $4\n0:\n"
13192+ _ASM_EXTABLE(0b, 0b)
13193+#endif
13194+
13195 " jle 2f\n\t"
13196 LOCK_PREFIX " cmpxchg %2,%0\n\t"
13197 " jnz 1b\n\t"
13198@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
13199 long tmp;
13200 asm volatile("# beginning down_write\n\t"
13201 LOCK_PREFIX " xadd %1,(%2)\n\t"
13202+
13203+#ifdef CONFIG_PAX_REFCOUNT
13204+ "jno 0f\n"
13205+ "mov %1,(%2)\n"
13206+ "int $4\n0:\n"
13207+ _ASM_EXTABLE(0b, 0b)
13208+#endif
13209+
13210 /* adds 0xffff0001, returns the old value */
13211 " test %1,%1\n\t"
13212 /* was the count 0 before? */
13213@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
13214 long tmp;
13215 asm volatile("# beginning __up_read\n\t"
13216 LOCK_PREFIX " xadd %1,(%2)\n\t"
13217+
13218+#ifdef CONFIG_PAX_REFCOUNT
13219+ "jno 0f\n"
13220+ "mov %1,(%2)\n"
13221+ "int $4\n0:\n"
13222+ _ASM_EXTABLE(0b, 0b)
13223+#endif
13224+
13225 /* subtracts 1, returns the old value */
13226 " jns 1f\n\t"
13227 " call call_rwsem_wake\n" /* expects old value in %edx */
13228@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
13229 long tmp;
13230 asm volatile("# beginning __up_write\n\t"
13231 LOCK_PREFIX " xadd %1,(%2)\n\t"
13232+
13233+#ifdef CONFIG_PAX_REFCOUNT
13234+ "jno 0f\n"
13235+ "mov %1,(%2)\n"
13236+ "int $4\n0:\n"
13237+ _ASM_EXTABLE(0b, 0b)
13238+#endif
13239+
13240 /* subtracts 0xffff0001, returns the old value */
13241 " jns 1f\n\t"
13242 " call call_rwsem_wake\n" /* expects old value in %edx */
13243@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
13244 {
13245 asm volatile("# beginning __downgrade_write\n\t"
13246 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
13247+
13248+#ifdef CONFIG_PAX_REFCOUNT
13249+ "jno 0f\n"
13250+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
13251+ "int $4\n0:\n"
13252+ _ASM_EXTABLE(0b, 0b)
13253+#endif
13254+
13255 /*
13256 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
13257 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
13258@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
13259 */
13260 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
13261 {
13262- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
13263+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
13264+
13265+#ifdef CONFIG_PAX_REFCOUNT
13266+ "jno 0f\n"
13267+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
13268+ "int $4\n0:\n"
13269+ _ASM_EXTABLE(0b, 0b)
13270+#endif
13271+
13272 : "+m" (sem->count)
13273 : "er" (delta));
13274 }
13275@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
13276 */
13277 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
13278 {
13279- return delta + xadd(&sem->count, delta);
13280+ return delta + xadd_check_overflow(&sem->count, delta);
13281 }
13282
13283 #endif /* __KERNEL__ */
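
rwsem_atomic_update() switches from xadd() to xadd_check_overflow(), extending the same overflow trap to the semaphore count. A hedged C-level approximation using GCC's checked-arithmetic builtin in place of the jno/int $4 sequence (the function shape is simplified; the real helper operates on struct rw_semaphore):

#include <limits.h>
#include <stdio.h>

static long rwsem_atomic_update(long *count, long delta)
{
    long old = __atomic_fetch_add(count, delta, __ATOMIC_SEQ_CST);
    long sum;
    if (__builtin_saddl_overflow(old, delta, &sum))
        fprintf(stderr, "rwsem count overflow\n");  /* kernel: int $4 */
    return sum;
}

int main(void)
{
    long count = LONG_MAX;
    rwsem_atomic_update(&count, 1);   /* triggers the overflow report */
    return 0;
}
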
13284diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
13285index c48a950..c6d7468 100644
13286--- a/arch/x86/include/asm/segment.h
13287+++ b/arch/x86/include/asm/segment.h
13288@@ -64,10 +64,15 @@
13289 * 26 - ESPFIX small SS
13290 * 27 - per-cpu [ offset to per-cpu data area ]
13291 * 28 - stack_canary-20 [ for stack protector ]
13292- * 29 - unused
13293- * 30 - unused
13294+ * 29 - PCI BIOS CS
13295+ * 30 - PCI BIOS DS
13296 * 31 - TSS for double fault handler
13297 */
13298+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
13299+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
13300+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
13301+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
13302+
13303 #define GDT_ENTRY_TLS_MIN 6
13304 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
13305
13306@@ -79,6 +84,8 @@
13307
13308 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
13309
13310+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
13311+
13312 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
13313
13314 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
13315@@ -104,6 +111,12 @@
13316 #define __KERNEL_STACK_CANARY 0
13317 #endif
13318
13319+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
13320+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
13321+
13322+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
13323+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
13324+
13325 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
13326
13327 /*
13328@@ -141,7 +154,7 @@
13329 */
13330
13331 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
13332-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
13333+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
13334
13335
13336 #else
13337@@ -165,6 +178,8 @@
13338 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
13339 #define __USER32_DS __USER_DS
13340
13341+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
13342+
13343 #define GDT_ENTRY_TSS 8 /* needs two entries */
13344 #define GDT_ENTRY_LDT 10 /* needs two entries */
13345 #define GDT_ENTRY_TLS_MIN 12
13346@@ -185,6 +200,7 @@
13347 #endif
13348
13349 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
13350+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
13351 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
13352 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
13353 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
13354@@ -265,7 +281,7 @@ static inline unsigned long get_limit(unsigned long segment)
13355 {
13356 unsigned long __limit;
13357 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
13358- return __limit + 1;
13359+ return __limit;
13360 }
13361
13362 #endif /* !__ASSEMBLY__ */
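The selector constants above encode the standard x86 layout: descriptor index in bits 3-15, table indicator in bit 2, requested privilege level in bits 0-1, hence the `GDT_ENTRY_* * 8` arithmetic and the `+3` on user selectors. The new SEGMENT_IS_PNP_CODE masks with 0xFFFC to strip only the RPL bits and then compares exactly against PNP_CS32/PNP_CS16, where the old loose `& 0xf4` bitmask test could match selectors it was never meant to. A sketch of the bit layout (illustrative macros, not kernel API):

/* selector = index:13 | TI:1 | RPL:2 */
#define SEL(index, rpl)		(((index) << 3) | (rpl))
#define SEL_INDEX(sel)		((sel) >> 3)
#define SEL_STRIP_RPL(sel)	((sel) & 0xFFFCU)	/* keep index + TI */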
13363diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
13364index 4f19a15..e04d86f 100644
13365--- a/arch/x86/include/asm/smp.h
13366+++ b/arch/x86/include/asm/smp.h
13367@@ -36,7 +36,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
13368 /* cpus sharing the last level cache: */
13369 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
13370 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
13371-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
13372+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
13373
13374 static inline struct cpumask *cpu_sibling_mask(int cpu)
13375 {
13376@@ -190,14 +190,8 @@ extern unsigned disabled_cpus __cpuinitdata;
13377 extern int safe_smp_processor_id(void);
13378
13379 #elif defined(CONFIG_X86_64_SMP)
13380-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
13381-
13382-#define stack_smp_processor_id() \
13383-({ \
13384- struct thread_info *ti; \
13385- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
13386- ti->cpu; \
13387-})
13388+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
13389+#define stack_smp_processor_id() raw_smp_processor_id()
13390 #define safe_smp_processor_id() smp_processor_id()
13391
13392 #endif
13393diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
13394index 33692ea..350a534 100644
13395--- a/arch/x86/include/asm/spinlock.h
13396+++ b/arch/x86/include/asm/spinlock.h
13397@@ -172,6 +172,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
13398 static inline void arch_read_lock(arch_rwlock_t *rw)
13399 {
13400 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
13401+
13402+#ifdef CONFIG_PAX_REFCOUNT
13403+ "jno 0f\n"
13404+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
13405+ "int $4\n0:\n"
13406+ _ASM_EXTABLE(0b, 0b)
13407+#endif
13408+
13409 "jns 1f\n"
13410 "call __read_lock_failed\n\t"
13411 "1:\n"
13412@@ -181,6 +189,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
13413 static inline void arch_write_lock(arch_rwlock_t *rw)
13414 {
13415 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
13416+
13417+#ifdef CONFIG_PAX_REFCOUNT
13418+ "jno 0f\n"
13419+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
13420+ "int $4\n0:\n"
13421+ _ASM_EXTABLE(0b, 0b)
13422+#endif
13423+
13424 "jz 1f\n"
13425 "call __write_lock_failed\n\t"
13426 "1:\n"
13427@@ -210,13 +226,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
13428
13429 static inline void arch_read_unlock(arch_rwlock_t *rw)
13430 {
13431- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
13432+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
13433+
13434+#ifdef CONFIG_PAX_REFCOUNT
13435+ "jno 0f\n"
13436+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
13437+ "int $4\n0:\n"
13438+ _ASM_EXTABLE(0b, 0b)
13439+#endif
13440+
13441 :"+m" (rw->lock) : : "memory");
13442 }
13443
13444 static inline void arch_write_unlock(arch_rwlock_t *rw)
13445 {
13446- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
13447+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
13448+
13449+#ifdef CONFIG_PAX_REFCOUNT
13450+ "jno 0f\n"
13451+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
13452+ "int $4\n0:\n"
13453+ _ASM_EXTABLE(0b, 0b)
13454+#endif
13455+
13456 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
13457 }
13458
13459diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
13460index 6a99859..03cb807 100644
13461--- a/arch/x86/include/asm/stackprotector.h
13462+++ b/arch/x86/include/asm/stackprotector.h
13463@@ -47,7 +47,7 @@
13464 * head_32 for boot CPU and setup_per_cpu_areas() for others.
13465 */
13466 #define GDT_STACK_CANARY_INIT \
13467- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
13468+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
13469
13470 /*
13471 * Initialize the stackprotector canary value.
13472@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
13473
13474 static inline void load_stack_canary_segment(void)
13475 {
13476-#ifdef CONFIG_X86_32
13477+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
13478 asm volatile ("mov %0, %%gs" : : "r" (0));
13479 #endif
13480 }
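The stackprotector.h hunk above is coupled to the get_limit() change in segment.h: a byte-granular descriptor's limit field holds the last valid byte offset, i.e. size - 1, and `lsll` returns that raw field. Once get_limit() stops adding 1, the 24-byte stack-canary segment must be described with limit 0x17 rather than 0x18. In short (sketch of the semantics, not kernel code):

/* limit field semantics for a byte-granular segment:
 *   segment size = limit + 1
 *   24-byte canary segment -> limit field = 0x17
 */
static unsigned long segment_size(unsigned long lsl_limit)
{
	return lsl_limit + 1;	/* lsl yields the raw limit field */
}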
13481diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
13482index 70bbe39..4ae2bd4 100644
13483--- a/arch/x86/include/asm/stacktrace.h
13484+++ b/arch/x86/include/asm/stacktrace.h
13485@@ -11,28 +11,20 @@
13486
13487 extern int kstack_depth_to_print;
13488
13489-struct thread_info;
13490+struct task_struct;
13491 struct stacktrace_ops;
13492
13493-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
13494- unsigned long *stack,
13495- unsigned long bp,
13496- const struct stacktrace_ops *ops,
13497- void *data,
13498- unsigned long *end,
13499- int *graph);
13500+typedef unsigned long walk_stack_t(struct task_struct *task,
13501+ void *stack_start,
13502+ unsigned long *stack,
13503+ unsigned long bp,
13504+ const struct stacktrace_ops *ops,
13505+ void *data,
13506+ unsigned long *end,
13507+ int *graph);
13508
13509-extern unsigned long
13510-print_context_stack(struct thread_info *tinfo,
13511- unsigned long *stack, unsigned long bp,
13512- const struct stacktrace_ops *ops, void *data,
13513- unsigned long *end, int *graph);
13514-
13515-extern unsigned long
13516-print_context_stack_bp(struct thread_info *tinfo,
13517- unsigned long *stack, unsigned long bp,
13518- const struct stacktrace_ops *ops, void *data,
13519- unsigned long *end, int *graph);
13520+extern walk_stack_t print_context_stack;
13521+extern walk_stack_t print_context_stack_bp;
13522
13523 /* Generic stack tracer with callbacks */
13524
13525@@ -40,7 +32,7 @@ struct stacktrace_ops {
13526 void (*address)(void *data, unsigned long address, int reliable);
13527 /* On negative return stop dumping */
13528 int (*stack)(void *data, char *name);
13529- walk_stack_t walk_stack;
13530+ walk_stack_t *walk_stack;
13531 };
13532
13533 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
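The stacktrace.h hunk leans on a C feature worth spelling out: a typedef may name a function type itself, not just a pointer to one. Declaring `extern walk_stack_t print_context_stack;` then forces every definition to match the typedef's prototype exactly, and struct members take `walk_stack_t *`. A self-contained illustration (hypothetical names):

typedef int handler_t(int arg);	/* names a function type */

extern handler_t my_handler;	/* declares a function, not a pointer */

int my_handler(int arg)		/* must match handler_t exactly */
{
	return arg * 2;
}

struct ops {
	handler_t *fn;		/* pointer to a function of that type */
};

static struct ops o = { my_handler };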
13534diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
13535index 4ec45b3..a4f0a8a 100644
13536--- a/arch/x86/include/asm/switch_to.h
13537+++ b/arch/x86/include/asm/switch_to.h
13538@@ -108,7 +108,7 @@ do { \
13539 "call __switch_to\n\t" \
13540 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
13541 __switch_canary \
13542- "movq %P[thread_info](%%rsi),%%r8\n\t" \
13543+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
13544 "movq %%rax,%%rdi\n\t" \
13545 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
13546 "jnz ret_from_fork\n\t" \
13547@@ -119,7 +119,7 @@ do { \
13548 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
13549 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
13550 [_tif_fork] "i" (_TIF_FORK), \
13551- [thread_info] "i" (offsetof(struct task_struct, stack)), \
13552+ [thread_info] "m" (current_tinfo), \
13553 [current_task] "m" (current_task) \
13554 __switch_canary_iparam \
13555 : "memory", "cc" __EXTRA_CLOBBER)
13556diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
13557index 2d946e6..e453ec4 100644
13558--- a/arch/x86/include/asm/thread_info.h
13559+++ b/arch/x86/include/asm/thread_info.h
13560@@ -10,6 +10,7 @@
13561 #include <linux/compiler.h>
13562 #include <asm/page.h>
13563 #include <asm/types.h>
13564+#include <asm/percpu.h>
13565
13566 /*
13567 * low level task data that entry.S needs immediate access to
13568@@ -24,7 +25,6 @@ struct exec_domain;
13569 #include <linux/atomic.h>
13570
13571 struct thread_info {
13572- struct task_struct *task; /* main task structure */
13573 struct exec_domain *exec_domain; /* execution domain */
13574 __u32 flags; /* low level flags */
13575 __u32 status; /* thread synchronous flags */
13576@@ -34,19 +34,13 @@ struct thread_info {
13577 mm_segment_t addr_limit;
13578 struct restart_block restart_block;
13579 void __user *sysenter_return;
13580-#ifdef CONFIG_X86_32
13581- unsigned long previous_esp; /* ESP of the previous stack in
13582- case of nested (IRQ) stacks
13583- */
13584- __u8 supervisor_stack[0];
13585-#endif
13586+ unsigned long lowest_stack;
13587 unsigned int sig_on_uaccess_error:1;
13588 unsigned int uaccess_err:1; /* uaccess failed */
13589 };
13590
13591-#define INIT_THREAD_INFO(tsk) \
13592+#define INIT_THREAD_INFO \
13593 { \
13594- .task = &tsk, \
13595 .exec_domain = &default_exec_domain, \
13596 .flags = 0, \
13597 .cpu = 0, \
13598@@ -57,7 +51,7 @@ struct thread_info {
13599 }, \
13600 }
13601
13602-#define init_thread_info (init_thread_union.thread_info)
13603+#define init_thread_info (init_thread_union.stack)
13604 #define init_stack (init_thread_union.stack)
13605
13606 #else /* !__ASSEMBLY__ */
13607@@ -98,6 +92,7 @@ struct thread_info {
13608 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
13609 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
13610 #define TIF_X32 30 /* 32-bit native x86-64 binary */
13611+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
13612
13613 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
13614 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
13615@@ -122,17 +117,18 @@ struct thread_info {
13616 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
13617 #define _TIF_ADDR32 (1 << TIF_ADDR32)
13618 #define _TIF_X32 (1 << TIF_X32)
13619+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
13620
13621 /* work to do in syscall_trace_enter() */
13622 #define _TIF_WORK_SYSCALL_ENTRY \
13623 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
13624 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
13625- _TIF_NOHZ)
13626+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
13627
13628 /* work to do in syscall_trace_leave() */
13629 #define _TIF_WORK_SYSCALL_EXIT \
13630 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
13631- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
13632+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
13633
13634 /* work to do on interrupt/exception return */
13635 #define _TIF_WORK_MASK \
13636@@ -143,7 +139,7 @@ struct thread_info {
13637 /* work to do on any return to user space */
13638 #define _TIF_ALLWORK_MASK \
13639 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
13640- _TIF_NOHZ)
13641+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
13642
13643 /* Only used for 64 bit */
13644 #define _TIF_DO_NOTIFY_MASK \
13645@@ -159,45 +155,40 @@ struct thread_info {
13646
13647 #define PREEMPT_ACTIVE 0x10000000
13648
13649-#ifdef CONFIG_X86_32
13650-
13651-#define STACK_WARN (THREAD_SIZE/8)
13652-/*
13653- * macros/functions for gaining access to the thread information structure
13654- *
13655- * preempt_count needs to be 1 initially, until the scheduler is functional.
13656- */
13657-#ifndef __ASSEMBLY__
13658-
13659-
13660-/* how to get the current stack pointer from C */
13661-register unsigned long current_stack_pointer asm("esp") __used;
13662-
13663-/* how to get the thread information struct from C */
13664-static inline struct thread_info *current_thread_info(void)
13665-{
13666- return (struct thread_info *)
13667- (current_stack_pointer & ~(THREAD_SIZE - 1));
13668-}
13669-
13670-#else /* !__ASSEMBLY__ */
13671-
13672+#ifdef __ASSEMBLY__
13673 /* how to get the thread information struct from ASM */
13674 #define GET_THREAD_INFO(reg) \
13675- movl $-THREAD_SIZE, reg; \
13676- andl %esp, reg
13677+ mov PER_CPU_VAR(current_tinfo), reg
13678
13679 /* use this one if reg already contains %esp */
13680-#define GET_THREAD_INFO_WITH_ESP(reg) \
13681- andl $-THREAD_SIZE, reg
13682+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
13683+#else
13684+/* how to get the thread information struct from C */
13685+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
13686+
13687+static __always_inline struct thread_info *current_thread_info(void)
13688+{
13689+ return this_cpu_read_stable(current_tinfo);
13690+}
13691+#endif
13692+
13693+#ifdef CONFIG_X86_32
13694+
13695+#define STACK_WARN (THREAD_SIZE/8)
13696+/*
13697+ * macros/functions for gaining access to the thread information structure
13698+ *
13699+ * preempt_count needs to be 1 initially, until the scheduler is functional.
13700+ */
13701+#ifndef __ASSEMBLY__
13702+
13703+/* how to get the current stack pointer from C */
13704+register unsigned long current_stack_pointer asm("esp") __used;
13705
13706 #endif
13707
13708 #else /* X86_32 */
13709
13710-#include <asm/percpu.h>
13711-#define KERNEL_STACK_OFFSET (5*8)
13712-
13713 /*
13714 * macros/functions for gaining access to the thread information structure
13715 * preempt_count needs to be 1 initially, until the scheduler is functional.
13716@@ -205,27 +196,8 @@ static inline struct thread_info *current_thread_info(void)
13717 #ifndef __ASSEMBLY__
13718 DECLARE_PER_CPU(unsigned long, kernel_stack);
13719
13720-static inline struct thread_info *current_thread_info(void)
13721-{
13722- struct thread_info *ti;
13723- ti = (void *)(this_cpu_read_stable(kernel_stack) +
13724- KERNEL_STACK_OFFSET - THREAD_SIZE);
13725- return ti;
13726-}
13727-
13728-#else /* !__ASSEMBLY__ */
13729-
13730-/* how to get the thread information struct from ASM */
13731-#define GET_THREAD_INFO(reg) \
13732- movq PER_CPU_VAR(kernel_stack),reg ; \
13733- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
13734-
13735-/*
13736- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
13737- * a certain register (to be used in assembler memory operands).
13738- */
13739-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
13740-
13741+/* how to get the current stack pointer from C */
13742+register unsigned long current_stack_pointer asm("rsp") __used;
13743 #endif
13744
13745 #endif /* !X86_32 */
13746@@ -286,5 +258,12 @@ static inline bool is_ia32_task(void)
13747 extern void arch_task_cache_init(void);
13748 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
13749 extern void arch_release_task_struct(struct task_struct *tsk);
13750+
13751+#define __HAVE_THREAD_FUNCTIONS
13752+#define task_thread_info(task) (&(task)->tinfo)
13753+#define task_stack_page(task) ((task)->stack)
13754+#define setup_thread_stack(p, org) do {} while (0)
13755+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
13756+
13757 #endif
13758 #endif /* _ASM_X86_THREAD_INFO_H */
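The thread_info.h rewrite above implements one idea: thread_info no longer lives at the base of the kernel stack (where a deep enough overflow can corrupt it, and where it is found by masking the stack pointer), but is reached through a per-CPU pointer updated at context switch. A simplified before/after sketch (names and THREAD_SIZE value are illustrative):

#define THREAD_SIZE_SK	8192UL

struct thread_info;

/* before: derive thread_info by rounding the stack pointer down */
static inline struct thread_info *ti_from_stack(unsigned long sp)
{
	return (struct thread_info *)(sp & ~(THREAD_SIZE_SK - 1));
}

/* after: one load from a per-CPU slot (the kernel's current_tinfo) */
extern struct thread_info *current_tinfo_sk;

static inline struct thread_info *ti_percpu(void)
{
	return current_tinfo_sk;
}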
13759diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
13760index 7ccf8d1..7cdca12 100644
13761--- a/arch/x86/include/asm/uaccess.h
13762+++ b/arch/x86/include/asm/uaccess.h
13763@@ -7,6 +7,7 @@
13764 #include <linux/compiler.h>
13765 #include <linux/thread_info.h>
13766 #include <linux/string.h>
13767+#include <linux/sched.h>
13768 #include <asm/asm.h>
13769 #include <asm/page.h>
13770 #include <asm/smap.h>
13771@@ -14,6 +15,8 @@
13772 #define VERIFY_READ 0
13773 #define VERIFY_WRITE 1
13774
13775+extern void check_object_size(const void *ptr, unsigned long n, bool to);
13776+
13777 /*
13778 * The fs value determines whether argument validity checking should be
13779 * performed or not. If get_fs() == USER_DS, checking is performed, with
13780@@ -29,7 +32,12 @@
13781
13782 #define get_ds() (KERNEL_DS)
13783 #define get_fs() (current_thread_info()->addr_limit)
13784+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
13785+void __set_fs(mm_segment_t x);
13786+void set_fs(mm_segment_t x);
13787+#else
13788 #define set_fs(x) (current_thread_info()->addr_limit = (x))
13789+#endif
13790
13791 #define segment_eq(a, b) ((a).seg == (b).seg)
13792
13793@@ -77,8 +85,33 @@
13794 * checks that the pointer is in the user space range - after calling
13795 * this function, memory access functions may still return -EFAULT.
13796 */
13797-#define access_ok(type, addr, size) \
13798- (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
13799+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
13800+#define access_ok(type, addr, size) \
13801+({ \
13802+ long __size = size; \
13803+ unsigned long __addr = (unsigned long)addr; \
13804+ unsigned long __addr_ao = __addr & PAGE_MASK; \
13805+ unsigned long __end_ao = __addr + __size - 1; \
13806+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
13807+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
13808+ while(__addr_ao <= __end_ao) { \
13809+ char __c_ao; \
13810+ __addr_ao += PAGE_SIZE; \
13811+ if (__size > PAGE_SIZE) \
13812+ cond_resched(); \
13813+ if (__get_user(__c_ao, (char __user *)__addr)) \
13814+ break; \
13815+ if (type != VERIFY_WRITE) { \
13816+ __addr = __addr_ao; \
13817+ continue; \
13818+ } \
13819+ if (__put_user(__c_ao, (char __user *)__addr)) \
13820+ break; \
13821+ __addr = __addr_ao; \
13822+ } \
13823+ } \
13824+ __ret_ao; \
13825+})
13826
13827 /*
13828 * The exception table consists of pairs of addresses relative to the
13829@@ -189,13 +222,21 @@ extern int __get_user_bad(void);
13830 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
13831 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
13832
13833-
13834+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
13835+#define __copyuser_seg "gs;"
13836+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
13837+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
13838+#else
13839+#define __copyuser_seg
13840+#define __COPYUSER_SET_ES
13841+#define __COPYUSER_RESTORE_ES
13842+#endif
13843
13844 #ifdef CONFIG_X86_32
13845 #define __put_user_asm_u64(x, addr, err, errret) \
13846 asm volatile(ASM_STAC "\n" \
13847- "1: movl %%eax,0(%2)\n" \
13848- "2: movl %%edx,4(%2)\n" \
13849+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
13850+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
13851 "3: " ASM_CLAC "\n" \
13852 ".section .fixup,\"ax\"\n" \
13853 "4: movl %3,%0\n" \
13854@@ -208,8 +249,8 @@ extern int __get_user_bad(void);
13855
13856 #define __put_user_asm_ex_u64(x, addr) \
13857 asm volatile(ASM_STAC "\n" \
13858- "1: movl %%eax,0(%1)\n" \
13859- "2: movl %%edx,4(%1)\n" \
13860+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
13861+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
13862 "3: " ASM_CLAC "\n" \
13863 _ASM_EXTABLE_EX(1b, 2b) \
13864 _ASM_EXTABLE_EX(2b, 3b) \
13865@@ -261,7 +302,7 @@ extern void __put_user_8(void);
13866 __typeof__(*(ptr)) __pu_val; \
13867 __chk_user_ptr(ptr); \
13868 might_fault(); \
13869- __pu_val = x; \
13870+ __pu_val = (x); \
13871 switch (sizeof(*(ptr))) { \
13872 case 1: \
13873 __put_user_x(1, __pu_val, ptr, __ret_pu); \
13874@@ -383,7 +424,7 @@ do { \
13875
13876 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
13877 asm volatile(ASM_STAC "\n" \
13878- "1: mov"itype" %2,%"rtype"1\n" \
13879+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
13880 "2: " ASM_CLAC "\n" \
13881 ".section .fixup,\"ax\"\n" \
13882 "3: mov %3,%0\n" \
13883@@ -391,7 +432,7 @@ do { \
13884 " jmp 2b\n" \
13885 ".previous\n" \
13886 _ASM_EXTABLE(1b, 3b) \
13887- : "=r" (err), ltype(x) \
13888+ : "=r" (err), ltype (x) \
13889 : "m" (__m(addr)), "i" (errret), "0" (err))
13890
13891 #define __get_user_size_ex(x, ptr, size) \
13892@@ -416,7 +457,7 @@ do { \
13893 } while (0)
13894
13895 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
13896- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
13897+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
13898 "2:\n" \
13899 _ASM_EXTABLE_EX(1b, 2b) \
13900 : ltype(x) : "m" (__m(addr)))
13901@@ -433,13 +474,24 @@ do { \
13902 int __gu_err; \
13903 unsigned long __gu_val; \
13904 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
13905- (x) = (__force __typeof__(*(ptr)))__gu_val; \
13906+ (x) = (__typeof__(*(ptr)))__gu_val; \
13907 __gu_err; \
13908 })
13909
13910 /* FIXME: this hack is definitely wrong -AK */
13911 struct __large_struct { unsigned long buf[100]; };
13912-#define __m(x) (*(struct __large_struct __user *)(x))
13913+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13914+#define ____m(x) \
13915+({ \
13916+ unsigned long ____x = (unsigned long)(x); \
13917+ if (____x < PAX_USER_SHADOW_BASE) \
13918+ ____x += PAX_USER_SHADOW_BASE; \
13919+ (void __user *)____x; \
13920+})
13921+#else
13922+#define ____m(x) (x)
13923+#endif
13924+#define __m(x) (*(struct __large_struct __user *)____m(x))
13925
13926 /*
13927 * Tell gcc we read from memory instead of writing: this is because
13928@@ -448,7 +500,7 @@ struct __large_struct { unsigned long buf[100]; };
13929 */
13930 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
13931 asm volatile(ASM_STAC "\n" \
13932- "1: mov"itype" %"rtype"1,%2\n" \
13933+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
13934 "2: " ASM_CLAC "\n" \
13935 ".section .fixup,\"ax\"\n" \
13936 "3: mov %3,%0\n" \
13937@@ -456,10 +508,10 @@ struct __large_struct { unsigned long buf[100]; };
13938 ".previous\n" \
13939 _ASM_EXTABLE(1b, 3b) \
13940 : "=r"(err) \
13941- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
13942+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
13943
13944 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
13945- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
13946+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
13947 "2:\n" \
13948 _ASM_EXTABLE_EX(1b, 2b) \
13949 : : ltype(x), "m" (__m(addr)))
13950@@ -498,8 +550,12 @@ struct __large_struct { unsigned long buf[100]; };
13951 * On error, the variable @x is set to zero.
13952 */
13953
13954+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13955+#define __get_user(x, ptr) get_user((x), (ptr))
13956+#else
13957 #define __get_user(x, ptr) \
13958 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
13959+#endif
13960
13961 /**
13962 * __put_user: - Write a simple value into user space, with less checking.
13963@@ -521,8 +577,12 @@ struct __large_struct { unsigned long buf[100]; };
13964 * Returns zero on success, or -EFAULT on error.
13965 */
13966
13967+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13968+#define __put_user(x, ptr) put_user((x), (ptr))
13969+#else
13970 #define __put_user(x, ptr) \
13971 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
13972+#endif
13973
13974 #define __get_user_unaligned __get_user
13975 #define __put_user_unaligned __put_user
13976@@ -540,7 +600,7 @@ struct __large_struct { unsigned long buf[100]; };
13977 #define get_user_ex(x, ptr) do { \
13978 unsigned long __gue_val; \
13979 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
13980- (x) = (__force __typeof__(*(ptr)))__gue_val; \
13981+ (x) = (__typeof__(*(ptr)))__gue_val; \
13982 } while (0)
13983
13984 #ifdef CONFIG_X86_WP_WORKS_OK
13985@@ -574,8 +634,8 @@ strncpy_from_user(char *dst, const char __user *src, long count);
13986 extern __must_check long strlen_user(const char __user *str);
13987 extern __must_check long strnlen_user(const char __user *str, long n);
13988
13989-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
13990-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
13991+unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13992+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13993
13994 /*
13995 * movsl can be slow when source and dest are not both 8-byte aligned
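The expanded access_ok() above does more than a range check under UDEREF: when a range crosses a page boundary it walks it page by page, reading one byte per page with __get_user() (and writing it back with __put_user() for VERIFY_WRITE) so any fault is taken up front rather than in the middle of a copy, with cond_resched() keeping long probes preemptible. A user-space analogue of the probing loop (a sketch; assumes len > 0 and that the mapping under test does not wrap the address space):

#include <stddef.h>
#include <stdint.h>

#define PAGE_SZ		4096UL
#define PAGE_MSK	(~(PAGE_SZ - 1))

/* Touch one byte per page so a fault surfaces before the real copy. */
static void probe_range(volatile const char *mem, size_t len)
{
	uintptr_t addr = (uintptr_t)mem & PAGE_MSK;
	uintptr_t end  = (uintptr_t)mem + len - 1;

	for (; addr <= end; addr += PAGE_SZ)
		(void)*(volatile const char *)addr;	/* may fault here */
}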
13996diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
13997index 7f760a9..00f93c0 100644
13998--- a/arch/x86/include/asm/uaccess_32.h
13999+++ b/arch/x86/include/asm/uaccess_32.h
14000@@ -11,15 +11,15 @@
14001 #include <asm/page.h>
14002
14003 unsigned long __must_check __copy_to_user_ll
14004- (void __user *to, const void *from, unsigned long n);
14005+ (void __user *to, const void *from, unsigned long n) __size_overflow(3);
14006 unsigned long __must_check __copy_from_user_ll
14007- (void *to, const void __user *from, unsigned long n);
14008+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
14009 unsigned long __must_check __copy_from_user_ll_nozero
14010- (void *to, const void __user *from, unsigned long n);
14011+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
14012 unsigned long __must_check __copy_from_user_ll_nocache
14013- (void *to, const void __user *from, unsigned long n);
14014+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
14015 unsigned long __must_check __copy_from_user_ll_nocache_nozero
14016- (void *to, const void __user *from, unsigned long n);
14017+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
14018
14019 /**
14020 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
14021@@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
14022 static __always_inline unsigned long __must_check
14023 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
14024 {
14025+ if ((long)n < 0)
14026+ return n;
14027+
14028 if (__builtin_constant_p(n)) {
14029 unsigned long ret;
14030
14031@@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
14032 return ret;
14033 }
14034 }
14035+ if (!__builtin_constant_p(n))
14036+ check_object_size(from, n, true);
14037 return __copy_to_user_ll(to, from, n);
14038 }
14039
14040@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
14041 __copy_to_user(void __user *to, const void *from, unsigned long n)
14042 {
14043 might_fault();
14044+
14045 return __copy_to_user_inatomic(to, from, n);
14046 }
14047
14048 static __always_inline unsigned long
14049 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
14050 {
14051+ if ((long)n < 0)
14052+ return n;
14053+
14054 /* Avoid zeroing the tail if the copy fails..
14055 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
14056 * but as the zeroing behaviour is only significant when n is not
14057@@ -137,6 +146,10 @@ static __always_inline unsigned long
14058 __copy_from_user(void *to, const void __user *from, unsigned long n)
14059 {
14060 might_fault();
14061+
14062+ if ((long)n < 0)
14063+ return n;
14064+
14065 if (__builtin_constant_p(n)) {
14066 unsigned long ret;
14067
14068@@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
14069 return ret;
14070 }
14071 }
14072+ if (!__builtin_constant_p(n))
14073+ check_object_size(to, n, false);
14074 return __copy_from_user_ll(to, from, n);
14075 }
14076
14077@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
14078 const void __user *from, unsigned long n)
14079 {
14080 might_fault();
14081+
14082+ if ((long)n < 0)
14083+ return n;
14084+
14085 if (__builtin_constant_p(n)) {
14086 unsigned long ret;
14087
14088@@ -181,15 +200,19 @@ static __always_inline unsigned long
14089 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
14090 unsigned long n)
14091 {
14092- return __copy_from_user_ll_nocache_nozero(to, from, n);
14093+ if ((long)n < 0)
14094+ return n;
14095+
14096+ return __copy_from_user_ll_nocache_nozero(to, from, n);
14097 }
14098
14099-unsigned long __must_check copy_to_user(void __user *to,
14100- const void *from, unsigned long n);
14101-unsigned long __must_check _copy_from_user(void *to,
14102- const void __user *from,
14103- unsigned long n);
14104-
14105+extern void copy_to_user_overflow(void)
14106+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
14107+ __compiletime_error("copy_to_user() buffer size is not provably correct")
14108+#else
14109+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
14110+#endif
14111+;
14112
14113 extern void copy_from_user_overflow(void)
14114 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
14115@@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
14116 #endif
14117 ;
14118
14119-static inline unsigned long __must_check copy_from_user(void *to,
14120- const void __user *from,
14121- unsigned long n)
14122+/**
14123+ * copy_to_user: - Copy a block of data into user space.
14124+ * @to: Destination address, in user space.
14125+ * @from: Source address, in kernel space.
14126+ * @n: Number of bytes to copy.
14127+ *
14128+ * Context: User context only. This function may sleep.
14129+ *
14130+ * Copy data from kernel space to user space.
14131+ *
14132+ * Returns number of bytes that could not be copied.
14133+ * On success, this will be zero.
14134+ */
14135+static inline unsigned long __must_check
14136+copy_to_user(void __user *to, const void *from, unsigned long n)
14137 {
14138- int sz = __compiletime_object_size(to);
14139+ size_t sz = __compiletime_object_size(from);
14140
14141- if (likely(sz == -1 || sz >= n))
14142- n = _copy_from_user(to, from, n);
14143- else
14144+ if (unlikely(sz != (size_t)-1 && sz < n))
14145+ copy_to_user_overflow();
14146+ else if (access_ok(VERIFY_WRITE, to, n))
14147+ n = __copy_to_user(to, from, n);
14148+ return n;
14149+}
14150+
14151+/**
14152+ * copy_from_user: - Copy a block of data from user space.
14153+ * @to: Destination address, in kernel space.
14154+ * @from: Source address, in user space.
14155+ * @n: Number of bytes to copy.
14156+ *
14157+ * Context: User context only. This function may sleep.
14158+ *
14159+ * Copy data from user space to kernel space.
14160+ *
14161+ * Returns number of bytes that could not be copied.
14162+ * On success, this will be zero.
14163+ *
14164+ * If some data could not be copied, this function will pad the copied
14165+ * data to the requested size using zero bytes.
14166+ */
14167+static inline unsigned long __must_check
14168+copy_from_user(void *to, const void __user *from, unsigned long n)
14169+{
14170+ size_t sz = __compiletime_object_size(to);
14171+
14172+ if (unlikely(sz != (size_t)-1 && sz < n))
14173 copy_from_user_overflow();
14174-
14175+ else if (access_ok(VERIFY_READ, from, n))
14176+ n = __copy_from_user(to, from, n);
14177+ else if ((long)n > 0) {
14178+ if (!__builtin_constant_p(n))
14179+ check_object_size(to, n, false);
14180+ memset(to, 0, n);
14181+ }
14182 return n;
14183 }
14184
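copy_to_user()/copy_from_user() above gain a static size check: __compiletime_object_size() (a wrapper around the compiler's __builtin_object_size) yields the known size of the source or destination object, or (size_t)-1 when it cannot be determined, and a constant n larger than a known object triggers the copy_*_overflow() diagnostics declared just before. A minimal stand-alone version of the check (a sketch, not the kernel function):

#include <string.h>

static inline unsigned long
copy_checked(void *dst, const void *src, unsigned long n)
{
	size_t sz = __builtin_object_size(dst, 0);	/* (size_t)-1 if unknown */

	if (sz != (size_t)-1 && sz < n)
		return n;	/* refuse; the kernel diagnoses this at build time */
	memcpy(dst, src, n);
	return 0;
}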
14185diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
14186index 142810c..4389299 100644
14187--- a/arch/x86/include/asm/uaccess_64.h
14188+++ b/arch/x86/include/asm/uaccess_64.h
14189@@ -10,6 +10,9 @@
14190 #include <asm/alternative.h>
14191 #include <asm/cpufeature.h>
14192 #include <asm/page.h>
14193+#include <asm/pgtable.h>
14194+
14195+#define set_fs(x) (current_thread_info()->addr_limit = (x))
14196
14197 /*
14198 * Copy To/From Userspace
14199@@ -17,13 +20,13 @@
14200
14201 /* Handles exceptions in both to and from, but doesn't do access_ok */
14202 __must_check unsigned long
14203-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
14204+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
14205 __must_check unsigned long
14206-copy_user_generic_string(void *to, const void *from, unsigned len);
14207+copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
14208 __must_check unsigned long
14209-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
14210+copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
14211
14212-static __always_inline __must_check unsigned long
14213+static __always_inline __must_check __size_overflow(3) unsigned long
14214 copy_user_generic(void *to, const void *from, unsigned len)
14215 {
14216 unsigned ret;
14217@@ -41,142 +44,205 @@ copy_user_generic(void *to, const void *from, unsigned len)
14218 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
14219 "=d" (len)),
14220 "1" (to), "2" (from), "3" (len)
14221- : "memory", "rcx", "r8", "r9", "r10", "r11");
14222+ : "memory", "rcx", "r8", "r9", "r11");
14223 return ret;
14224 }
14225
14226+static __always_inline __must_check unsigned long
14227+__copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);
14228+static __always_inline __must_check unsigned long
14229+__copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3);
14230 __must_check unsigned long
14231-_copy_to_user(void __user *to, const void *from, unsigned len);
14232-__must_check unsigned long
14233-_copy_from_user(void *to, const void __user *from, unsigned len);
14234-__must_check unsigned long
14235-copy_in_user(void __user *to, const void __user *from, unsigned len);
14236+copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
14237+
14238+extern void copy_to_user_overflow(void)
14239+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
14240+ __compiletime_error("copy_to_user() buffer size is not provably correct")
14241+#else
14242+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
14243+#endif
14244+;
14245+
14246+extern void copy_from_user_overflow(void)
14247+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
14248+ __compiletime_error("copy_from_user() buffer size is not provably correct")
14249+#else
14250+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
14251+#endif
14252+;
14253
14254 static inline unsigned long __must_check copy_from_user(void *to,
14255 const void __user *from,
14256 unsigned long n)
14257 {
14258- int sz = __compiletime_object_size(to);
14259-
14260 might_fault();
14261- if (likely(sz == -1 || sz >= n))
14262- n = _copy_from_user(to, from, n);
14263-#ifdef CONFIG_DEBUG_VM
14264- else
14265- WARN(1, "Buffer overflow detected!\n");
14266-#endif
14267+
14268+ if (access_ok(VERIFY_READ, from, n))
14269+ n = __copy_from_user(to, from, n);
14270+ else if (n < INT_MAX) {
14271+ if (!__builtin_constant_p(n))
14272+ check_object_size(to, n, false);
14273+ memset(to, 0, n);
14274+ }
14275 return n;
14276 }
14277
14278 static __always_inline __must_check
14279-int copy_to_user(void __user *dst, const void *src, unsigned size)
14280+int copy_to_user(void __user *dst, const void *src, unsigned long size)
14281 {
14282 might_fault();
14283
14284- return _copy_to_user(dst, src, size);
14285+ if (access_ok(VERIFY_WRITE, dst, size))
14286+ size = __copy_to_user(dst, src, size);
14287+ return size;
14288 }
14289
14290 static __always_inline __must_check
14291-int __copy_from_user(void *dst, const void __user *src, unsigned size)
14292+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
14293 {
14294- int ret = 0;
14295+ size_t sz = __compiletime_object_size(dst);
14296+ unsigned ret = 0;
14297
14298 might_fault();
14299- if (!__builtin_constant_p(size))
14300- return copy_user_generic(dst, (__force void *)src, size);
14301+
14302+ if (size > INT_MAX)
14303+ return size;
14304+
14305+#ifdef CONFIG_PAX_MEMORY_UDEREF
14306+ if (!__access_ok(VERIFY_READ, src, size))
14307+ return size;
14308+#endif
14309+
14310+ if (unlikely(sz != (size_t)-1 && sz < size)) {
14311+ copy_from_user_overflow();
14312+ return size;
14313+ }
14314+
14315+ if (!__builtin_constant_p(size)) {
14316+ check_object_size(dst, size, false);
14317+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
14318+ }
14319 switch (size) {
14320- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
14321+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
14322 ret, "b", "b", "=q", 1);
14323 return ret;
14324- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
14325+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
14326 ret, "w", "w", "=r", 2);
14327 return ret;
14328- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
14329+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
14330 ret, "l", "k", "=r", 4);
14331 return ret;
14332- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
14333+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
14334 ret, "q", "", "=r", 8);
14335 return ret;
14336 case 10:
14337- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
14338+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
14339 ret, "q", "", "=r", 10);
14340 if (unlikely(ret))
14341 return ret;
14342 __get_user_asm(*(u16 *)(8 + (char *)dst),
14343- (u16 __user *)(8 + (char __user *)src),
14344+ (const u16 __user *)(8 + (const char __user *)src),
14345 ret, "w", "w", "=r", 2);
14346 return ret;
14347 case 16:
14348- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
14349+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
14350 ret, "q", "", "=r", 16);
14351 if (unlikely(ret))
14352 return ret;
14353 __get_user_asm(*(u64 *)(8 + (char *)dst),
14354- (u64 __user *)(8 + (char __user *)src),
14355+ (const u64 __user *)(8 + (const char __user *)src),
14356 ret, "q", "", "=r", 8);
14357 return ret;
14358 default:
14359- return copy_user_generic(dst, (__force void *)src, size);
14360+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
14361 }
14362 }
14363
14364 static __always_inline __must_check
14365-int __copy_to_user(void __user *dst, const void *src, unsigned size)
14366+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
14367 {
14368- int ret = 0;
14369+ size_t sz = __compiletime_object_size(src);
14370+ unsigned ret = 0;
14371
14372 might_fault();
14373- if (!__builtin_constant_p(size))
14374- return copy_user_generic((__force void *)dst, src, size);
14375+
14376+ if (size > INT_MAX)
14377+ return size;
14378+
14379+#ifdef CONFIG_PAX_MEMORY_UDEREF
14380+ if (!__access_ok(VERIFY_WRITE, dst, size))
14381+ return size;
14382+#endif
14383+
14384+ if (unlikely(sz != (size_t)-1 && sz < size)) {
14385+ copy_to_user_overflow();
14386+ return size;
14387+ }
14388+
14389+ if (!__builtin_constant_p(size)) {
14390+ check_object_size(src, size, true);
14391+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
14392+ }
14393 switch (size) {
14394- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
14395+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
14396 ret, "b", "b", "iq", 1);
14397 return ret;
14398- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
14399+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
14400 ret, "w", "w", "ir", 2);
14401 return ret;
14402- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
14403+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
14404 ret, "l", "k", "ir", 4);
14405 return ret;
14406- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
14407+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
14408 ret, "q", "", "er", 8);
14409 return ret;
14410 case 10:
14411- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
14412+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
14413 ret, "q", "", "er", 10);
14414 if (unlikely(ret))
14415 return ret;
14416 asm("":::"memory");
14417- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
14418+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
14419 ret, "w", "w", "ir", 2);
14420 return ret;
14421 case 16:
14422- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
14423+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
14424 ret, "q", "", "er", 16);
14425 if (unlikely(ret))
14426 return ret;
14427 asm("":::"memory");
14428- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
14429+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
14430 ret, "q", "", "er", 8);
14431 return ret;
14432 default:
14433- return copy_user_generic((__force void *)dst, src, size);
14434+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
14435 }
14436 }
14437
14438 static __always_inline __must_check
14439-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14440+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
14441 {
14442- int ret = 0;
14443+ unsigned ret = 0;
14444
14445 might_fault();
14446+
14447+ if (size > INT_MAX)
14448+ return size;
14449+
14450+#ifdef CONFIG_PAX_MEMORY_UDEREF
14451+ if (!__access_ok(VERIFY_READ, src, size))
14452+ return size;
14453+ if (!__access_ok(VERIFY_WRITE, dst, size))
14454+ return size;
14455+#endif
14456+
14457 if (!__builtin_constant_p(size))
14458- return copy_user_generic((__force void *)dst,
14459- (__force void *)src, size);
14460+ return copy_user_generic((__force_kernel void *)____m(dst),
14461+ (__force_kernel const void *)____m(src), size);
14462 switch (size) {
14463 case 1: {
14464 u8 tmp;
14465- __get_user_asm(tmp, (u8 __user *)src,
14466+ __get_user_asm(tmp, (const u8 __user *)src,
14467 ret, "b", "b", "=q", 1);
14468 if (likely(!ret))
14469 __put_user_asm(tmp, (u8 __user *)dst,
14470@@ -185,7 +251,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14471 }
14472 case 2: {
14473 u16 tmp;
14474- __get_user_asm(tmp, (u16 __user *)src,
14475+ __get_user_asm(tmp, (const u16 __user *)src,
14476 ret, "w", "w", "=r", 2);
14477 if (likely(!ret))
14478 __put_user_asm(tmp, (u16 __user *)dst,
14479@@ -195,7 +261,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14480
14481 case 4: {
14482 u32 tmp;
14483- __get_user_asm(tmp, (u32 __user *)src,
14484+ __get_user_asm(tmp, (const u32 __user *)src,
14485 ret, "l", "k", "=r", 4);
14486 if (likely(!ret))
14487 __put_user_asm(tmp, (u32 __user *)dst,
14488@@ -204,7 +270,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14489 }
14490 case 8: {
14491 u64 tmp;
14492- __get_user_asm(tmp, (u64 __user *)src,
14493+ __get_user_asm(tmp, (const u64 __user *)src,
14494 ret, "q", "", "=r", 8);
14495 if (likely(!ret))
14496 __put_user_asm(tmp, (u64 __user *)dst,
14497@@ -212,41 +278,72 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14498 return ret;
14499 }
14500 default:
14501- return copy_user_generic((__force void *)dst,
14502- (__force void *)src, size);
14503+ return copy_user_generic((__force_kernel void *)____m(dst),
14504+ (__force_kernel const void *)____m(src), size);
14505 }
14506 }
14507
14508 static __must_check __always_inline int
14509-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
14510+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
14511 {
14512- return copy_user_generic(dst, (__force const void *)src, size);
14513+ if (size > INT_MAX)
14514+ return size;
14515+
14516+#ifdef CONFIG_PAX_MEMORY_UDEREF
14517+ if (!__access_ok(VERIFY_READ, src, size))
14518+ return size;
14519+#endif
14520+
14521+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
14522 }
14523
14524-static __must_check __always_inline int
14525-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
14526+static __must_check __always_inline unsigned long
14527+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
14528 {
14529- return copy_user_generic((__force void *)dst, src, size);
14530+ if (size > INT_MAX)
14531+ return size;
14532+
14533+#ifdef CONFIG_PAX_MEMORY_UDEREF
14534+ if (!__access_ok(VERIFY_WRITE, dst, size))
14535+ return size;
14536+#endif
14537+
14538+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
14539 }
14540
14541-extern long __copy_user_nocache(void *dst, const void __user *src,
14542- unsigned size, int zerorest);
14543+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
14544+ unsigned long size, int zerorest) __size_overflow(3);
14545
14546-static inline int
14547-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
14548+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
14549 {
14550 might_sleep();
14551+
14552+ if (size > INT_MAX)
14553+ return size;
14554+
14555+#ifdef CONFIG_PAX_MEMORY_UDEREF
14556+ if (!__access_ok(VERIFY_READ, src, size))
14557+ return size;
14558+#endif
14559+
14560 return __copy_user_nocache(dst, src, size, 1);
14561 }
14562
14563-static inline int
14564-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
14565- unsigned size)
14566+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
14567+ unsigned long size)
14568 {
14569+ if (size > INT_MAX)
14570+ return size;
14571+
14572+#ifdef CONFIG_PAX_MEMORY_UDEREF
14573+ if (!__access_ok(VERIFY_READ, src, size))
14574+ return size;
14575+#endif
14576+
14577 return __copy_user_nocache(dst, src, size, 0);
14578 }
14579
14580-unsigned long
14581-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
14582+extern unsigned long
14583+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
14584
14585 #endif /* _ASM_X86_UACCESS_64_H */
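Most of the uaccess_64.h churn funnels through ____m(): roughly, under UDEREF on x86_64 userland is also mapped at a fixed shadow offset visible to the kernel, and kernel-side accessors rebase any pointer below that offset before dereferencing it, so a stray kernel dereference of a raw user pointer faults instead of silently succeeding. A minimal sketch of the rebasing (the constant is a stand-in for PAX_USER_SHADOW_BASE):

#define SHADOW_BASE_SK	0x100000000000UL	/* illustrative value only */

static inline const void *rebase_user_ptr(const void *p)
{
	unsigned long x = (unsigned long)p;

	if (x < SHADOW_BASE_SK)
		x += SHADOW_BASE_SK;	/* redirect into the shadow mapping */
	return (const void *)x;
}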
14586diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
14587index 5b238981..77fdd78 100644
14588--- a/arch/x86/include/asm/word-at-a-time.h
14589+++ b/arch/x86/include/asm/word-at-a-time.h
14590@@ -11,7 +11,7 @@
14591 * and shift, for example.
14592 */
14593 struct word_at_a_time {
14594- const unsigned long one_bits, high_bits;
14595+ unsigned long one_bits, high_bits;
14596 };
14597
14598 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
14599diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
14600index 0415cda..b43d877 100644
14601--- a/arch/x86/include/asm/xsave.h
14602+++ b/arch/x86/include/asm/xsave.h
14603@@ -71,7 +71,9 @@ static inline int xsave_user(struct xsave_struct __user *buf)
14604 return -EFAULT;
14605
14606 __asm__ __volatile__(ASM_STAC "\n"
14607- "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
14608+ "1:"
14609+ __copyuser_seg
14610+ ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
14611 "2: " ASM_CLAC "\n"
14612 ".section .fixup,\"ax\"\n"
14613 "3: movl $-1,%[err]\n"
14614@@ -87,12 +89,14 @@ static inline int xsave_user(struct xsave_struct __user *buf)
14615 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
14616 {
14617 int err;
14618- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
14619+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
14620 u32 lmask = mask;
14621 u32 hmask = mask >> 32;
14622
14623 __asm__ __volatile__(ASM_STAC "\n"
14624- "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
14625+ "1:"
14626+ __copyuser_seg
14627+ ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
14628 "2: " ASM_CLAC "\n"
14629 ".section .fixup,\"ax\"\n"
14630 "3: movl $-1,%[err]\n"
14631diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
14632index 91ce48f..a48ea05 100644
14633--- a/arch/x86/kernel/Makefile
14634+++ b/arch/x86/kernel/Makefile
14635@@ -23,7 +23,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
14636 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
14637 obj-$(CONFIG_IRQ_WORK) += irq_work.o
14638 obj-y += probe_roms.o
14639-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
14640+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
14641 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
14642 obj-y += syscall_$(BITS).o
14643 obj-$(CONFIG_X86_64) += vsyscall_64.o
14644diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
14645index e651f7a..c995dc4 100644
14646--- a/arch/x86/kernel/acpi/boot.c
14647+++ b/arch/x86/kernel/acpi/boot.c
14648@@ -1576,7 +1576,7 @@ int __init acpi_boot_init(void)
14649 acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet);
14650
14651 if (!acpi_noirq)
14652- x86_init.pci.init = pci_acpi_init;
14653+ *(void **)&x86_init.pci.init = pci_acpi_init;
14654
14655 return 0;
14656 }
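The `*(void **)&x86_init.pci.init = pci_acpi_init;` form (repeated in the gart, aperture and numachip hunks below) exists because grsecurity constifies these ops structures; the cast defeats the compiler's const checking for a one-off boot-time assignment, while making the page itself writable is handled separately (see pax_open_kernel() in the next file). An illustration of the cast alone (GNU C; writing a const object this way is undefined in portable C and only works because the kernel opens the page first):

struct pci_ops_sk {
	int (*init)(void);
};

static int my_init(void) { return 0; }

static const struct pci_ops_sk table = { 0 };	/* constified */

static void patch_table(void)
{
	/* bypasses const checking; faults unless the page is writable */
	*(void **)&table.init = (void *)my_init;
}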
14657diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
14658index 11676cf..a8cf3ec 100644
14659--- a/arch/x86/kernel/acpi/sleep.c
14660+++ b/arch/x86/kernel/acpi/sleep.c
14661@@ -74,8 +74,12 @@ int acpi_suspend_lowlevel(void)
14662 #else /* CONFIG_64BIT */
14663 #ifdef CONFIG_SMP
14664 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
14665+
14666+ pax_open_kernel();
14667 early_gdt_descr.address =
14668 (unsigned long)get_cpu_gdt_table(smp_processor_id());
14669+ pax_close_kernel();
14670+
14671 initial_gs = per_cpu_offset(smp_processor_id());
14672 #endif
14673 initial_code = (unsigned long)wakeup_long64;
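pax_open_kernel()/pax_close_kernel() bracket writes to data that KERNEXEC/constification keeps read-only, as with early_gdt_descr above. Conceptually the simplest x86 implementation clears CR0.WP so ring-0 stores ignore page-level write protection, then restores it; a simplified, uniprocessor-only sketch of that idea (privileged code, illustrative only, not the kernel's implementation):

#define X86_CR0_WP_SK	(1UL << 16)

static inline unsigned long open_kernel_sk(void)
{
	unsigned long cr0;

	asm volatile("mov %%cr0, %0" : "=r" (cr0));
	/* clear WP: ring-0 stores now ignore read-only page bits */
	asm volatile("mov %0, %%cr0" : : "r" (cr0 & ~X86_CR0_WP_SK));
	return cr0;
}

static inline void close_kernel_sk(unsigned long cr0)
{
	asm volatile("mov %0, %%cr0" : : "r" (cr0));	/* restore WP */
}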
14674diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
14675index 13ab720..95d5442 100644
14676--- a/arch/x86/kernel/acpi/wakeup_32.S
14677+++ b/arch/x86/kernel/acpi/wakeup_32.S
14678@@ -30,13 +30,11 @@ wakeup_pmode_return:
14679 # and restore the stack ... but you need gdt for this to work
14680 movl saved_context_esp, %esp
14681
14682- movl %cs:saved_magic, %eax
14683- cmpl $0x12345678, %eax
14684+ cmpl $0x12345678, saved_magic
14685 jne bogus_magic
14686
14687 # jump to place where we left off
14688- movl saved_eip, %eax
14689- jmp *%eax
14690+ jmp *(saved_eip)
14691
14692 bogus_magic:
14693 jmp bogus_magic
14694diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
14695index ef5ccca..bd83949 100644
14696--- a/arch/x86/kernel/alternative.c
14697+++ b/arch/x86/kernel/alternative.c
14698@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
14699 */
14700 for (a = start; a < end; a++) {
14701 instr = (u8 *)&a->instr_offset + a->instr_offset;
14702+
14703+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14704+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
14705+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
14706+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
14707+#endif
14708+
14709 replacement = (u8 *)&a->repl_offset + a->repl_offset;
14710 BUG_ON(a->replacementlen > a->instrlen);
14711 BUG_ON(a->instrlen > sizeof(insnbuf));
14712@@ -299,10 +306,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
14713 for (poff = start; poff < end; poff++) {
14714 u8 *ptr = (u8 *)poff + *poff;
14715
14716+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14717+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
14718+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
14719+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
14720+#endif
14721+
14722 if (!*poff || ptr < text || ptr >= text_end)
14723 continue;
14724 /* turn DS segment override prefix into lock prefix */
14725- if (*ptr == 0x3e)
14726+ if (*ktla_ktva(ptr) == 0x3e)
14727 text_poke(ptr, ((unsigned char []){0xf0}), 1);
14728 }
14729 mutex_unlock(&text_mutex);
14730@@ -317,10 +330,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
14731 for (poff = start; poff < end; poff++) {
14732 u8 *ptr = (u8 *)poff + *poff;
14733
14734+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14735+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
14736+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
14737+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
14738+#endif
14739+
14740 if (!*poff || ptr < text || ptr >= text_end)
14741 continue;
14742 /* turn lock prefix into DS segment override prefix */
14743- if (*ptr == 0xf0)
14744+ if (*ktla_ktva(ptr) == 0xf0)
14745 text_poke(ptr, ((unsigned char []){0x3E}), 1);
14746 }
14747 mutex_unlock(&text_mutex);
14748@@ -468,7 +487,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
14749
14750 BUG_ON(p->len > MAX_PATCH_LEN);
14751 /* prep the buffer with the original instructions */
14752- memcpy(insnbuf, p->instr, p->len);
14753+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
14754 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
14755 (unsigned long)p->instr, p->len);
14756
14757@@ -515,7 +534,7 @@ void __init alternative_instructions(void)
14758 if (!uniproc_patched || num_possible_cpus() == 1)
14759 free_init_pages("SMP alternatives",
14760 (unsigned long)__smp_locks,
14761- (unsigned long)__smp_locks_end);
14762+ PAGE_ALIGN((unsigned long)__smp_locks_end));
14763 #endif
14764
14765 apply_paravirt(__parainstructions, __parainstructions_end);
14766@@ -535,13 +554,17 @@ void __init alternative_instructions(void)
14767 * instructions. And on the local CPU you need to be protected again NMI or MCE
14768 * handlers seeing an inconsistent instruction while you patch.
14769 */
14770-void *__init_or_module text_poke_early(void *addr, const void *opcode,
14771+void *__kprobes text_poke_early(void *addr, const void *opcode,
14772 size_t len)
14773 {
14774 unsigned long flags;
14775 local_irq_save(flags);
14776- memcpy(addr, opcode, len);
14777+
14778+ pax_open_kernel();
14779+ memcpy(ktla_ktva(addr), opcode, len);
14780 sync_core();
14781+ pax_close_kernel();
14782+
14783 local_irq_restore(flags);
14784 /* Could also do a CLFLUSH here to speed up CPU recovery; but
14785 that causes hangs on some VIA CPUs. */
14786@@ -563,36 +586,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
14787 */
14788 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
14789 {
14790- unsigned long flags;
14791- char *vaddr;
14792+ unsigned char *vaddr = ktla_ktva(addr);
14793 struct page *pages[2];
14794- int i;
14795+ size_t i;
14796
14797 if (!core_kernel_text((unsigned long)addr)) {
14798- pages[0] = vmalloc_to_page(addr);
14799- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
14800+ pages[0] = vmalloc_to_page(vaddr);
14801+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
14802 } else {
14803- pages[0] = virt_to_page(addr);
14804+ pages[0] = virt_to_page(vaddr);
14805 WARN_ON(!PageReserved(pages[0]));
14806- pages[1] = virt_to_page(addr + PAGE_SIZE);
14807+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
14808 }
14809 BUG_ON(!pages[0]);
14810- local_irq_save(flags);
14811- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
14812- if (pages[1])
14813- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
14814- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
14815- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
14816- clear_fixmap(FIX_TEXT_POKE0);
14817- if (pages[1])
14818- clear_fixmap(FIX_TEXT_POKE1);
14819- local_flush_tlb();
14820- sync_core();
14821- /* Could also do a CLFLUSH here to speed up CPU recovery; but
14822- that causes hangs on some VIA CPUs. */
14823+ text_poke_early(addr, opcode, len);
14824 for (i = 0; i < len; i++)
14825- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
14826- local_irq_restore(flags);
14827+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
14828 return addr;
14829 }
14830
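ktla_ktva() ("kernel text logical address to kernel text virtual address") and its inverse recur throughout alternative.c because with KERNEXEC on i386 the kernel text is reachable at two addresses a constant delta apart — the hunks above shift by ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR — and patching must go through the view that pax_open_kernel() makes writable; this is also why text_poke() collapses into text_poke_early() instead of the fixmap dance. The translation itself is just a constant offset (a sketch; the delta value is illustrative only):

#define KTEXT_DELTA_SK	0x01000000UL	/* stand-in for the real delta */

#define ktla_ktva_sk(addr) \
	((void *)((unsigned long)(addr) + KTEXT_DELTA_SK))
#define ktva_ktla_sk(addr) \
	((void *)((unsigned long)(addr) - KTEXT_DELTA_SK))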
14831diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
14832index e663112..21938a3 100644
14833--- a/arch/x86/kernel/amd_gart_64.c
14834+++ b/arch/x86/kernel/amd_gart_64.c
14835@@ -851,7 +851,7 @@ int __init gart_iommu_init(void)
14836
14837 flush_gart();
14838 dma_ops = &gart_dma_ops;
14839- x86_platform.iommu_shutdown = gart_iommu_shutdown;
14840+ *(void **)&x86_platform.iommu_shutdown = gart_iommu_shutdown;
14841 swiotlb = 0;
14842
14843 return 0;
14844diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
14845index d5fd66f..6119b16 100644
14846--- a/arch/x86/kernel/aperture_64.c
14847+++ b/arch/x86/kernel/aperture_64.c
14848@@ -390,7 +390,7 @@ int __init gart_iommu_hole_init(void)
14849
14850 iommu_detected = 1;
14851 gart_iommu_aperture = 1;
14852- x86_init.iommu.iommu_init = gart_iommu_init;
14853+ *(void **)&x86_init.iommu.iommu_init = gart_iommu_init;
14854
14855 ctl = read_pci_config(bus, slot, 3,
14856 AMD64_GARTAPERTURECTL);
14857diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
14858index b17416e..be6e5dc 100644
14859--- a/arch/x86/kernel/apic/apic.c
14860+++ b/arch/x86/kernel/apic/apic.c
14861@@ -185,7 +185,7 @@ int first_system_vector = 0xfe;
14862 /*
14863 * Debug level, exported for io_apic.c
14864 */
14865-unsigned int apic_verbosity;
14866+int apic_verbosity;
14867
14868 int pic_mode;
14869
14870@@ -1923,7 +1923,7 @@ void smp_error_interrupt(struct pt_regs *regs)
14871 apic_write(APIC_ESR, 0);
14872 v1 = apic_read(APIC_ESR);
14873 ack_APIC_irq();
14874- atomic_inc(&irq_err_count);
14875+ atomic_inc_unchecked(&irq_err_count);
14876
14877 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
14878 smp_processor_id(), v0 , v1);
14879@@ -2155,7 +2155,9 @@ void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v))
14880 for (drv = __apicdrivers; drv < __apicdrivers_end; drv++) {
14881 /* Should happen once for each apic */
14882 WARN_ON((*drv)->eoi_write == eoi_write);
14883- (*drv)->eoi_write = eoi_write;
14884+ pax_open_kernel();
14885+ *(void **)&(*drv)->eoi_write = eoi_write;
14886+ pax_close_kernel();
14887 }
14888 }
14889
14890diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
14891index a65829a..6ddc249 100644
14892--- a/arch/x86/kernel/apic/apic_numachip.c
14893+++ b/arch/x86/kernel/apic/apic_numachip.c
14894@@ -178,7 +178,7 @@ static int __init numachip_system_init(void)
14895 if (!numachip_system)
14896 return 0;
14897
14898- x86_cpuinit.fixup_cpu_id = fixup_cpu_id;
14899+ *(void **)&x86_cpuinit.fixup_cpu_id = fixup_cpu_id;
14900
14901 map_csrs();
14902
14903diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
14904index 1817fa9..7bff097 100644
14905--- a/arch/x86/kernel/apic/io_apic.c
14906+++ b/arch/x86/kernel/apic/io_apic.c
14907@@ -1084,7 +1084,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
14908 }
14909 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
14910
14911-void lock_vector_lock(void)
14912+void lock_vector_lock(void) __acquires(vector_lock)
14913 {
14914 /* Used to ensure the online set of cpus does not change
14915 * during assign_irq_vector.
14916@@ -1092,7 +1092,7 @@ void lock_vector_lock(void)
14917 raw_spin_lock(&vector_lock);
14918 }
14919
14920-void unlock_vector_lock(void)
14921+void unlock_vector_lock(void) __releases(vector_lock)
14922 {
14923 raw_spin_unlock(&vector_lock);
14924 }
14925@@ -2411,7 +2411,7 @@ static void ack_apic_edge(struct irq_data *data)
14926 ack_APIC_irq();
14927 }
14928
14929-atomic_t irq_mis_count;
14930+atomic_unchecked_t irq_mis_count;
14931
14932 #ifdef CONFIG_GENERIC_PENDING_IRQ
14933 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
14934@@ -2552,7 +2552,7 @@ static void ack_apic_level(struct irq_data *data)
14935 * at the cpu.
14936 */
14937 if (!(v & (1 << (i & 0x1f)))) {
14938- atomic_inc(&irq_mis_count);
14939+ atomic_inc_unchecked(&irq_mis_count);
14940
14941 eoi_ioapic_irq(irq, cfg);
14942 }
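The __acquires()/__releases() annotations added to lock_vector_lock()/unlock_vector_lock() above change no generated code; they exist for sparse's context checking. A self-contained sketch, assuming the stock definitions from the kernel's compiler.h (outside a sparse run they expand to nothing), with a pthread mutex standing in for vector_lock:

#ifdef __CHECKER__
# define __acquires(x)  __attribute__((context(x,0,1)))
# define __releases(x)  __attribute__((context(x,1,0)))
#else
# define __acquires(x)
# define __releases(x)
#endif

#include <pthread.h>

static pthread_mutex_t vector_lock = PTHREAD_MUTEX_INITIALIZER;

/* sparse -Wcontext can now verify that callers balance these */
void lock_vector_lock(void) __acquires(vector_lock)
{
    pthread_mutex_lock(&vector_lock);
}

void unlock_vector_lock(void) __releases(vector_lock)
{
    pthread_mutex_unlock(&vector_lock);
}

int main(void)
{
    lock_vector_lock();
    unlock_vector_lock();
    return 0;
}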
14943diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
14944index d661ee9..512c0a1 100644
14945--- a/arch/x86/kernel/apic/numaq_32.c
14946+++ b/arch/x86/kernel/apic/numaq_32.c
14947@@ -257,14 +257,14 @@ static __init void early_check_numaq(void)
14948 early_get_smp_config();
14949
14950 if (found_numaq) {
14951- x86_init.mpparse.mpc_record = numaq_mpc_record;
14952- x86_init.mpparse.setup_ioapic_ids = x86_init_noop;
14953- x86_init.mpparse.mpc_apic_id = mpc_apic_id;
14954- x86_init.mpparse.smp_read_mpc_oem = smp_read_mpc_oem;
14955- x86_init.mpparse.mpc_oem_pci_bus = mpc_oem_pci_bus;
14956- x86_init.mpparse.mpc_oem_bus_info = mpc_oem_bus_info;
14957- x86_init.timers.tsc_pre_init = numaq_tsc_init;
14958- x86_init.pci.init = pci_numaq_init;
14959+ *(void **)&x86_init.mpparse.mpc_record = numaq_mpc_record;
14960+ *(void **)&x86_init.mpparse.setup_ioapic_ids = x86_init_noop;
14961+ *(void **)&x86_init.mpparse.mpc_apic_id = mpc_apic_id;
14962+ *(void **)&x86_init.mpparse.smp_read_mpc_oem = smp_read_mpc_oem;
14963+ *(void **)&x86_init.mpparse.mpc_oem_pci_bus = mpc_oem_pci_bus;
14964+ *(void **)&x86_init.mpparse.mpc_oem_bus_info = mpc_oem_bus_info;
14965+ *(void **)&x86_init.timers.tsc_pre_init = numaq_tsc_init;
14966+ *(void **)&x86_init.pci.init = pci_numaq_init;
14967 }
14968 }
14969
14970diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
14971index 8cfade9..8ea7b51 100644
14972--- a/arch/x86/kernel/apic/x2apic_uv_x.c
14973+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
14974@@ -139,8 +139,8 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
14975 is_uv1 ? UV1_HUB_REVISION_BASE : UV2_HUB_REVISION_BASE;
14976 pnodeid = early_get_pnodeid();
14977 early_get_apic_pnode_shift();
14978- x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
14979- x86_platform.nmi_init = uv_nmi_init;
14980+ *(void **)&x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
14981+ *(void **)&x86_platform.nmi_init = uv_nmi_init;
14982 if (!strcmp(oem_table_id, "UVL"))
14983 uv_system_type = UV_LEGACY_APIC;
14984 else if (!strcmp(oem_table_id, "UVX"))
14985diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
14986index d65464e..1035d31 100644
14987--- a/arch/x86/kernel/apm_32.c
14988+++ b/arch/x86/kernel/apm_32.c
14989@@ -412,7 +412,7 @@ static DEFINE_MUTEX(apm_mutex);
14990 * This is for buggy BIOS's that refer to (real mode) segment 0x40
14991 * even though they are called in protected mode.
14992 */
14993-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
14994+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
14995 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
14996
14997 static const char driver_version[] = "1.16ac"; /* no spaces */
14998@@ -590,7 +590,10 @@ static long __apm_bios_call(void *_call)
14999 BUG_ON(cpu != 0);
15000 gdt = get_cpu_gdt_table(cpu);
15001 save_desc_40 = gdt[0x40 / 8];
15002+
15003+ pax_open_kernel();
15004 gdt[0x40 / 8] = bad_bios_desc;
15005+ pax_close_kernel();
15006
15007 apm_irq_save(flags);
15008 APM_DO_SAVE_SEGS;
15009@@ -599,7 +602,11 @@ static long __apm_bios_call(void *_call)
15010 &call->esi);
15011 APM_DO_RESTORE_SEGS;
15012 apm_irq_restore(flags);
15013+
15014+ pax_open_kernel();
15015 gdt[0x40 / 8] = save_desc_40;
15016+ pax_close_kernel();
15017+
15018 put_cpu();
15019
15020 return call->eax & 0xff;
15021@@ -666,7 +673,10 @@ static long __apm_bios_call_simple(void *_call)
15022 BUG_ON(cpu != 0);
15023 gdt = get_cpu_gdt_table(cpu);
15024 save_desc_40 = gdt[0x40 / 8];
15025+
15026+ pax_open_kernel();
15027 gdt[0x40 / 8] = bad_bios_desc;
15028+ pax_close_kernel();
15029
15030 apm_irq_save(flags);
15031 APM_DO_SAVE_SEGS;
15032@@ -674,7 +684,11 @@ static long __apm_bios_call_simple(void *_call)
15033 &call->eax);
15034 APM_DO_RESTORE_SEGS;
15035 apm_irq_restore(flags);
15036+
15037+ pax_open_kernel();
15038 gdt[0x40 / 8] = save_desc_40;
15039+ pax_close_kernel();
15040+
15041 put_cpu();
15042 return error;
15043 }
15044@@ -2345,12 +2359,15 @@ static int __init apm_init(void)
15045 * code to that CPU.
15046 */
15047 gdt = get_cpu_gdt_table(0);
15048+
15049+ pax_open_kernel();
15050 set_desc_base(&gdt[APM_CS >> 3],
15051 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
15052 set_desc_base(&gdt[APM_CS_16 >> 3],
15053 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
15054 set_desc_base(&gdt[APM_DS >> 3],
15055 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
15056+ pax_close_kernel();
15057
15058 proc_create("apm", 0, NULL, &apm_file_ops);
15059
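The apm hunks bracket every GDT write with pax_open_kernel()/pax_close_kernel(), since KERNEXEC keeps the GDT read-only between updates. A userspace analogy of that bracketing pattern, with mprotect() standing in for the kernel's write-protection toggling — an illustration, not the PaX implementation:

#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static uint64_t *table;                 /* page-aligned "descriptor table" */
static size_t tabsize;

static void open_table(void)  { mprotect(table, tabsize, PROT_READ | PROT_WRITE); }
static void close_table(void) { mprotect(table, tabsize, PROT_READ); }

int main(void)
{
    tabsize = (size_t)sysconf(_SC_PAGESIZE);
    table = mmap(NULL, tabsize, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (table == MAP_FAILED)
        return 1;
    memset(table, 0, tabsize);
    close_table();                      /* table is normally read-only */

    open_table();                       /* pax_open_kernel() analogue */
    table[8] = 0x00cf92000000ffffULL;   /* a flat 4 GiB data descriptor */
    close_table();                      /* pax_close_kernel() analogue */
    return 0;
}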
15060diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
15061index 2861082..6d4718e 100644
15062--- a/arch/x86/kernel/asm-offsets.c
15063+++ b/arch/x86/kernel/asm-offsets.c
15064@@ -33,6 +33,8 @@ void common(void) {
15065 OFFSET(TI_status, thread_info, status);
15066 OFFSET(TI_addr_limit, thread_info, addr_limit);
15067 OFFSET(TI_preempt_count, thread_info, preempt_count);
15068+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
15069+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
15070
15071 BLANK();
15072 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
15073@@ -53,8 +55,26 @@ void common(void) {
15074 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
15075 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
15076 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
15077+
15078+#ifdef CONFIG_PAX_KERNEXEC
15079+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
15080 #endif
15081
15082+#ifdef CONFIG_PAX_MEMORY_UDEREF
15083+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
15084+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
15085+#ifdef CONFIG_X86_64
15086+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
15087+#endif
15088+#endif
15089+
15090+#endif
15091+
15092+ BLANK();
15093+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
15094+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
15095+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
15096+
15097 #ifdef CONFIG_XEN
15098 BLANK();
15099 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
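The new OFFSET()/DEFINE() lines above export constants such as TI_lowest_stack and THREAD_SIZE_asm to assembly (the stack-erase code added to entry_32.S consumes them). A sketch of how the asm-offsets mechanism works, using a macro in the spirit of the kernel's kbuild DEFINE() but in a form that also assembles, so the demo links and runs; struct thread_info_like is a stand-in:

#include <stddef.h>

struct thread_info_like {               /* stand-in for struct thread_info */
    unsigned long flags;
    unsigned long lowest_stack;
};

#define DEFINE(sym, val) \
    asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
#define OFFSET(sym, str, mem) DEFINE(sym, offsetof(struct str, mem))

void common(void)                       /* never called; only its .s matters */
{
    DEFINE(PAGE_SIZE_asm, 4096);
    OFFSET(TI_lowest_stack, thread_info_like, lowest_stack);
}

int main(void) { return 0; }

Compiling with "gcc -S offsets.c" and grepping the output for "->" shows the markers the kbuild sed script turns into #define lines in the generated asm-offsets.h.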
15100diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
15101index 1b4754f..fbb4227 100644
15102--- a/arch/x86/kernel/asm-offsets_64.c
15103+++ b/arch/x86/kernel/asm-offsets_64.c
15104@@ -76,6 +76,7 @@ int main(void)
15105 BLANK();
15106 #undef ENTRY
15107
15108+ DEFINE(TSS_size, sizeof(struct tss_struct));
15109 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
15110 BLANK();
15111
15112diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
15113index a0e067d..9c7db16 100644
15114--- a/arch/x86/kernel/cpu/Makefile
15115+++ b/arch/x86/kernel/cpu/Makefile
15116@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
15117 CFLAGS_REMOVE_perf_event.o = -pg
15118 endif
15119
15120-# Make sure load_percpu_segment has no stackprotector
15121-nostackp := $(call cc-option, -fno-stack-protector)
15122-CFLAGS_common.o := $(nostackp)
15123-
15124 obj-y := intel_cacheinfo.o scattered.o topology.o
15125 obj-y += proc.o capflags.o powerflags.o common.o
15126 obj-y += vmware.o hypervisor.o mshyperv.o
15127diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
15128index 1b7d165..b9e2627 100644
15129--- a/arch/x86/kernel/cpu/amd.c
15130+++ b/arch/x86/kernel/cpu/amd.c
15131@@ -738,7 +738,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
15132 unsigned int size)
15133 {
15134 /* AMD errata T13 (order #21922) */
15135- if ((c->x86 == 6)) {
15136+ if (c->x86 == 6) {
15137 /* Duron Rev A0 */
15138 if (c->x86_model == 3 && c->x86_mask == 0)
15139 size = 64;
15140diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
15141index 7505f7b..d59dac0 100644
15142--- a/arch/x86/kernel/cpu/common.c
15143+++ b/arch/x86/kernel/cpu/common.c
15144@@ -86,60 +86,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
15145
15146 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
15147
15148-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
15149-#ifdef CONFIG_X86_64
15150- /*
15151- * We need valid kernel segments for data and code in long mode too
15152- * IRET will check the segment types kkeil 2000/10/28
15153- * Also sysret mandates a special GDT layout
15154- *
15155- * TLS descriptors are currently at a different place compared to i386.
15156- * Hopefully nobody expects them at a fixed place (Wine?)
15157- */
15158- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
15159- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
15160- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
15161- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
15162- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
15163- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
15164-#else
15165- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
15166- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
15167- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
15168- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
15169- /*
15170- * Segments used for calling PnP BIOS have byte granularity.
15171- * The code segments and data segments have fixed 64k limits,
15172- * the transfer segment sizes are set at run time.
15173- */
15174- /* 32-bit code */
15175- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
15176- /* 16-bit code */
15177- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
15178- /* 16-bit data */
15179- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
15180- /* 16-bit data */
15181- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
15182- /* 16-bit data */
15183- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
15184- /*
15185- * The APM segments have byte granularity and their bases
15186- * are set at run time. All have 64k limits.
15187- */
15188- /* 32-bit code */
15189- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
15190- /* 16-bit code */
15191- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
15192- /* data */
15193- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
15194-
15195- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
15196- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
15197- GDT_STACK_CANARY_INIT
15198-#endif
15199-} };
15200-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
15201-
15202 static int __init x86_xsave_setup(char *s)
15203 {
15204 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
15205@@ -389,7 +335,7 @@ void switch_to_new_gdt(int cpu)
15206 {
15207 struct desc_ptr gdt_descr;
15208
15209- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
15210+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
15211 gdt_descr.size = GDT_SIZE - 1;
15212 load_gdt(&gdt_descr);
15213 /* Reload the per-cpu base */
15214@@ -885,6 +831,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
15215 /* Filter out anything that depends on CPUID levels we don't have */
15216 filter_cpuid_features(c, true);
15217
15218+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
15219+ setup_clear_cpu_cap(X86_FEATURE_SEP);
15220+#endif
15221+
15222 /* If the model name is still unset, do table lookup. */
15223 if (!c->x86_model_id[0]) {
15224 const char *p;
15225@@ -1068,10 +1018,12 @@ static __init int setup_disablecpuid(char *arg)
15226 }
15227 __setup("clearcpuid=", setup_disablecpuid);
15228
15229+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
15230+EXPORT_PER_CPU_SYMBOL(current_tinfo);
15231+
15232 #ifdef CONFIG_X86_64
15233 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
15234-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
15235- (unsigned long) nmi_idt_table };
15236+struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
15237
15238 DEFINE_PER_CPU_FIRST(union irq_stack_union,
15239 irq_stack_union) __aligned(PAGE_SIZE);
15240@@ -1085,7 +1037,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
15241 EXPORT_PER_CPU_SYMBOL(current_task);
15242
15243 DEFINE_PER_CPU(unsigned long, kernel_stack) =
15244- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
15245+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
15246 EXPORT_PER_CPU_SYMBOL(kernel_stack);
15247
15248 DEFINE_PER_CPU(char *, irq_stack_ptr) =
15249@@ -1178,7 +1130,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
15250 {
15251 memset(regs, 0, sizeof(struct pt_regs));
15252 regs->fs = __KERNEL_PERCPU;
15253- regs->gs = __KERNEL_STACK_CANARY;
15254+ savesegment(gs, regs->gs);
15255
15256 return regs;
15257 }
15258@@ -1233,7 +1185,7 @@ void __cpuinit cpu_init(void)
15259 int i;
15260
15261 cpu = stack_smp_processor_id();
15262- t = &per_cpu(init_tss, cpu);
15263+ t = init_tss + cpu;
15264 oist = &per_cpu(orig_ist, cpu);
15265
15266 #ifdef CONFIG_NUMA
15267@@ -1259,7 +1211,7 @@ void __cpuinit cpu_init(void)
15268 switch_to_new_gdt(cpu);
15269 loadsegment(fs, 0);
15270
15271- load_idt((const struct desc_ptr *)&idt_descr);
15272+ load_idt(&idt_descr);
15273
15274 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
15275 syscall_init();
15276@@ -1268,7 +1220,6 @@ void __cpuinit cpu_init(void)
15277 wrmsrl(MSR_KERNEL_GS_BASE, 0);
15278 barrier();
15279
15280- x86_configure_nx();
15281 if (cpu != 0)
15282 enable_x2apic();
15283
15284@@ -1321,7 +1272,7 @@ void __cpuinit cpu_init(void)
15285 {
15286 int cpu = smp_processor_id();
15287 struct task_struct *curr = current;
15288- struct tss_struct *t = &per_cpu(init_tss, cpu);
15289+ struct tss_struct *t = init_tss + cpu;
15290 struct thread_struct *thread = &curr->thread;
15291
15292 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
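In the cpu_init() hunks above, &per_cpu(init_tss, cpu) becomes init_tss + cpu: under this patch the TSS instances form an ordinary NR_CPUS-sized array rather than per-cpu-allocator objects, so per-CPU access is plain pointer arithmetic. A toy illustration with made-up types and sizes:

#include <stdio.h>

#define NR_CPUS 8

struct tss_like { unsigned long sp0; unsigned long stack[64]; };

static struct tss_like init_tss[NR_CPUS];   /* one slot per CPU */

int main(void)
{
    int cpu = 3;
    struct tss_like *t = init_tss + cpu;    /* was &per_cpu(init_tss, cpu) */
    t->sp0 = 0xdeadbeef;
    printf("cpu %d tss at %p\n", cpu, (void *)t);
    return 0;
}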
15293diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
15294index 198e019..867575e 100644
15295--- a/arch/x86/kernel/cpu/intel.c
15296+++ b/arch/x86/kernel/cpu/intel.c
15297@@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
15298 * Update the IDT descriptor and reload the IDT so that
15299 * it uses the read-only mapped virtual address.
15300 */
15301- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
15302+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
15303 load_idt(&idt_descr);
15304 }
15305 #endif
15306diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
15307index 93c5451..3887433 100644
15308--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
15309+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
15310@@ -983,6 +983,22 @@ static struct attribute *default_attrs[] = {
15311 };
15312
15313 #ifdef CONFIG_AMD_NB
15314+static struct attribute *default_attrs_amd_nb[] = {
15315+ &type.attr,
15316+ &level.attr,
15317+ &coherency_line_size.attr,
15318+ &physical_line_partition.attr,
15319+ &ways_of_associativity.attr,
15320+ &number_of_sets.attr,
15321+ &size.attr,
15322+ &shared_cpu_map.attr,
15323+ &shared_cpu_list.attr,
15324+ NULL,
15325+ NULL,
15326+ NULL,
15327+ NULL
15328+};
15329+
15330 static struct attribute ** __cpuinit amd_l3_attrs(void)
15331 {
15332 static struct attribute **attrs;
15333@@ -993,18 +1009,7 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
15334
15335 n = ARRAY_SIZE(default_attrs);
15336
15337- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
15338- n += 2;
15339-
15340- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
15341- n += 1;
15342-
15343- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
15344- if (attrs == NULL)
15345- return attrs = default_attrs;
15346-
15347- for (n = 0; default_attrs[n]; n++)
15348- attrs[n] = default_attrs[n];
15349+ attrs = default_attrs_amd_nb;
15350
15351 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
15352 attrs[n++] = &cache_disable_0.attr;
15353@@ -1055,6 +1060,13 @@ static struct kobj_type ktype_cache = {
15354 .default_attrs = default_attrs,
15355 };
15356
15357+#ifdef CONFIG_AMD_NB
15358+static struct kobj_type ktype_cache_amd_nb = {
15359+ .sysfs_ops = &sysfs_ops,
15360+ .default_attrs = default_attrs_amd_nb,
15361+};
15362+#endif
15363+
15364 static struct kobj_type ktype_percpu_entry = {
15365 .sysfs_ops = &sysfs_ops,
15366 };
15367@@ -1120,20 +1132,26 @@ static int __cpuinit cache_add_dev(struct device *dev)
15368 return retval;
15369 }
15370
15371+#ifdef CONFIG_AMD_NB
15372+ amd_l3_attrs();
15373+#endif
15374+
15375 for (i = 0; i < num_cache_leaves; i++) {
15376+ struct kobj_type *ktype;
15377+
15378 this_object = INDEX_KOBJECT_PTR(cpu, i);
15379 this_object->cpu = cpu;
15380 this_object->index = i;
15381
15382 this_leaf = CPUID4_INFO_IDX(cpu, i);
15383
15384- ktype_cache.default_attrs = default_attrs;
15385+ ktype = &ktype_cache;
15386 #ifdef CONFIG_AMD_NB
15387 if (this_leaf->base.nb)
15388- ktype_cache.default_attrs = amd_l3_attrs();
15389+ ktype = &ktype_cache_amd_nb;
15390 #endif
15391 retval = kobject_init_and_add(&(this_object->kobj),
15392- &ktype_cache,
15393+ ktype,
15394 per_cpu(ici_cache_kobject, cpu),
15395 "index%1lu", i);
15396 if (unlikely(retval)) {
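The cacheinfo rework above replaces a kzalloc()ed attribute array with the static default_attrs_amd_nb[], whose trailing NULL slots amd_l3_attrs() fills in once when the optional AMD northbridge features exist, so the table itself needs no runtime allocation. A compact model of that pattern, using strings in place of struct attribute pointers:

#include <stddef.h>
#include <stdio.h>

static const char *base_attrs[] = { "type", "level", "size" };

/* base entries + 3 optional slots + terminating NULL */
static const char *attrs_amd_nb[sizeof(base_attrs)/sizeof(base_attrs[0]) + 3 + 1];

static void init_attrs(int has_index_disable, int has_partitioning)
{
    size_t n = 0;
    for (; n < sizeof(base_attrs)/sizeof(base_attrs[0]); n++)
        attrs_amd_nb[n] = base_attrs[n];
    if (has_index_disable) {                /* two extra attributes */
        attrs_amd_nb[n++] = "cache_disable_0";
        attrs_amd_nb[n++] = "cache_disable_1";
    }
    if (has_partitioning)
        attrs_amd_nb[n++] = "subcaches";
    /* remaining slots stay NULL, terminating the list */
}

int main(void)
{
    init_attrs(1, 0);
    for (size_t i = 0; attrs_amd_nb[i]; i++)
        puts(attrs_amd_nb[i]);
    return 0;
}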
15397diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
15398index 46cbf86..55c7292 100644
15399--- a/arch/x86/kernel/cpu/mcheck/mce.c
15400+++ b/arch/x86/kernel/cpu/mcheck/mce.c
15401@@ -45,6 +45,7 @@
15402 #include <asm/processor.h>
15403 #include <asm/mce.h>
15404 #include <asm/msr.h>
15405+#include <asm/local.h>
15406
15407 #include "mce-internal.h"
15408
15409@@ -254,7 +255,7 @@ static void print_mce(struct mce *m)
15410 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
15411 m->cs, m->ip);
15412
15413- if (m->cs == __KERNEL_CS)
15414+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
15415 print_symbol("{%s}", m->ip);
15416 pr_cont("\n");
15417 }
15418@@ -287,10 +288,10 @@ static void print_mce(struct mce *m)
15419
15420 #define PANIC_TIMEOUT 5 /* 5 seconds */
15421
15422-static atomic_t mce_paniced;
15423+static atomic_unchecked_t mce_paniced;
15424
15425 static int fake_panic;
15426-static atomic_t mce_fake_paniced;
15427+static atomic_unchecked_t mce_fake_paniced;
15428
15429 /* Panic in progress. Enable interrupts and wait for final IPI */
15430 static void wait_for_panic(void)
15431@@ -314,7 +315,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
15432 /*
15433 * Make sure only one CPU runs in machine check panic
15434 */
15435- if (atomic_inc_return(&mce_paniced) > 1)
15436+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
15437 wait_for_panic();
15438 barrier();
15439
15440@@ -322,7 +323,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
15441 console_verbose();
15442 } else {
15443 /* Don't log too much for fake panic */
15444- if (atomic_inc_return(&mce_fake_paniced) > 1)
15445+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
15446 return;
15447 }
15448 /* First print corrected ones that are still unlogged */
15449@@ -694,7 +695,7 @@ static int mce_timed_out(u64 *t)
15450 * might have been modified by someone else.
15451 */
15452 rmb();
15453- if (atomic_read(&mce_paniced))
15454+ if (atomic_read_unchecked(&mce_paniced))
15455 wait_for_panic();
15456 if (!monarch_timeout)
15457 goto out;
15458@@ -1659,7 +1660,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
15459 }
15460
15461 /* Call the installed machine check handler for this CPU setup. */
15462-void (*machine_check_vector)(struct pt_regs *, long error_code) =
15463+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
15464 unexpected_machine_check;
15465
15466 /*
15467@@ -1682,7 +1683,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
15468 return;
15469 }
15470
15471+ pax_open_kernel();
15472 machine_check_vector = do_machine_check;
15473+ pax_close_kernel();
15474
15475 __mcheck_cpu_init_generic();
15476 __mcheck_cpu_init_vendor(c);
15477@@ -1696,7 +1699,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
15478 */
15479
15480 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
15481-static int mce_chrdev_open_count; /* #times opened */
15482+static local_t mce_chrdev_open_count; /* #times opened */
15483 static int mce_chrdev_open_exclu; /* already open exclusive? */
15484
15485 static int mce_chrdev_open(struct inode *inode, struct file *file)
15486@@ -1704,7 +1707,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
15487 spin_lock(&mce_chrdev_state_lock);
15488
15489 if (mce_chrdev_open_exclu ||
15490- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
15491+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
15492 spin_unlock(&mce_chrdev_state_lock);
15493
15494 return -EBUSY;
15495@@ -1712,7 +1715,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
15496
15497 if (file->f_flags & O_EXCL)
15498 mce_chrdev_open_exclu = 1;
15499- mce_chrdev_open_count++;
15500+ local_inc(&mce_chrdev_open_count);
15501
15502 spin_unlock(&mce_chrdev_state_lock);
15503
15504@@ -1723,7 +1726,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
15505 {
15506 spin_lock(&mce_chrdev_state_lock);
15507
15508- mce_chrdev_open_count--;
15509+ local_dec(&mce_chrdev_open_count);
15510 mce_chrdev_open_exclu = 0;
15511
15512 spin_unlock(&mce_chrdev_state_lock);
15513@@ -2367,7 +2370,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
15514 return NOTIFY_OK;
15515 }
15516
15517-static struct notifier_block mce_cpu_notifier __cpuinitdata = {
15518+static struct notifier_block mce_cpu_notifier __cpuinitconst = {
15519 .notifier_call = mce_cpu_callback,
15520 };
15521
15522@@ -2445,7 +2448,7 @@ struct dentry *mce_get_debugfs_dir(void)
15523 static void mce_reset(void)
15524 {
15525 cpu_missing = 0;
15526- atomic_set(&mce_fake_paniced, 0);
15527+ atomic_set_unchecked(&mce_fake_paniced, 0);
15528 atomic_set(&mce_executing, 0);
15529 atomic_set(&mce_callin, 0);
15530 atomic_set(&global_nwo, 0);
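The mce chrdev counter above switches to local_t, but the open/release logic is unchanged: one O_EXCL opener excludes, and is excluded by, everyone else. A userspace model of that logic with a mutex-protected counter (under the lock a plain long suffices for the model):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static long open_count;
static bool open_exclu;

static int chrdev_open(bool excl)
{
    int ret = 0;
    pthread_mutex_lock(&state_lock);
    if (open_exclu || (open_count && excl)) {
        ret = -16;                      /* -EBUSY */
    } else {
        if (excl)
            open_exclu = true;
        open_count++;
    }
    pthread_mutex_unlock(&state_lock);
    return ret;
}

static void chrdev_release(void)
{
    pthread_mutex_lock(&state_lock);
    open_count--;
    open_exclu = false;
    pthread_mutex_unlock(&state_lock);
}

int main(void)
{
    chrdev_open(false);
    int r = chrdev_open(true);          /* fails: already open non-exclusively */
    chrdev_release();
    return r == -16 ? 0 : 1;
}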
15531diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
15532index 2d5454c..51987eb 100644
15533--- a/arch/x86/kernel/cpu/mcheck/p5.c
15534+++ b/arch/x86/kernel/cpu/mcheck/p5.c
15535@@ -11,6 +11,7 @@
15536 #include <asm/processor.h>
15537 #include <asm/mce.h>
15538 #include <asm/msr.h>
15539+#include <asm/pgtable.h>
15540
15541 /* By default disabled */
15542 int mce_p5_enabled __read_mostly;
15543@@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
15544 if (!cpu_has(c, X86_FEATURE_MCE))
15545 return;
15546
15547+ pax_open_kernel();
15548 machine_check_vector = pentium_machine_check;
15549+ pax_close_kernel();
15550 /* Make sure the vector pointer is visible before we enable MCEs: */
15551 wmb();
15552
15553diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
15554index 2d7998f..17c9de1 100644
15555--- a/arch/x86/kernel/cpu/mcheck/winchip.c
15556+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
15557@@ -10,6 +10,7 @@
15558 #include <asm/processor.h>
15559 #include <asm/mce.h>
15560 #include <asm/msr.h>
15561+#include <asm/pgtable.h>
15562
15563 /* Machine check handler for WinChip C6: */
15564 static void winchip_machine_check(struct pt_regs *regs, long error_code)
15565@@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
15566 {
15567 u32 lo, hi;
15568
15569+ pax_open_kernel();
15570 machine_check_vector = winchip_machine_check;
15571+ pax_close_kernel();
15572 /* Make sure the vector pointer is visible before we enable MCEs: */
15573 wmb();
15574
15575diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
15576index 6b96110..0da73eb 100644
15577--- a/arch/x86/kernel/cpu/mtrr/main.c
15578+++ b/arch/x86/kernel/cpu/mtrr/main.c
15579@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
15580 u64 size_or_mask, size_and_mask;
15581 static bool mtrr_aps_delayed_init;
15582
15583-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
15584+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
15585
15586 const struct mtrr_ops *mtrr_if;
15587
15588diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
15589index df5e41f..816c719 100644
15590--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
15591+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
15592@@ -25,7 +25,7 @@ struct mtrr_ops {
15593 int (*validate_add_page)(unsigned long base, unsigned long size,
15594 unsigned int type);
15595 int (*have_wrcomb)(void);
15596-};
15597+} __do_const;
15598
15599 extern int generic_get_free_region(unsigned long base, unsigned long size,
15600 int replace_reg);
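__do_const above is a PaX constify-plugin attribute: every instance of the tagged ops structure is treated as const, pushing the function-pointer tables into read-only memory. Plain C approximates the effect by declaring instances const explicitly, as in this sketch with an abridged, renamed struct:

#include <stdio.h>

struct mtrr_ops_like {
    int (*get_free_region)(unsigned long base, unsigned long size);
    int (*have_wrcomb)(void);
};

static int generic_free_region(unsigned long b, unsigned long s)
{
    (void)b; (void)s;
    return 0;
}

static int generic_have_wrcomb(void) { return 1; }

static const struct mtrr_ops_like generic_mtrr_ops = {  /* lives in .rodata */
    .get_free_region = generic_free_region,
    .have_wrcomb     = generic_have_wrcomb,
};

int main(void)
{
    printf("wrcomb: %d\n", generic_mtrr_ops.have_wrcomb());
    return 0;
}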
15601diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
15602index 4a3374e..1ca3ecb 100644
15603--- a/arch/x86/kernel/cpu/perf_event.c
15604+++ b/arch/x86/kernel/cpu/perf_event.c
15605@@ -1765,7 +1765,7 @@ static unsigned long get_segment_base(unsigned int segment)
15606 if (idx > GDT_ENTRIES)
15607 return 0;
15608
15609- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
15610+ desc = get_cpu_gdt_table(smp_processor_id());
15611 }
15612
15613 return get_desc_base(desc + idx);
15614@@ -1855,7 +1855,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
15615 break;
15616
15617 perf_callchain_store(entry, frame.return_address);
15618- fp = frame.next_frame;
15619+ fp = (const void __force_user *)frame.next_frame;
15620 }
15621 }
15622
15623diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
15624index 324bb52..1a93d85 100644
15625--- a/arch/x86/kernel/cpu/perf_event_intel.c
15626+++ b/arch/x86/kernel/cpu/perf_event_intel.c
15627@@ -1949,10 +1949,10 @@ __init int intel_pmu_init(void)
15628 * v2 and above have a perf capabilities MSR
15629 */
15630 if (version > 1) {
15631- u64 capabilities;
15632+ u64 capabilities = x86_pmu.intel_cap.capabilities;
15633
15634- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
15635- x86_pmu.intel_cap.capabilities = capabilities;
15636+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
15637+ x86_pmu.intel_cap.capabilities = capabilities;
15638 }
15639
15640 intel_ds_init();
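The perf hunk above preloads the default and overwrites it only when rdmsrl_safe() succeeds, so a faulting MSR read (e.g. under a hypervisor that does not expose the register) no longer clobbers x86_pmu.intel_cap.capabilities. A userspace analogy of the same keep-the-default-on-failure pattern, via the msr driver's /dev/cpu/0/msr interface (needs root and the msr module; the fallback path is the point):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_PERF_CAPABILITIES 0x345

int main(void)
{
    uint64_t capabilities = 0;          /* default, kept on failure */
    int fd = open("/dev/cpu/0/msr", O_RDONLY);

    if (fd >= 0) {
        uint64_t v;
        /* the msr driver uses the file offset as the MSR number */
        if (pread(fd, &v, sizeof(v), MSR_IA32_PERF_CAPABILITIES) == sizeof(v))
            capabilities = v;           /* overwrite only on success */
        close(fd);
    }
    printf("capabilities: %#llx\n", (unsigned long long)capabilities);
    return 0;
}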
15641diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
15642index d22d0c4..088eb6f 100644
15643--- a/arch/x86/kernel/cpu/vmware.c
15644+++ b/arch/x86/kernel/cpu/vmware.c
15645@@ -79,7 +79,7 @@ static void __init vmware_platform_setup(void)
15646 VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
15647
15648 if (ebx != UINT_MAX)
15649- x86_platform.calibrate_tsc = vmware_get_tsc_khz;
15650+ *(void **)&x86_platform.calibrate_tsc = vmware_get_tsc_khz;
15651 else
15652 printk(KERN_WARNING
15653 "Failed to get TSC freq from the hypervisor\n");
15654diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
15655index 13ad899..f642b9a 100644
15656--- a/arch/x86/kernel/crash.c
15657+++ b/arch/x86/kernel/crash.c
15658@@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
15659 {
15660 #ifdef CONFIG_X86_32
15661 struct pt_regs fixed_regs;
15662-#endif
15663
15664-#ifdef CONFIG_X86_32
15665- if (!user_mode_vm(regs)) {
15666+ if (!user_mode(regs)) {
15667 crash_fixup_ss_esp(&fixed_regs, regs);
15668 regs = &fixed_regs;
15669 }
15670diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
15671index 37250fe..bf2ec74 100644
15672--- a/arch/x86/kernel/doublefault_32.c
15673+++ b/arch/x86/kernel/doublefault_32.c
15674@@ -11,7 +11,7 @@
15675
15676 #define DOUBLEFAULT_STACKSIZE (1024)
15677 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
15678-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
15679+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
15680
15681 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
15682
15683@@ -21,7 +21,7 @@ static void doublefault_fn(void)
15684 unsigned long gdt, tss;
15685
15686 store_gdt(&gdt_desc);
15687- gdt = gdt_desc.address;
15688+ gdt = (unsigned long)gdt_desc.address;
15689
15690 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
15691
15692@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
15693 /* 0x2 bit is always set */
15694 .flags = X86_EFLAGS_SF | 0x2,
15695 .sp = STACK_START,
15696- .es = __USER_DS,
15697+ .es = __KERNEL_DS,
15698 .cs = __KERNEL_CS,
15699 .ss = __KERNEL_DS,
15700- .ds = __USER_DS,
15701+ .ds = __KERNEL_DS,
15702 .fs = __KERNEL_PERCPU,
15703
15704 .__cr3 = __pa_nodebug(swapper_pg_dir),
15705diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
15706index ae42418b..787c16b 100644
15707--- a/arch/x86/kernel/dumpstack.c
15708+++ b/arch/x86/kernel/dumpstack.c
15709@@ -2,6 +2,9 @@
15710 * Copyright (C) 1991, 1992 Linus Torvalds
15711 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
15712 */
15713+#ifdef CONFIG_GRKERNSEC_HIDESYM
15714+#define __INCLUDED_BY_HIDESYM 1
15715+#endif
15716 #include <linux/kallsyms.h>
15717 #include <linux/kprobes.h>
15718 #include <linux/uaccess.h>
15719@@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
15720 static void
15721 print_ftrace_graph_addr(unsigned long addr, void *data,
15722 const struct stacktrace_ops *ops,
15723- struct thread_info *tinfo, int *graph)
15724+ struct task_struct *task, int *graph)
15725 {
15726- struct task_struct *task;
15727 unsigned long ret_addr;
15728 int index;
15729
15730 if (addr != (unsigned long)return_to_handler)
15731 return;
15732
15733- task = tinfo->task;
15734 index = task->curr_ret_stack;
15735
15736 if (!task->ret_stack || index < *graph)
15737@@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
15738 static inline void
15739 print_ftrace_graph_addr(unsigned long addr, void *data,
15740 const struct stacktrace_ops *ops,
15741- struct thread_info *tinfo, int *graph)
15742+ struct task_struct *task, int *graph)
15743 { }
15744 #endif
15745
15746@@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
15747 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
15748 */
15749
15750-static inline int valid_stack_ptr(struct thread_info *tinfo,
15751- void *p, unsigned int size, void *end)
15752+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
15753 {
15754- void *t = tinfo;
15755 if (end) {
15756 if (p < end && p >= (end-THREAD_SIZE))
15757 return 1;
15758@@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
15759 }
15760
15761 unsigned long
15762-print_context_stack(struct thread_info *tinfo,
15763+print_context_stack(struct task_struct *task, void *stack_start,
15764 unsigned long *stack, unsigned long bp,
15765 const struct stacktrace_ops *ops, void *data,
15766 unsigned long *end, int *graph)
15767 {
15768 struct stack_frame *frame = (struct stack_frame *)bp;
15769
15770- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
15771+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
15772 unsigned long addr;
15773
15774 addr = *stack;
15775@@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
15776 } else {
15777 ops->address(data, addr, 0);
15778 }
15779- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
15780+ print_ftrace_graph_addr(addr, data, ops, task, graph);
15781 }
15782 stack++;
15783 }
15784@@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
15785 EXPORT_SYMBOL_GPL(print_context_stack);
15786
15787 unsigned long
15788-print_context_stack_bp(struct thread_info *tinfo,
15789+print_context_stack_bp(struct task_struct *task, void *stack_start,
15790 unsigned long *stack, unsigned long bp,
15791 const struct stacktrace_ops *ops, void *data,
15792 unsigned long *end, int *graph)
15793@@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
15794 struct stack_frame *frame = (struct stack_frame *)bp;
15795 unsigned long *ret_addr = &frame->return_address;
15796
15797- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
15798+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
15799 unsigned long addr = *ret_addr;
15800
15801 if (!__kernel_text_address(addr))
15802@@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
15803 ops->address(data, addr, 1);
15804 frame = frame->next_frame;
15805 ret_addr = &frame->return_address;
15806- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
15807+ print_ftrace_graph_addr(addr, data, ops, task, graph);
15808 }
15809
15810 return (unsigned long)frame;
15811@@ -189,7 +188,7 @@ void dump_stack(void)
15812
15813 bp = stack_frame(current, NULL);
15814 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
15815- current->pid, current->comm, print_tainted(),
15816+ task_pid_nr(current), current->comm, print_tainted(),
15817 init_utsname()->release,
15818 (int)strcspn(init_utsname()->version, " "),
15819 init_utsname()->version);
15820@@ -225,6 +224,8 @@ unsigned __kprobes long oops_begin(void)
15821 }
15822 EXPORT_SYMBOL_GPL(oops_begin);
15823
15824+extern void gr_handle_kernel_exploit(void);
15825+
15826 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
15827 {
15828 if (regs && kexec_should_crash(current))
15829@@ -246,7 +247,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
15830 panic("Fatal exception in interrupt");
15831 if (panic_on_oops)
15832 panic("Fatal exception");
15833- do_exit(signr);
15834+
15835+ gr_handle_kernel_exploit();
15836+
15837+ do_group_exit(signr);
15838 }
15839
15840 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
15841@@ -274,7 +278,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
15842 print_modules();
15843 show_regs(regs);
15844 #ifdef CONFIG_X86_32
15845- if (user_mode_vm(regs)) {
15846+ if (user_mode(regs)) {
15847 sp = regs->sp;
15848 ss = regs->ss & 0xffff;
15849 } else {
15850@@ -302,7 +306,7 @@ void die(const char *str, struct pt_regs *regs, long err)
15851 unsigned long flags = oops_begin();
15852 int sig = SIGSEGV;
15853
15854- if (!user_mode_vm(regs))
15855+ if (!user_mode(regs))
15856 report_bug(regs->ip, regs);
15857
15858 if (__die(str, regs, err))
15859diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
15860index 1038a41..db2c12b 100644
15861--- a/arch/x86/kernel/dumpstack_32.c
15862+++ b/arch/x86/kernel/dumpstack_32.c
15863@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15864 bp = stack_frame(task, regs);
15865
15866 for (;;) {
15867- struct thread_info *context;
15868+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
15869
15870- context = (struct thread_info *)
15871- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
15872- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
15873+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
15874
15875- stack = (unsigned long *)context->previous_esp;
15876- if (!stack)
15877+ if (stack_start == task_stack_page(task))
15878 break;
15879+ stack = *(unsigned long **)stack_start;
15880 if (ops->stack(data, "IRQ") < 0)
15881 break;
15882 touch_nmi_watchdog();
15883@@ -86,7 +84,7 @@ void show_regs(struct pt_regs *regs)
15884 {
15885 int i;
15886
15887- __show_regs(regs, !user_mode_vm(regs));
15888+ __show_regs(regs, !user_mode(regs));
15889
15890 pr_emerg("Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
15891 TASK_COMM_LEN, current->comm, task_pid_nr(current),
15892@@ -95,21 +93,22 @@ void show_regs(struct pt_regs *regs)
15893 * When in-kernel, we also print out the stack and code at the
15894 * time of the fault..
15895 */
15896- if (!user_mode_vm(regs)) {
15897+ if (!user_mode(regs)) {
15898 unsigned int code_prologue = code_bytes * 43 / 64;
15899 unsigned int code_len = code_bytes;
15900 unsigned char c;
15901 u8 *ip;
15902+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
15903
15904 pr_emerg("Stack:\n");
15905 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
15906
15907 pr_emerg("Code:");
15908
15909- ip = (u8 *)regs->ip - code_prologue;
15910+ ip = (u8 *)regs->ip - code_prologue + cs_base;
15911 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
15912 /* try starting at IP */
15913- ip = (u8 *)regs->ip;
15914+ ip = (u8 *)regs->ip + cs_base;
15915 code_len = code_len - code_prologue + 1;
15916 }
15917 for (i = 0; i < code_len; i++, ip++) {
15918@@ -118,7 +117,7 @@ void show_regs(struct pt_regs *regs)
15919 pr_cont(" Bad EIP value.");
15920 break;
15921 }
15922- if (ip == (u8 *)regs->ip)
15923+ if (ip == (u8 *)regs->ip + cs_base)
15924 pr_cont(" <%02x>", c);
15925 else
15926 pr_cont(" %02x", c);
15927@@ -131,6 +130,7 @@ int is_valid_bugaddr(unsigned long ip)
15928 {
15929 unsigned short ud2;
15930
15931+ ip = ktla_ktva(ip);
15932 if (ip < PAGE_OFFSET)
15933 return 0;
15934 if (probe_kernel_address((unsigned short *)ip, ud2))
15935@@ -138,3 +138,15 @@ int is_valid_bugaddr(unsigned long ip)
15936
15937 return ud2 == 0x0b0f;
15938 }
15939+
15940+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15941+void pax_check_alloca(unsigned long size)
15942+{
15943+ unsigned long sp = (unsigned long)&sp, stack_left;
15944+
15945+ /* all kernel stacks are of the same size */
15946+ stack_left = sp & (THREAD_SIZE - 1);
15947+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
15948+}
15949+EXPORT_SYMBOL(pax_check_alloca);
15950+#endif
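The pax_check_alloca() added above leans on the fact that i386 kernel stacks are THREAD_SIZE-aligned and grow downward, so "sp & (THREAD_SIZE - 1)" is exactly the space left between sp and the bottom of the stack. A worked model of that arithmetic using an aligned heap block as a fake 8 KiB stack:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define THREAD_SIZE 8192UL              /* 2 pages, as on i386 */

int main(void)
{
    void *stack = aligned_alloc(THREAD_SIZE, THREAD_SIZE);
    assert(stack);

    /* pretend sp is 700 bytes above the bottom of the stack */
    uintptr_t sp = (uintptr_t)stack + 700;
    unsigned long stack_left = sp & (THREAD_SIZE - 1);

    printf("stack_left = %lu\n", stack_left);   /* prints 700 */
    /* pax_check_alloca(size) BUGs if stack_left < 256
       or size >= stack_left - 256; a 300-byte alloca passes here */
    assert(!(stack_left < 256 || 300 >= stack_left - 256));
    free(stack);
    return 0;
}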
15951diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
15952index b653675..51cc8c0 100644
15953--- a/arch/x86/kernel/dumpstack_64.c
15954+++ b/arch/x86/kernel/dumpstack_64.c
15955@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15956 unsigned long *irq_stack_end =
15957 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
15958 unsigned used = 0;
15959- struct thread_info *tinfo;
15960 int graph = 0;
15961 unsigned long dummy;
15962+ void *stack_start;
15963
15964 if (!task)
15965 task = current;
15966@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15967 * current stack address. If the stacks consist of nested
15968 * exceptions
15969 */
15970- tinfo = task_thread_info(task);
15971 for (;;) {
15972 char *id;
15973 unsigned long *estack_end;
15974+
15975 estack_end = in_exception_stack(cpu, (unsigned long)stack,
15976 &used, &id);
15977
15978@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15979 if (ops->stack(data, id) < 0)
15980 break;
15981
15982- bp = ops->walk_stack(tinfo, stack, bp, ops,
15983+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
15984 data, estack_end, &graph);
15985 ops->stack(data, "<EOE>");
15986 /*
15987@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15988 * second-to-last pointer (index -2 to end) in the
15989 * exception stack:
15990 */
15991+ if ((u16)estack_end[-1] != __KERNEL_DS)
15992+ goto out;
15993 stack = (unsigned long *) estack_end[-2];
15994 continue;
15995 }
15996@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15997 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
15998 if (ops->stack(data, "IRQ") < 0)
15999 break;
16000- bp = ops->walk_stack(tinfo, stack, bp,
16001+ bp = ops->walk_stack(task, irq_stack, stack, bp,
16002 ops, data, irq_stack_end, &graph);
16003 /*
16004 * We link to the next stack (which would be
16005@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
16006 /*
16007 * This handles the process stack:
16008 */
16009- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
16010+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
16011+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
16012+out:
16013 put_cpu();
16014 }
16015 EXPORT_SYMBOL(dump_trace);
16016@@ -249,7 +253,7 @@ void show_regs(struct pt_regs *regs)
16017 {
16018 int i;
16019 unsigned long sp;
16020- const int cpu = smp_processor_id();
16021+ const int cpu = raw_smp_processor_id();
16022 struct task_struct *cur = current;
16023
16024 sp = regs->sp;
16025@@ -304,3 +308,50 @@ int is_valid_bugaddr(unsigned long ip)
16026
16027 return ud2 == 0x0b0f;
16028 }
16029+
16030+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16031+void pax_check_alloca(unsigned long size)
16032+{
16033+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
16034+ unsigned cpu, used;
16035+ char *id;
16036+
16037+ /* check the process stack first */
16038+ stack_start = (unsigned long)task_stack_page(current);
16039+ stack_end = stack_start + THREAD_SIZE;
16040+ if (likely(stack_start <= sp && sp < stack_end)) {
16041+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
16042+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
16043+ return;
16044+ }
16045+
16046+ cpu = get_cpu();
16047+
16048+ /* check the irq stacks */
16049+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
16050+ stack_start = stack_end - IRQ_STACK_SIZE;
16051+ if (stack_start <= sp && sp < stack_end) {
16052+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
16053+ put_cpu();
16054+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
16055+ return;
16056+ }
16057+
16058+ /* check the exception stacks */
16059+ used = 0;
16060+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
16061+ stack_start = stack_end - EXCEPTION_STKSZ;
16062+ if (stack_end && stack_start <= sp && sp < stack_end) {
16063+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
16064+ put_cpu();
16065+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
16066+ return;
16067+ }
16068+
16069+ put_cpu();
16070+
16071+ /* unknown stack */
16072+ BUG();
16073+}
16074+EXPORT_SYMBOL(pax_check_alloca);
16075+#endif
16076diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
16077index 9b9f18b..9fcaa04 100644
16078--- a/arch/x86/kernel/early_printk.c
16079+++ b/arch/x86/kernel/early_printk.c
16080@@ -7,6 +7,7 @@
16081 #include <linux/pci_regs.h>
16082 #include <linux/pci_ids.h>
16083 #include <linux/errno.h>
16084+#include <linux/sched.h>
16085 #include <asm/io.h>
16086 #include <asm/processor.h>
16087 #include <asm/fcntl.h>
16088diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
16089index cf8639b..98fcee6 100644
16090--- a/arch/x86/kernel/entry_32.S
16091+++ b/arch/x86/kernel/entry_32.S
16092@@ -177,13 +177,153 @@
16093 /*CFI_REL_OFFSET gs, PT_GS*/
16094 .endm
16095 .macro SET_KERNEL_GS reg
16096+
16097+#ifdef CONFIG_CC_STACKPROTECTOR
16098 movl $(__KERNEL_STACK_CANARY), \reg
16099+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
16100+ movl $(__USER_DS), \reg
16101+#else
16102+ xorl \reg, \reg
16103+#endif
16104+
16105 movl \reg, %gs
16106 .endm
16107
16108 #endif /* CONFIG_X86_32_LAZY_GS */
16109
16110-.macro SAVE_ALL
16111+.macro pax_enter_kernel
16112+#ifdef CONFIG_PAX_KERNEXEC
16113+ call pax_enter_kernel
16114+#endif
16115+.endm
16116+
16117+.macro pax_exit_kernel
16118+#ifdef CONFIG_PAX_KERNEXEC
16119+ call pax_exit_kernel
16120+#endif
16121+.endm
16122+
16123+#ifdef CONFIG_PAX_KERNEXEC
16124+ENTRY(pax_enter_kernel)
16125+#ifdef CONFIG_PARAVIRT
16126+ pushl %eax
16127+ pushl %ecx
16128+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
16129+ mov %eax, %esi
16130+#else
16131+ mov %cr0, %esi
16132+#endif
16133+ bts $16, %esi
16134+ jnc 1f
16135+ mov %cs, %esi
16136+ cmp $__KERNEL_CS, %esi
16137+ jz 3f
16138+ ljmp $__KERNEL_CS, $3f
16139+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
16140+2:
16141+#ifdef CONFIG_PARAVIRT
16142+ mov %esi, %eax
16143+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
16144+#else
16145+ mov %esi, %cr0
16146+#endif
16147+3:
16148+#ifdef CONFIG_PARAVIRT
16149+ popl %ecx
16150+ popl %eax
16151+#endif
16152+ ret
16153+ENDPROC(pax_enter_kernel)
16154+
16155+ENTRY(pax_exit_kernel)
16156+#ifdef CONFIG_PARAVIRT
16157+ pushl %eax
16158+ pushl %ecx
16159+#endif
16160+ mov %cs, %esi
16161+ cmp $__KERNEXEC_KERNEL_CS, %esi
16162+ jnz 2f
16163+#ifdef CONFIG_PARAVIRT
16164+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
16165+ mov %eax, %esi
16166+#else
16167+ mov %cr0, %esi
16168+#endif
16169+ btr $16, %esi
16170+ ljmp $__KERNEL_CS, $1f
16171+1:
16172+#ifdef CONFIG_PARAVIRT
16173+ mov %esi, %eax
16174+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
16175+#else
16176+ mov %esi, %cr0
16177+#endif
16178+2:
16179+#ifdef CONFIG_PARAVIRT
16180+ popl %ecx
16181+ popl %eax
16182+#endif
16183+ ret
16184+ENDPROC(pax_exit_kernel)
16185+#endif
16186+
16187+.macro pax_erase_kstack
16188+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16189+ call pax_erase_kstack
16190+#endif
16191+.endm
16192+
16193+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16194+/*
16195+ * ebp: thread_info
16196+ */
16197+ENTRY(pax_erase_kstack)
16198+ pushl %edi
16199+ pushl %ecx
16200+ pushl %eax
16201+
16202+ mov TI_lowest_stack(%ebp), %edi
16203+ mov $-0xBEEF, %eax
16204+ std
16205+
16206+1: mov %edi, %ecx
16207+ and $THREAD_SIZE_asm - 1, %ecx
16208+ shr $2, %ecx
16209+ repne scasl
16210+ jecxz 2f
16211+
16212+ cmp $2*16, %ecx
16213+ jc 2f
16214+
16215+ mov $2*16, %ecx
16216+ repe scasl
16217+ jecxz 2f
16218+ jne 1b
16219+
16220+2: cld
16221+ mov %esp, %ecx
16222+ sub %edi, %ecx
16223+
16224+ cmp $THREAD_SIZE_asm, %ecx
16225+ jb 3f
16226+ ud2
16227+3:
16228+
16229+ shr $2, %ecx
16230+ rep stosl
16231+
16232+ mov TI_task_thread_sp0(%ebp), %edi
16233+ sub $128, %edi
16234+ mov %edi, TI_lowest_stack(%ebp)
16235+
16236+ popl %eax
16237+ popl %ecx
16238+ popl %edi
16239+ ret
16240+ENDPROC(pax_erase_kstack)
16241+#endif
16242+
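A C model of the pax_erase_kstack routine above may help: starting from the recorded low-water mark, the assembly scans downward for a run of 32 poison words (0xFFFF4111, i.e. the -0xBEEF it loads into %eax) to find where stale data ends, then refills everything from there up to the current stack pointer with the poison. This simplification indexes a heap array instead of walking real stack memory and glosses over the sp0-128 low-water reset:

#include <assert.h>
#include <stdint.h>

#define POISON    0xFFFF4111u           /* -0xBEEF as a 32-bit word */
#define RUN_WORDS 32                    /* the asm's 2*16 consecutive words */

/* words[0] is the stack bottom; sp_idx/low_idx are word indexes */
static void erase_kstack(uint32_t *words, size_t sp_idx, size_t *low_idx)
{
    size_t i = *low_idx;

    /* scan downward for RUN_WORDS consecutive poison words */
    while (i > 0) {
        size_t run = 0;
        while (i > 0 && words[i - 1] == POISON && run < RUN_WORDS) {
            i--;
            run++;
        }
        if (run >= RUN_WORDS || i == 0)
            break;                      /* confirmed-clean boundary found */
        i--;                            /* stale word: keep scanning down */
    }

    /* refill from the boundary up to the current stack pointer */
    for (size_t j = i; j < sp_idx; j++)
        words[j] = POISON;

    *low_idx = sp_idx;                  /* asm: thread.sp0 - 128, roughly */
}

int main(void)
{
    enum { N = 256 };
    uint32_t stack[N];
    for (int k = 0; k < N; k++)
        stack[k] = POISON;

    /* simulate a deep call that dirtied words 40..199 */
    for (int k = 40; k < 200; k++)
        stack[k] = 0x12345678;
    size_t low = 40;

    erase_kstack(stack, 200, &low);     /* sp is back at word 200 */
    for (int k = 0; k < 200; k++)
        assert(stack[k] == POISON);
    return 0;
}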
16243+.macro __SAVE_ALL _DS
16244 cld
16245 PUSH_GS
16246 pushl_cfi %fs
16247@@ -206,7 +346,7 @@
16248 CFI_REL_OFFSET ecx, 0
16249 pushl_cfi %ebx
16250 CFI_REL_OFFSET ebx, 0
16251- movl $(__USER_DS), %edx
16252+ movl $\_DS, %edx
16253 movl %edx, %ds
16254 movl %edx, %es
16255 movl $(__KERNEL_PERCPU), %edx
16256@@ -214,6 +354,15 @@
16257 SET_KERNEL_GS %edx
16258 .endm
16259
16260+.macro SAVE_ALL
16261+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
16262+ __SAVE_ALL __KERNEL_DS
16263+ pax_enter_kernel
16264+#else
16265+ __SAVE_ALL __USER_DS
16266+#endif
16267+.endm
16268+
16269 .macro RESTORE_INT_REGS
16270 popl_cfi %ebx
16271 CFI_RESTORE ebx
16272@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
16273 popfl_cfi
16274 jmp syscall_exit
16275 CFI_ENDPROC
16276-END(ret_from_fork)
16277+ENDPROC(ret_from_fork)
16278
16279 ENTRY(ret_from_kernel_thread)
16280 CFI_STARTPROC
16281@@ -344,7 +493,15 @@ ret_from_intr:
16282 andl $SEGMENT_RPL_MASK, %eax
16283 #endif
16284 cmpl $USER_RPL, %eax
16285+
16286+#ifdef CONFIG_PAX_KERNEXEC
16287+ jae resume_userspace
16288+
16289+ pax_exit_kernel
16290+ jmp resume_kernel
16291+#else
16292 jb resume_kernel # not returning to v8086 or userspace
16293+#endif
16294
16295 ENTRY(resume_userspace)
16296 LOCKDEP_SYS_EXIT
16297@@ -356,8 +513,8 @@ ENTRY(resume_userspace)
16298 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
16299 # int/exception return?
16300 jne work_pending
16301- jmp restore_all
16302-END(ret_from_exception)
16303+ jmp restore_all_pax
16304+ENDPROC(ret_from_exception)
16305
16306 #ifdef CONFIG_PREEMPT
16307 ENTRY(resume_kernel)
16308@@ -372,7 +529,7 @@ need_resched:
16309 jz restore_all
16310 call preempt_schedule_irq
16311 jmp need_resched
16312-END(resume_kernel)
16313+ENDPROC(resume_kernel)
16314 #endif
16315 CFI_ENDPROC
16316 /*
16317@@ -406,30 +563,45 @@ sysenter_past_esp:
16318 /*CFI_REL_OFFSET cs, 0*/
16319 /*
16320 * Push current_thread_info()->sysenter_return to the stack.
16321- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
16322- * pushed above; +8 corresponds to copy_thread's esp0 setting.
16323 */
16324- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
16325+ pushl_cfi $0
16326 CFI_REL_OFFSET eip, 0
16327
16328 pushl_cfi %eax
16329 SAVE_ALL
16330+ GET_THREAD_INFO(%ebp)
16331+ movl TI_sysenter_return(%ebp),%ebp
16332+ movl %ebp,PT_EIP(%esp)
16333 ENABLE_INTERRUPTS(CLBR_NONE)
16334
16335 /*
16336 * Load the potential sixth argument from user stack.
16337 * Careful about security.
16338 */
16339+ movl PT_OLDESP(%esp),%ebp
16340+
16341+#ifdef CONFIG_PAX_MEMORY_UDEREF
16342+ mov PT_OLDSS(%esp),%ds
16343+1: movl %ds:(%ebp),%ebp
16344+ push %ss
16345+ pop %ds
16346+#else
16347 cmpl $__PAGE_OFFSET-3,%ebp
16348 jae syscall_fault
16349 ASM_STAC
16350 1: movl (%ebp),%ebp
16351 ASM_CLAC
16352+#endif
16353+
16354 movl %ebp,PT_EBP(%esp)
16355 _ASM_EXTABLE(1b,syscall_fault)
16356
16357 GET_THREAD_INFO(%ebp)
16358
16359+#ifdef CONFIG_PAX_RANDKSTACK
16360+ pax_erase_kstack
16361+#endif
16362+
16363 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
16364 jnz sysenter_audit
16365 sysenter_do_call:
16366@@ -444,12 +616,24 @@ sysenter_do_call:
16367 testl $_TIF_ALLWORK_MASK, %ecx
16368 jne sysexit_audit
16369 sysenter_exit:
16370+
16371+#ifdef CONFIG_PAX_RANDKSTACK
16372+ pushl_cfi %eax
16373+ movl %esp, %eax
16374+ call pax_randomize_kstack
16375+ popl_cfi %eax
16376+#endif
16377+
16378+ pax_erase_kstack
16379+
16380 /* if something modifies registers it must also disable sysexit */
16381 movl PT_EIP(%esp), %edx
16382 movl PT_OLDESP(%esp), %ecx
16383 xorl %ebp,%ebp
16384 TRACE_IRQS_ON
16385 1: mov PT_FS(%esp), %fs
16386+2: mov PT_DS(%esp), %ds
16387+3: mov PT_ES(%esp), %es
16388 PTGS_TO_GS
16389 ENABLE_INTERRUPTS_SYSEXIT
16390
16391@@ -466,6 +650,9 @@ sysenter_audit:
16392 movl %eax,%edx /* 2nd arg: syscall number */
16393 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
16394 call __audit_syscall_entry
16395+
16396+ pax_erase_kstack
16397+
16398 pushl_cfi %ebx
16399 movl PT_EAX(%esp),%eax /* reload syscall number */
16400 jmp sysenter_do_call
16401@@ -491,10 +678,16 @@ sysexit_audit:
16402
16403 CFI_ENDPROC
16404 .pushsection .fixup,"ax"
16405-2: movl $0,PT_FS(%esp)
16406+4: movl $0,PT_FS(%esp)
16407+ jmp 1b
16408+5: movl $0,PT_DS(%esp)
16409+ jmp 1b
16410+6: movl $0,PT_ES(%esp)
16411 jmp 1b
16412 .popsection
16413- _ASM_EXTABLE(1b,2b)
16414+ _ASM_EXTABLE(1b,4b)
16415+ _ASM_EXTABLE(2b,5b)
16416+ _ASM_EXTABLE(3b,6b)
16417 PTGS_TO_GS_EX
16418 ENDPROC(ia32_sysenter_target)
16419
16420@@ -509,6 +702,11 @@ ENTRY(system_call)
16421 pushl_cfi %eax # save orig_eax
16422 SAVE_ALL
16423 GET_THREAD_INFO(%ebp)
16424+
16425+#ifdef CONFIG_PAX_RANDKSTACK
16426+ pax_erase_kstack
16427+#endif
16428+
16429 # system call tracing in operation / emulation
16430 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
16431 jnz syscall_trace_entry
16432@@ -527,6 +725,15 @@ syscall_exit:
16433 testl $_TIF_ALLWORK_MASK, %ecx # current->work
16434 jne syscall_exit_work
16435
16436+restore_all_pax:
16437+
16438+#ifdef CONFIG_PAX_RANDKSTACK
16439+ movl %esp, %eax
16440+ call pax_randomize_kstack
16441+#endif
16442+
16443+ pax_erase_kstack
16444+
16445 restore_all:
16446 TRACE_IRQS_IRET
16447 restore_all_notrace:
16448@@ -583,14 +790,34 @@ ldt_ss:
16449 * compensating for the offset by changing to the ESPFIX segment with
16450 * a base address that matches for the difference.
16451 */
16452-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
16453+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
16454 mov %esp, %edx /* load kernel esp */
16455 mov PT_OLDESP(%esp), %eax /* load userspace esp */
16456 mov %dx, %ax /* eax: new kernel esp */
16457 sub %eax, %edx /* offset (low word is 0) */
16458+#ifdef CONFIG_SMP
16459+ movl PER_CPU_VAR(cpu_number), %ebx
16460+ shll $PAGE_SHIFT_asm, %ebx
16461+ addl $cpu_gdt_table, %ebx
16462+#else
16463+ movl $cpu_gdt_table, %ebx
16464+#endif
16465 shr $16, %edx
16466- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
16467- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
16468+
16469+#ifdef CONFIG_PAX_KERNEXEC
16470+ mov %cr0, %esi
16471+ btr $16, %esi
16472+ mov %esi, %cr0
16473+#endif
16474+
16475+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
16476+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
16477+
16478+#ifdef CONFIG_PAX_KERNEXEC
16479+ bts $16, %esi
16480+ mov %esi, %cr0
16481+#endif
16482+
16483 pushl_cfi $__ESPFIX_SS
16484 pushl_cfi %eax /* new kernel esp */
16485 /* Disable interrupts, but do not irqtrace this section: we
16486@@ -619,20 +846,18 @@ work_resched:
16487 movl TI_flags(%ebp), %ecx
16488 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
16489 # than syscall tracing?
16490- jz restore_all
16491+ jz restore_all_pax
16492 testb $_TIF_NEED_RESCHED, %cl
16493 jnz work_resched
16494
16495 work_notifysig: # deal with pending signals and
16496 # notify-resume requests
16497+ movl %esp, %eax
16498 #ifdef CONFIG_VM86
16499 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
16500- movl %esp, %eax
16501 jne work_notifysig_v86 # returning to kernel-space or
16502 # vm86-space
16503 1:
16504-#else
16505- movl %esp, %eax
16506 #endif
16507 TRACE_IRQS_ON
16508 ENABLE_INTERRUPTS(CLBR_NONE)
16509@@ -653,7 +878,7 @@ work_notifysig_v86:
16510 movl %eax, %esp
16511 jmp 1b
16512 #endif
16513-END(work_pending)
16514+ENDPROC(work_pending)
16515
16516 # perform syscall exit tracing
16517 ALIGN
16518@@ -661,11 +886,14 @@ syscall_trace_entry:
16519 movl $-ENOSYS,PT_EAX(%esp)
16520 movl %esp, %eax
16521 call syscall_trace_enter
16522+
16523+ pax_erase_kstack
16524+
16525 /* What it returned is what we'll actually use. */
16526 cmpl $(NR_syscalls), %eax
16527 jnae syscall_call
16528 jmp syscall_exit
16529-END(syscall_trace_entry)
16530+ENDPROC(syscall_trace_entry)
16531
16532 # perform syscall exit tracing
16533 ALIGN
16534@@ -678,21 +906,25 @@ syscall_exit_work:
16535 movl %esp, %eax
16536 call syscall_trace_leave
16537 jmp resume_userspace
16538-END(syscall_exit_work)
16539+ENDPROC(syscall_exit_work)
16540 CFI_ENDPROC
16541
16542 RING0_INT_FRAME # can't unwind into user space anyway
16543 syscall_fault:
16544+#ifdef CONFIG_PAX_MEMORY_UDEREF
16545+ push %ss
16546+ pop %ds
16547+#endif
16548 ASM_CLAC
16549 GET_THREAD_INFO(%ebp)
16550 movl $-EFAULT,PT_EAX(%esp)
16551 jmp resume_userspace
16552-END(syscall_fault)
16553+ENDPROC(syscall_fault)
16554
16555 syscall_badsys:
16556 movl $-ENOSYS,PT_EAX(%esp)
16557 jmp resume_userspace
16558-END(syscall_badsys)
16559+ENDPROC(syscall_badsys)
16560 CFI_ENDPROC
16561 /*
16562 * End of kprobes section
16563@@ -763,6 +995,36 @@ ENTRY(ptregs_clone)
16564 CFI_ENDPROC
16565 ENDPROC(ptregs_clone)
16566
16567+ ALIGN;
16568+ENTRY(kernel_execve)
16569+ CFI_STARTPROC
16570+ pushl_cfi %ebp
16571+ sub $PT_OLDSS+4,%esp
16572+ pushl_cfi %edi
16573+ pushl_cfi %ecx
16574+ pushl_cfi %eax
16575+ lea 3*4(%esp),%edi
16576+ mov $PT_OLDSS/4+1,%ecx
16577+ xorl %eax,%eax
16578+ rep stosl
16579+ popl_cfi %eax
16580+ popl_cfi %ecx
16581+ popl_cfi %edi
16582+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
16583+ pushl_cfi %esp
16584+ call sys_execve
16585+ add $4,%esp
16586+ CFI_ADJUST_CFA_OFFSET -4
16587+ GET_THREAD_INFO(%ebp)
16588+ test %eax,%eax
16589+ jz syscall_exit
16590+ add $PT_OLDSS+4,%esp
16591+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
16592+ popl_cfi %ebp
16593+ ret
16594+ CFI_ENDPROC
16595+ENDPROC(kernel_execve)
16596+
16597 .macro FIXUP_ESPFIX_STACK
16598 /*
16599 * Switch back for ESPFIX stack to the normal zerobased stack
16600@@ -772,8 +1034,15 @@ ENDPROC(ptregs_clone)
16601 * normal stack and adjusts ESP with the matching offset.
16602 */
16603 /* fixup the stack */
16604- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
16605- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
16606+#ifdef CONFIG_SMP
16607+ movl PER_CPU_VAR(cpu_number), %ebx
16608+ shll $PAGE_SHIFT_asm, %ebx
16609+ addl $cpu_gdt_table, %ebx
16610+#else
16611+ movl $cpu_gdt_table, %ebx
16612+#endif
16613+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
16614+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
16615 shl $16, %eax
16616 addl %esp, %eax /* the adjusted stack pointer */
16617 pushl_cfi $__KERNEL_DS
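FIXUP_ESPFIX_STACK does the inverse of the stores above: it reads the two base bytes back out of this CPU's ESPFIX descriptor and glues them onto the 16-bit stack pointer to recover the full kernel ESP. The arithmetic, restated as a small sketch:

	/* rebuild the full ESP from the ESPFIX descriptor's base bytes
	 * (bits 16..23 live at byte 4, bits 24..31 at byte 7) */
	static unsigned long espfix_rebuild(const unsigned char *desc,
					    unsigned long esp16)
	{
		unsigned long high = ((unsigned long)desc[7] << 8 | desc[4]) << 16;

		return high + esp16;	/* shl $16, %eax; addl %esp, %eax */
	}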
16618@@ -826,7 +1095,7 @@ vector=vector+1
16619 .endr
16620 2: jmp common_interrupt
16621 .endr
16622-END(irq_entries_start)
16623+ENDPROC(irq_entries_start)
16624
16625 .previous
16626 END(interrupt)
16627@@ -877,7 +1146,7 @@ ENTRY(coprocessor_error)
16628 pushl_cfi $do_coprocessor_error
16629 jmp error_code
16630 CFI_ENDPROC
16631-END(coprocessor_error)
16632+ENDPROC(coprocessor_error)
16633
16634 ENTRY(simd_coprocessor_error)
16635 RING0_INT_FRAME
16636@@ -899,7 +1168,7 @@ ENTRY(simd_coprocessor_error)
16637 #endif
16638 jmp error_code
16639 CFI_ENDPROC
16640-END(simd_coprocessor_error)
16641+ENDPROC(simd_coprocessor_error)
16642
16643 ENTRY(device_not_available)
16644 RING0_INT_FRAME
16645@@ -908,18 +1177,18 @@ ENTRY(device_not_available)
16646 pushl_cfi $do_device_not_available
16647 jmp error_code
16648 CFI_ENDPROC
16649-END(device_not_available)
16650+ENDPROC(device_not_available)
16651
16652 #ifdef CONFIG_PARAVIRT
16653 ENTRY(native_iret)
16654 iret
16655 _ASM_EXTABLE(native_iret, iret_exc)
16656-END(native_iret)
16657+ENDPROC(native_iret)
16658
16659 ENTRY(native_irq_enable_sysexit)
16660 sti
16661 sysexit
16662-END(native_irq_enable_sysexit)
16663+ENDPROC(native_irq_enable_sysexit)
16664 #endif
16665
16666 ENTRY(overflow)
16667@@ -929,7 +1198,7 @@ ENTRY(overflow)
16668 pushl_cfi $do_overflow
16669 jmp error_code
16670 CFI_ENDPROC
16671-END(overflow)
16672+ENDPROC(overflow)
16673
16674 ENTRY(bounds)
16675 RING0_INT_FRAME
16676@@ -938,7 +1207,7 @@ ENTRY(bounds)
16677 pushl_cfi $do_bounds
16678 jmp error_code
16679 CFI_ENDPROC
16680-END(bounds)
16681+ENDPROC(bounds)
16682
16683 ENTRY(invalid_op)
16684 RING0_INT_FRAME
16685@@ -947,7 +1216,7 @@ ENTRY(invalid_op)
16686 pushl_cfi $do_invalid_op
16687 jmp error_code
16688 CFI_ENDPROC
16689-END(invalid_op)
16690+ENDPROC(invalid_op)
16691
16692 ENTRY(coprocessor_segment_overrun)
16693 RING0_INT_FRAME
16694@@ -956,7 +1225,7 @@ ENTRY(coprocessor_segment_overrun)
16695 pushl_cfi $do_coprocessor_segment_overrun
16696 jmp error_code
16697 CFI_ENDPROC
16698-END(coprocessor_segment_overrun)
16699+ENDPROC(coprocessor_segment_overrun)
16700
16701 ENTRY(invalid_TSS)
16702 RING0_EC_FRAME
16703@@ -964,7 +1233,7 @@ ENTRY(invalid_TSS)
16704 pushl_cfi $do_invalid_TSS
16705 jmp error_code
16706 CFI_ENDPROC
16707-END(invalid_TSS)
16708+ENDPROC(invalid_TSS)
16709
16710 ENTRY(segment_not_present)
16711 RING0_EC_FRAME
16712@@ -972,7 +1241,7 @@ ENTRY(segment_not_present)
16713 pushl_cfi $do_segment_not_present
16714 jmp error_code
16715 CFI_ENDPROC
16716-END(segment_not_present)
16717+ENDPROC(segment_not_present)
16718
16719 ENTRY(stack_segment)
16720 RING0_EC_FRAME
16721@@ -980,7 +1249,7 @@ ENTRY(stack_segment)
16722 pushl_cfi $do_stack_segment
16723 jmp error_code
16724 CFI_ENDPROC
16725-END(stack_segment)
16726+ENDPROC(stack_segment)
16727
16728 ENTRY(alignment_check)
16729 RING0_EC_FRAME
16730@@ -988,7 +1257,7 @@ ENTRY(alignment_check)
16731 pushl_cfi $do_alignment_check
16732 jmp error_code
16733 CFI_ENDPROC
16734-END(alignment_check)
16735+ENDPROC(alignment_check)
16736
16737 ENTRY(divide_error)
16738 RING0_INT_FRAME
16739@@ -997,7 +1266,7 @@ ENTRY(divide_error)
16740 pushl_cfi $do_divide_error
16741 jmp error_code
16742 CFI_ENDPROC
16743-END(divide_error)
16744+ENDPROC(divide_error)
16745
16746 #ifdef CONFIG_X86_MCE
16747 ENTRY(machine_check)
16748@@ -1007,7 +1276,7 @@ ENTRY(machine_check)
16749 pushl_cfi machine_check_vector
16750 jmp error_code
16751 CFI_ENDPROC
16752-END(machine_check)
16753+ENDPROC(machine_check)
16754 #endif
16755
16756 ENTRY(spurious_interrupt_bug)
16757@@ -1017,7 +1286,7 @@ ENTRY(spurious_interrupt_bug)
16758 pushl_cfi $do_spurious_interrupt_bug
16759 jmp error_code
16760 CFI_ENDPROC
16761-END(spurious_interrupt_bug)
16762+ENDPROC(spurious_interrupt_bug)
16763 /*
16764 * End of kprobes section
16765 */
16766@@ -1120,7 +1389,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
16767
16768 ENTRY(mcount)
16769 ret
16770-END(mcount)
16771+ENDPROC(mcount)
16772
16773 ENTRY(ftrace_caller)
16774 cmpl $0, function_trace_stop
16775@@ -1153,7 +1422,7 @@ ftrace_graph_call:
16776 .globl ftrace_stub
16777 ftrace_stub:
16778 ret
16779-END(ftrace_caller)
16780+ENDPROC(ftrace_caller)
16781
16782 ENTRY(ftrace_regs_caller)
16783 pushf /* push flags before compare (in cs location) */
16784@@ -1254,7 +1523,7 @@ trace:
16785 popl %ecx
16786 popl %eax
16787 jmp ftrace_stub
16788-END(mcount)
16789+ENDPROC(mcount)
16790 #endif /* CONFIG_DYNAMIC_FTRACE */
16791 #endif /* CONFIG_FUNCTION_TRACER */
16792
16793@@ -1272,7 +1541,7 @@ ENTRY(ftrace_graph_caller)
16794 popl %ecx
16795 popl %eax
16796 ret
16797-END(ftrace_graph_caller)
16798+ENDPROC(ftrace_graph_caller)
16799
16800 .globl return_to_handler
16801 return_to_handler:
16802@@ -1328,15 +1597,18 @@ error_code:
16803 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
16804 REG_TO_PTGS %ecx
16805 SET_KERNEL_GS %ecx
16806- movl $(__USER_DS), %ecx
16807+ movl $(__KERNEL_DS), %ecx
16808 movl %ecx, %ds
16809 movl %ecx, %es
16810+
16811+ pax_enter_kernel
16812+
16813 TRACE_IRQS_OFF
16814 movl %esp,%eax # pt_regs pointer
16815 call *%edi
16816 jmp ret_from_exception
16817 CFI_ENDPROC
16818-END(page_fault)
16819+ENDPROC(page_fault)
16820
16821 /*
16822 * Debug traps and NMI can happen at the one SYSENTER instruction
16823@@ -1379,7 +1651,7 @@ debug_stack_correct:
16824 call do_debug
16825 jmp ret_from_exception
16826 CFI_ENDPROC
16827-END(debug)
16828+ENDPROC(debug)
16829
16830 /*
16831 * NMI is doubly nasty. It can happen _while_ we're handling
16832@@ -1417,6 +1689,9 @@ nmi_stack_correct:
16833 xorl %edx,%edx # zero error code
16834 movl %esp,%eax # pt_regs pointer
16835 call do_nmi
16836+
16837+ pax_exit_kernel
16838+
16839 jmp restore_all_notrace
16840 CFI_ENDPROC
16841
16842@@ -1453,12 +1728,15 @@ nmi_espfix_stack:
16843 FIXUP_ESPFIX_STACK # %eax == %esp
16844 xorl %edx,%edx # zero error code
16845 call do_nmi
16846+
16847+ pax_exit_kernel
16848+
16849 RESTORE_REGS
16850 lss 12+4(%esp), %esp # back to espfix stack
16851 CFI_ADJUST_CFA_OFFSET -24
16852 jmp irq_return
16853 CFI_ENDPROC
16854-END(nmi)
16855+ENDPROC(nmi)
16856
16857 ENTRY(int3)
16858 RING0_INT_FRAME
16859@@ -1471,14 +1749,14 @@ ENTRY(int3)
16860 call do_int3
16861 jmp ret_from_exception
16862 CFI_ENDPROC
16863-END(int3)
16864+ENDPROC(int3)
16865
16866 ENTRY(general_protection)
16867 RING0_EC_FRAME
16868 pushl_cfi $do_general_protection
16869 jmp error_code
16870 CFI_ENDPROC
16871-END(general_protection)
16872+ENDPROC(general_protection)
16873
16874 #ifdef CONFIG_KVM_GUEST
16875 ENTRY(async_page_fault)
16876@@ -1487,7 +1765,7 @@ ENTRY(async_page_fault)
16877 pushl_cfi $do_async_page_fault
16878 jmp error_code
16879 CFI_ENDPROC
16880-END(async_page_fault)
16881+ENDPROC(async_page_fault)
16882 #endif
16883
16884 /*
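That closes the entry_32.S portion. The steady END to ENDPROC churn throughout it is not cosmetic: ENDPROC additionally marks the symbol as @function in the ELF symbol table, which anything reasoning about function boundaries (including PaX's tooling) depends on. From include/linux/linkage.h of this era, approximately:

	#ifndef END
	#define END(name) \
		.size name, .-name
	#endif

	#ifndef ENDPROC
	#define ENDPROC(name) \
		.type name, @function; \
		END(name)
	#endif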
16885diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
16886index 1328fe4..cb03298 100644
16887--- a/arch/x86/kernel/entry_64.S
16888+++ b/arch/x86/kernel/entry_64.S
16889@@ -59,6 +59,8 @@
16890 #include <asm/rcu.h>
16891 #include <asm/smap.h>
16892 #include <linux/err.h>
16893+#include <asm/pgtable.h>
16894+#include <asm/alternative-asm.h>
16895
16896 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
16897 #include <linux/elf-em.h>
16898@@ -80,8 +82,9 @@
16899 #ifdef CONFIG_DYNAMIC_FTRACE
16900
16901 ENTRY(function_hook)
16902+ pax_force_retaddr
16903 retq
16904-END(function_hook)
16905+ENDPROC(function_hook)
16906
16907 /* skip is set if stack has been adjusted */
16908 .macro ftrace_caller_setup skip=0
16909@@ -122,8 +125,9 @@ GLOBAL(ftrace_graph_call)
16910 #endif
16911
16912 GLOBAL(ftrace_stub)
16913+ pax_force_retaddr
16914 retq
16915-END(ftrace_caller)
16916+ENDPROC(ftrace_caller)
16917
16918 ENTRY(ftrace_regs_caller)
16919 /* Save the current flags before compare (in SS location)*/
16920@@ -191,7 +195,7 @@ ftrace_restore_flags:
16921 popfq
16922 jmp ftrace_stub
16923
16924-END(ftrace_regs_caller)
16925+ENDPROC(ftrace_regs_caller)
16926
16927
16928 #else /* ! CONFIG_DYNAMIC_FTRACE */
16929@@ -212,6 +216,7 @@ ENTRY(function_hook)
16930 #endif
16931
16932 GLOBAL(ftrace_stub)
16933+ pax_force_retaddr
16934 retq
16935
16936 trace:
16937@@ -225,12 +230,13 @@ trace:
16938 #endif
16939 subq $MCOUNT_INSN_SIZE, %rdi
16940
16941+ pax_force_fptr ftrace_trace_function
16942 call *ftrace_trace_function
16943
16944 MCOUNT_RESTORE_FRAME
16945
16946 jmp ftrace_stub
16947-END(function_hook)
16948+ENDPROC(function_hook)
16949 #endif /* CONFIG_DYNAMIC_FTRACE */
16950 #endif /* CONFIG_FUNCTION_TRACER */
16951
16952@@ -252,8 +258,9 @@ ENTRY(ftrace_graph_caller)
16953
16954 MCOUNT_RESTORE_FRAME
16955
16956+ pax_force_retaddr
16957 retq
16958-END(ftrace_graph_caller)
16959+ENDPROC(ftrace_graph_caller)
16960
16961 GLOBAL(return_to_handler)
16962 subq $24, %rsp
16963@@ -269,7 +276,9 @@ GLOBAL(return_to_handler)
16964 movq 8(%rsp), %rdx
16965 movq (%rsp), %rax
16966 addq $24, %rsp
16967+ pax_force_fptr %rdi
16968 jmp *%rdi
16969+ENDPROC(return_to_handler)
16970 #endif
16971
16972
16973@@ -284,6 +293,273 @@ ENTRY(native_usergs_sysret64)
16974 ENDPROC(native_usergs_sysret64)
16975 #endif /* CONFIG_PARAVIRT */
16976
16977+ .macro ljmpq sel, off
16978+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
16979+ .byte 0x48; ljmp *1234f(%rip)
16980+ .pushsection .rodata
16981+ .align 16
16982+ 1234: .quad \off; .word \sel
16983+ .popsection
16984+#else
16985+ pushq $\sel
16986+ pushq $\off
16987+ lretq
16988+#endif
16989+ .endm
16990+
16991+ .macro pax_enter_kernel
16992+ pax_set_fptr_mask
16993+#ifdef CONFIG_PAX_KERNEXEC
16994+ call pax_enter_kernel
16995+#endif
16996+ .endm
16997+
16998+ .macro pax_exit_kernel
16999+#ifdef CONFIG_PAX_KERNEXEC
17000+ call pax_exit_kernel
17001+#endif
17002+ .endm
17003+
17004+#ifdef CONFIG_PAX_KERNEXEC
17005+ENTRY(pax_enter_kernel)
17006+ pushq %rdi
17007+
17008+#ifdef CONFIG_PARAVIRT
17009+ PV_SAVE_REGS(CLBR_RDI)
17010+#endif
17011+
17012+ GET_CR0_INTO_RDI
17013+ bts $16,%rdi
17014+ jnc 3f
17015+ mov %cs,%edi
17016+ cmp $__KERNEL_CS,%edi
17017+ jnz 2f
17018+1:
17019+
17020+#ifdef CONFIG_PARAVIRT
17021+ PV_RESTORE_REGS(CLBR_RDI)
17022+#endif
17023+
17024+ popq %rdi
17025+ pax_force_retaddr
17026+ retq
17027+
17028+2: ljmpq __KERNEL_CS,1f
17029+3: ljmpq __KERNEXEC_KERNEL_CS,4f
17030+4: SET_RDI_INTO_CR0
17031+ jmp 1b
17032+ENDPROC(pax_enter_kernel)
17033+
17034+ENTRY(pax_exit_kernel)
17035+ pushq %rdi
17036+
17037+#ifdef CONFIG_PARAVIRT
17038+ PV_SAVE_REGS(CLBR_RDI)
17039+#endif
17040+
17041+ mov %cs,%rdi
17042+ cmp $__KERNEXEC_KERNEL_CS,%edi
17043+ jz 2f
17044+1:
17045+
17046+#ifdef CONFIG_PARAVIRT
17047+ PV_RESTORE_REGS(CLBR_RDI);
17048+#endif
17049+
17050+ popq %rdi
17051+ pax_force_retaddr
17052+ retq
17053+
17054+2: GET_CR0_INTO_RDI
17055+ btr $16,%rdi
17056+ ljmpq __KERNEL_CS,3f
17057+3: SET_RDI_INTO_CR0
17058+ jmp 1b
17059+ENDPROC(pax_exit_kernel)
17060+#endif
17061+
17062+ .macro pax_enter_kernel_user
17063+ pax_set_fptr_mask
17064+#ifdef CONFIG_PAX_MEMORY_UDEREF
17065+ call pax_enter_kernel_user
17066+#endif
17067+ .endm
17068+
17069+ .macro pax_exit_kernel_user
17070+#ifdef CONFIG_PAX_MEMORY_UDEREF
17071+ call pax_exit_kernel_user
17072+#endif
17073+#ifdef CONFIG_PAX_RANDKSTACK
17074+ pushq %rax
17075+ call pax_randomize_kstack
17076+ popq %rax
17077+#endif
17078+ .endm
17079+
17080+#ifdef CONFIG_PAX_MEMORY_UDEREF
17081+ENTRY(pax_enter_kernel_user)
17082+ pushq %rdi
17083+ pushq %rbx
17084+
17085+#ifdef CONFIG_PARAVIRT
17086+ PV_SAVE_REGS(CLBR_RDI)
17087+#endif
17088+
17089+ GET_CR3_INTO_RDI
17090+ mov %rdi,%rbx
17091+ add $__START_KERNEL_map,%rbx
17092+ sub phys_base(%rip),%rbx
17093+
17094+#ifdef CONFIG_PARAVIRT
17095+ pushq %rdi
17096+ cmpl $0, pv_info+PARAVIRT_enabled
17097+ jz 1f
17098+ i = 0
17099+ .rept USER_PGD_PTRS
17100+ mov i*8(%rbx),%rsi
17101+ mov $0,%sil
17102+ lea i*8(%rbx),%rdi
17103+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
17104+ i = i + 1
17105+ .endr
17106+ jmp 2f
17107+1:
17108+#endif
17109+
17110+ i = 0
17111+ .rept USER_PGD_PTRS
17112+ movb $0,i*8(%rbx)
17113+ i = i + 1
17114+ .endr
17115+
17116+#ifdef CONFIG_PARAVIRT
17117+2: popq %rdi
17118+#endif
17119+ SET_RDI_INTO_CR3
17120+
17121+#ifdef CONFIG_PAX_KERNEXEC
17122+ GET_CR0_INTO_RDI
17123+ bts $16,%rdi
17124+ SET_RDI_INTO_CR0
17125+#endif
17126+
17127+#ifdef CONFIG_PARAVIRT
17128+ PV_RESTORE_REGS(CLBR_RDI)
17129+#endif
17130+
17131+ popq %rbx
17132+ popq %rdi
17133+ pax_force_retaddr
17134+ retq
17135+ENDPROC(pax_enter_kernel_user)
17136+
17137+ENTRY(pax_exit_kernel_user)
17138+ push %rdi
17139+
17140+#ifdef CONFIG_PARAVIRT
17141+ pushq %rbx
17142+ PV_SAVE_REGS(CLBR_RDI)
17143+#endif
17144+
17145+#ifdef CONFIG_PAX_KERNEXEC
17146+ GET_CR0_INTO_RDI
17147+ btr $16,%rdi
17148+ SET_RDI_INTO_CR0
17149+#endif
17150+
17151+ GET_CR3_INTO_RDI
17152+ add $__START_KERNEL_map,%rdi
17153+ sub phys_base(%rip),%rdi
17154+
17155+#ifdef CONFIG_PARAVIRT
17156+ cmpl $0, pv_info+PARAVIRT_enabled
17157+ jz 1f
17158+ mov %rdi,%rbx
17159+ i = 0
17160+ .rept USER_PGD_PTRS
17161+ mov i*8(%rbx),%rsi
17162+ mov $0x67,%sil
17163+ lea i*8(%rbx),%rdi
17164+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
17165+ i = i + 1
17166+ .endr
17167+ jmp 2f
17168+1:
17169+#endif
17170+
17171+ i = 0
17172+ .rept USER_PGD_PTRS
17173+ movb $0x67,i*8(%rdi)
17174+ i = i + 1
17175+ .endr
17176+
17177+#ifdef CONFIG_PARAVIRT
17178+2: PV_RESTORE_REGS(CLBR_RDI)
17179+ popq %rbx
17180+#endif
17181+
17182+ popq %rdi
17183+ pax_force_retaddr
17184+ retq
17185+ENDPROC(pax_exit_kernel_user)
17186+#endif
17187+
17188+.macro pax_erase_kstack
17189+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
17190+ call pax_erase_kstack
17191+#endif
17192+.endm
17193+
17194+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
17195+ENTRY(pax_erase_kstack)
17196+ pushq %rdi
17197+ pushq %rcx
17198+ pushq %rax
17199+ pushq %r11
17200+
17201+ GET_THREAD_INFO(%r11)
17202+ mov TI_lowest_stack(%r11), %rdi
17203+ mov $-0xBEEF, %rax
17204+ std
17205+
17206+1: mov %edi, %ecx
17207+ and $THREAD_SIZE_asm - 1, %ecx
17208+ shr $3, %ecx
17209+ repne scasq
17210+ jecxz 2f
17211+
17212+ cmp $2*8, %ecx
17213+ jc 2f
17214+
17215+ mov $2*8, %ecx
17216+ repe scasq
17217+ jecxz 2f
17218+ jne 1b
17219+
17220+2: cld
17221+ mov %esp, %ecx
17222+ sub %edi, %ecx
17223+
17224+ cmp $THREAD_SIZE_asm, %rcx
17225+ jb 3f
17226+ ud2
17227+3:
17228+
17229+ shr $3, %ecx
17230+ rep stosq
17231+
17232+ mov TI_task_thread_sp0(%r11), %rdi
17233+ sub $256, %rdi
17234+ mov %rdi, TI_lowest_stack(%r11)
17235+
17236+ popq %r11
17237+ popq %rax
17238+ popq %rcx
17239+ popq %rdi
17240+ pax_force_retaddr
17241+ ret
17242+ENDPROC(pax_erase_kstack)
17243+#endif
17244
17245 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
17246 #ifdef CONFIG_TRACE_IRQFLAGS
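pax_erase_kstack above is the STACKLEAK engine: it scans down from the deepest stack use recorded in thread_info->lowest_stack (a field this patch adds) until it finds the run of poison left by the previous erase, then refills everything up to the live stack pointer with the value loaded into %rax, -0xBEEF (0xffffffffffff4111 on x86-64). A compact C sketch of the same job, with the scan-for-poison optimization omitted:

	#include <linux/sched.h>
	#include <asm/thread_info.h>

	#define STACKLEAK_POISON -0xBEEF	/* what mov $-0xBEEF,%rax loads */

	/* hedged sketch; lowest_stack is the patch-added watermark */
	static void erase_kstack_sketch(void)
	{
		unsigned long *p = (unsigned long *)
				   current_thread_info()->lowest_stack;
		unsigned long sp_probe;
		unsigned long *sp = &sp_probe;	/* ~ the live stack pointer */

		while (p < sp)
			*p++ = STACKLEAK_POISON;	/* rep stosq, in effect */

		/* reset the watermark just below the stack top, as the asm
		 * does with TI_task_thread_sp0 minus 256 */
		current_thread_info()->lowest_stack =
			current->thread.sp0 - 256;
	}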
17247@@ -375,8 +651,8 @@ ENDPROC(native_usergs_sysret64)
17248 .endm
17249
17250 .macro UNFAKE_STACK_FRAME
17251- addq $8*6, %rsp
17252- CFI_ADJUST_CFA_OFFSET -(6*8)
17253+ addq $8*6 + ARG_SKIP, %rsp
17254+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
17255 .endm
17256
17257 /*
17258@@ -463,7 +739,7 @@ ENDPROC(native_usergs_sysret64)
17259 movq %rsp, %rsi
17260
17261 leaq -RBP(%rsp),%rdi /* arg1 for handler */
17262- testl $3, CS-RBP(%rsi)
17263+ testb $3, CS-RBP(%rsi)
17264 je 1f
17265 SWAPGS
17266 /*
17267@@ -498,9 +774,10 @@ ENTRY(save_rest)
17268 movq_cfi r15, R15+16
17269 movq %r11, 8(%rsp) /* return address */
17270 FIXUP_TOP_OF_STACK %r11, 16
17271+ pax_force_retaddr
17272 ret
17273 CFI_ENDPROC
17274-END(save_rest)
17275+ENDPROC(save_rest)
17276
17277 /* save complete stack frame */
17278 .pushsection .kprobes.text, "ax"
17279@@ -529,9 +806,10 @@ ENTRY(save_paranoid)
17280 js 1f /* negative -> in kernel */
17281 SWAPGS
17282 xorl %ebx,%ebx
17283-1: ret
17284+1: pax_force_retaddr_bts
17285+ ret
17286 CFI_ENDPROC
17287-END(save_paranoid)
17288+ENDPROC(save_paranoid)
17289 .popsection
17290
17291 /*
17292@@ -553,7 +831,7 @@ ENTRY(ret_from_fork)
17293
17294 RESTORE_REST
17295
17296- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
17297+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
17298 jz 1f
17299
17300 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
17301@@ -571,7 +849,7 @@ ENTRY(ret_from_fork)
17302 RESTORE_REST
17303 jmp int_ret_from_sys_call
17304 CFI_ENDPROC
17305-END(ret_from_fork)
17306+ENDPROC(ret_from_fork)
17307
17308 /*
17309 * System call entry. Up to 6 arguments in registers are supported.
17310@@ -608,7 +886,7 @@ END(ret_from_fork)
17311 ENTRY(system_call)
17312 CFI_STARTPROC simple
17313 CFI_SIGNAL_FRAME
17314- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
17315+ CFI_DEF_CFA rsp,0
17316 CFI_REGISTER rip,rcx
17317 /*CFI_REGISTER rflags,r11*/
17318 SWAPGS_UNSAFE_STACK
17319@@ -621,16 +899,23 @@ GLOBAL(system_call_after_swapgs)
17320
17321 movq %rsp,PER_CPU_VAR(old_rsp)
17322 movq PER_CPU_VAR(kernel_stack),%rsp
17323+ SAVE_ARGS 8*6,0
17324+ pax_enter_kernel_user
17325+
17326+#ifdef CONFIG_PAX_RANDKSTACK
17327+ pax_erase_kstack
17328+#endif
17329+
17330 /*
17331 * No need to follow this irqs off/on section - it's straight
17332 * and short:
17333 */
17334 ENABLE_INTERRUPTS(CLBR_NONE)
17335- SAVE_ARGS 8,0
17336 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
17337 movq %rcx,RIP-ARGOFFSET(%rsp)
17338 CFI_REL_OFFSET rip,RIP-ARGOFFSET
17339- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
17340+ GET_THREAD_INFO(%rcx)
17341+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
17342 jnz tracesys
17343 system_call_fastpath:
17344 #if __SYSCALL_MASK == ~0
17345@@ -640,7 +925,7 @@ system_call_fastpath:
17346 cmpl $__NR_syscall_max,%eax
17347 #endif
17348 ja badsys
17349- movq %r10,%rcx
17350+ movq R10-ARGOFFSET(%rsp),%rcx
17351 call *sys_call_table(,%rax,8) # XXX: rip relative
17352 movq %rax,RAX-ARGOFFSET(%rsp)
17353 /*
17354@@ -654,10 +939,13 @@ sysret_check:
17355 LOCKDEP_SYS_EXIT
17356 DISABLE_INTERRUPTS(CLBR_NONE)
17357 TRACE_IRQS_OFF
17358- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
17359+ GET_THREAD_INFO(%rcx)
17360+ movl TI_flags(%rcx),%edx
17361 andl %edi,%edx
17362 jnz sysret_careful
17363 CFI_REMEMBER_STATE
17364+ pax_exit_kernel_user
17365+ pax_erase_kstack
17366 /*
17367 * sysretq will re-enable interrupts:
17368 */
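A recurring 64-bit detail in these hunks: the SYSCALL instruction clobbers %rcx (it carries the return RIP), so userland passes the 4th syscall argument in %r10, while the C calling convention wants it in %rcx. The stock code did movq %r10,%rcx from the live register; because this patch issues SAVE_ARGS before anything else, it reloads the value from the saved frame instead (R10-ARGOFFSET(%rsp)). A hedged sketch of the dispatch this sets up, illustrative rather than the kernel's actual dispatcher:

	#include <linux/errno.h>
	#include <asm/ptrace.h>
	#include <asm/unistd.h>

	typedef long (*sys_call_ptr_t)(long, long, long, long, long, long);
	extern const sys_call_ptr_t sys_call_table[];

	static long dispatch(unsigned long nr, struct pt_regs *regs)
	{
		if (nr > __NR_syscall_max)	/* cmpq $__NR_syscall_max,%rax */
			return -ENOSYS;
		/* arg4 comes from the saved r10 slot, landing in the rcx
		 * position of the C call */
		return sys_call_table[nr](regs->di, regs->si, regs->dx,
					  regs->r10, regs->r8, regs->r9);
	}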
17369@@ -709,14 +997,18 @@ badsys:
17370 * jump back to the normal fast path.
17371 */
17372 auditsys:
17373- movq %r10,%r9 /* 6th arg: 4th syscall arg */
17374+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
17375 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
17376 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
17377 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
17378 movq %rax,%rsi /* 2nd arg: syscall number */
17379 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
17380 call __audit_syscall_entry
17381+
17382+ pax_erase_kstack
17383+
17384 LOAD_ARGS 0 /* reload call-clobbered registers */
17385+ pax_set_fptr_mask
17386 jmp system_call_fastpath
17387
17388 /*
17389@@ -737,7 +1029,7 @@ sysret_audit:
17390 /* Do syscall tracing */
17391 tracesys:
17392 #ifdef CONFIG_AUDITSYSCALL
17393- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
17394+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
17395 jz auditsys
17396 #endif
17397 SAVE_REST
17398@@ -745,12 +1037,16 @@ tracesys:
17399 FIXUP_TOP_OF_STACK %rdi
17400 movq %rsp,%rdi
17401 call syscall_trace_enter
17402+
17403+ pax_erase_kstack
17404+
17405 /*
17406 * Reload arg registers from stack in case ptrace changed them.
17407 * We don't reload %rax because syscall_trace_enter() returned
17408 * the value it wants us to use in the table lookup.
17409 */
17410 LOAD_ARGS ARGOFFSET, 1
17411+ pax_set_fptr_mask
17412 RESTORE_REST
17413 #if __SYSCALL_MASK == ~0
17414 cmpq $__NR_syscall_max,%rax
17415@@ -759,7 +1055,7 @@ tracesys:
17416 cmpl $__NR_syscall_max,%eax
17417 #endif
17418 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
17419- movq %r10,%rcx /* fixup for C */
17420+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
17421 call *sys_call_table(,%rax,8)
17422 movq %rax,RAX-ARGOFFSET(%rsp)
17423 /* Use IRET because user could have changed frame */
17424@@ -780,7 +1076,9 @@ GLOBAL(int_with_check)
17425 andl %edi,%edx
17426 jnz int_careful
17427 andl $~TS_COMPAT,TI_status(%rcx)
17428- jmp retint_swapgs
17429+ pax_exit_kernel_user
17430+ pax_erase_kstack
17431+ jmp retint_swapgs_pax
17432
17433 /* Either reschedule or signal or syscall exit tracking needed. */
17434 /* First do a reschedule test. */
17435@@ -826,7 +1124,7 @@ int_restore_rest:
17436 TRACE_IRQS_OFF
17437 jmp int_with_check
17438 CFI_ENDPROC
17439-END(system_call)
17440+ENDPROC(system_call)
17441
17442 /*
17443 * Certain special system calls that need to save a complete full stack frame.
17444@@ -842,7 +1140,7 @@ ENTRY(\label)
17445 call \func
17446 jmp ptregscall_common
17447 CFI_ENDPROC
17448-END(\label)
17449+ENDPROC(\label)
17450 .endm
17451
17452 PTREGSCALL stub_clone, sys_clone, %r8
17453@@ -860,9 +1158,10 @@ ENTRY(ptregscall_common)
17454 movq_cfi_restore R12+8, r12
17455 movq_cfi_restore RBP+8, rbp
17456 movq_cfi_restore RBX+8, rbx
17457+ pax_force_retaddr
17458 ret $REST_SKIP /* pop extended registers */
17459 CFI_ENDPROC
17460-END(ptregscall_common)
17461+ENDPROC(ptregscall_common)
17462
17463 ENTRY(stub_execve)
17464 CFI_STARTPROC
17465@@ -876,7 +1175,7 @@ ENTRY(stub_execve)
17466 RESTORE_REST
17467 jmp int_ret_from_sys_call
17468 CFI_ENDPROC
17469-END(stub_execve)
17470+ENDPROC(stub_execve)
17471
17472 /*
17473 * sigreturn is special because it needs to restore all registers on return.
17474@@ -894,7 +1193,7 @@ ENTRY(stub_rt_sigreturn)
17475 RESTORE_REST
17476 jmp int_ret_from_sys_call
17477 CFI_ENDPROC
17478-END(stub_rt_sigreturn)
17479+ENDPROC(stub_rt_sigreturn)
17480
17481 #ifdef CONFIG_X86_X32_ABI
17482 PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx
17483@@ -962,7 +1261,7 @@ vector=vector+1
17484 2: jmp common_interrupt
17485 .endr
17486 CFI_ENDPROC
17487-END(irq_entries_start)
17488+ENDPROC(irq_entries_start)
17489
17490 .previous
17491 END(interrupt)
17492@@ -982,6 +1281,16 @@ END(interrupt)
17493 subq $ORIG_RAX-RBP, %rsp
17494 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
17495 SAVE_ARGS_IRQ
17496+#ifdef CONFIG_PAX_MEMORY_UDEREF
17497+ testb $3, CS(%rdi)
17498+ jnz 1f
17499+ pax_enter_kernel
17500+ jmp 2f
17501+1: pax_enter_kernel_user
17502+2:
17503+#else
17504+ pax_enter_kernel
17505+#endif
17506 call \func
17507 .endm
17508
17509@@ -1014,7 +1323,7 @@ ret_from_intr:
17510
17511 exit_intr:
17512 GET_THREAD_INFO(%rcx)
17513- testl $3,CS-ARGOFFSET(%rsp)
17514+ testb $3,CS-ARGOFFSET(%rsp)
17515 je retint_kernel
17516
17517 /* Interrupt came from user space */
17518@@ -1036,12 +1345,16 @@ retint_swapgs: /* return to user-space */
17519 * The iretq could re-enable interrupts:
17520 */
17521 DISABLE_INTERRUPTS(CLBR_ANY)
17522+ pax_exit_kernel_user
17523+retint_swapgs_pax:
17524 TRACE_IRQS_IRETQ
17525 SWAPGS
17526 jmp restore_args
17527
17528 retint_restore_args: /* return to kernel space */
17529 DISABLE_INTERRUPTS(CLBR_ANY)
17530+ pax_exit_kernel
17531+ pax_force_retaddr (RIP-ARGOFFSET)
17532 /*
17533 * The iretq could re-enable interrupts:
17534 */
17535@@ -1124,7 +1437,7 @@ ENTRY(retint_kernel)
17536 #endif
17537
17538 CFI_ENDPROC
17539-END(common_interrupt)
17540+ENDPROC(common_interrupt)
17541 /*
17542 * End of kprobes section
17543 */
17544@@ -1142,7 +1455,7 @@ ENTRY(\sym)
17545 interrupt \do_sym
17546 jmp ret_from_intr
17547 CFI_ENDPROC
17548-END(\sym)
17549+ENDPROC(\sym)
17550 .endm
17551
17552 #ifdef CONFIG_SMP
17553@@ -1198,12 +1511,22 @@ ENTRY(\sym)
17554 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
17555 call error_entry
17556 DEFAULT_FRAME 0
17557+#ifdef CONFIG_PAX_MEMORY_UDEREF
17558+ testb $3, CS(%rsp)
17559+ jnz 1f
17560+ pax_enter_kernel
17561+ jmp 2f
17562+1: pax_enter_kernel_user
17563+2:
17564+#else
17565+ pax_enter_kernel
17566+#endif
17567 movq %rsp,%rdi /* pt_regs pointer */
17568 xorl %esi,%esi /* no error code */
17569 call \do_sym
17570 jmp error_exit /* %ebx: no swapgs flag */
17571 CFI_ENDPROC
17572-END(\sym)
17573+ENDPROC(\sym)
17574 .endm
17575
17576 .macro paranoidzeroentry sym do_sym
17577@@ -1216,15 +1539,25 @@ ENTRY(\sym)
17578 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
17579 call save_paranoid
17580 TRACE_IRQS_OFF
17581+#ifdef CONFIG_PAX_MEMORY_UDEREF
17582+ testb $3, CS(%rsp)
17583+ jnz 1f
17584+ pax_enter_kernel
17585+ jmp 2f
17586+1: pax_enter_kernel_user
17587+2:
17588+#else
17589+ pax_enter_kernel
17590+#endif
17591 movq %rsp,%rdi /* pt_regs pointer */
17592 xorl %esi,%esi /* no error code */
17593 call \do_sym
17594 jmp paranoid_exit /* %ebx: no swapgs flag */
17595 CFI_ENDPROC
17596-END(\sym)
17597+ENDPROC(\sym)
17598 .endm
17599
17600-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
17601+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
17602 .macro paranoidzeroentry_ist sym do_sym ist
17603 ENTRY(\sym)
17604 INTR_FRAME
17605@@ -1235,14 +1568,30 @@ ENTRY(\sym)
17606 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
17607 call save_paranoid
17608 TRACE_IRQS_OFF_DEBUG
17609+#ifdef CONFIG_PAX_MEMORY_UDEREF
17610+ testb $3, CS(%rsp)
17611+ jnz 1f
17612+ pax_enter_kernel
17613+ jmp 2f
17614+1: pax_enter_kernel_user
17615+2:
17616+#else
17617+ pax_enter_kernel
17618+#endif
17619 movq %rsp,%rdi /* pt_regs pointer */
17620 xorl %esi,%esi /* no error code */
17621+#ifdef CONFIG_SMP
17622+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
17623+ lea init_tss(%r12), %r12
17624+#else
17625+ lea init_tss(%rip), %r12
17626+#endif
17627 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
17628 call \do_sym
17629 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
17630 jmp paranoid_exit /* %ebx: no swapgs flag */
17631 CFI_ENDPROC
17632-END(\sym)
17633+ENDPROC(\sym)
17634 .endm
17635
17636 .macro errorentry sym do_sym
17637@@ -1254,13 +1603,23 @@ ENTRY(\sym)
17638 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
17639 call error_entry
17640 DEFAULT_FRAME 0
17641+#ifdef CONFIG_PAX_MEMORY_UDEREF
17642+ testb $3, CS(%rsp)
17643+ jnz 1f
17644+ pax_enter_kernel
17645+ jmp 2f
17646+1: pax_enter_kernel_user
17647+2:
17648+#else
17649+ pax_enter_kernel
17650+#endif
17651 movq %rsp,%rdi /* pt_regs pointer */
17652 movq ORIG_RAX(%rsp),%rsi /* get error code */
17653 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
17654 call \do_sym
17655 jmp error_exit /* %ebx: no swapgs flag */
17656 CFI_ENDPROC
17657-END(\sym)
17658+ENDPROC(\sym)
17659 .endm
17660
17661 /* error code is on the stack already */
17662@@ -1274,13 +1633,23 @@ ENTRY(\sym)
17663 call save_paranoid
17664 DEFAULT_FRAME 0
17665 TRACE_IRQS_OFF
17666+#ifdef CONFIG_PAX_MEMORY_UDEREF
17667+ testb $3, CS(%rsp)
17668+ jnz 1f
17669+ pax_enter_kernel
17670+ jmp 2f
17671+1: pax_enter_kernel_user
17672+2:
17673+#else
17674+ pax_enter_kernel
17675+#endif
17676 movq %rsp,%rdi /* pt_regs pointer */
17677 movq ORIG_RAX(%rsp),%rsi /* get error code */
17678 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
17679 call \do_sym
17680 jmp paranoid_exit /* %ebx: no swapgs flag */
17681 CFI_ENDPROC
17682-END(\sym)
17683+ENDPROC(\sym)
17684 .endm
17685
17686 zeroentry divide_error do_divide_error
17687@@ -1310,9 +1679,10 @@ gs_change:
17688 2: mfence /* workaround */
17689 SWAPGS
17690 popfq_cfi
17691+ pax_force_retaddr
17692 ret
17693 CFI_ENDPROC
17694-END(native_load_gs_index)
17695+ENDPROC(native_load_gs_index)
17696
17697 _ASM_EXTABLE(gs_change,bad_gs)
17698 .section .fixup,"ax"
17699@@ -1340,9 +1710,10 @@ ENTRY(call_softirq)
17700 CFI_DEF_CFA_REGISTER rsp
17701 CFI_ADJUST_CFA_OFFSET -8
17702 decl PER_CPU_VAR(irq_count)
17703+ pax_force_retaddr
17704 ret
17705 CFI_ENDPROC
17706-END(call_softirq)
17707+ENDPROC(call_softirq)
17708
17709 #ifdef CONFIG_XEN
17710 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
17711@@ -1380,7 +1751,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
17712 decl PER_CPU_VAR(irq_count)
17713 jmp error_exit
17714 CFI_ENDPROC
17715-END(xen_do_hypervisor_callback)
17716+ENDPROC(xen_do_hypervisor_callback)
17717
17718 /*
17719 * Hypervisor uses this for application faults while it executes.
17720@@ -1439,7 +1810,7 @@ ENTRY(xen_failsafe_callback)
17721 SAVE_ALL
17722 jmp error_exit
17723 CFI_ENDPROC
17724-END(xen_failsafe_callback)
17725+ENDPROC(xen_failsafe_callback)
17726
17727 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
17728 xen_hvm_callback_vector xen_evtchn_do_upcall
17729@@ -1488,16 +1859,31 @@ ENTRY(paranoid_exit)
17730 TRACE_IRQS_OFF_DEBUG
17731 testl %ebx,%ebx /* swapgs needed? */
17732 jnz paranoid_restore
17733- testl $3,CS(%rsp)
17734+ testb $3,CS(%rsp)
17735 jnz paranoid_userspace
17736+#ifdef CONFIG_PAX_MEMORY_UDEREF
17737+ pax_exit_kernel
17738+ TRACE_IRQS_IRETQ 0
17739+ SWAPGS_UNSAFE_STACK
17740+ RESTORE_ALL 8
17741+ pax_force_retaddr_bts
17742+ jmp irq_return
17743+#endif
17744 paranoid_swapgs:
17745+#ifdef CONFIG_PAX_MEMORY_UDEREF
17746+ pax_exit_kernel_user
17747+#else
17748+ pax_exit_kernel
17749+#endif
17750 TRACE_IRQS_IRETQ 0
17751 SWAPGS_UNSAFE_STACK
17752 RESTORE_ALL 8
17753 jmp irq_return
17754 paranoid_restore:
17755+ pax_exit_kernel
17756 TRACE_IRQS_IRETQ_DEBUG 0
17757 RESTORE_ALL 8
17758+ pax_force_retaddr_bts
17759 jmp irq_return
17760 paranoid_userspace:
17761 GET_THREAD_INFO(%rcx)
17762@@ -1526,7 +1912,7 @@ paranoid_schedule:
17763 TRACE_IRQS_OFF
17764 jmp paranoid_userspace
17765 CFI_ENDPROC
17766-END(paranoid_exit)
17767+ENDPROC(paranoid_exit)
17768
17769 /*
17770 * Exception entry point. This expects an error code/orig_rax on the stack.
17771@@ -1553,12 +1939,13 @@ ENTRY(error_entry)
17772 movq_cfi r14, R14+8
17773 movq_cfi r15, R15+8
17774 xorl %ebx,%ebx
17775- testl $3,CS+8(%rsp)
17776+ testb $3,CS+8(%rsp)
17777 je error_kernelspace
17778 error_swapgs:
17779 SWAPGS
17780 error_sti:
17781 TRACE_IRQS_OFF
17782+ pax_force_retaddr_bts
17783 ret
17784
17785 /*
17786@@ -1585,7 +1972,7 @@ bstep_iret:
17787 movq %rcx,RIP+8(%rsp)
17788 jmp error_swapgs
17789 CFI_ENDPROC
17790-END(error_entry)
17791+ENDPROC(error_entry)
17792
17793
17794 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
17795@@ -1605,7 +1992,7 @@ ENTRY(error_exit)
17796 jnz retint_careful
17797 jmp retint_swapgs
17798 CFI_ENDPROC
17799-END(error_exit)
17800+ENDPROC(error_exit)
17801
17802 /*
17803 * Test if a given stack is an NMI stack or not.
17804@@ -1663,9 +2050,11 @@ ENTRY(nmi)
17805 * If %cs was not the kernel segment, then the NMI triggered in user
17806 * space, which means it is definitely not nested.
17807 */
17808+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
17809+ je 1f
17810 cmpl $__KERNEL_CS, 16(%rsp)
17811 jne first_nmi
17812-
17813+1:
17814 /*
17815 * Check the special variable on the stack to see if NMIs are
17816 * executing.
17817@@ -1824,6 +2213,17 @@ end_repeat_nmi:
17818 */
17819 movq %cr2, %r12
17820
17821+#ifdef CONFIG_PAX_MEMORY_UDEREF
17822+ testb $3, CS(%rsp)
17823+ jnz 1f
17824+ pax_enter_kernel
17825+ jmp 2f
17826+1: pax_enter_kernel_user
17827+2:
17828+#else
17829+ pax_enter_kernel
17830+#endif
17831+
17832 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
17833 movq %rsp,%rdi
17834 movq $-1,%rsi
17835@@ -1839,21 +2239,32 @@ end_repeat_nmi:
17836 testl %ebx,%ebx /* swapgs needed? */
17837 jnz nmi_restore
17838 nmi_swapgs:
17839+#ifdef CONFIG_PAX_MEMORY_UDEREF
17840+ pax_exit_kernel_user
17841+#else
17842+ pax_exit_kernel
17843+#endif
17844 SWAPGS_UNSAFE_STACK
17845+ RESTORE_ALL 8
17846+ /* Clear the NMI executing stack variable */
17847+ movq $0, 10*8(%rsp)
17848+ jmp irq_return
17849 nmi_restore:
17850+ pax_exit_kernel
17851 RESTORE_ALL 8
17852+ pax_force_retaddr_bts
17853 /* Clear the NMI executing stack variable */
17854 movq $0, 10*8(%rsp)
17855 jmp irq_return
17856 CFI_ENDPROC
17857-END(nmi)
17858+ENDPROC(nmi)
17859
17860 ENTRY(ignore_sysret)
17861 CFI_STARTPROC
17862 mov $-ENOSYS,%eax
17863 sysret
17864 CFI_ENDPROC
17865-END(ignore_sysret)
17866+ENDPROC(ignore_sysret)
17867
17868 /*
17869 * End of kprobes section
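That ends the entry_64.S changes. The heart of the UDEREF entry/exit pair earlier in the file is page-table surgery: on every transition from ring 3, pax_enter_kernel_user blanks the PGD entries that map userland (the movb $0 loop), so a stray kernel dereference of a user pointer faults; pax_exit_kernel_user restores the flag byte 0x67 (PRESENT | RW | USER | ACCESSED | DIRTY) on the way back out. The non-paravirt arm, restated as a sketch in C:

	#include <linux/types.h>
	#include <asm/pgtable.h>

	/* illustrative only; the real code patches the PGD through its
	 * kernel-virtual alias and has a batched paravirt variant */
	static void uderef_toggle(pgd_t *pgd, bool entering_kernel)
	{
		unsigned int i;

		for (i = 0; i < USER_PGD_PTRS; i++) {
			unsigned char *flags = (unsigned char *)&pgd[i];

			*flags = entering_kernel ? 0 : 0x67;
		}
	}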
17870diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
17871index 1d41402..af9a46a 100644
17872--- a/arch/x86/kernel/ftrace.c
17873+++ b/arch/x86/kernel/ftrace.c
17874@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
17875 {
17876 unsigned char replaced[MCOUNT_INSN_SIZE];
17877
17878+ ip = ktla_ktva(ip);
17879+
17880 /*
17881 * Note: Due to modules and __init, code can
17882 * disappear and change, we need to protect against faulting
17883@@ -227,7 +229,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
17884 unsigned char old[MCOUNT_INSN_SIZE], *new;
17885 int ret;
17886
17887- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
17888+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
17889 new = ftrace_call_replace(ip, (unsigned long)func);
17890
17891 /* See comment above by declaration of modifying_ftrace_code */
17892@@ -238,7 +240,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
17893 /* Also update the regs callback function */
17894 if (!ret) {
17895 ip = (unsigned long)(&ftrace_regs_call);
17896- memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
17897+ memcpy(old, ktla_ktva((void *)&ftrace_regs_call), MCOUNT_INSN_SIZE);
17898 new = ftrace_call_replace(ip, (unsigned long)func);
17899 ret = ftrace_modify_code(ip, old, new);
17900 }
17901@@ -279,7 +281,7 @@ static int ftrace_write(unsigned long ip, const char *val, int size)
17902 * kernel identity mapping to modify code.
17903 */
17904 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
17905- ip = (unsigned long)__va(__pa(ip));
17906+ ip = (unsigned long)__va(__pa(ktla_ktva(ip)));
17907
17908 return probe_kernel_write((void *)ip, val, size);
17909 }
17910@@ -289,7 +291,7 @@ static int add_break(unsigned long ip, const char *old)
17911 unsigned char replaced[MCOUNT_INSN_SIZE];
17912 unsigned char brk = BREAKPOINT_INSTRUCTION;
17913
17914- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
17915+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
17916 return -EFAULT;
17917
17918 /* Make sure it is what we expect it to be */
17919@@ -637,7 +639,7 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
17920 return ret;
17921
17922 fail_update:
17923- probe_kernel_write((void *)ip, &old_code[0], 1);
17924+ probe_kernel_write((void *)ktla_ktva(ip), &old_code[0], 1);
17925 goto out;
17926 }
17927
17928@@ -670,6 +672,8 @@ static int ftrace_mod_jmp(unsigned long ip,
17929 {
17930 unsigned char code[MCOUNT_INSN_SIZE];
17931
17932+ ip = ktla_ktva(ip);
17933+
17934 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
17935 return -EFAULT;
17936
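The ftrace.c changes all funnel through one helper: every address about to be read or patched is passed through ktla_ktva() first. The reason, under 32-bit KERNEXEC, is that the kernel code segment has a non-zero base, so an instruction's address as fetched through %cs (a "ktva") and the flat linear address of the same byte (a "ktla") differ by a constant, and code patching must translate before touching memory. Roughly how PaX defines the pair, offered here as an assumption rather than a quote from this patch:

	/* sketch; __KERNEL_TEXT_OFFSET is PaX's constant, zero without
	 * KERNEXEC, so both macros then collapse to the identity */
	#ifdef CONFIG_PAX_KERNEXEC
	#define ktla_ktva(addr)	((addr) + __KERNEL_TEXT_OFFSET)
	#define ktva_ktla(addr)	((addr) - __KERNEL_TEXT_OFFSET)
	#else
	#define ktla_ktva(addr)	(addr)
	#define ktva_ktla(addr)	(addr)
	#endif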
17937diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
17938index c18f59d..69ddbc4 100644
17939--- a/arch/x86/kernel/head32.c
17940+++ b/arch/x86/kernel/head32.c
17941@@ -18,20 +18,20 @@
17942 #include <asm/io_apic.h>
17943 #include <asm/bios_ebda.h>
17944 #include <asm/tlbflush.h>
17945+#include <asm/boot.h>
17946
17947 static void __init i386_default_early_setup(void)
17948 {
17949 /* Initialize 32bit specific setup functions */
17950- x86_init.resources.reserve_resources = i386_reserve_resources;
17951- x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc;
17952+ *(void **)&x86_init.resources.reserve_resources = i386_reserve_resources;
17953+ *(void **)&x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc;
17954
17955 reserve_ebda_region();
17956 }
17957
17958 void __init i386_start_kernel(void)
17959 {
17960- memblock_reserve(__pa_symbol(&_text),
17961- __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
17962+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
17963
17964 #ifdef CONFIG_BLK_DEV_INITRD
17965 /* Reserve INITRD */
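The head32.c hunk shows grsecurity's constification pattern: structures like x86_init are made read-only, so the compiler rejects a plain x86_init.resources.reserve_resources = ...; the patch writes through a *(void **)& cast instead. This early in boot the store still lands because the final page protections are not yet armed; later on, equivalent writes are bracketed by PaX's pax_open_kernel()/pax_close_kernel(), in the spirit of:

	/* hedged sketch; set_ro_op is a hypothetical helper, and the
	 * pax_open_kernel()/pax_close_kernel() pair is PaX's API for
	 * briefly lifting write protection (cf. the CR0.WP toggling in
	 * entry_32.S above) */
	static void set_ro_op(void **slot, void *fn)
	{
		pax_open_kernel();
		*slot = fn;
		pax_close_kernel();
	}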
17966diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
17967index 4dac2f6..bc6a335 100644
17968--- a/arch/x86/kernel/head_32.S
17969+++ b/arch/x86/kernel/head_32.S
17970@@ -26,6 +26,12 @@
17971 /* Physical address */
17972 #define pa(X) ((X) - __PAGE_OFFSET)
17973
17974+#ifdef CONFIG_PAX_KERNEXEC
17975+#define ta(X) (X)
17976+#else
17977+#define ta(X) ((X) - __PAGE_OFFSET)
17978+#endif
17979+
17980 /*
17981 * References to members of the new_cpu_data structure.
17982 */
17983@@ -55,11 +61,7 @@
17984 * and small than max_low_pfn, otherwise will waste some page table entries
17985 */
17986
17987-#if PTRS_PER_PMD > 1
17988-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
17989-#else
17990-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
17991-#endif
17992+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
17993
17994 /* Number of possible pages in the lowmem region */
17995 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
17996@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
17997 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
17998
17999 /*
18000+ * Real beginning of normal "text" segment
18001+ */
18002+ENTRY(stext)
18003+ENTRY(_stext)
18004+
18005+/*
18006 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
18007 * %esi points to the real-mode code as a 32-bit pointer.
18008 * CS and DS must be 4 GB flat segments, but we don't depend on
18009@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
18010 * can.
18011 */
18012 __HEAD
18013+
18014+#ifdef CONFIG_PAX_KERNEXEC
18015+ jmp startup_32
18016+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
18017+.fill PAGE_SIZE-5,1,0xcc
18018+#endif
18019+
18020 ENTRY(startup_32)
18021 movl pa(stack_start),%ecx
18022
18023@@ -106,6 +121,59 @@ ENTRY(startup_32)
18024 2:
18025 leal -__PAGE_OFFSET(%ecx),%esp
18026
18027+#ifdef CONFIG_SMP
18028+ movl $pa(cpu_gdt_table),%edi
18029+ movl $__per_cpu_load,%eax
18030+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
18031+ rorl $16,%eax
18032+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
18033+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
18034+ movl $__per_cpu_end - 1,%eax
18035+ subl $__per_cpu_start,%eax
18036+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
18037+#endif
18038+
18039+#ifdef CONFIG_PAX_MEMORY_UDEREF
18040+ movl $NR_CPUS,%ecx
18041+ movl $pa(cpu_gdt_table),%edi
18042+1:
18043+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
18044+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
18045+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
18046+ addl $PAGE_SIZE_asm,%edi
18047+ loop 1b
18048+#endif
18049+
18050+#ifdef CONFIG_PAX_KERNEXEC
18051+ movl $pa(boot_gdt),%edi
18052+ movl $__LOAD_PHYSICAL_ADDR,%eax
18053+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
18054+ rorl $16,%eax
18055+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
18056+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
18057+ rorl $16,%eax
18058+
18059+ ljmp $(__BOOT_CS),$1f
18060+1:
18061+
18062+ movl $NR_CPUS,%ecx
18063+ movl $pa(cpu_gdt_table),%edi
18064+ addl $__PAGE_OFFSET,%eax
18065+1:
18066+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
18067+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
18068+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
18069+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
18070+ rorl $16,%eax
18071+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
18072+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
18073+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
18074+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
18075+ rorl $16,%eax
18076+ addl $PAGE_SIZE_asm,%edi
18077+ loop 1b
18078+#endif
18079+
18080 /*
18081 * Clear BSS first so that there are no surprises...
18082 */
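The CONFIG_PAX_MEMORY_UDEREF loop above is the 32-bit, segmentation-based sibling of the 64-bit PGD trick: it rewrites the high dword of the KERNEL_DS, USER_CS and USER_DS descriptors in every CPU's GDT page. Worked through with the usual __PAGE_OFFSET of 0xC0000000 (an assumption; other splits shift the limit accordingly):

	/* small, userspace-runnable check of the descriptor arithmetic */
	#include <stdio.h>

	int main(void)
	{
		unsigned int page_offset = 0xC0000000u;	/* 3G/1G split */
		unsigned int limit19_16 = ((page_offset - 1) & 0xf0000000u) >> 12;

		/* KERNEL_DS: access 0x97 = present, ring 0, expand-down
		 * writable data, so with limit 0xBFFFF (G=1) only offsets
		 * above 0xBFFFFFFF, i.e. kernel space, are valid */
		printf("KERNEL_DS hi dword: %#010x\n", limit19_16 | 0x00c09700u);
		/* USER_CS/USER_DS stay expand-up with the same limit,
		 * confining ring 3 to below __PAGE_OFFSET */
		printf("USER_CS   hi dword: %#010x\n", limit19_16 | 0x00c0fb00u);
		printf("USER_DS   hi dword: %#010x\n", limit19_16 | 0x00c0f300u);
		return 0;
	}

With that split the kernel data segment's high dword comes to 0x00cb9700: a stray kernel %ds access to a userland pointer now takes a general protection fault instead of silently succeeding.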
18083@@ -196,8 +264,11 @@ ENTRY(startup_32)
18084 movl %eax, pa(max_pfn_mapped)
18085
18086 /* Do early initialization of the fixmap area */
18087- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
18088- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
18089+#ifdef CONFIG_COMPAT_VDSO
18090+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
18091+#else
18092+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
18093+#endif
18094 #else /* Not PAE */
18095
18096 page_pde_offset = (__PAGE_OFFSET >> 20);
18097@@ -227,8 +298,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
18098 movl %eax, pa(max_pfn_mapped)
18099
18100 /* Do early initialization of the fixmap area */
18101- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
18102- movl %eax,pa(initial_page_table+0xffc)
18103+#ifdef CONFIG_COMPAT_VDSO
18104+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
18105+#else
18106+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
18107+#endif
18108 #endif
18109
18110 #ifdef CONFIG_PARAVIRT
18111@@ -242,9 +316,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
18112 cmpl $num_subarch_entries, %eax
18113 jae bad_subarch
18114
18115- movl pa(subarch_entries)(,%eax,4), %eax
18116- subl $__PAGE_OFFSET, %eax
18117- jmp *%eax
18118+ jmp *pa(subarch_entries)(,%eax,4)
18119
18120 bad_subarch:
18121 WEAK(lguest_entry)
18122@@ -256,10 +328,10 @@ WEAK(xen_entry)
18123 __INITDATA
18124
18125 subarch_entries:
18126- .long default_entry /* normal x86/PC */
18127- .long lguest_entry /* lguest hypervisor */
18128- .long xen_entry /* Xen hypervisor */
18129- .long default_entry /* Moorestown MID */
18130+ .long ta(default_entry) /* normal x86/PC */
18131+ .long ta(lguest_entry) /* lguest hypervisor */
18132+ .long ta(xen_entry) /* Xen hypervisor */
18133+ .long ta(default_entry) /* Moorestown MID */
18134 num_subarch_entries = (. - subarch_entries) / 4
18135 .previous
18136 #else
18137@@ -316,6 +388,7 @@ default_entry:
18138 movl pa(mmu_cr4_features),%eax
18139 movl %eax,%cr4
18140
18141+#ifdef CONFIG_X86_PAE
18142 testb $X86_CR4_PAE, %al # check if PAE is enabled
18143 jz 6f
18144
18145@@ -344,6 +417,9 @@ default_entry:
18146 /* Make changes effective */
18147 wrmsr
18148
18149+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
18150+#endif
18151+
18152 6:
18153
18154 /*
18155@@ -442,14 +518,20 @@ is386: movl $2,%ecx # set MP
18156 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
18157 movl %eax,%ss # after changing gdt.
18158
18159- movl $(__USER_DS),%eax # DS/ES contains default USER segment
18160+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
18161 movl %eax,%ds
18162 movl %eax,%es
18163
18164 movl $(__KERNEL_PERCPU), %eax
18165 movl %eax,%fs # set this cpu's percpu
18166
18167+#ifdef CONFIG_CC_STACKPROTECTOR
18168 movl $(__KERNEL_STACK_CANARY),%eax
18169+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
18170+ movl $(__USER_DS),%eax
18171+#else
18172+ xorl %eax,%eax
18173+#endif
18174 movl %eax,%gs
18175
18176 xorl %eax,%eax # Clear LDT
18177@@ -526,8 +608,11 @@ setup_once:
18178 * relocation. Manually set base address in stack canary
18179 * segment descriptor.
18180 */
18181- movl $gdt_page,%eax
18182+ movl $cpu_gdt_table,%eax
18183 movl $stack_canary,%ecx
18184+#ifdef CONFIG_SMP
18185+ addl $__per_cpu_load,%ecx
18186+#endif
18187 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
18188 shrl $16, %ecx
18189 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
18190@@ -558,7 +643,7 @@ ENDPROC(early_idt_handlers)
18191 /* This is global to keep gas from relaxing the jumps */
18192 ENTRY(early_idt_handler)
18193 cld
18194- cmpl $2,%ss:early_recursion_flag
18195+ cmpl $1,%ss:early_recursion_flag
18196 je hlt_loop
18197 incl %ss:early_recursion_flag
18198
18199@@ -596,8 +681,8 @@ ENTRY(early_idt_handler)
18200 pushl (20+6*4)(%esp) /* trapno */
18201 pushl $fault_msg
18202 call printk
18203-#endif
18204 call dump_stack
18205+#endif
18206 hlt_loop:
18207 hlt
18208 jmp hlt_loop
18209@@ -616,8 +701,11 @@ ENDPROC(early_idt_handler)
18210 /* This is the default interrupt "handler" :-) */
18211 ALIGN
18212 ignore_int:
18213- cld
18214 #ifdef CONFIG_PRINTK
18215+ cmpl $2,%ss:early_recursion_flag
18216+ je hlt_loop
18217+ incl %ss:early_recursion_flag
18218+ cld
18219 pushl %eax
18220 pushl %ecx
18221 pushl %edx
18222@@ -626,9 +714,6 @@ ignore_int:
18223 movl $(__KERNEL_DS),%eax
18224 movl %eax,%ds
18225 movl %eax,%es
18226- cmpl $2,early_recursion_flag
18227- je hlt_loop
18228- incl early_recursion_flag
18229 pushl 16(%esp)
18230 pushl 24(%esp)
18231 pushl 32(%esp)
18232@@ -662,29 +747,43 @@ ENTRY(setup_once_ref)
18233 /*
18234 * BSS section
18235 */
18236-__PAGE_ALIGNED_BSS
18237- .align PAGE_SIZE
18238 #ifdef CONFIG_X86_PAE
18239+.section .initial_pg_pmd,"a",@progbits
18240 initial_pg_pmd:
18241 .fill 1024*KPMDS,4,0
18242 #else
18243+.section .initial_page_table,"a",@progbits
18244 ENTRY(initial_page_table)
18245 .fill 1024,4,0
18246 #endif
18247+.section .initial_pg_fixmap,"a",@progbits
18248 initial_pg_fixmap:
18249 .fill 1024,4,0
18250+.section .empty_zero_page,"a",@progbits
18251 ENTRY(empty_zero_page)
18252 .fill 4096,1,0
18253+.section .swapper_pg_dir,"a",@progbits
18254 ENTRY(swapper_pg_dir)
18255+#ifdef CONFIG_X86_PAE
18256+ .fill 4,8,0
18257+#else
18258 .fill 1024,4,0
18259+#endif
18260+
18261+/*
18262+ * The IDT has to be page-aligned to simplify the Pentium
18263+ * F0 0F bug workaround.. We have a special link segment
18264+ * for this.
18265+ */
18266+.section .idt,"a",@progbits
18267+ENTRY(idt_table)
18268+ .fill 256,8,0
18269
18270 /*
18271 * This starts the data section.
18272 */
18273 #ifdef CONFIG_X86_PAE
18274-__PAGE_ALIGNED_DATA
18275- /* Page-aligned for the benefit of paravirt? */
18276- .align PAGE_SIZE
18277+.section .initial_page_table,"a",@progbits
18278 ENTRY(initial_page_table)
18279 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
18280 # if KPMDS == 3
18281@@ -703,12 +802,20 @@ ENTRY(initial_page_table)
18282 # error "Kernel PMDs should be 1, 2 or 3"
18283 # endif
18284 .align PAGE_SIZE /* needs to be page-sized too */
18285+
18286+#ifdef CONFIG_PAX_PER_CPU_PGD
18287+ENTRY(cpu_pgd)
18288+ .rept NR_CPUS
18289+ .fill 4,8,0
18290+ .endr
18291+#endif
18292+
18293 #endif
18294
18295 .data
18296 .balign 4
18297 ENTRY(stack_start)
18298- .long init_thread_union+THREAD_SIZE
18299+ .long init_thread_union+THREAD_SIZE-8
18300
18301 __INITRODATA
18302 int_msg:
18303@@ -736,7 +843,7 @@ fault_msg:
18304 * segment size, and 32-bit linear address value:
18305 */
18306
18307- .data
18308+.section .rodata,"a",@progbits
18309 .globl boot_gdt_descr
18310 .globl idt_descr
18311
18312@@ -745,7 +852,7 @@ fault_msg:
18313 .word 0 # 32 bit align gdt_desc.address
18314 boot_gdt_descr:
18315 .word __BOOT_DS+7
18316- .long boot_gdt - __PAGE_OFFSET
18317+ .long pa(boot_gdt)
18318
18319 .word 0 # 32-bit align idt_desc.address
18320 idt_descr:
18321@@ -756,7 +863,7 @@ idt_descr:
18322 .word 0 # 32 bit align gdt_desc.address
18323 ENTRY(early_gdt_descr)
18324 .word GDT_ENTRIES*8-1
18325- .long gdt_page /* Overwritten for secondary CPUs */
18326+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
18327
18328 /*
18329 * The boot_gdt must mirror the equivalent in setup.S and is
18330@@ -765,5 +872,65 @@ ENTRY(early_gdt_descr)
18331 .align L1_CACHE_BYTES
18332 ENTRY(boot_gdt)
18333 .fill GDT_ENTRY_BOOT_CS,8,0
18334- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
18335- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
18336+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
18337+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
18338+
18339+ .align PAGE_SIZE_asm
18340+ENTRY(cpu_gdt_table)
18341+ .rept NR_CPUS
18342+ .quad 0x0000000000000000 /* NULL descriptor */
18343+ .quad 0x0000000000000000 /* 0x0b reserved */
18344+ .quad 0x0000000000000000 /* 0x13 reserved */
18345+ .quad 0x0000000000000000 /* 0x1b reserved */
18346+
18347+#ifdef CONFIG_PAX_KERNEXEC
18348+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
18349+#else
18350+ .quad 0x0000000000000000 /* 0x20 unused */
18351+#endif
18352+
18353+ .quad 0x0000000000000000 /* 0x28 unused */
18354+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
18355+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
18356+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
18357+ .quad 0x0000000000000000 /* 0x4b reserved */
18358+ .quad 0x0000000000000000 /* 0x53 reserved */
18359+ .quad 0x0000000000000000 /* 0x5b reserved */
18360+
18361+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
18362+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
18363+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
18364+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
18365+
18366+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
18367+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
18368+
18369+ /*
18370+ * Segments used for calling PnP BIOS have byte granularity.
18371+ * The code segments and data segments have fixed 64k limits,
18372+ * the transfer segment sizes are set at run time.
18373+ */
18374+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
18375+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
18376+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
18377+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
18378+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
18379+
18380+ /*
18381+ * The APM segments have byte granularity and their bases
18382+ * are set at run time. All have 64k limits.
18383+ */
18384+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
18385+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
18386+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
18387+
18388+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
18389+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
18390+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
18391+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
18392+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
18393+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
18394+
18395+ /* Be sure this is zeroed to avoid false validations in Xen */
18396+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
18397+ .endr
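cpu_gdt_table replaces the per-cpu gdt_page with one page-aligned GDT per CPU, which is what lets the entry code locate it by cpu_number << PAGE_SHIFT and lets KERNEXEC keep it read-only. The raw .quad values decode mechanically; as a worked example, a small packer (field layout per the SDM, nothing patch-specific assumed) reproduces the kernel code descriptor:

	#include <stdint.h>

	/* pack an x86 segment descriptor from base/limit/access/flags */
	static uint64_t gdt_entry(uint32_t base, uint32_t limit,
				  uint8_t access, uint8_t flags)
	{
		return  (uint64_t)(limit & 0xffffu)               |
			((uint64_t)(base & 0xffffffu)      << 16) |
			((uint64_t)access                  << 40) |
			((uint64_t)((limit >> 16) & 0xfu)  << 48) |
			((uint64_t)(flags & 0xfu)          << 52) |
			((uint64_t)(base >> 24)            << 56);
	}

	/* gdt_entry(0, 0xfffff, 0x9b, 0xc) == 0x00cf9b000000ffff:
	 * base 0, 4 GiB limit (G=1), present ring-0 readable code,
	 * 32-bit default size; note the access byte is 0x9b rather than
	 * the stock 0x9a, i.e. the accessed bit is pre-set so the CPU
	 * never needs to write back into the read-only descriptor */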
18398diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
18399index 94bf9cc..400455a 100644
18400--- a/arch/x86/kernel/head_64.S
18401+++ b/arch/x86/kernel/head_64.S
18402@@ -20,6 +20,8 @@
18403 #include <asm/processor-flags.h>
18404 #include <asm/percpu.h>
18405 #include <asm/nops.h>
18406+#include <asm/cpufeature.h>
18407+#include <asm/alternative-asm.h>
18408
18409 #ifdef CONFIG_PARAVIRT
18410 #include <asm/asm-offsets.h>
18411@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
18412 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
18413 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
18414 L3_START_KERNEL = pud_index(__START_KERNEL_map)
18415+L4_VMALLOC_START = pgd_index(VMALLOC_START)
18416+L3_VMALLOC_START = pud_index(VMALLOC_START)
18417+L4_VMALLOC_END = pgd_index(VMALLOC_END)
18418+L3_VMALLOC_END = pud_index(VMALLOC_END)
18419+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
18420+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
18421
18422 .text
18423 __HEAD
18424@@ -88,35 +96,23 @@ startup_64:
18425 */
18426 addq %rbp, init_level4_pgt + 0(%rip)
18427 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
18428+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
18429+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
18430+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
18431 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
18432
18433 addq %rbp, level3_ident_pgt + 0(%rip)
18434+#ifndef CONFIG_XEN
18435+ addq %rbp, level3_ident_pgt + 8(%rip)
18436+#endif
18437
18438- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
18439- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
18440+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
18441+
18442+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
18443+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
18444
18445 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
18446-
18447- /* Add an Identity mapping if I am above 1G */
18448- leaq _text(%rip), %rdi
18449- andq $PMD_PAGE_MASK, %rdi
18450-
18451- movq %rdi, %rax
18452- shrq $PUD_SHIFT, %rax
18453- andq $(PTRS_PER_PUD - 1), %rax
18454- jz ident_complete
18455-
18456- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
18457- leaq level3_ident_pgt(%rip), %rbx
18458- movq %rdx, 0(%rbx, %rax, 8)
18459-
18460- movq %rdi, %rax
18461- shrq $PMD_SHIFT, %rax
18462- andq $(PTRS_PER_PMD - 1), %rax
18463- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
18464- leaq level2_spare_pgt(%rip), %rbx
18465- movq %rdx, 0(%rbx, %rax, 8)
18466-ident_complete:
18467+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
18468
18469 /*
18470 * Fixup the kernel text+data virtual addresses. Note that
18471@@ -159,8 +155,8 @@ ENTRY(secondary_startup_64)
18472 * after the boot processor executes this code.
18473 */
18474
18475- /* Enable PAE mode and PGE */
18476- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
18477+ /* Enable PAE mode and PSE/PGE */
18478+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
18479 movq %rax, %cr4
18480
18481 /* Setup early boot stage 4 level pagetables. */
18482@@ -182,9 +178,17 @@ ENTRY(secondary_startup_64)
18483 movl $MSR_EFER, %ecx
18484 rdmsr
18485 btsl $_EFER_SCE, %eax /* Enable System Call */
18486- btl $20,%edi /* No Execute supported? */
18487+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
18488 jnc 1f
18489 btsl $_EFER_NX, %eax
18490+ leaq init_level4_pgt(%rip), %rdi
18491+#ifndef CONFIG_EFI
18492+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
18493+#endif
18494+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
18495+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
18496+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
18497+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
18498 1: wrmsr /* Make changes effective */
18499
18500 /* Setup cr0 */
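The EFER hunk above replaces the magic btl $20 with btl $(X86_FEATURE_NX & 31): X86_FEATURE_NX is defined as (1*32 + 20), i.e. CPUID leaf 0x80000001 %edx bit 20, so the tested bit is unchanged, merely symbolic. Once NX is enabled, the patch also sets _PAGE_BIT_NX, bit 63, on the freshly added VMALLOC/VMEMMAP top-level entries and in __supported_pte_mask. In C terms:

	#include <stdint.h>

	#define X86_FEATURE_NX	(1 * 32 + 20)	/* cpufeature word 1, bit 20 */
	#define _PAGE_BIT_NX	63

	/* what each btsq $_PAGE_BIT_NX above does to a page-table entry */
	static inline uint64_t pgd_mknx(uint64_t pgd)
	{
		return pgd | (1ULL << _PAGE_BIT_NX);
	}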
18501@@ -246,6 +250,7 @@ ENTRY(secondary_startup_64)
18502 * jump. In addition we need to ensure %cs is set so we make this
18503 * a far return.
18504 */
18505+ pax_set_fptr_mask
18506 movq initial_code(%rip),%rax
18507 pushq $0 # fake return address to stop unwinder
18508 pushq $__KERNEL_CS # set correct cs
18509@@ -268,7 +273,7 @@ ENTRY(secondary_startup_64)
18510 bad_address:
18511 jmp bad_address
18512
18513- .section ".init.text","ax"
18514+ __INIT
18515 .globl early_idt_handlers
18516 early_idt_handlers:
18517 # 104(%rsp) %rflags
18518@@ -347,11 +352,15 @@ ENTRY(early_idt_handler)
18519 addq $16,%rsp # drop vector number and error code
18520 decl early_recursion_flag(%rip)
18521 INTERRUPT_RETURN
18522+ .previous
18523
18524+ __INITDATA
18525 .balign 4
18526 early_recursion_flag:
18527 .long 0
18528+ .previous
18529
18530+ .section .rodata,"a",@progbits
18531 #ifdef CONFIG_EARLY_PRINTK
18532 early_idt_msg:
18533 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
18534@@ -360,6 +369,7 @@ early_idt_ripmsg:
18535 #endif /* CONFIG_EARLY_PRINTK */
18536 .previous
18537
18538+ .section .rodata,"a",@progbits
18539 #define NEXT_PAGE(name) \
18540 .balign PAGE_SIZE; \
18541 ENTRY(name)
18542@@ -372,7 +382,6 @@ ENTRY(name)
18543 i = i + 1 ; \
18544 .endr
18545
18546- .data
18547 /*
18548 * This default setting generates an ident mapping at address 0x100000
18549 * and a mapping for the kernel that precisely maps virtual address
18550@@ -383,13 +392,41 @@ NEXT_PAGE(init_level4_pgt)
18551 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
18552 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
18553 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
18554+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
18555+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
18556+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
18557+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
18558+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
18559+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
18560 .org init_level4_pgt + L4_START_KERNEL*8, 0
18561 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
18562 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
18563
18564+#ifdef CONFIG_PAX_PER_CPU_PGD
18565+NEXT_PAGE(cpu_pgd)
18566+ .rept NR_CPUS
18567+ .fill 512,8,0
18568+ .endr
18569+#endif
18570+
18571 NEXT_PAGE(level3_ident_pgt)
18572 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
18573+#ifdef CONFIG_XEN
18574 .fill 511,8,0
18575+#else
18576+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
18577+ .fill 510,8,0
18578+#endif
18579+
18580+NEXT_PAGE(level3_vmalloc_start_pgt)
18581+ .fill 512,8,0
18582+
18583+NEXT_PAGE(level3_vmalloc_end_pgt)
18584+ .fill 512,8,0
18585+
18586+NEXT_PAGE(level3_vmemmap_pgt)
18587+ .fill L3_VMEMMAP_START,8,0
18588+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
18589
18590 NEXT_PAGE(level3_kernel_pgt)
18591 .fill L3_START_KERNEL,8,0
18592@@ -397,20 +434,23 @@ NEXT_PAGE(level3_kernel_pgt)
18593 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
18594 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
18595
18596+NEXT_PAGE(level2_vmemmap_pgt)
18597+ .fill 512,8,0
18598+
18599 NEXT_PAGE(level2_fixmap_pgt)
18600- .fill 506,8,0
18601- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
18602- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
18603- .fill 5,8,0
18604+ .fill 507,8,0
18605+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
18606+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
18607+ .fill 4,8,0
18608
18609-NEXT_PAGE(level1_fixmap_pgt)
18610+NEXT_PAGE(level1_vsyscall_pgt)
18611 .fill 512,8,0
18612
18613-NEXT_PAGE(level2_ident_pgt)
18614- /* Since I easily can, map the first 1G.
18615+ /* Since I easily can, map the first 2G.
18616 * Don't set NX because code runs from these pages.
18617 */
18618- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
18619+NEXT_PAGE(level2_ident_pgt)
18620+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
18621
18622 NEXT_PAGE(level2_kernel_pgt)
18623 /*
18624@@ -423,37 +463,59 @@ NEXT_PAGE(level2_kernel_pgt)
18625 * If you want to increase this then increase MODULES_VADDR
18626 * too.)
18627 */
18628- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
18629- KERNEL_IMAGE_SIZE/PMD_SIZE)
18630-
18631-NEXT_PAGE(level2_spare_pgt)
18632- .fill 512, 8, 0
18633+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
18634
18635 #undef PMDS
18636 #undef NEXT_PAGE
18637
18638- .data
18639+ .align PAGE_SIZE
18640+ENTRY(cpu_gdt_table)
18641+ .rept NR_CPUS
18642+ .quad 0x0000000000000000 /* NULL descriptor */
18643+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
18644+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
18645+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
18646+ .quad 0x00cffb000000ffff /* __USER32_CS */
18647+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
18648+ .quad 0x00affb000000ffff /* __USER_CS */
18649+
18650+#ifdef CONFIG_PAX_KERNEXEC
18651+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
18652+#else
18653+ .quad 0x0 /* unused */
18654+#endif
18655+
18656+ .quad 0,0 /* TSS */
18657+ .quad 0,0 /* LDT */
18658+ .quad 0,0,0 /* three TLS descriptors */
18659+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
18660+ /* asm/segment.h:GDT_ENTRIES must match this */
18661+
18662+ /* zero the remaining page */
18663+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
18664+ .endr
18665+
18666 .align 16
18667 .globl early_gdt_descr
18668 early_gdt_descr:
18669 .word GDT_ENTRIES*8-1
18670 early_gdt_descr_base:
18671- .quad INIT_PER_CPU_VAR(gdt_page)
18672+ .quad cpu_gdt_table
18673
18674 ENTRY(phys_base)
18675 /* This must match the first entry in level2_kernel_pgt */
18676 .quad 0x0000000000000000
18677
18678 #include "../../x86/xen/xen-head.S"
18679-
18680- .section .bss, "aw", @nobits
18681+
18682+ .section .rodata,"a",@progbits
18683 .align L1_CACHE_BYTES
18684 ENTRY(idt_table)
18685- .skip IDT_ENTRIES * 16
18686+ .fill 512,8,0
18687
18688 .align L1_CACHE_BYTES
18689 ENTRY(nmi_idt_table)
18690- .skip IDT_ENTRIES * 16
18691+ .fill 512,8,0
18692
18693 __PAGE_ALIGNED_BSS
18694 .align PAGE_SIZE
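
/*
 * Illustrative sketch, not part of the patch: the btsq instructions above
 * set bit 63 (_PAGE_BIT_NX) in selected top-level page-table entries once
 * EFER.NX is confirmed, so the vmalloc and vmemmap regions become
 * no-execute. A user-space model of that bit manipulation (the entry
 * value is made up):
 */
#include <stdint.h>
#include <stdio.h>

#define _PAGE_BIT_NX 63

static uint64_t pte_set_nx(uint64_t pte)
{
	return pte | (1ULL << _PAGE_BIT_NX);	/* what "btsq $63, mem" does */
}

int main(void)
{
	uint64_t pgd_entry = 0x0000000001234067ULL;	/* hypothetical entry */

	printf("before: %#018llx\n", (unsigned long long)pgd_entry);
	printf("after:  %#018llx\n", (unsigned long long)pte_set_nx(pgd_entry));
	return 0;
}
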
18695diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
18696index 9c3bd4a..e1d9b35 100644
18697--- a/arch/x86/kernel/i386_ksyms_32.c
18698+++ b/arch/x86/kernel/i386_ksyms_32.c
18699@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
18700 EXPORT_SYMBOL(cmpxchg8b_emu);
18701 #endif
18702
18703+EXPORT_SYMBOL_GPL(cpu_gdt_table);
18704+
18705 /* Networking helper routines. */
18706 EXPORT_SYMBOL(csum_partial_copy_generic);
18707+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
18708+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
18709
18710 EXPORT_SYMBOL(__get_user_1);
18711 EXPORT_SYMBOL(__get_user_2);
18712@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
18713
18714 EXPORT_SYMBOL(csum_partial);
18715 EXPORT_SYMBOL(empty_zero_page);
18716+
18717+#ifdef CONFIG_PAX_KERNEXEC
18718+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
18719+#endif
18720diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
18721index 675a050..95febfd 100644
18722--- a/arch/x86/kernel/i387.c
18723+++ b/arch/x86/kernel/i387.c
18724@@ -55,7 +55,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
18725 static inline bool interrupted_user_mode(void)
18726 {
18727 struct pt_regs *regs = get_irq_regs();
18728- return regs && user_mode_vm(regs);
18729+ return regs && user_mode(regs);
18730 }
18731
18732 /*
18733diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
18734index 9a5c460..dc4374d 100644
18735--- a/arch/x86/kernel/i8259.c
18736+++ b/arch/x86/kernel/i8259.c
18737@@ -209,7 +209,7 @@ spurious_8259A_irq:
18738 "spurious 8259A interrupt: IRQ%d.\n", irq);
18739 spurious_irq_mask |= irqmask;
18740 }
18741- atomic_inc(&irq_err_count);
18742+ atomic_inc_unchecked(&irq_err_count);
18743 /*
18744 * Theoretically we do not have to handle this IRQ,
18745 * but in Linux this does not cause problems and is
18746diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
18747index 8c96897..be66bfa 100644
18748--- a/arch/x86/kernel/ioport.c
18749+++ b/arch/x86/kernel/ioport.c
18750@@ -6,6 +6,7 @@
18751 #include <linux/sched.h>
18752 #include <linux/kernel.h>
18753 #include <linux/capability.h>
18754+#include <linux/security.h>
18755 #include <linux/errno.h>
18756 #include <linux/types.h>
18757 #include <linux/ioport.h>
18758@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
18759
18760 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
18761 return -EINVAL;
18762+#ifdef CONFIG_GRKERNSEC_IO
18763+ if (turn_on && grsec_disable_privio) {
18764+ gr_handle_ioperm();
18765+ return -EPERM;
18766+ }
18767+#endif
18768 if (turn_on && !capable(CAP_SYS_RAWIO))
18769 return -EPERM;
18770
18771@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
18772 * because the ->io_bitmap_max value must match the bitmap
18773 * contents:
18774 */
18775- tss = &per_cpu(init_tss, get_cpu());
18776+ tss = init_tss + get_cpu();
18777
18778 if (turn_on)
18779 bitmap_clear(t->io_bitmap_ptr, from, num);
18780@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
18781 return -EINVAL;
18782 /* Trying to gain more privileges? */
18783 if (level > old) {
18784+#ifdef CONFIG_GRKERNSEC_IO
18785+ if (grsec_disable_privio) {
18786+ gr_handle_iopl();
18787+ return -EPERM;
18788+ }
18789+#endif
18790 if (!capable(CAP_SYS_RAWIO))
18791 return -EPERM;
18792 }
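
/*
 * Illustrative sketch, not part of the patch: the CONFIG_GRKERNSEC_IO hunks
 * above put a site-wide policy veto in front of the usual capability test,
 * so sys_ioperm()/sys_iopl() can be refused even for CAP_SYS_RAWIO holders.
 * The knob and capability below are stand-ins for grsec_disable_privio and
 * capable():
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool grsec_disable_privio = true;	/* assumed sysctl-style knob */
static bool has_cap_sys_rawio = true;		/* stand-in for capable()   */

static int may_enable_port_io(bool turn_on)
{
	if (turn_on && grsec_disable_privio)
		return -EPERM;			/* policy veto comes first  */
	if (turn_on && !has_cap_sys_rawio)
		return -EPERM;			/* then the capability test */
	return 0;
}

int main(void)
{
	printf("ioperm(on) -> %d\n", may_enable_port_io(true));
	return 0;
}
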
18793diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
18794index e4595f1..ee3bfb8 100644
18795--- a/arch/x86/kernel/irq.c
18796+++ b/arch/x86/kernel/irq.c
18797@@ -18,7 +18,7 @@
18798 #include <asm/mce.h>
18799 #include <asm/hw_irq.h>
18800
18801-atomic_t irq_err_count;
18802+atomic_unchecked_t irq_err_count;
18803
18804 /* Function pointer for generic interrupt vector handling */
18805 void (*x86_platform_ipi_callback)(void) = NULL;
18806@@ -122,9 +122,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
18807 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
18808 seq_printf(p, " Machine check polls\n");
18809 #endif
18810- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
18811+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
18812 #if defined(CONFIG_X86_IO_APIC)
18813- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
18814+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
18815 #endif
18816 return 0;
18817 }
18818@@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
18819
18820 u64 arch_irq_stat(void)
18821 {
18822- u64 sum = atomic_read(&irq_err_count);
18823+ u64 sum = atomic_read_unchecked(&irq_err_count);
18824
18825 #ifdef CONFIG_X86_IO_APIC
18826- sum += atomic_read(&irq_mis_count);
18827+ sum += atomic_read_unchecked(&irq_mis_count);
18828 #endif
18829 return sum;
18830 }
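
/*
 * Illustrative sketch, not part of the patch: under PaX's REFCOUNT
 * hardening, ordinary atomic_t arithmetic traps on signed overflow, so
 * statistics counters that may legitimately wrap (irq_err_count,
 * irq_mis_count) are converted to an "unchecked" variant that keeps plain
 * wrapping semantics. A user-space model of that type:
 */
#include <limits.h>
#include <stdio.h>

typedef struct { volatile int counter; } atomic_unchecked_t;

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);	/* wrap is fine */
}

static int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

int main(void)
{
	atomic_unchecked_t err_count = { INT_MAX };

	atomic_inc_unchecked(&err_count);	/* wraps to INT_MIN, by design */
	printf("%d\n", atomic_read_unchecked(&err_count));
	return 0;
}
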
18831diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
18832index 344faf8..355f60d 100644
18833--- a/arch/x86/kernel/irq_32.c
18834+++ b/arch/x86/kernel/irq_32.c
18835@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
18836 __asm__ __volatile__("andl %%esp,%0" :
18837 "=r" (sp) : "0" (THREAD_SIZE - 1));
18838
18839- return sp < (sizeof(struct thread_info) + STACK_WARN);
18840+ return sp < STACK_WARN;
18841 }
18842
18843 static void print_stack_overflow(void)
18844@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
18845 * per-CPU IRQ handling contexts (thread information and stack)
18846 */
18847 union irq_ctx {
18848- struct thread_info tinfo;
18849- u32 stack[THREAD_SIZE/sizeof(u32)];
18850+ unsigned long previous_esp;
18851+ u32 stack[THREAD_SIZE/sizeof(u32)];
18852 } __attribute__((aligned(THREAD_SIZE)));
18853
18854 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
18855@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
18856 static inline int
18857 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18858 {
18859- union irq_ctx *curctx, *irqctx;
18860+ union irq_ctx *irqctx;
18861 u32 *isp, arg1, arg2;
18862
18863- curctx = (union irq_ctx *) current_thread_info();
18864 irqctx = __this_cpu_read(hardirq_ctx);
18865
18866 /*
18867@@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18868 * handler) we can't do that and just have to keep using the
18869 * current stack (which is the irq stack already after all)
18870 */
18871- if (unlikely(curctx == irqctx))
18872+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
18873 return 0;
18874
18875 /* build the stack frame on the IRQ stack */
18876- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
18877- irqctx->tinfo.task = curctx->tinfo.task;
18878- irqctx->tinfo.previous_esp = current_stack_pointer;
18879+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
18880+ irqctx->previous_esp = current_stack_pointer;
18881
18882- /* Copy the preempt_count so that the [soft]irq checks work. */
18883- irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
18884+#ifdef CONFIG_PAX_MEMORY_UDEREF
18885+ __set_fs(MAKE_MM_SEG(0));
18886+#endif
18887
18888 if (unlikely(overflow))
18889 call_on_stack(print_stack_overflow, isp);
18890@@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18891 : "0" (irq), "1" (desc), "2" (isp),
18892 "D" (desc->handle_irq)
18893 : "memory", "cc", "ecx");
18894+
18895+#ifdef CONFIG_PAX_MEMORY_UDEREF
18896+ __set_fs(current_thread_info()->addr_limit);
18897+#endif
18898+
18899 return 1;
18900 }
18901
18902@@ -121,29 +125,14 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18903 */
18904 void __cpuinit irq_ctx_init(int cpu)
18905 {
18906- union irq_ctx *irqctx;
18907-
18908 if (per_cpu(hardirq_ctx, cpu))
18909 return;
18910
18911- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
18912- THREADINFO_GFP,
18913- THREAD_SIZE_ORDER));
18914- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
18915- irqctx->tinfo.cpu = cpu;
18916- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
18917- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
18918-
18919- per_cpu(hardirq_ctx, cpu) = irqctx;
18920-
18921- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
18922- THREADINFO_GFP,
18923- THREAD_SIZE_ORDER));
18924- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
18925- irqctx->tinfo.cpu = cpu;
18926- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
18927-
18928- per_cpu(softirq_ctx, cpu) = irqctx;
18929+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
18930+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
18931+
18932+ printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
18933+ cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
18934
18935 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
18936 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
18937@@ -152,7 +141,6 @@ void __cpuinit irq_ctx_init(int cpu)
18938 asmlinkage void do_softirq(void)
18939 {
18940 unsigned long flags;
18941- struct thread_info *curctx;
18942 union irq_ctx *irqctx;
18943 u32 *isp;
18944
18945@@ -162,15 +150,22 @@ asmlinkage void do_softirq(void)
18946 local_irq_save(flags);
18947
18948 if (local_softirq_pending()) {
18949- curctx = current_thread_info();
18950 irqctx = __this_cpu_read(softirq_ctx);
18951- irqctx->tinfo.task = curctx->task;
18952- irqctx->tinfo.previous_esp = current_stack_pointer;
18953+ irqctx->previous_esp = current_stack_pointer;
18954
18955 /* build the stack frame on the softirq stack */
18956- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
18957+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
18958+
18959+#ifdef CONFIG_PAX_MEMORY_UDEREF
18960+ __set_fs(MAKE_MM_SEG(0));
18961+#endif
18962
18963 call_on_stack(__do_softirq, isp);
18964+
18965+#ifdef CONFIG_PAX_MEMORY_UDEREF
18966+ __set_fs(current_thread_info()->addr_limit);
18967+#endif
18968+
18969 /*
18970 * Shouldn't happen, we returned above if in_interrupt():
18971 */
18972@@ -191,7 +186,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
18973 if (unlikely(!desc))
18974 return false;
18975
18976- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
18977+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
18978 if (unlikely(overflow))
18979 print_stack_overflow();
18980 desc->handle_irq(irq, desc);
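
/*
 * Illustrative sketch, not part of the patch: after the rework above the
 * per-CPU IRQ stack no longer carries a struct thread_info; only the
 * previous stack pointer lives at its base, and the new frame is built 8
 * bytes below the top of the stack. A model of that layout:
 */
#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 8192

union irq_ctx {
	unsigned long previous_esp;		/* stored at the stack base */
	uint32_t stack[THREAD_SIZE / sizeof(uint32_t)];
};

int main(void)
{
	static union irq_ctx ctx __attribute__((aligned(THREAD_SIZE)));
	/* top of the IRQ stack minus 8 bytes of headroom, as in the patch */
	uint32_t *isp = (uint32_t *)((char *)&ctx + sizeof(ctx) - 8);

	printf("base=%p frame=%p\n", (void *)&ctx, (void *)isp);
	return 0;
}
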
18981diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
18982index d04d3ec..ea4b374 100644
18983--- a/arch/x86/kernel/irq_64.c
18984+++ b/arch/x86/kernel/irq_64.c
18985@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
18986 u64 estack_top, estack_bottom;
18987 u64 curbase = (u64)task_stack_page(current);
18988
18989- if (user_mode_vm(regs))
18990+ if (user_mode(regs))
18991 return;
18992
18993 if (regs->sp >= curbase + sizeof(struct thread_info) +
18994diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
18995index dc1404b..bbc43e7 100644
18996--- a/arch/x86/kernel/kdebugfs.c
18997+++ b/arch/x86/kernel/kdebugfs.c
18998@@ -27,7 +27,7 @@ struct setup_data_node {
18999 u32 len;
19000 };
19001
19002-static ssize_t setup_data_read(struct file *file, char __user *user_buf,
19003+static ssize_t __size_overflow(3) setup_data_read(struct file *file, char __user *user_buf,
19004 size_t count, loff_t *ppos)
19005 {
19006 struct setup_data_node *node = file->private_data;
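
/*
 * Illustrative sketch, not part of the patch: __size_overflow(3) marks the
 * third parameter for grsecurity's size_overflow GCC plugin, which
 * instruments the arithmetic feeding that parameter and traps on overflow.
 * Without the plugin the marker can expand to nothing, as modeled here
 * (bounded_read is a hypothetical function):
 */
#ifndef __size_overflow
# define __size_overflow(...)	/* plugin absent: no-op annotation */
#endif

#include <stdio.h>
#include <sys/types.h>

static ssize_t __size_overflow(3) bounded_read(char *dst, const char *src,
					       size_t count)
{
	/* with the plugin, expressions computing 'count' would be checked */
	for (size_t i = 0; i < count; i++)
		dst[i] = src[i];
	return (ssize_t)count;
}

int main(void)
{
	char buf[8];

	printf("%zd\n", bounded_read(buf, "hi", 2));
	return 0;
}
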
19007diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
19008index 836f832..a8bda67 100644
19009--- a/arch/x86/kernel/kgdb.c
19010+++ b/arch/x86/kernel/kgdb.c
19011@@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
19012 #ifdef CONFIG_X86_32
19013 switch (regno) {
19014 case GDB_SS:
19015- if (!user_mode_vm(regs))
19016+ if (!user_mode(regs))
19017 *(unsigned long *)mem = __KERNEL_DS;
19018 break;
19019 case GDB_SP:
19020- if (!user_mode_vm(regs))
19021+ if (!user_mode(regs))
19022 *(unsigned long *)mem = kernel_stack_pointer(regs);
19023 break;
19024 case GDB_GS:
19025@@ -229,7 +229,10 @@ static void kgdb_correct_hw_break(void)
19026 bp->attr.bp_addr = breakinfo[breakno].addr;
19027 bp->attr.bp_len = breakinfo[breakno].len;
19028 bp->attr.bp_type = breakinfo[breakno].type;
19029- info->address = breakinfo[breakno].addr;
19030+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
19031+ info->address = ktla_ktva(breakinfo[breakno].addr);
19032+ else
19033+ info->address = breakinfo[breakno].addr;
19034 info->len = breakinfo[breakno].len;
19035 info->type = breakinfo[breakno].type;
19036 val = arch_install_hw_breakpoint(bp);
19037@@ -476,12 +479,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
19038 case 'k':
19039 /* clear the trace bit */
19040 linux_regs->flags &= ~X86_EFLAGS_TF;
19041- atomic_set(&kgdb_cpu_doing_single_step, -1);
19042+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
19043
19044 /* set the trace bit if we're stepping */
19045 if (remcomInBuffer[0] == 's') {
19046 linux_regs->flags |= X86_EFLAGS_TF;
19047- atomic_set(&kgdb_cpu_doing_single_step,
19048+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
19049 raw_smp_processor_id());
19050 }
19051
19052@@ -546,7 +549,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
19053
19054 switch (cmd) {
19055 case DIE_DEBUG:
19056- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
19057+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
19058 if (user_mode(regs))
19059 return single_step_cont(regs, args);
19060 break;
19061@@ -751,11 +754,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
19062 #endif /* CONFIG_DEBUG_RODATA */
19063
19064 bpt->type = BP_BREAKPOINT;
19065- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
19066+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
19067 BREAK_INSTR_SIZE);
19068 if (err)
19069 return err;
19070- err = probe_kernel_write((char *)bpt->bpt_addr,
19071+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
19072 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
19073 #ifdef CONFIG_DEBUG_RODATA
19074 if (!err)
19075@@ -768,7 +771,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
19076 return -EBUSY;
19077 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
19078 BREAK_INSTR_SIZE);
19079- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
19080+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
19081 if (err)
19082 return err;
19083 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
19084@@ -793,13 +796,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
19085 if (mutex_is_locked(&text_mutex))
19086 goto knl_write;
19087 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
19088- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
19089+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
19090 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
19091 goto knl_write;
19092 return err;
19093 knl_write:
19094 #endif /* CONFIG_DEBUG_RODATA */
19095- return probe_kernel_write((char *)bpt->bpt_addr,
19096+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
19097 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
19098 }
19099
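
/*
 * Illustrative sketch, not the kernel's definition: under PaX KERNEXEC on
 * i386 the kernel text is executed from a different virtual alias than the
 * address it was linked at, so kgdb must translate breakpoint addresses
 * before reading or patching text. ktla_ktva()/ktva_ktla() shift by a
 * constant delta; the delta value below is an assumption:
 */
#include <stdio.h>

static const unsigned long kernexec_delta = 0x10000000UL;	/* hypothetical */

static unsigned long ktla_ktva(unsigned long addr)	/* linked -> executable */
{
	return addr + kernexec_delta;
}

static unsigned long ktva_ktla(unsigned long addr)	/* executable -> linked */
{
	return addr - kernexec_delta;
}

int main(void)
{
	unsigned long linked = 0xc1000000UL;		/* made-up text address */

	printf("%#lx -> %#lx -> %#lx\n",
	       linked, ktla_ktva(linked), ktva_ktla(ktla_ktva(linked)));
	return 0;
}
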
19100diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes-opt.c
19101index c5e410e..ed5a7f0 100644
19102--- a/arch/x86/kernel/kprobes-opt.c
19103+++ b/arch/x86/kernel/kprobes-opt.c
19104@@ -338,7 +338,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
19105 * Verify if the address gap is in 2GB range, because this uses
19106 * a relative jump.
19107 */
19108- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
19109+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
19110 if (abs(rel) > 0x7fffffff)
19111 return -ERANGE;
19112
19113@@ -353,16 +353,16 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
19114 op->optinsn.size = ret;
19115
19116 /* Copy arch-dep-instance from template */
19117- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
19118+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
19119
19120 /* Set probe information */
19121 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
19122
19123 /* Set probe function call */
19124- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
19125+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
19126
19127 /* Set returning jmp instruction at the tail of out-of-line buffer */
19128- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
19129+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
19130 (u8 *)op->kp.addr + op->optinsn.size);
19131
19132 flush_icache_range((unsigned long) buf,
19133@@ -385,7 +385,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
19134 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
19135
19136 /* Backup instructions which will be replaced by jump address */
19137- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
19138+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
19139 RELATIVE_ADDR_SIZE);
19140
19141 insn_buf[0] = RELATIVEJUMP_OPCODE;
19142@@ -483,7 +483,7 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
19143 /* This kprobe is really able to run optimized path. */
19144 op = container_of(p, struct optimized_kprobe, kp);
19145 /* Detour through copied instructions */
19146- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
19147+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
19148 if (!reenter)
19149 reset_current_kprobe();
19150 preempt_enable_no_resched();
19151diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
19152index 57916c0..9e0b9d0 100644
19153--- a/arch/x86/kernel/kprobes.c
19154+++ b/arch/x86/kernel/kprobes.c
19155@@ -119,9 +119,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
19156 s32 raddr;
19157 } __attribute__((packed)) *insn;
19158
19159- insn = (struct __arch_relative_insn *)from;
19160+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
19161+
19162+ pax_open_kernel();
19163 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
19164 insn->op = op;
19165+ pax_close_kernel();
19166 }
19167
19168 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
19169@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
19170 kprobe_opcode_t opcode;
19171 kprobe_opcode_t *orig_opcodes = opcodes;
19172
19173- if (search_exception_tables((unsigned long)opcodes))
19174+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
19175 return 0; /* Page fault may occur on this address. */
19176
19177 retry:
19178@@ -238,9 +241,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
19179 * for the first byte, we can recover the original instruction
19180 * from it and kp->opcode.
19181 */
19182- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
19183+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
19184 buf[0] = kp->opcode;
19185- return (unsigned long)buf;
19186+ return ktva_ktla((unsigned long)buf);
19187 }
19188
19189 /*
19190@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
19191 /* Another subsystem puts a breakpoint, failed to recover */
19192 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
19193 return 0;
19194+ pax_open_kernel();
19195 memcpy(dest, insn.kaddr, insn.length);
19196+ pax_close_kernel();
19197
19198 #ifdef CONFIG_X86_64
19199 if (insn_rip_relative(&insn)) {
19200@@ -355,7 +360,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
19201 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
19202 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
19203 disp = (u8 *) dest + insn_offset_displacement(&insn);
19204+ pax_open_kernel();
19205 *(s32 *) disp = (s32) newdisp;
19206+ pax_close_kernel();
19207 }
19208 #endif
19209 return insn.length;
19210@@ -485,7 +492,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
19211 * nor set current_kprobe, because it doesn't use single
19212 * stepping.
19213 */
19214- regs->ip = (unsigned long)p->ainsn.insn;
19215+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
19216 preempt_enable_no_resched();
19217 return;
19218 }
19219@@ -502,9 +509,9 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
19220 regs->flags &= ~X86_EFLAGS_IF;
19221 /* single step inline if the instruction is an int3 */
19222 if (p->opcode == BREAKPOINT_INSTRUCTION)
19223- regs->ip = (unsigned long)p->addr;
19224+ regs->ip = ktla_ktva((unsigned long)p->addr);
19225 else
19226- regs->ip = (unsigned long)p->ainsn.insn;
19227+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
19228 }
19229
19230 /*
19231@@ -600,7 +607,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
19232 setup_singlestep(p, regs, kcb, 0);
19233 return 1;
19234 }
19235- } else if (*addr != BREAKPOINT_INSTRUCTION) {
19236+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
19237 /*
19238 * The breakpoint instruction was removed right
19239 * after we hit it. Another cpu has removed
19240@@ -651,6 +658,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
19241 " movq %rax, 152(%rsp)\n"
19242 RESTORE_REGS_STRING
19243 " popfq\n"
19244+#ifdef KERNEXEC_PLUGIN
19245+ " btsq $63,(%rsp)\n"
19246+#endif
19247 #else
19248 " pushf\n"
19249 SAVE_REGS_STRING
19250@@ -788,7 +798,7 @@ static void __kprobes
19251 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
19252 {
19253 unsigned long *tos = stack_addr(regs);
19254- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
19255+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
19256 unsigned long orig_ip = (unsigned long)p->addr;
19257 kprobe_opcode_t *insn = p->ainsn.insn;
19258
19259@@ -970,7 +980,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
19260 struct die_args *args = data;
19261 int ret = NOTIFY_DONE;
19262
19263- if (args->regs && user_mode_vm(args->regs))
19264+ if (args->regs && user_mode(args->regs))
19265 return ret;
19266
19267 switch (val) {
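
/*
 * Illustrative sketch, not the PaX implementation: pax_open_kernel() and
 * pax_close_kernel() bracket writes to normally read-only kernel memory
 * (under KERNEXEC this amounts to toggling write protection around the
 * store). The closest user-space analogue is flipping page permissions
 * around a one-byte patch:
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *text = mmap(NULL, page, PROT_READ,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (text == MAP_FAILED)
		return 1;

	mprotect(text, page, PROT_READ | PROT_WRITE);	/* "pax_open_kernel"  */
	memcpy(text, "\xcc", 1);			/* patch one byte     */
	mprotect(text, page, PROT_READ);		/* "pax_close_kernel" */

	printf("patched byte: %#x\n", text[0] & 0xff);
	return 0;
}
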
19268diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
19269index 4180a87..4678e4f 100644
19270--- a/arch/x86/kernel/kvm.c
19271+++ b/arch/x86/kernel/kvm.c
19272@@ -267,7 +267,7 @@ static void __init paravirt_ops_setup(void)
19273 pv_info.paravirt_enabled = 1;
19274
19275 if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
19276- pv_cpu_ops.io_delay = kvm_io_delay;
19277+ *(void **)&pv_cpu_ops.io_delay = kvm_io_delay;
19278
19279 #ifdef CONFIG_X86_IO_APIC
19280 no_timer_check = 1;
19281@@ -461,18 +461,18 @@ void __init kvm_guest_init(void)
19282 for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
19283 spin_lock_init(&async_pf_sleepers[i].lock);
19284 if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
19285- x86_init.irqs.trap_init = kvm_apf_trap_init;
19286+ *(void **)&x86_init.irqs.trap_init = kvm_apf_trap_init;
19287
19288 if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
19289 has_steal_clock = 1;
19290- pv_time_ops.steal_clock = kvm_steal_clock;
19291+ *(void **)&pv_time_ops.steal_clock = kvm_steal_clock;
19292 }
19293
19294 if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
19295 apic_set_eoi_write(kvm_guest_apic_eoi_write);
19296
19297 #ifdef CONFIG_SMP
19298- smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
19299+ *(void **)&smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
19300 register_cpu_notifier(&kvm_cpu_notifier);
19301 #else
19302 kvm_guest_cpu_init();
19303diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
19304index f1b42b3..27ac4e7 100644
19305--- a/arch/x86/kernel/kvmclock.c
19306+++ b/arch/x86/kernel/kvmclock.c
19307@@ -211,19 +211,19 @@ void __init kvmclock_init(void)
19308
19309 if (kvm_register_clock("boot clock"))
19310 return;
19311- pv_time_ops.sched_clock = kvm_clock_read;
19312- x86_platform.calibrate_tsc = kvm_get_tsc_khz;
19313- x86_platform.get_wallclock = kvm_get_wallclock;
19314- x86_platform.set_wallclock = kvm_set_wallclock;
19315+ *(void **)&pv_time_ops.sched_clock = kvm_clock_read;
19316+ *(void **)&x86_platform.calibrate_tsc = kvm_get_tsc_khz;
19317+ *(void **)&x86_platform.get_wallclock = kvm_get_wallclock;
19318+ *(void **)&x86_platform.set_wallclock = kvm_set_wallclock;
19319 #ifdef CONFIG_X86_LOCAL_APIC
19320- x86_cpuinit.early_percpu_clock_init =
19321+ *(void **)&x86_cpuinit.early_percpu_clock_init =
19322 kvm_setup_secondary_clock;
19323 #endif
19324- x86_platform.save_sched_clock_state = kvm_save_sched_clock_state;
19325- x86_platform.restore_sched_clock_state = kvm_restore_sched_clock_state;
19326- machine_ops.shutdown = kvm_shutdown;
19327+ *(void **)&x86_platform.save_sched_clock_state = kvm_save_sched_clock_state;
19328+ *(void **)&x86_platform.restore_sched_clock_state = kvm_restore_sched_clock_state;
19329+ *(void **)&machine_ops.shutdown = kvm_shutdown;
19330 #ifdef CONFIG_KEXEC
19331- machine_ops.crash_shutdown = kvm_crash_shutdown;
19332+ *(void **)&machine_ops.crash_shutdown = kvm_crash_shutdown;
19333 #endif
19334 kvm_get_preset_lpj();
19335 clocksource_register_hz(&kvm_clock, NSEC_PER_SEC);
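
/*
 * Illustrative sketch, not part of the patch: once the platform/paravirt
 * ops structures are const-qualified (__read_only later in this patch),
 * direct member assignment no longer compiles, so early-boot overrides are
 * written through a void-pointer cast while the storage is still writable.
 * The struct and values below are stand-ins:
 */
#include <stdio.h>

struct platform_ops {
	long (*get_wallclock)(void);
};

static long kvm_get_wallclock(void) { return 1358000000L; }	/* made up */

static struct platform_ops ops_storage;			/* writable backing */
static const struct platform_ops *const x86_platform = &ops_storage;

int main(void)
{
	/* x86_platform->get_wallclock = kvm_get_wallclock;  <- won't compile */
	*(void **)&x86_platform->get_wallclock = (void *)kvm_get_wallclock;

	printf("%ld\n", x86_platform->get_wallclock());
	return 0;
}
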
19336diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
19337index ebc9873..1b9724b 100644
19338--- a/arch/x86/kernel/ldt.c
19339+++ b/arch/x86/kernel/ldt.c
19340@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
19341 if (reload) {
19342 #ifdef CONFIG_SMP
19343 preempt_disable();
19344- load_LDT(pc);
19345+ load_LDT_nolock(pc);
19346 if (!cpumask_equal(mm_cpumask(current->mm),
19347 cpumask_of(smp_processor_id())))
19348 smp_call_function(flush_ldt, current->mm, 1);
19349 preempt_enable();
19350 #else
19351- load_LDT(pc);
19352+ load_LDT_nolock(pc);
19353 #endif
19354 }
19355 if (oldsize) {
19356@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
19357 return err;
19358
19359 for (i = 0; i < old->size; i++)
19360- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
19361+ write_ldt_entry(new->ldt, i, old->ldt + i);
19362 return 0;
19363 }
19364
19365@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
19366 retval = copy_ldt(&mm->context, &old_mm->context);
19367 mutex_unlock(&old_mm->context.lock);
19368 }
19369+
19370+ if (tsk == current) {
19371+ mm->context.vdso = 0;
19372+
19373+#ifdef CONFIG_X86_32
19374+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19375+ mm->context.user_cs_base = 0UL;
19376+ mm->context.user_cs_limit = ~0UL;
19377+
19378+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
19379+ cpus_clear(mm->context.cpu_user_cs_mask);
19380+#endif
19381+
19382+#endif
19383+#endif
19384+
19385+ }
19386+
19387 return retval;
19388 }
19389
19390@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
19391 }
19392 }
19393
19394+#ifdef CONFIG_PAX_SEGMEXEC
19395+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
19396+ error = -EINVAL;
19397+ goto out_unlock;
19398+ }
19399+#endif
19400+
19401 fill_ldt(&ldt, &ldt_info);
19402 if (oldmode)
19403 ldt.avl = 0;
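
/*
 * Illustrative sketch, not part of the patch: under PaX SEGMEXEC the user
 * address space is split and code runs only from the executable mirror, so
 * letting a task install an executable LDT segment would bypass the split;
 * the added check rejects such entries. MF_PAX_SEGMEXEC's value below is a
 * stand-in:
 */
#include <stdio.h>

#define MODIFY_LDT_CONTENTS_CODE	2	/* as in asm/ldt.h     */
#define MF_PAX_SEGMEXEC			0x1U	/* stand-in flag value */

static int ldt_entry_allowed(unsigned int pax_flags, unsigned int contents)
{
	if ((pax_flags & MF_PAX_SEGMEXEC) &&
	    (contents & MODIFY_LDT_CONTENTS_CODE))
		return 0;			/* -EINVAL in the patch */
	return 1;
}

int main(void)
{
	printf("code entry under SEGMEXEC allowed? %d\n",
	       ldt_entry_allowed(MF_PAX_SEGMEXEC, MODIFY_LDT_CONTENTS_CODE));
	return 0;
}
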
19404diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
19405index 5b19e4d..6476a76 100644
19406--- a/arch/x86/kernel/machine_kexec_32.c
19407+++ b/arch/x86/kernel/machine_kexec_32.c
19408@@ -26,7 +26,7 @@
19409 #include <asm/cacheflush.h>
19410 #include <asm/debugreg.h>
19411
19412-static void set_idt(void *newidt, __u16 limit)
19413+static void set_idt(struct desc_struct *newidt, __u16 limit)
19414 {
19415 struct desc_ptr curidt;
19416
19417@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
19418 }
19419
19420
19421-static void set_gdt(void *newgdt, __u16 limit)
19422+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
19423 {
19424 struct desc_ptr curgdt;
19425
19426@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
19427 }
19428
19429 control_page = page_address(image->control_code_page);
19430- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
19431+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
19432
19433 relocate_kernel_ptr = control_page;
19434 page_list[PA_CONTROL_PAGE] = __pa(control_page);
19435diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
19436index 3544aed..01ddc1c 100644
19437--- a/arch/x86/kernel/microcode_intel.c
19438+++ b/arch/x86/kernel/microcode_intel.c
19439@@ -431,13 +431,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
19440
19441 static int get_ucode_user(void *to, const void *from, size_t n)
19442 {
19443- return copy_from_user(to, from, n);
19444+ return copy_from_user(to, (const void __force_user *)from, n);
19445 }
19446
19447 static enum ucode_state
19448 request_microcode_user(int cpu, const void __user *buf, size_t size)
19449 {
19450- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
19451+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
19452 }
19453
19454 static void microcode_fini_cpu(int cpu)
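
/*
 * Illustrative sketch, not part of the patch: __force_user/__force_kernel
 * above are sparse annotations, not runtime casts. Under PaX UDEREF user
 * and kernel pointers belong to distinct address spaces, so every crossing
 * must be explicit. A minimal model of the annotation scheme, checked only
 * when compiling with sparse (make C=1):
 */
#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
#else
# define __user
# define __force
#endif

#include <stdio.h>

static const void __user *to_user_ptr(const void *p)
{
	return (__force const void __user *)p;	/* explicit, audited cast */
}

int main(void)
{
	char buf[4] = "abc";

	printf("%p\n", (__force void *)to_user_ptr(buf));
	return 0;
}
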
19455diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
19456index 216a4d7..b328f09 100644
19457--- a/arch/x86/kernel/module.c
19458+++ b/arch/x86/kernel/module.c
19459@@ -43,15 +43,60 @@ do { \
19460 } while (0)
19461 #endif
19462
19463-void *module_alloc(unsigned long size)
19464+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
19465 {
19466- if (PAGE_ALIGN(size) > MODULES_LEN)
19467+ if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
19468 return NULL;
19469 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
19470- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
19471+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
19472 -1, __builtin_return_address(0));
19473 }
19474
19475+void *module_alloc(unsigned long size)
19476+{
19477+
19478+#ifdef CONFIG_PAX_KERNEXEC
19479+ return __module_alloc(size, PAGE_KERNEL);
19480+#else
19481+ return __module_alloc(size, PAGE_KERNEL_EXEC);
19482+#endif
19483+
19484+}
19485+
19486+#ifdef CONFIG_PAX_KERNEXEC
19487+#ifdef CONFIG_X86_32
19488+void *module_alloc_exec(unsigned long size)
19489+{
19490+ struct vm_struct *area;
19491+
19492+ if (size == 0)
19493+ return NULL;
19494+
19495+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
19496+ return area ? area->addr : NULL;
19497+}
19498+EXPORT_SYMBOL(module_alloc_exec);
19499+
19500+void module_free_exec(struct module *mod, void *module_region)
19501+{
19502+ vunmap(module_region);
19503+}
19504+EXPORT_SYMBOL(module_free_exec);
19505+#else
19506+void module_free_exec(struct module *mod, void *module_region)
19507+{
19508+ module_free(mod, module_region);
19509+}
19510+EXPORT_SYMBOL(module_free_exec);
19511+
19512+void *module_alloc_exec(unsigned long size)
19513+{
19514+ return __module_alloc(size, PAGE_KERNEL_RX);
19515+}
19516+EXPORT_SYMBOL(module_alloc_exec);
19517+#endif
19518+#endif
19519+
19520 #ifdef CONFIG_X86_32
19521 int apply_relocate(Elf32_Shdr *sechdrs,
19522 const char *strtab,
19523@@ -62,14 +107,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
19524 unsigned int i;
19525 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
19526 Elf32_Sym *sym;
19527- uint32_t *location;
19528+ uint32_t *plocation, location;
19529
19530 DEBUGP("Applying relocate section %u to %u\n",
19531 relsec, sechdrs[relsec].sh_info);
19532 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
19533 /* This is where to make the change */
19534- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
19535- + rel[i].r_offset;
19536+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
19537+ location = (uint32_t)plocation;
19538+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
19539+ plocation = ktla_ktva((void *)plocation);
19540 /* This is the symbol it is referring to. Note that all
19541 undefined symbols have been resolved. */
19542 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
19543@@ -78,11 +125,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
19544 switch (ELF32_R_TYPE(rel[i].r_info)) {
19545 case R_386_32:
19546 /* We add the value into the location given */
19547- *location += sym->st_value;
19548+ pax_open_kernel();
19549+ *plocation += sym->st_value;
19550+ pax_close_kernel();
19551 break;
19552 case R_386_PC32:
19553 /* Add the value, subtract its position */
19554- *location += sym->st_value - (uint32_t)location;
19555+ pax_open_kernel();
19556+ *plocation += sym->st_value - location;
19557+ pax_close_kernel();
19558 break;
19559 default:
19560 pr_err("%s: Unknown relocation: %u\n",
19561@@ -127,21 +178,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
19562 case R_X86_64_NONE:
19563 break;
19564 case R_X86_64_64:
19565+ pax_open_kernel();
19566 *(u64 *)loc = val;
19567+ pax_close_kernel();
19568 break;
19569 case R_X86_64_32:
19570+ pax_open_kernel();
19571 *(u32 *)loc = val;
19572+ pax_close_kernel();
19573 if (val != *(u32 *)loc)
19574 goto overflow;
19575 break;
19576 case R_X86_64_32S:
19577+ pax_open_kernel();
19578 *(s32 *)loc = val;
19579+ pax_close_kernel();
19580 if ((s64)val != *(s32 *)loc)
19581 goto overflow;
19582 break;
19583 case R_X86_64_PC32:
19584 val -= (u64)loc;
19585+ pax_open_kernel();
19586 *(u32 *)loc = val;
19587+ pax_close_kernel();
19588+
19589 #if 0
19590 if ((s64)val != *(s32 *)loc)
19591 goto overflow;
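
/*
 * Illustrative sketch, not part of the patch: with KERNEXEC, module memory
 * stops being a single writable+executable arena; module_alloc() hands out
 * non-executable pages for data while module_alloc_exec() hands out
 * executable, non-writable pages for code. A user-space analogue of the
 * two allocators:
 */
#include <stddef.h>
#include <stdio.h>
#include <sys/mman.h>

static void *module_alloc(size_t size)		/* data: writable, never exec */
{
	return mmap(NULL, size, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}

static void *module_alloc_exec(size_t size)	/* code: exec, not writable */
{
	return mmap(NULL, size, PROT_READ | PROT_EXEC,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}

int main(void)
{
	/* real module loading would write code through a separate RW window */
	printf("data=%p code=%p\n", module_alloc(4096), module_alloc_exec(4096));
	return 0;
}
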
19592diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
19593index f84f5c5..e27e54b 100644
19594--- a/arch/x86/kernel/nmi.c
19595+++ b/arch/x86/kernel/nmi.c
19596@@ -479,6 +479,17 @@ static inline void nmi_nesting_postprocess(void)
19597 dotraplinkage notrace __kprobes void
19598 do_nmi(struct pt_regs *regs, long error_code)
19599 {
19600+
19601+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19602+ if (!user_mode(regs)) {
19603+ unsigned long cs = regs->cs & 0xFFFF;
19604+ unsigned long ip = ktva_ktla(regs->ip);
19605+
19606+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
19607+ regs->ip = ip;
19608+ }
19609+#endif
19610+
19611 nmi_nesting_preprocess(regs);
19612
19613 nmi_enter();
19614diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
19615index 676b8c7..870ba04 100644
19616--- a/arch/x86/kernel/paravirt-spinlocks.c
19617+++ b/arch/x86/kernel/paravirt-spinlocks.c
19618@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
19619 arch_spin_lock(lock);
19620 }
19621
19622-struct pv_lock_ops pv_lock_ops = {
19623+struct pv_lock_ops pv_lock_ops __read_only = {
19624 #ifdef CONFIG_SMP
19625 .spin_is_locked = __ticket_spin_is_locked,
19626 .spin_is_contended = __ticket_spin_is_contended,
19627diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
19628index 17fff18..0f5f957 100644
19629--- a/arch/x86/kernel/paravirt.c
19630+++ b/arch/x86/kernel/paravirt.c
19631@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
19632 {
19633 return x;
19634 }
19635+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
19636+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
19637+#endif
19638
19639 void __init default_banner(void)
19640 {
19641@@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
19642 if (opfunc == NULL)
19643 /* If there's no function, patch it with a ud2a (BUG) */
19644 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
19645- else if (opfunc == _paravirt_nop)
19646+ else if (opfunc == (void *)_paravirt_nop)
19647 /* If the operation is a nop, then nop the callsite */
19648 ret = paravirt_patch_nop();
19649
19650 /* identity functions just return their single argument */
19651- else if (opfunc == _paravirt_ident_32)
19652+ else if (opfunc == (void *)_paravirt_ident_32)
19653 ret = paravirt_patch_ident_32(insnbuf, len);
19654- else if (opfunc == _paravirt_ident_64)
19655+ else if (opfunc == (void *)_paravirt_ident_64)
19656 ret = paravirt_patch_ident_64(insnbuf, len);
19657+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
19658+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
19659+ ret = paravirt_patch_ident_64(insnbuf, len);
19660+#endif
19661
19662 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
19663 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
19664@@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
19665 if (insn_len > len || start == NULL)
19666 insn_len = len;
19667 else
19668- memcpy(insnbuf, start, insn_len);
19669+ memcpy(insnbuf, ktla_ktva(start), insn_len);
19670
19671 return insn_len;
19672 }
19673@@ -304,7 +311,7 @@ void arch_flush_lazy_mmu_mode(void)
19674 preempt_enable();
19675 }
19676
19677-struct pv_info pv_info = {
19678+struct pv_info pv_info __read_only = {
19679 .name = "bare hardware",
19680 .paravirt_enabled = 0,
19681 .kernel_rpl = 0,
19682@@ -324,7 +331,7 @@ struct pv_time_ops pv_time_ops = {
19683 .steal_clock = native_steal_clock,
19684 };
19685
19686-struct pv_irq_ops pv_irq_ops = {
19687+struct pv_irq_ops pv_irq_ops __read_only = {
19688 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
19689 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
19690 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
19691@@ -401,15 +408,20 @@ struct pv_apic_ops pv_apic_ops = {
19692 #endif
19693 };
19694
19695-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
19696+#ifdef CONFIG_X86_32
19697+#ifdef CONFIG_X86_PAE
19698+/* 64-bit pagetable entries */
19699+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
19700+#else
19701 /* 32-bit pagetable entries */
19702 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
19703+#endif
19704 #else
19705 /* 64-bit pagetable entries */
19706 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
19707 #endif
19708
19709-struct pv_mmu_ops pv_mmu_ops = {
19710+struct pv_mmu_ops pv_mmu_ops __read_only = {
19711
19712 .read_cr2 = native_read_cr2,
19713 .write_cr2 = native_write_cr2,
19714@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
19715 .make_pud = PTE_IDENT,
19716
19717 .set_pgd = native_set_pgd,
19718+ .set_pgd_batched = native_set_pgd_batched,
19719 #endif
19720 #endif /* PAGETABLE_LEVELS >= 3 */
19721
19722@@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
19723 },
19724
19725 .set_fixmap = native_set_fixmap,
19726+
19727+#ifdef CONFIG_PAX_KERNEXEC
19728+ .pax_open_kernel = native_pax_open_kernel,
19729+ .pax_close_kernel = native_pax_close_kernel,
19730+#endif
19731+
19732 };
19733
19734 EXPORT_SYMBOL_GPL(pv_time_ops);
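
/*
 * Illustrative sketch, not part of the patch: the __read_only annotations
 * above move the ops structures into a data section that PaX
 * write-protects after boot, so a runtime exploit cannot simply repoint
 * the paravirt hooks. The section name here is an assumption:
 */
#include <stdio.h>

#define __read_only __attribute__((__section__(".data..read_only")))

struct pv_lock_ops_model {
	int (*spin_trylock)(void *lock);
};

static struct pv_lock_ops_model pv_lock_ops_model __read_only = {
	.spin_trylock = NULL,	/* filled in before the section is sealed */
};

int main(void)
{
	printf("ops live at %p\n", (void *)&pv_lock_ops_model);
	return 0;
}
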
19735diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
19736index 299d493..79c13dd 100644
19737--- a/arch/x86/kernel/pci-calgary_64.c
19738+++ b/arch/x86/kernel/pci-calgary_64.c
19739@@ -1461,7 +1461,7 @@ int __init detect_calgary(void)
19740 printk(KERN_INFO "PCI-DMA: Calgary TCE table spec is %d\n",
19741 specified_table_size);
19742
19743- x86_init.iommu.iommu_init = calgary_iommu_init;
19744+ *(void **)&x86_init.iommu.iommu_init = calgary_iommu_init;
19745 }
19746 return calgary_found;
19747
19748diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
19749index 35ccf75..7a15747 100644
19750--- a/arch/x86/kernel/pci-iommu_table.c
19751+++ b/arch/x86/kernel/pci-iommu_table.c
19752@@ -2,7 +2,7 @@
19753 #include <asm/iommu_table.h>
19754 #include <linux/string.h>
19755 #include <linux/kallsyms.h>
19756-
19757+#include <linux/sched.h>
19758
19759 #define DEBUG 1
19760
19761diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
19762index b644e1c..4a6d379 100644
19763--- a/arch/x86/kernel/process.c
19764+++ b/arch/x86/kernel/process.c
19765@@ -36,7 +36,8 @@
19766 * section. Since TSS's are completely CPU-local, we want them
19767 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
19768 */
19769-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
19770+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
19771+EXPORT_SYMBOL(init_tss);
19772
19773 #ifdef CONFIG_X86_64
19774 static DEFINE_PER_CPU(unsigned char, is_idle);
19775@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
19776 task_xstate_cachep =
19777 kmem_cache_create("task_xstate", xstate_size,
19778 __alignof__(union thread_xstate),
19779- SLAB_PANIC | SLAB_NOTRACK, NULL);
19780+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
19781 }
19782
19783 /*
19784@@ -105,7 +106,7 @@ void exit_thread(void)
19785 unsigned long *bp = t->io_bitmap_ptr;
19786
19787 if (bp) {
19788- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
19789+ struct tss_struct *tss = init_tss + get_cpu();
19790
19791 t->io_bitmap_ptr = NULL;
19792 clear_thread_flag(TIF_IO_BITMAP);
19793@@ -136,7 +137,7 @@ void show_regs_common(void)
19794 board = dmi_get_system_info(DMI_BOARD_NAME);
19795
19796 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s %s%s%s\n",
19797- current->pid, current->comm, print_tainted(),
19798+ task_pid_nr(current), current->comm, print_tainted(),
19799 init_utsname()->release,
19800 (int)strcspn(init_utsname()->version, " "),
19801 init_utsname()->version,
19802@@ -149,6 +150,9 @@ void flush_thread(void)
19803 {
19804 struct task_struct *tsk = current;
19805
19806+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
19807+ loadsegment(gs, 0);
19808+#endif
19809 flush_ptrace_hw_breakpoint(tsk);
19810 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
19811 drop_init_fpu(tsk);
19812@@ -336,7 +340,7 @@ static void __exit_idle(void)
19813 void exit_idle(void)
19814 {
19815 /* idle loop has pid 0 */
19816- if (current->pid)
19817+ if (task_pid_nr(current))
19818 return;
19819 __exit_idle();
19820 }
19821@@ -445,7 +449,7 @@ bool set_pm_idle_to_default(void)
19822
19823 return ret;
19824 }
19825-void stop_this_cpu(void *dummy)
19826+__noreturn void stop_this_cpu(void *dummy)
19827 {
19828 local_irq_disable();
19829 /*
19830@@ -673,16 +677,37 @@ static int __init idle_setup(char *str)
19831 }
19832 early_param("idle", idle_setup);
19833
19834-unsigned long arch_align_stack(unsigned long sp)
19835+#ifdef CONFIG_PAX_RANDKSTACK
19836+void pax_randomize_kstack(struct pt_regs *regs)
19837 {
19838- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
19839- sp -= get_random_int() % 8192;
19840- return sp & ~0xf;
19841-}
19842+ struct thread_struct *thread = &current->thread;
19843+ unsigned long time;
19844
19845-unsigned long arch_randomize_brk(struct mm_struct *mm)
19846-{
19847- unsigned long range_end = mm->brk + 0x02000000;
19848- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
19849-}
19850+ if (!randomize_va_space)
19851+ return;
19852+
19853+ if (v8086_mode(regs))
19854+ return;
19855
19856+ rdtscl(time);
19857+
19858+ /* P4 seems to return a 0 LSB, ignore it */
19859+#ifdef CONFIG_MPENTIUM4
19860+ time &= 0x3EUL;
19861+ time <<= 2;
19862+#elif defined(CONFIG_X86_64)
19863+ time &= 0xFUL;
19864+ time <<= 4;
19865+#else
19866+ time &= 0x1FUL;
19867+ time <<= 3;
19868+#endif
19869+
19870+ thread->sp0 ^= time;
19871+ load_sp0(init_tss + smp_processor_id(), thread);
19872+
19873+#ifdef CONFIG_X86_64
19874+ this_cpu_write(kernel_stack, thread->sp0);
19875+#endif
19876+}
19877+#endif
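
/*
 * Illustrative sketch, not part of the patch: pax_randomize_kstack() above
 * XORs a few low TSC bits into the kernel stack top (sp0) so the stack
 * layout shifts between syscalls. The masks keep the perturbation aligned
 * and bounded; on x86_64, (time & 0xF) << 4 gives 16 possible offsets at
 * 16-byte granularity. Modeled with rand() standing in for rdtscl():
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned long sp0 = 0xffff880012345000UL;	/* made-up stack top */
	unsigned long time = (unsigned long)rand();	/* stand-in for TSC  */

	time &= 0xFUL;		/* keep 4 bits of entropy ... */
	time <<= 4;		/* ... at 16-byte granularity */
	sp0 ^= time;

	printf("sp0=%#lx (offset %#lx)\n", sp0, time);
	return 0;
}
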
19878diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
19879index 44e0bff..5ceb99c 100644
19880--- a/arch/x86/kernel/process_32.c
19881+++ b/arch/x86/kernel/process_32.c
19882@@ -65,6 +65,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
19883 unsigned long thread_saved_pc(struct task_struct *tsk)
19884 {
19885 return ((unsigned long *)tsk->thread.sp)[3];
19886+//XXX return tsk->thread.eip;
19887 }
19888
19889 void __show_regs(struct pt_regs *regs, int all)
19890@@ -74,21 +75,20 @@ void __show_regs(struct pt_regs *regs, int all)
19891 unsigned long sp;
19892 unsigned short ss, gs;
19893
19894- if (user_mode_vm(regs)) {
19895+ if (user_mode(regs)) {
19896 sp = regs->sp;
19897 ss = regs->ss & 0xffff;
19898- gs = get_user_gs(regs);
19899 } else {
19900 sp = kernel_stack_pointer(regs);
19901 savesegment(ss, ss);
19902- savesegment(gs, gs);
19903 }
19904+ gs = get_user_gs(regs);
19905
19906 show_regs_common();
19907
19908 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
19909 (u16)regs->cs, regs->ip, regs->flags,
19910- smp_processor_id());
19911+ raw_smp_processor_id());
19912 print_symbol("EIP is at %s\n", regs->ip);
19913
19914 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
19915@@ -131,20 +131,21 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
19916 unsigned long arg,
19917 struct task_struct *p, struct pt_regs *regs)
19918 {
19919- struct pt_regs *childregs = task_pt_regs(p);
19920+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
19921 struct task_struct *tsk;
19922 int err;
19923
19924 p->thread.sp = (unsigned long) childregs;
19925 p->thread.sp0 = (unsigned long) (childregs+1);
19926+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
19927
19928 if (unlikely(!regs)) {
19929 /* kernel thread */
19930 memset(childregs, 0, sizeof(struct pt_regs));
19931 p->thread.ip = (unsigned long) ret_from_kernel_thread;
19932- task_user_gs(p) = __KERNEL_STACK_CANARY;
19933- childregs->ds = __USER_DS;
19934- childregs->es = __USER_DS;
19935+ savesegment(gs, childregs->gs);
19936+ childregs->ds = __KERNEL_DS;
19937+ childregs->es = __KERNEL_DS;
19938 childregs->fs = __KERNEL_PERCPU;
19939 childregs->bx = sp; /* function */
19940 childregs->bp = arg;
19941@@ -250,7 +251,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19942 struct thread_struct *prev = &prev_p->thread,
19943 *next = &next_p->thread;
19944 int cpu = smp_processor_id();
19945- struct tss_struct *tss = &per_cpu(init_tss, cpu);
19946+ struct tss_struct *tss = init_tss + cpu;
19947 fpu_switch_t fpu;
19948
19949 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
19950@@ -274,6 +275,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19951 */
19952 lazy_save_gs(prev->gs);
19953
19954+#ifdef CONFIG_PAX_MEMORY_UDEREF
19955+ __set_fs(task_thread_info(next_p)->addr_limit);
19956+#endif
19957+
19958 /*
19959 * Load the per-thread Thread-Local Storage descriptor.
19960 */
19961@@ -304,6 +309,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19962 */
19963 arch_end_context_switch(next_p);
19964
19965+ this_cpu_write(current_task, next_p);
19966+ this_cpu_write(current_tinfo, &next_p->tinfo);
19967+
19968 /*
19969 * Restore %gs if needed (which is common)
19970 */
19971@@ -312,8 +320,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19972
19973 switch_fpu_finish(next_p, fpu);
19974
19975- this_cpu_write(current_task, next_p);
19976-
19977 return prev_p;
19978 }
19979
19980@@ -343,4 +349,3 @@ unsigned long get_wchan(struct task_struct *p)
19981 } while (count++ < 16);
19982 return 0;
19983 }
19984-
19985diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
19986index 16c6365..5d32218 100644
19987--- a/arch/x86/kernel/process_64.c
19988+++ b/arch/x86/kernel/process_64.c
19989@@ -153,10 +153,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
19990 struct pt_regs *childregs;
19991 struct task_struct *me = current;
19992
19993- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
19994+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
19995 childregs = task_pt_regs(p);
19996 p->thread.sp = (unsigned long) childregs;
19997 p->thread.usersp = me->thread.usersp;
19998+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
19999 set_tsk_thread_flag(p, TIF_FORK);
20000 p->fpu_counter = 0;
20001 p->thread.io_bitmap_ptr = NULL;
20002@@ -274,7 +275,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
20003 struct thread_struct *prev = &prev_p->thread;
20004 struct thread_struct *next = &next_p->thread;
20005 int cpu = smp_processor_id();
20006- struct tss_struct *tss = &per_cpu(init_tss, cpu);
20007+ struct tss_struct *tss = init_tss + cpu;
20008 unsigned fsindex, gsindex;
20009 fpu_switch_t fpu;
20010
20011@@ -356,10 +357,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
20012 prev->usersp = this_cpu_read(old_rsp);
20013 this_cpu_write(old_rsp, next->usersp);
20014 this_cpu_write(current_task, next_p);
20015+ this_cpu_write(current_tinfo, &next_p->tinfo);
20016
20017- this_cpu_write(kernel_stack,
20018- (unsigned long)task_stack_page(next_p) +
20019- THREAD_SIZE - KERNEL_STACK_OFFSET);
20020+ this_cpu_write(kernel_stack, next->sp0);
20021
20022 /*
20023 * Now maybe reload the debug registers and handle I/O bitmaps
20024@@ -428,12 +428,11 @@ unsigned long get_wchan(struct task_struct *p)
20025 if (!p || p == current || p->state == TASK_RUNNING)
20026 return 0;
20027 stack = (unsigned long)task_stack_page(p);
20028- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
20029+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
20030 return 0;
20031 fp = *(u64 *)(p->thread.sp);
20032 do {
20033- if (fp < (unsigned long)stack ||
20034- fp >= (unsigned long)stack+THREAD_SIZE)
20035+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
20036 return 0;
20037 ip = *(u64 *)(fp+8);
20038 if (!in_sched_functions(ip))
20039diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
20040index 974b67e..12cb2b5 100644
20041--- a/arch/x86/kernel/ptrace.c
20042+++ b/arch/x86/kernel/ptrace.c
20043@@ -183,14 +183,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
20044 {
20045 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
20046 unsigned long sp = (unsigned long)&regs->sp;
20047- struct thread_info *tinfo;
20048
20049- if (context == (sp & ~(THREAD_SIZE - 1)))
20050+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
20051 return sp;
20052
20053- tinfo = (struct thread_info *)context;
20054- if (tinfo->previous_esp)
20055- return tinfo->previous_esp;
20056+ sp = *(unsigned long *)context;
20057+ if (sp)
20058+ return sp;
20059
20060 return (unsigned long)regs;
20061 }
20062@@ -587,7 +586,7 @@ static void ptrace_triggered(struct perf_event *bp,
20063 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
20064 {
20065 int i;
20066- int dr7 = 0;
20067+ unsigned long dr7 = 0;
20068 struct arch_hw_breakpoint *info;
20069
20070 for (i = 0; i < HBP_NUM; i++) {
20071@@ -855,7 +854,7 @@ long arch_ptrace(struct task_struct *child, long request,
20072 unsigned long addr, unsigned long data)
20073 {
20074 int ret;
20075- unsigned long __user *datap = (unsigned long __user *)data;
20076+ unsigned long __user *datap = (__force unsigned long __user *)data;
20077
20078 switch (request) {
20079 /* read the word at location addr in the USER area. */
20080@@ -940,14 +939,14 @@ long arch_ptrace(struct task_struct *child, long request,
20081 if ((int) addr < 0)
20082 return -EIO;
20083 ret = do_get_thread_area(child, addr,
20084- (struct user_desc __user *)data);
20085+ (__force struct user_desc __user *) data);
20086 break;
20087
20088 case PTRACE_SET_THREAD_AREA:
20089 if ((int) addr < 0)
20090 return -EIO;
20091 ret = do_set_thread_area(child, addr,
20092- (struct user_desc __user *)data, 0);
20093+ (__force struct user_desc __user *) data, 0);
20094 break;
20095 #endif
20096
20097@@ -1325,7 +1324,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
20098
20099 #ifdef CONFIG_X86_64
20100
20101-static struct user_regset x86_64_regsets[] __read_mostly = {
20102+static struct user_regset x86_64_regsets[] = {
20103 [REGSET_GENERAL] = {
20104 .core_note_type = NT_PRSTATUS,
20105 .n = sizeof(struct user_regs_struct) / sizeof(long),
20106@@ -1366,7 +1365,7 @@ static const struct user_regset_view user_x86_64_view = {
20107 #endif /* CONFIG_X86_64 */
20108
20109 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
20110-static struct user_regset x86_32_regsets[] __read_mostly = {
20111+static struct user_regset x86_32_regsets[] = {
20112 [REGSET_GENERAL] = {
20113 .core_note_type = NT_PRSTATUS,
20114 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
20115@@ -1419,13 +1418,13 @@ static const struct user_regset_view user_x86_32_view = {
20116 */
20117 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
20118
20119-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
20120+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
20121 {
20122 #ifdef CONFIG_X86_64
20123- x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
20124+ *(unsigned int *)&x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
20125 #endif
20126 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
20127- x86_32_regsets[REGSET_XSTATE].n = size / sizeof(u64);
20128+ *(unsigned int *)&x86_32_regsets[REGSET_XSTATE].n = size / sizeof(u64);
20129 #endif
20130 xstate_fx_sw_bytes[USER_XSTATE_XCR0_WORD] = xstate_mask;
20131 }
20132@@ -1454,7 +1453,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
20133 memset(info, 0, sizeof(*info));
20134 info->si_signo = SIGTRAP;
20135 info->si_code = si_code;
20136- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
20137+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
20138 }
20139
20140 void user_single_step_siginfo(struct task_struct *tsk,
20141@@ -1483,6 +1482,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
20142 # define IS_IA32 0
20143 #endif
20144
20145+#ifdef CONFIG_GRKERNSEC_SETXID
20146+extern void gr_delayed_cred_worker(void);
20147+#endif
20148+
20149 /*
20150 * We must return the syscall number to actually look up in the table.
20151 * This can be -1L to skip running any syscall at all.
20152@@ -1493,6 +1496,11 @@ long syscall_trace_enter(struct pt_regs *regs)
20153
20154 rcu_user_exit();
20155
20156+#ifdef CONFIG_GRKERNSEC_SETXID
20157+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
20158+ gr_delayed_cred_worker();
20159+#endif
20160+
20161 /*
20162 * If we stepped into a sysenter/syscall insn, it trapped in
20163 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
20164@@ -1548,6 +1556,11 @@ void syscall_trace_leave(struct pt_regs *regs)
20165 */
20166 rcu_user_exit();
20167
20168+#ifdef CONFIG_GRKERNSEC_SETXID
20169+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
20170+ gr_delayed_cred_worker();
20171+#endif
20172+
20173 audit_syscall_exit(regs);
20174
20175 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
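
The regset tables above lose __read_mostly, and update_regset_xstate_info() gains __init plus a cast on the store, because elsewhere in this patch those tables are made read-only after boot; the one sanctioned early writer then has to reach the field through a cast. A sketch of the idiom, assuming (as the patch does) that the underlying storage is still writable when the init-time writer runs:

#include <stdio.h>

struct regset { const char *name; unsigned int n; };

/* After boot this table is meant to be immutable; normal code only
 * sees it through a pointer-to-const view. */
static struct regset xstate_regset = { "xstate", 0 };
static const struct regset *view = &xstate_regset;

static void init_xstate(unsigned int size)
{
    /* The one sanctioned writer: peel the const view off exactly once,
     * before the table is sealed. */
    *(unsigned int *)&view->n = size / sizeof(unsigned long long);
}

int main(void)
{
    init_xstate(512);
    printf("%s.n = %u\n", view->name, view->n); /* xstate.n = 64 */
    return 0;
}
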
20176diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
20177index 42eb330..139955c 100644
20178--- a/arch/x86/kernel/pvclock.c
20179+++ b/arch/x86/kernel/pvclock.c
20180@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
20181 return pv_tsc_khz;
20182 }
20183
20184-static atomic64_t last_value = ATOMIC64_INIT(0);
20185+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
20186
20187 void pvclock_resume(void)
20188 {
20189- atomic64_set(&last_value, 0);
20190+ atomic64_set_unchecked(&last_value, 0);
20191 }
20192
20193 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
20194@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
20195 * updating at the same time, and one of them could be slightly behind,
20196 * making the assumption that last_value always go forward fail to hold.
20197 */
20198- last = atomic64_read(&last_value);
20199+ last = atomic64_read_unchecked(&last_value);
20200 do {
20201 if (ret < last)
20202 return last;
20203- last = atomic64_cmpxchg(&last_value, last, ret);
20204+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
20205 } while (unlikely(last != ret));
20206
20207 return ret;
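
The pvclock hunk is one instance of the PAX_REFCOUNT split used throughout this patch: plain atomic64_t operations gain an overflow trap, and last_value, a monotonic timestamp whose comparison loop tolerates wraparound by design, is moved to the _unchecked flavor to opt out. A userspace model of the two contracts (the trap policy below is a stand-in for the real PAX_REFCOUNT report path):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int64_t atomic64_add_checked(int64_t *v, int64_t i)
{
    int64_t r;
    if (__builtin_add_overflow(*v, i, &r)) {
        /* PAX_REFCOUNT would report and kill the offender here. */
        fprintf(stderr, "refcount overflow detected\n");
        abort();
    }
    return *v = r;
}

static int64_t atomic64_add_unchecked(int64_t *v, int64_t i)
{
    /* Wraps silently: acceptable for timestamps and statistics whose
     * wraparound is harmless by design. */
    return *v = (int64_t)((uint64_t)*v + (uint64_t)i);
}

int main(void)
{
    int64_t ref = 1;
    int64_t ts = INT64_MAX - 1;

    atomic64_add_checked(&ref, 1);  /* ok: no overflow */
    printf("%lld %lld\n", (long long)ref,
           (long long)atomic64_add_unchecked(&ts, 2)); /* 2 and the wrapped value */
    return 0;
}
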
20208diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
20209index 4e8ba39..e0186b3 100644
20210--- a/arch/x86/kernel/reboot.c
20211+++ b/arch/x86/kernel/reboot.c
20212@@ -36,7 +36,7 @@ void (*pm_power_off)(void);
20213 EXPORT_SYMBOL(pm_power_off);
20214
20215 static const struct desc_ptr no_idt = {};
20216-static int reboot_mode;
20217+static unsigned short reboot_mode;
20218 enum reboot_type reboot_type = BOOT_ACPI;
20219 int reboot_force;
20220
20221@@ -157,6 +157,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
20222
20223 void __noreturn machine_real_restart(unsigned int type)
20224 {
20225+
20226+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
20227+ struct desc_struct *gdt;
20228+#endif
20229+
20230 local_irq_disable();
20231
20232 /*
20233@@ -184,7 +189,29 @@ void __noreturn machine_real_restart(unsigned int type)
20234
20235 /* Jump to the identity-mapped low memory code */
20236 #ifdef CONFIG_X86_32
20237- asm volatile("jmpl *%0" : :
20238+
20239+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
20240+ gdt = get_cpu_gdt_table(smp_processor_id());
20241+ pax_open_kernel();
20242+#ifdef CONFIG_PAX_MEMORY_UDEREF
20243+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
20244+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
20245+ loadsegment(ds, __KERNEL_DS);
20246+ loadsegment(es, __KERNEL_DS);
20247+ loadsegment(ss, __KERNEL_DS);
20248+#endif
20249+#ifdef CONFIG_PAX_KERNEXEC
20250+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
20251+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
20252+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
20253+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
20254+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
20255+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
20256+#endif
20257+ pax_close_kernel();
20258+#endif
20259+
20260+ asm volatile("ljmpl *%0" : :
20261 "rm" (real_mode_header->machine_real_restart_asm),
20262 "a" (type));
20263 #else
20264@@ -531,7 +558,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
20265 * try to force a triple fault and then cycle between hitting the keyboard
20266 * controller and doing that
20267 */
20268-static void native_machine_emergency_restart(void)
20269+static void __noreturn native_machine_emergency_restart(void)
20270 {
20271 int i;
20272 int attempt = 0;
20273@@ -654,13 +681,13 @@ void native_machine_shutdown(void)
20274 #endif
20275 }
20276
20277-static void __machine_emergency_restart(int emergency)
20278+static void __noreturn __machine_emergency_restart(int emergency)
20279 {
20280 reboot_emergency = emergency;
20281 machine_ops.emergency_restart();
20282 }
20283
20284-static void native_machine_restart(char *__unused)
20285+static void __noreturn native_machine_restart(char *__unused)
20286 {
20287 pr_notice("machine restart\n");
20288
20289@@ -669,7 +696,7 @@ static void native_machine_restart(char *__unused)
20290 __machine_emergency_restart(0);
20291 }
20292
20293-static void native_machine_halt(void)
20294+static void __noreturn native_machine_halt(void)
20295 {
20296 /* Stop other cpus and apics */
20297 machine_shutdown();
20298@@ -679,7 +706,7 @@ static void native_machine_halt(void)
20299 stop_this_cpu(NULL);
20300 }
20301
20302-static void native_machine_power_off(void)
20303+static void __noreturn native_machine_power_off(void)
20304 {
20305 if (pm_power_off) {
20306 if (!reboot_force)
20307@@ -688,6 +715,7 @@ static void native_machine_power_off(void)
20308 }
20309 /* A fallback in case there is no PM info available */
20310 tboot_shutdown(TB_SHUTDOWN_HALT);
20311+ unreachable();
20312 }
20313
20314 struct machine_ops machine_ops = {
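
Several reboot handlers above are annotated __noreturn, and native_machine_power_off() gains unreachable() because tboot_shutdown() halts in practice but is not declared as non-returning, so the compiler cannot prove on its own that the function's tail is dead. A compilable sketch of the same pattern using the GCC builtins the kernel macros expand to:

#include <stdio.h>
#include <stdlib.h>

/* Halts in practice, but its prototype does not promise it. */
static void tboot_shutdown_model(void) { exit(0); }

__attribute__((noreturn)) static void machine_power_off(void)
{
    puts("powering off");
    tboot_shutdown_model();
    /* A noreturn function that can fall off the end is undefined
     * behaviour; this hint documents that control never gets here. */
    __builtin_unreachable();
}

int main(void) { machine_power_off(); }
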
20315diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
20316index 7a6f3b3..bed145d7 100644
20317--- a/arch/x86/kernel/relocate_kernel_64.S
20318+++ b/arch/x86/kernel/relocate_kernel_64.S
20319@@ -11,6 +11,7 @@
20320 #include <asm/kexec.h>
20321 #include <asm/processor-flags.h>
20322 #include <asm/pgtable_types.h>
20323+#include <asm/alternative-asm.h>
20324
20325 /*
20326 * Must be relocatable PIC code callable as a C function
20327@@ -160,13 +161,14 @@ identity_mapped:
20328 xorq %rbp, %rbp
20329 xorq %r8, %r8
20330 xorq %r9, %r9
20331- xorq %r10, %r9
20332+ xorq %r10, %r10
20333 xorq %r11, %r11
20334 xorq %r12, %r12
20335 xorq %r13, %r13
20336 xorq %r14, %r14
20337 xorq %r15, %r15
20338
20339+ pax_force_retaddr 0, 1
20340 ret
20341
20342 1:
20343diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
20344index 86c524c..bae70c5 100644
20345--- a/arch/x86/kernel/setup.c
20346+++ b/arch/x86/kernel/setup.c
20347@@ -441,7 +441,7 @@ static void __init parse_setup_data(void)
20348
20349 switch (data->type) {
20350 case SETUP_E820_EXT:
20351- parse_e820_ext(data);
20352+ parse_e820_ext((struct setup_data __force_kernel *)data);
20353 break;
20354 case SETUP_DTB:
20355 add_dtb(pa_data);
20356@@ -708,7 +708,7 @@ static void __init trim_bios_range(void)
20357 * area (640->1Mb) as ram even though it is not.
20358 * take them out.
20359 */
20360- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
20361+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
20362
20363 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
20364 }
20365@@ -832,14 +832,14 @@ void __init setup_arch(char **cmdline_p)
20366
20367 if (!boot_params.hdr.root_flags)
20368 root_mountflags &= ~MS_RDONLY;
20369- init_mm.start_code = (unsigned long) _text;
20370- init_mm.end_code = (unsigned long) _etext;
20371+ init_mm.start_code = ktla_ktva((unsigned long) _text);
20372+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
20373 init_mm.end_data = (unsigned long) _edata;
20374 init_mm.brk = _brk_end;
20375
20376- code_resource.start = virt_to_phys(_text);
20377- code_resource.end = virt_to_phys(_etext)-1;
20378- data_resource.start = virt_to_phys(_etext);
20379+ code_resource.start = virt_to_phys(ktla_ktva(_text));
20380+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
20381+ data_resource.start = virt_to_phys(_sdata);
20382 data_resource.end = virt_to_phys(_edata)-1;
20383 bss_resource.start = virt_to_phys(&__bss_start);
20384 bss_resource.end = virt_to_phys(&__bss_stop)-1;
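
The setup.c hunk routes init_mm and code_resource through ktla_ktva() because, under KERNEXEC on i386, kernel text is reached through a shifted mapping: the link-time (linear) and virtual views of _text differ by a constant. data_resource now starts at _sdata since rodata is split out of the writable data range. A toy model of the translation pair — the macro names match the patch, but the offset below is a made-up stand-in, not the tree's real value:

#include <stdio.h>

#define KERNEL_TEXT_OFFSET 0x01000000UL /* hypothetical shift */

#define ktla_ktva(addr) ((addr) + KERNEL_TEXT_OFFSET) /* text linear -> virtual */
#define ktva_ktla(addr) ((addr) - KERNEL_TEXT_OFFSET) /* text virtual -> linear */

int main(void)
{
    unsigned long _text = 0xc1000000UL; /* link-time address of the text start */
    printf("virtual view of _text: %#lx\n", ktla_ktva(_text));
    return 0;
}
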
20385diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
20386index 5cdff03..5810740 100644
20387--- a/arch/x86/kernel/setup_percpu.c
20388+++ b/arch/x86/kernel/setup_percpu.c
20389@@ -21,19 +21,17 @@
20390 #include <asm/cpu.h>
20391 #include <asm/stackprotector.h>
20392
20393-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
20394+#ifdef CONFIG_SMP
20395+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
20396 EXPORT_PER_CPU_SYMBOL(cpu_number);
20397+#endif
20398
20399-#ifdef CONFIG_X86_64
20400 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
20401-#else
20402-#define BOOT_PERCPU_OFFSET 0
20403-#endif
20404
20405 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
20406 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
20407
20408-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
20409+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
20410 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
20411 };
20412 EXPORT_SYMBOL(__per_cpu_offset);
20413@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
20414 {
20415 #ifdef CONFIG_X86_32
20416 struct desc_struct gdt;
20417+ unsigned long base = per_cpu_offset(cpu);
20418
20419- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
20420- 0x2 | DESCTYPE_S, 0x8);
20421- gdt.s = 1;
20422+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
20423+ 0x83 | DESCTYPE_S, 0xC);
20424 write_gdt_entry(get_cpu_gdt_table(cpu),
20425 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
20426 #endif
20427@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
20428 /* alrighty, percpu areas up and running */
20429 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
20430 for_each_possible_cpu(cpu) {
20431+#ifdef CONFIG_CC_STACKPROTECTOR
20432+#ifdef CONFIG_X86_32
20433+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
20434+#endif
20435+#endif
20436 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
20437 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
20438 per_cpu(cpu_number, cpu) = cpu;
20439@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
20440 */
20441 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
20442 #endif
20443+#ifdef CONFIG_CC_STACKPROTECTOR
20444+#ifdef CONFIG_X86_32
20445+ if (!cpu)
20446+ per_cpu(stack_canary.canary, cpu) = canary;
20447+#endif
20448+#endif
20449 /*
20450 * Up to this point, the boot CPU has been using .init.data
20451 * area. Reload any changed state for the boot CPU.
20452diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
20453index 70b27ee..fcf827f 100644
20454--- a/arch/x86/kernel/signal.c
20455+++ b/arch/x86/kernel/signal.c
20456@@ -195,7 +195,7 @@ static unsigned long align_sigframe(unsigned long sp)
20457 * Align the stack pointer according to the i386 ABI,
20458 * i.e. so that on function entry ((sp + 4) & 15) == 0.
20459 */
20460- sp = ((sp + 4) & -16ul) - 4;
20461+ sp = ((sp - 12) & -16ul) - 4;
20462 #else /* !CONFIG_X86_32 */
20463 sp = round_down(sp, 16) - 8;
20464 #endif
20465@@ -303,9 +303,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
20466 }
20467
20468 if (current->mm->context.vdso)
20469- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
20470+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
20471 else
20472- restorer = &frame->retcode;
20473+ restorer = (void __user *)&frame->retcode;
20474 if (ka->sa.sa_flags & SA_RESTORER)
20475 restorer = ka->sa.sa_restorer;
20476
20477@@ -319,7 +319,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
20478 * reasons and because gdb uses it as a signature to notice
20479 * signal handler stack frames.
20480 */
20481- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
20482+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
20483
20484 if (err)
20485 return -EFAULT;
20486@@ -369,7 +369,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
20487 put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
20488
20489 /* Set up to return from userspace. */
20490- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
20491+ if (current->mm->context.vdso)
20492+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
20493+ else
20494+ restorer = (void __user *)&frame->retcode;
20495 if (ka->sa.sa_flags & SA_RESTORER)
20496 restorer = ka->sa.sa_restorer;
20497 put_user_ex(restorer, &frame->pretcode);
20498@@ -381,7 +384,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
20499 * reasons and because gdb uses it as a signature to notice
20500 * signal handler stack frames.
20501 */
20502- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
20503+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
20504 } put_user_catch(err);
20505
20506 err |= copy_siginfo_to_user(&frame->info, info);
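
The align_sigframe() change keeps the i386 ABI invariant ((sp + 4) & 15) == 0 but biases with -12 instead of +4 before masking, so the frame always lands strictly below the incoming stack pointer (by at least 16 bytes) instead of possibly landing exactly at it. The arithmetic can be checked directly:

#include <stdio.h>

int main(void)
{
    for (unsigned long sp = 0x1000; sp < 0x1010; sp++) {
        unsigned long old = ((sp + 4) & -16UL) - 4;  /* may equal sp */
        unsigned long new = ((sp - 12) & -16UL) - 4; /* always < sp */
        printf("sp=%#lx old=%#lx new=%#lx aligned=%d,%d\n",
               sp, old, new, (int)((old + 4) & 15), (int)((new + 4) & 15));
    }
    return 0;
}
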
20507diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
20508index f3e2ec8..ad5287a 100644
20509--- a/arch/x86/kernel/smpboot.c
20510+++ b/arch/x86/kernel/smpboot.c
20511@@ -673,6 +673,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
20512 idle->thread.sp = (unsigned long) (((struct pt_regs *)
20513 (THREAD_SIZE + task_stack_page(idle))) - 1);
20514 per_cpu(current_task, cpu) = idle;
20515+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
20516
20517 #ifdef CONFIG_X86_32
20518 /* Stack for startup_32 can be just as for start_secondary onwards */
20519@@ -680,11 +681,13 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
20520 #else
20521 clear_tsk_thread_flag(idle, TIF_FORK);
20522 initial_gs = per_cpu_offset(cpu);
20523- per_cpu(kernel_stack, cpu) =
20524- (unsigned long)task_stack_page(idle) -
20525- KERNEL_STACK_OFFSET + THREAD_SIZE;
20526+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
20527 #endif
20528+
20529+ pax_open_kernel();
20530 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
20531+ pax_close_kernel();
20532+
20533 initial_code = (unsigned long)start_secondary;
20534 stack_start = idle->thread.sp;
20535
20536@@ -823,6 +826,12 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
20537 /* the FPU context is blank, nobody can own it */
20538 __cpu_disable_lazy_restore(cpu);
20539
20540+#ifdef CONFIG_PAX_PER_CPU_PGD
20541+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
20542+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20543+ KERNEL_PGD_PTRS);
20544+#endif
20545+
20549 err = do_boot_cpu(apicid, cpu, tidle);
20550 if (err) {
20551 pr_debug("do_boot_cpu failed %d\n", err);
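
The smpboot hunk brackets the early_gdt_descr update with pax_open_kernel()/pax_close_kernel() because, with KERNEXEC, such boot-time structures live in read-only memory and a deliberate write needs an explicit window. A userspace analogy with mprotect() standing in for the toggle (the real primitives flip CR0.WP or switch mappings, not page protections like this):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static char *rodata;

static void pax_open_kernel_model(void)  { mprotect(rodata, 4096, PROT_READ | PROT_WRITE); }
static void pax_close_kernel_model(void) { mprotect(rodata, 4096, PROT_READ); }

int main(void)
{
    rodata = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (rodata == MAP_FAILED)
        return 1;
    pax_open_kernel_model();                    /* open the write window */
    strcpy(rodata, "early_gdt_descr updated");  /* the one sanctioned write */
    pax_close_kernel_model();                   /* seal it again */
    printf("%s\n", rodata);                     /* still readable */
    return 0;
}
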
20552diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
20553index cd3b243..4ba27a4 100644
20554--- a/arch/x86/kernel/step.c
20555+++ b/arch/x86/kernel/step.c
20556@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
20557 struct desc_struct *desc;
20558 unsigned long base;
20559
20560- seg &= ~7UL;
20561+ seg >>= 3;
20562
20563 mutex_lock(&child->mm->context.lock);
20564- if (unlikely((seg >> 3) >= child->mm->context.size))
20565+ if (unlikely(seg >= child->mm->context.size))
20566 addr = -1L; /* bogus selector, access would fault */
20567 else {
20568 desc = child->mm->context.ldt + seg;
20569@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
20570 addr += base;
20571 }
20572 mutex_unlock(&child->mm->context.lock);
20573- }
20574+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
20575+ addr = ktla_ktva(addr);
20576
20577 return addr;
20578 }
20579@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
20580 unsigned char opcode[15];
20581 unsigned long addr = convert_ip_to_linear(child, regs);
20582
20583+ if (addr == -EINVAL)
20584+ return 0;
20585+
20586 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
20587 for (i = 0; i < copied; i++) {
20588 switch (opcode[i]) {
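
The step.c change shifts the selector once up front: the low three bits of a segment selector are the table-indicator and RPL bits, so seg >>= 3 yields the same LDT index as the old mask-then-shift. Decoding a sample selector shows the fields:

#include <stdio.h>

int main(void)
{
    unsigned sel = 0x004f; /* example LDT selector */
    printf("index=%u ti=%u rpl=%u\n",
           sel >> 3, (sel >> 2) & 1, sel & 3); /* index=9 ti=1 rpl=3 */
    return 0;
}
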
20589diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
20590new file mode 100644
20591index 0000000..26bb1af
20592--- /dev/null
20593+++ b/arch/x86/kernel/sys_i386_32.c
20594@@ -0,0 +1,249 @@
20595+/*
20596+ * This file contains various random system calls that
20597+ * have a non-standard calling sequence on the Linux/i386
20598+ * platform.
20599+ */
20600+
20601+#include <linux/errno.h>
20602+#include <linux/sched.h>
20603+#include <linux/mm.h>
20604+#include <linux/fs.h>
20605+#include <linux/smp.h>
20606+#include <linux/sem.h>
20607+#include <linux/msg.h>
20608+#include <linux/shm.h>
20609+#include <linux/stat.h>
20610+#include <linux/syscalls.h>
20611+#include <linux/mman.h>
20612+#include <linux/file.h>
20613+#include <linux/utsname.h>
20614+#include <linux/ipc.h>
20615+
20616+#include <linux/uaccess.h>
20617+#include <linux/unistd.h>
20618+
20619+#include <asm/syscalls.h>
20620+
20621+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
20622+{
20623+ unsigned long pax_task_size = TASK_SIZE;
20624+
20625+#ifdef CONFIG_PAX_SEGMEXEC
20626+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
20627+ pax_task_size = SEGMEXEC_TASK_SIZE;
20628+#endif
20629+
20630+ if (len > pax_task_size || addr > pax_task_size - len)
20631+ return -EINVAL;
20632+
20633+ return 0;
20634+}
20635+
20636+unsigned long
20637+arch_get_unmapped_area(struct file *filp, unsigned long addr,
20638+ unsigned long len, unsigned long pgoff, unsigned long flags)
20639+{
20640+ struct mm_struct *mm = current->mm;
20641+ struct vm_area_struct *vma;
20642+ unsigned long start_addr, pax_task_size = TASK_SIZE;
20643+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
20644+
20645+#ifdef CONFIG_PAX_SEGMEXEC
20646+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20647+ pax_task_size = SEGMEXEC_TASK_SIZE;
20648+#endif
20649+
20650+ pax_task_size -= PAGE_SIZE;
20651+
20652+ if (len > pax_task_size)
20653+ return -ENOMEM;
20654+
20655+ if (flags & MAP_FIXED)
20656+ return addr;
20657+
20658+#ifdef CONFIG_PAX_RANDMMAP
20659+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
20660+#endif
20661+
20662+ if (addr) {
20663+ addr = PAGE_ALIGN(addr);
20664+ if (pax_task_size - len >= addr) {
20665+ vma = find_vma(mm, addr);
20666+ if (check_heap_stack_gap(vma, addr, len, offset))
20667+ return addr;
20668+ }
20669+ }
20670+ if (len > mm->cached_hole_size) {
20671+ start_addr = addr = mm->free_area_cache;
20672+ } else {
20673+ start_addr = addr = mm->mmap_base;
20674+ mm->cached_hole_size = 0;
20675+ }
20676+
20677+#ifdef CONFIG_PAX_PAGEEXEC
20678+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
20679+ start_addr = 0x00110000UL;
20680+
20681+#ifdef CONFIG_PAX_RANDMMAP
20682+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20683+ start_addr += mm->delta_mmap & 0x03FFF000UL;
20684+#endif
20685+
20686+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
20687+ start_addr = addr = mm->mmap_base;
20688+ else
20689+ addr = start_addr;
20690+ }
20691+#endif
20692+
20693+full_search:
20694+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
20695+ /* At this point: (!vma || addr < vma->vm_end). */
20696+ if (pax_task_size - len < addr) {
20697+ /*
20698+ * Start a new search - just in case we missed
20699+ * some holes.
20700+ */
20701+ if (start_addr != mm->mmap_base) {
20702+ start_addr = addr = mm->mmap_base;
20703+ mm->cached_hole_size = 0;
20704+ goto full_search;
20705+ }
20706+ return -ENOMEM;
20707+ }
20708+ if (check_heap_stack_gap(vma, addr, len, offset))
20709+ break;
20710+ if (addr + mm->cached_hole_size < vma->vm_start)
20711+ mm->cached_hole_size = vma->vm_start - addr;
20712+ addr = vma->vm_end;
20713+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
20714+ start_addr = addr = mm->mmap_base;
20715+ mm->cached_hole_size = 0;
20716+ goto full_search;
20717+ }
20718+ }
20719+
20720+ /*
20721+ * Remember the place where we stopped the search:
20722+ */
20723+ mm->free_area_cache = addr + len;
20724+ return addr;
20725+}
20726+
20727+unsigned long
20728+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20729+ const unsigned long len, const unsigned long pgoff,
20730+ const unsigned long flags)
20731+{
20732+ struct vm_area_struct *vma;
20733+ struct mm_struct *mm = current->mm;
20734+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
20735+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
20736+
20737+#ifdef CONFIG_PAX_SEGMEXEC
20738+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20739+ pax_task_size = SEGMEXEC_TASK_SIZE;
20740+#endif
20741+
20742+ pax_task_size -= PAGE_SIZE;
20743+
20744+ /* requested length too big for entire address space */
20745+ if (len > pax_task_size)
20746+ return -ENOMEM;
20747+
20748+ if (flags & MAP_FIXED)
20749+ return addr;
20750+
20751+#ifdef CONFIG_PAX_PAGEEXEC
20752+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
20753+ goto bottomup;
20754+#endif
20755+
20756+#ifdef CONFIG_PAX_RANDMMAP
20757+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
20758+#endif
20759+
20760+ /* requesting a specific address */
20761+ if (addr) {
20762+ addr = PAGE_ALIGN(addr);
20763+ if (pax_task_size - len >= addr) {
20764+ vma = find_vma(mm, addr);
20765+ if (check_heap_stack_gap(vma, addr, len, offset))
20766+ return addr;
20767+ }
20768+ }
20769+
20770+ /* check if free_area_cache is useful for us */
20771+ if (len <= mm->cached_hole_size) {
20772+ mm->cached_hole_size = 0;
20773+ mm->free_area_cache = mm->mmap_base;
20774+ }
20775+
20776+ /* either no address requested or can't fit in requested address hole */
20777+ addr = mm->free_area_cache;
20778+
20779+ /* make sure it can fit in the remaining address space */
20780+ if (addr > len) {
20781+ vma = find_vma(mm, addr-len);
20782+ if (check_heap_stack_gap(vma, addr - len, len, offset))
20783+ /* remember the address as a hint for next time */
20784+ return (mm->free_area_cache = addr-len);
20785+ }
20786+
20787+ if (mm->mmap_base < len)
20788+ goto bottomup;
20789+
20790+ addr = mm->mmap_base-len;
20791+
20792+ do {
20793+ /*
20794+ * Lookup failure means no vma is above this address,
20795+ * else if new region fits below vma->vm_start,
20796+ * return with success:
20797+ */
20798+ vma = find_vma(mm, addr);
20799+ if (check_heap_stack_gap(vma, addr, len, offset))
20800+ /* remember the address as a hint for next time */
20801+ return (mm->free_area_cache = addr);
20802+
20803+ /* remember the largest hole we saw so far */
20804+ if (addr + mm->cached_hole_size < vma->vm_start)
20805+ mm->cached_hole_size = vma->vm_start - addr;
20806+
20807+ /* try just below the current vma->vm_start */
20808+ addr = skip_heap_stack_gap(vma, len, offset);
20809+ } while (!IS_ERR_VALUE(addr));
20810+
20811+bottomup:
20812+ /*
20813+ * A failed mmap() very likely causes application failure,
20814+ * so fall back to the bottom-up function here. This scenario
20815+ * can happen with large stack limits and large mmap()
20816+ * allocations.
20817+ */
20818+
20819+#ifdef CONFIG_PAX_SEGMEXEC
20820+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20821+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
20822+ else
20823+#endif
20824+
20825+ mm->mmap_base = TASK_UNMAPPED_BASE;
20826+
20827+#ifdef CONFIG_PAX_RANDMMAP
20828+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20829+ mm->mmap_base += mm->delta_mmap;
20830+#endif
20831+
20832+ mm->free_area_cache = mm->mmap_base;
20833+ mm->cached_hole_size = ~0UL;
20834+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
20835+ /*
20836+ * Restore the topdown base:
20837+ */
20838+ mm->mmap_base = base;
20839+ mm->free_area_cache = base;
20840+ mm->cached_hole_size = ~0UL;
20841+
20842+ return addr;
20843+}
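
The new sys_i386_32.c replaces the usual "!vma || addr + len <= vma->vm_start" hole test with check_heap_stack_gap(), which additionally enforces a guard gap (plus the randomized per-thread offset from gr_rand_threadstack_offset()) between new mappings and growable stacks. The helper's body lives elsewhere in this patch; the sketch below is only an approximation of its contract, with the gap policy and test values made up:

#include <stdbool.h>
#include <stdio.h>

struct vma { unsigned long vm_start, vm_end, vm_flags; };
#define VM_GROWSDOWN 0x100UL

static bool gap_ok(const struct vma *vma, unsigned long addr,
                   unsigned long len, unsigned long gap)
{
    if (!vma)                       /* nothing above: the hole is open-ended */
        return true;
    if (addr + len > vma->vm_start) /* plain overlap */
        return false;
    /* Stacks grow down: demand extra headroom below them so a mapping
     * cannot be abutted against the stack's growth area. */
    if (vma->vm_flags & VM_GROWSDOWN)
        return addr + len + gap <= vma->vm_start;
    return true;
}

int main(void)
{
    struct vma stack = { 0x7ffff000UL, 0x80000000UL, VM_GROWSDOWN };
    printf("%d %d\n",
           gap_ok(&stack, 0x7fff0000UL, 0x1000, 0x10000),  /* 0: inside the gap */
           gap_ok(&stack, 0x7ff00000UL, 0x1000, 0x10000)); /* 1: clear of it */
    return 0;
}
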
20844diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
20845index b4d3c39..d699d77 100644
20846--- a/arch/x86/kernel/sys_x86_64.c
20847+++ b/arch/x86/kernel/sys_x86_64.c
20848@@ -95,8 +95,8 @@ out:
20849 return error;
20850 }
20851
20852-static void find_start_end(unsigned long flags, unsigned long *begin,
20853- unsigned long *end)
20854+static void find_start_end(struct mm_struct *mm, unsigned long flags,
20855+ unsigned long *begin, unsigned long *end)
20856 {
20857 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
20858 unsigned long new_begin;
20859@@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
20860 *begin = new_begin;
20861 }
20862 } else {
20863- *begin = TASK_UNMAPPED_BASE;
20864+ *begin = mm->mmap_base;
20865 *end = TASK_SIZE;
20866 }
20867 }
20868@@ -128,20 +128,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
20869 struct vm_area_struct *vma;
20870 unsigned long start_addr;
20871 unsigned long begin, end;
20872+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
20873
20874 if (flags & MAP_FIXED)
20875 return addr;
20876
20877- find_start_end(flags, &begin, &end);
20878+ find_start_end(mm, flags, &begin, &end);
20879
20880 if (len > end)
20881 return -ENOMEM;
20882
20883+#ifdef CONFIG_PAX_RANDMMAP
20884+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
20885+#endif
20886+
20887 if (addr) {
20888 addr = PAGE_ALIGN(addr);
20889 vma = find_vma(mm, addr);
20890- if (end - len >= addr &&
20891- (!vma || addr + len <= vma->vm_start))
20892+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
20893 return addr;
20894 }
20895 if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
20896@@ -172,7 +176,7 @@ full_search:
20897 }
20898 return -ENOMEM;
20899 }
20900- if (!vma || addr + len <= vma->vm_start) {
20901+ if (check_heap_stack_gap(vma, addr, len, offset)) {
20902 /*
20903 * Remember the place where we stopped the search:
20904 */
20905@@ -195,7 +199,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20906 {
20907 struct vm_area_struct *vma;
20908 struct mm_struct *mm = current->mm;
20909- unsigned long addr = addr0, start_addr;
20910+ unsigned long base = mm->mmap_base, addr = addr0, start_addr;
20911+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
20912
20913 /* requested length too big for entire address space */
20914 if (len > TASK_SIZE)
20915@@ -208,13 +213,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20916 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
20917 goto bottomup;
20918
20919+#ifdef CONFIG_PAX_RANDMMAP
20920+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
20921+#endif
20922+
20923 /* requesting a specific address */
20924 if (addr) {
20925 addr = PAGE_ALIGN(addr);
20926- vma = find_vma(mm, addr);
20927- if (TASK_SIZE - len >= addr &&
20928- (!vma || addr + len <= vma->vm_start))
20929- return addr;
20930+ if (TASK_SIZE - len >= addr) {
20931+ vma = find_vma(mm, addr);
20932+ if (check_heap_stack_gap(vma, addr, len, offset))
20933+ return addr;
20934+ }
20935 }
20936
20937 /* check if free_area_cache is useful for us */
20938@@ -240,7 +250,7 @@ try_again:
20939 * return with success:
20940 */
20941 vma = find_vma(mm, addr);
20942- if (!vma || addr+len <= vma->vm_start)
20943+ if (check_heap_stack_gap(vma, addr, len, offset))
20944 /* remember the address as a hint for next time */
20945 return mm->free_area_cache = addr;
20946
20947@@ -249,8 +259,8 @@ try_again:
20948 mm->cached_hole_size = vma->vm_start - addr;
20949
20950 /* try just below the current vma->vm_start */
20951- addr = vma->vm_start-len;
20952- } while (len < vma->vm_start);
20953+ addr = skip_heap_stack_gap(vma, len, offset);
20954+ } while (!IS_ERR_VALUE(addr));
20955
20956 fail:
20957 /*
20958@@ -270,13 +280,21 @@ bottomup:
20959 * can happen with large stack limits and large mmap()
20960 * allocations.
20961 */
20962+ mm->mmap_base = TASK_UNMAPPED_BASE;
20963+
20964+#ifdef CONFIG_PAX_RANDMMAP
20965+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20966+ mm->mmap_base += mm->delta_mmap;
20967+#endif
20968+
20969+ mm->free_area_cache = mm->mmap_base;
20970 mm->cached_hole_size = ~0UL;
20971- mm->free_area_cache = TASK_UNMAPPED_BASE;
20972 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
20973 /*
20974 * Restore the topdown base:
20975 */
20976- mm->free_area_cache = mm->mmap_base;
20977+ mm->mmap_base = base;
20978+ mm->free_area_cache = base;
20979 mm->cached_hole_size = ~0UL;
20980
20981 return addr;
20982diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
20983index f84fe00..93fe08f 100644
20984--- a/arch/x86/kernel/tboot.c
20985+++ b/arch/x86/kernel/tboot.c
20986@@ -220,7 +220,7 @@ static int tboot_setup_sleep(void)
20987
20988 void tboot_shutdown(u32 shutdown_type)
20989 {
20990- void (*shutdown)(void);
20991+ void (* __noreturn shutdown)(void);
20992
20993 if (!tboot_enabled())
20994 return;
20995@@ -242,7 +242,7 @@ void tboot_shutdown(u32 shutdown_type)
20996
20997 switch_to_tboot_pt();
20998
20999- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
21000+ shutdown = (void *)tboot->shutdown_entry;
21001 shutdown();
21002
21003 /* should not reach here */
21004@@ -300,7 +300,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
21005 return 0;
21006 }
21007
21008-static atomic_t ap_wfs_count;
21009+static atomic_unchecked_t ap_wfs_count;
21010
21011 static int tboot_wait_for_aps(int num_aps)
21012 {
21013@@ -324,9 +324,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
21014 {
21015 switch (action) {
21016 case CPU_DYING:
21017- atomic_inc(&ap_wfs_count);
21018+ atomic_inc_unchecked(&ap_wfs_count);
21019 if (num_online_cpus() == 1)
21020- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
21021+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
21022 return NOTIFY_BAD;
21023 break;
21024 }
21025@@ -345,7 +345,7 @@ static __init int tboot_late_init(void)
21026
21027 tboot_create_trampoline();
21028
21029- atomic_set(&ap_wfs_count, 0);
21030+ atomic_set_unchecked(&ap_wfs_count, 0);
21031 register_hotcpu_notifier(&tboot_cpu_notifier);
21032
21033 acpi_os_set_prepare_sleep(&tboot_sleep);
21034diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
21035index 24d3c91..d06b473 100644
21036--- a/arch/x86/kernel/time.c
21037+++ b/arch/x86/kernel/time.c
21038@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
21039 {
21040 unsigned long pc = instruction_pointer(regs);
21041
21042- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
21043+ if (!user_mode(regs) && in_lock_functions(pc)) {
21044 #ifdef CONFIG_FRAME_POINTER
21045- return *(unsigned long *)(regs->bp + sizeof(long));
21046+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
21047 #else
21048 unsigned long *sp =
21049 (unsigned long *)kernel_stack_pointer(regs);
21050@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
21051 * or above a saved flags. Eflags has bits 22-31 zero,
21052 * kernel addresses don't.
21053 */
21054+
21055+#ifdef CONFIG_PAX_KERNEXEC
21056+ return ktla_ktva(sp[0]);
21057+#else
21058 if (sp[0] >> 22)
21059 return sp[0];
21060 if (sp[1] >> 22)
21061 return sp[1];
21062 #endif
21063+
21064+#endif
21065 }
21066 return pc;
21067 }
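
The profile_pc() heuristic being modified relies on a saved EFLAGS image always having bits 22-31 clear, while i386 kernel text addresses (0xc0000000 and up) do not, so "value >> 22" separates a return address from a saved flags word on the stack; the KERNEXEC branch drops the heuristic and translates the saved slot through ktla_ktva() instead, since the shifted text mapping breaks the address-range assumption. The distinction in two lines:

#include <stdio.h>

int main(void)
{
    unsigned long eflags = 0x00000246UL; /* typical saved EFLAGS image */
    unsigned long kaddr  = 0xc10123abUL; /* typical i386 kernel PC */
    printf("eflags>>22=%lu kaddr>>22=%lu\n", eflags >> 22, kaddr >> 22);
    return 0;
}
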
21068diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
21069index 9d9d2f9..cad418a 100644
21070--- a/arch/x86/kernel/tls.c
21071+++ b/arch/x86/kernel/tls.c
21072@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
21073 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
21074 return -EINVAL;
21075
21076+#ifdef CONFIG_PAX_SEGMEXEC
21077+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
21078+ return -EINVAL;
21079+#endif
21080+
21081 set_tls_desc(p, idx, &info, 1);
21082
21083 return 0;
21084@@ -204,7 +209,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
21085
21086 if (kbuf)
21087 info = kbuf;
21088- else if (__copy_from_user(infobuf, ubuf, count))
21089+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
21090 return -EFAULT;
21091 else
21092 info = infobuf;
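
The tls.c guard is a straightforward bounds fix: regset_tls_set() copies a caller-controlled count into a fixed-size on-stack array, and the added "count > sizeof infobuf" test rejects oversized requests instead of letting __copy_from_user() overrun the buffer. The same pattern in standalone form (struct layout simplified):

#include <stdio.h>
#include <string.h>

struct user_desc_model { unsigned int entry_number, base_addr, limit, flags; };

static int set_tls_model(const void *ubuf, size_t count)
{
    struct user_desc_model infobuf[3]; /* fixed-size destination */

    if (count > sizeof infobuf)  /* the added check: reject, don't truncate */
        return -1;               /* -EFAULT in the kernel */
    memcpy(infobuf, ubuf, count); /* stands in for __copy_from_user() */
    return 0;
}

int main(void)
{
    char big[4096] = {0};
    printf("%d\n", set_tls_model(big, sizeof big));                       /* -1 */
    printf("%d\n", set_tls_model(big, 3 * sizeof(struct user_desc_model))); /* 0 */
    return 0;
}
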
21093diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
21094index 8276dc6..4ca48a2 100644
21095--- a/arch/x86/kernel/traps.c
21096+++ b/arch/x86/kernel/traps.c
21097@@ -71,12 +71,6 @@ asmlinkage int system_call(void);
21098
21099 /* Do we ignore FPU interrupts ? */
21100 char ignore_fpu_irq;
21101-
21102-/*
21103- * The IDT has to be page-aligned to simplify the Pentium
21104- * F0 0F bug workaround.
21105- */
21106-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
21107 #endif
21108
21109 DECLARE_BITMAP(used_vectors, NR_VECTORS);
21110@@ -109,11 +103,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
21111 }
21112
21113 static int __kprobes
21114-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
21115+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
21116 struct pt_regs *regs, long error_code)
21117 {
21118 #ifdef CONFIG_X86_32
21119- if (regs->flags & X86_VM_MASK) {
21120+ if (v8086_mode(regs)) {
21121 /*
21122 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
21123 * On nmi (interrupt 2), do_trap should not be called.
21124@@ -126,12 +120,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
21125 return -1;
21126 }
21127 #endif
21128- if (!user_mode(regs)) {
21129+ if (!user_mode_novm(regs)) {
21130 if (!fixup_exception(regs)) {
21131 tsk->thread.error_code = error_code;
21132 tsk->thread.trap_nr = trapnr;
21133+
21134+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21135+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
21136+ str = "PAX: suspicious stack segment fault";
21137+#endif
21138+
21139 die(str, regs, error_code);
21140 }
21141+
21142+#ifdef CONFIG_PAX_REFCOUNT
21143+ if (trapnr == 4)
21144+ pax_report_refcount_overflow(regs);
21145+#endif
21146+
21147 return 0;
21148 }
21149
21150@@ -139,7 +145,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
21151 }
21152
21153 static void __kprobes
21154-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
21155+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
21156 long error_code, siginfo_t *info)
21157 {
21158 struct task_struct *tsk = current;
21159@@ -163,7 +169,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
21160 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
21161 printk_ratelimit()) {
21162 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
21163- tsk->comm, tsk->pid, str,
21164+ tsk->comm, task_pid_nr(tsk), str,
21165 regs->ip, regs->sp, error_code);
21166 print_vma_addr(" in ", regs->ip);
21167 pr_cont("\n");
21168@@ -269,7 +275,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
21169 conditional_sti(regs);
21170
21171 #ifdef CONFIG_X86_32
21172- if (regs->flags & X86_VM_MASK) {
21173+ if (v8086_mode(regs)) {
21174 local_irq_enable();
21175 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
21176 goto exit;
21177@@ -277,18 +283,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
21178 #endif
21179
21180 tsk = current;
21181- if (!user_mode(regs)) {
21182+ if (!user_mode_novm(regs)) {
21183 if (fixup_exception(regs))
21184 goto exit;
21185
21186 tsk->thread.error_code = error_code;
21187 tsk->thread.trap_nr = X86_TRAP_GP;
21188 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
21189- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
21190+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
21191+
21192+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21193+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
21194+ die("PAX: suspicious general protection fault", regs, error_code);
21195+ else
21196+#endif
21197+
21198 die("general protection fault", regs, error_code);
21199+ }
21200 goto exit;
21201 }
21202
21203+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21204+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
21205+ struct mm_struct *mm = tsk->mm;
21206+ unsigned long limit;
21207+
21208+ down_write(&mm->mmap_sem);
21209+ limit = mm->context.user_cs_limit;
21210+ if (limit < TASK_SIZE) {
21211+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
21212+ up_write(&mm->mmap_sem);
21213+ return;
21214+ }
21215+ up_write(&mm->mmap_sem);
21216+ }
21217+#endif
21218+
21219 tsk->thread.error_code = error_code;
21220 tsk->thread.trap_nr = X86_TRAP_GP;
21221
21222@@ -443,7 +473,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
21223 /* It's safe to allow irq's after DR6 has been saved */
21224 preempt_conditional_sti(regs);
21225
21226- if (regs->flags & X86_VM_MASK) {
21227+ if (v8086_mode(regs)) {
21228 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
21229 X86_TRAP_DB);
21230 preempt_conditional_cli(regs);
21231@@ -458,7 +488,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
21232 * We already checked v86 mode above, so we can check for kernel mode
21233 * by just checking the CPL of CS.
21234 */
21235- if ((dr6 & DR_STEP) && !user_mode(regs)) {
21236+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
21237 tsk->thread.debugreg6 &= ~DR_STEP;
21238 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
21239 regs->flags &= ~X86_EFLAGS_TF;
21240@@ -490,7 +520,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
21241 return;
21242 conditional_sti(regs);
21243
21244- if (!user_mode_vm(regs))
21245+ if (!user_mode(regs))
21246 {
21247 if (!fixup_exception(regs)) {
21248 task->thread.error_code = error_code;
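
The traps.c hunks systematically swap the mode helpers: in this tree user_mode() covers any user context including vm86 (what mainline calls user_mode_vm()), and user_mode_novm() is the strict ring-3 check. A userspace model of the relationship, using i386 semantics (CPL in the low CS bits, VM flag in EFLAGS):

#include <stdbool.h>
#include <stdio.h>

#define X86_EFLAGS_VM (1UL << 17)

struct pt_regs_model { unsigned long cs, flags; };

static bool v8086_mode(const struct pt_regs_model *r)
{
    return r->flags & X86_EFLAGS_VM;
}

static bool user_mode_novm(const struct pt_regs_model *r)
{
    return (r->cs & 3) == 3;          /* ring 3; vm86 not counted */
}

static bool user_mode(const struct pt_regs_model *r)
{
    return user_mode_novm(r) || v8086_mode(r); /* ring 3 or vm86 */
}

int main(void)
{
    struct pt_regs_model vm86 = { .cs = 0, .flags = X86_EFLAGS_VM };
    printf("vm86: novm=%d user=%d\n", user_mode_novm(&vm86), user_mode(&vm86));
    return 0;
}
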
21249diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
21250index aafa555..a04691a 100644
21251--- a/arch/x86/kernel/uprobes.c
21252+++ b/arch/x86/kernel/uprobes.c
21253@@ -614,7 +614,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
21254 int ret = NOTIFY_DONE;
21255
21256 /* We are only interested in userspace traps */
21257- if (regs && !user_mode_vm(regs))
21258+ if (regs && !user_mode(regs))
21259 return NOTIFY_DONE;
21260
21261 switch (val) {
21262diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
21263index b9242ba..50c5edd 100644
21264--- a/arch/x86/kernel/verify_cpu.S
21265+++ b/arch/x86/kernel/verify_cpu.S
21266@@ -20,6 +20,7 @@
21267 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
21268 * arch/x86/kernel/trampoline_64.S: secondary processor verification
21269 * arch/x86/kernel/head_32.S: processor startup
21270+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
21271 *
21272 * verify_cpu, returns the status of longmode and SSE in register %eax.
21273 * 0: Success 1: Failure
21274diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
21275index 5c9687b..5f857d3 100644
21276--- a/arch/x86/kernel/vm86_32.c
21277+++ b/arch/x86/kernel/vm86_32.c
21278@@ -43,6 +43,7 @@
21279 #include <linux/ptrace.h>
21280 #include <linux/audit.h>
21281 #include <linux/stddef.h>
21282+#include <linux/grsecurity.h>
21283
21284 #include <asm/uaccess.h>
21285 #include <asm/io.h>
21286@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
21287 do_exit(SIGSEGV);
21288 }
21289
21290- tss = &per_cpu(init_tss, get_cpu());
21291+ tss = init_tss + get_cpu();
21292 current->thread.sp0 = current->thread.saved_sp0;
21293 current->thread.sysenter_cs = __KERNEL_CS;
21294 load_sp0(tss, &current->thread);
21295@@ -212,6 +213,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
21296 struct task_struct *tsk;
21297 int tmp, ret = -EPERM;
21298
21299+#ifdef CONFIG_GRKERNSEC_VM86
21300+ if (!capable(CAP_SYS_RAWIO)) {
21301+ gr_handle_vm86();
21302+ goto out;
21303+ }
21304+#endif
21305+
21306 tsk = current;
21307 if (tsk->thread.saved_sp0)
21308 goto out;
21309@@ -242,6 +250,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
21310 int tmp, ret;
21311 struct vm86plus_struct __user *v86;
21312
21313+#ifdef CONFIG_GRKERNSEC_VM86
21314+ if (!capable(CAP_SYS_RAWIO)) {
21315+ gr_handle_vm86();
21316+ ret = -EPERM;
21317+ goto out;
21318+ }
21319+#endif
21320+
21321 tsk = current;
21322 switch (cmd) {
21323 case VM86_REQUEST_IRQ:
21324@@ -328,7 +344,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
21325 tsk->thread.saved_fs = info->regs32->fs;
21326 tsk->thread.saved_gs = get_user_gs(info->regs32);
21327
21328- tss = &per_cpu(init_tss, get_cpu());
21329+ tss = init_tss + get_cpu();
21330 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
21331 if (cpu_has_sep)
21332 tsk->thread.sysenter_cs = 0;
21333@@ -535,7 +551,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
21334 goto cannot_handle;
21335 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
21336 goto cannot_handle;
21337- intr_ptr = (unsigned long __user *) (i << 2);
21338+ intr_ptr = (__force unsigned long __user *) (i << 2);
21339 if (get_user(segoffs, intr_ptr))
21340 goto cannot_handle;
21341 if ((segoffs >> 16) == BIOSSEG)
21342diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
21343index 22a1530..8fbaaad 100644
21344--- a/arch/x86/kernel/vmlinux.lds.S
21345+++ b/arch/x86/kernel/vmlinux.lds.S
21346@@ -26,6 +26,13 @@
21347 #include <asm/page_types.h>
21348 #include <asm/cache.h>
21349 #include <asm/boot.h>
21350+#include <asm/segment.h>
21351+
21352+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21353+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
21354+#else
21355+#define __KERNEL_TEXT_OFFSET 0
21356+#endif
21357
21358 #undef i386 /* in case the preprocessor is a 32bit one */
21359
21360@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
21361
21362 PHDRS {
21363 text PT_LOAD FLAGS(5); /* R_E */
21364+#ifdef CONFIG_X86_32
21365+ module PT_LOAD FLAGS(5); /* R_E */
21366+#endif
21367+#ifdef CONFIG_XEN
21368+ rodata PT_LOAD FLAGS(5); /* R_E */
21369+#else
21370+ rodata PT_LOAD FLAGS(4); /* R__ */
21371+#endif
21372 data PT_LOAD FLAGS(6); /* RW_ */
21373-#ifdef CONFIG_X86_64
21374+ init.begin PT_LOAD FLAGS(6); /* RW_ */
21375 #ifdef CONFIG_SMP
21376 percpu PT_LOAD FLAGS(6); /* RW_ */
21377 #endif
21378+ text.init PT_LOAD FLAGS(5); /* R_E */
21379+ text.exit PT_LOAD FLAGS(5); /* R_E */
21380 init PT_LOAD FLAGS(7); /* RWE */
21381-#endif
21382 note PT_NOTE FLAGS(0); /* ___ */
21383 }
21384
21385 SECTIONS
21386 {
21387 #ifdef CONFIG_X86_32
21388- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
21389- phys_startup_32 = startup_32 - LOAD_OFFSET;
21390+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
21391 #else
21392- . = __START_KERNEL;
21393- phys_startup_64 = startup_64 - LOAD_OFFSET;
21394+ . = __START_KERNEL;
21395 #endif
21396
21397 /* Text and read-only data */
21398- .text : AT(ADDR(.text) - LOAD_OFFSET) {
21399- _text = .;
21400+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
21401 /* bootstrapping code */
21402+#ifdef CONFIG_X86_32
21403+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
21404+#else
21405+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
21406+#endif
21407+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
21408+ _text = .;
21409 HEAD_TEXT
21410 #ifdef CONFIG_X86_32
21411 . = ALIGN(PAGE_SIZE);
21412@@ -108,13 +128,48 @@ SECTIONS
21413 IRQENTRY_TEXT
21414 *(.fixup)
21415 *(.gnu.warning)
21416- /* End of text section */
21417- _etext = .;
21418 } :text = 0x9090
21419
21420- NOTES :text :note
21421+ . += __KERNEL_TEXT_OFFSET;
21422
21423- EXCEPTION_TABLE(16) :text = 0x9090
21424+#ifdef CONFIG_X86_32
21425+ . = ALIGN(PAGE_SIZE);
21426+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
21427+
21428+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
21429+ MODULES_EXEC_VADDR = .;
21430+ BYTE(0)
21431+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
21432+ . = ALIGN(HPAGE_SIZE) - 1;
21433+ MODULES_EXEC_END = .;
21434+#endif
21435+
21436+ } :module
21437+#endif
21438+
21439+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
21440+ /* End of text section */
21441+ BYTE(0)
21442+ _etext = . - __KERNEL_TEXT_OFFSET;
21443+ }
21444+
21445+#ifdef CONFIG_X86_32
21446+ . = ALIGN(PAGE_SIZE);
21447+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
21448+ *(.idt)
21449+ . = ALIGN(PAGE_SIZE);
21450+ *(.empty_zero_page)
21451+ *(.initial_pg_fixmap)
21452+ *(.initial_pg_pmd)
21453+ *(.initial_page_table)
21454+ *(.swapper_pg_dir)
21455+ } :rodata
21456+#endif
21457+
21458+ . = ALIGN(PAGE_SIZE);
21459+ NOTES :rodata :note
21460+
21461+ EXCEPTION_TABLE(16) :rodata
21462
21463 #if defined(CONFIG_DEBUG_RODATA)
21464 /* .text should occupy whole number of pages */
21465@@ -126,16 +181,20 @@ SECTIONS
21466
21467 /* Data */
21468 .data : AT(ADDR(.data) - LOAD_OFFSET) {
21469+
21470+#ifdef CONFIG_PAX_KERNEXEC
21471+ . = ALIGN(HPAGE_SIZE);
21472+#else
21473+ . = ALIGN(PAGE_SIZE);
21474+#endif
21475+
21476 /* Start of data section */
21477 _sdata = .;
21478
21479 /* init_task */
21480 INIT_TASK_DATA(THREAD_SIZE)
21481
21482-#ifdef CONFIG_X86_32
21483- /* 32 bit has nosave before _edata */
21484 NOSAVE_DATA
21485-#endif
21486
21487 PAGE_ALIGNED_DATA(PAGE_SIZE)
21488
21489@@ -176,12 +235,19 @@ SECTIONS
21490 #endif /* CONFIG_X86_64 */
21491
21492 /* Init code and data - will be freed after init */
21493- . = ALIGN(PAGE_SIZE);
21494 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
21495+ BYTE(0)
21496+
21497+#ifdef CONFIG_PAX_KERNEXEC
21498+ . = ALIGN(HPAGE_SIZE);
21499+#else
21500+ . = ALIGN(PAGE_SIZE);
21501+#endif
21502+
21503 __init_begin = .; /* paired with __init_end */
21504- }
21505+ } :init.begin
21506
21507-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
21508+#ifdef CONFIG_SMP
21509 /*
21510 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
21511 * output PHDR, so the next output section - .init.text - should
21512@@ -190,12 +256,27 @@ SECTIONS
21513 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
21514 #endif
21515
21516- INIT_TEXT_SECTION(PAGE_SIZE)
21517-#ifdef CONFIG_X86_64
21518- :init
21519-#endif
21520+ . = ALIGN(PAGE_SIZE);
21521+ init_begin = .;
21522+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
21523+ VMLINUX_SYMBOL(_sinittext) = .;
21524+ INIT_TEXT
21525+ VMLINUX_SYMBOL(_einittext) = .;
21526+ . = ALIGN(PAGE_SIZE);
21527+ } :text.init
21528
21529- INIT_DATA_SECTION(16)
21530+ /*
21531+ * .exit.text is discard at runtime, not link time, to deal with
21532+ * references from .altinstructions and .eh_frame
21533+ */
21534+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
21535+ EXIT_TEXT
21536+ . = ALIGN(16);
21537+ } :text.exit
21538+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
21539+
21540+ . = ALIGN(PAGE_SIZE);
21541+ INIT_DATA_SECTION(16) :init
21542
21543 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
21544 __x86_cpu_dev_start = .;
21545@@ -257,19 +338,12 @@ SECTIONS
21546 }
21547
21548 . = ALIGN(8);
21549- /*
21550- * .exit.text is discard at runtime, not link time, to deal with
21551- * references from .altinstructions and .eh_frame
21552- */
21553- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
21554- EXIT_TEXT
21555- }
21556
21557 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
21558 EXIT_DATA
21559 }
21560
21561-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
21562+#ifndef CONFIG_SMP
21563 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
21564 #endif
21565
21566@@ -288,16 +362,10 @@ SECTIONS
21567 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
21568 __smp_locks = .;
21569 *(.smp_locks)
21570- . = ALIGN(PAGE_SIZE);
21571 __smp_locks_end = .;
21572+ . = ALIGN(PAGE_SIZE);
21573 }
21574
21575-#ifdef CONFIG_X86_64
21576- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
21577- NOSAVE_DATA
21578- }
21579-#endif
21580-
21581 /* BSS */
21582 . = ALIGN(PAGE_SIZE);
21583 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
21584@@ -313,6 +381,7 @@ SECTIONS
21585 __brk_base = .;
21586 . += 64 * 1024; /* 64k alignment slop space */
21587 *(.brk_reservation) /* areas brk users have reserved */
21588+ . = ALIGN(HPAGE_SIZE);
21589 __brk_limit = .;
21590 }
21591
21592@@ -339,13 +408,12 @@ SECTIONS
21593 * for the boot processor.
21594 */
21595 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
21596-INIT_PER_CPU(gdt_page);
21597 INIT_PER_CPU(irq_stack_union);
21598
21599 /*
21600 * Build-time check on the image size:
21601 */
21602-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
21603+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
21604 "kernel image bigger than KERNEL_IMAGE_SIZE");
21605
21606 #ifdef CONFIG_SMP
21607diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
21608index 992f890..0ab1aae 100644
21609--- a/arch/x86/kernel/vsmp_64.c
21610+++ b/arch/x86/kernel/vsmp_64.c
21611@@ -114,7 +114,7 @@ static void __init set_vsmp_pv_ops(void)
21612 pv_irq_ops.irq_enable = PV_CALLEE_SAVE(vsmp_irq_enable);
21613 pv_irq_ops.save_fl = PV_CALLEE_SAVE(vsmp_save_fl);
21614 pv_irq_ops.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl);
21615- pv_init_ops.patch = vsmp_patch;
21616+ *(void **)&pv_init_ops.patch = vsmp_patch;
21617 ctl &= ~(1 << 4);
21618 }
21619 writel(ctl, address + 4);
21620@@ -217,8 +217,8 @@ static void fill_vector_allocation_domain(int cpu, struct cpumask *retmask,
21621 static void vsmp_apic_post_init(void)
21622 {
21623 /* need to update phys_pkg_id */
21624- apic->phys_pkg_id = apicid_phys_pkg_id;
21625- apic->vector_allocation_domain = fill_vector_allocation_domain;
21626+ *(void **)&apic->phys_pkg_id = apicid_phys_pkg_id;
21627+ *(void **)&apic->vector_allocation_domain = fill_vector_allocation_domain;
21628 }
21629
21630 void __init vsmp_init(void)
21631@@ -227,7 +227,7 @@ void __init vsmp_init(void)
21632 if (!is_vsmp_box())
21633 return;
21634
21635- x86_platform.apic_post_init = vsmp_apic_post_init;
21636+ *(void **)&x86_platform.apic_post_init = vsmp_apic_post_init;
21637
21638 vsmp_cap_cpus();
21639
21640diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
21641index 3a3e8c9..1af9465 100644
21642--- a/arch/x86/kernel/vsyscall_64.c
21643+++ b/arch/x86/kernel/vsyscall_64.c
21644@@ -56,15 +56,13 @@
21645 DEFINE_VVAR(int, vgetcpu_mode);
21646 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
21647
21648-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
21649+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
21650
21651 static int __init vsyscall_setup(char *str)
21652 {
21653 if (str) {
21654 if (!strcmp("emulate", str))
21655 vsyscall_mode = EMULATE;
21656- else if (!strcmp("native", str))
21657- vsyscall_mode = NATIVE;
21658 else if (!strcmp("none", str))
21659 vsyscall_mode = NONE;
21660 else
21661@@ -315,8 +313,7 @@ done:
21662 return true;
21663
21664 sigsegv:
21665- force_sig(SIGSEGV, current);
21666- return true;
21667+ do_group_exit(SIGKILL);
21668 }
21669
21670 /*
21671@@ -369,10 +366,7 @@ void __init map_vsyscall(void)
21672 extern char __vvar_page;
21673 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
21674
21675- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
21676- vsyscall_mode == NATIVE
21677- ? PAGE_KERNEL_VSYSCALL
21678- : PAGE_KERNEL_VVAR);
21679+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
21680 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
21681 (unsigned long)VSYSCALL_START);
21682
21683diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
21684index 1330dd1..d220b99 100644
21685--- a/arch/x86/kernel/x8664_ksyms_64.c
21686+++ b/arch/x86/kernel/x8664_ksyms_64.c
21687@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
21688 EXPORT_SYMBOL(copy_user_generic_unrolled);
21689 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
21690 EXPORT_SYMBOL(__copy_user_nocache);
21691-EXPORT_SYMBOL(_copy_from_user);
21692-EXPORT_SYMBOL(_copy_to_user);
21693
21694 EXPORT_SYMBOL(copy_page);
21695 EXPORT_SYMBOL(clear_page);
21696diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
21697index ada87a3..afea76d 100644
21698--- a/arch/x86/kernel/xsave.c
21699+++ b/arch/x86/kernel/xsave.c
21700@@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
21701 {
21702 int err;
21703
21704+ buf = (struct xsave_struct __user *)____m(buf);
21705 if (use_xsave())
21706 err = xsave_user(buf);
21707 else if (use_fxsr())
21708@@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
21709 */
21710 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
21711 {
21712+ buf = (void __user *)____m(buf);
21713 if (use_xsave()) {
21714 if ((unsigned long)buf % 64 || fx_only) {
21715 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
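
Both xsave.c hunks rebase the user pointer through ____m() before the FPU save/restore helpers touch it; under UDEREF on amd64 the kernel reaches userland via a relocated shadow mapping, so raw user addresses must be adjusted first. A heavily simplified model — the constant and the plain addition below are illustrative assumptions, not the tree's actual definition:

#include <stdio.h>

#define USER_SHADOW_BASE 0xfffe000000000000UL /* hypothetical shadow base */

static const void *____m_model(const void *uptr)
{
    /* Rebase a raw user pointer into the kernel's view of userland. */
    return (const void *)((unsigned long)uptr + USER_SHADOW_BASE);
}

int main(void)
{
    void *ubuf = (void *)0x00007f0012340000UL;
    printf("%p -> %p\n", ubuf, (void *)____m_model(ubuf));
    return 0;
}
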
21716diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
21717index ec79e77..420f5cc 100644
21718--- a/arch/x86/kvm/cpuid.c
21719+++ b/arch/x86/kvm/cpuid.c
21720@@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
21721 struct kvm_cpuid2 *cpuid,
21722 struct kvm_cpuid_entry2 __user *entries)
21723 {
21724- int r;
21725+ int r, i;
21726
21727 r = -E2BIG;
21728 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
21729 goto out;
21730 r = -EFAULT;
21731- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
21732- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21733+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21734 goto out;
21735+ for (i = 0; i < cpuid->nent; ++i) {
21736+ struct kvm_cpuid_entry2 cpuid_entry;
21737+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
21738+ goto out;
21739+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
21740+ }
21741 vcpu->arch.cpuid_nent = cpuid->nent;
21742 kvm_apic_set_version(vcpu);
21743 kvm_x86_ops->cpuid_update(vcpu);
21744@@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
21745 struct kvm_cpuid2 *cpuid,
21746 struct kvm_cpuid_entry2 __user *entries)
21747 {
21748- int r;
21749+ int r, i;
21750
21751 r = -E2BIG;
21752 if (cpuid->nent < vcpu->arch.cpuid_nent)
21753 goto out;
21754 r = -EFAULT;
21755- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
21756- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21757+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21758 goto out;
21759+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
21760+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
21761+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
21762+ goto out;
21763+ }
21764 return 0;
21765
21766 out:
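
The cpuid ioctl hunks above replace one bulk copy whose size is derived from a user-supplied count with a single access_ok() check over the whole range followed by fixed-size per-entry copies through a stack temporary, so each individual copy is always exactly sizeof(struct kvm_cpuid_entry2). A minimal sketch of the same pattern with simplified, illustrative names (struct entry and copy_entries are not the KVM symbols):

    struct entry { u32 function, index; };  /* stand-in for kvm_cpuid_entry2 */

    static int copy_entries(struct entry *dst,
                            const struct entry __user *src, unsigned int nent)
    {
        unsigned int i;

        /* validate the whole user range once... */
        if (!access_ok(VERIFY_READ, src, nent * sizeof(*src)))
            return -EFAULT;
        /* ...then copy one fixed-size entry at a time via a bounce buffer */
        for (i = 0; i < nent; i++) {
            struct entry tmp;

            if (__copy_from_user(&tmp, src + i, sizeof(tmp)))
                return -EFAULT;
            dst[i] = tmp;
        }
        return 0;
    }
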
21767diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
21768index bba39bf..296540a 100644
21769--- a/arch/x86/kvm/emulate.c
21770+++ b/arch/x86/kvm/emulate.c
21771@@ -292,6 +292,7 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
21772
21773 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
21774 do { \
21775+ unsigned long _tmp; \
21776 __asm__ __volatile__ ( \
21777 _PRE_EFLAGS("0", "4", "2") \
21778 _op _suffix " %"_x"3,%1; " \
21779@@ -306,8 +307,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
21780 /* Raw emulation: instruction has two explicit operands. */
21781 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
21782 do { \
21783- unsigned long _tmp; \
21784- \
21785 switch ((ctxt)->dst.bytes) { \
21786 case 2: \
21787 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
21788@@ -323,7 +322,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
21789
21790 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
21791 do { \
21792- unsigned long _tmp; \
21793 switch ((ctxt)->dst.bytes) { \
21794 case 1: \
21795 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
21796diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
21797index 43e9fad..3b7c059 100644
21798--- a/arch/x86/kvm/lapic.c
21799+++ b/arch/x86/kvm/lapic.c
21800@@ -55,7 +55,7 @@
21801 #define APIC_BUS_CYCLE_NS 1
21802
21803 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
21804-#define apic_debug(fmt, arg...)
21805+#define apic_debug(fmt, arg...) do {} while (0)
21806
21807 #define APIC_LVT_NUM 6
21808 /* 14 is the version for Xeon and Pentium 8.4.8*/
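
The lapic.c change looks cosmetic but fixes a macro-hygiene problem: an apic_debug() that expands to nothing silently tolerates a missing semicolon, whereas `do {} while (0)` keeps the macro a single statement that must be terminated like a function call, so call sites compile identically whether the debug body is empty or real:

    #define apic_debug_empty(fmt, arg...)                  /* old: expands to nothing */
    #define apic_debug_stmt(fmt, arg...) do {} while (0)   /* new: one full statement */

    void demo(int verbose)
    {
        if (verbose)
            apic_debug_stmt("mode %d\n", 1);  /* one statement; the else below
                                               * binds the same with or without
                                               * a real debug body */
        else
            apic_debug_stmt("quiet\n");
        /* apic_debug_stmt("oops")  <- missing ';' is now a compile error,
         * which the empty form would have silently accepted */
    }
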
21809diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
21810index 714e2c0..3f7a086 100644
21811--- a/arch/x86/kvm/paging_tmpl.h
21812+++ b/arch/x86/kvm/paging_tmpl.h
21813@@ -208,7 +208,7 @@ retry_walk:
21814 if (unlikely(kvm_is_error_hva(host_addr)))
21815 goto error;
21816
21817- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
21818+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
21819 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
21820 goto error;
21821 walker->ptep_user[walker->level - 1] = ptep_user;
21822diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
21823index d017df3..61ae42e 100644
21824--- a/arch/x86/kvm/svm.c
21825+++ b/arch/x86/kvm/svm.c
21826@@ -3500,7 +3500,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
21827 int cpu = raw_smp_processor_id();
21828
21829 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
21830+
21831+ pax_open_kernel();
21832 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
21833+ pax_close_kernel();
21834+
21835 load_TR_desc();
21836 }
21837
21838@@ -3874,6 +3878,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
21839 #endif
21840 #endif
21841
21842+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21843+ __set_fs(current_thread_info()->addr_limit);
21844+#endif
21845+
21846 reload_tss(vcpu);
21847
21848 local_irq_disable();
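
The svm.c hunk above (and the matching vmx.c hunk below) wraps the one legitimate write to the TSS descriptor in pax_open_kernel()/pax_close_kernel(). Under KERNEXEC the GDT and similar structures are kept read-only, and this pair temporarily lifts write protection (on x86 essentially by toggling CR0.WP or switching to a writable alias) around a deliberate store. The idiom, as a sketch:

    static void mark_tss_available(struct desc_struct *tss_desc)
    {
        pax_open_kernel();      /* allow the one intentional write */
        tss_desc->type = 9;     /* available 32/64-bit TSS */
        pax_close_kernel();     /* re-arm read-only protection */
    }
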
21849diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
21850index f858159..4ab7dba 100644
21851--- a/arch/x86/kvm/vmx.c
21852+++ b/arch/x86/kvm/vmx.c
21853@@ -1332,7 +1332,11 @@ static void reload_tss(void)
21854 struct desc_struct *descs;
21855
21856 descs = (void *)gdt->address;
21857+
21858+ pax_open_kernel();
21859 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
21860+ pax_close_kernel();
21861+
21862 load_TR_desc();
21863 }
21864
21865@@ -1546,6 +1550,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
21866 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
21867 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
21868
21869+#ifdef CONFIG_PAX_PER_CPU_PGD
21870+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
21871+#endif
21872+
21873 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
21874 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
21875 vmx->loaded_vmcs->cpu = cpu;
21876@@ -2669,8 +2677,11 @@ static __init int hardware_setup(void)
21877 if (!cpu_has_vmx_flexpriority())
21878 flexpriority_enabled = 0;
21879
21880- if (!cpu_has_vmx_tpr_shadow())
21881- kvm_x86_ops->update_cr8_intercept = NULL;
21882+ if (!cpu_has_vmx_tpr_shadow()) {
21883+ pax_open_kernel();
21884+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
21885+ pax_close_kernel();
21886+ }
21887
21888 if (enable_ept && !cpu_has_vmx_ept_2m_page())
21889 kvm_disable_largepages();
21890@@ -3712,7 +3723,10 @@ static void vmx_set_constant_host_state(void)
21891
21892 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
21893 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
21894+
21895+#ifndef CONFIG_PAX_PER_CPU_PGD
21896 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
21897+#endif
21898
21899 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
21900 #ifdef CONFIG_X86_64
21901@@ -3733,7 +3747,7 @@ static void vmx_set_constant_host_state(void)
21902 native_store_idt(&dt);
21903 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
21904
21905- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
21906+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
21907
21908 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
21909 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
21910@@ -6279,6 +6293,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
21911 "jmp 2f \n\t"
21912 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
21913 "2: "
21914+
21915+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21916+ "ljmp %[cs],$3f\n\t"
21917+ "3: "
21918+#endif
21919+
21920 /* Save guest registers, load host registers, keep flags */
21921 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
21922 "pop %0 \n\t"
21923@@ -6331,6 +6351,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
21924 #endif
21925 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
21926 [wordsize]"i"(sizeof(ulong))
21927+
21928+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21929+ ,[cs]"i"(__KERNEL_CS)
21930+#endif
21931+
21932 : "cc", "memory"
21933 #ifdef CONFIG_X86_64
21934 , "rax", "rbx", "rdi", "rsi"
21935@@ -6344,7 +6369,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
21936 if (debugctlmsr)
21937 update_debugctlmsr(debugctlmsr);
21938
21939-#ifndef CONFIG_X86_64
21940+#ifdef CONFIG_X86_32
21941 /*
21942 * The sysexit path does not restore ds/es, so we must set them to
21943 * a reasonable value ourselves.
21944@@ -6353,8 +6378,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
21945 * may be executed in interrupt context, which saves and restore segments
21946 * around it, nullifying its effect.
21947 */
21948- loadsegment(ds, __USER_DS);
21949- loadsegment(es, __USER_DS);
21950+ loadsegment(ds, __KERNEL_DS);
21951+ loadsegment(es, __KERNEL_DS);
21952+ loadsegment(ss, __KERNEL_DS);
21953+
21954+#ifdef CONFIG_PAX_KERNEXEC
21955+ loadsegment(fs, __KERNEL_PERCPU);
21956+#endif
21957+
21958+#ifdef CONFIG_PAX_MEMORY_UDEREF
21959+ __set_fs(current_thread_info()->addr_limit);
21960+#endif
21961+
21962 #endif
21963
21964 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
21965diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
21966index 4f76417..93429b5 100644
21967--- a/arch/x86/kvm/x86.c
21968+++ b/arch/x86/kvm/x86.c
21969@@ -1390,8 +1390,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
21970 {
21971 struct kvm *kvm = vcpu->kvm;
21972 int lm = is_long_mode(vcpu);
21973- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
21974- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
21975+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
21976+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
21977 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
21978 : kvm->arch.xen_hvm_config.blob_size_32;
21979 u32 page_num = data & ~PAGE_MASK;
21980@@ -2255,6 +2255,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
21981 if (n < msr_list.nmsrs)
21982 goto out;
21983 r = -EFAULT;
21984+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
21985+ goto out;
21986 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
21987 num_msrs_to_save * sizeof(u32)))
21988 goto out;
21989@@ -2379,7 +2381,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
21990 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
21991 struct kvm_interrupt *irq)
21992 {
21993- if (irq->irq < 0 || irq->irq >= KVM_NR_INTERRUPTS)
21994+ if (irq->irq >= KVM_NR_INTERRUPTS)
21995 return -EINVAL;
21996 if (irqchip_in_kernel(vcpu->kvm))
21997 return -ENXIO;
21998@@ -4881,7 +4883,7 @@ static void kvm_set_mmio_spte_mask(void)
21999 kvm_mmu_set_mmio_spte_mask(mask);
22000 }
22001
22002-int kvm_arch_init(void *opaque)
22003+int kvm_arch_init(const void *opaque)
22004 {
22005 int r;
22006 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
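
Three small hardening patterns recur in the x86.c hunks: user-controlled blob addresses gain the __user annotation so sparse tracks the domain crossing, a kernel-held count is clamped against the real array size before copy_to_user(), and an unsigned value drops the dead `< 0` arm of its range check. In miniature (table and copy_table_to_user are illustrative names; KVM_NR_INTERRUPTS is from the source):

    static u32 table[64];

    static long copy_table_to_user(u32 __user *dst, unsigned int n)
    {
        if (n > ARRAY_SIZE(table))   /* never walk past the source array, */
            return -E2BIG;           /* even if a later bug corrupts n    */
        if (copy_to_user(dst, table, n * sizeof(u32)))
            return -EFAULT;
        return 0;
    }

    static int check_irq(u32 irq)
    {
        return irq >= KVM_NR_INTERRUPTS ? -EINVAL : 0;  /* u32 is never < 0 */
    }
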
22007diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
22008index 642d880..5dd034e 100644
22009--- a/arch/x86/lguest/boot.c
22010+++ b/arch/x86/lguest/boot.c
22011@@ -1116,12 +1116,12 @@ static u32 lguest_apic_safe_wait_icr_idle(void)
22012
22013 static void set_lguest_basic_apic_ops(void)
22014 {
22015- apic->read = lguest_apic_read;
22016- apic->write = lguest_apic_write;
22017- apic->icr_read = lguest_apic_icr_read;
22018- apic->icr_write = lguest_apic_icr_write;
22019- apic->wait_icr_idle = lguest_apic_wait_icr_idle;
22020- apic->safe_wait_icr_idle = lguest_apic_safe_wait_icr_idle;
22021+ *(void **)&apic->read = lguest_apic_read;
22022+ *(void **)&apic->write = lguest_apic_write;
22023+ *(void **)&apic->icr_read = lguest_apic_icr_read;
22024+ *(void **)&apic->icr_write = lguest_apic_icr_write;
22025+ *(void **)&apic->wait_icr_idle = lguest_apic_wait_icr_idle;
22026+ *(void **)&apic->safe_wait_icr_idle = lguest_apic_safe_wait_icr_idle;
22027 };
22028 #endif
22029
22030@@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
22031 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
22032 * Launcher to reboot us.
22033 */
22034-static void lguest_restart(char *reason)
22035+static __noreturn void lguest_restart(char *reason)
22036 {
22037 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
22038+ BUG();
22039 }
22040
22041 /*G:050
22042@@ -1292,28 +1293,28 @@ __init void lguest_init(void)
22043 pv_irq_ops.safe_halt = lguest_safe_halt;
22044
22045 /* Setup operations */
22046- pv_init_ops.patch = lguest_patch;
22047+ *(void **)&pv_init_ops.patch = lguest_patch;
22048
22049 /* Intercepts of various CPU instructions */
22050- pv_cpu_ops.load_gdt = lguest_load_gdt;
22051- pv_cpu_ops.cpuid = lguest_cpuid;
22052- pv_cpu_ops.load_idt = lguest_load_idt;
22053- pv_cpu_ops.iret = lguest_iret;
22054- pv_cpu_ops.load_sp0 = lguest_load_sp0;
22055- pv_cpu_ops.load_tr_desc = lguest_load_tr_desc;
22056- pv_cpu_ops.set_ldt = lguest_set_ldt;
22057- pv_cpu_ops.load_tls = lguest_load_tls;
22058- pv_cpu_ops.set_debugreg = lguest_set_debugreg;
22059- pv_cpu_ops.clts = lguest_clts;
22060- pv_cpu_ops.read_cr0 = lguest_read_cr0;
22061- pv_cpu_ops.write_cr0 = lguest_write_cr0;
22062- pv_cpu_ops.read_cr4 = lguest_read_cr4;
22063- pv_cpu_ops.write_cr4 = lguest_write_cr4;
22064- pv_cpu_ops.write_gdt_entry = lguest_write_gdt_entry;
22065- pv_cpu_ops.write_idt_entry = lguest_write_idt_entry;
22066- pv_cpu_ops.wbinvd = lguest_wbinvd;
22067- pv_cpu_ops.start_context_switch = paravirt_start_context_switch;
22068- pv_cpu_ops.end_context_switch = lguest_end_context_switch;
22069+ *(void **)&pv_cpu_ops.load_gdt = lguest_load_gdt;
22070+ *(void **)&pv_cpu_ops.cpuid = lguest_cpuid;
22071+ *(void **)&pv_cpu_ops.load_idt = lguest_load_idt;
22072+ *(void **)&pv_cpu_ops.iret = lguest_iret;
22073+ *(void **)&pv_cpu_ops.load_sp0 = lguest_load_sp0;
22074+ *(void **)&pv_cpu_ops.load_tr_desc = lguest_load_tr_desc;
22075+ *(void **)&pv_cpu_ops.set_ldt = lguest_set_ldt;
22076+ *(void **)&pv_cpu_ops.load_tls = lguest_load_tls;
22077+ *(void **)&pv_cpu_ops.set_debugreg = lguest_set_debugreg;
22078+ *(void **)&pv_cpu_ops.clts = lguest_clts;
22079+ *(void **)&pv_cpu_ops.read_cr0 = lguest_read_cr0;
22080+ *(void **)&pv_cpu_ops.write_cr0 = lguest_write_cr0;
22081+ *(void **)&pv_cpu_ops.read_cr4 = lguest_read_cr4;
22082+ *(void **)&pv_cpu_ops.write_cr4 = lguest_write_cr4;
22083+ *(void **)&pv_cpu_ops.write_gdt_entry = lguest_write_gdt_entry;
22084+ *(void **)&pv_cpu_ops.write_idt_entry = lguest_write_idt_entry;
22085+ *(void **)&pv_cpu_ops.wbinvd = lguest_wbinvd;
22086+ *(void **)&pv_cpu_ops.start_context_switch = paravirt_start_context_switch;
22087+ *(void **)&pv_cpu_ops.end_context_switch = lguest_end_context_switch;
22088
22089 /* Pagetable management */
22090 pv_mmu_ops.write_cr3 = lguest_write_cr3;
22091@@ -1341,11 +1342,11 @@ __init void lguest_init(void)
22092 set_lguest_basic_apic_ops();
22093 #endif
22094
22095- x86_init.resources.memory_setup = lguest_memory_setup;
22096- x86_init.irqs.intr_init = lguest_init_IRQ;
22097- x86_init.timers.timer_init = lguest_time_init;
22098- x86_platform.calibrate_tsc = lguest_tsc_khz;
22099- x86_platform.get_wallclock = lguest_get_wallclock;
22100+ *(void **)&x86_init.resources.memory_setup = lguest_memory_setup;
22101+ *(void **)&x86_init.irqs.intr_init = lguest_init_IRQ;
22102+ *(void **)&x86_init.timers.timer_init = lguest_time_init;
22103+ *(void **)&x86_platform.calibrate_tsc = lguest_tsc_khz;
22104+ *(void **)&x86_platform.get_wallclock = lguest_get_wallclock;
22105
22106 /*
22107 * Now is a good time to look at the implementations of these functions
22108@@ -1434,7 +1435,7 @@ __init void lguest_init(void)
22109 * routine.
22110 */
22111 pm_power_off = lguest_power_off;
22112- machine_ops.restart = lguest_restart;
22113+ *(void **)&machine_ops.restart = lguest_restart;
22114
22115 /*
22116 * Now we're set up, call i386_start_kernel() in head32.c and we proceed
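
The wholesale rewrite of `ops.field = fn` into `*(void **)&ops.field = fn` throughout lguest (and earlier in the vmx and apic hunks) exists because grsecurity constifies the pv_ops and x86 platform tables; init-time assignments then need a cast through void ** so the compiler accepts the store, with the backing storage made writable by the kernel itself during early boot. Reduced to a sketch (formally undefined behavior in ISO C; it relies on the hardened kernel's own layout guarantees):

    struct machine_ops_sk {
        void (* const restart)(char *reason);  /* constified slot */
    };

    static struct machine_ops_sk machine_ops_ex;

    static void install_restart(void (*fn)(char *))
    {
        /* strip the const qualifier on the one slot, not the whole table */
        *(void **)&machine_ops_ex.restart = fn;
    }
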
22117diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
22118index 00933d5..3a64af9 100644
22119--- a/arch/x86/lib/atomic64_386_32.S
22120+++ b/arch/x86/lib/atomic64_386_32.S
22121@@ -48,6 +48,10 @@ BEGIN(read)
22122 movl (v), %eax
22123 movl 4(v), %edx
22124 RET_ENDP
22125+BEGIN(read_unchecked)
22126+ movl (v), %eax
22127+ movl 4(v), %edx
22128+RET_ENDP
22129 #undef v
22130
22131 #define v %esi
22132@@ -55,6 +59,10 @@ BEGIN(set)
22133 movl %ebx, (v)
22134 movl %ecx, 4(v)
22135 RET_ENDP
22136+BEGIN(set_unchecked)
22137+ movl %ebx, (v)
22138+ movl %ecx, 4(v)
22139+RET_ENDP
22140 #undef v
22141
22142 #define v %esi
22143@@ -70,6 +78,20 @@ RET_ENDP
22144 BEGIN(add)
22145 addl %eax, (v)
22146 adcl %edx, 4(v)
22147+
22148+#ifdef CONFIG_PAX_REFCOUNT
22149+ jno 0f
22150+ subl %eax, (v)
22151+ sbbl %edx, 4(v)
22152+ int $4
22153+0:
22154+ _ASM_EXTABLE(0b, 0b)
22155+#endif
22156+
22157+RET_ENDP
22158+BEGIN(add_unchecked)
22159+ addl %eax, (v)
22160+ adcl %edx, 4(v)
22161 RET_ENDP
22162 #undef v
22163
22164@@ -77,6 +99,24 @@ RET_ENDP
22165 BEGIN(add_return)
22166 addl (v), %eax
22167 adcl 4(v), %edx
22168+
22169+#ifdef CONFIG_PAX_REFCOUNT
22170+ into
22171+1234:
22172+ _ASM_EXTABLE(1234b, 2f)
22173+#endif
22174+
22175+ movl %eax, (v)
22176+ movl %edx, 4(v)
22177+
22178+#ifdef CONFIG_PAX_REFCOUNT
22179+2:
22180+#endif
22181+
22182+RET_ENDP
22183+BEGIN(add_return_unchecked)
22184+ addl (v), %eax
22185+ adcl 4(v), %edx
22186 movl %eax, (v)
22187 movl %edx, 4(v)
22188 RET_ENDP
22189@@ -86,6 +126,20 @@ RET_ENDP
22190 BEGIN(sub)
22191 subl %eax, (v)
22192 sbbl %edx, 4(v)
22193+
22194+#ifdef CONFIG_PAX_REFCOUNT
22195+ jno 0f
22196+ addl %eax, (v)
22197+ adcl %edx, 4(v)
22198+ int $4
22199+0:
22200+ _ASM_EXTABLE(0b, 0b)
22201+#endif
22202+
22203+RET_ENDP
22204+BEGIN(sub_unchecked)
22205+ subl %eax, (v)
22206+ sbbl %edx, 4(v)
22207 RET_ENDP
22208 #undef v
22209
22210@@ -96,6 +150,27 @@ BEGIN(sub_return)
22211 sbbl $0, %edx
22212 addl (v), %eax
22213 adcl 4(v), %edx
22214+
22215+#ifdef CONFIG_PAX_REFCOUNT
22216+ into
22217+1234:
22218+ _ASM_EXTABLE(1234b, 2f)
22219+#endif
22220+
22221+ movl %eax, (v)
22222+ movl %edx, 4(v)
22223+
22224+#ifdef CONFIG_PAX_REFCOUNT
22225+2:
22226+#endif
22227+
22228+RET_ENDP
22229+BEGIN(sub_return_unchecked)
22230+ negl %edx
22231+ negl %eax
22232+ sbbl $0, %edx
22233+ addl (v), %eax
22234+ adcl 4(v), %edx
22235 movl %eax, (v)
22236 movl %edx, 4(v)
22237 RET_ENDP
22238@@ -105,6 +180,20 @@ RET_ENDP
22239 BEGIN(inc)
22240 addl $1, (v)
22241 adcl $0, 4(v)
22242+
22243+#ifdef CONFIG_PAX_REFCOUNT
22244+ jno 0f
22245+ subl $1, (v)
22246+ sbbl $0, 4(v)
22247+ int $4
22248+0:
22249+ _ASM_EXTABLE(0b, 0b)
22250+#endif
22251+
22252+RET_ENDP
22253+BEGIN(inc_unchecked)
22254+ addl $1, (v)
22255+ adcl $0, 4(v)
22256 RET_ENDP
22257 #undef v
22258
22259@@ -114,6 +203,26 @@ BEGIN(inc_return)
22260 movl 4(v), %edx
22261 addl $1, %eax
22262 adcl $0, %edx
22263+
22264+#ifdef CONFIG_PAX_REFCOUNT
22265+ into
22266+1234:
22267+ _ASM_EXTABLE(1234b, 2f)
22268+#endif
22269+
22270+ movl %eax, (v)
22271+ movl %edx, 4(v)
22272+
22273+#ifdef CONFIG_PAX_REFCOUNT
22274+2:
22275+#endif
22276+
22277+RET_ENDP
22278+BEGIN(inc_return_unchecked)
22279+ movl (v), %eax
22280+ movl 4(v), %edx
22281+ addl $1, %eax
22282+ adcl $0, %edx
22283 movl %eax, (v)
22284 movl %edx, 4(v)
22285 RET_ENDP
22286@@ -123,6 +232,20 @@ RET_ENDP
22287 BEGIN(dec)
22288 subl $1, (v)
22289 sbbl $0, 4(v)
22290+
22291+#ifdef CONFIG_PAX_REFCOUNT
22292+ jno 0f
22293+ addl $1, (v)
22294+ adcl $0, 4(v)
22295+ int $4
22296+0:
22297+ _ASM_EXTABLE(0b, 0b)
22298+#endif
22299+
22300+RET_ENDP
22301+BEGIN(dec_unchecked)
22302+ subl $1, (v)
22303+ sbbl $0, 4(v)
22304 RET_ENDP
22305 #undef v
22306
22307@@ -132,6 +255,26 @@ BEGIN(dec_return)
22308 movl 4(v), %edx
22309 subl $1, %eax
22310 sbbl $0, %edx
22311+
22312+#ifdef CONFIG_PAX_REFCOUNT
22313+ into
22314+1234:
22315+ _ASM_EXTABLE(1234b, 2f)
22316+#endif
22317+
22318+ movl %eax, (v)
22319+ movl %edx, 4(v)
22320+
22321+#ifdef CONFIG_PAX_REFCOUNT
22322+2:
22323+#endif
22324+
22325+RET_ENDP
22326+BEGIN(dec_return_unchecked)
22327+ movl (v), %eax
22328+ movl 4(v), %edx
22329+ subl $1, %eax
22330+ sbbl $0, %edx
22331 movl %eax, (v)
22332 movl %edx, 4(v)
22333 RET_ENDP
22334@@ -143,6 +286,13 @@ BEGIN(add_unless)
22335 adcl %edx, %edi
22336 addl (v), %eax
22337 adcl 4(v), %edx
22338+
22339+#ifdef CONFIG_PAX_REFCOUNT
22340+ into
22341+1234:
22342+ _ASM_EXTABLE(1234b, 2f)
22343+#endif
22344+
22345 cmpl %eax, %ecx
22346 je 3f
22347 1:
22348@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
22349 1:
22350 addl $1, %eax
22351 adcl $0, %edx
22352+
22353+#ifdef CONFIG_PAX_REFCOUNT
22354+ into
22355+1234:
22356+ _ASM_EXTABLE(1234b, 2f)
22357+#endif
22358+
22359 movl %eax, (v)
22360 movl %edx, 4(v)
22361 movl $1, %eax
22362@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
22363 movl 4(v), %edx
22364 subl $1, %eax
22365 sbbl $0, %edx
22366+
22367+#ifdef CONFIG_PAX_REFCOUNT
22368+ into
22369+1234:
22370+ _ASM_EXTABLE(1234b, 1f)
22371+#endif
22372+
22373 js 1f
22374 movl %eax, (v)
22375 movl %edx, 4(v)
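
All the PAX_REFCOUNT blocks added above follow one recipe: perform the 64-bit add or sub, and on signed overflow either undo it and trap (`jno 0f; subl/sbbl; int $4` in the in-memory forms) or divert past the store via the exception table (the `into` forms), so a wrapped reference count is never written back. Atomicity and trap handling aside, the logic matches this C sketch using GCC's overflow builtins:

    #include <stdbool.h>
    #include <stdint.h>

    /* non-atomic illustration only; the asm gets the same effect on a live
     * counter without a compare-exchange loop */
    static bool atomic64_add_checked(int64_t *v, int64_t delta)
    {
        int64_t sum;

        if (__builtin_add_overflow(*v, delta, &sum))
            return false;   /* overflow: counter left unchanged, caller traps */
        *v = sum;
        return true;
    }
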
22376diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
22377index f5cc9eb..51fa319 100644
22378--- a/arch/x86/lib/atomic64_cx8_32.S
22379+++ b/arch/x86/lib/atomic64_cx8_32.S
22380@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
22381 CFI_STARTPROC
22382
22383 read64 %ecx
22384+ pax_force_retaddr
22385 ret
22386 CFI_ENDPROC
22387 ENDPROC(atomic64_read_cx8)
22388
22389+ENTRY(atomic64_read_unchecked_cx8)
22390+ CFI_STARTPROC
22391+
22392+ read64 %ecx
22393+ pax_force_retaddr
22394+ ret
22395+ CFI_ENDPROC
22396+ENDPROC(atomic64_read_unchecked_cx8)
22397+
22398 ENTRY(atomic64_set_cx8)
22399 CFI_STARTPROC
22400
22401@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
22402 cmpxchg8b (%esi)
22403 jne 1b
22404
22405+ pax_force_retaddr
22406 ret
22407 CFI_ENDPROC
22408 ENDPROC(atomic64_set_cx8)
22409
22410+ENTRY(atomic64_set_unchecked_cx8)
22411+ CFI_STARTPROC
22412+
22413+1:
22414+/* we don't need LOCK_PREFIX since aligned 64-bit writes
22415+ * are atomic on 586 and newer */
22416+ cmpxchg8b (%esi)
22417+ jne 1b
22418+
22419+ pax_force_retaddr
22420+ ret
22421+ CFI_ENDPROC
22422+ENDPROC(atomic64_set_unchecked_cx8)
22423+
22424 ENTRY(atomic64_xchg_cx8)
22425 CFI_STARTPROC
22426
22427@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
22428 cmpxchg8b (%esi)
22429 jne 1b
22430
22431+ pax_force_retaddr
22432 ret
22433 CFI_ENDPROC
22434 ENDPROC(atomic64_xchg_cx8)
22435
22436-.macro addsub_return func ins insc
22437-ENTRY(atomic64_\func\()_return_cx8)
22438+.macro addsub_return func ins insc unchecked=""
22439+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
22440 CFI_STARTPROC
22441 SAVE ebp
22442 SAVE ebx
22443@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
22444 movl %edx, %ecx
22445 \ins\()l %esi, %ebx
22446 \insc\()l %edi, %ecx
22447+
22448+.ifb \unchecked
22449+#ifdef CONFIG_PAX_REFCOUNT
22450+ into
22451+2:
22452+ _ASM_EXTABLE(2b, 3f)
22453+#endif
22454+.endif
22455+
22456 LOCK_PREFIX
22457 cmpxchg8b (%ebp)
22458 jne 1b
22459-
22460-10:
22461 movl %ebx, %eax
22462 movl %ecx, %edx
22463+
22464+.ifb \unchecked
22465+#ifdef CONFIG_PAX_REFCOUNT
22466+3:
22467+#endif
22468+.endif
22469+
22470 RESTORE edi
22471 RESTORE esi
22472 RESTORE ebx
22473 RESTORE ebp
22474+ pax_force_retaddr
22475 ret
22476 CFI_ENDPROC
22477-ENDPROC(atomic64_\func\()_return_cx8)
22478+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
22479 .endm
22480
22481 addsub_return add add adc
22482 addsub_return sub sub sbb
22483+addsub_return add add adc _unchecked
22484+addsub_return sub sub sbb _unchecked
22485
22486-.macro incdec_return func ins insc
22487-ENTRY(atomic64_\func\()_return_cx8)
22488+.macro incdec_return func ins insc unchecked=""
22489+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
22490 CFI_STARTPROC
22491 SAVE ebx
22492
22493@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
22494 movl %edx, %ecx
22495 \ins\()l $1, %ebx
22496 \insc\()l $0, %ecx
22497+
22498+.ifb \unchecked
22499+#ifdef CONFIG_PAX_REFCOUNT
22500+ into
22501+2:
22502+ _ASM_EXTABLE(2b, 3f)
22503+#endif
22504+.endif
22505+
22506 LOCK_PREFIX
22507 cmpxchg8b (%esi)
22508 jne 1b
22509
22510-10:
22511 movl %ebx, %eax
22512 movl %ecx, %edx
22513+
22514+.ifb \unchecked
22515+#ifdef CONFIG_PAX_REFCOUNT
22516+3:
22517+#endif
22518+.endif
22519+
22520 RESTORE ebx
22521+ pax_force_retaddr
22522 ret
22523 CFI_ENDPROC
22524-ENDPROC(atomic64_\func\()_return_cx8)
22525+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
22526 .endm
22527
22528 incdec_return inc add adc
22529 incdec_return dec sub sbb
22530+incdec_return inc add adc _unchecked
22531+incdec_return dec sub sbb _unchecked
22532
22533 ENTRY(atomic64_dec_if_positive_cx8)
22534 CFI_STARTPROC
22535@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
22536 movl %edx, %ecx
22537 subl $1, %ebx
22538 sbb $0, %ecx
22539+
22540+#ifdef CONFIG_PAX_REFCOUNT
22541+ into
22542+1234:
22543+ _ASM_EXTABLE(1234b, 2f)
22544+#endif
22545+
22546 js 2f
22547 LOCK_PREFIX
22548 cmpxchg8b (%esi)
22549@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
22550 movl %ebx, %eax
22551 movl %ecx, %edx
22552 RESTORE ebx
22553+ pax_force_retaddr
22554 ret
22555 CFI_ENDPROC
22556 ENDPROC(atomic64_dec_if_positive_cx8)
22557@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
22558 movl %edx, %ecx
22559 addl %ebp, %ebx
22560 adcl %edi, %ecx
22561+
22562+#ifdef CONFIG_PAX_REFCOUNT
22563+ into
22564+1234:
22565+ _ASM_EXTABLE(1234b, 3f)
22566+#endif
22567+
22568 LOCK_PREFIX
22569 cmpxchg8b (%esi)
22570 jne 1b
22571@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
22572 CFI_ADJUST_CFA_OFFSET -8
22573 RESTORE ebx
22574 RESTORE ebp
22575+ pax_force_retaddr
22576 ret
22577 4:
22578 cmpl %edx, 4(%esp)
22579@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
22580 xorl %ecx, %ecx
22581 addl $1, %ebx
22582 adcl %edx, %ecx
22583+
22584+#ifdef CONFIG_PAX_REFCOUNT
22585+ into
22586+1234:
22587+ _ASM_EXTABLE(1234b, 3f)
22588+#endif
22589+
22590 LOCK_PREFIX
22591 cmpxchg8b (%esi)
22592 jne 1b
22593@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
22594 movl $1, %eax
22595 3:
22596 RESTORE ebx
22597+ pax_force_retaddr
22598 ret
22599 CFI_ENDPROC
22600 ENDPROC(atomic64_inc_not_zero_cx8)
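
Two themes start here and run through the rest of the lib/ hunks. First, every `ret` gains a pax_force_retaddr, which sanitizes the saved return address under KERNEXEC so a corrupted return cannot land in userland; the macro itself is defined in this patch's alternative-asm.h changes, outside this excerpt. Second, the gas macros grow an `unchecked=""` parameter so each overflow-checked primitive also emits an `_unchecked` twin for counters that are legitimately allowed to wrap. Conceptually the retaddr step is just a mask applied to the return slot (and, as far as one can tell from the patch, the reason the 64-bit string routines below systematically rename %r10 is that one KERNEXEC plugin mode reserves %r10 to hold that mask):

    /* conceptual sketch: OR a kernel-space mask into the saved return
     * address; the real macro is per-arch asm, e.g. "bts $63,(%rsp)"
     * on amd64 under the bts method */
    static inline void force_retaddr(unsigned long *ret_slot, unsigned long mask)
    {
        *ret_slot |= mask;
    }
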
22601diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
22602index 2af5df3..62b1a5a 100644
22603--- a/arch/x86/lib/checksum_32.S
22604+++ b/arch/x86/lib/checksum_32.S
22605@@ -29,7 +29,8 @@
22606 #include <asm/dwarf2.h>
22607 #include <asm/errno.h>
22608 #include <asm/asm.h>
22609-
22610+#include <asm/segment.h>
22611+
22612 /*
22613 * computes a partial checksum, e.g. for TCP/UDP fragments
22614 */
22615@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
22616
22617 #define ARGBASE 16
22618 #define FP 12
22619-
22620-ENTRY(csum_partial_copy_generic)
22621+
22622+ENTRY(csum_partial_copy_generic_to_user)
22623 CFI_STARTPROC
22624+
22625+#ifdef CONFIG_PAX_MEMORY_UDEREF
22626+ pushl_cfi %gs
22627+ popl_cfi %es
22628+ jmp csum_partial_copy_generic
22629+#endif
22630+
22631+ENTRY(csum_partial_copy_generic_from_user)
22632+
22633+#ifdef CONFIG_PAX_MEMORY_UDEREF
22634+ pushl_cfi %gs
22635+ popl_cfi %ds
22636+#endif
22637+
22638+ENTRY(csum_partial_copy_generic)
22639 subl $4,%esp
22640 CFI_ADJUST_CFA_OFFSET 4
22641 pushl_cfi %edi
22642@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
22643 jmp 4f
22644 SRC(1: movw (%esi), %bx )
22645 addl $2, %esi
22646-DST( movw %bx, (%edi) )
22647+DST( movw %bx, %es:(%edi) )
22648 addl $2, %edi
22649 addw %bx, %ax
22650 adcl $0, %eax
22651@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
22652 SRC(1: movl (%esi), %ebx )
22653 SRC( movl 4(%esi), %edx )
22654 adcl %ebx, %eax
22655-DST( movl %ebx, (%edi) )
22656+DST( movl %ebx, %es:(%edi) )
22657 adcl %edx, %eax
22658-DST( movl %edx, 4(%edi) )
22659+DST( movl %edx, %es:4(%edi) )
22660
22661 SRC( movl 8(%esi), %ebx )
22662 SRC( movl 12(%esi), %edx )
22663 adcl %ebx, %eax
22664-DST( movl %ebx, 8(%edi) )
22665+DST( movl %ebx, %es:8(%edi) )
22666 adcl %edx, %eax
22667-DST( movl %edx, 12(%edi) )
22668+DST( movl %edx, %es:12(%edi) )
22669
22670 SRC( movl 16(%esi), %ebx )
22671 SRC( movl 20(%esi), %edx )
22672 adcl %ebx, %eax
22673-DST( movl %ebx, 16(%edi) )
22674+DST( movl %ebx, %es:16(%edi) )
22675 adcl %edx, %eax
22676-DST( movl %edx, 20(%edi) )
22677+DST( movl %edx, %es:20(%edi) )
22678
22679 SRC( movl 24(%esi), %ebx )
22680 SRC( movl 28(%esi), %edx )
22681 adcl %ebx, %eax
22682-DST( movl %ebx, 24(%edi) )
22683+DST( movl %ebx, %es:24(%edi) )
22684 adcl %edx, %eax
22685-DST( movl %edx, 28(%edi) )
22686+DST( movl %edx, %es:28(%edi) )
22687
22688 lea 32(%esi), %esi
22689 lea 32(%edi), %edi
22690@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
22691 shrl $2, %edx # This clears CF
22692 SRC(3: movl (%esi), %ebx )
22693 adcl %ebx, %eax
22694-DST( movl %ebx, (%edi) )
22695+DST( movl %ebx, %es:(%edi) )
22696 lea 4(%esi), %esi
22697 lea 4(%edi), %edi
22698 dec %edx
22699@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
22700 jb 5f
22701 SRC( movw (%esi), %cx )
22702 leal 2(%esi), %esi
22703-DST( movw %cx, (%edi) )
22704+DST( movw %cx, %es:(%edi) )
22705 leal 2(%edi), %edi
22706 je 6f
22707 shll $16,%ecx
22708 SRC(5: movb (%esi), %cl )
22709-DST( movb %cl, (%edi) )
22710+DST( movb %cl, %es:(%edi) )
22711 6: addl %ecx, %eax
22712 adcl $0, %eax
22713 7:
22714@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
22715
22716 6001:
22717 movl ARGBASE+20(%esp), %ebx # src_err_ptr
22718- movl $-EFAULT, (%ebx)
22719+ movl $-EFAULT, %ss:(%ebx)
22720
22721 # zero the complete destination - computing the rest
22722 # is too much work
22723@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
22724
22725 6002:
22726 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
22727- movl $-EFAULT,(%ebx)
22728+ movl $-EFAULT,%ss:(%ebx)
22729 jmp 5000b
22730
22731 .previous
22732
22733+ pushl_cfi %ss
22734+ popl_cfi %ds
22735+ pushl_cfi %ss
22736+ popl_cfi %es
22737 popl_cfi %ebx
22738 CFI_RESTORE ebx
22739 popl_cfi %esi
22740@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
22741 popl_cfi %ecx # equivalent to addl $4,%esp
22742 ret
22743 CFI_ENDPROC
22744-ENDPROC(csum_partial_copy_generic)
22745+ENDPROC(csum_partial_copy_generic_to_user)
22746
22747 #else
22748
22749 /* Version for PentiumII/PPro */
22750
22751 #define ROUND1(x) \
22752+ nop; nop; nop; \
22753 SRC(movl x(%esi), %ebx ) ; \
22754 addl %ebx, %eax ; \
22755- DST(movl %ebx, x(%edi) ) ;
22756+ DST(movl %ebx, %es:x(%edi)) ;
22757
22758 #define ROUND(x) \
22759+ nop; nop; nop; \
22760 SRC(movl x(%esi), %ebx ) ; \
22761 adcl %ebx, %eax ; \
22762- DST(movl %ebx, x(%edi) ) ;
22763+ DST(movl %ebx, %es:x(%edi)) ;
22764
22765 #define ARGBASE 12
22766-
22767-ENTRY(csum_partial_copy_generic)
22768+
22769+ENTRY(csum_partial_copy_generic_to_user)
22770 CFI_STARTPROC
22771+
22772+#ifdef CONFIG_PAX_MEMORY_UDEREF
22773+ pushl_cfi %gs
22774+ popl_cfi %es
22775+ jmp csum_partial_copy_generic
22776+#endif
22777+
22778+ENTRY(csum_partial_copy_generic_from_user)
22779+
22780+#ifdef CONFIG_PAX_MEMORY_UDEREF
22781+ pushl_cfi %gs
22782+ popl_cfi %ds
22783+#endif
22784+
22785+ENTRY(csum_partial_copy_generic)
22786 pushl_cfi %ebx
22787 CFI_REL_OFFSET ebx, 0
22788 pushl_cfi %edi
22789@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
22790 subl %ebx, %edi
22791 lea -1(%esi),%edx
22792 andl $-32,%edx
22793- lea 3f(%ebx,%ebx), %ebx
22794+ lea 3f(%ebx,%ebx,2), %ebx
22795 testl %esi, %esi
22796 jmp *%ebx
22797 1: addl $64,%esi
22798@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
22799 jb 5f
22800 SRC( movw (%esi), %dx )
22801 leal 2(%esi), %esi
22802-DST( movw %dx, (%edi) )
22803+DST( movw %dx, %es:(%edi) )
22804 leal 2(%edi), %edi
22805 je 6f
22806 shll $16,%edx
22807 5:
22808 SRC( movb (%esi), %dl )
22809-DST( movb %dl, (%edi) )
22810+DST( movb %dl, %es:(%edi) )
22811 6: addl %edx, %eax
22812 adcl $0, %eax
22813 7:
22814 .section .fixup, "ax"
22815 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
22816- movl $-EFAULT, (%ebx)
22817+ movl $-EFAULT, %ss:(%ebx)
22818 # zero the complete destination (computing the rest is too much work)
22819 movl ARGBASE+8(%esp),%edi # dst
22820 movl ARGBASE+12(%esp),%ecx # len
22821@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
22822 rep; stosb
22823 jmp 7b
22824 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
22825- movl $-EFAULT, (%ebx)
22826+ movl $-EFAULT, %ss:(%ebx)
22827 jmp 7b
22828 .previous
22829
22830+#ifdef CONFIG_PAX_MEMORY_UDEREF
22831+ pushl_cfi %ss
22832+ popl_cfi %ds
22833+ pushl_cfi %ss
22834+ popl_cfi %es
22835+#endif
22836+
22837 popl_cfi %esi
22838 CFI_RESTORE esi
22839 popl_cfi %edi
22840@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
22841 CFI_RESTORE ebx
22842 ret
22843 CFI_ENDPROC
22844-ENDPROC(csum_partial_copy_generic)
22845+ENDPROC(csum_partial_copy_generic_to_user)
22846
22847 #undef ROUND
22848 #undef ROUND1
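
checksum_32.S gains direction-specific entry points because i386 UDEREF keeps userland in a dedicated segment whose selector sits in %gs: the _to_user variant loads it into %es (every destination store above is now %es-relative), the _from_user variant loads it into %ds, and the fixup paths address through %ss before flat segments are restored on exit. The `nop; nop; nop;` padding in the ROUND macros keeps each unrolled entry a uniform size after the one-byte %es: override, which is also why the computed-jump lea changes its scale from 2 to 3. The segment shuffle itself, from C with inline asm:

    static inline void enter_user_es(void)
    {
        asm volatile("pushl %%gs\n\tpopl %%es" ::: "memory");
    }

    static inline void restore_flat_segments(void)
    {
        asm volatile("pushl %%ss\n\tpopl %%ds\n\t"
                     "pushl %%ss\n\tpopl %%es" ::: "memory");
    }
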
22849diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
22850index f2145cf..cea889d 100644
22851--- a/arch/x86/lib/clear_page_64.S
22852+++ b/arch/x86/lib/clear_page_64.S
22853@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
22854 movl $4096/8,%ecx
22855 xorl %eax,%eax
22856 rep stosq
22857+ pax_force_retaddr
22858 ret
22859 CFI_ENDPROC
22860 ENDPROC(clear_page_c)
22861@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
22862 movl $4096,%ecx
22863 xorl %eax,%eax
22864 rep stosb
22865+ pax_force_retaddr
22866 ret
22867 CFI_ENDPROC
22868 ENDPROC(clear_page_c_e)
22869@@ -43,6 +45,7 @@ ENTRY(clear_page)
22870 leaq 64(%rdi),%rdi
22871 jnz .Lloop
22872 nop
22873+ pax_force_retaddr
22874 ret
22875 CFI_ENDPROC
22876 .Lclear_page_end:
22877@@ -58,7 +61,7 @@ ENDPROC(clear_page)
22878
22879 #include <asm/cpufeature.h>
22880
22881- .section .altinstr_replacement,"ax"
22882+ .section .altinstr_replacement,"a"
22883 1: .byte 0xeb /* jmp <disp8> */
22884 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
22885 2: .byte 0xeb /* jmp <disp8> */
22886diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
22887index 1e572c5..2a162cd 100644
22888--- a/arch/x86/lib/cmpxchg16b_emu.S
22889+++ b/arch/x86/lib/cmpxchg16b_emu.S
22890@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
22891
22892 popf
22893 mov $1, %al
22894+ pax_force_retaddr
22895 ret
22896
22897 not_same:
22898 popf
22899 xor %al,%al
22900+ pax_force_retaddr
22901 ret
22902
22903 CFI_ENDPROC
22904diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
22905index 6b34d04..dccb07f 100644
22906--- a/arch/x86/lib/copy_page_64.S
22907+++ b/arch/x86/lib/copy_page_64.S
22908@@ -9,6 +9,7 @@ copy_page_c:
22909 CFI_STARTPROC
22910 movl $4096/8,%ecx
22911 rep movsq
22912+ pax_force_retaddr
22913 ret
22914 CFI_ENDPROC
22915 ENDPROC(copy_page_c)
22916@@ -20,12 +21,14 @@ ENDPROC(copy_page_c)
22917
22918 ENTRY(copy_page)
22919 CFI_STARTPROC
22920- subq $2*8,%rsp
22921- CFI_ADJUST_CFA_OFFSET 2*8
22922+ subq $3*8,%rsp
22923+ CFI_ADJUST_CFA_OFFSET 3*8
22924 movq %rbx,(%rsp)
22925 CFI_REL_OFFSET rbx, 0
22926 movq %r12,1*8(%rsp)
22927 CFI_REL_OFFSET r12, 1*8
22928+ movq %r13,2*8(%rsp)
22929+ CFI_REL_OFFSET r13, 2*8
22930
22931 movl $(4096/64)-5,%ecx
22932 .p2align 4
22933@@ -37,7 +40,7 @@ ENTRY(copy_page)
22934 movq 16 (%rsi), %rdx
22935 movq 24 (%rsi), %r8
22936 movq 32 (%rsi), %r9
22937- movq 40 (%rsi), %r10
22938+ movq 40 (%rsi), %r13
22939 movq 48 (%rsi), %r11
22940 movq 56 (%rsi), %r12
22941
22942@@ -48,7 +51,7 @@ ENTRY(copy_page)
22943 movq %rdx, 16 (%rdi)
22944 movq %r8, 24 (%rdi)
22945 movq %r9, 32 (%rdi)
22946- movq %r10, 40 (%rdi)
22947+ movq %r13, 40 (%rdi)
22948 movq %r11, 48 (%rdi)
22949 movq %r12, 56 (%rdi)
22950
22951@@ -67,7 +70,7 @@ ENTRY(copy_page)
22952 movq 16 (%rsi), %rdx
22953 movq 24 (%rsi), %r8
22954 movq 32 (%rsi), %r9
22955- movq 40 (%rsi), %r10
22956+ movq 40 (%rsi), %r13
22957 movq 48 (%rsi), %r11
22958 movq 56 (%rsi), %r12
22959
22960@@ -76,7 +79,7 @@ ENTRY(copy_page)
22961 movq %rdx, 16 (%rdi)
22962 movq %r8, 24 (%rdi)
22963 movq %r9, 32 (%rdi)
22964- movq %r10, 40 (%rdi)
22965+ movq %r13, 40 (%rdi)
22966 movq %r11, 48 (%rdi)
22967 movq %r12, 56 (%rdi)
22968
22969@@ -89,8 +92,11 @@ ENTRY(copy_page)
22970 CFI_RESTORE rbx
22971 movq 1*8(%rsp),%r12
22972 CFI_RESTORE r12
22973- addq $2*8,%rsp
22974- CFI_ADJUST_CFA_OFFSET -2*8
22975+ movq 2*8(%rsp),%r13
22976+ CFI_RESTORE r13
22977+ addq $3*8,%rsp
22978+ CFI_ADJUST_CFA_OFFSET -3*8
22979+ pax_force_retaddr
22980 ret
22981 .Lcopy_page_end:
22982 CFI_ENDPROC
22983@@ -101,7 +107,7 @@ ENDPROC(copy_page)
22984
22985 #include <asm/cpufeature.h>
22986
22987- .section .altinstr_replacement,"ax"
22988+ .section .altinstr_replacement,"a"
22989 1: .byte 0xeb /* jmp <disp8> */
22990 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
22991 2:
22992diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
22993index a30ca15..d25fab6 100644
22994--- a/arch/x86/lib/copy_user_64.S
22995+++ b/arch/x86/lib/copy_user_64.S
22996@@ -18,6 +18,7 @@
22997 #include <asm/alternative-asm.h>
22998 #include <asm/asm.h>
22999 #include <asm/smap.h>
23000+#include <asm/pgtable.h>
23001
23002 /*
23003 * By placing feature2 after feature1 in altinstructions section, we logically
23004@@ -31,7 +32,7 @@
23005 .byte 0xe9 /* 32bit jump */
23006 .long \orig-1f /* by default jump to orig */
23007 1:
23008- .section .altinstr_replacement,"ax"
23009+ .section .altinstr_replacement,"a"
23010 2: .byte 0xe9 /* near jump with 32bit immediate */
23011 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
23012 3: .byte 0xe9 /* near jump with 32bit immediate */
23013@@ -70,47 +71,20 @@
23014 #endif
23015 .endm
23016
23017-/* Standard copy_to_user with segment limit checking */
23018-ENTRY(_copy_to_user)
23019- CFI_STARTPROC
23020- GET_THREAD_INFO(%rax)
23021- movq %rdi,%rcx
23022- addq %rdx,%rcx
23023- jc bad_to_user
23024- cmpq TI_addr_limit(%rax),%rcx
23025- ja bad_to_user
23026- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
23027- copy_user_generic_unrolled,copy_user_generic_string, \
23028- copy_user_enhanced_fast_string
23029- CFI_ENDPROC
23030-ENDPROC(_copy_to_user)
23031-
23032-/* Standard copy_from_user with segment limit checking */
23033-ENTRY(_copy_from_user)
23034- CFI_STARTPROC
23035- GET_THREAD_INFO(%rax)
23036- movq %rsi,%rcx
23037- addq %rdx,%rcx
23038- jc bad_from_user
23039- cmpq TI_addr_limit(%rax),%rcx
23040- ja bad_from_user
23041- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
23042- copy_user_generic_unrolled,copy_user_generic_string, \
23043- copy_user_enhanced_fast_string
23044- CFI_ENDPROC
23045-ENDPROC(_copy_from_user)
23046-
23047 .section .fixup,"ax"
23048 /* must zero dest */
23049 ENTRY(bad_from_user)
23050 bad_from_user:
23051 CFI_STARTPROC
23052+ testl %edx,%edx
23053+ js bad_to_user
23054 movl %edx,%ecx
23055 xorl %eax,%eax
23056 rep
23057 stosb
23058 bad_to_user:
23059 movl %edx,%eax
23060+ pax_force_retaddr
23061 ret
23062 CFI_ENDPROC
23063 ENDPROC(bad_from_user)
23064@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
23065 jz 17f
23066 1: movq (%rsi),%r8
23067 2: movq 1*8(%rsi),%r9
23068-3: movq 2*8(%rsi),%r10
23069+3: movq 2*8(%rsi),%rax
23070 4: movq 3*8(%rsi),%r11
23071 5: movq %r8,(%rdi)
23072 6: movq %r9,1*8(%rdi)
23073-7: movq %r10,2*8(%rdi)
23074+7: movq %rax,2*8(%rdi)
23075 8: movq %r11,3*8(%rdi)
23076 9: movq 4*8(%rsi),%r8
23077 10: movq 5*8(%rsi),%r9
23078-11: movq 6*8(%rsi),%r10
23079+11: movq 6*8(%rsi),%rax
23080 12: movq 7*8(%rsi),%r11
23081 13: movq %r8,4*8(%rdi)
23082 14: movq %r9,5*8(%rdi)
23083-15: movq %r10,6*8(%rdi)
23084+15: movq %rax,6*8(%rdi)
23085 16: movq %r11,7*8(%rdi)
23086 leaq 64(%rsi),%rsi
23087 leaq 64(%rdi),%rdi
23088@@ -180,6 +154,7 @@ ENTRY(copy_user_generic_unrolled)
23089 jnz 21b
23090 23: xor %eax,%eax
23091 ASM_CLAC
23092+ pax_force_retaddr
23093 ret
23094
23095 .section .fixup,"ax"
23096@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
23097 movsb
23098 4: xorl %eax,%eax
23099 ASM_CLAC
23100+ pax_force_retaddr
23101 ret
23102
23103 .section .fixup,"ax"
23104@@ -286,6 +262,7 @@ ENTRY(copy_user_enhanced_fast_string)
23105 movsb
23106 2: xorl %eax,%eax
23107 ASM_CLAC
23108+ pax_force_retaddr
23109 ret
23110
23111 .section .fixup,"ax"
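
Deleting _copy_to_user/_copy_from_user here pairs with the x8664_ksyms_64.c hunk earlier that drops their EXPORT_SYMBOLs: the wraparound and addr_limit checks move out of these asm stubs into C elsewhere in this patch, where sizes can also be validated at compile time. The added `testl %edx,%edx; js bad_to_user` keeps a negative length from reaching `rep stosb` as a huge zeroing count. What the removed stubs checked, in C terms (a sketch, not the patch's actual replacement; copy_user_generic() is the real underlying routine):

    static inline unsigned long
    copy_from_user_sketch(void *to, const void __user *from, unsigned long n)
    {
        unsigned long end = (unsigned long)from + n;

        if (end < (unsigned long)from)                    /* "jc bad_from_user" */
            return n;                                     /* (stub also zeroes  */
        if (end > current_thread_info()->addr_limit.seg)  /* "ja bad_from_user" */
            return n;                                     /*  the destination)  */
        return copy_user_generic(to, (const void __force *)from, n);
    }
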
23112diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
23113index 6a4f43c..f5f9e26 100644
23114--- a/arch/x86/lib/copy_user_nocache_64.S
23115+++ b/arch/x86/lib/copy_user_nocache_64.S
23116@@ -8,6 +8,7 @@
23117
23118 #include <linux/linkage.h>
23119 #include <asm/dwarf2.h>
23120+#include <asm/alternative-asm.h>
23121
23122 #define FIX_ALIGNMENT 1
23123
23124@@ -16,6 +17,7 @@
23125 #include <asm/thread_info.h>
23126 #include <asm/asm.h>
23127 #include <asm/smap.h>
23128+#include <asm/pgtable.h>
23129
23130 .macro ALIGN_DESTINATION
23131 #ifdef FIX_ALIGNMENT
23132@@ -49,6 +51,15 @@
23133 */
23134 ENTRY(__copy_user_nocache)
23135 CFI_STARTPROC
23136+
23137+#ifdef CONFIG_PAX_MEMORY_UDEREF
23138+ mov $PAX_USER_SHADOW_BASE,%rcx
23139+ cmp %rcx,%rsi
23140+ jae 1f
23141+ add %rcx,%rsi
23142+1:
23143+#endif
23144+
23145 ASM_STAC
23146 cmpl $8,%edx
23147 jb 20f /* less then 8 bytes, go to byte copy loop */
23148@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
23149 jz 17f
23150 1: movq (%rsi),%r8
23151 2: movq 1*8(%rsi),%r9
23152-3: movq 2*8(%rsi),%r10
23153+3: movq 2*8(%rsi),%rax
23154 4: movq 3*8(%rsi),%r11
23155 5: movnti %r8,(%rdi)
23156 6: movnti %r9,1*8(%rdi)
23157-7: movnti %r10,2*8(%rdi)
23158+7: movnti %rax,2*8(%rdi)
23159 8: movnti %r11,3*8(%rdi)
23160 9: movq 4*8(%rsi),%r8
23161 10: movq 5*8(%rsi),%r9
23162-11: movq 6*8(%rsi),%r10
23163+11: movq 6*8(%rsi),%rax
23164 12: movq 7*8(%rsi),%r11
23165 13: movnti %r8,4*8(%rdi)
23166 14: movnti %r9,5*8(%rdi)
23167-15: movnti %r10,6*8(%rdi)
23168+15: movnti %rax,6*8(%rdi)
23169 16: movnti %r11,7*8(%rdi)
23170 leaq 64(%rsi),%rsi
23171 leaq 64(%rdi),%rdi
23172@@ -99,6 +110,7 @@ ENTRY(__copy_user_nocache)
23173 23: xorl %eax,%eax
23174 ASM_CLAC
23175 sfence
23176+ pax_force_retaddr
23177 ret
23178
23179 .section .fixup,"ax"
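
The prologue added to __copy_user_nocache shows amd64 UDEREF's addressing trick, the same rebase the ____m() casts perform in the xsave.c and csum-wrappers_64.c hunks: userland is not mapped at its own addresses while the kernel runs, so user pointers below PAX_USER_SHADOW_BASE are shifted into the kernel's shadow mapping of userland before being dereferenced. In C (uderef_rebase is an illustrative name; PAX_USER_SHADOW_BASE comes from the patch):

    static inline const void *uderef_rebase(const void __user *uaddr)
    {
        unsigned long a = (unsigned long)uaddr;

        if (a < PAX_USER_SHADOW_BASE)
            a += PAX_USER_SHADOW_BASE;   /* the "add %rcx,%rsi" above */
        return (const void *)a;
    }
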
23180diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
23181index 2419d5f..953ee51 100644
23182--- a/arch/x86/lib/csum-copy_64.S
23183+++ b/arch/x86/lib/csum-copy_64.S
23184@@ -9,6 +9,7 @@
23185 #include <asm/dwarf2.h>
23186 #include <asm/errno.h>
23187 #include <asm/asm.h>
23188+#include <asm/alternative-asm.h>
23189
23190 /*
23191 * Checksum copy with exception handling.
23192@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
23193 CFI_RESTORE rbp
23194 addq $7*8, %rsp
23195 CFI_ADJUST_CFA_OFFSET -7*8
23196+ pax_force_retaddr 0, 1
23197 ret
23198 CFI_RESTORE_STATE
23199
23200diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
23201index 25b7ae8..169fafc 100644
23202--- a/arch/x86/lib/csum-wrappers_64.c
23203+++ b/arch/x86/lib/csum-wrappers_64.c
23204@@ -52,7 +52,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
23205 len -= 2;
23206 }
23207 }
23208- isum = csum_partial_copy_generic((__force const void *)src,
23209+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
23210 dst, len, isum, errp, NULL);
23211 if (unlikely(*errp))
23212 goto out_err;
23213@@ -105,7 +105,7 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
23214 }
23215
23216 *errp = 0;
23217- return csum_partial_copy_generic(src, (void __force *)dst,
23218+ return csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
23219 len, isum, NULL, errp);
23220 }
23221 EXPORT_SYMBOL(csum_partial_copy_to_user);
23222diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
23223index 156b9c8..b144132 100644
23224--- a/arch/x86/lib/getuser.S
23225+++ b/arch/x86/lib/getuser.S
23226@@ -34,17 +34,40 @@
23227 #include <asm/thread_info.h>
23228 #include <asm/asm.h>
23229 #include <asm/smap.h>
23230+#include <asm/segment.h>
23231+#include <asm/pgtable.h>
23232+#include <asm/alternative-asm.h>
23233+
23234+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
23235+#define __copyuser_seg gs;
23236+#else
23237+#define __copyuser_seg
23238+#endif
23239
23240 .text
23241 ENTRY(__get_user_1)
23242 CFI_STARTPROC
23243+
23244+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23245 GET_THREAD_INFO(%_ASM_DX)
23246 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
23247 jae bad_get_user
23248 ASM_STAC
23249-1: movzb (%_ASM_AX),%edx
23250+
23251+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23252+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
23253+ cmp %_ASM_DX,%_ASM_AX
23254+ jae 1234f
23255+ add %_ASM_DX,%_ASM_AX
23256+1234:
23257+#endif
23258+
23259+#endif
23260+
23261+1: __copyuser_seg movzb (%_ASM_AX),%edx
23262 xor %eax,%eax
23263 ASM_CLAC
23264+ pax_force_retaddr
23265 ret
23266 CFI_ENDPROC
23267 ENDPROC(__get_user_1)
23268@@ -52,14 +75,28 @@ ENDPROC(__get_user_1)
23269 ENTRY(__get_user_2)
23270 CFI_STARTPROC
23271 add $1,%_ASM_AX
23272+
23273+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23274 jc bad_get_user
23275 GET_THREAD_INFO(%_ASM_DX)
23276 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
23277 jae bad_get_user
23278 ASM_STAC
23279-2: movzwl -1(%_ASM_AX),%edx
23280+
23281+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23282+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
23283+ cmp %_ASM_DX,%_ASM_AX
23284+ jae 1234f
23285+ add %_ASM_DX,%_ASM_AX
23286+1234:
23287+#endif
23288+
23289+#endif
23290+
23291+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
23292 xor %eax,%eax
23293 ASM_CLAC
23294+ pax_force_retaddr
23295 ret
23296 CFI_ENDPROC
23297 ENDPROC(__get_user_2)
23298@@ -67,14 +104,28 @@ ENDPROC(__get_user_2)
23299 ENTRY(__get_user_4)
23300 CFI_STARTPROC
23301 add $3,%_ASM_AX
23302+
23303+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23304 jc bad_get_user
23305 GET_THREAD_INFO(%_ASM_DX)
23306 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
23307 jae bad_get_user
23308 ASM_STAC
23309-3: mov -3(%_ASM_AX),%edx
23310+
23311+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23312+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
23313+ cmp %_ASM_DX,%_ASM_AX
23314+ jae 1234f
23315+ add %_ASM_DX,%_ASM_AX
23316+1234:
23317+#endif
23318+
23319+#endif
23320+
23321+3: __copyuser_seg mov -3(%_ASM_AX),%edx
23322 xor %eax,%eax
23323 ASM_CLAC
23324+ pax_force_retaddr
23325 ret
23326 CFI_ENDPROC
23327 ENDPROC(__get_user_4)
23328@@ -87,10 +138,20 @@ ENTRY(__get_user_8)
23329 GET_THREAD_INFO(%_ASM_DX)
23330 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
23331 jae bad_get_user
23332+
23333+#ifdef CONFIG_PAX_MEMORY_UDEREF
23334+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
23335+ cmp %_ASM_DX,%_ASM_AX
23336+ jae 1234f
23337+ add %_ASM_DX,%_ASM_AX
23338+1234:
23339+#endif
23340+
23341 ASM_STAC
23342 4: movq -7(%_ASM_AX),%_ASM_DX
23343 xor %eax,%eax
23344 ASM_CLAC
23345+ pax_force_retaddr
23346 ret
23347 CFI_ENDPROC
23348 ENDPROC(__get_user_8)
23349@@ -101,6 +162,7 @@ bad_get_user:
23350 xor %edx,%edx
23351 mov $(-EFAULT),%_ASM_AX
23352 ASM_CLAC
23353+ pax_force_retaddr
23354 ret
23355 CFI_ENDPROC
23356 END(bad_get_user)
23357diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
23358index 54fcffe..7be149e 100644
23359--- a/arch/x86/lib/insn.c
23360+++ b/arch/x86/lib/insn.c
23361@@ -20,8 +20,10 @@
23362
23363 #ifdef __KERNEL__
23364 #include <linux/string.h>
23365+#include <asm/pgtable_types.h>
23366 #else
23367 #include <string.h>
23368+#define ktla_ktva(addr) addr
23369 #endif
23370 #include <asm/inat.h>
23371 #include <asm/insn.h>
23372@@ -53,8 +55,8 @@
23373 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
23374 {
23375 memset(insn, 0, sizeof(*insn));
23376- insn->kaddr = kaddr;
23377- insn->next_byte = kaddr;
23378+ insn->kaddr = ktla_ktva(kaddr);
23379+ insn->next_byte = ktla_ktva(kaddr);
23380 insn->x86_64 = x86_64 ? 1 : 0;
23381 insn->opnd_bytes = 4;
23382 if (x86_64)
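
insn.c decodes kernel text, and under KERNEXEC the kernel's code is readable at a different alias than its linear address; ktla_ktva ("kernel text linear address to kernel text virtual address") translates to the alias the decoder may actually read, while userspace builds of the decoder define it away, as the hunk shows. The kernel-side definition lives elsewhere in this patch; its shape is presumably a fixed-offset translation along these lines (the offset name is an assumption):

    #ifdef CONFIG_PAX_KERNEXEC
    #define ktla_ktva(addr) ((addr) + __KERNEL_TEXT_OFFSET)  /* assumed form */
    #else
    #define ktla_ktva(addr) (addr)
    #endif
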
23383diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
23384index 05a95e7..326f2fa 100644
23385--- a/arch/x86/lib/iomap_copy_64.S
23386+++ b/arch/x86/lib/iomap_copy_64.S
23387@@ -17,6 +17,7 @@
23388
23389 #include <linux/linkage.h>
23390 #include <asm/dwarf2.h>
23391+#include <asm/alternative-asm.h>
23392
23393 /*
23394 * override generic version in lib/iomap_copy.c
23395@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
23396 CFI_STARTPROC
23397 movl %edx,%ecx
23398 rep movsd
23399+ pax_force_retaddr
23400 ret
23401 CFI_ENDPROC
23402 ENDPROC(__iowrite32_copy)
23403diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
23404index 1c273be..da9cc0e 100644
23405--- a/arch/x86/lib/memcpy_64.S
23406+++ b/arch/x86/lib/memcpy_64.S
23407@@ -33,6 +33,7 @@
23408 rep movsq
23409 movl %edx, %ecx
23410 rep movsb
23411+ pax_force_retaddr
23412 ret
23413 .Lmemcpy_e:
23414 .previous
23415@@ -49,6 +50,7 @@
23416 movq %rdi, %rax
23417 movq %rdx, %rcx
23418 rep movsb
23419+ pax_force_retaddr
23420 ret
23421 .Lmemcpy_e_e:
23422 .previous
23423@@ -76,13 +78,13 @@ ENTRY(memcpy)
23424 */
23425 movq 0*8(%rsi), %r8
23426 movq 1*8(%rsi), %r9
23427- movq 2*8(%rsi), %r10
23428+ movq 2*8(%rsi), %rcx
23429 movq 3*8(%rsi), %r11
23430 leaq 4*8(%rsi), %rsi
23431
23432 movq %r8, 0*8(%rdi)
23433 movq %r9, 1*8(%rdi)
23434- movq %r10, 2*8(%rdi)
23435+ movq %rcx, 2*8(%rdi)
23436 movq %r11, 3*8(%rdi)
23437 leaq 4*8(%rdi), %rdi
23438 jae .Lcopy_forward_loop
23439@@ -105,12 +107,12 @@ ENTRY(memcpy)
23440 subq $0x20, %rdx
23441 movq -1*8(%rsi), %r8
23442 movq -2*8(%rsi), %r9
23443- movq -3*8(%rsi), %r10
23444+ movq -3*8(%rsi), %rcx
23445 movq -4*8(%rsi), %r11
23446 leaq -4*8(%rsi), %rsi
23447 movq %r8, -1*8(%rdi)
23448 movq %r9, -2*8(%rdi)
23449- movq %r10, -3*8(%rdi)
23450+ movq %rcx, -3*8(%rdi)
23451 movq %r11, -4*8(%rdi)
23452 leaq -4*8(%rdi), %rdi
23453 jae .Lcopy_backward_loop
23454@@ -130,12 +132,13 @@ ENTRY(memcpy)
23455 */
23456 movq 0*8(%rsi), %r8
23457 movq 1*8(%rsi), %r9
23458- movq -2*8(%rsi, %rdx), %r10
23459+ movq -2*8(%rsi, %rdx), %rcx
23460 movq -1*8(%rsi, %rdx), %r11
23461 movq %r8, 0*8(%rdi)
23462 movq %r9, 1*8(%rdi)
23463- movq %r10, -2*8(%rdi, %rdx)
23464+ movq %rcx, -2*8(%rdi, %rdx)
23465 movq %r11, -1*8(%rdi, %rdx)
23466+ pax_force_retaddr
23467 retq
23468 .p2align 4
23469 .Lless_16bytes:
23470@@ -148,6 +151,7 @@ ENTRY(memcpy)
23471 movq -1*8(%rsi, %rdx), %r9
23472 movq %r8, 0*8(%rdi)
23473 movq %r9, -1*8(%rdi, %rdx)
23474+ pax_force_retaddr
23475 retq
23476 .p2align 4
23477 .Lless_8bytes:
23478@@ -161,6 +165,7 @@ ENTRY(memcpy)
23479 movl -4(%rsi, %rdx), %r8d
23480 movl %ecx, (%rdi)
23481 movl %r8d, -4(%rdi, %rdx)
23482+ pax_force_retaddr
23483 retq
23484 .p2align 4
23485 .Lless_3bytes:
23486@@ -179,6 +184,7 @@ ENTRY(memcpy)
23487 movb %cl, (%rdi)
23488
23489 .Lend:
23490+ pax_force_retaddr
23491 retq
23492 CFI_ENDPROC
23493 ENDPROC(memcpy)
23494diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
23495index ee16461..c39c199 100644
23496--- a/arch/x86/lib/memmove_64.S
23497+++ b/arch/x86/lib/memmove_64.S
23498@@ -61,13 +61,13 @@ ENTRY(memmove)
23499 5:
23500 sub $0x20, %rdx
23501 movq 0*8(%rsi), %r11
23502- movq 1*8(%rsi), %r10
23503+ movq 1*8(%rsi), %rcx
23504 movq 2*8(%rsi), %r9
23505 movq 3*8(%rsi), %r8
23506 leaq 4*8(%rsi), %rsi
23507
23508 movq %r11, 0*8(%rdi)
23509- movq %r10, 1*8(%rdi)
23510+ movq %rcx, 1*8(%rdi)
23511 movq %r9, 2*8(%rdi)
23512 movq %r8, 3*8(%rdi)
23513 leaq 4*8(%rdi), %rdi
23514@@ -81,10 +81,10 @@ ENTRY(memmove)
23515 4:
23516 movq %rdx, %rcx
23517 movq -8(%rsi, %rdx), %r11
23518- lea -8(%rdi, %rdx), %r10
23519+ lea -8(%rdi, %rdx), %r9
23520 shrq $3, %rcx
23521 rep movsq
23522- movq %r11, (%r10)
23523+ movq %r11, (%r9)
23524 jmp 13f
23525 .Lmemmove_end_forward:
23526
23527@@ -95,14 +95,14 @@ ENTRY(memmove)
23528 7:
23529 movq %rdx, %rcx
23530 movq (%rsi), %r11
23531- movq %rdi, %r10
23532+ movq %rdi, %r9
23533 leaq -8(%rsi, %rdx), %rsi
23534 leaq -8(%rdi, %rdx), %rdi
23535 shrq $3, %rcx
23536 std
23537 rep movsq
23538 cld
23539- movq %r11, (%r10)
23540+ movq %r11, (%r9)
23541 jmp 13f
23542
23543 /*
23544@@ -127,13 +127,13 @@ ENTRY(memmove)
23545 8:
23546 subq $0x20, %rdx
23547 movq -1*8(%rsi), %r11
23548- movq -2*8(%rsi), %r10
23549+ movq -2*8(%rsi), %rcx
23550 movq -3*8(%rsi), %r9
23551 movq -4*8(%rsi), %r8
23552 leaq -4*8(%rsi), %rsi
23553
23554 movq %r11, -1*8(%rdi)
23555- movq %r10, -2*8(%rdi)
23556+ movq %rcx, -2*8(%rdi)
23557 movq %r9, -3*8(%rdi)
23558 movq %r8, -4*8(%rdi)
23559 leaq -4*8(%rdi), %rdi
23560@@ -151,11 +151,11 @@ ENTRY(memmove)
23561 * Move data from 16 bytes to 31 bytes.
23562 */
23563 movq 0*8(%rsi), %r11
23564- movq 1*8(%rsi), %r10
23565+ movq 1*8(%rsi), %rcx
23566 movq -2*8(%rsi, %rdx), %r9
23567 movq -1*8(%rsi, %rdx), %r8
23568 movq %r11, 0*8(%rdi)
23569- movq %r10, 1*8(%rdi)
23570+ movq %rcx, 1*8(%rdi)
23571 movq %r9, -2*8(%rdi, %rdx)
23572 movq %r8, -1*8(%rdi, %rdx)
23573 jmp 13f
23574@@ -167,9 +167,9 @@ ENTRY(memmove)
23575 * Move data from 8 bytes to 15 bytes.
23576 */
23577 movq 0*8(%rsi), %r11
23578- movq -1*8(%rsi, %rdx), %r10
23579+ movq -1*8(%rsi, %rdx), %r9
23580 movq %r11, 0*8(%rdi)
23581- movq %r10, -1*8(%rdi, %rdx)
23582+ movq %r9, -1*8(%rdi, %rdx)
23583 jmp 13f
23584 10:
23585 cmpq $4, %rdx
23586@@ -178,9 +178,9 @@ ENTRY(memmove)
23587 * Move data from 4 bytes to 7 bytes.
23588 */
23589 movl (%rsi), %r11d
23590- movl -4(%rsi, %rdx), %r10d
23591+ movl -4(%rsi, %rdx), %r9d
23592 movl %r11d, (%rdi)
23593- movl %r10d, -4(%rdi, %rdx)
23594+ movl %r9d, -4(%rdi, %rdx)
23595 jmp 13f
23596 11:
23597 cmp $2, %rdx
23598@@ -189,9 +189,9 @@ ENTRY(memmove)
23599 * Move data from 2 bytes to 3 bytes.
23600 */
23601 movw (%rsi), %r11w
23602- movw -2(%rsi, %rdx), %r10w
23603+ movw -2(%rsi, %rdx), %r9w
23604 movw %r11w, (%rdi)
23605- movw %r10w, -2(%rdi, %rdx)
23606+ movw %r9w, -2(%rdi, %rdx)
23607 jmp 13f
23608 12:
23609 cmp $1, %rdx
23610@@ -202,6 +202,7 @@ ENTRY(memmove)
23611 movb (%rsi), %r11b
23612 movb %r11b, (%rdi)
23613 13:
23614+ pax_force_retaddr
23615 retq
23616 CFI_ENDPROC
23617
23618@@ -210,6 +211,7 @@ ENTRY(memmove)
23619 /* Forward moving data. */
23620 movq %rdx, %rcx
23621 rep movsb
23622+ pax_force_retaddr
23623 retq
23624 .Lmemmove_end_forward_efs:
23625 .previous
23626diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
23627index 2dcb380..963660a 100644
23628--- a/arch/x86/lib/memset_64.S
23629+++ b/arch/x86/lib/memset_64.S
23630@@ -30,6 +30,7 @@
23631 movl %edx,%ecx
23632 rep stosb
23633 movq %r9,%rax
23634+ pax_force_retaddr
23635 ret
23636 .Lmemset_e:
23637 .previous
23638@@ -52,6 +53,7 @@
23639 movq %rdx,%rcx
23640 rep stosb
23641 movq %r9,%rax
23642+ pax_force_retaddr
23643 ret
23644 .Lmemset_e_e:
23645 .previous
23646@@ -59,7 +61,7 @@
23647 ENTRY(memset)
23648 ENTRY(__memset)
23649 CFI_STARTPROC
23650- movq %rdi,%r10
23651+ movq %rdi,%r11
23652
23653 /* expand byte value */
23654 movzbl %sil,%ecx
23655@@ -117,7 +119,8 @@ ENTRY(__memset)
23656 jnz .Lloop_1
23657
23658 .Lende:
23659- movq %r10,%rax
23660+ movq %r11,%rax
23661+ pax_force_retaddr
23662 ret
23663
23664 CFI_RESTORE_STATE
23665diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
23666index c9f2d9b..e7fd2c0 100644
23667--- a/arch/x86/lib/mmx_32.c
23668+++ b/arch/x86/lib/mmx_32.c
23669@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
23670 {
23671 void *p;
23672 int i;
23673+ unsigned long cr0;
23674
23675 if (unlikely(in_interrupt()))
23676 return __memcpy(to, from, len);
23677@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
23678 kernel_fpu_begin();
23679
23680 __asm__ __volatile__ (
23681- "1: prefetch (%0)\n" /* This set is 28 bytes */
23682- " prefetch 64(%0)\n"
23683- " prefetch 128(%0)\n"
23684- " prefetch 192(%0)\n"
23685- " prefetch 256(%0)\n"
23686+ "1: prefetch (%1)\n" /* This set is 28 bytes */
23687+ " prefetch 64(%1)\n"
23688+ " prefetch 128(%1)\n"
23689+ " prefetch 192(%1)\n"
23690+ " prefetch 256(%1)\n"
23691 "2: \n"
23692 ".section .fixup, \"ax\"\n"
23693- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23694+ "3: \n"
23695+
23696+#ifdef CONFIG_PAX_KERNEXEC
23697+ " movl %%cr0, %0\n"
23698+ " movl %0, %%eax\n"
23699+ " andl $0xFFFEFFFF, %%eax\n"
23700+ " movl %%eax, %%cr0\n"
23701+#endif
23702+
23703+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23704+
23705+#ifdef CONFIG_PAX_KERNEXEC
23706+ " movl %0, %%cr0\n"
23707+#endif
23708+
23709 " jmp 2b\n"
23710 ".previous\n"
23711 _ASM_EXTABLE(1b, 3b)
23712- : : "r" (from));
23713+ : "=&r" (cr0) : "r" (from) : "ax");
23714
23715 for ( ; i > 5; i--) {
23716 __asm__ __volatile__ (
23717- "1: prefetch 320(%0)\n"
23718- "2: movq (%0), %%mm0\n"
23719- " movq 8(%0), %%mm1\n"
23720- " movq 16(%0), %%mm2\n"
23721- " movq 24(%0), %%mm3\n"
23722- " movq %%mm0, (%1)\n"
23723- " movq %%mm1, 8(%1)\n"
23724- " movq %%mm2, 16(%1)\n"
23725- " movq %%mm3, 24(%1)\n"
23726- " movq 32(%0), %%mm0\n"
23727- " movq 40(%0), %%mm1\n"
23728- " movq 48(%0), %%mm2\n"
23729- " movq 56(%0), %%mm3\n"
23730- " movq %%mm0, 32(%1)\n"
23731- " movq %%mm1, 40(%1)\n"
23732- " movq %%mm2, 48(%1)\n"
23733- " movq %%mm3, 56(%1)\n"
23734+ "1: prefetch 320(%1)\n"
23735+ "2: movq (%1), %%mm0\n"
23736+ " movq 8(%1), %%mm1\n"
23737+ " movq 16(%1), %%mm2\n"
23738+ " movq 24(%1), %%mm3\n"
23739+ " movq %%mm0, (%2)\n"
23740+ " movq %%mm1, 8(%2)\n"
23741+ " movq %%mm2, 16(%2)\n"
23742+ " movq %%mm3, 24(%2)\n"
23743+ " movq 32(%1), %%mm0\n"
23744+ " movq 40(%1), %%mm1\n"
23745+ " movq 48(%1), %%mm2\n"
23746+ " movq 56(%1), %%mm3\n"
23747+ " movq %%mm0, 32(%2)\n"
23748+ " movq %%mm1, 40(%2)\n"
23749+ " movq %%mm2, 48(%2)\n"
23750+ " movq %%mm3, 56(%2)\n"
23751 ".section .fixup, \"ax\"\n"
23752- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23753+ "3:\n"
23754+
23755+#ifdef CONFIG_PAX_KERNEXEC
23756+ " movl %%cr0, %0\n"
23757+ " movl %0, %%eax\n"
23758+ " andl $0xFFFEFFFF, %%eax\n"
23759+ " movl %%eax, %%cr0\n"
23760+#endif
23761+
23762+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23763+
23764+#ifdef CONFIG_PAX_KERNEXEC
23765+ " movl %0, %%cr0\n"
23766+#endif
23767+
23768 " jmp 2b\n"
23769 ".previous\n"
23770 _ASM_EXTABLE(1b, 3b)
23771- : : "r" (from), "r" (to) : "memory");
23772+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
23773
23774 from += 64;
23775 to += 64;
23776@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
23777 static void fast_copy_page(void *to, void *from)
23778 {
23779 int i;
23780+ unsigned long cr0;
23781
23782 kernel_fpu_begin();
23783
23784@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
23785 * but that is for later. -AV
23786 */
23787 __asm__ __volatile__(
23788- "1: prefetch (%0)\n"
23789- " prefetch 64(%0)\n"
23790- " prefetch 128(%0)\n"
23791- " prefetch 192(%0)\n"
23792- " prefetch 256(%0)\n"
23793+ "1: prefetch (%1)\n"
23794+ " prefetch 64(%1)\n"
23795+ " prefetch 128(%1)\n"
23796+ " prefetch 192(%1)\n"
23797+ " prefetch 256(%1)\n"
23798 "2: \n"
23799 ".section .fixup, \"ax\"\n"
23800- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23801+ "3: \n"
23802+
23803+#ifdef CONFIG_PAX_KERNEXEC
23804+ " movl %%cr0, %0\n"
23805+ " movl %0, %%eax\n"
23806+ " andl $0xFFFEFFFF, %%eax\n"
23807+ " movl %%eax, %%cr0\n"
23808+#endif
23809+
23810+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23811+
23812+#ifdef CONFIG_PAX_KERNEXEC
23813+ " movl %0, %%cr0\n"
23814+#endif
23815+
23816 " jmp 2b\n"
23817 ".previous\n"
23818- _ASM_EXTABLE(1b, 3b) : : "r" (from));
23819+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
23820
23821 for (i = 0; i < (4096-320)/64; i++) {
23822 __asm__ __volatile__ (
23823- "1: prefetch 320(%0)\n"
23824- "2: movq (%0), %%mm0\n"
23825- " movntq %%mm0, (%1)\n"
23826- " movq 8(%0), %%mm1\n"
23827- " movntq %%mm1, 8(%1)\n"
23828- " movq 16(%0), %%mm2\n"
23829- " movntq %%mm2, 16(%1)\n"
23830- " movq 24(%0), %%mm3\n"
23831- " movntq %%mm3, 24(%1)\n"
23832- " movq 32(%0), %%mm4\n"
23833- " movntq %%mm4, 32(%1)\n"
23834- " movq 40(%0), %%mm5\n"
23835- " movntq %%mm5, 40(%1)\n"
23836- " movq 48(%0), %%mm6\n"
23837- " movntq %%mm6, 48(%1)\n"
23838- " movq 56(%0), %%mm7\n"
23839- " movntq %%mm7, 56(%1)\n"
23840+ "1: prefetch 320(%1)\n"
23841+ "2: movq (%1), %%mm0\n"
23842+ " movntq %%mm0, (%2)\n"
23843+ " movq 8(%1), %%mm1\n"
23844+ " movntq %%mm1, 8(%2)\n"
23845+ " movq 16(%1), %%mm2\n"
23846+ " movntq %%mm2, 16(%2)\n"
23847+ " movq 24(%1), %%mm3\n"
23848+ " movntq %%mm3, 24(%2)\n"
23849+ " movq 32(%1), %%mm4\n"
23850+ " movntq %%mm4, 32(%2)\n"
23851+ " movq 40(%1), %%mm5\n"
23852+ " movntq %%mm5, 40(%2)\n"
23853+ " movq 48(%1), %%mm6\n"
23854+ " movntq %%mm6, 48(%2)\n"
23855+ " movq 56(%1), %%mm7\n"
23856+ " movntq %%mm7, 56(%2)\n"
23857 ".section .fixup, \"ax\"\n"
23858- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23859+ "3:\n"
23860+
23861+#ifdef CONFIG_PAX_KERNEXEC
23862+ " movl %%cr0, %0\n"
23863+ " movl %0, %%eax\n"
23864+ " andl $0xFFFEFFFF, %%eax\n"
23865+ " movl %%eax, %%cr0\n"
23866+#endif
23867+
23868+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23869+
23870+#ifdef CONFIG_PAX_KERNEXEC
23871+ " movl %0, %%cr0\n"
23872+#endif
23873+
23874 " jmp 2b\n"
23875 ".previous\n"
23876- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
23877+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
23878
23879 from += 64;
23880 to += 64;
23881@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
23882 static void fast_copy_page(void *to, void *from)
23883 {
23884 int i;
23885+ unsigned long cr0;
23886
23887 kernel_fpu_begin();
23888
23889 __asm__ __volatile__ (
23890- "1: prefetch (%0)\n"
23891- " prefetch 64(%0)\n"
23892- " prefetch 128(%0)\n"
23893- " prefetch 192(%0)\n"
23894- " prefetch 256(%0)\n"
23895+ "1: prefetch (%1)\n"
23896+ " prefetch 64(%1)\n"
23897+ " prefetch 128(%1)\n"
23898+ " prefetch 192(%1)\n"
23899+ " prefetch 256(%1)\n"
23900 "2: \n"
23901 ".section .fixup, \"ax\"\n"
23902- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23903+ "3: \n"
23904+
23905+#ifdef CONFIG_PAX_KERNEXEC
23906+ " movl %%cr0, %0\n"
23907+ " movl %0, %%eax\n"
23908+ " andl $0xFFFEFFFF, %%eax\n"
23909+ " movl %%eax, %%cr0\n"
23910+#endif
23911+
23912+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23913+
23914+#ifdef CONFIG_PAX_KERNEXEC
23915+ " movl %0, %%cr0\n"
23916+#endif
23917+
23918 " jmp 2b\n"
23919 ".previous\n"
23920- _ASM_EXTABLE(1b, 3b) : : "r" (from));
23921+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
23922
23923 for (i = 0; i < 4096/64; i++) {
23924 __asm__ __volatile__ (
23925- "1: prefetch 320(%0)\n"
23926- "2: movq (%0), %%mm0\n"
23927- " movq 8(%0), %%mm1\n"
23928- " movq 16(%0), %%mm2\n"
23929- " movq 24(%0), %%mm3\n"
23930- " movq %%mm0, (%1)\n"
23931- " movq %%mm1, 8(%1)\n"
23932- " movq %%mm2, 16(%1)\n"
23933- " movq %%mm3, 24(%1)\n"
23934- " movq 32(%0), %%mm0\n"
23935- " movq 40(%0), %%mm1\n"
23936- " movq 48(%0), %%mm2\n"
23937- " movq 56(%0), %%mm3\n"
23938- " movq %%mm0, 32(%1)\n"
23939- " movq %%mm1, 40(%1)\n"
23940- " movq %%mm2, 48(%1)\n"
23941- " movq %%mm3, 56(%1)\n"
23942+ "1: prefetch 320(%1)\n"
23943+ "2: movq (%1), %%mm0\n"
23944+ " movq 8(%1), %%mm1\n"
23945+ " movq 16(%1), %%mm2\n"
23946+ " movq 24(%1), %%mm3\n"
23947+ " movq %%mm0, (%2)\n"
23948+ " movq %%mm1, 8(%2)\n"
23949+ " movq %%mm2, 16(%2)\n"
23950+ " movq %%mm3, 24(%2)\n"
23951+ " movq 32(%1), %%mm0\n"
23952+ " movq 40(%1), %%mm1\n"
23953+ " movq 48(%1), %%mm2\n"
23954+ " movq 56(%1), %%mm3\n"
23955+ " movq %%mm0, 32(%2)\n"
23956+ " movq %%mm1, 40(%2)\n"
23957+ " movq %%mm2, 48(%2)\n"
23958+ " movq %%mm3, 56(%2)\n"
23959 ".section .fixup, \"ax\"\n"
23960- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23961+ "3:\n"
23962+
23963+#ifdef CONFIG_PAX_KERNEXEC
23964+ " movl %%cr0, %0\n"
23965+ " movl %0, %%eax\n"
23966+ " andl $0xFFFEFFFF, %%eax\n"
23967+ " movl %%eax, %%cr0\n"
23968+#endif
23969+
23970+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23971+
23972+#ifdef CONFIG_PAX_KERNEXEC
23973+ " movl %0, %%cr0\n"
23974+#endif
23975+
23976 " jmp 2b\n"
23977 ".previous\n"
23978 _ASM_EXTABLE(1b, 3b)
23979- : : "r" (from), "r" (to) : "memory");
23980+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
23981
23982 from += 64;
23983 to += 64;
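
All three .fixup blocks rewritten above self-patch kernel text: movw $0x1AEB, 1b turns the faulting prefetch at label 1 into a two-byte short jump (0xEB 0x1A, "jmp on 26 bytes") on CPUs where prefetch traps. Under CONFIG_PAX_KERNEXEC that text is mapped read-only, so the write only succeeds if the WP bit (bit 16) of CR0 is cleared around it, which is what the added movl %%cr0 bracket does, using %eax as scratch (hence the new "ax" clobber) and the new cr0 local as the saved value. A hedged 32-bit C sketch of the same open/write/close bracket, assuming interrupts are held off because CR0 is per-CPU state:

	#include <linux/irqflags.h>

	/* Sketch of the CR0.WP bracket used in the fixups above. */
	static void patch_kernel_byte(unsigned char *text, unsigned char val)
	{
		unsigned long cr0, flags;

		local_irq_save(flags);
		asm volatile("movl %%cr0, %0" : "=r"(cr0));
		asm volatile("movl %0, %%cr0" : : "r"(cr0 & ~0x10000UL)); /* clear WP */
		*text = val;			/* write into read-only text */
		asm volatile("movl %0, %%cr0" : : "r"(cr0));	/* restore WP */
		local_irq_restore(flags);
	}
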
23984diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
23985index f6d13ee..aca5f0b 100644
23986--- a/arch/x86/lib/msr-reg.S
23987+++ b/arch/x86/lib/msr-reg.S
23988@@ -3,6 +3,7 @@
23989 #include <asm/dwarf2.h>
23990 #include <asm/asm.h>
23991 #include <asm/msr.h>
23992+#include <asm/alternative-asm.h>
23993
23994 #ifdef CONFIG_X86_64
23995 /*
23996@@ -16,7 +17,7 @@ ENTRY(\op\()_safe_regs)
23997 CFI_STARTPROC
23998 pushq_cfi %rbx
23999 pushq_cfi %rbp
24000- movq %rdi, %r10 /* Save pointer */
24001+ movq %rdi, %r9 /* Save pointer */
24002 xorl %r11d, %r11d /* Return value */
24003 movl (%rdi), %eax
24004 movl 4(%rdi), %ecx
24005@@ -27,16 +28,17 @@ ENTRY(\op\()_safe_regs)
24006 movl 28(%rdi), %edi
24007 CFI_REMEMBER_STATE
24008 1: \op
24009-2: movl %eax, (%r10)
24010+2: movl %eax, (%r9)
24011 movl %r11d, %eax /* Return value */
24012- movl %ecx, 4(%r10)
24013- movl %edx, 8(%r10)
24014- movl %ebx, 12(%r10)
24015- movl %ebp, 20(%r10)
24016- movl %esi, 24(%r10)
24017- movl %edi, 28(%r10)
24018+ movl %ecx, 4(%r9)
24019+ movl %edx, 8(%r9)
24020+ movl %ebx, 12(%r9)
24021+ movl %ebp, 20(%r9)
24022+ movl %esi, 24(%r9)
24023+ movl %edi, 28(%r9)
24024 popq_cfi %rbp
24025 popq_cfi %rbx
24026+ pax_force_retaddr
24027 ret
24028 3:
24029 CFI_RESTORE_STATE
24030diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
24031index fc6ba17..04471c5 100644
24032--- a/arch/x86/lib/putuser.S
24033+++ b/arch/x86/lib/putuser.S
24034@@ -16,7 +16,9 @@
24035 #include <asm/errno.h>
24036 #include <asm/asm.h>
24037 #include <asm/smap.h>
24038-
24039+#include <asm/segment.h>
24040+#include <asm/pgtable.h>
24041+#include <asm/alternative-asm.h>
24042
24043 /*
24044 * __put_user_X
24045@@ -30,57 +32,125 @@
24046 * as they get called from within inline assembly.
24047 */
24048
24049-#define ENTER CFI_STARTPROC ; \
24050- GET_THREAD_INFO(%_ASM_BX)
24051-#define EXIT ASM_CLAC ; \
24052- ret ; \
24053+#define ENTER CFI_STARTPROC
24054+#define EXIT ASM_CLAC ; \
24055+ pax_force_retaddr ; \
24056+ ret ; \
24057 CFI_ENDPROC
24058
24059+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24060+#define _DEST %_ASM_CX,%_ASM_BX
24061+#else
24062+#define _DEST %_ASM_CX
24063+#endif
24064+
24065+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
24066+#define __copyuser_seg gs;
24067+#else
24068+#define __copyuser_seg
24069+#endif
24070+
24071 .text
24072 ENTRY(__put_user_1)
24073 ENTER
24074+
24075+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24076+ GET_THREAD_INFO(%_ASM_BX)
24077 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
24078 jae bad_put_user
24079 ASM_STAC
24080-1: movb %al,(%_ASM_CX)
24081+
24082+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24083+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
24084+ cmp %_ASM_BX,%_ASM_CX
24085+ jb 1234f
24086+ xor %ebx,%ebx
24087+1234:
24088+#endif
24089+
24090+#endif
24091+
24092+1: __copyuser_seg movb %al,(_DEST)
24093 xor %eax,%eax
24094 EXIT
24095 ENDPROC(__put_user_1)
24096
24097 ENTRY(__put_user_2)
24098 ENTER
24099+
24100+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24101+ GET_THREAD_INFO(%_ASM_BX)
24102 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
24103 sub $1,%_ASM_BX
24104 cmp %_ASM_BX,%_ASM_CX
24105 jae bad_put_user
24106 ASM_STAC
24107-2: movw %ax,(%_ASM_CX)
24108+
24109+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24110+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
24111+ cmp %_ASM_BX,%_ASM_CX
24112+ jb 1234f
24113+ xor %ebx,%ebx
24114+1234:
24115+#endif
24116+
24117+#endif
24118+
24119+2: __copyuser_seg movw %ax,(_DEST)
24120 xor %eax,%eax
24121 EXIT
24122 ENDPROC(__put_user_2)
24123
24124 ENTRY(__put_user_4)
24125 ENTER
24126+
24127+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24128+ GET_THREAD_INFO(%_ASM_BX)
24129 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
24130 sub $3,%_ASM_BX
24131 cmp %_ASM_BX,%_ASM_CX
24132 jae bad_put_user
24133 ASM_STAC
24134-3: movl %eax,(%_ASM_CX)
24135+
24136+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24137+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
24138+ cmp %_ASM_BX,%_ASM_CX
24139+ jb 1234f
24140+ xor %ebx,%ebx
24141+1234:
24142+#endif
24143+
24144+#endif
24145+
24146+3: __copyuser_seg movl %eax,(_DEST)
24147 xor %eax,%eax
24148 EXIT
24149 ENDPROC(__put_user_4)
24150
24151 ENTRY(__put_user_8)
24152 ENTER
24153+
24154+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24155+ GET_THREAD_INFO(%_ASM_BX)
24156 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
24157 sub $7,%_ASM_BX
24158 cmp %_ASM_BX,%_ASM_CX
24159 jae bad_put_user
24160 ASM_STAC
24161-4: mov %_ASM_AX,(%_ASM_CX)
24162+
24163+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24164+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
24165+ cmp %_ASM_BX,%_ASM_CX
24166+ jb 1234f
24167+ xor %ebx,%ebx
24168+1234:
24169+#endif
24170+
24171+#endif
24172+
24173+4: __copyuser_seg mov %_ASM_AX,(_DEST)
24174 #ifdef CONFIG_X86_32
24175-5: movl %edx,4(%_ASM_CX)
24176+5: __copyuser_seg movl %edx,4(_DEST)
24177 #endif
24178 xor %eax,%eax
24179 EXIT
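
With CONFIG_PAX_MEMORY_UDEREF on amd64, the __put_user_* bodies above address the destination as (%_ASM_CX,%_ASM_BX): %rbx is first loaded with PAX_USER_SHADOW_BASE, then zeroed again if the pointer already lies at or above it, so plain userland pointers are transparently shifted up into the shadow mapping through which the kernel accesses user memory. The same arithmetic in C, keeping only the constant taken from the hunk:

	/* Sketch of the amd64 UDEREF translation performed by _DEST above. */
	static inline void __user *uderef_shadow(void __user *ptr)
	{
		unsigned long p = (unsigned long)ptr;

		if (p < PAX_USER_SHADOW_BASE)		/* plain userland pointer...   */
			p += PAX_USER_SHADOW_BASE;	/* ...goes through the shadow  */
		return (void __user *)p;		/* higher addresses pass as-is */
	}
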
24180diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
24181index 1cad221..de671ee 100644
24182--- a/arch/x86/lib/rwlock.S
24183+++ b/arch/x86/lib/rwlock.S
24184@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
24185 FRAME
24186 0: LOCK_PREFIX
24187 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
24188+
24189+#ifdef CONFIG_PAX_REFCOUNT
24190+ jno 1234f
24191+ LOCK_PREFIX
24192+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
24193+ int $4
24194+1234:
24195+ _ASM_EXTABLE(1234b, 1234b)
24196+#endif
24197+
24198 1: rep; nop
24199 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
24200 jne 1b
24201 LOCK_PREFIX
24202 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
24203+
24204+#ifdef CONFIG_PAX_REFCOUNT
24205+ jno 1234f
24206+ LOCK_PREFIX
24207+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
24208+ int $4
24209+1234:
24210+ _ASM_EXTABLE(1234b, 1234b)
24211+#endif
24212+
24213 jnz 0b
24214 ENDFRAME
24215+ pax_force_retaddr
24216 ret
24217 CFI_ENDPROC
24218 END(__write_lock_failed)
24219@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
24220 FRAME
24221 0: LOCK_PREFIX
24222 READ_LOCK_SIZE(inc) (%__lock_ptr)
24223+
24224+#ifdef CONFIG_PAX_REFCOUNT
24225+ jno 1234f
24226+ LOCK_PREFIX
24227+ READ_LOCK_SIZE(dec) (%__lock_ptr)
24228+ int $4
24229+1234:
24230+ _ASM_EXTABLE(1234b, 1234b)
24231+#endif
24232+
24233 1: rep; nop
24234 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
24235 js 1b
24236 LOCK_PREFIX
24237 READ_LOCK_SIZE(dec) (%__lock_ptr)
24238+
24239+#ifdef CONFIG_PAX_REFCOUNT
24240+ jno 1234f
24241+ LOCK_PREFIX
24242+ READ_LOCK_SIZE(inc) (%__lock_ptr)
24243+ int $4
24244+1234:
24245+ _ASM_EXTABLE(1234b, 1234b)
24246+#endif
24247+
24248 js 0b
24249 ENDFRAME
24250+ pax_force_retaddr
24251 ret
24252 CFI_ENDPROC
24253 END(__read_lock_failed)
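
Both lock slow paths above gain the standard CONFIG_PAX_REFCOUNT bracket: do the locked update, jno past the handler when the overflow flag is clear, otherwise undo the update, raise int $4 (#OF), and list the int as its own exception-table entry so execution resumes right behind it. The same bracket is what the patch wraps around atomic_t operations elsewhere; distilled to a C increment:

	/* Sketch of the PAX_REFCOUNT saturation bracket (extable plumbing elided). */
	static inline void atomic_inc_checked(atomic_t *v)
	{
		asm volatile("lock incl %0\n"
			     "jno 0f\n"
			     "lock decl %0\n"	/* undo the overflowing increment     */
			     "int $4\n"		/* raise #OF for the kernel to report */
			     "0:\n"
			     : "+m"(v->counter) : : "memory", "cc");
	}
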
24254diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
24255index 5dff5f0..cadebf4 100644
24256--- a/arch/x86/lib/rwsem.S
24257+++ b/arch/x86/lib/rwsem.S
24258@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
24259 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
24260 CFI_RESTORE __ASM_REG(dx)
24261 restore_common_regs
24262+ pax_force_retaddr
24263 ret
24264 CFI_ENDPROC
24265 ENDPROC(call_rwsem_down_read_failed)
24266@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
24267 movq %rax,%rdi
24268 call rwsem_down_write_failed
24269 restore_common_regs
24270+ pax_force_retaddr
24271 ret
24272 CFI_ENDPROC
24273 ENDPROC(call_rwsem_down_write_failed)
24274@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
24275 movq %rax,%rdi
24276 call rwsem_wake
24277 restore_common_regs
24278-1: ret
24279+1: pax_force_retaddr
24280+ ret
24281 CFI_ENDPROC
24282 ENDPROC(call_rwsem_wake)
24283
24284@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
24285 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
24286 CFI_RESTORE __ASM_REG(dx)
24287 restore_common_regs
24288+ pax_force_retaddr
24289 ret
24290 CFI_ENDPROC
24291 ENDPROC(call_rwsem_downgrade_wake)
24292diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
24293index a63efd6..ccecad8 100644
24294--- a/arch/x86/lib/thunk_64.S
24295+++ b/arch/x86/lib/thunk_64.S
24296@@ -8,6 +8,7 @@
24297 #include <linux/linkage.h>
24298 #include <asm/dwarf2.h>
24299 #include <asm/calling.h>
24300+#include <asm/alternative-asm.h>
24301
24302 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
24303 .macro THUNK name, func, put_ret_addr_in_rdi=0
24304@@ -41,5 +42,6 @@
24305 SAVE_ARGS
24306 restore:
24307 RESTORE_ARGS
24308+ pax_force_retaddr
24309 ret
24310 CFI_ENDPROC
24311diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
24312index 98f6d6b6..d27f045 100644
24313--- a/arch/x86/lib/usercopy_32.c
24314+++ b/arch/x86/lib/usercopy_32.c
24315@@ -42,11 +42,13 @@ do { \
24316 int __d0; \
24317 might_fault(); \
24318 __asm__ __volatile__( \
24319+ __COPYUSER_SET_ES \
24320 ASM_STAC "\n" \
24321 "0: rep; stosl\n" \
24322 " movl %2,%0\n" \
24323 "1: rep; stosb\n" \
24324 "2: " ASM_CLAC "\n" \
24325+ __COPYUSER_RESTORE_ES \
24326 ".section .fixup,\"ax\"\n" \
24327 "3: lea 0(%2,%0,4),%0\n" \
24328 " jmp 2b\n" \
24329@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
24330
24331 #ifdef CONFIG_X86_INTEL_USERCOPY
24332 static unsigned long
24333-__copy_user_intel(void __user *to, const void *from, unsigned long size)
24334+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
24335 {
24336 int d0, d1;
24337 __asm__ __volatile__(
24338@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
24339 " .align 2,0x90\n"
24340 "3: movl 0(%4), %%eax\n"
24341 "4: movl 4(%4), %%edx\n"
24342- "5: movl %%eax, 0(%3)\n"
24343- "6: movl %%edx, 4(%3)\n"
24344+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
24345+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
24346 "7: movl 8(%4), %%eax\n"
24347 "8: movl 12(%4),%%edx\n"
24348- "9: movl %%eax, 8(%3)\n"
24349- "10: movl %%edx, 12(%3)\n"
24350+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
24351+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
24352 "11: movl 16(%4), %%eax\n"
24353 "12: movl 20(%4), %%edx\n"
24354- "13: movl %%eax, 16(%3)\n"
24355- "14: movl %%edx, 20(%3)\n"
24356+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
24357+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
24358 "15: movl 24(%4), %%eax\n"
24359 "16: movl 28(%4), %%edx\n"
24360- "17: movl %%eax, 24(%3)\n"
24361- "18: movl %%edx, 28(%3)\n"
24362+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
24363+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
24364 "19: movl 32(%4), %%eax\n"
24365 "20: movl 36(%4), %%edx\n"
24366- "21: movl %%eax, 32(%3)\n"
24367- "22: movl %%edx, 36(%3)\n"
24368+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
24369+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
24370 "23: movl 40(%4), %%eax\n"
24371 "24: movl 44(%4), %%edx\n"
24372- "25: movl %%eax, 40(%3)\n"
24373- "26: movl %%edx, 44(%3)\n"
24374+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
24375+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
24376 "27: movl 48(%4), %%eax\n"
24377 "28: movl 52(%4), %%edx\n"
24378- "29: movl %%eax, 48(%3)\n"
24379- "30: movl %%edx, 52(%3)\n"
24380+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
24381+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
24382 "31: movl 56(%4), %%eax\n"
24383 "32: movl 60(%4), %%edx\n"
24384- "33: movl %%eax, 56(%3)\n"
24385- "34: movl %%edx, 60(%3)\n"
24386+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
24387+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
24388 " addl $-64, %0\n"
24389 " addl $64, %4\n"
24390 " addl $64, %3\n"
24391@@ -149,10 +151,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
24392 " shrl $2, %0\n"
24393 " andl $3, %%eax\n"
24394 " cld\n"
24395+ __COPYUSER_SET_ES
24396 "99: rep; movsl\n"
24397 "36: movl %%eax, %0\n"
24398 "37: rep; movsb\n"
24399 "100:\n"
24400+ __COPYUSER_RESTORE_ES
24401 ".section .fixup,\"ax\"\n"
24402 "101: lea 0(%%eax,%0,4),%0\n"
24403 " jmp 100b\n"
24404@@ -202,46 +206,150 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
24405 }
24406
24407 static unsigned long
24408+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
24409+{
24410+ int d0, d1;
24411+ __asm__ __volatile__(
24412+ " .align 2,0x90\n"
24413+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
24414+ " cmpl $67, %0\n"
24415+ " jbe 3f\n"
24416+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
24417+ " .align 2,0x90\n"
24418+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
24419+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
24420+ "5: movl %%eax, 0(%3)\n"
24421+ "6: movl %%edx, 4(%3)\n"
24422+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
24423+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
24424+ "9: movl %%eax, 8(%3)\n"
24425+ "10: movl %%edx, 12(%3)\n"
24426+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
24427+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
24428+ "13: movl %%eax, 16(%3)\n"
24429+ "14: movl %%edx, 20(%3)\n"
24430+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
24431+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
24432+ "17: movl %%eax, 24(%3)\n"
24433+ "18: movl %%edx, 28(%3)\n"
24434+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
24435+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
24436+ "21: movl %%eax, 32(%3)\n"
24437+ "22: movl %%edx, 36(%3)\n"
24438+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
24439+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
24440+ "25: movl %%eax, 40(%3)\n"
24441+ "26: movl %%edx, 44(%3)\n"
24442+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
24443+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
24444+ "29: movl %%eax, 48(%3)\n"
24445+ "30: movl %%edx, 52(%3)\n"
24446+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
24447+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
24448+ "33: movl %%eax, 56(%3)\n"
24449+ "34: movl %%edx, 60(%3)\n"
24450+ " addl $-64, %0\n"
24451+ " addl $64, %4\n"
24452+ " addl $64, %3\n"
24453+ " cmpl $63, %0\n"
24454+ " ja 1b\n"
24455+ "35: movl %0, %%eax\n"
24456+ " shrl $2, %0\n"
24457+ " andl $3, %%eax\n"
24458+ " cld\n"
24459+ "99: rep; "__copyuser_seg" movsl\n"
24460+ "36: movl %%eax, %0\n"
24461+ "37: rep; "__copyuser_seg" movsb\n"
24462+ "100:\n"
24463+ ".section .fixup,\"ax\"\n"
24464+ "101: lea 0(%%eax,%0,4),%0\n"
24465+ " jmp 100b\n"
24466+ ".previous\n"
24467+ _ASM_EXTABLE(1b,100b)
24468+ _ASM_EXTABLE(2b,100b)
24469+ _ASM_EXTABLE(3b,100b)
24470+ _ASM_EXTABLE(4b,100b)
24471+ _ASM_EXTABLE(5b,100b)
24472+ _ASM_EXTABLE(6b,100b)
24473+ _ASM_EXTABLE(7b,100b)
24474+ _ASM_EXTABLE(8b,100b)
24475+ _ASM_EXTABLE(9b,100b)
24476+ _ASM_EXTABLE(10b,100b)
24477+ _ASM_EXTABLE(11b,100b)
24478+ _ASM_EXTABLE(12b,100b)
24479+ _ASM_EXTABLE(13b,100b)
24480+ _ASM_EXTABLE(14b,100b)
24481+ _ASM_EXTABLE(15b,100b)
24482+ _ASM_EXTABLE(16b,100b)
24483+ _ASM_EXTABLE(17b,100b)
24484+ _ASM_EXTABLE(18b,100b)
24485+ _ASM_EXTABLE(19b,100b)
24486+ _ASM_EXTABLE(20b,100b)
24487+ _ASM_EXTABLE(21b,100b)
24488+ _ASM_EXTABLE(22b,100b)
24489+ _ASM_EXTABLE(23b,100b)
24490+ _ASM_EXTABLE(24b,100b)
24491+ _ASM_EXTABLE(25b,100b)
24492+ _ASM_EXTABLE(26b,100b)
24493+ _ASM_EXTABLE(27b,100b)
24494+ _ASM_EXTABLE(28b,100b)
24495+ _ASM_EXTABLE(29b,100b)
24496+ _ASM_EXTABLE(30b,100b)
24497+ _ASM_EXTABLE(31b,100b)
24498+ _ASM_EXTABLE(32b,100b)
24499+ _ASM_EXTABLE(33b,100b)
24500+ _ASM_EXTABLE(34b,100b)
24501+ _ASM_EXTABLE(35b,100b)
24502+ _ASM_EXTABLE(36b,100b)
24503+ _ASM_EXTABLE(37b,100b)
24504+ _ASM_EXTABLE(99b,101b)
24505+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
24506+ : "1"(to), "2"(from), "0"(size)
24507+ : "eax", "edx", "memory");
24508+ return size;
24509+}
24510+
24511+static unsigned long __size_overflow(3)
24512 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
24513 {
24514 int d0, d1;
24515 __asm__ __volatile__(
24516 " .align 2,0x90\n"
24517- "0: movl 32(%4), %%eax\n"
24518+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
24519 " cmpl $67, %0\n"
24520 " jbe 2f\n"
24521- "1: movl 64(%4), %%eax\n"
24522+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
24523 " .align 2,0x90\n"
24524- "2: movl 0(%4), %%eax\n"
24525- "21: movl 4(%4), %%edx\n"
24526+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
24527+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
24528 " movl %%eax, 0(%3)\n"
24529 " movl %%edx, 4(%3)\n"
24530- "3: movl 8(%4), %%eax\n"
24531- "31: movl 12(%4),%%edx\n"
24532+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
24533+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
24534 " movl %%eax, 8(%3)\n"
24535 " movl %%edx, 12(%3)\n"
24536- "4: movl 16(%4), %%eax\n"
24537- "41: movl 20(%4), %%edx\n"
24538+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
24539+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
24540 " movl %%eax, 16(%3)\n"
24541 " movl %%edx, 20(%3)\n"
24542- "10: movl 24(%4), %%eax\n"
24543- "51: movl 28(%4), %%edx\n"
24544+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
24545+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
24546 " movl %%eax, 24(%3)\n"
24547 " movl %%edx, 28(%3)\n"
24548- "11: movl 32(%4), %%eax\n"
24549- "61: movl 36(%4), %%edx\n"
24550+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
24551+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
24552 " movl %%eax, 32(%3)\n"
24553 " movl %%edx, 36(%3)\n"
24554- "12: movl 40(%4), %%eax\n"
24555- "71: movl 44(%4), %%edx\n"
24556+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
24557+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
24558 " movl %%eax, 40(%3)\n"
24559 " movl %%edx, 44(%3)\n"
24560- "13: movl 48(%4), %%eax\n"
24561- "81: movl 52(%4), %%edx\n"
24562+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
24563+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
24564 " movl %%eax, 48(%3)\n"
24565 " movl %%edx, 52(%3)\n"
24566- "14: movl 56(%4), %%eax\n"
24567- "91: movl 60(%4), %%edx\n"
24568+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
24569+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
24570 " movl %%eax, 56(%3)\n"
24571 " movl %%edx, 60(%3)\n"
24572 " addl $-64, %0\n"
24573@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
24574 " shrl $2, %0\n"
24575 " andl $3, %%eax\n"
24576 " cld\n"
24577- "6: rep; movsl\n"
24578+ "6: rep; "__copyuser_seg" movsl\n"
24579 " movl %%eax,%0\n"
24580- "7: rep; movsb\n"
24581+ "7: rep; "__copyuser_seg" movsb\n"
24582 "8:\n"
24583 ".section .fixup,\"ax\"\n"
24584 "9: lea 0(%%eax,%0,4),%0\n"
24585@@ -298,48 +406,48 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
24586 * hyoshiok@miraclelinux.com
24587 */
24588
24589-static unsigned long __copy_user_zeroing_intel_nocache(void *to,
24590+static unsigned long __size_overflow(3) __copy_user_zeroing_intel_nocache(void *to,
24591 const void __user *from, unsigned long size)
24592 {
24593 int d0, d1;
24594
24595 __asm__ __volatile__(
24596 " .align 2,0x90\n"
24597- "0: movl 32(%4), %%eax\n"
24598+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
24599 " cmpl $67, %0\n"
24600 " jbe 2f\n"
24601- "1: movl 64(%4), %%eax\n"
24602+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
24603 " .align 2,0x90\n"
24604- "2: movl 0(%4), %%eax\n"
24605- "21: movl 4(%4), %%edx\n"
24606+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
24607+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
24608 " movnti %%eax, 0(%3)\n"
24609 " movnti %%edx, 4(%3)\n"
24610- "3: movl 8(%4), %%eax\n"
24611- "31: movl 12(%4),%%edx\n"
24612+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
24613+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
24614 " movnti %%eax, 8(%3)\n"
24615 " movnti %%edx, 12(%3)\n"
24616- "4: movl 16(%4), %%eax\n"
24617- "41: movl 20(%4), %%edx\n"
24618+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
24619+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
24620 " movnti %%eax, 16(%3)\n"
24621 " movnti %%edx, 20(%3)\n"
24622- "10: movl 24(%4), %%eax\n"
24623- "51: movl 28(%4), %%edx\n"
24624+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
24625+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
24626 " movnti %%eax, 24(%3)\n"
24627 " movnti %%edx, 28(%3)\n"
24628- "11: movl 32(%4), %%eax\n"
24629- "61: movl 36(%4), %%edx\n"
24630+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
24631+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
24632 " movnti %%eax, 32(%3)\n"
24633 " movnti %%edx, 36(%3)\n"
24634- "12: movl 40(%4), %%eax\n"
24635- "71: movl 44(%4), %%edx\n"
24636+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
24637+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
24638 " movnti %%eax, 40(%3)\n"
24639 " movnti %%edx, 44(%3)\n"
24640- "13: movl 48(%4), %%eax\n"
24641- "81: movl 52(%4), %%edx\n"
24642+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
24643+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
24644 " movnti %%eax, 48(%3)\n"
24645 " movnti %%edx, 52(%3)\n"
24646- "14: movl 56(%4), %%eax\n"
24647- "91: movl 60(%4), %%edx\n"
24648+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
24649+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
24650 " movnti %%eax, 56(%3)\n"
24651 " movnti %%edx, 60(%3)\n"
24652 " addl $-64, %0\n"
24653@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
24654 " shrl $2, %0\n"
24655 " andl $3, %%eax\n"
24656 " cld\n"
24657- "6: rep; movsl\n"
24658+ "6: rep; "__copyuser_seg" movsl\n"
24659 " movl %%eax,%0\n"
24660- "7: rep; movsb\n"
24661+ "7: rep; "__copyuser_seg" movsb\n"
24662 "8:\n"
24663 ".section .fixup,\"ax\"\n"
24664 "9: lea 0(%%eax,%0,4),%0\n"
24665@@ -392,48 +500,48 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
24666 return size;
24667 }
24668
24669-static unsigned long __copy_user_intel_nocache(void *to,
24670+static unsigned long __size_overflow(3) __copy_user_intel_nocache(void *to,
24671 const void __user *from, unsigned long size)
24672 {
24673 int d0, d1;
24674
24675 __asm__ __volatile__(
24676 " .align 2,0x90\n"
24677- "0: movl 32(%4), %%eax\n"
24678+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
24679 " cmpl $67, %0\n"
24680 " jbe 2f\n"
24681- "1: movl 64(%4), %%eax\n"
24682+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
24683 " .align 2,0x90\n"
24684- "2: movl 0(%4), %%eax\n"
24685- "21: movl 4(%4), %%edx\n"
24686+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
24687+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
24688 " movnti %%eax, 0(%3)\n"
24689 " movnti %%edx, 4(%3)\n"
24690- "3: movl 8(%4), %%eax\n"
24691- "31: movl 12(%4),%%edx\n"
24692+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
24693+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
24694 " movnti %%eax, 8(%3)\n"
24695 " movnti %%edx, 12(%3)\n"
24696- "4: movl 16(%4), %%eax\n"
24697- "41: movl 20(%4), %%edx\n"
24698+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
24699+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
24700 " movnti %%eax, 16(%3)\n"
24701 " movnti %%edx, 20(%3)\n"
24702- "10: movl 24(%4), %%eax\n"
24703- "51: movl 28(%4), %%edx\n"
24704+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
24705+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
24706 " movnti %%eax, 24(%3)\n"
24707 " movnti %%edx, 28(%3)\n"
24708- "11: movl 32(%4), %%eax\n"
24709- "61: movl 36(%4), %%edx\n"
24710+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
24711+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
24712 " movnti %%eax, 32(%3)\n"
24713 " movnti %%edx, 36(%3)\n"
24714- "12: movl 40(%4), %%eax\n"
24715- "71: movl 44(%4), %%edx\n"
24716+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
24717+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
24718 " movnti %%eax, 40(%3)\n"
24719 " movnti %%edx, 44(%3)\n"
24720- "13: movl 48(%4), %%eax\n"
24721- "81: movl 52(%4), %%edx\n"
24722+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
24723+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
24724 " movnti %%eax, 48(%3)\n"
24725 " movnti %%edx, 52(%3)\n"
24726- "14: movl 56(%4), %%eax\n"
24727- "91: movl 60(%4), %%edx\n"
24728+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
24729+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
24730 " movnti %%eax, 56(%3)\n"
24731 " movnti %%edx, 60(%3)\n"
24732 " addl $-64, %0\n"
24733@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
24734 " shrl $2, %0\n"
24735 " andl $3, %%eax\n"
24736 " cld\n"
24737- "6: rep; movsl\n"
24738+ "6: rep; "__copyuser_seg" movsl\n"
24739 " movl %%eax,%0\n"
24740- "7: rep; movsb\n"
24741+ "7: rep; "__copyuser_seg" movsb\n"
24742 "8:\n"
24743 ".section .fixup,\"ax\"\n"
24744 "9: lea 0(%%eax,%0,4),%0\n"
24745@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
24746 */
24747 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
24748 unsigned long size);
24749-unsigned long __copy_user_intel(void __user *to, const void *from,
24750+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
24751+ unsigned long size);
24752+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
24753 unsigned long size);
24754 unsigned long __copy_user_zeroing_intel_nocache(void *to,
24755 const void __user *from, unsigned long size);
24756 #endif /* CONFIG_X86_INTEL_USERCOPY */
24757
24758 /* Generic arbitrary sized copy. */
24759-#define __copy_user(to, from, size) \
24760+#define __copy_user(to, from, size, prefix, set, restore) \
24761 do { \
24762 int __d0, __d1, __d2; \
24763 __asm__ __volatile__( \
24764+ set \
24765 " cmp $7,%0\n" \
24766 " jbe 1f\n" \
24767 " movl %1,%0\n" \
24768 " negl %0\n" \
24769 " andl $7,%0\n" \
24770 " subl %0,%3\n" \
24771- "4: rep; movsb\n" \
24772+ "4: rep; "prefix"movsb\n" \
24773 " movl %3,%0\n" \
24774 " shrl $2,%0\n" \
24775 " andl $3,%3\n" \
24776 " .align 2,0x90\n" \
24777- "0: rep; movsl\n" \
24778+ "0: rep; "prefix"movsl\n" \
24779 " movl %3,%0\n" \
24780- "1: rep; movsb\n" \
24781+ "1: rep; "prefix"movsb\n" \
24782 "2:\n" \
24783+ restore \
24784 ".section .fixup,\"ax\"\n" \
24785 "5: addl %3,%0\n" \
24786 " jmp 2b\n" \
24787@@ -538,14 +650,14 @@ do { \
24788 " negl %0\n" \
24789 " andl $7,%0\n" \
24790 " subl %0,%3\n" \
24791- "4: rep; movsb\n" \
24792+ "4: rep; "__copyuser_seg"movsb\n" \
24793 " movl %3,%0\n" \
24794 " shrl $2,%0\n" \
24795 " andl $3,%3\n" \
24796 " .align 2,0x90\n" \
24797- "0: rep; movsl\n" \
24798+ "0: rep; "__copyuser_seg"movsl\n" \
24799 " movl %3,%0\n" \
24800- "1: rep; movsb\n" \
24801+ "1: rep; "__copyuser_seg"movsb\n" \
24802 "2:\n" \
24803 ".section .fixup,\"ax\"\n" \
24804 "5: addl %3,%0\n" \
24805@@ -629,9 +741,9 @@ survive:
24806 #endif
24807 stac();
24808 if (movsl_is_ok(to, from, n))
24809- __copy_user(to, from, n);
24810+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
24811 else
24812- n = __copy_user_intel(to, from, n);
24813+ n = __generic_copy_to_user_intel(to, from, n);
24814 clac();
24815 return n;
24816 }
24817@@ -655,10 +767,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
24818 {
24819 stac();
24820 if (movsl_is_ok(to, from, n))
24821- __copy_user(to, from, n);
24822+ __copy_user(to, from, n, __copyuser_seg, "", "");
24823 else
24824- n = __copy_user_intel((void __user *)to,
24825- (const void *)from, n);
24826+ n = __generic_copy_from_user_intel(to, from, n);
24827 clac();
24828 return n;
24829 }
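
The __COPYUSER_SET_ES / __COPYUSER_RESTORE_ES brackets added in the hunks above exist because i386 string instructions (rep movs, rep stos) write through %es: a to-user copy under UDEREF must point %es at the user segment for the duration and restore the kernel's flat segment afterwards. A hedged guess at their shape (the real definitions live in the patched asm/uaccess_32.h, not in this hunk); under UDEREF the user segment is already kept in %gs, and %ss always holds a kernel data selector:

	/* Hedged sketch, not the patch's verbatim macros. */
	#define __COPYUSER_SET_ES	"pushl %%gs; popl %%es\n"	/* es := user segment   */
	#define __COPYUSER_RESTORE_ES	"pushl %%ss; popl %%es\n"	/* es := kernel segment */
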
24830@@ -689,66 +800,51 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
24831 if (n > 64 && cpu_has_xmm2)
24832 n = __copy_user_intel_nocache(to, from, n);
24833 else
24834- __copy_user(to, from, n);
24835+ __copy_user(to, from, n, __copyuser_seg, "", "");
24836 #else
24837- __copy_user(to, from, n);
24838+ __copy_user(to, from, n, __copyuser_seg, "", "");
24839 #endif
24840 clac();
24841 return n;
24842 }
24843 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
24844
24845-/**
24846- * copy_to_user: - Copy a block of data into user space.
24847- * @to: Destination address, in user space.
24848- * @from: Source address, in kernel space.
24849- * @n: Number of bytes to copy.
24850- *
24851- * Context: User context only. This function may sleep.
24852- *
24853- * Copy data from kernel space to user space.
24854- *
24855- * Returns number of bytes that could not be copied.
24856- * On success, this will be zero.
24857- */
24858-unsigned long
24859-copy_to_user(void __user *to, const void *from, unsigned long n)
24860-{
24861- if (access_ok(VERIFY_WRITE, to, n))
24862- n = __copy_to_user(to, from, n);
24863- return n;
24864-}
24865-EXPORT_SYMBOL(copy_to_user);
24866-
24867-/**
24868- * copy_from_user: - Copy a block of data from user space.
24869- * @to: Destination address, in kernel space.
24870- * @from: Source address, in user space.
24871- * @n: Number of bytes to copy.
24872- *
24873- * Context: User context only. This function may sleep.
24874- *
24875- * Copy data from user space to kernel space.
24876- *
24877- * Returns number of bytes that could not be copied.
24878- * On success, this will be zero.
24879- *
24880- * If some data could not be copied, this function will pad the copied
24881- * data to the requested size using zero bytes.
24882- */
24883-unsigned long
24884-_copy_from_user(void *to, const void __user *from, unsigned long n)
24885-{
24886- if (access_ok(VERIFY_READ, from, n))
24887- n = __copy_from_user(to, from, n);
24888- else
24889- memset(to, 0, n);
24890- return n;
24891-}
24892-EXPORT_SYMBOL(_copy_from_user);
24893-
24894 void copy_from_user_overflow(void)
24895 {
24896 WARN(1, "Buffer overflow detected!\n");
24897 }
24898 EXPORT_SYMBOL(copy_from_user_overflow);
24899+
24900+void copy_to_user_overflow(void)
24901+{
24902+ WARN(1, "Buffer overflow detected!\n");
24903+}
24904+EXPORT_SYMBOL(copy_to_user_overflow);
24905+
24906+#ifdef CONFIG_PAX_MEMORY_UDEREF
24907+void __set_fs(mm_segment_t x)
24908+{
24909+ switch (x.seg) {
24910+ case 0:
24911+ loadsegment(gs, 0);
24912+ break;
24913+ case TASK_SIZE_MAX:
24914+ loadsegment(gs, __USER_DS);
24915+ break;
24916+ case -1UL:
24917+ loadsegment(gs, __KERNEL_DS);
24918+ break;
24919+ default:
24920+ BUG();
24921+ }
24922+ return;
24923+}
24924+EXPORT_SYMBOL(__set_fs);
24925+
24926+void set_fs(mm_segment_t x)
24927+{
24928+ current_thread_info()->addr_limit = x;
24929+ __set_fs(x);
24930+}
24931+EXPORT_SYMBOL(set_fs);
24932+#endif
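
The __set_fs()/set_fs() pair added above is the i386 UDEREF pivot: changes to addr_limit are mirrored into %gs (a null selector for no access, __USER_DS for user space, __KERNEL_DS for KERNEL_DS), and every userland access in this file is then emitted through the "gs;" prefix spelled __copyuser_seg. What a single prefixed load looks like, purely as illustration:

	/* Sketch: one byte read from userland via the %gs override. */
	static inline unsigned char uderef_load_byte(const unsigned char __user *p)
	{
		unsigned char v;

		asm volatile("gs; movb %1, %0"
			     : "=q"(v)
			     : "m"(*(const unsigned char __force *)p));
		return v;
	}
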
24933diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
24934index 05928aa..b33dea1 100644
24935--- a/arch/x86/lib/usercopy_64.c
24936+++ b/arch/x86/lib/usercopy_64.c
24937@@ -39,7 +39,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
24938 _ASM_EXTABLE(0b,3b)
24939 _ASM_EXTABLE(1b,2b)
24940 : [size8] "=&c"(size), [dst] "=&D" (__d0)
24941- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
24942+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
24943 [zero] "r" (0UL), [eight] "r" (8UL));
24944 clac();
24945 return size;
24946@@ -54,12 +54,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
24947 }
24948 EXPORT_SYMBOL(clear_user);
24949
24950-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
24951+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
24952 {
24953- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
24954- return copy_user_generic((__force void *)to, (__force void *)from, len);
24955- }
24956- return len;
24957+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
24958+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
24959+ return len;
24960 }
24961 EXPORT_SYMBOL(copy_in_user);
24962
24963@@ -69,7 +68,7 @@ EXPORT_SYMBOL(copy_in_user);
24964 * it is not necessary to optimize tail handling.
24965 */
24966 unsigned long
24967-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
24968+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
24969 {
24970 char c;
24971 unsigned zero_len;
24972@@ -87,3 +86,15 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
24973 clac();
24974 return len;
24975 }
24976+
24977+void copy_from_user_overflow(void)
24978+{
24979+ WARN(1, "Buffer overflow detected!\n");
24980+}
24981+EXPORT_SYMBOL(copy_from_user_overflow);
24982+
24983+void copy_to_user_overflow(void)
24984+{
24985+ WARN(1, "Buffer overflow detected!\n");
24986+}
24987+EXPORT_SYMBOL(copy_to_user_overflow);
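
Besides masking user pointers with ____m() before handing them to copy_user_generic(), the hunk above adds copy_from_user_overflow()/copy_to_user_overflow() report stubs and tags internal helpers with __size_overflow(3) for the size_overflow gcc plugin. The stubs back checks of the following shape (a sketch modelled on mainline's object-size checking, not code from this patch):

	/* Sketch: report copies whose length provably exceeds the source object. */
	static inline unsigned long
	checked_copy_to_user(void __user *to, const void *from, unsigned long n)
	{
		size_t sz = __builtin_object_size(from, 0);	/* (size_t)-1 = unknown */

		if (sz != (size_t)-1 && n > sz) {
			copy_to_user_overflow();	/* WARN(1, "Buffer overflow detected!") */
			return n;			/* refuse the copy */
		}
		return copy_to_user(to, from, n);
	}
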
24988diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
24989index 903ec1e..c4166b2 100644
24990--- a/arch/x86/mm/extable.c
24991+++ b/arch/x86/mm/extable.c
24992@@ -6,12 +6,24 @@
24993 static inline unsigned long
24994 ex_insn_addr(const struct exception_table_entry *x)
24995 {
24996- return (unsigned long)&x->insn + x->insn;
24997+ unsigned long reloc = 0;
24998+
24999+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
25000+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
25001+#endif
25002+
25003+ return (unsigned long)&x->insn + x->insn + reloc;
25004 }
25005 static inline unsigned long
25006 ex_fixup_addr(const struct exception_table_entry *x)
25007 {
25008- return (unsigned long)&x->fixup + x->fixup;
25009+ unsigned long reloc = 0;
25010+
25011+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
25012+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
25013+#endif
25014+
25015+ return (unsigned long)&x->fixup + x->fixup + reloc;
25016 }
25017
25018 int fixup_exception(struct pt_regs *regs)
25019@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
25020 unsigned long new_ip;
25021
25022 #ifdef CONFIG_PNPBIOS
25023- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
25024+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
25025 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
25026 extern u32 pnp_bios_is_utter_crap;
25027 pnp_bios_is_utter_crap = 1;
25028@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
25029 i += 4;
25030 p->fixup -= i;
25031 i += 4;
25032+
25033+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
25034+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
25035+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
25036+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
25037+#endif
25038+
25039 }
25040 }
25041
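
The exception table stores insn and fixup as 32-bit self-relative offsets, so an entry's absolute address is "address of the slot plus the value stored in it"; the hunk above simply adds the i386 KERNEXEC load-address delta (____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR) on top, and sort_extable() subtracts the same delta so build-time-sorted tables stay consistent. The encoding in miniature:

	/* Self-relative extable encoding decoded by ex_insn_addr()/ex_fixup_addr(). */
	static unsigned long rel_to_abs(const int *slot, long reloc)
	{
		return (unsigned long)slot + *slot + reloc;	/* &x->insn + x->insn + reloc */
	}

	static void abs_to_rel(int *slot, unsigned long abs)
	{
		*slot = (int)(abs - (unsigned long)slot);	/* store PC-relative offset */
	}
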
25042diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
25043index 8e13ecb..831f2d0 100644
25044--- a/arch/x86/mm/fault.c
25045+++ b/arch/x86/mm/fault.c
25046@@ -13,12 +13,19 @@
25047 #include <linux/perf_event.h> /* perf_sw_event */
25048 #include <linux/hugetlb.h> /* hstate_index_to_shift */
25049 #include <linux/prefetch.h> /* prefetchw */
25050+#include <linux/unistd.h>
25051+#include <linux/compiler.h>
25052
25053 #include <asm/traps.h> /* dotraplinkage, ... */
25054 #include <asm/pgalloc.h> /* pgd_*(), ... */
25055 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
25056 #include <asm/fixmap.h> /* VSYSCALL_START */
25057 #include <asm/rcu.h> /* exception_enter(), ... */
25058+#include <asm/tlbflush.h>
25059+
25060+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25061+#include <asm/stacktrace.h>
25062+#endif
25063
25064 /*
25065 * Page fault error code bits:
25066@@ -56,7 +63,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
25067 int ret = 0;
25068
25069 /* kprobe_running() needs smp_processor_id() */
25070- if (kprobes_built_in() && !user_mode_vm(regs)) {
25071+ if (kprobes_built_in() && !user_mode(regs)) {
25072 preempt_disable();
25073 if (kprobe_running() && kprobe_fault_handler(regs, 14))
25074 ret = 1;
25075@@ -117,7 +124,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
25076 return !instr_lo || (instr_lo>>1) == 1;
25077 case 0x00:
25078 /* Prefetch instruction is 0x0F0D or 0x0F18 */
25079- if (probe_kernel_address(instr, opcode))
25080+ if (user_mode(regs)) {
25081+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
25082+ return 0;
25083+ } else if (probe_kernel_address(instr, opcode))
25084 return 0;
25085
25086 *prefetch = (instr_lo == 0xF) &&
25087@@ -151,7 +161,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
25088 while (instr < max_instr) {
25089 unsigned char opcode;
25090
25091- if (probe_kernel_address(instr, opcode))
25092+ if (user_mode(regs)) {
25093+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
25094+ break;
25095+ } else if (probe_kernel_address(instr, opcode))
25096 break;
25097
25098 instr++;
25099@@ -182,6 +195,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
25100 force_sig_info(si_signo, &info, tsk);
25101 }
25102
25103+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25104+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
25105+#endif
25106+
25107+#ifdef CONFIG_PAX_EMUTRAMP
25108+static int pax_handle_fetch_fault(struct pt_regs *regs);
25109+#endif
25110+
25111+#ifdef CONFIG_PAX_PAGEEXEC
25112+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
25113+{
25114+ pgd_t *pgd;
25115+ pud_t *pud;
25116+ pmd_t *pmd;
25117+
25118+ pgd = pgd_offset(mm, address);
25119+ if (!pgd_present(*pgd))
25120+ return NULL;
25121+ pud = pud_offset(pgd, address);
25122+ if (!pud_present(*pud))
25123+ return NULL;
25124+ pmd = pmd_offset(pud, address);
25125+ if (!pmd_present(*pmd))
25126+ return NULL;
25127+ return pmd;
25128+}
25129+#endif
25130+
25131 DEFINE_SPINLOCK(pgd_lock);
25132 LIST_HEAD(pgd_list);
25133
25134@@ -232,10 +273,22 @@ void vmalloc_sync_all(void)
25135 for (address = VMALLOC_START & PMD_MASK;
25136 address >= TASK_SIZE && address < FIXADDR_TOP;
25137 address += PMD_SIZE) {
25138+
25139+#ifdef CONFIG_PAX_PER_CPU_PGD
25140+ unsigned long cpu;
25141+#else
25142 struct page *page;
25143+#endif
25144
25145 spin_lock(&pgd_lock);
25146+
25147+#ifdef CONFIG_PAX_PER_CPU_PGD
25148+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25149+ pgd_t *pgd = get_cpu_pgd(cpu);
25150+ pmd_t *ret;
25151+#else
25152 list_for_each_entry(page, &pgd_list, lru) {
25153+ pgd_t *pgd = page_address(page);
25154 spinlock_t *pgt_lock;
25155 pmd_t *ret;
25156
25157@@ -243,8 +296,13 @@ void vmalloc_sync_all(void)
25158 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
25159
25160 spin_lock(pgt_lock);
25161- ret = vmalloc_sync_one(page_address(page), address);
25162+#endif
25163+
25164+ ret = vmalloc_sync_one(pgd, address);
25165+
25166+#ifndef CONFIG_PAX_PER_CPU_PGD
25167 spin_unlock(pgt_lock);
25168+#endif
25169
25170 if (!ret)
25171 break;
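
Under CONFIG_PAX_PER_CPU_PGD each CPU owns a private top-level page table instead of sharing the per-mm one, so a freshly instantiated vmalloc mapping has to be stamped into every CPU's PGD rather than into the entries on pgd_list. The branch above, distilled (locking elided; the real code holds pgd_lock):

	/* Distilled form of the PAX_PER_CPU_PGD branch above. */
	static void vmalloc_sync_address(unsigned long address)
	{
		unsigned long cpu;

		for (cpu = 0; cpu < nr_cpu_ids; ++cpu)
			if (!vmalloc_sync_one(get_cpu_pgd(cpu), address))
				break;	/* stop at the first failure, as above */
	}
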
25172@@ -278,6 +336,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
25173 * an interrupt in the middle of a task switch..
25174 */
25175 pgd_paddr = read_cr3();
25176+
25177+#ifdef CONFIG_PAX_PER_CPU_PGD
25178+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
25179+#endif
25180+
25181 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
25182 if (!pmd_k)
25183 return -1;
25184@@ -373,7 +436,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
25185 * happen within a race in page table update. In the later
25186 * case just flush:
25187 */
25188+
25189+#ifdef CONFIG_PAX_PER_CPU_PGD
25190+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
25191+ pgd = pgd_offset_cpu(smp_processor_id(), address);
25192+#else
25193 pgd = pgd_offset(current->active_mm, address);
25194+#endif
25195+
25196 pgd_ref = pgd_offset_k(address);
25197 if (pgd_none(*pgd_ref))
25198 return -1;
25199@@ -541,7 +611,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
25200 static int is_errata100(struct pt_regs *regs, unsigned long address)
25201 {
25202 #ifdef CONFIG_X86_64
25203- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
25204+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
25205 return 1;
25206 #endif
25207 return 0;
25208@@ -568,7 +638,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
25209 }
25210
25211 static const char nx_warning[] = KERN_CRIT
25212-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
25213+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
25214
25215 static void
25216 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
25217@@ -577,15 +647,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
25218 if (!oops_may_print())
25219 return;
25220
25221- if (error_code & PF_INSTR) {
25222+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
25223 unsigned int level;
25224
25225 pte_t *pte = lookup_address(address, &level);
25226
25227 if (pte && pte_present(*pte) && !pte_exec(*pte))
25228- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
25229+ printk(nx_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
25230 }
25231
25232+#ifdef CONFIG_PAX_KERNEXEC
25233+ if (init_mm.start_code <= address && address < init_mm.end_code) {
25234+ if (current->signal->curr_ip)
25235+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
25236+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
25237+ else
25238+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
25239+ current->comm, task_pid_nr(current), current_uid(), current_euid());
25240+ }
25241+#endif
25242+
25243 printk(KERN_ALERT "BUG: unable to handle kernel ");
25244 if (address < PAGE_SIZE)
25245 printk(KERN_CONT "NULL pointer dereference");
25246@@ -749,6 +830,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
25247 }
25248 #endif
25249
25250+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25251+ if (pax_is_fetch_fault(regs, error_code, address)) {
25252+
25253+#ifdef CONFIG_PAX_EMUTRAMP
25254+ switch (pax_handle_fetch_fault(regs)) {
25255+ case 2:
25256+ return;
25257+ }
25258+#endif
25259+
25260+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
25261+ do_group_exit(SIGKILL);
25262+ }
25263+#endif
25264+
25265 if (unlikely(show_unhandled_signals))
25266 show_signal_msg(regs, error_code, address, tsk);
25267
25268@@ -845,7 +941,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
25269 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
25270 printk(KERN_ERR
25271 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
25272- tsk->comm, tsk->pid, address);
25273+ tsk->comm, task_pid_nr(tsk), address);
25274 code = BUS_MCEERR_AR;
25275 }
25276 #endif
25277@@ -901,6 +997,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
25278 return 1;
25279 }
25280
25281+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
25282+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
25283+{
25284+ pte_t *pte;
25285+ pmd_t *pmd;
25286+ spinlock_t *ptl;
25287+ unsigned char pte_mask;
25288+
25289+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
25290+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
25291+ return 0;
25292+
25293+ /* PaX: it's our fault, let's handle it if we can */
25294+
25295+ /* PaX: take a look at read faults before acquiring any locks */
25296+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
25297+ /* instruction fetch attempt from a protected page in user mode */
25298+ up_read(&mm->mmap_sem);
25299+
25300+#ifdef CONFIG_PAX_EMUTRAMP
25301+ switch (pax_handle_fetch_fault(regs)) {
25302+ case 2:
25303+ return 1;
25304+ }
25305+#endif
25306+
25307+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
25308+ do_group_exit(SIGKILL);
25309+ }
25310+
25311+ pmd = pax_get_pmd(mm, address);
25312+ if (unlikely(!pmd))
25313+ return 0;
25314+
25315+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
25316+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
25317+ pte_unmap_unlock(pte, ptl);
25318+ return 0;
25319+ }
25320+
25321+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
25322+ /* write attempt to a protected page in user mode */
25323+ pte_unmap_unlock(pte, ptl);
25324+ return 0;
25325+ }
25326+
25327+#ifdef CONFIG_SMP
25328+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
25329+#else
25330+ if (likely(address > get_limit(regs->cs)))
25331+#endif
25332+ {
25333+ set_pte(pte, pte_mkread(*pte));
25334+ __flush_tlb_one(address);
25335+ pte_unmap_unlock(pte, ptl);
25336+ up_read(&mm->mmap_sem);
25337+ return 1;
25338+ }
25339+
25340+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
25341+
25342+ /*
25343+ * PaX: fill DTLB with user rights and retry
25344+ */
25345+ __asm__ __volatile__ (
25346+ "orb %2,(%1)\n"
25347+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
25348+/*
25349+ * PaX: let this uncommented 'invlpg' remind us of the behaviour of Intel's
25350+ * (and AMD's) TLBs. Namely, they do not cache PTEs that would raise *any*
25351+ * page fault when examined during a TLB load attempt. This is true not only
25352+ * for PTEs holding a non-present entry but also for present entries that will
25353+ * raise a page fault (such as those set up by PaX, or the copy-on-write
25354+ * mechanism). In effect it means that we do *not* need to flush the TLBs
25355+ * for our target pages since their PTEs are simply not in the TLBs at all.
25356+ *
25357+ * The best thing about omitting it is that we gain around 15-20% speed in the
25358+ * fast path of the page fault handler and can get rid of tracing since we
25359+ * can no longer flush unintended entries.
25360+ */
25361+ "invlpg (%0)\n"
25362+#endif
25363+ __copyuser_seg"testb $0,(%0)\n"
25364+ "xorb %3,(%1)\n"
25365+ :
25366+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
25367+ : "memory", "cc");
25368+ pte_unmap_unlock(pte, ptl);
25369+ up_read(&mm->mmap_sem);
25370+ return 1;
25371+}
25372+#endif
25373+
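
pax_handle_pageexec_fault() above is the software-NX core for CPUs without hardware NX: user pages are kept supervisor-only, so instruction fetches always fault, while a legitimate data access is satisfied by briefly granting user rights on the PTE, touching the address through the user segment so only the DTLB caches the permissive translation, and then revoking _PAGE_USER again; the comment in the hunk explains why no TLB flush is needed. The priming sequence, distilled (assumes i386 UDEREF so the touch goes through %gs):

	/* Distilled DTLB-priming step of pax_handle_pageexec_fault(). */
	static void dtlb_prime(unsigned long address, pte_t *pte, unsigned char pte_mask)
	{
		asm volatile("orb %2, (%1)\n"		/* grant user rights on the PTE */
			     "gs; testb $0, (%0)\n"	/* data touch loads the DTLB    */
			     "xorb %3, (%1)\n"		/* clear _PAGE_USER again; the  */
							/* DTLB keeps the stale entry   */
			     : : "r"(address), "r"(pte), "q"(pte_mask), "i"(_PAGE_USER)
			     : "memory", "cc");
	}
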
25374 /*
25375 * Handle a spurious fault caused by a stale TLB entry.
25376 *
25377@@ -973,6 +1162,9 @@ int show_unhandled_signals = 1;
25378 static inline int
25379 access_error(unsigned long error_code, struct vm_area_struct *vma)
25380 {
25381+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
25382+ return 1;
25383+
25384 if (error_code & PF_WRITE) {
25385 /* write, present and write, not present: */
25386 if (unlikely(!(vma->vm_flags & VM_WRITE)))
25387@@ -1001,7 +1193,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
25388 if (error_code & PF_USER)
25389 return false;
25390
25391- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
25392+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
25393 return false;
25394
25395 return true;
25396@@ -1017,18 +1209,33 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
25397 {
25398 struct vm_area_struct *vma;
25399 struct task_struct *tsk;
25400- unsigned long address;
25401 struct mm_struct *mm;
25402 int fault;
25403 int write = error_code & PF_WRITE;
25404 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
25405 (write ? FAULT_FLAG_WRITE : 0);
25406
25407- tsk = current;
25408- mm = tsk->mm;
25409-
25410 /* Get the faulting address: */
25411- address = read_cr2();
25412+ unsigned long address = read_cr2();
25413+
25414+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25415+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
25416+ if (!search_exception_tables(regs->ip)) {
25417+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
25418+ bad_area_nosemaphore(regs, error_code, address);
25419+ return;
25420+ }
25421+ if (address < PAX_USER_SHADOW_BASE) {
25422+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
25423+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
25424+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
25425+ } else
25426+ address -= PAX_USER_SHADOW_BASE;
25427+ }
25428+#endif
25429+
25430+ tsk = current;
25431+ mm = tsk->mm;
25432
25433 /*
25434 * Detect and handle instructions that would cause a page fault for
25435@@ -1089,7 +1296,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
25436 * User-mode registers count as a user access even for any
25437 * potential system fault or CPU buglet:
25438 */
25439- if (user_mode_vm(regs)) {
25440+ if (user_mode(regs)) {
25441 local_irq_enable();
25442 error_code |= PF_USER;
25443 } else {
25444@@ -1151,6 +1358,11 @@ retry:
25445 might_sleep();
25446 }
25447
25448+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
25449+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
25450+ return;
25451+#endif
25452+
25453 vma = find_vma(mm, address);
25454 if (unlikely(!vma)) {
25455 bad_area(regs, error_code, address);
25456@@ -1162,18 +1374,24 @@ retry:
25457 bad_area(regs, error_code, address);
25458 return;
25459 }
25460- if (error_code & PF_USER) {
25461- /*
25462- * Accessing the stack below %sp is always a bug.
25463- * The large cushion allows instructions like enter
25464- * and pusha to work. ("enter $65535, $31" pushes
25465- * 32 pointers and then decrements %sp by 65535.)
25466- */
25467- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
25468- bad_area(regs, error_code, address);
25469- return;
25470- }
25471+ /*
25472+ * Accessing the stack below %sp is always a bug.
25473+ * The large cushion allows instructions like enter
25474+ * and pusha to work. ("enter $65535, $31" pushes
25475+ * 32 pointers and then decrements %sp by 65535.)
25476+ */
25477+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
25478+ bad_area(regs, error_code, address);
25479+ return;
25480 }
25481+
25482+#ifdef CONFIG_PAX_SEGMEXEC
25483+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
25484+ bad_area(regs, error_code, address);
25485+ return;
25486+ }
25487+#endif
25488+
25489 if (unlikely(expand_stack(vma, address))) {
25490 bad_area(regs, error_code, address);
25491 return;
25492@@ -1237,3 +1455,292 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
25493 __do_page_fault(regs, error_code);
25494 exception_exit(regs);
25495 }
25496+
25497+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25498+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
25499+{
25500+ struct mm_struct *mm = current->mm;
25501+ unsigned long ip = regs->ip;
25502+
25503+ if (v8086_mode(regs))
25504+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
25505+
25506+#ifdef CONFIG_PAX_PAGEEXEC
25507+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
25508+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
25509+ return true;
25510+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
25511+ return true;
25512+ return false;
25513+ }
25514+#endif
25515+
25516+#ifdef CONFIG_PAX_SEGMEXEC
25517+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
25518+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
25519+ return true;
25520+ return false;
25521+ }
25522+#endif
25523+
25524+ return false;
25525+}
25526+#endif
25527+
25528+#ifdef CONFIG_PAX_EMUTRAMP
25529+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
25530+{
25531+ int err;
25532+
25533+ do { /* PaX: libffi trampoline emulation */
25534+ unsigned char mov, jmp;
25535+ unsigned int addr1, addr2;
25536+
25537+#ifdef CONFIG_X86_64
25538+ if ((regs->ip + 9) >> 32)
25539+ break;
25540+#endif
25541+
25542+ err = get_user(mov, (unsigned char __user *)regs->ip);
25543+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
25544+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
25545+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
25546+
25547+ if (err)
25548+ break;
25549+
25550+ if (mov == 0xB8 && jmp == 0xE9) {
25551+ regs->ax = addr1;
25552+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
25553+ return 2;
25554+ }
25555+ } while (0);
25556+
25557+ do { /* PaX: gcc trampoline emulation #1 */
25558+ unsigned char mov1, mov2;
25559+ unsigned short jmp;
25560+ unsigned int addr1, addr2;
25561+
25562+#ifdef CONFIG_X86_64
25563+ if ((regs->ip + 11) >> 32)
25564+ break;
25565+#endif
25566+
25567+ err = get_user(mov1, (unsigned char __user *)regs->ip);
25568+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
25569+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
25570+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
25571+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
25572+
25573+ if (err)
25574+ break;
25575+
25576+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
25577+ regs->cx = addr1;
25578+ regs->ax = addr2;
25579+ regs->ip = addr2;
25580+ return 2;
25581+ }
25582+ } while (0);
25583+
25584+ do { /* PaX: gcc trampoline emulation #2 */
25585+ unsigned char mov, jmp;
25586+ unsigned int addr1, addr2;
25587+
25588+#ifdef CONFIG_X86_64
25589+ if ((regs->ip + 9) >> 32)
25590+ break;
25591+#endif
25592+
25593+ err = get_user(mov, (unsigned char __user *)regs->ip);
25594+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
25595+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
25596+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
25597+
25598+ if (err)
25599+ break;
25600+
25601+ if (mov == 0xB9 && jmp == 0xE9) {
25602+ regs->cx = addr1;
25603+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
25604+ return 2;
25605+ }
25606+ } while (0);
25607+
25608+ return 1; /* PaX in action */
25609+}
25610+
25611+#ifdef CONFIG_X86_64
25612+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
25613+{
25614+ int err;
25615+
25616+ do { /* PaX: libffi trampoline emulation */
25617+ unsigned short mov1, mov2, jmp1;
25618+ unsigned char stcclc, jmp2;
25619+ unsigned long addr1, addr2;
25620+
25621+ err = get_user(mov1, (unsigned short __user *)regs->ip);
25622+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
25623+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
25624+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
25625+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
25626+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
25627+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
25628+
25629+ if (err)
25630+ break;
25631+
25632+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
25633+ regs->r11 = addr1;
25634+ regs->r10 = addr2;
25635+ if (stcclc == 0xF8)
25636+ regs->flags &= ~X86_EFLAGS_CF;
25637+ else
25638+ regs->flags |= X86_EFLAGS_CF;
25639+ regs->ip = addr1;
25640+ return 2;
25641+ }
25642+ } while (0);
25643+
25644+ do { /* PaX: gcc trampoline emulation #1 */
25645+ unsigned short mov1, mov2, jmp1;
25646+ unsigned char jmp2;
25647+ unsigned int addr1;
25648+ unsigned long addr2;
25649+
25650+ err = get_user(mov1, (unsigned short __user *)regs->ip);
25651+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
25652+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
25653+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
25654+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
25655+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
25656+
25657+ if (err)
25658+ break;
25659+
25660+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
25661+ regs->r11 = addr1;
25662+ regs->r10 = addr2;
25663+ regs->ip = addr1;
25664+ return 2;
25665+ }
25666+ } while (0);
25667+
25668+ do { /* PaX: gcc trampoline emulation #2 */
25669+ unsigned short mov1, mov2, jmp1;
25670+ unsigned char jmp2;
25671+ unsigned long addr1, addr2;
25672+
25673+ err = get_user(mov1, (unsigned short __user *)regs->ip);
25674+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
25675+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
25676+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
25677+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
25678+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
25679+
25680+ if (err)
25681+ break;
25682+
25683+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
25684+ regs->r11 = addr1;
25685+ regs->r10 = addr2;
25686+ regs->ip = addr1;
25687+ return 2;
25688+ }
25689+ } while (0);
25690+
25691+ return 1; /* PaX in action */
25692+}
25693+#endif
25694+
25695+/*
25696+ * PaX: decide what to do with offenders (regs->ip = fault address)
25697+ *
25698+ * returns 1 when task should be killed
25699+ * 2 when gcc trampoline was detected
25700+ */
25701+static int pax_handle_fetch_fault(struct pt_regs *regs)
25702+{
25703+ if (v8086_mode(regs))
25704+ return 1;
25705+
25706+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
25707+ return 1;
25708+
25709+#ifdef CONFIG_X86_32
25710+ return pax_handle_fetch_fault_32(regs);
25711+#else
25712+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
25713+ return pax_handle_fetch_fault_32(regs);
25714+ else
25715+ return pax_handle_fetch_fault_64(regs);
25716+#endif
25717+}
25718+#endif
25719+
25720+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25721+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
25722+{
25723+ long i;
25724+
25725+ printk(KERN_ERR "PAX: bytes at PC: ");
25726+ for (i = 0; i < 20; i++) {
25727+ unsigned char c;
25728+ if (get_user(c, (unsigned char __force_user *)pc+i))
25729+ printk(KERN_CONT "?? ");
25730+ else
25731+ printk(KERN_CONT "%02x ", c);
25732+ }
25733+ printk("\n");
25734+
25735+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
25736+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
25737+ unsigned long c;
25738+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
25739+#ifdef CONFIG_X86_32
25740+ printk(KERN_CONT "???????? ");
25741+#else
25742+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
25743+ printk(KERN_CONT "???????? ???????? ");
25744+ else
25745+ printk(KERN_CONT "???????????????? ");
25746+#endif
25747+ } else {
25748+#ifdef CONFIG_X86_64
25749+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
25750+ printk(KERN_CONT "%08x ", (unsigned int)c);
25751+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
25752+ } else
25753+#endif
25754+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
25755+ }
25756+ }
25757+ printk("\n");
25758+}
25759+#endif
25760+
25761+/**
25762+ * probe_kernel_write(): safely attempt to write to a location
25763+ * @dst: address to write to
25764+ * @src: pointer to the data that shall be written
25765+ * @size: size of the data chunk
25766+ *
25767+ * Safely write to address @dst from the buffer at @src. If a kernel fault
25768+ * happens, handle that and return -EFAULT.
25769+ */
25770+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
25771+{
25772+ long ret;
25773+ mm_segment_t old_fs = get_fs();
25774+
25775+ set_fs(KERNEL_DS);
25776+ pagefault_disable();
25777+ pax_open_kernel();
25778+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
25779+ pax_close_kernel();
25780+ pagefault_enable();
25781+ set_fs(old_fs);
25782+
25783+ return ret ? -EFAULT : 0;
25784+}
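
The three do-while(0) blocks in pax_handle_fetch_fault_32() above are byte-exact matchers for trampoline sequences that gcc and libffi emit at runtime; when one matches, the handler replays the trampoline's effect on the saved registers instead of letting the NX fault kill the task. A minimal standalone sketch of the "gcc trampoline #1" matcher (userspace C, not the kernel code; the buffer is an illustrative encoding of movl $imm32,%ecx / movl $imm32,%eax / jmp *%eax):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* B9 imm32, B8 imm32, FF E0: a little-endian 16-bit load of the last two
 * bytes reads back as 0xE0FF, which is what the kernel code compares. */
static int is_gcc_trampoline_1(const uint8_t *p, uint32_t *cx, uint32_t *ax)
{
	uint16_t jmp;

	if (p[0] != 0xB9 || p[5] != 0xB8)
		return 0;
	memcpy(&jmp, p + 10, sizeof(jmp));	/* assumes little-endian, like x86 */
	if (jmp != 0xE0FF)
		return 0;
	memcpy(cx, p + 1, sizeof(*cx));		/* operand of movl ...,%ecx */
	memcpy(ax, p + 6, sizeof(*ax));		/* operand of movl ...,%eax */
	return 1;
}

int main(void)
{
	/* movl $0x11223344,%ecx; movl $0x55667788,%eax; jmp *%eax */
	const uint8_t tramp[12] = { 0xB9, 0x44, 0x33, 0x22, 0x11,
				    0xB8, 0x88, 0x77, 0x66, 0x55,
				    0xFF, 0xE0 };
	uint32_t cx, ax;

	if (is_gcc_trampoline_1(tramp, &cx, &ax))
		printf("emulate: ecx=%#x, new eip=%#x\n",
		       (unsigned)cx, (unsigned)ax);
	return 0;
}
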
25785diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
25786index dd74e46..7d26398 100644
25787--- a/arch/x86/mm/gup.c
25788+++ b/arch/x86/mm/gup.c
25789@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
25790 addr = start;
25791 len = (unsigned long) nr_pages << PAGE_SHIFT;
25792 end = start + len;
25793- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
25794+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
25795 (void __user *)start, len)))
25796 return 0;
25797
25798diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
25799index 6f31ee5..8ee4164 100644
25800--- a/arch/x86/mm/highmem_32.c
25801+++ b/arch/x86/mm/highmem_32.c
25802@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
25803 idx = type + KM_TYPE_NR*smp_processor_id();
25804 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25805 BUG_ON(!pte_none(*(kmap_pte-idx)));
25806+
25807+ pax_open_kernel();
25808 set_pte(kmap_pte-idx, mk_pte(page, prot));
25809+ pax_close_kernel();
25810+
25811 arch_flush_lazy_mmu_mode();
25812
25813 return (void *)vaddr;
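
The pax_open_kernel()/pax_close_kernel() bracket added around set_pte() here is a recurring pattern in this patch: kernel page tables are kept read-only and made writable only for the duration of a single deliberate update. As a rough userspace analogue (only an illustration of the open/write/close discipline; the kernel primitive works differently, e.g. by toggling CR0.WP under KERNEXEC):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Make a sealed mapping briefly writable, store into it, seal it again. */
static void write_sealed(void *page, size_t len, const void *src, size_t n)
{
	mprotect(page, len, PROT_READ | PROT_WRITE);	/* "open" */
	memcpy(page, src, n);				/* the protected update */
	mprotect(page, len, PROT_READ);			/* "close" */
}

int main(void)
{
	size_t len = (size_t)sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, len, PROT_READ,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	write_sealed(p, len, "pte", 4);
	printf("%s\n", p);	/* prints "pte"; a direct store would fault */
	return 0;
}
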
25814diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
25815index 937bff5..a354c44 100644
25816--- a/arch/x86/mm/hugetlbpage.c
25817+++ b/arch/x86/mm/hugetlbpage.c
25818@@ -276,13 +276,21 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
25819 struct hstate *h = hstate_file(file);
25820 struct mm_struct *mm = current->mm;
25821 struct vm_area_struct *vma;
25822- unsigned long start_addr;
25823+ unsigned long start_addr, pax_task_size = TASK_SIZE;
25824+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
25825+
25826+#ifdef CONFIG_PAX_SEGMEXEC
25827+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25828+ pax_task_size = SEGMEXEC_TASK_SIZE;
25829+#endif
25830+
25831+ pax_task_size -= PAGE_SIZE;
25832
25833 if (len > mm->cached_hole_size) {
25834- start_addr = mm->free_area_cache;
25835+ start_addr = mm->free_area_cache;
25836 } else {
25837- start_addr = TASK_UNMAPPED_BASE;
25838- mm->cached_hole_size = 0;
25839+ start_addr = mm->mmap_base;
25840+ mm->cached_hole_size = 0;
25841 }
25842
25843 full_search:
25844@@ -290,26 +298,27 @@ full_search:
25845
25846 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
25847 /* At this point: (!vma || addr < vma->vm_end). */
25848- if (TASK_SIZE - len < addr) {
25849+ if (pax_task_size - len < addr) {
25850 /*
25851 * Start a new search - just in case we missed
25852 * some holes.
25853 */
25854- if (start_addr != TASK_UNMAPPED_BASE) {
25855- start_addr = TASK_UNMAPPED_BASE;
25856+ if (start_addr != mm->mmap_base) {
25857+ start_addr = mm->mmap_base;
25858 mm->cached_hole_size = 0;
25859 goto full_search;
25860 }
25861 return -ENOMEM;
25862 }
25863- if (!vma || addr + len <= vma->vm_start) {
25864- mm->free_area_cache = addr + len;
25865- return addr;
25866- }
25867+ if (check_heap_stack_gap(vma, addr, len, offset))
25868+ break;
25869 if (addr + mm->cached_hole_size < vma->vm_start)
25870 mm->cached_hole_size = vma->vm_start - addr;
25871 addr = ALIGN(vma->vm_end, huge_page_size(h));
25872 }
25873+
25874+ mm->free_area_cache = addr + len;
25875+ return addr;
25876 }
25877
25878 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
25879@@ -320,9 +329,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
25880 struct mm_struct *mm = current->mm;
25881 struct vm_area_struct *vma;
25882 unsigned long base = mm->mmap_base;
25883- unsigned long addr = addr0;
25884+ unsigned long addr;
25885 unsigned long largest_hole = mm->cached_hole_size;
25886- unsigned long start_addr;
25887+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
25888
25889 /* don't allow allocations above current base */
25890 if (mm->free_area_cache > base)
25891@@ -332,16 +341,15 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
25892 largest_hole = 0;
25893 mm->free_area_cache = base;
25894 }
25895-try_again:
25896- start_addr = mm->free_area_cache;
25897
25898 /* make sure it can fit in the remaining address space */
25899 if (mm->free_area_cache < len)
25900 goto fail;
25901
25902 /* either no address requested or can't fit in requested address hole */
25903- addr = (mm->free_area_cache - len) & huge_page_mask(h);
25904+ addr = mm->free_area_cache - len;
25905 do {
25906+ addr &= huge_page_mask(h);
25907 /*
25908 * Lookup failure means no vma is above this address,
25909 * i.e. return with success:
25910@@ -350,10 +358,10 @@ try_again:
25911 if (!vma)
25912 return addr;
25913
25914- if (addr + len <= vma->vm_start) {
25915+ if (check_heap_stack_gap(vma, addr, len, offset)) {
25916 /* remember the address as a hint for next time */
25917- mm->cached_hole_size = largest_hole;
25918- return (mm->free_area_cache = addr);
25919+ mm->cached_hole_size = largest_hole;
25920+ return (mm->free_area_cache = addr);
25921 } else if (mm->free_area_cache == vma->vm_end) {
25922 /* pull free_area_cache down to the first hole */
25923 mm->free_area_cache = vma->vm_start;
25924@@ -362,29 +370,34 @@ try_again:
25925
25926 /* remember the largest hole we saw so far */
25927 if (addr + largest_hole < vma->vm_start)
25928- largest_hole = vma->vm_start - addr;
25929+ largest_hole = vma->vm_start - addr;
25930
25931 /* try just below the current vma->vm_start */
25932- addr = (vma->vm_start - len) & huge_page_mask(h);
25933- } while (len <= vma->vm_start);
25934+ addr = skip_heap_stack_gap(vma, len, offset);
25935+ } while (!IS_ERR_VALUE(addr));
25936
25937 fail:
25938 /*
25939- * if hint left us with no space for the requested
25940- * mapping then try again:
25941- */
25942- if (start_addr != base) {
25943- mm->free_area_cache = base;
25944- largest_hole = 0;
25945- goto try_again;
25946- }
25947- /*
25948 * A failed mmap() very likely causes application failure,
25949 * so fall back to the bottom-up function here. This scenario
25950 * can happen with large stack limits and large mmap()
25951 * allocations.
25952 */
25953- mm->free_area_cache = TASK_UNMAPPED_BASE;
25954+
25955+#ifdef CONFIG_PAX_SEGMEXEC
25956+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25957+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
25958+ else
25959+#endif
25960+
25961+ mm->mmap_base = TASK_UNMAPPED_BASE;
25962+
25963+#ifdef CONFIG_PAX_RANDMMAP
25964+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25965+ mm->mmap_base += mm->delta_mmap;
25966+#endif
25967+
25968+ mm->free_area_cache = mm->mmap_base;
25969 mm->cached_hole_size = ~0UL;
25970 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
25971 len, pgoff, flags);
25972@@ -392,6 +405,7 @@ fail:
25973 /*
25974 * Restore the topdown base:
25975 */
25976+ mm->mmap_base = base;
25977 mm->free_area_cache = base;
25978 mm->cached_hole_size = ~0UL;
25979
25980@@ -405,10 +419,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
25981 struct hstate *h = hstate_file(file);
25982 struct mm_struct *mm = current->mm;
25983 struct vm_area_struct *vma;
25984+ unsigned long pax_task_size = TASK_SIZE;
25985+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
25986
25987 if (len & ~huge_page_mask(h))
25988 return -EINVAL;
25989- if (len > TASK_SIZE)
25990+
25991+#ifdef CONFIG_PAX_SEGMEXEC
25992+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25993+ pax_task_size = SEGMEXEC_TASK_SIZE;
25994+#endif
25995+
25996+ pax_task_size -= PAGE_SIZE;
25997+
25998+ if (len > pax_task_size)
25999 return -ENOMEM;
26000
26001 if (flags & MAP_FIXED) {
26002@@ -420,8 +444,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
26003 if (addr) {
26004 addr = ALIGN(addr, huge_page_size(h));
26005 vma = find_vma(mm, addr);
26006- if (TASK_SIZE - len >= addr &&
26007- (!vma || addr + len <= vma->vm_start))
26008+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
26009 return addr;
26010 }
26011 if (mm->get_unmapped_area == arch_get_unmapped_area)
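
All three hugetlb allocators above swap the plain "addr + len <= vma->vm_start" test for check_heap_stack_gap() with a per-thread random offset from gr_rand_threadstack_offset(). The intent is that a candidate range must not merely avoid the next vma but also keep a guard gap below a downward-growing stack. A hedged sketch of that predicate (field names, the gap size, and the exact boundary handling are illustrative, not the kernel's):

#include <stdbool.h>

struct vma_sketch {
	unsigned long vm_start;
	bool grows_down;		/* stands in for VM_GROWSDOWN */
};

static unsigned long heap_stack_gap = 64UL * 1024;	/* a tunable in grsecurity */

static bool gap_ok(const struct vma_sketch *vma, unsigned long addr,
		   unsigned long len, unsigned long rand_offset)
{
	if (!vma)
		return true;			/* nothing mapped above */
	if (addr + len > vma->vm_start)
		return false;			/* plain overlap */
	if (vma->grows_down)			/* keep clear of the stack */
		return vma->vm_start - (addr + len) >= heap_stack_gap + rand_offset;
	return true;
}
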
26012diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
26013index d7aea41..0fc945b 100644
26014--- a/arch/x86/mm/init.c
26015+++ b/arch/x86/mm/init.c
26016@@ -4,6 +4,7 @@
26017 #include <linux/swap.h>
26018 #include <linux/memblock.h>
26019 #include <linux/bootmem.h> /* for max_low_pfn */
26020+#include <linux/tboot.h>
26021
26022 #include <asm/cacheflush.h>
26023 #include <asm/e820.h>
26024@@ -16,6 +17,8 @@
26025 #include <asm/tlb.h>
26026 #include <asm/proto.h>
26027 #include <asm/dma.h> /* for MAX_DMA_PFN */
26028+#include <asm/desc.h>
26029+#include <asm/bios_ebda.h>
26030
26031 unsigned long __initdata pgt_buf_start;
26032 unsigned long __meminitdata pgt_buf_end;
26033@@ -44,7 +47,7 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
26034 {
26035 int i;
26036 unsigned long puds = 0, pmds = 0, ptes = 0, tables;
26037- unsigned long start = 0, good_end;
26038+ unsigned long start = 0x100000, good_end;
26039 phys_addr_t base;
26040
26041 for (i = 0; i < nr_range; i++) {
26042@@ -321,10 +324,40 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
26043 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
26044 * mmio resources as well as potential bios/acpi data regions.
26045 */
26046+
26047+#ifdef CONFIG_GRKERNSEC_KMEM
26048+static unsigned int ebda_start __read_only;
26049+static unsigned int ebda_end __read_only;
26050+#endif
26051+
26052 int devmem_is_allowed(unsigned long pagenr)
26053 {
26054- if (pagenr < 256)
26055+#ifdef CONFIG_GRKERNSEC_KMEM
26056+ /* allow BDA */
26057+ if (!pagenr)
26058 return 1;
26059+ /* allow EBDA */
26060+ if (pagenr >= ebda_start && pagenr < ebda_end)
26061+ return 1;
26062+ /* if tboot is in use, allow access to its hardcoded serial log range */
26063+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
26064+ return 1;
26065+#else
26066+ if (!pagenr)
26067+ return 1;
26068+#ifdef CONFIG_VM86
26069+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
26070+ return 1;
26071+#endif
26072+#endif
26073+
26074+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
26075+ return 1;
26076+#ifdef CONFIG_GRKERNSEC_KMEM
26077+ /* throw out everything else below 1MB */
26078+ if (pagenr <= 256)
26079+ return 0;
26080+#endif
26081 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
26082 return 0;
26083 if (!page_is_ram(pagenr))
26084@@ -381,8 +414,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
26085 #endif
26086 }
26087
26088+#ifdef CONFIG_GRKERNSEC_KMEM
26089+static inline void gr_init_ebda(void)
26090+{
26091+ unsigned int ebda_addr;
26092+ unsigned int ebda_size = 0;
26093+
26094+ ebda_addr = get_bios_ebda();
26095+ if (ebda_addr) {
26096+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
26097+ ebda_size <<= 10;
26098+ }
26099+ if (ebda_addr && ebda_size) {
26100+ ebda_start = ebda_addr >> PAGE_SHIFT;
26101+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
26102+ } else {
26103+ ebda_start = 0x9f000 >> PAGE_SHIFT;
26104+ ebda_end = 0xa0000 >> PAGE_SHIFT;
26105+ }
26106+}
26107+#else
26108+static inline void gr_init_ebda(void) { }
26109+#endif
26110+
26111 void free_initmem(void)
26112 {
26113+#ifdef CONFIG_PAX_KERNEXEC
26114+#ifdef CONFIG_X86_32
26115+ /* PaX: limit KERNEL_CS to actual size */
26116+ unsigned long addr, limit;
26117+ struct desc_struct d;
26118+ int cpu;
26119+#else
26120+ pgd_t *pgd;
26121+ pud_t *pud;
26122+ pmd_t *pmd;
26123+ unsigned long addr, end;
26124+#endif
26125+#endif
26126+
26127+ gr_init_ebda();
26128+
26129+#ifdef CONFIG_PAX_KERNEXEC
26130+#ifdef CONFIG_X86_32
26131+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
26132+ limit = (limit - 1UL) >> PAGE_SHIFT;
26133+
26134+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
26135+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
26136+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
26137+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
26138+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
26139+ }
26140+
26141+ /* PaX: make KERNEL_CS read-only */
26142+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
26143+ if (!paravirt_enabled())
26144+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
26145+/*
26146+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
26147+ pgd = pgd_offset_k(addr);
26148+ pud = pud_offset(pgd, addr);
26149+ pmd = pmd_offset(pud, addr);
26150+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
26151+ }
26152+*/
26153+#ifdef CONFIG_X86_PAE
26154+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
26155+/*
26156+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
26157+ pgd = pgd_offset_k(addr);
26158+ pud = pud_offset(pgd, addr);
26159+ pmd = pmd_offset(pud, addr);
26160+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
26161+ }
26162+*/
26163+#endif
26164+
26165+#ifdef CONFIG_MODULES
26166+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
26167+#endif
26168+
26169+#else
26170+ /* PaX: make kernel code/rodata read-only, rest non-executable */
26171+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
26172+ pgd = pgd_offset_k(addr);
26173+ pud = pud_offset(pgd, addr);
26174+ pmd = pmd_offset(pud, addr);
26175+ if (!pmd_present(*pmd))
26176+ continue;
26177+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
26178+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
26179+ else
26180+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
26181+ }
26182+
26183+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
26184+ end = addr + KERNEL_IMAGE_SIZE;
26185+ for (; addr < end; addr += PMD_SIZE) {
26186+ pgd = pgd_offset_k(addr);
26187+ pud = pud_offset(pgd, addr);
26188+ pmd = pmd_offset(pud, addr);
26189+ if (!pmd_present(*pmd))
26190+ continue;
26191+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
26192+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
26193+ }
26194+#endif
26195+
26196+ flush_tlb_all();
26197+#endif
26198+
26199 free_init_pages("unused kernel memory",
26200 (unsigned long)(&__init_begin),
26201 (unsigned long)(&__init_end));
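
The CONFIG_GRKERNSEC_KMEM branch added to devmem_is_allowed() above whitelists, below 1MB, only the BDA (page 0), the EBDA window that gr_init_ebda() detects (falling back to 0x9f000-0xa0000), and the legacy ISA/VGA/BIOS hole; every other sub-1MB page is refused, and higher pages still pass through the usual iomem_is_exclusive()/page_is_ram() checks. A compact restatement of the low-memory arithmetic (the tboot exception is elided):

#include <stdbool.h>

#define PAGE_SHIFT	12
#define ISA_START_PG	(0xa0000UL >> PAGE_SHIFT)	/* 0xa0 */
#define ISA_END_PG	(0x100000UL >> PAGE_SHIFT)	/* 0x100 */

static bool low_mem_allowed(unsigned long pagenr,
			    unsigned long ebda_start, unsigned long ebda_end)
{
	if (pagenr == 0)
		return true;				/* BDA */
	if (pagenr >= ebda_start && pagenr < ebda_end)
		return true;				/* detected EBDA */
	if (pagenr >= ISA_START_PG && pagenr < ISA_END_PG)
		return true;				/* ISA hole */
	return false;	/* "throw out everything else below 1MB" */
}
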
26202diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
26203index 11a5800..4bd9977 100644
26204--- a/arch/x86/mm/init_32.c
26205+++ b/arch/x86/mm/init_32.c
26206@@ -73,36 +73,6 @@ static __init void *alloc_low_page(void)
26207 }
26208
26209 /*
26210- * Creates a middle page table and puts a pointer to it in the
26211- * given global directory entry. This only returns the gd entry
26212- * in non-PAE compilation mode, since the middle layer is folded.
26213- */
26214-static pmd_t * __init one_md_table_init(pgd_t *pgd)
26215-{
26216- pud_t *pud;
26217- pmd_t *pmd_table;
26218-
26219-#ifdef CONFIG_X86_PAE
26220- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
26221- if (after_bootmem)
26222- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
26223- else
26224- pmd_table = (pmd_t *)alloc_low_page();
26225- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
26226- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
26227- pud = pud_offset(pgd, 0);
26228- BUG_ON(pmd_table != pmd_offset(pud, 0));
26229-
26230- return pmd_table;
26231- }
26232-#endif
26233- pud = pud_offset(pgd, 0);
26234- pmd_table = pmd_offset(pud, 0);
26235-
26236- return pmd_table;
26237-}
26238-
26239-/*
26240 * Create a page table and place a pointer to it in a middle page
26241 * directory entry:
26242 */
26243@@ -122,13 +92,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
26244 page_table = (pte_t *)alloc_low_page();
26245
26246 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
26247+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
26248+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
26249+#else
26250 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
26251+#endif
26252 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
26253 }
26254
26255 return pte_offset_kernel(pmd, 0);
26256 }
26257
26258+static pmd_t * __init one_md_table_init(pgd_t *pgd)
26259+{
26260+ pud_t *pud;
26261+ pmd_t *pmd_table;
26262+
26263+ pud = pud_offset(pgd, 0);
26264+ pmd_table = pmd_offset(pud, 0);
26265+
26266+ return pmd_table;
26267+}
26268+
26269 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
26270 {
26271 int pgd_idx = pgd_index(vaddr);
26272@@ -202,6 +187,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
26273 int pgd_idx, pmd_idx;
26274 unsigned long vaddr;
26275 pgd_t *pgd;
26276+ pud_t *pud;
26277 pmd_t *pmd;
26278 pte_t *pte = NULL;
26279
26280@@ -211,8 +197,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
26281 pgd = pgd_base + pgd_idx;
26282
26283 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
26284- pmd = one_md_table_init(pgd);
26285- pmd = pmd + pmd_index(vaddr);
26286+ pud = pud_offset(pgd, vaddr);
26287+ pmd = pmd_offset(pud, vaddr);
26288+
26289+#ifdef CONFIG_X86_PAE
26290+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
26291+#endif
26292+
26293 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
26294 pmd++, pmd_idx++) {
26295 pte = page_table_kmap_check(one_page_table_init(pmd),
26296@@ -224,11 +215,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
26297 }
26298 }
26299
26300-static inline int is_kernel_text(unsigned long addr)
26301+static inline int is_kernel_text(unsigned long start, unsigned long end)
26302 {
26303- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
26304- return 1;
26305- return 0;
26306+ if ((start > ktla_ktva((unsigned long)_etext) ||
26307+ end <= ktla_ktva((unsigned long)_stext)) &&
26308+ (start > ktla_ktva((unsigned long)_einittext) ||
26309+ end <= ktla_ktva((unsigned long)_sinittext)) &&
26310+
26311+#ifdef CONFIG_ACPI_SLEEP
26312+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
26313+#endif
26314+
26315+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
26316+ return 0;
26317+ return 1;
26318 }
26319
26320 /*
26321@@ -245,9 +245,10 @@ kernel_physical_mapping_init(unsigned long start,
26322 unsigned long last_map_addr = end;
26323 unsigned long start_pfn, end_pfn;
26324 pgd_t *pgd_base = swapper_pg_dir;
26325- int pgd_idx, pmd_idx, pte_ofs;
26326+ unsigned int pgd_idx, pmd_idx, pte_ofs;
26327 unsigned long pfn;
26328 pgd_t *pgd;
26329+ pud_t *pud;
26330 pmd_t *pmd;
26331 pte_t *pte;
26332 unsigned pages_2m, pages_4k;
26333@@ -280,8 +281,13 @@ repeat:
26334 pfn = start_pfn;
26335 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
26336 pgd = pgd_base + pgd_idx;
26337- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
26338- pmd = one_md_table_init(pgd);
26339+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
26340+ pud = pud_offset(pgd, 0);
26341+ pmd = pmd_offset(pud, 0);
26342+
26343+#ifdef CONFIG_X86_PAE
26344+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
26345+#endif
26346
26347 if (pfn >= end_pfn)
26348 continue;
26349@@ -293,14 +299,13 @@ repeat:
26350 #endif
26351 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
26352 pmd++, pmd_idx++) {
26353- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
26354+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
26355
26356 /*
26357 * Map with big pages if possible, otherwise
26358 * create normal page tables:
26359 */
26360 if (use_pse) {
26361- unsigned int addr2;
26362 pgprot_t prot = PAGE_KERNEL_LARGE;
26363 /*
26364 * first pass will use the same initial
26365@@ -310,11 +315,7 @@ repeat:
26366 __pgprot(PTE_IDENT_ATTR |
26367 _PAGE_PSE);
26368
26369- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
26370- PAGE_OFFSET + PAGE_SIZE-1;
26371-
26372- if (is_kernel_text(addr) ||
26373- is_kernel_text(addr2))
26374+ if (is_kernel_text(address, address + PMD_SIZE))
26375 prot = PAGE_KERNEL_LARGE_EXEC;
26376
26377 pages_2m++;
26378@@ -331,7 +332,7 @@ repeat:
26379 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
26380 pte += pte_ofs;
26381 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
26382- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
26383+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
26384 pgprot_t prot = PAGE_KERNEL;
26385 /*
26386 * first pass will use the same initial
26387@@ -339,7 +340,7 @@ repeat:
26388 */
26389 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
26390
26391- if (is_kernel_text(addr))
26392+ if (is_kernel_text(address, address + PAGE_SIZE))
26393 prot = PAGE_KERNEL_EXEC;
26394
26395 pages_4k++;
26396@@ -465,7 +466,7 @@ void __init native_pagetable_init(void)
26397
26398 pud = pud_offset(pgd, va);
26399 pmd = pmd_offset(pud, va);
26400- if (!pmd_present(*pmd))
26401+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
26402 break;
26403
26404 pte = pte_offset_kernel(pmd, va);
26405@@ -514,12 +515,10 @@ void __init early_ioremap_page_table_range_init(void)
26406
26407 static void __init pagetable_init(void)
26408 {
26409- pgd_t *pgd_base = swapper_pg_dir;
26410-
26411- permanent_kmaps_init(pgd_base);
26412+ permanent_kmaps_init(swapper_pg_dir);
26413 }
26414
26415-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
26416+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
26417 EXPORT_SYMBOL_GPL(__supported_pte_mask);
26418
26419 /* user-defined highmem size */
26420@@ -731,6 +730,12 @@ void __init mem_init(void)
26421
26422 pci_iommu_alloc();
26423
26424+#ifdef CONFIG_PAX_PER_CPU_PGD
26425+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
26426+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
26427+ KERNEL_PGD_PTRS);
26428+#endif
26429+
26430 #ifdef CONFIG_FLATMEM
26431 BUG_ON(!mem_map);
26432 #endif
26433@@ -757,7 +762,7 @@ void __init mem_init(void)
26434 reservedpages++;
26435
26436 codesize = (unsigned long) &_etext - (unsigned long) &_text;
26437- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
26438+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
26439 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
26440
26441 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
26442@@ -798,10 +803,10 @@ void __init mem_init(void)
26443 ((unsigned long)&__init_end -
26444 (unsigned long)&__init_begin) >> 10,
26445
26446- (unsigned long)&_etext, (unsigned long)&_edata,
26447- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
26448+ (unsigned long)&_sdata, (unsigned long)&_edata,
26449+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
26450
26451- (unsigned long)&_text, (unsigned long)&_etext,
26452+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
26453 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
26454
26455 /*
26456@@ -879,6 +884,7 @@ void set_kernel_text_rw(void)
26457 if (!kernel_set_to_readonly)
26458 return;
26459
26460+ start = ktla_ktva(start);
26461 pr_debug("Set kernel text: %lx - %lx for read write\n",
26462 start, start+size);
26463
26464@@ -893,6 +899,7 @@ void set_kernel_text_ro(void)
26465 if (!kernel_set_to_readonly)
26466 return;
26467
26468+ start = ktla_ktva(start);
26469 pr_debug("Set kernel text: %lx - %lx for read only\n",
26470 start, start+size);
26471
26472@@ -921,6 +928,7 @@ void mark_rodata_ro(void)
26473 unsigned long start = PFN_ALIGN(_text);
26474 unsigned long size = PFN_ALIGN(_etext) - start;
26475
26476+ start = ktla_ktva(start);
26477 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
26478 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
26479 size >> 10);
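
The rewritten is_kernel_text() above takes a [start, end) range rather than a single address and reports whether it intersects any region that must stay executable: .text, .init.text, optionally the ACPI wakeup trampoline, and the low BIOS window. Its long chained condition is just the negation of pairwise disjointness. A sketch of the underlying test for one region (the patch itself uses inclusive upper bounds, so its comparison operators differ slightly):

#include <stdbool.h>

/* [start,end) and [lo,hi) are disjoint iff start >= hi || end <= lo,
 * so they overlap iff both negations hold. */
static bool overlaps(unsigned long start, unsigned long end,
		     unsigned long lo, unsigned long hi)
{
	return start < hi && end > lo;
}

is_kernel_text(start, end) then returns 1 exactly when the ranges overlap for any executable region, which is why a 2MB or 4KB mapping candidate can be tested in one call instead of probing two sample addresses as the old code did.
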
26480diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
26481index 3baff25..8b37564 100644
26482--- a/arch/x86/mm/init_64.c
26483+++ b/arch/x86/mm/init_64.c
26484@@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpages_on);
26485 * around without checking the pgd every time.
26486 */
26487
26488-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
26489+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
26490 EXPORT_SYMBOL_GPL(__supported_pte_mask);
26491
26492 int force_personality32;
26493@@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
26494
26495 for (address = start; address <= end; address += PGDIR_SIZE) {
26496 const pgd_t *pgd_ref = pgd_offset_k(address);
26497+
26498+#ifdef CONFIG_PAX_PER_CPU_PGD
26499+ unsigned long cpu;
26500+#else
26501 struct page *page;
26502+#endif
26503
26504 if (pgd_none(*pgd_ref))
26505 continue;
26506
26507 spin_lock(&pgd_lock);
26508+
26509+#ifdef CONFIG_PAX_PER_CPU_PGD
26510+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
26511+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
26512+#else
26513 list_for_each_entry(page, &pgd_list, lru) {
26514 pgd_t *pgd;
26515 spinlock_t *pgt_lock;
26516@@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
26517 /* the pgt_lock only for Xen */
26518 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
26519 spin_lock(pgt_lock);
26520+#endif
26521
26522 if (pgd_none(*pgd))
26523 set_pgd(pgd, *pgd_ref);
26524@@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
26525 BUG_ON(pgd_page_vaddr(*pgd)
26526 != pgd_page_vaddr(*pgd_ref));
26527
26528+#ifndef CONFIG_PAX_PER_CPU_PGD
26529 spin_unlock(pgt_lock);
26530+#endif
26531+
26532 }
26533 spin_unlock(&pgd_lock);
26534 }
26535@@ -161,7 +175,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
26536 {
26537 if (pgd_none(*pgd)) {
26538 pud_t *pud = (pud_t *)spp_getpage();
26539- pgd_populate(&init_mm, pgd, pud);
26540+ pgd_populate_kernel(&init_mm, pgd, pud);
26541 if (pud != pud_offset(pgd, 0))
26542 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
26543 pud, pud_offset(pgd, 0));
26544@@ -173,7 +187,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
26545 {
26546 if (pud_none(*pud)) {
26547 pmd_t *pmd = (pmd_t *) spp_getpage();
26548- pud_populate(&init_mm, pud, pmd);
26549+ pud_populate_kernel(&init_mm, pud, pmd);
26550 if (pmd != pmd_offset(pud, 0))
26551 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
26552 pmd, pmd_offset(pud, 0));
26553@@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
26554 pmd = fill_pmd(pud, vaddr);
26555 pte = fill_pte(pmd, vaddr);
26556
26557+ pax_open_kernel();
26558 set_pte(pte, new_pte);
26559+ pax_close_kernel();
26560
26561 /*
26562 * It's enough to flush this one mapping.
26563@@ -261,14 +277,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
26564 pgd = pgd_offset_k((unsigned long)__va(phys));
26565 if (pgd_none(*pgd)) {
26566 pud = (pud_t *) spp_getpage();
26567- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
26568- _PAGE_USER));
26569+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
26570 }
26571 pud = pud_offset(pgd, (unsigned long)__va(phys));
26572 if (pud_none(*pud)) {
26573 pmd = (pmd_t *) spp_getpage();
26574- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
26575- _PAGE_USER));
26576+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
26577 }
26578 pmd = pmd_offset(pud, phys);
26579 BUG_ON(!pmd_none(*pmd));
26580@@ -329,7 +343,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
26581 if (pfn >= pgt_buf_top)
26582 panic("alloc_low_page: ran out of memory");
26583
26584- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
26585+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
26586 clear_page(adr);
26587 *phys = pfn * PAGE_SIZE;
26588 return adr;
26589@@ -345,7 +359,7 @@ static __ref void *map_low_page(void *virt)
26590
26591 phys = __pa(virt);
26592 left = phys & (PAGE_SIZE - 1);
26593- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
26594+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
26595 adr = (void *)(((unsigned long)adr) | left);
26596
26597 return adr;
26598@@ -553,7 +567,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
26599 unmap_low_page(pmd);
26600
26601 spin_lock(&init_mm.page_table_lock);
26602- pud_populate(&init_mm, pud, __va(pmd_phys));
26603+ pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
26604 spin_unlock(&init_mm.page_table_lock);
26605 }
26606 __flush_tlb_all();
26607@@ -599,7 +613,7 @@ kernel_physical_mapping_init(unsigned long start,
26608 unmap_low_page(pud);
26609
26610 spin_lock(&init_mm.page_table_lock);
26611- pgd_populate(&init_mm, pgd, __va(pud_phys));
26612+ pgd_populate_kernel(&init_mm, pgd, __va(pud_phys));
26613 spin_unlock(&init_mm.page_table_lock);
26614 pgd_changed = true;
26615 }
26616@@ -691,6 +705,12 @@ void __init mem_init(void)
26617
26618 pci_iommu_alloc();
26619
26620+#ifdef CONFIG_PAX_PER_CPU_PGD
26621+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
26622+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
26623+ KERNEL_PGD_PTRS);
26624+#endif
26625+
26626 /* clear_bss() already clear the empty_zero_page */
26627
26628 reservedpages = 0;
26629@@ -851,8 +871,8 @@ int kern_addr_valid(unsigned long addr)
26630 static struct vm_area_struct gate_vma = {
26631 .vm_start = VSYSCALL_START,
26632 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
26633- .vm_page_prot = PAGE_READONLY_EXEC,
26634- .vm_flags = VM_READ | VM_EXEC
26635+ .vm_page_prot = PAGE_READONLY,
26636+ .vm_flags = VM_READ
26637 };
26638
26639 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
26640@@ -886,7 +906,7 @@ int in_gate_area_no_mm(unsigned long addr)
26641
26642 const char *arch_vma_name(struct vm_area_struct *vma)
26643 {
26644- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26645+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26646 return "[vdso]";
26647 if (vma == &gate_vma)
26648 return "[vsyscall]";
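
Both mem_init() hunks and the sync_global_pgds() rework above implement the CONFIG_PAX_PER_CPU_PGD model: each CPU owns its own top-level page directory, the kernel half of swapper_pg_dir is copied into every one at boot, and any later change to a kernel mapping has to visit all per-CPU pgds instead of the shared pgd_list. clone_pgd_range() itself amounts to a typed copy of pgd slots, roughly:

/* Sketch only; the kernel helper is equivalent to a memcpy of
 * count pgd_t entries. */
typedef struct { unsigned long pgd; } pgd_sketch_t;

static void clone_pgd_range_sketch(pgd_sketch_t *dst, const pgd_sketch_t *src,
				   unsigned int count)
{
	while (count--)
		*dst++ = *src++;
}
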
26649diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
26650index 7b179b4..6bd1777 100644
26651--- a/arch/x86/mm/iomap_32.c
26652+++ b/arch/x86/mm/iomap_32.c
26653@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
26654 type = kmap_atomic_idx_push();
26655 idx = type + KM_TYPE_NR * smp_processor_id();
26656 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
26657+
26658+ pax_open_kernel();
26659 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
26660+ pax_close_kernel();
26661+
26662 arch_flush_lazy_mmu_mode();
26663
26664 return (void *)vaddr;
26665diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
26666index 78fe3f1..2f9433c 100644
26667--- a/arch/x86/mm/ioremap.c
26668+++ b/arch/x86/mm/ioremap.c
26669@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
26670 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
26671 int is_ram = page_is_ram(pfn);
26672
26673- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
26674+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
26675 return NULL;
26676 WARN_ON_ONCE(is_ram);
26677 }
26678@@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioremap_prot);
26679 *
26680 * Caller must ensure there is only one unmapping for the same pointer.
26681 */
26682-void iounmap(volatile void __iomem *addr)
26683+void iounmap(const volatile void __iomem *addr)
26684 {
26685 struct vm_struct *p, *o;
26686
26687@@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
26688
26689 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
26690 if (page_is_ram(start >> PAGE_SHIFT))
26691+#ifdef CONFIG_HIGHMEM
26692+ if ((start >> PAGE_SHIFT) < max_low_pfn)
26693+#endif
26694 return __va(phys);
26695
26696 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
26697@@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
26698 early_param("early_ioremap_debug", early_ioremap_debug_setup);
26699
26700 static __initdata int after_paging_init;
26701-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
26702+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
26703
26704 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
26705 {
26706@@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
26707 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
26708
26709 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
26710- memset(bm_pte, 0, sizeof(bm_pte));
26711- pmd_populate_kernel(&init_mm, pmd, bm_pte);
26712+ pmd_populate_user(&init_mm, pmd, bm_pte);
26713
26714 /*
26715 * The boot-ioremap range spans multiple pmds, for which
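
The CONFIG_HIGHMEM guard added to xlate_dev_mem_ptr() above encodes the rule that __va() is only valid for lowmem: on 32-bit, pages at or above max_low_pfn have no permanent linear mapping and must be reached through ioremap_cache() instead. As a one-line restatement of the dispatch (illustrative, not kernel code):

static int has_linear_mapping(unsigned long pfn, unsigned long max_low_pfn)
{
	return pfn < max_low_pfn;	/* lowmem: a direct __va() alias exists */
}
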
26716diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
26717index d87dd6d..bf3fa66 100644
26718--- a/arch/x86/mm/kmemcheck/kmemcheck.c
26719+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
26720@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
26721 * memory (e.g. tracked pages)? For now, we need this to avoid
26722 * invoking kmemcheck for PnP BIOS calls.
26723 */
26724- if (regs->flags & X86_VM_MASK)
26725+ if (v8086_mode(regs))
26726 return false;
26727- if (regs->cs != __KERNEL_CS)
26728+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
26729 return false;
26730
26731 pte = kmemcheck_pte_lookup(address);
26732diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
26733index 845df68..1d8d29f 100644
26734--- a/arch/x86/mm/mmap.c
26735+++ b/arch/x86/mm/mmap.c
26736@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
26737 * Leave an at least ~128 MB hole with possible stack randomization.
26738 */
26739 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
26740-#define MAX_GAP (TASK_SIZE/6*5)
26741+#define MAX_GAP (pax_task_size/6*5)
26742
26743 static int mmap_is_legacy(void)
26744 {
26745@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
26746 return rnd << PAGE_SHIFT;
26747 }
26748
26749-static unsigned long mmap_base(void)
26750+static unsigned long mmap_base(struct mm_struct *mm)
26751 {
26752 unsigned long gap = rlimit(RLIMIT_STACK);
26753+ unsigned long pax_task_size = TASK_SIZE;
26754+
26755+#ifdef CONFIG_PAX_SEGMEXEC
26756+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
26757+ pax_task_size = SEGMEXEC_TASK_SIZE;
26758+#endif
26759
26760 if (gap < MIN_GAP)
26761 gap = MIN_GAP;
26762 else if (gap > MAX_GAP)
26763 gap = MAX_GAP;
26764
26765- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
26766+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
26767 }
26768
26769 /*
26770 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
26771 * does, but not when emulating X86_32
26772 */
26773-static unsigned long mmap_legacy_base(void)
26774+static unsigned long mmap_legacy_base(struct mm_struct *mm)
26775 {
26776- if (mmap_is_ia32())
26777+ if (mmap_is_ia32()) {
26778+
26779+#ifdef CONFIG_PAX_SEGMEXEC
26780+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
26781+ return SEGMEXEC_TASK_UNMAPPED_BASE;
26782+ else
26783+#endif
26784+
26785 return TASK_UNMAPPED_BASE;
26786- else
26787+ } else
26788 return TASK_UNMAPPED_BASE + mmap_rnd();
26789 }
26790
26791@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
26792 void arch_pick_mmap_layout(struct mm_struct *mm)
26793 {
26794 if (mmap_is_legacy()) {
26795- mm->mmap_base = mmap_legacy_base();
26796+ mm->mmap_base = mmap_legacy_base(mm);
26797+
26798+#ifdef CONFIG_PAX_RANDMMAP
26799+ if (mm->pax_flags & MF_PAX_RANDMMAP)
26800+ mm->mmap_base += mm->delta_mmap;
26801+#endif
26802+
26803 mm->get_unmapped_area = arch_get_unmapped_area;
26804 mm->unmap_area = arch_unmap_area;
26805 } else {
26806- mm->mmap_base = mmap_base();
26807+ mm->mmap_base = mmap_base(mm);
26808+
26809+#ifdef CONFIG_PAX_RANDMMAP
26810+ if (mm->pax_flags & MF_PAX_RANDMMAP)
26811+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
26812+#endif
26813+
26814 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
26815 mm->unmap_area = arch_unmap_area_topdown;
26816 }
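
The reworked mmap_base() above clamps the stack rlimit between MIN_GAP and MAX_GAP and places the mmap base that far, plus the random offset, below the address-space ceiling; threading the mm through lets SEGMEXEC tasks, whose ceiling is halved to SEGMEXEC_TASK_SIZE, get a correspondingly lower base. A userspace restatement with illustrative values:

#include <stdio.h>

#define PAGE_ALIGN(x)	(((x) + 4095UL) & ~4095UL)

static unsigned long mmap_base_sketch(unsigned long task_size,
				      unsigned long stack_rlimit,
				      unsigned long rnd)
{
	unsigned long min_gap = 128UL << 20;		/* ~128MB floor */
	unsigned long max_gap = task_size / 6 * 5;	/* 5/6 of the ceiling */
	unsigned long gap = stack_rlimit;

	if (gap < min_gap)
		gap = min_gap;
	else if (gap > max_gap)
		gap = max_gap;
	return PAGE_ALIGN(task_size - gap - rnd);
}

int main(void)
{
	/* 3GB task size, 8MB stack rlimit, no randomization: 0xb8000000 */
	printf("%#lx\n", mmap_base_sketch(0xc0000000UL, 8UL << 20, 0));
	return 0;
}
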
26817diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
26818index dc0b727..f612039 100644
26819--- a/arch/x86/mm/mmio-mod.c
26820+++ b/arch/x86/mm/mmio-mod.c
26821@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
26822 break;
26823 default:
26824 {
26825- unsigned char *ip = (unsigned char *)instptr;
26826+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
26827 my_trace->opcode = MMIO_UNKNOWN_OP;
26828 my_trace->width = 0;
26829 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
26830@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
26831 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
26832 void __iomem *addr)
26833 {
26834- static atomic_t next_id;
26835+ static atomic_unchecked_t next_id;
26836 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
26837 /* These are page-unaligned. */
26838 struct mmiotrace_map map = {
26839@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
26840 .private = trace
26841 },
26842 .phys = offset,
26843- .id = atomic_inc_return(&next_id)
26844+ .id = atomic_inc_return_unchecked(&next_id)
26845 };
26846 map.map_id = trace->id;
26847
26848@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
26849 ioremap_trace_core(offset, size, addr);
26850 }
26851
26852-static void iounmap_trace_core(volatile void __iomem *addr)
26853+static void iounmap_trace_core(const volatile void __iomem *addr)
26854 {
26855 struct mmiotrace_map map = {
26856 .phys = 0,
26857@@ -328,7 +328,7 @@ not_enabled:
26858 }
26859 }
26860
26861-void mmiotrace_iounmap(volatile void __iomem *addr)
26862+void mmiotrace_iounmap(const volatile void __iomem *addr)
26863 {
26864 might_sleep();
26865 if (is_enabled()) /* recheck and proper locking in *_core() */
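
The atomic_t to atomic_unchecked_t switch for next_id above is this patch's REFCOUNT opt-out: ordinary atomic_t operations are instrumented to detect overflow, so counters where wrapping is harmless, like a trace id, use the _unchecked variants that skip the check. Conceptually (a sketch, not the kernel's implementation):

typedef struct { volatile int counter; } atomic_unchecked_sketch_t;

static inline int atomic_inc_return_unchecked_sketch(atomic_unchecked_sketch_t *v)
{
	/* a plain atomic increment, deliberately without overflow detection */
	return __sync_add_and_fetch(&v->counter, 1);
}
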
26866diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
26867index b008656..773eac2 100644
26868--- a/arch/x86/mm/pageattr-test.c
26869+++ b/arch/x86/mm/pageattr-test.c
26870@@ -36,7 +36,7 @@ enum {
26871
26872 static int pte_testbit(pte_t pte)
26873 {
26874- return pte_flags(pte) & _PAGE_UNUSED1;
26875+ return pte_flags(pte) & _PAGE_CPA_TEST;
26876 }
26877
26878 struct split_state {
26879diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
26880index a718e0d..77419bc 100644
26881--- a/arch/x86/mm/pageattr.c
26882+++ b/arch/x86/mm/pageattr.c
26883@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
26884 */
26885 #ifdef CONFIG_PCI_BIOS
26886 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
26887- pgprot_val(forbidden) |= _PAGE_NX;
26888+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
26889 #endif
26890
26891 /*
26892@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
26893 * Does not cover __inittext since that is gone later on. On
26894 * 64bit we do not enforce !NX on the low mapping
26895 */
26896- if (within(address, (unsigned long)_text, (unsigned long)_etext))
26897- pgprot_val(forbidden) |= _PAGE_NX;
26898+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
26899+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
26900
26901+#ifdef CONFIG_DEBUG_RODATA
26902 /*
26903 * The .rodata section needs to be read-only. Using the pfn
26904 * catches all aliases.
26905@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
26906 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
26907 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
26908 pgprot_val(forbidden) |= _PAGE_RW;
26909+#endif
26910
26911 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
26912 /*
26913@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
26914 }
26915 #endif
26916
26917+#ifdef CONFIG_PAX_KERNEXEC
26918+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
26919+ pgprot_val(forbidden) |= _PAGE_RW;
26920+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
26921+ }
26922+#endif
26923+
26924 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
26925
26926 return prot;
26927@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
26928 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
26929 {
26930 /* change init_mm */
26931+ pax_open_kernel();
26932 set_pte_atomic(kpte, pte);
26933+
26934 #ifdef CONFIG_X86_32
26935 if (!SHARED_KERNEL_PMD) {
26936+
26937+#ifdef CONFIG_PAX_PER_CPU_PGD
26938+ unsigned long cpu;
26939+#else
26940 struct page *page;
26941+#endif
26942
26943+#ifdef CONFIG_PAX_PER_CPU_PGD
26944+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
26945+ pgd_t *pgd = get_cpu_pgd(cpu);
26946+#else
26947 list_for_each_entry(page, &pgd_list, lru) {
26948- pgd_t *pgd;
26949+ pgd_t *pgd = (pgd_t *)page_address(page);
26950+#endif
26951+
26952 pud_t *pud;
26953 pmd_t *pmd;
26954
26955- pgd = (pgd_t *)page_address(page) + pgd_index(address);
26956+ pgd += pgd_index(address);
26957 pud = pud_offset(pgd, address);
26958 pmd = pmd_offset(pud, address);
26959 set_pte_atomic((pte_t *)pmd, pte);
26960 }
26961 }
26962 #endif
26963+ pax_close_kernel();
26964 }
26965
26966 static int
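
These static_protections() hunks all follow one shape, visible in the closing lines above: collect into "forbidden" every permission bit that must not be granted for this address or pfn, then mask it out of the requested protection. The KERNEXEC addition forbids _PAGE_RW (the kernel image stays read-only) and _PAGE_NX (it stays executable) for pfns inside the image, with the usual "& __supported_pte_mask" guard so NX is only touched on CPUs that have it. A sketch with illustrative bit values:

#define SK_PAGE_RW	(1ULL << 1)
#define SK_PAGE_NX	(1ULL << 63)

static unsigned long long apply_forbidden(unsigned long long prot,
					  int in_kernel_image,
					  unsigned long long supported_pte_mask)
{
	unsigned long long forbidden = 0;

	if (in_kernel_image) {
		forbidden |= SK_PAGE_RW;			/* no writable text */
		forbidden |= SK_PAGE_NX & supported_pte_mask;	/* never mark NX */
	}
	return prot & ~forbidden;
}
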
26967diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
26968index 0eb572e..92f5c1e 100644
26969--- a/arch/x86/mm/pat.c
26970+++ b/arch/x86/mm/pat.c
26971@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
26972
26973 if (!entry) {
26974 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
26975- current->comm, current->pid, start, end - 1);
26976+ current->comm, task_pid_nr(current), start, end - 1);
26977 return -EINVAL;
26978 }
26979
26980@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26981
26982 while (cursor < to) {
26983 if (!devmem_is_allowed(pfn)) {
26984- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
26985- current->comm, from, to - 1);
26986+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
26987+ current->comm, from, to - 1, cursor);
26988 return 0;
26989 }
26990 cursor += PAGE_SIZE;
26991@@ -570,7 +570,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
26992 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
26993 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
26994 "for [mem %#010Lx-%#010Lx]\n",
26995- current->comm, current->pid,
26996+ current->comm, task_pid_nr(current),
26997 cattr_name(flags),
26998 base, (unsigned long long)(base + size-1));
26999 return -EINVAL;
27000@@ -605,7 +605,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
27001 flags = lookup_memtype(paddr);
27002 if (want_flags != flags) {
27003 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
27004- current->comm, current->pid,
27005+ current->comm, task_pid_nr(current),
27006 cattr_name(want_flags),
27007 (unsigned long long)paddr,
27008 (unsigned long long)(paddr + size - 1),
27009@@ -627,7 +627,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
27010 free_memtype(paddr, paddr + size);
27011 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
27012 " for [mem %#010Lx-%#010Lx], got %s\n",
27013- current->comm, current->pid,
27014+ current->comm, task_pid_nr(current),
27015 cattr_name(want_flags),
27016 (unsigned long long)paddr,
27017 (unsigned long long)(paddr + size - 1),
27018diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
27019index 9f0614d..92ae64a 100644
27020--- a/arch/x86/mm/pf_in.c
27021+++ b/arch/x86/mm/pf_in.c
27022@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
27023 int i;
27024 enum reason_type rv = OTHERS;
27025
27026- p = (unsigned char *)ins_addr;
27027+ p = (unsigned char *)ktla_ktva(ins_addr);
27028 p += skip_prefix(p, &prf);
27029 p += get_opcode(p, &opcode);
27030
27031@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
27032 struct prefix_bits prf;
27033 int i;
27034
27035- p = (unsigned char *)ins_addr;
27036+ p = (unsigned char *)ktla_ktva(ins_addr);
27037 p += skip_prefix(p, &prf);
27038 p += get_opcode(p, &opcode);
27039
27040@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
27041 struct prefix_bits prf;
27042 int i;
27043
27044- p = (unsigned char *)ins_addr;
27045+ p = (unsigned char *)ktla_ktva(ins_addr);
27046 p += skip_prefix(p, &prf);
27047 p += get_opcode(p, &opcode);
27048
27049@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
27050 struct prefix_bits prf;
27051 int i;
27052
27053- p = (unsigned char *)ins_addr;
27054+ p = (unsigned char *)ktla_ktva(ins_addr);
27055 p += skip_prefix(p, &prf);
27056 p += get_opcode(p, &opcode);
27057 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
27058@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
27059 struct prefix_bits prf;
27060 int i;
27061
27062- p = (unsigned char *)ins_addr;
27063+ p = (unsigned char *)ktla_ktva(ins_addr);
27064 p += skip_prefix(p, &prf);
27065 p += get_opcode(p, &opcode);
27066 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
27067diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
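
Every decoder entry point in pf_in.c now translates ins_addr through ktla_ktva() before dereferencing it. Under the i386 KERNEXEC scheme this patch uses elsewhere, kernel code runs through a KERNEL_CS with a non-zero base, so the address in regs->ip (a segmented, "logical" address) differs from the linear address the instruction bytes live at by a constant; ktla_ktva()/ktva_ktla() add and subtract that constant, and collapse to no-ops when KERNEXEC is off. A sketch with an assumed bias (the real value depends on the configuration):

#define KTLA_KTVA_BIAS_SKETCH	0x10000000UL	/* assumption, not the real value */

static unsigned long ktla_ktva_sketch(unsigned long addr)
{
	return addr + KTLA_KTVA_BIAS_SKETCH;
}

static unsigned long ktva_ktla_sketch(unsigned long addr)
{
	return addr - KTLA_KTVA_BIAS_SKETCH;
}
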
27068index 8573b83..4f3ed7e 100644
27069--- a/arch/x86/mm/pgtable.c
27070+++ b/arch/x86/mm/pgtable.c
27071@@ -84,10 +84,64 @@ static inline void pgd_list_del(pgd_t *pgd)
27072 list_del(&page->lru);
27073 }
27074
27075-#define UNSHARED_PTRS_PER_PGD \
27076- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
27077+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27078+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
27079
27080+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
27081+{
27082+ unsigned int count = USER_PGD_PTRS;
27083
27084+ while (count--)
27085+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
27086+}
27087+#endif
27088+
27089+#ifdef CONFIG_PAX_PER_CPU_PGD
27090+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
27091+{
27092+ unsigned int count = USER_PGD_PTRS;
27093+
27094+ while (count--) {
27095+ pgd_t pgd;
27096+
27097+#ifdef CONFIG_X86_64
27098+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
27099+#else
27100+ pgd = *src++;
27101+#endif
27102+
27103+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27104+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
27105+#endif
27106+
27107+ *dst++ = pgd;
27108+ }
27109+
27110+}
27111+#endif
27112+
27113+#ifdef CONFIG_X86_64
27114+#define pxd_t pud_t
27115+#define pyd_t pgd_t
27116+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
27117+#define pxd_free(mm, pud) pud_free((mm), (pud))
27118+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
27119+#define pyd_offset(mm, address) pgd_offset((mm), (address))
27120+#define PYD_SIZE PGDIR_SIZE
27121+#else
27122+#define pxd_t pmd_t
27123+#define pyd_t pud_t
27124+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
27125+#define pxd_free(mm, pud) pmd_free((mm), (pud))
27126+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
27127+#define pyd_offset(mm, address) pud_offset((mm), (address))
27128+#define PYD_SIZE PUD_SIZE
27129+#endif
27130+
27131+#ifdef CONFIG_PAX_PER_CPU_PGD
27132+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
27133+static inline void pgd_dtor(pgd_t *pgd) {}
27134+#else
27135 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
27136 {
27137 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
27138@@ -128,6 +182,7 @@ static void pgd_dtor(pgd_t *pgd)
27139 pgd_list_del(pgd);
27140 spin_unlock(&pgd_lock);
27141 }
27142+#endif
27143
27144 /*
27145 * List of all pgd's needed for non-PAE so it can invalidate entries
27146@@ -140,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
27147 * -- wli
27148 */
27149
27150-#ifdef CONFIG_X86_PAE
27151+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
27152 /*
27153 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
27154 * updating the top-level pagetable entries to guarantee the
27155@@ -152,7 +207,7 @@ static void pgd_dtor(pgd_t *pgd)
27156 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
27157 * and initialize the kernel pmds here.
27158 */
27159-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
27160+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
27161
27162 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
27163 {
27164@@ -170,36 +225,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
27165 */
27166 flush_tlb_mm(mm);
27167 }
27168+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
27169+#define PREALLOCATED_PXDS USER_PGD_PTRS
27170 #else /* !CONFIG_X86_PAE */
27171
27172 /* No need to prepopulate any pagetable entries in non-PAE modes. */
27173-#define PREALLOCATED_PMDS 0
27174+#define PREALLOCATED_PXDS 0
27175
27176 #endif /* CONFIG_X86_PAE */
27177
27178-static void free_pmds(pmd_t *pmds[])
27179+static void free_pxds(pxd_t *pxds[])
27180 {
27181 int i;
27182
27183- for(i = 0; i < PREALLOCATED_PMDS; i++)
27184- if (pmds[i])
27185- free_page((unsigned long)pmds[i]);
27186+ for(i = 0; i < PREALLOCATED_PXDS; i++)
27187+ if (pxds[i])
27188+ free_page((unsigned long)pxds[i]);
27189 }
27190
27191-static int preallocate_pmds(pmd_t *pmds[])
27192+static int preallocate_pxds(pxd_t *pxds[])
27193 {
27194 int i;
27195 bool failed = false;
27196
27197- for(i = 0; i < PREALLOCATED_PMDS; i++) {
27198- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
27199- if (pmd == NULL)
27200+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
27201+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
27202+ if (pxd == NULL)
27203 failed = true;
27204- pmds[i] = pmd;
27205+ pxds[i] = pxd;
27206 }
27207
27208 if (failed) {
27209- free_pmds(pmds);
27210+ free_pxds(pxds);
27211 return -ENOMEM;
27212 }
27213
27214@@ -212,51 +269,55 @@ static int preallocate_pmds(pmd_t *pmds[])
27215 * preallocate which never got a corresponding vma will need to be
27216 * freed manually.
27217 */
27218-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
27219+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
27220 {
27221 int i;
27222
27223- for(i = 0; i < PREALLOCATED_PMDS; i++) {
27224+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
27225 pgd_t pgd = pgdp[i];
27226
27227 if (pgd_val(pgd) != 0) {
27228- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
27229+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
27230
27231- pgdp[i] = native_make_pgd(0);
27232+ set_pgd(pgdp + i, native_make_pgd(0));
27233
27234- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
27235- pmd_free(mm, pmd);
27236+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
27237+ pxd_free(mm, pxd);
27238 }
27239 }
27240 }
27241
27242-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
27243+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
27244 {
27245- pud_t *pud;
27246+ pyd_t *pyd;
27247 unsigned long addr;
27248 int i;
27249
27250- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
27251+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
27252 return;
27253
27254- pud = pud_offset(pgd, 0);
27255+#ifdef CONFIG_X86_64
27256+ pyd = pyd_offset(mm, 0L);
27257+#else
27258+ pyd = pyd_offset(pgd, 0L);
27259+#endif
27260
27261- for (addr = i = 0; i < PREALLOCATED_PMDS;
27262- i++, pud++, addr += PUD_SIZE) {
27263- pmd_t *pmd = pmds[i];
27264+ for (addr = i = 0; i < PREALLOCATED_PXDS;
27265+ i++, pyd++, addr += PYD_SIZE) {
27266+ pxd_t *pxd = pxds[i];
27267
27268 if (i >= KERNEL_PGD_BOUNDARY)
27269- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
27270- sizeof(pmd_t) * PTRS_PER_PMD);
27271+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
27272+ sizeof(pxd_t) * PTRS_PER_PMD);
27273
27274- pud_populate(mm, pud, pmd);
27275+ pyd_populate(mm, pyd, pxd);
27276 }
27277 }
27278
27279 pgd_t *pgd_alloc(struct mm_struct *mm)
27280 {
27281 pgd_t *pgd;
27282- pmd_t *pmds[PREALLOCATED_PMDS];
27283+ pxd_t *pxds[PREALLOCATED_PXDS];
27284
27285 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
27286
27287@@ -265,11 +326,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
27288
27289 mm->pgd = pgd;
27290
27291- if (preallocate_pmds(pmds) != 0)
27292+ if (preallocate_pxds(pxds) != 0)
27293 goto out_free_pgd;
27294
27295 if (paravirt_pgd_alloc(mm) != 0)
27296- goto out_free_pmds;
27297+ goto out_free_pxds;
27298
27299 /*
27300 * Make sure that pre-populating the pmds is atomic with
27301@@ -279,14 +340,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
27302 spin_lock(&pgd_lock);
27303
27304 pgd_ctor(mm, pgd);
27305- pgd_prepopulate_pmd(mm, pgd, pmds);
27306+ pgd_prepopulate_pxd(mm, pgd, pxds);
27307
27308 spin_unlock(&pgd_lock);
27309
27310 return pgd;
27311
27312-out_free_pmds:
27313- free_pmds(pmds);
27314+out_free_pxds:
27315+ free_pxds(pxds);
27316 out_free_pgd:
27317 free_page((unsigned long)pgd);
27318 out:
27319@@ -295,7 +356,7 @@ out:
27320
27321 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
27322 {
27323- pgd_mop_up_pmds(mm, pgd);
27324+ pgd_mop_up_pxds(mm, pgd);
27325 pgd_dtor(pgd);
27326 paravirt_pgd_free(mm, pgd);
27327 free_page((unsigned long)pgd);
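
The pgtable.c hunk above folds two formerly separate preallocation paths into one body: on 64-bit with a per-CPU PGD the preallocated level is the pud, on 32-bit PAE it is the pmd, and the pxd_t/pyd_t macro aliases let free_pxds()/preallocate_pxds()/pgd_prepopulate_pxd() compile once for either level. A minimal userspace model of that folding follows; the types, page size and counts are stand-ins, not kernel values.

#include <stdlib.h>

/* Illustrative model, not kernel code: which concrete type "pxd_t"
 * names and how many entries are preallocated is decided per build,
 * mirroring the #ifdef CONFIG_X86_64 block in the hunk above. */
#ifdef MODEL_64BIT
typedef struct { unsigned long val; } pud_like_t;
#define pxd_t			pud_like_t
#define PREALLOCATED_PXDS	16	/* stand-in for USER_PGD_PTRS */
#else
typedef struct { unsigned long val; } pmd_like_t;
#define pxd_t			pmd_like_t
#define PREALLOCATED_PXDS	4	/* PAE: one pmd per pgd entry */
#endif

static void free_pxds(pxd_t *pxds[])
{
	int i;

	for (i = 0; i < PREALLOCATED_PXDS; i++)
		free(pxds[i]);			/* free(NULL) is a no-op */
}

static int preallocate_pxds(pxd_t *pxds[])
{
	int i, failed = 0;

	for (i = 0; i < PREALLOCATED_PXDS; i++) {
		pxd_t *pxd = calloc(1, 4096);	/* __get_free_page stand-in */
		if (!pxd)
			failed = 1;
		pxds[i] = pxd;
	}
	if (failed) {
		free_pxds(pxds);
		return -1;			/* -ENOMEM in the kernel */
	}
	return 0;
}

int main(void)
{
	pxd_t *pxds[PREALLOCATED_PXDS];

	if (preallocate_pxds(pxds))
		return 1;
	free_pxds(pxds);
	return 0;
}
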
27328diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
27329index a69bcb8..19068ab 100644
27330--- a/arch/x86/mm/pgtable_32.c
27331+++ b/arch/x86/mm/pgtable_32.c
27332@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
27333 return;
27334 }
27335 pte = pte_offset_kernel(pmd, vaddr);
27336+
27337+ pax_open_kernel();
27338 if (pte_val(pteval))
27339 set_pte_at(&init_mm, vaddr, pte, pteval);
27340 else
27341 pte_clear(&init_mm, vaddr, pte);
27342+ pax_close_kernel();
27343
27344 /*
27345 * It's enough to flush this one mapping.
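
set_pte_vaddr() is now bracketed by pax_open_kernel()/pax_close_kernel(), which under PaX KERNEXEC briefly permits writes to otherwise read-only kernel page-table pages around the single store. A userspace analogue of the same bracket pattern, with mprotect() standing in for whatever the kernel side does (purely illustrative, no kernel API):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Illustrative analogue: the data being updated sits behind a
 * read-only mapping, and the writer opens a short write window
 * around the one store, then closes it again. */
static char *table;
static long pagesz;

static void open_window(void)
{
	mprotect(table, pagesz, PROT_READ | PROT_WRITE);
}

static void close_window(void)
{
	mprotect(table, pagesz, PROT_READ);
}

int main(void)
{
	pagesz = sysconf(_SC_PAGESIZE);
	table = mmap(NULL, pagesz, PROT_READ,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (table == MAP_FAILED)
		return 1;

	open_window();
	strcpy(table, "pte update");	/* the set_pte_at() moment */
	close_window();

	puts(table);			/* reads remain legal throughout */
	return 0;
}
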
27346diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
27347index 410531d..0f16030 100644
27348--- a/arch/x86/mm/setup_nx.c
27349+++ b/arch/x86/mm/setup_nx.c
27350@@ -5,8 +5,10 @@
27351 #include <asm/pgtable.h>
27352 #include <asm/proto.h>
27353
27354+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
27355 static int disable_nx __cpuinitdata;
27356
27357+#ifndef CONFIG_PAX_PAGEEXEC
27358 /*
27359 * noexec = on|off
27360 *
27361@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
27362 return 0;
27363 }
27364 early_param("noexec", noexec_setup);
27365+#endif
27366+
27367+#endif
27368
27369 void __cpuinit x86_configure_nx(void)
27370 {
27371+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
27372 if (cpu_has_nx && !disable_nx)
27373 __supported_pte_mask |= _PAGE_NX;
27374 else
27375+#endif
27376 __supported_pte_mask &= ~_PAGE_NX;
27377 }
27378
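
The setup_nx.c hunk compiles the NX plumbing only where NX can exist (64-bit or 32-bit PAE) and hides the noexec= knob under PAX_PAGEEXEC; everywhere else x86_configure_nx() reduces to unconditionally clearing _PAGE_NX from the supported mask. The mask logic in isolation, with _PAGE_NX at its real bit position:

#include <stdio.h>
#include <stdint.h>

/* Reduction of x86_configure_nx(): _PAGE_NX is bit 63 of an x86 PTE;
 * the function only ever toggles that one bit of the global mask that
 * later PTE construction is ANDed against. */
#define _PAGE_NX (1ULL << 63)

static uint64_t supported_pte_mask = ~0ULL;

static void configure_nx(int cpu_has_nx, int disable_nx)
{
	if (cpu_has_nx && !disable_nx)
		supported_pte_mask |= _PAGE_NX;
	else
		supported_pte_mask &= ~_PAGE_NX;
}

int main(void)
{
	configure_nx(1, 0);
	printf("NX %s\n", (supported_pte_mask & _PAGE_NX) ? "on" : "off");
	configure_nx(0, 0);
	printf("NX %s\n", (supported_pte_mask & _PAGE_NX) ? "on" : "off");
	return 0;
}
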
27379diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
27380index 60f926c..a710970 100644
27381--- a/arch/x86/mm/tlb.c
27382+++ b/arch/x86/mm/tlb.c
27383@@ -48,7 +48,11 @@ void leave_mm(int cpu)
27384 BUG();
27385 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
27386 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
27387+
27388+#ifndef CONFIG_PAX_PER_CPU_PGD
27389 load_cr3(swapper_pg_dir);
27390+#endif
27391+
27392 }
27393 }
27394 EXPORT_SYMBOL_GPL(leave_mm);
27395diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
27396index 877b9a1..a8ecf42 100644
27397--- a/arch/x86/net/bpf_jit.S
27398+++ b/arch/x86/net/bpf_jit.S
27399@@ -9,6 +9,7 @@
27400 */
27401 #include <linux/linkage.h>
27402 #include <asm/dwarf2.h>
27403+#include <asm/alternative-asm.h>
27404
27405 /*
27406 * Calling convention :
27407@@ -35,6 +36,7 @@ sk_load_word_positive_offset:
27408 jle bpf_slow_path_word
27409 mov (SKBDATA,%rsi),%eax
27410 bswap %eax /* ntohl() */
27411+ pax_force_retaddr
27412 ret
27413
27414 sk_load_half:
27415@@ -52,6 +54,7 @@ sk_load_half_positive_offset:
27416 jle bpf_slow_path_half
27417 movzwl (SKBDATA,%rsi),%eax
27418 rol $8,%ax # ntohs()
27419+ pax_force_retaddr
27420 ret
27421
27422 sk_load_byte:
27423@@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
27424 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
27425 jle bpf_slow_path_byte
27426 movzbl (SKBDATA,%rsi),%eax
27427+ pax_force_retaddr
27428 ret
27429
27430 /**
27431@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
27432 movzbl (SKBDATA,%rsi),%ebx
27433 and $15,%bl
27434 shl $2,%bl
27435+ pax_force_retaddr
27436 ret
27437
27438 /* rsi contains offset and can be scratched */
27439@@ -109,6 +114,7 @@ bpf_slow_path_word:
27440 js bpf_error
27441 mov -12(%rbp),%eax
27442 bswap %eax
27443+ pax_force_retaddr
27444 ret
27445
27446 bpf_slow_path_half:
27447@@ -117,12 +123,14 @@ bpf_slow_path_half:
27448 mov -12(%rbp),%ax
27449 rol $8,%ax
27450 movzwl %ax,%eax
27451+ pax_force_retaddr
27452 ret
27453
27454 bpf_slow_path_byte:
27455 bpf_slow_path_common(1)
27456 js bpf_error
27457 movzbl -12(%rbp),%eax
27458+ pax_force_retaddr
27459 ret
27460
27461 bpf_slow_path_byte_msh:
27462@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
27463 and $15,%al
27464 shl $2,%al
27465 xchg %eax,%ebx
27466+ pax_force_retaddr
27467 ret
27468
27469 #define sk_negative_common(SIZE) \
27470@@ -157,6 +166,7 @@ sk_load_word_negative_offset:
27471 sk_negative_common(4)
27472 mov (%rax), %eax
27473 bswap %eax
27474+ pax_force_retaddr
27475 ret
27476
27477 bpf_slow_path_half_neg:
27478@@ -168,6 +178,7 @@ sk_load_half_negative_offset:
27479 mov (%rax),%ax
27480 rol $8,%ax
27481 movzwl %ax,%eax
27482+ pax_force_retaddr
27483 ret
27484
27485 bpf_slow_path_byte_neg:
27486@@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
27487 .globl sk_load_byte_negative_offset
27488 sk_negative_common(1)
27489 movzbl (%rax), %eax
27490+ pax_force_retaddr
27491 ret
27492
27493 bpf_slow_path_byte_msh_neg:
27494@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
27495 and $15,%al
27496 shl $2,%al
27497 xchg %eax,%ebx
27498+ pax_force_retaddr
27499 ret
27500
27501 bpf_error:
27502@@ -197,4 +210,5 @@ bpf_error:
27503 xor %eax,%eax
27504 mov -8(%rbp),%rbx
27505 leaveq
27506+ pax_force_retaddr
27507 ret
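
Every ret in the BPF JIT helpers above gains pax_force_retaddr, which sanitizes the saved return address before it is used. The exact mechanism is a PaX internal; a common variant sets the top address bit so a corrupted return address either stays in the kernel half of the address space or becomes non-canonical and faults. A userspace model of that masking idea, with the mask choice an assumption rather than PaX code:

#include <stdint.h>
#include <stdio.h>

/* Illustrative model, not PaX code: OR bit 63 into a return address so
 * a value tampered to point at userspace turns non-canonical on x86-64
 * and faults instead of being returned to. */
static uint64_t force_retaddr(uint64_t ra)
{
	return ra | (1ULL << 63);
}

int main(void)
{
	uint64_t kernel_ra = 0xffffffff81000000ULL;	/* typical kernel text */
	uint64_t forged_ra = 0x0000000000400000ULL;	/* userspace target */

	printf("%016llx -> %016llx (unchanged)\n",
	       (unsigned long long)kernel_ra,
	       (unsigned long long)force_retaddr(kernel_ra));
	printf("%016llx -> %016llx (non-canonical, would fault)\n",
	       (unsigned long long)forged_ra,
	       (unsigned long long)force_retaddr(forged_ra));
	return 0;
}
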
27508diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
27509index 520d2bd..b895ef4 100644
27510--- a/arch/x86/net/bpf_jit_comp.c
27511+++ b/arch/x86/net/bpf_jit_comp.c
27512@@ -11,6 +11,7 @@
27513 #include <asm/cacheflush.h>
27514 #include <linux/netdevice.h>
27515 #include <linux/filter.h>
27516+#include <linux/random.h>
27517
27518 /*
27519 * Conventions :
27520@@ -48,13 +49,87 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
27521 return ptr + len;
27522 }
27523
27524+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
27525+#define MAX_INSTR_CODE_SIZE 96
27526+#else
27527+#define MAX_INSTR_CODE_SIZE 64
27528+#endif
27529+
27530 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
27531
27532 #define EMIT1(b1) EMIT(b1, 1)
27533 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
27534 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
27535 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
27536+
27537+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
27538+/* original constant will appear in ecx */
27539+#define DILUTE_CONST_SEQUENCE(_off, _key) \
27540+do { \
27541+ /* mov ecx, randkey */ \
27542+ EMIT1(0xb9); \
27543+ EMIT(_key, 4); \
27544+ /* xor ecx, randkey ^ off */ \
27545+ EMIT2(0x81, 0xf1); \
27546+ EMIT((_key) ^ (_off), 4); \
27547+} while (0)
27548+
27549+#define EMIT1_off32(b1, _off) \
27550+do { \
27551+ switch (b1) { \
27552+ case 0x05: /* add eax, imm32 */ \
27553+ case 0x2d: /* sub eax, imm32 */ \
27554+ case 0x25: /* and eax, imm32 */ \
27555+ case 0x0d: /* or eax, imm32 */ \
27556+ case 0xb8: /* mov eax, imm32 */ \
27557+ case 0x3d: /* cmp eax, imm32 */ \
27558+ case 0xa9: /* test eax, imm32 */ \
27559+ DILUTE_CONST_SEQUENCE(_off, randkey); \
27560+ EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
27561+ break; \
27562+ case 0xbb: /* mov ebx, imm32 */ \
27563+ DILUTE_CONST_SEQUENCE(_off, randkey); \
27564+ /* mov ebx, ecx */ \
27565+ EMIT2(0x89, 0xcb); \
27566+ break; \
27567+ case 0xbe: /* mov esi, imm32 */ \
27568+ DILUTE_CONST_SEQUENCE(_off, randkey); \
27569+ /* mov esi, ecx */ \
27570+ EMIT2(0x89, 0xce); \
27571+ break; \
27572+ case 0xe9: /* jmp rel imm32 */ \
27573+ EMIT1(b1); \
27574+ EMIT(_off, 4); \
27575+ /* prevent fall-through, we're not called if off = 0 */ \
27576+ EMIT(0xcccccccc, 4); \
27577+ EMIT(0xcccccccc, 4); \
27578+ break; \
27579+ default: \
27580+ EMIT1(b1); \
27581+ EMIT(_off, 4); \
27582+ } \
27583+} while (0)
27584+
27585+#define EMIT2_off32(b1, b2, _off) \
27586+do { \
27587+ if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \
27588+ EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \
27589+ EMIT(randkey, 4); \
27590+ EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \
27591+ EMIT((_off) - randkey, 4); \
27592+ } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
27593+ DILUTE_CONST_SEQUENCE(_off, randkey); \
27594+ /* imul eax, ecx */ \
27595+ EMIT3(0x0f, 0xaf, 0xc1); \
27596+ } else { \
27597+ EMIT2(b1, b2); \
27598+ EMIT(_off, 4); \
27599+ } \
27600+} while (0)
27601+#else
27602 #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
27603+#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
27604+#endif
27605
27606 #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
27607 #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
27608@@ -89,6 +164,24 @@ do { \
27609 #define X86_JBE 0x76
27610 #define X86_JA 0x77
27611
27612+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
27613+#define APPEND_FLOW_VERIFY() \
27614+do { \
27615+ /* mov ecx, randkey */ \
27616+ EMIT1(0xb9); \
27617+ EMIT(randkey, 4); \
27618+ /* cmp ecx, randkey */ \
27619+ EMIT2(0x81, 0xf9); \
27620+ EMIT(randkey, 4); \
27621+ /* jz after 8 int 3s */ \
27622+ EMIT2(0x74, 0x08); \
27623+ EMIT(0xcccccccc, 4); \
27624+ EMIT(0xcccccccc, 4); \
27625+} while (0)
27626+#else
27627+#define APPEND_FLOW_VERIFY() do { } while (0)
27628+#endif
27629+
27630 #define EMIT_COND_JMP(op, offset) \
27631 do { \
27632 if (is_near(offset)) \
27633@@ -96,6 +189,7 @@ do { \
27634 else { \
27635 EMIT2(0x0f, op + 0x10); \
27636 EMIT(offset, 4); /* jxx .+off32 */ \
27637+ APPEND_FLOW_VERIFY(); \
27638 } \
27639 } while (0)
27640
27641@@ -120,12 +214,17 @@ static inline void bpf_flush_icache(void *start, void *end)
27642 set_fs(old_fs);
27643 }
27644
27645+struct bpf_jit_work {
27646+ struct work_struct work;
27647+ void *image;
27648+};
27649+
27650 #define CHOOSE_LOAD_FUNC(K, func) \
27651 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
27652
27653 void bpf_jit_compile(struct sk_filter *fp)
27654 {
27655- u8 temp[64];
27656+ u8 temp[MAX_INSTR_CODE_SIZE];
27657 u8 *prog;
27658 unsigned int proglen, oldproglen = 0;
27659 int ilen, i;
27660@@ -138,6 +237,9 @@ void bpf_jit_compile(struct sk_filter *fp)
27661 unsigned int *addrs;
27662 const struct sock_filter *filter = fp->insns;
27663 int flen = fp->len;
27664+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
27665+ unsigned int randkey;
27666+#endif
27667
27668 if (!bpf_jit_enable)
27669 return;
27670@@ -146,11 +248,19 @@ void bpf_jit_compile(struct sk_filter *fp)
27671 if (addrs == NULL)
27672 return;
27673
27674+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
27675+ if (!fp->work)
27676+ goto out;
27677+
27678+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
27679+ randkey = get_random_int();
27680+#endif
27681+
27682 /* Before first pass, make a rough estimation of addrs[]
27683- * each bpf instruction is translated to less than 64 bytes
27684+ * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
27685 */
27686 for (proglen = 0, i = 0; i < flen; i++) {
27687- proglen += 64;
27688+ proglen += MAX_INSTR_CODE_SIZE;
27689 addrs[i] = proglen;
27690 }
27691 cleanup_addr = proglen; /* epilogue address */
27692@@ -258,10 +368,8 @@ void bpf_jit_compile(struct sk_filter *fp)
27693 case BPF_S_ALU_MUL_K: /* A *= K */
27694 if (is_imm8(K))
27695 EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
27696- else {
27697- EMIT2(0x69, 0xc0); /* imul imm32,%eax */
27698- EMIT(K, 4);
27699- }
27700+ else
27701+ EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
27702 break;
27703 case BPF_S_ALU_DIV_X: /* A /= X; */
27704 seen |= SEEN_XREG;
27705@@ -301,13 +409,23 @@ void bpf_jit_compile(struct sk_filter *fp)
27706 break;
27707 case BPF_S_ALU_MOD_K: /* A %= K; */
27708 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
27709+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
27710+ DILUTE_CONST_SEQUENCE(K, randkey);
27711+#else
27712 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
27713+#endif
27714 EMIT2(0xf7, 0xf1); /* div %ecx */
27715 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
27716 break;
27717 case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
27718+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
27719+ DILUTE_CONST_SEQUENCE(K, randkey);
27720+ // imul rax, rcx
27721+ EMIT4(0x48, 0x0f, 0xaf, 0xc1);
27722+#else
27723 EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
27724 EMIT(K, 4);
27725+#endif
27726 EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
27727 break;
27728 case BPF_S_ALU_AND_X:
27729@@ -543,8 +661,7 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
27730 if (is_imm8(K)) {
27731 EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
27732 } else {
27733- EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
27734- EMIT(K, 4);
27735+ EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
27736 }
27737 } else {
27738 EMIT2(0x89,0xde); /* mov %ebx,%esi */
27739@@ -627,17 +744,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
27740 break;
27741 default:
27742 /* hmm, too complex filter, give up with jit compiler */
27743- goto out;
27744+ goto error;
27745 }
27746 ilen = prog - temp;
27747 if (image) {
27748 if (unlikely(proglen + ilen > oldproglen)) {
27749 pr_err("bpb_jit_compile fatal error\n");
27750- kfree(addrs);
27751- module_free(NULL, image);
27752- return;
27753+ module_free_exec(NULL, image);
27754+ goto error;
27755 }
27756+ pax_open_kernel();
27757 memcpy(image + proglen, temp, ilen);
27758+ pax_close_kernel();
27759 }
27760 proglen += ilen;
27761 addrs[i] = proglen;
27762@@ -658,11 +776,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
27763 break;
27764 }
27765 if (proglen == oldproglen) {
27766- image = module_alloc(max_t(unsigned int,
27767- proglen,
27768- sizeof(struct work_struct)));
27769+ image = module_alloc_exec(proglen);
27770 if (!image)
27771- goto out;
27772+ goto error;
27773 }
27774 oldproglen = proglen;
27775 }
27776@@ -678,7 +794,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
27777 bpf_flush_icache(image, image + proglen);
27778
27779 fp->bpf_func = (void *)image;
27780- }
27781+ } else
27782+error:
27783+ kfree(fp->work);
27784+
27785 out:
27786 kfree(addrs);
27787 return;
27788@@ -686,18 +805,20 @@ out:
27789
27790 static void jit_free_defer(struct work_struct *arg)
27791 {
27792- module_free(NULL, arg);
27793+ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
27794+ kfree(arg);
27795 }
27796
27797 /* run from softirq, we must use a work_struct to call
27798- * module_free() from process context
27799+ * module_free_exec() from process context
27800 */
27801 void bpf_jit_free(struct sk_filter *fp)
27802 {
27803 if (fp->bpf_func != sk_run_filter) {
27804- struct work_struct *work = (struct work_struct *)fp->bpf_func;
27805+ struct work_struct *work = &fp->work->work;
27806
27807 INIT_WORK(work, jit_free_defer);
27808+ fp->work->image = fp->bpf_func;
27809 schedule_work(work);
27810 }
27811 }
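
The GRKERNSEC_JIT_HARDEN machinery above never emits an attacker-chosen 32-bit immediate verbatim: DILUTE_CONST_SEQUENCE loads a random key into %ecx and then XORs in key ^ imm, so both dwords that land in the executable JIT buffer look random while %ecx still ends up holding imm, defeating JIT spraying of crafted byte sequences. The arithmetic identity, checked in userspace:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Check of the DILUTE_CONST_SEQUENCE identity: for any key,
 *   ecx = key; ecx ^= key ^ imm;  ==>  ecx == imm,
 * yet neither emitted dword (key, key ^ imm) is the attacker-chosen
 * imm itself, barring a 2^-32 collision. */
static uint32_t dilute_const(uint32_t imm, uint32_t key)
{
	uint32_t ecx = key;	/* mov ecx, randkey */
	ecx ^= key ^ imm;	/* xor ecx, randkey ^ off */
	return ecx;
}

int main(void)
{
	/* a byte pattern an attacker might want in executable memory
	 * (int/ret/syscall opcode bytes) */
	uint32_t imm = 0x0f05c3cdu;
	int i;

	srand((unsigned)time(NULL));
	for (i = 0; i < 4; i++) {
		uint32_t key = ((uint32_t)rand() << 16) ^ (uint32_t)rand();

		printf("key=%08x emitted=%08x,%08x -> %08x\n",
		       (unsigned)key, (unsigned)key, (unsigned)(key ^ imm),
		       (unsigned)dilute_const(imm, key));
	}
	return 0;
}
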
27812diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
27813index d6aa6e8..266395a 100644
27814--- a/arch/x86/oprofile/backtrace.c
27815+++ b/arch/x86/oprofile/backtrace.c
27816@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
27817 struct stack_frame_ia32 *fp;
27818 unsigned long bytes;
27819
27820- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
27821+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
27822 if (bytes != sizeof(bufhead))
27823 return NULL;
27824
27825- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
27826+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
27827
27828 oprofile_add_trace(bufhead[0].return_address);
27829
27830@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
27831 struct stack_frame bufhead[2];
27832 unsigned long bytes;
27833
27834- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
27835+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
27836 if (bytes != sizeof(bufhead))
27837 return NULL;
27838
27839@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
27840 {
27841 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
27842
27843- if (!user_mode_vm(regs)) {
27844+ if (!user_mode(regs)) {
27845 unsigned long stack = kernel_stack_pointer(regs);
27846 if (depth)
27847 dump_trace(NULL, regs, (unsigned long *)stack, 0,
27848diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
27849index 192397c..5ba6f9e 100644
27850--- a/arch/x86/pci/acpi.c
27851+++ b/arch/x86/pci/acpi.c
27852@@ -568,7 +568,7 @@ int __init pci_acpi_init(void)
27853 acpi_irq_penalty_init();
27854 pcibios_enable_irq = acpi_pci_irq_enable;
27855 pcibios_disable_irq = acpi_pci_irq_disable;
27856- x86_init.pci.init_irq = x86_init_noop;
27857+ *(void **)&x86_init.pci.init_irq = x86_init_noop;
27858
27859 if (pci_routeirq) {
27860 /*
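
The *(void **)&x86_init.pci.init_irq = ... form seen here recurs through the rest of the patch (xen.c, mrst.c, vrtc.c, olpc.c, efi.c below): with the constify plugin these ops structures are const, so ordinary member assignment no longer compiles, and boot-time overrides instead write the slot through a cast. A userspace model of the idiom, with the backing storage kept writable so the write is well-defined (in the patched kernel the store additionally happens through an open write window); all names here are illustrative:

#include <stdio.h>

/* Illustrative model: an ops struct exposed through a const view,
 * updated via the patch's cast idiom.  The function-pointer/void*
 * punning matches what the patch does and is fine on POSIX targets. */
struct pci_init_ops {
	void (*init_irq)(void);
};

static void init_irq_noop(void)
{
	puts("init_irq: noop");
}

static struct pci_init_ops ops_storage;			/* writable backing */
static const struct pci_init_ops *x86_init_pci = &ops_storage;

int main(void)
{
	/* x86_init_pci->init_irq = init_irq_noop;   error: read-only view */
	*(void **)&x86_init_pci->init_irq = (void *)init_irq_noop;

	x86_init_pci->init_irq();
	return 0;
}
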
27861diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
27862index e14a2ff..3fd6b58 100644
27863--- a/arch/x86/pci/mrst.c
27864+++ b/arch/x86/pci/mrst.c
27865@@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
27866 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
27867 pci_mmcfg_late_init();
27868 pcibios_enable_irq = mrst_pci_irq_enable;
27869- pci_root_ops = pci_mrst_ops;
27870+ pax_open_kernel();
27871+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
27872+ pax_close_kernel();
27873 pci_soc_mode = 1;
27874 /* Continue with standard init */
27875 return 1;
27876diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
27877index da8fe05..7ee6704 100644
27878--- a/arch/x86/pci/pcbios.c
27879+++ b/arch/x86/pci/pcbios.c
27880@@ -79,50 +79,93 @@ union bios32 {
27881 static struct {
27882 unsigned long address;
27883 unsigned short segment;
27884-} bios32_indirect = { 0, __KERNEL_CS };
27885+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
27886
27887 /*
27888 * Returns the entry point for the given service, NULL on error
27889 */
27890
27891-static unsigned long bios32_service(unsigned long service)
27892+static unsigned long __devinit bios32_service(unsigned long service)
27893 {
27894 unsigned char return_code; /* %al */
27895 unsigned long address; /* %ebx */
27896 unsigned long length; /* %ecx */
27897 unsigned long entry; /* %edx */
27898 unsigned long flags;
27899+ struct desc_struct d, *gdt;
27900
27901 local_irq_save(flags);
27902- __asm__("lcall *(%%edi); cld"
27903+
27904+ gdt = get_cpu_gdt_table(smp_processor_id());
27905+
27906+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
27907+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
27908+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
27909+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
27910+
27911+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
27912 : "=a" (return_code),
27913 "=b" (address),
27914 "=c" (length),
27915 "=d" (entry)
27916 : "0" (service),
27917 "1" (0),
27918- "D" (&bios32_indirect));
27919+ "D" (&bios32_indirect),
27920+ "r"(__PCIBIOS_DS)
27921+ : "memory");
27922+
27923+ pax_open_kernel();
27924+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
27925+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
27926+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
27927+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
27928+ pax_close_kernel();
27929+
27930 local_irq_restore(flags);
27931
27932 switch (return_code) {
27933- case 0:
27934- return address + entry;
27935- case 0x80: /* Not present */
27936- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
27937- return 0;
27938- default: /* Shouldn't happen */
27939- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
27940- service, return_code);
27941+ case 0: {
27942+ int cpu;
27943+ unsigned char flags;
27944+
27945+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
27946+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
27947+ printk(KERN_WARNING "bios32_service: not valid\n");
27948 return 0;
27949+ }
27950+ address = address + PAGE_OFFSET;
27951+ length += 16UL; /* some BIOSs underreport this... */
27952+ flags = 4;
27953+ if (length >= 64*1024*1024) {
27954+ length >>= PAGE_SHIFT;
27955+ flags |= 8;
27956+ }
27957+
27958+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
27959+ gdt = get_cpu_gdt_table(cpu);
27960+ pack_descriptor(&d, address, length, 0x9b, flags);
27961+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
27962+ pack_descriptor(&d, address, length, 0x93, flags);
27963+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
27964+ }
27965+ return entry;
27966+ }
27967+ case 0x80: /* Not present */
27968+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
27969+ return 0;
27970+ default: /* Shouldn't happen */
27971+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
27972+ service, return_code);
27973+ return 0;
27974 }
27975 }
27976
27977 static struct {
27978 unsigned long address;
27979 unsigned short segment;
27980-} pci_indirect = { 0, __KERNEL_CS };
27981+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
27982
27983-static int pci_bios_present;
27984+static int pci_bios_present __read_only;
27985
27986 static int __devinit check_pcibios(void)
27987 {
27988@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
27989 unsigned long flags, pcibios_entry;
27990
27991 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
27992- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
27993+ pci_indirect.address = pcibios_entry;
27994
27995 local_irq_save(flags);
27996- __asm__(
27997- "lcall *(%%edi); cld\n\t"
27998+ __asm__("movw %w6, %%ds\n\t"
27999+ "lcall *%%ss:(%%edi); cld\n\t"
28000+ "push %%ss\n\t"
28001+ "pop %%ds\n\t"
28002 "jc 1f\n\t"
28003 "xor %%ah, %%ah\n"
28004 "1:"
28005@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
28006 "=b" (ebx),
28007 "=c" (ecx)
28008 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
28009- "D" (&pci_indirect)
28010+ "D" (&pci_indirect),
28011+ "r" (__PCIBIOS_DS)
28012 : "memory");
28013 local_irq_restore(flags);
28014
28015@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
28016
28017 switch (len) {
28018 case 1:
28019- __asm__("lcall *(%%esi); cld\n\t"
28020+ __asm__("movw %w6, %%ds\n\t"
28021+ "lcall *%%ss:(%%esi); cld\n\t"
28022+ "push %%ss\n\t"
28023+ "pop %%ds\n\t"
28024 "jc 1f\n\t"
28025 "xor %%ah, %%ah\n"
28026 "1:"
28027@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
28028 : "1" (PCIBIOS_READ_CONFIG_BYTE),
28029 "b" (bx),
28030 "D" ((long)reg),
28031- "S" (&pci_indirect));
28032+ "S" (&pci_indirect),
28033+ "r" (__PCIBIOS_DS));
28034 /*
28035 * Zero-extend the result beyond 8 bits, do not trust the
28036 * BIOS having done it:
28037@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
28038 *value &= 0xff;
28039 break;
28040 case 2:
28041- __asm__("lcall *(%%esi); cld\n\t"
28042+ __asm__("movw %w6, %%ds\n\t"
28043+ "lcall *%%ss:(%%esi); cld\n\t"
28044+ "push %%ss\n\t"
28045+ "pop %%ds\n\t"
28046 "jc 1f\n\t"
28047 "xor %%ah, %%ah\n"
28048 "1:"
28049@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
28050 : "1" (PCIBIOS_READ_CONFIG_WORD),
28051 "b" (bx),
28052 "D" ((long)reg),
28053- "S" (&pci_indirect));
28054+ "S" (&pci_indirect),
28055+ "r" (__PCIBIOS_DS));
28056 /*
28057 * Zero-extend the result beyond 16 bits, do not trust the
28058 * BIOS having done it:
28059@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
28060 *value &= 0xffff;
28061 break;
28062 case 4:
28063- __asm__("lcall *(%%esi); cld\n\t"
28064+ __asm__("movw %w6, %%ds\n\t"
28065+ "lcall *%%ss:(%%esi); cld\n\t"
28066+ "push %%ss\n\t"
28067+ "pop %%ds\n\t"
28068 "jc 1f\n\t"
28069 "xor %%ah, %%ah\n"
28070 "1:"
28071@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
28072 : "1" (PCIBIOS_READ_CONFIG_DWORD),
28073 "b" (bx),
28074 "D" ((long)reg),
28075- "S" (&pci_indirect));
28076+ "S" (&pci_indirect),
28077+ "r" (__PCIBIOS_DS));
28078 break;
28079 }
28080
28081@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
28082
28083 switch (len) {
28084 case 1:
28085- __asm__("lcall *(%%esi); cld\n\t"
28086+ __asm__("movw %w6, %%ds\n\t"
28087+ "lcall *%%ss:(%%esi); cld\n\t"
28088+ "push %%ss\n\t"
28089+ "pop %%ds\n\t"
28090 "jc 1f\n\t"
28091 "xor %%ah, %%ah\n"
28092 "1:"
28093@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
28094 "c" (value),
28095 "b" (bx),
28096 "D" ((long)reg),
28097- "S" (&pci_indirect));
28098+ "S" (&pci_indirect),
28099+ "r" (__PCIBIOS_DS));
28100 break;
28101 case 2:
28102- __asm__("lcall *(%%esi); cld\n\t"
28103+ __asm__("movw %w6, %%ds\n\t"
28104+ "lcall *%%ss:(%%esi); cld\n\t"
28105+ "push %%ss\n\t"
28106+ "pop %%ds\n\t"
28107 "jc 1f\n\t"
28108 "xor %%ah, %%ah\n"
28109 "1:"
28110@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
28111 "c" (value),
28112 "b" (bx),
28113 "D" ((long)reg),
28114- "S" (&pci_indirect));
28115+ "S" (&pci_indirect),
28116+ "r" (__PCIBIOS_DS));
28117 break;
28118 case 4:
28119- __asm__("lcall *(%%esi); cld\n\t"
28120+ __asm__("movw %w6, %%ds\n\t"
28121+ "lcall *%%ss:(%%esi); cld\n\t"
28122+ "push %%ss\n\t"
28123+ "pop %%ds\n\t"
28124 "jc 1f\n\t"
28125 "xor %%ah, %%ah\n"
28126 "1:"
28127@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
28128 "c" (value),
28129 "b" (bx),
28130 "D" ((long)reg),
28131- "S" (&pci_indirect));
28132+ "S" (&pci_indirect),
28133+ "r" (__PCIBIOS_DS));
28134 break;
28135 }
28136
28137@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
28138
28139 DBG("PCI: Fetching IRQ routing table... ");
28140 __asm__("push %%es\n\t"
28141+ "movw %w8, %%ds\n\t"
28142 "push %%ds\n\t"
28143 "pop %%es\n\t"
28144- "lcall *(%%esi); cld\n\t"
28145+ "lcall *%%ss:(%%esi); cld\n\t"
28146 "pop %%es\n\t"
28147+ "push %%ss\n\t"
28148+ "pop %%ds\n"
28149 "jc 1f\n\t"
28150 "xor %%ah, %%ah\n"
28151 "1:"
28152@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
28153 "1" (0),
28154 "D" ((long) &opt),
28155 "S" (&pci_indirect),
28156- "m" (opt)
28157+ "m" (opt),
28158+ "r" (__PCIBIOS_DS)
28159 : "memory");
28160 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
28161 if (ret & 0xff00)
28162@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
28163 {
28164 int ret;
28165
28166- __asm__("lcall *(%%esi); cld\n\t"
28167+ __asm__("movw %w5, %%ds\n\t"
28168+ "lcall *%%ss:(%%esi); cld\n\t"
28169+ "push %%ss\n\t"
28170+ "pop %%ds\n"
28171 "jc 1f\n\t"
28172 "xor %%ah, %%ah\n"
28173 "1:"
28174@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
28175 : "0" (PCIBIOS_SET_PCI_HW_INT),
28176 "b" ((dev->bus->number << 8) | dev->devfn),
28177 "c" ((irq << 8) | (pin + 10)),
28178- "S" (&pci_indirect));
28179+ "S" (&pci_indirect),
28180+ "r" (__PCIBIOS_DS));
28181 return !(ret & 0xff00);
28182 }
28183 EXPORT_SYMBOL(pcibios_set_irq_routing);
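
The pcbios.c changes stop calling the PCI BIOS through the flat kernel segments and instead build dedicated __PCIBIOS_CS/__PCIBIOS_DS descriptors sized to the BIOS32 service area, loading %ds explicitly around each lcall. The pack_descriptor() calls above encode base/limit/access/flags into the two 32-bit words of a GDT entry; the IA-32 bit layout, reproduced in userspace (0x9B is present+code, 0x93 present+data, flags 0xC is 32-bit with 4K granularity):

#include <stdint.h>
#include <stdio.h>

/* Reproduction of the descriptor packing used above: low word carries
 * base[15:0] and limit[15:0]; high word carries base[31:24], flags,
 * limit[19:16], the access byte, and base[23:16]. */
static void pack_descriptor(uint32_t *a, uint32_t *b, uint32_t base,
			    uint32_t limit, uint8_t type, uint8_t flags)
{
	*a = ((base & 0xffff) << 16) | (limit & 0xffff);
	*b = (base & 0xff000000) | ((base >> 16) & 0xff) |
	     (limit & 0x000f0000) | ((uint32_t)type << 8) |
	     ((uint32_t)(flags & 0xf) << 20);
}

int main(void)
{
	uint32_t a, b;

	pack_descriptor(&a, &b, 0, 0xFFFFF, 0x9B, 0xC);	/* flat 4 GiB code */
	printf("code: %08x %08x\n", (unsigned)b, (unsigned)a);
	pack_descriptor(&a, &b, 0, 0xFFFFF, 0x93, 0xC);	/* flat 4 GiB data */
	printf("data: %08x %08x\n", (unsigned)b, (unsigned)a);
	return 0;	/* prints the familiar 00cf9b00/0000ffff pair */
}
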
28184diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
28185index 56ab749..3cb792a 100644
28186--- a/arch/x86/pci/xen.c
28187+++ b/arch/x86/pci/xen.c
28188@@ -395,9 +395,9 @@ int __init pci_xen_init(void)
28189 #endif
28190
28191 #ifdef CONFIG_PCI_MSI
28192- x86_msi.setup_msi_irqs = xen_setup_msi_irqs;
28193- x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
28194- x86_msi.teardown_msi_irqs = xen_teardown_msi_irqs;
28195+ *(void **)&x86_msi.setup_msi_irqs = xen_setup_msi_irqs;
28196+ *(void **)&x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
28197+ *(void **)&x86_msi.teardown_msi_irqs = xen_teardown_msi_irqs;
28198 #endif
28199 return 0;
28200 }
28201@@ -416,8 +416,8 @@ int __init pci_xen_hvm_init(void)
28202 #endif
28203
28204 #ifdef CONFIG_PCI_MSI
28205- x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs;
28206- x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
28207+ *(void **)&x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs;
28208+ *(void **)&x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
28209 #endif
28210 return 0;
28211 }
28212@@ -474,9 +474,9 @@ int __init pci_xen_initial_domain(void)
28213 int irq;
28214
28215 #ifdef CONFIG_PCI_MSI
28216- x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs;
28217- x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
28218- x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs;
28219+ *(void **)&x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs;
28220+ *(void **)&x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
28221+ *(void **)&x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs;
28222 #endif
28223 xen_setup_acpi_sci();
28224 __acpi_register_gsi = acpi_register_gsi_xen;
28225diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
28226index ad44391..acef4b5 100644
28227--- a/arch/x86/platform/efi/efi.c
28228+++ b/arch/x86/platform/efi/efi.c
28229@@ -738,8 +738,8 @@ void __init efi_init(void)
28230 }
28231 #ifdef CONFIG_X86_32
28232 if (efi_is_native()) {
28233- x86_platform.get_wallclock = efi_get_time;
28234- x86_platform.set_wallclock = efi_set_rtc_mmss;
28235+ *(void **)&x86_platform.get_wallclock = efi_get_time;
28236+ *(void **)&x86_platform.set_wallclock = efi_set_rtc_mmss;
28237 }
28238 #endif
28239
28240diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
28241index 40e4469..1ab536e 100644
28242--- a/arch/x86/platform/efi/efi_32.c
28243+++ b/arch/x86/platform/efi/efi_32.c
28244@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
28245 {
28246 struct desc_ptr gdt_descr;
28247
28248+#ifdef CONFIG_PAX_KERNEXEC
28249+ struct desc_struct d;
28250+#endif
28251+
28252 local_irq_save(efi_rt_eflags);
28253
28254 load_cr3(initial_page_table);
28255 __flush_tlb_all();
28256
28257+#ifdef CONFIG_PAX_KERNEXEC
28258+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
28259+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
28260+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
28261+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
28262+#endif
28263+
28264 gdt_descr.address = __pa(get_cpu_gdt_table(0));
28265 gdt_descr.size = GDT_SIZE - 1;
28266 load_gdt(&gdt_descr);
28267@@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
28268 {
28269 struct desc_ptr gdt_descr;
28270
28271+#ifdef CONFIG_PAX_KERNEXEC
28272+ struct desc_struct d;
28273+
28274+ memset(&d, 0, sizeof d);
28275+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
28276+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
28277+#endif
28278+
28279 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
28280 gdt_descr.size = GDT_SIZE - 1;
28281 load_gdt(&gdt_descr);
28282diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
28283index fbe66e6..eae5e38 100644
28284--- a/arch/x86/platform/efi/efi_stub_32.S
28285+++ b/arch/x86/platform/efi/efi_stub_32.S
28286@@ -6,7 +6,9 @@
28287 */
28288
28289 #include <linux/linkage.h>
28290+#include <linux/init.h>
28291 #include <asm/page_types.h>
28292+#include <asm/segment.h>
28293
28294 /*
28295 * efi_call_phys(void *, ...) is a function with variable parameters.
28296@@ -20,7 +22,7 @@
28297 * service functions will comply with gcc calling convention, too.
28298 */
28299
28300-.text
28301+__INIT
28302 ENTRY(efi_call_phys)
28303 /*
28304 * 0. The function can only be called in Linux kernel. So CS has been
28305@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
28306 * The mapping of lower virtual memory has been created in prelog and
28307 * epilog.
28308 */
28309- movl $1f, %edx
28310- subl $__PAGE_OFFSET, %edx
28311- jmp *%edx
28312+#ifdef CONFIG_PAX_KERNEXEC
28313+ movl $(__KERNEXEC_EFI_DS), %edx
28314+ mov %edx, %ds
28315+ mov %edx, %es
28316+ mov %edx, %ss
28317+ addl $2f,(1f)
28318+ ljmp *(1f)
28319+
28320+__INITDATA
28321+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
28322+.previous
28323+
28324+2:
28325+ subl $2b,(1b)
28326+#else
28327+ jmp 1f-__PAGE_OFFSET
28328 1:
28329+#endif
28330
28331 /*
28332 * 2. Now on the top of stack is the return
28333@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
28334 * parameter 2, ..., param n. To make things easy, we save the return
28335 * address of efi_call_phys in a global variable.
28336 */
28337- popl %edx
28338- movl %edx, saved_return_addr
28339- /* get the function pointer into ECX*/
28340- popl %ecx
28341- movl %ecx, efi_rt_function_ptr
28342- movl $2f, %edx
28343- subl $__PAGE_OFFSET, %edx
28344- pushl %edx
28345+ popl (saved_return_addr)
28346+ popl (efi_rt_function_ptr)
28347
28348 /*
28349 * 3. Clear PG bit in %CR0.
28350@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
28351 /*
28352 * 5. Call the physical function.
28353 */
28354- jmp *%ecx
28355+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
28356
28357-2:
28358 /*
28359 * 6. After EFI runtime service returns, control will return to
28360 * following instruction. We'd better readjust stack pointer first.
28361@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
28362 movl %cr0, %edx
28363 orl $0x80000000, %edx
28364 movl %edx, %cr0
28365- jmp 1f
28366-1:
28367+
28368 /*
28369 * 8. Now restore the virtual mode from flat mode by
28370 * adding EIP with PAGE_OFFSET.
28371 */
28372- movl $1f, %edx
28373- jmp *%edx
28374+#ifdef CONFIG_PAX_KERNEXEC
28375+ movl $(__KERNEL_DS), %edx
28376+ mov %edx, %ds
28377+ mov %edx, %es
28378+ mov %edx, %ss
28379+ ljmp $(__KERNEL_CS),$1f
28380+#else
28381+ jmp 1f+__PAGE_OFFSET
28382+#endif
28383 1:
28384
28385 /*
28386 * 9. Balance the stack. And because EAX contain the return value,
28387 * we'd better not clobber it.
28388 */
28389- leal efi_rt_function_ptr, %edx
28390- movl (%edx), %ecx
28391- pushl %ecx
28392+ pushl (efi_rt_function_ptr)
28393
28394 /*
28395- * 10. Push the saved return address onto the stack and return.
28396+ * 10. Return to the saved return address.
28397 */
28398- leal saved_return_addr, %edx
28399- movl (%edx), %ecx
28400- pushl %ecx
28401- ret
28402+ jmpl *(saved_return_addr)
28403 ENDPROC(efi_call_phys)
28404 .previous
28405
28406-.data
28407+__INITDATA
28408 saved_return_addr:
28409 .long 0
28410 efi_rt_function_ptr:
28411diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
28412index 4c07cca..2c8427d 100644
28413--- a/arch/x86/platform/efi/efi_stub_64.S
28414+++ b/arch/x86/platform/efi/efi_stub_64.S
28415@@ -7,6 +7,7 @@
28416 */
28417
28418 #include <linux/linkage.h>
28419+#include <asm/alternative-asm.h>
28420
28421 #define SAVE_XMM \
28422 mov %rsp, %rax; \
28423@@ -40,6 +41,7 @@ ENTRY(efi_call0)
28424 call *%rdi
28425 addq $32, %rsp
28426 RESTORE_XMM
28427+ pax_force_retaddr 0, 1
28428 ret
28429 ENDPROC(efi_call0)
28430
28431@@ -50,6 +52,7 @@ ENTRY(efi_call1)
28432 call *%rdi
28433 addq $32, %rsp
28434 RESTORE_XMM
28435+ pax_force_retaddr 0, 1
28436 ret
28437 ENDPROC(efi_call1)
28438
28439@@ -60,6 +63,7 @@ ENTRY(efi_call2)
28440 call *%rdi
28441 addq $32, %rsp
28442 RESTORE_XMM
28443+ pax_force_retaddr 0, 1
28444 ret
28445 ENDPROC(efi_call2)
28446
28447@@ -71,6 +75,7 @@ ENTRY(efi_call3)
28448 call *%rdi
28449 addq $32, %rsp
28450 RESTORE_XMM
28451+ pax_force_retaddr 0, 1
28452 ret
28453 ENDPROC(efi_call3)
28454
28455@@ -83,6 +88,7 @@ ENTRY(efi_call4)
28456 call *%rdi
28457 addq $32, %rsp
28458 RESTORE_XMM
28459+ pax_force_retaddr 0, 1
28460 ret
28461 ENDPROC(efi_call4)
28462
28463@@ -96,6 +102,7 @@ ENTRY(efi_call5)
28464 call *%rdi
28465 addq $48, %rsp
28466 RESTORE_XMM
28467+ pax_force_retaddr 0, 1
28468 ret
28469 ENDPROC(efi_call5)
28470
28471@@ -112,5 +119,6 @@ ENTRY(efi_call6)
28472 call *%rdi
28473 addq $48, %rsp
28474 RESTORE_XMM
28475+ pax_force_retaddr 0, 1
28476 ret
28477 ENDPROC(efi_call6)
28478diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
28479index fd41a92..bc8091d 100644
28480--- a/arch/x86/platform/mrst/mrst.c
28481+++ b/arch/x86/platform/mrst/mrst.c
28482@@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
28483 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
28484 int sfi_mrtc_num;
28485
28486-static void mrst_power_off(void)
28487+static __noreturn void mrst_power_off(void)
28488 {
28489+ BUG();
28490 }
28491
28492-static void mrst_reboot(void)
28493+static __noreturn void mrst_reboot(void)
28494 {
28495 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
28496+ BUG();
28497 }
28498
28499 /* parse all the mtimer info to a static mtimer array */
28500@@ -233,14 +235,14 @@ static void __init mrst_time_init(void)
28501 case MRST_TIMER_APBT_ONLY:
28502 break;
28503 case MRST_TIMER_LAPIC_APBT:
28504- x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
28505- x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
28506+ *(void **)&x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
28507+ *(void **)&x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
28508 break;
28509 default:
28510 if (!boot_cpu_has(X86_FEATURE_ARAT))
28511 break;
28512- x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
28513- x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
28514+ *(void **)&x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
28515+ *(void **)&x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
28516 return;
28517 }
28518 /* we need at least one APB timer */
28519@@ -282,35 +284,35 @@ static unsigned char mrst_get_nmi_reason(void)
28520 */
28521 void __init x86_mrst_early_setup(void)
28522 {
28523- x86_init.resources.probe_roms = x86_init_noop;
28524- x86_init.resources.reserve_resources = x86_init_noop;
28525+ *(void **)&x86_init.resources.probe_roms = x86_init_noop;
28526+ *(void **)&x86_init.resources.reserve_resources = x86_init_noop;
28527
28528- x86_init.timers.timer_init = mrst_time_init;
28529- x86_init.timers.setup_percpu_clockev = x86_init_noop;
28530+ *(void **)&x86_init.timers.timer_init = mrst_time_init;
28531+ *(void **)&x86_init.timers.setup_percpu_clockev = x86_init_noop;
28532
28533- x86_init.irqs.pre_vector_init = x86_init_noop;
28534+ *(void **)&x86_init.irqs.pre_vector_init = x86_init_noop;
28535
28536- x86_init.oem.arch_setup = mrst_arch_setup;
28537+ *(void **)&x86_init.oem.arch_setup = mrst_arch_setup;
28538
28539- x86_cpuinit.setup_percpu_clockev = apbt_setup_secondary_clock;
28540+ *(void **)&x86_cpuinit.setup_percpu_clockev = apbt_setup_secondary_clock;
28541
28542- x86_platform.calibrate_tsc = mrst_calibrate_tsc;
28543- x86_platform.i8042_detect = mrst_i8042_detect;
28544- x86_init.timers.wallclock_init = mrst_rtc_init;
28545- x86_platform.get_nmi_reason = mrst_get_nmi_reason;
28546+ *(void **)&x86_platform.calibrate_tsc = mrst_calibrate_tsc;
28547+ *(void **)&x86_platform.i8042_detect = mrst_i8042_detect;
28548+ *(void **)&x86_init.timers.wallclock_init = mrst_rtc_init;
28549+ *(void **)&x86_platform.get_nmi_reason = mrst_get_nmi_reason;
28550
28551- x86_init.pci.init = pci_mrst_init;
28552- x86_init.pci.fixup_irqs = x86_init_noop;
28553+ *(void **)&x86_init.pci.init = pci_mrst_init;
28554+ *(void **)&x86_init.pci.fixup_irqs = x86_init_noop;
28555
28556 legacy_pic = &null_legacy_pic;
28557
28558 /* Moorestown specific power_off/restart method */
28559 pm_power_off = mrst_power_off;
28560- machine_ops.emergency_restart = mrst_reboot;
28561+ *(void **)&machine_ops.emergency_restart = mrst_reboot;
28562
28563 /* Avoid searching for BIOS MP tables */
28564- x86_init.mpparse.find_smp_config = x86_init_noop;
28565- x86_init.mpparse.get_smp_config = x86_init_uint_noop;
28566+ *(void **)&x86_init.mpparse.find_smp_config = x86_init_noop;
28567+ *(void **)&x86_init.mpparse.get_smp_config = x86_init_uint_noop;
28568 set_bit(MP_BUS_ISA, mp_bus_not_pci);
28569 }
28570
28571diff --git a/arch/x86/platform/mrst/vrtc.c b/arch/x86/platform/mrst/vrtc.c
28572index 225bd0f..22e8086 100644
28573--- a/arch/x86/platform/mrst/vrtc.c
28574+++ b/arch/x86/platform/mrst/vrtc.c
28575@@ -120,8 +120,8 @@ void __init mrst_rtc_init(void)
28576
28577 vrtc_virt_base = (void __iomem *)set_fixmap_offset_nocache(FIX_LNW_VRTC,
28578 vrtc_paddr);
28579- x86_platform.get_wallclock = vrtc_get_time;
28580- x86_platform.set_wallclock = vrtc_set_mmss;
28581+ *(void **)&x86_platform.get_wallclock = vrtc_get_time;
28582+ *(void **)&x86_platform.set_wallclock = vrtc_set_mmss;
28583 }
28584
28585 /*
28586diff --git a/arch/x86/platform/olpc/olpc.c b/arch/x86/platform/olpc/olpc.c
28587index 2737608..0d62cc2 100644
28588--- a/arch/x86/platform/olpc/olpc.c
28589+++ b/arch/x86/platform/olpc/olpc.c
28590@@ -395,7 +395,7 @@ static int __init olpc_init(void)
28591 * XO-1 only. */
28592 if (olpc_platform_info.boardrev < olpc_board_pre(0xd0) &&
28593 !cs5535_has_vsa2())
28594- x86_init.pci.arch_init = pci_olpc_init;
28595+ *(void **)&x86_init.pci.arch_init = pci_olpc_init;
28596 #endif
28597
28598 if (olpc_platform_info.boardrev < olpc_board_pre(0xd0)) { /* XO-1 */
28599diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
28600index d6ee929..3637cb5 100644
28601--- a/arch/x86/platform/olpc/olpc_dt.c
28602+++ b/arch/x86/platform/olpc/olpc_dt.c
28603@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
28604 return res;
28605 }
28606
28607-static struct of_pdt_ops prom_olpc_ops __initdata = {
28608+static struct of_pdt_ops prom_olpc_ops __initconst = {
28609 .nextprop = olpc_dt_nextprop,
28610 .getproplen = olpc_dt_getproplen,
28611 .getproperty = olpc_dt_getproperty,
28612diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
28613index 218cdb1..c1178eb 100644
28614--- a/arch/x86/power/cpu.c
28615+++ b/arch/x86/power/cpu.c
28616@@ -132,7 +132,7 @@ static void do_fpu_end(void)
28617 static void fix_processor_context(void)
28618 {
28619 int cpu = smp_processor_id();
28620- struct tss_struct *t = &per_cpu(init_tss, cpu);
28621+ struct tss_struct *t = init_tss + cpu;
28622
28623 set_tss_desc(cpu, t); /*
28624 * This just modifies memory; should not be
28625@@ -142,8 +142,6 @@ static void fix_processor_context(void)
28626 */
28627
28628 #ifdef CONFIG_X86_64
28629- get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
28630-
28631 syscall_init(); /* This sets MSR_*STAR and related */
28632 #endif
28633 load_TR_desc(); /* This does ltr */
28634diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
28635index cbca565..bae7133 100644
28636--- a/arch/x86/realmode/init.c
28637+++ b/arch/x86/realmode/init.c
28638@@ -62,7 +62,13 @@ void __init setup_real_mode(void)
28639 __va(real_mode_header->trampoline_header);
28640
28641 #ifdef CONFIG_X86_32
28642- trampoline_header->start = __pa(startup_32_smp);
28643+ trampoline_header->start = __pa(ktla_ktva(startup_32_smp));
28644+
28645+#ifdef CONFIG_PAX_KERNEXEC
28646+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
28647+#endif
28648+
28649+ trampoline_header->boot_cs = __BOOT_CS;
28650 trampoline_header->gdt_limit = __BOOT_DS + 7;
28651 trampoline_header->gdt_base = __pa(boot_gdt);
28652 #else
28653diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
28654index 8869287..d577672 100644
28655--- a/arch/x86/realmode/rm/Makefile
28656+++ b/arch/x86/realmode/rm/Makefile
28657@@ -78,5 +78,8 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
28658 $(call cc-option, -fno-unit-at-a-time)) \
28659 $(call cc-option, -fno-stack-protector) \
28660 $(call cc-option, -mpreferred-stack-boundary=2)
28661+ifdef CONSTIFY_PLUGIN
28662+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
28663+endif
28664 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
28665 GCOV_PROFILE := n
28666diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
28667index a28221d..93c40f1 100644
28668--- a/arch/x86/realmode/rm/header.S
28669+++ b/arch/x86/realmode/rm/header.S
28670@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
28671 #endif
28672 /* APM/BIOS reboot */
28673 .long pa_machine_real_restart_asm
28674-#ifdef CONFIG_X86_64
28675+#ifdef CONFIG_X86_32
28676+ .long __KERNEL_CS
28677+#else
28678 .long __KERNEL32_CS
28679 #endif
28680 END(real_mode_header)
28681diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
28682index c1b2791..f9e31c7 100644
28683--- a/arch/x86/realmode/rm/trampoline_32.S
28684+++ b/arch/x86/realmode/rm/trampoline_32.S
28685@@ -25,6 +25,12 @@
28686 #include <asm/page_types.h>
28687 #include "realmode.h"
28688
28689+#ifdef CONFIG_PAX_KERNEXEC
28690+#define ta(X) (X)
28691+#else
28692+#define ta(X) (pa_ ## X)
28693+#endif
28694+
28695 .text
28696 .code16
28697
28698@@ -39,8 +45,6 @@ ENTRY(trampoline_start)
28699
28700 cli # We should be safe anyway
28701
28702- movl tr_start, %eax # where we need to go
28703-
28704 movl $0xA5A5A5A5, trampoline_status
28705 # write marker for master knows we're running
28706
28707@@ -56,7 +60,7 @@ ENTRY(trampoline_start)
28708 movw $1, %dx # protected mode (PE) bit
28709 lmsw %dx # into protected mode
28710
28711- ljmpl $__BOOT_CS, $pa_startup_32
28712+ ljmpl *(trampoline_header)
28713
28714 .section ".text32","ax"
28715 .code32
28716@@ -67,7 +71,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
28717 .balign 8
28718 GLOBAL(trampoline_header)
28719 tr_start: .space 4
28720- tr_gdt_pad: .space 2
28721+ tr_boot_cs: .space 2
28722 tr_gdt: .space 6
28723 END(trampoline_header)
28724
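
With KERNEXEC the application processor can no longer hardcode ljmpl $__BOOT_CS, $pa_startup_32; the boot CPU now fills a far pointer inside trampoline_header (offset in tr_start, selector in the pad-turned tr_boot_cs, set from realmode/init.c above) and the 16-bit stub performs one indirect ljmpl *(trampoline_header). A C view of the layout implied by the .space directives:

#include <stdint.h>

/* C view of trampoline_header after this patch: a 32-bit offset
 * immediately followed by a 16-bit selector is exactly the memory
 * operand an indirect ljmpl expects, so filling start and boot_cs at
 * setup time steers the AP without patching code. */
struct trampoline_header {
	uint32_t start;		/* tr_start:   entry offset            */
	uint16_t boot_cs;	/* tr_boot_cs: code selector to load   */
	uint16_t gdt_limit;	/* tr_gdt:     6-byte GDT pointer ...  */
	uint32_t gdt_base;	/*             ... limit then base     */
} __attribute__((packed));

_Static_assert(sizeof(struct trampoline_header) == 12,
	       "must match the .space layout in trampoline_32.S");

int main(void)
{
	return 0;
}
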
28725diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
28726index bb360dc..3e5945f 100644
28727--- a/arch/x86/realmode/rm/trampoline_64.S
28728+++ b/arch/x86/realmode/rm/trampoline_64.S
28729@@ -107,7 +107,7 @@ ENTRY(startup_32)
28730 wrmsr
28731
28732 # Enable paging and in turn activate Long Mode
28733- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
28734+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
28735 movl %eax, %cr0
28736
28737 /*
28738diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
28739index 5a1847d..deccb30 100644
28740--- a/arch/x86/tools/relocs.c
28741+++ b/arch/x86/tools/relocs.c
28742@@ -12,10 +12,13 @@
28743 #include <regex.h>
28744 #include <tools/le_byteshift.h>
28745
28746+#include "../../../include/generated/autoconf.h"
28747+
28748 static void die(char *fmt, ...);
28749
28750 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
28751 static Elf32_Ehdr ehdr;
28752+static Elf32_Phdr *phdr;
28753 static unsigned long reloc_count, reloc_idx;
28754 static unsigned long *relocs;
28755 static unsigned long reloc16_count, reloc16_idx;
28756@@ -330,9 +333,39 @@ static void read_ehdr(FILE *fp)
28757 }
28758 }
28759
28760+static void read_phdrs(FILE *fp)
28761+{
28762+ unsigned int i;
28763+
28764+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
28765+ if (!phdr) {
28766+ die("Unable to allocate %d program headers\n",
28767+ ehdr.e_phnum);
28768+ }
28769+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
28770+ die("Seek to %d failed: %s\n",
28771+ ehdr.e_phoff, strerror(errno));
28772+ }
28773+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
28774+ die("Cannot read ELF program headers: %s\n",
28775+ strerror(errno));
28776+ }
28777+ for(i = 0; i < ehdr.e_phnum; i++) {
28778+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
28779+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
28780+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
28781+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
28782+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
28783+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
28784+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
28785+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
28786+ }
28787+
28788+}
28789+
28790 static void read_shdrs(FILE *fp)
28791 {
28792- int i;
28793+ unsigned int i;
28794 Elf32_Shdr shdr;
28795
28796 secs = calloc(ehdr.e_shnum, sizeof(struct section));
28797@@ -367,7 +400,7 @@ static void read_shdrs(FILE *fp)
28798
28799 static void read_strtabs(FILE *fp)
28800 {
28801- int i;
28802+ unsigned int i;
28803 for (i = 0; i < ehdr.e_shnum; i++) {
28804 struct section *sec = &secs[i];
28805 if (sec->shdr.sh_type != SHT_STRTAB) {
28806@@ -392,7 +425,7 @@ static void read_strtabs(FILE *fp)
28807
28808 static void read_symtabs(FILE *fp)
28809 {
28810- int i,j;
28811+ unsigned int i,j;
28812 for (i = 0; i < ehdr.e_shnum; i++) {
28813 struct section *sec = &secs[i];
28814 if (sec->shdr.sh_type != SHT_SYMTAB) {
28815@@ -423,9 +456,11 @@ static void read_symtabs(FILE *fp)
28816 }
28817
28818
28819-static void read_relocs(FILE *fp)
28820+static void read_relocs(FILE *fp, int use_real_mode)
28821 {
28822- int i,j;
28823+ unsigned int i,j;
28824+ uint32_t base;
28825+
28826 for (i = 0; i < ehdr.e_shnum; i++) {
28827 struct section *sec = &secs[i];
28828 if (sec->shdr.sh_type != SHT_REL) {
28829@@ -445,9 +480,22 @@ static void read_relocs(FILE *fp)
28830 die("Cannot read symbol table: %s\n",
28831 strerror(errno));
28832 }
28833+ base = 0;
28834+
28835+#ifdef CONFIG_X86_32
28836+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
28837+ if (phdr[j].p_type != PT_LOAD )
28838+ continue;
28839+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
28840+ continue;
28841+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
28842+ break;
28843+ }
28844+#endif
28845+
28846 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
28847 Elf32_Rel *rel = &sec->reltab[j];
28848- rel->r_offset = elf32_to_cpu(rel->r_offset);
28849+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
28850 rel->r_info = elf32_to_cpu(rel->r_info);
28851 }
28852 }
28853@@ -456,13 +504,13 @@ static void read_relocs(FILE *fp)
28854
28855 static void print_absolute_symbols(void)
28856 {
28857- int i;
28858+ unsigned int i;
28859 printf("Absolute symbols\n");
28860 printf(" Num: Value Size Type Bind Visibility Name\n");
28861 for (i = 0; i < ehdr.e_shnum; i++) {
28862 struct section *sec = &secs[i];
28863 char *sym_strtab;
28864- int j;
28865+ unsigned int j;
28866
28867 if (sec->shdr.sh_type != SHT_SYMTAB) {
28868 continue;
28869@@ -489,14 +537,14 @@ static void print_absolute_symbols(void)
28870
28871 static void print_absolute_relocs(void)
28872 {
28873- int i, printed = 0;
28874+ unsigned int i, printed = 0;
28875
28876 for (i = 0; i < ehdr.e_shnum; i++) {
28877 struct section *sec = &secs[i];
28878 struct section *sec_applies, *sec_symtab;
28879 char *sym_strtab;
28880 Elf32_Sym *sh_symtab;
28881- int j;
28882+ unsigned int j;
28883 if (sec->shdr.sh_type != SHT_REL) {
28884 continue;
28885 }
28886@@ -558,13 +606,13 @@ static void print_absolute_relocs(void)
28887 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
28888 int use_real_mode)
28889 {
28890- int i;
28891+ unsigned int i;
28892 /* Walk through the relocations */
28893 for (i = 0; i < ehdr.e_shnum; i++) {
28894 char *sym_strtab;
28895 Elf32_Sym *sh_symtab;
28896 struct section *sec_applies, *sec_symtab;
28897- int j;
28898+ unsigned int j;
28899 struct section *sec = &secs[i];
28900
28901 if (sec->shdr.sh_type != SHT_REL) {
28902@@ -588,6 +636,24 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
28903 sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
28904 r_type = ELF32_R_TYPE(rel->r_info);
28905
28906+ if (!use_real_mode) {
28907+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
28908+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
28909+ continue;
28910+
28911+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
28912+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
28913+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
28914+ continue;
28915+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
28916+ continue;
28917+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
28918+ continue;
28919+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
28920+ continue;
28921+#endif
28922+ }
28923+
28924 shn_abs = sym->st_shndx == SHN_ABS;
28925
28926 switch (r_type) {
28927@@ -681,7 +747,7 @@ static int write32(unsigned int v, FILE *f)
28928
28929 static void emit_relocs(int as_text, int use_real_mode)
28930 {
28931- int i;
28932+ unsigned int i;
28933 /* Count how many relocations I have and allocate space for them. */
28934 reloc_count = 0;
28935 walk_relocs(count_reloc, use_real_mode);
28936@@ -808,10 +874,11 @@ int main(int argc, char **argv)
28937 fname, strerror(errno));
28938 }
28939 read_ehdr(fp);
28940+ read_phdrs(fp);
28941 read_shdrs(fp);
28942 read_strtabs(fp);
28943 read_symtabs(fp);
28944- read_relocs(fp);
28945+ read_relocs(fp, use_real_mode);
28946 if (show_absolute_syms) {
28947 print_absolute_symbols();
28948 return 0;
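
Note: the relocs.c hunks above make the 32-bit tool rebase relocation offsets into the kernel's virtual view. A PT_LOAD program header that contains the relocated section supplies base = PAGE_OFFSET + p_paddr - p_vaddr, which is then added to every r_offset. A minimal user-space sketch of that base computation follows; the struct layout, PT_LOAD value and page-offset parameter are stand-ins for the real ELF types and CONFIG_PAGE_OFFSET.

#include <stddef.h>
#include <stdint.h>

struct phdr { uint32_t p_type, p_offset, p_vaddr, p_paddr, p_filesz; };
#define PT_LOAD 1

static uint32_t reloc_base(uint32_t sh_offset, uint32_t page_offset,
                           const struct phdr *ph, size_t n)
{
    size_t j;

    for (j = 0; j < n; j++) {
        if (ph[j].p_type != PT_LOAD)
            continue;
        /* skip segments that do not carry the relocated section */
        if (sh_offset < ph[j].p_offset ||
            sh_offset >= ph[j].p_offset + ph[j].p_filesz)
            continue;
        /* virtual base of the segment, as the kernel will see it */
        return page_offset + ph[j].p_paddr - ph[j].p_vaddr;
    }
    return 0;   /* no containing PT_LOAD: offsets stay file-relative */
}
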
28949diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
28950index fd14be1..e3c79c0 100644
28951--- a/arch/x86/vdso/Makefile
28952+++ b/arch/x86/vdso/Makefile
28953@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
28954 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
28955 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
28956
28957-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
28958+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
28959 GCOV_PROFILE := n
28960
28961 #
28962diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
28963index 0faad64..39ef157 100644
28964--- a/arch/x86/vdso/vdso32-setup.c
28965+++ b/arch/x86/vdso/vdso32-setup.c
28966@@ -25,6 +25,7 @@
28967 #include <asm/tlbflush.h>
28968 #include <asm/vdso.h>
28969 #include <asm/proto.h>
28970+#include <asm/mman.h>
28971
28972 enum {
28973 VDSO_DISABLED = 0,
28974@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
28975 void enable_sep_cpu(void)
28976 {
28977 int cpu = get_cpu();
28978- struct tss_struct *tss = &per_cpu(init_tss, cpu);
28979+ struct tss_struct *tss = init_tss + cpu;
28980
28981 if (!boot_cpu_has(X86_FEATURE_SEP)) {
28982 put_cpu();
28983@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
28984 gate_vma.vm_start = FIXADDR_USER_START;
28985 gate_vma.vm_end = FIXADDR_USER_END;
28986 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
28987- gate_vma.vm_page_prot = __P101;
28988+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
28989
28990 return 0;
28991 }
28992@@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
28993 if (compat)
28994 addr = VDSO_HIGH_BASE;
28995 else {
28996- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
28997+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
28998 if (IS_ERR_VALUE(addr)) {
28999 ret = addr;
29000 goto up_fail;
29001 }
29002 }
29003
29004- current->mm->context.vdso = (void *)addr;
29005+ current->mm->context.vdso = addr;
29006
29007 if (compat_uses_vma || !compat) {
29008 /*
29009@@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
29010 }
29011
29012 current_thread_info()->sysenter_return =
29013- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
29014+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
29015
29016 up_fail:
29017 if (ret)
29018- current->mm->context.vdso = NULL;
29019+ current->mm->context.vdso = 0;
29020
29021 up_write(&mm->mmap_sem);
29022
29023@@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
29024
29025 const char *arch_vma_name(struct vm_area_struct *vma)
29026 {
29027- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
29028+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
29029 return "[vdso]";
29030+
29031+#ifdef CONFIG_PAX_SEGMEXEC
29032+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
29033+ return "[vdso]";
29034+#endif
29035+
29036 return NULL;
29037 }
29038
29039@@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
29040 * Check to see if the corresponding task was created in compat vdso
29041 * mode.
29042 */
29043- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
29044+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
29045 return &gate_vma;
29046 return NULL;
29047 }
29048diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
29049index 00aaf04..4a26505 100644
29050--- a/arch/x86/vdso/vma.c
29051+++ b/arch/x86/vdso/vma.c
29052@@ -16,8 +16,6 @@
29053 #include <asm/vdso.h>
29054 #include <asm/page.h>
29055
29056-unsigned int __read_mostly vdso_enabled = 1;
29057-
29058 extern char vdso_start[], vdso_end[];
29059 extern unsigned short vdso_sync_cpuid;
29060
29061@@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
29062 * unaligned here as a result of stack start randomization.
29063 */
29064 addr = PAGE_ALIGN(addr);
29065- addr = align_addr(addr, NULL, ALIGN_VDSO);
29066
29067 return addr;
29068 }
29069@@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
29070 unsigned size)
29071 {
29072 struct mm_struct *mm = current->mm;
29073- unsigned long addr;
29074+ unsigned long addr = 0;
29075 int ret;
29076
29077- if (!vdso_enabled)
29078- return 0;
29079-
29080 down_write(&mm->mmap_sem);
29081+
29082+#ifdef CONFIG_PAX_RANDMMAP
29083+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
29084+#endif
29085+
29086 addr = vdso_addr(mm->start_stack, size);
29087+ addr = align_addr(addr, NULL, ALIGN_VDSO);
29088 addr = get_unmapped_area(NULL, addr, size, 0, 0);
29089 if (IS_ERR_VALUE(addr)) {
29090 ret = addr;
29091 goto up_fail;
29092 }
29093
29094- current->mm->context.vdso = (void *)addr;
29095+ mm->context.vdso = addr;
29096
29097 ret = install_special_mapping(mm, addr, size,
29098 VM_READ|VM_EXEC|
29099 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
29100 pages);
29101- if (ret) {
29102- current->mm->context.vdso = NULL;
29103- goto up_fail;
29104- }
29105+ if (ret)
29106+ mm->context.vdso = 0;
29107
29108 up_fail:
29109 up_write(&mm->mmap_sem);
29110@@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
29111 vdsox32_size);
29112 }
29113 #endif
29114-
29115-static __init int vdso_setup(char *s)
29116-{
29117- vdso_enabled = simple_strtoul(s, NULL, 0);
29118- return 0;
29119-}
29120-__setup("vdso=", vdso_setup);
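
Note: the vma.c hunks drop the vdso= toggle entirely and rework placement. The #ifdef CONFIG_PAX_RANDMMAP block guards exactly one statement, so when MF_PAX_RANDMMAP is set the stack-relative hint is never computed, addr stays 0, and get_unmapped_area() places the vDSO from the randomized mmap base. A stand-alone model of that hint selection (all names are stand-ins):

/* returns the hint passed to get_unmapped_area(); 0 means "pick one" */
static unsigned long pick_vdso_hint(int randmmap_active,
                                    unsigned long stack_hint)
{
    unsigned long addr = 0;

    if (!randmmap_active)
        addr = stack_hint;   /* legacy placement near the stack */

    return addr;             /* 0 => randomized mmap-base slot  */
}
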
29121diff --git a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c
29122index 7005ced..530d6eb 100644
29123--- a/arch/x86/xen/apic.c
29124+++ b/arch/x86/xen/apic.c
29125@@ -30,5 +30,5 @@ static unsigned int xen_io_apic_read(unsigned apic, unsigned reg)
29126
29127 void __init xen_init_apic(void)
29128 {
29129- x86_io_apic_ops.read = xen_io_apic_read;
29130+ *(void **)&x86_io_apic_ops.read = xen_io_apic_read;
29131 }
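
Note: the *(void **)&ops.member = fn; form seen here recurs through all the Xen hunks below. Under the PaX constify plugin these ops tables become const, so a plain member assignment no longer compiles; the cast writes through an untyped alias of the member's slot (the backing page is additionally made writable around such boot-time patching). A compile-only sketch of the idiom, with an illustrative struct standing in for x86_io_apic_ops; storing a function pointer into a void * slot is a GNU C extension the kernel relies on:

struct io_apic_ops_sketch {
    unsigned int (*read)(unsigned int apic, unsigned int reg);
};

static unsigned int stub_read(unsigned int apic, unsigned int reg)
{
    (void)apic; (void)reg;
    return 0;
}

static const struct io_apic_ops_sketch ops;   /* "constified" table */

static void patch_ops(void)
{
    /* ops.read = stub_read;  -> error: read-only member           */
    *(void **)&ops.read = stub_read;   /* store via a void** alias */
}
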
29132diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
29133index 586d838..9181904 100644
29134--- a/arch/x86/xen/enlighten.c
29135+++ b/arch/x86/xen/enlighten.c
29136@@ -99,8 +99,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
29137
29138 struct shared_info xen_dummy_shared_info;
29139
29140-void *xen_initial_gdt;
29141-
29142 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
29143 __read_mostly int xen_have_vector_callback;
29144 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
29145@@ -473,8 +471,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
29146 {
29147 unsigned long va = dtr->address;
29148 unsigned int size = dtr->size + 1;
29149- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
29150- unsigned long frames[pages];
29151+ unsigned long frames[65536 / PAGE_SIZE];
29152 int f;
29153
29154 /*
29155@@ -522,8 +519,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
29156 {
29157 unsigned long va = dtr->address;
29158 unsigned int size = dtr->size + 1;
29159- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
29160- unsigned long frames[pages];
29161+ unsigned long frames[65536 / PAGE_SIZE];
29162 int f;
29163
29164 /*
29165@@ -918,21 +914,21 @@ static u32 xen_safe_apic_wait_icr_idle(void)
29166
29167 static void set_xen_basic_apic_ops(void)
29168 {
29169- apic->read = xen_apic_read;
29170- apic->write = xen_apic_write;
29171- apic->icr_read = xen_apic_icr_read;
29172- apic->icr_write = xen_apic_icr_write;
29173- apic->wait_icr_idle = xen_apic_wait_icr_idle;
29174- apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
29175- apic->set_apic_id = xen_set_apic_id;
29176- apic->get_apic_id = xen_get_apic_id;
29177+ *(void **)&apic->read = xen_apic_read;
29178+ *(void **)&apic->write = xen_apic_write;
29179+ *(void **)&apic->icr_read = xen_apic_icr_read;
29180+ *(void **)&apic->icr_write = xen_apic_icr_write;
29181+ *(void **)&apic->wait_icr_idle = xen_apic_wait_icr_idle;
29182+ *(void **)&apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
29183+ *(void **)&apic->set_apic_id = xen_set_apic_id;
29184+ *(void **)&apic->get_apic_id = xen_get_apic_id;
29185
29186 #ifdef CONFIG_SMP
29187- apic->send_IPI_allbutself = xen_send_IPI_allbutself;
29188- apic->send_IPI_mask_allbutself = xen_send_IPI_mask_allbutself;
29189- apic->send_IPI_mask = xen_send_IPI_mask;
29190- apic->send_IPI_all = xen_send_IPI_all;
29191- apic->send_IPI_self = xen_send_IPI_self;
29192+ *(void **)&apic->send_IPI_allbutself = xen_send_IPI_allbutself;
29193+ *(void **)&apic->send_IPI_mask_allbutself = xen_send_IPI_mask_allbutself;
29194+ *(void **)&apic->send_IPI_mask = xen_send_IPI_mask;
29195+ *(void **)&apic->send_IPI_all = xen_send_IPI_all;
29196+ *(void **)&apic->send_IPI_self = xen_send_IPI_self;
29197 #endif
29198 }
29199
29200@@ -1222,30 +1218,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
29201 #endif
29202 };
29203
29204-static void xen_reboot(int reason)
29205+static __noreturn void xen_reboot(int reason)
29206 {
29207 struct sched_shutdown r = { .reason = reason };
29208
29209- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
29210- BUG();
29211+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
29212+ BUG();
29213 }
29214
29215-static void xen_restart(char *msg)
29216+static __noreturn void xen_restart(char *msg)
29217 {
29218 xen_reboot(SHUTDOWN_reboot);
29219 }
29220
29221-static void xen_emergency_restart(void)
29222+static __noreturn void xen_emergency_restart(void)
29223 {
29224 xen_reboot(SHUTDOWN_reboot);
29225 }
29226
29227-static void xen_machine_halt(void)
29228+static __noreturn void xen_machine_halt(void)
29229 {
29230 xen_reboot(SHUTDOWN_poweroff);
29231 }
29232
29233-static void xen_machine_power_off(void)
29234+static __noreturn void xen_machine_power_off(void)
29235 {
29236 if (pm_power_off)
29237 pm_power_off();
29238@@ -1290,14 +1286,14 @@ static const struct machine_ops xen_machine_ops __initconst = {
29239 */
29240 static void __init xen_setup_stackprotector(void)
29241 {
29242- pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
29243- pv_cpu_ops.load_gdt = xen_load_gdt_boot;
29244+ *(void **)&pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
29245+ *(void **)&pv_cpu_ops.load_gdt = xen_load_gdt_boot;
29246
29247 setup_stack_canary_segment(0);
29248 switch_to_new_gdt(0);
29249
29250- pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
29251- pv_cpu_ops.load_gdt = xen_load_gdt;
29252+ *(void **)&pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
29253+ *(void **)&pv_cpu_ops.load_gdt = xen_load_gdt;
29254 }
29255
29256 /* First C function to be called on Xen boot */
29257@@ -1315,13 +1311,13 @@ asmlinkage void __init xen_start_kernel(void)
29258
29259 /* Install Xen paravirt ops */
29260 pv_info = xen_info;
29261- pv_init_ops = xen_init_ops;
29262- pv_cpu_ops = xen_cpu_ops;
29263- pv_apic_ops = xen_apic_ops;
29264+ memcpy((void *)&pv_init_ops, &xen_init_ops, sizeof pv_init_ops);
29265+ memcpy((void *)&pv_cpu_ops, &xen_cpu_ops, sizeof pv_cpu_ops);
29266+ memcpy((void *)&pv_apic_ops, &xen_apic_ops, sizeof pv_apic_ops);
29267
29268- x86_init.resources.memory_setup = xen_memory_setup;
29269- x86_init.oem.arch_setup = xen_arch_setup;
29270- x86_init.oem.banner = xen_banner;
29271+ *(void **)&x86_init.resources.memory_setup = xen_memory_setup;
29272+ *(void **)&x86_init.oem.arch_setup = xen_arch_setup;
29273+ *(void **)&x86_init.oem.banner = xen_banner;
29274
29275 xen_init_time_ops();
29276
29277@@ -1347,7 +1343,17 @@ asmlinkage void __init xen_start_kernel(void)
29278 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
29279
29280 /* Work out if we support NX */
29281- x86_configure_nx();
29282+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
29283+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
29284+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
29285+ unsigned l, h;
29286+
29287+ __supported_pte_mask |= _PAGE_NX;
29288+ rdmsr(MSR_EFER, l, h);
29289+ l |= EFER_NX;
29290+ wrmsr(MSR_EFER, l, h);
29291+ }
29292+#endif
29293
29294 xen_setup_features();
29295
29296@@ -1376,14 +1382,7 @@ asmlinkage void __init xen_start_kernel(void)
29297 pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
29298 }
29299
29300- machine_ops = xen_machine_ops;
29301-
29302- /*
29303- * The only reliable way to retain the initial address of the
29304- * percpu gdt_page is to remember it here, so we can go and
29305- * mark it RW later, when the initial percpu area is freed.
29306- */
29307- xen_initial_gdt = &per_cpu(gdt_page, 0);
29308+ memcpy((void *)&machine_ops, &xen_machine_ops, sizeof machine_ops);
29309
29310 xen_smp_init();
29311
29312@@ -1450,7 +1449,7 @@ asmlinkage void __init xen_start_kernel(void)
29313 add_preferred_console("tty", 0, NULL);
29314 add_preferred_console("hvc", 0, NULL);
29315 if (pci_xen)
29316- x86_init.pci.arch_init = pci_xen_init;
29317+ *(void **)&x86_init.pci.arch_init = pci_xen_init;
29318 } else {
29319 const struct dom0_vga_console_info *info =
29320 (void *)((char *)xen_start_info +
29321@@ -1476,8 +1475,8 @@ asmlinkage void __init xen_start_kernel(void)
29322 xen_acpi_sleep_register();
29323
29324 /* Avoid searching for BIOS MP tables */
29325- x86_init.mpparse.find_smp_config = x86_init_noop;
29326- x86_init.mpparse.get_smp_config = x86_init_uint_noop;
29327+ *(void **)&x86_init.mpparse.find_smp_config = x86_init_noop;
29328+ *(void **)&x86_init.mpparse.get_smp_config = x86_init_uint_noop;
29329 }
29330 #ifdef CONFIG_PCI
29331 /* PCI BIOS service won't work from a PV guest. */
29332@@ -1583,7 +1582,7 @@ static void __init xen_hvm_guest_init(void)
29333 xen_hvm_smp_init();
29334 register_cpu_notifier(&xen_hvm_cpu_notifier);
29335 xen_unplug_emulated_devices();
29336- x86_init.irqs.intr_init = xen_init_IRQ;
29337+ *(void **)&x86_init.irqs.intr_init = xen_init_IRQ;
29338 xen_hvm_init_time_ops();
29339 xen_hvm_init_mmu_ops();
29340 }
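
Note: two things in the enlighten.c hunks deserve a gloss. The frames[] arrays lose their variable length because a GDT descriptor limit is 16 bits wide, so dtr->size + 1 can never exceed 65536 bytes; a fixed 65536 / PAGE_SIZE array always suffices and removes a VLA from the kernel stack. A quick stand-alone check of the bound (PAGE_SIZE assumed 4096, as on x86):

#include <assert.h>

#define PAGE_SIZE 4096u

int main(void)
{
    unsigned int limit = 0xffff;         /* widest possible GDT limit */
    unsigned int size  = limit + 1;      /* dtr->size + 1 <= 65536    */
    unsigned int pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;

    assert(pages <= 65536 / PAGE_SIZE);  /* 16 pages at 4 KiB each    */
    return 0;
}

The memcpy((void *)&pv_init_ops, ...) rewrites serve the same purpose as the void** casts above: whole-struct assignment to a constified table is replaced by an explicit copy through an unqualified pointer.
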
29341diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
29342index 01a4dc0..3ca0cc9 100644
29343--- a/arch/x86/xen/irq.c
29344+++ b/arch/x86/xen/irq.c
29345@@ -130,5 +130,5 @@ static const struct pv_irq_ops xen_irq_ops __initconst = {
29346 void __init xen_init_irq_ops(void)
29347 {
29348 pv_irq_ops = xen_irq_ops;
29349- x86_init.irqs.intr_init = xen_init_IRQ;
29350+ *(void **)&x86_init.irqs.intr_init = xen_init_IRQ;
29351 }
29352diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
29353index dcf5f2d..5f72fe7 100644
29354--- a/arch/x86/xen/mmu.c
29355+++ b/arch/x86/xen/mmu.c
29356@@ -1881,6 +1881,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
29357 /* L3_k[510] -> level2_kernel_pgt
29358 * L3_i[511] -> level2_fixmap_pgt */
29359 convert_pfn_mfn(level3_kernel_pgt);
29360+ convert_pfn_mfn(level3_vmalloc_start_pgt);
29361+ convert_pfn_mfn(level3_vmalloc_end_pgt);
29362+ convert_pfn_mfn(level3_vmemmap_pgt);
29363
29364 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
29365 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
29366@@ -1910,8 +1913,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
29367 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
29368 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
29369 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
29370+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
29371+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
29372+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
29373 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
29374 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
29375+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
29376 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
29377 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
29378
29379@@ -2097,6 +2104,7 @@ static void __init xen_post_allocator_init(void)
29380 pv_mmu_ops.set_pud = xen_set_pud;
29381 #if PAGETABLE_LEVELS == 4
29382 pv_mmu_ops.set_pgd = xen_set_pgd;
29383+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
29384 #endif
29385
29386 /* This will work as long as patching hasn't happened yet
29387@@ -2178,6 +2186,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
29388 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
29389 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
29390 .set_pgd = xen_set_pgd_hyper,
29391+ .set_pgd_batched = xen_set_pgd_hyper,
29392
29393 .alloc_pud = xen_alloc_pmd_init,
29394 .release_pud = xen_release_pmd_init,
29395@@ -2197,8 +2206,8 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
29396
29397 void __init xen_init_mmu_ops(void)
29398 {
29399- x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve;
29400- x86_init.paging.pagetable_init = xen_pagetable_init;
29401+ *(void **)&x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve;
29402+ *(void **)&x86_init.paging.pagetable_init = xen_pagetable_init;
29403 pv_mmu_ops = xen_mmu_ops;
29404
29405 memset(dummy_mapping, 0xff, PAGE_SIZE);
29406diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
29407index 353c50f..a0b9b0d 100644
29408--- a/arch/x86/xen/smp.c
29409+++ b/arch/x86/xen/smp.c
29410@@ -229,11 +229,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
29411 {
29412 BUG_ON(smp_processor_id() != 0);
29413 native_smp_prepare_boot_cpu();
29414-
29415- /* We've switched to the "real" per-cpu gdt, so make sure the
29416- old memory can be recycled */
29417- make_lowmem_page_readwrite(xen_initial_gdt);
29418-
29419 xen_filter_cpu_maps();
29420 xen_setup_vcpu_info_placement();
29421 }
29422@@ -300,12 +295,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
29423 gdt = get_cpu_gdt_table(cpu);
29424
29425 ctxt->flags = VGCF_IN_KERNEL;
29426- ctxt->user_regs.ds = __USER_DS;
29427- ctxt->user_regs.es = __USER_DS;
29428+ ctxt->user_regs.ds = __KERNEL_DS;
29429+ ctxt->user_regs.es = __KERNEL_DS;
29430 ctxt->user_regs.ss = __KERNEL_DS;
29431 #ifdef CONFIG_X86_32
29432 ctxt->user_regs.fs = __KERNEL_PERCPU;
29433- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
29434+ savesegment(gs, ctxt->user_regs.gs);
29435 #else
29436 ctxt->gs_base_kernel = per_cpu_offset(cpu);
29437 #endif
29438@@ -355,13 +350,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
29439 int rc;
29440
29441 per_cpu(current_task, cpu) = idle;
29442+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
29443 #ifdef CONFIG_X86_32
29444 irq_ctx_init(cpu);
29445 #else
29446 clear_tsk_thread_flag(idle, TIF_FORK);
29447- per_cpu(kernel_stack, cpu) =
29448- (unsigned long)task_stack_page(idle) -
29449- KERNEL_STACK_OFFSET + THREAD_SIZE;
29450+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
29451 #endif
29452 xen_setup_runstate_info(cpu);
29453 xen_setup_timer(cpu);
29454@@ -637,7 +631,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
29455
29456 void __init xen_smp_init(void)
29457 {
29458- smp_ops = xen_smp_ops;
29459+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
29460 xen_fill_possible_map();
29461 xen_init_spinlocks();
29462 }
29463@@ -672,10 +666,10 @@ void __init xen_hvm_smp_init(void)
29464 {
29465 if (!xen_have_vector_callback)
29466 return;
29467- smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
29468- smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
29469- smp_ops.cpu_up = xen_hvm_cpu_up;
29470- smp_ops.cpu_die = xen_hvm_cpu_die;
29471- smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
29472- smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
29473+ *(void **)&smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
29474+ *(void **)&smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
29475+ *(void **)&smp_ops.cpu_up = xen_hvm_cpu_up;
29476+ *(void **)&smp_ops.cpu_die = xen_hvm_cpu_die;
29477+ *(void **)&smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
29478+ *(void **)&smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
29479 }
29480diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
29481index 83e866d..ef60385 100644
29482--- a/arch/x86/xen/spinlock.c
29483+++ b/arch/x86/xen/spinlock.c
29484@@ -390,12 +390,12 @@ void __init xen_init_spinlocks(void)
29485 {
29486 BUILD_BUG_ON(sizeof(struct xen_spinlock) > sizeof(arch_spinlock_t));
29487
29488- pv_lock_ops.spin_is_locked = xen_spin_is_locked;
29489- pv_lock_ops.spin_is_contended = xen_spin_is_contended;
29490- pv_lock_ops.spin_lock = xen_spin_lock;
29491- pv_lock_ops.spin_lock_flags = xen_spin_lock_flags;
29492- pv_lock_ops.spin_trylock = xen_spin_trylock;
29493- pv_lock_ops.spin_unlock = xen_spin_unlock;
29494+ *(void **)&pv_lock_ops.spin_is_locked = xen_spin_is_locked;
29495+ *(void **)&pv_lock_ops.spin_is_contended = xen_spin_is_contended;
29496+ *(void **)&pv_lock_ops.spin_lock = xen_spin_lock;
29497+ *(void **)&pv_lock_ops.spin_lock_flags = xen_spin_lock_flags;
29498+ *(void **)&pv_lock_ops.spin_trylock = xen_spin_trylock;
29499+ *(void **)&pv_lock_ops.spin_unlock = xen_spin_unlock;
29500 }
29501
29502 #ifdef CONFIG_XEN_DEBUG_FS
29503diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
29504index 0296a95..3c51a2d 100644
29505--- a/arch/x86/xen/time.c
29506+++ b/arch/x86/xen/time.c
29507@@ -481,15 +481,15 @@ static void __init xen_time_init(void)
29508
29509 void __init xen_init_time_ops(void)
29510 {
29511- pv_time_ops = xen_time_ops;
29512+ memcpy((void *)&pv_time_ops, &xen_time_ops, sizeof pv_time_ops);
29513
29514- x86_init.timers.timer_init = xen_time_init;
29515- x86_init.timers.setup_percpu_clockev = x86_init_noop;
29516- x86_cpuinit.setup_percpu_clockev = x86_init_noop;
29517+ *(void **)&x86_init.timers.timer_init = xen_time_init;
29518+ *(void **)&x86_init.timers.setup_percpu_clockev = x86_init_noop;
29519+ *(void **)&x86_cpuinit.setup_percpu_clockev = x86_init_noop;
29520
29521- x86_platform.calibrate_tsc = xen_tsc_khz;
29522- x86_platform.get_wallclock = xen_get_wallclock;
29523- x86_platform.set_wallclock = xen_set_wallclock;
29524+ *(void **)&x86_platform.calibrate_tsc = xen_tsc_khz;
29525+ *(void **)&x86_platform.get_wallclock = xen_get_wallclock;
29526+ *(void **)&x86_platform.set_wallclock = xen_set_wallclock;
29527 }
29528
29529 #ifdef CONFIG_XEN_PVHVM
29530@@ -514,12 +514,12 @@ void __init xen_hvm_init_time_ops(void)
29531 return;
29532 }
29533
29534- pv_time_ops = xen_time_ops;
29535- x86_init.timers.setup_percpu_clockev = xen_time_init;
29536- x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;
29537+ memcpy((void *)&pv_time_ops, &xen_time_ops, sizeof pv_time_ops);
29538+ *(void **)&x86_init.timers.setup_percpu_clockev = xen_time_init;
29539+ *(void **)&x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;
29540
29541- x86_platform.calibrate_tsc = xen_tsc_khz;
29542- x86_platform.get_wallclock = xen_get_wallclock;
29543- x86_platform.set_wallclock = xen_set_wallclock;
29544+ *(void **)&x86_platform.calibrate_tsc = xen_tsc_khz;
29545+ *(void **)&x86_platform.get_wallclock = xen_get_wallclock;
29546+ *(void **)&x86_platform.set_wallclock = xen_set_wallclock;
29547 }
29548 #endif
29549diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
29550index f9643fc..602e8af 100644
29551--- a/arch/x86/xen/xen-asm_32.S
29552+++ b/arch/x86/xen/xen-asm_32.S
29553@@ -84,14 +84,14 @@ ENTRY(xen_iret)
29554 ESP_OFFSET=4 # bytes pushed onto stack
29555
29556 /*
29557- * Store vcpu_info pointer for easy access. Do it this way to
29558- * avoid having to reload %fs
29559+ * Store vcpu_info pointer for easy access.
29560 */
29561 #ifdef CONFIG_SMP
29562- GET_THREAD_INFO(%eax)
29563- movl TI_cpu(%eax), %eax
29564- movl __per_cpu_offset(,%eax,4), %eax
29565- mov xen_vcpu(%eax), %eax
29566+ push %fs
29567+ mov $(__KERNEL_PERCPU), %eax
29568+ mov %eax, %fs
29569+ mov PER_CPU_VAR(xen_vcpu), %eax
29570+ pop %fs
29571 #else
29572 movl xen_vcpu, %eax
29573 #endif
29574diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
29575index 7faed58..ba4427c 100644
29576--- a/arch/x86/xen/xen-head.S
29577+++ b/arch/x86/xen/xen-head.S
29578@@ -19,6 +19,17 @@ ENTRY(startup_xen)
29579 #ifdef CONFIG_X86_32
29580 mov %esi,xen_start_info
29581 mov $init_thread_union+THREAD_SIZE,%esp
29582+#ifdef CONFIG_SMP
29583+ movl $cpu_gdt_table,%edi
29584+ movl $__per_cpu_load,%eax
29585+ movw %ax,__KERNEL_PERCPU + 2(%edi)
29586+ rorl $16,%eax
29587+ movb %al,__KERNEL_PERCPU + 4(%edi)
29588+ movb %ah,__KERNEL_PERCPU + 7(%edi)
29589+ movl $__per_cpu_end - 1,%eax
29590+ subl $__per_cpu_start,%eax
29591+ movw %ax,__KERNEL_PERCPU + 0(%edi)
29592+#endif
29593 #else
29594 mov %rsi,xen_start_info
29595 mov $init_thread_union+THREAD_SIZE,%rsp
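
Note: the added assembly initializes the __KERNEL_PERCPU GDT entry by hand before any C code runs. The byte offsets it pokes follow the x86 segment-descriptor layout: limit bits 15:0 live in bytes 0-1, base 15:0 in bytes 2-3, base 23:16 in byte 4, base 31:24 in byte 7 (per the Intel SDM). A C companion to the movw/movb/rorl sequence, as an illustrative sketch:

#include <stdint.h>

static void set_base_limit(uint8_t desc[8], uint32_t base, uint32_t limit)
{
    desc[0] = limit & 0xff;            /* limit  7:0  */
    desc[1] = (limit >> 8) & 0xff;     /* limit 15:8  */
    desc[2] = base & 0xff;             /* base   7:0  */
    desc[3] = (base >> 8) & 0xff;      /* base  15:8  */
    desc[4] = (base >> 16) & 0xff;     /* base 23:16  */
    desc[7] = (base >> 24) & 0xff;     /* base 31:24  */
}
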
29596diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
29597index a95b417..b6dbd0b 100644
29598--- a/arch/x86/xen/xen-ops.h
29599+++ b/arch/x86/xen/xen-ops.h
29600@@ -10,8 +10,6 @@
29601 extern const char xen_hypervisor_callback[];
29602 extern const char xen_failsafe_callback[];
29603
29604-extern void *xen_initial_gdt;
29605-
29606 struct trap_info;
29607 void xen_copy_trap_info(struct trap_info *traps);
29608
29609diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
29610index 525bd3d..ef888b1 100644
29611--- a/arch/xtensa/variants/dc232b/include/variant/core.h
29612+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
29613@@ -119,9 +119,9 @@
29614 ----------------------------------------------------------------------*/
29615
29616 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
29617-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
29618 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
29619 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
29620+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
29621
29622 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
29623 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
29624diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
29625index 2f33760..835e50a 100644
29626--- a/arch/xtensa/variants/fsf/include/variant/core.h
29627+++ b/arch/xtensa/variants/fsf/include/variant/core.h
29628@@ -11,6 +11,7 @@
29629 #ifndef _XTENSA_CORE_H
29630 #define _XTENSA_CORE_H
29631
29632+#include <linux/const.h>
29633
29634 /****************************************************************************
29635 Parameters Useful for Any Code, USER or PRIVILEGED
29636@@ -112,9 +113,9 @@
29637 ----------------------------------------------------------------------*/
29638
29639 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
29640-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
29641 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
29642 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
29643+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
29644
29645 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
29646 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
29647diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
29648index af00795..2bb8105 100644
29649--- a/arch/xtensa/variants/s6000/include/variant/core.h
29650+++ b/arch/xtensa/variants/s6000/include/variant/core.h
29651@@ -11,6 +11,7 @@
29652 #ifndef _XTENSA_CORE_CONFIGURATION_H
29653 #define _XTENSA_CORE_CONFIGURATION_H
29654
29655+#include <linux/const.h>
29656
29657 /****************************************************************************
29658 Parameters Useful for Any Code, USER or PRIVILEGED
29659@@ -118,9 +119,9 @@
29660 ----------------------------------------------------------------------*/
29661
29662 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
29663-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
29664 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
29665 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
29666+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
29667
29668 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
29669 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
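
Note: all three xtensa variant headers get the same treatment: XCHAL_DCACHE_LINESIZE is derived from its log2 rather than duplicated, and _AC() gives the constant a UL suffix in C while leaving it a bare literal under __ASSEMBLY__. The C side of that equivalence, with const.h semantics paraphrased:

#define __AC(X, Y) (X##Y)
#define _AC(X, Y)  __AC(X, Y)

#define LINEWIDTH 5                           /* log2 of the line size */
#define LINESIZE  (_AC(1, UL) << LINEWIDTH)   /* expands to (1UL << 5) */

_Static_assert(LINESIZE == 32, "derived size must match 1 << log2");
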
29670diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
29671index 58916af..9cb880b 100644
29672--- a/block/blk-iopoll.c
29673+++ b/block/blk-iopoll.c
29674@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
29675 }
29676 EXPORT_SYMBOL(blk_iopoll_complete);
29677
29678-static void blk_iopoll_softirq(struct softirq_action *h)
29679+static void blk_iopoll_softirq(void)
29680 {
29681 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
29682 int rearm = 0, budget = blk_iopoll_budget;
29683diff --git a/block/blk-map.c b/block/blk-map.c
29684index 623e1cd..ca1e109 100644
29685--- a/block/blk-map.c
29686+++ b/block/blk-map.c
29687@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
29688 if (!len || !kbuf)
29689 return -EINVAL;
29690
29691- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
29692+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
29693 if (do_copy)
29694 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
29695 else
29696diff --git a/block/blk-softirq.c b/block/blk-softirq.c
29697index 467c8de..4bddc6d 100644
29698--- a/block/blk-softirq.c
29699+++ b/block/blk-softirq.c
29700@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
29701 * Softirq action handler - move entries to local list and loop over them
29702 * while passing them to the queue registered handler.
29703 */
29704-static void blk_done_softirq(struct softirq_action *h)
29705+static void blk_done_softirq(void)
29706 {
29707 struct list_head *cpu_list, local_list;
29708
29709diff --git a/block/bsg.c b/block/bsg.c
29710index ff64ae3..593560c 100644
29711--- a/block/bsg.c
29712+++ b/block/bsg.c
29713@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
29714 struct sg_io_v4 *hdr, struct bsg_device *bd,
29715 fmode_t has_write_perm)
29716 {
29717+ unsigned char tmpcmd[sizeof(rq->__cmd)];
29718+ unsigned char *cmdptr;
29719+
29720 if (hdr->request_len > BLK_MAX_CDB) {
29721 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
29722 if (!rq->cmd)
29723 return -ENOMEM;
29724- }
29725+ cmdptr = rq->cmd;
29726+ } else
29727+ cmdptr = tmpcmd;
29728
29729- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
29730+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
29731 hdr->request_len))
29732 return -EFAULT;
29733
29734+ if (cmdptr != rq->cmd)
29735+ memcpy(rq->cmd, cmdptr, hdr->request_len);
29736+
29737 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
29738 if (blk_verify_command(rq->cmd, has_write_perm))
29739 return -EPERM;
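
Note: the bsg.c change (and the matching scsi_ioctl.c hunks below) stop copy_from_user() from writing straight into the command buffer embedded in struct request; user bytes land in a stack bounce buffer first and are memcpy'd into place only after the copy succeeds. That keeps the usercopy destination a plain stack object, which is what PAX_USERCOPY-style checking wants to see. A reduced user-space model of the pattern (CMD_SIZE and the copy stub are stand-ins):

#include <string.h>

#define CMD_SIZE 32   /* stand-in for sizeof(rq->__cmd) */

/* stand-in with copy_from_user()'s contract: returns bytes NOT copied */
static unsigned long copy_from_user_stub(void *dst, const void *src,
                                         unsigned long n)
{
    memcpy(dst, src, n);
    return 0;
}

/* heap != 0: cmd was kmalloc'd large enough for len;
 * heap == 0: cmd is the embedded array and len <= CMD_SIZE. */
static int fill_cmd(unsigned char *cmd, int heap,
                    const void *user_src, unsigned long len)
{
    unsigned char tmpcmd[CMD_SIZE];
    unsigned char *cmdptr = heap ? cmd : tmpcmd;

    if (copy_from_user_stub(cmdptr, user_src, len))
        return -14;                  /* -EFAULT in the real code */
    if (cmdptr != cmd)
        memcpy(cmd, cmdptr, len);    /* commit from the bounce   */
    return 0;
}
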
29740diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
29741index 7c668c8..db3521c 100644
29742--- a/block/compat_ioctl.c
29743+++ b/block/compat_ioctl.c
29744@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
29745 err |= __get_user(f->spec1, &uf->spec1);
29746 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
29747 err |= __get_user(name, &uf->name);
29748- f->name = compat_ptr(name);
29749+ f->name = (void __force_kernel *)compat_ptr(name);
29750 if (err) {
29751 err = -EFAULT;
29752 goto out;
29753diff --git a/block/partitions/efi.c b/block/partitions/efi.c
29754index 6296b40..417c00f 100644
29755--- a/block/partitions/efi.c
29756+++ b/block/partitions/efi.c
29757@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
29758 if (!gpt)
29759 return NULL;
29760
29761+ if (!le32_to_cpu(gpt->num_partition_entries))
29762+ return NULL;
29763+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
29764+ if (!pte)
29765+ return NULL;
29766+
29767 count = le32_to_cpu(gpt->num_partition_entries) *
29768 le32_to_cpu(gpt->sizeof_partition_entry);
29769- if (!count)
29770- return NULL;
29771- pte = kzalloc(count, GFP_KERNEL);
29772- if (!pte)
29773- return NULL;
29774-
29775 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
29776 (u8 *) pte,
29777 count) < count) {
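
Note: the efi.c rework is about the multiplication, not the zeroing. num_partition_entries and sizeof_partition_entry both come from on-disk GPT data, so kzalloc(a * b) can be fed a product that wraps to a small value, after which read_lba() writes past the short allocation. kcalloc(a, b) performs the overflow check before allocating. A user-space analogue of that check:

#include <stdint.h>
#include <stdlib.h>

static void *checked_calloc(size_t n, size_t size)
{
    if (size != 0 && n > SIZE_MAX / size)
        return NULL;            /* n * size would wrap */
    return calloc(n, size);     /* calloc repeats the same check */
}
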
29778diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
29779index 9a87daa..fb17486 100644
29780--- a/block/scsi_ioctl.c
29781+++ b/block/scsi_ioctl.c
29782@@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
29783 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
29784 struct sg_io_hdr *hdr, fmode_t mode)
29785 {
29786- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
29787+ unsigned char tmpcmd[sizeof(rq->__cmd)];
29788+ unsigned char *cmdptr;
29789+
29790+ if (rq->cmd != rq->__cmd)
29791+ cmdptr = rq->cmd;
29792+ else
29793+ cmdptr = tmpcmd;
29794+
29795+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
29796 return -EFAULT;
29797+
29798+ if (cmdptr != rq->cmd)
29799+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
29800+
29801 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
29802 return -EPERM;
29803
29804@@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
29805 int err;
29806 unsigned int in_len, out_len, bytes, opcode, cmdlen;
29807 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
29808+ unsigned char tmpcmd[sizeof(rq->__cmd)];
29809+ unsigned char *cmdptr;
29810
29811 if (!sic)
29812 return -EINVAL;
29813@@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
29814 */
29815 err = -EFAULT;
29816 rq->cmd_len = cmdlen;
29817- if (copy_from_user(rq->cmd, sic->data, cmdlen))
29818+
29819+ if (rq->cmd != rq->__cmd)
29820+ cmdptr = rq->cmd;
29821+ else
29822+ cmdptr = tmpcmd;
29823+
29824+ if (copy_from_user(cmdptr, sic->data, cmdlen))
29825 goto error;
29826
29827+ if (rq->cmd != cmdptr)
29828+ memcpy(rq->cmd, cmdptr, cmdlen);
29829+
29830 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
29831 goto error;
29832
29833diff --git a/crypto/cryptd.c b/crypto/cryptd.c
29834index 7bdd61b..afec999 100644
29835--- a/crypto/cryptd.c
29836+++ b/crypto/cryptd.c
29837@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
29838
29839 struct cryptd_blkcipher_request_ctx {
29840 crypto_completion_t complete;
29841-};
29842+} __no_const;
29843
29844 struct cryptd_hash_ctx {
29845 struct crypto_shash *child;
29846@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
29847
29848 struct cryptd_aead_request_ctx {
29849 crypto_completion_t complete;
29850-};
29851+} __no_const;
29852
29853 static void cryptd_queue_worker(struct work_struct *work);
29854
29855diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
29856index e6defd8..c26a225 100644
29857--- a/drivers/acpi/apei/cper.c
29858+++ b/drivers/acpi/apei/cper.c
29859@@ -38,12 +38,12 @@
29860 */
29861 u64 cper_next_record_id(void)
29862 {
29863- static atomic64_t seq;
29864+ static atomic64_unchecked_t seq;
29865
29866- if (!atomic64_read(&seq))
29867- atomic64_set(&seq, ((u64)get_seconds()) << 32);
29868+ if (!atomic64_read_unchecked(&seq))
29869+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
29870
29871- return atomic64_inc_return(&seq);
29872+ return atomic64_inc_return_unchecked(&seq);
29873 }
29874 EXPORT_SYMBOL_GPL(cper_next_record_id);
29875
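Note: this is the first of many atomic_*_unchecked conversions in the section. Under PAX_REFCOUNT, ordinary atomic increments are instrumented to detect signed overflow so that reference-count wraps cannot be exploited; counters that are allowed to wrap, like this record-id sequence seeded with get_seconds() << 32, opt out via the _unchecked variants. A simplified single-threaded model of the two behaviours:

#include <limits.h>
#include <stdlib.h>

static long inc_checked(long v)     /* PAX_REFCOUNT-style semantics */
{
    if (v == LONG_MAX)
        abort();                    /* the kernel reports and traps */
    return v + 1;
}

static long inc_unchecked(long v)   /* free-running counter */
{
    return (long)((unsigned long)v + 1);   /* defined wraparound */
}
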
29876diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
29877index 7586544..636a2f0 100644
29878--- a/drivers/acpi/ec_sys.c
29879+++ b/drivers/acpi/ec_sys.c
29880@@ -12,6 +12,7 @@
29881 #include <linux/acpi.h>
29882 #include <linux/debugfs.h>
29883 #include <linux/module.h>
29884+#include <linux/uaccess.h>
29885 #include "internal.h"
29886
29887 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
29888@@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
29889 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
29890 */
29891 unsigned int size = EC_SPACE_SIZE;
29892- u8 *data = (u8 *) buf;
29893+ u8 data;
29894 loff_t init_off = *off;
29895 int err = 0;
29896
29897@@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
29898 size = count;
29899
29900 while (size) {
29901- err = ec_read(*off, &data[*off - init_off]);
29902+ err = ec_read(*off, &data);
29903 if (err)
29904 return err;
29905+ if (put_user(data, &buf[*off - init_off]))
29906+ return -EFAULT;
29907 *off += 1;
29908 size--;
29909 }
29910@@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
29911
29912 unsigned int size = count;
29913 loff_t init_off = *off;
29914- u8 *data = (u8 *) buf;
29915 int err = 0;
29916
29917 if (*off >= EC_SPACE_SIZE)
29918@@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
29919 }
29920
29921 while (size) {
29922- u8 byte_write = data[*off - init_off];
29923+ u8 byte_write;
29924+ if (get_user(byte_write, &buf[*off - init_off]))
29925+ return -EFAULT;
29926 err = ec_write(*off, byte_write);
29927 if (err)
29928 return err;
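
Note: the ec_sys.c hunks fix a genuine bug, not just a hardening nit: the old handlers cast the __user buffer to u8 * and indexed it directly from kernel context. The fix marshals each byte through a kernel temporary with get_user()/put_user(). The fixed shape, reduced to one helper (kernel context, using the in-tree uaccess API):

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* device byte -> kernel temporary -> checked store to user memory */
static long push_byte_to_user(u8 value, u8 __user *dst)
{
    if (put_user(value, dst))
        return -EFAULT;    /* user page unmapped or not writable */
    return 0;
}
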
29929diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
29930index 27adb09..1ed412d 100644
29931--- a/drivers/acpi/proc.c
29932+++ b/drivers/acpi/proc.c
29933@@ -360,19 +360,13 @@ acpi_system_write_wakeup_device(struct file *file,
29934 size_t count, loff_t * ppos)
29935 {
29936 struct list_head *node, *next;
29937- char strbuf[5];
29938- char str[5] = "";
29939- unsigned int len = count;
29940+ char strbuf[5] = {0};
29941
29942- if (len > 4)
29943- len = 4;
29944- if (len < 0)
29945+ if (count > 4)
29946+ count = 4;
29947+ if (copy_from_user(strbuf, buffer, count))
29948 return -EFAULT;
29949-
29950- if (copy_from_user(strbuf, buffer, len))
29951- return -EFAULT;
29952- strbuf[len] = '\0';
29953- sscanf(strbuf, "%s", str);
29954+ strbuf[count] = '\0';
29955
29956 mutex_lock(&acpi_device_lock);
29957 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
29958@@ -381,7 +375,7 @@ acpi_system_write_wakeup_device(struct file *file,
29959 if (!dev->wakeup.flags.valid)
29960 continue;
29961
29962- if (!strncmp(dev->pnp.bus_id, str, 4)) {
29963+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
29964 if (device_can_wakeup(&dev->dev)) {
29965 bool enable = !device_may_wakeup(&dev->dev);
29966 device_set_wakeup_enable(&dev->dev, enable);
29967diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
29968index bd4e5dc..0497b66 100644
29969--- a/drivers/acpi/processor_driver.c
29970+++ b/drivers/acpi/processor_driver.c
29971@@ -552,7 +552,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
29972 return 0;
29973 #endif
29974
29975- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
29976+ BUG_ON(pr->id >= nr_cpu_ids);
29977
29978 /*
29979 * Buggy BIOS check
29980diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
29981index 586362e..ca71b9b 100644
29982--- a/drivers/ata/libata-core.c
29983+++ b/drivers/ata/libata-core.c
29984@@ -4775,7 +4775,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
29985 struct ata_port *ap;
29986 unsigned int tag;
29987
29988- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
29989+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
29990 ap = qc->ap;
29991
29992 qc->flags = 0;
29993@@ -4791,7 +4791,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
29994 struct ata_port *ap;
29995 struct ata_link *link;
29996
29997- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
29998+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
29999 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
30000 ap = qc->ap;
30001 link = qc->dev->link;
30002@@ -5887,6 +5887,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
30003 return;
30004
30005 spin_lock(&lock);
30006+ pax_open_kernel();
30007
30008 for (cur = ops->inherits; cur; cur = cur->inherits) {
30009 void **inherit = (void **)cur;
30010@@ -5900,8 +5901,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
30011 if (IS_ERR(*pp))
30012 *pp = NULL;
30013
30014- ops->inherits = NULL;
30015+ *(struct ata_port_operations **)&ops->inherits = NULL;
30016
30017+ pax_close_kernel();
30018 spin_unlock(&lock);
30019 }
30020
30021diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
30022index 371fd2c..0836c78 100644
30023--- a/drivers/ata/pata_arasan_cf.c
30024+++ b/drivers/ata/pata_arasan_cf.c
30025@@ -861,7 +861,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
30026 /* Handle platform specific quirks */
30027 if (pdata->quirk) {
30028 if (pdata->quirk & CF_BROKEN_PIO) {
30029- ap->ops->set_piomode = NULL;
30030+ pax_open_kernel();
30031+ *(void **)&ap->ops->set_piomode = NULL;
30032+ pax_close_kernel();
30033 ap->pio_mask = 0;
30034 }
30035 if (pdata->quirk & CF_BROKEN_MWDMA)
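
Note: pax_open_kernel()/pax_close_kernel(), used in the two ATA hunks above, bracket the rare legitimate writes to data that KERNEXEC/constify keeps read-only. On x86 the core of it is toggling CR0.WP so ring 0 briefly stops honouring page-level write protection. A much-simplified sketch of that mechanism (the real implementation also handles preemption, SMP and per-arch differences):

static inline unsigned long read_cr0_sketch(void)
{
    unsigned long v;

    asm volatile("mov %%cr0, %0" : "=r"(v));
    return v;
}

static inline void set_wp_sketch(int on)
{
    unsigned long cr0 = read_cr0_sketch();

    if (on)
        cr0 |= 0x10000UL;    /* CR0.WP set: R/O pages enforced  */
    else
        cr0 &= ~0x10000UL;   /* WP clear: the patch store lands */
    asm volatile("mov %0, %%cr0" : : "r"(cr0));
}
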
30036diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
30037index f9b983a..887b9d8 100644
30038--- a/drivers/atm/adummy.c
30039+++ b/drivers/atm/adummy.c
30040@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
30041 vcc->pop(vcc, skb);
30042 else
30043 dev_kfree_skb_any(skb);
30044- atomic_inc(&vcc->stats->tx);
30045+ atomic_inc_unchecked(&vcc->stats->tx);
30046
30047 return 0;
30048 }
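
Note: from here to the end of the section the ATM driver hunks are one mechanical substitution repeated across adummy, ambassador, atmtcp, eni, firestream, fore200e, he, horizon, idt77252, iphase and friends: the per-VCC protocol statistics become wrap-tolerant counters, so every increment switches to the _unchecked helper. Shape of the change, paraphrased from the patched tree:

/*
 * struct k_atm_aal_stats {
 *         atomic_unchecked_t tx, tx_err;
 *         atomic_unchecked_t rx, rx_err, rx_drop;
 * };
 *
 * call sites:  atomic_inc(&vcc->stats->tx)
 *          ->  atomic_inc_unchecked(&vcc->stats->tx)
 */
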
30049diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
30050index ff7bb8a..568fc0b 100644
30051--- a/drivers/atm/ambassador.c
30052+++ b/drivers/atm/ambassador.c
30053@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
30054 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
30055
30056 // VC layer stats
30057- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
30058+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
30059
30060 // free the descriptor
30061 kfree (tx_descr);
30062@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
30063 dump_skb ("<<<", vc, skb);
30064
30065 // VC layer stats
30066- atomic_inc(&atm_vcc->stats->rx);
30067+ atomic_inc_unchecked(&atm_vcc->stats->rx);
30068 __net_timestamp(skb);
30069 // end of our responsibility
30070 atm_vcc->push (atm_vcc, skb);
30071@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
30072 } else {
30073 PRINTK (KERN_INFO, "dropped over-size frame");
30074 // should we count this?
30075- atomic_inc(&atm_vcc->stats->rx_drop);
30076+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
30077 }
30078
30079 } else {
30080@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
30081 }
30082
30083 if (check_area (skb->data, skb->len)) {
30084- atomic_inc(&atm_vcc->stats->tx_err);
30085+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
30086 return -ENOMEM; // ?
30087 }
30088
30089diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
30090index b22d71c..d6e1049 100644
30091--- a/drivers/atm/atmtcp.c
30092+++ b/drivers/atm/atmtcp.c
30093@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
30094 if (vcc->pop) vcc->pop(vcc,skb);
30095 else dev_kfree_skb(skb);
30096 if (dev_data) return 0;
30097- atomic_inc(&vcc->stats->tx_err);
30098+ atomic_inc_unchecked(&vcc->stats->tx_err);
30099 return -ENOLINK;
30100 }
30101 size = skb->len+sizeof(struct atmtcp_hdr);
30102@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
30103 if (!new_skb) {
30104 if (vcc->pop) vcc->pop(vcc,skb);
30105 else dev_kfree_skb(skb);
30106- atomic_inc(&vcc->stats->tx_err);
30107+ atomic_inc_unchecked(&vcc->stats->tx_err);
30108 return -ENOBUFS;
30109 }
30110 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
30111@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
30112 if (vcc->pop) vcc->pop(vcc,skb);
30113 else dev_kfree_skb(skb);
30114 out_vcc->push(out_vcc,new_skb);
30115- atomic_inc(&vcc->stats->tx);
30116- atomic_inc(&out_vcc->stats->rx);
30117+ atomic_inc_unchecked(&vcc->stats->tx);
30118+ atomic_inc_unchecked(&out_vcc->stats->rx);
30119 return 0;
30120 }
30121
30122@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
30123 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
30124 read_unlock(&vcc_sklist_lock);
30125 if (!out_vcc) {
30126- atomic_inc(&vcc->stats->tx_err);
30127+ atomic_inc_unchecked(&vcc->stats->tx_err);
30128 goto done;
30129 }
30130 skb_pull(skb,sizeof(struct atmtcp_hdr));
30131@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
30132 __net_timestamp(new_skb);
30133 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
30134 out_vcc->push(out_vcc,new_skb);
30135- atomic_inc(&vcc->stats->tx);
30136- atomic_inc(&out_vcc->stats->rx);
30137+ atomic_inc_unchecked(&vcc->stats->tx);
30138+ atomic_inc_unchecked(&out_vcc->stats->rx);
30139 done:
30140 if (vcc->pop) vcc->pop(vcc,skb);
30141 else dev_kfree_skb(skb);
30142diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
30143index 81e44f7..498ea36 100644
30144--- a/drivers/atm/eni.c
30145+++ b/drivers/atm/eni.c
30146@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
30147 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
30148 vcc->dev->number);
30149 length = 0;
30150- atomic_inc(&vcc->stats->rx_err);
30151+ atomic_inc_unchecked(&vcc->stats->rx_err);
30152 }
30153 else {
30154 length = ATM_CELL_SIZE-1; /* no HEC */
30155@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
30156 size);
30157 }
30158 eff = length = 0;
30159- atomic_inc(&vcc->stats->rx_err);
30160+ atomic_inc_unchecked(&vcc->stats->rx_err);
30161 }
30162 else {
30163 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
30164@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
30165 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
30166 vcc->dev->number,vcc->vci,length,size << 2,descr);
30167 length = eff = 0;
30168- atomic_inc(&vcc->stats->rx_err);
30169+ atomic_inc_unchecked(&vcc->stats->rx_err);
30170 }
30171 }
30172 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
30173@@ -767,7 +767,7 @@ rx_dequeued++;
30174 vcc->push(vcc,skb);
30175 pushed++;
30176 }
30177- atomic_inc(&vcc->stats->rx);
30178+ atomic_inc_unchecked(&vcc->stats->rx);
30179 }
30180 wake_up(&eni_dev->rx_wait);
30181 }
30182@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
30183 PCI_DMA_TODEVICE);
30184 if (vcc->pop) vcc->pop(vcc,skb);
30185 else dev_kfree_skb_irq(skb);
30186- atomic_inc(&vcc->stats->tx);
30187+ atomic_inc_unchecked(&vcc->stats->tx);
30188 wake_up(&eni_dev->tx_wait);
30189 dma_complete++;
30190 }
30191diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
30192index 86fed1b..6dc4721 100644
30193--- a/drivers/atm/firestream.c
30194+++ b/drivers/atm/firestream.c
30195@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
30196 }
30197 }
30198
30199- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
30200+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
30201
30202 fs_dprintk (FS_DEBUG_TXMEM, "i");
30203 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
30204@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
30205 #endif
30206 skb_put (skb, qe->p1 & 0xffff);
30207 ATM_SKB(skb)->vcc = atm_vcc;
30208- atomic_inc(&atm_vcc->stats->rx);
30209+ atomic_inc_unchecked(&atm_vcc->stats->rx);
30210 __net_timestamp(skb);
30211 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
30212 atm_vcc->push (atm_vcc, skb);
30213@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
30214 kfree (pe);
30215 }
30216 if (atm_vcc)
30217- atomic_inc(&atm_vcc->stats->rx_drop);
30218+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
30219 break;
30220 case 0x1f: /* Reassembly abort: no buffers. */
30221 /* Silently increment error counter. */
30222 if (atm_vcc)
30223- atomic_inc(&atm_vcc->stats->rx_drop);
30224+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
30225 break;
30226 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
30227 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
30228diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
30229index 361f5ae..7fc552d 100644
30230--- a/drivers/atm/fore200e.c
30231+++ b/drivers/atm/fore200e.c
30232@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
30233 #endif
30234 /* check error condition */
30235 if (*entry->status & STATUS_ERROR)
30236- atomic_inc(&vcc->stats->tx_err);
30237+ atomic_inc_unchecked(&vcc->stats->tx_err);
30238 else
30239- atomic_inc(&vcc->stats->tx);
30240+ atomic_inc_unchecked(&vcc->stats->tx);
30241 }
30242 }
30243
30244@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
30245 if (skb == NULL) {
30246 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
30247
30248- atomic_inc(&vcc->stats->rx_drop);
30249+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30250 return -ENOMEM;
30251 }
30252
30253@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
30254
30255 dev_kfree_skb_any(skb);
30256
30257- atomic_inc(&vcc->stats->rx_drop);
30258+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30259 return -ENOMEM;
30260 }
30261
30262 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
30263
30264 vcc->push(vcc, skb);
30265- atomic_inc(&vcc->stats->rx);
30266+ atomic_inc_unchecked(&vcc->stats->rx);
30267
30268 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
30269
30270@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
30271 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
30272 fore200e->atm_dev->number,
30273 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
30274- atomic_inc(&vcc->stats->rx_err);
30275+ atomic_inc_unchecked(&vcc->stats->rx_err);
30276 }
30277 }
30278
30279@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
30280 goto retry_here;
30281 }
30282
30283- atomic_inc(&vcc->stats->tx_err);
30284+ atomic_inc_unchecked(&vcc->stats->tx_err);
30285
30286 fore200e->tx_sat++;
30287 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
30288diff --git a/drivers/atm/he.c b/drivers/atm/he.c
30289index b182c2f..1c6fa8a 100644
30290--- a/drivers/atm/he.c
30291+++ b/drivers/atm/he.c
30292@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
30293
30294 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
30295 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
30296- atomic_inc(&vcc->stats->rx_drop);
30297+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30298 goto return_host_buffers;
30299 }
30300
30301@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
30302 RBRQ_LEN_ERR(he_dev->rbrq_head)
30303 ? "LEN_ERR" : "",
30304 vcc->vpi, vcc->vci);
30305- atomic_inc(&vcc->stats->rx_err);
30306+ atomic_inc_unchecked(&vcc->stats->rx_err);
30307 goto return_host_buffers;
30308 }
30309
30310@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
30311 vcc->push(vcc, skb);
30312 spin_lock(&he_dev->global_lock);
30313
30314- atomic_inc(&vcc->stats->rx);
30315+ atomic_inc_unchecked(&vcc->stats->rx);
30316
30317 return_host_buffers:
30318 ++pdus_assembled;
30319@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
30320 tpd->vcc->pop(tpd->vcc, tpd->skb);
30321 else
30322 dev_kfree_skb_any(tpd->skb);
30323- atomic_inc(&tpd->vcc->stats->tx_err);
30324+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
30325 }
30326 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
30327 return;
30328@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
30329 vcc->pop(vcc, skb);
30330 else
30331 dev_kfree_skb_any(skb);
30332- atomic_inc(&vcc->stats->tx_err);
30333+ atomic_inc_unchecked(&vcc->stats->tx_err);
30334 return -EINVAL;
30335 }
30336
30337@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
30338 vcc->pop(vcc, skb);
30339 else
30340 dev_kfree_skb_any(skb);
30341- atomic_inc(&vcc->stats->tx_err);
30342+ atomic_inc_unchecked(&vcc->stats->tx_err);
30343 return -EINVAL;
30344 }
30345 #endif
30346@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
30347 vcc->pop(vcc, skb);
30348 else
30349 dev_kfree_skb_any(skb);
30350- atomic_inc(&vcc->stats->tx_err);
30351+ atomic_inc_unchecked(&vcc->stats->tx_err);
30352 spin_unlock_irqrestore(&he_dev->global_lock, flags);
30353 return -ENOMEM;
30354 }
30355@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
30356 vcc->pop(vcc, skb);
30357 else
30358 dev_kfree_skb_any(skb);
30359- atomic_inc(&vcc->stats->tx_err);
30360+ atomic_inc_unchecked(&vcc->stats->tx_err);
30361 spin_unlock_irqrestore(&he_dev->global_lock, flags);
30362 return -ENOMEM;
30363 }
30364@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
30365 __enqueue_tpd(he_dev, tpd, cid);
30366 spin_unlock_irqrestore(&he_dev->global_lock, flags);
30367
30368- atomic_inc(&vcc->stats->tx);
30369+ atomic_inc_unchecked(&vcc->stats->tx);
30370
30371 return 0;
30372 }
30373diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
30374index 7d01c2a..4e3ac01 100644
30375--- a/drivers/atm/horizon.c
30376+++ b/drivers/atm/horizon.c
30377@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
30378 {
30379 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
30380 // VC layer stats
30381- atomic_inc(&vcc->stats->rx);
30382+ atomic_inc_unchecked(&vcc->stats->rx);
30383 __net_timestamp(skb);
30384 // end of our responsibility
30385 vcc->push (vcc, skb);
30386@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
30387 dev->tx_iovec = NULL;
30388
30389 // VC layer stats
30390- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
30391+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
30392
30393 // free the skb
30394 hrz_kfree_skb (skb);
30395diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
30396index 8974bd2..b856f85 100644
30397--- a/drivers/atm/idt77252.c
30398+++ b/drivers/atm/idt77252.c
30399@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
30400 else
30401 dev_kfree_skb(skb);
30402
30403- atomic_inc(&vcc->stats->tx);
30404+ atomic_inc_unchecked(&vcc->stats->tx);
30405 }
30406
30407 atomic_dec(&scq->used);
30408@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
30409 if ((sb = dev_alloc_skb(64)) == NULL) {
30410 printk("%s: Can't allocate buffers for aal0.\n",
30411 card->name);
30412- atomic_add(i, &vcc->stats->rx_drop);
30413+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
30414 break;
30415 }
30416 if (!atm_charge(vcc, sb->truesize)) {
30417 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
30418 card->name);
30419- atomic_add(i - 1, &vcc->stats->rx_drop);
30420+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
30421 dev_kfree_skb(sb);
30422 break;
30423 }
30424@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
30425 ATM_SKB(sb)->vcc = vcc;
30426 __net_timestamp(sb);
30427 vcc->push(vcc, sb);
30428- atomic_inc(&vcc->stats->rx);
30429+ atomic_inc_unchecked(&vcc->stats->rx);
30430
30431 cell += ATM_CELL_PAYLOAD;
30432 }
30433@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
30434 "(CDC: %08x)\n",
30435 card->name, len, rpp->len, readl(SAR_REG_CDC));
30436 recycle_rx_pool_skb(card, rpp);
30437- atomic_inc(&vcc->stats->rx_err);
30438+ atomic_inc_unchecked(&vcc->stats->rx_err);
30439 return;
30440 }
30441 if (stat & SAR_RSQE_CRC) {
30442 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
30443 recycle_rx_pool_skb(card, rpp);
30444- atomic_inc(&vcc->stats->rx_err);
30445+ atomic_inc_unchecked(&vcc->stats->rx_err);
30446 return;
30447 }
30448 if (skb_queue_len(&rpp->queue) > 1) {
30449@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
30450 RXPRINTK("%s: Can't alloc RX skb.\n",
30451 card->name);
30452 recycle_rx_pool_skb(card, rpp);
30453- atomic_inc(&vcc->stats->rx_err);
30454+ atomic_inc_unchecked(&vcc->stats->rx_err);
30455 return;
30456 }
30457 if (!atm_charge(vcc, skb->truesize)) {
30458@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
30459 __net_timestamp(skb);
30460
30461 vcc->push(vcc, skb);
30462- atomic_inc(&vcc->stats->rx);
30463+ atomic_inc_unchecked(&vcc->stats->rx);
30464
30465 return;
30466 }
30467@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
30468 __net_timestamp(skb);
30469
30470 vcc->push(vcc, skb);
30471- atomic_inc(&vcc->stats->rx);
30472+ atomic_inc_unchecked(&vcc->stats->rx);
30473
30474 if (skb->truesize > SAR_FB_SIZE_3)
30475 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
30476@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
30477 if (vcc->qos.aal != ATM_AAL0) {
30478 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
30479 card->name, vpi, vci);
30480- atomic_inc(&vcc->stats->rx_drop);
30481+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30482 goto drop;
30483 }
30484
30485 if ((sb = dev_alloc_skb(64)) == NULL) {
30486 printk("%s: Can't allocate buffers for AAL0.\n",
30487 card->name);
30488- atomic_inc(&vcc->stats->rx_err);
30489+ atomic_inc_unchecked(&vcc->stats->rx_err);
30490 goto drop;
30491 }
30492
30493@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
30494 ATM_SKB(sb)->vcc = vcc;
30495 __net_timestamp(sb);
30496 vcc->push(vcc, sb);
30497- atomic_inc(&vcc->stats->rx);
30498+ atomic_inc_unchecked(&vcc->stats->rx);
30499
30500 drop:
30501 skb_pull(queue, 64);
30502@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
30503
30504 if (vc == NULL) {
30505 printk("%s: NULL connection in send().\n", card->name);
30506- atomic_inc(&vcc->stats->tx_err);
30507+ atomic_inc_unchecked(&vcc->stats->tx_err);
30508 dev_kfree_skb(skb);
30509 return -EINVAL;
30510 }
30511 if (!test_bit(VCF_TX, &vc->flags)) {
30512 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
30513- atomic_inc(&vcc->stats->tx_err);
30514+ atomic_inc_unchecked(&vcc->stats->tx_err);
30515 dev_kfree_skb(skb);
30516 return -EINVAL;
30517 }
30518@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
30519 break;
30520 default:
30521 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
30522- atomic_inc(&vcc->stats->tx_err);
30523+ atomic_inc_unchecked(&vcc->stats->tx_err);
30524 dev_kfree_skb(skb);
30525 return -EINVAL;
30526 }
30527
30528 if (skb_shinfo(skb)->nr_frags != 0) {
30529 printk("%s: No scatter-gather yet.\n", card->name);
30530- atomic_inc(&vcc->stats->tx_err);
30531+ atomic_inc_unchecked(&vcc->stats->tx_err);
30532 dev_kfree_skb(skb);
30533 return -EINVAL;
30534 }
30535@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
30536
30537 err = queue_skb(card, vc, skb, oam);
30538 if (err) {
30539- atomic_inc(&vcc->stats->tx_err);
30540+ atomic_inc_unchecked(&vcc->stats->tx_err);
30541 dev_kfree_skb(skb);
30542 return err;
30543 }
30544@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
30545 skb = dev_alloc_skb(64);
30546 if (!skb) {
30547 printk("%s: Out of memory in send_oam().\n", card->name);
30548- atomic_inc(&vcc->stats->tx_err);
30549+ atomic_inc_unchecked(&vcc->stats->tx_err);
30550 return -ENOMEM;
30551 }
30552 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
30553diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
30554index 96cce6d..62c3ec5 100644
30555--- a/drivers/atm/iphase.c
30556+++ b/drivers/atm/iphase.c
30557@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
30558 status = (u_short) (buf_desc_ptr->desc_mode);
30559 if (status & (RX_CER | RX_PTE | RX_OFL))
30560 {
30561- atomic_inc(&vcc->stats->rx_err);
30562+ atomic_inc_unchecked(&vcc->stats->rx_err);
30563 IF_ERR(printk("IA: bad packet, dropping it");)
30564 if (status & RX_CER) {
30565 IF_ERR(printk(" cause: packet CRC error\n");)
30566@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
30567 len = dma_addr - buf_addr;
30568 if (len > iadev->rx_buf_sz) {
30569 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
30570- atomic_inc(&vcc->stats->rx_err);
30571+ atomic_inc_unchecked(&vcc->stats->rx_err);
30572 goto out_free_desc;
30573 }
30574
30575@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
30576 ia_vcc = INPH_IA_VCC(vcc);
30577 if (ia_vcc == NULL)
30578 {
30579- atomic_inc(&vcc->stats->rx_err);
30580+ atomic_inc_unchecked(&vcc->stats->rx_err);
30581 atm_return(vcc, skb->truesize);
30582 dev_kfree_skb_any(skb);
30583 goto INCR_DLE;
30584@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
30585 if ((length > iadev->rx_buf_sz) || (length >
30586 (skb->len - sizeof(struct cpcs_trailer))))
30587 {
30588- atomic_inc(&vcc->stats->rx_err);
30589+ atomic_inc_unchecked(&vcc->stats->rx_err);
30590 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
30591 length, skb->len);)
30592 atm_return(vcc, skb->truesize);
30593@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
30594
30595 IF_RX(printk("rx_dle_intr: skb push");)
30596 vcc->push(vcc,skb);
30597- atomic_inc(&vcc->stats->rx);
30598+ atomic_inc_unchecked(&vcc->stats->rx);
30599 iadev->rx_pkt_cnt++;
30600 }
30601 INCR_DLE:
30602@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
30603 {
30604 struct k_sonet_stats *stats;
30605 stats = &PRIV(_ia_dev[board])->sonet_stats;
30606- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
30607- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
30608- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
30609- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
30610- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
30611- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
30612- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
30613- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
30614- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
30615+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
30616+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
30617+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
30618+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
30619+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
30620+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
30621+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
30622+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
30623+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
30624 }
30625 ia_cmds.status = 0;
30626 break;
30627@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
30628 if ((desc == 0) || (desc > iadev->num_tx_desc))
30629 {
30630 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
30631- atomic_inc(&vcc->stats->tx);
30632+ atomic_inc_unchecked(&vcc->stats->tx);
30633 if (vcc->pop)
30634 vcc->pop(vcc, skb);
30635 else
30636@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
30637 ATM_DESC(skb) = vcc->vci;
30638 skb_queue_tail(&iadev->tx_dma_q, skb);
30639
30640- atomic_inc(&vcc->stats->tx);
30641+ atomic_inc_unchecked(&vcc->stats->tx);
30642 iadev->tx_pkt_cnt++;
30643 /* Increment transaction counter */
30644 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
30645
30646 #if 0
30647 /* add flow control logic */
30648- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
30649+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
30650 if (iavcc->vc_desc_cnt > 10) {
30651 vcc->tx_quota = vcc->tx_quota * 3 / 4;
30652 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
30653diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
30654index 68c7588..7036683 100644
30655--- a/drivers/atm/lanai.c
30656+++ b/drivers/atm/lanai.c
30657@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
30658 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
30659 lanai_endtx(lanai, lvcc);
30660 lanai_free_skb(lvcc->tx.atmvcc, skb);
30661- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
30662+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
30663 }
30664
30665 /* Try to fill the buffer - don't call unless there is backlog */
30666@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
30667 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
30668 __net_timestamp(skb);
30669 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
30670- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
30671+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
30672 out:
30673 lvcc->rx.buf.ptr = end;
30674 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
30675@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30676 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
30677 "vcc %d\n", lanai->number, (unsigned int) s, vci);
30678 lanai->stats.service_rxnotaal5++;
30679- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30680+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30681 return 0;
30682 }
30683 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
30684@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30685 int bytes;
30686 read_unlock(&vcc_sklist_lock);
30687 DPRINTK("got trashed rx pdu on vci %d\n", vci);
30688- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30689+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30690 lvcc->stats.x.aal5.service_trash++;
30691 bytes = (SERVICE_GET_END(s) * 16) -
30692 (((unsigned long) lvcc->rx.buf.ptr) -
30693@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30694 }
30695 if (s & SERVICE_STREAM) {
30696 read_unlock(&vcc_sklist_lock);
30697- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30698+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30699 lvcc->stats.x.aal5.service_stream++;
30700 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
30701 "PDU on VCI %d!\n", lanai->number, vci);
30702@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30703 return 0;
30704 }
30705 DPRINTK("got rx crc error on vci %d\n", vci);
30706- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30707+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30708 lvcc->stats.x.aal5.service_rxcrc++;
30709 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
30710 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
30711diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
30712index 1c70c45..300718d 100644
30713--- a/drivers/atm/nicstar.c
30714+++ b/drivers/atm/nicstar.c
30715@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30716 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
30717 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
30718 card->index);
30719- atomic_inc(&vcc->stats->tx_err);
30720+ atomic_inc_unchecked(&vcc->stats->tx_err);
30721 dev_kfree_skb_any(skb);
30722 return -EINVAL;
30723 }
30724@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30725 if (!vc->tx) {
30726 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
30727 card->index);
30728- atomic_inc(&vcc->stats->tx_err);
30729+ atomic_inc_unchecked(&vcc->stats->tx_err);
30730 dev_kfree_skb_any(skb);
30731 return -EINVAL;
30732 }
30733@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30734 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
30735 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
30736 card->index);
30737- atomic_inc(&vcc->stats->tx_err);
30738+ atomic_inc_unchecked(&vcc->stats->tx_err);
30739 dev_kfree_skb_any(skb);
30740 return -EINVAL;
30741 }
30742
30743 if (skb_shinfo(skb)->nr_frags != 0) {
30744 printk("nicstar%d: No scatter-gather yet.\n", card->index);
30745- atomic_inc(&vcc->stats->tx_err);
30746+ atomic_inc_unchecked(&vcc->stats->tx_err);
30747 dev_kfree_skb_any(skb);
30748 return -EINVAL;
30749 }
30750@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30751 }
30752
30753 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
30754- atomic_inc(&vcc->stats->tx_err);
30755+ atomic_inc_unchecked(&vcc->stats->tx_err);
30756 dev_kfree_skb_any(skb);
30757 return -EIO;
30758 }
30759- atomic_inc(&vcc->stats->tx);
30760+ atomic_inc_unchecked(&vcc->stats->tx);
30761
30762 return 0;
30763 }
30764@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30765 printk
30766 ("nicstar%d: Can't allocate buffers for aal0.\n",
30767 card->index);
30768- atomic_add(i, &vcc->stats->rx_drop);
30769+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
30770 break;
30771 }
30772 if (!atm_charge(vcc, sb->truesize)) {
30773 RXPRINTK
30774 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
30775 card->index);
30776- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
30777+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
30778 dev_kfree_skb_any(sb);
30779 break;
30780 }
30781@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30782 ATM_SKB(sb)->vcc = vcc;
30783 __net_timestamp(sb);
30784 vcc->push(vcc, sb);
30785- atomic_inc(&vcc->stats->rx);
30786+ atomic_inc_unchecked(&vcc->stats->rx);
30787 cell += ATM_CELL_PAYLOAD;
30788 }
30789
30790@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30791 if (iovb == NULL) {
30792 printk("nicstar%d: Out of iovec buffers.\n",
30793 card->index);
30794- atomic_inc(&vcc->stats->rx_drop);
30795+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30796 recycle_rx_buf(card, skb);
30797 return;
30798 }
30799@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30800 small or large buffer itself. */
30801 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
30802 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
30803- atomic_inc(&vcc->stats->rx_err);
30804+ atomic_inc_unchecked(&vcc->stats->rx_err);
30805 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
30806 NS_MAX_IOVECS);
30807 NS_PRV_IOVCNT(iovb) = 0;
30808@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30809 ("nicstar%d: Expected a small buffer, and this is not one.\n",
30810 card->index);
30811 which_list(card, skb);
30812- atomic_inc(&vcc->stats->rx_err);
30813+ atomic_inc_unchecked(&vcc->stats->rx_err);
30814 recycle_rx_buf(card, skb);
30815 vc->rx_iov = NULL;
30816 recycle_iov_buf(card, iovb);
30817@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30818 ("nicstar%d: Expected a large buffer, and this is not one.\n",
30819 card->index);
30820 which_list(card, skb);
30821- atomic_inc(&vcc->stats->rx_err);
30822+ atomic_inc_unchecked(&vcc->stats->rx_err);
30823 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
30824 NS_PRV_IOVCNT(iovb));
30825 vc->rx_iov = NULL;
30826@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30827 printk(" - PDU size mismatch.\n");
30828 else
30829 printk(".\n");
30830- atomic_inc(&vcc->stats->rx_err);
30831+ atomic_inc_unchecked(&vcc->stats->rx_err);
30832 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
30833 NS_PRV_IOVCNT(iovb));
30834 vc->rx_iov = NULL;
30835@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30836 /* skb points to a small buffer */
30837 if (!atm_charge(vcc, skb->truesize)) {
30838 push_rxbufs(card, skb);
30839- atomic_inc(&vcc->stats->rx_drop);
30840+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30841 } else {
30842 skb_put(skb, len);
30843 dequeue_sm_buf(card, skb);
30844@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30845 ATM_SKB(skb)->vcc = vcc;
30846 __net_timestamp(skb);
30847 vcc->push(vcc, skb);
30848- atomic_inc(&vcc->stats->rx);
30849+ atomic_inc_unchecked(&vcc->stats->rx);
30850 }
30851 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
30852 struct sk_buff *sb;
30853@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30854 if (len <= NS_SMBUFSIZE) {
30855 if (!atm_charge(vcc, sb->truesize)) {
30856 push_rxbufs(card, sb);
30857- atomic_inc(&vcc->stats->rx_drop);
30858+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30859 } else {
30860 skb_put(sb, len);
30861 dequeue_sm_buf(card, sb);
30862@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30863 ATM_SKB(sb)->vcc = vcc;
30864 __net_timestamp(sb);
30865 vcc->push(vcc, sb);
30866- atomic_inc(&vcc->stats->rx);
30867+ atomic_inc_unchecked(&vcc->stats->rx);
30868 }
30869
30870 push_rxbufs(card, skb);
30871@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30872
30873 if (!atm_charge(vcc, skb->truesize)) {
30874 push_rxbufs(card, skb);
30875- atomic_inc(&vcc->stats->rx_drop);
30876+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30877 } else {
30878 dequeue_lg_buf(card, skb);
30879 #ifdef NS_USE_DESTRUCTORS
30880@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30881 ATM_SKB(skb)->vcc = vcc;
30882 __net_timestamp(skb);
30883 vcc->push(vcc, skb);
30884- atomic_inc(&vcc->stats->rx);
30885+ atomic_inc_unchecked(&vcc->stats->rx);
30886 }
30887
30888 push_rxbufs(card, sb);
30889@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30890 printk
30891 ("nicstar%d: Out of huge buffers.\n",
30892 card->index);
30893- atomic_inc(&vcc->stats->rx_drop);
30894+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30895 recycle_iovec_rx_bufs(card,
30896 (struct iovec *)
30897 iovb->data,
30898@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30899 card->hbpool.count++;
30900 } else
30901 dev_kfree_skb_any(hb);
30902- atomic_inc(&vcc->stats->rx_drop);
30903+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30904 } else {
30905 /* Copy the small buffer to the huge buffer */
30906 sb = (struct sk_buff *)iov->iov_base;
30907@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30908 #endif /* NS_USE_DESTRUCTORS */
30909 __net_timestamp(hb);
30910 vcc->push(vcc, hb);
30911- atomic_inc(&vcc->stats->rx);
30912+ atomic_inc_unchecked(&vcc->stats->rx);
30913 }
30914 }
30915
30916diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
30917index 1853a45..cf2426d 100644
30918--- a/drivers/atm/solos-pci.c
30919+++ b/drivers/atm/solos-pci.c
30920@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
30921 }
30922 atm_charge(vcc, skb->truesize);
30923 vcc->push(vcc, skb);
30924- atomic_inc(&vcc->stats->rx);
30925+ atomic_inc_unchecked(&vcc->stats->rx);
30926 break;
30927
30928 case PKT_STATUS:
30929@@ -1010,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_card *card)
30930 vcc = SKB_CB(oldskb)->vcc;
30931
30932 if (vcc) {
30933- atomic_inc(&vcc->stats->tx);
30934+ atomic_inc_unchecked(&vcc->stats->tx);
30935 solos_pop(vcc, oldskb);
30936 } else
30937 dev_kfree_skb_irq(oldskb);
30938diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
30939index 0215934..ce9f5b1 100644
30940--- a/drivers/atm/suni.c
30941+++ b/drivers/atm/suni.c
30942@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
30943
30944
30945 #define ADD_LIMITED(s,v) \
30946- atomic_add((v),&stats->s); \
30947- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
30948+ atomic_add_unchecked((v),&stats->s); \
30949+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
30950
30951
30952 static void suni_hz(unsigned long from_timer)
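
ADD_LIMITED implements a saturating counter: it adds v and, if the signed value has wrapped negative, pins it at INT_MAX. Since the wrap is expected and handled by the macro itself, the patch switches it to the unchecked atomic primitives. A userspace sketch of the saturation logic (the kernel builds with wrapping signed arithmetic, so the post-add sign test is well defined there):

    #include <limits.h>

    static void add_limited(int *counter, int v)
    {
            *counter += v;               /* may wrap past INT_MAX... */
            if (*counter < 0)            /* ...which shows up as a negative value */
                    *counter = INT_MAX;  /* saturate instead of trapping */
    }
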
30953diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
30954index 5120a96..e2572bd 100644
30955--- a/drivers/atm/uPD98402.c
30956+++ b/drivers/atm/uPD98402.c
30957@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
30958 struct sonet_stats tmp;
30959 int error = 0;
30960
30961- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30962+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30963 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
30964 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
30965 if (zero && !error) {
30966@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
30967
30968
30969 #define ADD_LIMITED(s,v) \
30970- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
30971- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
30972- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30973+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
30974+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
30975+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30976
30977
30978 static void stat_event(struct atm_dev *dev)
30979@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
30980 if (reason & uPD98402_INT_PFM) stat_event(dev);
30981 if (reason & uPD98402_INT_PCO) {
30982 (void) GET(PCOCR); /* clear interrupt cause */
30983- atomic_add(GET(HECCT),
30984+ atomic_add_unchecked(GET(HECCT),
30985 &PRIV(dev)->sonet_stats.uncorr_hcs);
30986 }
30987 if ((reason & uPD98402_INT_RFO) &&
30988@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
30989 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
30990 uPD98402_INT_LOS),PIMR); /* enable them */
30991 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
30992- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30993- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
30994- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
30995+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30996+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
30997+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
30998 return 0;
30999 }
31000
31001diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
31002index abe4e20..83c4727 100644
31003--- a/drivers/atm/zatm.c
31004+++ b/drivers/atm/zatm.c
31005@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
31006 }
31007 if (!size) {
31008 dev_kfree_skb_irq(skb);
31009- if (vcc) atomic_inc(&vcc->stats->rx_err);
31010+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
31011 continue;
31012 }
31013 if (!atm_charge(vcc,skb->truesize)) {
31014@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
31015 skb->len = size;
31016 ATM_SKB(skb)->vcc = vcc;
31017 vcc->push(vcc,skb);
31018- atomic_inc(&vcc->stats->rx);
31019+ atomic_inc_unchecked(&vcc->stats->rx);
31020 }
31021 zout(pos & 0xffff,MTA(mbx));
31022 #if 0 /* probably a stupid idea */
31023@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
31024 skb_queue_head(&zatm_vcc->backlog,skb);
31025 break;
31026 }
31027- atomic_inc(&vcc->stats->tx);
31028+ atomic_inc_unchecked(&vcc->stats->tx);
31029 wake_up(&zatm_vcc->tx_wait);
31030 }
31031
31032diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
31033index 147d1a4..d0fd4b0 100644
31034--- a/drivers/base/devtmpfs.c
31035+++ b/drivers/base/devtmpfs.c
31036@@ -347,7 +347,7 @@ int devtmpfs_mount(const char *mntdir)
31037 if (!thread)
31038 return 0;
31039
31040- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
31041+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
31042 if (err)
31043 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
31044 else
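
devtmpfs calls sys_mount() from kernel context with kernel-resident strings, while the syscall's prototype declares its arguments __user. Under sparse checking, and under PaX's stricter user/kernel pointer separation, that mismatch is an error unless it is explicitly blessed, which is what the added __force_user casts do. A sketch of the annotation machinery, assuming simplified stand-ins for the compiler.h macros and a hypothetical two-argument sys_mount_like():

    #ifdef __CHECKER__
    # define __user   __attribute__((noderef, address_space(1)))
    # define __force  __attribute__((force))
    #else
    # define __user
    # define __force
    #endif
    /* "kernel buffer deliberately passed where a __user pointer is expected" */
    #define __force_user  __force __user

    extern long sys_mount_like(char __user *dev_name, char __user *dir_name);

    void mount_devtmpfs_like(const char *mntdir)
    {
            sys_mount_like((char __force_user *)"devtmpfs",
                           (char __force_user *)mntdir);
    }
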
31045diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
31046index e6ee5e8..98ad7fc 100644
31047--- a/drivers/base/power/wakeup.c
31048+++ b/drivers/base/power/wakeup.c
31049@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
31050 * They need to be modified together atomically, so it's better to use one
31051 * atomic variable to hold them both.
31052 */
31053-static atomic_t combined_event_count = ATOMIC_INIT(0);
31054+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
31055
31056 #define IN_PROGRESS_BITS (sizeof(int) * 4)
31057 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
31058
31059 static void split_counters(unsigned int *cnt, unsigned int *inpr)
31060 {
31061- unsigned int comb = atomic_read(&combined_event_count);
31062+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
31063
31064 *cnt = (comb >> IN_PROGRESS_BITS);
31065 *inpr = comb & MAX_IN_PROGRESS;
31066@@ -389,7 +389,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
31067 ws->start_prevent_time = ws->last_time;
31068
31069 /* Increment the counter of events in progress. */
31070- cec = atomic_inc_return(&combined_event_count);
31071+ cec = atomic_inc_return_unchecked(&combined_event_count);
31072
31073 trace_wakeup_source_activate(ws->name, cec);
31074 }
31075@@ -515,7 +515,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
31076 * Increment the counter of registered wakeup events and decrement the
31077 * couter of wakeup events in progress simultaneously.
31078 */
31079- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
31080+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
31081 trace_wakeup_source_deactivate(ws->name, cec);
31082
31083 split_counters(&cnt, &inpr);
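
combined_event_count packs two counters into one word so they can be updated together atomically: wakeup events in progress occupy the low IN_PROGRESS_BITS bits and registered events the high bits. Deactivation adds MAX_IN_PROGRESS, i.e. (1 << IN_PROGRESS_BITS) - 1, which in a single atomic add increments the high half and decrements the low half. A runnable demonstration of the packing arithmetic, assuming a 32-bit int:

    #include <assert.h>

    #define IN_PROGRESS_BITS  (sizeof(int) * 4)          /* 16 for a 4-byte int */
    #define MAX_IN_PROGRESS   ((1U << IN_PROGRESS_BITS) - 1)

    static void split_counters(unsigned int comb, unsigned int *cnt,
                               unsigned int *inpr)
    {
            *cnt  = comb >> IN_PROGRESS_BITS;
            *inpr = comb & MAX_IN_PROGRESS;
    }

    int main(void)
    {
            unsigned int comb = 0, cnt, inpr;

            comb += 1;                 /* activate: one event in progress */
            comb += MAX_IN_PROGRESS;   /* deactivate: +1 to cnt, -1 to inpr */
            split_counters(comb, &cnt, &inpr);
            assert(cnt == 1 && inpr == 0);
            return 0;
    }
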
31084diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
31085index ca83f96..69d4ea9 100644
31086--- a/drivers/block/cciss.c
31087+++ b/drivers/block/cciss.c
31088@@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
31089 int err;
31090 u32 cp;
31091
31092+ memset(&arg64, 0, sizeof(arg64));
31093+
31094 err = 0;
31095 err |=
31096 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
31097@@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
31098 while (!list_empty(&h->reqQ)) {
31099 c = list_entry(h->reqQ.next, CommandList_struct, list);
31100 /* can't do anything if fifo is full */
31101- if ((h->access.fifo_full(h))) {
31102+ if ((h->access->fifo_full(h))) {
31103 dev_warn(&h->pdev->dev, "fifo full\n");
31104 break;
31105 }
31106@@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
31107 h->Qdepth--;
31108
31109 /* Tell the controller execute command */
31110- h->access.submit_command(h, c);
31111+ h->access->submit_command(h, c);
31112
31113 /* Put job onto the completed Q */
31114 addQ(&h->cmpQ, c);
31115@@ -3443,17 +3445,17 @@ startio:
31116
31117 static inline unsigned long get_next_completion(ctlr_info_t *h)
31118 {
31119- return h->access.command_completed(h);
31120+ return h->access->command_completed(h);
31121 }
31122
31123 static inline int interrupt_pending(ctlr_info_t *h)
31124 {
31125- return h->access.intr_pending(h);
31126+ return h->access->intr_pending(h);
31127 }
31128
31129 static inline long interrupt_not_for_us(ctlr_info_t *h)
31130 {
31131- return ((h->access.intr_pending(h) == 0) ||
31132+ return ((h->access->intr_pending(h) == 0) ||
31133 (h->interrupts_enabled == 0));
31134 }
31135
31136@@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
31137 u32 a;
31138
31139 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
31140- return h->access.command_completed(h);
31141+ return h->access->command_completed(h);
31142
31143 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
31144 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
31145@@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
31146 trans_support & CFGTBL_Trans_use_short_tags);
31147
31148 /* Change the access methods to the performant access methods */
31149- h->access = SA5_performant_access;
31150+ h->access = &SA5_performant_access;
31151 h->transMethod = CFGTBL_Trans_Performant;
31152
31153 return;
31154@@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
31155 if (prod_index < 0)
31156 return -ENODEV;
31157 h->product_name = products[prod_index].product_name;
31158- h->access = *(products[prod_index].access);
31159+ h->access = products[prod_index].access;
31160
31161 if (cciss_board_disabled(h)) {
31162 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
31163@@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
31164 }
31165
31166 /* make sure the board interrupts are off */
31167- h->access.set_intr_mask(h, CCISS_INTR_OFF);
31168+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
31169 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
31170 if (rc)
31171 goto clean2;
31172@@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
31173 * fake ones to scoop up any residual completions.
31174 */
31175 spin_lock_irqsave(&h->lock, flags);
31176- h->access.set_intr_mask(h, CCISS_INTR_OFF);
31177+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
31178 spin_unlock_irqrestore(&h->lock, flags);
31179 free_irq(h->intr[h->intr_mode], h);
31180 rc = cciss_request_irq(h, cciss_msix_discard_completions,
31181@@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
31182 dev_info(&h->pdev->dev, "Board READY.\n");
31183 dev_info(&h->pdev->dev,
31184 "Waiting for stale completions to drain.\n");
31185- h->access.set_intr_mask(h, CCISS_INTR_ON);
31186+ h->access->set_intr_mask(h, CCISS_INTR_ON);
31187 msleep(10000);
31188- h->access.set_intr_mask(h, CCISS_INTR_OFF);
31189+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
31190
31191 rc = controller_reset_failed(h->cfgtable);
31192 if (rc)
31193@@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
31194 cciss_scsi_setup(h);
31195
31196 /* Turn the interrupts on so we can service requests */
31197- h->access.set_intr_mask(h, CCISS_INTR_ON);
31198+ h->access->set_intr_mask(h, CCISS_INTR_ON);
31199
31200 /* Get the firmware version */
31201 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
31202@@ -5210,7 +5212,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
31203 kfree(flush_buf);
31204 if (return_code != IO_OK)
31205 dev_warn(&h->pdev->dev, "Error flushing cache\n");
31206- h->access.set_intr_mask(h, CCISS_INTR_OFF);
31207+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
31208 free_irq(h->intr[h->intr_mode], h);
31209 }
31210
31211diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
31212index 7fda30e..eb5dfe0 100644
31213--- a/drivers/block/cciss.h
31214+++ b/drivers/block/cciss.h
31215@@ -101,7 +101,7 @@ struct ctlr_info
31216 /* information about each logical volume */
31217 drive_info_struct *drv[CISS_MAX_LUN];
31218
31219- struct access_method access;
31220+ struct access_method *access;
31221
31222 /* queue and queue Info */
31223 struct list_head reqQ;
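
The cciss.c/cciss.h changes above replace an embedded struct access_method, whose function pointers were copied into each writable ctlr_info, with a pointer into the per-board method table; call sites change from h->access.fn(...) to h->access->fn(...). That lets the tables themselves live in read-only memory, which is the point of grsecurity's constification work. A sketch of the pattern with hypothetical names (marking the table const here illustrates the intent; the hunks themselves only change the member to a pointer):

    struct access_method {
            void (*submit_command)(void *h, void *c);
            int  (*intr_pending)(void *h);
    };

    static void sa5_submit(void *h, void *c) { (void)h; (void)c; /* ... */ }
    static int  sa5_pending(void *h)         { (void)h; return 0; }

    static const struct access_method SA5_access = {
            .submit_command = sa5_submit,
            .intr_pending   = sa5_pending,
    };

    struct ctlr_info {
            const struct access_method *access;  /* was: struct access_method access; */
    };

    static void probe(struct ctlr_info *h)
    {
            h->access = &SA5_access;             /* was: h->access = *(...); */
            h->access->submit_command(h, 0);     /* call sites gain one deref */
    }
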
31224diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
31225index 9125bbe..eede5c8 100644
31226--- a/drivers/block/cpqarray.c
31227+++ b/drivers/block/cpqarray.c
31228@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
31229 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
31230 goto Enomem4;
31231 }
31232- hba[i]->access.set_intr_mask(hba[i], 0);
31233+ hba[i]->access->set_intr_mask(hba[i], 0);
31234 if (request_irq(hba[i]->intr, do_ida_intr,
31235 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
31236 {
31237@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
31238 add_timer(&hba[i]->timer);
31239
31240 /* Enable IRQ now that spinlock and rate limit timer are set up */
31241- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
31242+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
31243
31244 for(j=0; j<NWD; j++) {
31245 struct gendisk *disk = ida_gendisk[i][j];
31246@@ -694,7 +694,7 @@ DBGINFO(
31247 for(i=0; i<NR_PRODUCTS; i++) {
31248 if (board_id == products[i].board_id) {
31249 c->product_name = products[i].product_name;
31250- c->access = *(products[i].access);
31251+ c->access = products[i].access;
31252 break;
31253 }
31254 }
31255@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
31256 hba[ctlr]->intr = intr;
31257 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
31258 hba[ctlr]->product_name = products[j].product_name;
31259- hba[ctlr]->access = *(products[j].access);
31260+ hba[ctlr]->access = products[j].access;
31261 hba[ctlr]->ctlr = ctlr;
31262 hba[ctlr]->board_id = board_id;
31263 hba[ctlr]->pci_dev = NULL; /* not PCI */
31264@@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
31265
31266 while((c = h->reqQ) != NULL) {
31267 /* Can't do anything if we're busy */
31268- if (h->access.fifo_full(h) == 0)
31269+ if (h->access->fifo_full(h) == 0)
31270 return;
31271
31272 /* Get the first entry from the request Q */
31273@@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
31274 h->Qdepth--;
31275
31276 /* Tell the controller to do our bidding */
31277- h->access.submit_command(h, c);
31278+ h->access->submit_command(h, c);
31279
31280 /* Get onto the completion Q */
31281 addQ(&h->cmpQ, c);
31282@@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
31283 unsigned long flags;
31284 __u32 a,a1;
31285
31286- istat = h->access.intr_pending(h);
31287+ istat = h->access->intr_pending(h);
31288 /* Is this interrupt for us? */
31289 if (istat == 0)
31290 return IRQ_NONE;
31291@@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
31292 */
31293 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
31294 if (istat & FIFO_NOT_EMPTY) {
31295- while((a = h->access.command_completed(h))) {
31296+ while((a = h->access->command_completed(h))) {
31297 a1 = a; a &= ~3;
31298 if ((c = h->cmpQ) == NULL)
31299 {
31300@@ -1449,11 +1449,11 @@ static int sendcmd(
31301 /*
31302 * Disable interrupt
31303 */
31304- info_p->access.set_intr_mask(info_p, 0);
31305+ info_p->access->set_intr_mask(info_p, 0);
31306 /* Make sure there is room in the command FIFO */
31307 /* Actually it should be completely empty at this time. */
31308 for (i = 200000; i > 0; i--) {
31309- temp = info_p->access.fifo_full(info_p);
31310+ temp = info_p->access->fifo_full(info_p);
31311 if (temp != 0) {
31312 break;
31313 }
31314@@ -1466,7 +1466,7 @@ DBG(
31315 /*
31316 * Send the cmd
31317 */
31318- info_p->access.submit_command(info_p, c);
31319+ info_p->access->submit_command(info_p, c);
31320 complete = pollcomplete(ctlr);
31321
31322 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
31323@@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
31324 * we check the new geometry. Then turn interrupts back on when
31325 * we're done.
31326 */
31327- host->access.set_intr_mask(host, 0);
31328+ host->access->set_intr_mask(host, 0);
31329 getgeometry(ctlr);
31330- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
31331+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
31332
31333 for(i=0; i<NWD; i++) {
31334 struct gendisk *disk = ida_gendisk[ctlr][i];
31335@@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
31336 /* Wait (up to 2 seconds) for a command to complete */
31337
31338 for (i = 200000; i > 0; i--) {
31339- done = hba[ctlr]->access.command_completed(hba[ctlr]);
31340+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
31341 if (done == 0) {
31342 udelay(10); /* a short fixed delay */
31343 } else
31344diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
31345index be73e9d..7fbf140 100644
31346--- a/drivers/block/cpqarray.h
31347+++ b/drivers/block/cpqarray.h
31348@@ -99,7 +99,7 @@ struct ctlr_info {
31349 drv_info_t drv[NWD];
31350 struct proc_dir_entry *proc;
31351
31352- struct access_method access;
31353+ struct access_method *access;
31354
31355 cmdlist_t *reqQ;
31356 cmdlist_t *cmpQ;
31357diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
31358index b953cc7..e3dc580 100644
31359--- a/drivers/block/drbd/drbd_int.h
31360+++ b/drivers/block/drbd/drbd_int.h
31361@@ -735,7 +735,7 @@ struct drbd_request;
31362 struct drbd_epoch {
31363 struct list_head list;
31364 unsigned int barrier_nr;
31365- atomic_t epoch_size; /* increased on every request added. */
31366+ atomic_unchecked_t epoch_size; /* increased on every request added. */
31367 atomic_t active; /* increased on every req. added, and dec on every finished. */
31368 unsigned long flags;
31369 };
31370@@ -1116,7 +1116,7 @@ struct drbd_conf {
31371 void *int_dig_in;
31372 void *int_dig_vv;
31373 wait_queue_head_t seq_wait;
31374- atomic_t packet_seq;
31375+ atomic_unchecked_t packet_seq;
31376 unsigned int peer_seq;
31377 spinlock_t peer_seq_lock;
31378 unsigned int minor;
31379@@ -1658,30 +1658,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
31380
31381 static inline void drbd_tcp_cork(struct socket *sock)
31382 {
31383- int __user val = 1;
31384+ int val = 1;
31385 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
31386- (char __user *)&val, sizeof(val));
31387+ (char __force_user *)&val, sizeof(val));
31388 }
31389
31390 static inline void drbd_tcp_uncork(struct socket *sock)
31391 {
31392- int __user val = 0;
31393+ int val = 0;
31394 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
31395- (char __user *)&val, sizeof(val));
31396+ (char __force_user *)&val, sizeof(val));
31397 }
31398
31399 static inline void drbd_tcp_nodelay(struct socket *sock)
31400 {
31401- int __user val = 1;
31402+ int val = 1;
31403 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
31404- (char __user *)&val, sizeof(val));
31405+ (char __force_user *)&val, sizeof(val));
31406 }
31407
31408 static inline void drbd_tcp_quickack(struct socket *sock)
31409 {
31410- int __user val = 2;
31411+ int val = 2;
31412 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
31413- (char __user *)&val, sizeof(val));
31414+ (char __force_user *)&val, sizeof(val));
31415 }
31416
31417 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
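
Besides the usual unchecked-counter conversions, the drbd_tcp_*() hunks fix a misplaced annotation: "int __user val" claimed that a kernel stack variable lived in userspace, which only happened to work because the address-space tags are advisory. The patch declares the variable plainly and moves the override to the pointer cast at the call, the one place the mismatch is deliberate. A sketch with hypothetical stand-ins for the socket types:

    struct socket_like;              /* hypothetical stand-in */
    extern int setsockopt_like(struct socket_like *sock, int level, int optname,
                               char *optval, unsigned int optlen);

    static inline void tcp_cork_like(struct socket_like *sock)
    {
            int val = 1;             /* kernel stack storage: not __user */
            /* the real call casts with (char __force_user *)&val */
            setsockopt_like(sock, 6 /* SOL_TCP */, 3 /* TCP_CORK */,
                            (char *)&val, sizeof(val));
    }
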
31418diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
31419index f55683a..2101b96 100644
31420--- a/drivers/block/drbd/drbd_main.c
31421+++ b/drivers/block/drbd/drbd_main.c
31422@@ -2556,7 +2556,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
31423 p.sector = sector;
31424 p.block_id = block_id;
31425 p.blksize = blksize;
31426- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
31427+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
31428
31429 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
31430 return false;
31431@@ -2854,7 +2854,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
31432
31433 p.sector = cpu_to_be64(req->sector);
31434 p.block_id = (unsigned long)req;
31435- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
31436+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
31437
31438 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
31439
31440@@ -3139,7 +3139,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
31441 atomic_set(&mdev->unacked_cnt, 0);
31442 atomic_set(&mdev->local_cnt, 0);
31443 atomic_set(&mdev->net_cnt, 0);
31444- atomic_set(&mdev->packet_seq, 0);
31445+ atomic_set_unchecked(&mdev->packet_seq, 0);
31446 atomic_set(&mdev->pp_in_use, 0);
31447 atomic_set(&mdev->pp_in_use_by_net, 0);
31448 atomic_set(&mdev->rs_sect_in, 0);
31449@@ -3221,8 +3221,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
31450 mdev->receiver.t_state);
31451
31452 /* no need to lock it, I'm the only thread alive */
31453- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
31454- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
31455+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
31456+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
31457 mdev->al_writ_cnt =
31458 mdev->bm_writ_cnt =
31459 mdev->read_cnt =
31460diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
31461index edb490a..ecd69da 100644
31462--- a/drivers/block/drbd/drbd_nl.c
31463+++ b/drivers/block/drbd/drbd_nl.c
31464@@ -2407,7 +2407,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
31465 module_put(THIS_MODULE);
31466 }
31467
31468-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
31469+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
31470
31471 static unsigned short *
31472 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
31473@@ -2478,7 +2478,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
31474 cn_reply->id.idx = CN_IDX_DRBD;
31475 cn_reply->id.val = CN_VAL_DRBD;
31476
31477- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
31478+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
31479 cn_reply->ack = 0; /* not used here. */
31480 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
31481 (int)((char *)tl - (char *)reply->tag_list);
31482@@ -2510,7 +2510,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
31483 cn_reply->id.idx = CN_IDX_DRBD;
31484 cn_reply->id.val = CN_VAL_DRBD;
31485
31486- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
31487+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
31488 cn_reply->ack = 0; /* not used here. */
31489 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
31490 (int)((char *)tl - (char *)reply->tag_list);
31491@@ -2588,7 +2588,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
31492 cn_reply->id.idx = CN_IDX_DRBD;
31493 cn_reply->id.val = CN_VAL_DRBD;
31494
31495- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
31496+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
31497 cn_reply->ack = 0; // not used here.
31498 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
31499 (int)((char*)tl - (char*)reply->tag_list);
31500@@ -2627,7 +2627,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
31501 cn_reply->id.idx = CN_IDX_DRBD;
31502 cn_reply->id.val = CN_VAL_DRBD;
31503
31504- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
31505+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
31506 cn_reply->ack = 0; /* not used here. */
31507 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
31508 (int)((char *)tl - (char *)reply->tag_list);
31509diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
31510index c74ca2d..860c819 100644
31511--- a/drivers/block/drbd/drbd_receiver.c
31512+++ b/drivers/block/drbd/drbd_receiver.c
31513@@ -898,7 +898,7 @@ retry:
31514 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
31515 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
31516
31517- atomic_set(&mdev->packet_seq, 0);
31518+ atomic_set_unchecked(&mdev->packet_seq, 0);
31519 mdev->peer_seq = 0;
31520
31521 if (drbd_send_protocol(mdev) == -1)
31522@@ -999,7 +999,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
31523 do {
31524 next_epoch = NULL;
31525
31526- epoch_size = atomic_read(&epoch->epoch_size);
31527+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
31528
31529 switch (ev & ~EV_CLEANUP) {
31530 case EV_PUT:
31531@@ -1035,7 +1035,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
31532 rv = FE_DESTROYED;
31533 } else {
31534 epoch->flags = 0;
31535- atomic_set(&epoch->epoch_size, 0);
31536+ atomic_set_unchecked(&epoch->epoch_size, 0);
31537 /* atomic_set(&epoch->active, 0); is already zero */
31538 if (rv == FE_STILL_LIVE)
31539 rv = FE_RECYCLED;
31540@@ -1210,14 +1210,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
31541 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
31542 drbd_flush(mdev);
31543
31544- if (atomic_read(&mdev->current_epoch->epoch_size)) {
31545+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
31546 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
31547 if (epoch)
31548 break;
31549 }
31550
31551 epoch = mdev->current_epoch;
31552- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
31553+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
31554
31555 D_ASSERT(atomic_read(&epoch->active) == 0);
31556 D_ASSERT(epoch->flags == 0);
31557@@ -1229,11 +1229,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
31558 }
31559
31560 epoch->flags = 0;
31561- atomic_set(&epoch->epoch_size, 0);
31562+ atomic_set_unchecked(&epoch->epoch_size, 0);
31563 atomic_set(&epoch->active, 0);
31564
31565 spin_lock(&mdev->epoch_lock);
31566- if (atomic_read(&mdev->current_epoch->epoch_size)) {
31567+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
31568 list_add(&epoch->list, &mdev->current_epoch->list);
31569 mdev->current_epoch = epoch;
31570 mdev->epochs++;
31571@@ -1702,7 +1702,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
31572 spin_unlock(&mdev->peer_seq_lock);
31573
31574 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
31575- atomic_inc(&mdev->current_epoch->epoch_size);
31576+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
31577 return drbd_drain_block(mdev, data_size);
31578 }
31579
31580@@ -1732,7 +1732,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
31581
31582 spin_lock(&mdev->epoch_lock);
31583 e->epoch = mdev->current_epoch;
31584- atomic_inc(&e->epoch->epoch_size);
31585+ atomic_inc_unchecked(&e->epoch->epoch_size);
31586 atomic_inc(&e->epoch->active);
31587 spin_unlock(&mdev->epoch_lock);
31588
31589@@ -3954,7 +3954,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
31590 D_ASSERT(list_empty(&mdev->done_ee));
31591
31592 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
31593- atomic_set(&mdev->current_epoch->epoch_size, 0);
31594+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
31595 D_ASSERT(list_empty(&mdev->current_epoch->list));
31596 }
31597
31598diff --git a/drivers/block/loop.c b/drivers/block/loop.c
31599index 54046e5..7759c55 100644
31600--- a/drivers/block/loop.c
31601+++ b/drivers/block/loop.c
31602@@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
31603 mm_segment_t old_fs = get_fs();
31604
31605 set_fs(get_ds());
31606- bw = file->f_op->write(file, buf, len, &pos);
31607+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
31608 set_fs(old_fs);
31609 if (likely(bw == len))
31610 return 0;
31611diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
31612index d620b44..587561e 100644
31613--- a/drivers/cdrom/cdrom.c
31614+++ b/drivers/cdrom/cdrom.c
31615@@ -416,7 +416,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
31616 ENSURE(reset, CDC_RESET);
31617 ENSURE(generic_packet, CDC_GENERIC_PACKET);
31618 cdi->mc_flags = 0;
31619- cdo->n_minors = 0;
31620 cdi->options = CDO_USE_FFLAGS;
31621
31622 if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
31623@@ -436,8 +435,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
31624 else
31625 cdi->cdda_method = CDDA_OLD;
31626
31627- if (!cdo->generic_packet)
31628- cdo->generic_packet = cdrom_dummy_generic_packet;
31629+ if (!cdo->generic_packet) {
31630+ pax_open_kernel();
31631+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
31632+ pax_close_kernel();
31633+ }
31634
31635 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
31636 mutex_lock(&cdrom_mutex);
31637@@ -458,7 +460,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
31638 if (cdi->exit)
31639 cdi->exit(cdi);
31640
31641- cdi->ops->n_minors--;
31642 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
31643 }
31644
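register_cdrom() patches a default generic_packet handler into the driver's ops structure at registration time. With grsecurity's constification/KERNEXEC, such function-pointer tables are write-protected, so the single legitimate write is bracketed with pax_open_kernel()/pax_close_kernel(), which temporarily lift the protection (on x86, by toggling CR0.WP). A conceptual sketch, with the bracket shown as comments and the write expressed the way the patch does:

    struct cdrom_ops_like {
            int (*generic_packet)(void);   /* write-protected after boot */
    };

    static int dummy_packet(void) { return -1; }

    static void register_like(struct cdrom_ops_like *cdo)
    {
            if (!cdo->generic_packet) {
                    /* pax_open_kernel();   -- make the ops page writable */
                    *(void **)&cdo->generic_packet = (void *)dummy_packet;
                    /* pax_close_kernel();  -- restore write protection */
            }
    }
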
31645diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
31646index 75d485a..2809958 100644
31647--- a/drivers/cdrom/gdrom.c
31648+++ b/drivers/cdrom/gdrom.c
31649@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
31650 .audio_ioctl = gdrom_audio_ioctl,
31651 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
31652 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
31653- .n_minors = 1,
31654 };
31655
31656 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
31657diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
31658index 72bedad..8181ce1 100644
31659--- a/drivers/char/Kconfig
31660+++ b/drivers/char/Kconfig
31661@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
31662
31663 config DEVKMEM
31664 bool "/dev/kmem virtual device support"
31665- default y
31666+ default n
31667+ depends on !GRKERNSEC_KMEM
31668 help
31669 Say Y here if you want to support the /dev/kmem device. The
31670 /dev/kmem device is rarely used, but can be used for certain
31671@@ -581,6 +582,7 @@ config DEVPORT
31672 bool
31673 depends on !M68K
31674 depends on ISA || PCI
31675+ depends on !GRKERNSEC_KMEM
31676 default y
31677
31678 source "drivers/s390/char/Kconfig"
31679diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
31680index 2e04433..22afc64 100644
31681--- a/drivers/char/agp/frontend.c
31682+++ b/drivers/char/agp/frontend.c
31683@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
31684 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
31685 return -EFAULT;
31686
31687- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
31688+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
31689 return -EFAULT;
31690
31691 client = agp_find_client_by_pid(reserve.pid);
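
The small-looking change in agpioc_reserve_wrap() matters: the bound on the user-supplied seg_count must be computed from the size of the structure the kernel actually allocates per segment, struct agp_segment_priv, not the user-visible struct agp_segment, or the later count-times-size multiplication can wrap and under-allocate. The general guard, as a sketch with a stand-in element type:

    #include <stdlib.h>

    struct seg_priv { void *owner; int start, len; };   /* stand-in */

    void *alloc_segments(unsigned int count)
    {
            /* check against the *allocated* element size */
            if (count >= ~0U / sizeof(struct seg_priv))
                    return NULL;              /* count * size would overflow */
            return calloc(count, sizeof(struct seg_priv));
    }
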
31692diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
31693index 21cb980..f15107c 100644
31694--- a/drivers/char/genrtc.c
31695+++ b/drivers/char/genrtc.c
31696@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct file *file,
31697 switch (cmd) {
31698
31699 case RTC_PLL_GET:
31700+ memset(&pll, 0, sizeof(pll));
31701 if (get_rtc_pll(&pll))
31702 return -EINVAL;
31703 else
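
The added memset() closes a kernel stack infoleak: RTC_PLL_GET copies a struct rtc_pll_info out to userspace, and any field or padding byte that get_rtc_pll() leaves untouched would otherwise carry stale stack contents. The cciss_ioctl32_passthru() memset earlier in this patch is the same fix. The minimal shape of it, with memcpy standing in for copy_to_user and a hypothetical struct layout:

    #include <string.h>

    struct pll_info_like { int ctrl, value, max, min; /* + padding */ };

    long rtc_pll_get_like(struct pll_info_like *out /* userspace stand-in */)
    {
            struct pll_info_like pll;

            memset(&pll, 0, sizeof(pll));  /* no stale stack bytes escape */
            pll.ctrl = 1;                  /* driver fills what it knows */
            memcpy(out, &pll, sizeof(pll)); /* copy_to_user stand-in */
            return 0;
    }
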
31704diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
31705index dfd7876..c0b0885 100644
31706--- a/drivers/char/hpet.c
31707+++ b/drivers/char/hpet.c
31708@@ -571,7 +571,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
31709 }
31710
31711 static int
31712-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
31713+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
31714 struct hpet_info *info)
31715 {
31716 struct hpet_timer __iomem *timer;
31717diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
31718index a0c84bb..9edcf60 100644
31719--- a/drivers/char/ipmi/ipmi_msghandler.c
31720+++ b/drivers/char/ipmi/ipmi_msghandler.c
31721@@ -420,7 +420,7 @@ struct ipmi_smi {
31722 struct proc_dir_entry *proc_dir;
31723 char proc_dir_name[10];
31724
31725- atomic_t stats[IPMI_NUM_STATS];
31726+ atomic_unchecked_t stats[IPMI_NUM_STATS];
31727
31728 /*
31729 * run_to_completion duplicate of smb_info, smi_info
31730@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
31731
31732
31733 #define ipmi_inc_stat(intf, stat) \
31734- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
31735+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
31736 #define ipmi_get_stat(intf, stat) \
31737- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
31738+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
31739
31740 static int is_lan_addr(struct ipmi_addr *addr)
31741 {
31742@@ -2884,7 +2884,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
31743 INIT_LIST_HEAD(&intf->cmd_rcvrs);
31744 init_waitqueue_head(&intf->waitq);
31745 for (i = 0; i < IPMI_NUM_STATS; i++)
31746- atomic_set(&intf->stats[i], 0);
31747+ atomic_set_unchecked(&intf->stats[i], 0);
31748
31749 intf->proc_dir = NULL;
31750
31751diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
31752index 32a6c7e..f6966a9 100644
31753--- a/drivers/char/ipmi/ipmi_si_intf.c
31754+++ b/drivers/char/ipmi/ipmi_si_intf.c
31755@@ -275,7 +275,7 @@ struct smi_info {
31756 unsigned char slave_addr;
31757
31758 /* Counters and things for the proc filesystem. */
31759- atomic_t stats[SI_NUM_STATS];
31760+ atomic_unchecked_t stats[SI_NUM_STATS];
31761
31762 struct task_struct *thread;
31763
31764@@ -284,9 +284,9 @@ struct smi_info {
31765 };
31766
31767 #define smi_inc_stat(smi, stat) \
31768- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
31769+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
31770 #define smi_get_stat(smi, stat) \
31771- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
31772+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
31773
31774 #define SI_MAX_PARMS 4
31775
31776@@ -3225,7 +3225,7 @@ static int try_smi_init(struct smi_info *new_smi)
31777 atomic_set(&new_smi->req_events, 0);
31778 new_smi->run_to_completion = 0;
31779 for (i = 0; i < SI_NUM_STATS; i++)
31780- atomic_set(&new_smi->stats[i], 0);
31781+ atomic_set_unchecked(&new_smi->stats[i], 0);
31782
31783 new_smi->interrupt_disabled = 1;
31784 atomic_set(&new_smi->stop_operation, 0);
31785diff --git a/drivers/char/mem.c b/drivers/char/mem.c
31786index 0537903..121c699 100644
31787--- a/drivers/char/mem.c
31788+++ b/drivers/char/mem.c
31789@@ -18,6 +18,7 @@
31790 #include <linux/raw.h>
31791 #include <linux/tty.h>
31792 #include <linux/capability.h>
31793+#include <linux/security.h>
31794 #include <linux/ptrace.h>
31795 #include <linux/device.h>
31796 #include <linux/highmem.h>
31797@@ -37,6 +38,10 @@
31798
31799 #define DEVPORT_MINOR 4
31800
31801+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31802+extern const struct file_operations grsec_fops;
31803+#endif
31804+
31805 static inline unsigned long size_inside_page(unsigned long start,
31806 unsigned long size)
31807 {
31808@@ -68,9 +73,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31809
31810 while (cursor < to) {
31811 if (!devmem_is_allowed(pfn)) {
31812+#ifdef CONFIG_GRKERNSEC_KMEM
31813+ gr_handle_mem_readwrite(from, to);
31814+#else
31815 printk(KERN_INFO
31816 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
31817 current->comm, from, to);
31818+#endif
31819 return 0;
31820 }
31821 cursor += PAGE_SIZE;
31822@@ -78,6 +87,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31823 }
31824 return 1;
31825 }
31826+#elif defined(CONFIG_GRKERNSEC_KMEM)
31827+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31828+{
31829+ return 0;
31830+}
31831 #else
31832 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31833 {
31834@@ -120,6 +134,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
31835
31836 while (count > 0) {
31837 unsigned long remaining;
31838+ char *temp;
31839
31840 sz = size_inside_page(p, count);
31841
31842@@ -135,7 +150,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
31843 if (!ptr)
31844 return -EFAULT;
31845
31846- remaining = copy_to_user(buf, ptr, sz);
31847+#ifdef CONFIG_PAX_USERCOPY
31848+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
31849+ if (!temp) {
31850+ unxlate_dev_mem_ptr(p, ptr);
31851+ return -ENOMEM;
31852+ }
31853+ memcpy(temp, ptr, sz);
31854+#else
31855+ temp = ptr;
31856+#endif
31857+
31858+ remaining = copy_to_user(buf, temp, sz);
31859+
31860+#ifdef CONFIG_PAX_USERCOPY
31861+ kfree(temp);
31862+#endif
31863+
31864 unxlate_dev_mem_ptr(p, ptr);
31865 if (remaining)
31866 return -EFAULT;
31867@@ -398,9 +429,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31868 size_t count, loff_t *ppos)
31869 {
31870 unsigned long p = *ppos;
31871- ssize_t low_count, read, sz;
31872+ ssize_t low_count, read, sz, err = 0;
31873 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
31874- int err = 0;
31875
31876 read = 0;
31877 if (p < (unsigned long) high_memory) {
31878@@ -422,6 +452,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31879 }
31880 #endif
31881 while (low_count > 0) {
31882+ char *temp;
31883+
31884 sz = size_inside_page(p, low_count);
31885
31886 /*
31887@@ -431,7 +463,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31888 */
31889 kbuf = xlate_dev_kmem_ptr((char *)p);
31890
31891- if (copy_to_user(buf, kbuf, sz))
31892+#ifdef CONFIG_PAX_USERCOPY
31893+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
31894+ if (!temp)
31895+ return -ENOMEM;
31896+ memcpy(temp, kbuf, sz);
31897+#else
31898+ temp = kbuf;
31899+#endif
31900+
31901+ err = copy_to_user(buf, temp, sz);
31902+
31903+#ifdef CONFIG_PAX_USERCOPY
31904+ kfree(temp);
31905+#endif
31906+
31907+ if (err)
31908 return -EFAULT;
31909 buf += sz;
31910 p += sz;
31911@@ -833,6 +880,9 @@ static const struct memdev {
31912 #ifdef CONFIG_CRASH_DUMP
31913 [12] = { "oldmem", 0, &oldmem_fops, NULL },
31914 #endif
31915+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31916+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
31917+#endif
31918 };
31919
31920 static int memory_open(struct inode *inode, struct file *filp)
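
CONFIG_PAX_USERCOPY restricts copy_to_user() sources to objects it can bounds-check (whitelisted slab caches and the stack). /dev/mem and /dev/kmem read from arbitrary kernel mappings, so the patch stages each chunk through a kmalloc'ed buffer allocated with GFP_USERCOPY before copying it out. A userspace-flavored sketch of the bounce path, with malloc/memcpy as stand-ins for the kernel primitives:

    #include <stdlib.h>
    #include <string.h>

    static long read_mem_like(char *dst, const char *src, size_t sz)
    {
            char *temp = malloc(sz);  /* kmalloc(sz, GFP_KERNEL|GFP_USERCOPY) */
            if (!temp)
                    return -12;       /* -ENOMEM */
            memcpy(temp, src, sz);    /* stage into a whitelisted object */
            memcpy(dst, temp, sz);    /* copy_to_user(buf, temp, sz) */
            free(temp);
            return 0;
    }
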
31921diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
31922index 9df78e2..01ba9ae 100644
31923--- a/drivers/char/nvram.c
31924+++ b/drivers/char/nvram.c
31925@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
31926
31927 spin_unlock_irq(&rtc_lock);
31928
31929- if (copy_to_user(buf, contents, tmp - contents))
31930+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
31931 return -EFAULT;
31932
31933 *ppos = i;
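
Note: the nvram.c change is a belt-and-braces bound on the copy length; even if the read loop ever advanced tmp past the end of the on-stack contents[] array, the copy back to user space now fails instead of leaking adjacent stack memory. A hedged sketch of the pattern (copy_bounded() and its signature are invented for illustration):

#include <errno.h>
#include <string.h>

/* refuse the copy outright if the computed length exceeds the source
 * buffer; mirrors "tmp - contents > sizeof(contents)" in the hunk */
static int copy_bounded(char *dst, const char *buf, const char *end,
                        size_t cap)
{
        size_t len = (size_t)(end - buf);

        if (len > cap)
                return -EFAULT;
        memcpy(dst, buf, len);
        return 0;
}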
31934diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
31935index 21721d2..4e98777 100644
31936--- a/drivers/char/pcmcia/synclink_cs.c
31937+++ b/drivers/char/pcmcia/synclink_cs.c
31938@@ -2346,9 +2346,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
31939
31940 if (debug_level >= DEBUG_LEVEL_INFO)
31941 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
31942- __FILE__,__LINE__, info->device_name, port->count);
31943+ __FILE__,__LINE__, info->device_name, atomic_read(&port->count));
31944
31945- WARN_ON(!port->count);
31946+ WARN_ON(!atomic_read(&port->count));
31947
31948 if (tty_port_close_start(port, tty, filp) == 0)
31949 goto cleanup;
31950@@ -2366,7 +2366,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
31951 cleanup:
31952 if (debug_level >= DEBUG_LEVEL_INFO)
31953 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__,__LINE__,
31954- tty->driver->name, port->count);
31955+ tty->driver->name, atomic_read(&port->count));
31956 }
31957
31958 /* Wait until the transmitter is empty.
31959@@ -2508,7 +2508,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
31960
31961 if (debug_level >= DEBUG_LEVEL_INFO)
31962 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
31963- __FILE__,__LINE__,tty->driver->name, port->count);
31964+ __FILE__,__LINE__,tty->driver->name, atomic_read(&port->count));
31965
31966 /* If port is closing, signal caller to try again */
31967 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
31968@@ -2528,11 +2528,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
31969 goto cleanup;
31970 }
31971 spin_lock(&port->lock);
31972- port->count++;
31973+ atomic_inc(&port->count);
31974 spin_unlock(&port->lock);
31975 spin_unlock_irqrestore(&info->netlock, flags);
31976
31977- if (port->count == 1) {
31978+ if (atomic_read(&port->count) == 1) {
31979 /* 1st open on this device, init hardware */
31980 retval = startup(info, tty);
31981 if (retval < 0)
31982@@ -3886,7 +3886,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
31983 unsigned short new_crctype;
31984
31985 /* return error if TTY interface open */
31986- if (info->port.count)
31987+ if (atomic_read(&info->port.count))
31988 return -EBUSY;
31989
31990 switch (encoding)
31991@@ -3989,7 +3989,7 @@ static int hdlcdev_open(struct net_device *dev)
31992
31993 /* arbitrate between network and tty opens */
31994 spin_lock_irqsave(&info->netlock, flags);
31995- if (info->port.count != 0 || info->netcount != 0) {
31996+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
31997 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
31998 spin_unlock_irqrestore(&info->netlock, flags);
31999 return -EBUSY;
32000@@ -4078,7 +4078,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
32001 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
32002
32003 /* return error if TTY interface open */
32004- if (info->port.count)
32005+ if (atomic_read(&info->port.count))
32006 return -EBUSY;
32007
32008 if (cmd != SIOCWANDEV)
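
Note: throughout synclink_cs.c the tty port open count moves from a plain int to an atomic counter (the patch converts struct tty_port.count to atomic_t elsewhere), so paths such as hdlcdev_attach() and hdlcdev_ioctl() can test it without holding the port spinlock, and under PaX REFCOUNT the increment is overflow-checked. A C11 sketch of the same idea, with invented names:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int open_count;   /* the patched port->count */

/* fetch_add returns the prior value, so we learn atomically whether we
 * were the first opener (atomic_read(...) == 1 after atomic_inc) */
static bool port_open(void)
{
        return atomic_fetch_add(&open_count, 1) == 0;
}

/* readers such as hdlcdev_open() can now test the count locklessly */
static bool port_in_use(void)
{
        return atomic_load(&open_count) != 0;
}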
32009diff --git a/drivers/char/random.c b/drivers/char/random.c
32010index b86eae9..b9c2ed7 100644
32011--- a/drivers/char/random.c
32012+++ b/drivers/char/random.c
32013@@ -272,8 +272,13 @@
32014 /*
32015 * Configuration information
32016 */
32017+#ifdef CONFIG_GRKERNSEC_RANDNET
32018+#define INPUT_POOL_WORDS 512
32019+#define OUTPUT_POOL_WORDS 128
32020+#else
32021 #define INPUT_POOL_WORDS 128
32022 #define OUTPUT_POOL_WORDS 32
32023+#endif
32024 #define SEC_XFER_SIZE 512
32025 #define EXTRACT_SIZE 10
32026
32027@@ -313,10 +318,17 @@ static struct poolinfo {
32028 int poolwords;
32029 int tap1, tap2, tap3, tap4, tap5;
32030 } poolinfo_table[] = {
32031+#ifdef CONFIG_GRKERNSEC_RANDNET
32032+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
32033+ { 512, 411, 308, 208, 104, 1 },
32034+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
32035+ { 128, 103, 76, 51, 25, 1 },
32036+#else
32037 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
32038 { 128, 103, 76, 51, 25, 1 },
32039 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
32040 { 32, 26, 20, 14, 7, 1 },
32041+#endif
32042 #if 0
32043 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
32044 { 2048, 1638, 1231, 819, 411, 1 },
32045@@ -437,6 +449,7 @@ struct entropy_store {
32046 int entropy_count;
32047 int entropy_total;
32048 unsigned int initialized:1;
32049+ bool last_data_init;
32050 __u8 last_data[EXTRACT_SIZE];
32051 };
32052
32053@@ -527,8 +540,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
32054 input_rotate += i ? 7 : 14;
32055 }
32056
32057- ACCESS_ONCE(r->input_rotate) = input_rotate;
32058- ACCESS_ONCE(r->add_ptr) = i;
32059+ ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
32060+ ACCESS_ONCE_RW(r->add_ptr) = i;
32061 smp_wmb();
32062
32063 if (out)
32064@@ -957,6 +970,10 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
32065 ssize_t ret = 0, i;
32066 __u8 tmp[EXTRACT_SIZE];
32067
32068+ /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
32069+ if (fips_enabled && !r->last_data_init)
32070+ nbytes += EXTRACT_SIZE;
32071+
32072 trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_);
32073 xfer_secondary_pool(r, nbytes);
32074 nbytes = account(r, nbytes, min, reserved);
32075@@ -967,6 +984,17 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
32076 if (fips_enabled) {
32077 unsigned long flags;
32078
32079+
32080+ /* prime last_data value if need be, per fips 140-2 */
32081+ if (!r->last_data_init) {
32082+ spin_lock_irqsave(&r->lock, flags);
32083+ memcpy(r->last_data, tmp, EXTRACT_SIZE);
32084+ r->last_data_init = true;
32085+ nbytes -= EXTRACT_SIZE;
32086+ spin_unlock_irqrestore(&r->lock, flags);
32087+ extract_buf(r, tmp);
32088+ }
32089+
32090 spin_lock_irqsave(&r->lock, flags);
32091 if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
32092 panic("Hardware RNG duplicated output!\n");
32093@@ -1008,7 +1036,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
32094
32095 extract_buf(r, tmp);
32096 i = min_t(int, nbytes, EXTRACT_SIZE);
32097- if (copy_to_user(buf, tmp, i)) {
32098+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
32099 ret = -EFAULT;
32100 break;
32101 }
32102@@ -1086,6 +1114,7 @@ static void init_std_data(struct entropy_store *r)
32103
32104 r->entropy_count = 0;
32105 r->entropy_total = 0;
32106+ r->last_data_init = false;
32107 mix_pool_bytes(r, &now, sizeof(now), NULL);
32108 for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
32109 if (!arch_get_random_long(&rv))
32110@@ -1342,7 +1371,7 @@ EXPORT_SYMBOL(generate_random_uuid);
32111 #include <linux/sysctl.h>
32112
32113 static int min_read_thresh = 8, min_write_thresh;
32114-static int max_read_thresh = INPUT_POOL_WORDS * 32;
32115+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
32116 static int max_write_thresh = INPUT_POOL_WORDS * 32;
32117 static char sysctl_bootid[16];
32118
32119diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
32120index 9b4f011..b7e0a1a 100644
32121--- a/drivers/char/sonypi.c
32122+++ b/drivers/char/sonypi.c
32123@@ -54,6 +54,7 @@
32124
32125 #include <asm/uaccess.h>
32126 #include <asm/io.h>
32127+#include <asm/local.h>
32128
32129 #include <linux/sonypi.h>
32130
32131@@ -490,7 +491,7 @@ static struct sonypi_device {
32132 spinlock_t fifo_lock;
32133 wait_queue_head_t fifo_proc_list;
32134 struct fasync_struct *fifo_async;
32135- int open_count;
32136+ local_t open_count;
32137 int model;
32138 struct input_dev *input_jog_dev;
32139 struct input_dev *input_key_dev;
32140@@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
32141 static int sonypi_misc_release(struct inode *inode, struct file *file)
32142 {
32143 mutex_lock(&sonypi_device.lock);
32144- sonypi_device.open_count--;
32145+ local_dec(&sonypi_device.open_count);
32146 mutex_unlock(&sonypi_device.lock);
32147 return 0;
32148 }
32149@@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
32150 {
32151 mutex_lock(&sonypi_device.lock);
32152 /* Flush input queue on first open */
32153- if (!sonypi_device.open_count)
32154+ if (!local_read(&sonypi_device.open_count))
32155 kfifo_reset(&sonypi_device.fifo);
32156- sonypi_device.open_count++;
32157+ local_inc(&sonypi_device.open_count);
32158 mutex_unlock(&sonypi_device.lock);
32159
32160 return 0;
32161diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
32162index 93211df..c7805f7 100644
32163--- a/drivers/char/tpm/tpm.c
32164+++ b/drivers/char/tpm/tpm.c
32165@@ -410,7 +410,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
32166 chip->vendor.req_complete_val)
32167 goto out_recv;
32168
32169- if ((status == chip->vendor.req_canceled)) {
32170+ if (status == chip->vendor.req_canceled) {
32171 dev_err(chip->dev, "Operation Canceled\n");
32172 rc = -ECANCELED;
32173 goto out;
32174diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
32175index 56051d0..11cf3b7 100644
32176--- a/drivers/char/tpm/tpm_acpi.c
32177+++ b/drivers/char/tpm/tpm_acpi.c
32178@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
32179 virt = acpi_os_map_memory(start, len);
32180 if (!virt) {
32181 kfree(log->bios_event_log);
32182+ log->bios_event_log = NULL;
32183 printk("%s: ERROR - Unable to map memory\n", __func__);
32184 return -EIO;
32185 }
32186
32187- memcpy_fromio(log->bios_event_log, virt, len);
32188+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
32189
32190 acpi_os_unmap_memory(virt, len);
32191 return 0;
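
Note: the tpm_acpi.c fix nulls the freed pointer so a later error or cleanup path cannot free or dereference a stale bios_event_log. Sketch of the idiom (log_release() is illustrative):

#include <stdlib.h>

struct bios_log {
        char *bios_event_log;
};

/* clearing the pointer after freeing makes a later
 * kfree(log->bios_event_log) a harmless no-op instead of a double free */
static void log_release(struct bios_log *log)
{
        free(log->bios_event_log);
        log->bios_event_log = NULL;
}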
32192diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
32193index 84ddc55..1d32f1e 100644
32194--- a/drivers/char/tpm/tpm_eventlog.c
32195+++ b/drivers/char/tpm/tpm_eventlog.c
32196@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
32197 event = addr;
32198
32199 if ((event->event_type == 0 && event->event_size == 0) ||
32200- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
32201+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
32202 return NULL;
32203
32204 return addr;
32205@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
32206 return NULL;
32207
32208 if ((event->event_type == 0 && event->event_size == 0) ||
32209- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
32210+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
32211 return NULL;
32212
32213 (*pos)++;
32214@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
32215 int i;
32216
32217 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
32218- seq_putc(m, data[i]);
32219+ if (!seq_putc(m, data[i]))
32220+ return -EFAULT;
32221
32222 return 0;
32223 }
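
Note: the two tpm_eventlog.c bounds checks are rewritten so the attacker-influenced event_size is compared on its own rather than added to a pointer first; the old form could wrap and sail past the check. A sketch under a stated assumption (event_fits() is an invented helper; it presumes the caller already verified addr + header <= limit, as the surrounding seq_file code does):

#include <stddef.h>

/* keep the untrusted size on the left of the comparison and only
 * subtract known-good quantities on the right, so nothing can wrap */
static int event_fits(const char *addr, const char *limit,
                      size_t header, size_t size)
{
        /* assumption: addr + header <= limit was checked by the caller */
        return size < (size_t)(limit - addr) - header;
}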
32224diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
32225index 8ab9c3d..c3e65d3 100644
32226--- a/drivers/char/virtio_console.c
32227+++ b/drivers/char/virtio_console.c
32228@@ -622,7 +622,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
32229 if (to_user) {
32230 ssize_t ret;
32231
32232- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
32233+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
32234 if (ret)
32235 return -EFAULT;
32236 } else {
32237@@ -721,7 +721,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
32238 if (!port_has_data(port) && !port->host_connected)
32239 return 0;
32240
32241- return fill_readbuf(port, ubuf, count, true);
32242+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
32243 }
32244
32245 static int wait_port_writable(struct port *port, bool nonblock)
32246diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
32247index e164c55..3aabb50 100644
32248--- a/drivers/edac/edac_pci_sysfs.c
32249+++ b/drivers/edac/edac_pci_sysfs.c
32250@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
32251 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
32252 static int edac_pci_poll_msec = 1000; /* one second workq period */
32253
32254-static atomic_t pci_parity_count = ATOMIC_INIT(0);
32255-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
32256+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
32257+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
32258
32259 static struct kobject *edac_pci_top_main_kobj;
32260 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
32261@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32262 edac_printk(KERN_CRIT, EDAC_PCI,
32263 "Signaled System Error on %s\n",
32264 pci_name(dev));
32265- atomic_inc(&pci_nonparity_count);
32266+ atomic_inc_unchecked(&pci_nonparity_count);
32267 }
32268
32269 if (status & (PCI_STATUS_PARITY)) {
32270@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32271 "Master Data Parity Error on %s\n",
32272 pci_name(dev));
32273
32274- atomic_inc(&pci_parity_count);
32275+ atomic_inc_unchecked(&pci_parity_count);
32276 }
32277
32278 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32279@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32280 "Detected Parity Error on %s\n",
32281 pci_name(dev));
32282
32283- atomic_inc(&pci_parity_count);
32284+ atomic_inc_unchecked(&pci_parity_count);
32285 }
32286 }
32287
32288@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32289 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
32290 "Signaled System Error on %s\n",
32291 pci_name(dev));
32292- atomic_inc(&pci_nonparity_count);
32293+ atomic_inc_unchecked(&pci_nonparity_count);
32294 }
32295
32296 if (status & (PCI_STATUS_PARITY)) {
32297@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32298 "Master Data Parity Error on "
32299 "%s\n", pci_name(dev));
32300
32301- atomic_inc(&pci_parity_count);
32302+ atomic_inc_unchecked(&pci_parity_count);
32303 }
32304
32305 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32306@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32307 "Detected Parity Error on %s\n",
32308 pci_name(dev));
32309
32310- atomic_inc(&pci_parity_count);
32311+ atomic_inc_unchecked(&pci_parity_count);
32312 }
32313 }
32314 }
32315@@ -676,7 +676,7 @@ void edac_pci_do_parity_check(void)
32316 if (!check_pci_errors)
32317 return;
32318
32319- before_count = atomic_read(&pci_parity_count);
32320+ before_count = atomic_read_unchecked(&pci_parity_count);
32321
32322 /* scan all PCI devices looking for a Parity Error on devices and
32323 * bridges.
32324@@ -688,7 +688,7 @@ void edac_pci_do_parity_check(void)
32325 /* Only if operator has selected panic on PCI Error */
32326 if (edac_pci_get_panic_on_pe()) {
32327 /* If the count is different 'after' from 'before' */
32328- if (before_count != atomic_read(&pci_parity_count))
32329+ if (before_count != atomic_read_unchecked(&pci_parity_count))
32330 panic("EDAC: PCI Parity Error");
32331 }
32332 }
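
Note: the edac counters switch to atomic_unchecked_t, which under PaX REFCOUNT exempts them from overflow trapping; they are statistics that may legitimately wrap, unlike reference counts, where wrapping is exactly the bug being caught. For contrast, a sketch of what the checked variant defends against (checked_inc() is illustrative, not the PaX implementation):

#include <limits.h>

/* a refcount wrapping past INT_MAX can later reach zero early and free
 * an object that is still referenced; a checked increment traps first */
static int checked_inc(int *v)
{
        if (*v == INT_MAX)
                return -1;      /* PaX would report and saturate here */
        (*v)++;
        return 0;
}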
32333diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
32334index 8c87a5e..a19cbd7 100644
32335--- a/drivers/edac/mce_amd.h
32336+++ b/drivers/edac/mce_amd.h
32337@@ -80,7 +80,7 @@ extern const char * const ii_msgs[];
32338 struct amd_decoder_ops {
32339 bool (*dc_mce)(u16, u8);
32340 bool (*ic_mce)(u16, u8);
32341-};
32342+} __no_const;
32343
32344 void amd_report_gart_errors(bool);
32345 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
32346diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
32347index 57ea7f4..789e3c3 100644
32348--- a/drivers/firewire/core-card.c
32349+++ b/drivers/firewire/core-card.c
32350@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
32351
32352 void fw_core_remove_card(struct fw_card *card)
32353 {
32354- struct fw_card_driver dummy_driver = dummy_driver_template;
32355+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
32356
32357 card->driver->update_phy_reg(card, 4,
32358 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
32359diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
32360index f8d2287..5aaf4db 100644
32361--- a/drivers/firewire/core-cdev.c
32362+++ b/drivers/firewire/core-cdev.c
32363@@ -1365,8 +1365,7 @@ static int init_iso_resource(struct client *client,
32364 int ret;
32365
32366 if ((request->channels == 0 && request->bandwidth == 0) ||
32367- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
32368- request->bandwidth < 0)
32369+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
32370 return -EINVAL;
32371
32372 r = kmalloc(sizeof(*r), GFP_KERNEL);
32373diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
32374index 28a94c7..58da63a 100644
32375--- a/drivers/firewire/core-transaction.c
32376+++ b/drivers/firewire/core-transaction.c
32377@@ -38,6 +38,7 @@
32378 #include <linux/timer.h>
32379 #include <linux/types.h>
32380 #include <linux/workqueue.h>
32381+#include <linux/sched.h>
32382
32383 #include <asm/byteorder.h>
32384
32385diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
32386index 515a42c..5ecf3ba 100644
32387--- a/drivers/firewire/core.h
32388+++ b/drivers/firewire/core.h
32389@@ -111,6 +111,7 @@ struct fw_card_driver {
32390
32391 int (*stop_iso)(struct fw_iso_context *ctx);
32392 };
32393+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
32394
32395 void fw_card_initialize(struct fw_card *card,
32396 const struct fw_card_driver *driver, struct device *device);
32397diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
32398index b298158..7ed8432 100644
32399--- a/drivers/firmware/dmi_scan.c
32400+++ b/drivers/firmware/dmi_scan.c
32401@@ -452,11 +452,6 @@ void __init dmi_scan_machine(void)
32402 }
32403 }
32404 else {
32405- /*
32406- * no iounmap() for that ioremap(); it would be a no-op, but
32407- * it's so early in setup that sucker gets confused into doing
32408- * what it shouldn't if we actually call it.
32409- */
32410 p = dmi_ioremap(0xF0000, 0x10000);
32411 if (p == NULL)
32412 goto error;
32413@@ -726,7 +721,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
32414 if (buf == NULL)
32415 return -1;
32416
32417- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
32418+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
32419
32420 iounmap(buf);
32421 return 0;
32422diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
32423index d10c987..ebe5400 100644
32424--- a/drivers/firmware/efivars.c
32425+++ b/drivers/firmware/efivars.c
32426@@ -1234,9 +1234,9 @@ efivars_init(void)
32427 return -ENOMEM;
32428 }
32429
32430- ops.get_variable = efi.get_variable;
32431- ops.set_variable = efi.set_variable;
32432- ops.get_next_variable = efi.get_next_variable;
32433+ *(void **)&ops.get_variable = efi.get_variable;
32434+ *(void **)&ops.set_variable = efi.set_variable;
32435+ *(void **)&ops.get_next_variable = efi.get_next_variable;
32436 error = register_efivars(&__efivars, &ops, efi_kobj);
32437 if (error)
32438 goto err_put;
32439diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
32440index 82d5c20..44a7177 100644
32441--- a/drivers/gpio/gpio-vr41xx.c
32442+++ b/drivers/gpio/gpio-vr41xx.c
32443@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
32444 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
32445 maskl, pendl, maskh, pendh);
32446
32447- atomic_inc(&irq_err_count);
32448+ atomic_inc_unchecked(&irq_err_count);
32449
32450 return -EINVAL;
32451 }
32452diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
32453index 1227adf..f2301c2 100644
32454--- a/drivers/gpu/drm/drm_crtc_helper.c
32455+++ b/drivers/gpu/drm/drm_crtc_helper.c
32456@@ -286,7 +286,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
32457 struct drm_crtc *tmp;
32458 int crtc_mask = 1;
32459
32460- WARN(!crtc, "checking null crtc?\n");
32461+ BUG_ON(!crtc);
32462
32463 dev = crtc->dev;
32464
32465diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
32466index be174ca..0bcbb71 100644
32467--- a/drivers/gpu/drm/drm_drv.c
32468+++ b/drivers/gpu/drm/drm_drv.c
32469@@ -307,7 +307,7 @@ module_exit(drm_core_exit);
32470 /**
32471 * Copy and IOCTL return string to user space
32472 */
32473-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
32474+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
32475 {
32476 int len;
32477
32478@@ -390,7 +390,7 @@ long drm_ioctl(struct file *filp,
32479 return -ENODEV;
32480
32481 atomic_inc(&dev->ioctl_count);
32482- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
32483+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
32484 ++file_priv->ioctl_count;
32485
32486 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
32487diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
32488index 133b413..fd68225 100644
32489--- a/drivers/gpu/drm/drm_fops.c
32490+++ b/drivers/gpu/drm/drm_fops.c
32491@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
32492 }
32493
32494 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
32495- atomic_set(&dev->counts[i], 0);
32496+ atomic_set_unchecked(&dev->counts[i], 0);
32497
32498 dev->sigdata.lock = NULL;
32499
32500@@ -134,7 +134,7 @@ int drm_open(struct inode *inode, struct file *filp)
32501 if (drm_device_is_unplugged(dev))
32502 return -ENODEV;
32503
32504- if (!dev->open_count++)
32505+ if (local_inc_return(&dev->open_count) == 1)
32506 need_setup = 1;
32507 mutex_lock(&dev->struct_mutex);
32508 old_mapping = dev->dev_mapping;
32509@@ -149,7 +149,7 @@ int drm_open(struct inode *inode, struct file *filp)
32510 retcode = drm_open_helper(inode, filp, dev);
32511 if (retcode)
32512 goto err_undo;
32513- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
32514+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
32515 if (need_setup) {
32516 retcode = drm_setup(dev);
32517 if (retcode)
32518@@ -164,7 +164,7 @@ err_undo:
32519 iput(container_of(dev->dev_mapping, struct inode, i_data));
32520 dev->dev_mapping = old_mapping;
32521 mutex_unlock(&dev->struct_mutex);
32522- dev->open_count--;
32523+ local_dec(&dev->open_count);
32524 return retcode;
32525 }
32526 EXPORT_SYMBOL(drm_open);
32527@@ -438,7 +438,7 @@ int drm_release(struct inode *inode, struct file *filp)
32528
32529 mutex_lock(&drm_global_mutex);
32530
32531- DRM_DEBUG("open_count = %d\n", dev->open_count);
32532+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
32533
32534 if (dev->driver->preclose)
32535 dev->driver->preclose(dev, file_priv);
32536@@ -447,10 +447,10 @@ int drm_release(struct inode *inode, struct file *filp)
32537 * Begin inline drm_release
32538 */
32539
32540- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
32541+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
32542 task_pid_nr(current),
32543 (long)old_encode_dev(file_priv->minor->device),
32544- dev->open_count);
32545+ local_read(&dev->open_count));
32546
32547 /* Release any auth tokens that might point to this file_priv,
32548 (do that under the drm_global_mutex) */
32549@@ -547,8 +547,8 @@ int drm_release(struct inode *inode, struct file *filp)
32550 * End inline drm_release
32551 */
32552
32553- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
32554- if (!--dev->open_count) {
32555+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
32556+ if (local_dec_and_test(&dev->open_count)) {
32557 if (atomic_read(&dev->ioctl_count)) {
32558 DRM_ERROR("Device busy: %d\n",
32559 atomic_read(&dev->ioctl_count));
32560diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
32561index f731116..629842c 100644
32562--- a/drivers/gpu/drm/drm_global.c
32563+++ b/drivers/gpu/drm/drm_global.c
32564@@ -36,7 +36,7 @@
32565 struct drm_global_item {
32566 struct mutex mutex;
32567 void *object;
32568- int refcount;
32569+ atomic_t refcount;
32570 };
32571
32572 static struct drm_global_item glob[DRM_GLOBAL_NUM];
32573@@ -49,7 +49,7 @@ void drm_global_init(void)
32574 struct drm_global_item *item = &glob[i];
32575 mutex_init(&item->mutex);
32576 item->object = NULL;
32577- item->refcount = 0;
32578+ atomic_set(&item->refcount, 0);
32579 }
32580 }
32581
32582@@ -59,7 +59,7 @@ void drm_global_release(void)
32583 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
32584 struct drm_global_item *item = &glob[i];
32585 BUG_ON(item->object != NULL);
32586- BUG_ON(item->refcount != 0);
32587+ BUG_ON(atomic_read(&item->refcount) != 0);
32588 }
32589 }
32590
32591@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
32592 void *object;
32593
32594 mutex_lock(&item->mutex);
32595- if (item->refcount == 0) {
32596+ if (atomic_read(&item->refcount) == 0) {
32597 item->object = kzalloc(ref->size, GFP_KERNEL);
32598 if (unlikely(item->object == NULL)) {
32599 ret = -ENOMEM;
32600@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
32601 goto out_err;
32602
32603 }
32604- ++item->refcount;
32605+ atomic_inc(&item->refcount);
32606 ref->object = item->object;
32607 object = item->object;
32608 mutex_unlock(&item->mutex);
32609@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
32610 struct drm_global_item *item = &glob[ref->global_type];
32611
32612 mutex_lock(&item->mutex);
32613- BUG_ON(item->refcount == 0);
32614+ BUG_ON(atomic_read(&item->refcount) == 0);
32615 BUG_ON(ref->object != item->object);
32616- if (--item->refcount == 0) {
32617+ if (atomic_dec_and_test(&item->refcount)) {
32618 ref->release(ref);
32619 item->object = NULL;
32620 }
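
Note: drm_global's refcount moves to atomic_t so that, under PaX REFCOUNT, increments are overflow-checked; the mutex still serializes the slow paths, and release now keys off the atomic 1-to-0 transition via atomic_dec_and_test(). Sketch of that idiom in C11 (names invented):

#include <stdatomic.h>
#include <stddef.h>

struct global_item {
        atomic_int refcount;
        void *object;
};

/* only the thread that performs the 1 -> 0 transition runs release(),
 * which is the property atomic_dec_and_test() provides */
static void item_unref(struct global_item *it,
                       void (*release)(struct global_item *))
{
        if (atomic_fetch_sub(&it->refcount, 1) == 1) {
                release(it);
                it->object = NULL;
        }
}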
32621diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
32622index d4b20ce..77a8d41 100644
32623--- a/drivers/gpu/drm/drm_info.c
32624+++ b/drivers/gpu/drm/drm_info.c
32625@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
32626 struct drm_local_map *map;
32627 struct drm_map_list *r_list;
32628
32629- /* Hardcoded from _DRM_FRAME_BUFFER,
32630- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
32631- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
32632- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
32633+ static const char * const types[] = {
32634+ [_DRM_FRAME_BUFFER] = "FB",
32635+ [_DRM_REGISTERS] = "REG",
32636+ [_DRM_SHM] = "SHM",
32637+ [_DRM_AGP] = "AGP",
32638+ [_DRM_SCATTER_GATHER] = "SG",
32639+ [_DRM_CONSISTENT] = "PCI",
32640+ [_DRM_GEM] = "GEM" };
32641 const char *type;
32642 int i;
32643
32644@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
32645 map = r_list->map;
32646 if (!map)
32647 continue;
32648- if (map->type < 0 || map->type > 5)
32649+ if (map->type >= ARRAY_SIZE(types))
32650 type = "??";
32651 else
32652 type = types[map->type];
32653@@ -253,7 +257,11 @@ int drm_vma_info(struct seq_file *m, void *data)
32654 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
32655 vma->vm_flags & VM_LOCKED ? 'l' : '-',
32656 vma->vm_flags & VM_IO ? 'i' : '-',
32657+#ifdef CONFIG_GRKERNSEC_HIDESYM
32658+ 0);
32659+#else
32660 vma->vm_pgoff);
32661+#endif
32662
32663 #if defined(__i386__)
32664 pgprot = pgprot_val(vma->vm_page_prot);
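
Note: the drm_info.c table swaps a positional array plus a hand-written "> 5" range check for designated initializers indexed by the _DRM_* enum and an ARRAY_SIZE() bound, so adding _DRM_GEM (or any future type) cannot walk off the end. A sketch, with a NULL check added in case a future enum value leaves a gap (the patched table itself covers every value):

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum map_type { MT_FB, MT_REG, MT_SHM, MT_GEM };

static const char * const type_names[] = {
        [MT_FB]  = "FB",
        [MT_REG] = "REG",
        [MT_SHM] = "SHM",
        [MT_GEM] = "GEM",
};

/* bounding by ARRAY_SIZE means the lookup can never read past the
 * table, no matter how the enum grows */
static const char *type_name(unsigned int t)
{
        return (t < ARRAY_SIZE(type_names) && type_names[t])
                ? type_names[t] : "??";
}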
32665diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
32666index 2f4c434..764794b 100644
32667--- a/drivers/gpu/drm/drm_ioc32.c
32668+++ b/drivers/gpu/drm/drm_ioc32.c
32669@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
32670 request = compat_alloc_user_space(nbytes);
32671 if (!access_ok(VERIFY_WRITE, request, nbytes))
32672 return -EFAULT;
32673- list = (struct drm_buf_desc *) (request + 1);
32674+ list = (struct drm_buf_desc __user *) (request + 1);
32675
32676 if (__put_user(count, &request->count)
32677 || __put_user(list, &request->list))
32678@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
32679 request = compat_alloc_user_space(nbytes);
32680 if (!access_ok(VERIFY_WRITE, request, nbytes))
32681 return -EFAULT;
32682- list = (struct drm_buf_pub *) (request + 1);
32683+ list = (struct drm_buf_pub __user *) (request + 1);
32684
32685 if (__put_user(count, &request->count)
32686 || __put_user(list, &request->list))
32687diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
32688index 23dd975..63e9801 100644
32689--- a/drivers/gpu/drm/drm_ioctl.c
32690+++ b/drivers/gpu/drm/drm_ioctl.c
32691@@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
32692 stats->data[i].value =
32693 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
32694 else
32695- stats->data[i].value = atomic_read(&dev->counts[i]);
32696+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
32697 stats->data[i].type = dev->types[i];
32698 }
32699
32700diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
32701index d752c96..fe08455 100644
32702--- a/drivers/gpu/drm/drm_lock.c
32703+++ b/drivers/gpu/drm/drm_lock.c
32704@@ -86,7 +86,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
32705 if (drm_lock_take(&master->lock, lock->context)) {
32706 master->lock.file_priv = file_priv;
32707 master->lock.lock_time = jiffies;
32708- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
32709+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
32710 break; /* Got lock */
32711 }
32712
32713@@ -157,7 +157,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
32714 return -EINVAL;
32715 }
32716
32717- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
32718+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
32719
32720 if (drm_lock_free(&master->lock, lock->context)) {
32721 /* FIXME: Should really bail out here. */
32722diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
32723index c236fd2..6b5f2e7 100644
32724--- a/drivers/gpu/drm/drm_stub.c
32725+++ b/drivers/gpu/drm/drm_stub.c
32726@@ -511,7 +511,7 @@ void drm_unplug_dev(struct drm_device *dev)
32727
32728 drm_device_set_unplugged(dev);
32729
32730- if (dev->open_count == 0) {
32731+ if (local_read(&dev->open_count) == 0) {
32732 drm_put_dev(dev);
32733 }
32734 mutex_unlock(&drm_global_mutex);
32735diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
32736index 004ecdf..db1f6e0 100644
32737--- a/drivers/gpu/drm/i810/i810_dma.c
32738+++ b/drivers/gpu/drm/i810/i810_dma.c
32739@@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
32740 dma->buflist[vertex->idx],
32741 vertex->discard, vertex->used);
32742
32743- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
32744- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
32745+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
32746+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
32747 sarea_priv->last_enqueue = dev_priv->counter - 1;
32748 sarea_priv->last_dispatch = (int)hw_status[5];
32749
32750@@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
32751 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
32752 mc->last_render);
32753
32754- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
32755- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
32756+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
32757+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
32758 sarea_priv->last_enqueue = dev_priv->counter - 1;
32759 sarea_priv->last_dispatch = (int)hw_status[5];
32760
32761diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
32762index 6e0acad..93c8289 100644
32763--- a/drivers/gpu/drm/i810/i810_drv.h
32764+++ b/drivers/gpu/drm/i810/i810_drv.h
32765@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
32766 int page_flipping;
32767
32768 wait_queue_head_t irq_queue;
32769- atomic_t irq_received;
32770- atomic_t irq_emitted;
32771+ atomic_unchecked_t irq_received;
32772+ atomic_unchecked_t irq_emitted;
32773
32774 int front_offset;
32775 } drm_i810_private_t;
32776diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
32777index da21b11..14c8749 100644
32778--- a/drivers/gpu/drm/i915/i915_debugfs.c
32779+++ b/drivers/gpu/drm/i915/i915_debugfs.c
32780@@ -495,7 +495,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
32781 I915_READ(GTIMR));
32782 }
32783 seq_printf(m, "Interrupts received: %d\n",
32784- atomic_read(&dev_priv->irq_received));
32785+ atomic_read_unchecked(&dev_priv->irq_received));
32786 for_each_ring(ring, dev_priv, i) {
32787 if (IS_GEN6(dev) || IS_GEN7(dev)) {
32788 seq_printf(m,
32789diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
32790index 61ae104..f8a4bc1 100644
32791--- a/drivers/gpu/drm/i915/i915_dma.c
32792+++ b/drivers/gpu/drm/i915/i915_dma.c
32793@@ -1274,7 +1274,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
32794 bool can_switch;
32795
32796 spin_lock(&dev->count_lock);
32797- can_switch = (dev->open_count == 0);
32798+ can_switch = (local_read(&dev->open_count) == 0);
32799 spin_unlock(&dev->count_lock);
32800 return can_switch;
32801 }
32802diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
32803index 92f1750..3beba74 100644
32804--- a/drivers/gpu/drm/i915/i915_drv.h
32805+++ b/drivers/gpu/drm/i915/i915_drv.h
32806@@ -430,7 +430,7 @@ typedef struct drm_i915_private {
32807
32808 struct resource mch_res;
32809
32810- atomic_t irq_received;
32811+ atomic_unchecked_t irq_received;
32812
32813 /* protects the irq masks */
32814 spinlock_t irq_lock;
32815@@ -1055,7 +1055,7 @@ struct drm_i915_gem_object {
32816 * will be page flipped away on the next vblank. When it
32817 * reaches 0, dev_priv->pending_flip_queue will be woken up.
32818 */
32819- atomic_t pending_flip;
32820+ atomic_unchecked_t pending_flip;
32821 };
32822
32823 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
32824@@ -1558,7 +1558,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
32825 struct drm_i915_private *dev_priv, unsigned port);
32826 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
32827 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
32828-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
32829+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
32830 {
32831 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
32832 }
32833diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
32834index 3eea143..a0b77db 100644
32835--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
32836+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
32837@@ -660,7 +660,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
32838 i915_gem_clflush_object(obj);
32839
32840 if (obj->base.pending_write_domain)
32841- flips |= atomic_read(&obj->pending_flip);
32842+ flips |= atomic_read_unchecked(&obj->pending_flip);
32843
32844 flush_domains |= obj->base.write_domain;
32845 }
32846@@ -691,9 +691,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
32847
32848 static int
32849 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
32850- int count)
32851+ unsigned int count)
32852 {
32853- int i;
32854+ unsigned int i;
32855
32856 for (i = 0; i < count; i++) {
32857 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
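
Note: validate_exec_list() and its index go unsigned in the hunk above; with a signed count, a negative caller-supplied value terminates the validation loop immediately while the same bit pattern remains enormous wherever it is used as a size. A runnable illustration of that class of bug (the -1 here is hypothetical input, not a value from the driver):

#include <stdio.h>

int main(void)
{
        int count = -1;         /* hostile buffer_count */
        int i;

        for (i = 0; i < count; i++)
                ;               /* validation loop never executes */

        /* yet the same bits, used as a size elsewhere, are huge */
        printf("%zu\n", (size_t)(unsigned int)count); /* 4294967295 with 32-bit int */
        return 0;
}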
32858diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
32859index dc29ace..137d83a 100644
32860--- a/drivers/gpu/drm/i915/i915_irq.c
32861+++ b/drivers/gpu/drm/i915/i915_irq.c
32862@@ -531,7 +531,7 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
32863 u32 pipe_stats[I915_MAX_PIPES];
32864 bool blc_event;
32865
32866- atomic_inc(&dev_priv->irq_received);
32867+ atomic_inc_unchecked(&dev_priv->irq_received);
32868
32869 while (true) {
32870 iir = I915_READ(VLV_IIR);
32871@@ -678,7 +678,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
32872 irqreturn_t ret = IRQ_NONE;
32873 int i;
32874
32875- atomic_inc(&dev_priv->irq_received);
32876+ atomic_inc_unchecked(&dev_priv->irq_received);
32877
32878 /* disable master interrupt before clearing iir */
32879 de_ier = I915_READ(DEIER);
32880@@ -753,7 +753,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
32881 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
32882 u32 hotplug_mask;
32883
32884- atomic_inc(&dev_priv->irq_received);
32885+ atomic_inc_unchecked(&dev_priv->irq_received);
32886
32887 /* disable master interrupt before clearing iir */
32888 de_ier = I915_READ(DEIER);
32889@@ -1762,7 +1762,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
32890 {
32891 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
32892
32893- atomic_set(&dev_priv->irq_received, 0);
32894+ atomic_set_unchecked(&dev_priv->irq_received, 0);
32895
32896 I915_WRITE(HWSTAM, 0xeffe);
32897
32898@@ -1788,7 +1788,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
32899 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
32900 int pipe;
32901
32902- atomic_set(&dev_priv->irq_received, 0);
32903+ atomic_set_unchecked(&dev_priv->irq_received, 0);
32904
32905 /* VLV magic */
32906 I915_WRITE(VLV_IMR, 0);
32907@@ -2093,7 +2093,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
32908 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
32909 int pipe;
32910
32911- atomic_set(&dev_priv->irq_received, 0);
32912+ atomic_set_unchecked(&dev_priv->irq_received, 0);
32913
32914 for_each_pipe(pipe)
32915 I915_WRITE(PIPESTAT(pipe), 0);
32916@@ -2144,7 +2144,7 @@ static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
32917 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
32918 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
32919
32920- atomic_inc(&dev_priv->irq_received);
32921+ atomic_inc_unchecked(&dev_priv->irq_received);
32922
32923 iir = I915_READ16(IIR);
32924 if (iir == 0)
32925@@ -2229,7 +2229,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
32926 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
32927 int pipe;
32928
32929- atomic_set(&dev_priv->irq_received, 0);
32930+ atomic_set_unchecked(&dev_priv->irq_received, 0);
32931
32932 if (I915_HAS_HOTPLUG(dev)) {
32933 I915_WRITE(PORT_HOTPLUG_EN, 0);
32934@@ -2324,7 +2324,7 @@ static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
32935 };
32936 int pipe, ret = IRQ_NONE;
32937
32938- atomic_inc(&dev_priv->irq_received);
32939+ atomic_inc_unchecked(&dev_priv->irq_received);
32940
32941 iir = I915_READ(IIR);
32942 do {
32943@@ -2450,7 +2450,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
32944 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
32945 int pipe;
32946
32947- atomic_set(&dev_priv->irq_received, 0);
32948+ atomic_set_unchecked(&dev_priv->irq_received, 0);
32949
32950 I915_WRITE(PORT_HOTPLUG_EN, 0);
32951 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
32952@@ -2557,7 +2557,7 @@ static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
32953 int irq_received;
32954 int ret = IRQ_NONE, pipe;
32955
32956- atomic_inc(&dev_priv->irq_received);
32957+ atomic_inc_unchecked(&dev_priv->irq_received);
32958
32959 iir = I915_READ(IIR);
32960
32961diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
32962index 4d3c7c6..eaac87b 100644
32963--- a/drivers/gpu/drm/i915/intel_display.c
32964+++ b/drivers/gpu/drm/i915/intel_display.c
32965@@ -2131,7 +2131,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
32966
32967 wait_event(dev_priv->pending_flip_queue,
32968 atomic_read(&dev_priv->mm.wedged) ||
32969- atomic_read(&obj->pending_flip) == 0);
32970+ atomic_read_unchecked(&obj->pending_flip) == 0);
32971
32972 /* Big Hammer, we also need to ensure that any pending
32973 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
32974@@ -6221,8 +6221,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
32975
32976 obj = work->old_fb_obj;
32977
32978- atomic_clear_mask(1 << intel_crtc->plane,
32979- &obj->pending_flip.counter);
32980+ atomic_clear_mask_unchecked(1 << intel_crtc->plane, &obj->pending_flip);
32981 wake_up(&dev_priv->pending_flip_queue);
32982
32983 queue_work(dev_priv->wq, &work->work);
32984@@ -6589,7 +6588,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
32985 /* Block clients from rendering to the new back buffer until
32986 * the flip occurs and the object is no longer visible.
32987 */
32988- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
32989+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
32990 atomic_inc(&intel_crtc->unpin_work_count);
32991
32992 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
32993@@ -6606,7 +6605,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
32994
32995 cleanup_pending:
32996 atomic_dec(&intel_crtc->unpin_work_count);
32997- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
32998+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
32999 drm_gem_object_unreference(&work->old_fb_obj->base);
33000 drm_gem_object_unreference(&obj->base);
33001 mutex_unlock(&dev->struct_mutex);
33002diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
33003index 54558a0..2d97005 100644
33004--- a/drivers/gpu/drm/mga/mga_drv.h
33005+++ b/drivers/gpu/drm/mga/mga_drv.h
33006@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
33007 u32 clear_cmd;
33008 u32 maccess;
33009
33010- atomic_t vbl_received; /**< Number of vblanks received. */
33011+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
33012 wait_queue_head_t fence_queue;
33013- atomic_t last_fence_retired;
33014+ atomic_unchecked_t last_fence_retired;
33015 u32 next_fence_to_post;
33016
33017 unsigned int fb_cpp;
33018diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
33019index 598c281..60d590e 100644
33020--- a/drivers/gpu/drm/mga/mga_irq.c
33021+++ b/drivers/gpu/drm/mga/mga_irq.c
33022@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
33023 if (crtc != 0)
33024 return 0;
33025
33026- return atomic_read(&dev_priv->vbl_received);
33027+ return atomic_read_unchecked(&dev_priv->vbl_received);
33028 }
33029
33030
33031@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33032 /* VBLANK interrupt */
33033 if (status & MGA_VLINEPEN) {
33034 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
33035- atomic_inc(&dev_priv->vbl_received);
33036+ atomic_inc_unchecked(&dev_priv->vbl_received);
33037 drm_handle_vblank(dev, 0);
33038 handled = 1;
33039 }
33040@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33041 if ((prim_start & ~0x03) != (prim_end & ~0x03))
33042 MGA_WRITE(MGA_PRIMEND, prim_end);
33043
33044- atomic_inc(&dev_priv->last_fence_retired);
33045+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
33046 DRM_WAKEUP(&dev_priv->fence_queue);
33047 handled = 1;
33048 }
33049@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
33050 * using fences.
33051 */
33052 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
33053- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
33054+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
33055 - *sequence) <= (1 << 23)));
33056
33057 *sequence = cur_fence;
33058diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
33059index 09fdef2..57f5c3b 100644
33060--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
33061+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
33062@@ -1240,7 +1240,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
33063 struct bit_table {
33064 const char id;
33065 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
33066-};
33067+} __no_const;
33068
33069 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
33070
33071diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
33072index a101699..a163f0a 100644
33073--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
33074+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
33075@@ -80,7 +80,7 @@ struct nouveau_drm {
33076 struct drm_global_reference mem_global_ref;
33077 struct ttm_bo_global_ref bo_global_ref;
33078 struct ttm_bo_device bdev;
33079- atomic_t validate_sequence;
33080+ atomic_unchecked_t validate_sequence;
33081 int (*move)(struct nouveau_channel *,
33082 struct ttm_buffer_object *,
33083 struct ttm_mem_reg *, struct ttm_mem_reg *);
33084diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
33085index cdb83ac..27f0a16 100644
33086--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
33087+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
33088@@ -43,7 +43,7 @@ struct nouveau_fence_priv {
33089 int (*sync)(struct nouveau_fence *, struct nouveau_channel *,
33090 struct nouveau_channel *);
33091 u32 (*read)(struct nouveau_channel *);
33092-};
33093+} __no_const;
33094
33095 #define nouveau_fence(drm) ((struct nouveau_fence_priv *)(drm)->fence)
33096
33097diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
33098index 5e2f521..0d21436 100644
33099--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
33100+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
33101@@ -321,7 +321,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
33102 int trycnt = 0;
33103 int ret, i;
33104
33105- sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
33106+ sequence = atomic_add_return_unchecked(1, &drm->ttm.validate_sequence);
33107 retry:
33108 if (++trycnt > 100000) {
33109 NV_ERROR(drm, "%s failed and gave up.\n", __func__);
33110diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
33111index 6f0ac64..9c2dfb4 100644
33112--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
33113+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
33114@@ -63,7 +63,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
33115 bool can_switch;
33116
33117 spin_lock(&dev->count_lock);
33118- can_switch = (dev->open_count == 0);
33119+ can_switch = (local_read(&dev->open_count) == 0);
33120 spin_unlock(&dev->count_lock);
33121 return can_switch;
33122 }
33123diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
33124index 9f6f55c..30e3a29 100644
33125--- a/drivers/gpu/drm/nouveau/nv50_evo.c
33126+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
33127@@ -152,9 +152,9 @@ nv50_evo_channel_new(struct drm_device *dev, int chid,
33128 kzalloc(sizeof(*evo->object->oclass), GFP_KERNEL);
33129 evo->object->oclass->ofuncs =
33130 kzalloc(sizeof(*evo->object->oclass->ofuncs), GFP_KERNEL);
33131- evo->object->oclass->ofuncs->rd32 = nv50_evo_rd32;
33132- evo->object->oclass->ofuncs->wr32 = nv50_evo_wr32;
33133- evo->object->oclass->ofuncs->rd08 =
33134+ *(void**)&evo->object->oclass->ofuncs->rd32 = nv50_evo_rd32;
33135+ *(void**)&evo->object->oclass->ofuncs->wr32 = nv50_evo_wr32;
33136+ *(void**)&evo->object->oclass->ofuncs->rd08 =
33137 ioremap(pci_resource_start(dev->pdev, 0) +
33138 NV50_PDISPLAY_USER(evo->handle), PAGE_SIZE);
33139 return 0;
33140diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
33141index b562b59..9d725a8 100644
33142--- a/drivers/gpu/drm/nouveau/nv50_sor.c
33143+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
33144@@ -317,7 +317,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
33145 }
33146
33147 if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
33148- struct dp_train_func func = {
33149+ static struct dp_train_func func = {
33150 .link_set = nv50_sor_dp_link_set,
33151 .train_set = nv50_sor_dp_train_set,
33152 .train_adj = nv50_sor_dp_train_adj
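
Note: in both nv50_sor.c and nvd0_display.c (next hunk) the dp_train_func table of function pointers becomes static, moving it out of the writable stack frame where a stack overwrite could redirect the calls, and letting grsecurity's constify machinery protect it. A plain-C sketch of the pattern, using const explicitly (all names invented):

struct train_func {
        void (*link_set)(void);
        void (*train_set)(void);
        void (*train_adj)(void);
};

static void link_set(void) { }
static void train_set(void) { }
static void train_adj(void) { }

static void sor_dpms(void)
{
        /* was a non-static local: a writable, stack-resident table of
         * function pointers; static const storage removes that target */
        static const struct train_func func = {
                .link_set  = link_set,
                .train_set = train_set,
                .train_adj = train_adj,
        };

        func.link_set();
}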
33153diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
33154index c402fca..f1d694b 100644
33155--- a/drivers/gpu/drm/nouveau/nvd0_display.c
33156+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
33157@@ -1389,7 +1389,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
33158 nv_wait(device, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
33159
33160 if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
33161- struct dp_train_func func = {
33162+ static struct dp_train_func func = {
33163 .link_set = nvd0_sor_dp_link_set,
33164 .train_set = nvd0_sor_dp_train_set,
33165 .train_adj = nvd0_sor_dp_train_adj
33166diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
33167index d4660cf..70dbe65 100644
33168--- a/drivers/gpu/drm/r128/r128_cce.c
33169+++ b/drivers/gpu/drm/r128/r128_cce.c
33170@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
33171
33172 /* GH: Simple idle check.
33173 */
33174- atomic_set(&dev_priv->idle_count, 0);
33175+ atomic_set_unchecked(&dev_priv->idle_count, 0);
33176
33177 /* We don't support anything other than bus-mastering ring mode,
33178 * but the ring can be in either AGP or PCI space for the ring
33179diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
33180index 930c71b..499aded 100644
33181--- a/drivers/gpu/drm/r128/r128_drv.h
33182+++ b/drivers/gpu/drm/r128/r128_drv.h
33183@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
33184 int is_pci;
33185 unsigned long cce_buffers_offset;
33186
33187- atomic_t idle_count;
33188+ atomic_unchecked_t idle_count;
33189
33190 int page_flipping;
33191 int current_page;
33192 u32 crtc_offset;
33193 u32 crtc_offset_cntl;
33194
33195- atomic_t vbl_received;
33196+ atomic_unchecked_t vbl_received;
33197
33198 u32 color_fmt;
33199 unsigned int front_offset;
33200diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
33201index 2ea4f09..d391371 100644
33202--- a/drivers/gpu/drm/r128/r128_irq.c
33203+++ b/drivers/gpu/drm/r128/r128_irq.c
33204@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
33205 if (crtc != 0)
33206 return 0;
33207
33208- return atomic_read(&dev_priv->vbl_received);
33209+ return atomic_read_unchecked(&dev_priv->vbl_received);
33210 }
33211
33212 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33213@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33214 /* VBLANK interrupt */
33215 if (status & R128_CRTC_VBLANK_INT) {
33216 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
33217- atomic_inc(&dev_priv->vbl_received);
33218+ atomic_inc_unchecked(&dev_priv->vbl_received);
33219 drm_handle_vblank(dev, 0);
33220 return IRQ_HANDLED;
33221 }
33222diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
33223index 19bb7e6..de7e2a2 100644
33224--- a/drivers/gpu/drm/r128/r128_state.c
33225+++ b/drivers/gpu/drm/r128/r128_state.c
33226@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
33227
33228 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
33229 {
33230- if (atomic_read(&dev_priv->idle_count) == 0)
33231+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
33232 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
33233 else
33234- atomic_set(&dev_priv->idle_count, 0);
33235+ atomic_set_unchecked(&dev_priv->idle_count, 0);
33236 }
33237
33238 #endif
33239diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
33240index 5a82b6b..9e69c73 100644
33241--- a/drivers/gpu/drm/radeon/mkregtable.c
33242+++ b/drivers/gpu/drm/radeon/mkregtable.c
33243@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
33244 regex_t mask_rex;
33245 regmatch_t match[4];
33246 char buf[1024];
33247- size_t end;
33248+ long end;
33249 int len;
33250 int done = 0;
33251 int r;
33252 unsigned o;
33253 struct offset *offset;
33254 char last_reg_s[10];
33255- int last_reg;
33256+ unsigned long last_reg;
33257
33258 if (regcomp
33259 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
33260diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
33261index ad4c973..aa27bcb 100644
33262--- a/drivers/gpu/drm/radeon/radeon_device.c
33263+++ b/drivers/gpu/drm/radeon/radeon_device.c
33264@@ -940,7 +940,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
33265 bool can_switch;
33266
33267 spin_lock(&dev->count_lock);
33268- can_switch = (dev->open_count == 0);
33269+ can_switch = (local_read(&dev->open_count) == 0);
33270 spin_unlock(&dev->count_lock);
33271 return can_switch;
33272 }
33273diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
33274index a1b59ca..86f2d44 100644
33275--- a/drivers/gpu/drm/radeon/radeon_drv.h
33276+++ b/drivers/gpu/drm/radeon/radeon_drv.h
33277@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
33278
33279 /* SW interrupt */
33280 wait_queue_head_t swi_queue;
33281- atomic_t swi_emitted;
33282+ atomic_unchecked_t swi_emitted;
33283 int vblank_crtc;
33284 uint32_t irq_enable_reg;
33285 uint32_t r500_disp_irq_reg;
33286diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
33287index c180df8..cd80dd2d 100644
33288--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
33289+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
33290@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
33291 request = compat_alloc_user_space(sizeof(*request));
33292 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
33293 || __put_user(req32.param, &request->param)
33294- || __put_user((void __user *)(unsigned long)req32.value,
33295+ || __put_user((unsigned long)req32.value,
33296 &request->value))
33297 return -EFAULT;
33298
33299diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
33300index e771033..a0bc6b3 100644
33301--- a/drivers/gpu/drm/radeon/radeon_irq.c
33302+++ b/drivers/gpu/drm/radeon/radeon_irq.c
33303@@ -224,8 +224,8 @@ static int radeon_emit_irq(struct drm_device * dev)
33304 unsigned int ret;
33305 RING_LOCALS;
33306
33307- atomic_inc(&dev_priv->swi_emitted);
33308- ret = atomic_read(&dev_priv->swi_emitted);
33309+ atomic_inc_unchecked(&dev_priv->swi_emitted);
33310+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
33311
33312 BEGIN_RING(4);
33313 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
33314@@ -351,7 +351,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
33315 drm_radeon_private_t *dev_priv =
33316 (drm_radeon_private_t *) dev->dev_private;
33317
33318- atomic_set(&dev_priv->swi_emitted, 0);
33319+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
33320 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
33321
33322 dev->max_vblank_count = 0x001fffff;
33323diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
33324index 8e9057b..af6dacb 100644
33325--- a/drivers/gpu/drm/radeon/radeon_state.c
33326+++ b/drivers/gpu/drm/radeon/radeon_state.c
33327@@ -2166,7 +2166,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
33328 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
33329 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
33330
33331- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
33332+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
33333 sarea_priv->nbox * sizeof(depth_boxes[0])))
33334 return -EFAULT;
33335
33336@@ -3029,7 +3029,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
33337 {
33338 drm_radeon_private_t *dev_priv = dev->dev_private;
33339 drm_radeon_getparam_t *param = data;
33340- int value;
33341+ int value = 0;
33342
33343 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
33344
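
Note: the radeon_state.c hunks add a clip-rect count check before the user copy (the same pattern as the nvram.c fix earlier) and initialize getparam's value, so a request that reaches the copy-out without every switch arm assigning cannot leak kernel stack contents. A sketch of the latter (getparam() here is a simplified stand-in):

/* any switch arm that breaks without assigning would otherwise let
 * stack garbage flow into the copy back to user space */
static int getparam(int param, int *out)
{
        int value = 0;          /* was uninitialized in the old code */

        switch (param) {
        case 1:
                value = 42;
                break;
        default:
                break;
        }
        *out = value;           /* stands in for DRM_COPY_TO_USER() */
        return 0;
}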
33345diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
33346index 5ebe1b3..1ed9426 100644
33347--- a/drivers/gpu/drm/radeon/radeon_ttm.c
33348+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
33349@@ -822,8 +822,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33350 }
33351 if (unlikely(ttm_vm_ops == NULL)) {
33352 ttm_vm_ops = vma->vm_ops;
33353- radeon_ttm_vm_ops = *ttm_vm_ops;
33354- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
33355+ pax_open_kernel();
33356+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
33357+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
33358+ pax_close_kernel();
33359 }
33360 vma->vm_ops = &radeon_ttm_vm_ops;
33361 return 0;
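
Note: radeon_mmap() clones the TTM vm_ops template and overrides just the fault handler; because constification leaves the destination read-only at runtime under PaX, the patch brackets the writes with pax_open_kernel()/pax_close_kernel(). A sketch of the clone-and-override shape (types and names invented; the PaX write window has no plain-C equivalent):

#include <string.h>

struct vm_ops {
        int (*fault)(void);
        void (*open)(void);
};

static int radeon_fault(void) { return 0; }

static struct vm_ops radeon_vm_ops;     /* write-protected under PaX */

/* copy the generic table once, then override only the fault handler;
 * in the patch both writes sit inside the pax_open_kernel() window */
static void install_vm_ops(const struct vm_ops *ttm_ops)
{
        memcpy(&radeon_vm_ops, ttm_ops, sizeof(radeon_vm_ops));
        radeon_vm_ops.fault = radeon_fault;
}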
33362diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
33363index 5706d2a..17aedaa 100644
33364--- a/drivers/gpu/drm/radeon/rs690.c
33365+++ b/drivers/gpu/drm/radeon/rs690.c
33366@@ -304,9 +304,11 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
33367 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
33368 rdev->pm.sideport_bandwidth.full)
33369 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
33370- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
33371+ read_delay_latency.full = dfixed_const(800 * 1000);
33372 read_delay_latency.full = dfixed_div(read_delay_latency,
33373 rdev->pm.igp_sideport_mclk);
33374+ a.full = dfixed_const(370);
33375+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
33376 } else {
33377 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
33378 rdev->pm.k8_bandwidth.full)
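
The rs690 change is an overflow fix in radeon's 20.12 fixed-point format: dfixed_const(A) is essentially A << 12, and 370 * 800 * 1000 = 296,000,000 no longer fits in 32 bits once shifted, so the patch divides the 800,000 part (which does fit) by the memory clock first and multiplies by 370 afterwards. The arithmetic below just verifies those magnitudes:

    #include <stdint.h>
    #include <stdio.h>

    #define FRAC_BITS 12 /* radeon's 20.12 fixed-point format */

    int main(void)
    {
        uint64_t big   = 370ull * 800 * 1000; /* 296,000,000 */
        uint64_t small = 800ull * 1000;       /*     800,000 */

        printf("big << 12   = %llu (fits u32: %s)\n",
               (unsigned long long)(big << FRAC_BITS),
               (big << FRAC_BITS) <= UINT32_MAX ? "yes" : "no");
        printf("small << 12 = %llu (fits u32: %s)\n",
               (unsigned long long)(small << FRAC_BITS),
               (small << FRAC_BITS) <= UINT32_MAX ? "yes" : "no");
        return 0;
    }
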
33379diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
33380index bd2a3b4..122d9ad 100644
33381--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
33382+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
33383@@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
33384 static int ttm_pool_mm_shrink(struct shrinker *shrink,
33385 struct shrink_control *sc)
33386 {
33387- static atomic_t start_pool = ATOMIC_INIT(0);
33388+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
33389 unsigned i;
33390- unsigned pool_offset = atomic_add_return(1, &start_pool);
33391+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
33392 struct ttm_page_pool *pool;
33393 int shrink_pages = sc->nr_to_scan;
33394
33395diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
33396index 893a650..6190d3b 100644
33397--- a/drivers/gpu/drm/via/via_drv.h
33398+++ b/drivers/gpu/drm/via/via_drv.h
33399@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
33400 typedef uint32_t maskarray_t[5];
33401
33402 typedef struct drm_via_irq {
33403- atomic_t irq_received;
33404+ atomic_unchecked_t irq_received;
33405 uint32_t pending_mask;
33406 uint32_t enable_mask;
33407 wait_queue_head_t irq_queue;
33408@@ -75,7 +75,7 @@ typedef struct drm_via_private {
33409 struct timeval last_vblank;
33410 int last_vblank_valid;
33411 unsigned usec_per_vblank;
33412- atomic_t vbl_received;
33413+ atomic_unchecked_t vbl_received;
33414 drm_via_state_t hc_state;
33415 char pci_buf[VIA_PCI_BUF_SIZE];
33416 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
33417diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
33418index ac98964..5dbf512 100644
33419--- a/drivers/gpu/drm/via/via_irq.c
33420+++ b/drivers/gpu/drm/via/via_irq.c
33421@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
33422 if (crtc != 0)
33423 return 0;
33424
33425- return atomic_read(&dev_priv->vbl_received);
33426+ return atomic_read_unchecked(&dev_priv->vbl_received);
33427 }
33428
33429 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33430@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33431
33432 status = VIA_READ(VIA_REG_INTERRUPT);
33433 if (status & VIA_IRQ_VBLANK_PENDING) {
33434- atomic_inc(&dev_priv->vbl_received);
33435- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
33436+ atomic_inc_unchecked(&dev_priv->vbl_received);
33437+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
33438 do_gettimeofday(&cur_vblank);
33439 if (dev_priv->last_vblank_valid) {
33440 dev_priv->usec_per_vblank =
33441@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33442 dev_priv->last_vblank = cur_vblank;
33443 dev_priv->last_vblank_valid = 1;
33444 }
33445- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
33446+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
33447 DRM_DEBUG("US per vblank is: %u\n",
33448 dev_priv->usec_per_vblank);
33449 }
33450@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33451
33452 for (i = 0; i < dev_priv->num_irqs; ++i) {
33453 if (status & cur_irq->pending_mask) {
33454- atomic_inc(&cur_irq->irq_received);
33455+ atomic_inc_unchecked(&cur_irq->irq_received);
33456 DRM_WAKEUP(&cur_irq->irq_queue);
33457 handled = 1;
33458 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
33459@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
33460 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
33461 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
33462 masks[irq][4]));
33463- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
33464+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
33465 } else {
33466 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
33467 (((cur_irq_sequence =
33468- atomic_read(&cur_irq->irq_received)) -
33469+ atomic_read_unchecked(&cur_irq->irq_received)) -
33470 *sequence) <= (1 << 23)));
33471 }
33472 *sequence = cur_irq_sequence;
33473@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
33474 }
33475
33476 for (i = 0; i < dev_priv->num_irqs; ++i) {
33477- atomic_set(&cur_irq->irq_received, 0);
33478+ atomic_set_unchecked(&cur_irq->irq_received, 0);
33479 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
33480 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
33481 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
33482@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
33483 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
33484 case VIA_IRQ_RELATIVE:
33485 irqwait->request.sequence +=
33486- atomic_read(&cur_irq->irq_received);
33487+ atomic_read_unchecked(&cur_irq->irq_received);
33488 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
33489 case VIA_IRQ_ABSOLUTE:
33490 break;
33491diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
33492index 88a179e..57fe50481c 100644
33493--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
33494+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
33495@@ -263,7 +263,7 @@ struct vmw_private {
33496 * Fencing and IRQs.
33497 */
33498
33499- atomic_t marker_seq;
33500+ atomic_unchecked_t marker_seq;
33501 wait_queue_head_t fence_queue;
33502 wait_queue_head_t fifo_queue;
33503 int fence_queue_waiters; /* Protected by hw_mutex */
33504diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
33505index 3eb1486..0a47ee9 100644
33506--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
33507+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
33508@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
33509 (unsigned int) min,
33510 (unsigned int) fifo->capabilities);
33511
33512- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
33513+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
33514 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
33515 vmw_marker_queue_init(&fifo->marker_queue);
33516 return vmw_fifo_send_fence(dev_priv, &dummy);
33517@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
33518 if (reserveable)
33519 iowrite32(bytes, fifo_mem +
33520 SVGA_FIFO_RESERVED);
33521- return fifo_mem + (next_cmd >> 2);
33522+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
33523 } else {
33524 need_bounce = true;
33525 }
33526@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
33527
33528 fm = vmw_fifo_reserve(dev_priv, bytes);
33529 if (unlikely(fm == NULL)) {
33530- *seqno = atomic_read(&dev_priv->marker_seq);
33531+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
33532 ret = -ENOMEM;
33533 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
33534 false, 3*HZ);
33535@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
33536 }
33537
33538 do {
33539- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
33540+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
33541 } while (*seqno == 0);
33542
33543 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
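
vmw_fifo_send_fence keeps its do/while around the (now unchecked) increment because 0 is reserved to mean "no seqno": whenever the 32-bit counter wraps past zero, the loop simply skips it. The wrap-tolerant distance comparison in vmw_seqno_passed below is the companion piece. A sketch of the allocation loop, with C11 atomics standing in for the kernel primitives:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint32_t marker_seq = UINT32_MAX - 1; /* near wrap on purpose */

    /* Hand out the next sequence number, skipping 0, which means "none". */
    static uint32_t next_seqno(void)
    {
        uint32_t s;
        do {
            s = atomic_fetch_add(&marker_seq, 1) + 1;
        } while (s == 0);
        return s;
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++)
            printf("seqno = %u\n", next_seqno());
        return 0;
    }

Run from the chosen starting point, this prints 4294967295, then 1 (zero was skipped at the wrap), then 2.
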
33544diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
33545index 4640adb..e1384ed 100644
33546--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
33547+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
33548@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
33549 * emitted. Then the fence is stale and signaled.
33550 */
33551
33552- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
33553+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
33554 > VMW_FENCE_WRAP);
33555
33556 return ret;
33557@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
33558
33559 if (fifo_idle)
33560 down_read(&fifo_state->rwsem);
33561- signal_seq = atomic_read(&dev_priv->marker_seq);
33562+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
33563 ret = 0;
33564
33565 for (;;) {
33566diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
33567index 8a8725c..afed796 100644
33568--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
33569+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
33570@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
33571 while (!vmw_lag_lt(queue, us)) {
33572 spin_lock(&queue->lock);
33573 if (list_empty(&queue->head))
33574- seqno = atomic_read(&dev_priv->marker_seq);
33575+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
33576 else {
33577 marker = list_first_entry(&queue->head,
33578 struct vmw_marker, head);
33579diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
33580index 52146db..ae33762 100644
33581--- a/drivers/hid/hid-core.c
33582+++ b/drivers/hid/hid-core.c
33583@@ -2201,7 +2201,7 @@ static bool hid_ignore(struct hid_device *hdev)
33584
33585 int hid_add_device(struct hid_device *hdev)
33586 {
33587- static atomic_t id = ATOMIC_INIT(0);
33588+ static atomic_unchecked_t id = ATOMIC_INIT(0);
33589 int ret;
33590
33591 if (WARN_ON(hdev->status & HID_STAT_ADDED))
33592@@ -2236,7 +2236,7 @@ int hid_add_device(struct hid_device *hdev)
33593 /* XXX hack, any other cleaner solution after the driver core
33594 * is converted to allow more than 20 bytes as the device name? */
33595 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
33596- hdev->vendor, hdev->product, atomic_inc_return(&id));
33597+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
33598
33599 hid_debug_register(hdev, dev_name(&hdev->dev));
33600 ret = device_add(&hdev->dev);
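
hid_add_device uses its static counter purely to mint unique instance names; nothing ever decrements or dereferences the value, so wrap-around is harmless and the counter is a textbook candidate for the unchecked variant. The same naming pattern is converted below for vmbus, gameport, input, xpad, and serio devices. A sketch of the idiom with C11 atomics as a stand-in for atomic_inc_return_unchecked():

    #include <stdatomic.h>
    #include <stdio.h>

    /* Monotonic instance numbering: unique while it lasts, wrap is harmless. */
    static atomic_uint next_id;

    static void name_device(char *buf, size_t len)
    {
        unsigned id = atomic_fetch_add(&next_id, 1) + 1; /* inc_return */
        snprintf(buf, len, "hiddev%04X", id);
    }

    int main(void)
    {
        char name[32];
        name_device(name, sizeof(name));
        puts(name);
        name_device(name, sizeof(name));
        puts(name);
        return 0;
    }
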
33601diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
33602index eec3291..8ed706b 100644
33603--- a/drivers/hid/hid-wiimote-debug.c
33604+++ b/drivers/hid/hid-wiimote-debug.c
33605@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
33606 else if (size == 0)
33607 return -EIO;
33608
33609- if (copy_to_user(u, buf, size))
33610+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
33611 return -EFAULT;
33612
33613 *off += size;
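
The wiimote debugfs read gains a size > sizeof(buf) guard ahead of copy_to_user: the copy length is derived from state the caller influences, and clamping it against the on-stack buffer turns a potential stack over-read into -EFAULT. The same belt-and-braces check is added to mousedev and the AVM B1 firmware loader further down. A userspace sketch of the guarded copy (the function name and buffer contents are hypothetical, and memcpy stands in for copy_to_user):

    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>

    /* Copy at most sizeof(buf) bytes out of a fixed stack buffer; reject
     * any length that could read past it, mirroring the added guard. */
    static ssize_t guarded_read(char *user, size_t want)
    {
        char buf[16] = "sensor-data";
        if (want > sizeof(buf))
            return -1;              /* -EFAULT in the kernel version */
        memcpy(user, buf, want);    /* copy_to_user() stand-in */
        return (ssize_t)want;
    }

    int main(void)
    {
        char out[64];
        printf("read 8   -> %zd\n", guarded_read(out, 8));
        printf("read 400 -> %zd\n", guarded_read(out, 400));
        return 0;
    }
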
33614diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
33615index 14599e2..711c965 100644
33616--- a/drivers/hid/usbhid/hiddev.c
33617+++ b/drivers/hid/usbhid/hiddev.c
33618@@ -625,7 +625,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
33619 break;
33620
33621 case HIDIOCAPPLICATION:
33622- if (arg < 0 || arg >= hid->maxapplication)
33623+ if (arg >= hid->maxapplication)
33624 break;
33625
33626 for (i = 0; i < hid->maxcollection; i++)
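
The hiddev change drops arg < 0 because arg is an unsigned long in that ioctl handler: the comparison can never be true, so the remaining arg >= hid->maxapplication test is the whole check, and compilers warn about the tautology besides. A two-line demonstration:

    #include <stdio.h>

    int main(void)
    {
        unsigned long arg = (unsigned long)-5; /* "negative" input wraps huge */
        /* (arg < 0) can never be true for an unsigned type...
         * (expect a tautological-comparison warning here) */
        printf("arg < 0    : %d\n", arg < 0 ? 1 : 0);
        /* ...so the upper bound alone already rejects the wrapped value. */
        printf("arg >= max : %d\n", arg >= 16 ? 1 : 0);
        return 0;
    }
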
33627diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
33628index f4c3d28..82f45a9 100644
33629--- a/drivers/hv/channel.c
33630+++ b/drivers/hv/channel.c
33631@@ -402,8 +402,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
33632 int ret = 0;
33633 int t;
33634
33635- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
33636- atomic_inc(&vmbus_connection.next_gpadl_handle);
33637+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
33638+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
33639
33640 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
33641 if (ret)
33642diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
33643index 3648f8f..30ef30d 100644
33644--- a/drivers/hv/hv.c
33645+++ b/drivers/hv/hv.c
33646@@ -111,7 +111,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
33647 u64 output_address = (output) ? virt_to_phys(output) : 0;
33648 u32 output_address_hi = output_address >> 32;
33649 u32 output_address_lo = output_address & 0xFFFFFFFF;
33650- void *hypercall_page = hv_context.hypercall_page;
33651+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
33652
33653 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
33654 "=a"(hv_status_lo) : "d" (control_hi),
33655diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
33656index d8d1fad..b91caf7 100644
33657--- a/drivers/hv/hyperv_vmbus.h
33658+++ b/drivers/hv/hyperv_vmbus.h
33659@@ -594,7 +594,7 @@ enum vmbus_connect_state {
33660 struct vmbus_connection {
33661 enum vmbus_connect_state conn_state;
33662
33663- atomic_t next_gpadl_handle;
33664+ atomic_unchecked_t next_gpadl_handle;
33665
33666 /*
33667 * Represents channel interrupts. Each bit position represents a
33668diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
33669index 8e1a9ec..4687821 100644
33670--- a/drivers/hv/vmbus_drv.c
33671+++ b/drivers/hv/vmbus_drv.c
33672@@ -629,10 +629,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
33673 {
33674 int ret = 0;
33675
33676- static atomic_t device_num = ATOMIC_INIT(0);
33677+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
33678
33679 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
33680- atomic_inc_return(&device_num));
33681+ atomic_inc_return_unchecked(&device_num));
33682
33683 child_device_obj->device.bus = &hv_bus;
33684 child_device_obj->device.parent = &hv_acpi_dev->dev;
33685diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
33686index 07a0c1a..0cac334 100644
33687--- a/drivers/hwmon/sht15.c
33688+++ b/drivers/hwmon/sht15.c
33689@@ -169,7 +169,7 @@ struct sht15_data {
33690 int supply_uV;
33691 bool supply_uV_valid;
33692 struct work_struct update_supply_work;
33693- atomic_t interrupt_handled;
33694+ atomic_unchecked_t interrupt_handled;
33695 };
33696
33697 /**
33698@@ -512,13 +512,13 @@ static int sht15_measurement(struct sht15_data *data,
33699 return ret;
33700
33701 gpio_direction_input(data->pdata->gpio_data);
33702- atomic_set(&data->interrupt_handled, 0);
33703+ atomic_set_unchecked(&data->interrupt_handled, 0);
33704
33705 enable_irq(gpio_to_irq(data->pdata->gpio_data));
33706 if (gpio_get_value(data->pdata->gpio_data) == 0) {
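
The gigaset hunk is one instance of a tree-wide change in this patch: the open count in struct tty_port becomes an atomic_t, so concurrent opens and closes cannot lose updates, and !atomic_dec_return(&port->count) both drops the count and tells the closer whether it was the last user out. isdn_tty below gets the same treatment. A compact sketch of that open/close protocol with C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int port_count;

    static void port_open(void)
    {
        if (atomic_fetch_add(&port_count, 1) + 1 == 1)
            puts("first open: attach tty, start hardware");
    }

    static void port_close(void)
    {
        if (atomic_fetch_sub(&port_count, 1) - 1 == 0)
            puts("last close: detach tty, stop hardware");
    }

    int main(void)
    {
        port_open();  /* first open */
        port_open();  /* second user */
        port_close();
        port_close(); /* last close */
        return 0;
    }
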
33707 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
33708 /* Only relevant if the interrupt hasn't occurred. */
33709- if (!atomic_read(&data->interrupt_handled))
33710+ if (!atomic_read_unchecked(&data->interrupt_handled))
33711 schedule_work(&data->read_work);
33712 }
33713 ret = wait_event_timeout(data->wait_queue,
33714@@ -785,7 +785,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
33715
33716 /* First disable the interrupt */
33717 disable_irq_nosync(irq);
33718- atomic_inc(&data->interrupt_handled);
33719+ atomic_inc_unchecked(&data->interrupt_handled);
33720 /* Then schedule a reading work struct */
33721 if (data->state != SHT15_READING_NOTHING)
33722 schedule_work(&data->read_work);
33723@@ -807,11 +807,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
33724 * If not, then start the interrupt again - care here as could
33725 * have gone low in meantime so verify it hasn't!
33726 */
33727- atomic_set(&data->interrupt_handled, 0);
33728+ atomic_set_unchecked(&data->interrupt_handled, 0);
33729 enable_irq(gpio_to_irq(data->pdata->gpio_data));
33730 /* If still not occurred or another handler was scheduled */
33731 if (gpio_get_value(data->pdata->gpio_data)
33732- || atomic_read(&data->interrupt_handled))
33733+ || atomic_read_unchecked(&data->interrupt_handled))
33734 return;
33735 }
33736
33737diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
33738index 378fcb5..5e91fa8 100644
33739--- a/drivers/i2c/busses/i2c-amd756-s4882.c
33740+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
33741@@ -43,7 +43,7 @@
33742 extern struct i2c_adapter amd756_smbus;
33743
33744 static struct i2c_adapter *s4882_adapter;
33745-static struct i2c_algorithm *s4882_algo;
33746+static i2c_algorithm_no_const *s4882_algo;
33747
33748 /* Wrapper access functions for multiplexed SMBus */
33749 static DEFINE_MUTEX(amd756_lock);
33750diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
33751index 29015eb..af2d8e9 100644
33752--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
33753+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
33754@@ -41,7 +41,7 @@
33755 extern struct i2c_adapter *nforce2_smbus;
33756
33757 static struct i2c_adapter *s4985_adapter;
33758-static struct i2c_algorithm *s4985_algo;
33759+static i2c_algorithm_no_const *s4985_algo;
33760
33761 /* Wrapper access functions for multiplexed SMBus */
33762 static DEFINE_MUTEX(nforce2_lock);
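
Both SMBus multiplexer drivers build their i2c_algorithm at runtime by copying and patching the parent adapter's, which collides with the constify treatment that makes struct i2c_algorithm read-only everywhere else; i2c_algorithm_no_const is the escape-hatch typedef grsecurity adds for exactly these writable instances. The shape of the pattern, with hypothetical ops/xfer names:

    #include <stdio.h>

    struct ops { int (*xfer)(int); };
    /* What the constify plugin would leave writable: */
    typedef struct ops ops_no_const;

    static int parent_xfer(int ch) { return ch; }
    static int mux_xfer(int ch)    { return parent_xfer(ch) + 100; }

    static const struct ops parent_ops = { .xfer = parent_xfer }; /* read-only */
    static ops_no_const mux_ops;                                  /* writable copy */

    int main(void)
    {
        mux_ops = parent_ops;    /* start from the parent's algorithm */
        mux_ops.xfer = mux_xfer; /* wrap the transfer hook */
        printf("%d\n", mux_ops.xfer(1));
        return 0;
    }
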
33763diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
33764index 8126824..55a2798 100644
33765--- a/drivers/ide/ide-cd.c
33766+++ b/drivers/ide/ide-cd.c
33767@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
33768 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
33769 if ((unsigned long)buf & alignment
33770 || blk_rq_bytes(rq) & q->dma_pad_mask
33771- || object_is_on_stack(buf))
33772+ || object_starts_on_stack(buf))
33773 drive->dma = 0;
33774 }
33775 }
33776diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
33777index 394fea2..c833880 100644
33778--- a/drivers/infiniband/core/cm.c
33779+++ b/drivers/infiniband/core/cm.c
33780@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
33781
33782 struct cm_counter_group {
33783 struct kobject obj;
33784- atomic_long_t counter[CM_ATTR_COUNT];
33785+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
33786 };
33787
33788 struct cm_counter_attribute {
33789@@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
33790 struct ib_mad_send_buf *msg = NULL;
33791 int ret;
33792
33793- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33794+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33795 counter[CM_REQ_COUNTER]);
33796
33797 /* Quick state check to discard duplicate REQs. */
33798@@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
33799 if (!cm_id_priv)
33800 return;
33801
33802- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33803+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33804 counter[CM_REP_COUNTER]);
33805 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
33806 if (ret)
33807@@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
33808 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
33809 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
33810 spin_unlock_irq(&cm_id_priv->lock);
33811- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33812+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33813 counter[CM_RTU_COUNTER]);
33814 goto out;
33815 }
33816@@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
33817 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
33818 dreq_msg->local_comm_id);
33819 if (!cm_id_priv) {
33820- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33821+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33822 counter[CM_DREQ_COUNTER]);
33823 cm_issue_drep(work->port, work->mad_recv_wc);
33824 return -EINVAL;
33825@@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
33826 case IB_CM_MRA_REP_RCVD:
33827 break;
33828 case IB_CM_TIMEWAIT:
33829- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33830+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33831 counter[CM_DREQ_COUNTER]);
33832 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
33833 goto unlock;
33834@@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
33835 cm_free_msg(msg);
33836 goto deref;
33837 case IB_CM_DREQ_RCVD:
33838- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33839+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33840 counter[CM_DREQ_COUNTER]);
33841 goto unlock;
33842 default:
33843@@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
33844 ib_modify_mad(cm_id_priv->av.port->mad_agent,
33845 cm_id_priv->msg, timeout)) {
33846 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
33847- atomic_long_inc(&work->port->
33848+ atomic_long_inc_unchecked(&work->port->
33849 counter_group[CM_RECV_DUPLICATES].
33850 counter[CM_MRA_COUNTER]);
33851 goto out;
33852@@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
33853 break;
33854 case IB_CM_MRA_REQ_RCVD:
33855 case IB_CM_MRA_REP_RCVD:
33856- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33857+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33858 counter[CM_MRA_COUNTER]);
33859 /* fall through */
33860 default:
33861@@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
33862 case IB_CM_LAP_IDLE:
33863 break;
33864 case IB_CM_MRA_LAP_SENT:
33865- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33866+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33867 counter[CM_LAP_COUNTER]);
33868 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
33869 goto unlock;
33870@@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
33871 cm_free_msg(msg);
33872 goto deref;
33873 case IB_CM_LAP_RCVD:
33874- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33875+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33876 counter[CM_LAP_COUNTER]);
33877 goto unlock;
33878 default:
33879@@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
33880 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
33881 if (cur_cm_id_priv) {
33882 spin_unlock_irq(&cm.lock);
33883- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33884+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33885 counter[CM_SIDR_REQ_COUNTER]);
33886 goto out; /* Duplicate message. */
33887 }
33888@@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
33889 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
33890 msg->retries = 1;
33891
33892- atomic_long_add(1 + msg->retries,
33893+ atomic_long_add_unchecked(1 + msg->retries,
33894 &port->counter_group[CM_XMIT].counter[attr_index]);
33895 if (msg->retries)
33896- atomic_long_add(msg->retries,
33897+ atomic_long_add_unchecked(msg->retries,
33898 &port->counter_group[CM_XMIT_RETRIES].
33899 counter[attr_index]);
33900
33901@@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
33902 }
33903
33904 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
33905- atomic_long_inc(&port->counter_group[CM_RECV].
33906+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
33907 counter[attr_id - CM_ATTR_ID_OFFSET]);
33908
33909 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
33910@@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
33911 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
33912
33913 return sprintf(buf, "%ld\n",
33914- atomic_long_read(&group->counter[cm_attr->index]));
33915+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
33916 }
33917
33918 static const struct sysfs_ops cm_counter_ops = {
33919diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
33920index 176c8f9..2627b62 100644
33921--- a/drivers/infiniband/core/fmr_pool.c
33922+++ b/drivers/infiniband/core/fmr_pool.c
33923@@ -98,8 +98,8 @@ struct ib_fmr_pool {
33924
33925 struct task_struct *thread;
33926
33927- atomic_t req_ser;
33928- atomic_t flush_ser;
33929+ atomic_unchecked_t req_ser;
33930+ atomic_unchecked_t flush_ser;
33931
33932 wait_queue_head_t force_wait;
33933 };
33934@@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
33935 struct ib_fmr_pool *pool = pool_ptr;
33936
33937 do {
33938- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
33939+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
33940 ib_fmr_batch_release(pool);
33941
33942- atomic_inc(&pool->flush_ser);
33943+ atomic_inc_unchecked(&pool->flush_ser);
33944 wake_up_interruptible(&pool->force_wait);
33945
33946 if (pool->flush_function)
33947@@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
33948 }
33949
33950 set_current_state(TASK_INTERRUPTIBLE);
33951- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
33952+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
33953 !kthread_should_stop())
33954 schedule();
33955 __set_current_state(TASK_RUNNING);
33956@@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
33957 pool->dirty_watermark = params->dirty_watermark;
33958 pool->dirty_len = 0;
33959 spin_lock_init(&pool->pool_lock);
33960- atomic_set(&pool->req_ser, 0);
33961- atomic_set(&pool->flush_ser, 0);
33962+ atomic_set_unchecked(&pool->req_ser, 0);
33963+ atomic_set_unchecked(&pool->flush_ser, 0);
33964 init_waitqueue_head(&pool->force_wait);
33965
33966 pool->thread = kthread_run(ib_fmr_cleanup_thread,
33967@@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
33968 }
33969 spin_unlock_irq(&pool->pool_lock);
33970
33971- serial = atomic_inc_return(&pool->req_ser);
33972+ serial = atomic_inc_return_unchecked(&pool->req_ser);
33973 wake_up_process(pool->thread);
33974
33975 if (wait_event_interruptible(pool->force_wait,
33976- atomic_read(&pool->flush_ser) - serial >= 0))
33977+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
33978 return -EINTR;
33979
33980 return 0;
33981@@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
33982 } else {
33983 list_add_tail(&fmr->list, &pool->dirty_list);
33984 if (++pool->dirty_len >= pool->dirty_watermark) {
33985- atomic_inc(&pool->req_ser);
33986+ atomic_inc_unchecked(&pool->req_ser);
33987 wake_up_process(pool->thread);
33988 }
33989 }
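
fmr_pool shows why these serial counters must keep wrapping: correctness rests on differences like flush_ser - req_ser < 0, which is classic RFC 1982-style serial arithmetic and only works across 32-bit wrap if the increments overflow silently — precisely what atomic_*_unchecked preserves. A demonstration at the wrap boundary:

    #include <stdint.h>
    #include <stdio.h>

    /* "a is behind b" in wrapping serial arithmetic. */
    static int serial_before(uint32_t a, uint32_t b)
    {
        return (int32_t)(a - b) < 0;
    }

    int main(void)
    {
        uint32_t flush = UINT32_MAX; /* just about to wrap */
        uint32_t req   = flush + 2;  /* wraps to 1 */

        printf("req (wrapped)     = %u\n", req);
        printf("flush before req: %d\n", serial_before(flush, req));
        printf("req before flush: %d\n", serial_before(req, flush));
        return 0;
    }
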
33990diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
33991index afd8179..598063f 100644
33992--- a/drivers/infiniband/hw/cxgb4/mem.c
33993+++ b/drivers/infiniband/hw/cxgb4/mem.c
33994@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
33995 int err;
33996 struct fw_ri_tpte tpt;
33997 u32 stag_idx;
33998- static atomic_t key;
33999+ static atomic_unchecked_t key;
34000
34001 if (c4iw_fatal_error(rdev))
34002 return -EIO;
34003@@ -139,7 +139,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
34004 if (rdev->stats.stag.cur > rdev->stats.stag.max)
34005 rdev->stats.stag.max = rdev->stats.stag.cur;
34006 mutex_unlock(&rdev->stats.lock);
34007- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
34008+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
34009 }
34010 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
34011 __func__, stag_state, type, pdid, stag_idx);
34012diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
34013index 79b3dbc..96e5fcc 100644
34014--- a/drivers/infiniband/hw/ipath/ipath_rc.c
34015+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
34016@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
34017 struct ib_atomic_eth *ateth;
34018 struct ipath_ack_entry *e;
34019 u64 vaddr;
34020- atomic64_t *maddr;
34021+ atomic64_unchecked_t *maddr;
34022 u64 sdata;
34023 u32 rkey;
34024 u8 next;
34025@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
34026 IB_ACCESS_REMOTE_ATOMIC)))
34027 goto nack_acc_unlck;
34028 /* Perform atomic OP and save result. */
34029- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
34030+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
34031 sdata = be64_to_cpu(ateth->swap_data);
34032 e = &qp->s_ack_queue[qp->r_head_ack_queue];
34033 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
34034- (u64) atomic64_add_return(sdata, maddr) - sdata :
34035+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
34036 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
34037 be64_to_cpu(ateth->compare_data),
34038 sdata);
34039diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
34040index 1f95bba..9530f87 100644
34041--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
34042+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
34043@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
34044 unsigned long flags;
34045 struct ib_wc wc;
34046 u64 sdata;
34047- atomic64_t *maddr;
34048+ atomic64_unchecked_t *maddr;
34049 enum ib_wc_status send_status;
34050
34051 /*
34052@@ -382,11 +382,11 @@ again:
34053 IB_ACCESS_REMOTE_ATOMIC)))
34054 goto acc_err;
34055 /* Perform atomic OP and save result. */
34056- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
34057+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
34058 sdata = wqe->wr.wr.atomic.compare_add;
34059 *(u64 *) sqp->s_sge.sge.vaddr =
34060 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
34061- (u64) atomic64_add_return(sdata, maddr) - sdata :
34062+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
34063 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
34064 sdata, wqe->wr.wr.atomic.swap);
34065 goto send_comp;
34066diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
34067index 748db2d..5f75cc3 100644
34068--- a/drivers/infiniband/hw/nes/nes.c
34069+++ b/drivers/infiniband/hw/nes/nes.c
34070@@ -98,7 +98,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
34071 LIST_HEAD(nes_adapter_list);
34072 static LIST_HEAD(nes_dev_list);
34073
34074-atomic_t qps_destroyed;
34075+atomic_unchecked_t qps_destroyed;
34076
34077 static unsigned int ee_flsh_adapter;
34078 static unsigned int sysfs_nonidx_addr;
34079@@ -267,7 +267,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
34080 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
34081 struct nes_adapter *nesadapter = nesdev->nesadapter;
34082
34083- atomic_inc(&qps_destroyed);
34084+ atomic_inc_unchecked(&qps_destroyed);
34085
34086 /* Free the control structures */
34087
34088diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
34089index 33cc589..3bd6538 100644
34090--- a/drivers/infiniband/hw/nes/nes.h
34091+++ b/drivers/infiniband/hw/nes/nes.h
34092@@ -177,17 +177,17 @@ extern unsigned int nes_debug_level;
34093 extern unsigned int wqm_quanta;
34094 extern struct list_head nes_adapter_list;
34095
34096-extern atomic_t cm_connects;
34097-extern atomic_t cm_accepts;
34098-extern atomic_t cm_disconnects;
34099-extern atomic_t cm_closes;
34100-extern atomic_t cm_connecteds;
34101-extern atomic_t cm_connect_reqs;
34102-extern atomic_t cm_rejects;
34103-extern atomic_t mod_qp_timouts;
34104-extern atomic_t qps_created;
34105-extern atomic_t qps_destroyed;
34106-extern atomic_t sw_qps_destroyed;
34107+extern atomic_unchecked_t cm_connects;
34108+extern atomic_unchecked_t cm_accepts;
34109+extern atomic_unchecked_t cm_disconnects;
34110+extern atomic_unchecked_t cm_closes;
34111+extern atomic_unchecked_t cm_connecteds;
34112+extern atomic_unchecked_t cm_connect_reqs;
34113+extern atomic_unchecked_t cm_rejects;
34114+extern atomic_unchecked_t mod_qp_timouts;
34115+extern atomic_unchecked_t qps_created;
34116+extern atomic_unchecked_t qps_destroyed;
34117+extern atomic_unchecked_t sw_qps_destroyed;
34118 extern u32 mh_detected;
34119 extern u32 mh_pauses_sent;
34120 extern u32 cm_packets_sent;
34121@@ -196,16 +196,16 @@ extern u32 cm_packets_created;
34122 extern u32 cm_packets_received;
34123 extern u32 cm_packets_dropped;
34124 extern u32 cm_packets_retrans;
34125-extern atomic_t cm_listens_created;
34126-extern atomic_t cm_listens_destroyed;
34127+extern atomic_unchecked_t cm_listens_created;
34128+extern atomic_unchecked_t cm_listens_destroyed;
34129 extern u32 cm_backlog_drops;
34130-extern atomic_t cm_loopbacks;
34131-extern atomic_t cm_nodes_created;
34132-extern atomic_t cm_nodes_destroyed;
34133-extern atomic_t cm_accel_dropped_pkts;
34134-extern atomic_t cm_resets_recvd;
34135-extern atomic_t pau_qps_created;
34136-extern atomic_t pau_qps_destroyed;
34137+extern atomic_unchecked_t cm_loopbacks;
34138+extern atomic_unchecked_t cm_nodes_created;
34139+extern atomic_unchecked_t cm_nodes_destroyed;
34140+extern atomic_unchecked_t cm_accel_dropped_pkts;
34141+extern atomic_unchecked_t cm_resets_recvd;
34142+extern atomic_unchecked_t pau_qps_created;
34143+extern atomic_unchecked_t pau_qps_destroyed;
34144
34145 extern u32 int_mod_timer_init;
34146 extern u32 int_mod_cq_depth_256;
34147diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
34148index cfaacaf..fa0722e 100644
34149--- a/drivers/infiniband/hw/nes/nes_cm.c
34150+++ b/drivers/infiniband/hw/nes/nes_cm.c
34151@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
34152 u32 cm_packets_retrans;
34153 u32 cm_packets_created;
34154 u32 cm_packets_received;
34155-atomic_t cm_listens_created;
34156-atomic_t cm_listens_destroyed;
34157+atomic_unchecked_t cm_listens_created;
34158+atomic_unchecked_t cm_listens_destroyed;
34159 u32 cm_backlog_drops;
34160-atomic_t cm_loopbacks;
34161-atomic_t cm_nodes_created;
34162-atomic_t cm_nodes_destroyed;
34163-atomic_t cm_accel_dropped_pkts;
34164-atomic_t cm_resets_recvd;
34165+atomic_unchecked_t cm_loopbacks;
34166+atomic_unchecked_t cm_nodes_created;
34167+atomic_unchecked_t cm_nodes_destroyed;
34168+atomic_unchecked_t cm_accel_dropped_pkts;
34169+atomic_unchecked_t cm_resets_recvd;
34170
34171 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
34172 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
34173@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
34174
34175 static struct nes_cm_core *g_cm_core;
34176
34177-atomic_t cm_connects;
34178-atomic_t cm_accepts;
34179-atomic_t cm_disconnects;
34180-atomic_t cm_closes;
34181-atomic_t cm_connecteds;
34182-atomic_t cm_connect_reqs;
34183-atomic_t cm_rejects;
34184+atomic_unchecked_t cm_connects;
34185+atomic_unchecked_t cm_accepts;
34186+atomic_unchecked_t cm_disconnects;
34187+atomic_unchecked_t cm_closes;
34188+atomic_unchecked_t cm_connecteds;
34189+atomic_unchecked_t cm_connect_reqs;
34190+atomic_unchecked_t cm_rejects;
34191
34192 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
34193 {
34194@@ -1281,7 +1281,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
34195 kfree(listener);
34196 listener = NULL;
34197 ret = 0;
34198- atomic_inc(&cm_listens_destroyed);
34199+ atomic_inc_unchecked(&cm_listens_destroyed);
34200 } else {
34201 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
34202 }
34203@@ -1480,7 +1480,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
34204 cm_node->rem_mac);
34205
34206 add_hte_node(cm_core, cm_node);
34207- atomic_inc(&cm_nodes_created);
34208+ atomic_inc_unchecked(&cm_nodes_created);
34209
34210 return cm_node;
34211 }
34212@@ -1538,7 +1538,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
34213 }
34214
34215 atomic_dec(&cm_core->node_cnt);
34216- atomic_inc(&cm_nodes_destroyed);
34217+ atomic_inc_unchecked(&cm_nodes_destroyed);
34218 nesqp = cm_node->nesqp;
34219 if (nesqp) {
34220 nesqp->cm_node = NULL;
34221@@ -1602,7 +1602,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
34222
34223 static void drop_packet(struct sk_buff *skb)
34224 {
34225- atomic_inc(&cm_accel_dropped_pkts);
34226+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
34227 dev_kfree_skb_any(skb);
34228 }
34229
34230@@ -1665,7 +1665,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
34231 {
34232
34233 int reset = 0; /* whether to send reset in case of err.. */
34234- atomic_inc(&cm_resets_recvd);
34235+ atomic_inc_unchecked(&cm_resets_recvd);
34236 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
34237 " refcnt=%d\n", cm_node, cm_node->state,
34238 atomic_read(&cm_node->ref_count));
34239@@ -2306,7 +2306,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
34240 rem_ref_cm_node(cm_node->cm_core, cm_node);
34241 return NULL;
34242 }
34243- atomic_inc(&cm_loopbacks);
34244+ atomic_inc_unchecked(&cm_loopbacks);
34245 loopbackremotenode->loopbackpartner = cm_node;
34246 loopbackremotenode->tcp_cntxt.rcv_wscale =
34247 NES_CM_DEFAULT_RCV_WND_SCALE;
34248@@ -2581,7 +2581,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
34249 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
34250 else {
34251 rem_ref_cm_node(cm_core, cm_node);
34252- atomic_inc(&cm_accel_dropped_pkts);
34253+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
34254 dev_kfree_skb_any(skb);
34255 }
34256 break;
34257@@ -2889,7 +2889,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
34258
34259 if ((cm_id) && (cm_id->event_handler)) {
34260 if (issue_disconn) {
34261- atomic_inc(&cm_disconnects);
34262+ atomic_inc_unchecked(&cm_disconnects);
34263 cm_event.event = IW_CM_EVENT_DISCONNECT;
34264 cm_event.status = disconn_status;
34265 cm_event.local_addr = cm_id->local_addr;
34266@@ -2911,7 +2911,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
34267 }
34268
34269 if (issue_close) {
34270- atomic_inc(&cm_closes);
34271+ atomic_inc_unchecked(&cm_closes);
34272 nes_disconnect(nesqp, 1);
34273
34274 cm_id->provider_data = nesqp;
34275@@ -3047,7 +3047,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
34276
34277 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
34278 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
34279- atomic_inc(&cm_accepts);
34280+ atomic_inc_unchecked(&cm_accepts);
34281
34282 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
34283 netdev_refcnt_read(nesvnic->netdev));
34284@@ -3242,7 +3242,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
34285 struct nes_cm_core *cm_core;
34286 u8 *start_buff;
34287
34288- atomic_inc(&cm_rejects);
34289+ atomic_inc_unchecked(&cm_rejects);
34290 cm_node = (struct nes_cm_node *)cm_id->provider_data;
34291 loopback = cm_node->loopbackpartner;
34292 cm_core = cm_node->cm_core;
34293@@ -3302,7 +3302,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
34294 ntohl(cm_id->local_addr.sin_addr.s_addr),
34295 ntohs(cm_id->local_addr.sin_port));
34296
34297- atomic_inc(&cm_connects);
34298+ atomic_inc_unchecked(&cm_connects);
34299 nesqp->active_conn = 1;
34300
34301 /* cache the cm_id in the qp */
34302@@ -3412,7 +3412,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
34303 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
34304 return err;
34305 }
34306- atomic_inc(&cm_listens_created);
34307+ atomic_inc_unchecked(&cm_listens_created);
34308 }
34309
34310 cm_id->add_ref(cm_id);
34311@@ -3513,7 +3513,7 @@ static void cm_event_connected(struct nes_cm_event *event)
34312
34313 if (nesqp->destroyed)
34314 return;
34315- atomic_inc(&cm_connecteds);
34316+ atomic_inc_unchecked(&cm_connecteds);
34317 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
34318 " local port 0x%04X. jiffies = %lu.\n",
34319 nesqp->hwqp.qp_id,
34320@@ -3693,7 +3693,7 @@ static void cm_event_reset(struct nes_cm_event *event)
34321
34322 cm_id->add_ref(cm_id);
34323 ret = cm_id->event_handler(cm_id, &cm_event);
34324- atomic_inc(&cm_closes);
34325+ atomic_inc_unchecked(&cm_closes);
34326 cm_event.event = IW_CM_EVENT_CLOSE;
34327 cm_event.status = 0;
34328 cm_event.provider_data = cm_id->provider_data;
34329@@ -3729,7 +3729,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
34330 return;
34331 cm_id = cm_node->cm_id;
34332
34333- atomic_inc(&cm_connect_reqs);
34334+ atomic_inc_unchecked(&cm_connect_reqs);
34335 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
34336 cm_node, cm_id, jiffies);
34337
34338@@ -3769,7 +3769,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
34339 return;
34340 cm_id = cm_node->cm_id;
34341
34342- atomic_inc(&cm_connect_reqs);
34343+ atomic_inc_unchecked(&cm_connect_reqs);
34344 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
34345 cm_node, cm_id, jiffies);
34346
34347diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
34348index 3ba7be3..c81f6ff 100644
34349--- a/drivers/infiniband/hw/nes/nes_mgt.c
34350+++ b/drivers/infiniband/hw/nes/nes_mgt.c
34351@@ -40,8 +40,8 @@
34352 #include "nes.h"
34353 #include "nes_mgt.h"
34354
34355-atomic_t pau_qps_created;
34356-atomic_t pau_qps_destroyed;
34357+atomic_unchecked_t pau_qps_created;
34358+atomic_unchecked_t pau_qps_destroyed;
34359
34360 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
34361 {
34362@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
34363 {
34364 struct sk_buff *skb;
34365 unsigned long flags;
34366- atomic_inc(&pau_qps_destroyed);
34367+ atomic_inc_unchecked(&pau_qps_destroyed);
34368
34369 /* Free packets that have not yet been forwarded */
34370 /* Lock is acquired by skb_dequeue when removing the skb */
34371@@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
34372 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
34373 skb_queue_head_init(&nesqp->pau_list);
34374 spin_lock_init(&nesqp->pau_lock);
34375- atomic_inc(&pau_qps_created);
34376+ atomic_inc_unchecked(&pau_qps_created);
34377 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
34378 }
34379
34380diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
34381index 0564be7..f68b0f1 100644
34382--- a/drivers/infiniband/hw/nes/nes_nic.c
34383+++ b/drivers/infiniband/hw/nes/nes_nic.c
34384@@ -1272,39 +1272,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
34385 target_stat_values[++index] = mh_detected;
34386 target_stat_values[++index] = mh_pauses_sent;
34387 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
34388- target_stat_values[++index] = atomic_read(&cm_connects);
34389- target_stat_values[++index] = atomic_read(&cm_accepts);
34390- target_stat_values[++index] = atomic_read(&cm_disconnects);
34391- target_stat_values[++index] = atomic_read(&cm_connecteds);
34392- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
34393- target_stat_values[++index] = atomic_read(&cm_rejects);
34394- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
34395- target_stat_values[++index] = atomic_read(&qps_created);
34396- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
34397- target_stat_values[++index] = atomic_read(&qps_destroyed);
34398- target_stat_values[++index] = atomic_read(&cm_closes);
34399+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
34400+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
34401+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
34402+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
34403+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
34404+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
34405+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
34406+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
34407+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
34408+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
34409+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
34410 target_stat_values[++index] = cm_packets_sent;
34411 target_stat_values[++index] = cm_packets_bounced;
34412 target_stat_values[++index] = cm_packets_created;
34413 target_stat_values[++index] = cm_packets_received;
34414 target_stat_values[++index] = cm_packets_dropped;
34415 target_stat_values[++index] = cm_packets_retrans;
34416- target_stat_values[++index] = atomic_read(&cm_listens_created);
34417- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
34418+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
34419+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
34420 target_stat_values[++index] = cm_backlog_drops;
34421- target_stat_values[++index] = atomic_read(&cm_loopbacks);
34422- target_stat_values[++index] = atomic_read(&cm_nodes_created);
34423- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
34424- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
34425- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
34426+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
34427+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
34428+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
34429+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
34430+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
34431 target_stat_values[++index] = nesadapter->free_4kpbl;
34432 target_stat_values[++index] = nesadapter->free_256pbl;
34433 target_stat_values[++index] = int_mod_timer_init;
34434 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
34435 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
34436 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
34437- target_stat_values[++index] = atomic_read(&pau_qps_created);
34438- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
34439+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
34440+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
34441 }
34442
34443 /**
34444diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
34445index 07e4fba..685f041 100644
34446--- a/drivers/infiniband/hw/nes/nes_verbs.c
34447+++ b/drivers/infiniband/hw/nes/nes_verbs.c
34448@@ -46,9 +46,9 @@
34449
34450 #include <rdma/ib_umem.h>
34451
34452-atomic_t mod_qp_timouts;
34453-atomic_t qps_created;
34454-atomic_t sw_qps_destroyed;
34455+atomic_unchecked_t mod_qp_timouts;
34456+atomic_unchecked_t qps_created;
34457+atomic_unchecked_t sw_qps_destroyed;
34458
34459 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
34460
34461@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
34462 if (init_attr->create_flags)
34463 return ERR_PTR(-EINVAL);
34464
34465- atomic_inc(&qps_created);
34466+ atomic_inc_unchecked(&qps_created);
34467 switch (init_attr->qp_type) {
34468 case IB_QPT_RC:
34469 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
34470@@ -1462,7 +1462,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
34471 struct iw_cm_event cm_event;
34472 int ret = 0;
34473
34474- atomic_inc(&sw_qps_destroyed);
34475+ atomic_inc_unchecked(&sw_qps_destroyed);
34476 nesqp->destroyed = 1;
34477
34478 /* Blow away the connection if it exists. */
34479diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
34480index 4d11575..3e890e5 100644
34481--- a/drivers/infiniband/hw/qib/qib.h
34482+++ b/drivers/infiniband/hw/qib/qib.h
34483@@ -51,6 +51,7 @@
34484 #include <linux/completion.h>
34485 #include <linux/kref.h>
34486 #include <linux/sched.h>
34487+#include <linux/slab.h>
34488
34489 #include "qib_common.h"
34490 #include "qib_verbs.h"
34491diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
34492index da739d9..da1c7f4 100644
34493--- a/drivers/input/gameport/gameport.c
34494+++ b/drivers/input/gameport/gameport.c
34495@@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
34496 */
34497 static void gameport_init_port(struct gameport *gameport)
34498 {
34499- static atomic_t gameport_no = ATOMIC_INIT(0);
34500+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
34501
34502 __module_get(THIS_MODULE);
34503
34504 mutex_init(&gameport->drv_mutex);
34505 device_initialize(&gameport->dev);
34506 dev_set_name(&gameport->dev, "gameport%lu",
34507- (unsigned long)atomic_inc_return(&gameport_no) - 1);
34508+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
34509 gameport->dev.bus = &gameport_bus;
34510 gameport->dev.release = gameport_release_port;
34511 if (gameport->parent)
34512diff --git a/drivers/input/input.c b/drivers/input/input.c
34513index 53a0dde..abffda7 100644
34514--- a/drivers/input/input.c
34515+++ b/drivers/input/input.c
34516@@ -1902,7 +1902,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
34517 */
34518 int input_register_device(struct input_dev *dev)
34519 {
34520- static atomic_t input_no = ATOMIC_INIT(0);
34521+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
34522 struct input_handler *handler;
34523 unsigned int packet_size;
34524 const char *path;
34525@@ -1945,7 +1945,7 @@ int input_register_device(struct input_dev *dev)
34526 dev->setkeycode = input_default_setkeycode;
34527
34528 dev_set_name(&dev->dev, "input%ld",
34529- (unsigned long) atomic_inc_return(&input_no) - 1);
34530+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
34531
34532 error = device_add(&dev->dev);
34533 if (error)
34534diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
34535index 04c69af..5f92d00 100644
34536--- a/drivers/input/joystick/sidewinder.c
34537+++ b/drivers/input/joystick/sidewinder.c
34538@@ -30,6 +30,7 @@
34539 #include <linux/kernel.h>
34540 #include <linux/module.h>
34541 #include <linux/slab.h>
34542+#include <linux/sched.h>
34543 #include <linux/init.h>
34544 #include <linux/input.h>
34545 #include <linux/gameport.h>
34546diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
34547index 83811e4..0822b90 100644
34548--- a/drivers/input/joystick/xpad.c
34549+++ b/drivers/input/joystick/xpad.c
34550@@ -726,7 +726,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
34551
34552 static int xpad_led_probe(struct usb_xpad *xpad)
34553 {
34554- static atomic_t led_seq = ATOMIC_INIT(0);
34555+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
34556 long led_no;
34557 struct xpad_led *led;
34558 struct led_classdev *led_cdev;
34559@@ -739,7 +739,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
34560 if (!led)
34561 return -ENOMEM;
34562
34563- led_no = (long)atomic_inc_return(&led_seq) - 1;
34564+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
34565
34566 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
34567 led->xpad = xpad;
34568diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
34569index 4c842c3..590b0bf 100644
34570--- a/drivers/input/mousedev.c
34571+++ b/drivers/input/mousedev.c
34572@@ -738,7 +738,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
34573
34574 spin_unlock_irq(&client->packet_lock);
34575
34576- if (copy_to_user(buffer, data, count))
34577+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
34578 return -EFAULT;
34579
34580 return count;
34581diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
34582index d0f7533..fb8215b 100644
34583--- a/drivers/input/serio/serio.c
34584+++ b/drivers/input/serio/serio.c
34585@@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
34586 */
34587 static void serio_init_port(struct serio *serio)
34588 {
34589- static atomic_t serio_no = ATOMIC_INIT(0);
34590+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
34591
34592 __module_get(THIS_MODULE);
34593
34594@@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
34595 mutex_init(&serio->drv_mutex);
34596 device_initialize(&serio->dev);
34597 dev_set_name(&serio->dev, "serio%ld",
34598- (long)atomic_inc_return(&serio_no) - 1);
34599+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
34600 serio->dev.bus = &serio_bus;
34601 serio->dev.release = serio_release_port;
34602 serio->dev.groups = serio_device_attr_groups;
34603diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
34604index 81837b0..d7470e8 100644
34605--- a/drivers/iommu/amd_iommu_init.c
34606+++ b/drivers/iommu/amd_iommu_init.c
34607@@ -1888,7 +1888,7 @@ static int __init state_next(void)
34608 case IOMMU_ACPI_FINISHED:
34609 early_enable_iommus();
34610 register_syscore_ops(&amd_iommu_syscore_ops);
34611- x86_platform.iommu_shutdown = disable_iommus;
34612+ *(void **)&x86_platform.iommu_shutdown = disable_iommus;
34613 init_state = IOMMU_ENABLED;
34614 break;
34615 case IOMMU_ENABLED:
34616@@ -2030,7 +2030,7 @@ int __init amd_iommu_detect(void)
34617
34618 amd_iommu_detected = true;
34619 iommu_detected = 1;
34620- x86_init.iommu.iommu_init = amd_iommu_init;
34621+ *(void **)&x86_init.iommu.iommu_init = amd_iommu_init;
34622
34623 return 0;
34624 }
34625diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
34626index 86e2f4a..d1cec5d 100644
34627--- a/drivers/iommu/dmar.c
34628+++ b/drivers/iommu/dmar.c
34629@@ -555,7 +555,7 @@ int __init detect_intel_iommu(void)
34630
34631 #ifdef CONFIG_X86
34632 if (ret)
34633- x86_init.iommu.iommu_init = intel_iommu_init;
34634+ *(void **)&x86_init.iommu.iommu_init = intel_iommu_init;
34635 #endif
34636 }
34637 early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
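
In the IOMMU hunks, the x86_init and x86_platform ops tables are const-qualified under the hardened build, so boot-time registration of iommu_init and iommu_shutdown is written through a *(void **)& cast; the cast silences the compiler's const check while the underlying memory is still writable at that stage of boot (an assumption about the hardened build's arrangement). The sketch below shows only the syntax, on deliberately non-const stand-ins so it stays well-defined C; the function-pointer-to-void* conversion is conditionally supported by the standard but is the kernel's own idiom here:

    #include <stdio.h>

    struct iommu_ops { int (*iommu_init)(void); };
    struct platform  { void (*iommu_shutdown)(void); };

    static int  my_iommu_init(void) { puts("iommu init"); return 0; }
    static void my_shutdown(void)   { puts("iommu shutdown"); }

    /* Stand-ins for x86_init / x86_platform; left non-const here,
     * unlike the constified kernel originals. */
    static struct iommu_ops x86_init_iommu;
    static struct platform  x86_platform;

    int main(void)
    {
        /* Writing through void ** sidesteps the const qualifier the
         * constify plugin would put on the member. */
        *(void **)&x86_init_iommu.iommu_init   = (void *)my_iommu_init;
        *(void **)&x86_platform.iommu_shutdown = (void *)my_shutdown;

        x86_init_iommu.iommu_init();
        x86_platform.iommu_shutdown();
        return 0;
    }
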
34638diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
34639index c679867..6e2e34d 100644
34640--- a/drivers/isdn/capi/capi.c
34641+++ b/drivers/isdn/capi/capi.c
34642@@ -83,8 +83,8 @@ struct capiminor {
34643
34644 struct capi20_appl *ap;
34645 u32 ncci;
34646- atomic_t datahandle;
34647- atomic_t msgid;
34648+ atomic_unchecked_t datahandle;
34649+ atomic_unchecked_t msgid;
34650
34651 struct tty_port port;
34652 int ttyinstop;
34653@@ -393,7 +393,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
34654 capimsg_setu16(s, 2, mp->ap->applid);
34655 capimsg_setu8 (s, 4, CAPI_DATA_B3);
34656 capimsg_setu8 (s, 5, CAPI_RESP);
34657- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
34658+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
34659 capimsg_setu32(s, 8, mp->ncci);
34660 capimsg_setu16(s, 12, datahandle);
34661 }
34662@@ -514,14 +514,14 @@ static void handle_minor_send(struct capiminor *mp)
34663 mp->outbytes -= len;
34664 spin_unlock_bh(&mp->outlock);
34665
34666- datahandle = atomic_inc_return(&mp->datahandle);
34667+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
34668 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
34669 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
34670 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
34671 capimsg_setu16(skb->data, 2, mp->ap->applid);
34672 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
34673 capimsg_setu8 (skb->data, 5, CAPI_REQ);
34674- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
34675+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
34676 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
34677 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
34678 capimsg_setu16(skb->data, 16, len); /* Data length */
34679diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
34680index 67abf3f..076b3a6 100644
34681--- a/drivers/isdn/gigaset/interface.c
34682+++ b/drivers/isdn/gigaset/interface.c
34683@@ -160,9 +160,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
34684 }
34685 tty->driver_data = cs;
34686
34687- ++cs->port.count;
34688+ atomic_inc(&cs->port.count);
34689
34690- if (cs->port.count == 1) {
34691+ if (atomic_read(&cs->port.count) == 1) {
34692 tty_port_tty_set(&cs->port, tty);
34693 tty->low_latency = 1;
34694 }
34695@@ -186,9 +186,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
34696
34697 if (!cs->connected)
34698 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
34699- else if (!cs->port.count)
34700+ else if (!atomic_read(&cs->port.count))
34701 dev_warn(cs->dev, "%s: device not opened\n", __func__);
34702- else if (!--cs->port.count)
34703+ else if (!atomic_dec_return(&cs->port.count))
34704 tty_port_tty_set(&cs->port, NULL);
34705
34706 mutex_unlock(&cs->mutex);
34707diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
34708index 821f7ac..28d4030 100644
34709--- a/drivers/isdn/hardware/avm/b1.c
34710+++ b/drivers/isdn/hardware/avm/b1.c
34711@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
34712 }
34713 if (left) {
34714 if (t4file->user) {
34715- if (copy_from_user(buf, dp, left))
34716+ if (left > sizeof buf || copy_from_user(buf, dp, left))
34717 return -EFAULT;
34718 } else {
34719 memcpy(buf, dp, left);
34720@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
34721 }
34722 if (left) {
34723 if (config->user) {
34724- if (copy_from_user(buf, dp, left))
34725+ if (left > sizeof buf || copy_from_user(buf, dp, left))
34726 return -EFAULT;
34727 } else {
34728 memcpy(buf, dp, left);
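
Both b1.c hunks bound a caller-controlled length against a fixed-size stack
buffer before copying from userspace; without the check, a corrupted length
field would overflow buf. The icn.c hunk further down repeats the pattern. A
self-contained sketch of the hardened copy (function name and buffer size are
hypothetical):

    #include <linux/uaccess.h>

    static int load_chunk(const void __user *src, size_t left)
    {
            unsigned char buf[256];

            /* reject lengths the stack buffer cannot hold before copying */
            if (left > sizeof(buf) || copy_from_user(buf, src, left))
                    return -EFAULT;
            /* ... consume buf[0..left) ... */
            return 0;
    }
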
34729diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
34730index b817809..409caff 100644
34731--- a/drivers/isdn/i4l/isdn_tty.c
34732+++ b/drivers/isdn/i4l/isdn_tty.c
34733@@ -1513,9 +1513,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
34734
34735 #ifdef ISDN_DEBUG_MODEM_OPEN
34736 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
34737- port->count);
34738+ atomic_read(&port->count));
34739 #endif
34740- port->count++;
34741+ atomic_inc(&port->count);
34742 port->tty = tty;
34743 /*
34744 * Start up serial port
34745@@ -1559,7 +1559,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
34746 #endif
34747 return;
34748 }
34749- if ((tty->count == 1) && (port->count != 1)) {
34750+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
34751 /*
34752 * Uh, oh. tty->count is 1, which means that the tty
34753 * structure will be freed. Info->count should always
34754@@ -1568,15 +1568,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
34755 * serial port won't be shutdown.
34756 */
34757 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
34758- "info->count is %d\n", port->count);
34759- port->count = 1;
34760+ "info->count is %d\n", atomic_read(&port->count));
34761+ atomic_set(&port->count, 1);
34762 }
34763- if (--port->count < 0) {
34764+ if (atomic_dec_return(&port->count) < 0) {
34765 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
34766- info->line, port->count);
34767- port->count = 0;
34768+ info->line, atomic_read(&port->count));
34769+ atomic_set(&port->count, 0);
34770 }
34771- if (port->count) {
34772+ if (atomic_read(&port->count)) {
34773 #ifdef ISDN_DEBUG_MODEM_OPEN
34774 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
34775 #endif
34776@@ -1630,7 +1630,7 @@ isdn_tty_hangup(struct tty_struct *tty)
34777 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
34778 return;
34779 isdn_tty_shutdown(info);
34780- port->count = 0;
34781+ atomic_set(&port->count, 0);
34782 port->flags &= ~ASYNC_NORMAL_ACTIVE;
34783 port->tty = NULL;
34784 wake_up_interruptible(&port->open_wait);
34785@@ -1971,7 +1971,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
34786 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
34787 modem_info *info = &dev->mdm.info[i];
34788
34789- if (info->port.count == 0)
34790+ if (atomic_read(&info->port.count) == 0)
34791 continue;
34792 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
34793 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
34794diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
34795index e74df7c..03a03ba 100644
34796--- a/drivers/isdn/icn/icn.c
34797+++ b/drivers/isdn/icn/icn.c
34798@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
34799 if (count > len)
34800 count = len;
34801 if (user) {
34802- if (copy_from_user(msg, buf, count))
34803+ if (count > sizeof msg || copy_from_user(msg, buf, count))
34804 return -EFAULT;
34805 } else
34806 memcpy(msg, buf, count);
34807diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
34808index b5fdcb7..5b6c59f 100644
34809--- a/drivers/lguest/core.c
34810+++ b/drivers/lguest/core.c
34811@@ -92,9 +92,17 @@ static __init int map_switcher(void)
34812 * it's worked so far. The end address needs +1 because __get_vm_area
34813 * allocates an extra guard page, so we need space for that.
34814 */
34815+
34816+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
34817+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
34818+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
34819+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
34820+#else
34821 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
34822 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
34823 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
34824+#endif
34825+
34826 if (!switcher_vma) {
34827 err = -ENOMEM;
34828 printk("lguest: could not map switcher pages high\n");
34829@@ -119,7 +127,7 @@ static __init int map_switcher(void)
34830 * Now the Switcher is mapped at the right address, we can't fail!
34831 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
34832 */
34833- memcpy(switcher_vma->addr, start_switcher_text,
34834+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
34835 end_switcher_text - start_switcher_text);
34836
34837 printk(KERN_INFO "lguest: mapped switcher at %p\n",
34838diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
34839index 4af12e1..0e89afe 100644
34840--- a/drivers/lguest/x86/core.c
34841+++ b/drivers/lguest/x86/core.c
34842@@ -59,7 +59,7 @@ static struct {
34843 /* Offset from where switcher.S was compiled to where we've copied it */
34844 static unsigned long switcher_offset(void)
34845 {
34846- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
34847+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
34848 }
34849
34850 /* This cpu's struct lguest_pages. */
34851@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
34852 * These copies are pretty cheap, so we do them unconditionally: */
34853 /* Save the current Host top-level page directory.
34854 */
34855+
34856+#ifdef CONFIG_PAX_PER_CPU_PGD
34857+ pages->state.host_cr3 = read_cr3();
34858+#else
34859 pages->state.host_cr3 = __pa(current->mm->pgd);
34860+#endif
34861+
34862 /*
34863 * Set up the Guest's page tables to see this CPU's pages (and no
34864 * other CPU's pages).
34865@@ -476,7 +482,7 @@ void __init lguest_arch_host_init(void)
34866 * compiled-in switcher code and the high-mapped copy we just made.
34867 */
34868 for (i = 0; i < IDT_ENTRIES; i++)
34869- default_idt_entries[i] += switcher_offset();
34870+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
34871
34872 /*
34873 * Set up the Switcher's per-cpu areas.
34874@@ -559,7 +565,7 @@ void __init lguest_arch_host_init(void)
34875 * it will be undisturbed when we switch. To change %cs and jump we
34876 * need this structure to feed to Intel's "lcall" instruction.
34877 */
34878- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
34879+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
34880 lguest_entry.segment = LGUEST_CS;
34881
34882 /*
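
The ktla_ktva() calls introduced in the two lguest files above (and in the
kgdbts hunks later in this patch) translate a kernel-text linear address to its
actual virtual mapping: under KERNEXEC on x86_32 the kernel's code is remapped,
so symbol addresses must be adjusted before being copied, compared, or offset.
VM_KERNEXEC similarly requests an executable vmalloc area for the switcher. A
sketch of the translation as this patch defines it for x86_32 (constants shown
for illustration; the authoritative definition lives in the arch headers):

    #ifdef CONFIG_PAX_KERNEXEC
    #define ktla_ktva(addr)     ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
    #define ktva_ktla(addr)     ((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
    #else
    #define ktla_ktva(addr)     (addr)
    #define ktva_ktla(addr)     (addr)
    #endif
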
34883diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
34884index 40634b0..4f5855e 100644
34885--- a/drivers/lguest/x86/switcher_32.S
34886+++ b/drivers/lguest/x86/switcher_32.S
34887@@ -87,6 +87,7 @@
34888 #include <asm/page.h>
34889 #include <asm/segment.h>
34890 #include <asm/lguest.h>
34891+#include <asm/processor-flags.h>
34892
34893 // We mark the start of the code to copy
34894 // It's placed in .text tho it's never run here
34895@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
34896 // Changes type when we load it: damn Intel!
34897 // For after we switch over our page tables
34898 // That entry will be read-only: we'd crash.
34899+
34900+#ifdef CONFIG_PAX_KERNEXEC
34901+ mov %cr0, %edx
34902+ xor $X86_CR0_WP, %edx
34903+ mov %edx, %cr0
34904+#endif
34905+
34906 movl $(GDT_ENTRY_TSS*8), %edx
34907 ltr %dx
34908
34909@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
34910 // Let's clear it again for our return.
34911 // The GDT descriptor of the Host
34912 // Points to the table after two "size" bytes
34913- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
34914+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
34915 // Clear "used" from type field (byte 5, bit 2)
34916- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
34917+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
34918+
34919+#ifdef CONFIG_PAX_KERNEXEC
34920+ mov %cr0, %eax
34921+ xor $X86_CR0_WP, %eax
34922+ mov %eax, %cr0
34923+#endif
34924
34925 // Once our page table's switched, the Guest is live!
34926 // The Host fades as we run this final step.
34927@@ -295,13 +309,12 @@ deliver_to_host:
34928 // I consulted gcc, and it gave
34929 // These instructions, which I gladly credit:
34930 leal (%edx,%ebx,8), %eax
34931- movzwl (%eax),%edx
34932- movl 4(%eax), %eax
34933- xorw %ax, %ax
34934- orl %eax, %edx
34935+ movl 4(%eax), %edx
34936+ movw (%eax), %dx
34937 // Now the address of the handler's in %edx
34938 // We call it now: its "iret" drops us home.
34939- jmp *%edx
34940+ ljmp $__KERNEL_CS, $1f
34941+1: jmp *%edx
34942
34943 // Every interrupt can come to us here
34944 // But we must truly tell each apart.
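
The switcher_32.S hunks write to a GDT entry that KERNEXEC has made read-only,
so they bracket the write by toggling the write-protect bit in CR0; the
trailing ljmp through __KERNEL_CS additionally reloads a sane code segment
before the indirect jump into the host's interrupt handler. A C-level sketch of
the CR0.WP window (illustrative; real kernel code would use the
native_read_cr0()/native_write_cr0() helpers):

    #define X86_CR0_WP (1UL << 16)

    static inline unsigned long rd_cr0(void)
    {
            unsigned long cr0;
            asm volatile("mov %%cr0, %0" : "=r"(cr0));
            return cr0;
    }

    static inline void wr_cr0(unsigned long cr0)
    {
            asm volatile("mov %0, %%cr0" : : "r"(cr0));
    }

    static void clear_tss_busy(unsigned char *type_byte)
    {
            wr_cr0(rd_cr0() & ~X86_CR0_WP);     /* open the write window */
            *type_byte &= 0xFD;                 /* clear the TSS "busy" flag */
            wr_cr0(rd_cr0() | X86_CR0_WP);      /* close it again */
    }
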
34945diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
34946index 7155945..4bcc562 100644
34947--- a/drivers/md/bitmap.c
34948+++ b/drivers/md/bitmap.c
34949@@ -1779,7 +1779,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
34950 chunk_kb ? "KB" : "B");
34951 if (bitmap->storage.file) {
34952 seq_printf(seq, ", file: ");
34953- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
34954+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
34955 }
34956
34957 seq_printf(seq, "\n");
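
Adding the backslash to seq_path()'s escape set above means a backslash
embedded in the bitmap file's name is itself emitted as an octal escape, so a
crafted filename cannot forge the space/tab/newline escapes that consumers of
this seq_file output may parse. Usage stays a one-liner (seq_path() escapes
every byte listed in its third argument):

    seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
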
34958diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
34959index a651d52..82f8a95 100644
34960--- a/drivers/md/dm-ioctl.c
34961+++ b/drivers/md/dm-ioctl.c
34962@@ -1601,7 +1601,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
34963 cmd == DM_LIST_VERSIONS_CMD)
34964 return 0;
34965
34966- if ((cmd == DM_DEV_CREATE_CMD)) {
34967+ if (cmd == DM_DEV_CREATE_CMD) {
34968 if (!*param->name) {
34969 DMWARN("name not supplied when creating device");
34970 return -EINVAL;
34971diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
34972index fd61f98..8050783 100644
34973--- a/drivers/md/dm-raid1.c
34974+++ b/drivers/md/dm-raid1.c
34975@@ -40,7 +40,7 @@ enum dm_raid1_error {
34976
34977 struct mirror {
34978 struct mirror_set *ms;
34979- atomic_t error_count;
34980+ atomic_unchecked_t error_count;
34981 unsigned long error_type;
34982 struct dm_dev *dev;
34983 sector_t offset;
34984@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
34985 struct mirror *m;
34986
34987 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
34988- if (!atomic_read(&m->error_count))
34989+ if (!atomic_read_unchecked(&m->error_count))
34990 return m;
34991
34992 return NULL;
34993@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
34994 * simple way to tell if a device has encountered
34995 * errors.
34996 */
34997- atomic_inc(&m->error_count);
34998+ atomic_inc_unchecked(&m->error_count);
34999
35000 if (test_and_set_bit(error_type, &m->error_type))
35001 return;
35002@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
35003 struct mirror *m = get_default_mirror(ms);
35004
35005 do {
35006- if (likely(!atomic_read(&m->error_count)))
35007+ if (likely(!atomic_read_unchecked(&m->error_count)))
35008 return m;
35009
35010 if (m-- == ms->mirror)
35011@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
35012 {
35013 struct mirror *default_mirror = get_default_mirror(m->ms);
35014
35015- return !atomic_read(&default_mirror->error_count);
35016+ return !atomic_read_unchecked(&default_mirror->error_count);
35017 }
35018
35019 static int mirror_available(struct mirror_set *ms, struct bio *bio)
35020@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
35021 */
35022 if (likely(region_in_sync(ms, region, 1)))
35023 m = choose_mirror(ms, bio->bi_sector);
35024- else if (m && atomic_read(&m->error_count))
35025+ else if (m && atomic_read_unchecked(&m->error_count))
35026 m = NULL;
35027
35028 if (likely(m))
35029@@ -938,7 +938,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
35030 }
35031
35032 ms->mirror[mirror].ms = ms;
35033- atomic_set(&(ms->mirror[mirror].error_count), 0);
35034+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
35035 ms->mirror[mirror].error_type = 0;
35036 ms->mirror[mirror].offset = offset;
35037
35038@@ -1356,7 +1356,7 @@ static void mirror_resume(struct dm_target *ti)
35039 */
35040 static char device_status_char(struct mirror *m)
35041 {
35042- if (!atomic_read(&(m->error_count)))
35043+ if (!atomic_read_unchecked(&(m->error_count)))
35044 return 'A';
35045
35046 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
35047diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
35048index e2f87653..f279abe 100644
35049--- a/drivers/md/dm-stripe.c
35050+++ b/drivers/md/dm-stripe.c
35051@@ -20,7 +20,7 @@ struct stripe {
35052 struct dm_dev *dev;
35053 sector_t physical_start;
35054
35055- atomic_t error_count;
35056+ atomic_unchecked_t error_count;
35057 };
35058
35059 struct stripe_c {
35060@@ -183,7 +183,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
35061 kfree(sc);
35062 return r;
35063 }
35064- atomic_set(&(sc->stripe[i].error_count), 0);
35065+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
35066 }
35067
35068 ti->private = sc;
35069@@ -324,7 +324,7 @@ static int stripe_status(struct dm_target *ti, status_type_t type,
35070 DMEMIT("%d ", sc->stripes);
35071 for (i = 0; i < sc->stripes; i++) {
35072 DMEMIT("%s ", sc->stripe[i].dev->name);
35073- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
35074+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
35075 'D' : 'A';
35076 }
35077 buffer[i] = '\0';
35078@@ -371,8 +371,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
35079 */
35080 for (i = 0; i < sc->stripes; i++)
35081 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
35082- atomic_inc(&(sc->stripe[i].error_count));
35083- if (atomic_read(&(sc->stripe[i].error_count)) <
35084+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
35085+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
35086 DM_IO_ERROR_THRESHOLD)
35087 schedule_work(&sc->trigger_event);
35088 }
35089diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
35090index fa29557..d24a5b7 100644
35091--- a/drivers/md/dm-table.c
35092+++ b/drivers/md/dm-table.c
35093@@ -390,7 +390,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
35094 if (!dev_size)
35095 return 0;
35096
35097- if ((start >= dev_size) || (start + len > dev_size)) {
35098+ if ((start >= dev_size) || (len > dev_size - start)) {
35099 DMWARN("%s: %s too small for target: "
35100 "start=%llu, len=%llu, dev_size=%llu",
35101 dm_device_name(ti->table->md), bdevname(bdev, b),
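
The dm-table.c check above is an integer-overflow fix: for large sector counts,
start + len can wrap around and compare as small, wrongly passing validation.
Because start >= dev_size is already rejected, the subtraction form performs
the same test without any overflow. A standalone sketch:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    /* Overflow-safe "does [start, start + len) fit within dev_size?" */
    static bool range_ok(sector_t start, sector_t len, sector_t dev_size)
    {
            if (start >= dev_size)
                    return false;
            return len <= dev_size - start;  /* never wraps: start < dev_size */
    }
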
35102diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
35103index 693e149..b7e0fde 100644
35104--- a/drivers/md/dm-thin-metadata.c
35105+++ b/drivers/md/dm-thin-metadata.c
35106@@ -397,7 +397,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
35107 {
35108 pmd->info.tm = pmd->tm;
35109 pmd->info.levels = 2;
35110- pmd->info.value_type.context = pmd->data_sm;
35111+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
35112 pmd->info.value_type.size = sizeof(__le64);
35113 pmd->info.value_type.inc = data_block_inc;
35114 pmd->info.value_type.dec = data_block_dec;
35115@@ -416,7 +416,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
35116
35117 pmd->bl_info.tm = pmd->tm;
35118 pmd->bl_info.levels = 1;
35119- pmd->bl_info.value_type.context = pmd->data_sm;
35120+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
35121 pmd->bl_info.value_type.size = sizeof(__le64);
35122 pmd->bl_info.value_type.inc = data_block_inc;
35123 pmd->bl_info.value_type.dec = data_block_dec;
35124diff --git a/drivers/md/dm.c b/drivers/md/dm.c
35125index 77e6eff..913d695 100644
35126--- a/drivers/md/dm.c
35127+++ b/drivers/md/dm.c
35128@@ -182,9 +182,9 @@ struct mapped_device {
35129 /*
35130 * Event handling.
35131 */
35132- atomic_t event_nr;
35133+ atomic_unchecked_t event_nr;
35134 wait_queue_head_t eventq;
35135- atomic_t uevent_seq;
35136+ atomic_unchecked_t uevent_seq;
35137 struct list_head uevent_list;
35138 spinlock_t uevent_lock; /* Protect access to uevent_list */
35139
35140@@ -1847,8 +1847,8 @@ static struct mapped_device *alloc_dev(int minor)
35141 rwlock_init(&md->map_lock);
35142 atomic_set(&md->holders, 1);
35143 atomic_set(&md->open_count, 0);
35144- atomic_set(&md->event_nr, 0);
35145- atomic_set(&md->uevent_seq, 0);
35146+ atomic_set_unchecked(&md->event_nr, 0);
35147+ atomic_set_unchecked(&md->uevent_seq, 0);
35148 INIT_LIST_HEAD(&md->uevent_list);
35149 spin_lock_init(&md->uevent_lock);
35150
35151@@ -1982,7 +1982,7 @@ static void event_callback(void *context)
35152
35153 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
35154
35155- atomic_inc(&md->event_nr);
35156+ atomic_inc_unchecked(&md->event_nr);
35157 wake_up(&md->eventq);
35158 }
35159
35160@@ -2637,18 +2637,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
35161
35162 uint32_t dm_next_uevent_seq(struct mapped_device *md)
35163 {
35164- return atomic_add_return(1, &md->uevent_seq);
35165+ return atomic_add_return_unchecked(1, &md->uevent_seq);
35166 }
35167
35168 uint32_t dm_get_event_nr(struct mapped_device *md)
35169 {
35170- return atomic_read(&md->event_nr);
35171+ return atomic_read_unchecked(&md->event_nr);
35172 }
35173
35174 int dm_wait_event(struct mapped_device *md, int event_nr)
35175 {
35176 return wait_event_interruptible(md->eventq,
35177- (event_nr != atomic_read(&md->event_nr)));
35178+ (event_nr != atomic_read_unchecked(&md->event_nr)));
35179 }
35180
35181 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
35182diff --git a/drivers/md/md.c b/drivers/md/md.c
35183index 6120071..31d9be2 100644
35184--- a/drivers/md/md.c
35185+++ b/drivers/md/md.c
35186@@ -240,10 +240,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
35187 * start build, activate spare
35188 */
35189 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
35190-static atomic_t md_event_count;
35191+static atomic_unchecked_t md_event_count;
35192 void md_new_event(struct mddev *mddev)
35193 {
35194- atomic_inc(&md_event_count);
35195+ atomic_inc_unchecked(&md_event_count);
35196 wake_up(&md_event_waiters);
35197 }
35198 EXPORT_SYMBOL_GPL(md_new_event);
35199@@ -253,7 +253,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
35200 */
35201 static void md_new_event_inintr(struct mddev *mddev)
35202 {
35203- atomic_inc(&md_event_count);
35204+ atomic_inc_unchecked(&md_event_count);
35205 wake_up(&md_event_waiters);
35206 }
35207
35208@@ -1504,7 +1504,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
35209 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
35210 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
35211 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
35212- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
35213+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
35214
35215 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
35216 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
35217@@ -1748,7 +1748,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
35218 else
35219 sb->resync_offset = cpu_to_le64(0);
35220
35221- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
35222+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
35223
35224 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
35225 sb->size = cpu_to_le64(mddev->dev_sectors);
35226@@ -2748,7 +2748,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
35227 static ssize_t
35228 errors_show(struct md_rdev *rdev, char *page)
35229 {
35230- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
35231+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
35232 }
35233
35234 static ssize_t
35235@@ -2757,7 +2757,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
35236 char *e;
35237 unsigned long n = simple_strtoul(buf, &e, 10);
35238 if (*buf && (*e == 0 || *e == '\n')) {
35239- atomic_set(&rdev->corrected_errors, n);
35240+ atomic_set_unchecked(&rdev->corrected_errors, n);
35241 return len;
35242 }
35243 return -EINVAL;
35244@@ -3204,8 +3204,8 @@ int md_rdev_init(struct md_rdev *rdev)
35245 rdev->sb_loaded = 0;
35246 rdev->bb_page = NULL;
35247 atomic_set(&rdev->nr_pending, 0);
35248- atomic_set(&rdev->read_errors, 0);
35249- atomic_set(&rdev->corrected_errors, 0);
35250+ atomic_set_unchecked(&rdev->read_errors, 0);
35251+ atomic_set_unchecked(&rdev->corrected_errors, 0);
35252
35253 INIT_LIST_HEAD(&rdev->same_set);
35254 init_waitqueue_head(&rdev->blocked_wait);
35255@@ -6984,7 +6984,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
35256
35257 spin_unlock(&pers_lock);
35258 seq_printf(seq, "\n");
35259- seq->poll_event = atomic_read(&md_event_count);
35260+ seq->poll_event = atomic_read_unchecked(&md_event_count);
35261 return 0;
35262 }
35263 if (v == (void*)2) {
35264@@ -7087,7 +7087,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
35265 return error;
35266
35267 seq = file->private_data;
35268- seq->poll_event = atomic_read(&md_event_count);
35269+ seq->poll_event = atomic_read_unchecked(&md_event_count);
35270 return error;
35271 }
35272
35273@@ -7101,7 +7101,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
35274 /* always allow read */
35275 mask = POLLIN | POLLRDNORM;
35276
35277- if (seq->poll_event != atomic_read(&md_event_count))
35278+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
35279 mask |= POLLERR | POLLPRI;
35280 return mask;
35281 }
35282@@ -7145,7 +7145,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
35283 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
35284 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
35285 (int)part_stat_read(&disk->part0, sectors[1]) -
35286- atomic_read(&disk->sync_io);
35287+ atomic_read_unchecked(&disk->sync_io);
35288 /* sync IO will cause sync_io to increase before the disk_stats
35289 * as sync_io is counted when a request starts, and
35290 * disk_stats is counted when it completes.
35291diff --git a/drivers/md/md.h b/drivers/md/md.h
35292index af443ab..0f93be3 100644
35293--- a/drivers/md/md.h
35294+++ b/drivers/md/md.h
35295@@ -94,13 +94,13 @@ struct md_rdev {
35296 * only maintained for arrays that
35297 * support hot removal
35298 */
35299- atomic_t read_errors; /* number of consecutive read errors that
35300+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
35301 * we have tried to ignore.
35302 */
35303 struct timespec last_read_error; /* monotonic time since our
35304 * last read error
35305 */
35306- atomic_t corrected_errors; /* number of corrected read errors,
35307+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
35308 * for reporting to userspace and storing
35309 * in superblock.
35310 */
35311@@ -432,7 +432,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
35312
35313 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
35314 {
35315- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
35316+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
35317 }
35318
35319 struct md_personality
35320diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
35321index 1cbfc6b..56e1dbb 100644
35322--- a/drivers/md/persistent-data/dm-space-map.h
35323+++ b/drivers/md/persistent-data/dm-space-map.h
35324@@ -60,6 +60,7 @@ struct dm_space_map {
35325 int (*root_size)(struct dm_space_map *sm, size_t *result);
35326 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
35327 };
35328+typedef struct dm_space_map __no_const dm_space_map_no_const;
35329
35330 /*----------------------------------------------------------------*/
35331
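
The __no_const typedef above exists because the constify plugin makes any
struct consisting only of function pointers implicitly const; dm-thin-metadata
(earlier in this patch) needs to store a dm_space_map pointer into a writable
context slot, so it casts through dm_space_map_no_const. dib3000.h and the
dvb-usb state structs below take the attribute directly instead. A sketch of
the mechanism, with the empty fallback used when the plugin is absent:

    #ifndef __no_const
    #define __no_const  /* expands to a plugin attribute under constify */
    #endif

    struct ops {
            int (*probe)(void);
    } __no_const;       /* stays assignable despite constification */

    typedef struct ops __no_const ops_no_const;
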
35332diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
35333index a0f7309..5599dbc 100644
35334--- a/drivers/md/raid1.c
35335+++ b/drivers/md/raid1.c
35336@@ -1819,7 +1819,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
35337 if (r1_sync_page_io(rdev, sect, s,
35338 bio->bi_io_vec[idx].bv_page,
35339 READ) != 0)
35340- atomic_add(s, &rdev->corrected_errors);
35341+ atomic_add_unchecked(s, &rdev->corrected_errors);
35342 }
35343 sectors -= s;
35344 sect += s;
35345@@ -2041,7 +2041,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
35346 test_bit(In_sync, &rdev->flags)) {
35347 if (r1_sync_page_io(rdev, sect, s,
35348 conf->tmppage, READ)) {
35349- atomic_add(s, &rdev->corrected_errors);
35350+ atomic_add_unchecked(s, &rdev->corrected_errors);
35351 printk(KERN_INFO
35352 "md/raid1:%s: read error corrected "
35353 "(%d sectors at %llu on %s)\n",
35354diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
35355index c9acbd7..386cd3e 100644
35356--- a/drivers/md/raid10.c
35357+++ b/drivers/md/raid10.c
35358@@ -1878,7 +1878,7 @@ static void end_sync_read(struct bio *bio, int error)
35359 /* The write handler will notice the lack of
35360 * R10BIO_Uptodate and record any errors etc
35361 */
35362- atomic_add(r10_bio->sectors,
35363+ atomic_add_unchecked(r10_bio->sectors,
35364 &conf->mirrors[d].rdev->corrected_errors);
35365
35366 /* for reconstruct, we always reschedule after a read.
35367@@ -2227,7 +2227,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
35368 {
35369 struct timespec cur_time_mon;
35370 unsigned long hours_since_last;
35371- unsigned int read_errors = atomic_read(&rdev->read_errors);
35372+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
35373
35374 ktime_get_ts(&cur_time_mon);
35375
35376@@ -2249,9 +2249,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
35377 * overflowing the shift of read_errors by hours_since_last.
35378 */
35379 if (hours_since_last >= 8 * sizeof(read_errors))
35380- atomic_set(&rdev->read_errors, 0);
35381+ atomic_set_unchecked(&rdev->read_errors, 0);
35382 else
35383- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
35384+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
35385 }
35386
35387 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
35388@@ -2305,8 +2305,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
35389 return;
35390
35391 check_decay_read_errors(mddev, rdev);
35392- atomic_inc(&rdev->read_errors);
35393- if (atomic_read(&rdev->read_errors) > max_read_errors) {
35394+ atomic_inc_unchecked(&rdev->read_errors);
35395+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
35396 char b[BDEVNAME_SIZE];
35397 bdevname(rdev->bdev, b);
35398
35399@@ -2314,7 +2314,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
35400 "md/raid10:%s: %s: Raid device exceeded "
35401 "read_error threshold [cur %d:max %d]\n",
35402 mdname(mddev), b,
35403- atomic_read(&rdev->read_errors), max_read_errors);
35404+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
35405 printk(KERN_NOTICE
35406 "md/raid10:%s: %s: Failing raid device\n",
35407 mdname(mddev), b);
35408@@ -2469,7 +2469,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
35409 sect +
35410 choose_data_offset(r10_bio, rdev)),
35411 bdevname(rdev->bdev, b));
35412- atomic_add(s, &rdev->corrected_errors);
35413+ atomic_add_unchecked(s, &rdev->corrected_errors);
35414 }
35415
35416 rdev_dec_pending(rdev, mddev);
35417diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
35418index a450268..c4168a9 100644
35419--- a/drivers/md/raid5.c
35420+++ b/drivers/md/raid5.c
35421@@ -1789,21 +1789,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
35422 mdname(conf->mddev), STRIPE_SECTORS,
35423 (unsigned long long)s,
35424 bdevname(rdev->bdev, b));
35425- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
35426+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
35427 clear_bit(R5_ReadError, &sh->dev[i].flags);
35428 clear_bit(R5_ReWrite, &sh->dev[i].flags);
35429 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
35430 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
35431
35432- if (atomic_read(&rdev->read_errors))
35433- atomic_set(&rdev->read_errors, 0);
35434+ if (atomic_read_unchecked(&rdev->read_errors))
35435+ atomic_set_unchecked(&rdev->read_errors, 0);
35436 } else {
35437 const char *bdn = bdevname(rdev->bdev, b);
35438 int retry = 0;
35439 int set_bad = 0;
35440
35441 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
35442- atomic_inc(&rdev->read_errors);
35443+ atomic_inc_unchecked(&rdev->read_errors);
35444 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
35445 printk_ratelimited(
35446 KERN_WARNING
35447@@ -1831,7 +1831,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
35448 mdname(conf->mddev),
35449 (unsigned long long)s,
35450 bdn);
35451- } else if (atomic_read(&rdev->read_errors)
35452+ } else if (atomic_read_unchecked(&rdev->read_errors)
35453 > conf->max_nr_stripes)
35454 printk(KERN_WARNING
35455 "md/raid:%s: Too many read errors, failing device %s.\n",
35456diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
35457index d33101a..6b13069 100644
35458--- a/drivers/media/dvb-core/dvbdev.c
35459+++ b/drivers/media/dvb-core/dvbdev.c
35460@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
35461 const struct dvb_device *template, void *priv, int type)
35462 {
35463 struct dvb_device *dvbdev;
35464- struct file_operations *dvbdevfops;
35465+ file_operations_no_const *dvbdevfops;
35466 struct device *clsdev;
35467 int minor;
35468 int id;
35469diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
35470index 404f63a..4796533 100644
35471--- a/drivers/media/dvb-frontends/dib3000.h
35472+++ b/drivers/media/dvb-frontends/dib3000.h
35473@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
35474 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
35475 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
35476 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
35477-};
35478+} __no_const;
35479
35480 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
35481 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
35482diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
35483index 3aa6856..435ad25 100644
35484--- a/drivers/media/pci/cx88/cx88-alsa.c
35485+++ b/drivers/media/pci/cx88/cx88-alsa.c
35486@@ -749,7 +749,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
35487 * Only boards with eeprom and byte 1 at eeprom=1 have it
35488 */
35489
35490-static const struct pci_device_id cx88_audio_pci_tbl[] __devinitdata = {
35491+static const struct pci_device_id cx88_audio_pci_tbl[] __devinitconst = {
35492 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
35493 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
35494 {0, }
35495diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
35496index feff57e..66a2c67 100644
35497--- a/drivers/media/pci/ddbridge/ddbridge-core.c
35498+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
35499@@ -1679,7 +1679,7 @@ static struct ddb_info ddb_v6 = {
35500 .subvendor = _subvend, .subdevice = _subdev, \
35501 .driver_data = (unsigned long)&_driverdata }
35502
35503-static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
35504+static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
35505 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
35506 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
35507 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
35508diff --git a/drivers/media/pci/ngene/ngene-cards.c b/drivers/media/pci/ngene/ngene-cards.c
35509index 96a13ed..6df45b4 100644
35510--- a/drivers/media/pci/ngene/ngene-cards.c
35511+++ b/drivers/media/pci/ngene/ngene-cards.c
35512@@ -741,7 +741,7 @@ static struct ngene_info ngene_info_terratec = {
35513
35514 /****************************************************************************/
35515
35516-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
35517+static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
35518 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
35519 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
35520 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
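
The three media hunks above (cx88-alsa, ddbridge, ngene) move const PCI ID
tables from __devinitdata to __devinitconst: data declared const must be placed
in a read-only init section, otherwise the section attribute conflicts once
constification enforces the qualifier. The resulting idiom, with illustrative
IDs:

    static const struct pci_device_id example_ids[] __devinitconst = {
            { PCI_DEVICE(0x14f1, 0x8801) },
            { 0, }
    };
    MODULE_DEVICE_TABLE(pci, example_ids);
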
35521diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
35522index a3b1a34..71ce0e3 100644
35523--- a/drivers/media/platform/omap/omap_vout.c
35524+++ b/drivers/media/platform/omap/omap_vout.c
35525@@ -65,7 +65,6 @@ enum omap_vout_channels {
35526 OMAP_VIDEO2,
35527 };
35528
35529-static struct videobuf_queue_ops video_vbq_ops;
35530 /* Variables configurable through module params*/
35531 static u32 video1_numbuffers = 3;
35532 static u32 video2_numbuffers = 3;
35533@@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
35534 {
35535 struct videobuf_queue *q;
35536 struct omap_vout_device *vout = NULL;
35537+ static struct videobuf_queue_ops video_vbq_ops = {
35538+ .buf_setup = omap_vout_buffer_setup,
35539+ .buf_prepare = omap_vout_buffer_prepare,
35540+ .buf_release = omap_vout_buffer_release,
35541+ .buf_queue = omap_vout_buffer_queue,
35542+ };
35543
35544 vout = video_drvdata(file);
35545 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
35546@@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
35547 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
35548
35549 q = &vout->vbq;
35550- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
35551- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
35552- video_vbq_ops.buf_release = omap_vout_buffer_release;
35553- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
35554 spin_lock_init(&vout->vbq_lock);
35555
35556 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
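
The omap_vout hunks replace a file-scope videobuf_queue_ops that was filled in
at open() time with a fully initialized static local, so the ops table needs no
runtime writes and cannot be tampered with through a writable global. The same
shape in miniature (handler names hypothetical; the local could even be const):

    static int my_setup(void)   { return 0; }
    static int my_prepare(void) { return 0; }

    static int open_device(void)
    {
            static const struct queue_ops {
                    int (*setup)(void);
                    int (*prepare)(void);
            } ops = {
                    .setup   = my_setup,
                    .prepare = my_prepare,
            };
            return ops.setup();  /* stand-in for registering &ops */
    }
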
35557diff --git a/drivers/media/platform/timblogiw.c b/drivers/media/platform/timblogiw.c
35558index 02194c0..36d69c1 100644
35559--- a/drivers/media/platform/timblogiw.c
35560+++ b/drivers/media/platform/timblogiw.c
35561@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
35562
35563 /* Platform device functions */
35564
35565-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
35566+static struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
35567 .vidioc_querycap = timblogiw_querycap,
35568 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
35569 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
35570@@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
35571 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
35572 };
35573
35574-static __devinitconst struct v4l2_file_operations timblogiw_fops = {
35575+static struct v4l2_file_operations timblogiw_fops = {
35576 .owner = THIS_MODULE,
35577 .open = timblogiw_open,
35578 .release = timblogiw_close,
35579diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
35580index 697a421..16c5a5f 100644
35581--- a/drivers/media/radio/radio-cadet.c
35582+++ b/drivers/media/radio/radio-cadet.c
35583@@ -302,6 +302,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
35584 unsigned char readbuf[RDS_BUFFER];
35585 int i = 0;
35586
35587+ if (count > RDS_BUFFER)
35588+ return -EFAULT;
35589 mutex_lock(&dev->lock);
35590 if (dev->rdsstat == 0)
35591 cadet_start_rds(dev);
35592@@ -317,7 +319,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
35593 while (i < count && dev->rdsin != dev->rdsout)
35594 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
35595
35596- if (i && copy_to_user(data, readbuf, i))
35597+ if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
35598 i = -EFAULT;
35599 unlock:
35600 mutex_unlock(&dev->lock);
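
radio-cadet mirrors the earlier copy_from_user guards in the read direction:
the user-supplied count is capped against the fixed RDS_BUFFER-sized stack
array before any bytes are staged, and the final copy is re-checked against
sizeof(readbuf). A condensed sketch of the read path (sizes and names
hypothetical):

    #include <linux/uaccess.h>

    static ssize_t dev_read(char __user *data, size_t count)
    {
            unsigned char readbuf[256];     /* stands in for RDS_BUFFER */
            size_t i = 0;

            if (count > sizeof(readbuf))
                    return -EFAULT;         /* the patch rejects, not clamps */
            /* ... fill readbuf[0..i) from the device ring, i <= count ... */
            if (i && copy_to_user(data, readbuf, i))
                    return -EFAULT;
            return i;
    }
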
35601diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
35602index 3940bb0..fb3952a 100644
35603--- a/drivers/media/usb/dvb-usb/cxusb.c
35604+++ b/drivers/media/usb/dvb-usb/cxusb.c
35605@@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
35606
35607 struct dib0700_adapter_state {
35608 int (*set_param_save) (struct dvb_frontend *);
35609-};
35610+} __no_const;
35611
35612 static int dib7070_set_param_override(struct dvb_frontend *fe)
35613 {
35614diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
35615index 9382895..ac8093c 100644
35616--- a/drivers/media/usb/dvb-usb/dw2102.c
35617+++ b/drivers/media/usb/dvb-usb/dw2102.c
35618@@ -95,7 +95,7 @@ struct su3000_state {
35619
35620 struct s6x0_state {
35621 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
35622-};
35623+} __no_const;
35624
35625 /* debug */
35626 static int dvb_usb_dw2102_debug;
35627diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
35628index fb69baa..cf7ad22 100644
35629--- a/drivers/message/fusion/mptbase.c
35630+++ b/drivers/message/fusion/mptbase.c
35631@@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
35632 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
35633 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
35634
35635+#ifdef CONFIG_GRKERNSEC_HIDESYM
35636+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
35637+#else
35638 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
35639 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
35640+#endif
35641+
35642 /*
35643 * Rounding UP to nearest 4-kB boundary here...
35644 */
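
Under GRKERNSEC_HIDESYM the mptbase hunk above prints NULL in place of the real
request-frame addresses: kernel pointers exposed through /proc defeat
address-space randomization (mainline's kptr_restrict/%pK addresses the same
class of leak). The shape of the pattern:

    #ifdef CONFIG_GRKERNSEC_HIDESYM
            seq_printf(m, "  RequestFrames @ 0x%p\n", NULL);
    #else
            seq_printf(m, "  RequestFrames @ 0x%p\n", (void *)ioc->req_frames);
    #endif
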
35645diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
35646index 551262e..7551198 100644
35647--- a/drivers/message/fusion/mptsas.c
35648+++ b/drivers/message/fusion/mptsas.c
35649@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
35650 return 0;
35651 }
35652
35653+static inline void
35654+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
35655+{
35656+ if (phy_info->port_details) {
35657+ phy_info->port_details->rphy = rphy;
35658+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
35659+ ioc->name, rphy));
35660+ }
35661+
35662+ if (rphy) {
35663+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
35664+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
35665+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
35666+ ioc->name, rphy, rphy->dev.release));
35667+ }
35668+}
35669+
35670 /* no mutex */
35671 static void
35672 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
35673@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
35674 return NULL;
35675 }
35676
35677-static inline void
35678-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
35679-{
35680- if (phy_info->port_details) {
35681- phy_info->port_details->rphy = rphy;
35682- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
35683- ioc->name, rphy));
35684- }
35685-
35686- if (rphy) {
35687- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
35688- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
35689- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
35690- ioc->name, rphy, rphy->dev.release));
35691- }
35692-}
35693-
35694 static inline struct sas_port *
35695 mptsas_get_port(struct mptsas_phyinfo *phy_info)
35696 {
35697diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
35698index 0c3ced7..1fe34ec 100644
35699--- a/drivers/message/fusion/mptscsih.c
35700+++ b/drivers/message/fusion/mptscsih.c
35701@@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
35702
35703 h = shost_priv(SChost);
35704
35705- if (h) {
35706- if (h->info_kbuf == NULL)
35707- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
35708- return h->info_kbuf;
35709- h->info_kbuf[0] = '\0';
35710+ if (!h)
35711+ return NULL;
35712
35713- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
35714- h->info_kbuf[size-1] = '\0';
35715- }
35716+ if (h->info_kbuf == NULL)
35717+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
35718+ return h->info_kbuf;
35719+ h->info_kbuf[0] = '\0';
35720+
35721+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
35722+ h->info_kbuf[size-1] = '\0';
35723
35724 return h->info_kbuf;
35725 }
35726diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
35727index 8001aa6..b137580 100644
35728--- a/drivers/message/i2o/i2o_proc.c
35729+++ b/drivers/message/i2o/i2o_proc.c
35730@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
35731 "Array Controller Device"
35732 };
35733
35734-static char *chtostr(char *tmp, u8 *chars, int n)
35735-{
35736- tmp[0] = 0;
35737- return strncat(tmp, (char *)chars, n);
35738-}
35739-
35740 static int i2o_report_query_status(struct seq_file *seq, int block_status,
35741 char *group)
35742 {
35743@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
35744 } *result;
35745
35746 i2o_exec_execute_ddm_table ddm_table;
35747- char tmp[28 + 1];
35748
35749 result = kmalloc(sizeof(*result), GFP_KERNEL);
35750 if (!result)
35751@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
35752
35753 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
35754 seq_printf(seq, "%-#8x", ddm_table.module_id);
35755- seq_printf(seq, "%-29s",
35756- chtostr(tmp, ddm_table.module_name_version, 28));
35757+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
35758 seq_printf(seq, "%9d ", ddm_table.data_size);
35759 seq_printf(seq, "%8d", ddm_table.code_size);
35760
35761@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
35762
35763 i2o_driver_result_table *result;
35764 i2o_driver_store_table *dst;
35765- char tmp[28 + 1];
35766
35767 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
35768 if (result == NULL)
35769@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
35770
35771 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
35772 seq_printf(seq, "%-#8x", dst->module_id);
35773- seq_printf(seq, "%-29s",
35774- chtostr(tmp, dst->module_name_version, 28));
35775- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
35776+ seq_printf(seq, "%-.28s", dst->module_name_version);
35777+ seq_printf(seq, "%-.8s", dst->date);
35778 seq_printf(seq, "%8d ", dst->module_size);
35779 seq_printf(seq, "%8d ", dst->mpb_size);
35780 seq_printf(seq, "0x%04x", dst->module_flags);
35781@@ -1250,7 +1240,6 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
35782 // == (allow) 512d bytes (max)
35783 static u16 *work16 = (u16 *) work32;
35784 int token;
35785- char tmp[16 + 1];
35786
35787 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
35788
35789@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
35790 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
35791 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
35792 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
35793- seq_printf(seq, "Vendor info : %s\n",
35794- chtostr(tmp, (u8 *) (work32 + 2), 16));
35795- seq_printf(seq, "Product info : %s\n",
35796- chtostr(tmp, (u8 *) (work32 + 6), 16));
35797- seq_printf(seq, "Description : %s\n",
35798- chtostr(tmp, (u8 *) (work32 + 10), 16));
35799- seq_printf(seq, "Product rev. : %s\n",
35800- chtostr(tmp, (u8 *) (work32 + 14), 8));
35801+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
35802+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
35803+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
35804+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
35805
35806 seq_printf(seq, "Serial number : ");
35807 print_serial_number(seq, (u8 *) (work32 + 16),
35808@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
35809 u8 pad[256]; // allow up to 256 byte (max) serial number
35810 } result;
35811
35812- char tmp[24 + 1];
35813-
35814 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
35815
35816 if (token < 0) {
35817@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
35818 }
35819
35820 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
35821- seq_printf(seq, "Module name : %s\n",
35822- chtostr(tmp, result.module_name, 24));
35823- seq_printf(seq, "Module revision : %s\n",
35824- chtostr(tmp, result.module_rev, 8));
35825+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
35826+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
35827
35828 seq_printf(seq, "Serial number : ");
35829 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
35830@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
35831 u8 instance_number[4];
35832 } result;
35833
35834- char tmp[64 + 1];
35835-
35836 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
35837
35838 if (token < 0) {
35839@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
35840 return 0;
35841 }
35842
35843- seq_printf(seq, "Device name : %s\n",
35844- chtostr(tmp, result.device_name, 64));
35845- seq_printf(seq, "Service name : %s\n",
35846- chtostr(tmp, result.service_name, 64));
35847- seq_printf(seq, "Physical name : %s\n",
35848- chtostr(tmp, result.physical_location, 64));
35849- seq_printf(seq, "Instance number : %s\n",
35850- chtostr(tmp, result.instance_number, 4));
35851+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
35852+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
35853+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
35854+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
35855
35856 return 0;
35857 }
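
The i2o_proc hunks delete the chtostr() bounce-copy helper: a printf-style
precision such as %.28s prints at most that many bytes and stops early at a
NUL, which is exactly the safe way to emit fixed-width, possibly unterminated
ID fields straight from the record. A standalone demonstration (field contents
invented):

    #include <stdio.h>

    struct ddm_entry {
            char module_name_version[28];   /* may lack a NUL terminator */
    };

    int main(void)
    {
            /* exactly 28 bytes: the array holds no terminating NUL */
            struct ddm_entry e = {
                    .module_name_version = "abcdefghijklmnopqrstuvwxyz12"
            };

            /* the precision bounds the read to 28 bytes */
            printf("%-.28s\n", e.module_name_version);
            return 0;
    }
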
35858diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
35859index a8c08f3..155fe3d 100644
35860--- a/drivers/message/i2o/iop.c
35861+++ b/drivers/message/i2o/iop.c
35862@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
35863
35864 spin_lock_irqsave(&c->context_list_lock, flags);
35865
35866- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
35867- atomic_inc(&c->context_list_counter);
35868+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
35869+ atomic_inc_unchecked(&c->context_list_counter);
35870
35871- entry->context = atomic_read(&c->context_list_counter);
35872+ entry->context = atomic_read_unchecked(&c->context_list_counter);
35873
35874 list_add(&entry->list, &c->context_list);
35875
35876@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
35877
35878 #if BITS_PER_LONG == 64
35879 spin_lock_init(&c->context_list_lock);
35880- atomic_set(&c->context_list_counter, 0);
35881+ atomic_set_unchecked(&c->context_list_counter, 0);
35882 INIT_LIST_HEAD(&c->context_list);
35883 #endif
35884
35885diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
35886index 965c480..71f2db9 100644
35887--- a/drivers/mfd/janz-cmodio.c
35888+++ b/drivers/mfd/janz-cmodio.c
35889@@ -13,6 +13,7 @@
35890
35891 #include <linux/kernel.h>
35892 #include <linux/module.h>
35893+#include <linux/slab.h>
35894 #include <linux/init.h>
35895 #include <linux/pci.h>
35896 #include <linux/interrupt.h>
35897diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
35898index 3aa9a96..59cf685 100644
35899--- a/drivers/misc/kgdbts.c
35900+++ b/drivers/misc/kgdbts.c
35901@@ -832,7 +832,7 @@ static void run_plant_and_detach_test(int is_early)
35902 char before[BREAK_INSTR_SIZE];
35903 char after[BREAK_INSTR_SIZE];
35904
35905- probe_kernel_read(before, (char *)kgdbts_break_test,
35906+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
35907 BREAK_INSTR_SIZE);
35908 init_simple_test();
35909 ts.tst = plant_and_detach_test;
35910@@ -840,7 +840,7 @@ static void run_plant_and_detach_test(int is_early)
35911 /* Activate test with initial breakpoint */
35912 if (!is_early)
35913 kgdb_breakpoint();
35914- probe_kernel_read(after, (char *)kgdbts_break_test,
35915+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
35916 BREAK_INSTR_SIZE);
35917 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
35918 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
35919diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
35920index 4a87e5c..76bdf5c 100644
35921--- a/drivers/misc/lis3lv02d/lis3lv02d.c
35922+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
35923@@ -498,7 +498,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
35924 * the lid is closed. This leads to interrupts as soon as a little move
35925 * is done.
35926 */
35927- atomic_inc(&lis3->count);
35928+ atomic_inc_unchecked(&lis3->count);
35929
35930 wake_up_interruptible(&lis3->misc_wait);
35931 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
35932@@ -584,7 +584,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
35933 if (lis3->pm_dev)
35934 pm_runtime_get_sync(lis3->pm_dev);
35935
35936- atomic_set(&lis3->count, 0);
35937+ atomic_set_unchecked(&lis3->count, 0);
35938 return 0;
35939 }
35940
35941@@ -617,7 +617,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
35942 add_wait_queue(&lis3->misc_wait, &wait);
35943 while (true) {
35944 set_current_state(TASK_INTERRUPTIBLE);
35945- data = atomic_xchg(&lis3->count, 0);
35946+ data = atomic_xchg_unchecked(&lis3->count, 0);
35947 if (data)
35948 break;
35949
35950@@ -658,7 +658,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
35951 struct lis3lv02d, miscdev);
35952
35953 poll_wait(file, &lis3->misc_wait, wait);
35954- if (atomic_read(&lis3->count))
35955+ if (atomic_read_unchecked(&lis3->count))
35956 return POLLIN | POLLRDNORM;
35957 return 0;
35958 }
35959diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
35960index c439c82..1f20f57 100644
35961--- a/drivers/misc/lis3lv02d/lis3lv02d.h
35962+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
35963@@ -297,7 +297,7 @@ struct lis3lv02d {
35964 struct input_polled_dev *idev; /* input device */
35965 struct platform_device *pdev; /* platform device */
35966 struct regulator_bulk_data regulators[2];
35967- atomic_t count; /* interrupt count after last read */
35968+ atomic_unchecked_t count; /* interrupt count after last read */
35969 union axis_conversion ac; /* hw -> logical axis */
35970 int mapped_btns[3];
35971
35972diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
35973index 2f30bad..c4c13d0 100644
35974--- a/drivers/misc/sgi-gru/gruhandles.c
35975+++ b/drivers/misc/sgi-gru/gruhandles.c
35976@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
35977 unsigned long nsec;
35978
35979 nsec = CLKS2NSEC(clks);
35980- atomic_long_inc(&mcs_op_statistics[op].count);
35981- atomic_long_add(nsec, &mcs_op_statistics[op].total);
35982+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
35983+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
35984 if (mcs_op_statistics[op].max < nsec)
35985 mcs_op_statistics[op].max = nsec;
35986 }
35987diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
35988index 950dbe9..eeef0f8 100644
35989--- a/drivers/misc/sgi-gru/gruprocfs.c
35990+++ b/drivers/misc/sgi-gru/gruprocfs.c
35991@@ -32,9 +32,9 @@
35992
35993 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
35994
35995-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
35996+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
35997 {
35998- unsigned long val = atomic_long_read(v);
35999+ unsigned long val = atomic_long_read_unchecked(v);
36000
36001 seq_printf(s, "%16lu %s\n", val, id);
36002 }
36003@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
36004
36005 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
36006 for (op = 0; op < mcsop_last; op++) {
36007- count = atomic_long_read(&mcs_op_statistics[op].count);
36008- total = atomic_long_read(&mcs_op_statistics[op].total);
36009+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
36010+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
36011 max = mcs_op_statistics[op].max;
36012 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
36013 count ? total / count : 0, max);
36014diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
36015index 5c3ce24..4915ccb 100644
36016--- a/drivers/misc/sgi-gru/grutables.h
36017+++ b/drivers/misc/sgi-gru/grutables.h
36018@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
36019 * GRU statistics.
36020 */
36021 struct gru_stats_s {
36022- atomic_long_t vdata_alloc;
36023- atomic_long_t vdata_free;
36024- atomic_long_t gts_alloc;
36025- atomic_long_t gts_free;
36026- atomic_long_t gms_alloc;
36027- atomic_long_t gms_free;
36028- atomic_long_t gts_double_allocate;
36029- atomic_long_t assign_context;
36030- atomic_long_t assign_context_failed;
36031- atomic_long_t free_context;
36032- atomic_long_t load_user_context;
36033- atomic_long_t load_kernel_context;
36034- atomic_long_t lock_kernel_context;
36035- atomic_long_t unlock_kernel_context;
36036- atomic_long_t steal_user_context;
36037- atomic_long_t steal_kernel_context;
36038- atomic_long_t steal_context_failed;
36039- atomic_long_t nopfn;
36040- atomic_long_t asid_new;
36041- atomic_long_t asid_next;
36042- atomic_long_t asid_wrap;
36043- atomic_long_t asid_reuse;
36044- atomic_long_t intr;
36045- atomic_long_t intr_cbr;
36046- atomic_long_t intr_tfh;
36047- atomic_long_t intr_spurious;
36048- atomic_long_t intr_mm_lock_failed;
36049- atomic_long_t call_os;
36050- atomic_long_t call_os_wait_queue;
36051- atomic_long_t user_flush_tlb;
36052- atomic_long_t user_unload_context;
36053- atomic_long_t user_exception;
36054- atomic_long_t set_context_option;
36055- atomic_long_t check_context_retarget_intr;
36056- atomic_long_t check_context_unload;
36057- atomic_long_t tlb_dropin;
36058- atomic_long_t tlb_preload_page;
36059- atomic_long_t tlb_dropin_fail_no_asid;
36060- atomic_long_t tlb_dropin_fail_upm;
36061- atomic_long_t tlb_dropin_fail_invalid;
36062- atomic_long_t tlb_dropin_fail_range_active;
36063- atomic_long_t tlb_dropin_fail_idle;
36064- atomic_long_t tlb_dropin_fail_fmm;
36065- atomic_long_t tlb_dropin_fail_no_exception;
36066- atomic_long_t tfh_stale_on_fault;
36067- atomic_long_t mmu_invalidate_range;
36068- atomic_long_t mmu_invalidate_page;
36069- atomic_long_t flush_tlb;
36070- atomic_long_t flush_tlb_gru;
36071- atomic_long_t flush_tlb_gru_tgh;
36072- atomic_long_t flush_tlb_gru_zero_asid;
36073+ atomic_long_unchecked_t vdata_alloc;
36074+ atomic_long_unchecked_t vdata_free;
36075+ atomic_long_unchecked_t gts_alloc;
36076+ atomic_long_unchecked_t gts_free;
36077+ atomic_long_unchecked_t gms_alloc;
36078+ atomic_long_unchecked_t gms_free;
36079+ atomic_long_unchecked_t gts_double_allocate;
36080+ atomic_long_unchecked_t assign_context;
36081+ atomic_long_unchecked_t assign_context_failed;
36082+ atomic_long_unchecked_t free_context;
36083+ atomic_long_unchecked_t load_user_context;
36084+ atomic_long_unchecked_t load_kernel_context;
36085+ atomic_long_unchecked_t lock_kernel_context;
36086+ atomic_long_unchecked_t unlock_kernel_context;
36087+ atomic_long_unchecked_t steal_user_context;
36088+ atomic_long_unchecked_t steal_kernel_context;
36089+ atomic_long_unchecked_t steal_context_failed;
36090+ atomic_long_unchecked_t nopfn;
36091+ atomic_long_unchecked_t asid_new;
36092+ atomic_long_unchecked_t asid_next;
36093+ atomic_long_unchecked_t asid_wrap;
36094+ atomic_long_unchecked_t asid_reuse;
36095+ atomic_long_unchecked_t intr;
36096+ atomic_long_unchecked_t intr_cbr;
36097+ atomic_long_unchecked_t intr_tfh;
36098+ atomic_long_unchecked_t intr_spurious;
36099+ atomic_long_unchecked_t intr_mm_lock_failed;
36100+ atomic_long_unchecked_t call_os;
36101+ atomic_long_unchecked_t call_os_wait_queue;
36102+ atomic_long_unchecked_t user_flush_tlb;
36103+ atomic_long_unchecked_t user_unload_context;
36104+ atomic_long_unchecked_t user_exception;
36105+ atomic_long_unchecked_t set_context_option;
36106+ atomic_long_unchecked_t check_context_retarget_intr;
36107+ atomic_long_unchecked_t check_context_unload;
36108+ atomic_long_unchecked_t tlb_dropin;
36109+ atomic_long_unchecked_t tlb_preload_page;
36110+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
36111+ atomic_long_unchecked_t tlb_dropin_fail_upm;
36112+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
36113+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
36114+ atomic_long_unchecked_t tlb_dropin_fail_idle;
36115+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
36116+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
36117+ atomic_long_unchecked_t tfh_stale_on_fault;
36118+ atomic_long_unchecked_t mmu_invalidate_range;
36119+ atomic_long_unchecked_t mmu_invalidate_page;
36120+ atomic_long_unchecked_t flush_tlb;
36121+ atomic_long_unchecked_t flush_tlb_gru;
36122+ atomic_long_unchecked_t flush_tlb_gru_tgh;
36123+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
36124
36125- atomic_long_t copy_gpa;
36126- atomic_long_t read_gpa;
36127+ atomic_long_unchecked_t copy_gpa;
36128+ atomic_long_unchecked_t read_gpa;
36129
36130- atomic_long_t mesq_receive;
36131- atomic_long_t mesq_receive_none;
36132- atomic_long_t mesq_send;
36133- atomic_long_t mesq_send_failed;
36134- atomic_long_t mesq_noop;
36135- atomic_long_t mesq_send_unexpected_error;
36136- atomic_long_t mesq_send_lb_overflow;
36137- atomic_long_t mesq_send_qlimit_reached;
36138- atomic_long_t mesq_send_amo_nacked;
36139- atomic_long_t mesq_send_put_nacked;
36140- atomic_long_t mesq_page_overflow;
36141- atomic_long_t mesq_qf_locked;
36142- atomic_long_t mesq_qf_noop_not_full;
36143- atomic_long_t mesq_qf_switch_head_failed;
36144- atomic_long_t mesq_qf_unexpected_error;
36145- atomic_long_t mesq_noop_unexpected_error;
36146- atomic_long_t mesq_noop_lb_overflow;
36147- atomic_long_t mesq_noop_qlimit_reached;
36148- atomic_long_t mesq_noop_amo_nacked;
36149- atomic_long_t mesq_noop_put_nacked;
36150- atomic_long_t mesq_noop_page_overflow;
36151+ atomic_long_unchecked_t mesq_receive;
36152+ atomic_long_unchecked_t mesq_receive_none;
36153+ atomic_long_unchecked_t mesq_send;
36154+ atomic_long_unchecked_t mesq_send_failed;
36155+ atomic_long_unchecked_t mesq_noop;
36156+ atomic_long_unchecked_t mesq_send_unexpected_error;
36157+ atomic_long_unchecked_t mesq_send_lb_overflow;
36158+ atomic_long_unchecked_t mesq_send_qlimit_reached;
36159+ atomic_long_unchecked_t mesq_send_amo_nacked;
36160+ atomic_long_unchecked_t mesq_send_put_nacked;
36161+ atomic_long_unchecked_t mesq_page_overflow;
36162+ atomic_long_unchecked_t mesq_qf_locked;
36163+ atomic_long_unchecked_t mesq_qf_noop_not_full;
36164+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
36165+ atomic_long_unchecked_t mesq_qf_unexpected_error;
36166+ atomic_long_unchecked_t mesq_noop_unexpected_error;
36167+ atomic_long_unchecked_t mesq_noop_lb_overflow;
36168+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
36169+ atomic_long_unchecked_t mesq_noop_amo_nacked;
36170+ atomic_long_unchecked_t mesq_noop_put_nacked;
36171+ atomic_long_unchecked_t mesq_noop_page_overflow;
36172
36173 };
36174
36175@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
36176 tghop_invalidate, mcsop_last};
36177
36178 struct mcs_op_statistic {
36179- atomic_long_t count;
36180- atomic_long_t total;
36181+ atomic_long_unchecked_t count;
36182+ atomic_long_unchecked_t total;
36183 unsigned long max;
36184 };
36185
36186@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
36187
36188 #define STAT(id) do { \
36189 if (gru_options & OPT_STATS) \
36190- atomic_long_inc(&gru_stats.id); \
36191+ atomic_long_inc_unchecked(&gru_stats.id); \
36192 } while (0)
36193
36194 #ifdef CONFIG_SGI_GRU_DEBUG
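The atomic_long_t to atomic_long_unchecked_t conversions above (and the matching *_unchecked accessors) are the PaX REFCOUNT pattern: under that hardening, the plain atomic types detect overflow, so pure statistics counters that may legitimately wrap, like these GRU stats, are moved to the opt-out "unchecked" variants. Below is a minimal userspace model of the distinction, assuming only that checked increments trap on overflow while unchecked ones wrap; it is not the kernel's (atomic, per-arch) implementation.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace sketch only: the hardened kernel does this check inside the
 * atomic op itself and kills the offending task on overflow. */
static long checked_inc(long v)
{
	long out;
	if (__builtin_add_overflow(v, 1, &out)) {
		fprintf(stderr, "refcount overflow detected\n");
		abort();
	}
	return out;
}

static long unchecked_inc(long v)
{
	return (long)((unsigned long)v + 1);	/* wraparound is acceptable here */
}

int main(void)
{
	long stat = LONG_MAX;
	stat = unchecked_inc(stat);	/* statistics counter: wrap and keep counting */
	printf("stat wrapped to %ld\n", stat);
	stat = checked_inc(LONG_MAX);	/* refcount: this aborts */
	return 0;
}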
36195diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
36196index c862cd4..0d176fe 100644
36197--- a/drivers/misc/sgi-xp/xp.h
36198+++ b/drivers/misc/sgi-xp/xp.h
36199@@ -288,7 +288,7 @@ struct xpc_interface {
36200 xpc_notify_func, void *);
36201 void (*received) (short, int, void *);
36202 enum xp_retval (*partid_to_nasids) (short, void *);
36203-};
36204+} __no_const;
36205
36206 extern struct xpc_interface xpc_interface;
36207
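The __no_const annotation added to xpc_interface is the opt-out side of grsecurity's constify GCC plugin: structures consisting only of function pointers are force-qualified const so they land in read-only data, and the few that genuinely must be rewritten at runtime (this interface is re-registered by modules) are exempted. A hedged sketch of the same idea in plain C, without the plugin, with illustrative names:

#include <stdio.h>

struct ops {
	int (*send)(short, int, void *);
};

static int real_send(short ch, int n, void *p) { (void)ch; (void)n; (void)p; return 0; }

static const struct ops fixed_ops = { .send = real_send };	/* "constified" */
static struct ops swappable_ops;				/* "__no_const" */

int main(void)
{
	swappable_ops.send = real_send;	/* legal: this table stayed writable */
	/* fixed_ops.send = real_send;      error: assignment of read-only member */
	printf("%d %d\n", fixed_ops.send(0, 0, NULL), swappable_ops.send(0, 0, NULL));
	return 0;
}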
36208diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
36209index b94d5f7..7f494c5 100644
36210--- a/drivers/misc/sgi-xp/xpc.h
36211+++ b/drivers/misc/sgi-xp/xpc.h
36212@@ -835,6 +835,7 @@ struct xpc_arch_operations {
36213 void (*received_payload) (struct xpc_channel *, void *);
36214 void (*notify_senders_of_disconnect) (struct xpc_channel *);
36215 };
36216+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
36217
36218 /* struct xpc_partition act_state values (for XPC HB) */
36219
36220@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
36221 /* found in xpc_main.c */
36222 extern struct device *xpc_part;
36223 extern struct device *xpc_chan;
36224-extern struct xpc_arch_operations xpc_arch_ops;
36225+extern xpc_arch_operations_no_const xpc_arch_ops;
36226 extern int xpc_disengage_timelimit;
36227 extern int xpc_disengage_timedout;
36228 extern int xpc_activate_IRQ_rcvd;
36229diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
36230index d971817..3805cce 100644
36231--- a/drivers/misc/sgi-xp/xpc_main.c
36232+++ b/drivers/misc/sgi-xp/xpc_main.c
36233@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
36234 .notifier_call = xpc_system_die,
36235 };
36236
36237-struct xpc_arch_operations xpc_arch_ops;
36238+xpc_arch_operations_no_const xpc_arch_ops;
36239
36240 /*
36241 * Timer function to enforce the timelimit on the partition disengage.
36242diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
36243index a0e1720..ee63d0b 100644
36244--- a/drivers/mmc/core/mmc_ops.c
36245+++ b/drivers/mmc/core/mmc_ops.c
36246@@ -245,7 +245,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
36247 void *data_buf;
36248 int is_on_stack;
36249
36250- is_on_stack = object_is_on_stack(buf);
36251+ is_on_stack = object_starts_on_stack(buf);
36252 if (is_on_stack) {
36253 /*
36254 * dma onto stack is unsafe/nonportable, but callers to this
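The object_is_on_stack to object_starts_on_stack rename above is semantic honesty rather than a behavior change: the helper only proves that the object's start address lies within the task's stack, not that the whole object fits there. A sketch of what is and is not guaranteed, with made-up bounds (the kernel derives them from the thread stack):

#include <stdint.h>
#include <stdio.h>

static int object_starts_on_stack(const void *obj,
				  uintptr_t stack_lo, uintptr_t stack_hi)
{
	uintptr_t p = (uintptr_t)obj;
	return p >= stack_lo && p < stack_hi;	/* start only; end is unchecked */
}

int main(void)
{
	char buf[16];
	uintptr_t lo = (uintptr_t)buf - 4096, hi = (uintptr_t)buf + 4096;
	printf("starts on stack: %d\n", object_starts_on_stack(buf, lo, hi));
	return 0;
}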
36255diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
36256index 53b8fd9..615b462 100644
36257--- a/drivers/mmc/host/dw_mmc.h
36258+++ b/drivers/mmc/host/dw_mmc.h
36259@@ -205,5 +205,5 @@ struct dw_mci_drv_data {
36260 int (*parse_dt)(struct dw_mci *host);
36261 int (*setup_bus)(struct dw_mci *host,
36262 struct device_node *slot_np, u8 bus_width);
36263-};
36264+} __do_const;
36265 #endif /* _DW_MMC_H_ */
36266diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
36267index a4eb8b5..8c0628f 100644
36268--- a/drivers/mtd/devices/doc2000.c
36269+++ b/drivers/mtd/devices/doc2000.c
36270@@ -753,7 +753,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
36271
36272 /* The ECC will not be calculated correctly if less than 512 is written */
36273 /* DBB-
36274- if (len != 0x200 && eccbuf)
36275+ if (len != 0x200)
36276 printk(KERN_WARNING
36277 "ECC needs a full sector write (adr: %lx size %lx)\n",
36278 (long) to, (long) len);
36279diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
36280index e706a23..b3d262f 100644
36281--- a/drivers/mtd/nand/denali.c
36282+++ b/drivers/mtd/nand/denali.c
36283@@ -26,6 +26,7 @@
36284 #include <linux/pci.h>
36285 #include <linux/mtd/mtd.h>
36286 #include <linux/module.h>
36287+#include <linux/slab.h>
36288
36289 #include "denali.h"
36290
36291diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
36292index 51b9d6a..52af9a7 100644
36293--- a/drivers/mtd/nftlmount.c
36294+++ b/drivers/mtd/nftlmount.c
36295@@ -24,6 +24,7 @@
36296 #include <asm/errno.h>
36297 #include <linux/delay.h>
36298 #include <linux/slab.h>
36299+#include <linux/sched.h>
36300 #include <linux/mtd/mtd.h>
36301 #include <linux/mtd/nand.h>
36302 #include <linux/mtd/nftl.h>
36303diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
36304index 203ff9d..0968ca8 100644
36305--- a/drivers/net/ethernet/8390/ax88796.c
36306+++ b/drivers/net/ethernet/8390/ax88796.c
36307@@ -872,9 +872,11 @@ static int ax_probe(struct platform_device *pdev)
36308 if (ax->plat->reg_offsets)
36309 ei_local->reg_offset = ax->plat->reg_offsets;
36310 else {
36311+ resource_size_t _mem_size = mem_size;
36312+ do_div(_mem_size, 0x18);
36313 ei_local->reg_offset = ax->reg_offsets;
36314 for (ret = 0; ret < 0x18; ret++)
36315- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
36316+ ax->reg_offsets[ret] = _mem_size * ret;
36317 }
36318
36319 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
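The ax88796 hunk replaces a plain 64-bit division with do_div() because resource_size_t can be 64 bits wide on 32-bit targets, where `mem_size / 0x18` would emit a call into libgcc's __udivdi3, which the kernel does not link against. A userspace stand-in for the pattern (this macro mimics do_div()'s divide-in-place, return-remainder contract; it is not the kernel's per-arch implementation):

#include <stdint.h>
#include <stdio.h>

#define do_div(n, base) ({ uint32_t __rem = (n) % (base); (n) /= (base); __rem; })

int main(void)
{
	uint64_t mem_size = 0x1000;	/* hypothetical resource size */
	uint64_t step = mem_size;
	do_div(step, 0x18);		/* step = mem_size / 0x18 */
	for (int i = 0; i < 0x18; i++)
		printf("reg_offset[%d] = %llu\n", i, (unsigned long long)(step * i));
	return 0;
}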
36320diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
36321index 9c5ea6c..eaad276 100644
36322--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
36323+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
36324@@ -1046,7 +1046,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
36325 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
36326 {
36327 /* RX_MODE controlling object */
36328- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
36329+ bnx2x_init_rx_mode_obj(bp);
36330
36331 /* multicast configuration controlling object */
36332 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
36333diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
36334index 614981c..11216c7 100644
36335--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
36336+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
36337@@ -2375,15 +2375,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
36338 return rc;
36339 }
36340
36341-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
36342- struct bnx2x_rx_mode_obj *o)
36343+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
36344 {
36345 if (CHIP_IS_E1x(bp)) {
36346- o->wait_comp = bnx2x_empty_rx_mode_wait;
36347- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
36348+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
36349+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
36350 } else {
36351- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
36352- o->config_rx_mode = bnx2x_set_rx_mode_e2;
36353+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
36354+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
36355 }
36356 }
36357
36358diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
36359index acf2fe4..efb96df 100644
36360--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
36361+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
36362@@ -1281,8 +1281,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
36363
36364 /********************* RX MODE ****************/
36365
36366-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
36367- struct bnx2x_rx_mode_obj *o);
36368+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
36369
36370 /**
36371 * bnx2x_config_rx_mode - Send an RX_MODE ramrod according to the provided parameters.
36372diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
36373index d9308c32..d87b824 100644
36374--- a/drivers/net/ethernet/broadcom/tg3.h
36375+++ b/drivers/net/ethernet/broadcom/tg3.h
36376@@ -140,6 +140,7 @@
36377 #define CHIPREV_ID_5750_A0 0x4000
36378 #define CHIPREV_ID_5750_A1 0x4001
36379 #define CHIPREV_ID_5750_A3 0x4003
36380+#define CHIPREV_ID_5750_C1 0x4201
36381 #define CHIPREV_ID_5750_C2 0x4202
36382 #define CHIPREV_ID_5752_A0_HW 0x5000
36383 #define CHIPREV_ID_5752_A0 0x6000
36384diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
36385index 8cffcdf..aadf043 100644
36386--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
36387+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
36388@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
36389 */
36390 struct l2t_skb_cb {
36391 arp_failure_handler_func arp_failure_handler;
36392-};
36393+} __no_const;
36394
36395 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
36396
36397diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
36398index f879e92..726f20f 100644
36399--- a/drivers/net/ethernet/dec/tulip/de4x5.c
36400+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
36401@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
36402 for (i=0; i<ETH_ALEN; i++) {
36403 tmp.addr[i] = dev->dev_addr[i];
36404 }
36405- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
36406+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
36407 break;
36408
36409 case DE4X5_SET_HWADDR: /* Set the hardware address */
36410@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
36411 spin_lock_irqsave(&lp->lock, flags);
36412 memcpy(&statbuf, &lp->pktStats, ioc->len);
36413 spin_unlock_irqrestore(&lp->lock, flags);
36414- if (copy_to_user(ioc->data, &statbuf, ioc->len))
36415+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
36416 return -EFAULT;
36417 break;
36418 }
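Both de4x5 ioctl fixes above follow the same infoleak rule: a user-controlled length must be validated against the size of the kernel-side buffer before copy_to_user(), or the copy discloses adjacent stack memory. A userspace model of the bounded copy, with memcpy() standing in for copy_to_user():

#include <errno.h>
#include <stddef.h>
#include <string.h>

static int bounded_copy(void *dst, const void *src, size_t len, size_t src_size)
{
	if (len > src_size)	/* would leak memory adjacent to src */
		return -EFAULT;
	memcpy(dst, src, len);
	return 0;
}

int main(void)
{
	char kernel_buf[6] = "hello";
	char user_buf[64];
	return bounded_copy(user_buf, kernel_buf, sizeof(kernel_buf), sizeof(kernel_buf));
}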
36419diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
36420index d1b6cc5..cde0d97 100644
36421--- a/drivers/net/ethernet/emulex/benet/be_main.c
36422+++ b/drivers/net/ethernet/emulex/benet/be_main.c
36423@@ -403,7 +403,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
36424
36425 if (wrapped)
36426 newacc += 65536;
36427- ACCESS_ONCE(*acc) = newacc;
36428+ ACCESS_ONCE_RW(*acc) = newacc;
36429 }
36430
36431 void be_parse_stats(struct be_adapter *adapter)
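The ACCESS_ONCE to ACCESS_ONCE_RW switches throughout this patch follow from PaX splitting the macro into a read view and a write view. The assumed shape of that split is sketched below (an assumption based on the pattern, not the kernel's exact definitions): reads go through a const volatile lvalue, so an accidental write through ACCESS_ONCE() becomes a compile error, and intentional writers must say _RW.

#include <stdio.h>

#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

int main(void)
{
	unsigned int acc = 0;
	ACCESS_ONCE_RW(acc) = 42;		/* intentional write */
	printf("%u\n", ACCESS_ONCE(acc));	/* read-only view */
	/* ACCESS_ONCE(acc) = 1;  -> error: assignment of read-only location */
	return 0;
}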
36432diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
36433index 74d749e..eefb1bd 100644
36434--- a/drivers/net/ethernet/faraday/ftgmac100.c
36435+++ b/drivers/net/ethernet/faraday/ftgmac100.c
36436@@ -31,6 +31,8 @@
36437 #include <linux/netdevice.h>
36438 #include <linux/phy.h>
36439 #include <linux/platform_device.h>
36440+#include <linux/interrupt.h>
36441+#include <linux/irqreturn.h>
36442 #include <net/ip.h>
36443
36444 #include "ftgmac100.h"
36445diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
36446index b901a01..1ff32ee 100644
36447--- a/drivers/net/ethernet/faraday/ftmac100.c
36448+++ b/drivers/net/ethernet/faraday/ftmac100.c
36449@@ -31,6 +31,8 @@
36450 #include <linux/module.h>
36451 #include <linux/netdevice.h>
36452 #include <linux/platform_device.h>
36453+#include <linux/interrupt.h>
36454+#include <linux/irqreturn.h>
36455
36456 #include "ftmac100.h"
36457
36458diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
36459index d929131..aed108f 100644
36460--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
36461+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
36462@@ -865,7 +865,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
36463 /* store the new cycle speed */
36464 adapter->cycle_speed = cycle_speed;
36465
36466- ACCESS_ONCE(adapter->base_incval) = incval;
36467+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
36468 smp_mb();
36469
36470 /* grab the ptp lock */
36471diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
36472index c2e420a..26a75e0 100644
36473--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
36474+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
36475@@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
36476 struct __vxge_hw_fifo *fifo;
36477 struct vxge_hw_fifo_config *config;
36478 u32 txdl_size, txdl_per_memblock;
36479- struct vxge_hw_mempool_cbs fifo_mp_callback;
36480+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
36481+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
36482+ };
36483+
36484 struct __vxge_hw_virtualpath *vpath;
36485
36486 if ((vp == NULL) || (attr == NULL)) {
36487@@ -3544,8 +3547,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
36488 goto exit;
36489 }
36490
36491- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
36492-
36493 fifo->mempool =
36494 __vxge_hw_mempool_create(vpath->hldev,
36495 fifo->config->memblock_size,
36496diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
36497index 927aa33..a6c2518 100644
36498--- a/drivers/net/ethernet/realtek/r8169.c
36499+++ b/drivers/net/ethernet/realtek/r8169.c
36500@@ -747,22 +747,22 @@ struct rtl8169_private {
36501 struct mdio_ops {
36502 void (*write)(struct rtl8169_private *, int, int);
36503 int (*read)(struct rtl8169_private *, int);
36504- } mdio_ops;
36505+ } __no_const mdio_ops;
36506
36507 struct pll_power_ops {
36508 void (*down)(struct rtl8169_private *);
36509 void (*up)(struct rtl8169_private *);
36510- } pll_power_ops;
36511+ } __no_const pll_power_ops;
36512
36513 struct jumbo_ops {
36514 void (*enable)(struct rtl8169_private *);
36515 void (*disable)(struct rtl8169_private *);
36516- } jumbo_ops;
36517+ } __no_const jumbo_ops;
36518
36519 struct csi_ops {
36520 void (*write)(struct rtl8169_private *, int, int);
36521 u32 (*read)(struct rtl8169_private *, int);
36522- } csi_ops;
36523+ } __no_const csi_ops;
36524
36525 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
36526 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
36527diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
36528index 0767043f..08c2553 100644
36529--- a/drivers/net/ethernet/sfc/ptp.c
36530+++ b/drivers/net/ethernet/sfc/ptp.c
36531@@ -553,7 +553,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
36532 (u32)((u64)ptp->start.dma_addr >> 32));
36533
36534 /* Clear flag that signals MC ready */
36535- ACCESS_ONCE(*start) = 0;
36536+ ACCESS_ONCE_RW(*start) = 0;
36537 efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
36538 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
36539
36540diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
36541index 0c74a70..3bc6f68 100644
36542--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
36543+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
36544@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
36545
36546 writel(value, ioaddr + MMC_CNTRL);
36547
36548- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
36549- MMC_CNTRL, value);
36550+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
36551+// MMC_CNTRL, value);
36552 }
36553
36554 /* To mask all interrupts. */
36555diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
36556index 5fd6f46..ee1f265 100644
36557--- a/drivers/net/hyperv/hyperv_net.h
36558+++ b/drivers/net/hyperv/hyperv_net.h
36559@@ -101,7 +101,7 @@ struct rndis_device {
36560
36561 enum rndis_device_state state;
36562 bool link_state;
36563- atomic_t new_req_id;
36564+ atomic_unchecked_t new_req_id;
36565
36566 spinlock_t request_lock;
36567 struct list_head req_list;
36568diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
36569index 928148c..d83298e 100644
36570--- a/drivers/net/hyperv/rndis_filter.c
36571+++ b/drivers/net/hyperv/rndis_filter.c
36572@@ -107,7 +107,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
36573 * template
36574 */
36575 set = &rndis_msg->msg.set_req;
36576- set->req_id = atomic_inc_return(&dev->new_req_id);
36577+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
36578
36579 /* Add to the request list */
36580 spin_lock_irqsave(&dev->request_lock, flags);
36581@@ -760,7 +760,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
36582
36583 /* Setup the rndis set */
36584 halt = &request->request_msg.msg.halt_req;
36585- halt->req_id = atomic_inc_return(&dev->new_req_id);
36586+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
36587
36588 /* Ignore return since this msg is optional. */
36589 rndis_filter_send_request(dev, request);
36590diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
36591index 7d39add..037e1da 100644
36592--- a/drivers/net/ieee802154/fakehard.c
36593+++ b/drivers/net/ieee802154/fakehard.c
36594@@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
36595 phy->transmit_power = 0xbf;
36596
36597 dev->netdev_ops = &fake_ops;
36598- dev->ml_priv = &fake_mlme;
36599+ dev->ml_priv = (void *)&fake_mlme;
36600
36601 priv = netdev_priv(dev);
36602 priv->phy = phy;
36603diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
36604index 0f0f9ce..0ca5819 100644
36605--- a/drivers/net/macvtap.c
36606+++ b/drivers/net/macvtap.c
36607@@ -1100,7 +1100,7 @@ static int macvtap_device_event(struct notifier_block *unused,
36608 return NOTIFY_DONE;
36609 }
36610
36611-static struct notifier_block macvtap_notifier_block __read_mostly = {
36612+static struct notifier_block macvtap_notifier_block = {
36613 .notifier_call = macvtap_device_event,
36614 };
36615
36616diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
36617index daec9b0..6428fcb 100644
36618--- a/drivers/net/phy/mdio-bitbang.c
36619+++ b/drivers/net/phy/mdio-bitbang.c
36620@@ -234,6 +234,7 @@ void free_mdio_bitbang(struct mii_bus *bus)
36621 struct mdiobb_ctrl *ctrl = bus->priv;
36622
36623 module_put(ctrl->ops->owner);
36624+ mdiobus_unregister(bus);
36625 mdiobus_free(bus);
36626 }
36627 EXPORT_SYMBOL(free_mdio_bitbang);
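The added mdiobus_unregister() pairs the teardown with the registration done at setup; freeing a bus that is still registered leaves the MDIO core holding a dangling pointer. The ordering rule in the abstract, with hypothetical stand-ins for mdiobus_unregister()/mdiobus_free():

#include <stdio.h>
#include <stdlib.h>

struct bus { int registered; };

static void bus_unregister(struct bus *b) { b->registered = 0; }
static void bus_free(struct bus *b)       { free(b); }

int main(void)
{
	struct bus *b = calloc(1, sizeof(*b));
	if (!b)
		return 1;
	b->registered = 1;
	bus_unregister(b);	/* reverse of register: core drops its pointer */
	bus_free(b);		/* only now is the memory safe to release */
	puts("teardown mirrored setup");
	return 0;
}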
36628diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
36629index eb3f5ce..d773730 100644
36630--- a/drivers/net/ppp/ppp_generic.c
36631+++ b/drivers/net/ppp/ppp_generic.c
36632@@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
36633 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
36634 struct ppp_stats stats;
36635 struct ppp_comp_stats cstats;
36636- char *vers;
36637
36638 switch (cmd) {
36639 case SIOCGPPPSTATS:
36640@@ -1021,8 +1020,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
36641 break;
36642
36643 case SIOCGPPPVER:
36644- vers = PPP_VERSION;
36645- if (copy_to_user(addr, vers, strlen(vers) + 1))
36646+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
36647 break;
36648 err = 0;
36649 break;
36650diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
36651index ad86660..9fd0884 100644
36652--- a/drivers/net/team/team.c
36653+++ b/drivers/net/team/team.c
36654@@ -2601,7 +2601,7 @@ static int team_device_event(struct notifier_block *unused,
36655 return NOTIFY_DONE;
36656 }
36657
36658-static struct notifier_block team_notifier_block __read_mostly = {
36659+static struct notifier_block team_notifier_block = {
36660 .notifier_call = team_device_event,
36661 };
36662
36663diff --git a/drivers/net/tun.c b/drivers/net/tun.c
36664index 0873cdc..ddb178e 100644
36665--- a/drivers/net/tun.c
36666+++ b/drivers/net/tun.c
36667@@ -1374,7 +1374,7 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
36668 }
36669
36670 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
36671- unsigned long arg, int ifreq_len)
36672+ unsigned long arg, size_t ifreq_len)
36673 {
36674 struct tun_file *tfile = file->private_data;
36675 struct tun_struct *tun;
36676@@ -1387,6 +1387,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
36677 int vnet_hdr_sz;
36678 int ret;
36679
36680+ if (ifreq_len > sizeof ifr)
36681+ return -EFAULT;
36682+
36683 if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) {
36684 if (copy_from_user(&ifr, argp, ifreq_len))
36685 return -EFAULT;
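The tun hunk changes ifreq_len from int to size_t and rejects oversized values up front: a signed length could go negative and be sign-extended into a huge count at the copy_from_user() call, and even a positive length larger than struct ifreq would overflow the stack buffer. A userspace model of the hardened ioctl path (memcpy() stands in for copy_from_user(); the struct is a stand-in for struct ifreq):

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct ifreq_model { char data[40]; };

static long do_ioctl(const void *argp, size_t ifreq_len)
{
	struct ifreq_model ifr;
	if (ifreq_len > sizeof(ifr))	/* no negative values, no overflow */
		return -EFAULT;
	memcpy(&ifr, argp, ifreq_len);
	return 0;
}

int main(void)
{
	char user[64] = {0};
	printf("ok=%ld rejected=%ld\n",
	       do_ioctl(user, sizeof(struct ifreq_model)),
	       do_ioctl(user, 64));
	return 0;
}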
36686diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
36687index 605a4ba..a883dd1 100644
36688--- a/drivers/net/usb/hso.c
36689+++ b/drivers/net/usb/hso.c
36690@@ -71,7 +71,7 @@
36691 #include <asm/byteorder.h>
36692 #include <linux/serial_core.h>
36693 #include <linux/serial.h>
36694-
36695+#include <asm/local.h>
36696
36697 #define MOD_AUTHOR "Option Wireless"
36698 #define MOD_DESCRIPTION "USB High Speed Option driver"
36699@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
36700 struct urb *urb;
36701
36702 urb = serial->rx_urb[0];
36703- if (serial->port.count > 0) {
36704+ if (atomic_read(&serial->port.count) > 0) {
36705 count = put_rxbuf_data(urb, serial);
36706 if (count == -1)
36707 return;
36708@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
36709 DUMP1(urb->transfer_buffer, urb->actual_length);
36710
36711 /* Anyone listening? */
36712- if (serial->port.count == 0)
36713+ if (atomic_read(&serial->port.count) == 0)
36714 return;
36715
36716 if (status == 0) {
36717@@ -1298,8 +1298,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
36718 tty_port_tty_set(&serial->port, tty);
36719
36720 /* check for port already opened, if not set the termios */
36721- serial->port.count++;
36722- if (serial->port.count == 1) {
36723+ if (atomic_inc_return(&serial->port.count) == 1) {
36724 serial->rx_state = RX_IDLE;
36725 /* Force default termio settings */
36726 _hso_serial_set_termios(tty, NULL);
36727@@ -1311,7 +1310,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
36728 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
36729 if (result) {
36730 hso_stop_serial_device(serial->parent);
36731- serial->port.count--;
36732+ atomic_dec(&serial->port.count);
36733 kref_put(&serial->parent->ref, hso_serial_ref_free);
36734 }
36735 } else {
36736@@ -1348,10 +1347,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
36737
36738 /* reset the rts and dtr */
36739 /* do the actual close */
36740- serial->port.count--;
36741+ atomic_dec(&serial->port.count);
36742
36743- if (serial->port.count <= 0) {
36744- serial->port.count = 0;
36745+ if (atomic_read(&serial->port.count) <= 0) {
36746+ atomic_set(&serial->port.count, 0);
36747 tty_port_tty_set(&serial->port, NULL);
36748 if (!usb_gone)
36749 hso_stop_serial_device(serial->parent);
36750@@ -1427,7 +1426,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
36751
36752 /* the actual setup */
36753 spin_lock_irqsave(&serial->serial_lock, flags);
36754- if (serial->port.count)
36755+ if (atomic_read(&serial->port.count))
36756 _hso_serial_set_termios(tty, old);
36757 else
36758 tty->termios = *old;
36759@@ -1886,7 +1885,7 @@ static void intr_callback(struct urb *urb)
36760 D1("Pending read interrupt on port %d\n", i);
36761 spin_lock(&serial->serial_lock);
36762 if (serial->rx_state == RX_IDLE &&
36763- serial->port.count > 0) {
36764+ atomic_read(&serial->port.count) > 0) {
36765 /* Setup and send a ctrl req read on
36766 * port i */
36767 if (!serial->rx_urb_filled[0]) {
36768@@ -3078,7 +3077,7 @@ static int hso_resume(struct usb_interface *iface)
36769 /* Start all serial ports */
36770 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
36771 if (serial_table[i] && (serial_table[i]->interface == iface)) {
36772- if (dev2ser(serial_table[i])->port.count) {
36773+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
36774 result =
36775 hso_start_serial_device(serial_table[i], GFP_NOIO);
36776 hso_kick_transmit(dev2ser(serial_table[i]));
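The hso conversions above turn the bare port.count integer into an atomic so that "increment and was I first?" is one indivisible step; the unlocked `count++; if (count == 1)` pair can let two concurrent opens both see a first open. A C11 sketch of the conversion (illustrative, not the driver's locking story in full):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int port_count;

static void serial_open(void)
{
	if (atomic_fetch_add(&port_count, 1) == 0)	/* previous value was 0 */
		puts("first open: start the device");
}

static void serial_close(void)
{
	if (atomic_fetch_sub(&port_count, 1) - 1 <= 0) {
		atomic_store(&port_count, 0);	/* clamp, as the patch does */
		puts("last close: stop the device");
	}
}

int main(void)
{
	serial_open();
	serial_open();
	serial_close();
	serial_close();
	return 0;
}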
36777diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
36778index 8d78253..bebbb68 100644
36779--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
36780+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
36781@@ -184,8 +184,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36782 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
36783 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
36784
36785- ACCESS_ONCE(ads->ds_link) = i->link;
36786- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
36787+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
36788+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
36789
36790 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
36791 ctl6 = SM(i->keytype, AR_EncrType);
36792@@ -199,26 +199,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36793
36794 if ((i->is_first || i->is_last) &&
36795 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
36796- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
36797+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
36798 | set11nTries(i->rates, 1)
36799 | set11nTries(i->rates, 2)
36800 | set11nTries(i->rates, 3)
36801 | (i->dur_update ? AR_DurUpdateEna : 0)
36802 | SM(0, AR_BurstDur);
36803
36804- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
36805+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
36806 | set11nRate(i->rates, 1)
36807 | set11nRate(i->rates, 2)
36808 | set11nRate(i->rates, 3);
36809 } else {
36810- ACCESS_ONCE(ads->ds_ctl2) = 0;
36811- ACCESS_ONCE(ads->ds_ctl3) = 0;
36812+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
36813+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
36814 }
36815
36816 if (!i->is_first) {
36817- ACCESS_ONCE(ads->ds_ctl0) = 0;
36818- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
36819- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
36820+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
36821+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
36822+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
36823 return;
36824 }
36825
36826@@ -243,7 +243,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36827 break;
36828 }
36829
36830- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
36831+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
36832 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
36833 | SM(i->txpower, AR_XmitPower)
36834 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
36835@@ -253,19 +253,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36836 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
36837 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
36838
36839- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
36840- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
36841+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
36842+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
36843
36844 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
36845 return;
36846
36847- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
36848+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
36849 | set11nPktDurRTSCTS(i->rates, 1);
36850
36851- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
36852+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
36853 | set11nPktDurRTSCTS(i->rates, 3);
36854
36855- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
36856+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
36857 | set11nRateFlags(i->rates, 1)
36858 | set11nRateFlags(i->rates, 2)
36859 | set11nRateFlags(i->rates, 3)
36860diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
36861index 301bf72..3f5654f 100644
36862--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
36863+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
36864@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36865 (i->qcu << AR_TxQcuNum_S) | desc_len;
36866
36867 checksum += val;
36868- ACCESS_ONCE(ads->info) = val;
36869+ ACCESS_ONCE_RW(ads->info) = val;
36870
36871 checksum += i->link;
36872- ACCESS_ONCE(ads->link) = i->link;
36873+ ACCESS_ONCE_RW(ads->link) = i->link;
36874
36875 checksum += i->buf_addr[0];
36876- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
36877+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
36878 checksum += i->buf_addr[1];
36879- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
36880+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
36881 checksum += i->buf_addr[2];
36882- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
36883+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
36884 checksum += i->buf_addr[3];
36885- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
36886+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
36887
36888 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
36889- ACCESS_ONCE(ads->ctl3) = val;
36890+ ACCESS_ONCE_RW(ads->ctl3) = val;
36891 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
36892- ACCESS_ONCE(ads->ctl5) = val;
36893+ ACCESS_ONCE_RW(ads->ctl5) = val;
36894 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
36895- ACCESS_ONCE(ads->ctl7) = val;
36896+ ACCESS_ONCE_RW(ads->ctl7) = val;
36897 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
36898- ACCESS_ONCE(ads->ctl9) = val;
36899+ ACCESS_ONCE_RW(ads->ctl9) = val;
36900
36901 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
36902- ACCESS_ONCE(ads->ctl10) = checksum;
36903+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
36904
36905 if (i->is_first || i->is_last) {
36906- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
36907+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
36908 | set11nTries(i->rates, 1)
36909 | set11nTries(i->rates, 2)
36910 | set11nTries(i->rates, 3)
36911 | (i->dur_update ? AR_DurUpdateEna : 0)
36912 | SM(0, AR_BurstDur);
36913
36914- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
36915+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
36916 | set11nRate(i->rates, 1)
36917 | set11nRate(i->rates, 2)
36918 | set11nRate(i->rates, 3);
36919 } else {
36920- ACCESS_ONCE(ads->ctl13) = 0;
36921- ACCESS_ONCE(ads->ctl14) = 0;
36922+ ACCESS_ONCE_RW(ads->ctl13) = 0;
36923+ ACCESS_ONCE_RW(ads->ctl14) = 0;
36924 }
36925
36926 ads->ctl20 = 0;
36927@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36928
36929 ctl17 = SM(i->keytype, AR_EncrType);
36930 if (!i->is_first) {
36931- ACCESS_ONCE(ads->ctl11) = 0;
36932- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
36933- ACCESS_ONCE(ads->ctl15) = 0;
36934- ACCESS_ONCE(ads->ctl16) = 0;
36935- ACCESS_ONCE(ads->ctl17) = ctl17;
36936- ACCESS_ONCE(ads->ctl18) = 0;
36937- ACCESS_ONCE(ads->ctl19) = 0;
36938+ ACCESS_ONCE_RW(ads->ctl11) = 0;
36939+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
36940+ ACCESS_ONCE_RW(ads->ctl15) = 0;
36941+ ACCESS_ONCE_RW(ads->ctl16) = 0;
36942+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
36943+ ACCESS_ONCE_RW(ads->ctl18) = 0;
36944+ ACCESS_ONCE_RW(ads->ctl19) = 0;
36945 return;
36946 }
36947
36948- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
36949+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
36950 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
36951 | SM(i->txpower, AR_XmitPower)
36952 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
36953@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36954 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
36955 ctl12 |= SM(val, AR_PAPRDChainMask);
36956
36957- ACCESS_ONCE(ads->ctl12) = ctl12;
36958- ACCESS_ONCE(ads->ctl17) = ctl17;
36959+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
36960+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
36961
36962- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
36963+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
36964 | set11nPktDurRTSCTS(i->rates, 1);
36965
36966- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
36967+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
36968 | set11nPktDurRTSCTS(i->rates, 3);
36969
36970- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
36971+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
36972 | set11nRateFlags(i->rates, 1)
36973 | set11nRateFlags(i->rates, 2)
36974 | set11nRateFlags(i->rates, 3)
36975 | SM(i->rtscts_rate, AR_RTSCTSRate);
36976
36977- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
36978+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
36979 }
36980
36981 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
36982diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
36983index dbc1b7a..67e2ca2 100644
36984--- a/drivers/net/wireless/ath/ath9k/hw.h
36985+++ b/drivers/net/wireless/ath/ath9k/hw.h
36986@@ -657,7 +657,7 @@ struct ath_hw_private_ops {
36987
36988 /* ANI */
36989 void (*ani_cache_ini_regs)(struct ath_hw *ah);
36990-};
36991+} __no_const;
36992
36993 /**
36994 * struct ath_hw_ops - callbacks used by hardware code and driver code
36995@@ -687,7 +687,7 @@ struct ath_hw_ops {
36996 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
36997 struct ath_hw_antcomb_conf *antconf);
36998 void (*antctrl_shared_chain_lnadiv)(struct ath_hw *hw, bool enable);
36999-};
37000+} __no_const;
37001
37002 struct ath_nf_limits {
37003 s16 max;
37004diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
37005index 71ced17..cd82b12 100644
37006--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
37007+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
37008@@ -184,7 +184,7 @@ struct brcmf_cfg80211_event_loop {
37009 struct net_device *ndev,
37010 const struct brcmf_event_msg *e,
37011 void *data);
37012-};
37013+} __no_const;
37014
37015 /* basic structure of scan request */
37016 struct brcmf_cfg80211_scan_req {
37017@@ -239,7 +239,7 @@ struct brcmf_cfg80211_profile {
37018 struct brcmf_cfg80211_iscan_eloop {
37019 s32 (*handler[WL_SCAN_ERSULTS_LAST])
37020 (struct brcmf_cfg80211_info *cfg);
37021-};
37022+} __no_const;
37023
37024 /* dongle iscan controller */
37025 struct brcmf_cfg80211_iscan_ctrl {
37026diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
37027index e252acb..6ad1e65 100644
37028--- a/drivers/net/wireless/iwlegacy/3945-mac.c
37029+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
37030@@ -3615,7 +3615,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
37031 */
37032 if (il3945_mod_params.disable_hw_scan) {
37033 D_INFO("Disabling hw_scan\n");
37034- il3945_mac_ops.hw_scan = NULL;
37035+ pax_open_kernel();
37036+ *(void **)&il3945_mac_ops.hw_scan = NULL;
37037+ pax_close_kernel();
37038 }
37039
37040 D_INFO("*** LOAD DRIVER ***\n");
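The pax_open_kernel()/pax_close_kernel() bracket, used here and in the wl1251/wl12xx/wl18xx and mac80211_hwsim hunks below, exists because constified ops tables live in read-only memory: a legitimate writer must explicitly open a write window, patch through a cast that strips the const, and close the window again. On x86 the kernel toggles write protection (the CR0.WP bit), not page mappings; the userspace analogy below uses mprotect() purely as a model.

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

struct ops { void (*hw_scan)(void); };

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	struct ops *ops = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ops == MAP_FAILED)
		return 1;
	mprotect(ops, pagesz, PROT_READ);		/* table is now "const" */

	mprotect(ops, pagesz, PROT_READ | PROT_WRITE);	/* ~ pax_open_kernel() */
	ops->hw_scan = NULL;				/* patch the pointer */
	mprotect(ops, pagesz, PROT_READ);		/* ~ pax_close_kernel() */
	puts("ops table patched inside an explicit write window");
	return 0;
}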
37041diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
37042index 1a98fa3..51e6661 100644
37043--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
37044+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
37045@@ -203,7 +203,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
37046 {
37047 struct iwl_priv *priv = file->private_data;
37048 char buf[64];
37049- int buf_size;
37050+ size_t buf_size;
37051 u32 offset, len;
37052
37053 memset(buf, 0, sizeof(buf));
37054@@ -473,7 +473,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
37055 struct iwl_priv *priv = file->private_data;
37056
37057 char buf[8];
37058- int buf_size;
37059+ size_t buf_size;
37060 u32 reset_flag;
37061
37062 memset(buf, 0, sizeof(buf));
37063@@ -554,7 +554,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
37064 {
37065 struct iwl_priv *priv = file->private_data;
37066 char buf[8];
37067- int buf_size;
37068+ size_t buf_size;
37069 int ht40;
37070
37071 memset(buf, 0, sizeof(buf));
37072@@ -606,7 +606,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
37073 {
37074 struct iwl_priv *priv = file->private_data;
37075 char buf[8];
37076- int buf_size;
37077+ size_t buf_size;
37078 int value;
37079
37080 memset(buf, 0, sizeof(buf));
37081@@ -1871,7 +1871,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
37082 {
37083 struct iwl_priv *priv = file->private_data;
37084 char buf[8];
37085- int buf_size;
37086+ size_t buf_size;
37087 int clear;
37088
37089 memset(buf, 0, sizeof(buf));
37090@@ -1916,7 +1916,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
37091 {
37092 struct iwl_priv *priv = file->private_data;
37093 char buf[8];
37094- int buf_size;
37095+ size_t buf_size;
37096 int trace;
37097
37098 memset(buf, 0, sizeof(buf));
37099@@ -1987,7 +1987,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
37100 {
37101 struct iwl_priv *priv = file->private_data;
37102 char buf[8];
37103- int buf_size;
37104+ size_t buf_size;
37105 int missed;
37106
37107 memset(buf, 0, sizeof(buf));
37108@@ -2028,7 +2028,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
37109
37110 struct iwl_priv *priv = file->private_data;
37111 char buf[8];
37112- int buf_size;
37113+ size_t buf_size;
37114 int plcp;
37115
37116 memset(buf, 0, sizeof(buf));
37117@@ -2088,7 +2088,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
37118
37119 struct iwl_priv *priv = file->private_data;
37120 char buf[8];
37121- int buf_size;
37122+ size_t buf_size;
37123 int flush;
37124
37125 memset(buf, 0, sizeof(buf));
37126@@ -2178,7 +2178,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
37127
37128 struct iwl_priv *priv = file->private_data;
37129 char buf[8];
37130- int buf_size;
37131+ size_t buf_size;
37132 int rts;
37133
37134 if (!priv->cfg->ht_params)
37135@@ -2220,7 +2220,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
37136 {
37137 struct iwl_priv *priv = file->private_data;
37138 char buf[8];
37139- int buf_size;
37140+ size_t buf_size;
37141
37142 memset(buf, 0, sizeof(buf));
37143 buf_size = min(count, sizeof(buf) - 1);
37144@@ -2256,7 +2256,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
37145 struct iwl_priv *priv = file->private_data;
37146 u32 event_log_flag;
37147 char buf[8];
37148- int buf_size;
37149+ size_t buf_size;
37150
37151 /* check that the interface is up */
37152 if (!iwl_is_ready(priv))
37153@@ -2310,7 +2310,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
37154 struct iwl_priv *priv = file->private_data;
37155 char buf[8];
37156 u32 calib_disabled;
37157- int buf_size;
37158+ size_t buf_size;
37159
37160 memset(buf, 0, sizeof(buf));
37161 buf_size = min(count, sizeof(buf) - 1);
37162diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
37163index fe0fffd..b4c5724 100644
37164--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
37165+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
37166@@ -1967,7 +1967,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
37167 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
37168
37169 char buf[8];
37170- int buf_size;
37171+ size_t buf_size;
37172 u32 reset_flag;
37173
37174 memset(buf, 0, sizeof(buf));
37175@@ -1988,7 +1988,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
37176 {
37177 struct iwl_trans *trans = file->private_data;
37178 char buf[8];
37179- int buf_size;
37180+ size_t buf_size;
37181 int csr;
37182
37183 memset(buf, 0, sizeof(buf));
37184diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
37185index 429ca32..f86236b 100644
37186--- a/drivers/net/wireless/mac80211_hwsim.c
37187+++ b/drivers/net/wireless/mac80211_hwsim.c
37188@@ -1751,9 +1751,11 @@ static int __init init_mac80211_hwsim(void)
37189 return -EINVAL;
37190
37191 if (fake_hw_scan) {
37192- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
37193- mac80211_hwsim_ops.sw_scan_start = NULL;
37194- mac80211_hwsim_ops.sw_scan_complete = NULL;
37195+ pax_open_kernel();
37196+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
37197+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
37198+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
37199+ pax_close_kernel();
37200 }
37201
37202 spin_lock_init(&hwsim_radio_lock);
37203diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
37204index bd1f0cb..db85ab0 100644
37205--- a/drivers/net/wireless/rndis_wlan.c
37206+++ b/drivers/net/wireless/rndis_wlan.c
37207@@ -1235,7 +1235,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
37208
37209 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
37210
37211- if (rts_threshold < 0 || rts_threshold > 2347)
37212+ if (rts_threshold > 2347)
37213 rts_threshold = 2347;
37214
37215 tmp = cpu_to_le32(rts_threshold);
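Dropping `rts_threshold < 0` is a dead-code fix, not a behavior change: rts_threshold is a u32, so the comparison can never be true and compilers flag it with -Wtype-limits; only the upper bound does any work. A minimal demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int rts_threshold = 3000;
	/* `rts_threshold < 0` on an unsigned type is always false,
	 * so clamping needs only the upper bound. */
	if (rts_threshold > 2347)
		rts_threshold = 2347;
	printf("%u\n", rts_threshold);
	return 0;
}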
37216diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
37217index 0751b35..246ba3e 100644
37218--- a/drivers/net/wireless/rt2x00/rt2x00.h
37219+++ b/drivers/net/wireless/rt2x00/rt2x00.h
37220@@ -398,7 +398,7 @@ struct rt2x00_intf {
37221 * for hardware which doesn't support hardware
37222 * sequence counting.
37223 */
37224- atomic_t seqno;
37225+ atomic_unchecked_t seqno;
37226 };
37227
37228 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
37229diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
37230index e488b94..14b6a0c 100644
37231--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
37232+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
37233@@ -240,9 +240,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
37234 * sequence counter given by mac80211.
37235 */
37236 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
37237- seqno = atomic_add_return(0x10, &intf->seqno);
37238+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
37239 else
37240- seqno = atomic_read(&intf->seqno);
37241+ seqno = atomic_read_unchecked(&intf->seqno);
37242
37243 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
37244 hdr->seq_ctrl |= cpu_to_le16(seqno);
37245diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
37246index e2750a1..797e179 100644
37247--- a/drivers/net/wireless/ti/wl1251/sdio.c
37248+++ b/drivers/net/wireless/ti/wl1251/sdio.c
37249@@ -269,13 +269,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
37250
37251 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
37252
37253- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
37254- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
37255+ pax_open_kernel();
37256+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
37257+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
37258+ pax_close_kernel();
37259
37260 wl1251_info("using dedicated interrupt line");
37261 } else {
37262- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
37263- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
37264+ pax_open_kernel();
37265+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
37266+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
37267+ pax_close_kernel();
37268
37269 wl1251_info("using SDIO interrupt");
37270 }
37271diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
37272index dadf1db..d9db7a7 100644
37273--- a/drivers/net/wireless/ti/wl12xx/main.c
37274+++ b/drivers/net/wireless/ti/wl12xx/main.c
37275@@ -644,7 +644,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
37276 sizeof(wl->conf.mem));
37277
37278 /* read data preparation is only needed by wl127x */
37279- wl->ops->prepare_read = wl127x_prepare_read;
37280+ pax_open_kernel();
37281+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
37282+ pax_close_kernel();
37283
37284 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
37285 WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
37286@@ -665,7 +667,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
37287 sizeof(wl->conf.mem));
37288
37289 /* read data preparation is only needed by wl127x */
37290- wl->ops->prepare_read = wl127x_prepare_read;
37291+ pax_open_kernel();
37292+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
37293+ pax_close_kernel();
37294
37295 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
37296 WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
37297diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
37298index a39682a..1e8220c 100644
37299--- a/drivers/net/wireless/ti/wl18xx/main.c
37300+++ b/drivers/net/wireless/ti/wl18xx/main.c
37301@@ -1489,8 +1489,10 @@ static int wl18xx_setup(struct wl1271 *wl)
37302 }
37303
37304 if (!checksum_param) {
37305- wl18xx_ops.set_rx_csum = NULL;
37306- wl18xx_ops.init_vif = NULL;
37307+ pax_open_kernel();
37308+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
37309+ *(void **)&wl18xx_ops.init_vif = NULL;
37310+ pax_close_kernel();
37311 }
37312
37313 /* Enable 11a Band only if we have 5G antennas */
37314diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
37315index d93b2b6..ae50401 100644
37316--- a/drivers/oprofile/buffer_sync.c
37317+++ b/drivers/oprofile/buffer_sync.c
37318@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
37319 if (cookie == NO_COOKIE)
37320 offset = pc;
37321 if (cookie == INVALID_COOKIE) {
37322- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
37323+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
37324 offset = pc;
37325 }
37326 if (cookie != last_cookie) {
37327@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
37328 /* add userspace sample */
37329
37330 if (!mm) {
37331- atomic_inc(&oprofile_stats.sample_lost_no_mm);
37332+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
37333 return 0;
37334 }
37335
37336 cookie = lookup_dcookie(mm, s->eip, &offset);
37337
37338 if (cookie == INVALID_COOKIE) {
37339- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
37340+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
37341 return 0;
37342 }
37343
37344@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
37345 /* ignore backtraces if failed to add a sample */
37346 if (state == sb_bt_start) {
37347 state = sb_bt_ignore;
37348- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
37349+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
37350 }
37351 }
37352 release_mm(mm);
37353diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
37354index c0cc4e7..44d4e54 100644
37355--- a/drivers/oprofile/event_buffer.c
37356+++ b/drivers/oprofile/event_buffer.c
37357@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
37358 }
37359
37360 if (buffer_pos == buffer_size) {
37361- atomic_inc(&oprofile_stats.event_lost_overflow);
37362+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
37363 return;
37364 }
37365
37366diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
37367index ed2c3ec..deda85a 100644
37368--- a/drivers/oprofile/oprof.c
37369+++ b/drivers/oprofile/oprof.c
37370@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
37371 if (oprofile_ops.switch_events())
37372 return;
37373
37374- atomic_inc(&oprofile_stats.multiplex_counter);
37375+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
37376 start_switch_worker();
37377 }
37378
37379diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
37380index 917d28e..d62d981 100644
37381--- a/drivers/oprofile/oprofile_stats.c
37382+++ b/drivers/oprofile/oprofile_stats.c
37383@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
37384 cpu_buf->sample_invalid_eip = 0;
37385 }
37386
37387- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
37388- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
37389- atomic_set(&oprofile_stats.event_lost_overflow, 0);
37390- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
37391- atomic_set(&oprofile_stats.multiplex_counter, 0);
37392+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
37393+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
37394+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
37395+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
37396+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
37397 }
37398
37399
37400diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
37401index 38b6fc0..b5cbfce 100644
37402--- a/drivers/oprofile/oprofile_stats.h
37403+++ b/drivers/oprofile/oprofile_stats.h
37404@@ -13,11 +13,11 @@
37405 #include <linux/atomic.h>
37406
37407 struct oprofile_stat_struct {
37408- atomic_t sample_lost_no_mm;
37409- atomic_t sample_lost_no_mapping;
37410- atomic_t bt_lost_no_mapping;
37411- atomic_t event_lost_overflow;
37412- atomic_t multiplex_counter;
37413+ atomic_unchecked_t sample_lost_no_mm;
37414+ atomic_unchecked_t sample_lost_no_mapping;
37415+ atomic_unchecked_t bt_lost_no_mapping;
37416+ atomic_unchecked_t event_lost_overflow;
37417+ atomic_unchecked_t multiplex_counter;
37418 };
37419
37420 extern struct oprofile_stat_struct oprofile_stats;
37421diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
37422index 849357c..b83c1e0 100644
37423--- a/drivers/oprofile/oprofilefs.c
37424+++ b/drivers/oprofile/oprofilefs.c
37425@@ -185,7 +185,7 @@ static const struct file_operations atomic_ro_fops = {
37426
37427
37428 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
37429- char const *name, atomic_t *val)
37430+ char const *name, atomic_unchecked_t *val)
37431 {
37432 return __oprofilefs_create_file(sb, root, name,
37433 &atomic_ro_fops, 0444, val);
37434diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
37435index 3f56bc0..707d642 100644
37436--- a/drivers/parport/procfs.c
37437+++ b/drivers/parport/procfs.c
37438@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
37439
37440 *ppos += len;
37441
37442- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
37443+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
37444 }
37445
37446 #ifdef CONFIG_PARPORT_1284
37447@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
37448
37449 *ppos += len;
37450
37451- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
37452+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
37453 }
37454 #endif /* IEEE1284.3 support. */
37455
37456diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
37457index a6a71c4..c91097b 100644
37458--- a/drivers/pci/hotplug/cpcihp_generic.c
37459+++ b/drivers/pci/hotplug/cpcihp_generic.c
37460@@ -73,7 +73,6 @@ static u16 port;
37461 static unsigned int enum_bit;
37462 static u8 enum_mask;
37463
37464-static struct cpci_hp_controller_ops generic_hpc_ops;
37465 static struct cpci_hp_controller generic_hpc;
37466
37467 static int __init validate_parameters(void)
37468@@ -139,6 +138,10 @@ static int query_enum(void)
37469 return ((value & enum_mask) == enum_mask);
37470 }
37471
37472+static struct cpci_hp_controller_ops generic_hpc_ops = {
37473+ .query_enum = query_enum,
37474+};
37475+
37476 static int __init cpcihp_generic_init(void)
37477 {
37478 int status;
37479@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
37480 pci_dev_put(dev);
37481
37482 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
37483- generic_hpc_ops.query_enum = query_enum;
37484 generic_hpc.ops = &generic_hpc_ops;
37485
37486 status = cpci_hp_register_controller(&generic_hpc);
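The cpcihp_generic hunk moves the single runtime assignment of .query_enum into a static designated initializer, so the ops table is fully formed at definition time and can be placed in read-only data by the constify plugin (or plain const). The shape of the pattern, with illustrative names:

#include <stdio.h>

struct hpc_ops { int (*query_enum)(void); };

static int query_enum(void) { return 1; }

/* Initialized at definition time instead of patched in init code. */
static const struct hpc_ops generic_hpc_ops = {
	.query_enum = query_enum,
};

int main(void)
{
	printf("enum asserted: %d\n", generic_hpc_ops.query_enum());
	return 0;
}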
37487diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
37488index 6bf8d2a..9711ce0 100644
37489--- a/drivers/pci/hotplug/cpcihp_zt5550.c
37490+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
37491@@ -59,7 +59,6 @@
37492 /* local variables */
37493 static bool debug;
37494 static bool poll;
37495-static struct cpci_hp_controller_ops zt5550_hpc_ops;
37496 static struct cpci_hp_controller zt5550_hpc;
37497
37498 /* Primary cPCI bus bridge device */
37499@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
37500 return 0;
37501 }
37502
37503+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
37504+ .query_enum = zt5550_hc_query_enum,
37505+};
37506+
37507 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
37508 {
37509 int status;
37510@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
37511 dbg("returned from zt5550_hc_config");
37512
37513 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
37514- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
37515 zt5550_hpc.ops = &zt5550_hpc_ops;
37516 if(!poll) {
37517 zt5550_hpc.irq = hc_dev->irq;
37518 zt5550_hpc.irq_flags = IRQF_SHARED;
37519 zt5550_hpc.dev_id = hc_dev;
37520
37521- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
37522- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
37523- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
37524+ pax_open_kernel();
37525+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
37526+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
37527+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
37528+ pax_close_kernel();
37529 } else {
37530 info("using ENUM# polling mode");
37531 }
37532diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
37533index 76ba8a1..20ca857 100644
37534--- a/drivers/pci/hotplug/cpqphp_nvram.c
37535+++ b/drivers/pci/hotplug/cpqphp_nvram.c
37536@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
37537
37538 void compaq_nvram_init (void __iomem *rom_start)
37539 {
37540+
37541+#ifndef CONFIG_PAX_KERNEXEC
37542 if (rom_start) {
37543 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
37544 }
37545+#endif
37546+
37547 dbg("int15 entry = %p\n", compaq_int15_entry_point);
37548
37549 /* initialize our int15 lock */
37550diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
37551index 213753b..b4abaac 100644
37552--- a/drivers/pci/pcie/aspm.c
37553+++ b/drivers/pci/pcie/aspm.c
37554@@ -27,9 +27,9 @@
37555 #define MODULE_PARAM_PREFIX "pcie_aspm."
37556
37557 /* Note: those are not register definitions */
37558-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
37559-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
37560-#define ASPM_STATE_L1 (4) /* L1 state */
37561+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
37562+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
37563+#define ASPM_STATE_L1 (4U) /* L1 state */
37564 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
37565 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
37566
37567diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
37568index ec909af..e7517f3 100644
37569--- a/drivers/pci/probe.c
37570+++ b/drivers/pci/probe.c
37571@@ -173,7 +173,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
37572 struct pci_bus_region region;
37573 bool bar_too_big = false, bar_disabled = false;
37574
37575- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
37576+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
37577
37578 /* No printks while decoding is disabled! */
37579 if (!dev->mmio_always_on) {
37580diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
37581index 9b8505c..f00870a 100644
37582--- a/drivers/pci/proc.c
37583+++ b/drivers/pci/proc.c
37584@@ -465,7 +465,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
37585 static int __init pci_proc_init(void)
37586 {
37587 struct pci_dev *dev = NULL;
37588+
37589+#ifdef CONFIG_GRKERNSEC_PROC_ADD
37590+#ifdef CONFIG_GRKERNSEC_PROC_USER
37591+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
37592+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
37593+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
37594+#endif
37595+#else
37596 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
37597+#endif
37598 proc_create("devices", 0, proc_bus_pci_dir,
37599 &proc_bus_pci_dev_operations);
37600 proc_initialized = 1;
37601diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
37602index 75dd651..2af4c9a 100644
37603--- a/drivers/platform/x86/thinkpad_acpi.c
37604+++ b/drivers/platform/x86/thinkpad_acpi.c
37605@@ -2097,7 +2097,7 @@ static int hotkey_mask_get(void)
37606 return 0;
37607 }
37608
37609-void static hotkey_mask_warn_incomplete_mask(void)
37610+static void hotkey_mask_warn_incomplete_mask(void)
37611 {
37612 /* log only what the user can fix... */
37613 const u32 wantedmask = hotkey_driver_mask &
37614@@ -2328,11 +2328,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
37615 }
37616 }
37617
37618-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37619- struct tp_nvram_state *newn,
37620- const u32 event_mask)
37621-{
37622-
37623 #define TPACPI_COMPARE_KEY(__scancode, __member) \
37624 do { \
37625 if ((event_mask & (1 << __scancode)) && \
37626@@ -2346,36 +2341,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37627 tpacpi_hotkey_send_key(__scancode); \
37628 } while (0)
37629
37630- void issue_volchange(const unsigned int oldvol,
37631- const unsigned int newvol)
37632- {
37633- unsigned int i = oldvol;
37634+static void issue_volchange(const unsigned int oldvol,
37635+ const unsigned int newvol,
37636+ const u32 event_mask)
37637+{
37638+ unsigned int i = oldvol;
37639
37640- while (i > newvol) {
37641- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
37642- i--;
37643- }
37644- while (i < newvol) {
37645- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
37646- i++;
37647- }
37648+ while (i > newvol) {
37649+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
37650+ i--;
37651 }
37652+ while (i < newvol) {
37653+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
37654+ i++;
37655+ }
37656+}
37657
37658- void issue_brightnesschange(const unsigned int oldbrt,
37659- const unsigned int newbrt)
37660- {
37661- unsigned int i = oldbrt;
37662+static void issue_brightnesschange(const unsigned int oldbrt,
37663+ const unsigned int newbrt,
37664+ const u32 event_mask)
37665+{
37666+ unsigned int i = oldbrt;
37667
37668- while (i > newbrt) {
37669- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
37670- i--;
37671- }
37672- while (i < newbrt) {
37673- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
37674- i++;
37675- }
37676+ while (i > newbrt) {
37677+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
37678+ i--;
37679+ }
37680+ while (i < newbrt) {
37681+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
37682+ i++;
37683 }
37684+}
37685
37686+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37687+ struct tp_nvram_state *newn,
37688+ const u32 event_mask)
37689+{
37690 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
37691 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
37692 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
37693@@ -2409,7 +2410,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37694 oldn->volume_level != newn->volume_level) {
37695 /* recently muted, or repeated mute keypress, or
37696 * multiple presses ending in mute */
37697- issue_volchange(oldn->volume_level, newn->volume_level);
37698+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
37699 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
37700 }
37701 } else {
37702@@ -2419,7 +2420,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37703 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
37704 }
37705 if (oldn->volume_level != newn->volume_level) {
37706- issue_volchange(oldn->volume_level, newn->volume_level);
37707+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
37708 } else if (oldn->volume_toggle != newn->volume_toggle) {
37709 /* repeated vol up/down keypress at end of scale ? */
37710 if (newn->volume_level == 0)
37711@@ -2432,7 +2433,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37712 /* handle brightness */
37713 if (oldn->brightness_level != newn->brightness_level) {
37714 issue_brightnesschange(oldn->brightness_level,
37715- newn->brightness_level);
37716+ newn->brightness_level,
37717+ event_mask);
37718 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
37719 /* repeated key presses that didn't change state */
37720 if (newn->brightness_level == 0)
37721@@ -2441,10 +2443,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37722 && !tp_features.bright_unkfw)
37723 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
37724 }
37725+}
37726
37727 #undef TPACPI_COMPARE_KEY
37728 #undef TPACPI_MAY_SEND_KEY
37729-}
37730
37731 /*
37732 * Polling driver
37733diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
37734index 769d265..a3a05ca 100644
37735--- a/drivers/pnp/pnpbios/bioscalls.c
37736+++ b/drivers/pnp/pnpbios/bioscalls.c
37737@@ -58,7 +58,7 @@ do { \
37738 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
37739 } while(0)
37740
37741-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
37742+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
37743 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
37744
37745 /*
37746@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
37747
37748 cpu = get_cpu();
37749 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
37750+
37751+ pax_open_kernel();
37752 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
37753+ pax_close_kernel();
37754
37755 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
37756 spin_lock_irqsave(&pnp_bios_lock, flags);
37757@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
37758 :"memory");
37759 spin_unlock_irqrestore(&pnp_bios_lock, flags);
37760
37761+ pax_open_kernel();
37762 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
37763+ pax_close_kernel();
37764+
37765 put_cpu();
37766
37767 /* If we get here and this is set then the PnP BIOS faulted on us. */
37768@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
37769 return status;
37770 }
37771
37772-void pnpbios_calls_init(union pnp_bios_install_struct *header)
37773+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
37774 {
37775 int i;
37776
37777@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
37778 pnp_bios_callpoint.offset = header->fields.pm16offset;
37779 pnp_bios_callpoint.segment = PNP_CS16;
37780
37781+ pax_open_kernel();
37782+
37783 for_each_possible_cpu(i) {
37784 struct desc_struct *gdt = get_cpu_gdt_table(i);
37785 if (!gdt)
37786@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
37787 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
37788 (unsigned long)__va(header->fields.pm16dseg));
37789 }
37790+
37791+ pax_close_kernel();
37792 }
37793diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
37794index b0ecacb..7c9da2e 100644
37795--- a/drivers/pnp/resource.c
37796+++ b/drivers/pnp/resource.c
37797@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
37798 return 1;
37799
37800 /* check if the resource is valid */
37801- if (*irq < 0 || *irq > 15)
37802+ if (*irq > 15)
37803 return 0;
37804
37805 /* check if the resource is reserved */
37806@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
37807 return 1;
37808
37809 /* check if the resource is valid */
37810- if (*dma < 0 || *dma == 4 || *dma > 7)
37811+ if (*dma == 4 || *dma > 7)
37812 return 0;
37813
37814 /* check if the resource is reserved */
37815diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
37816index 7df7c5f..bd48c47 100644
37817--- a/drivers/power/pda_power.c
37818+++ b/drivers/power/pda_power.c
37819@@ -37,7 +37,11 @@ static int polling;
37820
37821 #ifdef CONFIG_USB_OTG_UTILS
37822 static struct usb_phy *transceiver;
37823-static struct notifier_block otg_nb;
37824+static int otg_handle_notification(struct notifier_block *nb,
37825+ unsigned long event, void *unused);
37826+static struct notifier_block otg_nb = {
37827+ .notifier_call = otg_handle_notification
37828+};
37829 #endif
37830
37831 static struct regulator *ac_draw;
37832@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
37833
37834 #ifdef CONFIG_USB_OTG_UTILS
37835 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
37836- otg_nb.notifier_call = otg_handle_notification;
37837 ret = usb_register_notifier(transceiver, &otg_nb);
37838 if (ret) {
37839 dev_err(dev, "failure to register otg notifier\n");
37840diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
37841index 8d53174..04c65de 100644
37842--- a/drivers/regulator/max8660.c
37843+++ b/drivers/regulator/max8660.c
37844@@ -333,8 +333,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
37845 max8660->shadow_regs[MAX8660_OVER1] = 5;
37846 } else {
37847 /* Otherwise devices can be toggled via software */
37848- max8660_dcdc_ops.enable = max8660_dcdc_enable;
37849- max8660_dcdc_ops.disable = max8660_dcdc_disable;
37850+ pax_open_kernel();
37851+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
37852+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
37853+ pax_close_kernel();
37854 }
37855
37856 /*
37857diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
37858index 1fa6381..f58834e 100644
37859--- a/drivers/regulator/mc13892-regulator.c
37860+++ b/drivers/regulator/mc13892-regulator.c
37861@@ -540,10 +540,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
37862 }
37863 mc13xxx_unlock(mc13892);
37864
37865- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
37866+ pax_open_kernel();
37867+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
37868 = mc13892_vcam_set_mode;
37869- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
37870+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
37871 = mc13892_vcam_get_mode;
37872+ pax_close_kernel();
37873
37874 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
37875 ARRAY_SIZE(mc13892_regulators));
37876diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
37877index cace6d3..f623fda 100644
37878--- a/drivers/rtc/rtc-dev.c
37879+++ b/drivers/rtc/rtc-dev.c
37880@@ -14,6 +14,7 @@
37881 #include <linux/module.h>
37882 #include <linux/rtc.h>
37883 #include <linux/sched.h>
37884+#include <linux/grsecurity.h>
37885 #include "rtc-core.h"
37886
37887 static dev_t rtc_devt;
37888@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
37889 if (copy_from_user(&tm, uarg, sizeof(tm)))
37890 return -EFAULT;
37891
37892+ gr_log_timechange();
37893+
37894 return rtc_set_time(rtc, &tm);
37895
37896 case RTC_PIE_ON:
37897diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
37898index 4ad7e36..d004679 100644
37899--- a/drivers/scsi/bfa/bfa.h
37900+++ b/drivers/scsi/bfa/bfa.h
37901@@ -196,7 +196,7 @@ struct bfa_hwif_s {
37902 u32 *end);
37903 int cpe_vec_q0;
37904 int rme_vec_q0;
37905-};
37906+} __no_const;
37907 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
37908
37909 struct bfa_faa_cbfn_s {
37910diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
37911index e693af6..2e525b6 100644
37912--- a/drivers/scsi/bfa/bfa_fcpim.h
37913+++ b/drivers/scsi/bfa/bfa_fcpim.h
37914@@ -36,7 +36,7 @@ struct bfa_iotag_s {
37915
37916 struct bfa_itn_s {
37917 bfa_isr_func_t isr;
37918-};
37919+} __no_const;
37920
37921 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
37922 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
37923diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
37924index 23a90e7..9cf04ee 100644
37925--- a/drivers/scsi/bfa/bfa_ioc.h
37926+++ b/drivers/scsi/bfa/bfa_ioc.h
37927@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
37928 bfa_ioc_disable_cbfn_t disable_cbfn;
37929 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
37930 bfa_ioc_reset_cbfn_t reset_cbfn;
37931-};
37932+} __no_const;
37933
37934 /*
37935 * IOC event notification mechanism.
37936@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
37937 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
37938 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
37939 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
37940-};
37941+} __no_const;
37942
37943 /*
37944 * Queue element to wait for room in request queue. FIFO order is
37945diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
37946index 593085a..47aa999 100644
37947--- a/drivers/scsi/hosts.c
37948+++ b/drivers/scsi/hosts.c
37949@@ -42,7 +42,7 @@
37950 #include "scsi_logging.h"
37951
37952
37953-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
37954+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
37955
37956
37957 static void scsi_host_cls_release(struct device *dev)
37958@@ -361,7 +361,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
37959 * subtract one because we increment first then return, but we need to
37960 * know what the next host number was before increment
37961 */
37962- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
37963+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
37964 shost->dma_channel = 0xff;
37965
37966 /* These three are default values which can be overridden */
37967diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
37968index 4217e49..9c77e3e 100644
37969--- a/drivers/scsi/hpsa.c
37970+++ b/drivers/scsi/hpsa.c
37971@@ -554,7 +554,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
37972 unsigned long flags;
37973
37974 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
37975- return h->access.command_completed(h, q);
37976+ return h->access->command_completed(h, q);
37977
37978 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
37979 a = rq->head[rq->current_entry];
37980@@ -3374,7 +3374,7 @@ static void start_io(struct ctlr_info *h)
37981 while (!list_empty(&h->reqQ)) {
37982 c = list_entry(h->reqQ.next, struct CommandList, list);
37983 /* can't do anything if fifo is full */
37984- if ((h->access.fifo_full(h))) {
37985+ if ((h->access->fifo_full(h))) {
37986 dev_warn(&h->pdev->dev, "fifo full\n");
37987 break;
37988 }
37989@@ -3396,7 +3396,7 @@ static void start_io(struct ctlr_info *h)
37990
37991 /* Tell the controller execute command */
37992 spin_unlock_irqrestore(&h->lock, flags);
37993- h->access.submit_command(h, c);
37994+ h->access->submit_command(h, c);
37995 spin_lock_irqsave(&h->lock, flags);
37996 }
37997 spin_unlock_irqrestore(&h->lock, flags);
37998@@ -3404,17 +3404,17 @@ static void start_io(struct ctlr_info *h)
37999
38000 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
38001 {
38002- return h->access.command_completed(h, q);
38003+ return h->access->command_completed(h, q);
38004 }
38005
38006 static inline bool interrupt_pending(struct ctlr_info *h)
38007 {
38008- return h->access.intr_pending(h);
38009+ return h->access->intr_pending(h);
38010 }
38011
38012 static inline long interrupt_not_for_us(struct ctlr_info *h)
38013 {
38014- return (h->access.intr_pending(h) == 0) ||
38015+ return (h->access->intr_pending(h) == 0) ||
38016 (h->interrupts_enabled == 0);
38017 }
38018
38019@@ -4318,7 +4318,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
38020 if (prod_index < 0)
38021 return -ENODEV;
38022 h->product_name = products[prod_index].product_name;
38023- h->access = *(products[prod_index].access);
38024+ h->access = products[prod_index].access;
38025
38026 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
38027 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
38028@@ -4600,7 +4600,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
38029
38030 assert_spin_locked(&lockup_detector_lock);
38031 remove_ctlr_from_lockup_detector_list(h);
38032- h->access.set_intr_mask(h, HPSA_INTR_OFF);
38033+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
38034 spin_lock_irqsave(&h->lock, flags);
38035 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
38036 spin_unlock_irqrestore(&h->lock, flags);
38037@@ -4778,7 +4778,7 @@ reinit_after_soft_reset:
38038 }
38039
38040 /* make sure the board interrupts are off */
38041- h->access.set_intr_mask(h, HPSA_INTR_OFF);
38042+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
38043
38044 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
38045 goto clean2;
38046@@ -4812,7 +4812,7 @@ reinit_after_soft_reset:
38047 * fake ones to scoop up any residual completions.
38048 */
38049 spin_lock_irqsave(&h->lock, flags);
38050- h->access.set_intr_mask(h, HPSA_INTR_OFF);
38051+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
38052 spin_unlock_irqrestore(&h->lock, flags);
38053 free_irqs(h);
38054 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
38055@@ -4831,9 +4831,9 @@ reinit_after_soft_reset:
38056 dev_info(&h->pdev->dev, "Board READY.\n");
38057 dev_info(&h->pdev->dev,
38058 "Waiting for stale completions to drain.\n");
38059- h->access.set_intr_mask(h, HPSA_INTR_ON);
38060+ h->access->set_intr_mask(h, HPSA_INTR_ON);
38061 msleep(10000);
38062- h->access.set_intr_mask(h, HPSA_INTR_OFF);
38063+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
38064
38065 rc = controller_reset_failed(h->cfgtable);
38066 if (rc)
38067@@ -4854,7 +4854,7 @@ reinit_after_soft_reset:
38068 }
38069
38070 /* Turn the interrupts on so we can service requests */
38071- h->access.set_intr_mask(h, HPSA_INTR_ON);
38072+ h->access->set_intr_mask(h, HPSA_INTR_ON);
38073
38074 hpsa_hba_inquiry(h);
38075 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
38076@@ -4906,7 +4906,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
38077 * To write all data in the battery backed cache to disks
38078 */
38079 hpsa_flush_cache(h);
38080- h->access.set_intr_mask(h, HPSA_INTR_OFF);
38081+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
38082 hpsa_free_irqs_and_disable_msix(h);
38083 }
38084
38085@@ -5075,7 +5075,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
38086 return;
38087 }
38088 /* Change the access methods to the performant access methods */
38089- h->access = SA5_performant_access;
38090+ h->access = &SA5_performant_access;
38091 h->transMethod = CFGTBL_Trans_Performant;
38092 }
38093
38094diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
38095index 9816479..c5d4e97 100644
38096--- a/drivers/scsi/hpsa.h
38097+++ b/drivers/scsi/hpsa.h
38098@@ -79,7 +79,7 @@ struct ctlr_info {
38099 unsigned int msix_vector;
38100 unsigned int msi_vector;
38101 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
38102- struct access_method access;
38103+ struct access_method *access;
38104
38105 /* queue and queue Info */
38106 struct list_head reqQ;
38107diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
38108index c772d8d..35c362c 100644
38109--- a/drivers/scsi/libfc/fc_exch.c
38110+++ b/drivers/scsi/libfc/fc_exch.c
38111@@ -100,12 +100,12 @@ struct fc_exch_mgr {
38112 u16 pool_max_index;
38113
38114 struct {
38115- atomic_t no_free_exch;
38116- atomic_t no_free_exch_xid;
38117- atomic_t xid_not_found;
38118- atomic_t xid_busy;
38119- atomic_t seq_not_found;
38120- atomic_t non_bls_resp;
38121+ atomic_unchecked_t no_free_exch;
38122+ atomic_unchecked_t no_free_exch_xid;
38123+ atomic_unchecked_t xid_not_found;
38124+ atomic_unchecked_t xid_busy;
38125+ atomic_unchecked_t seq_not_found;
38126+ atomic_unchecked_t non_bls_resp;
38127 } stats;
38128 };
38129
38130@@ -725,7 +725,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
38131 /* allocate memory for exchange */
38132 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
38133 if (!ep) {
38134- atomic_inc(&mp->stats.no_free_exch);
38135+ atomic_inc_unchecked(&mp->stats.no_free_exch);
38136 goto out;
38137 }
38138 memset(ep, 0, sizeof(*ep));
38139@@ -786,7 +786,7 @@ out:
38140 return ep;
38141 err:
38142 spin_unlock_bh(&pool->lock);
38143- atomic_inc(&mp->stats.no_free_exch_xid);
38144+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
38145 mempool_free(ep, mp->ep_pool);
38146 return NULL;
38147 }
38148@@ -929,7 +929,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
38149 xid = ntohs(fh->fh_ox_id); /* we originated exch */
38150 ep = fc_exch_find(mp, xid);
38151 if (!ep) {
38152- atomic_inc(&mp->stats.xid_not_found);
38153+ atomic_inc_unchecked(&mp->stats.xid_not_found);
38154 reject = FC_RJT_OX_ID;
38155 goto out;
38156 }
38157@@ -959,7 +959,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
38158 ep = fc_exch_find(mp, xid);
38159 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
38160 if (ep) {
38161- atomic_inc(&mp->stats.xid_busy);
38162+ atomic_inc_unchecked(&mp->stats.xid_busy);
38163 reject = FC_RJT_RX_ID;
38164 goto rel;
38165 }
38166@@ -970,7 +970,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
38167 }
38168 xid = ep->xid; /* get our XID */
38169 } else if (!ep) {
38170- atomic_inc(&mp->stats.xid_not_found);
38171+ atomic_inc_unchecked(&mp->stats.xid_not_found);
38172 reject = FC_RJT_RX_ID; /* XID not found */
38173 goto out;
38174 }
38175@@ -987,7 +987,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
38176 } else {
38177 sp = &ep->seq;
38178 if (sp->id != fh->fh_seq_id) {
38179- atomic_inc(&mp->stats.seq_not_found);
38180+ atomic_inc_unchecked(&mp->stats.seq_not_found);
38181 if (f_ctl & FC_FC_END_SEQ) {
38182 /*
38183 * Update sequence_id based on incoming last
38184@@ -1437,22 +1437,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
38185
38186 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
38187 if (!ep) {
38188- atomic_inc(&mp->stats.xid_not_found);
38189+ atomic_inc_unchecked(&mp->stats.xid_not_found);
38190 goto out;
38191 }
38192 if (ep->esb_stat & ESB_ST_COMPLETE) {
38193- atomic_inc(&mp->stats.xid_not_found);
38194+ atomic_inc_unchecked(&mp->stats.xid_not_found);
38195 goto rel;
38196 }
38197 if (ep->rxid == FC_XID_UNKNOWN)
38198 ep->rxid = ntohs(fh->fh_rx_id);
38199 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
38200- atomic_inc(&mp->stats.xid_not_found);
38201+ atomic_inc_unchecked(&mp->stats.xid_not_found);
38202 goto rel;
38203 }
38204 if (ep->did != ntoh24(fh->fh_s_id) &&
38205 ep->did != FC_FID_FLOGI) {
38206- atomic_inc(&mp->stats.xid_not_found);
38207+ atomic_inc_unchecked(&mp->stats.xid_not_found);
38208 goto rel;
38209 }
38210 sof = fr_sof(fp);
38211@@ -1461,7 +1461,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
38212 sp->ssb_stat |= SSB_ST_RESP;
38213 sp->id = fh->fh_seq_id;
38214 } else if (sp->id != fh->fh_seq_id) {
38215- atomic_inc(&mp->stats.seq_not_found);
38216+ atomic_inc_unchecked(&mp->stats.seq_not_found);
38217 goto rel;
38218 }
38219
38220@@ -1525,9 +1525,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
38221 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
38222
38223 if (!sp)
38224- atomic_inc(&mp->stats.xid_not_found);
38225+ atomic_inc_unchecked(&mp->stats.xid_not_found);
38226 else
38227- atomic_inc(&mp->stats.non_bls_resp);
38228+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
38229
38230 fc_frame_free(fp);
38231 }
38232@@ -2174,13 +2174,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
38233
38234 list_for_each_entry(ema, &lport->ema_list, ema_list) {
38235 mp = ema->mp;
38236- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
38237+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
38238 st->fc_no_free_exch_xid +=
38239- atomic_read(&mp->stats.no_free_exch_xid);
38240- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
38241- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
38242- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
38243- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
38244+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
38245+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
38246+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
38247+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
38248+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
38249 }
38250 }
38251 EXPORT_SYMBOL(fc_exch_update_stats);
38252diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
38253index bdb81cd..d3c7c2c 100644
38254--- a/drivers/scsi/libsas/sas_ata.c
38255+++ b/drivers/scsi/libsas/sas_ata.c
38256@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
38257 .postreset = ata_std_postreset,
38258 .error_handler = ata_std_error_handler,
38259 .post_internal_cmd = sas_ata_post_internal,
38260- .qc_defer = ata_std_qc_defer,
38261+ .qc_defer = ata_std_qc_defer,
38262 .qc_prep = ata_noop_qc_prep,
38263 .qc_issue = sas_ata_qc_issue,
38264 .qc_fill_rtf = sas_ata_qc_fill_rtf,
38265diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
38266index 69b5993..1ac9dce 100644
38267--- a/drivers/scsi/lpfc/lpfc.h
38268+++ b/drivers/scsi/lpfc/lpfc.h
38269@@ -424,7 +424,7 @@ struct lpfc_vport {
38270 struct dentry *debug_nodelist;
38271 struct dentry *vport_debugfs_root;
38272 struct lpfc_debugfs_trc *disc_trc;
38273- atomic_t disc_trc_cnt;
38274+ atomic_unchecked_t disc_trc_cnt;
38275 #endif
38276 uint8_t stat_data_enabled;
38277 uint8_t stat_data_blocked;
38278@@ -840,8 +840,8 @@ struct lpfc_hba {
38279 struct timer_list fabric_block_timer;
38280 unsigned long bit_flags;
38281 #define FABRIC_COMANDS_BLOCKED 0
38282- atomic_t num_rsrc_err;
38283- atomic_t num_cmd_success;
38284+ atomic_unchecked_t num_rsrc_err;
38285+ atomic_unchecked_t num_cmd_success;
38286 unsigned long last_rsrc_error_time;
38287 unsigned long last_ramp_down_time;
38288 unsigned long last_ramp_up_time;
38289@@ -877,7 +877,7 @@ struct lpfc_hba {
38290
38291 struct dentry *debug_slow_ring_trc;
38292 struct lpfc_debugfs_trc *slow_ring_trc;
38293- atomic_t slow_ring_trc_cnt;
38294+ atomic_unchecked_t slow_ring_trc_cnt;
38295 /* iDiag debugfs sub-directory */
38296 struct dentry *idiag_root;
38297 struct dentry *idiag_pci_cfg;
38298diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
38299index f63f5ff..de29189 100644
38300--- a/drivers/scsi/lpfc/lpfc_debugfs.c
38301+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
38302@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
38303
38304 #include <linux/debugfs.h>
38305
38306-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
38307+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
38308 static unsigned long lpfc_debugfs_start_time = 0L;
38309
38310 /* iDiag */
38311@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
38312 lpfc_debugfs_enable = 0;
38313
38314 len = 0;
38315- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
38316+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
38317 (lpfc_debugfs_max_disc_trc - 1);
38318 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
38319 dtp = vport->disc_trc + i;
38320@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
38321 lpfc_debugfs_enable = 0;
38322
38323 len = 0;
38324- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
38325+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
38326 (lpfc_debugfs_max_slow_ring_trc - 1);
38327 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
38328 dtp = phba->slow_ring_trc + i;
38329@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
38330 !vport || !vport->disc_trc)
38331 return;
38332
38333- index = atomic_inc_return(&vport->disc_trc_cnt) &
38334+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
38335 (lpfc_debugfs_max_disc_trc - 1);
38336 dtp = vport->disc_trc + index;
38337 dtp->fmt = fmt;
38338 dtp->data1 = data1;
38339 dtp->data2 = data2;
38340 dtp->data3 = data3;
38341- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
38342+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
38343 dtp->jif = jiffies;
38344 #endif
38345 return;
38346@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
38347 !phba || !phba->slow_ring_trc)
38348 return;
38349
38350- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
38351+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
38352 (lpfc_debugfs_max_slow_ring_trc - 1);
38353 dtp = phba->slow_ring_trc + index;
38354 dtp->fmt = fmt;
38355 dtp->data1 = data1;
38356 dtp->data2 = data2;
38357 dtp->data3 = data3;
38358- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
38359+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
38360 dtp->jif = jiffies;
38361 #endif
38362 return;
38363@@ -4182,7 +4182,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
38364 "slow_ring buffer\n");
38365 goto debug_failed;
38366 }
38367- atomic_set(&phba->slow_ring_trc_cnt, 0);
38368+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
38369 memset(phba->slow_ring_trc, 0,
38370 (sizeof(struct lpfc_debugfs_trc) *
38371 lpfc_debugfs_max_slow_ring_trc));
38372@@ -4228,7 +4228,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
38373 "buffer\n");
38374 goto debug_failed;
38375 }
38376- atomic_set(&vport->disc_trc_cnt, 0);
38377+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
38378
38379 snprintf(name, sizeof(name), "discovery_trace");
38380 vport->debug_disc_trc =
38381diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
38382index 7dc4218..3436f08 100644
38383--- a/drivers/scsi/lpfc/lpfc_init.c
38384+++ b/drivers/scsi/lpfc/lpfc_init.c
38385@@ -10589,8 +10589,10 @@ lpfc_init(void)
38386 "misc_register returned with status %d", error);
38387
38388 if (lpfc_enable_npiv) {
38389- lpfc_transport_functions.vport_create = lpfc_vport_create;
38390- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
38391+ pax_open_kernel();
38392+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
38393+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
38394+ pax_close_kernel();
38395 }
38396 lpfc_transport_template =
38397 fc_attach_transport(&lpfc_transport_functions);
38398diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
38399index 7f45ac9..cf62eda 100644
38400--- a/drivers/scsi/lpfc/lpfc_scsi.c
38401+++ b/drivers/scsi/lpfc/lpfc_scsi.c
38402@@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
38403 uint32_t evt_posted;
38404
38405 spin_lock_irqsave(&phba->hbalock, flags);
38406- atomic_inc(&phba->num_rsrc_err);
38407+ atomic_inc_unchecked(&phba->num_rsrc_err);
38408 phba->last_rsrc_error_time = jiffies;
38409
38410 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
38411@@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
38412 unsigned long flags;
38413 struct lpfc_hba *phba = vport->phba;
38414 uint32_t evt_posted;
38415- atomic_inc(&phba->num_cmd_success);
38416+ atomic_inc_unchecked(&phba->num_cmd_success);
38417
38418 if (vport->cfg_lun_queue_depth <= queue_depth)
38419 return;
38420@@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
38421 unsigned long num_rsrc_err, num_cmd_success;
38422 int i;
38423
38424- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
38425- num_cmd_success = atomic_read(&phba->num_cmd_success);
38426+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
38427+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
38428
38429 /*
38430 * The error and success command counters are global per
38431@@ -419,8 +419,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
38432 }
38433 }
38434 lpfc_destroy_vport_work_array(phba, vports);
38435- atomic_set(&phba->num_rsrc_err, 0);
38436- atomic_set(&phba->num_cmd_success, 0);
38437+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
38438+ atomic_set_unchecked(&phba->num_cmd_success, 0);
38439 }
38440
38441 /**
38442@@ -454,8 +454,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
38443 }
38444 }
38445 lpfc_destroy_vport_work_array(phba, vports);
38446- atomic_set(&phba->num_rsrc_err, 0);
38447- atomic_set(&phba->num_cmd_success, 0);
38448+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
38449+ atomic_set_unchecked(&phba->num_cmd_success, 0);
38450 }
38451
38452 /**
38453diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
38454index af763ea..41904f7 100644
38455--- a/drivers/scsi/pmcraid.c
38456+++ b/drivers/scsi/pmcraid.c
38457@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
38458 res->scsi_dev = scsi_dev;
38459 scsi_dev->hostdata = res;
38460 res->change_detected = 0;
38461- atomic_set(&res->read_failures, 0);
38462- atomic_set(&res->write_failures, 0);
38463+ atomic_set_unchecked(&res->read_failures, 0);
38464+ atomic_set_unchecked(&res->write_failures, 0);
38465 rc = 0;
38466 }
38467 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
38468@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
38469
38470 /* If this was a SCSI read/write command keep count of errors */
38471 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
38472- atomic_inc(&res->read_failures);
38473+ atomic_inc_unchecked(&res->read_failures);
38474 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
38475- atomic_inc(&res->write_failures);
38476+ atomic_inc_unchecked(&res->write_failures);
38477
38478 if (!RES_IS_GSCSI(res->cfg_entry) &&
38479 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
38480@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
38481 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
38482 * hrrq_id assigned here in queuecommand
38483 */
38484- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
38485+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
38486 pinstance->num_hrrq;
38487 cmd->cmd_done = pmcraid_io_done;
38488
38489@@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
38490 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
38491 * hrrq_id assigned here in queuecommand
38492 */
38493- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
38494+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
38495 pinstance->num_hrrq;
38496
38497 if (request_size) {
38498@@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
38499
38500 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
38501 /* add resources only after host is added into system */
38502- if (!atomic_read(&pinstance->expose_resources))
38503+ if (!atomic_read_unchecked(&pinstance->expose_resources))
38504 return;
38505
38506 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
38507@@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
38508 init_waitqueue_head(&pinstance->reset_wait_q);
38509
38510 atomic_set(&pinstance->outstanding_cmds, 0);
38511- atomic_set(&pinstance->last_message_id, 0);
38512- atomic_set(&pinstance->expose_resources, 0);
38513+ atomic_set_unchecked(&pinstance->last_message_id, 0);
38514+ atomic_set_unchecked(&pinstance->expose_resources, 0);
38515
38516 INIT_LIST_HEAD(&pinstance->free_res_q);
38517 INIT_LIST_HEAD(&pinstance->used_res_q);
38518@@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
38519 /* Schedule worker thread to handle CCN and take care of adding and
38520 * removing devices to OS
38521 */
38522- atomic_set(&pinstance->expose_resources, 1);
38523+ atomic_set_unchecked(&pinstance->expose_resources, 1);
38524 schedule_work(&pinstance->worker_q);
38525 return rc;
38526
38527diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
38528index e1d150f..6c6df44 100644
38529--- a/drivers/scsi/pmcraid.h
38530+++ b/drivers/scsi/pmcraid.h
38531@@ -748,7 +748,7 @@ struct pmcraid_instance {
38532 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
38533
38534 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
38535- atomic_t last_message_id;
38536+ atomic_unchecked_t last_message_id;
38537
38538 /* configuration table */
38539 struct pmcraid_config_table *cfg_table;
38540@@ -777,7 +777,7 @@ struct pmcraid_instance {
38541 atomic_t outstanding_cmds;
38542
38543 /* should add/delete resources to mid-layer now ?*/
38544- atomic_t expose_resources;
38545+ atomic_unchecked_t expose_resources;
38546
38547
38548
38549@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
38550 struct pmcraid_config_table_entry_ext cfg_entry_ext;
38551 };
38552 struct scsi_device *scsi_dev; /* Link scsi_device structure */
38553- atomic_t read_failures; /* count of failed READ commands */
38554- atomic_t write_failures; /* count of failed WRITE commands */
38555+ atomic_unchecked_t read_failures; /* count of failed READ commands */
38556+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
38557
38558 /* To indicate add/delete/modify during CCN */
38559 u8 change_detected;
38560diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
38561index 83d7984..a27d947 100644
38562--- a/drivers/scsi/qla2xxx/qla_attr.c
38563+++ b/drivers/scsi/qla2xxx/qla_attr.c
38564@@ -1969,7 +1969,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
38565 return 0;
38566 }
38567
38568-struct fc_function_template qla2xxx_transport_functions = {
38569+fc_function_template_no_const qla2xxx_transport_functions = {
38570
38571 .show_host_node_name = 1,
38572 .show_host_port_name = 1,
38573@@ -2016,7 +2016,7 @@ struct fc_function_template qla2xxx_transport_functions = {
38574 .bsg_timeout = qla24xx_bsg_timeout,
38575 };
38576
38577-struct fc_function_template qla2xxx_transport_vport_functions = {
38578+fc_function_template_no_const qla2xxx_transport_vport_functions = {
38579
38580 .show_host_node_name = 1,
38581 .show_host_port_name = 1,
38582diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
38583index 6acb397..d86e3e0 100644
38584--- a/drivers/scsi/qla2xxx/qla_gbl.h
38585+++ b/drivers/scsi/qla2xxx/qla_gbl.h
38586@@ -515,8 +515,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
38587 struct device_attribute;
38588 extern struct device_attribute *qla2x00_host_attrs[];
38589 struct fc_function_template;
38590-extern struct fc_function_template qla2xxx_transport_functions;
38591-extern struct fc_function_template qla2xxx_transport_vport_functions;
38592+extern fc_function_template_no_const qla2xxx_transport_functions;
38593+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
38594 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
38595 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
38596 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
38597diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
38598index f4b1fc8..a1ce4dd 100644
38599--- a/drivers/scsi/qla2xxx/qla_os.c
38600+++ b/drivers/scsi/qla2xxx/qla_os.c
38601@@ -1462,8 +1462,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
38602 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
38603 /* Ok, a 64bit DMA mask is applicable. */
38604 ha->flags.enable_64bit_addressing = 1;
38605- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
38606- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
38607+ pax_open_kernel();
38608+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
38609+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
38610+ pax_close_kernel();
38611 return;
38612 }
38613 }
38614diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
38615index 329d553..f20d31d 100644
38616--- a/drivers/scsi/qla4xxx/ql4_def.h
38617+++ b/drivers/scsi/qla4xxx/ql4_def.h
38618@@ -273,7 +273,7 @@ struct ddb_entry {
38619 * (4000 only) */
38620 atomic_t relogin_timer; /* Max Time to wait for
38621 * relogin to complete */
38622- atomic_t relogin_retry_count; /* Num of times relogin has been
38623+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
38624 * retried */
38625 uint32_t default_time2wait; /* Default Min time between
38626 * relogins (+aens) */
38627diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
38628index fbc546e..c7d1b48 100644
38629--- a/drivers/scsi/qla4xxx/ql4_os.c
38630+++ b/drivers/scsi/qla4xxx/ql4_os.c
38631@@ -2621,12 +2621,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
38632 */
38633 if (!iscsi_is_session_online(cls_sess)) {
38634 /* Reset retry relogin timer */
38635- atomic_inc(&ddb_entry->relogin_retry_count);
38636+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
38637 DEBUG2(ql4_printk(KERN_INFO, ha,
38638 "%s: index[%d] relogin timed out-retrying"
38639 " relogin (%d), retry (%d)\n", __func__,
38640 ddb_entry->fw_ddb_index,
38641- atomic_read(&ddb_entry->relogin_retry_count),
38642+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
38643 ddb_entry->default_time2wait + 4));
38644 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
38645 atomic_set(&ddb_entry->retry_relogin_timer,
38646@@ -4738,7 +4738,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
38647
38648 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
38649 atomic_set(&ddb_entry->relogin_timer, 0);
38650- atomic_set(&ddb_entry->relogin_retry_count, 0);
38651+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
38652 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
38653 ddb_entry->default_relogin_timeout =
38654 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
38655diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
38656index 2c0d0ec..4e8681a 100644
38657--- a/drivers/scsi/scsi.c
38658+++ b/drivers/scsi/scsi.c
38659@@ -661,7 +661,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
38660 unsigned long timeout;
38661 int rtn = 0;
38662
38663- atomic_inc(&cmd->device->iorequest_cnt);
38664+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
38665
38666 /* check if the device is still usable */
38667 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
38668diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
38669index 9032e91..7a805d0 100644
38670--- a/drivers/scsi/scsi_lib.c
38671+++ b/drivers/scsi/scsi_lib.c
38672@@ -1454,7 +1454,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
38673 shost = sdev->host;
38674 scsi_init_cmd_errh(cmd);
38675 cmd->result = DID_NO_CONNECT << 16;
38676- atomic_inc(&cmd->device->iorequest_cnt);
38677+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
38678
38679 /*
38680 * SCSI request completion path will do scsi_device_unbusy(),
38681@@ -1480,9 +1480,9 @@ static void scsi_softirq_done(struct request *rq)
38682
38683 INIT_LIST_HEAD(&cmd->eh_entry);
38684
38685- atomic_inc(&cmd->device->iodone_cnt);
38686+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
38687 if (cmd->result)
38688- atomic_inc(&cmd->device->ioerr_cnt);
38689+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
38690
38691 disposition = scsi_decide_disposition(cmd);
38692 if (disposition != SUCCESS &&
38693diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
38694index 931a7d9..0c2a754 100644
38695--- a/drivers/scsi/scsi_sysfs.c
38696+++ b/drivers/scsi/scsi_sysfs.c
38697@@ -658,7 +658,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
38698 char *buf) \
38699 { \
38700 struct scsi_device *sdev = to_scsi_device(dev); \
38701- unsigned long long count = atomic_read(&sdev->field); \
38702+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
38703 return snprintf(buf, 20, "0x%llx\n", count); \
38704 } \
38705 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
38706diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
38707index 84a1fdf..693b0d6 100644
38708--- a/drivers/scsi/scsi_tgt_lib.c
38709+++ b/drivers/scsi/scsi_tgt_lib.c
38710@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
38711 int err;
38712
38713 dprintk("%lx %u\n", uaddr, len);
38714- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
38715+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
38716 if (err) {
38717 /*
38718 * TODO: need to fixup sg_tablesize, max_segment_size,
38719diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
38720index e894ca7..de9d7660 100644
38721--- a/drivers/scsi/scsi_transport_fc.c
38722+++ b/drivers/scsi/scsi_transport_fc.c
38723@@ -498,7 +498,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
38724 * Netlink Infrastructure
38725 */
38726
38727-static atomic_t fc_event_seq;
38728+static atomic_unchecked_t fc_event_seq;
38729
38730 /**
38731 * fc_get_event_number - Obtain the next sequential FC event number
38732@@ -511,7 +511,7 @@ static atomic_t fc_event_seq;
38733 u32
38734 fc_get_event_number(void)
38735 {
38736- return atomic_add_return(1, &fc_event_seq);
38737+ return atomic_add_return_unchecked(1, &fc_event_seq);
38738 }
38739 EXPORT_SYMBOL(fc_get_event_number);
38740
38741@@ -659,7 +659,7 @@ static __init int fc_transport_init(void)
38742 {
38743 int error;
38744
38745- atomic_set(&fc_event_seq, 0);
38746+ atomic_set_unchecked(&fc_event_seq, 0);
38747
38748 error = transport_class_register(&fc_host_class);
38749 if (error)
38750@@ -849,7 +849,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
38751 char *cp;
38752
38753 *val = simple_strtoul(buf, &cp, 0);
38754- if ((*cp && (*cp != '\n')) || (*val < 0))
38755+ if (*cp && (*cp != '\n'))
38756 return -EINVAL;
38757 /*
38758 * Check for overflow; dev_loss_tmo is u32
38759diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
38760index 31969f2..2b348f0 100644
38761--- a/drivers/scsi/scsi_transport_iscsi.c
38762+++ b/drivers/scsi/scsi_transport_iscsi.c
38763@@ -79,7 +79,7 @@ struct iscsi_internal {
38764 struct transport_container session_cont;
38765 };
38766
38767-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
38768+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
38769 static struct workqueue_struct *iscsi_eh_timer_workq;
38770
38771 static DEFINE_IDA(iscsi_sess_ida);
38772@@ -1064,7 +1064,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
38773 int err;
38774
38775 ihost = shost->shost_data;
38776- session->sid = atomic_add_return(1, &iscsi_session_nr);
38777+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
38778
38779 if (target_id == ISCSI_MAX_TARGET) {
38780 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
38781@@ -2943,7 +2943,7 @@ static __init int iscsi_transport_init(void)
38782 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
38783 ISCSI_TRANSPORT_VERSION);
38784
38785- atomic_set(&iscsi_session_nr, 0);
38786+ atomic_set_unchecked(&iscsi_session_nr, 0);
38787
38788 err = class_register(&iscsi_transport_class);
38789 if (err)
38790diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
38791index 21a045e..ec89e03 100644
38792--- a/drivers/scsi/scsi_transport_srp.c
38793+++ b/drivers/scsi/scsi_transport_srp.c
38794@@ -33,7 +33,7 @@
38795 #include "scsi_transport_srp_internal.h"
38796
38797 struct srp_host_attrs {
38798- atomic_t next_port_id;
38799+ atomic_unchecked_t next_port_id;
38800 };
38801 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
38802
38803@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
38804 struct Scsi_Host *shost = dev_to_shost(dev);
38805 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
38806
38807- atomic_set(&srp_host->next_port_id, 0);
38808+ atomic_set_unchecked(&srp_host->next_port_id, 0);
38809 return 0;
38810 }
38811
38812@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
38813 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
38814 rport->roles = ids->roles;
38815
38816- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
38817+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
38818 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
38819
38820 transport_setup_device(&rport->dev);
38821diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
38822index 352bc77..c049b14 100644
38823--- a/drivers/scsi/sd.c
38824+++ b/drivers/scsi/sd.c
38825@@ -2899,7 +2899,7 @@ static int sd_probe(struct device *dev)
38826 sdkp->disk = gd;
38827 sdkp->index = index;
38828 atomic_set(&sdkp->openers, 0);
38829- atomic_set(&sdkp->device->ioerr_cnt, 0);
38830+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
38831
38832 if (!sdp->request_queue->rq_timeout) {
38833 if (sdp->type != TYPE_MOD)
38834diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
38835index be2c9a6..275525c 100644
38836--- a/drivers/scsi/sg.c
38837+++ b/drivers/scsi/sg.c
38838@@ -1101,7 +1101,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
38839 sdp->disk->disk_name,
38840 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
38841 NULL,
38842- (char *)arg);
38843+ (char __user *)arg);
38844 case BLKTRACESTART:
38845 return blk_trace_startstop(sdp->device->request_queue, 1);
38846 case BLKTRACESTOP:
38847diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
38848index 84c2861..ece0a31 100644
38849--- a/drivers/spi/spi.c
38850+++ b/drivers/spi/spi.c
38851@@ -1453,7 +1453,7 @@ int spi_bus_unlock(struct spi_master *master)
38852 EXPORT_SYMBOL_GPL(spi_bus_unlock);
38853
38854 /* portable code must never pass more than 32 bytes */
38855-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
38856+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
38857
38858 static u8 *buf;
38859
38860diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
38861index 34afc16..ffe44dd 100644
38862--- a/drivers/staging/octeon/ethernet-rx.c
38863+++ b/drivers/staging/octeon/ethernet-rx.c
38864@@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
38865 /* Increment RX stats for virtual ports */
38866 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
38867 #ifdef CONFIG_64BIT
38868- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
38869- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
38870+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
38871+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
38872 #else
38873- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
38874- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
38875+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
38876+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
38877 #endif
38878 }
38879 netif_receive_skb(skb);
38880@@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
38881 dev->name);
38882 */
38883 #ifdef CONFIG_64BIT
38884- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
38885+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
38886 #else
38887- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
38888+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
38889 #endif
38890 dev_kfree_skb_irq(skb);
38891 }
38892diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
38893index 683bedc..86dba9a 100644
38894--- a/drivers/staging/octeon/ethernet.c
38895+++ b/drivers/staging/octeon/ethernet.c
38896@@ -252,11 +252,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
38897 * since the RX tasklet also increments it.
38898 */
38899 #ifdef CONFIG_64BIT
38900- atomic64_add(rx_status.dropped_packets,
38901- (atomic64_t *)&priv->stats.rx_dropped);
38902+ atomic64_add_unchecked(rx_status.dropped_packets,
38903+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
38904 #else
38905- atomic_add(rx_status.dropped_packets,
38906- (atomic_t *)&priv->stats.rx_dropped);
38907+ atomic_add_unchecked(rx_status.dropped_packets,
38908+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
38909 #endif
38910 }
38911
38912diff --git a/drivers/staging/ramster/tmem.c b/drivers/staging/ramster/tmem.c
38913index a2b7e03..aaf3630 100644
38914--- a/drivers/staging/ramster/tmem.c
38915+++ b/drivers/staging/ramster/tmem.c
38916@@ -50,25 +50,25 @@
38917 * A tmem host implementation must use this function to register callbacks
38918 * for memory allocation.
38919 */
38920-static struct tmem_hostops tmem_hostops;
38921+static struct tmem_hostops *tmem_hostops;
38922
38923 static void tmem_objnode_tree_init(void);
38924
38925 void tmem_register_hostops(struct tmem_hostops *m)
38926 {
38927 tmem_objnode_tree_init();
38928- tmem_hostops = *m;
38929+ tmem_hostops = m;
38930 }
38931
38932 /*
38933 * A tmem host implementation must use this function to register
38934 * callbacks for a page-accessible memory (PAM) implementation.
38935 */
38936-static struct tmem_pamops tmem_pamops;
38937+static struct tmem_pamops *tmem_pamops;
38938
38939 void tmem_register_pamops(struct tmem_pamops *m)
38940 {
38941- tmem_pamops = *m;
38942+ tmem_pamops = m;
38943 }
38944
38945 /*
38946@@ -174,7 +174,7 @@ static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
38947 obj->pampd_count = 0;
38948 #ifdef CONFIG_RAMSTER
38949-	if (tmem_pamops.new_obj != NULL)
38949+	if (tmem_pamops->new_obj != NULL)
38950- (*tmem_pamops.new_obj)(obj);
38951+ (tmem_pamops->new_obj)(obj);
38952 #endif
38953 SET_SENTINEL(obj, OBJ);
38954
38955@@ -210,7 +210,7 @@ static void tmem_pool_flush(struct tmem_pool *pool, bool destroy)
38956 rbnode = rb_next(rbnode);
38957 tmem_pampd_destroy_all_in_obj(obj, true);
38958 tmem_obj_free(obj, hb);
38959- (*tmem_hostops.obj_free)(obj, pool);
38960+ (tmem_hostops->obj_free)(obj, pool);
38961 }
38962 spin_unlock(&hb->lock);
38963 }
38964@@ -261,7 +261,7 @@ static struct tmem_objnode *tmem_objnode_alloc(struct tmem_obj *obj)
38965 ASSERT_SENTINEL(obj, OBJ);
38966 BUG_ON(obj->pool == NULL);
38967 ASSERT_SENTINEL(obj->pool, POOL);
38968- objnode = (*tmem_hostops.objnode_alloc)(obj->pool);
38969+ objnode = (tmem_hostops->objnode_alloc)(obj->pool);
38970 if (unlikely(objnode == NULL))
38971 goto out;
38972 objnode->obj = obj;
38973@@ -290,7 +290,7 @@ static void tmem_objnode_free(struct tmem_objnode *objnode)
38974 ASSERT_SENTINEL(pool, POOL);
38975 objnode->obj->objnode_count--;
38976 objnode->obj = NULL;
38977- (*tmem_hostops.objnode_free)(objnode, pool);
38978+ (tmem_hostops->objnode_free)(objnode, pool);
38979 }
38980
38981 /*
38982@@ -348,7 +348,7 @@ static void *tmem_pampd_replace_in_obj(struct tmem_obj *obj, uint32_t index,
38983 void *old_pampd = *(void **)slot;
38984 *(void **)slot = new_pampd;
38985 if (!no_free)
38986- (*tmem_pamops.free)(old_pampd, obj->pool,
38987+ (tmem_pamops->free)(old_pampd, obj->pool,
38988 NULL, 0, false);
38989 ret = new_pampd;
38990 }
38991@@ -505,7 +505,7 @@ static void tmem_objnode_node_destroy(struct tmem_obj *obj,
38992 if (objnode->slots[i]) {
38993 if (ht == 1) {
38994 obj->pampd_count--;
38995- (*tmem_pamops.free)(objnode->slots[i],
38996+ (tmem_pamops->free)(objnode->slots[i],
38997 obj->pool, NULL, 0, true);
38998 objnode->slots[i] = NULL;
38999 continue;
39000@@ -524,7 +524,7 @@ static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj,
39001 return;
39002 if (obj->objnode_tree_height == 0) {
39003 obj->pampd_count--;
39004- (*tmem_pamops.free)(obj->objnode_tree_root,
39005+ (tmem_pamops->free)(obj->objnode_tree_root,
39006 obj->pool, NULL, 0, true);
39007 } else {
39008 tmem_objnode_node_destroy(obj, obj->objnode_tree_root,
39009@@ -535,7 +535,7 @@ static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj,
39010 obj->objnode_tree_root = NULL;
39011 #ifdef CONFIG_RAMSTER
39012-	if (tmem_pamops.free_obj != NULL)
39012+	if (tmem_pamops->free_obj != NULL)
39013- (*tmem_pamops.free_obj)(obj->pool, obj, pool_destroy);
39014+ (tmem_pamops->free_obj)(obj->pool, obj, pool_destroy);
39015 #endif
39016 }
39017
39018@@ -574,7 +574,7 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
39019 /* if found, is a dup put, flush the old one */
39020 pampd_del = tmem_pampd_delete_from_obj(obj, index);
39021 BUG_ON(pampd_del != pampd);
39022- (*tmem_pamops.free)(pampd, pool, oidp, index, true);
39023+ (tmem_pamops->free)(pampd, pool, oidp, index, true);
39024 if (obj->pampd_count == 0) {
39025 objnew = obj;
39026 objfound = NULL;
39027@@ -582,7 +582,7 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
39028 pampd = NULL;
39029 }
39030 } else {
39031- obj = objnew = (*tmem_hostops.obj_alloc)(pool);
39032+ obj = objnew = (tmem_hostops->obj_alloc)(pool);
39033 if (unlikely(obj == NULL)) {
39034 ret = -ENOMEM;
39035 goto out;
39036@@ -597,16 +597,16 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
39037 if (unlikely(ret == -ENOMEM))
39038 /* may have partially built objnode tree ("stump") */
39039 goto delete_and_free;
39040- (*tmem_pamops.create_finish)(pampd, is_ephemeral(pool));
39041+ (tmem_pamops->create_finish)(pampd, is_ephemeral(pool));
39042 goto out;
39043
39044 delete_and_free:
39045 (void)tmem_pampd_delete_from_obj(obj, index);
39046 if (pampd)
39047- (*tmem_pamops.free)(pampd, pool, NULL, 0, true);
39048+ (tmem_pamops->free)(pampd, pool, NULL, 0, true);
39049 if (objnew) {
39050 tmem_obj_free(objnew, hb);
39051- (*tmem_hostops.obj_free)(objnew, pool);
39052+ (tmem_hostops->obj_free)(objnew, pool);
39053 }
39054 out:
39055 spin_unlock(&hb->lock);
39056@@ -651,7 +651,7 @@ void tmem_localify_finish(struct tmem_obj *obj, uint32_t index,
39057 if (pampd != NULL) {
39058 BUG_ON(obj == NULL);
39059 (void)tmem_pampd_replace_in_obj(obj, index, pampd, 1);
39060- (*tmem_pamops.create_finish)(pampd, is_ephemeral(obj->pool));
39061+ (tmem_pamops->create_finish)(pampd, is_ephemeral(obj->pool));
39062 } else if (delete) {
39063 BUG_ON(obj == NULL);
39064 (void)tmem_pampd_delete_from_obj(obj, index);
39065@@ -671,7 +671,7 @@ static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
39066 int ret = 0;
39067
39068 if (!is_ephemeral(pool))
39069- new_pampd = (*tmem_pamops.repatriate_preload)(
39070+ new_pampd = (tmem_pamops->repatriate_preload)(
39071 old_pampd, pool, oidp, index, &intransit);
39072 if (intransit)
39073 ret = -EAGAIN;
39074@@ -680,7 +680,7 @@ static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
39075 /* must release the hb->lock else repatriate can't sleep */
39076 spin_unlock(&hb->lock);
39077 if (!intransit)
39078- ret = (*tmem_pamops.repatriate)(old_pampd, new_pampd, pool,
39079+ ret = (tmem_pamops->repatriate)(old_pampd, new_pampd, pool,
39080 oidp, index, free, data);
39081 if (ret == -EAGAIN) {
39082 /* rare I think, but should cond_resched()??? */
39083@@ -714,7 +714,7 @@ int tmem_replace(struct tmem_pool *pool, struct tmem_oid *oidp,
39084 new_pampd = tmem_pampd_replace_in_obj(obj, index, new_pampd, 0);
39085 /* if we bug here, pamops wasn't properly set up for ramster */
39086-	BUG_ON(tmem_pamops.replace_in_obj == NULL);
39086+	BUG_ON(tmem_pamops->replace_in_obj == NULL);
39087- ret = (*tmem_pamops.replace_in_obj)(new_pampd, obj);
39088+ ret = (tmem_pamops->replace_in_obj)(new_pampd, obj);
39089 out:
39090 spin_unlock(&hb->lock);
39091 return ret;
39092@@ -776,15 +776,15 @@ int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
39093 if (free) {
39094 if (obj->pampd_count == 0) {
39095 tmem_obj_free(obj, hb);
39096- (*tmem_hostops.obj_free)(obj, pool);
39097+ (tmem_hostops->obj_free)(obj, pool);
39098 obj = NULL;
39099 }
39100 }
39101 if (free)
39102- ret = (*tmem_pamops.get_data_and_free)(
39103+ ret = (tmem_pamops->get_data_and_free)(
39104 data, sizep, raw, pampd, pool, oidp, index);
39105 else
39106- ret = (*tmem_pamops.get_data)(
39107+ ret = (tmem_pamops->get_data)(
39108 data, sizep, raw, pampd, pool, oidp, index);
39109 if (ret < 0)
39110 goto out;
39111@@ -816,10 +816,10 @@ int tmem_flush_page(struct tmem_pool *pool,
39112 pampd = tmem_pampd_delete_from_obj(obj, index);
39113 if (pampd == NULL)
39114 goto out;
39115- (*tmem_pamops.free)(pampd, pool, oidp, index, true);
39116+ (tmem_pamops->free)(pampd, pool, oidp, index, true);
39117 if (obj->pampd_count == 0) {
39118 tmem_obj_free(obj, hb);
39119- (*tmem_hostops.obj_free)(obj, pool);
39120+ (tmem_hostops->obj_free)(obj, pool);
39121 }
39122 ret = 0;
39123
39124@@ -844,7 +844,7 @@ int tmem_flush_object(struct tmem_pool *pool, struct tmem_oid *oidp)
39125 goto out;
39126 tmem_pampd_destroy_all_in_obj(obj, false);
39127 tmem_obj_free(obj, hb);
39128- (*tmem_hostops.obj_free)(obj, pool);
39129+ (tmem_hostops->obj_free)(obj, pool);
39130 ret = 0;
39131
39132 out:
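
Note on the ramster conversion: it swaps a by-value copy of the registered ops structure for a stored pointer, so the static object no longer has to be writable after init, and every (*ops.fn)(...) call site becomes (ops->fn)(...). One consequence is that the registered structure must now outlive all users, which holds here because callers pass statics. A minimal sketch of the pattern, with hypothetical names:

struct demo_hostops {
	void *(*obj_alloc)(void);
	void  (*obj_free)(void *);
};

/* was: static struct demo_hostops demo_hostops; (writable by-value copy) */
static struct demo_hostops *demo_hostops;

void demo_register_hostops(struct demo_hostops *m)
{
	demo_hostops = m;	/* was: demo_hostops = *m; -- m must now be static */
}

static void *demo_obj_alloc(void)
{
	return (demo_hostops->obj_alloc)();	/* was: (*demo_hostops.obj_alloc)() */
}
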
39133diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
39134index dc23395..cf7e9b1 100644
39135--- a/drivers/staging/rtl8712/rtl871x_io.h
39136+++ b/drivers/staging/rtl8712/rtl871x_io.h
39137@@ -108,7 +108,7 @@ struct _io_ops {
39138 u8 *pmem);
39139 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
39140 u8 *pmem);
39141-};
39142+} __no_const;
39143
39144 struct io_req {
39145 struct list_head list;
39146diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
39147index 180c963..1f18377 100644
39148--- a/drivers/staging/sbe-2t3e3/netdev.c
39149+++ b/drivers/staging/sbe-2t3e3/netdev.c
39150@@ -51,7 +51,7 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
39151 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
39152
39153 if (rlen)
39154- if (copy_to_user(data, &resp, rlen))
39155+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
39156 return -EFAULT;
39157
39158 return 0;
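
Note on the sbe-2t3e3 hunk: the added length check closes a potential stack infoleak, since rlen comes back from the device query and copying more than sizeof(resp) would hand adjacent stack bytes to userspace. The general pattern, sketched with a hypothetical reply structure:

#include <string.h>

struct demo_resp { unsigned char data[128]; };

/* returns 0 on success, -1 where kernel code would return -EFAULT */
static int demo_copy_reply(void *dst, const struct demo_resp *resp,
			   unsigned long rlen)
{
	if (rlen > sizeof(*resp))	/* never copy past the object we filled */
		return -1;
	memcpy(dst, resp, rlen);	/* kernel code would use copy_to_user() */
	return 0;
}
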
39159diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
39160index c66b8b3..a4a035b 100644
39161--- a/drivers/staging/usbip/vhci.h
39162+++ b/drivers/staging/usbip/vhci.h
39163@@ -83,7 +83,7 @@ struct vhci_hcd {
39164 unsigned resuming:1;
39165 unsigned long re_timeout;
39166
39167- atomic_t seqnum;
39168+ atomic_unchecked_t seqnum;
39169
39170 /*
39171 * NOTE:
39172diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
39173index 620d1be..1cd6711 100644
39174--- a/drivers/staging/usbip/vhci_hcd.c
39175+++ b/drivers/staging/usbip/vhci_hcd.c
39176@@ -471,7 +471,7 @@ static void vhci_tx_urb(struct urb *urb)
39177 return;
39178 }
39179
39180- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
39181+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
39182 if (priv->seqnum == 0xffff)
39183 dev_info(&urb->dev->dev, "seqnum max\n");
39184
39185@@ -723,7 +723,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
39186 return -ENOMEM;
39187 }
39188
39189- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
39190+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
39191 if (unlink->seqnum == 0xffff)
39192 pr_info("seqnum max\n");
39193
39194@@ -924,7 +924,7 @@ static int vhci_start(struct usb_hcd *hcd)
39195 vdev->rhport = rhport;
39196 }
39197
39198- atomic_set(&vhci->seqnum, 0);
39199+ atomic_set_unchecked(&vhci->seqnum, 0);
39200 spin_lock_init(&vhci->lock);
39201
39202 hcd->power_budget = 0; /* no limit */
39203diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
39204index f0eaf04..5a82e06 100644
39205--- a/drivers/staging/usbip/vhci_rx.c
39206+++ b/drivers/staging/usbip/vhci_rx.c
39207@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
39208 if (!urb) {
39209 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
39210 pr_info("max seqnum %d\n",
39211- atomic_read(&the_controller->seqnum));
39212+ atomic_read_unchecked(&the_controller->seqnum));
39213 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
39214 return;
39215 }
39216diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
39217index 67b1b88..6392fe9 100644
39218--- a/drivers/staging/vt6655/hostap.c
39219+++ b/drivers/staging/vt6655/hostap.c
39220@@ -73,14 +73,13 @@ static int msglevel =MSG_LEVEL_INFO;
39221 *
39222 */
39223
39224+static net_device_ops_no_const apdev_netdev_ops;
39225+
39226 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
39227 {
39228 PSDevice apdev_priv;
39229 struct net_device *dev = pDevice->dev;
39230 int ret;
39231- const struct net_device_ops apdev_netdev_ops = {
39232- .ndo_start_xmit = pDevice->tx_80211,
39233- };
39234
39235 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
39236
39237@@ -92,6 +91,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
39238 *apdev_priv = *pDevice;
39239 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
39240
39241+ /* only half broken now */
39242+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
39243 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
39244
39245 pDevice->apdev->type = ARPHRD_IEEE80211;
39246diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
39247index 0a73d40..6fda560 100644
39248--- a/drivers/staging/vt6656/hostap.c
39249+++ b/drivers/staging/vt6656/hostap.c
39250@@ -60,14 +60,13 @@ static int msglevel =MSG_LEVEL_INFO;
39251 *
39252 */
39253
39254+static net_device_ops_no_const apdev_netdev_ops;
39255+
39256 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
39257 {
39258 PSDevice apdev_priv;
39259 struct net_device *dev = pDevice->dev;
39260 int ret;
39261- const struct net_device_ops apdev_netdev_ops = {
39262- .ndo_start_xmit = pDevice->tx_80211,
39263- };
39264
39265 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
39266
39267@@ -79,6 +78,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
39268 *apdev_priv = *pDevice;
39269 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
39270
39271+ /* only half broken now */
39272+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
39273 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
39274
39275 pDevice->apdev->type = ARPHRD_IEEE80211;
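
Note on the vt6655/vt6656 hunks: the vanilla driver builds apdev_netdev_ops on the stack and then publishes its address through netdev_ops, which dangles as soon as the function returns; the patch moves the object to file scope (hence the "only half broken now" remark) and fills in the one per-device callback at enable time, which is also why the type must be the writable net_device_ops_no_const. Sketched with hypothetical names:

struct demo_ops { int (*start_xmit)(void *skb); };
struct demo_dev { const struct demo_ops *ops; };

static struct demo_ops apdev_ops;	/* file scope: address stays valid */

void demo_enable_hostapd(struct demo_dev *dev, int (*xmit)(void *))
{
	/* before: struct demo_ops local = { .start_xmit = xmit };
	 *         dev->ops = &local;   -- dangling once this frame is gone */
	apdev_ops.start_xmit = xmit;	/* runtime fill, hence non-const */
	dev->ops = &apdev_ops;
}
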
39276diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
39277index 56c8e60..1920c63 100644
39278--- a/drivers/staging/zcache/tmem.c
39279+++ b/drivers/staging/zcache/tmem.c
39280@@ -39,7 +39,7 @@
39281 * A tmem host implementation must use this function to register callbacks
39282 * for memory allocation.
39283 */
39284-static struct tmem_hostops tmem_hostops;
39285+static tmem_hostops_no_const tmem_hostops;
39286
39287 static void tmem_objnode_tree_init(void);
39288
39289@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
39290 * A tmem host implementation must use this function to register
39291 * callbacks for a page-accessible memory (PAM) implementation
39292 */
39293-static struct tmem_pamops tmem_pamops;
39294+static tmem_pamops_no_const tmem_pamops;
39295
39296 void tmem_register_pamops(struct tmem_pamops *m)
39297 {
39298diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
39299index 0d4aa82..f7832d4 100644
39300--- a/drivers/staging/zcache/tmem.h
39301+++ b/drivers/staging/zcache/tmem.h
39302@@ -180,6 +180,7 @@ struct tmem_pamops {
39303 void (*new_obj)(struct tmem_obj *);
39304 int (*replace_in_obj)(void *, struct tmem_obj *);
39305 };
39306+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
39307 extern void tmem_register_pamops(struct tmem_pamops *m);
39308
39309 /* memory allocation methods provided by the host implementation */
39310@@ -189,6 +190,7 @@ struct tmem_hostops {
39311 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
39312 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
39313 };
39314+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
39315 extern void tmem_register_hostops(struct tmem_hostops *m);
39316
39317 /* core tmem accessor functions */
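
Note on the zcache hunks: unlike ramster above, zcache keeps the by-value registration copies, so its static ops objects must remain writable; the *_no_const typedefs presumably exempt these all-function-pointer structs from grsecurity's constify plugin, which would otherwise force them into read-only memory. A sketch with the attribute stubbed out for plain compilers:

#ifndef __no_const
#define __no_const	/* under the constify plugin this is a real attribute */
#endif

struct demo_pamops { void (*free)(void *); };
typedef struct demo_pamops __no_const demo_pamops_no_const;

static demo_pamops_no_const demo_pamops;	/* stays writable */

void demo_register_pamops(struct demo_pamops *m)
{
	demo_pamops = *m;	/* struct copy needs a writable destination */
}
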
39318diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
39319index 13fe16c..cbdc39a 100644
39320--- a/drivers/target/target_core_transport.c
39321+++ b/drivers/target/target_core_transport.c
39322@@ -1085,7 +1085,7 @@ struct se_device *transport_add_device_to_core_hba(
39323 spin_lock_init(&dev->se_port_lock);
39324 spin_lock_init(&dev->se_tmr_lock);
39325 spin_lock_init(&dev->qf_cmd_lock);
39326- atomic_set(&dev->dev_ordered_id, 0);
39327+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
39328
39329 se_dev_set_default_attribs(dev, dev_limits);
39330
39331@@ -1275,7 +1275,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
39332 * Used to determine when ORDERED commands should go from
39333 * Dormant to Active status.
39334 */
39335- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
39336+ cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
39337 smp_mb__after_atomic_inc();
39338 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
39339 cmd->se_ordered_id, cmd->sam_task_attr,
39340diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
39341index 0a6a0bc..5501b06 100644
39342--- a/drivers/tty/cyclades.c
39343+++ b/drivers/tty/cyclades.c
39344@@ -1589,10 +1589,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
39345 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
39346 info->port.count);
39347 #endif
39348- info->port.count++;
39349+ atomic_inc(&info->port.count);
39350 #ifdef CY_DEBUG_COUNT
39351 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
39352- current->pid, info->port.count);
39353+ current->pid, atomic_read(&info->port.count));
39354 #endif
39355
39356 /*
39357@@ -3989,7 +3989,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
39358 for (j = 0; j < cy_card[i].nports; j++) {
39359 info = &cy_card[i].ports[j];
39360
39361- if (info->port.count) {
39362+ if (atomic_read(&info->port.count)) {
39363 /* XXX is the ldisc num worth this? */
39364 struct tty_struct *tty;
39365 struct tty_ldisc *ld;
39366diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
39367index 13ee53b..418d164 100644
39368--- a/drivers/tty/hvc/hvc_console.c
39369+++ b/drivers/tty/hvc/hvc_console.c
39370@@ -338,7 +338,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
39371
39372 spin_lock_irqsave(&hp->port.lock, flags);
39373 /* Check and then increment for fast path open. */
39374- if (hp->port.count++ > 0) {
39375+ if (atomic_inc_return(&hp->port.count) > 1) {
39376 spin_unlock_irqrestore(&hp->port.lock, flags);
39377 hvc_kick();
39378 return 0;
39379@@ -388,7 +388,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
39380
39381 spin_lock_irqsave(&hp->port.lock, flags);
39382
39383- if (--hp->port.count == 0) {
39384+ if (atomic_dec_return(&hp->port.count) == 0) {
39385 spin_unlock_irqrestore(&hp->port.lock, flags);
39386 /* We are done with the tty pointer now. */
39387 tty_port_tty_set(&hp->port, NULL);
39388@@ -406,9 +406,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
39389 */
39390 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
39391 } else {
39392- if (hp->port.count < 0)
39393+ if (atomic_read(&hp->port.count) < 0)
39394 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
39395- hp->vtermno, hp->port.count);
39396+ hp->vtermno, atomic_read(&hp->port.count));
39397 spin_unlock_irqrestore(&hp->port.lock, flags);
39398 }
39399 }
39400@@ -438,12 +438,12 @@ static void hvc_hangup(struct tty_struct *tty)
39401 * open->hangup case this can be called after the final close so prevent
39402 * that from happening for now.
39403 */
39404- if (hp->port.count <= 0) {
39405+ if (atomic_read(&hp->port.count) <= 0) {
39406 spin_unlock_irqrestore(&hp->port.lock, flags);
39407 return;
39408 }
39409
39410- hp->port.count = 0;
39411+ atomic_set(&hp->port.count, 0);
39412 spin_unlock_irqrestore(&hp->port.lock, flags);
39413 tty_port_tty_set(&hp->port, NULL);
39414
39415@@ -491,7 +491,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
39416 return -EPIPE;
39417
39418 /* FIXME what's this (unprotected) check for? */
39419- if (hp->port.count <= 0)
39420+ if (atomic_read(&hp->port.count) <= 0)
39421 return -EIO;
39422
39423 spin_lock_irqsave(&hp->lock, flags);
39424diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
39425index cab5c7a..4cc66ea 100644
39426--- a/drivers/tty/hvc/hvcs.c
39427+++ b/drivers/tty/hvc/hvcs.c
39428@@ -83,6 +83,7 @@
39429 #include <asm/hvcserver.h>
39430 #include <asm/uaccess.h>
39431 #include <asm/vio.h>
39432+#include <asm/local.h>
39433
39434 /*
39435 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
39436@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
39437
39438 spin_lock_irqsave(&hvcsd->lock, flags);
39439
39440- if (hvcsd->port.count > 0) {
39441+ if (atomic_read(&hvcsd->port.count) > 0) {
39442 spin_unlock_irqrestore(&hvcsd->lock, flags);
39443 printk(KERN_INFO "HVCS: vterm state unchanged. "
39444 "The hvcs device node is still in use.\n");
39445@@ -1132,7 +1133,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
39446 }
39447 }
39448
39449- hvcsd->port.count = 0;
39450+ atomic_set(&hvcsd->port.count, 0);
39451 hvcsd->port.tty = tty;
39452 tty->driver_data = hvcsd;
39453
39454@@ -1185,7 +1186,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
39455 unsigned long flags;
39456
39457 spin_lock_irqsave(&hvcsd->lock, flags);
39458- hvcsd->port.count++;
39459+ atomic_inc(&hvcsd->port.count);
39460 hvcsd->todo_mask |= HVCS_SCHED_READ;
39461 spin_unlock_irqrestore(&hvcsd->lock, flags);
39462
39463@@ -1221,7 +1222,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
39464 hvcsd = tty->driver_data;
39465
39466 spin_lock_irqsave(&hvcsd->lock, flags);
39467- if (--hvcsd->port.count == 0) {
39468+ if (atomic_dec_and_test(&hvcsd->port.count)) {
39469
39470 vio_disable_interrupts(hvcsd->vdev);
39471
39472@@ -1246,10 +1247,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
39473
39474 free_irq(irq, hvcsd);
39475 return;
39476- } else if (hvcsd->port.count < 0) {
39477+ } else if (atomic_read(&hvcsd->port.count) < 0) {
39478 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
39479 " is missmanaged.\n",
39480- hvcsd->vdev->unit_address, hvcsd->port.count);
39481+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
39482 }
39483
39484 spin_unlock_irqrestore(&hvcsd->lock, flags);
39485@@ -1271,7 +1272,7 @@ static void hvcs_hangup(struct tty_struct * tty)
39486
39487 spin_lock_irqsave(&hvcsd->lock, flags);
39488 /* Preserve this so that we know how many kref refs to put */
39489- temp_open_count = hvcsd->port.count;
39490+ temp_open_count = atomic_read(&hvcsd->port.count);
39491
39492 /*
39493 * Don't kref put inside the spinlock because the destruction
39494@@ -1286,7 +1287,7 @@ static void hvcs_hangup(struct tty_struct * tty)
39495 tty->driver_data = NULL;
39496 hvcsd->port.tty = NULL;
39497
39498- hvcsd->port.count = 0;
39499+ atomic_set(&hvcsd->port.count, 0);
39500
39501 /* This will drop any buffered data on the floor which is OK in a hangup
39502 * scenario. */
39503@@ -1357,7 +1358,7 @@ static int hvcs_write(struct tty_struct *tty,
39504 * the middle of a write operation? This is a crummy place to do this
39505 * but we want to keep it all in the spinlock.
39506 */
39507- if (hvcsd->port.count <= 0) {
39508+ if (atomic_read(&hvcsd->port.count) <= 0) {
39509 spin_unlock_irqrestore(&hvcsd->lock, flags);
39510 return -ENODEV;
39511 }
39512@@ -1431,7 +1432,7 @@ static int hvcs_write_room(struct tty_struct *tty)
39513 {
39514 struct hvcs_struct *hvcsd = tty->driver_data;
39515
39516- if (!hvcsd || hvcsd->port.count <= 0)
39517+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
39518 return 0;
39519
39520 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
39521diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
39522index 160f0ad..588b853 100644
39523--- a/drivers/tty/ipwireless/tty.c
39524+++ b/drivers/tty/ipwireless/tty.c
39525@@ -29,6 +29,7 @@
39526 #include <linux/tty_driver.h>
39527 #include <linux/tty_flip.h>
39528 #include <linux/uaccess.h>
39529+#include <asm/local.h>
39530
39531 #include "tty.h"
39532 #include "network.h"
39533@@ -99,10 +100,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
39534 mutex_unlock(&tty->ipw_tty_mutex);
39535 return -ENODEV;
39536 }
39537- if (tty->port.count == 0)
39538+ if (atomic_read(&tty->port.count) == 0)
39539 tty->tx_bytes_queued = 0;
39540
39541- tty->port.count++;
39542+ atomic_inc(&tty->port.count);
39543
39544 tty->port.tty = linux_tty;
39545 linux_tty->driver_data = tty;
39546@@ -118,9 +119,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
39547
39548 static void do_ipw_close(struct ipw_tty *tty)
39549 {
39550- tty->port.count--;
39551-
39552- if (tty->port.count == 0) {
39553+ if (atomic_dec_return(&tty->port.count) == 0) {
39554 struct tty_struct *linux_tty = tty->port.tty;
39555
39556 if (linux_tty != NULL) {
39557@@ -141,7 +140,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
39558 return;
39559
39560 mutex_lock(&tty->ipw_tty_mutex);
39561- if (tty->port.count == 0) {
39562+ if (atomic_read(&tty->port.count) == 0) {
39563 mutex_unlock(&tty->ipw_tty_mutex);
39564 return;
39565 }
39566@@ -170,7 +169,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
39567 return;
39568 }
39569
39570- if (!tty->port.count) {
39571+ if (!atomic_read(&tty->port.count)) {
39572 mutex_unlock(&tty->ipw_tty_mutex);
39573 return;
39574 }
39575@@ -212,7 +211,7 @@ static int ipw_write(struct tty_struct *linux_tty,
39576 return -ENODEV;
39577
39578 mutex_lock(&tty->ipw_tty_mutex);
39579- if (!tty->port.count) {
39580+ if (!atomic_read(&tty->port.count)) {
39581 mutex_unlock(&tty->ipw_tty_mutex);
39582 return -EINVAL;
39583 }
39584@@ -252,7 +251,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
39585 if (!tty)
39586 return -ENODEV;
39587
39588- if (!tty->port.count)
39589+ if (!atomic_read(&tty->port.count))
39590 return -EINVAL;
39591
39592 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
39593@@ -294,7 +293,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
39594 if (!tty)
39595 return 0;
39596
39597- if (!tty->port.count)
39598+ if (!atomic_read(&tty->port.count))
39599 return 0;
39600
39601 return tty->tx_bytes_queued;
39602@@ -375,7 +374,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
39603 if (!tty)
39604 return -ENODEV;
39605
39606- if (!tty->port.count)
39607+ if (!atomic_read(&tty->port.count))
39608 return -EINVAL;
39609
39610 return get_control_lines(tty);
39611@@ -391,7 +390,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
39612 if (!tty)
39613 return -ENODEV;
39614
39615- if (!tty->port.count)
39616+ if (!atomic_read(&tty->port.count))
39617 return -EINVAL;
39618
39619 return set_control_lines(tty, set, clear);
39620@@ -405,7 +404,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
39621 if (!tty)
39622 return -ENODEV;
39623
39624- if (!tty->port.count)
39625+ if (!atomic_read(&tty->port.count))
39626 return -EINVAL;
39627
39628 /* FIXME: Exactly how is the tty object locked here .. */
39629@@ -561,7 +560,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
39630 * are gone */
39631 mutex_lock(&ttyj->ipw_tty_mutex);
39632 }
39633- while (ttyj->port.count)
39634+ while (atomic_read(&ttyj->port.count))
39635 do_ipw_close(ttyj);
39636 ipwireless_disassociate_network_ttys(network,
39637 ttyj->channel_idx);
39638diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
39639index 56e616b..9d9f10a 100644
39640--- a/drivers/tty/moxa.c
39641+++ b/drivers/tty/moxa.c
39642@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
39643 }
39644
39645 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
39646- ch->port.count++;
39647+ atomic_inc(&ch->port.count);
39648 tty->driver_data = ch;
39649 tty_port_tty_set(&ch->port, tty);
39650 mutex_lock(&ch->port.mutex);
39651diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
39652index 1e8e8ce..a9efc93 100644
39653--- a/drivers/tty/n_gsm.c
39654+++ b/drivers/tty/n_gsm.c
39655@@ -1638,7 +1638,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
39656 kref_init(&dlci->ref);
39657 mutex_init(&dlci->mutex);
39658 dlci->fifo = &dlci->_fifo;
39659- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
39660+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
39661 kfree(dlci);
39662 return NULL;
39663 }
39664@@ -2925,7 +2925,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
39665 struct gsm_dlci *dlci = tty->driver_data;
39666 struct tty_port *port = &dlci->port;
39667
39668- port->count++;
39669+ atomic_inc(&port->count);
39670 dlci_get(dlci);
39671 dlci_get(dlci->gsm->dlci[0]);
39672 mux_get(dlci->gsm);
39673diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
39674index 8c0b7b4..e88f052 100644
39675--- a/drivers/tty/n_tty.c
39676+++ b/drivers/tty/n_tty.c
39677@@ -2142,6 +2142,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
39678 {
39679 *ops = tty_ldisc_N_TTY;
39680 ops->owner = NULL;
39681- ops->refcount = ops->flags = 0;
39682+ atomic_set(&ops->refcount, 0);
39683+ ops->flags = 0;
39684 }
39685 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
39686diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
39687index 8cf8d0a..4ef9ed0 100644
39688--- a/drivers/tty/pty.c
39689+++ b/drivers/tty/pty.c
39690@@ -730,8 +730,10 @@ static void __init unix98_pty_init(void)
39691 panic("Couldn't register Unix98 pts driver");
39692
39693 /* Now create the /dev/ptmx special device */
39694+ pax_open_kernel();
39695 tty_default_fops(&ptmx_fops);
39696- ptmx_fops.open = ptmx_open;
39697+ *(void **)&ptmx_fops.open = ptmx_open;
39698+ pax_close_kernel();
39699
39700 cdev_init(&ptmx_cdev, &ptmx_fops);
39701 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
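
Note on the pty hunk: under constify, ptmx_fops ends up read-only, so the one field assignment after tty_default_fops() has to open a write window; pax_open_kernel()/pax_close_kernel() temporarily lift kernel write protection (on x86, roughly a CR0.WP toggle) and the *(void **)& cast strips the qualifier for that single store. A userspace-shaped sketch, with the PaX helpers stubbed:

struct demo_fops { int (*open)(void); };

static void pax_open_kernel(void)  { /* would lift kernel write protection */ }
static void pax_close_kernel(void) { /* would restore it */ }

static void demo_set_open(const struct demo_fops *fops, int (*open_fn)(void))
{
	pax_open_kernel();
	/* cast away the plugin-imposed const for exactly one store */
	*(void **)&((struct demo_fops *)fops)->open = (void *)open_fn;
	pax_close_kernel();
}
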
39702diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
39703index 9700d34..df7520c 100644
39704--- a/drivers/tty/rocket.c
39705+++ b/drivers/tty/rocket.c
39706@@ -924,7 +924,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
39707 tty->driver_data = info;
39708 tty_port_tty_set(port, tty);
39709
39710- if (port->count++ == 0) {
39711+ if (atomic_inc_return(&port->count) == 1) {
39712 atomic_inc(&rp_num_ports_open);
39713
39714 #ifdef ROCKET_DEBUG_OPEN
39715@@ -933,7 +933,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
39716 #endif
39717 }
39718 #ifdef ROCKET_DEBUG_OPEN
39719- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
39720+	printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
39721 #endif
39722
39723 /*
39724@@ -1528,7 +1528,7 @@ static void rp_hangup(struct tty_struct *tty)
39725 spin_unlock_irqrestore(&info->port.lock, flags);
39726 return;
39727 }
39728- if (info->port.count)
39729+ if (atomic_read(&info->port.count))
39730 atomic_dec(&rp_num_ports_open);
39731 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
39732 spin_unlock_irqrestore(&info->port.lock, flags);
39733diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
39734index 1002054..dd644a8 100644
39735--- a/drivers/tty/serial/kgdboc.c
39736+++ b/drivers/tty/serial/kgdboc.c
39737@@ -24,8 +24,9 @@
39738 #define MAX_CONFIG_LEN 40
39739
39740 static struct kgdb_io kgdboc_io_ops;
39741+static struct kgdb_io kgdboc_io_ops_console;
39742
39743-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
39744+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
39745 static int configured = -1;
39746
39747 static char config[MAX_CONFIG_LEN];
39748@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
39749 kgdboc_unregister_kbd();
39750 if (configured == 1)
39751 kgdb_unregister_io_module(&kgdboc_io_ops);
39752+ else if (configured == 2)
39753+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
39754 }
39755
39756 static int configure_kgdboc(void)
39757@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
39758 int err;
39759 char *cptr = config;
39760 struct console *cons;
39761+ int is_console = 0;
39762
39763 err = kgdboc_option_setup(config);
39764 if (err || !strlen(config) || isspace(config[0]))
39765 goto noconfig;
39766
39767 err = -ENODEV;
39768- kgdboc_io_ops.is_console = 0;
39769 kgdb_tty_driver = NULL;
39770
39771 kgdboc_use_kms = 0;
39772@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
39773 int idx;
39774 if (cons->device && cons->device(cons, &idx) == p &&
39775 idx == tty_line) {
39776- kgdboc_io_ops.is_console = 1;
39777+ is_console = 1;
39778 break;
39779 }
39780 cons = cons->next;
39781@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
39782 kgdb_tty_line = tty_line;
39783
39784 do_register:
39785- err = kgdb_register_io_module(&kgdboc_io_ops);
39786+ if (is_console) {
39787+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
39788+ configured = 2;
39789+ } else {
39790+ err = kgdb_register_io_module(&kgdboc_io_ops);
39791+ configured = 1;
39792+ }
39793 if (err)
39794 goto noconfig;
39795
39796@@ -205,8 +214,6 @@ do_register:
39797 if (err)
39798 goto nmi_con_failed;
39799
39800- configured = 1;
39801-
39802 return 0;
39803
39804 nmi_con_failed:
39805@@ -223,7 +230,7 @@ noconfig:
39806 static int __init init_kgdboc(void)
39807 {
39808 /* Already configured? */
39809- if (configured == 1)
39810+ if (configured >= 1)
39811 return 0;
39812
39813 return configure_kgdboc();
39814@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
39815 if (config[len - 1] == '\n')
39816 config[len - 1] = '\0';
39817
39818- if (configured == 1)
39819+ if (configured >= 1)
39820 cleanup_kgdboc();
39821
39822 /* Go and configure with the new params. */
39823@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
39824 .post_exception = kgdboc_post_exp_handler,
39825 };
39826
39827+static struct kgdb_io kgdboc_io_ops_console = {
39828+ .name = "kgdboc",
39829+ .read_char = kgdboc_get_char,
39830+ .write_char = kgdboc_put_char,
39831+ .pre_exception = kgdboc_pre_exp_handler,
39832+ .post_exception = kgdboc_post_exp_handler,
39833+ .is_console = 1
39834+};
39835+
39836 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
39837 /* This is only available if kgdboc is a built in for early debugging */
39838 static int __init kgdboc_early_init(char *opt)
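
Note on the kgdboc hunks: rather than toggling kgdboc_io_ops.is_console at runtime (a write into what constify wants read-only), the patch keeps two fully initialized ops objects and registers whichever applies, with configured == 2 remembering the console variant so cleanup_kgdboc() unregisters the right one. The selection pattern, sketched with illustrative names:

struct demo_io { const char *name; int is_console; };

static const struct demo_io demo_io_ops         = { "demo", 0 };
static const struct demo_io demo_io_ops_console = { "demo", 1 };

static int configured;	/* 0 = none, 1 = plain, 2 = console */

static int demo_register(const struct demo_io *ops) { return 0; /* stub */ }

static int demo_configure(int is_console)
{
	const struct demo_io *ops =
		is_console ? &demo_io_ops_console : &demo_io_ops;

	configured = is_console ? 2 : 1;
	return demo_register(ops);
}
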
39839diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
39840index 7f04717..0f3794f 100644
39841--- a/drivers/tty/serial/samsung.c
39842+++ b/drivers/tty/serial/samsung.c
39843@@ -445,11 +445,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
39844 }
39845 }
39846
39847+static int s3c64xx_serial_startup(struct uart_port *port);
39848 static int s3c24xx_serial_startup(struct uart_port *port)
39849 {
39850 struct s3c24xx_uart_port *ourport = to_ourport(port);
39851 int ret;
39852
39853+ /* Startup sequence is different for s3c64xx and higher SoC's */
39854+ if (s3c24xx_serial_has_interrupt_mask(port))
39855+ return s3c64xx_serial_startup(port);
39856+
39857 dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
39858 port->mapbase, port->membase);
39859
39860@@ -1115,10 +1120,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
39861 /* setup info for port */
39862 port->dev = &platdev->dev;
39863
39864- /* Startup sequence is different for s3c64xx and higher SoC's */
39865- if (s3c24xx_serial_has_interrupt_mask(port))
39866- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
39867-
39868 port->uartclk = 1;
39869
39870 if (cfg->uart_flags & UPF_CONS_FLOW) {
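
Note on the samsung hunks: this is the same constify accommodation from the other direction; instead of rewriting s3c24xx_serial_ops.startup at probe time, the startup callback itself forwards to the s3c64xx path when the port uses interrupt masks, so the ops table is never written after init. Sketched with hypothetical names and predicate:

struct demo_port { int has_irq_mask; };

static int demo64_startup(struct demo_port *port);

static int demo24_startup(struct demo_port *port)
{
	/* dispatch here instead of patching the ops table at probe time */
	if (port->has_irq_mask)
		return demo64_startup(port);

	/* ... legacy s3c24xx-style startup ... */
	return 0;
}

static int demo64_startup(struct demo_port *port) { return 0; /* stub */ }
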
39871diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
39872index 0fcfd98..8244fce 100644
39873--- a/drivers/tty/serial/serial_core.c
39874+++ b/drivers/tty/serial/serial_core.c
39875@@ -1408,7 +1408,7 @@ static void uart_hangup(struct tty_struct *tty)
39876 uart_flush_buffer(tty);
39877 uart_shutdown(tty, state);
39878 spin_lock_irqsave(&port->lock, flags);
39879- port->count = 0;
39880+ atomic_set(&port->count, 0);
39881 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
39882 spin_unlock_irqrestore(&port->lock, flags);
39883 tty_port_tty_set(port, NULL);
39884@@ -1504,7 +1504,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
39885 goto end;
39886 }
39887
39888- port->count++;
39889+ atomic_inc(&port->count);
39890 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
39891 retval = -ENXIO;
39892 goto err_dec_count;
39893@@ -1531,7 +1531,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
39894 /*
39895 * Make sure the device is in D0 state.
39896 */
39897- if (port->count == 1)
39898+ if (atomic_read(&port->count) == 1)
39899 uart_change_pm(state, 0);
39900
39901 /*
39902@@ -1549,7 +1549,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
39903 end:
39904 return retval;
39905 err_dec_count:
39906- port->count--;
39907+	atomic_dec(&port->count);
39908 mutex_unlock(&port->mutex);
39909 goto end;
39910 }
39911diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
39912index 70e3a52..5742052 100644
39913--- a/drivers/tty/synclink.c
39914+++ b/drivers/tty/synclink.c
39915@@ -3095,7 +3095,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
39916
39917 if (debug_level >= DEBUG_LEVEL_INFO)
39918 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
39919- __FILE__,__LINE__, info->device_name, info->port.count);
39920+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
39921
39922 if (tty_port_close_start(&info->port, tty, filp) == 0)
39923 goto cleanup;
39924@@ -3113,7 +3113,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
39925 cleanup:
39926 if (debug_level >= DEBUG_LEVEL_INFO)
39927 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
39928- tty->driver->name, info->port.count);
39929+ tty->driver->name, atomic_read(&info->port.count));
39930
39931 } /* end of mgsl_close() */
39932
39933@@ -3212,8 +3212,8 @@ static void mgsl_hangup(struct tty_struct *tty)
39934
39935 mgsl_flush_buffer(tty);
39936 shutdown(info);
39937-
39938- info->port.count = 0;
39939+
39940+ atomic_set(&info->port.count, 0);
39941 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
39942 info->port.tty = NULL;
39943
39944@@ -3302,12 +3302,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
39945
39946 if (debug_level >= DEBUG_LEVEL_INFO)
39947 printk("%s(%d):block_til_ready before block on %s count=%d\n",
39948- __FILE__,__LINE__, tty->driver->name, port->count );
39949+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
39950
39951 spin_lock_irqsave(&info->irq_spinlock, flags);
39952 if (!tty_hung_up_p(filp)) {
39953 extra_count = true;
39954- port->count--;
39955+ atomic_dec(&port->count);
39956 }
39957 spin_unlock_irqrestore(&info->irq_spinlock, flags);
39958 port->blocked_open++;
39959@@ -3336,7 +3336,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
39960
39961 if (debug_level >= DEBUG_LEVEL_INFO)
39962 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
39963- __FILE__,__LINE__, tty->driver->name, port->count );
39964+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
39965
39966 tty_unlock(tty);
39967 schedule();
39968@@ -3348,12 +3348,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
39969
39970 /* FIXME: Racy on hangup during close wait */
39971 if (extra_count)
39972- port->count++;
39973+ atomic_inc(&port->count);
39974 port->blocked_open--;
39975
39976 if (debug_level >= DEBUG_LEVEL_INFO)
39977 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
39978- __FILE__,__LINE__, tty->driver->name, port->count );
39979+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
39980
39981 if (!retval)
39982 port->flags |= ASYNC_NORMAL_ACTIVE;
39983@@ -3405,7 +3405,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
39984
39985 if (debug_level >= DEBUG_LEVEL_INFO)
39986 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
39987- __FILE__,__LINE__,tty->driver->name, info->port.count);
39988+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
39989
39990 /* If port is closing, signal caller to try again */
39991 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
39992@@ -3424,10 +3424,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
39993 spin_unlock_irqrestore(&info->netlock, flags);
39994 goto cleanup;
39995 }
39996- info->port.count++;
39997+ atomic_inc(&info->port.count);
39998 spin_unlock_irqrestore(&info->netlock, flags);
39999
40000- if (info->port.count == 1) {
40001+ if (atomic_read(&info->port.count) == 1) {
40002 /* 1st open on this device, init hardware */
40003 retval = startup(info);
40004 if (retval < 0)
40005@@ -3451,8 +3451,8 @@ cleanup:
40006 if (retval) {
40007 if (tty->count == 1)
40008 info->port.tty = NULL; /* tty layer will release tty struct */
40009- if(info->port.count)
40010- info->port.count--;
40011+ if (atomic_read(&info->port.count))
40012+ atomic_dec(&info->port.count);
40013 }
40014
40015 return retval;
40016@@ -7661,7 +7661,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
40017 unsigned short new_crctype;
40018
40019 /* return error if TTY interface open */
40020- if (info->port.count)
40021+ if (atomic_read(&info->port.count))
40022 return -EBUSY;
40023
40024 switch (encoding)
40025@@ -7756,7 +7756,7 @@ static int hdlcdev_open(struct net_device *dev)
40026
40027 /* arbitrate between network and tty opens */
40028 spin_lock_irqsave(&info->netlock, flags);
40029- if (info->port.count != 0 || info->netcount != 0) {
40030+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
40031 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
40032 spin_unlock_irqrestore(&info->netlock, flags);
40033 return -EBUSY;
40034@@ -7842,7 +7842,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
40035 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
40036
40037 /* return error if TTY interface open */
40038- if (info->port.count)
40039+ if (atomic_read(&info->port.count))
40040 return -EBUSY;
40041
40042 if (cmd != SIOCWANDEV)
40043diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
40044index b38e954..ce45b38 100644
40045--- a/drivers/tty/synclink_gt.c
40046+++ b/drivers/tty/synclink_gt.c
40047@@ -671,7 +671,7 @@ static int open(struct tty_struct *tty, struct file *filp)
40048 tty->driver_data = info;
40049 info->port.tty = tty;
40050
40051- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
40052+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
40053
40054 /* If port is closing, signal caller to try again */
40055 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
40056@@ -692,10 +692,10 @@ static int open(struct tty_struct *tty, struct file *filp)
40057 mutex_unlock(&info->port.mutex);
40058 goto cleanup;
40059 }
40060- info->port.count++;
40061+ atomic_inc(&info->port.count);
40062 spin_unlock_irqrestore(&info->netlock, flags);
40063
40064- if (info->port.count == 1) {
40065+ if (atomic_read(&info->port.count) == 1) {
40066 /* 1st open on this device, init hardware */
40067 retval = startup(info);
40068 if (retval < 0) {
40069@@ -716,8 +716,8 @@ cleanup:
40070 if (retval) {
40071 if (tty->count == 1)
40072 info->port.tty = NULL; /* tty layer will release tty struct */
40073- if(info->port.count)
40074- info->port.count--;
40075+ if(atomic_read(&info->port.count))
40076+ atomic_dec(&info->port.count);
40077 }
40078
40079 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
40080@@ -730,7 +730,7 @@ static void close(struct tty_struct *tty, struct file *filp)
40081
40082 if (sanity_check(info, tty->name, "close"))
40083 return;
40084- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
40085+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
40086
40087 if (tty_port_close_start(&info->port, tty, filp) == 0)
40088 goto cleanup;
40089@@ -747,7 +747,7 @@ static void close(struct tty_struct *tty, struct file *filp)
40090 tty_port_close_end(&info->port, tty);
40091 info->port.tty = NULL;
40092 cleanup:
40093- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
40094+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
40095 }
40096
40097 static void hangup(struct tty_struct *tty)
40098@@ -765,7 +765,7 @@ static void hangup(struct tty_struct *tty)
40099 shutdown(info);
40100
40101 spin_lock_irqsave(&info->port.lock, flags);
40102- info->port.count = 0;
40103+ atomic_set(&info->port.count, 0);
40104 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
40105 info->port.tty = NULL;
40106 spin_unlock_irqrestore(&info->port.lock, flags);
40107@@ -1450,7 +1450,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
40108 unsigned short new_crctype;
40109
40110 /* return error if TTY interface open */
40111- if (info->port.count)
40112+ if (atomic_read(&info->port.count))
40113 return -EBUSY;
40114
40115 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
40116@@ -1545,7 +1545,7 @@ static int hdlcdev_open(struct net_device *dev)
40117
40118 /* arbitrate between network and tty opens */
40119 spin_lock_irqsave(&info->netlock, flags);
40120- if (info->port.count != 0 || info->netcount != 0) {
40121+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
40122 DBGINFO(("%s hdlc_open busy\n", dev->name));
40123 spin_unlock_irqrestore(&info->netlock, flags);
40124 return -EBUSY;
40125@@ -1630,7 +1630,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
40126 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
40127
40128 /* return error if TTY interface open */
40129- if (info->port.count)
40130+ if (atomic_read(&info->port.count))
40131 return -EBUSY;
40132
40133 if (cmd != SIOCWANDEV)
40134@@ -2419,7 +2419,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
40135 if (port == NULL)
40136 continue;
40137 spin_lock(&port->lock);
40138- if ((port->port.count || port->netcount) &&
40139+ if ((atomic_read(&port->port.count) || port->netcount) &&
40140 port->pending_bh && !port->bh_running &&
40141 !port->bh_requested) {
40142 DBGISR(("%s bh queued\n", port->device_name));
40143@@ -3308,7 +3308,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
40144 spin_lock_irqsave(&info->lock, flags);
40145 if (!tty_hung_up_p(filp)) {
40146 extra_count = true;
40147- port->count--;
40148+ atomic_dec(&port->count);
40149 }
40150 spin_unlock_irqrestore(&info->lock, flags);
40151 port->blocked_open++;
40152@@ -3345,7 +3345,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
40153 remove_wait_queue(&port->open_wait, &wait);
40154
40155 if (extra_count)
40156- port->count++;
40157+ atomic_inc(&port->count);
40158 port->blocked_open--;
40159
40160 if (!retval)
40161diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
40162index f17d9f3..27a041b 100644
40163--- a/drivers/tty/synclinkmp.c
40164+++ b/drivers/tty/synclinkmp.c
40165@@ -751,7 +751,7 @@ static int open(struct tty_struct *tty, struct file *filp)
40166
40167 if (debug_level >= DEBUG_LEVEL_INFO)
40168 printk("%s(%d):%s open(), old ref count = %d\n",
40169- __FILE__,__LINE__,tty->driver->name, info->port.count);
40170+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
40171
40172 /* If port is closing, signal caller to try again */
40173 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
40174@@ -770,10 +770,10 @@ static int open(struct tty_struct *tty, struct file *filp)
40175 spin_unlock_irqrestore(&info->netlock, flags);
40176 goto cleanup;
40177 }
40178- info->port.count++;
40179+ atomic_inc(&info->port.count);
40180 spin_unlock_irqrestore(&info->netlock, flags);
40181
40182- if (info->port.count == 1) {
40183+ if (atomic_read(&info->port.count) == 1) {
40184 /* 1st open on this device, init hardware */
40185 retval = startup(info);
40186 if (retval < 0)
40187@@ -797,8 +797,8 @@ cleanup:
40188 if (retval) {
40189 if (tty->count == 1)
40190 info->port.tty = NULL; /* tty layer will release tty struct */
40191- if(info->port.count)
40192- info->port.count--;
40193+ if(atomic_read(&info->port.count))
40194+ atomic_dec(&info->port.count);
40195 }
40196
40197 return retval;
40198@@ -816,7 +816,7 @@ static void close(struct tty_struct *tty, struct file *filp)
40199
40200 if (debug_level >= DEBUG_LEVEL_INFO)
40201 printk("%s(%d):%s close() entry, count=%d\n",
40202- __FILE__,__LINE__, info->device_name, info->port.count);
40203+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
40204
40205 if (tty_port_close_start(&info->port, tty, filp) == 0)
40206 goto cleanup;
40207@@ -835,7 +835,7 @@ static void close(struct tty_struct *tty, struct file *filp)
40208 cleanup:
40209 if (debug_level >= DEBUG_LEVEL_INFO)
40210 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
40211- tty->driver->name, info->port.count);
40212+ tty->driver->name, atomic_read(&info->port.count));
40213 }
40214
40215 /* Called by tty_hangup() when a hangup is signaled.
40216@@ -858,7 +858,7 @@ static void hangup(struct tty_struct *tty)
40217 shutdown(info);
40218
40219 spin_lock_irqsave(&info->port.lock, flags);
40220- info->port.count = 0;
40221+ atomic_set(&info->port.count, 0);
40222 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
40223 info->port.tty = NULL;
40224 spin_unlock_irqrestore(&info->port.lock, flags);
40225@@ -1566,7 +1566,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
40226 unsigned short new_crctype;
40227
40228 /* return error if TTY interface open */
40229- if (info->port.count)
40230+ if (atomic_read(&info->port.count))
40231 return -EBUSY;
40232
40233 switch (encoding)
40234@@ -1661,7 +1661,7 @@ static int hdlcdev_open(struct net_device *dev)
40235
40236 /* arbitrate between network and tty opens */
40237 spin_lock_irqsave(&info->netlock, flags);
40238- if (info->port.count != 0 || info->netcount != 0) {
40239+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
40240 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
40241 spin_unlock_irqrestore(&info->netlock, flags);
40242 return -EBUSY;
40243@@ -1747,7 +1747,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
40244 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
40245
40246 /* return error if TTY interface open */
40247- if (info->port.count)
40248+ if (atomic_read(&info->port.count))
40249 return -EBUSY;
40250
40251 if (cmd != SIOCWANDEV)
40252@@ -2632,7 +2632,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
40253 * do not request bottom half processing if the
40254 * device is not open in a normal mode.
40255 */
40256- if ( port && (port->port.count || port->netcount) &&
40257+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
40258 port->pending_bh && !port->bh_running &&
40259 !port->bh_requested ) {
40260 if ( debug_level >= DEBUG_LEVEL_ISR )
40261@@ -3330,12 +3330,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
40262
40263 if (debug_level >= DEBUG_LEVEL_INFO)
40264 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
40265- __FILE__,__LINE__, tty->driver->name, port->count );
40266+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
40267
40268 spin_lock_irqsave(&info->lock, flags);
40269 if (!tty_hung_up_p(filp)) {
40270 extra_count = true;
40271- port->count--;
40272+ atomic_dec(&port->count);
40273 }
40274 spin_unlock_irqrestore(&info->lock, flags);
40275 port->blocked_open++;
40276@@ -3364,7 +3364,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
40277
40278 if (debug_level >= DEBUG_LEVEL_INFO)
40279 printk("%s(%d):%s block_til_ready() count=%d\n",
40280- __FILE__,__LINE__, tty->driver->name, port->count );
40281+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
40282
40283 tty_unlock(tty);
40284 schedule();
40285@@ -3375,12 +3375,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
40286 remove_wait_queue(&port->open_wait, &wait);
40287
40288 if (extra_count)
40289- port->count++;
40290+ atomic_inc(&port->count);
40291 port->blocked_open--;
40292
40293 if (debug_level >= DEBUG_LEVEL_INFO)
40294 printk("%s(%d):%s block_til_ready() after, count=%d\n",
40295- __FILE__,__LINE__, tty->driver->name, port->count );
40296+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
40297
40298 if (!retval)
40299 port->flags |= ASYNC_NORMAL_ACTIVE;
40300diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
40301index 16ee6ce..bfcac57 100644
40302--- a/drivers/tty/sysrq.c
40303+++ b/drivers/tty/sysrq.c
40304@@ -866,7 +866,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
40305 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
40306 size_t count, loff_t *ppos)
40307 {
40308- if (count) {
40309+ if (count && capable(CAP_SYS_ADMIN)) {
40310 char c;
40311
40312 if (get_user(c, buf))
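
Note on the sysrq hunk: the change gates /proc/sysrq-trigger on CAP_SYS_ADMIN rather than on file permissions alone, and unprivileged writes are silently consumed instead of acted on. The shape of the check, sketched with a stubbed capable():

#define CAP_SYS_ADMIN_DEMO 21

static int capable_demo(int cap) { return 0; /* would query credentials */ }

static long demo_sysrq_write(const char *buf, unsigned long count)
{
	if (count && capable_demo(CAP_SYS_ADMIN_DEMO)) {
		/* act on buf[0] for privileged callers only */
	}
	return count;	/* the write is consumed either way */
}
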
40313diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
40314index 2ea176b..2877bc8 100644
40315--- a/drivers/tty/tty_io.c
40316+++ b/drivers/tty/tty_io.c
40317@@ -3395,7 +3395,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
40318
40319 void tty_default_fops(struct file_operations *fops)
40320 {
40321- *fops = tty_fops;
40322+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
40323 }
40324
40325 /*
40326diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
40327index 0f2a2c5..471e228 100644
40328--- a/drivers/tty/tty_ldisc.c
40329+++ b/drivers/tty/tty_ldisc.c
40330@@ -56,7 +56,7 @@ static void put_ldisc(struct tty_ldisc *ld)
40331 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
40332 struct tty_ldisc_ops *ldo = ld->ops;
40333
40334- ldo->refcount--;
40335+ atomic_dec(&ldo->refcount);
40336 module_put(ldo->owner);
40337 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
40338
40339@@ -91,7 +91,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
40340 spin_lock_irqsave(&tty_ldisc_lock, flags);
40341 tty_ldiscs[disc] = new_ldisc;
40342 new_ldisc->num = disc;
40343- new_ldisc->refcount = 0;
40344+ atomic_set(&new_ldisc->refcount, 0);
40345 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
40346
40347 return ret;
40348@@ -119,7 +119,7 @@ int tty_unregister_ldisc(int disc)
40349 return -EINVAL;
40350
40351 spin_lock_irqsave(&tty_ldisc_lock, flags);
40352- if (tty_ldiscs[disc]->refcount)
40353+ if (atomic_read(&tty_ldiscs[disc]->refcount))
40354 ret = -EBUSY;
40355 else
40356 tty_ldiscs[disc] = NULL;
40357@@ -140,7 +140,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
40358 if (ldops) {
40359 ret = ERR_PTR(-EAGAIN);
40360 if (try_module_get(ldops->owner)) {
40361- ldops->refcount++;
40362+ atomic_inc(&ldops->refcount);
40363 ret = ldops;
40364 }
40365 }
40366@@ -153,7 +153,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
40367 unsigned long flags;
40368
40369 spin_lock_irqsave(&tty_ldisc_lock, flags);
40370- ldops->refcount--;
40371+ atomic_dec(&ldops->refcount);
40372 module_put(ldops->owner);
40373 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
40374 }
40375diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
40376index d7bdd8d..feaef30 100644
40377--- a/drivers/tty/tty_port.c
40378+++ b/drivers/tty/tty_port.c
40379@@ -202,7 +202,7 @@ void tty_port_hangup(struct tty_port *port)
40380 unsigned long flags;
40381
40382 spin_lock_irqsave(&port->lock, flags);
40383- port->count = 0;
40384+ atomic_set(&port->count, 0);
40385 port->flags &= ~ASYNC_NORMAL_ACTIVE;
40386 if (port->tty) {
40387 set_bit(TTY_IO_ERROR, &port->tty->flags);
40388@@ -328,7 +328,7 @@ int tty_port_block_til_ready(struct tty_port *port,
40389 /* The port lock protects the port counts */
40390 spin_lock_irqsave(&port->lock, flags);
40391 if (!tty_hung_up_p(filp))
40392- port->count--;
40393+ atomic_dec(&port->count);
40394 port->blocked_open++;
40395 spin_unlock_irqrestore(&port->lock, flags);
40396
40397@@ -370,7 +370,7 @@ int tty_port_block_til_ready(struct tty_port *port,
40398 we must not mess that up further */
40399 spin_lock_irqsave(&port->lock, flags);
40400 if (!tty_hung_up_p(filp))
40401- port->count++;
40402+ atomic_inc(&port->count);
40403 port->blocked_open--;
40404 if (retval == 0)
40405 port->flags |= ASYNC_NORMAL_ACTIVE;
40406@@ -390,19 +390,19 @@ int tty_port_close_start(struct tty_port *port,
40407 return 0;
40408 }
40409
40410- if (tty->count == 1 && port->count != 1) {
40411+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
40412 printk(KERN_WARNING
40413 "tty_port_close_start: tty->count = 1 port count = %d.\n",
40414- port->count);
40415- port->count = 1;
40416+ atomic_read(&port->count));
40417+ atomic_set(&port->count, 1);
40418 }
40419- if (--port->count < 0) {
40420+ if (atomic_dec_return(&port->count) < 0) {
40421 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
40422- port->count);
40423- port->count = 0;
40424+ atomic_read(&port->count));
40425+ atomic_set(&port->count, 0);
40426 }
40427
40428- if (port->count) {
40429+ if (atomic_read(&port->count)) {
40430 spin_unlock_irqrestore(&port->lock, flags);
40431 if (port->ops->drop)
40432 port->ops->drop(port);
40433@@ -500,7 +500,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
40434 {
40435 spin_lock_irq(&port->lock);
40436 if (!tty_hung_up_p(filp))
40437- ++port->count;
40438+ atomic_inc(&port->count);
40439 spin_unlock_irq(&port->lock);
40440 tty_port_tty_set(port, tty);
40441
40442diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
40443index 681765b..d3ccdf2 100644
40444--- a/drivers/tty/vt/keyboard.c
40445+++ b/drivers/tty/vt/keyboard.c
40446@@ -660,6 +660,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
40447 kbd->kbdmode == VC_OFF) &&
40448 value != KVAL(K_SAK))
40449 return; /* SAK is allowed even in raw mode */
40450+
40451+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
40452+ {
40453+ void *func = fn_handler[value];
40454+ if (func == fn_show_state || func == fn_show_ptregs ||
40455+ func == fn_show_mem)
40456+ return;
40457+ }
40458+#endif
40459+
40460 fn_handler[value](vc);
40461 }
40462
40463@@ -1808,9 +1818,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
40464 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
40465 return -EFAULT;
40466
40467- if (!capable(CAP_SYS_TTY_CONFIG))
40468- perm = 0;
40469-
40470 switch (cmd) {
40471 case KDGKBENT:
40472 /* Ensure another thread doesn't free it under us */
40473@@ -1825,6 +1832,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
40474 spin_unlock_irqrestore(&kbd_event_lock, flags);
40475 return put_user(val, &user_kbe->kb_value);
40476 case KDSKBENT:
40477+ if (!capable(CAP_SYS_TTY_CONFIG))
40478+ perm = 0;
40479+
40480 if (!perm)
40481 return -EPERM;
40482 if (!i && v == K_NOSUCHMAP) {
40483@@ -1915,9 +1925,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
40484 int i, j, k;
40485 int ret;
40486
40487- if (!capable(CAP_SYS_TTY_CONFIG))
40488- perm = 0;
40489-
40490 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
40491 if (!kbs) {
40492 ret = -ENOMEM;
40493@@ -1951,6 +1958,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
40494 kfree(kbs);
40495 return ((p && *p) ? -EOVERFLOW : 0);
40496 case KDSKBSENT:
40497+ if (!capable(CAP_SYS_TTY_CONFIG))
40498+ perm = 0;
40499+
40500 if (!perm) {
40501 ret = -EPERM;
40502 goto reterr;
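
Two separate changes sit in the keyboard.c hunks: the grsec-only filter in k_spec() keeps the show_state/show_ptregs/show_mem console keys from dumping kernel state while /proc restrictions are active, and the capable(CAP_SYS_TTY_CONFIG) demotion of perm moves from the top of the ioctl handlers into the KDSKBENT/KDSKBSENT write cases, so read-only queries no longer perform a capability check at all. A rough userspace model of that second change (the command names and capability stub are hypothetical):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum cmd { GET_ENTRY, SET_ENTRY };

/* stand-in for capable(CAP_SYS_TTY_CONFIG) */
static bool capable_tty_config(void) { return false; }

static int do_ioctl(enum cmd cmd, bool perm)
{
	switch (cmd) {
	case GET_ENTRY:
		return 0;	/* reads never consult the capability */
	case SET_ENTRY:
		if (!capable_tty_config())
			perm = false;	/* check only on the write path */
		if (!perm)
			return -EPERM;
		return 0;
	}
	return -EINVAL;
}

int main(void)
{
	printf("get: %d, set: %d\n",
	       do_ioctl(GET_ENTRY, true), do_ioctl(SET_ENTRY, true));
	return 0;
}
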
40503diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
40504index 5110f36..8dc0a74 100644
40505--- a/drivers/uio/uio.c
40506+++ b/drivers/uio/uio.c
40507@@ -25,6 +25,7 @@
40508 #include <linux/kobject.h>
40509 #include <linux/cdev.h>
40510 #include <linux/uio_driver.h>
40511+#include <asm/local.h>
40512
40513 #define UIO_MAX_DEVICES (1U << MINORBITS)
40514
40515@@ -32,10 +33,10 @@ struct uio_device {
40516 struct module *owner;
40517 struct device *dev;
40518 int minor;
40519- atomic_t event;
40520+ atomic_unchecked_t event;
40521 struct fasync_struct *async_queue;
40522 wait_queue_head_t wait;
40523- int vma_count;
40524+ local_t vma_count;
40525 struct uio_info *info;
40526 struct kobject *map_dir;
40527 struct kobject *portio_dir;
40528@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
40529 struct device_attribute *attr, char *buf)
40530 {
40531 struct uio_device *idev = dev_get_drvdata(dev);
40532- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
40533+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
40534 }
40535
40536 static struct device_attribute uio_class_attributes[] = {
40537@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
40538 {
40539 struct uio_device *idev = info->uio_dev;
40540
40541- atomic_inc(&idev->event);
40542+ atomic_inc_unchecked(&idev->event);
40543 wake_up_interruptible(&idev->wait);
40544 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
40545 }
40546@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
40547 }
40548
40549 listener->dev = idev;
40550- listener->event_count = atomic_read(&idev->event);
40551+ listener->event_count = atomic_read_unchecked(&idev->event);
40552 filep->private_data = listener;
40553
40554 if (idev->info->open) {
40555@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
40556 return -EIO;
40557
40558 poll_wait(filep, &idev->wait, wait);
40559- if (listener->event_count != atomic_read(&idev->event))
40560+ if (listener->event_count != atomic_read_unchecked(&idev->event))
40561 return POLLIN | POLLRDNORM;
40562 return 0;
40563 }
40564@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
40565 do {
40566 set_current_state(TASK_INTERRUPTIBLE);
40567
40568- event_count = atomic_read(&idev->event);
40569+ event_count = atomic_read_unchecked(&idev->event);
40570 if (event_count != listener->event_count) {
40571 if (copy_to_user(buf, &event_count, count))
40572 retval = -EFAULT;
40573@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
40574 static void uio_vma_open(struct vm_area_struct *vma)
40575 {
40576 struct uio_device *idev = vma->vm_private_data;
40577- idev->vma_count++;
40578+ local_inc(&idev->vma_count);
40579 }
40580
40581 static void uio_vma_close(struct vm_area_struct *vma)
40582 {
40583 struct uio_device *idev = vma->vm_private_data;
40584- idev->vma_count--;
40585+ local_dec(&idev->vma_count);
40586 }
40587
40588 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
40589@@ -819,7 +820,7 @@ int __uio_register_device(struct module *owner,
40590 idev->owner = owner;
40591 idev->info = info;
40592 init_waitqueue_head(&idev->wait);
40593- atomic_set(&idev->event, 0);
40594+ atomic_set_unchecked(&idev->event, 0);
40595
40596 ret = uio_get_minor(idev);
40597 if (ret)
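
In uio.c the event counter turns into atomic_unchecked_t and vma_count into local_t. Under PaX's REFCOUNT feature, ordinary atomic_t operations trap on overflow, so counters that merely count occurrences, and may legitimately wrap, get the unchecked variant, while vma_count trades an unprotected int for cheap atomic local ops. A toy model of the checked/unchecked split (the two typedefs and the abort() stand in for the real instrumentation):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* "checked" counters die on overflow, the way PAX_REFCOUNT traps
 * atomic_t wraparound; "unchecked" counters are allowed to wrap */
typedef struct { int v; } checked_t;
typedef struct { unsigned v; } unchecked_t;

static void checked_inc(checked_t *c)
{
	if (c->v == INT_MAX)
		abort();	/* a wrapping refcount is a bug */
	c->v++;
}

static void unchecked_inc(unchecked_t *c)
{
	c->v++;			/* statistics may wrap harmlessly */
}

int main(void)
{
	checked_t ref = { 0 };
	unchecked_t events = { UINT_MAX };

	checked_inc(&ref);
	unchecked_inc(&events);	/* wraps to 0 */
	printf("ref=%d events=%u\n", ref.v, events.v);
	return 0;
}
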
40598diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
40599index b7eb86a..36d28af 100644
40600--- a/drivers/usb/atm/cxacru.c
40601+++ b/drivers/usb/atm/cxacru.c
40602@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
40603 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
40604 if (ret < 2)
40605 return -EINVAL;
40606- if (index < 0 || index > 0x7f)
40607+ if (index > 0x7f)
40608 return -EINVAL;
40609 pos += tmp;
40610
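
The cxacru hunk deletes dead code rather than loosening a check: index is filled by a %x conversion, which stores an unsigned int, so "index < 0" can never be true and the upper-bound test alone does the work. The same cleanup reappears below in fbcmap.c and fbmem.c. A small illustration (the function name is made up):

#include <stdio.h>

/* with an unsigned index the "< 0" arm is unreachable, and some
 * compilers warn about it (-Wtype-limits); the range check suffices */
static int validate_index(unsigned int index)
{
	if (index > 0x7f)	/* was: index < 0 || index > 0x7f */
		return -1;
	return 0;
}

int main(void)
{
	printf("%d %d\n", validate_index(0x10), validate_index(0x100));
	return 0;
}
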
40611diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
40612index 35f10bf..6a38a0b 100644
40613--- a/drivers/usb/atm/usbatm.c
40614+++ b/drivers/usb/atm/usbatm.c
40615@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
40616 if (printk_ratelimit())
40617 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
40618 __func__, vpi, vci);
40619- atomic_inc(&vcc->stats->rx_err);
40620+ atomic_inc_unchecked(&vcc->stats->rx_err);
40621 return;
40622 }
40623
40624@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
40625 if (length > ATM_MAX_AAL5_PDU) {
40626 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
40627 __func__, length, vcc);
40628- atomic_inc(&vcc->stats->rx_err);
40629+ atomic_inc_unchecked(&vcc->stats->rx_err);
40630 goto out;
40631 }
40632
40633@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
40634 if (sarb->len < pdu_length) {
40635 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
40636 __func__, pdu_length, sarb->len, vcc);
40637- atomic_inc(&vcc->stats->rx_err);
40638+ atomic_inc_unchecked(&vcc->stats->rx_err);
40639 goto out;
40640 }
40641
40642 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
40643 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
40644 __func__, vcc);
40645- atomic_inc(&vcc->stats->rx_err);
40646+ atomic_inc_unchecked(&vcc->stats->rx_err);
40647 goto out;
40648 }
40649
40650@@ -389,7 +389,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
40651 if (printk_ratelimit())
40652 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
40653 __func__, length);
40654- atomic_inc(&vcc->stats->rx_drop);
40655+ atomic_inc_unchecked(&vcc->stats->rx_drop);
40656 goto out;
40657 }
40658
40659@@ -417,7 +417,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
40660
40661 vcc->push(vcc, skb);
40662
40663- atomic_inc(&vcc->stats->rx);
40664+ atomic_inc_unchecked(&vcc->stats->rx);
40665 out:
40666 skb_trim(sarb, 0);
40667 }
40668@@ -623,7 +623,7 @@ static void usbatm_tx_process(unsigned long data)
40669 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
40670
40671 usbatm_pop(vcc, skb);
40672- atomic_inc(&vcc->stats->tx);
40673+ atomic_inc_unchecked(&vcc->stats->tx);
40674
40675 skb = skb_dequeue(&instance->sndqueue);
40676 }
40677@@ -779,11 +779,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
40678 if (!left--)
40679 return sprintf(page,
40680 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
40681- atomic_read(&atm_dev->stats.aal5.tx),
40682- atomic_read(&atm_dev->stats.aal5.tx_err),
40683- atomic_read(&atm_dev->stats.aal5.rx),
40684- atomic_read(&atm_dev->stats.aal5.rx_err),
40685- atomic_read(&atm_dev->stats.aal5.rx_drop));
40686+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
40687+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
40688+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
40689+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
40690+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
40691
40692 if (!left--) {
40693 if (instance->disconnected)
40694diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
40695index f460de3..95ba1f6 100644
40696--- a/drivers/usb/core/devices.c
40697+++ b/drivers/usb/core/devices.c
40698@@ -126,7 +126,7 @@ static const char format_endpt[] =
40699 * time it gets called.
40700 */
40701 static struct device_connect_event {
40702- atomic_t count;
40703+ atomic_unchecked_t count;
40704 wait_queue_head_t wait;
40705 } device_event = {
40706 .count = ATOMIC_INIT(1),
40707@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
40708
40709 void usbfs_conn_disc_event(void)
40710 {
40711- atomic_add(2, &device_event.count);
40712+ atomic_add_unchecked(2, &device_event.count);
40713 wake_up(&device_event.wait);
40714 }
40715
40716@@ -647,7 +647,7 @@ static unsigned int usb_device_poll(struct file *file,
40717
40718 poll_wait(file, &device_event.wait, wait);
40719
40720- event_count = atomic_read(&device_event.count);
40721+ event_count = atomic_read_unchecked(&device_event.count);
40722 if (file->f_version != event_count) {
40723 file->f_version = event_count;
40724 return POLLIN | POLLRDNORM;
40725diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
40726index f034716..aed0368 100644
40727--- a/drivers/usb/core/hcd.c
40728+++ b/drivers/usb/core/hcd.c
40729@@ -1478,7 +1478,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
40730 */
40731 usb_get_urb(urb);
40732 atomic_inc(&urb->use_count);
40733- atomic_inc(&urb->dev->urbnum);
40734+ atomic_inc_unchecked(&urb->dev->urbnum);
40735 usbmon_urb_submit(&hcd->self, urb);
40736
40737 /* NOTE requirements on root-hub callers (usbfs and the hub
40738@@ -1505,7 +1505,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
40739 urb->hcpriv = NULL;
40740 INIT_LIST_HEAD(&urb->urb_list);
40741 atomic_dec(&urb->use_count);
40742- atomic_dec(&urb->dev->urbnum);
40743+ atomic_dec_unchecked(&urb->dev->urbnum);
40744 if (atomic_read(&urb->reject))
40745 wake_up(&usb_kill_urb_queue);
40746 usb_put_urb(urb);
40747diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
40748index 818e4a0..0fc9589 100644
40749--- a/drivers/usb/core/sysfs.c
40750+++ b/drivers/usb/core/sysfs.c
40751@@ -226,7 +226,7 @@ show_urbnum(struct device *dev, struct device_attribute *attr, char *buf)
40752 struct usb_device *udev;
40753
40754 udev = to_usb_device(dev);
40755- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
40756+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
40757 }
40758 static DEVICE_ATTR(urbnum, S_IRUGO, show_urbnum, NULL);
40759
40760diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
40761index cd8fb44..17fbe0c 100644
40762--- a/drivers/usb/core/usb.c
40763+++ b/drivers/usb/core/usb.c
40764@@ -397,7 +397,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
40765 set_dev_node(&dev->dev, dev_to_node(bus->controller));
40766 dev->state = USB_STATE_ATTACHED;
40767 dev->lpm_disable_count = 1;
40768- atomic_set(&dev->urbnum, 0);
40769+ atomic_set_unchecked(&dev->urbnum, 0);
40770
40771 INIT_LIST_HEAD(&dev->ep0.urb_list);
40772 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
40773diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
40774index 4bfa78a..902bfbd 100644
40775--- a/drivers/usb/early/ehci-dbgp.c
40776+++ b/drivers/usb/early/ehci-dbgp.c
40777@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
40778
40779 #ifdef CONFIG_KGDB
40780 static struct kgdb_io kgdbdbgp_io_ops;
40781-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
40782+static struct kgdb_io kgdbdbgp_io_ops_console;
40783+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
40784 #else
40785 #define dbgp_kgdb_mode (0)
40786 #endif
40787@@ -1047,6 +1048,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
40788 .write_char = kgdbdbgp_write_char,
40789 };
40790
40791+static struct kgdb_io kgdbdbgp_io_ops_console = {
40792+ .name = "kgdbdbgp",
40793+ .read_char = kgdbdbgp_read_char,
40794+ .write_char = kgdbdbgp_write_char,
40795+ .is_console = 1
40796+};
40797+
40798 static int kgdbdbgp_wait_time;
40799
40800 static int __init kgdbdbgp_parse_config(char *str)
40801@@ -1062,8 +1070,10 @@ static int __init kgdbdbgp_parse_config(char *str)
40802 ptr++;
40803 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
40804 }
40805- kgdb_register_io_module(&kgdbdbgp_io_ops);
40806- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
40807+ if (early_dbgp_console.index != -1)
40808+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
40809+ else
40810+ kgdb_register_io_module(&kgdbdbgp_io_ops);
40811
40812 return 0;
40813 }
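
Instead of registering one kgdb_io and then flipping its is_console field afterwards, the ehci-dbgp hunk builds a second, console-flavoured copy up front and registers whichever one applies. Mutating a registered ops structure at runtime is precisely what the patch's constification work tries to avoid, since it forces the object to remain writable. A compact sketch of the two-variant idiom (struct and function names are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* two pre-built variants instead of patching a field at runtime,
 * so both objects can live in read-only memory */
struct io_ops { const char *name; bool is_console; };

static const struct io_ops dbgp_ops         = { "kgdbdbgp", false };
static const struct io_ops dbgp_ops_console = { "kgdbdbgp", true  };

static void register_io(const struct io_ops *ops)
{
	printf("registered %s (console=%d)\n", ops->name, ops->is_console);
}

int main(void)
{
	int console_index = -1;	/* stand-in for early_dbgp_console.index */

	if (console_index != -1)
		register_io(&dbgp_ops_console);
	else
		register_io(&dbgp_ops);
	return 0;
}
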
40814diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
40815index f173952..83d6ec0 100644
40816--- a/drivers/usb/gadget/u_serial.c
40817+++ b/drivers/usb/gadget/u_serial.c
40818@@ -735,9 +735,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
40819 spin_lock_irq(&port->port_lock);
40820
40821 /* already open? Great. */
40822- if (port->port.count) {
40823+ if (atomic_read(&port->port.count)) {
40824 status = 0;
40825- port->port.count++;
40826+ atomic_inc(&port->port.count);
40827
40828 /* currently opening/closing? wait ... */
40829 } else if (port->openclose) {
40830@@ -796,7 +796,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
40831 tty->driver_data = port;
40832 port->port.tty = tty;
40833
40834- port->port.count = 1;
40835+ atomic_set(&port->port.count, 1);
40836 port->openclose = false;
40837
40838 /* if connected, start the I/O stream */
40839@@ -838,11 +838,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
40840
40841 spin_lock_irq(&port->port_lock);
40842
40843- if (port->port.count != 1) {
40844- if (port->port.count == 0)
40845+ if (atomic_read(&port->port.count) != 1) {
40846+ if (atomic_read(&port->port.count) == 0)
40847 WARN_ON(1);
40848 else
40849- --port->port.count;
40850+ atomic_dec(&port->port.count);
40851 goto exit;
40852 }
40853
40854@@ -852,7 +852,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
40855 * and sleep if necessary
40856 */
40857 port->openclose = true;
40858- port->port.count = 0;
40859+ atomic_set(&port->port.count, 0);
40860
40861 gser = port->port_usb;
40862 if (gser && gser->disconnect)
40863@@ -1157,7 +1157,7 @@ static int gs_closed(struct gs_port *port)
40864 int cond;
40865
40866 spin_lock_irq(&port->port_lock);
40867- cond = (port->port.count == 0) && !port->openclose;
40868+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
40869 spin_unlock_irq(&port->port_lock);
40870 return cond;
40871 }
40872@@ -1270,7 +1270,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
40873 /* if it's already open, start I/O ... and notify the serial
40874 * protocol about open/close status (connect/disconnect).
40875 */
40876- if (port->port.count) {
40877+ if (atomic_read(&port->port.count)) {
40878 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
40879 gs_start_io(port);
40880 if (gser->connect)
40881@@ -1317,7 +1317,7 @@ void gserial_disconnect(struct gserial *gser)
40882
40883 port->port_usb = NULL;
40884 gser->ioport = NULL;
40885- if (port->port.count > 0 || port->openclose) {
40886+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
40887 wake_up_interruptible(&port->drain_wait);
40888 if (port->port.tty)
40889 tty_hangup(port->port.tty);
40890@@ -1333,7 +1333,7 @@ void gserial_disconnect(struct gserial *gser)
40891
40892 /* finally, free any unused/unusable I/O buffers */
40893 spin_lock_irqsave(&port->port_lock, flags);
40894- if (port->port.count == 0 && !port->openclose)
40895+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
40896 gs_buf_free(&port->port_write_buf);
40897 gs_free_requests(gser->out, &port->read_pool, NULL);
40898 gs_free_requests(gser->out, &port->read_queue, NULL);
40899diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
40900index 5f3bcd3..bfca43f 100644
40901--- a/drivers/usb/serial/console.c
40902+++ b/drivers/usb/serial/console.c
40903@@ -124,7 +124,7 @@ static int usb_console_setup(struct console *co, char *options)
40904
40905 info->port = port;
40906
40907- ++port->port.count;
40908+ atomic_inc(&port->port.count);
40909 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
40910 if (serial->type->set_termios) {
40911 /*
40912@@ -174,7 +174,7 @@ static int usb_console_setup(struct console *co, char *options)
40913 }
40914 /* Now that any required fake tty operations are completed restore
40915 * the tty port count */
40916- --port->port.count;
40917+ atomic_dec(&port->port.count);
40918 /* The console is special in terms of closing the device so
40919 * indicate this port is now acting as a system console. */
40920 port->port.console = 1;
40921@@ -187,7 +187,7 @@ static int usb_console_setup(struct console *co, char *options)
40922 free_tty:
40923 kfree(tty);
40924 reset_open_count:
40925- port->port.count = 0;
40926+ atomic_set(&port->port.count, 0);
40927 usb_autopm_put_interface(serial->interface);
40928 error_get_interface:
40929 usb_serial_put(serial);
40930diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
40931index d6bea3e..60b250e 100644
40932--- a/drivers/usb/wusbcore/wa-hc.h
40933+++ b/drivers/usb/wusbcore/wa-hc.h
40934@@ -192,7 +192,7 @@ struct wahc {
40935 struct list_head xfer_delayed_list;
40936 spinlock_t xfer_list_lock;
40937 struct work_struct xfer_work;
40938- atomic_t xfer_id_count;
40939+ atomic_unchecked_t xfer_id_count;
40940 };
40941
40942
40943@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
40944 INIT_LIST_HEAD(&wa->xfer_delayed_list);
40945 spin_lock_init(&wa->xfer_list_lock);
40946 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
40947- atomic_set(&wa->xfer_id_count, 1);
40948+ atomic_set_unchecked(&wa->xfer_id_count, 1);
40949 }
40950
40951 /**
40952diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
40953index 57c01ab..8a05959 100644
40954--- a/drivers/usb/wusbcore/wa-xfer.c
40955+++ b/drivers/usb/wusbcore/wa-xfer.c
40956@@ -296,7 +296,7 @@ out:
40957 */
40958 static void wa_xfer_id_init(struct wa_xfer *xfer)
40959 {
40960- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
40961+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
40962 }
40963
40964 /*
40965diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
40966index dedaf81..b0f11ab 100644
40967--- a/drivers/vhost/vhost.c
40968+++ b/drivers/vhost/vhost.c
40969@@ -634,7 +634,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
40970 return 0;
40971 }
40972
40973-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
40974+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
40975 {
40976 struct file *eventfp, *filep = NULL;
40977 bool pollstart = false, pollstop = false;
40978diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
40979index 0fefa84..7a9d581 100644
40980--- a/drivers/video/aty/aty128fb.c
40981+++ b/drivers/video/aty/aty128fb.c
40982@@ -149,7 +149,7 @@ enum {
40983 };
40984
40985 /* Must match above enum */
40986-static char * const r128_family[] __devinitconst = {
40987+static const char * const r128_family[] __devinitconst = {
40988 "AGP",
40989 "PCI",
40990 "PRO AGP",
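
The aty128fb change adds a second const, making r128_family an array of const pointers to const strings; with nothing writable left in it, the whole table can go into read-only data, in line with the patch's broader constification of driver tables. The declaration on its own:

#include <stdio.h>

/* "char * const" protects only the pointers; the extra const also
 * forbids writing through them, so the array can be fully read-only */
static const char * const r128_family[] = { "AGP", "PCI", "PRO AGP" };

int main(void)
{
	for (unsigned i = 0; i < sizeof r128_family / sizeof *r128_family; i++)
		printf("%s\n", r128_family[i]);
	return 0;
}
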
40991diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
40992index 5c3960d..15cf8fc 100644
40993--- a/drivers/video/fbcmap.c
40994+++ b/drivers/video/fbcmap.c
40995@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
40996 rc = -ENODEV;
40997 goto out;
40998 }
40999- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
41000- !info->fbops->fb_setcmap)) {
41001+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
41002 rc = -EINVAL;
41003 goto out1;
41004 }
41005diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
41006index 3ff0105..7589d98 100644
41007--- a/drivers/video/fbmem.c
41008+++ b/drivers/video/fbmem.c
41009@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
41010 image->dx += image->width + 8;
41011 }
41012 } else if (rotate == FB_ROTATE_UD) {
41013- for (x = 0; x < num && image->dx >= 0; x++) {
41014+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
41015 info->fbops->fb_imageblit(info, image);
41016 image->dx -= image->width + 8;
41017 }
41018@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
41019 image->dy += image->height + 8;
41020 }
41021 } else if (rotate == FB_ROTATE_CCW) {
41022- for (x = 0; x < num && image->dy >= 0; x++) {
41023+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
41024 info->fbops->fb_imageblit(info, image);
41025 image->dy -= image->height + 8;
41026 }
41027@@ -1166,7 +1166,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
41028 return -EFAULT;
41029 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
41030 return -EINVAL;
41031- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
41032+ if (con2fb.framebuffer >= FB_MAX)
41033 return -EINVAL;
41034 if (!registered_fb[con2fb.framebuffer])
41035 request_module("fb%d", con2fb.framebuffer);
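
In fb_do_show_logo() the rotation loops decrement image->dx and image->dy, which are unsigned (__u32 in struct fb_image), and use ">= 0" as the stop condition; for an unsigned value that test is always true, so the loop previously relied on the "x < num" bound alone. The added (__s32) casts restore the intended signed comparison once the subtraction wraps. A minimal demonstration of why the cast matters:

#include <stdint.h>
#include <stdio.h>

/* dx is unsigned, so "dx >= 0" never fails; casting to a signed
 * type lets the post-wrap value terminate the loop */
int main(void)
{
	uint32_t dx = 20;
	const uint32_t step = 16;	/* image->width + 8 in the driver */

	while ((int32_t)dx >= 0) {	/* was: dx >= 0 (always true) */
		printf("blit at dx=%u\n", dx);
		dx -= step;		/* wraps below zero */
	}
	return 0;
}
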
41036diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
41037index 7672d2e..b56437f 100644
41038--- a/drivers/video/i810/i810_accel.c
41039+++ b/drivers/video/i810/i810_accel.c
41040@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
41041 }
41042 }
41043 printk("ringbuffer lockup!!!\n");
41044+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
41045 i810_report_error(mmio);
41046 par->dev_flags |= LOCKUP;
41047 info->pixmap.scan_align = 1;
41048diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
41049index 3c14e43..eafa544 100644
41050--- a/drivers/video/logo/logo_linux_clut224.ppm
41051+++ b/drivers/video/logo/logo_linux_clut224.ppm
41052@@ -1,1604 +1,1123 @@
41053 P3
41054-# Standard 224-color Linux logo
41055 80 80
41056 255
[ raw 80x80 PPM pixel rows omitted here: per the hunk header above (@@ -1,1604 +1,1123 @@), this change strips the standard 224-color Linux logo bitmap and substitutes a different image; the RGB triplets themselves carry no reviewable content ]
42069-253 253 253 253 253 253 253 253 253 253 253 253
42070-253 253 253 253 253 253 234 234 234 10 10 10
42071- 2 2 6 2 2 6 22 22 22 14 14 14
42072- 2 2 6 2 2 6 2 2 6 2 2 6
42073- 2 2 6 66 66 66 62 62 62 22 22 22
42074- 6 6 6 0 0 0 0 0 0 0 0 0
42075- 0 0 0 0 0 0 0 0 0 0 0 0
42076- 0 0 0 0 0 0 0 0 0 0 0 0
42077- 0 0 0 0 0 0 0 0 0 0 0 0
42078- 0 0 0 0 0 0 0 0 0 0 0 0
42079- 0 0 0 0 0 0 0 0 0 0 0 0
42080- 0 0 0 0 0 0 6 6 6 18 18 18
42081- 50 50 50 74 74 74 2 2 6 2 2 6
42082- 14 14 14 70 70 70 34 34 34 62 62 62
42083-250 250 250 253 253 253 253 253 253 253 253 253
42084-253 253 253 253 253 253 253 253 253 253 253 253
42085-253 253 253 253 253 253 231 231 231 246 246 246
42086-253 253 253 253 253 253 253 253 253 253 253 253
42087-253 253 253 253 253 253 253 253 253 253 253 253
42088-253 253 253 253 253 253 253 253 253 253 253 253
42089-253 253 253 253 253 253 253 253 253 253 253 253
42090-253 253 253 253 253 253 234 234 234 14 14 14
42091- 2 2 6 2 2 6 30 30 30 2 2 6
42092- 2 2 6 2 2 6 2 2 6 2 2 6
42093- 2 2 6 66 66 66 62 62 62 22 22 22
42094- 6 6 6 0 0 0 0 0 0 0 0 0
42095- 0 0 0 0 0 0 0 0 0 0 0 0
42096- 0 0 0 0 0 0 0 0 0 0 0 0
42097- 0 0 0 0 0 0 0 0 0 0 0 0
42098- 0 0 0 0 0 0 0 0 0 0 0 0
42099- 0 0 0 0 0 0 0 0 0 0 0 0
42100- 0 0 0 0 0 0 6 6 6 18 18 18
42101- 54 54 54 62 62 62 2 2 6 2 2 6
42102- 2 2 6 30 30 30 46 46 46 70 70 70
42103-250 250 250 253 253 253 253 253 253 253 253 253
42104-253 253 253 253 253 253 253 253 253 253 253 253
42105-253 253 253 253 253 253 231 231 231 246 246 246
42106-253 253 253 253 253 253 253 253 253 253 253 253
42107-253 253 253 253 253 253 253 253 253 253 253 253
42108-253 253 253 253 253 253 253 253 253 253 253 253
42109-253 253 253 253 253 253 253 253 253 253 253 253
42110-253 253 253 253 253 253 226 226 226 10 10 10
42111- 2 2 6 6 6 6 30 30 30 2 2 6
42112- 2 2 6 2 2 6 2 2 6 2 2 6
42113- 2 2 6 66 66 66 58 58 58 22 22 22
42114- 6 6 6 0 0 0 0 0 0 0 0 0
42115- 0 0 0 0 0 0 0 0 0 0 0 0
42116- 0 0 0 0 0 0 0 0 0 0 0 0
42117- 0 0 0 0 0 0 0 0 0 0 0 0
42118- 0 0 0 0 0 0 0 0 0 0 0 0
42119- 0 0 0 0 0 0 0 0 0 0 0 0
42120- 0 0 0 0 0 0 6 6 6 22 22 22
42121- 58 58 58 62 62 62 2 2 6 2 2 6
42122- 2 2 6 2 2 6 30 30 30 78 78 78
42123-250 250 250 253 253 253 253 253 253 253 253 253
42124-253 253 253 253 253 253 253 253 253 253 253 253
42125-253 253 253 253 253 253 231 231 231 246 246 246
42126-253 253 253 253 253 253 253 253 253 253 253 253
42127-253 253 253 253 253 253 253 253 253 253 253 253
42128-253 253 253 253 253 253 253 253 253 253 253 253
42129-253 253 253 253 253 253 253 253 253 253 253 253
42130-253 253 253 253 253 253 206 206 206 2 2 6
42131- 22 22 22 34 34 34 18 14 6 22 22 22
42132- 26 26 26 18 18 18 6 6 6 2 2 6
42133- 2 2 6 82 82 82 54 54 54 18 18 18
42134- 6 6 6 0 0 0 0 0 0 0 0 0
42135- 0 0 0 0 0 0 0 0 0 0 0 0
42136- 0 0 0 0 0 0 0 0 0 0 0 0
42137- 0 0 0 0 0 0 0 0 0 0 0 0
42138- 0 0 0 0 0 0 0 0 0 0 0 0
42139- 0 0 0 0 0 0 0 0 0 0 0 0
42140- 0 0 0 0 0 0 6 6 6 26 26 26
42141- 62 62 62 106 106 106 74 54 14 185 133 11
42142-210 162 10 121 92 8 6 6 6 62 62 62
42143-238 238 238 253 253 253 253 253 253 253 253 253
42144-253 253 253 253 253 253 253 253 253 253 253 253
42145-253 253 253 253 253 253 231 231 231 246 246 246
42146-253 253 253 253 253 253 253 253 253 253 253 253
42147-253 253 253 253 253 253 253 253 253 253 253 253
42148-253 253 253 253 253 253 253 253 253 253 253 253
42149-253 253 253 253 253 253 253 253 253 253 253 253
42150-253 253 253 253 253 253 158 158 158 18 18 18
42151- 14 14 14 2 2 6 2 2 6 2 2 6
42152- 6 6 6 18 18 18 66 66 66 38 38 38
42153- 6 6 6 94 94 94 50 50 50 18 18 18
42154- 6 6 6 0 0 0 0 0 0 0 0 0
42155- 0 0 0 0 0 0 0 0 0 0 0 0
42156- 0 0 0 0 0 0 0 0 0 0 0 0
42157- 0 0 0 0 0 0 0 0 0 0 0 0
42158- 0 0 0 0 0 0 0 0 0 0 0 0
42159- 0 0 0 0 0 0 0 0 0 6 6 6
42160- 10 10 10 10 10 10 18 18 18 38 38 38
42161- 78 78 78 142 134 106 216 158 10 242 186 14
42162-246 190 14 246 190 14 156 118 10 10 10 10
42163- 90 90 90 238 238 238 253 253 253 253 253 253
42164-253 253 253 253 253 253 253 253 253 253 253 253
42165-253 253 253 253 253 253 231 231 231 250 250 250
42166-253 253 253 253 253 253 253 253 253 253 253 253
42167-253 253 253 253 253 253 253 253 253 253 253 253
42168-253 253 253 253 253 253 253 253 253 253 253 253
42169-253 253 253 253 253 253 253 253 253 246 230 190
42170-238 204 91 238 204 91 181 142 44 37 26 9
42171- 2 2 6 2 2 6 2 2 6 2 2 6
42172- 2 2 6 2 2 6 38 38 38 46 46 46
42173- 26 26 26 106 106 106 54 54 54 18 18 18
42174- 6 6 6 0 0 0 0 0 0 0 0 0
42175- 0 0 0 0 0 0 0 0 0 0 0 0
42176- 0 0 0 0 0 0 0 0 0 0 0 0
42177- 0 0 0 0 0 0 0 0 0 0 0 0
42178- 0 0 0 0 0 0 0 0 0 0 0 0
42179- 0 0 0 6 6 6 14 14 14 22 22 22
42180- 30 30 30 38 38 38 50 50 50 70 70 70
42181-106 106 106 190 142 34 226 170 11 242 186 14
42182-246 190 14 246 190 14 246 190 14 154 114 10
42183- 6 6 6 74 74 74 226 226 226 253 253 253
42184-253 253 253 253 253 253 253 253 253 253 253 253
42185-253 253 253 253 253 253 231 231 231 250 250 250
42186-253 253 253 253 253 253 253 253 253 253 253 253
42187-253 253 253 253 253 253 253 253 253 253 253 253
42188-253 253 253 253 253 253 253 253 253 253 253 253
42189-253 253 253 253 253 253 253 253 253 228 184 62
42190-241 196 14 241 208 19 232 195 16 38 30 10
42191- 2 2 6 2 2 6 2 2 6 2 2 6
42192- 2 2 6 6 6 6 30 30 30 26 26 26
42193-203 166 17 154 142 90 66 66 66 26 26 26
42194- 6 6 6 0 0 0 0 0 0 0 0 0
42195- 0 0 0 0 0 0 0 0 0 0 0 0
42196- 0 0 0 0 0 0 0 0 0 0 0 0
42197- 0 0 0 0 0 0 0 0 0 0 0 0
42198- 0 0 0 0 0 0 0 0 0 0 0 0
42199- 6 6 6 18 18 18 38 38 38 58 58 58
42200- 78 78 78 86 86 86 101 101 101 123 123 123
42201-175 146 61 210 150 10 234 174 13 246 186 14
42202-246 190 14 246 190 14 246 190 14 238 190 10
42203-102 78 10 2 2 6 46 46 46 198 198 198
42204-253 253 253 253 253 253 253 253 253 253 253 253
42205-253 253 253 253 253 253 234 234 234 242 242 242
42206-253 253 253 253 253 253 253 253 253 253 253 253
42207-253 253 253 253 253 253 253 253 253 253 253 253
42208-253 253 253 253 253 253 253 253 253 253 253 253
42209-253 253 253 253 253 253 253 253 253 224 178 62
42210-242 186 14 241 196 14 210 166 10 22 18 6
42211- 2 2 6 2 2 6 2 2 6 2 2 6
42212- 2 2 6 2 2 6 6 6 6 121 92 8
42213-238 202 15 232 195 16 82 82 82 34 34 34
42214- 10 10 10 0 0 0 0 0 0 0 0 0
42215- 0 0 0 0 0 0 0 0 0 0 0 0
42216- 0 0 0 0 0 0 0 0 0 0 0 0
42217- 0 0 0 0 0 0 0 0 0 0 0 0
42218- 0 0 0 0 0 0 0 0 0 0 0 0
42219- 14 14 14 38 38 38 70 70 70 154 122 46
42220-190 142 34 200 144 11 197 138 11 197 138 11
42221-213 154 11 226 170 11 242 186 14 246 190 14
42222-246 190 14 246 190 14 246 190 14 246 190 14
42223-225 175 15 46 32 6 2 2 6 22 22 22
42224-158 158 158 250 250 250 253 253 253 253 253 253
42225-253 253 253 253 253 253 253 253 253 253 253 253
42226-253 253 253 253 253 253 253 253 253 253 253 253
42227-253 253 253 253 253 253 253 253 253 253 253 253
42228-253 253 253 253 253 253 253 253 253 253 253 253
42229-253 253 253 250 250 250 242 242 242 224 178 62
42230-239 182 13 236 186 11 213 154 11 46 32 6
42231- 2 2 6 2 2 6 2 2 6 2 2 6
42232- 2 2 6 2 2 6 61 42 6 225 175 15
42233-238 190 10 236 186 11 112 100 78 42 42 42
42234- 14 14 14 0 0 0 0 0 0 0 0 0
42235- 0 0 0 0 0 0 0 0 0 0 0 0
42236- 0 0 0 0 0 0 0 0 0 0 0 0
42237- 0 0 0 0 0 0 0 0 0 0 0 0
42238- 0 0 0 0 0 0 0 0 0 6 6 6
42239- 22 22 22 54 54 54 154 122 46 213 154 11
42240-226 170 11 230 174 11 226 170 11 226 170 11
42241-236 178 12 242 186 14 246 190 14 246 190 14
42242-246 190 14 246 190 14 246 190 14 246 190 14
42243-241 196 14 184 144 12 10 10 10 2 2 6
42244- 6 6 6 116 116 116 242 242 242 253 253 253
42245-253 253 253 253 253 253 253 253 253 253 253 253
42246-253 253 253 253 253 253 253 253 253 253 253 253
42247-253 253 253 253 253 253 253 253 253 253 253 253
42248-253 253 253 253 253 253 253 253 253 253 253 253
42249-253 253 253 231 231 231 198 198 198 214 170 54
42250-236 178 12 236 178 12 210 150 10 137 92 6
42251- 18 14 6 2 2 6 2 2 6 2 2 6
42252- 6 6 6 70 47 6 200 144 11 236 178 12
42253-239 182 13 239 182 13 124 112 88 58 58 58
42254- 22 22 22 6 6 6 0 0 0 0 0 0
42255- 0 0 0 0 0 0 0 0 0 0 0 0
42256- 0 0 0 0 0 0 0 0 0 0 0 0
42257- 0 0 0 0 0 0 0 0 0 0 0 0
42258- 0 0 0 0 0 0 0 0 0 10 10 10
42259- 30 30 30 70 70 70 180 133 36 226 170 11
42260-239 182 13 242 186 14 242 186 14 246 186 14
42261-246 190 14 246 190 14 246 190 14 246 190 14
42262-246 190 14 246 190 14 246 190 14 246 190 14
42263-246 190 14 232 195 16 98 70 6 2 2 6
42264- 2 2 6 2 2 6 66 66 66 221 221 221
42265-253 253 253 253 253 253 253 253 253 253 253 253
42266-253 253 253 253 253 253 253 253 253 253 253 253
42267-253 253 253 253 253 253 253 253 253 253 253 253
42268-253 253 253 253 253 253 253 253 253 253 253 253
42269-253 253 253 206 206 206 198 198 198 214 166 58
42270-230 174 11 230 174 11 216 158 10 192 133 9
42271-163 110 8 116 81 8 102 78 10 116 81 8
42272-167 114 7 197 138 11 226 170 11 239 182 13
42273-242 186 14 242 186 14 162 146 94 78 78 78
42274- 34 34 34 14 14 14 6 6 6 0 0 0
42275- 0 0 0 0 0 0 0 0 0 0 0 0
42276- 0 0 0 0 0 0 0 0 0 0 0 0
42277- 0 0 0 0 0 0 0 0 0 0 0 0
42278- 0 0 0 0 0 0 0 0 0 6 6 6
42279- 30 30 30 78 78 78 190 142 34 226 170 11
42280-239 182 13 246 190 14 246 190 14 246 190 14
42281-246 190 14 246 190 14 246 190 14 246 190 14
42282-246 190 14 246 190 14 246 190 14 246 190 14
42283-246 190 14 241 196 14 203 166 17 22 18 6
42284- 2 2 6 2 2 6 2 2 6 38 38 38
42285-218 218 218 253 253 253 253 253 253 253 253 253
42286-253 253 253 253 253 253 253 253 253 253 253 253
42287-253 253 253 253 253 253 253 253 253 253 253 253
42288-253 253 253 253 253 253 253 253 253 253 253 253
42289-250 250 250 206 206 206 198 198 198 202 162 69
42290-226 170 11 236 178 12 224 166 10 210 150 10
42291-200 144 11 197 138 11 192 133 9 197 138 11
42292-210 150 10 226 170 11 242 186 14 246 190 14
42293-246 190 14 246 186 14 225 175 15 124 112 88
42294- 62 62 62 30 30 30 14 14 14 6 6 6
42295- 0 0 0 0 0 0 0 0 0 0 0 0
42296- 0 0 0 0 0 0 0 0 0 0 0 0
42297- 0 0 0 0 0 0 0 0 0 0 0 0
42298- 0 0 0 0 0 0 0 0 0 10 10 10
42299- 30 30 30 78 78 78 174 135 50 224 166 10
42300-239 182 13 246 190 14 246 190 14 246 190 14
42301-246 190 14 246 190 14 246 190 14 246 190 14
42302-246 190 14 246 190 14 246 190 14 246 190 14
42303-246 190 14 246 190 14 241 196 14 139 102 15
42304- 2 2 6 2 2 6 2 2 6 2 2 6
42305- 78 78 78 250 250 250 253 253 253 253 253 253
42306-253 253 253 253 253 253 253 253 253 253 253 253
42307-253 253 253 253 253 253 253 253 253 253 253 253
42308-253 253 253 253 253 253 253 253 253 253 253 253
42309-250 250 250 214 214 214 198 198 198 190 150 46
42310-219 162 10 236 178 12 234 174 13 224 166 10
42311-216 158 10 213 154 11 213 154 11 216 158 10
42312-226 170 11 239 182 13 246 190 14 246 190 14
42313-246 190 14 246 190 14 242 186 14 206 162 42
42314-101 101 101 58 58 58 30 30 30 14 14 14
42315- 6 6 6 0 0 0 0 0 0 0 0 0
42316- 0 0 0 0 0 0 0 0 0 0 0 0
42317- 0 0 0 0 0 0 0 0 0 0 0 0
42318- 0 0 0 0 0 0 0 0 0 10 10 10
42319- 30 30 30 74 74 74 174 135 50 216 158 10
42320-236 178 12 246 190 14 246 190 14 246 190 14
42321-246 190 14 246 190 14 246 190 14 246 190 14
42322-246 190 14 246 190 14 246 190 14 246 190 14
42323-246 190 14 246 190 14 241 196 14 226 184 13
42324- 61 42 6 2 2 6 2 2 6 2 2 6
42325- 22 22 22 238 238 238 253 253 253 253 253 253
42326-253 253 253 253 253 253 253 253 253 253 253 253
42327-253 253 253 253 253 253 253 253 253 253 253 253
42328-253 253 253 253 253 253 253 253 253 253 253 253
42329-253 253 253 226 226 226 187 187 187 180 133 36
42330-216 158 10 236 178 12 239 182 13 236 178 12
42331-230 174 11 226 170 11 226 170 11 230 174 11
42332-236 178 12 242 186 14 246 190 14 246 190 14
42333-246 190 14 246 190 14 246 186 14 239 182 13
42334-206 162 42 106 106 106 66 66 66 34 34 34
42335- 14 14 14 6 6 6 0 0 0 0 0 0
42336- 0 0 0 0 0 0 0 0 0 0 0 0
42337- 0 0 0 0 0 0 0 0 0 0 0 0
42338- 0 0 0 0 0 0 0 0 0 6 6 6
42339- 26 26 26 70 70 70 163 133 67 213 154 11
42340-236 178 12 246 190 14 246 190 14 246 190 14
42341-246 190 14 246 190 14 246 190 14 246 190 14
42342-246 190 14 246 190 14 246 190 14 246 190 14
42343-246 190 14 246 190 14 246 190 14 241 196 14
42344-190 146 13 18 14 6 2 2 6 2 2 6
42345- 46 46 46 246 246 246 253 253 253 253 253 253
42346-253 253 253 253 253 253 253 253 253 253 253 253
42347-253 253 253 253 253 253 253 253 253 253 253 253
42348-253 253 253 253 253 253 253 253 253 253 253 253
42349-253 253 253 221 221 221 86 86 86 156 107 11
42350-216 158 10 236 178 12 242 186 14 246 186 14
42351-242 186 14 239 182 13 239 182 13 242 186 14
42352-242 186 14 246 186 14 246 190 14 246 190 14
42353-246 190 14 246 190 14 246 190 14 246 190 14
42354-242 186 14 225 175 15 142 122 72 66 66 66
42355- 30 30 30 10 10 10 0 0 0 0 0 0
42356- 0 0 0 0 0 0 0 0 0 0 0 0
42357- 0 0 0 0 0 0 0 0 0 0 0 0
42358- 0 0 0 0 0 0 0 0 0 6 6 6
42359- 26 26 26 70 70 70 163 133 67 210 150 10
42360-236 178 12 246 190 14 246 190 14 246 190 14
42361-246 190 14 246 190 14 246 190 14 246 190 14
42362-246 190 14 246 190 14 246 190 14 246 190 14
42363-246 190 14 246 190 14 246 190 14 246 190 14
42364-232 195 16 121 92 8 34 34 34 106 106 106
42365-221 221 221 253 253 253 253 253 253 253 253 253
42366-253 253 253 253 253 253 253 253 253 253 253 253
42367-253 253 253 253 253 253 253 253 253 253 253 253
42368-253 253 253 253 253 253 253 253 253 253 253 253
42369-242 242 242 82 82 82 18 14 6 163 110 8
42370-216 158 10 236 178 12 242 186 14 246 190 14
42371-246 190 14 246 190 14 246 190 14 246 190 14
42372-246 190 14 246 190 14 246 190 14 246 190 14
42373-246 190 14 246 190 14 246 190 14 246 190 14
42374-246 190 14 246 190 14 242 186 14 163 133 67
42375- 46 46 46 18 18 18 6 6 6 0 0 0
42376- 0 0 0 0 0 0 0 0 0 0 0 0
42377- 0 0 0 0 0 0 0 0 0 0 0 0
42378- 0 0 0 0 0 0 0 0 0 10 10 10
42379- 30 30 30 78 78 78 163 133 67 210 150 10
42380-236 178 12 246 186 14 246 190 14 246 190 14
42381-246 190 14 246 190 14 246 190 14 246 190 14
42382-246 190 14 246 190 14 246 190 14 246 190 14
42383-246 190 14 246 190 14 246 190 14 246 190 14
42384-241 196 14 215 174 15 190 178 144 253 253 253
42385-253 253 253 253 253 253 253 253 253 253 253 253
42386-253 253 253 253 253 253 253 253 253 253 253 253
42387-253 253 253 253 253 253 253 253 253 253 253 253
42388-253 253 253 253 253 253 253 253 253 218 218 218
42389- 58 58 58 2 2 6 22 18 6 167 114 7
42390-216 158 10 236 178 12 246 186 14 246 190 14
42391-246 190 14 246 190 14 246 190 14 246 190 14
42392-246 190 14 246 190 14 246 190 14 246 190 14
42393-246 190 14 246 190 14 246 190 14 246 190 14
42394-246 190 14 246 186 14 242 186 14 190 150 46
42395- 54 54 54 22 22 22 6 6 6 0 0 0
42396- 0 0 0 0 0 0 0 0 0 0 0 0
42397- 0 0 0 0 0 0 0 0 0 0 0 0
42398- 0 0 0 0 0 0 0 0 0 14 14 14
42399- 38 38 38 86 86 86 180 133 36 213 154 11
42400-236 178 12 246 186 14 246 190 14 246 190 14
42401-246 190 14 246 190 14 246 190 14 246 190 14
42402-246 190 14 246 190 14 246 190 14 246 190 14
42403-246 190 14 246 190 14 246 190 14 246 190 14
42404-246 190 14 232 195 16 190 146 13 214 214 214
42405-253 253 253 253 253 253 253 253 253 253 253 253
42406-253 253 253 253 253 253 253 253 253 253 253 253
42407-253 253 253 253 253 253 253 253 253 253 253 253
42408-253 253 253 250 250 250 170 170 170 26 26 26
42409- 2 2 6 2 2 6 37 26 9 163 110 8
42410-219 162 10 239 182 13 246 186 14 246 190 14
42411-246 190 14 246 190 14 246 190 14 246 190 14
42412-246 190 14 246 190 14 246 190 14 246 190 14
42413-246 190 14 246 190 14 246 190 14 246 190 14
42414-246 186 14 236 178 12 224 166 10 142 122 72
42415- 46 46 46 18 18 18 6 6 6 0 0 0
42416- 0 0 0 0 0 0 0 0 0 0 0 0
42417- 0 0 0 0 0 0 0 0 0 0 0 0
42418- 0 0 0 0 0 0 6 6 6 18 18 18
42419- 50 50 50 109 106 95 192 133 9 224 166 10
42420-242 186 14 246 190 14 246 190 14 246 190 14
42421-246 190 14 246 190 14 246 190 14 246 190 14
42422-246 190 14 246 190 14 246 190 14 246 190 14
42423-246 190 14 246 190 14 246 190 14 246 190 14
42424-242 186 14 226 184 13 210 162 10 142 110 46
42425-226 226 226 253 253 253 253 253 253 253 253 253
42426-253 253 253 253 253 253 253 253 253 253 253 253
42427-253 253 253 253 253 253 253 253 253 253 253 253
42428-198 198 198 66 66 66 2 2 6 2 2 6
42429- 2 2 6 2 2 6 50 34 6 156 107 11
42430-219 162 10 239 182 13 246 186 14 246 190 14
42431-246 190 14 246 190 14 246 190 14 246 190 14
42432-246 190 14 246 190 14 246 190 14 246 190 14
42433-246 190 14 246 190 14 246 190 14 242 186 14
42434-234 174 13 213 154 11 154 122 46 66 66 66
42435- 30 30 30 10 10 10 0 0 0 0 0 0
42436- 0 0 0 0 0 0 0 0 0 0 0 0
42437- 0 0 0 0 0 0 0 0 0 0 0 0
42438- 0 0 0 0 0 0 6 6 6 22 22 22
42439- 58 58 58 154 121 60 206 145 10 234 174 13
42440-242 186 14 246 186 14 246 190 14 246 190 14
42441-246 190 14 246 190 14 246 190 14 246 190 14
42442-246 190 14 246 190 14 246 190 14 246 190 14
42443-246 190 14 246 190 14 246 190 14 246 190 14
42444-246 186 14 236 178 12 210 162 10 163 110 8
42445- 61 42 6 138 138 138 218 218 218 250 250 250
42446-253 253 253 253 253 253 253 253 253 250 250 250
42447-242 242 242 210 210 210 144 144 144 66 66 66
42448- 6 6 6 2 2 6 2 2 6 2 2 6
42449- 2 2 6 2 2 6 61 42 6 163 110 8
42450-216 158 10 236 178 12 246 190 14 246 190 14
42451-246 190 14 246 190 14 246 190 14 246 190 14
42452-246 190 14 246 190 14 246 190 14 246 190 14
42453-246 190 14 239 182 13 230 174 11 216 158 10
42454-190 142 34 124 112 88 70 70 70 38 38 38
42455- 18 18 18 6 6 6 0 0 0 0 0 0
42456- 0 0 0 0 0 0 0 0 0 0 0 0
42457- 0 0 0 0 0 0 0 0 0 0 0 0
42458- 0 0 0 0 0 0 6 6 6 22 22 22
42459- 62 62 62 168 124 44 206 145 10 224 166 10
42460-236 178 12 239 182 13 242 186 14 242 186 14
42461-246 186 14 246 190 14 246 190 14 246 190 14
42462-246 190 14 246 190 14 246 190 14 246 190 14
42463-246 190 14 246 190 14 246 190 14 246 190 14
42464-246 190 14 236 178 12 216 158 10 175 118 6
42465- 80 54 7 2 2 6 6 6 6 30 30 30
42466- 54 54 54 62 62 62 50 50 50 38 38 38
42467- 14 14 14 2 2 6 2 2 6 2 2 6
42468- 2 2 6 2 2 6 2 2 6 2 2 6
42469- 2 2 6 6 6 6 80 54 7 167 114 7
42470-213 154 11 236 178 12 246 190 14 246 190 14
42471-246 190 14 246 190 14 246 190 14 246 190 14
42472-246 190 14 242 186 14 239 182 13 239 182 13
42473-230 174 11 210 150 10 174 135 50 124 112 88
42474- 82 82 82 54 54 54 34 34 34 18 18 18
42475- 6 6 6 0 0 0 0 0 0 0 0 0
42476- 0 0 0 0 0 0 0 0 0 0 0 0
42477- 0 0 0 0 0 0 0 0 0 0 0 0
42478- 0 0 0 0 0 0 6 6 6 18 18 18
42479- 50 50 50 158 118 36 192 133 9 200 144 11
42480-216 158 10 219 162 10 224 166 10 226 170 11
42481-230 174 11 236 178 12 239 182 13 239 182 13
42482-242 186 14 246 186 14 246 190 14 246 190 14
42483-246 190 14 246 190 14 246 190 14 246 190 14
42484-246 186 14 230 174 11 210 150 10 163 110 8
42485-104 69 6 10 10 10 2 2 6 2 2 6
42486- 2 2 6 2 2 6 2 2 6 2 2 6
42487- 2 2 6 2 2 6 2 2 6 2 2 6
42488- 2 2 6 2 2 6 2 2 6 2 2 6
42489- 2 2 6 6 6 6 91 60 6 167 114 7
42490-206 145 10 230 174 11 242 186 14 246 190 14
42491-246 190 14 246 190 14 246 186 14 242 186 14
42492-239 182 13 230 174 11 224 166 10 213 154 11
42493-180 133 36 124 112 88 86 86 86 58 58 58
42494- 38 38 38 22 22 22 10 10 10 6 6 6
42495- 0 0 0 0 0 0 0 0 0 0 0 0
42496- 0 0 0 0 0 0 0 0 0 0 0 0
42497- 0 0 0 0 0 0 0 0 0 0 0 0
42498- 0 0 0 0 0 0 0 0 0 14 14 14
42499- 34 34 34 70 70 70 138 110 50 158 118 36
42500-167 114 7 180 123 7 192 133 9 197 138 11
42501-200 144 11 206 145 10 213 154 11 219 162 10
42502-224 166 10 230 174 11 239 182 13 242 186 14
42503-246 186 14 246 186 14 246 186 14 246 186 14
42504-239 182 13 216 158 10 185 133 11 152 99 6
42505-104 69 6 18 14 6 2 2 6 2 2 6
42506- 2 2 6 2 2 6 2 2 6 2 2 6
42507- 2 2 6 2 2 6 2 2 6 2 2 6
42508- 2 2 6 2 2 6 2 2 6 2 2 6
42509- 2 2 6 6 6 6 80 54 7 152 99 6
42510-192 133 9 219 162 10 236 178 12 239 182 13
42511-246 186 14 242 186 14 239 182 13 236 178 12
42512-224 166 10 206 145 10 192 133 9 154 121 60
42513- 94 94 94 62 62 62 42 42 42 22 22 22
42514- 14 14 14 6 6 6 0 0 0 0 0 0
42515- 0 0 0 0 0 0 0 0 0 0 0 0
42516- 0 0 0 0 0 0 0 0 0 0 0 0
42517- 0 0 0 0 0 0 0 0 0 0 0 0
42518- 0 0 0 0 0 0 0 0 0 6 6 6
42519- 18 18 18 34 34 34 58 58 58 78 78 78
42520-101 98 89 124 112 88 142 110 46 156 107 11
42521-163 110 8 167 114 7 175 118 6 180 123 7
42522-185 133 11 197 138 11 210 150 10 219 162 10
42523-226 170 11 236 178 12 236 178 12 234 174 13
42524-219 162 10 197 138 11 163 110 8 130 83 6
42525- 91 60 6 10 10 10 2 2 6 2 2 6
42526- 18 18 18 38 38 38 38 38 38 38 38 38
42527- 38 38 38 38 38 38 38 38 38 38 38 38
42528- 38 38 38 38 38 38 26 26 26 2 2 6
42529- 2 2 6 6 6 6 70 47 6 137 92 6
42530-175 118 6 200 144 11 219 162 10 230 174 11
42531-234 174 13 230 174 11 219 162 10 210 150 10
42532-192 133 9 163 110 8 124 112 88 82 82 82
42533- 50 50 50 30 30 30 14 14 14 6 6 6
42534- 0 0 0 0 0 0 0 0 0 0 0 0
42535- 0 0 0 0 0 0 0 0 0 0 0 0
42536- 0 0 0 0 0 0 0 0 0 0 0 0
42537- 0 0 0 0 0 0 0 0 0 0 0 0
42538- 0 0 0 0 0 0 0 0 0 0 0 0
42539- 6 6 6 14 14 14 22 22 22 34 34 34
42540- 42 42 42 58 58 58 74 74 74 86 86 86
42541-101 98 89 122 102 70 130 98 46 121 87 25
42542-137 92 6 152 99 6 163 110 8 180 123 7
42543-185 133 11 197 138 11 206 145 10 200 144 11
42544-180 123 7 156 107 11 130 83 6 104 69 6
42545- 50 34 6 54 54 54 110 110 110 101 98 89
42546- 86 86 86 82 82 82 78 78 78 78 78 78
42547- 78 78 78 78 78 78 78 78 78 78 78 78
42548- 78 78 78 82 82 82 86 86 86 94 94 94
42549-106 106 106 101 101 101 86 66 34 124 80 6
42550-156 107 11 180 123 7 192 133 9 200 144 11
42551-206 145 10 200 144 11 192 133 9 175 118 6
42552-139 102 15 109 106 95 70 70 70 42 42 42
42553- 22 22 22 10 10 10 0 0 0 0 0 0
42554- 0 0 0 0 0 0 0 0 0 0 0 0
42555- 0 0 0 0 0 0 0 0 0 0 0 0
42556- 0 0 0 0 0 0 0 0 0 0 0 0
42557- 0 0 0 0 0 0 0 0 0 0 0 0
42558- 0 0 0 0 0 0 0 0 0 0 0 0
42559- 0 0 0 0 0 0 6 6 6 10 10 10
42560- 14 14 14 22 22 22 30 30 30 38 38 38
42561- 50 50 50 62 62 62 74 74 74 90 90 90
42562-101 98 89 112 100 78 121 87 25 124 80 6
42563-137 92 6 152 99 6 152 99 6 152 99 6
42564-138 86 6 124 80 6 98 70 6 86 66 30
42565-101 98 89 82 82 82 58 58 58 46 46 46
42566- 38 38 38 34 34 34 34 34 34 34 34 34
42567- 34 34 34 34 34 34 34 34 34 34 34 34
42568- 34 34 34 34 34 34 38 38 38 42 42 42
42569- 54 54 54 82 82 82 94 86 76 91 60 6
42570-134 86 6 156 107 11 167 114 7 175 118 6
42571-175 118 6 167 114 7 152 99 6 121 87 25
42572-101 98 89 62 62 62 34 34 34 18 18 18
42573- 6 6 6 0 0 0 0 0 0 0 0 0
42574- 0 0 0 0 0 0 0 0 0 0 0 0
42575- 0 0 0 0 0 0 0 0 0 0 0 0
42576- 0 0 0 0 0 0 0 0 0 0 0 0
42577- 0 0 0 0 0 0 0 0 0 0 0 0
42578- 0 0 0 0 0 0 0 0 0 0 0 0
42579- 0 0 0 0 0 0 0 0 0 0 0 0
42580- 0 0 0 6 6 6 6 6 6 10 10 10
42581- 18 18 18 22 22 22 30 30 30 42 42 42
42582- 50 50 50 66 66 66 86 86 86 101 98 89
42583-106 86 58 98 70 6 104 69 6 104 69 6
42584-104 69 6 91 60 6 82 62 34 90 90 90
42585- 62 62 62 38 38 38 22 22 22 14 14 14
42586- 10 10 10 10 10 10 10 10 10 10 10 10
42587- 10 10 10 10 10 10 6 6 6 10 10 10
42588- 10 10 10 10 10 10 10 10 10 14 14 14
42589- 22 22 22 42 42 42 70 70 70 89 81 66
42590- 80 54 7 104 69 6 124 80 6 137 92 6
42591-134 86 6 116 81 8 100 82 52 86 86 86
42592- 58 58 58 30 30 30 14 14 14 6 6 6
42593- 0 0 0 0 0 0 0 0 0 0 0 0
42594- 0 0 0 0 0 0 0 0 0 0 0 0
42595- 0 0 0 0 0 0 0 0 0 0 0 0
42596- 0 0 0 0 0 0 0 0 0 0 0 0
42597- 0 0 0 0 0 0 0 0 0 0 0 0
42598- 0 0 0 0 0 0 0 0 0 0 0 0
42599- 0 0 0 0 0 0 0 0 0 0 0 0
42600- 0 0 0 0 0 0 0 0 0 0 0 0
42601- 0 0 0 6 6 6 10 10 10 14 14 14
42602- 18 18 18 26 26 26 38 38 38 54 54 54
42603- 70 70 70 86 86 86 94 86 76 89 81 66
42604- 89 81 66 86 86 86 74 74 74 50 50 50
42605- 30 30 30 14 14 14 6 6 6 0 0 0
42606- 0 0 0 0 0 0 0 0 0 0 0 0
42607- 0 0 0 0 0 0 0 0 0 0 0 0
42608- 0 0 0 0 0 0 0 0 0 0 0 0
42609- 6 6 6 18 18 18 34 34 34 58 58 58
42610- 82 82 82 89 81 66 89 81 66 89 81 66
42611- 94 86 66 94 86 76 74 74 74 50 50 50
42612- 26 26 26 14 14 14 6 6 6 0 0 0
42613- 0 0 0 0 0 0 0 0 0 0 0 0
42614- 0 0 0 0 0 0 0 0 0 0 0 0
42615- 0 0 0 0 0 0 0 0 0 0 0 0
42616- 0 0 0 0 0 0 0 0 0 0 0 0
42617- 0 0 0 0 0 0 0 0 0 0 0 0
42618- 0 0 0 0 0 0 0 0 0 0 0 0
42619- 0 0 0 0 0 0 0 0 0 0 0 0
42620- 0 0 0 0 0 0 0 0 0 0 0 0
42621- 0 0 0 0 0 0 0 0 0 0 0 0
42622- 6 6 6 6 6 6 14 14 14 18 18 18
42623- 30 30 30 38 38 38 46 46 46 54 54 54
42624- 50 50 50 42 42 42 30 30 30 18 18 18
42625- 10 10 10 0 0 0 0 0 0 0 0 0
42626- 0 0 0 0 0 0 0 0 0 0 0 0
42627- 0 0 0 0 0 0 0 0 0 0 0 0
42628- 0 0 0 0 0 0 0 0 0 0 0 0
42629- 0 0 0 6 6 6 14 14 14 26 26 26
42630- 38 38 38 50 50 50 58 58 58 58 58 58
42631- 54 54 54 42 42 42 30 30 30 18 18 18
42632- 10 10 10 0 0 0 0 0 0 0 0 0
42633- 0 0 0 0 0 0 0 0 0 0 0 0
42634- 0 0 0 0 0 0 0 0 0 0 0 0
42635- 0 0 0 0 0 0 0 0 0 0 0 0
42636- 0 0 0 0 0 0 0 0 0 0 0 0
42637- 0 0 0 0 0 0 0 0 0 0 0 0
42638- 0 0 0 0 0 0 0 0 0 0 0 0
42639- 0 0 0 0 0 0 0 0 0 0 0 0
42640- 0 0 0 0 0 0 0 0 0 0 0 0
42641- 0 0 0 0 0 0 0 0 0 0 0 0
42642- 0 0 0 0 0 0 0 0 0 6 6 6
42643- 6 6 6 10 10 10 14 14 14 18 18 18
42644- 18 18 18 14 14 14 10 10 10 6 6 6
42645- 0 0 0 0 0 0 0 0 0 0 0 0
42646- 0 0 0 0 0 0 0 0 0 0 0 0
42647- 0 0 0 0 0 0 0 0 0 0 0 0
42648- 0 0 0 0 0 0 0 0 0 0 0 0
42649- 0 0 0 0 0 0 0 0 0 6 6 6
42650- 14 14 14 18 18 18 22 22 22 22 22 22
42651- 18 18 18 14 14 14 10 10 10 6 6 6
42652- 0 0 0 0 0 0 0 0 0 0 0 0
42653- 0 0 0 0 0 0 0 0 0 0 0 0
42654- 0 0 0 0 0 0 0 0 0 0 0 0
42655- 0 0 0 0 0 0 0 0 0 0 0 0
42656- 0 0 0 0 0 0 0 0 0 0 0 0
42657+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42658+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42659+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42660+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42661+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42662+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42663+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42664+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42665+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42666+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42667+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42668+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42669+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42670+4 4 4 4 4 4
42671+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42672+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42673+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42674+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42675+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42676+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42677+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42678+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42679+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42680+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42681+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42682+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42683+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42684+4 4 4 4 4 4
42685+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42686+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42687+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42688+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42689+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42690+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42691+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42692+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42693+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42694+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42695+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42696+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42697+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42698+4 4 4 4 4 4
42699+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42700+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42701+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42702+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42703+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42704+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42705+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42706+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42707+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42708+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42709+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42710+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42711+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42712+4 4 4 4 4 4
42713+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42714+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42715+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42716+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42717+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42718+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42719+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42720+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42721+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42722+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42723+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42724+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42725+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42726+4 4 4 4 4 4
42727+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42728+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42729+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42730+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42731+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42732+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42733+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42734+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42735+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42736+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42737+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42738+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42739+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42740+4 4 4 4 4 4
42741+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42742+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42743+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42744+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42745+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
42746+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
42747+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42748+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42749+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42750+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
42751+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
42752+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
42753+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42754+4 4 4 4 4 4
42755+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42756+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42757+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42758+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42759+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
42760+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
42761+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42762+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42763+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42764+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
42765+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
42766+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
42767+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42768+4 4 4 4 4 4
42769+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42770+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42771+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42772+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42773+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
42774+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
42775+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
42776+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42777+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42778+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
42779+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
42780+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
42781+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
42782+4 4 4 4 4 4
42783+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42784+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42785+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42786+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
42787+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
42788+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
42789+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
42790+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42791+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
42792+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
42793+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
42794+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
42795+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
42796+4 4 4 4 4 4
42797+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42798+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42799+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42800+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
42801+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
42802+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
42803+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
42804+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
42805+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
42806+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
42807+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
42808+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
42809+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
42810+4 4 4 4 4 4
42811+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42812+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42813+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
42814+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
42815+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
42816+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
42817+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
42818+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
42819+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
42820+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
42821+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
42822+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
42823+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
42824+4 4 4 4 4 4
42825+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42826+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42827+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
42828+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
42829+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
42830+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
42831+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
42832+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
42833+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
42834+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
42835+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
42836+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
42837+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
42838+4 4 4 4 4 4
42839+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42840+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42841+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
42842+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
42843+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
42844+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
42845+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
42846+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
42847+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
42848+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
42849+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
42850+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
42851+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
42852+4 4 4 4 4 4
42853+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42854+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42855+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
42856+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
42857+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
42858+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
42859+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
42860+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
42861+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
42862+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
42863+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
42864+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
42865+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
42866+4 4 4 4 4 4
42867+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42868+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42869+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
42870+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
42871+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
42872+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
42873+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
42874+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
42875+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
42876+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
42877+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
42878+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
42879+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
42880+4 4 4 4 4 4
42881+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42882+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
42883+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
42884+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
42885+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
42886+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
42887+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
42888+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
42889+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
42890+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
42891+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
42892+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
42893+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
42894+4 4 4 4 4 4
42895+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42896+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
42897+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
42898+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
42899+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
42900+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
42901+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
42902+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
42903+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
42904+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
42905+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
42906+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
42907+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
42908+0 0 0 4 4 4
42909+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
42910+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
42911+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
42912+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
42913+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
42914+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
42915+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
42916+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
42917+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
42918+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
42919+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
42920+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
42921+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
42922+2 0 0 0 0 0
42923+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
42924+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
42925+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
42926+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
42927+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
42928+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
42929+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
42930+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
42931+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
42932+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
42933+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
42934+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
42935+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
42936+37 38 37 0 0 0
42937+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
42938+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
42939+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
42940+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
42941+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
42942+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
42943+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
42944+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
42945+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
42946+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
42947+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
42948+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
42949+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
42950+85 115 134 4 0 0
42951+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
42952+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
42953+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
42954+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
42955+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
42956+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
42957+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
42958+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
42959+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
42960+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
42961+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
42962+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
42963+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
42964+60 73 81 4 0 0
42965+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
42966+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
42967+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
42968+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
42969+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
42970+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
42971+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
42972+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
42973+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
42974+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
42975+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
42976+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
42977+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
42978+16 19 21 4 0 0
42979+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
42980+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
42981+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
42982+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
42983+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
42984+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
42985+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
42986+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
42987+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
42988+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
42989+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
42990+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
42991+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
42992+4 0 0 4 3 3
42993+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
42994+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
42995+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
42996+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
42997+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
42998+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
42999+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
43000+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
43001+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
43002+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
43003+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
43004+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
43005+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
43006+3 2 2 4 4 4
43007+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
43008+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
43009+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
43010+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
43011+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
43012+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
43013+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
43014+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
43015+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
43016+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
43017+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
43018+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
43019+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
43020+4 4 4 4 4 4
43021+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
43022+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
43023+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
43024+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
43025+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
43026+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
43027+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
43028+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
43029+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
43030+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
43031+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
43032+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
43033+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
43034+4 4 4 4 4 4
43035+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
43036+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
43037+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
43038+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
43039+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
43040+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
43041+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
43042+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
43043+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
43044+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
43045+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
43046+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
43047+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
43048+5 5 5 5 5 5
43049+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
43050+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
43051+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
43052+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
43053+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
43054+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
43055+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
43056+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
43057+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
43058+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
43059+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
43060+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
43061+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
43062+5 5 5 4 4 4
43063+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
43064+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
43065+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
43066+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
43067+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
43068+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
43069+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
43070+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
43071+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
43072+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
43073+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
43074+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
43075+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43076+4 4 4 4 4 4
43077+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
43078+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
43079+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
43080+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
43081+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
43082+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
43083+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
43084+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
43085+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
43086+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
43087+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
43088+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
43089+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43090+4 4 4 4 4 4
43091+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
43092+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
43093+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
43094+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
43095+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
43096+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
43097+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
43098+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
43099+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
43100+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
43101+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
43102+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43103+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43104+4 4 4 4 4 4
43105+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
43106+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
43107+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
43108+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
43109+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
43110+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
43111+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
43112+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
43113+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
43114+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
43115+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
43116+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43117+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43118+4 4 4 4 4 4
43119+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
43120+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
43121+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
43122+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
43123+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
[ several hundred rows of raw "R G B" pixel triplets — the embedded logo image data added by this hunk of the patch — elided here as machine-generated, non-human-readable payload ]
43777diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
43778index 86d449e..af6a7f7 100644
43779--- a/drivers/video/udlfb.c
43780+++ b/drivers/video/udlfb.c
43781@@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
43782 dlfb_urb_completion(urb);
43783
43784 error:
43785- atomic_add(bytes_sent, &dev->bytes_sent);
43786- atomic_add(bytes_identical, &dev->bytes_identical);
43787- atomic_add(width*height*2, &dev->bytes_rendered);
43788+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
43789+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
43790+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
43791 end_cycles = get_cycles();
43792- atomic_add(((unsigned int) ((end_cycles - start_cycles)
43793+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
43794 >> 10)), /* Kcycles */
43795 &dev->cpu_kcycles_used);
43796
43797@@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
43798 dlfb_urb_completion(urb);
43799
43800 error:
43801- atomic_add(bytes_sent, &dev->bytes_sent);
43802- atomic_add(bytes_identical, &dev->bytes_identical);
43803- atomic_add(bytes_rendered, &dev->bytes_rendered);
43804+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
43805+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
43806+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
43807 end_cycles = get_cycles();
43808- atomic_add(((unsigned int) ((end_cycles - start_cycles)
43809+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
43810 >> 10)), /* Kcycles */
43811 &dev->cpu_kcycles_used);
43812 }
43813@@ -1372,7 +1372,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
43814 struct fb_info *fb_info = dev_get_drvdata(fbdev);
43815 struct dlfb_data *dev = fb_info->par;
43816 return snprintf(buf, PAGE_SIZE, "%u\n",
43817- atomic_read(&dev->bytes_rendered));
43818+ atomic_read_unchecked(&dev->bytes_rendered));
43819 }
43820
43821 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
43822@@ -1380,7 +1380,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
43823 struct fb_info *fb_info = dev_get_drvdata(fbdev);
43824 struct dlfb_data *dev = fb_info->par;
43825 return snprintf(buf, PAGE_SIZE, "%u\n",
43826- atomic_read(&dev->bytes_identical));
43827+ atomic_read_unchecked(&dev->bytes_identical));
43828 }
43829
43830 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
43831@@ -1388,7 +1388,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
43832 struct fb_info *fb_info = dev_get_drvdata(fbdev);
43833 struct dlfb_data *dev = fb_info->par;
43834 return snprintf(buf, PAGE_SIZE, "%u\n",
43835- atomic_read(&dev->bytes_sent));
43836+ atomic_read_unchecked(&dev->bytes_sent));
43837 }
43838
43839 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
43840@@ -1396,7 +1396,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
43841 struct fb_info *fb_info = dev_get_drvdata(fbdev);
43842 struct dlfb_data *dev = fb_info->par;
43843 return snprintf(buf, PAGE_SIZE, "%u\n",
43844- atomic_read(&dev->cpu_kcycles_used));
43845+ atomic_read_unchecked(&dev->cpu_kcycles_used));
43846 }
43847
43848 static ssize_t edid_show(
43849@@ -1456,10 +1456,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
43850 struct fb_info *fb_info = dev_get_drvdata(fbdev);
43851 struct dlfb_data *dev = fb_info->par;
43852
43853- atomic_set(&dev->bytes_rendered, 0);
43854- atomic_set(&dev->bytes_identical, 0);
43855- atomic_set(&dev->bytes_sent, 0);
43856- atomic_set(&dev->cpu_kcycles_used, 0);
43857+ atomic_set_unchecked(&dev->bytes_rendered, 0);
43858+ atomic_set_unchecked(&dev->bytes_identical, 0);
43859+ atomic_set_unchecked(&dev->bytes_sent, 0);
43860+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
43861
43862 return count;
43863 }
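
For context on the udlfb conversions above: grsecurity's PAX_REFCOUNT hardening traps on atomic_t overflow to stop reference-count exploits, so counters that are pure statistics — like these framebuffer byte and cycle counts, where wraparound is harmless — are moved to the patch's atomic_unchecked_t type, which skips the overflow trap. A minimal freestanding sketch of the idea (using GCC atomic builtins in place of the kernel's per-architecture implementations):

/* A statistics counter that deliberately opts out of overflow
 * trapping: wraparound is harmless, so no check is wanted. */
typedef struct { int counter; } atomic_unchecked_t;

static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
{
	__atomic_fetch_add(&v->counter, i, __ATOMIC_RELAXED);
}

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}
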
43864diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
43865index 2f8f82d..191de37 100644
43866--- a/drivers/video/uvesafb.c
43867+++ b/drivers/video/uvesafb.c
43868@@ -19,6 +19,7 @@
43869 #include <linux/io.h>
43870 #include <linux/mutex.h>
43871 #include <linux/slab.h>
43872+#include <linux/moduleloader.h>
43873 #include <video/edid.h>
43874 #include <video/uvesafb.h>
43875 #ifdef CONFIG_X86
43876@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
43877 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
43878 par->pmi_setpal = par->ypan = 0;
43879 } else {
43880+
43881+#ifdef CONFIG_PAX_KERNEXEC
43882+#ifdef CONFIG_MODULES
43883+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
43884+#endif
43885+ if (!par->pmi_code) {
43886+ par->pmi_setpal = par->ypan = 0;
43887+ return 0;
43888+ }
43889+#endif
43890+
43891 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
43892 + task->t.regs.edi);
43893+
43894+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
43895+ pax_open_kernel();
43896+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
43897+ pax_close_kernel();
43898+
43899+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
43900+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
43901+#else
43902 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
43903 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
43904+#endif
43905+
43906 printk(KERN_INFO "uvesafb: protected mode interface info at "
43907 "%04x:%04x\n",
43908 (u16)task->t.regs.es, (u16)task->t.regs.edi);
43909@@ -818,13 +841,14 @@ static int __devinit uvesafb_vbe_init(struct fb_info *info)
43910 par->ypan = ypan;
43911
43912 if (par->pmi_setpal || par->ypan) {
43913+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
43914 if (__supported_pte_mask & _PAGE_NX) {
43915 par->pmi_setpal = par->ypan = 0;
43916 printk(KERN_WARNING "uvesafb: NX protection is active. "
43917 "We had better not use the PMI.\n");
43918- } else {
43919+ } else
43920+#endif
43921 uvesafb_vbe_getpmi(task, par);
43922- }
43923 }
43924 #else
43925 /* The protected mode interface is not available on non-x86. */
43926@@ -1838,6 +1862,11 @@ out:
43927 if (par->vbe_modes)
43928 kfree(par->vbe_modes);
43929
43930+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
43931+ if (par->pmi_code)
43932+ module_free_exec(NULL, par->pmi_code);
43933+#endif
43934+
43935 framebuffer_release(info);
43936 return err;
43937 }
43938@@ -1864,6 +1893,12 @@ static int uvesafb_remove(struct platform_device *dev)
43939 kfree(par->vbe_state_orig);
43940 if (par->vbe_state_saved)
43941 kfree(par->vbe_state_saved);
43942+
43943+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
43944+ if (par->pmi_code)
43945+ module_free_exec(NULL, par->pmi_code);
43946+#endif
43947+
43948 }
43949
43950 framebuffer_release(info);
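
The KERNEXEC changes above (and the matching ones in vesafb.c below) follow one pattern: with kernel write/execute separation enforced, the VESA BIOS protected-mode interface code cannot simply be called where firmware left it, so an executable buffer is allocated with module_alloc_exec(), the PMI code is copied in while pax_open_kernel() temporarily lifts write protection, and the entry points are taken through the executable alias that ktva_ktla() returns. A rough userspace analogue of the same write-then-execute discipline (the function name and structure are my own, not from the patch):

#include <string.h>
#include <sys/mman.h>

/* Map RW, copy the code in, then flip the mapping to RX so the
 * buffer is never writable and executable at the same time. */
static void *install_exec_code(const void *src, size_t len)
{
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return NULL;
	memcpy(buf, src, len);
	if (mprotect(buf, len, PROT_READ | PROT_EXEC) != 0) {
		munmap(buf, len);
		return NULL;
	}
	return buf;
}
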
43951diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
43952index 501b340..86bd4cf 100644
43953--- a/drivers/video/vesafb.c
43954+++ b/drivers/video/vesafb.c
43955@@ -9,6 +9,7 @@
43956 */
43957
43958 #include <linux/module.h>
43959+#include <linux/moduleloader.h>
43960 #include <linux/kernel.h>
43961 #include <linux/errno.h>
43962 #include <linux/string.h>
43963@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
43964 static int vram_total __initdata; /* Set total amount of memory */
43965 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
43966 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
43967-static void (*pmi_start)(void) __read_mostly;
43968-static void (*pmi_pal) (void) __read_mostly;
43969+static void (*pmi_start)(void) __read_only;
43970+static void (*pmi_pal) (void) __read_only;
43971 static int depth __read_mostly;
43972 static int vga_compat __read_mostly;
43973 /* --------------------------------------------------------------------- */
43974@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
43975 unsigned int size_vmode;
43976 unsigned int size_remap;
43977 unsigned int size_total;
43978+ void *pmi_code = NULL;
43979
43980 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
43981 return -ENODEV;
43982@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
43983 size_remap = size_total;
43984 vesafb_fix.smem_len = size_remap;
43985
43986-#ifndef __i386__
43987- screen_info.vesapm_seg = 0;
43988-#endif
43989-
43990 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
43991 printk(KERN_WARNING
43992 "vesafb: cannot reserve video memory at 0x%lx\n",
43993@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
43994 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
43995 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
43996
43997+#ifdef __i386__
43998+
43999+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
44000+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
44001+ if (!pmi_code)
44002+#elif !defined(CONFIG_PAX_KERNEXEC)
44003+ if (0)
44004+#endif
44005+
44006+#endif
44007+ screen_info.vesapm_seg = 0;
44008+
44009 if (screen_info.vesapm_seg) {
44010- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
44011- screen_info.vesapm_seg,screen_info.vesapm_off);
44012+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
44013+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
44014 }
44015
44016 if (screen_info.vesapm_seg < 0xc000)
44017@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
44018
44019 if (ypan || pmi_setpal) {
44020 unsigned short *pmi_base;
44021+
44022 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
44023- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
44024- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
44025+
44026+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
44027+ pax_open_kernel();
44028+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
44029+#else
44030+ pmi_code = pmi_base;
44031+#endif
44032+
44033+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
44034+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
44035+
44036+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
44037+ pmi_start = ktva_ktla(pmi_start);
44038+ pmi_pal = ktva_ktla(pmi_pal);
44039+ pax_close_kernel();
44040+#endif
44041+
44042 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
44043 if (pmi_base[3]) {
44044 printk(KERN_INFO "vesafb: pmi: ports = ");
44045@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
44046 info->node, info->fix.id);
44047 return 0;
44048 err:
44049+
44050+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
44051+ module_free_exec(NULL, pmi_code);
44052+#endif
44053+
44054 if (info->screen_base)
44055 iounmap(info->screen_base);
44056 framebuffer_release(info);
44057diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
44058index 88714ae..16c2e11 100644
44059--- a/drivers/video/via/via_clock.h
44060+++ b/drivers/video/via/via_clock.h
44061@@ -56,7 +56,7 @@ struct via_clock {
44062
44063 void (*set_engine_pll_state)(u8 state);
44064 void (*set_engine_pll)(struct via_pll_config config);
44065-};
44066+} __no_const;
44067
44068
44069 static inline u32 get_pll_internal_frequency(u32 ref_freq,
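
The __no_const annotation above belongs to grsecurity's constify GCC plugin, which turns structures consisting only of function pointers into read-only data after initialization; via_clock is exempted because its members are assigned at runtime. A compilable sketch with the attribute stubbed out, as it effectively is when the plugin is not in use (struct and member names are illustrative only):

/* With the constify plugin active, ops-style structs become const;
 * __no_const opts a struct out because its function pointers are
 * filled in at runtime rather than statically. */
#ifndef __no_const
#define __no_const
#endif

struct clk_ops_demo {
	void (*set_state)(unsigned char state);
} __no_const;
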
44070diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
44071index 6b1b7e1..b2fa4d5 100644
44072--- a/drivers/virtio/virtio_mmio.c
44073+++ b/drivers/virtio/virtio_mmio.c
44074@@ -530,7 +530,7 @@ static int vm_cmdline_set(const char *device,
44075
44076 resources[0].end = memparse(device, &str) - 1;
44077
44078- processed = sscanf(str, "@%lli:%u%n:%d%n",
44079+ processed = sscanf(str, "@%lli:%llu%n:%d%n",
44080 &base, &resources[1].start, &consumed,
44081 &vm_cmdline_id, &consumed);
44082
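
The one-character format fix above matters because resources[1].start is a resource_size_t — 64 bits wide on CONFIG_PHYS_ADDR_T_64BIT kernels — while "%u" tells sscanf to store only a 32-bit value through the pointer, truncating the field or clobbering what sits next to it. A small userspace demonstration of matching the conversion specifier to the operand width:

#include <stdio.h>

int main(void)
{
	unsigned long long size = 0;	/* stands in for resource_size_t */

	/* %*lli skips the base address; %llu stores all 64 bits. */
	if (sscanf("@0x1000:4096", "@%*lli:%llu", &size) == 1)
		printf("size = %llu\n", size);	/* prints 4096 */
	return 0;
}
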
44083diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
44084index fef20db..d28b1ab 100644
44085--- a/drivers/xen/xenfs/xenstored.c
44086+++ b/drivers/xen/xenfs/xenstored.c
44087@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
44088 static int xsd_kva_open(struct inode *inode, struct file *file)
44089 {
44090 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
44091+#ifdef CONFIG_GRKERNSEC_HIDESYM
44092+ NULL);
44093+#else
44094 xen_store_interface);
44095+#endif
44096+
44097 if (!file->private_data)
44098 return -ENOMEM;
44099 return 0;
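
CONFIG_GRKERNSEC_HIDESYM, used above and again in autofs4 below, exists to keep raw kernel virtual addresses out of userspace-visible strings, since a single leaked pointer can undo kernel address-space layout randomization. The bug class in miniature, as an ordinary userspace program (only an analogy — here it leaks a stack address of the process itself):

#include <stdio.h>

int main(void)
{
	int local;

	/* Printing a raw address reveals exactly the layout
	 * information that randomization was meant to hide. */
	printf("%p\n", (void *)&local);
	return 0;
}
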
44100diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
44101index 890bed5..17ae73e 100644
44102--- a/fs/9p/vfs_inode.c
44103+++ b/fs/9p/vfs_inode.c
44104@@ -1329,7 +1329,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
44105 void
44106 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
44107 {
44108- char *s = nd_get_link(nd);
44109+ const char *s = nd_get_link(nd);
44110
44111 p9_debug(P9_DEBUG_VFS, " %s %s\n",
44112 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
44113diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
44114index 0efd152..b5802ad 100644
44115--- a/fs/Kconfig.binfmt
44116+++ b/fs/Kconfig.binfmt
44117@@ -89,7 +89,7 @@ config HAVE_AOUT
44118
44119 config BINFMT_AOUT
44120 tristate "Kernel support for a.out and ECOFF binaries"
44121- depends on HAVE_AOUT
44122+ depends on HAVE_AOUT && BROKEN
44123 ---help---
44124 A.out (Assembler.OUTput) is a set of formats for libraries and
44125 executables used in the earliest versions of UNIX. Linux used
44126diff --git a/fs/aio.c b/fs/aio.c
44127index 71f613c..9d01f1f 100644
44128--- a/fs/aio.c
44129+++ b/fs/aio.c
44130@@ -111,7 +111,7 @@ static int aio_setup_ring(struct kioctx *ctx)
44131 size += sizeof(struct io_event) * nr_events;
44132 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
44133
44134- if (nr_pages < 0)
44135+ if (nr_pages <= 0)
44136 return -EINVAL;
44137
44138 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
44139@@ -1373,18 +1373,19 @@ static ssize_t aio_fsync(struct kiocb *iocb)
44140 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
44141 {
44142 ssize_t ret;
44143+ struct iovec iovstack;
44144
44145 #ifdef CONFIG_COMPAT
44146 if (compat)
44147 ret = compat_rw_copy_check_uvector(type,
44148 (struct compat_iovec __user *)kiocb->ki_buf,
44149- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
44150+ kiocb->ki_nbytes, 1, &iovstack,
44151 &kiocb->ki_iovec);
44152 else
44153 #endif
44154 ret = rw_copy_check_uvector(type,
44155 (struct iovec __user *)kiocb->ki_buf,
44156- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
44157+ kiocb->ki_nbytes, 1, &iovstack,
44158 &kiocb->ki_iovec);
44159 if (ret < 0)
44160 goto out;
44161@@ -1393,6 +1394,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
44162 if (ret < 0)
44163 goto out;
44164
44165+ if (kiocb->ki_iovec == &iovstack) {
44166+ kiocb->ki_inline_vec = iovstack;
44167+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
44168+ }
44169 kiocb->ki_nr_segs = kiocb->ki_nbytes;
44170 kiocb->ki_cur_seg = 0;
44171 /* ki_nbytes/left now reflect bytes instead of segs */
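
Two separate hardenings sit in the aio.c hunks above: the "nr_pages <= 0" change rejects the case where a huge nr_events makes the size arithmetic wrap so that nr_pages computes to zero, which the original "< 0" test let through; and the vectored-read/write setup now validates the user's iovec into a stack temporary, committing it to the long-lived kiocb only after the checks pass. A tiny demonstration of why zero must be rejected along with negative values (the page size is assumed for the demo):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	/* If the byte count wraps to a small value, the page-rounding
	 * arithmetic yields 0 -- non-negative, so "< 0" passes while
	 * "<= 0" correctly catches it. */
	size_t page_size = 4096, size = 0;	/* wrapped result */
	long nr_pages = (long)((size + page_size - 1) / page_size);
	printf("nr_pages = %ld\n", nr_pages);	/* prints 0 */
	return 0;
}
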
44172diff --git a/fs/attr.c b/fs/attr.c
44173index cce7df5..eaa2731 100644
44174--- a/fs/attr.c
44175+++ b/fs/attr.c
44176@@ -100,6 +100,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
44177 unsigned long limit;
44178
44179 limit = rlimit(RLIMIT_FSIZE);
44180+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
44181 if (limit != RLIM_INFINITY && offset > limit)
44182 goto out_sig;
44183 if (offset > inode->i_sb->s_maxbytes)
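
gr_learn_resource(), inserted beside the RLIMIT_FSIZE check above, feeds grsecurity's learning mode: each resource-limit decision is recorded so that an RBAC policy with tight per-subject limits can later be generated from observed behaviour. The limit being consulted, viewed from userspace:

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;

	/* The kernel compares a requested file size against
	 * RLIMIT_FSIZE; learning mode logs that comparison. */
	if (getrlimit(RLIMIT_FSIZE, &rl) == 0)
		printf("RLIMIT_FSIZE cur=%llu\n",
		       (unsigned long long)rl.rlim_cur);
	return 0;
}
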
44184diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
44185index dce436e..55e670d 100644
44186--- a/fs/autofs4/waitq.c
44187+++ b/fs/autofs4/waitq.c
44188@@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
44189 {
44190 unsigned long sigpipe, flags;
44191 mm_segment_t fs;
44192- const char *data = (const char *)addr;
44193+ const char __user *data = (const char __force_user *)addr;
44194 ssize_t wr = 0;
44195
44196 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
44197@@ -347,6 +347,10 @@ static int validate_request(struct autofs_wait_queue **wait,
44198 return 1;
44199 }
44200
44201+#ifdef CONFIG_GRKERNSEC_HIDESYM
44202+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
44203+#endif
44204+
44205 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
44206 enum autofs_notify notify)
44207 {
44208@@ -380,7 +384,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
44209
44210 /* If this is a direct mount request create a dummy name */
44211 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
44212+#ifdef CONFIG_GRKERNSEC_HIDESYM
44213+ /* this name does get written to userland via autofs4_write() */
44214+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
44215+#else
44216 qstr.len = sprintf(name, "%p", dentry);
44217+#endif
44218 else {
44219 qstr.len = autofs4_getpath(sbi, dentry, &name);
44220 if (!qstr.len) {
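
Here HIDESYM replaces a "%p"-formatted dentry pointer — which autofs4_write() ships verbatim to the userspace automount daemon — with a monotonically increasing counter: still unique per direct-mount request, but revealing nothing about kernel memory. A freestanding sketch of the substitution using C11 atomics (names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint dummy_name_id;

/* "%08x" of a counter in place of "%p" of a kernel address:
 * unique enough to name the request, useless to an attacker. */
static int make_dummy_name(char name[static 9])
{
	return sprintf(name, "%08x",
		       atomic_fetch_add(&dummy_name_id, 1u) + 1u);
}
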
44221diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
44222index 2b3bda8..6a2d4be 100644
44223--- a/fs/befs/linuxvfs.c
44224+++ b/fs/befs/linuxvfs.c
44225@@ -510,7 +510,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
44226 {
44227 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
44228 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
44229- char *link = nd_get_link(nd);
44230+ const char *link = nd_get_link(nd);
44231 if (!IS_ERR(link))
44232 kfree(link);
44233 }
44234diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
44235index 0e7a6f8..332b1ca 100644
44236--- a/fs/binfmt_aout.c
44237+++ b/fs/binfmt_aout.c
44238@@ -16,6 +16,7 @@
44239 #include <linux/string.h>
44240 #include <linux/fs.h>
44241 #include <linux/file.h>
44242+#include <linux/security.h>
44243 #include <linux/stat.h>
44244 #include <linux/fcntl.h>
44245 #include <linux/ptrace.h>
44246@@ -59,6 +60,8 @@ static int aout_core_dump(struct coredump_params *cprm)
44247 #endif
44248 # define START_STACK(u) ((void __user *)u.start_stack)
44249
44250+ memset(&dump, 0, sizeof(dump));
44251+
44252 fs = get_fs();
44253 set_fs(KERNEL_DS);
44254 has_dumped = 1;
44255@@ -70,10 +73,12 @@ static int aout_core_dump(struct coredump_params *cprm)
44256
44257 /* If the size of the dump file exceeds the rlimit, then see what would happen
44258 if we wrote the stack, but not the data area. */
44259+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
44260 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
44261 dump.u_dsize = 0;
44262
44263 /* Make sure we have enough room to write the stack and data areas. */
44264+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
44265 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
44266 dump.u_ssize = 0;
44267
44268@@ -233,6 +238,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
44269 rlim = rlimit(RLIMIT_DATA);
44270 if (rlim >= RLIM_INFINITY)
44271 rlim = ~0;
44272+
44273+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
44274 if (ex.a_data + ex.a_bss > rlim)
44275 return -ENOMEM;
44276
44277@@ -267,6 +274,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
44278
44279 install_exec_creds(bprm);
44280
44281+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44282+ current->mm->pax_flags = 0UL;
44283+#endif
44284+
44285+#ifdef CONFIG_PAX_PAGEEXEC
44286+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
44287+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
44288+
44289+#ifdef CONFIG_PAX_EMUTRAMP
44290+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
44291+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
44292+#endif
44293+
44294+#ifdef CONFIG_PAX_MPROTECT
44295+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
44296+ current->mm->pax_flags |= MF_PAX_MPROTECT;
44297+#endif
44298+
44299+ }
44300+#endif
44301+
44302 if (N_MAGIC(ex) == OMAGIC) {
44303 unsigned long text_addr, map_size;
44304 loff_t pos;
44305@@ -332,7 +360,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
44306 }
44307
44308 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
44309- PROT_READ | PROT_WRITE | PROT_EXEC,
44310+ PROT_READ | PROT_WRITE,
44311 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
44312 fd_offset + ex.a_text);
44313 if (error != N_DATADDR(ex)) {
44314diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
44315index fbd9f60..0b845dd 100644
44316--- a/fs/binfmt_elf.c
44317+++ b/fs/binfmt_elf.c
44318@@ -33,6 +33,7 @@
44319 #include <linux/elf.h>
44320 #include <linux/utsname.h>
44321 #include <linux/coredump.h>
44322+#include <linux/xattr.h>
44323 #include <asm/uaccess.h>
44324 #include <asm/param.h>
44325 #include <asm/page.h>
44326@@ -59,6 +60,10 @@ static int elf_core_dump(struct coredump_params *cprm);
44327 #define elf_core_dump NULL
44328 #endif
44329
44330+#ifdef CONFIG_PAX_MPROTECT
44331+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
44332+#endif
44333+
44334 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
44335 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
44336 #else
44337@@ -78,6 +83,11 @@ static struct linux_binfmt elf_format = {
44338 .load_binary = load_elf_binary,
44339 .load_shlib = load_elf_library,
44340 .core_dump = elf_core_dump,
44341+
44342+#ifdef CONFIG_PAX_MPROTECT
44343+ .handle_mprotect= elf_handle_mprotect,
44344+#endif
44345+
44346 .min_coredump = ELF_EXEC_PAGESIZE,
44347 };
44348
44349@@ -85,6 +95,8 @@ static struct linux_binfmt elf_format = {
44350
44351 static int set_brk(unsigned long start, unsigned long end)
44352 {
44353+ unsigned long e = end;
44354+
44355 start = ELF_PAGEALIGN(start);
44356 end = ELF_PAGEALIGN(end);
44357 if (end > start) {
44358@@ -93,7 +105,7 @@ static int set_brk(unsigned long start, unsigned long end)
44359 if (BAD_ADDR(addr))
44360 return addr;
44361 }
44362- current->mm->start_brk = current->mm->brk = end;
44363+ current->mm->start_brk = current->mm->brk = e;
44364 return 0;
44365 }
44366
44367@@ -154,12 +166,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
44368 elf_addr_t __user *u_rand_bytes;
44369 const char *k_platform = ELF_PLATFORM;
44370 const char *k_base_platform = ELF_BASE_PLATFORM;
44371- unsigned char k_rand_bytes[16];
44372+ u32 k_rand_bytes[4];
44373 int items;
44374 elf_addr_t *elf_info;
44375 int ei_index = 0;
44376 const struct cred *cred = current_cred();
44377 struct vm_area_struct *vma;
44378+ unsigned long saved_auxv[AT_VECTOR_SIZE];
44379
44380 /*
44381 * In some cases (e.g. Hyper-Threading), we want to avoid L1
44382@@ -201,8 +214,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
44383 * Generate 16 random bytes for userspace PRNG seeding.
44384 */
44385 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
44386- u_rand_bytes = (elf_addr_t __user *)
44387- STACK_ALLOC(p, sizeof(k_rand_bytes));
44388+ srandom32(k_rand_bytes[0] ^ random32());
44389+ srandom32(k_rand_bytes[1] ^ random32());
44390+ srandom32(k_rand_bytes[2] ^ random32());
44391+ srandom32(k_rand_bytes[3] ^ random32());
44392+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
44393+ u_rand_bytes = (elf_addr_t __user *) p;
44394 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
44395 return -EFAULT;
44396
44397@@ -314,9 +331,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
44398 return -EFAULT;
44399 current->mm->env_end = p;
44400
44401+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
44402+
44403 /* Put the elf_info on the stack in the right place. */
44404 sp = (elf_addr_t __user *)envp + 1;
44405- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
44406+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
44407 return -EFAULT;
44408 return 0;
44409 }
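
The k_rand_bytes changes above concern AT_RANDOM: the 16 bytes of kernel entropy placed on the new program's stack during execve() and advertised through the auxiliary vector, which glibc consumes for its stack canary and pointer guard. The patch additionally folds those bytes into srandom32() and positions them with an explicit STACK_ROUND instead of STACK_ALLOC. What the receiving side sees, on a glibc system:

#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	/* AT_RANDOM points at the 16 random bytes the kernel wrote
	 * onto this process's stack during execve(). */
	const unsigned char *r =
		(const unsigned char *)getauxval(AT_RANDOM);

	for (int i = 0; r && i < 16; i++)
		printf("%02x", r[i]);
	putchar('\n');
	return 0;
}
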
44410@@ -380,15 +399,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
44411 an ELF header */
44412
44413 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
44414- struct file *interpreter, unsigned long *interp_map_addr,
44415- unsigned long no_base)
44416+ struct file *interpreter, unsigned long no_base)
44417 {
44418 struct elf_phdr *elf_phdata;
44419 struct elf_phdr *eppnt;
44420- unsigned long load_addr = 0;
44421+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
44422 int load_addr_set = 0;
44423 unsigned long last_bss = 0, elf_bss = 0;
44424- unsigned long error = ~0UL;
44425+ unsigned long error = -EINVAL;
44426 unsigned long total_size;
44427 int retval, i, size;
44428
44429@@ -434,6 +452,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
44430 goto out_close;
44431 }
44432
44433+#ifdef CONFIG_PAX_SEGMEXEC
44434+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
44435+ pax_task_size = SEGMEXEC_TASK_SIZE;
44436+#endif
44437+
44438 eppnt = elf_phdata;
44439 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
44440 if (eppnt->p_type == PT_LOAD) {
44441@@ -457,8 +480,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
44442 map_addr = elf_map(interpreter, load_addr + vaddr,
44443 eppnt, elf_prot, elf_type, total_size);
44444 total_size = 0;
44445- if (!*interp_map_addr)
44446- *interp_map_addr = map_addr;
44447 error = map_addr;
44448 if (BAD_ADDR(map_addr))
44449 goto out_close;
44450@@ -477,8 +498,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
44451 k = load_addr + eppnt->p_vaddr;
44452 if (BAD_ADDR(k) ||
44453 eppnt->p_filesz > eppnt->p_memsz ||
44454- eppnt->p_memsz > TASK_SIZE ||
44455- TASK_SIZE - eppnt->p_memsz < k) {
44456+ eppnt->p_memsz > pax_task_size ||
44457+ pax_task_size - eppnt->p_memsz < k) {
44458 error = -ENOMEM;
44459 goto out_close;
44460 }
44461@@ -530,6 +551,315 @@ out:
44462 return error;
44463 }
44464
44465+#ifdef CONFIG_PAX_PT_PAX_FLAGS
44466+#ifdef CONFIG_PAX_SOFTMODE
44467+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
44468+{
44469+ unsigned long pax_flags = 0UL;
44470+
44471+#ifdef CONFIG_PAX_PAGEEXEC
44472+ if (elf_phdata->p_flags & PF_PAGEEXEC)
44473+ pax_flags |= MF_PAX_PAGEEXEC;
44474+#endif
44475+
44476+#ifdef CONFIG_PAX_SEGMEXEC
44477+ if (elf_phdata->p_flags & PF_SEGMEXEC)
44478+ pax_flags |= MF_PAX_SEGMEXEC;
44479+#endif
44480+
44481+#ifdef CONFIG_PAX_EMUTRAMP
44482+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
44483+ pax_flags |= MF_PAX_EMUTRAMP;
44484+#endif
44485+
44486+#ifdef CONFIG_PAX_MPROTECT
44487+ if (elf_phdata->p_flags & PF_MPROTECT)
44488+ pax_flags |= MF_PAX_MPROTECT;
44489+#endif
44490+
44491+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
44492+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
44493+ pax_flags |= MF_PAX_RANDMMAP;
44494+#endif
44495+
44496+ return pax_flags;
44497+}
44498+#endif
44499+
44500+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
44501+{
44502+ unsigned long pax_flags = 0UL;
44503+
44504+#ifdef CONFIG_PAX_PAGEEXEC
44505+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
44506+ pax_flags |= MF_PAX_PAGEEXEC;
44507+#endif
44508+
44509+#ifdef CONFIG_PAX_SEGMEXEC
44510+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
44511+ pax_flags |= MF_PAX_SEGMEXEC;
44512+#endif
44513+
44514+#ifdef CONFIG_PAX_EMUTRAMP
44515+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
44516+ pax_flags |= MF_PAX_EMUTRAMP;
44517+#endif
44518+
44519+#ifdef CONFIG_PAX_MPROTECT
44520+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
44521+ pax_flags |= MF_PAX_MPROTECT;
44522+#endif
44523+
44524+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
44525+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
44526+ pax_flags |= MF_PAX_RANDMMAP;
44527+#endif
44528+
44529+ return pax_flags;
44530+}
44531+#endif
44532+
44533+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
44534+#ifdef CONFIG_PAX_SOFTMODE
44535+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
44536+{
44537+ unsigned long pax_flags = 0UL;
44538+
44539+#ifdef CONFIG_PAX_PAGEEXEC
44540+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
44541+ pax_flags |= MF_PAX_PAGEEXEC;
44542+#endif
44543+
44544+#ifdef CONFIG_PAX_SEGMEXEC
44545+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
44546+ pax_flags |= MF_PAX_SEGMEXEC;
44547+#endif
44548+
44549+#ifdef CONFIG_PAX_EMUTRAMP
44550+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
44551+ pax_flags |= MF_PAX_EMUTRAMP;
44552+#endif
44553+
44554+#ifdef CONFIG_PAX_MPROTECT
44555+ if (pax_flags_softmode & MF_PAX_MPROTECT)
44556+ pax_flags |= MF_PAX_MPROTECT;
44557+#endif
44558+
44559+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
44560+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
44561+ pax_flags |= MF_PAX_RANDMMAP;
44562+#endif
44563+
44564+ return pax_flags;
44565+}
44566+#endif
44567+
44568+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
44569+{
44570+ unsigned long pax_flags = 0UL;
44571+
44572+#ifdef CONFIG_PAX_PAGEEXEC
44573+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
44574+ pax_flags |= MF_PAX_PAGEEXEC;
44575+#endif
44576+
44577+#ifdef CONFIG_PAX_SEGMEXEC
44578+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
44579+ pax_flags |= MF_PAX_SEGMEXEC;
44580+#endif
44581+
44582+#ifdef CONFIG_PAX_EMUTRAMP
44583+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
44584+ pax_flags |= MF_PAX_EMUTRAMP;
44585+#endif
44586+
44587+#ifdef CONFIG_PAX_MPROTECT
44588+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
44589+ pax_flags |= MF_PAX_MPROTECT;
44590+#endif
44591+
44592+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
44593+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
44594+ pax_flags |= MF_PAX_RANDMMAP;
44595+#endif
44596+
44597+ return pax_flags;
44598+}
44599+#endif
44600+
44601+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44602+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
44603+{
44604+ unsigned long pax_flags = 0UL;
44605+
44606+#ifdef CONFIG_PAX_EI_PAX
44607+
44608+#ifdef CONFIG_PAX_PAGEEXEC
44609+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
44610+ pax_flags |= MF_PAX_PAGEEXEC;
44611+#endif
44612+
44613+#ifdef CONFIG_PAX_SEGMEXEC
44614+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
44615+ pax_flags |= MF_PAX_SEGMEXEC;
44616+#endif
44617+
44618+#ifdef CONFIG_PAX_EMUTRAMP
44619+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
44620+ pax_flags |= MF_PAX_EMUTRAMP;
44621+#endif
44622+
44623+#ifdef CONFIG_PAX_MPROTECT
44624+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
44625+ pax_flags |= MF_PAX_MPROTECT;
44626+#endif
44627+
44628+#ifdef CONFIG_PAX_ASLR
44629+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
44630+ pax_flags |= MF_PAX_RANDMMAP;
44631+#endif
44632+
44633+#else
44634+
44635+#ifdef CONFIG_PAX_PAGEEXEC
44636+ pax_flags |= MF_PAX_PAGEEXEC;
44637+#endif
44638+
44639+#ifdef CONFIG_PAX_SEGMEXEC
44640+ pax_flags |= MF_PAX_SEGMEXEC;
44641+#endif
44642+
44643+#ifdef CONFIG_PAX_MPROTECT
44644+ pax_flags |= MF_PAX_MPROTECT;
44645+#endif
44646+
44647+#ifdef CONFIG_PAX_RANDMMAP
44648+ if (randomize_va_space)
44649+ pax_flags |= MF_PAX_RANDMMAP;
44650+#endif
44651+
44652+#endif
44653+
44654+ return pax_flags;
44655+}
44656+
44657+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
44658+{
44659+
44660+#ifdef CONFIG_PAX_PT_PAX_FLAGS
44661+ unsigned long i;
44662+
44663+ for (i = 0UL; i < elf_ex->e_phnum; i++)
44664+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
44665+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
44666+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
44667+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
44668+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
44669+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
44670+ return ~0UL;
44671+
44672+#ifdef CONFIG_PAX_SOFTMODE
44673+ if (pax_softmode)
44674+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
44675+ else
44676+#endif
44677+
44678+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
44679+ break;
44680+ }
44681+#endif
44682+
44683+ return ~0UL;
44684+}
44685+
44686+static unsigned long pax_parse_xattr_pax(struct file * const file)
44687+{
44688+
44689+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
44690+ ssize_t xattr_size, i;
44691+ unsigned char xattr_value[5];
44692+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
44693+
44694+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
44695+ if (xattr_size <= 0 || xattr_size > 5)
44696+ return ~0UL;
44697+
44698+ for (i = 0; i < xattr_size; i++)
44699+ switch (xattr_value[i]) {
44700+ default:
44701+ return ~0UL;
44702+
44703+#define parse_flag(option1, option2, flag) \
44704+ case option1: \
44705+ if (pax_flags_hardmode & MF_PAX_##flag) \
44706+ return ~0UL; \
44707+ pax_flags_hardmode |= MF_PAX_##flag; \
44708+ break; \
44709+ case option2: \
44710+ if (pax_flags_softmode & MF_PAX_##flag) \
44711+ return ~0UL; \
44712+ pax_flags_softmode |= MF_PAX_##flag; \
44713+ break;
44714+
44715+ parse_flag('p', 'P', PAGEEXEC);
44716+ parse_flag('e', 'E', EMUTRAMP);
44717+ parse_flag('m', 'M', MPROTECT);
44718+ parse_flag('r', 'R', RANDMMAP);
44719+ parse_flag('s', 'S', SEGMEXEC);
44720+
44721+#undef parse_flag
44722+ }
44723+
44724+ if (pax_flags_hardmode & pax_flags_softmode)
44725+ return ~0UL;
44726+
44727+#ifdef CONFIG_PAX_SOFTMODE
44728+ if (pax_softmode)
44729+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
44730+ else
44731+#endif
44732+
44733+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
44734+#else
44735+ return ~0UL;
44736+#endif
44737+
44738+}
44739+
44740+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
44741+{
44742+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
44743+
44744+ pax_flags = pax_parse_ei_pax(elf_ex);
44745+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
44746+ xattr_pax_flags = pax_parse_xattr_pax(file);
44747+
44748+ if (pt_pax_flags == ~0UL)
44749+ pt_pax_flags = xattr_pax_flags;
44750+ else if (xattr_pax_flags == ~0UL)
44751+ xattr_pax_flags = pt_pax_flags;
44752+ if (pt_pax_flags != xattr_pax_flags)
44753+ return -EINVAL;
44754+ if (pt_pax_flags != ~0UL)
44755+ pax_flags = pt_pax_flags;
44756+
44757+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
44758+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
44759+ if ((__supported_pte_mask & _PAGE_NX))
44760+ pax_flags &= ~MF_PAX_SEGMEXEC;
44761+ else
44762+ pax_flags &= ~MF_PAX_PAGEEXEC;
44763+ }
44764+#endif
44765+
44766+ if (0 > pax_check_flags(&pax_flags))
44767+ return -EINVAL;
44768+
44769+ current->mm->pax_flags = pax_flags;
44770+ return 0;
44771+}
44772+#endif
44773+
44774 /*
44775 * These are the functions used to load ELF style executables and shared
44776 * libraries. There is no binary dependent code anywhere else.
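
The xattr parser above turns a short string (at most 5 bytes, one character per feature) into two bitmasks: a lowercase letter force-disables a feature under hardmode, its uppercase variant force-enables it under softmode, and any duplicate or contradictory pair is rejected with ~0UL. A minimal user-space sketch of the same state machine; the MF_* masks below are illustrative stand-ins for the kernel's MF_PAX_* bits:

/* Sketch of the PT_PAX/xattr flag-string parser (user-space model). */
#include <stdio.h>

#define MF_PAGEEXEC (1UL << 0)
#define MF_EMUTRAMP (1UL << 1)
#define MF_MPROTECT (1UL << 2)
#define MF_RANDMMAP (1UL << 3)
#define MF_SEGMEXEC (1UL << 4)

/* Returns 0 on success, -1 on a malformed or self-contradictory string. */
static int parse_pax_xattr(const char *s, unsigned long *disable,
                           unsigned long *enable)
{
    *disable = *enable = 0UL;
    for (; *s; s++) {
        unsigned long bit;
        switch (*s | 0x20) {            /* fold to lowercase to pick the bit */
        case 'p': bit = MF_PAGEEXEC; break;
        case 'e': bit = MF_EMUTRAMP; break;
        case 'm': bit = MF_MPROTECT; break;
        case 'r': bit = MF_RANDMMAP; break;
        case 's': bit = MF_SEGMEXEC; break;
        default:  return -1;            /* unknown character */
        }
        unsigned long *set = (*s & 0x20) ? disable : enable; /* lower = disable */
        if (*set & bit)
            return -1;                  /* same flag given twice */
        *set |= bit;
    }
    return (*disable & *enable) ? -1 : 0; /* 'p' and 'P' together is invalid */
}

int main(void)
{
    unsigned long dis, en;
    if (parse_pax_xattr("Pm", &dis, &en) == 0)
        printf("enable=%#lx disable=%#lx\n", en, dis);
    return 0;
}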
44777@@ -546,6 +876,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
44778 {
44779 unsigned int random_variable = 0;
44780
44781+#ifdef CONFIG_PAX_RANDUSTACK
44782+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
44783+ return stack_top - current->mm->delta_stack;
44784+#endif
44785+
44786 if ((current->flags & PF_RANDOMIZE) &&
44787 !(current->personality & ADDR_NO_RANDOMIZE)) {
44788 random_variable = get_random_int() & STACK_RND_MASK;
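
Under CONFIG_PAX_RANDUSTACK the hunk above short-circuits randomize_stack_top(): instead of re-rolling get_random_int() here, it subtracts mm->delta_stack, which was fixed once at exec time (see the ASLR hunk further down). A sketch contrasting the two paths; STACK_RND_MASK is shown with its x86-32 value and all constants are illustrative:

#include <stdio.h>

#define PAGE_SHIFT     12
#define STACK_RND_MASK 0x7ffUL   /* x86-32 value: 11 bits, an 8 MiB window */

/* delta_stack models mm->delta_stack, chosen once at exec time under PaX. */
static unsigned long randomized_top(unsigned long stack_top, unsigned long rnd,
                                    unsigned long delta_stack, int pax_randmmap)
{
    if (pax_randmmap)
        return stack_top - delta_stack;                        /* PaX path */
    return stack_top - ((rnd & STACK_RND_MASK) << PAGE_SHIFT); /* vanilla */
}

int main(void)
{
    unsigned long top = 0xc0000000UL;
    printf("vanilla: %#lx\n", randomized_top(top, 0x1234, 0, 0));
    printf("pax:     %#lx\n", randomized_top(top, 0, 0x2000000UL, 1));
    return 0;
}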
44789@@ -564,7 +899,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
44790 unsigned long load_addr = 0, load_bias = 0;
44791 int load_addr_set = 0;
44792 char * elf_interpreter = NULL;
44793- unsigned long error;
44794+ unsigned long error = 0;
44795 struct elf_phdr *elf_ppnt, *elf_phdata;
44796 unsigned long elf_bss, elf_brk;
44797 int retval, i;
44798@@ -574,11 +909,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
44799 unsigned long start_code, end_code, start_data, end_data;
44800 unsigned long reloc_func_desc __maybe_unused = 0;
44801 int executable_stack = EXSTACK_DEFAULT;
44802- unsigned long def_flags = 0;
44803 struct {
44804 struct elfhdr elf_ex;
44805 struct elfhdr interp_elf_ex;
44806 } *loc;
44807+ unsigned long pax_task_size = TASK_SIZE;
44808
44809 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
44810 if (!loc) {
44811@@ -714,11 +1049,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
44812 goto out_free_dentry;
44813
44814 /* OK, This is the point of no return */
44815- current->mm->def_flags = def_flags;
44816+
44817+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44818+ current->mm->pax_flags = 0UL;
44819+#endif
44820+
44821+#ifdef CONFIG_PAX_DLRESOLVE
44822+ current->mm->call_dl_resolve = 0UL;
44823+#endif
44824+
44825+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
44826+ current->mm->call_syscall = 0UL;
44827+#endif
44828+
44829+#ifdef CONFIG_PAX_ASLR
44830+ current->mm->delta_mmap = 0UL;
44831+ current->mm->delta_stack = 0UL;
44832+#endif
44833+
44834+ current->mm->def_flags = 0;
44835+
44836+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44837+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
44838+ send_sig(SIGKILL, current, 0);
44839+ goto out_free_dentry;
44840+ }
44841+#endif
44842+
44843+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
44844+ pax_set_initial_flags(bprm);
44845+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
44846+ if (pax_set_initial_flags_func)
44847+ (pax_set_initial_flags_func)(bprm);
44848+#endif
44849+
44850+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
44851+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
44852+ current->mm->context.user_cs_limit = PAGE_SIZE;
44853+ current->mm->def_flags |= VM_PAGEEXEC;
44854+ }
44855+#endif
44856+
44857+#ifdef CONFIG_PAX_SEGMEXEC
44858+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
44859+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
44860+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
44861+ pax_task_size = SEGMEXEC_TASK_SIZE;
44862+ current->mm->def_flags |= VM_NOHUGEPAGE;
44863+ }
44864+#endif
44865+
44866+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
44867+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
44868+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
44869+ put_cpu();
44870+ }
44871+#endif
44872
44873 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
44874 may depend on the personality. */
44875 SET_PERSONALITY(loc->elf_ex);
44876+
44877+#ifdef CONFIG_PAX_ASLR
44878+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
44879+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
44880+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
44881+ }
44882+#endif
44883+
44884+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
44885+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
44886+ executable_stack = EXSTACK_DISABLE_X;
44887+ current->personality &= ~READ_IMPLIES_EXEC;
44888+ } else
44889+#endif
44890+
44891 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
44892 current->personality |= READ_IMPLIES_EXEC;
44893
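
This hunk rolls both ASLR deltas exactly once, at the point of no return: delta_mmap biases later mmap() placements and delta_stack feeds randomize_stack_top() above. Each delta is PAX_DELTA_*_LEN random bits scaled to page granularity. A sketch of the generation step; the bit widths are assumptions (the real values are per-architecture) and random() stands in for pax_get_random_long():

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT          12
#define PAX_DELTA_MMAP_LEN  16   /* hypothetical width, really per-arch */
#define PAX_DELTA_STACK_LEN 16   /* hypothetical width, really per-arch */

static unsigned long pax_delta(unsigned bits)
{
    return ((unsigned long)random() & ((1UL << bits) - 1)) << PAGE_SHIFT;
}

int main(void)
{
    srandom(1);
    unsigned long delta_mmap  = pax_delta(PAX_DELTA_MMAP_LEN);
    unsigned long delta_stack = pax_delta(PAX_DELTA_STACK_LEN);
    /* 16 bits shifted by 12: each delta lands page-aligned in a 256 MiB range. */
    printf("delta_mmap=%#lx delta_stack=%#lx\n", delta_mmap, delta_stack);
    return 0;
}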
44894@@ -809,6 +1214,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
44895 #else
44896 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
44897 #endif
44898+
44899+#ifdef CONFIG_PAX_RANDMMAP
44900+ /* PaX: randomize base address at the default exe base if requested */
44901+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
44902+#ifdef CONFIG_SPARC64
44903+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
44904+#else
44905+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
44906+#endif
44907+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
44908+ elf_flags |= MAP_FIXED;
44909+ }
44910+#endif
44911+
44912 }
44913
44914 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
44915@@ -841,9 +1260,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
44916 * allowed task size. Note that p_filesz must always be
44917 * <= p_memsz so it is only necessary to check p_memsz.
44918 */
44919- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
44920- elf_ppnt->p_memsz > TASK_SIZE ||
44921- TASK_SIZE - elf_ppnt->p_memsz < k) {
44922+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
44923+ elf_ppnt->p_memsz > pax_task_size ||
44924+ pax_task_size - elf_ppnt->p_memsz < k) {
44925 /* set_brk can never work. Avoid overflows. */
44926 send_sig(SIGKILL, current, 0);
44927 retval = -EINVAL;
44928@@ -882,17 +1301,44 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
44929 goto out_free_dentry;
44930 }
44931 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
44932- send_sig(SIGSEGV, current, 0);
44933- retval = -EFAULT; /* Nobody gets to see this, but.. */
44934- goto out_free_dentry;
44935+ /*
44936+ * This bss-zeroing can fail if the ELF
44937+ * file specifies odd protections. So
44938+ * we don't check the return value
44939+ */
44940 }
44941
44942+#ifdef CONFIG_PAX_RANDMMAP
44943+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
44944+ unsigned long start, size;
44945+
44946+ start = ELF_PAGEALIGN(elf_brk);
44947+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
44948+ down_read(&current->mm->mmap_sem);
44949+ retval = -ENOMEM;
44950+ if (!find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
44951+ unsigned long prot = PROT_NONE;
44952+
44953+ up_read(&current->mm->mmap_sem);
44954+ current->mm->brk_gap = PAGE_ALIGN(size) >> PAGE_SHIFT;
44955+// if (current->personality & ADDR_NO_RANDOMIZE)
44956+// prot = PROT_READ;
44957+ start = vm_mmap(NULL, start, size, prot, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, 0);
44958+ retval = IS_ERR_VALUE(start) ? start : 0;
44959+ } else
44960+ up_read(&current->mm->mmap_sem);
44961+ if (retval == 0)
44962+ retval = set_brk(start + size, start + size + PAGE_SIZE);
44963+ if (retval < 0) {
44964+ send_sig(SIGKILL, current, 0);
44965+ goto out_free_dentry;
44966+ }
44967+ }
44968+#endif
44969+
44970 if (elf_interpreter) {
44971- unsigned long interp_map_addr = 0;
44972-
44973 elf_entry = load_elf_interp(&loc->interp_elf_ex,
44974 interpreter,
44975- &interp_map_addr,
44976 load_bias);
44977 if (!IS_ERR((void *)elf_entry)) {
44978 /*
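
The CONFIG_PAX_RANDMMAP block above reserves a randomly sized gap right after the brk: one page plus up to roughly 64 MiB ((2^22 - 1) 16-byte units), mapped PROT_NONE so heap-relative offsets cannot be turned into reliable pointers at later mappings. A user-space sketch of the reservation; mmap() with a kernel-chosen address stands in for the MAP_FIXED vm_mmap() call in the patch:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
    size_t psz = (size_t)sysconf(_SC_PAGESIZE);
    /* One page plus up to (2^22 - 1) 16-byte units, as in the patch. */
    size_t gap = psz + (((size_t)rand() & ((1UL << 22) - 1)) << 4);
    gap = (gap + psz - 1) & ~(psz - 1);           /* page-align the size */

    /* The kernel maps this MAP_FIXED right after ELF_PAGEALIGN(elf_brk);
       here we let the system pick the address. */
    void *guard = mmap(NULL, gap, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (guard == MAP_FAILED) { perror("mmap"); return 1; }
    printf("guard region at %p, %zu bytes\n", guard, gap);
    return 0;
}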
44979@@ -1114,7 +1560,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
44980 * Decide what to dump of a segment, part, all or none.
44981 */
44982 static unsigned long vma_dump_size(struct vm_area_struct *vma,
44983- unsigned long mm_flags)
44984+ unsigned long mm_flags, long signr)
44985 {
44986 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
44987
44988@@ -1151,7 +1597,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
44989 if (vma->vm_file == NULL)
44990 return 0;
44991
44992- if (FILTER(MAPPED_PRIVATE))
44993+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
44994 goto whole;
44995
44996 /*
44997@@ -1373,9 +1819,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
44998 {
44999 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
45000 int i = 0;
45001- do
45002+ do {
45003 i += 2;
45004- while (auxv[i - 2] != AT_NULL);
45005+ } while (auxv[i - 2] != AT_NULL);
45006 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
45007 }
45008
45009@@ -2003,14 +2449,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
45010 }
45011
45012 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
45013- unsigned long mm_flags)
45014+ struct coredump_params *cprm)
45015 {
45016 struct vm_area_struct *vma;
45017 size_t size = 0;
45018
45019 for (vma = first_vma(current, gate_vma); vma != NULL;
45020 vma = next_vma(vma, gate_vma))
45021- size += vma_dump_size(vma, mm_flags);
45022+ size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
45023 return size;
45024 }
45025
45026@@ -2104,7 +2550,7 @@ static int elf_core_dump(struct coredump_params *cprm)
45027
45028 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
45029
45030- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
45031+ offset += elf_core_vma_data_size(gate_vma, cprm);
45032 offset += elf_core_extra_data_size();
45033 e_shoff = offset;
45034
45035@@ -2118,10 +2564,12 @@ static int elf_core_dump(struct coredump_params *cprm)
45036 offset = dataoff;
45037
45038 size += sizeof(*elf);
45039+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
45040 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
45041 goto end_coredump;
45042
45043 size += sizeof(*phdr4note);
45044+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
45045 if (size > cprm->limit
45046 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
45047 goto end_coredump;
45048@@ -2135,7 +2583,7 @@ static int elf_core_dump(struct coredump_params *cprm)
45049 phdr.p_offset = offset;
45050 phdr.p_vaddr = vma->vm_start;
45051 phdr.p_paddr = 0;
45052- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
45053+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
45054 phdr.p_memsz = vma->vm_end - vma->vm_start;
45055 offset += phdr.p_filesz;
45056 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
45057@@ -2146,6 +2594,7 @@ static int elf_core_dump(struct coredump_params *cprm)
45058 phdr.p_align = ELF_EXEC_PAGESIZE;
45059
45060 size += sizeof(phdr);
45061+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
45062 if (size > cprm->limit
45063 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
45064 goto end_coredump;
45065@@ -2170,7 +2619,7 @@ static int elf_core_dump(struct coredump_params *cprm)
45066 unsigned long addr;
45067 unsigned long end;
45068
45069- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
45070+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
45071
45072 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
45073 struct page *page;
45074@@ -2179,6 +2628,7 @@ static int elf_core_dump(struct coredump_params *cprm)
45075 page = get_dump_page(addr);
45076 if (page) {
45077 void *kaddr = kmap(page);
45078+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
45079 stop = ((size += PAGE_SIZE) > cprm->limit) ||
45080 !dump_write(cprm->file, kaddr,
45081 PAGE_SIZE);
45082@@ -2196,6 +2646,7 @@ static int elf_core_dump(struct coredump_params *cprm)
45083
45084 if (e_phnum == PN_XNUM) {
45085 size += sizeof(*shdr4extnum);
45086+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
45087 if (size > cprm->limit
45088 || !dump_write(cprm->file, shdr4extnum,
45089 sizeof(*shdr4extnum)))
45090@@ -2216,6 +2667,97 @@ out:
45091
45092 #endif /* CONFIG_ELF_CORE */
45093
45094+#ifdef CONFIG_PAX_MPROTECT
45095+/* PaX: non-PIC ELF libraries need relocations on their executable segments,
45096+ * so we'll grant them VM_MAYWRITE once during their life. Similarly,
45097+ * we'll remove VM_MAYWRITE for good on RELRO segments.
45098+ *
45099+ * The checks favour ld-linux.so behaviour, which operates on a per-ELF-segment
45100+ * basis, because we want to allow the common case and not the special ones.
45101+ */
45102+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
45103+{
45104+ struct elfhdr elf_h;
45105+ struct elf_phdr elf_p;
45106+ unsigned long i;
45107+ unsigned long oldflags;
45108+ bool is_textrel_rw, is_textrel_rx, is_relro;
45109+
45110+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
45111+ return;
45112+
45113+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
45114+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
45115+
45116+#ifdef CONFIG_PAX_ELFRELOCS
45117+ /* possible TEXTREL */
45118+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
45119+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
45120+#else
45121+ is_textrel_rw = false;
45122+ is_textrel_rx = false;
45123+#endif
45124+
45125+ /* possible RELRO */
45126+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
45127+
45128+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
45129+ return;
45130+
45131+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
45132+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
45133+
45134+#ifdef CONFIG_PAX_ETEXECRELOCS
45135+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
45136+#else
45137+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
45138+#endif
45139+
45140+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
45141+ !elf_check_arch(&elf_h) ||
45142+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
45143+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
45144+ return;
45145+
45146+ for (i = 0UL; i < elf_h.e_phnum; i++) {
45147+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
45148+ return;
45149+ switch (elf_p.p_type) {
45150+ case PT_DYNAMIC:
45151+ if (!is_textrel_rw && !is_textrel_rx)
45152+ continue;
45153+ i = 0UL;
45154+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
45155+ elf_dyn dyn;
45156+
45157+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
45158+ return;
45159+ if (dyn.d_tag == DT_NULL)
45160+ return;
45161+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
45162+ gr_log_textrel(vma);
45163+ if (is_textrel_rw)
45164+ vma->vm_flags |= VM_MAYWRITE;
45165+ else
45166+				/* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
45167+ vma->vm_flags &= ~VM_MAYWRITE;
45168+ return;
45169+ }
45170+ i++;
45171+ }
45172+ return;
45173+
45174+ case PT_GNU_RELRO:
45175+ if (!is_relro)
45176+ continue;
45177+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
45178+ vma->vm_flags &= ~VM_MAYWRITE;
45179+ return;
45180+ }
45181+ }
45182+}
45183+#endif
45184+
45185 static int __init init_elf_binfmt(void)
45186 {
45187 register_binfmt(&elf_format);
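
elf_handle_mprotect() above re-reads the backing file's ELF headers on each suspicious mprotect() so that TEXTREL segments can be granted VM_MAYWRITE once and RELRO segments can lose it for good. A user-space sketch of the same header walk using <elf.h>, assuming a 64-bit ELF for brevity:

#include <elf.h>
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
    if (argc != 2) { fprintf(stderr, "usage: %s <elf-file>\n", argv[0]); return 1; }
    FILE *f = fopen(argv[1], "rb");
    if (!f) { perror("fopen"); return 1; }

    Elf64_Ehdr eh;
    if (fread(&eh, sizeof eh, 1, f) != 1 ||
        memcmp(eh.e_ident, ELFMAG, SELFMAG) != 0)
        return 1;

    for (unsigned i = 0; i < eh.e_phnum; i++) {
        Elf64_Phdr ph;
        fseek(f, (long)(eh.e_phoff + i * sizeof ph), SEEK_SET);
        if (fread(&ph, sizeof ph, 1, f) != 1)
            break;

        if (ph.p_type == PT_GNU_RELRO) /* the segment the patch strips VM_MAYWRITE from */
            printf("RELRO segment: vaddr %#lx memsz %#lx\n",
                   (unsigned long)ph.p_vaddr, (unsigned long)ph.p_memsz);

        if (ph.p_type == PT_DYNAMIC) { /* look for TEXTREL, as the patch does */
            for (Elf64_Xword o = 0; o + sizeof(Elf64_Dyn) <= ph.p_filesz;
                 o += sizeof(Elf64_Dyn)) {
                Elf64_Dyn dyn;
                fseek(f, (long)(ph.p_offset + o), SEEK_SET);
                if (fread(&dyn, sizeof dyn, 1, f) != 1 || dyn.d_tag == DT_NULL)
                    break;
                if (dyn.d_tag == DT_TEXTREL ||
                    (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL)))
                    puts("TEXTREL: text segment needs runtime relocations");
            }
        }
    }
    fclose(f);
    return 0;
}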
45188diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
45189index e280352..7b2f231 100644
45190--- a/fs/binfmt_flat.c
45191+++ b/fs/binfmt_flat.c
45192@@ -562,7 +562,9 @@ static int load_flat_file(struct linux_binprm * bprm,
45193 realdatastart = (unsigned long) -ENOMEM;
45194 printk("Unable to allocate RAM for process data, errno %d\n",
45195 (int)-realdatastart);
45196+ down_write(&current->mm->mmap_sem);
45197 vm_munmap(textpos, text_len);
45198+ up_write(&current->mm->mmap_sem);
45199 ret = realdatastart;
45200 goto err;
45201 }
45202@@ -586,8 +588,10 @@ static int load_flat_file(struct linux_binprm * bprm,
45203 }
45204 if (IS_ERR_VALUE(result)) {
45205 printk("Unable to read data+bss, errno %d\n", (int)-result);
45206+ down_write(&current->mm->mmap_sem);
45207 vm_munmap(textpos, text_len);
45208 vm_munmap(realdatastart, len);
45209+ up_write(&current->mm->mmap_sem);
45210 ret = result;
45211 goto err;
45212 }
45213@@ -654,8 +658,10 @@ static int load_flat_file(struct linux_binprm * bprm,
45214 }
45215 if (IS_ERR_VALUE(result)) {
45216 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
45217+ down_write(&current->mm->mmap_sem);
45218 vm_munmap(textpos, text_len + data_len + extra +
45219 MAX_SHARED_LIBS * sizeof(unsigned long));
45220+ up_write(&current->mm->mmap_sem);
45221 ret = result;
45222 goto err;
45223 }
45224diff --git a/fs/bio.c b/fs/bio.c
45225index b96fc6c..431d628 100644
45226--- a/fs/bio.c
45227+++ b/fs/bio.c
45228@@ -818,7 +818,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
45229 /*
45230 * Overflow, abort
45231 */
45232- if (end < start)
45233+ if (end < start || end - start > INT_MAX - nr_pages)
45234 return ERR_PTR(-EINVAL);
45235
45236 nr_pages += end - start;
45237@@ -952,7 +952,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
45238 /*
45239 * Overflow, abort
45240 */
45241- if (end < start)
45242+ if (end < start || end - start > INT_MAX - nr_pages)
45243 return ERR_PTR(-EINVAL);
45244
45245 nr_pages += end - start;
45246@@ -1214,7 +1214,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
45247 const int read = bio_data_dir(bio) == READ;
45248 struct bio_map_data *bmd = bio->bi_private;
45249 int i;
45250- char *p = bmd->sgvecs[0].iov_base;
45251+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
45252
45253 __bio_for_each_segment(bvec, bio, i, 0) {
45254 char *addr = page_address(bvec->bv_page);
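
Both fs/bio.c hunks harden the iovec page count: end < start alone only catches address wraparound, so the fix also bounds each segment's contribution against INT_MAX - nr_pages before adding it. The same pattern reduced to a sketch:

#include <limits.h>
#include <stdio.h>

/* Accumulate a per-segment page count, failing instead of overflowing int. */
static int add_segment_pages(int nr_pages, unsigned long uaddr,
                             unsigned long len, unsigned long page_size)
{
    unsigned long end   = (uaddr + len + page_size - 1) / page_size;
    unsigned long start = uaddr / page_size;

    if (end < start || end - start > (unsigned long)(INT_MAX - nr_pages))
        return -1;                    /* would wrap or exceed INT_MAX */
    return nr_pages + (int)(end - start);
}

int main(void)
{
    printf("%d\n", add_segment_pages(0, 0x1000, 0x8000, 0x1000));      /* 8 */
    printf("%d\n", add_segment_pages(INT_MAX - 2, 0, 0x4000, 0x1000)); /* -1 */
    return 0;
}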
45255diff --git a/fs/block_dev.c b/fs/block_dev.c
45256index ab3a456..7da538b 100644
45257--- a/fs/block_dev.c
45258+++ b/fs/block_dev.c
45259@@ -651,7 +651,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
45260 else if (bdev->bd_contains == bdev)
45261 return true; /* is a whole device which isn't held */
45262
45263- else if (whole->bd_holder == bd_may_claim)
45264+ else if (whole->bd_holder == (void *)bd_may_claim)
45265 return true; /* is a partition of a device that is being partitioned */
45266 else if (whole->bd_holder != NULL)
45267 return false; /* is a partition of a held device */
45268diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
45269index cdfb4c4..da736d4 100644
45270--- a/fs/btrfs/ctree.c
45271+++ b/fs/btrfs/ctree.c
45272@@ -1035,9 +1035,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
45273 free_extent_buffer(buf);
45274 add_root_to_dirty_list(root);
45275 } else {
45276- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
45277- parent_start = parent->start;
45278- else
45279+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
45280+ if (parent)
45281+ parent_start = parent->start;
45282+ else
45283+ parent_start = 0;
45284+ } else
45285 parent_start = 0;
45286
45287 WARN_ON(trans->transid != btrfs_header_generation(parent));
45288diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
45289index 95542a1..95a8727 100644
45290--- a/fs/btrfs/inode.c
45291+++ b/fs/btrfs/inode.c
45292@@ -7243,7 +7243,7 @@ fail:
45293 return -ENOMEM;
45294 }
45295
45296-static int btrfs_getattr(struct vfsmount *mnt,
45297+int btrfs_getattr(struct vfsmount *mnt,
45298 struct dentry *dentry, struct kstat *stat)
45299 {
45300 struct inode *inode = dentry->d_inode;
45301@@ -7257,6 +7257,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
45302 return 0;
45303 }
45304
45305+EXPORT_SYMBOL(btrfs_getattr);
45306+
45307+dev_t get_btrfs_dev_from_inode(struct inode *inode)
45308+{
45309+ return BTRFS_I(inode)->root->anon_dev;
45310+}
45311+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
45312+
45313 /*
45314 * If a file is moved, it will inherit the cow and compression flags of the new
45315 * directory.
45316diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
45317index 8fcf9a5..a200000 100644
45318--- a/fs/btrfs/ioctl.c
45319+++ b/fs/btrfs/ioctl.c
45320@@ -2965,9 +2965,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
45321 for (i = 0; i < num_types; i++) {
45322 struct btrfs_space_info *tmp;
45323
45324+ /* Don't copy in more than we allocated */
45325 if (!slot_count)
45326 break;
45327
45328+ slot_count--;
45329+
45330 info = NULL;
45331 rcu_read_lock();
45332 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
45333@@ -2989,10 +2992,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
45334 memcpy(dest, &space, sizeof(space));
45335 dest++;
45336 space_args.total_spaces++;
45337- slot_count--;
45338 }
45339- if (!slot_count)
45340- break;
45341 }
45342 up_read(&info->groups_sem);
45343 }
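
The btrfs_ioctl_space_info() fix reserves a slot (slot_count--) before the copy instead of decrementing afterwards, so the loop can never memcpy past the buffer it sized earlier even if the space_info list grew in between. The pattern reduced to a sketch:

#include <stdio.h>

struct slot { int val; };

/* Bounded copy into a fixed number of caller-allocated slots. */
static unsigned fill_slots(struct slot *dest, unsigned slot_count,
                           const int *src, unsigned nsrc)
{
    unsigned copied = 0;
    for (unsigned i = 0; i < nsrc; i++) {
        if (!slot_count)        /* reserve before writing, as in the fix */
            break;
        slot_count--;
        dest[copied++].val = src[i];
    }
    return copied;
}

int main(void)
{
    struct slot out[2];
    int in[] = { 1, 2, 3 };
    printf("copied %u\n", fill_slots(out, 2, in, 3)); /* copied 2 */
    return 0;
}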
45344diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
45345index 776f0aa..3aad281 100644
45346--- a/fs/btrfs/relocation.c
45347+++ b/fs/btrfs/relocation.c
45348@@ -1269,7 +1269,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
45349 }
45350 spin_unlock(&rc->reloc_root_tree.lock);
45351
45352- BUG_ON((struct btrfs_root *)node->data != root);
45353+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
45354
45355 if (!del) {
45356 spin_lock(&rc->reloc_root_tree.lock);
45357diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
45358index 622f469..e8d2d55 100644
45359--- a/fs/cachefiles/bind.c
45360+++ b/fs/cachefiles/bind.c
45361@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
45362 args);
45363
45364 /* start by checking things over */
45365- ASSERT(cache->fstop_percent >= 0 &&
45366- cache->fstop_percent < cache->fcull_percent &&
45367+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
45368 cache->fcull_percent < cache->frun_percent &&
45369 cache->frun_percent < 100);
45370
45371- ASSERT(cache->bstop_percent >= 0 &&
45372- cache->bstop_percent < cache->bcull_percent &&
45373+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
45374 cache->bcull_percent < cache->brun_percent &&
45375 cache->brun_percent < 100);
45376
45377diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
45378index 0a1467b..6a53245 100644
45379--- a/fs/cachefiles/daemon.c
45380+++ b/fs/cachefiles/daemon.c
45381@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
45382 if (n > buflen)
45383 return -EMSGSIZE;
45384
45385- if (copy_to_user(_buffer, buffer, n) != 0)
45386+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
45387 return -EFAULT;
45388
45389 return n;
45390@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
45391 if (test_bit(CACHEFILES_DEAD, &cache->flags))
45392 return -EIO;
45393
45394- if (datalen < 0 || datalen > PAGE_SIZE - 1)
45395+ if (datalen > PAGE_SIZE - 1)
45396 return -EOPNOTSUPP;
45397
45398 /* drag the command string into the kernel so we can parse it */
45399@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
45400 if (args[0] != '%' || args[1] != '\0')
45401 return -EINVAL;
45402
45403- if (fstop < 0 || fstop >= cache->fcull_percent)
45404+ if (fstop >= cache->fcull_percent)
45405 return cachefiles_daemon_range_error(cache, args);
45406
45407 cache->fstop_percent = fstop;
45408@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
45409 if (args[0] != '%' || args[1] != '\0')
45410 return -EINVAL;
45411
45412- if (bstop < 0 || bstop >= cache->bcull_percent)
45413+ if (bstop >= cache->bcull_percent)
45414 return cachefiles_daemon_range_error(cache, args);
45415
45416 cache->bstop_percent = bstop;
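
The percent fields checked here are unsigned (see the cachefiles_cache definition in the internal.h diff just below), so the dropped "x >= 0" / "x < 0" halves of these comparisons were dead code; the remaining ordering checks still pin stop < cull < run < 100. In isolation:

#include <stdio.h>

/* With unsigned percentages only the ordering needs checking. */
static int percents_sane(unsigned stop, unsigned cull, unsigned run)
{
    return stop < cull && cull < run && run < 100; /* stop >= 0 is vacuous */
}

int main(void)
{
    printf("%d %d\n", percents_sane(7, 5, 95), percents_sane(5, 7, 95)); /* 0 1 */
    return 0;
}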
45417diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
45418index bd6bc1b..b627b53 100644
45419--- a/fs/cachefiles/internal.h
45420+++ b/fs/cachefiles/internal.h
45421@@ -57,7 +57,7 @@ struct cachefiles_cache {
45422 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
45423 struct rb_root active_nodes; /* active nodes (can't be culled) */
45424 rwlock_t active_lock; /* lock for active_nodes */
45425- atomic_t gravecounter; /* graveyard uniquifier */
45426+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
45427 unsigned frun_percent; /* when to stop culling (% files) */
45428 unsigned fcull_percent; /* when to start culling (% files) */
45429 unsigned fstop_percent; /* when to stop allocating (% files) */
45430@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
45431 * proc.c
45432 */
45433 #ifdef CONFIG_CACHEFILES_HISTOGRAM
45434-extern atomic_t cachefiles_lookup_histogram[HZ];
45435-extern atomic_t cachefiles_mkdir_histogram[HZ];
45436-extern atomic_t cachefiles_create_histogram[HZ];
45437+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
45438+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
45439+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
45440
45441 extern int __init cachefiles_proc_init(void);
45442 extern void cachefiles_proc_cleanup(void);
45443 static inline
45444-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
45445+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
45446 {
45447 unsigned long jif = jiffies - start_jif;
45448 if (jif >= HZ)
45449 jif = HZ - 1;
45450- atomic_inc(&histogram[jif]);
45451+ atomic_inc_unchecked(&histogram[jif]);
45452 }
45453
45454 #else
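
atomic_unchecked_t is the escape hatch from PAX_REFCOUNT: plain atomic_t gains overflow detection under that feature, so counters that are allowed to wrap harmlessly, like these histograms and the cifs statistics later in the patch, move to an *_unchecked variant that the instrumentation ignores. A minimal sketch of the API's shape, assuming GCC/Clang __atomic builtins rather than the kernel's per-arch implementations:

/* Sketch of a wrap-permitted atomic counter API (not the kernel's code). */
#include <stdio.h>

typedef struct { volatile int counter; } atomic_unchecked_t;

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
    __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED); /* may wrap freely */
}

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
    return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
{
    __atomic_store_n(&v->counter, i, __ATOMIC_RELAXED);
}

int main(void)
{
    atomic_unchecked_t hist = { 0 };
    atomic_inc_unchecked(&hist);
    printf("%d\n", atomic_read_unchecked(&hist)); /* 1 */
    return 0;
}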
45455diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
45456index b0b5f7c..039bb26 100644
45457--- a/fs/cachefiles/namei.c
45458+++ b/fs/cachefiles/namei.c
45459@@ -318,7 +318,7 @@ try_again:
45460 /* first step is to make up a grave dentry in the graveyard */
45461 sprintf(nbuffer, "%08x%08x",
45462 (uint32_t) get_seconds(),
45463- (uint32_t) atomic_inc_return(&cache->gravecounter));
45464+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
45465
45466 /* do the multiway lock magic */
45467 trap = lock_rename(cache->graveyard, dir);
45468diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
45469index eccd339..4c1d995 100644
45470--- a/fs/cachefiles/proc.c
45471+++ b/fs/cachefiles/proc.c
45472@@ -14,9 +14,9 @@
45473 #include <linux/seq_file.h>
45474 #include "internal.h"
45475
45476-atomic_t cachefiles_lookup_histogram[HZ];
45477-atomic_t cachefiles_mkdir_histogram[HZ];
45478-atomic_t cachefiles_create_histogram[HZ];
45479+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
45480+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
45481+atomic_unchecked_t cachefiles_create_histogram[HZ];
45482
45483 /*
45484 * display the latency histogram
45485@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
45486 return 0;
45487 default:
45488 index = (unsigned long) v - 3;
45489- x = atomic_read(&cachefiles_lookup_histogram[index]);
45490- y = atomic_read(&cachefiles_mkdir_histogram[index]);
45491- z = atomic_read(&cachefiles_create_histogram[index]);
45492+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
45493+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
45494+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
45495 if (x == 0 && y == 0 && z == 0)
45496 return 0;
45497
45498diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
45499index c994691..2a1537f 100644
45500--- a/fs/cachefiles/rdwr.c
45501+++ b/fs/cachefiles/rdwr.c
45502@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
45503 old_fs = get_fs();
45504 set_fs(KERNEL_DS);
45505 ret = file->f_op->write(
45506- file, (const void __user *) data, len, &pos);
45507+ file, (const void __force_user *) data, len, &pos);
45508 set_fs(old_fs);
45509 kunmap(page);
45510 if (ret != len)
45511diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
45512index e5b7731..b9c59fb 100644
45513--- a/fs/ceph/dir.c
45514+++ b/fs/ceph/dir.c
45515@@ -243,7 +243,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
45516 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
45517 struct ceph_mds_client *mdsc = fsc->mdsc;
45518 unsigned frag = fpos_frag(filp->f_pos);
45519- int off = fpos_off(filp->f_pos);
45520+ unsigned int off = fpos_off(filp->f_pos);
45521 int err;
45522 u32 ftype;
45523 struct ceph_mds_reply_info_parsed *rinfo;
45524diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
45525index d9ea6ed..1e6c8ac 100644
45526--- a/fs/cifs/cifs_debug.c
45527+++ b/fs/cifs/cifs_debug.c
45528@@ -267,8 +267,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
45529
45530 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
45531 #ifdef CONFIG_CIFS_STATS2
45532- atomic_set(&totBufAllocCount, 0);
45533- atomic_set(&totSmBufAllocCount, 0);
45534+ atomic_set_unchecked(&totBufAllocCount, 0);
45535+ atomic_set_unchecked(&totSmBufAllocCount, 0);
45536 #endif /* CONFIG_CIFS_STATS2 */
45537 spin_lock(&cifs_tcp_ses_lock);
45538 list_for_each(tmp1, &cifs_tcp_ses_list) {
45539@@ -281,7 +281,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
45540 tcon = list_entry(tmp3,
45541 struct cifs_tcon,
45542 tcon_list);
45543- atomic_set(&tcon->num_smbs_sent, 0);
45544+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
45545 if (server->ops->clear_stats)
45546 server->ops->clear_stats(tcon);
45547 }
45548@@ -313,8 +313,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
45549 smBufAllocCount.counter, cifs_min_small);
45550 #ifdef CONFIG_CIFS_STATS2
45551 seq_printf(m, "Total Large %d Small %d Allocations\n",
45552- atomic_read(&totBufAllocCount),
45553- atomic_read(&totSmBufAllocCount));
45554+ atomic_read_unchecked(&totBufAllocCount),
45555+ atomic_read_unchecked(&totSmBufAllocCount));
45556 #endif /* CONFIG_CIFS_STATS2 */
45557
45558 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
45559@@ -343,7 +343,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
45560 if (tcon->need_reconnect)
45561 seq_puts(m, "\tDISCONNECTED ");
45562 seq_printf(m, "\nSMBs: %d",
45563- atomic_read(&tcon->num_smbs_sent));
45564+ atomic_read_unchecked(&tcon->num_smbs_sent));
45565 if (server->ops->print_stats)
45566 server->ops->print_stats(m, tcon);
45567 }
45568diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
45569index e7931cc..76a1ab9 100644
45570--- a/fs/cifs/cifsfs.c
45571+++ b/fs/cifs/cifsfs.c
45572@@ -999,7 +999,7 @@ cifs_init_request_bufs(void)
45573 /* cERROR(1, "CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize); */
45574 cifs_req_cachep = kmem_cache_create("cifs_request",
45575 CIFSMaxBufSize + max_hdr_size, 0,
45576- SLAB_HWCACHE_ALIGN, NULL);
45577+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
45578 if (cifs_req_cachep == NULL)
45579 return -ENOMEM;
45580
45581@@ -1026,7 +1026,7 @@ cifs_init_request_bufs(void)
45582 efficient to alloc 1 per page off the slab compared to 17K (5page)
45583 alloc of large cifs buffers even when page debugging is on */
45584 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
45585- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
45586+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
45587 NULL);
45588 if (cifs_sm_req_cachep == NULL) {
45589 mempool_destroy(cifs_req_poolp);
45590@@ -1111,8 +1111,8 @@ init_cifs(void)
45591 atomic_set(&bufAllocCount, 0);
45592 atomic_set(&smBufAllocCount, 0);
45593 #ifdef CONFIG_CIFS_STATS2
45594- atomic_set(&totBufAllocCount, 0);
45595- atomic_set(&totSmBufAllocCount, 0);
45596+ atomic_set_unchecked(&totBufAllocCount, 0);
45597+ atomic_set_unchecked(&totSmBufAllocCount, 0);
45598 #endif /* CONFIG_CIFS_STATS2 */
45599
45600 atomic_set(&midCount, 0);
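
The SLAB_USERCOPY flag added to the two cifs request caches ties into the PAX_USERCOPY feature: copies between slab objects and user space are refused unless the backing cache was explicitly whitelisted at kmem_cache_create() time, and cifs request buffers legitimately pass through copy_to_user()/copy_from_user(). The whitelist idea, reduced to an illustrative sketch:

#include <stdio.h>

#define SLAB_USERCOPY 0x1u   /* stand-in for the grsecurity slab flag */

struct cache_model { const char *name; unsigned flags; };

/* A user<->kernel copy from a slab object is refused unless its cache opted in. */
static int check_usercopy(const struct cache_model *c)
{
    return (c->flags & SLAB_USERCOPY) ? 0 : -1; /* -1 ~ report and kill */
}

int main(void)
{
    struct cache_model req   = { "cifs_request", SLAB_USERCOPY };
    struct cache_model other = { "dentry", 0 };
    printf("%s: %d\n", req.name, check_usercopy(&req));     /* 0: allowed */
    printf("%s: %d\n", other.name, check_usercopy(&other)); /* -1: refused */
    return 0;
}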
45601diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
45602index f5af252..489b5f2 100644
45603--- a/fs/cifs/cifsglob.h
45604+++ b/fs/cifs/cifsglob.h
45605@@ -751,35 +751,35 @@ struct cifs_tcon {
45606 __u16 Flags; /* optional support bits */
45607 enum statusEnum tidStatus;
45608 #ifdef CONFIG_CIFS_STATS
45609- atomic_t num_smbs_sent;
45610+ atomic_unchecked_t num_smbs_sent;
45611 union {
45612 struct {
45613- atomic_t num_writes;
45614- atomic_t num_reads;
45615- atomic_t num_flushes;
45616- atomic_t num_oplock_brks;
45617- atomic_t num_opens;
45618- atomic_t num_closes;
45619- atomic_t num_deletes;
45620- atomic_t num_mkdirs;
45621- atomic_t num_posixopens;
45622- atomic_t num_posixmkdirs;
45623- atomic_t num_rmdirs;
45624- atomic_t num_renames;
45625- atomic_t num_t2renames;
45626- atomic_t num_ffirst;
45627- atomic_t num_fnext;
45628- atomic_t num_fclose;
45629- atomic_t num_hardlinks;
45630- atomic_t num_symlinks;
45631- atomic_t num_locks;
45632- atomic_t num_acl_get;
45633- atomic_t num_acl_set;
45634+ atomic_unchecked_t num_writes;
45635+ atomic_unchecked_t num_reads;
45636+ atomic_unchecked_t num_flushes;
45637+ atomic_unchecked_t num_oplock_brks;
45638+ atomic_unchecked_t num_opens;
45639+ atomic_unchecked_t num_closes;
45640+ atomic_unchecked_t num_deletes;
45641+ atomic_unchecked_t num_mkdirs;
45642+ atomic_unchecked_t num_posixopens;
45643+ atomic_unchecked_t num_posixmkdirs;
45644+ atomic_unchecked_t num_rmdirs;
45645+ atomic_unchecked_t num_renames;
45646+ atomic_unchecked_t num_t2renames;
45647+ atomic_unchecked_t num_ffirst;
45648+ atomic_unchecked_t num_fnext;
45649+ atomic_unchecked_t num_fclose;
45650+ atomic_unchecked_t num_hardlinks;
45651+ atomic_unchecked_t num_symlinks;
45652+ atomic_unchecked_t num_locks;
45653+ atomic_unchecked_t num_acl_get;
45654+ atomic_unchecked_t num_acl_set;
45655 } cifs_stats;
45656 #ifdef CONFIG_CIFS_SMB2
45657 struct {
45658- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
45659- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
45660+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
45661+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
45662 } smb2_stats;
45663 #endif /* CONFIG_CIFS_SMB2 */
45664 } stats;
45665@@ -1094,7 +1094,7 @@ build_path_to_root(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
45666 }
45667
45668 #ifdef CONFIG_CIFS_STATS
45669-#define cifs_stats_inc atomic_inc
45670+#define cifs_stats_inc atomic_inc_unchecked
45671
45672 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
45673 unsigned int bytes)
45674@@ -1459,8 +1459,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
45675 /* Various Debug counters */
45676 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
45677 #ifdef CONFIG_CIFS_STATS2
45678-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
45679-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
45680+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
45681+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
45682 #endif
45683 GLOBAL_EXTERN atomic_t smBufAllocCount;
45684 GLOBAL_EXTERN atomic_t midCount;
45685diff --git a/fs/cifs/link.c b/fs/cifs/link.c
45686index 51dc2fb..1e12a33 100644
45687--- a/fs/cifs/link.c
45688+++ b/fs/cifs/link.c
45689@@ -616,7 +616,7 @@ symlink_exit:
45690
45691 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
45692 {
45693- char *p = nd_get_link(nd);
45694+ const char *p = nd_get_link(nd);
45695 if (!IS_ERR(p))
45696 kfree(p);
45697 }
45698diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
45699index 3a00c0d..42d901c 100644
45700--- a/fs/cifs/misc.c
45701+++ b/fs/cifs/misc.c
45702@@ -169,7 +169,7 @@ cifs_buf_get(void)
45703 memset(ret_buf, 0, buf_size + 3);
45704 atomic_inc(&bufAllocCount);
45705 #ifdef CONFIG_CIFS_STATS2
45706- atomic_inc(&totBufAllocCount);
45707+ atomic_inc_unchecked(&totBufAllocCount);
45708 #endif /* CONFIG_CIFS_STATS2 */
45709 }
45710
45711@@ -204,7 +204,7 @@ cifs_small_buf_get(void)
45712 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
45713 atomic_inc(&smBufAllocCount);
45714 #ifdef CONFIG_CIFS_STATS2
45715- atomic_inc(&totSmBufAllocCount);
45716+ atomic_inc_unchecked(&totSmBufAllocCount);
45717 #endif /* CONFIG_CIFS_STATS2 */
45718
45719 }
45720diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
45721index 591bf19..690d600 100644
45722--- a/fs/cifs/smb1ops.c
45723+++ b/fs/cifs/smb1ops.c
45724@@ -617,27 +617,27 @@ static void
45725 cifs_clear_stats(struct cifs_tcon *tcon)
45726 {
45727 #ifdef CONFIG_CIFS_STATS
45728- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
45729- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
45730- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
45731- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
45732- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
45733- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
45734- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
45735- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
45736- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
45737- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
45738- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
45739- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
45740- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
45741- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
45742- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
45743- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
45744- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
45745- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
45746- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
45747- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
45748- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
45749+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
45750+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
45751+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
45752+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
45753+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
45754+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
45755+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
45756+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
45757+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
45758+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
45759+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
45760+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
45761+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
45762+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
45763+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
45764+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
45765+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
45766+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
45767+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
45768+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
45769+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
45770 #endif
45771 }
45772
45773@@ -646,36 +646,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
45774 {
45775 #ifdef CONFIG_CIFS_STATS
45776 seq_printf(m, " Oplocks breaks: %d",
45777- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
45778+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
45779 seq_printf(m, "\nReads: %d Bytes: %llu",
45780- atomic_read(&tcon->stats.cifs_stats.num_reads),
45781+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
45782 (long long)(tcon->bytes_read));
45783 seq_printf(m, "\nWrites: %d Bytes: %llu",
45784- atomic_read(&tcon->stats.cifs_stats.num_writes),
45785+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
45786 (long long)(tcon->bytes_written));
45787 seq_printf(m, "\nFlushes: %d",
45788- atomic_read(&tcon->stats.cifs_stats.num_flushes));
45789+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
45790 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
45791- atomic_read(&tcon->stats.cifs_stats.num_locks),
45792- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
45793- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
45794+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
45795+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
45796+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
45797 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
45798- atomic_read(&tcon->stats.cifs_stats.num_opens),
45799- atomic_read(&tcon->stats.cifs_stats.num_closes),
45800- atomic_read(&tcon->stats.cifs_stats.num_deletes));
45801+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
45802+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
45803+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
45804 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
45805- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
45806- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
45807+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
45808+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
45809 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
45810- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
45811- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
45812+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
45813+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
45814 seq_printf(m, "\nRenames: %d T2 Renames %d",
45815- atomic_read(&tcon->stats.cifs_stats.num_renames),
45816- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
45817+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
45818+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
45819 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
45820- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
45821- atomic_read(&tcon->stats.cifs_stats.num_fnext),
45822- atomic_read(&tcon->stats.cifs_stats.num_fclose));
45823+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
45824+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
45825+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
45826 #endif
45827 }
45828
45829diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
45830index 4d9dbe0..0af4601 100644
45831--- a/fs/cifs/smb2ops.c
45832+++ b/fs/cifs/smb2ops.c
45833@@ -291,8 +291,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
45834 #ifdef CONFIG_CIFS_STATS
45835 int i;
45836 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
45837- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
45838- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
45839+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
45840+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
45841 }
45842 #endif
45843 }
45844@@ -301,66 +301,66 @@ static void
45845 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
45846 {
45847 #ifdef CONFIG_CIFS_STATS
45848- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
45849- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
45850+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
45851+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
45852 seq_printf(m, "\nNegotiates: %d sent %d failed",
45853- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
45854- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
45855+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
45856+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
45857 seq_printf(m, "\nSessionSetups: %d sent %d failed",
45858- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
45859- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
45860+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
45861+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
45862 #define SMB2LOGOFF 0x0002 /* trivial request/resp */
45863 seq_printf(m, "\nLogoffs: %d sent %d failed",
45864- atomic_read(&sent[SMB2_LOGOFF_HE]),
45865- atomic_read(&failed[SMB2_LOGOFF_HE]));
45866+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
45867+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
45868 seq_printf(m, "\nTreeConnects: %d sent %d failed",
45869- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
45870- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
45871+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
45872+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
45873 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
45874- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
45875- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
45876+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
45877+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
45878 seq_printf(m, "\nCreates: %d sent %d failed",
45879- atomic_read(&sent[SMB2_CREATE_HE]),
45880- atomic_read(&failed[SMB2_CREATE_HE]));
45881+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
45882+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
45883 seq_printf(m, "\nCloses: %d sent %d failed",
45884- atomic_read(&sent[SMB2_CLOSE_HE]),
45885- atomic_read(&failed[SMB2_CLOSE_HE]));
45886+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
45887+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
45888 seq_printf(m, "\nFlushes: %d sent %d failed",
45889- atomic_read(&sent[SMB2_FLUSH_HE]),
45890- atomic_read(&failed[SMB2_FLUSH_HE]));
45891+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
45892+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
45893 seq_printf(m, "\nReads: %d sent %d failed",
45894- atomic_read(&sent[SMB2_READ_HE]),
45895- atomic_read(&failed[SMB2_READ_HE]));
45896+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
45897+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
45898 seq_printf(m, "\nWrites: %d sent %d failed",
45899- atomic_read(&sent[SMB2_WRITE_HE]),
45900- atomic_read(&failed[SMB2_WRITE_HE]));
45901+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
45902+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
45903 seq_printf(m, "\nLocks: %d sent %d failed",
45904- atomic_read(&sent[SMB2_LOCK_HE]),
45905- atomic_read(&failed[SMB2_LOCK_HE]));
45906+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
45907+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
45908 seq_printf(m, "\nIOCTLs: %d sent %d failed",
45909- atomic_read(&sent[SMB2_IOCTL_HE]),
45910- atomic_read(&failed[SMB2_IOCTL_HE]));
45911+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
45912+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
45913 seq_printf(m, "\nCancels: %d sent %d failed",
45914- atomic_read(&sent[SMB2_CANCEL_HE]),
45915- atomic_read(&failed[SMB2_CANCEL_HE]));
45916+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
45917+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
45918 seq_printf(m, "\nEchos: %d sent %d failed",
45919- atomic_read(&sent[SMB2_ECHO_HE]),
45920- atomic_read(&failed[SMB2_ECHO_HE]));
45921+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
45922+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
45923 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
45924- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
45925- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
45926+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
45927+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
45928 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
45929- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
45930- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
45931+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
45932+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
45933 seq_printf(m, "\nQueryInfos: %d sent %d failed",
45934- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
45935- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
45936+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
45937+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
45938 seq_printf(m, "\nSetInfos: %d sent %d failed",
45939- atomic_read(&sent[SMB2_SET_INFO_HE]),
45940- atomic_read(&failed[SMB2_SET_INFO_HE]));
45941+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
45942+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
45943 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
45944- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
45945- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
45946+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
45947+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
45948 #endif
45949 }
45950
45951diff --git a/fs/coda/cache.c b/fs/coda/cache.c
45952index 958ae0e..505c9d0 100644
45953--- a/fs/coda/cache.c
45954+++ b/fs/coda/cache.c
45955@@ -24,7 +24,7 @@
45956 #include "coda_linux.h"
45957 #include "coda_cache.h"
45958
45959-static atomic_t permission_epoch = ATOMIC_INIT(0);
45960+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
45961
45962 /* replace or extend an acl cache hit */
45963 void coda_cache_enter(struct inode *inode, int mask)
45964@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
45965 struct coda_inode_info *cii = ITOC(inode);
45966
45967 spin_lock(&cii->c_lock);
45968- cii->c_cached_epoch = atomic_read(&permission_epoch);
45969+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
45970 if (cii->c_uid != current_fsuid()) {
45971 cii->c_uid = current_fsuid();
45972 cii->c_cached_perm = mask;
45973@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
45974 {
45975 struct coda_inode_info *cii = ITOC(inode);
45976 spin_lock(&cii->c_lock);
45977- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
45978+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
45979 spin_unlock(&cii->c_lock);
45980 }
45981
45982 /* remove all acl caches */
45983 void coda_cache_clear_all(struct super_block *sb)
45984 {
45985- atomic_inc(&permission_epoch);
45986+ atomic_inc_unchecked(&permission_epoch);
45987 }
45988
45989
45990@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
45991 spin_lock(&cii->c_lock);
45992 hit = (mask & cii->c_cached_perm) == mask &&
45993 cii->c_uid == current_fsuid() &&
45994- cii->c_cached_epoch == atomic_read(&permission_epoch);
45995+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
45996 spin_unlock(&cii->c_lock);
45997
45998 return hit;
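
The coda hunks above show this patch set's most common transformation: counters whose only job is versioning or statistics (here, an ACL-cache epoch) move from atomic_t to atomic_unchecked_t, whose accessors skip PaX's REFCOUNT overflow trap because wrap-around is harmless for them. A minimal C11 user-space sketch of the split; the type and helper names are illustrative, not kernel API:

#include <stdatomic.h>
#include <stdio.h>

/* free-running counter: wrapping is acceptable, so no overflow check */
typedef struct { atomic_int v; } unchecked_t;

static int unchecked_inc_return(unchecked_t *c)
{
	return atomic_fetch_add(&c->v, 1) + 1;
}

static int unchecked_read(unchecked_t *c)
{
	return atomic_load(&c->v);
}

int main(void)
{
	unchecked_t epoch = { 0 };

	unchecked_inc_return(&epoch);	/* cf. coda_cache_clear_all() */
	printf("epoch = %d\n", unchecked_read(&epoch));
	return 0;
}
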
45999diff --git a/fs/compat.c b/fs/compat.c
46000index 015e1e1..5ce8e54 100644
46001--- a/fs/compat.c
46002+++ b/fs/compat.c
46003@@ -490,7 +490,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
46004
46005 set_fs(KERNEL_DS);
46006 /* The __user pointer cast is valid because of the set_fs() */
46007- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
46008+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
46009 set_fs(oldfs);
46010 /* truncating is ok because it's a user address */
46011 if (!ret)
46012@@ -548,7 +548,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
46013 goto out;
46014
46015 ret = -EINVAL;
46016- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
46017+ if (nr_segs > UIO_MAXIOV)
46018 goto out;
46019 if (nr_segs > fast_segs) {
46020 ret = -ENOMEM;
46021@@ -831,6 +831,7 @@ struct compat_old_linux_dirent {
46022
46023 struct compat_readdir_callback {
46024 struct compat_old_linux_dirent __user *dirent;
46025+ struct file * file;
46026 int result;
46027 };
46028
46029@@ -848,6 +849,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
46030 buf->result = -EOVERFLOW;
46031 return -EOVERFLOW;
46032 }
46033+
46034+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46035+ return 0;
46036+
46037 buf->result++;
46038 dirent = buf->dirent;
46039 if (!access_ok(VERIFY_WRITE, dirent,
46040@@ -878,6 +883,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
46041
46042 buf.result = 0;
46043 buf.dirent = dirent;
46044+ buf.file = f.file;
46045
46046 error = vfs_readdir(f.file, compat_fillonedir, &buf);
46047 if (buf.result)
46048@@ -897,6 +903,7 @@ struct compat_linux_dirent {
46049 struct compat_getdents_callback {
46050 struct compat_linux_dirent __user *current_dir;
46051 struct compat_linux_dirent __user *previous;
46052+ struct file * file;
46053 int count;
46054 int error;
46055 };
46056@@ -918,6 +925,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
46057 buf->error = -EOVERFLOW;
46058 return -EOVERFLOW;
46059 }
46060+
46061+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46062+ return 0;
46063+
46064 dirent = buf->previous;
46065 if (dirent) {
46066 if (__put_user(offset, &dirent->d_off))
46067@@ -963,6 +974,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
46068 buf.previous = NULL;
46069 buf.count = count;
46070 buf.error = 0;
46071+ buf.file = f.file;
46072
46073 error = vfs_readdir(f.file, compat_filldir, &buf);
46074 if (error >= 0)
46075@@ -983,6 +995,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
46076 struct compat_getdents_callback64 {
46077 struct linux_dirent64 __user *current_dir;
46078 struct linux_dirent64 __user *previous;
46079+ struct file * file;
46080 int count;
46081 int error;
46082 };
46083@@ -999,6 +1012,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
46084 buf->error = -EINVAL; /* only used if we fail.. */
46085 if (reclen > buf->count)
46086 return -EINVAL;
46087+
46088+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46089+ return 0;
46090+
46091 dirent = buf->previous;
46092
46093 if (dirent) {
46094@@ -1048,13 +1065,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
46095 buf.previous = NULL;
46096 buf.count = count;
46097 buf.error = 0;
46098+ buf.file = f.file;
46099
46100 error = vfs_readdir(f.file, compat_filldir64, &buf);
46101 if (error >= 0)
46102 error = buf.error;
46103 lastdirent = buf.previous;
46104 if (lastdirent) {
46105- typeof(lastdirent->d_off) d_off = f.file->f_pos;
46106+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
46107 if (__put_user_unaligned(d_off, &lastdirent->d_off))
46108 error = -EFAULT;
46109 else
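
All three compat readdir hunks stash the struct file pointer in the callback context so gr_acl_handle_filldir() can veto individual directory entries; a vetoed name is skipped by returning 0 (continue iterating) before anything is copied out. A user-space sketch of that filter-in-callback shape, where allow_entry() is a hypothetical stand-in for the grsecurity hook:

#include <stdio.h>
#include <string.h>

struct readdir_ctx {
	FILE *out;	/* plays the role of the struct file * the patch adds */
	int   emitted;
};

static int allow_entry(const char *name)	/* hypothetical policy hook */
{
	return strcmp(name, "secret") != 0;
}

static int filldir(struct readdir_ctx *ctx, const char *name)
{
	if (!allow_entry(name))
		return 0;		/* deny: skip silently, keep going */
	fprintf(ctx->out, "%s\n", name);
	ctx->emitted++;
	return 0;			/* 0 == continue iteration */
}

int main(void)
{
	struct readdir_ctx ctx = { stdout, 0 };
	const char *names[] = { "a.txt", "secret", "b.txt" };

	for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		filldir(&ctx, names[i]);
	return ctx.emitted == 2 ? 0 : 1;
}
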
46110diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
46111index a81147e..20bf2b5 100644
46112--- a/fs/compat_binfmt_elf.c
46113+++ b/fs/compat_binfmt_elf.c
46114@@ -30,11 +30,13 @@
46115 #undef elf_phdr
46116 #undef elf_shdr
46117 #undef elf_note
46118+#undef elf_dyn
46119 #undef elf_addr_t
46120 #define elfhdr elf32_hdr
46121 #define elf_phdr elf32_phdr
46122 #define elf_shdr elf32_shdr
46123 #define elf_note elf32_note
46124+#define elf_dyn Elf32_Dyn
46125 #define elf_addr_t Elf32_Addr
46126
46127 /*
46128diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
46129index 4c6285f..b7a2411 100644
46130--- a/fs/compat_ioctl.c
46131+++ b/fs/compat_ioctl.c
46132@@ -623,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
46133 return -EFAULT;
46134 if (__get_user(udata, &ss32->iomem_base))
46135 return -EFAULT;
46136- ss.iomem_base = compat_ptr(udata);
46137+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
46138 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
46139 __get_user(ss.port_high, &ss32->port_high))
46140 return -EFAULT;
46141@@ -798,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
46142 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
46143 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
46144 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
46145- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
46146+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
46147 return -EFAULT;
46148
46149 return ioctl_preallocate(file, p);
46150@@ -1617,8 +1617,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
46151 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
46152 {
46153 unsigned int a, b;
46154- a = *(unsigned int *)p;
46155- b = *(unsigned int *)q;
46156+ a = *(const unsigned int *)p;
46157+ b = *(const unsigned int *)q;
46158 if (a > b)
46159 return 1;
46160 if (a < b)
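
Two small cleanups in compat_ioctl.c: the sort comparator gains const-correct casts, and copy_in_user() is handed p->l_pad rather than &p->l_pad. The two expressions name the same address, but p->l_pad decays to a pointer to the first element, the type the copy helper expects, while &p->l_pad is a pointer to the whole array. A quick demonstration:

#include <stdio.h>

int main(void)
{
	unsigned pad[4];

	/* same address either way ... */
	printf("%p %p\n", (void *)pad, (void *)&pad);
	/* ... but pad is unsigned *, &pad is unsigned (*)[4] */
	return 0;
}
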
46161diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
46162index 7414ae2..d98ad6d 100644
46163--- a/fs/configfs/dir.c
46164+++ b/fs/configfs/dir.c
46165@@ -1564,7 +1564,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
46166 }
46167 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
46168 struct configfs_dirent *next;
46169- const char * name;
46170+ const unsigned char * name;
46171+ char d_name[sizeof(next->s_dentry->d_iname)];
46172 int len;
46173 struct inode *inode = NULL;
46174
46175@@ -1574,7 +1575,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
46176 continue;
46177
46178 name = configfs_get_name(next);
46179- len = strlen(name);
46180+ if (next->s_dentry && name == next->s_dentry->d_iname) {
46181+ len = next->s_dentry->d_name.len;
46182+ memcpy(d_name, name, len);
46183+ name = d_name;
46184+ } else
46185+ len = strlen(name);
46186
46187 /*
46188 * We'll have a dentry and an inode for
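
The configfs hunk defends against a concurrent rename: when the name points at the dentry's inline d_iname buffer, it is snapshotted into a stack buffer using the recorded d_name.len instead of strlen(), so a racing rename can at worst yield a stale name rather than a read past the inline buffer. A user-space analog; the field names mirror the kernel's but the struct itself is made up:

#include <stdio.h>
#include <string.h>

struct fake_dentry {
	char   iname[32];	/* stands in for d_iname (inline name) */
	size_t name_len;	/* stands in for d_name.len */
};

static void emit_name(const struct fake_dentry *d)
{
	char snap[sizeof(d->iname)];
	size_t len = d->name_len;	/* trust the recorded length */

	memcpy(snap, d->iname, len);	/* single bounded copy */
	printf("%.*s\n", (int)len, snap);
}

int main(void)
{
	struct fake_dentry d = { "subsys", 6 };

	emit_name(&d);
	return 0;
}
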
46189diff --git a/fs/coredump.c b/fs/coredump.c
46190index ce47379..68c8e43 100644
46191--- a/fs/coredump.c
46192+++ b/fs/coredump.c
46193@@ -52,7 +52,7 @@ struct core_name {
46194 char *corename;
46195 int used, size;
46196 };
46197-static atomic_t call_count = ATOMIC_INIT(1);
46198+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
46199
46200 /* The maximal length of core_pattern is also specified in sysctl.c */
46201
46202@@ -60,7 +60,7 @@ static int expand_corename(struct core_name *cn)
46203 {
46204 char *old_corename = cn->corename;
46205
46206- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
46207+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
46208 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
46209
46210 if (!cn->corename) {
46211@@ -157,7 +157,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm)
46212 int pid_in_pattern = 0;
46213 int err = 0;
46214
46215- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
46216+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
46217 cn->corename = kmalloc(cn->size, GFP_KERNEL);
46218 cn->used = 0;
46219
46220@@ -414,17 +414,17 @@ static void wait_for_dump_helpers(struct file *file)
46221 pipe = file->f_path.dentry->d_inode->i_pipe;
46222
46223 pipe_lock(pipe);
46224- pipe->readers++;
46225- pipe->writers--;
46226+ atomic_inc(&pipe->readers);
46227+ atomic_dec(&pipe->writers);
46228
46229- while ((pipe->readers > 1) && (!signal_pending(current))) {
46230+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
46231 wake_up_interruptible_sync(&pipe->wait);
46232 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
46233 pipe_wait(pipe);
46234 }
46235
46236- pipe->readers--;
46237- pipe->writers++;
46238+ atomic_dec(&pipe->readers);
46239+ atomic_inc(&pipe->writers);
46240 pipe_unlock(pipe);
46241
46242 }
46243@@ -471,7 +471,8 @@ void do_coredump(siginfo_t *siginfo, struct pt_regs *regs)
46244 int ispipe;
46245 struct files_struct *displaced;
46246 bool need_nonrelative = false;
46247- static atomic_t core_dump_count = ATOMIC_INIT(0);
46248+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
46249+ long signr = siginfo->si_signo;
46250 struct coredump_params cprm = {
46251 .siginfo = siginfo,
46252 .regs = regs,
46253@@ -484,7 +485,10 @@ void do_coredump(siginfo_t *siginfo, struct pt_regs *regs)
46254 .mm_flags = mm->flags,
46255 };
46256
46257- audit_core_dumps(siginfo->si_signo);
46258+ audit_core_dumps(signr);
46259+
46260+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
46261+ gr_handle_brute_attach(cprm.mm_flags);
46262
46263 binfmt = mm->binfmt;
46264 if (!binfmt || !binfmt->core_dump)
46265@@ -508,7 +512,7 @@ void do_coredump(siginfo_t *siginfo, struct pt_regs *regs)
46266 need_nonrelative = true;
46267 }
46268
46269- retval = coredump_wait(siginfo->si_signo, &core_state);
46270+ retval = coredump_wait(signr, &core_state);
46271 if (retval < 0)
46272 goto fail_creds;
46273
46274@@ -556,7 +560,7 @@ void do_coredump(siginfo_t *siginfo, struct pt_regs *regs)
46275 }
46276 cprm.limit = RLIM_INFINITY;
46277
46278- dump_count = atomic_inc_return(&core_dump_count);
46279+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
46280 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
46281 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
46282 task_tgid_vnr(current), current->comm);
46283@@ -583,6 +587,8 @@ void do_coredump(siginfo_t *siginfo, struct pt_regs *regs)
46284 } else {
46285 struct inode *inode;
46286
46287+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
46288+
46289 if (cprm.limit < binfmt->min_coredump)
46290 goto fail_unlock;
46291
46292@@ -640,7 +646,7 @@ close_fail:
46293 filp_close(cprm.file, NULL);
46294 fail_dropcount:
46295 if (ispipe)
46296- atomic_dec(&core_dump_count);
46297+ atomic_dec_unchecked(&core_dump_count);
46298 fail_unlock:
46299 kfree(cn.corename);
46300 fail_corename:
46301@@ -659,7 +665,7 @@ fail:
46302 */
46303 int dump_write(struct file *file, const void *addr, int nr)
46304 {
46305- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
46306+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
46307 }
46308 EXPORT_SYMBOL(dump_write);
46309
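
Two things happen in the coredump hunks: the pipe reader/writer counts and the dump counters become atomics, and expand_corename() keeps its policy of sizing the buffer as CORENAME_MAX_SIZE times a global call count, now read through the _unchecked accessors. A user-space analog of that growth policy; the constant and helper names are invented for the sketch:

#include <stdlib.h>

#define NAME_CHUNK 128		/* plays the role of CORENAME_MAX_SIZE */

struct core_name {
	char *buf;
	int   used, size;
};

static int call_count = 1;	/* grsec makes this atomic_unchecked_t */

static int expand_name(struct core_name *cn)
{
	char *p;

	cn->size = NAME_CHUNK * ++call_count;	/* grows linearly per call */
	p = realloc(cn->buf, cn->size);
	if (!p)
		return -1;	/* the kernel version frees the old buffer here */
	cn->buf = p;
	return 0;
}

int main(void)
{
	struct core_name cn = { 0 };
	int ret = expand_name(&cn);

	free(cn.buf);
	return ret;
}
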
46310diff --git a/fs/dcache.c b/fs/dcache.c
46311index 0d0adb6..f4646e9 100644
46312--- a/fs/dcache.c
46313+++ b/fs/dcache.c
46314@@ -3164,7 +3164,7 @@ void __init vfs_caches_init(unsigned long mempages)
46315 mempages -= reserve;
46316
46317 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
46318- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
46319+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
46320
46321 dcache_init();
46322 inode_init();
46323diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
46324index b607d92..41fda09 100644
46325--- a/fs/debugfs/inode.c
46326+++ b/fs/debugfs/inode.c
46327@@ -416,7 +416,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
46328 */
46329 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
46330 {
46331+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
46332+ return __create_file(name, S_IFDIR | S_IRWXU,
46333+#else
46334 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
46335+#endif
46336 parent, NULL, NULL);
46337 }
46338 EXPORT_SYMBOL_GPL(debugfs_create_dir);
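
Under CONFIG_GRKERNSEC_SYSFS_RESTRICT the hunk above creates debugfs directories mode 0700 instead of 0755, removing group/other read and search access. The bit arithmetic, using the kernel's S_I*UGO spellings (defined locally here, since user-space headers spell them out):

#include <stdio.h>
#include <sys/stat.h>

#define S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)
#define S_IXUGO (S_IXUSR | S_IXGRP | S_IXOTH)

int main(void)
{
	printf("default:    %04o\n", (unsigned)(S_IRWXU | S_IRUGO | S_IXUGO)); /* 0755 */
	printf("restricted: %04o\n", (unsigned)S_IRWXU);                       /* 0700 */
	return 0;
}
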
46339diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
46340index cc7709e..7e7211f 100644
46341--- a/fs/ecryptfs/inode.c
46342+++ b/fs/ecryptfs/inode.c
46343@@ -674,7 +674,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
46344 old_fs = get_fs();
46345 set_fs(get_ds());
46346 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
46347- (char __user *)lower_buf,
46348+ (char __force_user *)lower_buf,
46349 PATH_MAX);
46350 set_fs(old_fs);
46351 if (rc < 0)
46352@@ -706,7 +706,7 @@ out:
46353 static void
46354 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
46355 {
46356- char *buf = nd_get_link(nd);
46357+ const char *buf = nd_get_link(nd);
46358 if (!IS_ERR(buf)) {
46359 /* Free the char* */
46360 kfree(buf);
46361diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
46362index 412e6ed..4292d22 100644
46363--- a/fs/ecryptfs/miscdev.c
46364+++ b/fs/ecryptfs/miscdev.c
46365@@ -315,7 +315,7 @@ check_list:
46366 goto out_unlock_msg_ctx;
46367 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
46368 if (msg_ctx->msg) {
46369- if (copy_to_user(&buf[i], packet_length, packet_length_size))
46370+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
46371 goto out_unlock_msg_ctx;
46372 i += packet_length_size;
46373 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
46374diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
46375index b2a34a1..162fa69 100644
46376--- a/fs/ecryptfs/read_write.c
46377+++ b/fs/ecryptfs/read_write.c
46378@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
46379 return -EIO;
46380 fs_save = get_fs();
46381 set_fs(get_ds());
46382- rc = vfs_write(lower_file, data, size, &offset);
46383+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
46384 set_fs(fs_save);
46385 mark_inode_dirty_sync(ecryptfs_inode);
46386 return rc;
46387@@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
46388 return -EIO;
46389 fs_save = get_fs();
46390 set_fs(get_ds());
46391- rc = vfs_read(lower_file, data, size, &offset);
46392+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
46393 set_fs(fs_save);
46394 return rc;
46395 }
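
The ecryptfs hunks, and dozens like them in this patch, only matter to the sparse static checker: after a set_fs(KERNEL_DS) switch, a kernel buffer is deliberately passed where a __user pointer is expected, and the __force_user cast silences the address-space warning without changing generated code. A minimal sketch of the annotation machinery, assuming the patch defines __force_user as the usual __force __user shorthand:

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user	__force __user

#include <string.h>

static size_t fake_vfs_write(const char __user *buf, size_t len)
{
	return len;			/* stand-in for vfs_write() */
}

int main(void)
{
	char kbuf[8] = "data";

	/* after a set_fs(KERNEL_DS)-style switch the crossing is legal;
	   the cast just tells sparse it is intentional */
	return fake_vfs_write((const char __force_user *)kbuf, strlen(kbuf)) == 4 ? 0 : 1;
}
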
46396diff --git a/fs/exec.c b/fs/exec.c
46397index c6e6de4..fb98879 100644
46398--- a/fs/exec.c
46399+++ b/fs/exec.c
46400@@ -55,6 +55,17 @@
46401 #include <linux/pipe_fs_i.h>
46402 #include <linux/oom.h>
46403 #include <linux/compat.h>
46404+#include <linux/random.h>
46405+#include <linux/seq_file.h>
46406+#include <linux/coredump.h>
46407+#include <linux/mman.h>
46408+
46409+#ifdef CONFIG_PAX_REFCOUNT
46410+#include <linux/kallsyms.h>
46411+#include <linux/kdebug.h>
46412+#endif
46413+
46414+#include <trace/events/fs.h>
46415
46416 #include <asm/uaccess.h>
46417 #include <asm/mmu_context.h>
46418@@ -66,6 +77,18 @@
46419
46420 #include <trace/events/sched.h>
46421
46422+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
46423+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
46424+{
46425+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
46426+}
46427+#endif
46428+
46429+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
46430+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
46431+EXPORT_SYMBOL(pax_set_initial_flags_func);
46432+#endif
46433+
46434 int suid_dumpable = 0;
46435
46436 static LIST_HEAD(formats);
46437@@ -180,18 +203,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
46438 int write)
46439 {
46440 struct page *page;
46441- int ret;
46442
46443-#ifdef CONFIG_STACK_GROWSUP
46444- if (write) {
46445- ret = expand_downwards(bprm->vma, pos);
46446- if (ret < 0)
46447- return NULL;
46448- }
46449-#endif
46450- ret = get_user_pages(current, bprm->mm, pos,
46451- 1, write, 1, &page, NULL);
46452- if (ret <= 0)
46453+ if (0 > expand_downwards(bprm->vma, pos))
46454+ return NULL;
46455+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
46456 return NULL;
46457
46458 if (write) {
46459@@ -207,6 +222,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
46460 if (size <= ARG_MAX)
46461 return page;
46462
46463+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46464+ // only allow 512KB for argv+env on suid/sgid binaries
46465+ // to prevent easy ASLR exhaustion
46466+ if (((bprm->cred->euid != current_euid()) ||
46467+ (bprm->cred->egid != current_egid())) &&
46468+ (size > (512 * 1024))) {
46469+ put_page(page);
46470+ return NULL;
46471+ }
46472+#endif
46473+
46474 /*
46475 * Limit to 1/4-th the stack size for the argv+env strings.
46476 * This ensures that:
46477@@ -266,6 +292,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
46478 vma->vm_end = STACK_TOP_MAX;
46479 vma->vm_start = vma->vm_end - PAGE_SIZE;
46480 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
46481+
46482+#ifdef CONFIG_PAX_SEGMEXEC
46483+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
46484+#endif
46485+
46486 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
46487 INIT_LIST_HEAD(&vma->anon_vma_chain);
46488
46489@@ -276,6 +307,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
46490 mm->stack_vm = mm->total_vm = 1;
46491 up_write(&mm->mmap_sem);
46492 bprm->p = vma->vm_end - sizeof(void *);
46493+
46494+#ifdef CONFIG_PAX_RANDUSTACK
46495+ if (randomize_va_space)
46496+ bprm->p ^= random32() & ~PAGE_MASK;
46497+#endif
46498+
46499 return 0;
46500 err:
46501 up_write(&mm->mmap_sem);
46502@@ -384,19 +421,7 @@ err:
46503 return err;
46504 }
46505
46506-struct user_arg_ptr {
46507-#ifdef CONFIG_COMPAT
46508- bool is_compat;
46509-#endif
46510- union {
46511- const char __user *const __user *native;
46512-#ifdef CONFIG_COMPAT
46513- const compat_uptr_t __user *compat;
46514-#endif
46515- } ptr;
46516-};
46517-
46518-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
46519+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
46520 {
46521 const char __user *native;
46522
46523@@ -405,14 +430,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
46524 compat_uptr_t compat;
46525
46526 if (get_user(compat, argv.ptr.compat + nr))
46527- return ERR_PTR(-EFAULT);
46528+ return (const char __force_user *)ERR_PTR(-EFAULT);
46529
46530 return compat_ptr(compat);
46531 }
46532 #endif
46533
46534 if (get_user(native, argv.ptr.native + nr))
46535- return ERR_PTR(-EFAULT);
46536+ return (const char __force_user *)ERR_PTR(-EFAULT);
46537
46538 return native;
46539 }
46540@@ -431,11 +456,12 @@ static int count(struct user_arg_ptr argv, int max)
46541 if (!p)
46542 break;
46543
46544- if (IS_ERR(p))
46545+ if (IS_ERR((const char __force_kernel *)p))
46546 return -EFAULT;
46547
46548- if (i++ >= max)
46549+ if (i >= max)
46550 return -E2BIG;
46551+ ++i;
46552
46553 if (fatal_signal_pending(current))
46554 return -ERESTARTNOHAND;
46555@@ -465,7 +491,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
46556
46557 ret = -EFAULT;
46558 str = get_user_arg_ptr(argv, argc);
46559- if (IS_ERR(str))
46560+ if (IS_ERR((const char __force_kernel *)str))
46561 goto out;
46562
46563 len = strnlen_user(str, MAX_ARG_STRLEN);
46564@@ -547,7 +573,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
46565 int r;
46566 mm_segment_t oldfs = get_fs();
46567 struct user_arg_ptr argv = {
46568- .ptr.native = (const char __user *const __user *)__argv,
46569+ .ptr.native = (const char __force_user *const __force_user *)__argv,
46570 };
46571
46572 set_fs(KERNEL_DS);
46573@@ -582,7 +608,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
46574 unsigned long new_end = old_end - shift;
46575 struct mmu_gather tlb;
46576
46577- BUG_ON(new_start > new_end);
46578+ if (new_start >= new_end || new_start < mmap_min_addr)
46579+ return -ENOMEM;
46580
46581 /*
46582 * ensure there are no vmas between where we want to go
46583@@ -591,6 +618,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
46584 if (vma != find_vma(mm, new_start))
46585 return -EFAULT;
46586
46587+#ifdef CONFIG_PAX_SEGMEXEC
46588+ BUG_ON(pax_find_mirror_vma(vma));
46589+#endif
46590+
46591 /*
46592 * cover the whole range: [new_start, old_end)
46593 */
46594@@ -671,10 +702,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
46595 stack_top = arch_align_stack(stack_top);
46596 stack_top = PAGE_ALIGN(stack_top);
46597
46598- if (unlikely(stack_top < mmap_min_addr) ||
46599- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
46600- return -ENOMEM;
46601-
46602 stack_shift = vma->vm_end - stack_top;
46603
46604 bprm->p -= stack_shift;
46605@@ -686,8 +713,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
46606 bprm->exec -= stack_shift;
46607
46608 down_write(&mm->mmap_sem);
46609+
46610+ /* Move stack pages down in memory. */
46611+ if (stack_shift) {
46612+ ret = shift_arg_pages(vma, stack_shift);
46613+ if (ret)
46614+ goto out_unlock;
46615+ }
46616+
46617 vm_flags = VM_STACK_FLAGS;
46618
46619+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
46620+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46621+ vm_flags &= ~VM_EXEC;
46622+
46623+#ifdef CONFIG_PAX_MPROTECT
46624+ if (mm->pax_flags & MF_PAX_MPROTECT)
46625+ vm_flags &= ~VM_MAYEXEC;
46626+#endif
46627+
46628+ }
46629+#endif
46630+
46631 /*
46632 * Adjust stack execute permissions; explicitly enable for
46633 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
46634@@ -706,13 +753,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
46635 goto out_unlock;
46636 BUG_ON(prev != vma);
46637
46638- /* Move stack pages down in memory. */
46639- if (stack_shift) {
46640- ret = shift_arg_pages(vma, stack_shift);
46641- if (ret)
46642- goto out_unlock;
46643- }
46644-
46645 /* mprotect_fixup is overkill to remove the temporary stack flags */
46646 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
46647
46648@@ -736,6 +776,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
46649 #endif
46650 current->mm->start_stack = bprm->p;
46651 ret = expand_stack(vma, stack_base);
46652+
46653+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_ASLR)
46654+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
46655+ unsigned long size, flags, vm_flags;
46656+
46657+ size = STACK_TOP - vma->vm_end;
46658+ flags = MAP_FIXED | MAP_PRIVATE;
46659+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
46660+
46661+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, flags, vm_flags, 0);
46662+
46663+#ifdef CONFIG_X86
46664+ if (!ret) {
46665+ size = mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
46666+ ret = 0 != mmap_region(NULL, 0, size, flags, vm_flags, 0);
46667+ }
46668+#endif
46669+
46670+ }
46671+#endif
46672+
46673 if (ret)
46674 ret = -EFAULT;
46675
46676@@ -771,6 +832,8 @@ struct file *open_exec(const char *name)
46677
46678 fsnotify_open(file);
46679
46680+ trace_open_exec(name);
46681+
46682 err = deny_write_access(file);
46683 if (err)
46684 goto exit;
46685@@ -794,7 +857,7 @@ int kernel_read(struct file *file, loff_t offset,
46686 old_fs = get_fs();
46687 set_fs(get_ds());
46688 /* The cast to a user pointer is valid due to the set_fs() */
46689- result = vfs_read(file, (void __user *)addr, count, &pos);
46690+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
46691 set_fs(old_fs);
46692 return result;
46693 }
46694@@ -1246,7 +1309,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
46695 }
46696 rcu_read_unlock();
46697
46698- if (p->fs->users > n_fs) {
46699+ if (atomic_read(&p->fs->users) > n_fs) {
46700 bprm->unsafe |= LSM_UNSAFE_SHARE;
46701 } else {
46702 res = -EAGAIN;
46703@@ -1449,6 +1512,28 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
46704
46705 EXPORT_SYMBOL(search_binary_handler);
46706
46707+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46708+static DEFINE_PER_CPU(u64, exec_counter);
46709+static int __init init_exec_counters(void)
46710+{
46711+ unsigned int cpu;
46712+
46713+ for_each_possible_cpu(cpu) {
46714+ per_cpu(exec_counter, cpu) = (u64)cpu;
46715+ }
46716+
46717+ return 0;
46718+}
46719+early_initcall(init_exec_counters);
46720+static inline void increment_exec_counter(void)
46721+{
46722+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
46723+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
46724+}
46725+#else
46726+static inline void increment_exec_counter(void) {}
46727+#endif
46728+
46729 /*
46730 * sys_execve() executes a new program.
46731 */
46732@@ -1457,6 +1542,11 @@ static int do_execve_common(const char *filename,
46733 struct user_arg_ptr envp,
46734 struct pt_regs *regs)
46735 {
46736+#ifdef CONFIG_GRKERNSEC
46737+ struct file *old_exec_file;
46738+ struct acl_subject_label *old_acl;
46739+ struct rlimit old_rlim[RLIM_NLIMITS];
46740+#endif
46741 struct linux_binprm *bprm;
46742 struct file *file;
46743 struct files_struct *displaced;
46744@@ -1464,6 +1554,8 @@ static int do_execve_common(const char *filename,
46745 int retval;
46746 const struct cred *cred = current_cred();
46747
46748+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
46749+
46750 /*
46751 * We move the actual failure in case of RLIMIT_NPROC excess from
46752 * set*uid() to execve() because too many poorly written programs
46753@@ -1504,12 +1596,27 @@ static int do_execve_common(const char *filename,
46754 if (IS_ERR(file))
46755 goto out_unmark;
46756
46757+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
46758+ retval = -EPERM;
46759+ goto out_file;
46760+ }
46761+
46762 sched_exec();
46763
46764 bprm->file = file;
46765 bprm->filename = filename;
46766 bprm->interp = filename;
46767
46768+ if (gr_process_user_ban()) {
46769+ retval = -EPERM;
46770+ goto out_file;
46771+ }
46772+
46773+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
46774+ retval = -EACCES;
46775+ goto out_file;
46776+ }
46777+
46778 retval = bprm_mm_init(bprm);
46779 if (retval)
46780 goto out_file;
46781@@ -1526,24 +1633,65 @@ static int do_execve_common(const char *filename,
46782 if (retval < 0)
46783 goto out;
46784
46785+#ifdef CONFIG_GRKERNSEC
46786+ old_acl = current->acl;
46787+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
46788+ old_exec_file = current->exec_file;
46789+ get_file(file);
46790+ current->exec_file = file;
46791+#endif
46792+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46793+ /* limit suid stack to 8MB
46794+ we saved the old limits above and will restore them if this exec fails
46795+ */
46796+ if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
46797+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
46798+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
46799+#endif
46800+
46801+ if (!gr_tpe_allow(file)) {
46802+ retval = -EACCES;
46803+ goto out_fail;
46804+ }
46805+
46806+ if (gr_check_crash_exec(file)) {
46807+ retval = -EACCES;
46808+ goto out_fail;
46809+ }
46810+
46811+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
46812+ bprm->unsafe);
46813+ if (retval < 0)
46814+ goto out_fail;
46815+
46816 retval = copy_strings_kernel(1, &bprm->filename, bprm);
46817 if (retval < 0)
46818- goto out;
46819+ goto out_fail;
46820
46821 bprm->exec = bprm->p;
46822 retval = copy_strings(bprm->envc, envp, bprm);
46823 if (retval < 0)
46824- goto out;
46825+ goto out_fail;
46826
46827 retval = copy_strings(bprm->argc, argv, bprm);
46828 if (retval < 0)
46829- goto out;
46830+ goto out_fail;
46831+
46832+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
46833+
46834+ gr_handle_exec_args(bprm, argv);
46835
46836 retval = search_binary_handler(bprm,regs);
46837 if (retval < 0)
46838- goto out;
46839+ goto out_fail;
46840+#ifdef CONFIG_GRKERNSEC
46841+ if (old_exec_file)
46842+ fput(old_exec_file);
46843+#endif
46844
46845 /* execve succeeded */
46846+
46847+ increment_exec_counter();
46848 current->fs->in_exec = 0;
46849 current->in_execve = 0;
46850 acct_update_integrals(current);
46851@@ -1552,6 +1700,14 @@ static int do_execve_common(const char *filename,
46852 put_files_struct(displaced);
46853 return retval;
46854
46855+out_fail:
46856+#ifdef CONFIG_GRKERNSEC
46857+ current->acl = old_acl;
46858+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
46859+ fput(current->exec_file);
46860+ current->exec_file = old_exec_file;
46861+#endif
46862+
46863 out:
46864 if (bprm->mm) {
46865 acct_arg_size(bprm, 0);
46866@@ -1727,3 +1883,253 @@ int kernel_execve(const char *filename,
46867 ret_from_kernel_execve(p);
46868 }
46869 #endif
46870+
46871+int pax_check_flags(unsigned long *flags)
46872+{
46873+ int retval = 0;
46874+
46875+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
46876+ if (*flags & MF_PAX_SEGMEXEC)
46877+ {
46878+ *flags &= ~MF_PAX_SEGMEXEC;
46879+ retval = -EINVAL;
46880+ }
46881+#endif
46882+
46883+ if ((*flags & MF_PAX_PAGEEXEC)
46884+
46885+#ifdef CONFIG_PAX_PAGEEXEC
46886+ && (*flags & MF_PAX_SEGMEXEC)
46887+#endif
46888+
46889+ )
46890+ {
46891+ *flags &= ~MF_PAX_PAGEEXEC;
46892+ retval = -EINVAL;
46893+ }
46894+
46895+ if ((*flags & MF_PAX_MPROTECT)
46896+
46897+#ifdef CONFIG_PAX_MPROTECT
46898+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
46899+#endif
46900+
46901+ )
46902+ {
46903+ *flags &= ~MF_PAX_MPROTECT;
46904+ retval = -EINVAL;
46905+ }
46906+
46907+ if ((*flags & MF_PAX_EMUTRAMP)
46908+
46909+#ifdef CONFIG_PAX_EMUTRAMP
46910+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
46911+#endif
46912+
46913+ )
46914+ {
46915+ *flags &= ~MF_PAX_EMUTRAMP;
46916+ retval = -EINVAL;
46917+ }
46918+
46919+ return retval;
46920+}
46921+
46922+EXPORT_SYMBOL(pax_check_flags);
46923+
46924+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
46925+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
46926+{
46927+ struct task_struct *tsk = current;
46928+ struct mm_struct *mm = current->mm;
46929+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
46930+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
46931+ char *path_exec = NULL;
46932+ char *path_fault = NULL;
46933+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
46934+ siginfo_t info = { };
46935+
46936+ if (buffer_exec && buffer_fault) {
46937+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
46938+
46939+ down_read(&mm->mmap_sem);
46940+ vma = mm->mmap;
46941+ while (vma && (!vma_exec || !vma_fault)) {
46942+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
46943+ vma_exec = vma;
46944+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
46945+ vma_fault = vma;
46946+ vma = vma->vm_next;
46947+ }
46948+ if (vma_exec) {
46949+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
46950+ if (IS_ERR(path_exec))
46951+ path_exec = "<path too long>";
46952+ else {
46953+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
46954+ if (path_exec) {
46955+ *path_exec = 0;
46956+ path_exec = buffer_exec;
46957+ } else
46958+ path_exec = "<path too long>";
46959+ }
46960+ }
46961+ if (vma_fault) {
46962+ start = vma_fault->vm_start;
46963+ end = vma_fault->vm_end;
46964+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
46965+ if (vma_fault->vm_file) {
46966+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
46967+ if (IS_ERR(path_fault))
46968+ path_fault = "<path too long>";
46969+ else {
46970+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
46971+ if (path_fault) {
46972+ *path_fault = 0;
46973+ path_fault = buffer_fault;
46974+ } else
46975+ path_fault = "<path too long>";
46976+ }
46977+ } else
46978+ path_fault = "<anonymous mapping>";
46979+ }
46980+ up_read(&mm->mmap_sem);
46981+ }
46982+ if (tsk->signal->curr_ip)
46983+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
46984+ else
46985+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
46986+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
46987+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
46988+ task_uid(tsk), task_euid(tsk), pc, sp);
46989+ free_page((unsigned long)buffer_exec);
46990+ free_page((unsigned long)buffer_fault);
46991+ pax_report_insns(regs, pc, sp);
46992+ info.si_signo = SIGKILL;
46993+ info.si_errno = 0;
46994+ info.si_code = SI_KERNEL;
46995+ info.si_pid = 0;
46996+ info.si_uid = 0;
46997+ do_coredump(&info, regs);
46998+}
46999+#endif
47000+
47001+#ifdef CONFIG_PAX_REFCOUNT
47002+void pax_report_refcount_overflow(struct pt_regs *regs)
47003+{
47004+ if (current->signal->curr_ip)
47005+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
47006+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
47007+ else
47008+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
47009+ current->comm, task_pid_nr(current), current_uid(), current_euid());
47010+ print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
47011+ show_regs(regs);
47012+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
47013+}
47014+#endif
47015+
47016+#ifdef CONFIG_PAX_USERCOPY
47017+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
47018+static noinline int check_stack_object(const void *obj, unsigned long len)
47019+{
47020+ const void * const stack = task_stack_page(current);
47021+ const void * const stackend = stack + THREAD_SIZE;
47022+
47023+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
47024+ const void *frame = NULL;
47025+ const void *oldframe;
47026+#endif
47027+
47028+ if (obj + len < obj)
47029+ return -1;
47030+
47031+ if (obj + len <= stack || stackend <= obj)
47032+ return 0;
47033+
47034+ if (obj < stack || stackend < obj + len)
47035+ return -1;
47036+
47037+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
47038+ oldframe = __builtin_frame_address(1);
47039+ if (oldframe)
47040+ frame = __builtin_frame_address(2);
47041+ /*
47042+ low ----------------------------------------------> high
47043+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
47044+ ^----------------^
47045+ allow copies only within here
47046+ */
47047+ while (stack <= frame && frame < stackend) {
47048+ /* if obj + len extends past the last frame, this
47049+ check won't pass and the next frame will be 0,
47050+ causing us to bail out and correctly report
47051+ the copy as invalid
47052+ */
47053+ if (obj + len <= frame)
47054+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
47055+ oldframe = frame;
47056+ frame = *(const void * const *)frame;
47057+ }
47058+ return -1;
47059+#else
47060+ return 1;
47061+#endif
47062+}
47063+
47064+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
47065+{
47066+ if (current->signal->curr_ip)
47067+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
47068+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
47069+ else
47070+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
47071+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
47072+ dump_stack();
47073+ gr_handle_kernel_exploit();
47074+ do_group_exit(SIGKILL);
47075+}
47076+#endif
47077+
47078+void check_object_size(const void *ptr, unsigned long n, bool to)
47079+{
47080+
47081+#ifdef CONFIG_PAX_USERCOPY
47082+ const char *type;
47083+
47084+ if (!n)
47085+ return;
47086+
47087+ type = check_heap_object(ptr, n);
47088+ if (!type) {
47089+ if (check_stack_object(ptr, n) != -1)
47090+ return;
47091+ type = "<process stack>";
47092+ }
47093+
47094+ pax_report_usercopy(ptr, n, to, type);
47095+#endif
47096+
47097+}
47098+EXPORT_SYMBOL(check_object_size);
47099+
47100+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
47101+void pax_track_stack(void)
47102+{
47103+ unsigned long sp = (unsigned long)&sp;
47104+ if (sp < current_thread_info()->lowest_stack &&
47105+ sp > (unsigned long)task_stack_page(current))
47106+ current_thread_info()->lowest_stack = sp;
47107+}
47108+EXPORT_SYMBOL(pax_track_stack);
47109+#endif
47110+
47111+#ifdef CONFIG_PAX_SIZE_OVERFLOW
47112+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
47113+{
47114+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s\n", func, file, line, ssa_name);
47115+ dump_stack();
47116+ do_group_exit(SIGKILL);
47117+}
47118+EXPORT_SYMBOL(report_size_overflow);
47119+#endif
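
The PAX_USERCOPY helper added above, check_stack_object(), first classifies the (ptr, len) range against the thread stack before the optional frame-pointer walk. A portable sketch of that coarse check, using the same comparisons with user-space buffers standing in for the thread stack:

#include <stdio.h>

/* 0: fully outside, 1: fully inside, -1: wraps or straddles a boundary */
static int classify(const char *obj, unsigned long len,
		    const char *stack, const char *stackend)
{
	if (obj + len < obj)				/* wraps around */
		return -1;
	if (obj + len <= stack || stackend <= obj)	/* disjoint */
		return 0;
	if (obj < stack || stackend < obj + len)	/* partial overlap */
		return -1;
	return 1;
}

int main(void)
{
	char buf[512];
	const char *stk = buf + 128, *stk_end = buf + 384;

	printf("inside:  %d\n", classify(buf + 160, 32, stk, stk_end));
	printf("outside: %d\n", classify(buf + 16,  32, stk, stk_end));
	printf("partial: %d\n", classify(buf + 360, 64, stk, stk_end));
	return 0;
}
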
47120diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
47121index 2616d0e..2ffdec9 100644
47122--- a/fs/ext2/balloc.c
47123+++ b/fs/ext2/balloc.c
47124@@ -1190,10 +1190,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
47125
47126 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
47127 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
47128- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
47129+ if (free_blocks < root_blocks + 1 &&
47130 !uid_eq(sbi->s_resuid, current_fsuid()) &&
47131 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
47132- !in_group_p (sbi->s_resgid))) {
47133+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
47134 return 0;
47135 }
47136 return 1;
47137diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
47138index 22548f5..41521d8 100644
47139--- a/fs/ext3/balloc.c
47140+++ b/fs/ext3/balloc.c
47141@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
47142
47143 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
47144 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
47145- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
47146+ if (free_blocks < root_blocks + 1 &&
47147 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
47148 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
47149- !in_group_p (sbi->s_resgid))) {
47150+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
47151 return 0;
47152 }
47153 return 1;
47154diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
47155index cf18217..8f6b9c3 100644
47156--- a/fs/ext4/balloc.c
47157+++ b/fs/ext4/balloc.c
47158@@ -498,8 +498,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
47159 /* Hm, nope. Are (enough) root reserved clusters available? */
47160 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
47161 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
47162- capable(CAP_SYS_RESOURCE) ||
47163- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
47164+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
47165+ capable_nolog(CAP_SYS_RESOURCE)) {
47166
47167 if (free_clusters >= (nclusters + dirty_clusters))
47168 return 1;
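
The ext2, ext3 and ext4 hunks make the same two changes: the capability test moves to the tail of the condition so short-circuit evaluation only reaches it after the cheap resuid/resgid ownership tests, and capable() becomes capable_nolog(), the grsecurity variant that performs the check without writing an audit entry. The ordering point, with stand-in predicates:

#include <stdbool.h>
#include <stdio.h>

static bool uid_is_reserved(void) { return false; }	/* cheap */
static bool gid_is_reserved(void) { return false; }	/* cheap */

static bool cap_sys_resource(void)	/* costly / has side effects */
{
	puts("capability consulted");
	return true;
}

static bool may_use_reserve(void)
{
	/* left to right: the capability probe runs only as a last resort */
	return uid_is_reserved() || gid_is_reserved() || cap_sys_resource();
}

int main(void)
{
	return may_use_reserve() ? 0 : 1;
}
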
47169diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
47170index 3c20de1..6ff2460 100644
47171--- a/fs/ext4/ext4.h
47172+++ b/fs/ext4/ext4.h
47173@@ -1247,19 +1247,19 @@ struct ext4_sb_info {
47174 unsigned long s_mb_last_start;
47175
47176 /* stats for buddy allocator */
47177- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
47178- atomic_t s_bal_success; /* we found long enough chunks */
47179- atomic_t s_bal_allocated; /* in blocks */
47180- atomic_t s_bal_ex_scanned; /* total extents scanned */
47181- atomic_t s_bal_goals; /* goal hits */
47182- atomic_t s_bal_breaks; /* too long searches */
47183- atomic_t s_bal_2orders; /* 2^order hits */
47184+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
47185+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
47186+ atomic_unchecked_t s_bal_allocated; /* in blocks */
47187+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
47188+ atomic_unchecked_t s_bal_goals; /* goal hits */
47189+ atomic_unchecked_t s_bal_breaks; /* too long searches */
47190+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
47191 spinlock_t s_bal_lock;
47192 unsigned long s_mb_buddies_generated;
47193 unsigned long long s_mb_generation_time;
47194- atomic_t s_mb_lost_chunks;
47195- atomic_t s_mb_preallocated;
47196- atomic_t s_mb_discarded;
47197+ atomic_unchecked_t s_mb_lost_chunks;
47198+ atomic_unchecked_t s_mb_preallocated;
47199+ atomic_unchecked_t s_mb_discarded;
47200 atomic_t s_lock_busy;
47201
47202 /* locality groups */
47203diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
47204index 526e553..3f2de85 100644
47205--- a/fs/ext4/mballoc.c
47206+++ b/fs/ext4/mballoc.c
47207@@ -1747,7 +1747,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
47208 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
47209
47210 if (EXT4_SB(sb)->s_mb_stats)
47211- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
47212+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
47213
47214 break;
47215 }
47216@@ -2044,7 +2044,7 @@ repeat:
47217 ac->ac_status = AC_STATUS_CONTINUE;
47218 ac->ac_flags |= EXT4_MB_HINT_FIRST;
47219 cr = 3;
47220- atomic_inc(&sbi->s_mb_lost_chunks);
47221+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
47222 goto repeat;
47223 }
47224 }
47225@@ -2552,25 +2552,25 @@ int ext4_mb_release(struct super_block *sb)
47226 if (sbi->s_mb_stats) {
47227 ext4_msg(sb, KERN_INFO,
47228 "mballoc: %u blocks %u reqs (%u success)",
47229- atomic_read(&sbi->s_bal_allocated),
47230- atomic_read(&sbi->s_bal_reqs),
47231- atomic_read(&sbi->s_bal_success));
47232+ atomic_read_unchecked(&sbi->s_bal_allocated),
47233+ atomic_read_unchecked(&sbi->s_bal_reqs),
47234+ atomic_read_unchecked(&sbi->s_bal_success));
47235 ext4_msg(sb, KERN_INFO,
47236 "mballoc: %u extents scanned, %u goal hits, "
47237 "%u 2^N hits, %u breaks, %u lost",
47238- atomic_read(&sbi->s_bal_ex_scanned),
47239- atomic_read(&sbi->s_bal_goals),
47240- atomic_read(&sbi->s_bal_2orders),
47241- atomic_read(&sbi->s_bal_breaks),
47242- atomic_read(&sbi->s_mb_lost_chunks));
47243+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
47244+ atomic_read_unchecked(&sbi->s_bal_goals),
47245+ atomic_read_unchecked(&sbi->s_bal_2orders),
47246+ atomic_read_unchecked(&sbi->s_bal_breaks),
47247+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
47248 ext4_msg(sb, KERN_INFO,
47249 "mballoc: %lu generated and it took %Lu",
47250 sbi->s_mb_buddies_generated,
47251 sbi->s_mb_generation_time);
47252 ext4_msg(sb, KERN_INFO,
47253 "mballoc: %u preallocated, %u discarded",
47254- atomic_read(&sbi->s_mb_preallocated),
47255- atomic_read(&sbi->s_mb_discarded));
47256+ atomic_read_unchecked(&sbi->s_mb_preallocated),
47257+ atomic_read_unchecked(&sbi->s_mb_discarded));
47258 }
47259
47260 free_percpu(sbi->s_locality_groups);
47261@@ -3052,16 +3052,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
47262 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
47263
47264 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
47265- atomic_inc(&sbi->s_bal_reqs);
47266- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
47267+ atomic_inc_unchecked(&sbi->s_bal_reqs);
47268+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
47269 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
47270- atomic_inc(&sbi->s_bal_success);
47271- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
47272+ atomic_inc_unchecked(&sbi->s_bal_success);
47273+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
47274 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
47275 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
47276- atomic_inc(&sbi->s_bal_goals);
47277+ atomic_inc_unchecked(&sbi->s_bal_goals);
47278 if (ac->ac_found > sbi->s_mb_max_to_scan)
47279- atomic_inc(&sbi->s_bal_breaks);
47280+ atomic_inc_unchecked(&sbi->s_bal_breaks);
47281 }
47282
47283 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
47284@@ -3461,7 +3461,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
47285 trace_ext4_mb_new_inode_pa(ac, pa);
47286
47287 ext4_mb_use_inode_pa(ac, pa);
47288- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
47289+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
47290
47291 ei = EXT4_I(ac->ac_inode);
47292 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
47293@@ -3521,7 +3521,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
47294 trace_ext4_mb_new_group_pa(ac, pa);
47295
47296 ext4_mb_use_group_pa(ac, pa);
47297- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
47298+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
47299
47300 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
47301 lg = ac->ac_lg;
47302@@ -3610,7 +3610,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
47303 * from the bitmap and continue.
47304 */
47305 }
47306- atomic_add(free, &sbi->s_mb_discarded);
47307+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
47308
47309 return err;
47310 }
47311@@ -3628,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
47312 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
47313 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
47314 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
47315- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
47316+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
47317 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
47318
47319 return 0;
47320diff --git a/fs/ext4/super.c b/fs/ext4/super.c
47321index d59b351..775f8c8 100644
47322--- a/fs/ext4/super.c
47323+++ b/fs/ext4/super.c
47324@@ -3212,7 +3212,6 @@ int ext4_calculate_overhead(struct super_block *sb)
47325 ext4_fsblk_t overhead = 0;
47326 char *buf = (char *) get_zeroed_page(GFP_KERNEL);
47327
47328- memset(buf, 0, PAGE_SIZE);
47329 if (!buf)
47330 return -ENOMEM;
47331
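
The super.c hunk deletes a memset() that ran before the NULL check: get_zeroed_page() already returns zeroed memory, and touching the buffer before testing it would oops on allocation failure. The safe shape in user-space terms, with calloc() standing in for get_zeroed_page():

#include <stdlib.h>

static int calculate_overhead(void)
{
	char *buf = calloc(1, 4096);	/* already zeroed on success */

	if (!buf)
		return -1;		/* check before any use */
	/* ... use buf ... */
	free(buf);
	return 0;
}

int main(void)
{
	return calculate_overhead();
}
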
47332diff --git a/fs/fcntl.c b/fs/fcntl.c
47333index 71a600a..20d87b1 100644
47334--- a/fs/fcntl.c
47335+++ b/fs/fcntl.c
47336@@ -107,6 +107,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
47337 if (err)
47338 return err;
47339
47340+ if (gr_handle_chroot_fowner(pid, type))
47341+ return -ENOENT;
47342+ if (gr_check_protected_task_fowner(pid, type))
47343+ return -EACCES;
47344+
47345 f_modown(filp, pid, type, force);
47346 return 0;
47347 }
47348diff --git a/fs/fifo.c b/fs/fifo.c
47349index cf6f434..3d7942c 100644
47350--- a/fs/fifo.c
47351+++ b/fs/fifo.c
47352@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
47353 */
47354 filp->f_op = &read_pipefifo_fops;
47355 pipe->r_counter++;
47356- if (pipe->readers++ == 0)
47357+ if (atomic_inc_return(&pipe->readers) == 1)
47358 wake_up_partner(inode);
47359
47360- if (!pipe->writers) {
47361+ if (!atomic_read(&pipe->writers)) {
47362 if ((filp->f_flags & O_NONBLOCK)) {
47363 /* suppress POLLHUP until we have
47364 * seen a writer */
47365@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
47366 * errno=ENXIO when there is no process reading the FIFO.
47367 */
47368 ret = -ENXIO;
47369- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
47370+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
47371 goto err;
47372
47373 filp->f_op = &write_pipefifo_fops;
47374 pipe->w_counter++;
47375- if (!pipe->writers++)
47376+ if (atomic_inc_return(&pipe->writers) == 1)
47377 wake_up_partner(inode);
47378
47379- if (!pipe->readers) {
47380+ if (!atomic_read(&pipe->readers)) {
47381 if (wait_for_partner(inode, &pipe->r_counter))
47382 goto err_wr;
47383 }
47384@@ -104,11 +104,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
47385 */
47386 filp->f_op = &rdwr_pipefifo_fops;
47387
47388- pipe->readers++;
47389- pipe->writers++;
47390+ atomic_inc(&pipe->readers);
47391+ atomic_inc(&pipe->writers);
47392 pipe->r_counter++;
47393 pipe->w_counter++;
47394- if (pipe->readers == 1 || pipe->writers == 1)
47395+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
47396 wake_up_partner(inode);
47397 break;
47398
47399@@ -122,19 +122,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
47400 return 0;
47401
47402 err_rd:
47403- if (!--pipe->readers)
47404+ if (atomic_dec_and_test(&pipe->readers))
47405 wake_up_interruptible(&pipe->wait);
47406 ret = -ERESTARTSYS;
47407 goto err;
47408
47409 err_wr:
47410- if (!--pipe->writers)
47411+ if (atomic_dec_and_test(&pipe->writers))
47412 wake_up_interruptible(&pipe->wait);
47413 ret = -ERESTARTSYS;
47414 goto err;
47415
47416 err:
47417- if (!pipe->readers && !pipe->writers)
47418+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
47419 free_pipe_info(inode);
47420
47421 err_nocleanup:
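
The fifo.c hunk converts the pipe reader/writer counts to atomics. Note the idiom translations: "x++ == 0" becomes "atomic_inc_return(&x) == 1" (post-increment tested against the old value versus increment-and-return tested against the new one), and "!--x" becomes atomic_dec_and_test(). A C11 analog of both helpers:

#include <stdatomic.h>
#include <stdbool.h>

static int inc_return(atomic_int *v)
{
	return atomic_fetch_add(v, 1) + 1;	/* new value */
}

static bool dec_and_test(atomic_int *v)
{
	return atomic_fetch_sub(v, 1) - 1 == 0;	/* true when it hits zero */
}

int main(void)
{
	atomic_int readers = 0;
	bool first = (inc_return(&readers) == 1);	/* was: readers++ == 0 */
	bool last  = dec_and_test(&readers);		/* was: !--readers     */

	return first && last ? 0 : 1;
}
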
47422diff --git a/fs/file.c b/fs/file.c
47423index eff2316..8c8930c 100644
47424--- a/fs/file.c
47425+++ b/fs/file.c
47426@@ -16,6 +16,7 @@
47427 #include <linux/slab.h>
47428 #include <linux/vmalloc.h>
47429 #include <linux/file.h>
47430+#include <linux/security.h>
47431 #include <linux/fdtable.h>
47432 #include <linux/bitops.h>
47433 #include <linux/interrupt.h>
47434@@ -898,6 +899,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
47435 if (!file)
47436 return __close_fd(files, fd);
47437
47438+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
47439 if (fd >= rlimit(RLIMIT_NOFILE))
47440 return -EBADF;
47441
47442@@ -924,6 +926,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
47443 if (unlikely(oldfd == newfd))
47444 return -EINVAL;
47445
47446+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
47447 if (newfd >= rlimit(RLIMIT_NOFILE))
47448 return -EBADF;
47449
47450@@ -979,6 +982,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
47451 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
47452 {
47453 int err;
47454+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
47455 if (from >= rlimit(RLIMIT_NOFILE))
47456 return -EINVAL;
47457 err = alloc_fd(from, flags);
47458diff --git a/fs/filesystems.c b/fs/filesystems.c
47459index da165f6..3671bdb 100644
47460--- a/fs/filesystems.c
47461+++ b/fs/filesystems.c
47462@@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
47463 int len = dot ? dot - name : strlen(name);
47464
47465 fs = __get_fs_type(name, len);
47466+
47467+#ifdef CONFIG_GRKERNSEC_MODHARDEN
47468+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
47469+#else
47470 if (!fs && (request_module("%.*s", len, name) == 0))
47471+#endif
47472 fs = __get_fs_type(name, len);
47473
47474 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
47475diff --git a/fs/fs_struct.c b/fs/fs_struct.c
47476index 5df4775..9d9336f 100644
47477--- a/fs/fs_struct.c
47478+++ b/fs/fs_struct.c
47479@@ -4,6 +4,7 @@
47480 #include <linux/path.h>
47481 #include <linux/slab.h>
47482 #include <linux/fs_struct.h>
47483+#include <linux/grsecurity.h>
47484 #include "internal.h"
47485
47486 /*
47487@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
47488 write_seqcount_begin(&fs->seq);
47489 old_root = fs->root;
47490 fs->root = *path;
47491+ gr_set_chroot_entries(current, path);
47492 write_seqcount_end(&fs->seq);
47493 spin_unlock(&fs->lock);
47494 if (old_root.dentry)
47495@@ -53,6 +55,21 @@ static inline int replace_path(struct path *p, const struct path *old, const str
47496 return 1;
47497 }
47498
47499+static inline int replace_root_path(struct task_struct *task, struct path *p, const struct path *old, struct path *new)
47500+{
47501+ if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
47502+ return 0;
47503+ *p = *new;
47504+
47505+ /* This function is only called from pivot_root(). Leave our
47506+ gr_chroot_dentry and is_chrooted flags as-is, so that a
47507+ pivoted root isn't treated as a chroot
47508+ */
47509+ //gr_set_chroot_entries(task, new);
47510+
47511+ return 1;
47512+}
47513+
47514 void chroot_fs_refs(struct path *old_root, struct path *new_root)
47515 {
47516 struct task_struct *g, *p;
47517@@ -67,7 +84,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
47518 int hits = 0;
47519 spin_lock(&fs->lock);
47520 write_seqcount_begin(&fs->seq);
47521- hits += replace_path(&fs->root, old_root, new_root);
47522+ hits += replace_root_path(p, &fs->root, old_root, new_root);
47523 hits += replace_path(&fs->pwd, old_root, new_root);
47524 write_seqcount_end(&fs->seq);
47525 while (hits--) {
47526@@ -99,7 +116,8 @@ void exit_fs(struct task_struct *tsk)
47527 task_lock(tsk);
47528 spin_lock(&fs->lock);
47529 tsk->fs = NULL;
47530- kill = !--fs->users;
47531+ gr_clear_chroot_entries(tsk);
47532+ kill = !atomic_dec_return(&fs->users);
47533 spin_unlock(&fs->lock);
47534 task_unlock(tsk);
47535 if (kill)
47536@@ -112,7 +130,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
47537 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
47538 /* We don't need to lock fs - think why ;-) */
47539 if (fs) {
47540- fs->users = 1;
47541+ atomic_set(&fs->users, 1);
47542 fs->in_exec = 0;
47543 spin_lock_init(&fs->lock);
47544 seqcount_init(&fs->seq);
47545@@ -121,6 +139,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
47546 spin_lock(&old->lock);
47547 fs->root = old->root;
47548 path_get(&fs->root);
47549+ /* instead of calling gr_set_chroot_entries here,
47550+ we call it from every caller of this function
47551+ */
47552 fs->pwd = old->pwd;
47553 path_get(&fs->pwd);
47554 spin_unlock(&old->lock);
47555@@ -139,8 +160,9 @@ int unshare_fs_struct(void)
47556
47557 task_lock(current);
47558 spin_lock(&fs->lock);
47559- kill = !--fs->users;
47560+ kill = !atomic_dec_return(&fs->users);
47561 current->fs = new_fs;
47562+ gr_set_chroot_entries(current, &new_fs->root);
47563 spin_unlock(&fs->lock);
47564 task_unlock(current);
47565
47566@@ -153,13 +175,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
47567
47568 int current_umask(void)
47569 {
47570- return current->fs->umask;
47571+ return current->fs->umask | gr_acl_umask();
47572 }
47573 EXPORT_SYMBOL(current_umask);
47574
47575 /* to be mentioned only in INIT_TASK */
47576 struct fs_struct init_fs = {
47577- .users = 1,
47578+ .users = ATOMIC_INIT(1),
47579 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
47580 .seq = SEQCNT_ZERO,
47581 .umask = 0022,
47582@@ -175,12 +197,13 @@ void daemonize_fs_struct(void)
47583 task_lock(current);
47584
47585 spin_lock(&init_fs.lock);
47586- init_fs.users++;
47587+ atomic_inc(&init_fs.users);
47588 spin_unlock(&init_fs.lock);
47589
47590 spin_lock(&fs->lock);
47591 current->fs = &init_fs;
47592- kill = !--fs->users;
47593+ gr_set_chroot_entries(current, &current->fs->root);
47594+ kill = !atomic_dec_return(&fs->users);
47595 spin_unlock(&fs->lock);
47596
47597 task_unlock(current);
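
One behavioural note from the fs_struct.c hunks: besides converting fs->users to an atomic refcount, current_umask() now ORs in gr_acl_umask(). Because umask bits subtract permissions, OR-ing in a policy mask can only tighten resulting file modes, never loosen them. The arithmetic, with illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned proc_umask   = 0022;	/* the process's own umask */
	unsigned policy_umask = 0007;	/* illustrative gr_acl_umask() value */
	unsigned eff = proc_umask | policy_umask;

	printf("open(..., 0666) yields mode %04o\n", 0666 & ~eff);	/* 0640 */
	return 0;
}
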
47598diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
47599index 9905350..02eaec4 100644
47600--- a/fs/fscache/cookie.c
47601+++ b/fs/fscache/cookie.c
47602@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
47603 parent ? (char *) parent->def->name : "<no-parent>",
47604 def->name, netfs_data);
47605
47606- fscache_stat(&fscache_n_acquires);
47607+ fscache_stat_unchecked(&fscache_n_acquires);
47608
47609 /* if there's no parent cookie, then we don't create one here either */
47610 if (!parent) {
47611- fscache_stat(&fscache_n_acquires_null);
47612+ fscache_stat_unchecked(&fscache_n_acquires_null);
47613 _leave(" [no parent]");
47614 return NULL;
47615 }
47616@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
47617 /* allocate and initialise a cookie */
47618 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
47619 if (!cookie) {
47620- fscache_stat(&fscache_n_acquires_oom);
47621+ fscache_stat_unchecked(&fscache_n_acquires_oom);
47622 _leave(" [ENOMEM]");
47623 return NULL;
47624 }
47625@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
47626
47627 switch (cookie->def->type) {
47628 case FSCACHE_COOKIE_TYPE_INDEX:
47629- fscache_stat(&fscache_n_cookie_index);
47630+ fscache_stat_unchecked(&fscache_n_cookie_index);
47631 break;
47632 case FSCACHE_COOKIE_TYPE_DATAFILE:
47633- fscache_stat(&fscache_n_cookie_data);
47634+ fscache_stat_unchecked(&fscache_n_cookie_data);
47635 break;
47636 default:
47637- fscache_stat(&fscache_n_cookie_special);
47638+ fscache_stat_unchecked(&fscache_n_cookie_special);
47639 break;
47640 }
47641
47642@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
47643 if (fscache_acquire_non_index_cookie(cookie) < 0) {
47644 atomic_dec(&parent->n_children);
47645 __fscache_cookie_put(cookie);
47646- fscache_stat(&fscache_n_acquires_nobufs);
47647+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
47648 _leave(" = NULL");
47649 return NULL;
47650 }
47651 }
47652
47653- fscache_stat(&fscache_n_acquires_ok);
47654+ fscache_stat_unchecked(&fscache_n_acquires_ok);
47655 _leave(" = %p", cookie);
47656 return cookie;
47657 }
47658@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
47659 cache = fscache_select_cache_for_object(cookie->parent);
47660 if (!cache) {
47661 up_read(&fscache_addremove_sem);
47662- fscache_stat(&fscache_n_acquires_no_cache);
47663+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
47664 _leave(" = -ENOMEDIUM [no cache]");
47665 return -ENOMEDIUM;
47666 }
47667@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
47668 object = cache->ops->alloc_object(cache, cookie);
47669 fscache_stat_d(&fscache_n_cop_alloc_object);
47670 if (IS_ERR(object)) {
47671- fscache_stat(&fscache_n_object_no_alloc);
47672+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
47673 ret = PTR_ERR(object);
47674 goto error;
47675 }
47676
47677- fscache_stat(&fscache_n_object_alloc);
47678+ fscache_stat_unchecked(&fscache_n_object_alloc);
47679
47680 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
47681
47682@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
47683 struct fscache_object *object;
47684 struct hlist_node *_p;
47685
47686- fscache_stat(&fscache_n_updates);
47687+ fscache_stat_unchecked(&fscache_n_updates);
47688
47689 if (!cookie) {
47690- fscache_stat(&fscache_n_updates_null);
47691+ fscache_stat_unchecked(&fscache_n_updates_null);
47692 _leave(" [no cookie]");
47693 return;
47694 }
47695@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
47696 struct fscache_object *object;
47697 unsigned long event;
47698
47699- fscache_stat(&fscache_n_relinquishes);
47700+ fscache_stat_unchecked(&fscache_n_relinquishes);
47701 if (retire)
47702- fscache_stat(&fscache_n_relinquishes_retire);
47703+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
47704
47705 if (!cookie) {
47706- fscache_stat(&fscache_n_relinquishes_null);
47707+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
47708 _leave(" [no cookie]");
47709 return;
47710 }
47711@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
47712
47713 /* wait for the cookie to finish being instantiated (or to fail) */
47714 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
47715- fscache_stat(&fscache_n_relinquishes_waitcrt);
47716+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
47717 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
47718 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
47719 }
47720diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
47721index f6aad48..88dcf26 100644
47722--- a/fs/fscache/internal.h
47723+++ b/fs/fscache/internal.h
47724@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
47725 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
47726 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
47727
47728-extern atomic_t fscache_n_op_pend;
47729-extern atomic_t fscache_n_op_run;
47730-extern atomic_t fscache_n_op_enqueue;
47731-extern atomic_t fscache_n_op_deferred_release;
47732-extern atomic_t fscache_n_op_release;
47733-extern atomic_t fscache_n_op_gc;
47734-extern atomic_t fscache_n_op_cancelled;
47735-extern atomic_t fscache_n_op_rejected;
47736+extern atomic_unchecked_t fscache_n_op_pend;
47737+extern atomic_unchecked_t fscache_n_op_run;
47738+extern atomic_unchecked_t fscache_n_op_enqueue;
47739+extern atomic_unchecked_t fscache_n_op_deferred_release;
47740+extern atomic_unchecked_t fscache_n_op_release;
47741+extern atomic_unchecked_t fscache_n_op_gc;
47742+extern atomic_unchecked_t fscache_n_op_cancelled;
47743+extern atomic_unchecked_t fscache_n_op_rejected;
47744
47745-extern atomic_t fscache_n_attr_changed;
47746-extern atomic_t fscache_n_attr_changed_ok;
47747-extern atomic_t fscache_n_attr_changed_nobufs;
47748-extern atomic_t fscache_n_attr_changed_nomem;
47749-extern atomic_t fscache_n_attr_changed_calls;
47750+extern atomic_unchecked_t fscache_n_attr_changed;
47751+extern atomic_unchecked_t fscache_n_attr_changed_ok;
47752+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
47753+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
47754+extern atomic_unchecked_t fscache_n_attr_changed_calls;
47755
47756-extern atomic_t fscache_n_allocs;
47757-extern atomic_t fscache_n_allocs_ok;
47758-extern atomic_t fscache_n_allocs_wait;
47759-extern atomic_t fscache_n_allocs_nobufs;
47760-extern atomic_t fscache_n_allocs_intr;
47761-extern atomic_t fscache_n_allocs_object_dead;
47762-extern atomic_t fscache_n_alloc_ops;
47763-extern atomic_t fscache_n_alloc_op_waits;
47764+extern atomic_unchecked_t fscache_n_allocs;
47765+extern atomic_unchecked_t fscache_n_allocs_ok;
47766+extern atomic_unchecked_t fscache_n_allocs_wait;
47767+extern atomic_unchecked_t fscache_n_allocs_nobufs;
47768+extern atomic_unchecked_t fscache_n_allocs_intr;
47769+extern atomic_unchecked_t fscache_n_allocs_object_dead;
47770+extern atomic_unchecked_t fscache_n_alloc_ops;
47771+extern atomic_unchecked_t fscache_n_alloc_op_waits;
47772
47773-extern atomic_t fscache_n_retrievals;
47774-extern atomic_t fscache_n_retrievals_ok;
47775-extern atomic_t fscache_n_retrievals_wait;
47776-extern atomic_t fscache_n_retrievals_nodata;
47777-extern atomic_t fscache_n_retrievals_nobufs;
47778-extern atomic_t fscache_n_retrievals_intr;
47779-extern atomic_t fscache_n_retrievals_nomem;
47780-extern atomic_t fscache_n_retrievals_object_dead;
47781-extern atomic_t fscache_n_retrieval_ops;
47782-extern atomic_t fscache_n_retrieval_op_waits;
47783+extern atomic_unchecked_t fscache_n_retrievals;
47784+extern atomic_unchecked_t fscache_n_retrievals_ok;
47785+extern atomic_unchecked_t fscache_n_retrievals_wait;
47786+extern atomic_unchecked_t fscache_n_retrievals_nodata;
47787+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
47788+extern atomic_unchecked_t fscache_n_retrievals_intr;
47789+extern atomic_unchecked_t fscache_n_retrievals_nomem;
47790+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
47791+extern atomic_unchecked_t fscache_n_retrieval_ops;
47792+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
47793
47794-extern atomic_t fscache_n_stores;
47795-extern atomic_t fscache_n_stores_ok;
47796-extern atomic_t fscache_n_stores_again;
47797-extern atomic_t fscache_n_stores_nobufs;
47798-extern atomic_t fscache_n_stores_oom;
47799-extern atomic_t fscache_n_store_ops;
47800-extern atomic_t fscache_n_store_calls;
47801-extern atomic_t fscache_n_store_pages;
47802-extern atomic_t fscache_n_store_radix_deletes;
47803-extern atomic_t fscache_n_store_pages_over_limit;
47804+extern atomic_unchecked_t fscache_n_stores;
47805+extern atomic_unchecked_t fscache_n_stores_ok;
47806+extern atomic_unchecked_t fscache_n_stores_again;
47807+extern atomic_unchecked_t fscache_n_stores_nobufs;
47808+extern atomic_unchecked_t fscache_n_stores_oom;
47809+extern atomic_unchecked_t fscache_n_store_ops;
47810+extern atomic_unchecked_t fscache_n_store_calls;
47811+extern atomic_unchecked_t fscache_n_store_pages;
47812+extern atomic_unchecked_t fscache_n_store_radix_deletes;
47813+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
47814
47815-extern atomic_t fscache_n_store_vmscan_not_storing;
47816-extern atomic_t fscache_n_store_vmscan_gone;
47817-extern atomic_t fscache_n_store_vmscan_busy;
47818-extern atomic_t fscache_n_store_vmscan_cancelled;
47819+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
47820+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
47821+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
47822+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
47823
47824-extern atomic_t fscache_n_marks;
47825-extern atomic_t fscache_n_uncaches;
47826+extern atomic_unchecked_t fscache_n_marks;
47827+extern atomic_unchecked_t fscache_n_uncaches;
47828
47829-extern atomic_t fscache_n_acquires;
47830-extern atomic_t fscache_n_acquires_null;
47831-extern atomic_t fscache_n_acquires_no_cache;
47832-extern atomic_t fscache_n_acquires_ok;
47833-extern atomic_t fscache_n_acquires_nobufs;
47834-extern atomic_t fscache_n_acquires_oom;
47835+extern atomic_unchecked_t fscache_n_acquires;
47836+extern atomic_unchecked_t fscache_n_acquires_null;
47837+extern atomic_unchecked_t fscache_n_acquires_no_cache;
47838+extern atomic_unchecked_t fscache_n_acquires_ok;
47839+extern atomic_unchecked_t fscache_n_acquires_nobufs;
47840+extern atomic_unchecked_t fscache_n_acquires_oom;
47841
47842-extern atomic_t fscache_n_updates;
47843-extern atomic_t fscache_n_updates_null;
47844-extern atomic_t fscache_n_updates_run;
47845+extern atomic_unchecked_t fscache_n_updates;
47846+extern atomic_unchecked_t fscache_n_updates_null;
47847+extern atomic_unchecked_t fscache_n_updates_run;
47848
47849-extern atomic_t fscache_n_relinquishes;
47850-extern atomic_t fscache_n_relinquishes_null;
47851-extern atomic_t fscache_n_relinquishes_waitcrt;
47852-extern atomic_t fscache_n_relinquishes_retire;
47853+extern atomic_unchecked_t fscache_n_relinquishes;
47854+extern atomic_unchecked_t fscache_n_relinquishes_null;
47855+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
47856+extern atomic_unchecked_t fscache_n_relinquishes_retire;
47857
47858-extern atomic_t fscache_n_cookie_index;
47859-extern atomic_t fscache_n_cookie_data;
47860-extern atomic_t fscache_n_cookie_special;
47861+extern atomic_unchecked_t fscache_n_cookie_index;
47862+extern atomic_unchecked_t fscache_n_cookie_data;
47863+extern atomic_unchecked_t fscache_n_cookie_special;
47864
47865-extern atomic_t fscache_n_object_alloc;
47866-extern atomic_t fscache_n_object_no_alloc;
47867-extern atomic_t fscache_n_object_lookups;
47868-extern atomic_t fscache_n_object_lookups_negative;
47869-extern atomic_t fscache_n_object_lookups_positive;
47870-extern atomic_t fscache_n_object_lookups_timed_out;
47871-extern atomic_t fscache_n_object_created;
47872-extern atomic_t fscache_n_object_avail;
47873-extern atomic_t fscache_n_object_dead;
47874+extern atomic_unchecked_t fscache_n_object_alloc;
47875+extern atomic_unchecked_t fscache_n_object_no_alloc;
47876+extern atomic_unchecked_t fscache_n_object_lookups;
47877+extern atomic_unchecked_t fscache_n_object_lookups_negative;
47878+extern atomic_unchecked_t fscache_n_object_lookups_positive;
47879+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
47880+extern atomic_unchecked_t fscache_n_object_created;
47881+extern atomic_unchecked_t fscache_n_object_avail;
47882+extern atomic_unchecked_t fscache_n_object_dead;
47883
47884-extern atomic_t fscache_n_checkaux_none;
47885-extern atomic_t fscache_n_checkaux_okay;
47886-extern atomic_t fscache_n_checkaux_update;
47887-extern atomic_t fscache_n_checkaux_obsolete;
47888+extern atomic_unchecked_t fscache_n_checkaux_none;
47889+extern atomic_unchecked_t fscache_n_checkaux_okay;
47890+extern atomic_unchecked_t fscache_n_checkaux_update;
47891+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
47892
47893 extern atomic_t fscache_n_cop_alloc_object;
47894 extern atomic_t fscache_n_cop_lookup_object;
47895@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
47896 atomic_inc(stat);
47897 }
47898
47899+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
47900+{
47901+ atomic_inc_unchecked(stat);
47902+}
47903+
47904 static inline void fscache_stat_d(atomic_t *stat)
47905 {
47906 atomic_dec(stat);
47907@@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
47908
47909 #define __fscache_stat(stat) (NULL)
47910 #define fscache_stat(stat) do {} while (0)
47911+#define fscache_stat_unchecked(stat) do {} while (0)
47912 #define fscache_stat_d(stat) do {} while (0)
47913 #endif
47914
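This internal.h hunk is the heart of the fscache changes: every statistics counter moves to atomic_unchecked_t and gains an fscache_stat_unchecked() helper, while the fscache_n_cop_* counters stay checked. As I understand the PaX REFCOUNT design, checked atomics detect overflow (to stop refcount-overflow exploits) while unchecked ones may wrap freely, which is harmless for statistics; with the option compiled out, both types behave identically. A self-contained sketch of such a split, with hypothetical names rather than the actual PaX macros:

#include <stdatomic.h>
#include <limits.h>
#include <assert.h>

/* Sketch of a checked/unchecked atomic split; hypothetical names,
 * not the actual PaX definitions. */
typedef struct { atomic_int counter; } atomic_checked_t;
typedef struct { atomic_int counter; } atomic_unchecked_t;

static inline void inc_checked(atomic_checked_t *v)
{
	/* reference counts must never wrap: detect overflow */
	int old = atomic_fetch_add(&v->counter, 1);

	assert(old != INT_MAX);	/* real code saturates or traps instead */
}

static inline void inc_unchecked(atomic_unchecked_t *v)
{
	/* statistics may wrap harmlessly: plain increment, no check */
	atomic_fetch_add(&v->counter, 1);
}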
47915diff --git a/fs/fscache/object.c b/fs/fscache/object.c
47916index b6b897c..0ffff9c 100644
47917--- a/fs/fscache/object.c
47918+++ b/fs/fscache/object.c
47919@@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
47920 /* update the object metadata on disk */
47921 case FSCACHE_OBJECT_UPDATING:
47922 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
47923- fscache_stat(&fscache_n_updates_run);
47924+ fscache_stat_unchecked(&fscache_n_updates_run);
47925 fscache_stat(&fscache_n_cop_update_object);
47926 object->cache->ops->update_object(object);
47927 fscache_stat_d(&fscache_n_cop_update_object);
47928@@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
47929 spin_lock(&object->lock);
47930 object->state = FSCACHE_OBJECT_DEAD;
47931 spin_unlock(&object->lock);
47932- fscache_stat(&fscache_n_object_dead);
47933+ fscache_stat_unchecked(&fscache_n_object_dead);
47934 goto terminal_transit;
47935
47936 /* handle the parent cache of this object being withdrawn from
47937@@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
47938 spin_lock(&object->lock);
47939 object->state = FSCACHE_OBJECT_DEAD;
47940 spin_unlock(&object->lock);
47941- fscache_stat(&fscache_n_object_dead);
47942+ fscache_stat_unchecked(&fscache_n_object_dead);
47943 goto terminal_transit;
47944
47945 /* complain about the object being woken up once it is
47946@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
47947 parent->cookie->def->name, cookie->def->name,
47948 object->cache->tag->name);
47949
47950- fscache_stat(&fscache_n_object_lookups);
47951+ fscache_stat_unchecked(&fscache_n_object_lookups);
47952 fscache_stat(&fscache_n_cop_lookup_object);
47953 ret = object->cache->ops->lookup_object(object);
47954 fscache_stat_d(&fscache_n_cop_lookup_object);
47955@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
47956 if (ret == -ETIMEDOUT) {
47957 /* probably stuck behind another object, so move this one to
47958 * the back of the queue */
47959- fscache_stat(&fscache_n_object_lookups_timed_out);
47960+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
47961 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
47962 }
47963
47964@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
47965
47966 spin_lock(&object->lock);
47967 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
47968- fscache_stat(&fscache_n_object_lookups_negative);
47969+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
47970
47971 /* transit here to allow write requests to begin stacking up
47972 * and read requests to begin returning ENODATA */
47973@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
47974 * result, in which case there may be data available */
47975 spin_lock(&object->lock);
47976 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
47977- fscache_stat(&fscache_n_object_lookups_positive);
47978+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
47979
47980 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
47981
47982@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
47983 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
47984 } else {
47985 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
47986- fscache_stat(&fscache_n_object_created);
47987+ fscache_stat_unchecked(&fscache_n_object_created);
47988
47989 object->state = FSCACHE_OBJECT_AVAILABLE;
47990 spin_unlock(&object->lock);
47991@@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
47992 fscache_enqueue_dependents(object);
47993
47994 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
47995- fscache_stat(&fscache_n_object_avail);
47996+ fscache_stat_unchecked(&fscache_n_object_avail);
47997
47998 _leave("");
47999 }
48000@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
48001 enum fscache_checkaux result;
48002
48003 if (!object->cookie->def->check_aux) {
48004- fscache_stat(&fscache_n_checkaux_none);
48005+ fscache_stat_unchecked(&fscache_n_checkaux_none);
48006 return FSCACHE_CHECKAUX_OKAY;
48007 }
48008
48009@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
48010 switch (result) {
48011 /* entry okay as is */
48012 case FSCACHE_CHECKAUX_OKAY:
48013- fscache_stat(&fscache_n_checkaux_okay);
48014+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
48015 break;
48016
48017 /* entry requires update */
48018 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
48019- fscache_stat(&fscache_n_checkaux_update);
48020+ fscache_stat_unchecked(&fscache_n_checkaux_update);
48021 break;
48022
48023 /* entry requires deletion */
48024 case FSCACHE_CHECKAUX_OBSOLETE:
48025- fscache_stat(&fscache_n_checkaux_obsolete);
48026+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
48027 break;
48028
48029 default:
48030diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
48031index 30afdfa..2256596 100644
48032--- a/fs/fscache/operation.c
48033+++ b/fs/fscache/operation.c
48034@@ -17,7 +17,7 @@
48035 #include <linux/slab.h>
48036 #include "internal.h"
48037
48038-atomic_t fscache_op_debug_id;
48039+atomic_unchecked_t fscache_op_debug_id;
48040 EXPORT_SYMBOL(fscache_op_debug_id);
48041
48042 /**
48043@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
48044 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
48045 ASSERTCMP(atomic_read(&op->usage), >, 0);
48046
48047- fscache_stat(&fscache_n_op_enqueue);
48048+ fscache_stat_unchecked(&fscache_n_op_enqueue);
48049 switch (op->flags & FSCACHE_OP_TYPE) {
48050 case FSCACHE_OP_ASYNC:
48051 _debug("queue async");
48052@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
48053 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
48054 if (op->processor)
48055 fscache_enqueue_operation(op);
48056- fscache_stat(&fscache_n_op_run);
48057+ fscache_stat_unchecked(&fscache_n_op_run);
48058 }
48059
48060 /*
48061@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
48062 if (object->n_ops > 1) {
48063 atomic_inc(&op->usage);
48064 list_add_tail(&op->pend_link, &object->pending_ops);
48065- fscache_stat(&fscache_n_op_pend);
48066+ fscache_stat_unchecked(&fscache_n_op_pend);
48067 } else if (!list_empty(&object->pending_ops)) {
48068 atomic_inc(&op->usage);
48069 list_add_tail(&op->pend_link, &object->pending_ops);
48070- fscache_stat(&fscache_n_op_pend);
48071+ fscache_stat_unchecked(&fscache_n_op_pend);
48072 fscache_start_operations(object);
48073 } else {
48074 ASSERTCMP(object->n_in_progress, ==, 0);
48075@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
48076 object->n_exclusive++; /* reads and writes must wait */
48077 atomic_inc(&op->usage);
48078 list_add_tail(&op->pend_link, &object->pending_ops);
48079- fscache_stat(&fscache_n_op_pend);
48080+ fscache_stat_unchecked(&fscache_n_op_pend);
48081 ret = 0;
48082 } else {
48083 /* not allowed to submit ops in any other state */
48084@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
48085 if (object->n_exclusive > 0) {
48086 atomic_inc(&op->usage);
48087 list_add_tail(&op->pend_link, &object->pending_ops);
48088- fscache_stat(&fscache_n_op_pend);
48089+ fscache_stat_unchecked(&fscache_n_op_pend);
48090 } else if (!list_empty(&object->pending_ops)) {
48091 atomic_inc(&op->usage);
48092 list_add_tail(&op->pend_link, &object->pending_ops);
48093- fscache_stat(&fscache_n_op_pend);
48094+ fscache_stat_unchecked(&fscache_n_op_pend);
48095 fscache_start_operations(object);
48096 } else {
48097 ASSERTCMP(object->n_exclusive, ==, 0);
48098@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
48099 object->n_ops++;
48100 atomic_inc(&op->usage);
48101 list_add_tail(&op->pend_link, &object->pending_ops);
48102- fscache_stat(&fscache_n_op_pend);
48103+ fscache_stat_unchecked(&fscache_n_op_pend);
48104 ret = 0;
48105 } else if (object->state == FSCACHE_OBJECT_DYING ||
48106 object->state == FSCACHE_OBJECT_LC_DYING ||
48107 object->state == FSCACHE_OBJECT_WITHDRAWING) {
48108- fscache_stat(&fscache_n_op_rejected);
48109+ fscache_stat_unchecked(&fscache_n_op_rejected);
48110 ret = -ENOBUFS;
48111 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
48112 fscache_report_unexpected_submission(object, op, ostate);
48113@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
48114
48115 ret = -EBUSY;
48116 if (!list_empty(&op->pend_link)) {
48117- fscache_stat(&fscache_n_op_cancelled);
48118+ fscache_stat_unchecked(&fscache_n_op_cancelled);
48119 list_del_init(&op->pend_link);
48120 object->n_ops--;
48121 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
48122@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
48123 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
48124 BUG();
48125
48126- fscache_stat(&fscache_n_op_release);
48127+ fscache_stat_unchecked(&fscache_n_op_release);
48128
48129 if (op->release) {
48130 op->release(op);
48131@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
48132 * lock, and defer it otherwise */
48133 if (!spin_trylock(&object->lock)) {
48134 _debug("defer put");
48135- fscache_stat(&fscache_n_op_deferred_release);
48136+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
48137
48138 cache = object->cache;
48139 spin_lock(&cache->op_gc_list_lock);
48140@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
48141
48142 _debug("GC DEFERRED REL OBJ%x OP%x",
48143 object->debug_id, op->debug_id);
48144- fscache_stat(&fscache_n_op_gc);
48145+ fscache_stat_unchecked(&fscache_n_op_gc);
48146
48147 ASSERTCMP(atomic_read(&op->usage), ==, 0);
48148
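Besides the counter conversions, operation.c switches fscache_op_debug_id to the unchecked type: debug ids only need to be distinct within a trace window, so wraparound merely recycles old ids. A sketch of that id-allocation pattern:

#include <stdatomic.h>
#include <stdio.h>

/* Sketch of handing out debug ids from one global atomic, the pattern
 * behind fscache_op_debug_id; wraparound only recycles old ids. */
static atomic_uint debug_id_seq;

struct op {
	unsigned int debug_id;
};

static void op_init(struct op *op)
{
	/* like: op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id) */
	op->debug_id = atomic_fetch_add(&debug_id_seq, 1) + 1;
}

int main(void)
{
	struct op a, b;

	op_init(&a);
	op_init(&b);
	printf("OP%x OP%x\n", a.debug_id, b.debug_id);	/* distinct ids */
	return 0;
}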
48149diff --git a/fs/fscache/page.c b/fs/fscache/page.c
48150index 3f7a59b..cf196cc 100644
48151--- a/fs/fscache/page.c
48152+++ b/fs/fscache/page.c
48153@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
48154 val = radix_tree_lookup(&cookie->stores, page->index);
48155 if (!val) {
48156 rcu_read_unlock();
48157- fscache_stat(&fscache_n_store_vmscan_not_storing);
48158+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
48159 __fscache_uncache_page(cookie, page);
48160 return true;
48161 }
48162@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
48163 spin_unlock(&cookie->stores_lock);
48164
48165 if (xpage) {
48166- fscache_stat(&fscache_n_store_vmscan_cancelled);
48167- fscache_stat(&fscache_n_store_radix_deletes);
48168+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
48169+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
48170 ASSERTCMP(xpage, ==, page);
48171 } else {
48172- fscache_stat(&fscache_n_store_vmscan_gone);
48173+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
48174 }
48175
48176 wake_up_bit(&cookie->flags, 0);
48177@@ -107,7 +107,7 @@ page_busy:
48178 /* we might want to wait here, but that could deadlock the allocator as
48179 * the work threads writing to the cache may all end up sleeping
48180 * on memory allocation */
48181- fscache_stat(&fscache_n_store_vmscan_busy);
48182+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
48183 return false;
48184 }
48185 EXPORT_SYMBOL(__fscache_maybe_release_page);
48186@@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
48187 FSCACHE_COOKIE_STORING_TAG);
48188 if (!radix_tree_tag_get(&cookie->stores, page->index,
48189 FSCACHE_COOKIE_PENDING_TAG)) {
48190- fscache_stat(&fscache_n_store_radix_deletes);
48191+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
48192 xpage = radix_tree_delete(&cookie->stores, page->index);
48193 }
48194 spin_unlock(&cookie->stores_lock);
48195@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
48196
48197 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
48198
48199- fscache_stat(&fscache_n_attr_changed_calls);
48200+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
48201
48202 if (fscache_object_is_active(object)) {
48203 fscache_stat(&fscache_n_cop_attr_changed);
48204@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
48205
48206 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
48207
48208- fscache_stat(&fscache_n_attr_changed);
48209+ fscache_stat_unchecked(&fscache_n_attr_changed);
48210
48211 op = kzalloc(sizeof(*op), GFP_KERNEL);
48212 if (!op) {
48213- fscache_stat(&fscache_n_attr_changed_nomem);
48214+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
48215 _leave(" = -ENOMEM");
48216 return -ENOMEM;
48217 }
48218@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
48219 if (fscache_submit_exclusive_op(object, op) < 0)
48220 goto nobufs;
48221 spin_unlock(&cookie->lock);
48222- fscache_stat(&fscache_n_attr_changed_ok);
48223+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
48224 fscache_put_operation(op);
48225 _leave(" = 0");
48226 return 0;
48227@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
48228 nobufs:
48229 spin_unlock(&cookie->lock);
48230 kfree(op);
48231- fscache_stat(&fscache_n_attr_changed_nobufs);
48232+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
48233 _leave(" = %d", -ENOBUFS);
48234 return -ENOBUFS;
48235 }
48236@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
48237 /* allocate a retrieval operation and attempt to submit it */
48238 op = kzalloc(sizeof(*op), GFP_NOIO);
48239 if (!op) {
48240- fscache_stat(&fscache_n_retrievals_nomem);
48241+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
48242 return NULL;
48243 }
48244
48245@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
48246 return 0;
48247 }
48248
48249- fscache_stat(&fscache_n_retrievals_wait);
48250+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
48251
48252 jif = jiffies;
48253 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
48254 fscache_wait_bit_interruptible,
48255 TASK_INTERRUPTIBLE) != 0) {
48256- fscache_stat(&fscache_n_retrievals_intr);
48257+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
48258 _leave(" = -ERESTARTSYS");
48259 return -ERESTARTSYS;
48260 }
48261@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
48262 */
48263 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
48264 struct fscache_retrieval *op,
48265- atomic_t *stat_op_waits,
48266- atomic_t *stat_object_dead)
48267+ atomic_unchecked_t *stat_op_waits,
48268+ atomic_unchecked_t *stat_object_dead)
48269 {
48270 int ret;
48271
48272@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
48273 goto check_if_dead;
48274
48275 _debug(">>> WT");
48276- fscache_stat(stat_op_waits);
48277+ fscache_stat_unchecked(stat_op_waits);
48278 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
48279 fscache_wait_bit_interruptible,
48280 TASK_INTERRUPTIBLE) < 0) {
48281@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
48282
48283 check_if_dead:
48284 if (unlikely(fscache_object_is_dead(object))) {
48285- fscache_stat(stat_object_dead);
48286+ fscache_stat_unchecked(stat_object_dead);
48287 return -ENOBUFS;
48288 }
48289 return 0;
48290@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
48291
48292 _enter("%p,%p,,,", cookie, page);
48293
48294- fscache_stat(&fscache_n_retrievals);
48295+ fscache_stat_unchecked(&fscache_n_retrievals);
48296
48297 if (hlist_empty(&cookie->backing_objects))
48298 goto nobufs;
48299@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
48300 goto nobufs_unlock;
48301 spin_unlock(&cookie->lock);
48302
48303- fscache_stat(&fscache_n_retrieval_ops);
48304+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
48305
48306 /* pin the netfs read context in case we need to do the actual netfs
48307 * read because we've encountered a cache read failure */
48308@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
48309
48310 error:
48311 if (ret == -ENOMEM)
48312- fscache_stat(&fscache_n_retrievals_nomem);
48313+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
48314 else if (ret == -ERESTARTSYS)
48315- fscache_stat(&fscache_n_retrievals_intr);
48316+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
48317 else if (ret == -ENODATA)
48318- fscache_stat(&fscache_n_retrievals_nodata);
48319+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
48320 else if (ret < 0)
48321- fscache_stat(&fscache_n_retrievals_nobufs);
48322+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
48323 else
48324- fscache_stat(&fscache_n_retrievals_ok);
48325+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
48326
48327 fscache_put_retrieval(op);
48328 _leave(" = %d", ret);
48329@@ -429,7 +429,7 @@ nobufs_unlock:
48330 spin_unlock(&cookie->lock);
48331 kfree(op);
48332 nobufs:
48333- fscache_stat(&fscache_n_retrievals_nobufs);
48334+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
48335 _leave(" = -ENOBUFS");
48336 return -ENOBUFS;
48337 }
48338@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
48339
48340 _enter("%p,,%d,,,", cookie, *nr_pages);
48341
48342- fscache_stat(&fscache_n_retrievals);
48343+ fscache_stat_unchecked(&fscache_n_retrievals);
48344
48345 if (hlist_empty(&cookie->backing_objects))
48346 goto nobufs;
48347@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
48348 goto nobufs_unlock;
48349 spin_unlock(&cookie->lock);
48350
48351- fscache_stat(&fscache_n_retrieval_ops);
48352+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
48353
48354 /* pin the netfs read context in case we need to do the actual netfs
48355 * read because we've encountered a cache read failure */
48356@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
48357
48358 error:
48359 if (ret == -ENOMEM)
48360- fscache_stat(&fscache_n_retrievals_nomem);
48361+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
48362 else if (ret == -ERESTARTSYS)
48363- fscache_stat(&fscache_n_retrievals_intr);
48364+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
48365 else if (ret == -ENODATA)
48366- fscache_stat(&fscache_n_retrievals_nodata);
48367+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
48368 else if (ret < 0)
48369- fscache_stat(&fscache_n_retrievals_nobufs);
48370+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
48371 else
48372- fscache_stat(&fscache_n_retrievals_ok);
48373+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
48374
48375 fscache_put_retrieval(op);
48376 _leave(" = %d", ret);
48377@@ -545,7 +545,7 @@ nobufs_unlock:
48378 spin_unlock(&cookie->lock);
48379 kfree(op);
48380 nobufs:
48381- fscache_stat(&fscache_n_retrievals_nobufs);
48382+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
48383 _leave(" = -ENOBUFS");
48384 return -ENOBUFS;
48385 }
48386@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
48387
48388 _enter("%p,%p,,,", cookie, page);
48389
48390- fscache_stat(&fscache_n_allocs);
48391+ fscache_stat_unchecked(&fscache_n_allocs);
48392
48393 if (hlist_empty(&cookie->backing_objects))
48394 goto nobufs;
48395@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
48396 goto nobufs_unlock;
48397 spin_unlock(&cookie->lock);
48398
48399- fscache_stat(&fscache_n_alloc_ops);
48400+ fscache_stat_unchecked(&fscache_n_alloc_ops);
48401
48402 ret = fscache_wait_for_retrieval_activation(
48403 object, op,
48404@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
48405
48406 error:
48407 if (ret == -ERESTARTSYS)
48408- fscache_stat(&fscache_n_allocs_intr);
48409+ fscache_stat_unchecked(&fscache_n_allocs_intr);
48410 else if (ret < 0)
48411- fscache_stat(&fscache_n_allocs_nobufs);
48412+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
48413 else
48414- fscache_stat(&fscache_n_allocs_ok);
48415+ fscache_stat_unchecked(&fscache_n_allocs_ok);
48416
48417 fscache_put_retrieval(op);
48418 _leave(" = %d", ret);
48419@@ -625,7 +625,7 @@ nobufs_unlock:
48420 spin_unlock(&cookie->lock);
48421 kfree(op);
48422 nobufs:
48423- fscache_stat(&fscache_n_allocs_nobufs);
48424+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
48425 _leave(" = -ENOBUFS");
48426 return -ENOBUFS;
48427 }
48428@@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
48429
48430 spin_lock(&cookie->stores_lock);
48431
48432- fscache_stat(&fscache_n_store_calls);
48433+ fscache_stat_unchecked(&fscache_n_store_calls);
48434
48435 /* find a page to store */
48436 page = NULL;
48437@@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
48438 page = results[0];
48439 _debug("gang %d [%lx]", n, page->index);
48440 if (page->index > op->store_limit) {
48441- fscache_stat(&fscache_n_store_pages_over_limit);
48442+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
48443 goto superseded;
48444 }
48445
48446@@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
48447 spin_unlock(&cookie->stores_lock);
48448 spin_unlock(&object->lock);
48449
48450- fscache_stat(&fscache_n_store_pages);
48451+ fscache_stat_unchecked(&fscache_n_store_pages);
48452 fscache_stat(&fscache_n_cop_write_page);
48453 ret = object->cache->ops->write_page(op, page);
48454 fscache_stat_d(&fscache_n_cop_write_page);
48455@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
48456 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
48457 ASSERT(PageFsCache(page));
48458
48459- fscache_stat(&fscache_n_stores);
48460+ fscache_stat_unchecked(&fscache_n_stores);
48461
48462 op = kzalloc(sizeof(*op), GFP_NOIO);
48463 if (!op)
48464@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
48465 spin_unlock(&cookie->stores_lock);
48466 spin_unlock(&object->lock);
48467
48468- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
48469+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
48470 op->store_limit = object->store_limit;
48471
48472 if (fscache_submit_op(object, &op->op) < 0)
48473@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
48474
48475 spin_unlock(&cookie->lock);
48476 radix_tree_preload_end();
48477- fscache_stat(&fscache_n_store_ops);
48478- fscache_stat(&fscache_n_stores_ok);
48479+ fscache_stat_unchecked(&fscache_n_store_ops);
48480+ fscache_stat_unchecked(&fscache_n_stores_ok);
48481
48482 /* the work queue now carries its own ref on the object */
48483 fscache_put_operation(&op->op);
48484@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
48485 return 0;
48486
48487 already_queued:
48488- fscache_stat(&fscache_n_stores_again);
48489+ fscache_stat_unchecked(&fscache_n_stores_again);
48490 already_pending:
48491 spin_unlock(&cookie->stores_lock);
48492 spin_unlock(&object->lock);
48493 spin_unlock(&cookie->lock);
48494 radix_tree_preload_end();
48495 kfree(op);
48496- fscache_stat(&fscache_n_stores_ok);
48497+ fscache_stat_unchecked(&fscache_n_stores_ok);
48498 _leave(" = 0");
48499 return 0;
48500
48501@@ -851,14 +851,14 @@ nobufs:
48502 spin_unlock(&cookie->lock);
48503 radix_tree_preload_end();
48504 kfree(op);
48505- fscache_stat(&fscache_n_stores_nobufs);
48506+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
48507 _leave(" = -ENOBUFS");
48508 return -ENOBUFS;
48509
48510 nomem_free:
48511 kfree(op);
48512 nomem:
48513- fscache_stat(&fscache_n_stores_oom);
48514+ fscache_stat_unchecked(&fscache_n_stores_oom);
48515 _leave(" = -ENOMEM");
48516 return -ENOMEM;
48517 }
48518@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
48519 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
48520 ASSERTCMP(page, !=, NULL);
48521
48522- fscache_stat(&fscache_n_uncaches);
48523+ fscache_stat_unchecked(&fscache_n_uncaches);
48524
48525 /* cache withdrawal may beat us to it */
48526 if (!PageFsCache(page))
48527@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
48528 unsigned long loop;
48529
48530 #ifdef CONFIG_FSCACHE_STATS
48531- atomic_add(pagevec->nr, &fscache_n_marks);
48532+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
48533 #endif
48534
48535 for (loop = 0; loop < pagevec->nr; loop++) {
48536diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
48537index 4765190..2a067f2 100644
48538--- a/fs/fscache/stats.c
48539+++ b/fs/fscache/stats.c
48540@@ -18,95 +18,95 @@
48541 /*
48542 * operation counters
48543 */
48544-atomic_t fscache_n_op_pend;
48545-atomic_t fscache_n_op_run;
48546-atomic_t fscache_n_op_enqueue;
48547-atomic_t fscache_n_op_requeue;
48548-atomic_t fscache_n_op_deferred_release;
48549-atomic_t fscache_n_op_release;
48550-atomic_t fscache_n_op_gc;
48551-atomic_t fscache_n_op_cancelled;
48552-atomic_t fscache_n_op_rejected;
48553+atomic_unchecked_t fscache_n_op_pend;
48554+atomic_unchecked_t fscache_n_op_run;
48555+atomic_unchecked_t fscache_n_op_enqueue;
48556+atomic_unchecked_t fscache_n_op_requeue;
48557+atomic_unchecked_t fscache_n_op_deferred_release;
48558+atomic_unchecked_t fscache_n_op_release;
48559+atomic_unchecked_t fscache_n_op_gc;
48560+atomic_unchecked_t fscache_n_op_cancelled;
48561+atomic_unchecked_t fscache_n_op_rejected;
48562
48563-atomic_t fscache_n_attr_changed;
48564-atomic_t fscache_n_attr_changed_ok;
48565-atomic_t fscache_n_attr_changed_nobufs;
48566-atomic_t fscache_n_attr_changed_nomem;
48567-atomic_t fscache_n_attr_changed_calls;
48568+atomic_unchecked_t fscache_n_attr_changed;
48569+atomic_unchecked_t fscache_n_attr_changed_ok;
48570+atomic_unchecked_t fscache_n_attr_changed_nobufs;
48571+atomic_unchecked_t fscache_n_attr_changed_nomem;
48572+atomic_unchecked_t fscache_n_attr_changed_calls;
48573
48574-atomic_t fscache_n_allocs;
48575-atomic_t fscache_n_allocs_ok;
48576-atomic_t fscache_n_allocs_wait;
48577-atomic_t fscache_n_allocs_nobufs;
48578-atomic_t fscache_n_allocs_intr;
48579-atomic_t fscache_n_allocs_object_dead;
48580-atomic_t fscache_n_alloc_ops;
48581-atomic_t fscache_n_alloc_op_waits;
48582+atomic_unchecked_t fscache_n_allocs;
48583+atomic_unchecked_t fscache_n_allocs_ok;
48584+atomic_unchecked_t fscache_n_allocs_wait;
48585+atomic_unchecked_t fscache_n_allocs_nobufs;
48586+atomic_unchecked_t fscache_n_allocs_intr;
48587+atomic_unchecked_t fscache_n_allocs_object_dead;
48588+atomic_unchecked_t fscache_n_alloc_ops;
48589+atomic_unchecked_t fscache_n_alloc_op_waits;
48590
48591-atomic_t fscache_n_retrievals;
48592-atomic_t fscache_n_retrievals_ok;
48593-atomic_t fscache_n_retrievals_wait;
48594-atomic_t fscache_n_retrievals_nodata;
48595-atomic_t fscache_n_retrievals_nobufs;
48596-atomic_t fscache_n_retrievals_intr;
48597-atomic_t fscache_n_retrievals_nomem;
48598-atomic_t fscache_n_retrievals_object_dead;
48599-atomic_t fscache_n_retrieval_ops;
48600-atomic_t fscache_n_retrieval_op_waits;
48601+atomic_unchecked_t fscache_n_retrievals;
48602+atomic_unchecked_t fscache_n_retrievals_ok;
48603+atomic_unchecked_t fscache_n_retrievals_wait;
48604+atomic_unchecked_t fscache_n_retrievals_nodata;
48605+atomic_unchecked_t fscache_n_retrievals_nobufs;
48606+atomic_unchecked_t fscache_n_retrievals_intr;
48607+atomic_unchecked_t fscache_n_retrievals_nomem;
48608+atomic_unchecked_t fscache_n_retrievals_object_dead;
48609+atomic_unchecked_t fscache_n_retrieval_ops;
48610+atomic_unchecked_t fscache_n_retrieval_op_waits;
48611
48612-atomic_t fscache_n_stores;
48613-atomic_t fscache_n_stores_ok;
48614-atomic_t fscache_n_stores_again;
48615-atomic_t fscache_n_stores_nobufs;
48616-atomic_t fscache_n_stores_oom;
48617-atomic_t fscache_n_store_ops;
48618-atomic_t fscache_n_store_calls;
48619-atomic_t fscache_n_store_pages;
48620-atomic_t fscache_n_store_radix_deletes;
48621-atomic_t fscache_n_store_pages_over_limit;
48622+atomic_unchecked_t fscache_n_stores;
48623+atomic_unchecked_t fscache_n_stores_ok;
48624+atomic_unchecked_t fscache_n_stores_again;
48625+atomic_unchecked_t fscache_n_stores_nobufs;
48626+atomic_unchecked_t fscache_n_stores_oom;
48627+atomic_unchecked_t fscache_n_store_ops;
48628+atomic_unchecked_t fscache_n_store_calls;
48629+atomic_unchecked_t fscache_n_store_pages;
48630+atomic_unchecked_t fscache_n_store_radix_deletes;
48631+atomic_unchecked_t fscache_n_store_pages_over_limit;
48632
48633-atomic_t fscache_n_store_vmscan_not_storing;
48634-atomic_t fscache_n_store_vmscan_gone;
48635-atomic_t fscache_n_store_vmscan_busy;
48636-atomic_t fscache_n_store_vmscan_cancelled;
48637+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
48638+atomic_unchecked_t fscache_n_store_vmscan_gone;
48639+atomic_unchecked_t fscache_n_store_vmscan_busy;
48640+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
48641
48642-atomic_t fscache_n_marks;
48643-atomic_t fscache_n_uncaches;
48644+atomic_unchecked_t fscache_n_marks;
48645+atomic_unchecked_t fscache_n_uncaches;
48646
48647-atomic_t fscache_n_acquires;
48648-atomic_t fscache_n_acquires_null;
48649-atomic_t fscache_n_acquires_no_cache;
48650-atomic_t fscache_n_acquires_ok;
48651-atomic_t fscache_n_acquires_nobufs;
48652-atomic_t fscache_n_acquires_oom;
48653+atomic_unchecked_t fscache_n_acquires;
48654+atomic_unchecked_t fscache_n_acquires_null;
48655+atomic_unchecked_t fscache_n_acquires_no_cache;
48656+atomic_unchecked_t fscache_n_acquires_ok;
48657+atomic_unchecked_t fscache_n_acquires_nobufs;
48658+atomic_unchecked_t fscache_n_acquires_oom;
48659
48660-atomic_t fscache_n_updates;
48661-atomic_t fscache_n_updates_null;
48662-atomic_t fscache_n_updates_run;
48663+atomic_unchecked_t fscache_n_updates;
48664+atomic_unchecked_t fscache_n_updates_null;
48665+atomic_unchecked_t fscache_n_updates_run;
48666
48667-atomic_t fscache_n_relinquishes;
48668-atomic_t fscache_n_relinquishes_null;
48669-atomic_t fscache_n_relinquishes_waitcrt;
48670-atomic_t fscache_n_relinquishes_retire;
48671+atomic_unchecked_t fscache_n_relinquishes;
48672+atomic_unchecked_t fscache_n_relinquishes_null;
48673+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
48674+atomic_unchecked_t fscache_n_relinquishes_retire;
48675
48676-atomic_t fscache_n_cookie_index;
48677-atomic_t fscache_n_cookie_data;
48678-atomic_t fscache_n_cookie_special;
48679+atomic_unchecked_t fscache_n_cookie_index;
48680+atomic_unchecked_t fscache_n_cookie_data;
48681+atomic_unchecked_t fscache_n_cookie_special;
48682
48683-atomic_t fscache_n_object_alloc;
48684-atomic_t fscache_n_object_no_alloc;
48685-atomic_t fscache_n_object_lookups;
48686-atomic_t fscache_n_object_lookups_negative;
48687-atomic_t fscache_n_object_lookups_positive;
48688-atomic_t fscache_n_object_lookups_timed_out;
48689-atomic_t fscache_n_object_created;
48690-atomic_t fscache_n_object_avail;
48691-atomic_t fscache_n_object_dead;
48692+atomic_unchecked_t fscache_n_object_alloc;
48693+atomic_unchecked_t fscache_n_object_no_alloc;
48694+atomic_unchecked_t fscache_n_object_lookups;
48695+atomic_unchecked_t fscache_n_object_lookups_negative;
48696+atomic_unchecked_t fscache_n_object_lookups_positive;
48697+atomic_unchecked_t fscache_n_object_lookups_timed_out;
48698+atomic_unchecked_t fscache_n_object_created;
48699+atomic_unchecked_t fscache_n_object_avail;
48700+atomic_unchecked_t fscache_n_object_dead;
48701
48702-atomic_t fscache_n_checkaux_none;
48703-atomic_t fscache_n_checkaux_okay;
48704-atomic_t fscache_n_checkaux_update;
48705-atomic_t fscache_n_checkaux_obsolete;
48706+atomic_unchecked_t fscache_n_checkaux_none;
48707+atomic_unchecked_t fscache_n_checkaux_okay;
48708+atomic_unchecked_t fscache_n_checkaux_update;
48709+atomic_unchecked_t fscache_n_checkaux_obsolete;
48710
48711 atomic_t fscache_n_cop_alloc_object;
48712 atomic_t fscache_n_cop_lookup_object;
48713@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
48714 seq_puts(m, "FS-Cache statistics\n");
48715
48716 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
48717- atomic_read(&fscache_n_cookie_index),
48718- atomic_read(&fscache_n_cookie_data),
48719- atomic_read(&fscache_n_cookie_special));
48720+ atomic_read_unchecked(&fscache_n_cookie_index),
48721+ atomic_read_unchecked(&fscache_n_cookie_data),
48722+ atomic_read_unchecked(&fscache_n_cookie_special));
48723
48724 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
48725- atomic_read(&fscache_n_object_alloc),
48726- atomic_read(&fscache_n_object_no_alloc),
48727- atomic_read(&fscache_n_object_avail),
48728- atomic_read(&fscache_n_object_dead));
48729+ atomic_read_unchecked(&fscache_n_object_alloc),
48730+ atomic_read_unchecked(&fscache_n_object_no_alloc),
48731+ atomic_read_unchecked(&fscache_n_object_avail),
48732+ atomic_read_unchecked(&fscache_n_object_dead));
48733 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
48734- atomic_read(&fscache_n_checkaux_none),
48735- atomic_read(&fscache_n_checkaux_okay),
48736- atomic_read(&fscache_n_checkaux_update),
48737- atomic_read(&fscache_n_checkaux_obsolete));
48738+ atomic_read_unchecked(&fscache_n_checkaux_none),
48739+ atomic_read_unchecked(&fscache_n_checkaux_okay),
48740+ atomic_read_unchecked(&fscache_n_checkaux_update),
48741+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
48742
48743 seq_printf(m, "Pages : mrk=%u unc=%u\n",
48744- atomic_read(&fscache_n_marks),
48745- atomic_read(&fscache_n_uncaches));
48746+ atomic_read_unchecked(&fscache_n_marks),
48747+ atomic_read_unchecked(&fscache_n_uncaches));
48748
48749 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
48750 " oom=%u\n",
48751- atomic_read(&fscache_n_acquires),
48752- atomic_read(&fscache_n_acquires_null),
48753- atomic_read(&fscache_n_acquires_no_cache),
48754- atomic_read(&fscache_n_acquires_ok),
48755- atomic_read(&fscache_n_acquires_nobufs),
48756- atomic_read(&fscache_n_acquires_oom));
48757+ atomic_read_unchecked(&fscache_n_acquires),
48758+ atomic_read_unchecked(&fscache_n_acquires_null),
48759+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
48760+ atomic_read_unchecked(&fscache_n_acquires_ok),
48761+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
48762+ atomic_read_unchecked(&fscache_n_acquires_oom));
48763
48764 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
48765- atomic_read(&fscache_n_object_lookups),
48766- atomic_read(&fscache_n_object_lookups_negative),
48767- atomic_read(&fscache_n_object_lookups_positive),
48768- atomic_read(&fscache_n_object_created),
48769- atomic_read(&fscache_n_object_lookups_timed_out));
48770+ atomic_read_unchecked(&fscache_n_object_lookups),
48771+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
48772+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
48773+ atomic_read_unchecked(&fscache_n_object_created),
48774+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
48775
48776 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
48777- atomic_read(&fscache_n_updates),
48778- atomic_read(&fscache_n_updates_null),
48779- atomic_read(&fscache_n_updates_run));
48780+ atomic_read_unchecked(&fscache_n_updates),
48781+ atomic_read_unchecked(&fscache_n_updates_null),
48782+ atomic_read_unchecked(&fscache_n_updates_run));
48783
48784 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
48785- atomic_read(&fscache_n_relinquishes),
48786- atomic_read(&fscache_n_relinquishes_null),
48787- atomic_read(&fscache_n_relinquishes_waitcrt),
48788- atomic_read(&fscache_n_relinquishes_retire));
48789+ atomic_read_unchecked(&fscache_n_relinquishes),
48790+ atomic_read_unchecked(&fscache_n_relinquishes_null),
48791+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
48792+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
48793
48794 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
48795- atomic_read(&fscache_n_attr_changed),
48796- atomic_read(&fscache_n_attr_changed_ok),
48797- atomic_read(&fscache_n_attr_changed_nobufs),
48798- atomic_read(&fscache_n_attr_changed_nomem),
48799- atomic_read(&fscache_n_attr_changed_calls));
48800+ atomic_read_unchecked(&fscache_n_attr_changed),
48801+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
48802+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
48803+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
48804+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
48805
48806 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
48807- atomic_read(&fscache_n_allocs),
48808- atomic_read(&fscache_n_allocs_ok),
48809- atomic_read(&fscache_n_allocs_wait),
48810- atomic_read(&fscache_n_allocs_nobufs),
48811- atomic_read(&fscache_n_allocs_intr));
48812+ atomic_read_unchecked(&fscache_n_allocs),
48813+ atomic_read_unchecked(&fscache_n_allocs_ok),
48814+ atomic_read_unchecked(&fscache_n_allocs_wait),
48815+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
48816+ atomic_read_unchecked(&fscache_n_allocs_intr));
48817 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
48818- atomic_read(&fscache_n_alloc_ops),
48819- atomic_read(&fscache_n_alloc_op_waits),
48820- atomic_read(&fscache_n_allocs_object_dead));
48821+ atomic_read_unchecked(&fscache_n_alloc_ops),
48822+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
48823+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
48824
48825 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
48826 " int=%u oom=%u\n",
48827- atomic_read(&fscache_n_retrievals),
48828- atomic_read(&fscache_n_retrievals_ok),
48829- atomic_read(&fscache_n_retrievals_wait),
48830- atomic_read(&fscache_n_retrievals_nodata),
48831- atomic_read(&fscache_n_retrievals_nobufs),
48832- atomic_read(&fscache_n_retrievals_intr),
48833- atomic_read(&fscache_n_retrievals_nomem));
48834+ atomic_read_unchecked(&fscache_n_retrievals),
48835+ atomic_read_unchecked(&fscache_n_retrievals_ok),
48836+ atomic_read_unchecked(&fscache_n_retrievals_wait),
48837+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
48838+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
48839+ atomic_read_unchecked(&fscache_n_retrievals_intr),
48840+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
48841 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
48842- atomic_read(&fscache_n_retrieval_ops),
48843- atomic_read(&fscache_n_retrieval_op_waits),
48844- atomic_read(&fscache_n_retrievals_object_dead));
48845+ atomic_read_unchecked(&fscache_n_retrieval_ops),
48846+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
48847+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
48848
48849 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
48850- atomic_read(&fscache_n_stores),
48851- atomic_read(&fscache_n_stores_ok),
48852- atomic_read(&fscache_n_stores_again),
48853- atomic_read(&fscache_n_stores_nobufs),
48854- atomic_read(&fscache_n_stores_oom));
48855+ atomic_read_unchecked(&fscache_n_stores),
48856+ atomic_read_unchecked(&fscache_n_stores_ok),
48857+ atomic_read_unchecked(&fscache_n_stores_again),
48858+ atomic_read_unchecked(&fscache_n_stores_nobufs),
48859+ atomic_read_unchecked(&fscache_n_stores_oom));
48860 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
48861- atomic_read(&fscache_n_store_ops),
48862- atomic_read(&fscache_n_store_calls),
48863- atomic_read(&fscache_n_store_pages),
48864- atomic_read(&fscache_n_store_radix_deletes),
48865- atomic_read(&fscache_n_store_pages_over_limit));
48866+ atomic_read_unchecked(&fscache_n_store_ops),
48867+ atomic_read_unchecked(&fscache_n_store_calls),
48868+ atomic_read_unchecked(&fscache_n_store_pages),
48869+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
48870+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
48871
48872 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
48873- atomic_read(&fscache_n_store_vmscan_not_storing),
48874- atomic_read(&fscache_n_store_vmscan_gone),
48875- atomic_read(&fscache_n_store_vmscan_busy),
48876- atomic_read(&fscache_n_store_vmscan_cancelled));
48877+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
48878+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
48879+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
48880+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
48881
48882 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
48883- atomic_read(&fscache_n_op_pend),
48884- atomic_read(&fscache_n_op_run),
48885- atomic_read(&fscache_n_op_enqueue),
48886- atomic_read(&fscache_n_op_cancelled),
48887- atomic_read(&fscache_n_op_rejected));
48888+ atomic_read_unchecked(&fscache_n_op_pend),
48889+ atomic_read_unchecked(&fscache_n_op_run),
48890+ atomic_read_unchecked(&fscache_n_op_enqueue),
48891+ atomic_read_unchecked(&fscache_n_op_cancelled),
48892+ atomic_read_unchecked(&fscache_n_op_rejected));
48893 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
48894- atomic_read(&fscache_n_op_deferred_release),
48895- atomic_read(&fscache_n_op_release),
48896- atomic_read(&fscache_n_op_gc));
48897+ atomic_read_unchecked(&fscache_n_op_deferred_release),
48898+ atomic_read_unchecked(&fscache_n_op_release),
48899+ atomic_read_unchecked(&fscache_n_op_gc));
48900
48901 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
48902 atomic_read(&fscache_n_cop_alloc_object),
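The stats.c hunk changes only which read helper each seq_printf() argument uses; the reporting shape is untouched. For reference, the underlying pattern is a seq_file show routine that snapshots each counter independently, roughly as below (illustrative counters; the real file binds its show routine via single_open()):

#include <linux/seq_file.h>
#include <linux/atomic.h>

static atomic_t demo_n_hits, demo_n_misses;	/* illustrative counters */

static int demo_stats_show(struct seq_file *m, void *v)
{
	/* each atomic_read() is an independent snapshot; the printed line
	 * as a whole is not atomic, which is fine for statistics */
	seq_printf(m, "Demo   : hit=%u mis=%u\n",
		   atomic_read(&demo_n_hits),
		   atomic_read(&demo_n_misses));
	return 0;
}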
48903diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
48904index ee8d550..7189d8c 100644
48905--- a/fs/fuse/cuse.c
48906+++ b/fs/fuse/cuse.c
48907@@ -585,10 +585,12 @@ static int __init cuse_init(void)
48908 INIT_LIST_HEAD(&cuse_conntbl[i]);
48909
48910 /* inherit and extend fuse_dev_operations */
48911- cuse_channel_fops = fuse_dev_operations;
48912- cuse_channel_fops.owner = THIS_MODULE;
48913- cuse_channel_fops.open = cuse_channel_open;
48914- cuse_channel_fops.release = cuse_channel_release;
48915+ pax_open_kernel();
48916+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
48917+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
48918+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
48919+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
48920+ pax_close_kernel();
48921
48922 cuse_class = class_create(THIS_MODULE, "cuse");
48923 if (IS_ERR(cuse_class))
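The cuse.c hunk deals with constification instead: in grsecurity kernels, file_operations tables are made read-only, so cuse can no longer build cuse_channel_fops with plain member assignments. The writes are bracketed by pax_open_kernel()/pax_close_kernel(), which (as I understand it) temporarily lift kernel write protection, and each member is patched through a void * cast to defeat the const qualifier. A rough user-space-shaped sketch of the idea, with stand-in helpers; note that writing a const object is undefined in plain C and only works in the kernel because the real helpers change the page protection:

#include <string.h>

struct ops {
	int (*open)(void);
	int (*release)(void);
};

/* Stand-ins for pax_open_kernel()/pax_close_kernel(), which briefly make
 * read-only kernel data writable; in plain user space these do nothing
 * and the memcpy below would fault on a genuine .rodata object. */
static void writable_begin(void) { /* e.g. lift write protection */ }
static void writable_end(void)   { /* restore write protection */ }

static const struct ops base_ops;	/* like fuse_dev_operations */
static const struct ops derived_ops;	/* like cuse_channel_fops */

static int my_open(void) { return 0; }

static void init_derived(void)
{
	struct ops *w = (struct ops *)&derived_ops;	/* cast away const */

	writable_begin();
	memcpy(w, &base_ops, sizeof(*w));	/* inherit the base table */
	w->open = my_open;			/* then patch entries */
	writable_end();
}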
48924diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
48925index 8c23fa7..0e3aac7 100644
48926--- a/fs/fuse/dev.c
48927+++ b/fs/fuse/dev.c
48928@@ -1241,7 +1241,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
48929 ret = 0;
48930 pipe_lock(pipe);
48931
48932- if (!pipe->readers) {
48933+ if (!atomic_read(&pipe->readers)) {
48934 send_sig(SIGPIPE, current, 0);
48935 if (!ret)
48936 ret = -EPIPE;
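The dev.c hunk follows from the pipe counters (readers, writers) being converted to atomic_t elsewhere in this patch, so the broken-pipe test reads the count with atomic_read(). A tiny sketch of that check with a stand-in pipe struct:

#include <stdatomic.h>
#include <errno.h>

/* Stand-in pipe with an atomic reader count; a sketch, not the
 * kernel's pipe_inode_info. */
struct pipe {
	atomic_int readers;
};

static int write_would_sigpipe(struct pipe *p)
{
	/* like: if (!atomic_read(&pipe->readers)) { send SIGPIPE; -EPIPE } */
	if (atomic_load(&p->readers) == 0)
		return -EPIPE;
	return 0;
}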
48937diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
48938index 324bc08..4fdd56e 100644
48939--- a/fs/fuse/dir.c
48940+++ b/fs/fuse/dir.c
48941@@ -1226,7 +1226,7 @@ static char *read_link(struct dentry *dentry)
48942 return link;
48943 }
48944
48945-static void free_link(char *link)
48946+static void free_link(const char *link)
48947 {
48948 if (!IS_ERR(link))
48949 free_page((unsigned long) link);
48950diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
48951index 381893c..3793318 100644
48952--- a/fs/gfs2/inode.c
48953+++ b/fs/gfs2/inode.c
48954@@ -1490,7 +1490,7 @@ out:
48955
48956 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
48957 {
48958- char *s = nd_get_link(nd);
48959+ const char *s = nd_get_link(nd);
48960 if (!IS_ERR(s))
48961 kfree(s);
48962 }
48963diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
48964index c5bc355..5a513bb 100644
48965--- a/fs/hugetlbfs/inode.c
48966+++ b/fs/hugetlbfs/inode.c
48967@@ -923,7 +923,7 @@ static struct file_system_type hugetlbfs_fs_type = {
48968 .kill_sb = kill_litter_super,
48969 };
48970
48971-static struct vfsmount *hugetlbfs_vfsmount;
48972+struct vfsmount *hugetlbfs_vfsmount;
48973
48974 static int can_do_hugetlb_shm(void)
48975 {
48976diff --git a/fs/inode.c b/fs/inode.c
48977index 64999f1..8fad608 100644
48978--- a/fs/inode.c
48979+++ b/fs/inode.c
48980@@ -880,8 +880,8 @@ unsigned int get_next_ino(void)
48981
48982 #ifdef CONFIG_SMP
48983 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
48984- static atomic_t shared_last_ino;
48985- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
48986+ static atomic_unchecked_t shared_last_ino;
48987+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
48988
48989 res = next - LAST_INO_BATCH;
48990 }
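
The shared_last_ino conversion above is one of dozens of atomic_t to atomic_unchecked_t switches in this patch; the fscache statistics earlier, the NLM cookie, the fsnotify cookie, the NFS attribute generation counter, and the ocfs2 allocation stats below all get the same treatment. Under the PaX REFCOUNT feature, ordinary atomic_t arithmetic traps on overflow to stop reference-count wraparound exploits; counters whose wraparound is harmless, such as statistics and inode-number batches, move to the _unchecked variants so they keep plain wrapping semantics and do not trigger false positives. A rough userspace sketch of the distinction (the checked variant here saturates rather than trapping, and the unchecked add is done through unsigned arithmetic to stay well-defined; the real kernel versions are asm-level and raise an exception):

    #include <limits.h>
    #include <stdio.h>

    typedef struct { int counter; } atomic_t;
    typedef struct { int counter; } atomic_unchecked_t;

    /* checked: refuse to wrap (assumes i > 0 for brevity;
     * the kernel would trap instead of saturating) */
    static int atomic_add_return(int i, atomic_t *v)
    {
        if (v->counter > INT_MAX - i)
            return v->counter = INT_MAX;
        return v->counter += i;
    }

    /* unchecked: plain modular arithmetic, overflow is acceptable */
    static int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
    {
        return v->counter = (int)((unsigned int)v->counter + (unsigned int)i);
    }

    int main(void)
    {
        atomic_t ref           = { INT_MAX - 1 };  /* a refcount    */
        atomic_unchecked_t ino = { INT_MAX - 1 };  /* an ino batch  */

        printf("checked:   %d\n", atomic_add_return(2, &ref));
        printf("unchecked: %d\n", atomic_add_return_unchecked(2, &ino));
        return 0;
    }
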
48991diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
48992index 4a6cf28..d3a29d3 100644
48993--- a/fs/jffs2/erase.c
48994+++ b/fs/jffs2/erase.c
48995@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
48996 struct jffs2_unknown_node marker = {
48997 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
48998 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
48999- .totlen = cpu_to_je32(c->cleanmarker_size)
49000+ .totlen = cpu_to_je32(c->cleanmarker_size),
49001+ .hdr_crc = cpu_to_je32(0)
49002 };
49003
49004 jffs2_prealloc_raw_node_refs(c, jeb, 1);
49005diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
49006index a6597d6..41b30ec 100644
49007--- a/fs/jffs2/wbuf.c
49008+++ b/fs/jffs2/wbuf.c
49009@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
49010 {
49011 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
49012 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
49013- .totlen = constant_cpu_to_je32(8)
49014+ .totlen = constant_cpu_to_je32(8),
49015+ .hdr_crc = constant_cpu_to_je32(0)
49016 };
49017
49018 /*
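
Both jffs2 hunks only add an explicit .hdr_crc = ...(0) to a designated initializer. In C, members omitted from a designated initializer are already zero-initialized, so the change is behaviorally a no-op; a plausible motivation (an assumption here, not stated in the patch) is to satisfy the structure-initialization checking done by the GCC plugins this patch set ships, and to document that the cleanmarker's header CRC is deliberately zero. The equivalence is easy to check:

    #include <assert.h>

    struct node {
        unsigned short magic;
        unsigned short nodetype;
        unsigned int   totlen;
        unsigned int   hdr_crc;
    };

    int main(void)
    {
        /* members not named in a designated initializer are zeroed,
         * so naming .hdr_crc = 0 documents intent without changing
         * the object's value */
        struct node a = { .magic = 0x1985, .totlen = 8 };
        struct node b = { .magic = 0x1985, .totlen = 8, .hdr_crc = 0 };

        assert(a.hdr_crc == b.hdr_crc && a.hdr_crc == 0);
        return 0;
    }
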
49019diff --git a/fs/jfs/super.c b/fs/jfs/super.c
49020index 1a543be..d803c40 100644
49021--- a/fs/jfs/super.c
49022+++ b/fs/jfs/super.c
49023@@ -855,7 +855,7 @@ static int __init init_jfs_fs(void)
49024
49025 jfs_inode_cachep =
49026 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
49027- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
49028+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
49029 init_once);
49030 if (jfs_inode_cachep == NULL)
49031 return -ENOMEM;
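
SLAB_USERCOPY is a PaX flag, not a mainline one: with PAX_USERCOPY enabled, copies between slab objects and userspace are only permitted for caches that carry the flag, and jfs needs it because inline data inside jfs_inode_info (short symlink targets, for example) is handed straight to the usercopy path. A toy version of the whitelist check, with made-up names and a made-up flag value:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    #define SLAB_USERCOPY_SKETCH 0x1u   /* illustrative value only */

    struct cache {
        const char  *name;
        unsigned int flags;
    };

    /* hypothetical hook in the usercopy path: refuse to expose slab
     * objects unless their cache opted in */
    static long copy_obj_to_user(void *dst, const void *src, size_t n,
                                 const struct cache *c)
    {
        if (!(c->flags & SLAB_USERCOPY_SKETCH))
            return -1;            /* the kernel reports and aborts here */
        memcpy(dst, src, n);      /* stand-in for copy_to_user() */
        return 0;
    }

    int main(void)
    {
        struct cache jfs_ip = { "jfs_ip", SLAB_USERCOPY_SKETCH };
        struct cache other  = { "other",  0 };
        char obj[8] = "symlink", out[8];

        printf("jfs_ip: %ld\n", copy_obj_to_user(out, obj, 8, &jfs_ip));
        printf("other:  %ld\n", copy_obj_to_user(out, obj, 8, &other));
        return 0;
    }
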
49032diff --git a/fs/libfs.c b/fs/libfs.c
49033index 7cc37ca..b3e3eec 100644
49034--- a/fs/libfs.c
49035+++ b/fs/libfs.c
49036@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
49037
49038 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
49039 struct dentry *next;
49040+ char d_name[sizeof(next->d_iname)];
49041+ const unsigned char *name;
49042+
49043 next = list_entry(p, struct dentry, d_u.d_child);
49044 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
49045 if (!simple_positive(next)) {
49046@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
49047
49048 spin_unlock(&next->d_lock);
49049 spin_unlock(&dentry->d_lock);
49050- if (filldir(dirent, next->d_name.name,
49051+ name = next->d_name.name;
49052+ if (name == next->d_iname) {
49053+ memcpy(d_name, name, next->d_name.len);
49054+ name = d_name;
49055+ }
49056+ if (filldir(dirent, name,
49057 next->d_name.len, filp->f_pos,
49058 next->d_inode->i_ino,
49059 dt_type(next->d_inode)) < 0)
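
The dcache_readdir change copies short names out of next->d_iname into a stack buffer before the dentry locks are dropped. The apparent rationale (an inference here): once d_lock is released, a concurrent rename can overwrite or reuse the inline name storage while filldir() is still reading it, so the copy pins a stable snapshot for the duration of the callback. A minimal sketch of the snapshot pattern, with hypothetical types:

    #include <stdio.h>
    #include <string.h>

    #define DNAME_INLINE_LEN 40        /* sketch value */

    struct dentry_like {
        const unsigned char *name;     /* may point at iname below */
        unsigned int len;
        unsigned char iname[DNAME_INLINE_LEN];
    };

    typedef int (*filldir_t)(const unsigned char *name, unsigned int len);

    static int emit_name(const struct dentry_like *d, filldir_t fill)
    {
        unsigned char snap[DNAME_INLINE_LEN];
        const unsigned char *name = d->name;

        /* if the name lives in the dentry's inline buffer, snapshot
         * it before dropping locks: a concurrent rename may rewrite
         * that buffer underneath us */
        if (name == d->iname) {
            memcpy(snap, name, d->len);
            name = snap;
        }
        /* ...locks dropped here in the real code... */
        return fill(name, d->len);
    }

    static int print_fill(const unsigned char *name, unsigned int len)
    {
        return fwrite(name, 1, len, stdout) == len ? 0 : -1;
    }

    int main(void)
    {
        struct dentry_like d = { .len = 5 };
        memcpy(d.iname, "hello", 5);
        d.name = d.iname;              /* short name stored inline */
        return emit_name(&d, print_fill);
    }
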
49060diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
49061index 05d2912..760abfa 100644
49062--- a/fs/lockd/clntproc.c
49063+++ b/fs/lockd/clntproc.c
49064@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
49065 /*
49066 * Cookie counter for NLM requests
49067 */
49068-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
49069+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
49070
49071 void nlmclnt_next_cookie(struct nlm_cookie *c)
49072 {
49073- u32 cookie = atomic_inc_return(&nlm_cookie);
49074+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
49075
49076 memcpy(c->data, &cookie, 4);
49077 c->len=4;
49078diff --git a/fs/locks.c b/fs/locks.c
49079index a94e331..060bce3 100644
49080--- a/fs/locks.c
49081+++ b/fs/locks.c
49082@@ -2064,16 +2064,16 @@ void locks_remove_flock(struct file *filp)
49083 return;
49084
49085 if (filp->f_op && filp->f_op->flock) {
49086- struct file_lock fl = {
49087+ struct file_lock flock = {
49088 .fl_pid = current->tgid,
49089 .fl_file = filp,
49090 .fl_flags = FL_FLOCK,
49091 .fl_type = F_UNLCK,
49092 .fl_end = OFFSET_MAX,
49093 };
49094- filp->f_op->flock(filp, F_SETLKW, &fl);
49095- if (fl.fl_ops && fl.fl_ops->fl_release_private)
49096- fl.fl_ops->fl_release_private(&fl);
49097+ filp->f_op->flock(filp, F_SETLKW, &flock);
49098+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
49099+ flock.fl_ops->fl_release_private(&flock);
49100 }
49101
49102 lock_flocks();
49103diff --git a/fs/namei.c b/fs/namei.c
49104index 5f4cdf3..959a013 100644
49105--- a/fs/namei.c
49106+++ b/fs/namei.c
49107@@ -319,16 +319,32 @@ int generic_permission(struct inode *inode, int mask)
49108 if (ret != -EACCES)
49109 return ret;
49110
49111+#ifdef CONFIG_GRKERNSEC
49112+ /* we'll block if we have to log due to a denied capability use */
49113+ if (mask & MAY_NOT_BLOCK)
49114+ return -ECHILD;
49115+#endif
49116+
49117 if (S_ISDIR(inode->i_mode)) {
49118 /* DACs are overridable for directories */
49119- if (inode_capable(inode, CAP_DAC_OVERRIDE))
49120- return 0;
49121 if (!(mask & MAY_WRITE))
49122- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
49123+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
49124+ inode_capable(inode, CAP_DAC_READ_SEARCH))
49125 return 0;
49126+ if (inode_capable(inode, CAP_DAC_OVERRIDE))
49127+ return 0;
49128 return -EACCES;
49129 }
49130 /*
49131+ * Searching includes executable on directories, else just read.
49132+ */
49133+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
49134+ if (mask == MAY_READ)
49135+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
49136+ inode_capable(inode, CAP_DAC_READ_SEARCH))
49137+ return 0;
49138+
49139+ /*
49140 * Read/write DACs are always overridable.
49141 * Executable DACs are overridable when there is
49142 * at least one exec bit set.
49143@@ -337,14 +353,6 @@ int generic_permission(struct inode *inode, int mask)
49144 if (inode_capable(inode, CAP_DAC_OVERRIDE))
49145 return 0;
49146
49147- /*
49148- * Searching includes executable on directories, else just read.
49149- */
49150- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
49151- if (mask == MAY_READ)
49152- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
49153- return 0;
49154-
49155 return -EACCES;
49156 }
49157
49158@@ -826,7 +834,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
49159 {
49160 struct dentry *dentry = link->dentry;
49161 int error;
49162- char *s;
49163+ const char *s;
49164
49165 BUG_ON(nd->flags & LOOKUP_RCU);
49166
49167@@ -847,6 +855,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
49168 if (error)
49169 goto out_put_nd_path;
49170
49171+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
49172+ dentry->d_inode, dentry, nd->path.mnt)) {
49173+ error = -EACCES;
49174+ goto out_put_nd_path;
49175+ }
49176+
49177 nd->last_type = LAST_BIND;
49178 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
49179 error = PTR_ERR(*p);
49180@@ -1605,6 +1619,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
49181 break;
49182 res = walk_component(nd, path, &nd->last,
49183 nd->last_type, LOOKUP_FOLLOW);
49184+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
49185+ res = -EACCES;
49186 put_link(nd, &link, cookie);
49187 } while (res > 0);
49188
49189@@ -1703,7 +1719,7 @@ EXPORT_SYMBOL(full_name_hash);
49190 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
49191 {
49192 unsigned long a, b, adata, bdata, mask, hash, len;
49193- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
49194+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
49195
49196 hash = a = 0;
49197 len = -sizeof(unsigned long);
49198@@ -1993,6 +2009,8 @@ static int path_lookupat(int dfd, const char *name,
49199 if (err)
49200 break;
49201 err = lookup_last(nd, &path);
49202+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
49203+ err = -EACCES;
49204 put_link(nd, &link, cookie);
49205 }
49206 }
49207@@ -2000,6 +2018,21 @@ static int path_lookupat(int dfd, const char *name,
49208 if (!err)
49209 err = complete_walk(nd);
49210
49211+ if (!(nd->flags & LOOKUP_PARENT)) {
49212+#ifdef CONFIG_GRKERNSEC
49213+ if (flags & LOOKUP_RCU) {
49214+ if (!err)
49215+ path_put(&nd->path);
49216+ err = -ECHILD;
49217+ } else
49218+#endif
49219+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
49220+ if (!err)
49221+ path_put(&nd->path);
49222+ err = -ENOENT;
49223+ }
49224+ }
49225+
49226 if (!err && nd->flags & LOOKUP_DIRECTORY) {
49227 if (!nd->inode->i_op->lookup) {
49228 path_put(&nd->path);
49229@@ -2027,8 +2060,17 @@ static int filename_lookup(int dfd, struct filename *name,
49230 retval = path_lookupat(dfd, name->name,
49231 flags | LOOKUP_REVAL, nd);
49232
49233- if (likely(!retval))
49234+ if (likely(!retval)) {
49235+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
49236+#ifdef CONFIG_GRKERNSEC
49237+ if (flags & LOOKUP_RCU)
49238+ return -ECHILD;
49239+#endif
49240+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
49241+ return -ENOENT;
49242+ }
49243 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
49244+ }
49245 return retval;
49246 }
49247
49248@@ -2402,6 +2444,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
49249 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
49250 return -EPERM;
49251
49252+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
49253+ return -EPERM;
49254+ if (gr_handle_rawio(inode))
49255+ return -EPERM;
49256+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
49257+ return -EACCES;
49258+
49259 return 0;
49260 }
49261
49262@@ -2623,7 +2672,7 @@ looked_up:
49263 * cleared otherwise prior to returning.
49264 */
49265 static int lookup_open(struct nameidata *nd, struct path *path,
49266- struct file *file,
49267+ struct path *link, struct file *file,
49268 const struct open_flags *op,
49269 bool got_write, int *opened)
49270 {
49271@@ -2658,6 +2707,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
49272 /* Negative dentry, just create the file */
49273 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
49274 umode_t mode = op->mode;
49275+
49276+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
49277+ error = -EACCES;
49278+ goto out_dput;
49279+ }
49280+
49281+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
49282+ error = -EACCES;
49283+ goto out_dput;
49284+ }
49285+
49286 if (!IS_POSIXACL(dir->d_inode))
49287 mode &= ~current_umask();
49288 /*
49289@@ -2679,6 +2739,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
49290 nd->flags & LOOKUP_EXCL);
49291 if (error)
49292 goto out_dput;
49293+ else
49294+ gr_handle_create(dentry, nd->path.mnt);
49295 }
49296 out_no_open:
49297 path->dentry = dentry;
49298@@ -2693,7 +2755,7 @@ out_dput:
49299 /*
49300 * Handle the last step of open()
49301 */
49302-static int do_last(struct nameidata *nd, struct path *path,
49303+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
49304 struct file *file, const struct open_flags *op,
49305 int *opened, struct filename *name)
49306 {
49307@@ -2722,16 +2784,44 @@ static int do_last(struct nameidata *nd, struct path *path,
49308 error = complete_walk(nd);
49309 if (error)
49310 return error;
49311+#ifdef CONFIG_GRKERNSEC
49312+ if (nd->flags & LOOKUP_RCU) {
49313+ error = -ECHILD;
49314+ goto out;
49315+ }
49316+#endif
49317+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
49318+ error = -ENOENT;
49319+ goto out;
49320+ }
49321 audit_inode(name, nd->path.dentry, 0);
49322 if (open_flag & O_CREAT) {
49323 error = -EISDIR;
49324 goto out;
49325 }
49326+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
49327+ error = -EACCES;
49328+ goto out;
49329+ }
49330 goto finish_open;
49331 case LAST_BIND:
49332 error = complete_walk(nd);
49333 if (error)
49334 return error;
49335+#ifdef CONFIG_GRKERNSEC
49336+ if (nd->flags & LOOKUP_RCU) {
49337+ error = -ECHILD;
49338+ goto out;
49339+ }
49340+#endif
49341+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
49342+ error = -ENOENT;
49343+ goto out;
49344+ }
49345+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
49346+ error = -EACCES;
49347+ goto out;
49348+ }
49349 audit_inode(name, dir, 0);
49350 goto finish_open;
49351 }
49352@@ -2780,7 +2870,7 @@ retry_lookup:
49353 */
49354 }
49355 mutex_lock(&dir->d_inode->i_mutex);
49356- error = lookup_open(nd, path, file, op, got_write, opened);
49357+ error = lookup_open(nd, path, link, file, op, got_write, opened);
49358 mutex_unlock(&dir->d_inode->i_mutex);
49359
49360 if (error <= 0) {
49361@@ -2804,11 +2894,28 @@ retry_lookup:
49362 goto finish_open_created;
49363 }
49364
49365+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
49366+ error = -ENOENT;
49367+ goto exit_dput;
49368+ }
49369+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
49370+ error = -EACCES;
49371+ goto exit_dput;
49372+ }
49373+
49374 /*
49375 * create/update audit record if it already exists.
49376 */
49377- if (path->dentry->d_inode)
49378+ if (path->dentry->d_inode) {
49379+ /* only check if O_CREAT is specified, all other checks need to go
49380+ into may_open */
49381+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
49382+ error = -EACCES;
49383+ goto exit_dput;
49384+ }
49385+
49386 audit_inode(name, path->dentry, 0);
49387+ }
49388
49389 /*
49390 * If atomic_open() acquired write access it is dropped now due to
49391@@ -2849,6 +2956,11 @@ finish_lookup:
49392 }
49393 }
49394 BUG_ON(inode != path->dentry->d_inode);
49395+ /* if we're resolving a symlink to another symlink */
49396+ if (link && gr_handle_symlink_owner(link, inode)) {
49397+ error = -EACCES;
49398+ goto out;
49399+ }
49400 return 1;
49401 }
49402
49403@@ -2858,7 +2970,6 @@ finish_lookup:
49404 save_parent.dentry = nd->path.dentry;
49405 save_parent.mnt = mntget(path->mnt);
49406 nd->path.dentry = path->dentry;
49407-
49408 }
49409 nd->inode = inode;
49410 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
49411@@ -2867,6 +2978,22 @@ finish_lookup:
49412 path_put(&save_parent);
49413 return error;
49414 }
49415+
49416+#ifdef CONFIG_GRKERNSEC
49417+ if (nd->flags & LOOKUP_RCU) {
49418+ error = -ECHILD;
49419+ goto out;
49420+ }
49421+#endif
49422+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
49423+ error = -ENOENT;
49424+ goto out;
49425+ }
49426+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
49427+ error = -EACCES;
49428+ goto out;
49429+ }
49430+
49431 error = -EISDIR;
49432 if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
49433 goto out;
49434@@ -2965,7 +3092,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
49435 if (unlikely(error))
49436 goto out;
49437
49438- error = do_last(nd, &path, file, op, &opened, pathname);
49439+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
49440 while (unlikely(error > 0)) { /* trailing symlink */
49441 struct path link = path;
49442 void *cookie;
49443@@ -2983,7 +3110,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
49444 error = follow_link(&link, nd, &cookie);
49445 if (unlikely(error))
49446 break;
49447- error = do_last(nd, &path, file, op, &opened, pathname);
49448+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
49449 put_link(nd, &link, cookie);
49450 }
49451 out:
49452@@ -3073,8 +3200,12 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
49453 goto unlock;
49454
49455 error = -EEXIST;
49456- if (dentry->d_inode)
49457+ if (dentry->d_inode) {
49458+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
49459+ error = -ENOENT;
49460+ }
49461 goto fail;
49462+ }
49463 /*
49464 * Special case - lookup gave negative, but... we had foo/bar/
49465 * From the vfs_mknod() POV we just have a negative dentry -
49466@@ -3125,6 +3256,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
49467 }
49468 EXPORT_SYMBOL(user_path_create);
49469
49470+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, int is_dir)
49471+{
49472+ struct filename *tmp = getname(pathname);
49473+ struct dentry *res;
49474+ if (IS_ERR(tmp))
49475+ return ERR_CAST(tmp);
49476+ res = kern_path_create(dfd, tmp->name, path, is_dir);
49477+ if (IS_ERR(res))
49478+ putname(tmp);
49479+ else
49480+ *to = tmp;
49481+ return res;
49482+}
49483+
49484 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
49485 {
49486 int error = may_create(dir, dentry);
49487@@ -3186,6 +3331,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
49488
49489 if (!IS_POSIXACL(path.dentry->d_inode))
49490 mode &= ~current_umask();
49491+
49492+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
49493+ error = -EPERM;
49494+ goto out;
49495+ }
49496+
49497+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
49498+ error = -EACCES;
49499+ goto out;
49500+ }
49501+
49502 error = security_path_mknod(&path, dentry, mode, dev);
49503 if (error)
49504 goto out;
49505@@ -3202,6 +3358,8 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
49506 break;
49507 }
49508 out:
49509+ if (!error)
49510+ gr_handle_create(dentry, path.mnt);
49511 done_path_create(&path, dentry);
49512 return error;
49513 }
49514@@ -3248,9 +3406,18 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
49515
49516 if (!IS_POSIXACL(path.dentry->d_inode))
49517 mode &= ~current_umask();
49518+
49519+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
49520+ error = -EACCES;
49521+ goto out;
49522+ }
49523+
49524 error = security_path_mkdir(&path, dentry, mode);
49525 if (!error)
49526 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
49527+ if (!error)
49528+ gr_handle_create(dentry, path.mnt);
49529+out:
49530 done_path_create(&path, dentry);
49531 return error;
49532 }
49533@@ -3327,6 +3494,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
49534 struct filename *name;
49535 struct dentry *dentry;
49536 struct nameidata nd;
49537+ ino_t saved_ino = 0;
49538+ dev_t saved_dev = 0;
49539
49540 name = user_path_parent(dfd, pathname, &nd);
49541 if (IS_ERR(name))
49542@@ -3358,10 +3527,21 @@ static long do_rmdir(int dfd, const char __user *pathname)
49543 error = -ENOENT;
49544 goto exit3;
49545 }
49546+
49547+ saved_ino = dentry->d_inode->i_ino;
49548+ saved_dev = gr_get_dev_from_dentry(dentry);
49549+
49550+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
49551+ error = -EACCES;
49552+ goto exit3;
49553+ }
49554+
49555 error = security_path_rmdir(&nd.path, dentry);
49556 if (error)
49557 goto exit3;
49558 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
49559+ if (!error && (saved_dev || saved_ino))
49560+ gr_handle_delete(saved_ino, saved_dev);
49561 exit3:
49562 dput(dentry);
49563 exit2:
49564@@ -3423,6 +3603,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
49565 struct dentry *dentry;
49566 struct nameidata nd;
49567 struct inode *inode = NULL;
49568+ ino_t saved_ino = 0;
49569+ dev_t saved_dev = 0;
49570
49571 name = user_path_parent(dfd, pathname, &nd);
49572 if (IS_ERR(name))
49573@@ -3448,10 +3630,22 @@ static long do_unlinkat(int dfd, const char __user *pathname)
49574 if (!inode)
49575 goto slashes;
49576 ihold(inode);
49577+
49578+ if (inode->i_nlink <= 1) {
49579+ saved_ino = inode->i_ino;
49580+ saved_dev = gr_get_dev_from_dentry(dentry);
49581+ }
49582+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
49583+ error = -EACCES;
49584+ goto exit2;
49585+ }
49586+
49587 error = security_path_unlink(&nd.path, dentry);
49588 if (error)
49589 goto exit2;
49590 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
49591+ if (!error && (saved_ino || saved_dev))
49592+ gr_handle_delete(saved_ino, saved_dev);
49593 exit2:
49594 dput(dentry);
49595 }
49596@@ -3523,9 +3717,17 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
49597 if (IS_ERR(dentry))
49598 goto out_putname;
49599
49600+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
49601+ error = -EACCES;
49602+ goto out;
49603+ }
49604+
49605 error = security_path_symlink(&path, dentry, from->name);
49606 if (!error)
49607 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
49608+ if (!error)
49609+ gr_handle_create(dentry, path.mnt);
49610+out:
49611 done_path_create(&path, dentry);
49612 out_putname:
49613 putname(from);
49614@@ -3595,6 +3797,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
49615 {
49616 struct dentry *new_dentry;
49617 struct path old_path, new_path;
49618+ struct filename *to = NULL;
49619 int how = 0;
49620 int error;
49621
49622@@ -3618,7 +3821,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
49623 if (error)
49624 return error;
49625
49626- new_dentry = user_path_create(newdfd, newname, &new_path, 0);
49627+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
49628 error = PTR_ERR(new_dentry);
49629 if (IS_ERR(new_dentry))
49630 goto out;
49631@@ -3629,11 +3832,28 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
49632 error = may_linkat(&old_path);
49633 if (unlikely(error))
49634 goto out_dput;
49635+
49636+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
49637+ old_path.dentry->d_inode,
49638+ old_path.dentry->d_inode->i_mode, to)) {
49639+ error = -EACCES;
49640+ goto out_dput;
49641+ }
49642+
49643+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
49644+ old_path.dentry, old_path.mnt, to)) {
49645+ error = -EACCES;
49646+ goto out_dput;
49647+ }
49648+
49649 error = security_path_link(old_path.dentry, &new_path, new_dentry);
49650 if (error)
49651 goto out_dput;
49652 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
49653+ if (!error)
49654+ gr_handle_create(new_dentry, new_path.mnt);
49655 out_dput:
49656+ putname(to);
49657 done_path_create(&new_path, new_dentry);
49658 out:
49659 path_put(&old_path);
49660@@ -3873,12 +4093,21 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
49661 if (new_dentry == trap)
49662 goto exit5;
49663
49664+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
49665+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
49666+ to);
49667+ if (error)
49668+ goto exit5;
49669+
49670 error = security_path_rename(&oldnd.path, old_dentry,
49671 &newnd.path, new_dentry);
49672 if (error)
49673 goto exit5;
49674 error = vfs_rename(old_dir->d_inode, old_dentry,
49675 new_dir->d_inode, new_dentry);
49676+ if (!error)
49677+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
49678+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
49679 exit5:
49680 dput(new_dentry);
49681 exit4:
49682@@ -3903,6 +4132,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
49683
49684 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
49685 {
49686+ char tmpbuf[64];
49687+ const char *newlink;
49688 int len;
49689
49690 len = PTR_ERR(link);
49691@@ -3912,7 +4143,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
49692 len = strlen(link);
49693 if (len > (unsigned) buflen)
49694 len = buflen;
49695- if (copy_to_user(buffer, link, len))
49696+
49697+ if (len < sizeof(tmpbuf)) {
49698+ memcpy(tmpbuf, link, len);
49699+ newlink = tmpbuf;
49700+ } else
49701+ newlink = link;
49702+
49703+ if (copy_to_user(buffer, newlink, len))
49704 len = -EFAULT;
49705 out:
49706 return len;
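
Most of the namei.c additions thread a struct path *link for the trailing symlink down into do_last() and lookup_open() so that gr_handle_symlink_owner() can be consulted at every resolution step. The policy it enforces, grsecurity's symlink-ownership restriction and the ancestor of mainline's protected_symlinks sysctl, is roughly: in a sticky, world-writable directory, only follow a symlink owned by the follower or by the directory owner. A sketch of that predicate, with hypothetical field names (the real code also consults configuration state and logs denials):

    #include <stdbool.h>
    #include <stdio.h>
    #include <sys/stat.h>

    struct inode_like {
        unsigned int uid;
        unsigned int mode;
    };

    static bool may_follow_link(const struct inode_like *dir,
                                const struct inode_like *link,
                                unsigned int fsuid)
    {
        /* only restrict links in sticky, world-writable directories */
        if ((dir->mode & (S_ISVTX | S_IWOTH)) != (S_ISVTX | S_IWOTH))
            return true;
        /* allowed if the follower owns the link, or the directory
         * owner does */
        return link->uid == fsuid || dir->uid == link->uid;
    }

    int main(void)
    {
        struct inode_like tmpdir = { .uid = 0,    .mode = 0777 | S_ISVTX };
        struct inode_like link   = { .uid = 1000, .mode = 0777 };

        /* uid 1000 follows its own link in /tmp: allowed;
         * uid 2000 follows the same link: denied */
        printf("owner: %d\n", may_follow_link(&tmpdir, &link, 1000));
        printf("other: %d\n", may_follow_link(&tmpdir, &link, 2000));
        return 0;
    }
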
49707diff --git a/fs/namespace.c b/fs/namespace.c
49708index 2496062..e26f6d6 100644
49709--- a/fs/namespace.c
49710+++ b/fs/namespace.c
49711@@ -1212,6 +1212,9 @@ static int do_umount(struct mount *mnt, int flags)
49712 if (!(sb->s_flags & MS_RDONLY))
49713 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
49714 up_write(&sb->s_umount);
49715+
49716+ gr_log_remount(mnt->mnt_devname, retval);
49717+
49718 return retval;
49719 }
49720
49721@@ -1231,6 +1234,9 @@ static int do_umount(struct mount *mnt, int flags)
49722 br_write_unlock(&vfsmount_lock);
49723 up_write(&namespace_sem);
49724 release_mounts(&umount_list);
49725+
49726+ gr_log_unmount(mnt->mnt_devname, retval);
49727+
49728 return retval;
49729 }
49730
49731@@ -2244,6 +2250,16 @@ long do_mount(const char *dev_name, const char *dir_name,
49732 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
49733 MS_STRICTATIME);
49734
49735+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
49736+ retval = -EPERM;
49737+ goto dput_out;
49738+ }
49739+
49740+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
49741+ retval = -EPERM;
49742+ goto dput_out;
49743+ }
49744+
49745 if (flags & MS_REMOUNT)
49746 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
49747 data_page);
49748@@ -2258,6 +2274,9 @@ long do_mount(const char *dev_name, const char *dir_name,
49749 dev_name, data_page);
49750 dput_out:
49751 path_put(&path);
49752+
49753+ gr_log_mount(dev_name, dir_name, retval);
49754+
49755 return retval;
49756 }
49757
49758@@ -2516,6 +2535,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
49759 if (error)
49760 goto out2;
49761
49762+ if (gr_handle_chroot_pivot()) {
49763+ error = -EPERM;
49764+ goto out2;
49765+ }
49766+
49767 get_fs_root(current->fs, &root);
49768 error = lock_mount(&old);
49769 if (error)
49770diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
49771index 6fa01ae..2790820 100644
49772--- a/fs/nfs/inode.c
49773+++ b/fs/nfs/inode.c
49774@@ -1029,16 +1029,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
49775 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
49776 }
49777
49778-static atomic_long_t nfs_attr_generation_counter;
49779+static atomic_long_unchecked_t nfs_attr_generation_counter;
49780
49781 static unsigned long nfs_read_attr_generation_counter(void)
49782 {
49783- return atomic_long_read(&nfs_attr_generation_counter);
49784+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
49785 }
49786
49787 unsigned long nfs_inc_attr_generation_counter(void)
49788 {
49789- return atomic_long_inc_return(&nfs_attr_generation_counter);
49790+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
49791 }
49792
49793 void nfs_fattr_init(struct nfs_fattr *fattr)
49794diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
49795index f59169e..fd7d359 100644
49796--- a/fs/nfsd/vfs.c
49797+++ b/fs/nfsd/vfs.c
49798@@ -941,7 +941,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
49799 } else {
49800 oldfs = get_fs();
49801 set_fs(KERNEL_DS);
49802- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
49803+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
49804 set_fs(oldfs);
49805 }
49806
49807@@ -1045,7 +1045,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
49808
49809 /* Write the data. */
49810 oldfs = get_fs(); set_fs(KERNEL_DS);
49811- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
49812+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
49813 set_fs(oldfs);
49814 if (host_err < 0)
49815 goto out_nfserr;
49816@@ -1587,7 +1587,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
49817 */
49818
49819 oldfs = get_fs(); set_fs(KERNEL_DS);
49820- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
49821+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
49822 set_fs(oldfs);
49823
49824 if (host_err < 0)
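
The nfsd changes replace __user casts with __force_user inside the set_fs(KERNEL_DS) windows. __force_user is a sparse annotation this patch set introduces: unlike a bare cast it keeps sparse's address-space checking honest by stating explicitly that a kernel pointer is being laundered into the user address space on purpose, which is only safe because set_fs(KERNEL_DS) has widened the access limit for the duration. A sketch of the annotation, assuming sparse-style attribute macros (empty for a normal compile):

    #ifdef __CHECKER__
    # define __user       __attribute__((noderef, address_space(1)))
    # define __force      __attribute__((force))
    #else
    # define __user
    # define __force
    #endif
    /* the combined form added by this patch */
    #define __force_user  __force __user

    /* stand-in for a VFS call that expects a user pointer */
    static long vfs_readv_sketch(char __user *buf, unsigned long n)
    {
        (void)buf; (void)n;
        return 0;
    }

    static long kernel_read_sketch(char *kbuf, unsigned long n)
    {
        /* set_fs(KERNEL_DS) would go here in the real code, making
         * the "user" access actually hit kernel memory */
        return vfs_readv_sketch((char __force_user *)kbuf, n);
        /* set_fs(oldfs) afterwards */
    }

    int main(void)
    {
        char kbuf[16];
        return (int)kernel_read_sketch(kbuf, sizeof(kbuf));
    }
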
49825diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
49826index 6fcaeb8..9d16d04 100644
49827--- a/fs/notify/fanotify/fanotify_user.c
49828+++ b/fs/notify/fanotify/fanotify_user.c
49829@@ -250,8 +250,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
49830
49831 fd = fanotify_event_metadata.fd;
49832 ret = -EFAULT;
49833- if (copy_to_user(buf, &fanotify_event_metadata,
49834- fanotify_event_metadata.event_len))
49835+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
49836+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
49837 goto out_close_fd;
49838
49839 ret = prepare_for_access_response(group, event, fd);
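
The fanotify fix refuses to copy more than sizeof(fanotify_event_metadata) bytes to userspace: event_len is taken from the metadata itself, so an oversized length would otherwise read past the on-stack structure and disclose adjacent kernel stack memory. This is the standard pattern of validating a self-describing length against the backing object before copying, as in this sketch:

    #include <errno.h>
    #include <string.h>

    struct event_metadata {
        unsigned int event_len;   /* self-describing length */
        int fd;
    };

    /* stand-in for copy_to_user(): returns bytes NOT copied */
    static unsigned long copy_to_user_sketch(void *dst, const void *src,
                                             unsigned long n)
    {
        memcpy(dst, src, n);
        return 0;
    }

    static long emit_event(void *ubuf, const struct event_metadata *ev)
    {
        /* never trust a length field further than the object backing
         * it: copying ev->event_len bytes from a stack struct would
         * leak adjacent stack memory if event_len were oversized */
        if (ev->event_len > sizeof(*ev))
            return -EFAULT;
        if (copy_to_user_sketch(ubuf, ev, ev->event_len))
            return -EFAULT;
        return ev->event_len;
    }

    int main(void)
    {
        struct event_metadata ev = { .event_len = sizeof(ev), .fd = 3 };
        char buf[sizeof(ev)];

        long ok = emit_event(buf, &ev);
        ev.event_len = 4096;          /* corrupted length */
        long bad = emit_event(buf, &ev);
        return !(ok > 0 && bad == -EFAULT);
    }
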
49840diff --git a/fs/notify/notification.c b/fs/notify/notification.c
49841index c887b13..0fdf472 100644
49842--- a/fs/notify/notification.c
49843+++ b/fs/notify/notification.c
49844@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
49845 * get set to 0 so it will never get 'freed'
49846 */
49847 static struct fsnotify_event *q_overflow_event;
49848-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
49849+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
49850
49851 /**
49852 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
49853@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
49854 */
49855 u32 fsnotify_get_cookie(void)
49856 {
49857- return atomic_inc_return(&fsnotify_sync_cookie);
49858+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
49859 }
49860 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
49861
49862diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
49863index 99e3610..02c1068 100644
49864--- a/fs/ntfs/dir.c
49865+++ b/fs/ntfs/dir.c
49866@@ -1329,7 +1329,7 @@ find_next_index_buffer:
49867 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
49868 ~(s64)(ndir->itype.index.block_size - 1)));
49869 /* Bounds checks. */
49870- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
49871+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
49872 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
49873 "inode 0x%lx or driver bug.", vdir->i_ino);
49874 goto err_out;
49875diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
49876index 1ecf464..e1ff8bf 100644
49877--- a/fs/ntfs/file.c
49878+++ b/fs/ntfs/file.c
49879@@ -2232,6 +2232,6 @@ const struct inode_operations ntfs_file_inode_ops = {
49880 #endif /* NTFS_RW */
49881 };
49882
49883-const struct file_operations ntfs_empty_file_ops = {};
49884+const struct file_operations ntfs_empty_file_ops __read_only;
49885
49886-const struct inode_operations ntfs_empty_inode_ops = {};
49887+const struct inode_operations ntfs_empty_inode_ops __read_only;
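
Replacing "= {}" with __read_only keeps the ntfs placeholder ops zero-filled (an uninitialized file-scope object is zeroed anyway) while moving them into memory that is write-protected after boot, so these all-NULL function-pointer tables stop being a convenient overwrite target. A sketch of the attribute as this patch set defines it elsewhere; the section name here is quoted from memory and should be treated as an assumption:

    /* objects placed here are write-protected once the kernel has
     * finished booting */
    #define __read_only __attribute__((__section__(".data..read_only")))

    struct ops {
        int (*open)(void);
        int (*release)(void);
    };

    /* zero-filled like "= {}", but never writable at runtime */
    const struct ops empty_ops __read_only;
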
49888diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
49889index a9f78c7..ed8a381 100644
49890--- a/fs/ocfs2/localalloc.c
49891+++ b/fs/ocfs2/localalloc.c
49892@@ -1279,7 +1279,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
49893 goto bail;
49894 }
49895
49896- atomic_inc(&osb->alloc_stats.moves);
49897+ atomic_inc_unchecked(&osb->alloc_stats.moves);
49898
49899 bail:
49900 if (handle)
49901diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
49902index d355e6e..578d905 100644
49903--- a/fs/ocfs2/ocfs2.h
49904+++ b/fs/ocfs2/ocfs2.h
49905@@ -235,11 +235,11 @@ enum ocfs2_vol_state
49906
49907 struct ocfs2_alloc_stats
49908 {
49909- atomic_t moves;
49910- atomic_t local_data;
49911- atomic_t bitmap_data;
49912- atomic_t bg_allocs;
49913- atomic_t bg_extends;
49914+ atomic_unchecked_t moves;
49915+ atomic_unchecked_t local_data;
49916+ atomic_unchecked_t bitmap_data;
49917+ atomic_unchecked_t bg_allocs;
49918+ atomic_unchecked_t bg_extends;
49919 };
49920
49921 enum ocfs2_local_alloc_state
49922diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
49923index f169da4..9112253 100644
49924--- a/fs/ocfs2/suballoc.c
49925+++ b/fs/ocfs2/suballoc.c
49926@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
49927 mlog_errno(status);
49928 goto bail;
49929 }
49930- atomic_inc(&osb->alloc_stats.bg_extends);
49931+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
49932
49933 /* You should never ask for this much metadata */
49934 BUG_ON(bits_wanted >
49935@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
49936 mlog_errno(status);
49937 goto bail;
49938 }
49939- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
49940+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
49941
49942 *suballoc_loc = res.sr_bg_blkno;
49943 *suballoc_bit_start = res.sr_bit_offset;
49944@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
49945 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
49946 res->sr_bits);
49947
49948- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
49949+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
49950
49951 BUG_ON(res->sr_bits != 1);
49952
49953@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
49954 mlog_errno(status);
49955 goto bail;
49956 }
49957- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
49958+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
49959
49960 BUG_ON(res.sr_bits != 1);
49961
49962@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
49963 cluster_start,
49964 num_clusters);
49965 if (!status)
49966- atomic_inc(&osb->alloc_stats.local_data);
49967+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
49968 } else {
49969 if (min_clusters > (osb->bitmap_cpg - 1)) {
49970 /* The only paths asking for contiguousness
49971@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
49972 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
49973 res.sr_bg_blkno,
49974 res.sr_bit_offset);
49975- atomic_inc(&osb->alloc_stats.bitmap_data);
49976+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
49977 *num_clusters = res.sr_bits;
49978 }
49979 }
49980diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
49981index 0e91ec2..f4b3fc6 100644
49982--- a/fs/ocfs2/super.c
49983+++ b/fs/ocfs2/super.c
49984@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
49985 "%10s => GlobalAllocs: %d LocalAllocs: %d "
49986 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
49987 "Stats",
49988- atomic_read(&osb->alloc_stats.bitmap_data),
49989- atomic_read(&osb->alloc_stats.local_data),
49990- atomic_read(&osb->alloc_stats.bg_allocs),
49991- atomic_read(&osb->alloc_stats.moves),
49992- atomic_read(&osb->alloc_stats.bg_extends));
49993+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
49994+ atomic_read_unchecked(&osb->alloc_stats.local_data),
49995+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
49996+ atomic_read_unchecked(&osb->alloc_stats.moves),
49997+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
49998
49999 out += snprintf(buf + out, len - out,
50000 "%10s => State: %u Descriptor: %llu Size: %u bits "
50001@@ -2121,11 +2121,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
50002 spin_lock_init(&osb->osb_xattr_lock);
50003 ocfs2_init_steal_slots(osb);
50004
50005- atomic_set(&osb->alloc_stats.moves, 0);
50006- atomic_set(&osb->alloc_stats.local_data, 0);
50007- atomic_set(&osb->alloc_stats.bitmap_data, 0);
50008- atomic_set(&osb->alloc_stats.bg_allocs, 0);
50009- atomic_set(&osb->alloc_stats.bg_extends, 0);
50010+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
50011+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
50012+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
50013+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
50014+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
50015
50016 /* Copy the blockcheck stats from the superblock probe */
50017 osb->osb_ecc_stats = *stats;
50018diff --git a/fs/open.c b/fs/open.c
50019index 59071f5..c6229a0 100644
50020--- a/fs/open.c
50021+++ b/fs/open.c
50022@@ -31,6 +31,8 @@
50023 #include <linux/ima.h>
50024 #include <linux/dnotify.h>
50025
50026+#define CREATE_TRACE_POINTS
50027+#include <trace/events/fs.h>
50028 #include "internal.h"
50029
50030 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
50031@@ -112,6 +114,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
50032 error = locks_verify_truncate(inode, NULL, length);
50033 if (!error)
50034 error = security_path_truncate(&path);
50035+
50036+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
50037+ error = -EACCES;
50038+
50039 if (!error)
50040 error = do_truncate(path.dentry, length, 0, NULL);
50041
50042@@ -362,6 +368,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
50043 if (__mnt_is_readonly(path.mnt))
50044 res = -EROFS;
50045
50046+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
50047+ res = -EACCES;
50048+
50049 out_path_release:
50050 path_put(&path);
50051 out:
50052@@ -388,6 +397,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
50053 if (error)
50054 goto dput_and_out;
50055
50056+ gr_log_chdir(path.dentry, path.mnt);
50057+
50058 set_fs_pwd(current->fs, &path);
50059
50060 dput_and_out:
50061@@ -413,6 +424,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
50062 goto out_putf;
50063
50064 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
50065+
50066+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
50067+ error = -EPERM;
50068+
50069+ if (!error)
50070+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
50071+
50072 if (!error)
50073 set_fs_pwd(current->fs, &f.file->f_path);
50074 out_putf:
50075@@ -441,7 +459,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
50076 if (error)
50077 goto dput_and_out;
50078
50079+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
50080+ goto dput_and_out;
50081+
50082 set_fs_root(current->fs, &path);
50083+
50084+ gr_handle_chroot_chdir(&path);
50085+
50086 error = 0;
50087 dput_and_out:
50088 path_put(&path);
50089@@ -459,6 +483,16 @@ static int chmod_common(struct path *path, umode_t mode)
50090 if (error)
50091 return error;
50092 mutex_lock(&inode->i_mutex);
50093+
50094+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
50095+ error = -EACCES;
50096+ goto out_unlock;
50097+ }
50098+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
50099+ error = -EACCES;
50100+ goto out_unlock;
50101+ }
50102+
50103 error = security_path_chmod(path, mode);
50104 if (error)
50105 goto out_unlock;
50106@@ -514,6 +548,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
50107 uid = make_kuid(current_user_ns(), user);
50108 gid = make_kgid(current_user_ns(), group);
50109
50110+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
50111+ return -EACCES;
50112+
50113 newattrs.ia_valid = ATTR_CTIME;
50114 if (user != (uid_t) -1) {
50115 if (!uid_valid(uid))
50116@@ -925,6 +962,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
50117 } else {
50118 fsnotify_open(f);
50119 fd_install(fd, f);
50120+ trace_do_sys_open(tmp->name, flags, mode);
50121 }
50122 }
50123 putname(tmp);
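
Besides the grsecurity hooks, the open.c hunk wires up a do_sys_open tracepoint: CREATE_TRACE_POINTS must be defined in exactly one translation unit before the event header is included, which instantiates the tracepoint bodies that trace_do_sys_open() calls. A sketch of what the corresponding trace/events/fs.h declaration looks like; the field layout is reconstructed here and may not match the header byte for byte:

    #undef TRACE_SYSTEM
    #define TRACE_SYSTEM fs

    #if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
    #define _TRACE_FS_H

    #include <linux/tracepoint.h>

    TRACE_EVENT(do_sys_open,

        TP_PROTO(const char *filename, int flags, int mode),

        TP_ARGS(filename, flags, mode),

        TP_STRUCT__entry(
            __string(filename, filename)
            __field(int, flags)
            __field(int, mode)
        ),

        TP_fast_assign(
            __assign_str(filename, filename);
            __entry->flags = flags;
            __entry->mode = mode;
        ),

        TP_printk("\"%s\" %x %o",
            __get_str(filename), __entry->flags, __entry->mode)
    );

    #endif /* _TRACE_FS_H */

    /* this part must be outside the multi-read protection */
    #include <trace/define_trace.h>
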
50124diff --git a/fs/pipe.c b/fs/pipe.c
50125index bd3479d..fb92c4d 100644
50126--- a/fs/pipe.c
50127+++ b/fs/pipe.c
50128@@ -438,9 +438,9 @@ redo:
50129 }
50130 if (bufs) /* More to do? */
50131 continue;
50132- if (!pipe->writers)
50133+ if (!atomic_read(&pipe->writers))
50134 break;
50135- if (!pipe->waiting_writers) {
50136+ if (!atomic_read(&pipe->waiting_writers)) {
50137 /* syscall merging: Usually we must not sleep
50138 * if O_NONBLOCK is set, or if we got some data.
50139 * But if a writer sleeps in kernel space, then
50140@@ -504,7 +504,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
50141 mutex_lock(&inode->i_mutex);
50142 pipe = inode->i_pipe;
50143
50144- if (!pipe->readers) {
50145+ if (!atomic_read(&pipe->readers)) {
50146 send_sig(SIGPIPE, current, 0);
50147 ret = -EPIPE;
50148 goto out;
50149@@ -553,7 +553,7 @@ redo1:
50150 for (;;) {
50151 int bufs;
50152
50153- if (!pipe->readers) {
50154+ if (!atomic_read(&pipe->readers)) {
50155 send_sig(SIGPIPE, current, 0);
50156 if (!ret)
50157 ret = -EPIPE;
50158@@ -644,9 +644,9 @@ redo2:
50159 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
50160 do_wakeup = 0;
50161 }
50162- pipe->waiting_writers++;
50163+ atomic_inc(&pipe->waiting_writers);
50164 pipe_wait(pipe);
50165- pipe->waiting_writers--;
50166+ atomic_dec(&pipe->waiting_writers);
50167 }
50168 out:
50169 mutex_unlock(&inode->i_mutex);
50170@@ -716,7 +716,7 @@ pipe_poll(struct file *filp, poll_table *wait)
50171 mask = 0;
50172 if (filp->f_mode & FMODE_READ) {
50173 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
50174- if (!pipe->writers && filp->f_version != pipe->w_counter)
50175+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
50176 mask |= POLLHUP;
50177 }
50178
50179@@ -726,7 +726,7 @@ pipe_poll(struct file *filp, poll_table *wait)
50180 * Most Unices do not set POLLERR for FIFOs but on Linux they
50181 * behave exactly like pipes for poll().
50182 */
50183- if (!pipe->readers)
50184+ if (!atomic_read(&pipe->readers))
50185 mask |= POLLERR;
50186 }
50187
50188@@ -740,10 +740,10 @@ pipe_release(struct inode *inode, int decr, int decw)
50189
50190 mutex_lock(&inode->i_mutex);
50191 pipe = inode->i_pipe;
50192- pipe->readers -= decr;
50193- pipe->writers -= decw;
50194+ atomic_sub(decr, &pipe->readers);
50195+ atomic_sub(decw, &pipe->writers);
50196
50197- if (!pipe->readers && !pipe->writers) {
50198+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
50199 free_pipe_info(inode);
50200 } else {
50201 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
50202@@ -833,7 +833,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
50203
50204 if (inode->i_pipe) {
50205 ret = 0;
50206- inode->i_pipe->readers++;
50207+ atomic_inc(&inode->i_pipe->readers);
50208 }
50209
50210 mutex_unlock(&inode->i_mutex);
50211@@ -850,7 +850,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
50212
50213 if (inode->i_pipe) {
50214 ret = 0;
50215- inode->i_pipe->writers++;
50216+ atomic_inc(&inode->i_pipe->writers);
50217 }
50218
50219 mutex_unlock(&inode->i_mutex);
50220@@ -868,9 +868,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
50221 if (inode->i_pipe) {
50222 ret = 0;
50223 if (filp->f_mode & FMODE_READ)
50224- inode->i_pipe->readers++;
50225+ atomic_inc(&inode->i_pipe->readers);
50226 if (filp->f_mode & FMODE_WRITE)
50227- inode->i_pipe->writers++;
50228+ atomic_inc(&inode->i_pipe->writers);
50229 }
50230
50231 mutex_unlock(&inode->i_mutex);
50232@@ -962,7 +962,7 @@ void free_pipe_info(struct inode *inode)
50233 inode->i_pipe = NULL;
50234 }
50235
50236-static struct vfsmount *pipe_mnt __read_mostly;
50237+struct vfsmount *pipe_mnt __read_mostly;
50238
50239 /*
50240 * pipefs_dname() is called from d_path().
50241@@ -992,7 +992,8 @@ static struct inode * get_pipe_inode(void)
50242 goto fail_iput;
50243 inode->i_pipe = pipe;
50244
50245- pipe->readers = pipe->writers = 1;
50246+ atomic_set(&pipe->readers, 1);
50247+ atomic_set(&pipe->writers, 1);
50248 inode->i_fop = &rdwr_pipefifo_fops;
50249
50250 /*
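
The pipe.c conversion turns readers, writers, and waiting_writers into atomic_t (the matching pipe_inode_info change lives elsewhere in this patch). These counters track how many users hold the pipe open, so they are reference counts in disguise: as plain ints, a sufficiently patient sequence of opens could in principle wrap one so that pipe_release() frees the pipe while it is still referenced, whereas as atomic_t they inherit the PaX REFCOUNT overflow trap. The failure mode, in miniature:

    #include <stdio.h>

    /* why wrapping a use counter matters: with a plain integer,
     * enough opens wrap the counter, and a later release sees zero
     * and frees the object while it is still in use. A REFCOUNT-
     * checked atomic_t traps on the wrap instead; the counters
     * converted to atomic_unchecked_t elsewhere in this patch are
     * precisely the ones where wrapping is harmless. */
    int main(void)
    {
        unsigned int readers = 0xffffffffu;  /* saturated by opens */
        readers++;                           /* wraps to 0 */
        if (readers == 0)
            printf("counter wrapped: object would be freed in use\n");
        return 0;
    }
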
50251diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
50252index 15af622..0e9f4467 100644
50253--- a/fs/proc/Kconfig
50254+++ b/fs/proc/Kconfig
50255@@ -30,12 +30,12 @@ config PROC_FS
50256
50257 config PROC_KCORE
50258 bool "/proc/kcore support" if !ARM
50259- depends on PROC_FS && MMU
50260+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
50261
50262 config PROC_VMCORE
50263 bool "/proc/vmcore support"
50264- depends on PROC_FS && CRASH_DUMP
50265- default y
50266+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
50267+ default n
50268 help
50269 Exports the dump image of crashed kernel in ELF format.
50270
50271@@ -59,8 +59,8 @@ config PROC_SYSCTL
50272 limited in memory.
50273
50274 config PROC_PAGE_MONITOR
50275- default y
50276- depends on PROC_FS && MMU
50277+ default n
50278+ depends on PROC_FS && MMU && !GRKERNSEC
50279 bool "Enable /proc page monitoring" if EXPERT
50280 help
50281 Various /proc files exist to monitor process memory utilization:
50282diff --git a/fs/proc/array.c b/fs/proc/array.c
50283index bd31e02..15cae71 100644
50284--- a/fs/proc/array.c
50285+++ b/fs/proc/array.c
50286@@ -60,6 +60,7 @@
50287 #include <linux/tty.h>
50288 #include <linux/string.h>
50289 #include <linux/mman.h>
50290+#include <linux/grsecurity.h>
50291 #include <linux/proc_fs.h>
50292 #include <linux/ioport.h>
50293 #include <linux/uaccess.h>
50294@@ -346,6 +347,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
50295 seq_putc(m, '\n');
50296 }
50297
50298+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
50299+static inline void task_pax(struct seq_file *m, struct task_struct *p)
50300+{
50301+ if (p->mm)
50302+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
50303+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
50304+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
50305+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
50306+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
50307+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
50308+ else
50309+ seq_printf(m, "PaX:\t-----\n");
50310+}
50311+#endif
50312+
50313 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
50314 struct pid *pid, struct task_struct *task)
50315 {
50316@@ -363,9 +379,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
50317 task_cpus_allowed(m, task);
50318 cpuset_task_status_allowed(m, task);
50319 task_context_switch_counts(m, task);
50320+
50321+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
50322+ task_pax(m, task);
50323+#endif
50324+
50325+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
50326+ task_grsec_rbac(m, task);
50327+#endif
50328+
50329 return 0;
50330 }
50331
50332+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50333+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
50334+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
50335+ _mm->pax_flags & MF_PAX_SEGMEXEC))
50336+#endif
50337+
50338 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
50339 struct pid *pid, struct task_struct *task, int whole)
50340 {
50341@@ -387,6 +418,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
50342 char tcomm[sizeof(task->comm)];
50343 unsigned long flags;
50344
50345+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50346+ if (current->exec_id != m->exec_id) {
50347+ gr_log_badprocpid("stat");
50348+ return 0;
50349+ }
50350+#endif
50351+
50352 state = *get_task_state(task);
50353 vsize = eip = esp = 0;
50354 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
50355@@ -458,6 +496,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
50356 gtime = task->gtime;
50357 }
50358
50359+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50360+ if (PAX_RAND_FLAGS(mm)) {
50361+ eip = 0;
50362+ esp = 0;
50363+ wchan = 0;
50364+ }
50365+#endif
50366+#ifdef CONFIG_GRKERNSEC_HIDESYM
50367+ wchan = 0;
50368+ eip =0;
50369+ esp =0;
50370+#endif
50371+
50372 /* scale priority and nice values from timeslices to -20..20 */
50373 /* to make it look like a "normal" Unix priority/nice value */
50374 priority = task_prio(task);
50375@@ -494,9 +545,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
50376 seq_put_decimal_ull(m, ' ', vsize);
50377 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
50378 seq_put_decimal_ull(m, ' ', rsslim);
50379+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50380+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
50381+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
50382+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
50383+#else
50384 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
50385 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
50386 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
50387+#endif
50388 seq_put_decimal_ull(m, ' ', esp);
50389 seq_put_decimal_ull(m, ' ', eip);
50390 /* The signal information here is obsolete.
50391@@ -518,7 +575,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
50392 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
50393 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
50394
50395- if (mm && permitted) {
50396+ if (mm && permitted
50397+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50398+ && !PAX_RAND_FLAGS(mm)
50399+#endif
50400+ ) {
50401 seq_put_decimal_ull(m, ' ', mm->start_data);
50402 seq_put_decimal_ull(m, ' ', mm->end_data);
50403 seq_put_decimal_ull(m, ' ', mm->start_brk);
50404@@ -556,8 +617,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
50405 struct pid *pid, struct task_struct *task)
50406 {
50407 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
50408- struct mm_struct *mm = get_task_mm(task);
50409+ struct mm_struct *mm;
50410
50411+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50412+ if (current->exec_id != m->exec_id) {
50413+ gr_log_badprocpid("statm");
50414+ return 0;
50415+ }
50416+#endif
50417+ mm = get_task_mm(task);
50418 if (mm) {
50419 size = task_statm(mm, &shared, &text, &data, &resident);
50420 mmput(mm);
50421@@ -580,6 +648,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
50422 return 0;
50423 }
50424
50425+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
50426+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
50427+{
50428+ return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
50429+}
50430+#endif
50431+
50432 #ifdef CONFIG_CHECKPOINT_RESTORE
50433 static struct pid *
50434 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
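
The exec_id checks added to do_task_stat() and proc_pid_statm() close a /proc race: a process can open its own /proc/self/stat, then execve() a setuid binary and read memory-layout details of the now-privileged image through the stale file description. grsecurity stamps each exec generation with an exec_id recorded at open time (in the seq_file here, and in file->f_version for the mem/environ handlers in base.c below) and refuses reads whose id no longer matches. A sketch of the handshake, with hypothetical names:

    #include <stdbool.h>
    #include <stdio.h>

    /* hypothetical miniature of the exec_id handshake */
    struct task  { unsigned long long exec_id; };
    struct pfile { unsigned long long exec_id; };  /* stamped at open() */

    static void do_open(struct pfile *f, const struct task *t)
    {
        f->exec_id = t->exec_id;      /* remember which image opened us */
    }

    static bool do_read(const struct pfile *f, const struct task *t)
    {
        /* refuse if an execve() bumped the generation since open() */
        return f->exec_id == t->exec_id;
    }

    int main(void)
    {
        struct task t = { .exec_id = 1 };
        struct pfile f;

        do_open(&f, &t);
        t.exec_id++;                  /* execve() of a setuid binary */
        printf("read allowed: %d\n", do_read(&f, &t));
        return 0;
    }
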
50435diff --git a/fs/proc/base.c b/fs/proc/base.c
50436index 9e28356..c485b3c 100644
50437--- a/fs/proc/base.c
50438+++ b/fs/proc/base.c
50439@@ -111,6 +111,14 @@ struct pid_entry {
50440 union proc_op op;
50441 };
50442
50443+struct getdents_callback {
50444+ struct linux_dirent __user * current_dir;
50445+ struct linux_dirent __user * previous;
50446+ struct file * file;
50447+ int count;
50448+ int error;
50449+};
50450+
50451 #define NOD(NAME, MODE, IOP, FOP, OP) { \
50452 .name = (NAME), \
50453 .len = sizeof(NAME) - 1, \
50454@@ -208,6 +216,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
50455 if (!mm->arg_end)
50456 goto out_mm; /* Shh! No looking before we're done */
50457
50458+ if (gr_acl_handle_procpidmem(task))
50459+ goto out_mm;
50460+
50461 len = mm->arg_end - mm->arg_start;
50462
50463 if (len > PAGE_SIZE)
50464@@ -235,12 +246,28 @@ out:
50465 return res;
50466 }
50467
50468+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50469+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
50470+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
50471+ _mm->pax_flags & MF_PAX_SEGMEXEC))
50472+#endif
50473+
50474 static int proc_pid_auxv(struct task_struct *task, char *buffer)
50475 {
50476 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
50477 int res = PTR_ERR(mm);
50478 if (mm && !IS_ERR(mm)) {
50479 unsigned int nwords = 0;
50480+
50481+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50482+ /* allow if we're currently ptracing this task */
50483+ if (PAX_RAND_FLAGS(mm) &&
50484+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
50485+ mmput(mm);
50486+ return 0;
50487+ }
50488+#endif
50489+
50490 do {
50491 nwords += 2;
50492 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
50493@@ -254,7 +281,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
50494 }
50495
50496
50497-#ifdef CONFIG_KALLSYMS
50498+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
50499 /*
50500 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
50501 * Returns the resolved symbol. If that fails, simply return the address.
50502@@ -293,7 +320,7 @@ static void unlock_trace(struct task_struct *task)
50503 mutex_unlock(&task->signal->cred_guard_mutex);
50504 }
50505
50506-#ifdef CONFIG_STACKTRACE
50507+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
50508
50509 #define MAX_STACK_TRACE_DEPTH 64
50510
50511@@ -485,7 +512,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
50512 return count;
50513 }
50514
50515-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
50516+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
50517 static int proc_pid_syscall(struct task_struct *task, char *buffer)
50518 {
50519 long nr;
50520@@ -514,7 +541,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
50521 /************************************************************************/
50522
50523 /* permission checks */
50524-static int proc_fd_access_allowed(struct inode *inode)
50525+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
50526 {
50527 struct task_struct *task;
50528 int allowed = 0;
50529@@ -524,7 +551,10 @@ static int proc_fd_access_allowed(struct inode *inode)
50530 */
50531 task = get_proc_task(inode);
50532 if (task) {
50533- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
50534+ if (log)
50535+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
50536+ else
50537+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
50538 put_task_struct(task);
50539 }
50540 return allowed;
50541@@ -562,10 +592,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
50542 struct task_struct *task,
50543 int hide_pid_min)
50544 {
50545+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
50546+ return false;
50547+
50548+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50549+ rcu_read_lock();
50550+ {
50551+ const struct cred *tmpcred = current_cred();
50552+ const struct cred *cred = __task_cred(task);
50553+
50554+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
50555+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
50556+ || in_group_p(grsec_proc_gid)
50557+#endif
50558+ ) {
50559+ rcu_read_unlock();
50560+ return true;
50561+ }
50562+ }
50563+ rcu_read_unlock();
50564+
50565+ if (!pid->hide_pid)
50566+ return false;
50567+#endif
50568+
50569 if (pid->hide_pid < hide_pid_min)
50570 return true;
50571 if (in_group_p(pid->pid_gid))
50572 return true;
50573+
50574 return ptrace_may_access(task, PTRACE_MODE_READ);
50575 }
50576
50577@@ -583,7 +638,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
50578 put_task_struct(task);
50579
50580 if (!has_perms) {
50581+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50582+ {
50583+#else
50584 if (pid->hide_pid == 2) {
50585+#endif
50586 /*
50587 * Let's make getdents(), stat(), and open()
50588 * consistent with each other. If a process
50589@@ -681,6 +740,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
50590 if (!task)
50591 return -ESRCH;
50592
50593+ if (gr_acl_handle_procpidmem(task)) {
50594+ put_task_struct(task);
50595+ return -EPERM;
50596+ }
50597+
50598 mm = mm_access(task, mode);
50599 put_task_struct(task);
50600
50601@@ -696,6 +760,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
50602
50603 file->private_data = mm;
50604
50605+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50606+ file->f_version = current->exec_id;
50607+#endif
50608+
50609 return 0;
50610 }
50611
50612@@ -717,6 +785,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
50613 ssize_t copied;
50614 char *page;
50615
50616+#ifdef CONFIG_GRKERNSEC
50617+ if (write)
50618+ return -EPERM;
50619+#endif
50620+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50621+ if (file->f_version != current->exec_id) {
50622+ gr_log_badprocpid("mem");
50623+ return 0;
50624+ }
50625+#endif
50626+
50627 if (!mm)
50628 return 0;
50629
50630@@ -821,6 +900,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
50631 if (!mm)
50632 return 0;
50633
50634+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50635+ if (file->f_version != current->exec_id) {
50636+ gr_log_badprocpid("environ");
50637+ return 0;
50638+ }
50639+#endif
50640+
50641 page = (char *)__get_free_page(GFP_TEMPORARY);
50642 if (!page)
50643 return -ENOMEM;
50644@@ -1436,7 +1522,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
50645 int error = -EACCES;
50646
50647 /* Are we allowed to snoop on the tasks file descriptors? */
50648- if (!proc_fd_access_allowed(inode))
50649+ if (!proc_fd_access_allowed(inode, 0))
50650 goto out;
50651
50652 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
50653@@ -1480,8 +1566,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
50654 struct path path;
50655
50656 /* Are we allowed to snoop on the tasks file descriptors? */
50657- if (!proc_fd_access_allowed(inode))
50658- goto out;
50659+	/* Logging here is needed for RBAC learning on Chromium to work, but
50660+	   we don't want 'ps' to flood the logs: it does a readlink on
50661+	   /proc/<pid>/fd/2 of every task in the listing, and we don't want
50662+	   'ps' to learn CAP_SYS_PTRACE, which its basic functionality doesn't need.
50663+	*/
50664+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
50665+ if (!proc_fd_access_allowed(inode,0))
50666+ goto out;
50667+ } else {
50668+ if (!proc_fd_access_allowed(inode,1))
50669+ goto out;
50670+ }
50671
50672 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
50673 if (error)
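[Editor's note: the fd/2 carve-out exists because 'ps' resolves the stderr link of every task it lists. The userspace call in question, as a runnable illustration (demo code, not from the patch):]

	#include <limits.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char target[PATH_MAX];
		ssize_t n = readlink("/proc/self/fd/2", target, sizeof(target) - 1);

		if (n < 0) {
			perror("readlink");
			return 1;
		}
		target[n] = '\0';
		printf("stderr -> %s\n", target);	/* typically /dev/pts/N */
		return 0;
	}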
50674@@ -1531,7 +1627,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
50675 rcu_read_lock();
50676 cred = __task_cred(task);
50677 inode->i_uid = cred->euid;
50678+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
50679+ inode->i_gid = grsec_proc_gid;
50680+#else
50681 inode->i_gid = cred->egid;
50682+#endif
50683 rcu_read_unlock();
50684 }
50685 security_task_to_inode(task, inode);
50686@@ -1567,10 +1667,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
50687 return -ENOENT;
50688 }
50689 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
50690+#ifdef CONFIG_GRKERNSEC_PROC_USER
50691+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
50692+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50693+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
50694+#endif
50695 task_dumpable(task)) {
50696 cred = __task_cred(task);
50697 stat->uid = cred->euid;
50698+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
50699+ stat->gid = grsec_proc_gid;
50700+#else
50701 stat->gid = cred->egid;
50702+#endif
50703 }
50704 }
50705 rcu_read_unlock();
50706@@ -1608,11 +1717,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
50707
50708 if (task) {
50709 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
50710+#ifdef CONFIG_GRKERNSEC_PROC_USER
50711+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
50712+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50713+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
50714+#endif
50715 task_dumpable(task)) {
50716 rcu_read_lock();
50717 cred = __task_cred(task);
50718 inode->i_uid = cred->euid;
50719+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
50720+ inode->i_gid = grsec_proc_gid;
50721+#else
50722 inode->i_gid = cred->egid;
50723+#endif
50724 rcu_read_unlock();
50725 } else {
50726 inode->i_uid = GLOBAL_ROOT_UID;
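[Editor's note: for reference, the three "safe" directory modes that pid_getattr() and pid_revalidate() now match, written out with their octal values (standard <linux/stat.h> macros):]

	/*
	 * S_IFDIR|S_IRUGO|S_IXUGO                  = 040555  stock /proc/<pid>
	 * S_IFDIR|S_IRUSR|S_IXUSR                  = 040500  GRKERNSEC_PROC_USER
	 * S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP  = 040550  GRKERNSEC_PROC_USERGROUP
	 *
	 * Only for one of these modes (or a dumpable task) are the real
	 * euid/egid reported; otherwise the ids fall back to root.
	 */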
50727@@ -2065,6 +2183,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
50728 if (!task)
50729 goto out_no_task;
50730
50731+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
50732+ goto out;
50733+
50734 /*
50735 * Yes, it does not scale. And it should not. Don't add
50736 * new entries into /proc/<tgid>/ without very good reasons.
50737@@ -2109,6 +2230,9 @@ static int proc_pident_readdir(struct file *filp,
50738 if (!task)
50739 goto out_no_task;
50740
50741+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
50742+ goto out;
50743+
50744 ret = 0;
50745 i = filp->f_pos;
50746 switch (i) {
50747@@ -2380,7 +2504,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
50748 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
50749 void *cookie)
50750 {
50751- char *s = nd_get_link(nd);
50752+ const char *s = nd_get_link(nd);
50753 if (!IS_ERR(s))
50754 kfree(s);
50755 }
50756@@ -2662,7 +2786,7 @@ static const struct pid_entry tgid_base_stuff[] = {
50757 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
50758 #endif
50759 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
50760-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
50761+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
50762 INF("syscall", S_IRUGO, proc_pid_syscall),
50763 #endif
50764 INF("cmdline", S_IRUGO, proc_pid_cmdline),
50765@@ -2687,10 +2811,10 @@ static const struct pid_entry tgid_base_stuff[] = {
50766 #ifdef CONFIG_SECURITY
50767 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
50768 #endif
50769-#ifdef CONFIG_KALLSYMS
50770+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
50771 INF("wchan", S_IRUGO, proc_pid_wchan),
50772 #endif
50773-#ifdef CONFIG_STACKTRACE
50774+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
50775 ONE("stack", S_IRUGO, proc_pid_stack),
50776 #endif
50777 #ifdef CONFIG_SCHEDSTATS
50778@@ -2724,6 +2848,9 @@ static const struct pid_entry tgid_base_stuff[] = {
50779 #ifdef CONFIG_HARDWALL
50780 INF("hardwall", S_IRUGO, proc_pid_hardwall),
50781 #endif
50782+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
50783+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
50784+#endif
50785 #ifdef CONFIG_USER_NS
50786 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
50787 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
50788@@ -2856,7 +2983,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
50789 if (!inode)
50790 goto out;
50791
50792+#ifdef CONFIG_GRKERNSEC_PROC_USER
50793+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
50794+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50795+ inode->i_gid = grsec_proc_gid;
50796+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
50797+#else
50798 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
50799+#endif
50800 inode->i_op = &proc_tgid_base_inode_operations;
50801 inode->i_fop = &proc_tgid_base_operations;
50802 inode->i_flags|=S_IMMUTABLE;
50803@@ -2898,7 +3032,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
50804 if (!task)
50805 goto out;
50806
50807+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
50808+ goto out_put_task;
50809+
50810 result = proc_pid_instantiate(dir, dentry, task, NULL);
50811+out_put_task:
50812 put_task_struct(task);
50813 out:
50814 return result;
50815@@ -2961,6 +3099,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
50816 static int fake_filldir(void *buf, const char *name, int namelen,
50817 loff_t offset, u64 ino, unsigned d_type)
50818 {
50819+ struct getdents_callback * __buf = (struct getdents_callback *) buf;
50820+ __buf->error = -EINVAL;
50821 return 0;
50822 }
50823
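[Editor's note: fake_filldir() now reports -EINVAL through the caller's cookie instead of faking a clean, empty listing. A simplified sketch of the cookie pattern (types reduced; the real filldir_t uses loff_t and u64):]

	struct getdents_cb_sketch {
		int error;		/* surfaced to the getdents() caller */
	};

	static int fake_filldir_sketch(void *buf, const char *name, int namelen,
				       long long offset, unsigned long long ino,
				       unsigned int d_type)
	{
		struct getdents_cb_sketch *cb = buf;	/* cast the opaque cookie back */

		cb->error = -22;	/* -EINVAL: the listing was suppressed */
		return 0;		/* "entry consumed", emit nothing */
	}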
50824@@ -3027,7 +3167,7 @@ static const struct pid_entry tid_base_stuff[] = {
50825 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
50826 #endif
50827 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
50828-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
50829+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
50830 INF("syscall", S_IRUGO, proc_pid_syscall),
50831 #endif
50832 INF("cmdline", S_IRUGO, proc_pid_cmdline),
50833@@ -3054,10 +3194,10 @@ static const struct pid_entry tid_base_stuff[] = {
50834 #ifdef CONFIG_SECURITY
50835 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
50836 #endif
50837-#ifdef CONFIG_KALLSYMS
50838+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
50839 INF("wchan", S_IRUGO, proc_pid_wchan),
50840 #endif
50841-#ifdef CONFIG_STACKTRACE
50842+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
50843 ONE("stack", S_IRUGO, proc_pid_stack),
50844 #endif
50845 #ifdef CONFIG_SCHEDSTATS
50846diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
50847index 82676e3..5f8518a 100644
50848--- a/fs/proc/cmdline.c
50849+++ b/fs/proc/cmdline.c
50850@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
50851
50852 static int __init proc_cmdline_init(void)
50853 {
50854+#ifdef CONFIG_GRKERNSEC_PROC_ADD
50855+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
50856+#else
50857 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
50858+#endif
50859 return 0;
50860 }
50861 module_init(proc_cmdline_init);
50862diff --git a/fs/proc/devices.c b/fs/proc/devices.c
50863index b143471..bb105e5 100644
50864--- a/fs/proc/devices.c
50865+++ b/fs/proc/devices.c
50866@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
50867
50868 static int __init proc_devices_init(void)
50869 {
50870+#ifdef CONFIG_GRKERNSEC_PROC_ADD
50871+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
50872+#else
50873 proc_create("devices", 0, NULL, &proc_devinfo_operations);
50874+#endif
50875 return 0;
50876 }
50877 module_init(proc_devices_init);
50878diff --git a/fs/proc/fd.c b/fs/proc/fd.c
50879index f28a875..c467953 100644
50880--- a/fs/proc/fd.c
50881+++ b/fs/proc/fd.c
50882@@ -25,7 +25,8 @@ static int seq_show(struct seq_file *m, void *v)
50883 if (!task)
50884 return -ENOENT;
50885
50886- files = get_files_struct(task);
50887+ if (!gr_acl_handle_procpidmem(task))
50888+ files = get_files_struct(task);
50889 put_task_struct(task);
50890
50891 if (files) {
50892@@ -300,11 +301,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
50893 */
50894 int proc_fd_permission(struct inode *inode, int mask)
50895 {
50896+ struct task_struct *task;
50897 int rv = generic_permission(inode, mask);
50898- if (rv == 0)
50899- return 0;
50900+
50901 if (task_pid(current) == proc_pid(inode))
50902 rv = 0;
50903+
50904+ task = get_proc_task(inode);
50905+ if (task == NULL)
50906+ return rv;
50907+
50908+ if (gr_acl_handle_procpidmem(task))
50909+ rv = -EACCES;
50910+
50911+ put_task_struct(task);
50912+
50913 return rv;
50914 }
50915
50916diff --git a/fs/proc/inode.c b/fs/proc/inode.c
50917index 3b22bbd..895b58c 100644
50918--- a/fs/proc/inode.c
50919+++ b/fs/proc/inode.c
50920@@ -21,11 +21,17 @@
50921 #include <linux/seq_file.h>
50922 #include <linux/slab.h>
50923 #include <linux/mount.h>
50924+#include <linux/grsecurity.h>
50925
50926 #include <asm/uaccess.h>
50927
50928 #include "internal.h"
50929
50930+#ifdef CONFIG_PROC_SYSCTL
50931+extern const struct inode_operations proc_sys_inode_operations;
50932+extern const struct inode_operations proc_sys_dir_operations;
50933+#endif
50934+
50935 static void proc_evict_inode(struct inode *inode)
50936 {
50937 struct proc_dir_entry *de;
50938@@ -51,6 +57,13 @@ static void proc_evict_inode(struct inode *inode)
50939 ns_ops = PROC_I(inode)->ns_ops;
50940 if (ns_ops && ns_ops->put)
50941 ns_ops->put(PROC_I(inode)->ns);
50942+
50943+#ifdef CONFIG_PROC_SYSCTL
50944+ if (inode->i_op == &proc_sys_inode_operations ||
50945+ inode->i_op == &proc_sys_dir_operations)
50946+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
50947+#endif
50948+
50949 }
50950
50951 static struct kmem_cache * proc_inode_cachep;
50952@@ -455,7 +468,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
50953 if (de->mode) {
50954 inode->i_mode = de->mode;
50955 inode->i_uid = de->uid;
50956+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
50957+ inode->i_gid = grsec_proc_gid;
50958+#else
50959 inode->i_gid = de->gid;
50960+#endif
50961 }
50962 if (de->size)
50963 inode->i_size = de->size;
50964diff --git a/fs/proc/internal.h b/fs/proc/internal.h
50965index 43973b0..a20e704 100644
50966--- a/fs/proc/internal.h
50967+++ b/fs/proc/internal.h
50968@@ -54,6 +54,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
50969 struct pid *pid, struct task_struct *task);
50970 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
50971 struct pid *pid, struct task_struct *task);
50972+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
50973+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
50974+#endif
50975 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
50976
50977 extern const struct file_operations proc_tid_children_operations;
50978diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
50979index 86c67ee..cdca321 100644
50980--- a/fs/proc/kcore.c
50981+++ b/fs/proc/kcore.c
50982@@ -480,9 +480,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
50983 * the addresses in the elf_phdr on our list.
50984 */
50985 start = kc_offset_to_vaddr(*fpos - elf_buflen);
50986- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
50987+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
50988+ if (tsz > buflen)
50989 tsz = buflen;
50990-
50991+
50992 while (buflen) {
50993 struct kcore_list *m;
50994
50995@@ -511,20 +512,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
50996 kfree(elf_buf);
50997 } else {
50998 if (kern_addr_valid(start)) {
50999- unsigned long n;
51000+ char *elf_buf;
51001+ mm_segment_t oldfs;
51002
51003- n = copy_to_user(buffer, (char *)start, tsz);
51004- /*
51005- * We cannot distinguish between fault on source
51006- * and fault on destination. When this happens
51007- * we clear too and hope it will trigger the
51008- * EFAULT again.
51009- */
51010- if (n) {
51011- if (clear_user(buffer + tsz - n,
51012- n))
51013+ elf_buf = kmalloc(tsz, GFP_KERNEL);
51014+ if (!elf_buf)
51015+ return -ENOMEM;
51016+ oldfs = get_fs();
51017+ set_fs(KERNEL_DS);
51018+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
51019+ set_fs(oldfs);
51020+ if (copy_to_user(buffer, elf_buf, tsz)) {
51021+ kfree(elf_buf);
51022 return -EFAULT;
51023+ }
51024 }
51025+ set_fs(oldfs);
51026+ kfree(elf_buf);
51027 } else {
51028 if (clear_user(buffer, tsz))
51029 return -EFAULT;
51030@@ -544,6 +548,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
51031
51032 static int open_kcore(struct inode *inode, struct file *filp)
51033 {
51034+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
51035+ return -EPERM;
51036+#endif
51037 if (!capable(CAP_SYS_RAWIO))
51038 return -EPERM;
51039 if (kcore_need_update)
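[Editor's note: the read_kcore() rewrite replaces a direct copy_to_user() from a kernel address with a bounce buffer, so a fault on the kernel source is no longer conflated with a fault on the user destination. A tidied, equivalent sketch of the new flow (the patch restores the address limit twice on the success path, which is redundant but harmless; this version restores once):]

	char *bounce = kmalloc(tsz, GFP_KERNEL);
	mm_segment_t oldfs;
	unsigned long err;

	if (!bounce)
		return -ENOMEM;
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	/* read the kernel range as if it were user memory */
	err = __copy_from_user(bounce, (const void __user *)start, tsz);
	set_fs(oldfs);
	if (!err && copy_to_user(buffer, bounce, tsz)) {
		kfree(bounce);
		return -EFAULT;		/* unambiguously a destination fault */
	}
	kfree(bounce);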
51040diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
51041index 80e4645..53e5fcf 100644
51042--- a/fs/proc/meminfo.c
51043+++ b/fs/proc/meminfo.c
51044@@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
51045 vmi.used >> 10,
51046 vmi.largest_chunk >> 10
51047 #ifdef CONFIG_MEMORY_FAILURE
51048- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
51049+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
51050 #endif
51051 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
51052 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
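[Editor's note: this is the first of many atomic_*_unchecked conversions in this section (quota, reiserfs, sysfs and splice below follow the same pattern). Under PAX_REFCOUNT, plain atomic_t operations trap on overflow to stop reference-count wraps; counters that may wrap legitimately (statistics, event/sequence numbers) move to a PaX-specific unchecked type that keeps ordinary wrapping semantics. Roughly, as an assumed shape (x86 flavor):]

	typedef struct {
		int counter;
	} atomic_unchecked_t;

	static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
	{
		return *(volatile const int *)&v->counter;
	}

	static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
	{
		/* like atomic_inc(), minus the overflow trap */
		asm volatile("lock; incl %0" : "+m" (v->counter));
	}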
51053diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
51054index b1822dd..df622cb 100644
51055--- a/fs/proc/nommu.c
51056+++ b/fs/proc/nommu.c
51057@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
51058 if (len < 1)
51059 len = 1;
51060 seq_printf(m, "%*c", len, ' ');
51061- seq_path(m, &file->f_path, "");
51062+ seq_path(m, &file->f_path, "\n\\");
51063 }
51064
51065 seq_putc(m, '\n');
51066diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
51067index fe72cd0..cb9b67d 100644
51068--- a/fs/proc/proc_net.c
51069+++ b/fs/proc/proc_net.c
51070@@ -23,6 +23,7 @@
51071 #include <linux/nsproxy.h>
51072 #include <net/net_namespace.h>
51073 #include <linux/seq_file.h>
51074+#include <linux/grsecurity.h>
51075
51076 #include "internal.h"
51077
51078@@ -105,6 +106,17 @@ static struct net *get_proc_task_net(struct inode *dir)
51079 struct task_struct *task;
51080 struct nsproxy *ns;
51081 struct net *net = NULL;
51082+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51083+ const struct cred *cred = current_cred();
51084+#endif
51085+
51086+#ifdef CONFIG_GRKERNSEC_PROC_USER
51087+ if (cred->fsuid)
51088+ return net;
51089+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51090+ if (cred->fsuid && !in_group_p(grsec_proc_gid))
51091+ return net;
51092+#endif
51093
51094 rcu_read_lock();
51095 task = pid_task(proc_pid(dir), PIDTYPE_PID);
51096diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
51097index a781bdf..6665284 100644
51098--- a/fs/proc/proc_sysctl.c
51099+++ b/fs/proc/proc_sysctl.c
51100@@ -12,11 +12,15 @@
51101 #include <linux/module.h>
51102 #include "internal.h"
51103
51104+extern int gr_handle_chroot_sysctl(const int op);
51105+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
51106+ const int op);
51107+
51108 static const struct dentry_operations proc_sys_dentry_operations;
51109 static const struct file_operations proc_sys_file_operations;
51110-static const struct inode_operations proc_sys_inode_operations;
51111+const struct inode_operations proc_sys_inode_operations;
51112 static const struct file_operations proc_sys_dir_file_operations;
51113-static const struct inode_operations proc_sys_dir_operations;
51114+const struct inode_operations proc_sys_dir_operations;
51115
51116 void proc_sys_poll_notify(struct ctl_table_poll *poll)
51117 {
51118@@ -465,6 +469,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
51119
51120 err = NULL;
51121 d_set_d_op(dentry, &proc_sys_dentry_operations);
51122+
51123+ gr_handle_proc_create(dentry, inode);
51124+
51125 d_add(dentry, inode);
51126
51127 out:
51128@@ -480,18 +487,20 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
51129 struct inode *inode = filp->f_path.dentry->d_inode;
51130 struct ctl_table_header *head = grab_header(inode);
51131 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
51132+ int op = write ? MAY_WRITE : MAY_READ;
51133 ssize_t error;
51134 size_t res;
51135
51136 if (IS_ERR(head))
51137 return PTR_ERR(head);
51138
51139+
51140 /*
51141 * At this point we know that the sysctl was not unregistered
51142 * and won't be until we finish.
51143 */
51144 error = -EPERM;
51145- if (sysctl_perm(head->root, table, write ? MAY_WRITE : MAY_READ))
51146+ if (sysctl_perm(head->root, table, op))
51147 goto out;
51148
51149 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
51150@@ -499,6 +508,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
51151 if (!table->proc_handler)
51152 goto out;
51153
51154+#ifdef CONFIG_GRKERNSEC
51155+ error = -EPERM;
51156+ if (gr_handle_chroot_sysctl(op))
51157+ goto out;
51158+ dget(filp->f_path.dentry);
51159+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
51160+ dput(filp->f_path.dentry);
51161+ goto out;
51162+ }
51163+ dput(filp->f_path.dentry);
51164+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
51165+ goto out;
51166+ if (write && !capable(CAP_SYS_ADMIN))
51167+ goto out;
51168+#endif
51169+
51170 /* careful: calling conventions are nasty here */
51171 res = count;
51172 error = table->proc_handler(table, write, buf, &res, ppos);
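[Editor's note: the GRKERNSEC block above layers four gates in front of every sysctl read/write. Condensed into one helper for readability (names are the ones this patch declares; structure only, not a drop-in):]

	static int gr_sysctl_gate(struct file *filp, struct ctl_table *table,
				  int write, int op)
	{
		if (gr_handle_chroot_sysctl(op))
			return -EPERM;		/* 1: no sysctl writes from chroot */
		dget(filp->f_path.dentry);	/* pin while reading the parent's name */
		if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name,
					 table->procname, op)) {
			dput(filp->f_path.dentry);
			return -EPERM;		/* 2: per-entry RBAC policy */
		}
		dput(filp->f_path.dentry);
		if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
			return -EPERM;		/* 3: generic RBAC open check */
		if (write && !capable(CAP_SYS_ADMIN))
			return -EPERM;		/* 4: every write needs CAP_SYS_ADMIN */
		return 0;
	}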
51173@@ -596,6 +621,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
51174 return -ENOMEM;
51175 } else {
51176 d_set_d_op(child, &proc_sys_dentry_operations);
51177+
51178+ gr_handle_proc_create(child, inode);
51179+
51180 d_add(child, inode);
51181 }
51182 } else {
51183@@ -639,6 +667,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
51184 if ((*pos)++ < file->f_pos)
51185 return 0;
51186
51187+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
51188+ return 0;
51189+
51190 if (unlikely(S_ISLNK(table->mode)))
51191 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
51192 else
51193@@ -756,6 +787,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
51194 if (IS_ERR(head))
51195 return PTR_ERR(head);
51196
51197+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
51198+ return -ENOENT;
51199+
51200 generic_fillattr(inode, stat);
51201 if (table)
51202 stat->mode = (stat->mode & S_IFMT) | table->mode;
51203@@ -778,13 +812,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
51204 .llseek = generic_file_llseek,
51205 };
51206
51207-static const struct inode_operations proc_sys_inode_operations = {
51208+const struct inode_operations proc_sys_inode_operations = {
51209 .permission = proc_sys_permission,
51210 .setattr = proc_sys_setattr,
51211 .getattr = proc_sys_getattr,
51212 };
51213
51214-static const struct inode_operations proc_sys_dir_operations = {
51215+const struct inode_operations proc_sys_dir_operations = {
51216 .lookup = proc_sys_lookup,
51217 .permission = proc_sys_permission,
51218 .setattr = proc_sys_setattr,
51219diff --git a/fs/proc/root.c b/fs/proc/root.c
51220index 9889a92..2613b48 100644
51221--- a/fs/proc/root.c
51222+++ b/fs/proc/root.c
51223@@ -187,7 +187,15 @@ void __init proc_root_init(void)
51224 #ifdef CONFIG_PROC_DEVICETREE
51225 proc_device_tree_init();
51226 #endif
51227+#ifdef CONFIG_GRKERNSEC_PROC_ADD
51228+#ifdef CONFIG_GRKERNSEC_PROC_USER
51229+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
51230+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51231+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
51232+#endif
51233+#else
51234 proc_mkdir("bus", NULL);
51235+#endif
51236 proc_sys_init();
51237 }
51238
51239diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
51240index 90c63f9..e662cfc 100644
51241--- a/fs/proc/task_mmu.c
51242+++ b/fs/proc/task_mmu.c
51243@@ -11,12 +11,19 @@
51244 #include <linux/rmap.h>
51245 #include <linux/swap.h>
51246 #include <linux/swapops.h>
51247+#include <linux/grsecurity.h>
51248
51249 #include <asm/elf.h>
51250 #include <asm/uaccess.h>
51251 #include <asm/tlbflush.h>
51252 #include "internal.h"
51253
51254+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51255+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
51256+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
51257+ _mm->pax_flags & MF_PAX_SEGMEXEC))
51258+#endif
51259+
51260 void task_mem(struct seq_file *m, struct mm_struct *mm)
51261 {
51262 unsigned long data, text, lib, swap;
51263@@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
51264 "VmExe:\t%8lu kB\n"
51265 "VmLib:\t%8lu kB\n"
51266 "VmPTE:\t%8lu kB\n"
51267- "VmSwap:\t%8lu kB\n",
51268- hiwater_vm << (PAGE_SHIFT-10),
51269+ "VmSwap:\t%8lu kB\n"
51270+
51271+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
51272+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
51273+#endif
51274+
51275+ ,hiwater_vm << (PAGE_SHIFT-10),
51276 total_vm << (PAGE_SHIFT-10),
51277 mm->locked_vm << (PAGE_SHIFT-10),
51278 mm->pinned_vm << (PAGE_SHIFT-10),
51279@@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
51280 data << (PAGE_SHIFT-10),
51281 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
51282 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
51283- swap << (PAGE_SHIFT-10));
51284+ swap << (PAGE_SHIFT-10)
51285+
51286+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
51287+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51288+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
51289+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
51290+#else
51291+ , mm->context.user_cs_base
51292+ , mm->context.user_cs_limit
51293+#endif
51294+#endif
51295+
51296+ );
51297 }
51298
51299 unsigned long task_vsize(struct mm_struct *mm)
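[Editor's note: the comma-leading argument style in task_mem() lets each #ifdef block append both a format-string fragment and its matching arguments without breaking the seq_printf() call. A runnable miniature of the same trick (TRACK_EXEC_LIMIT is a stand-in macro):]

	#include <stdio.h>

	#define TRACK_EXEC_LIMIT 1

	int main(void)
	{
		printf("VmSwap:\t%8lu kB\n"
	#ifdef TRACK_EXEC_LIMIT
		       "CsLim:\t%8lx\n"
	#endif
		       , 0UL
	#ifdef TRACK_EXEC_LIMIT
		       , 0xffffffffUL
	#endif
		       );
		return 0;
	}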
51300@@ -277,13 +301,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
51301 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
51302 }
51303
51304- /* We don't show the stack guard page in /proc/maps */
51305+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51306+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
51307+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
51308+#else
51309 start = vma->vm_start;
51310- if (stack_guard_page_start(vma, start))
51311- start += PAGE_SIZE;
51312 end = vma->vm_end;
51313- if (stack_guard_page_end(vma, end))
51314- end -= PAGE_SIZE;
51315+#endif
51316
51317 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
51318 start,
51319@@ -292,7 +316,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
51320 flags & VM_WRITE ? 'w' : '-',
51321 flags & VM_EXEC ? 'x' : '-',
51322 flags & VM_MAYSHARE ? 's' : 'p',
51323+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51324+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
51325+#else
51326 pgoff,
51327+#endif
51328 MAJOR(dev), MINOR(dev), ino, &len);
51329
51330 /*
51331@@ -301,7 +329,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
51332 */
51333 if (file) {
51334 pad_len_spaces(m, len);
51335- seq_path(m, &file->f_path, "\n");
51336+ seq_path(m, &file->f_path, "\n\\");
51337 goto done;
51338 }
51339
51340@@ -327,8 +355,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
51341 * Thread stack in /proc/PID/task/TID/maps or
51342 * the main process stack.
51343 */
51344- if (!is_pid || (vma->vm_start <= mm->start_stack &&
51345- vma->vm_end >= mm->start_stack)) {
51346+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
51347+ (vma->vm_start <= mm->start_stack &&
51348+ vma->vm_end >= mm->start_stack)) {
51349 name = "[stack]";
51350 } else {
51351 /* Thread stack in /proc/PID/maps */
51352@@ -352,6 +381,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
51353 struct proc_maps_private *priv = m->private;
51354 struct task_struct *task = priv->task;
51355
51356+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51357+ if (current->exec_id != m->exec_id) {
51358+ gr_log_badprocpid("maps");
51359+ return 0;
51360+ }
51361+#endif
51362+
51363 show_map_vma(m, vma, is_pid);
51364
51365 if (m->count < m->size) /* vma is copied successfully */
51366@@ -538,12 +574,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
51367 .private = &mss,
51368 };
51369
51370+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51371+ if (current->exec_id != m->exec_id) {
51372+ gr_log_badprocpid("smaps");
51373+ return 0;
51374+ }
51375+#endif
51376 memset(&mss, 0, sizeof mss);
51377- mss.vma = vma;
51378- /* mmap_sem is held in m_start */
51379- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
51380- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
51381-
51382+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51383+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
51384+#endif
51385+ mss.vma = vma;
51386+ /* mmap_sem is held in m_start */
51387+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
51388+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
51389+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51390+ }
51391+#endif
51392 show_map_vma(m, vma, is_pid);
51393
51394 seq_printf(m,
51395@@ -561,7 +608,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
51396 "KernelPageSize: %8lu kB\n"
51397 "MMUPageSize: %8lu kB\n"
51398 "Locked: %8lu kB\n",
51399+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51400+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
51401+#else
51402 (vma->vm_end - vma->vm_start) >> 10,
51403+#endif
51404 mss.resident >> 10,
51405 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
51406 mss.shared_clean >> 10,
51407@@ -1211,6 +1262,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
51408 int n;
51409 char buffer[50];
51410
51411+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51412+ if (current->exec_id != m->exec_id) {
51413+ gr_log_badprocpid("numa_maps");
51414+ return 0;
51415+ }
51416+#endif
51417+
51418 if (!mm)
51419 return 0;
51420
51421@@ -1228,11 +1286,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
51422 mpol_to_str(buffer, sizeof(buffer), pol, 0);
51423 mpol_cond_put(pol);
51424
51425+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51426+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
51427+#else
51428 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
51429+#endif
51430
51431 if (file) {
51432 seq_printf(m, " file=");
51433- seq_path(m, &file->f_path, "\n\t= ");
51434+ seq_path(m, &file->f_path, "\n\t\\= ");
51435 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
51436 seq_printf(m, " heap");
51437 } else {
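[Editor's note: the net effect of the PAX_RAND_FLAGS() plumbing is directly observable. With GRKERNSEC_PROC_MEMMAP and PaX RANDMMAP/SEGMEXEC active, another process reading your maps sees zeroed address ranges, while reading your own (mm == current->mm) still shows real ones. A trivial runnable reader:]

	#include <stdio.h>

	int main(void)
	{
		char line[512];
		FILE *f = fopen("/proc/self/maps", "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);	/* own mapping: real addresses */
		fclose(f);
		return 0;
	}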
51438diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
51439index 1ccfa53..0848f95 100644
51440--- a/fs/proc/task_nommu.c
51441+++ b/fs/proc/task_nommu.c
51442@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
51443 else
51444 bytes += kobjsize(mm);
51445
51446- if (current->fs && current->fs->users > 1)
51447+ if (current->fs && atomic_read(&current->fs->users) > 1)
51448 sbytes += kobjsize(current->fs);
51449 else
51450 bytes += kobjsize(current->fs);
51451@@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
51452
51453 if (file) {
51454 pad_len_spaces(m, len);
51455- seq_path(m, &file->f_path, "");
51456+ seq_path(m, &file->f_path, "\n\\");
51457 } else if (mm) {
51458 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
51459
51460diff --git a/fs/pstore/ftrace.c b/fs/pstore/ftrace.c
51461index 2d57e1a..43b1280 100644
51462--- a/fs/pstore/ftrace.c
51463+++ b/fs/pstore/ftrace.c
51464@@ -28,7 +28,9 @@
51465 #include "internal.h"
51466
51467 static void notrace pstore_ftrace_call(unsigned long ip,
51468- unsigned long parent_ip)
51469+ unsigned long parent_ip,
51470+ struct ftrace_ops *op,
51471+ struct pt_regs *regs)
51472 {
51473 unsigned long flags;
51474 struct pstore_ftrace_record rec = {};
51475diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
51476index 16e8abb..2dcf914 100644
51477--- a/fs/quota/netlink.c
51478+++ b/fs/quota/netlink.c
51479@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
51480 void quota_send_warning(struct kqid qid, dev_t dev,
51481 const char warntype)
51482 {
51483- static atomic_t seq;
51484+ static atomic_unchecked_t seq;
51485 struct sk_buff *skb;
51486 void *msg_head;
51487 int ret;
51488@@ -49,7 +49,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
51489 "VFS: Not enough memory to send quota warning.\n");
51490 return;
51491 }
51492- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
51493+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
51494 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
51495 if (!msg_head) {
51496 printk(KERN_ERR
51497diff --git a/fs/read_write.c b/fs/read_write.c
51498index d065348..8e2b43d 100644
51499--- a/fs/read_write.c
51500+++ b/fs/read_write.c
51501@@ -935,6 +935,8 @@ ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, size_t count,
51502 if (retval > 0) {
51503 add_rchar(current, retval);
51504 add_wchar(current, retval);
51505+ fsnotify_access(in.file);
51506+ fsnotify_modify(out.file);
51507 }
51508
51509 inc_syscr(current);
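[Editor's note: with the added fsnotify hooks, sendfile() now raises IN_ACCESS on the source file and IN_MODIFY on the destination, so inotify watchers are no longer blind to sendfile-based copies. A runnable watcher to check the behavior (demo code, not from the patch):]

	#include <stdio.h>
	#include <sys/inotify.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		char buf[4096];
		ssize_t n;
		int fd;

		if (argc < 2) {
			fprintf(stderr, "usage: %s <file-to-watch>\n", argv[0]);
			return 1;
		}
		fd = inotify_init();
		if (fd < 0 || inotify_add_watch(fd, argv[1], IN_ACCESS | IN_MODIFY) < 0) {
			perror("inotify");
			return 1;
		}
		n = read(fd, buf, sizeof(buf));	/* blocks until the file is touched */
		printf("got %zd bytes of inotify events\n", n);
		return 0;
	}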
51510diff --git a/fs/readdir.c b/fs/readdir.c
51511index 5e69ef5..e5d9099 100644
51512--- a/fs/readdir.c
51513+++ b/fs/readdir.c
51514@@ -17,6 +17,7 @@
51515 #include <linux/security.h>
51516 #include <linux/syscalls.h>
51517 #include <linux/unistd.h>
51518+#include <linux/namei.h>
51519
51520 #include <asm/uaccess.h>
51521
51522@@ -67,6 +68,7 @@ struct old_linux_dirent {
51523
51524 struct readdir_callback {
51525 struct old_linux_dirent __user * dirent;
51526+ struct file * file;
51527 int result;
51528 };
51529
51530@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
51531 buf->result = -EOVERFLOW;
51532 return -EOVERFLOW;
51533 }
51534+
51535+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
51536+ return 0;
51537+
51538 buf->result++;
51539 dirent = buf->dirent;
51540 if (!access_ok(VERIFY_WRITE, dirent,
51541@@ -114,6 +120,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
51542
51543 buf.result = 0;
51544 buf.dirent = dirent;
51545+ buf.file = f.file;
51546
51547 error = vfs_readdir(f.file, fillonedir, &buf);
51548 if (buf.result)
51549@@ -139,6 +146,7 @@ struct linux_dirent {
51550 struct getdents_callback {
51551 struct linux_dirent __user * current_dir;
51552 struct linux_dirent __user * previous;
51553+ struct file * file;
51554 int count;
51555 int error;
51556 };
51557@@ -160,6 +168,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
51558 buf->error = -EOVERFLOW;
51559 return -EOVERFLOW;
51560 }
51561+
51562+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
51563+ return 0;
51564+
51565 dirent = buf->previous;
51566 if (dirent) {
51567 if (__put_user(offset, &dirent->d_off))
51568@@ -205,6 +217,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
51569 buf.previous = NULL;
51570 buf.count = count;
51571 buf.error = 0;
51572+ buf.file = f.file;
51573
51574 error = vfs_readdir(f.file, filldir, &buf);
51575 if (error >= 0)
51576@@ -223,6 +236,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
51577 struct getdents_callback64 {
51578 struct linux_dirent64 __user * current_dir;
51579 struct linux_dirent64 __user * previous;
51580+ struct file *file;
51581 int count;
51582 int error;
51583 };
51584@@ -238,6 +252,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
51585 buf->error = -EINVAL; /* only used if we fail.. */
51586 if (reclen > buf->count)
51587 return -EINVAL;
51588+
51589+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
51590+ return 0;
51591+
51592 dirent = buf->previous;
51593 if (dirent) {
51594 if (__put_user(offset, &dirent->d_off))
51595@@ -283,6 +301,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
51596
51597 buf.current_dir = dirent;
51598 buf.previous = NULL;
51599+ buf.file = f.file;
51600 buf.count = count;
51601 buf.error = 0;
51602
51603@@ -291,7 +310,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
51604 error = buf.error;
51605 lastdirent = buf.previous;
51606 if (lastdirent) {
51607- typeof(lastdirent->d_off) d_off = f.file->f_pos;
51608+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
51609 if (__put_user(d_off, &lastdirent->d_off))
51610 error = -EFAULT;
51611 else
51612diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
51613index 2b7882b..1c5ef48 100644
51614--- a/fs/reiserfs/do_balan.c
51615+++ b/fs/reiserfs/do_balan.c
51616@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
51617 return;
51618 }
51619
51620- atomic_inc(&(fs_generation(tb->tb_sb)));
51621+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
51622 do_balance_starts(tb);
51623
51624 /* balance leaf returns 0 except if combining L R and S into
51625diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
51626index e60e870..f40ac16 100644
51627--- a/fs/reiserfs/procfs.c
51628+++ b/fs/reiserfs/procfs.c
51629@@ -112,7 +112,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
51630 "SMALL_TAILS " : "NO_TAILS ",
51631 replay_only(sb) ? "REPLAY_ONLY " : "",
51632 convert_reiserfs(sb) ? "CONV " : "",
51633- atomic_read(&r->s_generation_counter),
51634+ atomic_read_unchecked(&r->s_generation_counter),
51635 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
51636 SF(s_do_balance), SF(s_unneeded_left_neighbor),
51637 SF(s_good_search_by_key_reada), SF(s_bmaps),
51638diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
51639index 33215f5..c5d427a 100644
51640--- a/fs/reiserfs/reiserfs.h
51641+++ b/fs/reiserfs/reiserfs.h
51642@@ -453,7 +453,7 @@ struct reiserfs_sb_info {
51643 /* Comment? -Hans */
51644 wait_queue_head_t s_wait;
51645 /* To be obsoleted soon by per buffer seals.. -Hans */
51646- atomic_t s_generation_counter; // increased by one every time the
51647+ atomic_unchecked_t s_generation_counter; // increased by one every time the
51648 // tree gets re-balanced
51649 unsigned long s_properties; /* File system properties. Currently holds
51650 on-disk FS format */
51651@@ -1978,7 +1978,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
51652 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
51653
51654 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
51655-#define get_generation(s) atomic_read (&fs_generation(s))
51656+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
51657 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
51658 #define __fs_changed(gen,s) (gen != get_generation (s))
51659 #define fs_changed(gen,s) \
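[Editor's note: usage sketch for the generation counter behind these conversions, using the macros defined in the hunk above: sample it, do optimistic tree work, retry if a concurrent re-balance bumped it. The value is a heuristic that may wrap harmlessly, which is why it moves to the unchecked atomic type. Fragment, assuming a tree_balance *tb in scope:]

	int fs_gen;

	retry:
		fs_gen = get_generation(tb->tb_sb);
		/* ... search and prepare the balance without exclusive locking ... */
		if (__fs_changed(fs_gen, tb->tb_sb))
			goto retry;	/* tree moved under us: start over */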
51660diff --git a/fs/select.c b/fs/select.c
51661index 2ef72d9..f213b17 100644
51662--- a/fs/select.c
51663+++ b/fs/select.c
51664@@ -20,6 +20,7 @@
51665 #include <linux/export.h>
51666 #include <linux/slab.h>
51667 #include <linux/poll.h>
51668+#include <linux/security.h>
51669 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
51670 #include <linux/file.h>
51671 #include <linux/fdtable.h>
51672@@ -826,6 +827,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
51673 struct poll_list *walk = head;
51674 unsigned long todo = nfds;
51675
51676+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
51677 if (nfds > rlimit(RLIMIT_NOFILE))
51678 return -EINVAL;
51679
51680diff --git a/fs/seq_file.c b/fs/seq_file.c
51681index 99dffab..e4fcb71 100644
51682--- a/fs/seq_file.c
51683+++ b/fs/seq_file.c
51684@@ -10,6 +10,7 @@
51685 #include <linux/seq_file.h>
51686 #include <linux/slab.h>
51687 #include <linux/cred.h>
51688+#include <linux/sched.h>
51689
51690 #include <asm/uaccess.h>
51691 #include <asm/page.h>
51692@@ -60,6 +61,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
51693 #ifdef CONFIG_USER_NS
51694 p->user_ns = file->f_cred->user_ns;
51695 #endif
51696+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51697+ p->exec_id = current->exec_id;
51698+#endif
51699
51700 /*
51701 * Wrappers around seq_open(e.g. swaps_open) need to be
51702@@ -96,7 +100,7 @@ static int traverse(struct seq_file *m, loff_t offset)
51703 return 0;
51704 }
51705 if (!m->buf) {
51706- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
51707+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
51708 if (!m->buf)
51709 return -ENOMEM;
51710 }
51711@@ -136,7 +140,7 @@ static int traverse(struct seq_file *m, loff_t offset)
51712 Eoverflow:
51713 m->op->stop(m, p);
51714 kfree(m->buf);
51715- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
51716+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
51717 return !m->buf ? -ENOMEM : -EAGAIN;
51718 }
51719
51720@@ -191,7 +195,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
51721
51722 /* grab buffer if we didn't have one */
51723 if (!m->buf) {
51724- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
51725+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
51726 if (!m->buf)
51727 goto Enomem;
51728 }
51729@@ -232,7 +236,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
51730 goto Fill;
51731 m->op->stop(m, p);
51732 kfree(m->buf);
51733- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
51734+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
51735 if (!m->buf)
51736 goto Enomem;
51737 m->count = 0;
51738@@ -581,7 +585,7 @@ static void single_stop(struct seq_file *p, void *v)
51739 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
51740 void *data)
51741 {
51742- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
51743+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
51744 int res = -ENOMEM;
51745
51746 if (op) {
51747diff --git a/fs/splice.c b/fs/splice.c
51748index 48c7bd1..d0740e4 100644
51749--- a/fs/splice.c
51750+++ b/fs/splice.c
51751@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
51752 pipe_lock(pipe);
51753
51754 for (;;) {
51755- if (!pipe->readers) {
51756+ if (!atomic_read(&pipe->readers)) {
51757 send_sig(SIGPIPE, current, 0);
51758 if (!ret)
51759 ret = -EPIPE;
51760@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
51761 do_wakeup = 0;
51762 }
51763
51764- pipe->waiting_writers++;
51765+ atomic_inc(&pipe->waiting_writers);
51766 pipe_wait(pipe);
51767- pipe->waiting_writers--;
51768+ atomic_dec(&pipe->waiting_writers);
51769 }
51770
51771 pipe_unlock(pipe);
51772@@ -563,7 +563,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
51773 old_fs = get_fs();
51774 set_fs(get_ds());
51775 /* The cast to a user pointer is valid due to the set_fs() */
51776- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
51777+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
51778 set_fs(old_fs);
51779
51780 return res;
51781@@ -578,7 +578,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
51782 old_fs = get_fs();
51783 set_fs(get_ds());
51784 /* The cast to a user pointer is valid due to the set_fs() */
51785- res = vfs_write(file, (const char __user *)buf, count, &pos);
51786+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
51787 set_fs(old_fs);
51788
51789 return res;
51790@@ -630,7 +630,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
51791 goto err;
51792
51793 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
51794- vec[i].iov_base = (void __user *) page_address(page);
51795+ vec[i].iov_base = (void __force_user *) page_address(page);
51796 vec[i].iov_len = this_len;
51797 spd.pages[i] = page;
51798 spd.nr_pages++;
51799@@ -851,10 +851,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
51800 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
51801 {
51802 while (!pipe->nrbufs) {
51803- if (!pipe->writers)
51804+ if (!atomic_read(&pipe->writers))
51805 return 0;
51806
51807- if (!pipe->waiting_writers && sd->num_spliced)
51808+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
51809 return 0;
51810
51811 if (sd->flags & SPLICE_F_NONBLOCK)
51812@@ -1192,7 +1192,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
51813 * out of the pipe right after the splice_to_pipe(). So set
51814 * PIPE_READERS appropriately.
51815 */
51816- pipe->readers = 1;
51817+ atomic_set(&pipe->readers, 1);
51818
51819 current->splice_pipe = pipe;
51820 }
51821@@ -1741,9 +1741,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
51822 ret = -ERESTARTSYS;
51823 break;
51824 }
51825- if (!pipe->writers)
51826+ if (!atomic_read(&pipe->writers))
51827 break;
51828- if (!pipe->waiting_writers) {
51829+ if (!atomic_read(&pipe->waiting_writers)) {
51830 if (flags & SPLICE_F_NONBLOCK) {
51831 ret = -EAGAIN;
51832 break;
51833@@ -1775,7 +1775,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
51834 pipe_lock(pipe);
51835
51836 while (pipe->nrbufs >= pipe->buffers) {
51837- if (!pipe->readers) {
51838+ if (!atomic_read(&pipe->readers)) {
51839 send_sig(SIGPIPE, current, 0);
51840 ret = -EPIPE;
51841 break;
51842@@ -1788,9 +1788,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
51843 ret = -ERESTARTSYS;
51844 break;
51845 }
51846- pipe->waiting_writers++;
51847+ atomic_inc(&pipe->waiting_writers);
51848 pipe_wait(pipe);
51849- pipe->waiting_writers--;
51850+ atomic_dec(&pipe->waiting_writers);
51851 }
51852
51853 pipe_unlock(pipe);
51854@@ -1826,14 +1826,14 @@ retry:
51855 pipe_double_lock(ipipe, opipe);
51856
51857 do {
51858- if (!opipe->readers) {
51859+ if (!atomic_read(&opipe->readers)) {
51860 send_sig(SIGPIPE, current, 0);
51861 if (!ret)
51862 ret = -EPIPE;
51863 break;
51864 }
51865
51866- if (!ipipe->nrbufs && !ipipe->writers)
51867+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
51868 break;
51869
51870 /*
51871@@ -1930,7 +1930,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
51872 pipe_double_lock(ipipe, opipe);
51873
51874 do {
51875- if (!opipe->readers) {
51876+ if (!atomic_read(&opipe->readers)) {
51877 send_sig(SIGPIPE, current, 0);
51878 if (!ret)
51879 ret = -EPIPE;
51880@@ -1975,7 +1975,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
51881 * return EAGAIN if we have the potential of some data in the
51882 * future, otherwise just return 0
51883 */
51884- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
51885+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
51886 ret = -EAGAIN;
51887
51888 pipe_unlock(ipipe);
51889diff --git a/fs/stat.c b/fs/stat.c
51890index eae4946..6198f55 100644
51891--- a/fs/stat.c
51892+++ b/fs/stat.c
51893@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
51894 stat->gid = inode->i_gid;
51895 stat->rdev = inode->i_rdev;
51896 stat->size = i_size_read(inode);
51897- stat->atime = inode->i_atime;
51898- stat->mtime = inode->i_mtime;
51899+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
51900+ stat->atime = inode->i_ctime;
51901+ stat->mtime = inode->i_ctime;
51902+ } else {
51903+ stat->atime = inode->i_atime;
51904+ stat->mtime = inode->i_mtime;
51905+ }
51906 stat->ctime = inode->i_ctime;
51907 stat->blksize = (1 << inode->i_blkbits);
51908 stat->blocks = inode->i_blocks;
51909@@ -46,8 +51,14 @@ int vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
51910 if (retval)
51911 return retval;
51912
51913- if (inode->i_op->getattr)
51914- return inode->i_op->getattr(mnt, dentry, stat);
51915+ if (inode->i_op->getattr) {
51916+ retval = inode->i_op->getattr(mnt, dentry, stat);
51917+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
51918+ stat->atime = stat->ctime;
51919+ stat->mtime = stat->ctime;
51920+ }
51921+ return retval;
51922+ }
51923
51924 generic_fillattr(inode, stat);
51925 return 0;
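[Editor's note: is_sidechannel_device() is a helper added elsewhere in this patch (it appears to cover terminal-style device nodes). For callers without CAP_MKNOD, stat() then reports ctime in place of atime/mtime, hiding when another user last read or wrote the device. Observable from userspace (illustrative):]

	#include <stdio.h>
	#include <sys/stat.h>

	int main(void)
	{
		struct stat st;

		if (stat("/dev/tty", &st) != 0) {	/* any tty node works */
			perror("stat");
			return 1;
		}
		/* under the patch, unprivileged callers see atime == mtime == ctime */
		printf("atime=%ld mtime=%ld ctime=%ld\n",
		       (long)st.st_atime, (long)st.st_mtime, (long)st.st_ctime);
		return 0;
	}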
51926diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
51927index 2fbdff6..5530a61 100644
51928--- a/fs/sysfs/dir.c
51929+++ b/fs/sysfs/dir.c
51930@@ -685,6 +685,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
51931 struct sysfs_dirent *sd;
51932 int rc;
51933
51934+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
51935+ const char *parent_name = parent_sd->s_name;
51936+
51937+ mode = S_IFDIR | S_IRWXU;
51938+
51939+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
51940+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
51941+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
51942+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
51943+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
51944+#endif
51945+
51946 /* allocate */
51947 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
51948 if (!sd)
51949diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
51950index 00012e3..8392349 100644
51951--- a/fs/sysfs/file.c
51952+++ b/fs/sysfs/file.c
51953@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
51954
51955 struct sysfs_open_dirent {
51956 atomic_t refcnt;
51957- atomic_t event;
51958+ atomic_unchecked_t event;
51959 wait_queue_head_t poll;
51960 struct list_head buffers; /* goes through sysfs_buffer.list */
51961 };
51962@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
51963 if (!sysfs_get_active(attr_sd))
51964 return -ENODEV;
51965
51966- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
51967+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
51968 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
51969
51970 sysfs_put_active(attr_sd);
51971@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
51972 return -ENOMEM;
51973
51974 atomic_set(&new_od->refcnt, 0);
51975- atomic_set(&new_od->event, 1);
51976+ atomic_set_unchecked(&new_od->event, 1);
51977 init_waitqueue_head(&new_od->poll);
51978 INIT_LIST_HEAD(&new_od->buffers);
51979 goto retry;
51980@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
51981
51982 sysfs_put_active(attr_sd);
51983
51984- if (buffer->event != atomic_read(&od->event))
51985+ if (buffer->event != atomic_read_unchecked(&od->event))
51986 goto trigger;
51987
51988 return DEFAULT_POLLMASK;
51989@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
51990
51991 od = sd->s_attr.open;
51992 if (od) {
51993- atomic_inc(&od->event);
51994+ atomic_inc_unchecked(&od->event);
51995 wake_up_interruptible(&od->poll);
51996 }
51997
51998diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
51999index 3c9eb56..9dea5be 100644
52000--- a/fs/sysfs/symlink.c
52001+++ b/fs/sysfs/symlink.c
52002@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
52003
52004 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
52005 {
52006- char *page = nd_get_link(nd);
52007+ const char *page = nd_get_link(nd);
52008 if (!IS_ERR(page))
52009 free_page((unsigned long)page);
52010 }
52011diff --git a/fs/udf/misc.c b/fs/udf/misc.c
52012index c175b4d..8f36a16 100644
52013--- a/fs/udf/misc.c
52014+++ b/fs/udf/misc.c
52015@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
52016
52017 u8 udf_tag_checksum(const struct tag *t)
52018 {
52019- u8 *data = (u8 *)t;
52020+ const u8 *data = (const u8 *)t;
52021 u8 checksum = 0;
52022 int i;
52023 for (i = 0; i < sizeof(struct tag); ++i)
52024diff --git a/fs/utimes.c b/fs/utimes.c
52025index bb0696a..552054b 100644
52026--- a/fs/utimes.c
52027+++ b/fs/utimes.c
52028@@ -1,6 +1,7 @@
52029 #include <linux/compiler.h>
52030 #include <linux/file.h>
52031 #include <linux/fs.h>
52032+#include <linux/security.h>
52033 #include <linux/linkage.h>
52034 #include <linux/mount.h>
52035 #include <linux/namei.h>
52036@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
52037 goto mnt_drop_write_and_out;
52038 }
52039 }
52040+
52041+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
52042+ error = -EACCES;
52043+ goto mnt_drop_write_and_out;
52044+ }
52045+
52046 mutex_lock(&inode->i_mutex);
52047 error = notify_change(path->dentry, &newattrs);
52048 mutex_unlock(&inode->i_mutex);
52049diff --git a/fs/xattr.c b/fs/xattr.c
52050index e21c119..21dfc7c 100644
52051--- a/fs/xattr.c
52052+++ b/fs/xattr.c
52053@@ -319,7 +319,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
52054 * Extended attribute SET operations
52055 */
52056 static long
52057-setxattr(struct dentry *d, const char __user *name, const void __user *value,
52058+setxattr(struct path *path, const char __user *name, const void __user *value,
52059 size_t size, int flags)
52060 {
52061 int error;
52062@@ -355,7 +355,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
52063 posix_acl_fix_xattr_from_user(kvalue, size);
52064 }
52065
52066- error = vfs_setxattr(d, kname, kvalue, size, flags);
52067+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
52068+ error = -EACCES;
52069+ goto out;
52070+ }
52071+
52072+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
52073 out:
52074 if (vvalue)
52075 vfree(vvalue);
52076@@ -376,7 +381,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
52077 return error;
52078 error = mnt_want_write(path.mnt);
52079 if (!error) {
52080- error = setxattr(path.dentry, name, value, size, flags);
52081+ error = setxattr(&path, name, value, size, flags);
52082 mnt_drop_write(path.mnt);
52083 }
52084 path_put(&path);
52085@@ -395,7 +400,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
52086 return error;
52087 error = mnt_want_write(path.mnt);
52088 if (!error) {
52089- error = setxattr(path.dentry, name, value, size, flags);
52090+ error = setxattr(&path, name, value, size, flags);
52091 mnt_drop_write(path.mnt);
52092 }
52093 path_put(&path);
52094@@ -406,16 +411,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
52095 const void __user *,value, size_t, size, int, flags)
52096 {
52097 struct fd f = fdget(fd);
52098- struct dentry *dentry;
52099 int error = -EBADF;
52100
52101 if (!f.file)
52102 return error;
52103- dentry = f.file->f_path.dentry;
52104- audit_inode(NULL, dentry, 0);
52105+ audit_inode(NULL, f.file->f_path.dentry, 0);
52106 error = mnt_want_write_file(f.file);
52107 if (!error) {
52108- error = setxattr(dentry, name, value, size, flags);
52109+ error = setxattr(&f.file->f_path, name, value, size, flags);
52110 mnt_drop_write_file(f.file);
52111 }
52112 fdput(f);
52113diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
52114index 9fbea87..6b19972 100644
52115--- a/fs/xattr_acl.c
52116+++ b/fs/xattr_acl.c
52117@@ -76,8 +76,8 @@ struct posix_acl *
52118 posix_acl_from_xattr(struct user_namespace *user_ns,
52119 const void *value, size_t size)
52120 {
52121- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
52122- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
52123+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
52124+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
52125 int count;
52126 struct posix_acl *acl;
52127 struct posix_acl_entry *acl_e;
52128diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
52129index 83d0cf3..2ef526b 100644
52130--- a/fs/xfs/xfs_bmap.c
52131+++ b/fs/xfs/xfs_bmap.c
52132@@ -189,7 +189,7 @@ xfs_bmap_validate_ret(
52133 int nmap,
52134 int ret_nmap);
52135 #else
52136-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
52137+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
52138 #endif /* DEBUG */
52139
52140 STATIC int
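[Editor's note: replacing an empty macro body with do { } while (0) is standard macro hygiene: the expansion stays exactly one statement, so it composes with if/else and avoids empty-body warnings. The classic hazard, shown with a multi-statement macro (LOG_BAD/LOG_GOOD are illustrative; the program is runnable as written):]

	#include <stdio.h>

	#define LOG_BAD(x)  printf("bad "); printf("%d\n", (x))
	#define LOG_GOOD(x) do { printf("good "); printf("%d\n", (x)); } while (0)

	int main(void)
	{
		int v = 1;

		if (v < 0)
			LOG_GOOD(v);	/* one statement: pairs cleanly with else */
		else
			printf("non-negative\n");
		/* swapping in LOG_BAD above fails to compile: after expansion
		 * the else no longer has an if to attach to */
		return 0;
	}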
52141diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
52142index 1b9fc3e..e1bdde0 100644
52143--- a/fs/xfs/xfs_dir2_sf.c
52144+++ b/fs/xfs/xfs_dir2_sf.c
52145@@ -851,7 +851,15 @@ xfs_dir2_sf_getdents(
52146 }
52147
52148 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
52149- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
52150+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
52151+ char name[sfep->namelen];
52152+ memcpy(name, sfep->name, sfep->namelen);
52153+ if (filldir(dirent, name, sfep->namelen,
52154+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
52155+ *offset = off & 0x7fffffff;
52156+ return 0;
52157+ }
52158+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
52159 off & 0x7fffffff, ino, DT_UNKNOWN)) {
52160 *offset = off & 0x7fffffff;
52161 return 0;
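[Editor's note: the getdents change bounces entry names through a stack buffer whenever they live in the inode's inline data fork, presumably so the eventual copy_to_user() inside filldir never reads straight out of the inode slab, which the PAX_USERCOPY checks introduced elsewhere in this patch would flag. The inline case, isolated:]

	if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
		char name[sfep->namelen];	/* bounded VLA: XFS names are <= 255 bytes */

		memcpy(name, sfep->name, sfep->namelen);
		/* ... filldir(dirent, name, sfep->namelen, ...) ... */
	}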
52162diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
52163index c1df3c6..f987db6 100644
52164--- a/fs/xfs/xfs_ioctl.c
52165+++ b/fs/xfs/xfs_ioctl.c
52166@@ -126,7 +126,7 @@ xfs_find_handle(
52167 }
52168
52169 error = -EFAULT;
52170- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
52171+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
52172 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
52173 goto out_put;
52174
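[Editor's note: the xfs_find_handle() change is the standard clamp-before-copy_to_user pattern. hsize is computed from the handle layout, but bounding it by sizeof(handle) makes the kernel-stack read provably in-bounds even if a future change lets hsize grow. Isolated:]

	if (hsize > sizeof(handle) ||
	    copy_to_user(hreq->ohandle, &handle, hsize) ||
	    copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
		goto out_put;	/* error is still -EFAULT */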
52175diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
52176index 4e00cf0..3374374 100644
52177--- a/fs/xfs/xfs_iops.c
52178+++ b/fs/xfs/xfs_iops.c
52179@@ -394,7 +394,7 @@ xfs_vn_put_link(
52180 struct nameidata *nd,
52181 void *p)
52182 {
52183- char *s = nd_get_link(nd);
52184+ const char *s = nd_get_link(nd);
52185
52186 if (!IS_ERR(s))
52187 kfree(s);
52188diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
52189new file mode 100644
52190index 0000000..5ce8347
52191--- /dev/null
52192+++ b/grsecurity/Kconfig
52193@@ -0,0 +1,1015 @@
52194+#
52195+# grsecurity configuration
52196+#
52197+menu "Memory Protections"
52198+depends on GRKERNSEC
52199+
52200+config GRKERNSEC_KMEM
52201+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
52202+ default y if GRKERNSEC_CONFIG_AUTO
52203+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
52204+ help
52205+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
52206+ be written to or read from to modify or leak the contents of the running
52207+ kernel. /dev/port will also not be allowed to be opened. If you have module
52208+ support disabled, enabling this will close up four ways that are
52209+ currently used to insert malicious code into the running kernel.
52210+ Even with all these features enabled, we still highly recommend that
52211+ you use the RBAC system, as it is still possible for an attacker to
52212+ modify the running kernel through privileged I/O granted by ioperm/iopl.
52213+ If you are not using XFree86, you may be able to stop this additional
52214+ case by enabling the 'Disable privileged I/O' option. Though nothing
52215+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
52216+ but only to video memory, which is the only writing we allow in this
52217+	  case. If /dev/kmem or /dev/mem is mmapped without PROT_WRITE, the
52218+	  mapping will not be allowed to be mprotect()ed with PROT_WRITE later.
52219+ It is highly recommended that you say Y here if you meet all the
52220+ conditions above.
52221+
52222+config GRKERNSEC_VM86
52223+ bool "Restrict VM86 mode"
52224+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
52225+ depends on X86_32
52226+
52227+ help
52228+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
52229+ make use of a special execution mode on 32bit x86 processors called
52230+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
52231+ video cards and will still work with this option enabled. The purpose
52232+ of the option is to prevent exploitation of emulation errors in
52233+	  virtualization of vm86 mode like the one discovered in VMware in 2009.
52234+ Nearly all users should be able to enable this option.
52235+
52236+config GRKERNSEC_IO
52237+ bool "Disable privileged I/O"
52238+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
52239+ depends on X86
52240+ select RTC_CLASS
52241+ select RTC_INTF_DEV
52242+ select RTC_DRV_CMOS
52243+
52244+ help
52245+ If you say Y here, all ioperm and iopl calls will return an error.
52246+ Ioperm and iopl can be used to modify the running kernel.
52247+ Unfortunately, some programs need this access to operate properly,
52248+ the most notable of which are XFree86 and hwclock. hwclock can be
52249+ remedied by having RTC support in the kernel, so real-time
52250+ clock support is enabled if this option is enabled, to ensure
52251+ that hwclock operates correctly. XFree86 still will not
52252+ operate correctly with this option enabled, so DO NOT CHOOSE Y
52253+ IF YOU USE XFree86. If you use XFree86 and you still want to
52254+ protect your kernel against modification, use the RBAC system.
52255+
52256+config GRKERNSEC_JIT_HARDEN
52257+ bool "Harden BPF JIT against spray attacks"
52258+ default y if GRKERNSEC_CONFIG_AUTO
52259+ depends on BPF_JIT
52260+ help
52261+ If you say Y here, the native code generated by the kernel's Berkeley
52262+ Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
52263+ attacks that attempt to fit attacker-beneficial instructions in
52264+ 32bit immediate fields of JIT-generated native instructions. The
52265+ attacker will generally aim to cause an unintended instruction sequence
52266+ of JIT-generated native code to execute by jumping into the middle of
52267+ a generated instruction. This feature effectively randomizes the 32bit
52268+ immediate constants present in the generated code to thwart such attacks.
52269+
52270+ If you're using KERNEXEC, it's recommended that you enable this option
52271+ to supplement the hardening of the kernel.
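
The help text describes constant blinding: rather than emitting an attacker-supplied 32-bit immediate K verbatim into executable memory, the JIT can emit K XOR key and undo the XOR at runtime, so the bytes resident in generated code are unpredictable. A minimal sketch; struct jit_ctx and both emit helpers are hypothetical stand-ins, not the patch's API:

#include <linux/types.h>

struct jit_ctx { u32 blind_key; };                     /* illustrative */
extern void emit_mov_imm(struct jit_ctx *, int, u32);  /* hypothetical */
extern void emit_xor_imm(struct jit_ctx *, int, u32);  /* hypothetical */

static void emit_blinded_imm(struct jit_ctx *ctx, int reg, u32 k)
{
        u32 key = ctx->blind_key;          /* fresh random key per JIT run */

        emit_mov_imm(ctx, reg, k ^ key);   /* code memory holds k ^ key only */
        emit_xor_imm(ctx, reg, key);       /* reg holds k again at runtime */
}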
52272+
52273+config GRKERNSEC_RAND_THREADSTACK
52274+ bool "Insert random gaps between thread stacks"
52275+ default y if GRKERNSEC_CONFIG_AUTO
52276+ depends on PAX_RANDMMAP && !PPC
52277+ help
52278+ If you say Y here, a random-sized gap will be enforced between allocated
52279+ thread stacks. Glibc's NPTL and other threading libraries that
52280+ pass MAP_STACK to the kernel for thread stack allocation are supported.
52281+ The implementation currently provides 8 bits of entropy for the gap.
52282+
52283+ Many distributions do not compile threaded remote services with the
52284+ -fstack-check argument to GCC, causing the variable-sized stack-based
52285+ allocator, alloca(), to not probe the stack on allocation. This
52286+ permits an unbounded alloca() to skip over any guard page and potentially
52287+ modify another thread's stack reliably. An enforced random gap
52288+ reduces the reliability of such an attack and increases the chance
52289+ that such a read/write to another thread's stack instead lands in
52290+ an unmapped area, causing a crash and triggering grsecurity's
52291+ anti-bruteforcing logic.
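
With 8 bits of entropy at page granularity, the enforced gap below a MAP_STACK mapping might be derived as in this sketch; the helper name is hypothetical, while get_random_int() is a real kernel primitive of this era:

#include <linux/random.h>
#include <linux/mm.h>

/* Pick a random, page-aligned gap of 0..255 pages (8 bits of entropy)
 * to place below a thread-stack allocation. */
static unsigned long rand_threadstack_gap(void)
{
        unsigned long pages = get_random_int() & 0xFFUL;

        return pages << PAGE_SHIFT;        /* gap in bytes */
}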
52292+
52293+config GRKERNSEC_PROC_MEMMAP
52294+ bool "Harden ASLR against information leaks and entropy reduction"
52295+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
52296+ depends on PAX_NOEXEC || PAX_ASLR
52297+ help
52298+	  If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
52299+	  give no information about the addresses of a task's mappings if
52300+	  PaX features that rely on random addresses are enabled for the task.
52301+	  In addition to sanitizing this information and disabling other
52302+	  dangerous sources of information, this option causes reads of sensitive
52303+	  /proc/<pid> entries to be rejected when the file descriptor was opened
52304+	  in a different task than the one performing the read; such attempts are logged.
52305+ This option also limits argv/env strings for suid/sgid binaries
52306+ to 512KB to prevent a complete exhaustion of the stack entropy provided
52307+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
52308+ binaries to prevent alternative mmap layouts from being abused.
52309+
52310+ If you use PaX it is essential that you say Y here as it closes up
52311+ several holes that make full ASLR useless locally.
52312+
52313+config GRKERNSEC_BRUTE
52314+ bool "Deter exploit bruteforcing"
52315+ default y if GRKERNSEC_CONFIG_AUTO
52316+ help
52317+ If you say Y here, attempts to bruteforce exploits against forking
52318+ daemons such as apache or sshd, as well as against suid/sgid binaries
52319+ will be deterred. When a child of a forking daemon is killed by PaX
52320+ or crashes due to an illegal instruction or other suspicious signal,
52321+ the parent process will be delayed 30 seconds upon every subsequent
52322+ fork until the administrator is able to assess the situation and
52323+ restart the daemon.
52324+ In the suid/sgid case, the attempt is logged, the user has all their
52325+ processes terminated, and they are prevented from executing any further
52326+ processes for 15 minutes.
52327+ It is recommended that you also enable signal logging in the auditing
52328+ section so that logs are generated when a process triggers a suspicious
52329+ signal.
52330+ If the sysctl option is enabled, a sysctl option with name
52331+ "deter_bruteforce" is created.
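
The two responses described above amount to a fork-side delay and a temporary ban on the suid/sgid side. A sketch of the delay half; 'brute_hit' stands in for per-process state the patch would maintain, so the whole function is illustrative:

#include <linux/sched.h>

/* Once a forking daemon's child died to a suspicious signal, every later
 * fork by that daemon pays a 30 second delay. */
static void gr_brute_delay_sketch(int brute_hit)
{
        if (brute_hit)
                schedule_timeout_interruptible(30 * HZ);
}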
52332+
52333+
52334+config GRKERNSEC_MODHARDEN
52335+ bool "Harden module auto-loading"
52336+ default y if GRKERNSEC_CONFIG_AUTO
52337+ depends on MODULES
52338+ help
52339+ If you say Y here, module auto-loading in response to use of some
52340+ feature implemented by an unloaded module will be restricted to
52341+ root users. Enabling this option helps defend against attacks
52342+ by unprivileged users who abuse the auto-loading behavior to
52343+ cause a vulnerable module to load that is then exploited.
52344+
52345+ If this option prevents a legitimate use of auto-loading for a
52346+ non-root user, the administrator can execute modprobe manually
52347+ with the exact name of the module mentioned in the alert log.
52348+ Alternatively, the administrator can add the module to the list
52349+ of modules loaded at boot by modifying init scripts.
52350+
52351+ Modification of init scripts will most likely be needed on
52352+ Ubuntu servers with encrypted home directory support enabled,
52353+ as the first non-root user logging in will cause the ecb(aes),
52354+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
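
Conceptually the restriction is a credential check on the auto-load path, as in this sketch; the hook name is hypothetical, while uid_eq() and GLOBAL_ROOT_UID are real primitives of this kernel era:

#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/printk.h>

/* Deny kernel-initiated module auto-loading for non-root requesters and
 * log the module name for the administrator. */
static int gr_modharden_sketch(const char *modname)
{
        if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
                printk(KERN_ALERT "grsec: denied auto-load of %s\n", modname);
                return -EPERM;
        }
        return 0;
}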
52355+
52356+config GRKERNSEC_HIDESYM
52357+ bool "Hide kernel symbols"
52358+ default y if GRKERNSEC_CONFIG_AUTO
52359+ select PAX_USERCOPY_SLABS
52360+ help
52361+	  If you say Y here, getting information on loaded modules and
52362+ displaying all kernel symbols through a syscall will be restricted
52363+ to users with CAP_SYS_MODULE. For software compatibility reasons,
52364+ /proc/kallsyms will be restricted to the root user. The RBAC
52365+ system can hide that entry even from root.
52366+
52367+ This option also prevents leaking of kernel addresses through
52368+ several /proc entries.
52369+
52370+ Note that this option is only effective provided the following
52371+ conditions are met:
52372+ 1) The kernel using grsecurity is not precompiled by some distribution
52373+ 2) You have also enabled GRKERNSEC_DMESG
52374+ 3) You are using the RBAC system and hiding other files such as your
52375+ kernel image and System.map. Alternatively, enabling this option
52376+ causes the permissions on /boot, /lib/modules, and the kernel
52377+ source directory to change at compile time to prevent
52378+ reading by non-root users.
52379+ If the above conditions are met, this option will aid in providing a
52380+ useful protection against local kernel exploitation of overflows
52381+ and arbitrary read/write vulnerabilities.
52382+
52383+config GRKERNSEC_KERN_LOCKOUT
52384+ bool "Active kernel exploit response"
52385+ default y if GRKERNSEC_CONFIG_AUTO
52386+ depends on X86 || ARM || PPC || SPARC
52387+ help
52388+ If you say Y here, when a PaX alert is triggered due to suspicious
52389+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
52390+ or an OOPS occurs due to bad memory accesses, instead of just
52391+ terminating the offending process (and potentially allowing
52392+ a subsequent exploit from the same user), we will take one of two
52393+ actions:
52394+	  * If the user was root, we will panic the system
52395+	  * If the user was non-root, we will log the attempt, terminate
52396+	    all processes owned by the user, then prevent them from creating
52397+	    any new processes until the system is restarted
52398+ This deters repeated kernel exploitation/bruteforcing attempts
52399+ and is useful for later forensics.
52400+
52401+endmenu
52402+menu "Role Based Access Control Options"
52403+depends on GRKERNSEC
52404+
52405+config GRKERNSEC_RBAC_DEBUG
52406+ bool
52407+
52408+config GRKERNSEC_NO_RBAC
52409+ bool "Disable RBAC system"
52410+ help
52411+ If you say Y here, the /dev/grsec device will be removed from the kernel,
52412+ preventing the RBAC system from being enabled. You should only say Y
52413+ here if you have no intention of using the RBAC system, so as to prevent
52414+ an attacker with root access from misusing the RBAC system to hide files
52415+ and processes when loadable module support and /dev/[k]mem have been
52416+ locked down.
52417+
52418+config GRKERNSEC_ACL_HIDEKERN
52419+ bool "Hide kernel processes"
52420+ help
52421+ If you say Y here, all kernel threads will be hidden to all
52422+ processes but those whose subject has the "view hidden processes"
52423+ flag.
52424+
52425+config GRKERNSEC_ACL_MAXTRIES
52426+ int "Maximum tries before password lockout"
52427+ default 3
52428+ help
52429+ This option enforces the maximum number of times a user can attempt
52430+ to authorize themselves with the grsecurity RBAC system before being
52431+ denied the ability to attempt authorization again for a specified time.
52432+ The lower the number, the harder it will be to brute-force a password.
52433+
52434+config GRKERNSEC_ACL_TIMEOUT
52435+ int "Time to wait after max password tries, in seconds"
52436+ default 30
52437+ help
52438+ This option specifies the time the user must wait after attempting to
52439+ authorize to the RBAC system with the maximum number of invalid
52440+ passwords. The higher the number, the harder it will be to brute-force
52441+ a password.
52442+
52443+endmenu
52444+menu "Filesystem Protections"
52445+depends on GRKERNSEC
52446+
52447+config GRKERNSEC_PROC
52448+ bool "Proc restrictions"
52449+ default y if GRKERNSEC_CONFIG_AUTO
52450+ help
52451+ If you say Y here, the permissions of the /proc filesystem
52452+ will be altered to enhance system security and privacy. You MUST
52453+ choose either a user only restriction or a user and group restriction.
52454+	  Depending upon the option you choose, you can either restrict users to
52455+	  see only the processes they themselves run, or choose a group whose
52456+	  members can view all processes and files normally restricted to root.
52457+	  NOTE: If you're running identd or
52458+ ntpd as a non-root user, you will have to run it as the group you
52459+ specify here.
52460+
52461+config GRKERNSEC_PROC_USER
52462+ bool "Restrict /proc to user only"
52463+ depends on GRKERNSEC_PROC
52464+ help
52465+	  If you say Y here, non-root users will only be able to view their own
52466+	  processes, and will be restricted from viewing network-related
52467+	  information and kernel symbol and module information.
52468+
52469+config GRKERNSEC_PROC_USERGROUP
52470+ bool "Allow special group"
52471+ default y if GRKERNSEC_CONFIG_AUTO
52472+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
52473+ help
52474+ If you say Y here, you will be able to select a group that will be
52475+ able to view all processes and network-related information. If you've
52476+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
52477+ remain hidden. This option is useful if you want to run identd as
52478+ a non-root user. The group you select may also be chosen at boot time
52479+ via "grsec_proc_gid=" on the kernel commandline.
52480+
52481+config GRKERNSEC_PROC_GID
52482+ int "GID for special group"
52483+ depends on GRKERNSEC_PROC_USERGROUP
52484+ default 1001
52485+
52486+config GRKERNSEC_PROC_ADD
52487+ bool "Additional restrictions"
52488+ default y if GRKERNSEC_CONFIG_AUTO
52489+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
52490+ help
52491+ If you say Y here, additional restrictions will be placed on
52492+ /proc that keep normal users from viewing device information and
52493+ slabinfo information that could be useful for exploits.
52494+
52495+config GRKERNSEC_LINK
52496+ bool "Linking restrictions"
52497+ default y if GRKERNSEC_CONFIG_AUTO
52498+ help
52499+ If you say Y here, /tmp race exploits will be prevented, since users
52500+ will no longer be able to follow symlinks owned by other users in
52501+ world-writable +t directories (e.g. /tmp), unless the owner of the
52502+	  symlink is the owner of the directory. Users will also not be
52503+ able to hardlink to files they do not own. If the sysctl option is
52504+ enabled, a sysctl option with name "linking_restrictions" is created.
52505+
52506+config GRKERNSEC_SYMLINKOWN
52507+ bool "Kernel-enforced SymlinksIfOwnerMatch"
52508+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
52509+ help
52510+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
52511+ that prevents it from being used as a security feature. As Apache
52512+ verifies the symlink by performing a stat() against the target of
52513+ the symlink before it is followed, an attacker can setup a symlink
52514+ to point to a same-owned file, then replace the symlink with one
52515+ that targets another user's file just after Apache "validates" the
52516+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
52517+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
52518+ will be in place for the group you specify. If the sysctl option
52519+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
52520+ created.
52521+
52522+config GRKERNSEC_SYMLINKOWN_GID
52523+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
52524+ depends on GRKERNSEC_SYMLINKOWN
52525+ default 1006
52526+ help
52527+ Setting this GID determines what group kernel-enforced
52528+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
52529+ is enabled, a sysctl option with name "symlinkown_gid" is created.
52530+
52531+config GRKERNSEC_FIFO
52532+ bool "FIFO restrictions"
52533+ default y if GRKERNSEC_CONFIG_AUTO
52534+ help
52535+ If you say Y here, users will not be able to write to FIFOs they don't
52536+ own in world-writable +t directories (e.g. /tmp), unless the owner of
52537+ the FIFO is the same owner of the directory it's held in. If the sysctl
52538+ option is enabled, a sysctl option with name "fifo_restrictions" is
52539+ created.
52540+
52541+config GRKERNSEC_SYSFS_RESTRICT
52542+ bool "Sysfs/debugfs restriction"
52543+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
52544+ depends on SYSFS
52545+ help
52546+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
52547+ any filesystem normally mounted under it (e.g. debugfs) will be
52548+ mostly accessible only by root. These filesystems generally provide access
52549+ to hardware and debug information that isn't appropriate for unprivileged
52550+ users of the system. Sysfs and debugfs have also become a large source
52551+ of new vulnerabilities, ranging from infoleaks to local compromise.
52552+ There has been very little oversight with an eye toward security involved
52553+ in adding new exporters of information to these filesystems, so their
52554+ use is discouraged.
52555+ For reasons of compatibility, a few directories have been whitelisted
52556+ for access by non-root users:
52557+ /sys/fs/selinux
52558+ /sys/fs/fuse
52559+ /sys/devices/system/cpu
52560+
52561+config GRKERNSEC_ROFS
52562+ bool "Runtime read-only mount protection"
52563+ help
52564+ If you say Y here, a sysctl option with name "romount_protect" will
52565+ be created. By setting this option to 1 at runtime, filesystems
52566+ will be protected in the following ways:
52567+ * No new writable mounts will be allowed
52568+ * Existing read-only mounts won't be able to be remounted read/write
52569+ * Write operations will be denied on all block devices
52570+ This option acts independently of grsec_lock: once it is set to 1,
52571+ it cannot be turned off. Therefore, please be mindful of the resulting
52572+ behavior if this option is enabled in an init script on a read-only
52573+ filesystem. This feature is mainly intended for secure embedded systems.
52574+
52575+config GRKERNSEC_DEVICE_SIDECHANNEL
52576+ bool "Eliminate stat/notify-based device sidechannels"
52577+ default y if GRKERNSEC_CONFIG_AUTO
52578+ help
52579+ If you say Y here, timing analyses on block or character
52580+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
52581+ will be thwarted for unprivileged users. If a process without
52582+ CAP_MKNOD stats such a device, the last access and last modify times
52583+ will match the device's create time. No access or modify events
52584+ will be triggered through inotify/dnotify/fanotify for such devices.
52585+ This feature will prevent attacks that may at a minimum
52586+ allow an attacker to determine the administrator's password length.
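
The sanitization can be pictured as a stat()-time rewrite for unprivileged callers, sketched below; the hook placement and the use of ctime as the reported baseline are assumptions:

#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/capability.h>

/* For callers lacking CAP_MKNOD, report a device node's access and modify
 * times as its change time, hiding usage timing (e.g. on /dev/ptmx). */
static void sanitize_dev_stat_sketch(const struct inode *inode,
                                     struct kstat *stat)
{
        if ((S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) &&
            !capable(CAP_MKNOD)) {
                stat->atime = stat->ctime;
                stat->mtime = stat->ctime;
        }
}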
52587+
52588+config GRKERNSEC_CHROOT
52589+ bool "Chroot jail restrictions"
52590+ default y if GRKERNSEC_CONFIG_AUTO
52591+ help
52592+ If you say Y here, you will be able to choose several options that will
52593+ make breaking out of a chrooted jail much more difficult. If you
52594+ encounter no software incompatibilities with the following options, it
52595+ is recommended that you enable each one.
52596+
52597+config GRKERNSEC_CHROOT_MOUNT
52598+ bool "Deny mounts"
52599+ default y if GRKERNSEC_CONFIG_AUTO
52600+ depends on GRKERNSEC_CHROOT
52601+ help
52602+ If you say Y here, processes inside a chroot will not be able to
52603+ mount or remount filesystems. If the sysctl option is enabled, a
52604+ sysctl option with name "chroot_deny_mount" is created.
52605+
52606+config GRKERNSEC_CHROOT_DOUBLE
52607+ bool "Deny double-chroots"
52608+ default y if GRKERNSEC_CONFIG_AUTO
52609+ depends on GRKERNSEC_CHROOT
52610+ help
52611+ If you say Y here, processes inside a chroot will not be able to chroot
52612+ again outside the chroot. This is a widely used method of breaking
52613+ out of a chroot jail and should not be allowed. If the sysctl
52614+ option is enabled, a sysctl option with name
52615+ "chroot_deny_chroot" is created.
52616+
52617+config GRKERNSEC_CHROOT_PIVOT
52618+ bool "Deny pivot_root in chroot"
52619+ default y if GRKERNSEC_CONFIG_AUTO
52620+ depends on GRKERNSEC_CHROOT
52621+ help
52622+ If you say Y here, processes inside a chroot will not be able to use
52623+ a function called pivot_root() that was introduced in Linux 2.3.41. It
52624+	  works similarly to chroot in that it changes the root filesystem. This
52625+ function could be misused in a chrooted process to attempt to break out
52626+ of the chroot, and therefore should not be allowed. If the sysctl
52627+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
52628+ created.
52629+
52630+config GRKERNSEC_CHROOT_CHDIR
52631+ bool "Enforce chdir(\"/\") on all chroots"
52632+ default y if GRKERNSEC_CONFIG_AUTO
52633+ depends on GRKERNSEC_CHROOT
52634+ help
52635+ If you say Y here, the current working directory of all newly-chrooted
52636+	  applications will be set to the root directory of the chroot.
52637+ The man page on chroot(2) states:
52638+ Note that this call does not change the current working
52639+ directory, so that `.' can be outside the tree rooted at
52640+ `/'. In particular, the super-user can escape from a
52641+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
52642+
52643+ It is recommended that you say Y here, since it's not known to break
52644+ any software. If the sysctl option is enabled, a sysctl option with
52645+ name "chroot_enforce_chdir" is created.
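
Mechanically, the enforcement is a forced cwd update at chroot() time, as in this sketch; set_fs_pwd() is a real VFS helper in this kernel era, while the hook itself is illustrative:

#include <linux/fs_struct.h>
#include <linux/path.h>
#include <linux/sched.h>

/* After a successful chroot(), move the caller's working directory inside
 * the new root so '.' cannot remain outside the jail. */
static void gr_chroot_chdir_sketch(struct path *new_root)
{
        set_fs_pwd(current->fs, new_root);
}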
52646+
52647+config GRKERNSEC_CHROOT_CHMOD
52648+ bool "Deny (f)chmod +s"
52649+ default y if GRKERNSEC_CONFIG_AUTO
52650+ depends on GRKERNSEC_CHROOT
52651+ help
52652+ If you say Y here, processes inside a chroot will not be able to chmod
52653+ or fchmod files to make them have suid or sgid bits. This protects
52654+ against another published method of breaking a chroot. If the sysctl
52655+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
52656+ created.
52657+
52658+config GRKERNSEC_CHROOT_FCHDIR
52659+ bool "Deny fchdir out of chroot"
52660+ default y if GRKERNSEC_CONFIG_AUTO
52661+ depends on GRKERNSEC_CHROOT
52662+ help
52663+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
52664+ to a file descriptor of the chrooting process that points to a directory
52665+ outside the filesystem will be stopped. If the sysctl option
52666+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
52667+
52668+config GRKERNSEC_CHROOT_MKNOD
52669+ bool "Deny mknod"
52670+ default y if GRKERNSEC_CONFIG_AUTO
52671+ depends on GRKERNSEC_CHROOT
52672+ help
52673+ If you say Y here, processes inside a chroot will not be allowed to
52674+ mknod. The problem with using mknod inside a chroot is that it
52675+ would allow an attacker to create a device entry that is the same
52676+	  as one on the physical root of your system, which could be
52677+	  anything from the console device to a device for your hard drive (which
52678+ they could then use to wipe the drive or steal data). It is recommended
52679+ that you say Y here, unless you run into software incompatibilities.
52680+ If the sysctl option is enabled, a sysctl option with name
52681+ "chroot_deny_mknod" is created.
52682+
52683+config GRKERNSEC_CHROOT_SHMAT
52684+ bool "Deny shmat() out of chroot"
52685+ default y if GRKERNSEC_CONFIG_AUTO
52686+ depends on GRKERNSEC_CHROOT
52687+ help
52688+ If you say Y here, processes inside a chroot will not be able to attach
52689+ to shared memory segments that were created outside of the chroot jail.
52690+ It is recommended that you say Y here. If the sysctl option is enabled,
52691+ a sysctl option with name "chroot_deny_shmat" is created.
52692+
52693+config GRKERNSEC_CHROOT_UNIX
52694+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
52695+ default y if GRKERNSEC_CONFIG_AUTO
52696+ depends on GRKERNSEC_CHROOT
52697+ help
52698+ If you say Y here, processes inside a chroot will not be able to
52699+ connect to abstract (meaning not belonging to a filesystem) Unix
52700+ domain sockets that were bound outside of a chroot. It is recommended
52701+ that you say Y here. If the sysctl option is enabled, a sysctl option
52702+ with name "chroot_deny_unix" is created.
52703+
52704+config GRKERNSEC_CHROOT_FINDTASK
52705+ bool "Protect outside processes"
52706+ default y if GRKERNSEC_CONFIG_AUTO
52707+ depends on GRKERNSEC_CHROOT
52708+ help
52709+ If you say Y here, processes inside a chroot will not be able to
52710+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
52711+ getsid, or view any process outside of the chroot. If the sysctl
52712+ option is enabled, a sysctl option with name "chroot_findtask" is
52713+ created.
52714+
52715+config GRKERNSEC_CHROOT_NICE
52716+ bool "Restrict priority changes"
52717+ default y if GRKERNSEC_CONFIG_AUTO
52718+ depends on GRKERNSEC_CHROOT
52719+ help
52720+ If you say Y here, processes inside a chroot will not be able to raise
52721+ the priority of processes in the chroot, or alter the priority of
52722+ processes outside the chroot. This provides more security than simply
52723+ removing CAP_SYS_NICE from the process' capability set. If the
52724+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
52725+ is created.
52726+
52727+config GRKERNSEC_CHROOT_SYSCTL
52728+ bool "Deny sysctl writes"
52729+ default y if GRKERNSEC_CONFIG_AUTO
52730+ depends on GRKERNSEC_CHROOT
52731+ help
52732+ If you say Y here, an attacker in a chroot will not be able to
52733+ write to sysctl entries, either by sysctl(2) or through a /proc
52734+ interface. It is strongly recommended that you say Y here. If the
52735+ sysctl option is enabled, a sysctl option with name
52736+ "chroot_deny_sysctl" is created.
52737+
52738+config GRKERNSEC_CHROOT_CAPS
52739+ bool "Capability restrictions"
52740+ default y if GRKERNSEC_CONFIG_AUTO
52741+ depends on GRKERNSEC_CHROOT
52742+ help
52743+ If you say Y here, the capabilities on all processes within a
52744+ chroot jail will be lowered to stop module insertion, raw i/o,
52745+ system and net admin tasks, rebooting the system, modifying immutable
52746+ files, modifying IPC owned by another, and changing the system time.
52747+	  This is left as an option because it can break some apps. Disable this
52748+ if your chrooted apps are having problems performing those kinds of
52749+ tasks. If the sysctl option is enabled, a sysctl option with
52750+ name "chroot_caps" is created.
52751+
52752+endmenu
52753+menu "Kernel Auditing"
52754+depends on GRKERNSEC
52755+
52756+config GRKERNSEC_AUDIT_GROUP
52757+ bool "Single group for auditing"
52758+ help
52759+ If you say Y here, the exec, chdir, and (un)mount logging features
52760+ will only operate on a group you specify. This option is recommended
52761+ if you only want to watch certain users instead of having a large
52762+ amount of logs from the entire system. If the sysctl option is enabled,
52763+ a sysctl option with name "audit_group" is created.
52764+
52765+config GRKERNSEC_AUDIT_GID
52766+ int "GID for auditing"
52767+ depends on GRKERNSEC_AUDIT_GROUP
52768+ default 1007
52769+
52770+config GRKERNSEC_EXECLOG
52771+ bool "Exec logging"
52772+ help
52773+ If you say Y here, all execve() calls will be logged (since the
52774+ other exec*() calls are frontends to execve(), all execution
52775+ will be logged). Useful for shell-servers that like to keep track
52776+ of their users. If the sysctl option is enabled, a sysctl option with
52777+ name "exec_logging" is created.
52778+	  WARNING: When enabled, this option will produce a LOT of logs, especially
52779+ on an active system.
52780+
52781+config GRKERNSEC_RESLOG
52782+ bool "Resource logging"
52783+ default y if GRKERNSEC_CONFIG_AUTO
52784+ help
52785+ If you say Y here, all attempts to overstep resource limits will
52786+ be logged with the resource name, the requested size, and the current
52787+ limit. It is highly recommended that you say Y here. If the sysctl
52788+ option is enabled, a sysctl option with name "resource_logging" is
52789+ created. If the RBAC system is enabled, the sysctl value is ignored.
52790+
52791+config GRKERNSEC_CHROOT_EXECLOG
52792+ bool "Log execs within chroot"
52793+ help
52794+ If you say Y here, all executions inside a chroot jail will be logged
52795+ to syslog. This can cause a large amount of logs if certain
52796+ applications (eg. djb's daemontools) are installed on the system, and
52797+ is therefore left as an option. If the sysctl option is enabled, a
52798+ sysctl option with name "chroot_execlog" is created.
52799+
52800+config GRKERNSEC_AUDIT_PTRACE
52801+ bool "Ptrace logging"
52802+ help
52803+ If you say Y here, all attempts to attach to a process via ptrace
52804+ will be logged. If the sysctl option is enabled, a sysctl option
52805+ with name "audit_ptrace" is created.
52806+
52807+config GRKERNSEC_AUDIT_CHDIR
52808+ bool "Chdir logging"
52809+ help
52810+ If you say Y here, all chdir() calls will be logged. If the sysctl
52811+ option is enabled, a sysctl option with name "audit_chdir" is created.
52812+
52813+config GRKERNSEC_AUDIT_MOUNT
52814+ bool "(Un)Mount logging"
52815+ help
52816+ If you say Y here, all mounts and unmounts will be logged. If the
52817+ sysctl option is enabled, a sysctl option with name "audit_mount" is
52818+ created.
52819+
52820+config GRKERNSEC_SIGNAL
52821+ bool "Signal logging"
52822+ default y if GRKERNSEC_CONFIG_AUTO
52823+ help
52824+ If you say Y here, certain important signals will be logged, such as
52825+	  SIGSEGV, which will as a result inform you when an error occurred in a
52826+	  program, which in some cases could indicate a possible exploit attempt.
52827+ If the sysctl option is enabled, a sysctl option with name
52828+ "signal_logging" is created.
52829+
52830+config GRKERNSEC_FORKFAIL
52831+ bool "Fork failure logging"
52832+ help
52833+ If you say Y here, all failed fork() attempts will be logged.
52834+ This could suggest a fork bomb, or someone attempting to overstep
52835+ their process limit. If the sysctl option is enabled, a sysctl option
52836+ with name "forkfail_logging" is created.
52837+
52838+config GRKERNSEC_TIME
52839+ bool "Time change logging"
52840+ default y if GRKERNSEC_CONFIG_AUTO
52841+ help
52842+ If you say Y here, any changes of the system clock will be logged.
52843+ If the sysctl option is enabled, a sysctl option with name
52844+ "timechange_logging" is created.
52845+
52846+config GRKERNSEC_PROC_IPADDR
52847+ bool "/proc/<pid>/ipaddr support"
52848+ default y if GRKERNSEC_CONFIG_AUTO
52849+ help
52850+ If you say Y here, a new entry will be added to each /proc/<pid>
52851+ directory that contains the IP address of the person using the task.
52852+ The IP is carried across local TCP and AF_UNIX stream sockets.
52853+ This information can be useful for IDS/IPSes to perform remote response
52854+ to a local attack. The entry is readable by only the owner of the
52855+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
52856+ the RBAC system), and thus does not create privacy concerns.
52857+
52858+config GRKERNSEC_RWXMAP_LOG
52859+ bool 'Denied RWX mmap/mprotect logging'
52860+ default y if GRKERNSEC_CONFIG_AUTO
52861+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
52862+ help
52863+ If you say Y here, calls to mmap() and mprotect() with explicit
52864+ usage of PROT_WRITE and PROT_EXEC together will be logged when
52865+ denied by the PAX_MPROTECT feature. If the sysctl option is
52866+ enabled, a sysctl option with name "rwxmap_logging" is created.
52867+
52868+config GRKERNSEC_AUDIT_TEXTREL
52869+ bool 'ELF text relocations logging (READ HELP)'
52870+ depends on PAX_MPROTECT
52871+ help
52872+ If you say Y here, text relocations will be logged with the filename
52873+ of the offending library or binary. The purpose of the feature is
52874+ to help Linux distribution developers get rid of libraries and
52875+ binaries that need text relocations which hinder the future progress
52876+ of PaX. Only Linux distribution developers should say Y here, and
52877+ never on a production machine, as this option creates an information
52878+ leak that could aid an attacker in defeating the randomization of
52879+ a single memory region. If the sysctl option is enabled, a sysctl
52880+ option with name "audit_textrel" is created.
52881+
52882+endmenu
52883+
52884+menu "Executable Protections"
52885+depends on GRKERNSEC
52886+
52887+config GRKERNSEC_DMESG
52888+ bool "Dmesg(8) restriction"
52889+ default y if GRKERNSEC_CONFIG_AUTO
52890+ help
52891+ If you say Y here, non-root users will not be able to use dmesg(8)
52892+ to view the contents of the kernel's circular log buffer.
52893+ The kernel's log buffer often contains kernel addresses and other
52894+ identifying information useful to an attacker in fingerprinting a
52895+ system for a targeted exploit.
52896+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
52897+ created.
52898+
52899+config GRKERNSEC_HARDEN_PTRACE
52900+ bool "Deter ptrace-based process snooping"
52901+ default y if GRKERNSEC_CONFIG_AUTO
52902+ help
52903+ If you say Y here, TTY sniffers and other malicious monitoring
52904+ programs implemented through ptrace will be defeated. If you
52905+ have been using the RBAC system, this option has already been
52906+ enabled for several years for all users, with the ability to make
52907+ fine-grained exceptions.
52908+
52909+ This option only affects the ability of non-root users to ptrace
52910+	  processes that are not a descendant of the ptracing process.
52911+ This means that strace ./binary and gdb ./binary will still work,
52912+ but attaching to arbitrary processes will not. If the sysctl
52913+ option is enabled, a sysctl option with name "harden_ptrace" is
52914+ created.
52915+
52916+config GRKERNSEC_PTRACE_READEXEC
52917+ bool "Require read access to ptrace sensitive binaries"
52918+ default y if GRKERNSEC_CONFIG_AUTO
52919+ help
52920+ If you say Y here, unprivileged users will not be able to ptrace unreadable
52921+ binaries. This option is useful in environments that
52922+ remove the read bits (e.g. file mode 4711) from suid binaries to
52923+	  prevent infoleaking of their contents. This option adds
52924+	  consistency to the use of that file mode, as otherwise an unreadable
52925+	  binary could be read out by ptracing it while it runs without privileges.
52926+
52927+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
52928+ is created.
52929+
52930+config GRKERNSEC_SETXID
52931+ bool "Enforce consistent multithreaded privileges"
52932+ default y if GRKERNSEC_CONFIG_AUTO
52933+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
52934+ help
52935+ If you say Y here, a change from a root uid to a non-root uid
52936+ in a multithreaded application will cause the resulting uids,
52937+ gids, supplementary groups, and capabilities in that thread
52938+ to be propagated to the other threads of the process. In most
52939+ cases this is unnecessary, as glibc will emulate this behavior
52940+ on behalf of the application. Other libcs do not act in the
52941+ same way, allowing the other threads of the process to continue
52942+ running with root privileges. If the sysctl option is enabled,
52943+ a sysctl option with name "consistent_setxid" is created.
52944+
52945+config GRKERNSEC_TPE
52946+ bool "Trusted Path Execution (TPE)"
52947+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
52948+ help
52949+ If you say Y here, you will be able to choose a gid to add to the
52950+ supplementary groups of users you want to mark as "untrusted."
52951+ These users will not be able to execute any files that are not in
52952+ root-owned directories writable only by root. If the sysctl option
52953+ is enabled, a sysctl option with name "tpe" is created.
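
The core test is a property of the containing directory, sketched here; the helper name is hypothetical, while uid_eq() and GLOBAL_ROOT_UID are real:

#include <linux/fs.h>
#include <linux/uidgid.h>

/* Under TPE, an untrusted user may execute a file only if its containing
 * directory is owned by root and not group- or world-writable. */
static bool tpe_dir_ok_sketch(const struct inode *dir)
{
        return uid_eq(dir->i_uid, GLOBAL_ROOT_UID) &&
               !(dir->i_mode & (S_IWGRP | S_IWOTH));
}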
52954+
52955+config GRKERNSEC_TPE_ALL
52956+ bool "Partially restrict all non-root users"
52957+ depends on GRKERNSEC_TPE
52958+ help
52959+ If you say Y here, all non-root users will be covered under
52960+ a weaker TPE restriction. This is separate from, and in addition to,
52961+ the main TPE options that you have selected elsewhere. Thus, if a
52962+ "trusted" GID is chosen, this restriction applies to even that GID.
52963+ Under this restriction, all non-root users will only be allowed to
52964+ execute files in directories they own that are not group or
52965+ world-writable, or in directories owned by root and writable only by
52966+ root. If the sysctl option is enabled, a sysctl option with name
52967+ "tpe_restrict_all" is created.
52968+
52969+config GRKERNSEC_TPE_INVERT
52970+ bool "Invert GID option"
52971+ depends on GRKERNSEC_TPE
52972+ help
52973+ If you say Y here, the group you specify in the TPE configuration will
52974+ decide what group TPE restrictions will be *disabled* for. This
52975+ option is useful if you want TPE restrictions to be applied to most
52976+ users on the system. If the sysctl option is enabled, a sysctl option
52977+ with name "tpe_invert" is created. Unlike other sysctl options, this
52978+ entry will default to on for backward-compatibility.
52979+
52980+config GRKERNSEC_TPE_GID
52981+ int
52982+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
52983+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
52984+
52985+config GRKERNSEC_TPE_UNTRUSTED_GID
52986+ int "GID for TPE-untrusted users"
52987+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
52988+ default 1005
52989+ help
52990+ Setting this GID determines what group TPE restrictions will be
52991+ *enabled* for. If the sysctl option is enabled, a sysctl option
52992+ with name "tpe_gid" is created.
52993+
52994+config GRKERNSEC_TPE_TRUSTED_GID
52995+ int "GID for TPE-trusted users"
52996+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
52997+ default 1005
52998+ help
52999+ Setting this GID determines what group TPE restrictions will be
53000+ *disabled* for. If the sysctl option is enabled, a sysctl option
53001+ with name "tpe_gid" is created.
53002+
53003+endmenu
53004+menu "Network Protections"
53005+depends on GRKERNSEC
53006+
53007+config GRKERNSEC_RANDNET
53008+ bool "Larger entropy pools"
53009+ default y if GRKERNSEC_CONFIG_AUTO
53010+ help
53011+ If you say Y here, the entropy pools used for many features of Linux
53012+ and grsecurity will be doubled in size. Since several grsecurity
53013+ features use additional randomness, it is recommended that you say Y
53014+	  here. Saying Y here has a similar effect to modifying
53015+ /proc/sys/kernel/random/poolsize.
53016+
53017+config GRKERNSEC_BLACKHOLE
53018+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
53019+ default y if GRKERNSEC_CONFIG_AUTO
53020+ depends on NET
53021+ help
53022+ If you say Y here, neither TCP resets nor ICMP
53023+ destination-unreachable packets will be sent in response to packets
53024+ sent to ports for which no associated listening process exists.
53025+ This feature supports both IPV4 and IPV6 and exempts the
53026+ loopback interface from blackholing. Enabling this feature
53027+ makes a host more resilient to DoS attacks and reduces network
53028+ visibility against scanners.
53029+
53030+ The blackhole feature as-implemented is equivalent to the FreeBSD
53031+ blackhole feature, as it prevents RST responses to all packets, not
53032+ just SYNs. Under most application behavior this causes no
53033+ problems, but applications (like haproxy) may not close certain
53034+ connections in a way that cleanly terminates them on the remote
53035+ end, leaving the remote host in LAST_ACK state. Because of this
53036+ side-effect and to prevent intentional LAST_ACK DoSes, this
53037+ feature also adds automatic mitigation against such attacks.
53038+ The mitigation drastically reduces the amount of time a socket
53039+ can spend in LAST_ACK state. If you're using haproxy and not
53040+ all servers it connects to have this option enabled, consider
53041+ disabling this feature on the haproxy host.
53042+
53043+ If the sysctl option is enabled, two sysctl options with names
53044+ "ip_blackhole" and "lastack_retries" will be created.
53045+ While "ip_blackhole" takes the standard zero/non-zero on/off
53046+ toggle, "lastack_retries" uses the same kinds of values as
53047+ "tcp_retries1" and "tcp_retries2". The default value of 4
53048+ prevents a socket from lasting more than 45 seconds in LAST_ACK
53049+ state.
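
At the point where the stack would answer a packet aimed at a closed port, the blackhole behavior is an early silent drop, roughly as sketched; the toggle name follows the patch's sysctl convention, and the surrounding function is illustrative:

#include <linux/skbuff.h>
#include <linux/netdevice.h>

extern int grsec_enable_blackhole;     /* assumed runtime toggle */

/* Where the stack would emit a RST (or an ICMP unreachable) for a packet
 * aimed at a closed port: drop silently unless the packet is on loopback. */
static bool blackhole_drop_sketch(const struct sk_buff *skb)
{
        return grsec_enable_blackhole &&
               !(skb->dev->flags & IFF_LOOPBACK);
}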
53050+
53051+config GRKERNSEC_NO_SIMULT_CONNECT
53052+ bool "Disable TCP Simultaneous Connect"
53053+ default y if GRKERNSEC_CONFIG_AUTO
53054+ depends on NET
53055+ help
53056+ If you say Y here, a feature by Willy Tarreau will be enabled that
53057+ removes a weakness in Linux's strict implementation of TCP that
53058+ allows two clients to connect to each other without either entering
53059+ a listening state. The weakness allows an attacker to easily prevent
53060+ a client from connecting to a known server provided the source port
53061+ for the connection is guessed correctly.
53062+
53063+ As the weakness could be used to prevent an antivirus or IPS from
53064+ fetching updates, or prevent an SSL gateway from fetching a CRL,
53065+ it should be eliminated by enabling this option. Though Linux is
53066+ one of few operating systems supporting simultaneous connect, it
53067+ has no legitimate use in practice and is rarely supported by firewalls.
53068+
53069+config GRKERNSEC_SOCKET
53070+ bool "Socket restrictions"
53071+ depends on NET
53072+ help
53073+ If you say Y here, you will be able to choose from several options.
53074+ If you assign a GID on your system and add it to the supplementary
53075+ groups of users you want to restrict socket access to, this patch
53076+ will perform up to three things, based on the option(s) you choose.
53077+
53078+config GRKERNSEC_SOCKET_ALL
53079+ bool "Deny any sockets to group"
53080+ depends on GRKERNSEC_SOCKET
53081+ help
53082+	  If you say Y here, you will be able to choose a GID whose users will
53083+ be unable to connect to other hosts from your machine or run server
53084+ applications from your machine. If the sysctl option is enabled, a
53085+ sysctl option with name "socket_all" is created.
53086+
53087+config GRKERNSEC_SOCKET_ALL_GID
53088+ int "GID to deny all sockets for"
53089+ depends on GRKERNSEC_SOCKET_ALL
53090+ default 1004
53091+ help
53092+ Here you can choose the GID to disable socket access for. Remember to
53093+ add the users you want socket access disabled for to the GID
53094+ specified here. If the sysctl option is enabled, a sysctl option
53095+ with name "socket_all_gid" is created.
53096+
53097+config GRKERNSEC_SOCKET_CLIENT
53098+ bool "Deny client sockets to group"
53099+ depends on GRKERNSEC_SOCKET
53100+ help
53101+	  If you say Y here, you will be able to choose a GID whose users will
53102+ be unable to connect to other hosts from your machine, but will be
53103+ able to run servers. If this option is enabled, all users in the group
53104+ you specify will have to use passive mode when initiating ftp transfers
53105+ from the shell on your machine. If the sysctl option is enabled, a
53106+ sysctl option with name "socket_client" is created.
53107+
53108+config GRKERNSEC_SOCKET_CLIENT_GID
53109+ int "GID to deny client sockets for"
53110+ depends on GRKERNSEC_SOCKET_CLIENT
53111+ default 1003
53112+ help
53113+ Here you can choose the GID to disable client socket access for.
53114+ Remember to add the users you want client socket access disabled for to
53115+ the GID specified here. If the sysctl option is enabled, a sysctl
53116+ option with name "socket_client_gid" is created.
53117+
53118+config GRKERNSEC_SOCKET_SERVER
53119+ bool "Deny server sockets to group"
53120+ depends on GRKERNSEC_SOCKET
53121+ help
53122+	  If you say Y here, you will be able to choose a GID whose users will
53123+ be unable to run server applications from your machine. If the sysctl
53124+ option is enabled, a sysctl option with name "socket_server" is created.
53125+
53126+config GRKERNSEC_SOCKET_SERVER_GID
53127+ int "GID to deny server sockets for"
53128+ depends on GRKERNSEC_SOCKET_SERVER
53129+ default 1002
53130+ help
53131+ Here you can choose the GID to disable server socket access for.
53132+ Remember to add the users you want server socket access disabled for to
53133+ the GID specified here. If the sysctl option is enabled, a sysctl
53134+ option with name "socket_server_gid" is created.
53135+
53136+endmenu
53137+menu "Sysctl Support"
53138+depends on GRKERNSEC && SYSCTL
53139+
53140+config GRKERNSEC_SYSCTL
53141+ bool "Sysctl support"
53142+ default y if GRKERNSEC_CONFIG_AUTO
53143+ help
53144+ If you say Y here, you will be able to change the options that
53145+ grsecurity runs with at bootup, without having to recompile your
53146+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
53147+ to enable (1) or disable (0) various features. All the sysctl entries
53148+ are mutable until the "grsec_lock" entry is set to a non-zero value.
53149+ All features enabled in the kernel configuration are disabled at boot
53150+ if you do not say Y to the "Turn on features by default" option.
53151+ All options should be set at startup, and the grsec_lock entry should
53152+ be set to a non-zero value after all the options are set.
53153+ *THIS IS EXTREMELY IMPORTANT*
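
The grsec_lock entry works as a one-way write gate over every other grsecurity sysctl, roughly as below; the handler shape is illustrative:

#include <linux/errno.h>

extern int grsec_lock;                 /* set once at boot, never cleared */

/* Once grsec_lock is non-zero, writes to every grsecurity sysctl entry
 * are refused; there is no runtime path that unsets it. */
static int grsec_sysctl_store_sketch(int *entry, int newval)
{
        if (grsec_lock)
                return -EPERM;
        *entry = newval;
        return 0;
}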
53154+
53155+config GRKERNSEC_SYSCTL_DISTRO
53156+ bool "Extra sysctl support for distro makers (READ HELP)"
53157+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
53158+ help
53159+ If you say Y here, additional sysctl options will be created
53160+ for features that affect processes running as root. Therefore,
53161+ it is critical when using this option that the grsec_lock entry be
53162+	  enabled after boot. Only distros shipping prebuilt kernel packages
53163+	  with this option enabled, and that can ensure grsec_lock is enabled
53164+	  after boot, should use this option.
53165+ *Failure to set grsec_lock after boot makes all grsec features
53166+ this option covers useless*
53167+
53168+ Currently this option creates the following sysctl entries:
53169+ "Disable Privileged I/O": "disable_priv_io"
53170+
53171+config GRKERNSEC_SYSCTL_ON
53172+ bool "Turn on features by default"
53173+ default y if GRKERNSEC_CONFIG_AUTO
53174+ depends on GRKERNSEC_SYSCTL
53175+ help
53176+ If you say Y here, instead of having all features enabled in the
53177+ kernel configuration disabled at boot time, the features will be
53178+ enabled at boot time. It is recommended you say Y here unless
53179+ there is some reason you would want all sysctl-tunable features to
53180+ be disabled by default. As mentioned elsewhere, it is important
53181+ to enable the grsec_lock entry once you have finished modifying
53182+ the sysctl entries.
53183+
53184+endmenu
53185+menu "Logging Options"
53186+depends on GRKERNSEC
53187+
53188+config GRKERNSEC_FLOODTIME
53189+ int "Seconds in between log messages (minimum)"
53190+ default 10
53191+ help
53192+	  This option allows you to enforce the minimum number of seconds between
53193+ grsecurity log messages. The default should be suitable for most
53194+ people, however, if you choose to change it, choose a value small enough
53195+ to allow informative logs to be produced, but large enough to
53196+ prevent flooding.
53197+
53198+config GRKERNSEC_FLOODBURST
53199+ int "Number of messages in a burst (maximum)"
53200+ default 6
53201+ help
53202+ This option allows you to choose the maximum number of messages allowed
53203+ within the flood time interval you chose in a separate option. The
53204+ default should be suitable for most people, however if you find that
53205+ many of your logs are being interpreted as flooding, you may want to
53206+ raise this value.
53207+
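
Together the two options implement a fixed-window rate limiter over log emission, roughly as below; this static-state sketch shows no locking, which the real logging path would need:

#include <linux/jiffies.h>

/* Fixed-window limiter: allow at most GRKERNSEC_FLOODBURST messages per
 * GRKERNSEC_FLOODTIME-second window and drop the excess. */
static bool grsec_log_allowed_sketch(void)
{
        static unsigned long window_start;
        static unsigned int emitted;

        if (time_after(jiffies,
                       window_start + CONFIG_GRKERNSEC_FLOODTIME * HZ)) {
                window_start = jiffies;          /* open a new window */
                emitted = 0;
        }
        return ++emitted <= CONFIG_GRKERNSEC_FLOODBURST;
}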
53208+endmenu
53209diff --git a/grsecurity/Makefile b/grsecurity/Makefile
53210new file mode 100644
53211index 0000000..1b9afa9
53212--- /dev/null
53213+++ b/grsecurity/Makefile
53214@@ -0,0 +1,38 @@
53215+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
53216+# during 2001-2009 it was completely redesigned by Brad Spengler
53217+# into an RBAC system
53218+#
53219+# All code in this directory and various hooks inserted throughout the kernel
53220+# are copyright Brad Spengler - Open Source Security, Inc., and released
53221+# under the GPL v2 or higher
53222+
53223+KBUILD_CFLAGS += -Werror
53224+
53225+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
53226+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
53227+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
53228+
53229+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
53230+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
53231+ gracl_learn.o grsec_log.o
53232+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
53233+
53234+ifdef CONFIG_NET
53235+obj-y += grsec_sock.o
53236+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
53237+endif
53238+
53239+ifndef CONFIG_GRKERNSEC
53240+obj-y += grsec_disabled.o
53241+endif
53242+
53243+ifdef CONFIG_GRKERNSEC_HIDESYM
53244+extra-y := grsec_hidesym.o
53245+$(obj)/grsec_hidesym.o:
53246+ @-chmod -f 500 /boot
53247+ @-chmod -f 500 /lib/modules
53248+ @-chmod -f 500 /lib64/modules
53249+ @-chmod -f 500 /lib32/modules
53250+ @-chmod -f 700 .
53251+ @echo ' grsec: protected kernel image paths'
53252+endif
53253diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
53254new file mode 100644
53255index 0000000..78353bd
53256--- /dev/null
53257+++ b/grsecurity/gracl.c
53258@@ -0,0 +1,4039 @@
53259+#include <linux/kernel.h>
53260+#include <linux/module.h>
53261+#include <linux/sched.h>
53262+#include <linux/mm.h>
53263+#include <linux/file.h>
53264+#include <linux/fs.h>
53265+#include <linux/namei.h>
53266+#include <linux/mount.h>
53267+#include <linux/tty.h>
53268+#include <linux/proc_fs.h>
53269+#include <linux/lglock.h>
53270+#include <linux/slab.h>
53271+#include <linux/vmalloc.h>
53272+#include <linux/types.h>
53273+#include <linux/sysctl.h>
53274+#include <linux/netdevice.h>
53275+#include <linux/ptrace.h>
53276+#include <linux/gracl.h>
53277+#include <linux/gralloc.h>
53278+#include <linux/security.h>
53279+#include <linux/grinternal.h>
53280+#include <linux/pid_namespace.h>
53281+#include <linux/stop_machine.h>
53282+#include <linux/fdtable.h>
53283+#include <linux/percpu.h>
53285+#include "../fs/mount.h"
53286+
53287+#include <asm/uaccess.h>
53288+#include <asm/errno.h>
53289+#include <asm/mman.h>
53290+
53291+extern struct lglock vfsmount_lock;
53292+
53293+static struct acl_role_db acl_role_set;
53294+static struct name_db name_set;
53295+static struct inodev_db inodev_set;
53296+
53297+/* for keeping track of userspace pointers used for subjects, so we
53298+ can share references in the kernel as well
53299+*/
53300+
53301+static struct path real_root;
53302+
53303+static struct acl_subj_map_db subj_map_set;
53304+
53305+static struct acl_role_label *default_role;
53306+
53307+static struct acl_role_label *role_list;
53308+
53309+static u16 acl_sp_role_value;
53310+
53311+extern char *gr_shared_page[4];
53312+static DEFINE_MUTEX(gr_dev_mutex);
53313+DEFINE_RWLOCK(gr_inode_lock);
53314+
53315+struct gr_arg *gr_usermode;
53316+
53317+static unsigned int gr_status __read_only = GR_STATUS_INIT;
53318+
53319+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
53320+extern void gr_clear_learn_entries(void);
53321+
53322+#ifdef CONFIG_GRKERNSEC_RESLOG
53323+extern void gr_log_resource(const struct task_struct *task,
53324+ const int res, const unsigned long wanted, const int gt);
53325+#endif
53326+
53327+unsigned char *gr_system_salt;
53328+unsigned char *gr_system_sum;
53329+
53330+static struct sprole_pw **acl_special_roles = NULL;
53331+static __u16 num_sprole_pws = 0;
53332+
53333+static struct acl_role_label *kernel_role = NULL;
53334+
53335+static unsigned int gr_auth_attempts = 0;
53336+static unsigned long gr_auth_expires = 0UL;
53337+
53338+#ifdef CONFIG_NET
53339+extern struct vfsmount *sock_mnt;
53340+#endif
53341+
53342+extern struct vfsmount *pipe_mnt;
53343+extern struct vfsmount *shm_mnt;
53344+#ifdef CONFIG_HUGETLBFS
53345+extern struct vfsmount *hugetlbfs_vfsmount;
53346+#endif
53347+
53348+static struct acl_object_label *fakefs_obj_rw;
53349+static struct acl_object_label *fakefs_obj_rwx;
53350+
53351+extern int gr_init_uidset(void);
53352+extern void gr_free_uidset(void);
53353+extern void gr_remove_uid(uid_t uid);
53354+extern int gr_find_uid(uid_t uid);
53355+
53356+__inline__ int
53357+gr_acl_is_enabled(void)
53358+{
53359+ return (gr_status & GR_READY);
53360+}
53361+
53362+#ifdef CONFIG_BTRFS_FS
53363+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
53364+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
53365+#endif
53366+
53367+static inline dev_t __get_dev(const struct dentry *dentry)
53368+{
53369+#ifdef CONFIG_BTRFS_FS
53370+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
53371+ return get_btrfs_dev_from_inode(dentry->d_inode);
53372+ else
53373+#endif
53374+ return dentry->d_inode->i_sb->s_dev;
53375+}
53376+
53377+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
53378+{
53379+ return __get_dev(dentry);
53380+}
53381+
53382+static char gr_task_roletype_to_char(struct task_struct *task)
53383+{
53384+ switch (task->role->roletype &
53385+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
53386+ GR_ROLE_SPECIAL)) {
53387+ case GR_ROLE_DEFAULT:
53388+ return 'D';
53389+ case GR_ROLE_USER:
53390+ return 'U';
53391+ case GR_ROLE_GROUP:
53392+ return 'G';
53393+ case GR_ROLE_SPECIAL:
53394+ return 'S';
53395+ }
53396+
53397+ return 'X';
53398+}
53399+
53400+char gr_roletype_to_char(void)
53401+{
53402+ return gr_task_roletype_to_char(current);
53403+}
53404+
53405+__inline__ int
53406+gr_acl_tpe_check(void)
53407+{
53408+ if (unlikely(!(gr_status & GR_READY)))
53409+ return 0;
53410+ if (current->role->roletype & GR_ROLE_TPE)
53411+ return 1;
53412+ else
53413+ return 0;
53414+}
53415+
53416+int
53417+gr_handle_rawio(const struct inode *inode)
53418+{
53419+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53420+ if (inode && S_ISBLK(inode->i_mode) &&
53421+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
53422+ !capable(CAP_SYS_RAWIO))
53423+ return 1;
53424+#endif
53425+ return 0;
53426+}
53427+
53428+static int
53429+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
53430+{
53431+ if (likely(lena != lenb))
53432+ return 0;
53433+
53434+ return !memcmp(a, b, lena);
53435+}
53436+
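+/* The helpers below build a pathname right to left: each prepend() moves
+ * the write cursor back by namelen and copies the next component in front
+ * of what is already there, so the string grows toward the start of the
+ * buffer (__our_d_path seeds the cursor at buf + buflen). */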
53437+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
53438+{
53439+ *buflen -= namelen;
53440+ if (*buflen < 0)
53441+ return -ENAMETOOLONG;
53442+ *buffer -= namelen;
53443+ memcpy(*buffer, str, namelen);
53444+ return 0;
53445+}
53446+
53447+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
53448+{
53449+ return prepend(buffer, buflen, name->name, name->len);
53450+}
53451+
53452+static int prepend_path(const struct path *path, struct path *root,
53453+ char **buffer, int *buflen)
53454+{
53455+ struct dentry *dentry = path->dentry;
53456+ struct vfsmount *vfsmnt = path->mnt;
53457+ struct mount *mnt = real_mount(vfsmnt);
53458+ bool slash = false;
53459+ int error = 0;
53460+
53461+ while (dentry != root->dentry || vfsmnt != root->mnt) {
53462+ struct dentry * parent;
53463+
53464+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
53465+ /* Global root? */
53466+ if (!mnt_has_parent(mnt)) {
53467+ goto out;
53468+ }
53469+ dentry = mnt->mnt_mountpoint;
53470+ mnt = mnt->mnt_parent;
53471+ vfsmnt = &mnt->mnt;
53472+ continue;
53473+ }
53474+ parent = dentry->d_parent;
53475+ prefetch(parent);
53476+ spin_lock(&dentry->d_lock);
53477+ error = prepend_name(buffer, buflen, &dentry->d_name);
53478+ spin_unlock(&dentry->d_lock);
53479+ if (!error)
53480+ error = prepend(buffer, buflen, "/", 1);
53481+ if (error)
53482+ break;
53483+
53484+ slash = true;
53485+ dentry = parent;
53486+ }
53487+
53488+out:
53489+ if (!error && !slash)
53490+ error = prepend(buffer, buflen, "/", 1);
53491+
53492+ return error;
53493+}
53494+
53495+/* this must be called with vfsmount_lock and rename_lock held */
53496+
53497+static char *__our_d_path(const struct path *path, struct path *root,
53498+ char *buf, int buflen)
53499+{
53500+ char *res = buf + buflen;
53501+ int error;
53502+
53503+ prepend(&res, &buflen, "\0", 1);
53504+ error = prepend_path(path, root, &res, &buflen);
53505+ if (error)
53506+ return ERR_PTR(error);
53507+
53508+ return res;
53509+}
53510+
53511+static char *
53512+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
53513+{
53514+ char *retval;
53515+
53516+ retval = __our_d_path(path, root, buf, buflen);
53517+ if (unlikely(IS_ERR(retval)))
53518+ retval = strcpy(buf, "<path too long>");
53519+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
53520+ retval[1] = '\0';
53521+
53522+ return retval;
53523+}
53524+
53525+static char *
53526+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
53527+ char *buf, int buflen)
53528+{
53529+ struct path path;
53530+ char *res;
53531+
53532+ path.dentry = (struct dentry *)dentry;
53533+ path.mnt = (struct vfsmount *)vfsmnt;
53534+
53535+ /* we can use real_root.dentry, real_root.mnt, because this is only called
53536+ by the RBAC system */
53537+ res = gen_full_path(&path, &real_root, buf, buflen);
53538+
53539+ return res;
53540+}
53541+
53542+static char *
53543+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
53544+ char *buf, int buflen)
53545+{
53546+ char *res;
53547+ struct path path;
53548+ struct path root;
53549+ struct task_struct *reaper = init_pid_ns.child_reaper;
53550+
53551+ path.dentry = (struct dentry *)dentry;
53552+ path.mnt = (struct vfsmount *)vfsmnt;
53553+
53554+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
53555+ get_fs_root(reaper->fs, &root);
53556+
53557+ write_seqlock(&rename_lock);
53558+ br_read_lock(&vfsmount_lock);
53559+ res = gen_full_path(&path, &root, buf, buflen);
53560+ br_read_unlock(&vfsmount_lock);
53561+ write_sequnlock(&rename_lock);
53562+
53563+ path_put(&root);
53564+ return res;
53565+}
53566+
53567+static char *
53568+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
53569+{
53570+ char *ret;
53571+ write_seqlock(&rename_lock);
53572+ br_read_lock(&vfsmount_lock);
53573+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
53574+ PAGE_SIZE);
53575+ br_read_unlock(&vfsmount_lock);
53576+ write_sequnlock(&rename_lock);
53577+ return ret;
53578+}
53579+
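+/* as gr_to_filename_rbac, but prefixes the result with "/proc"; the
+   buffer passed to __d_real_path is PAGE_SIZE - 6 so the 5-byte prefix
+   always fits in the space held back
+*/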
53580+static char *
53581+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
53582+{
53583+ char *ret;
53584+ char *buf;
53585+ int buflen;
53586+
53587+ write_seqlock(&rename_lock);
53588+ br_read_lock(&vfsmount_lock);
53589+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
53590+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
53591+ buflen = (int)(ret - buf);
53592+ if (buflen >= 5)
53593+ prepend(&ret, &buflen, "/proc", 5);
53594+ else
53595+ ret = strcpy(buf, "<path too long>");
53596+ br_read_unlock(&vfsmount_lock);
53597+ write_sequnlock(&rename_lock);
53598+ return ret;
53599+}
53600+
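+/* the gr_to_filename variants below format into distinct per-cpu pages
+   (gr_shared_page[0..3]) so that several paths can appear in a single
+   log message (see gr_log_learn)
+*/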
53601+char *
53602+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
53603+{
53604+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
53605+ PAGE_SIZE);
53606+}
53607+
53608+char *
53609+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
53610+{
53611+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
53612+ PAGE_SIZE);
53613+}
53614+
53615+char *
53616+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
53617+{
53618+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
53619+ PAGE_SIZE);
53620+}
53621+
53622+char *
53623+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
53624+{
53625+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
53626+ PAGE_SIZE);
53627+}
53628+
53629+char *
53630+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
53631+{
53632+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
53633+ PAGE_SIZE);
53634+}
53635+
53636+__inline__ __u32
53637+to_gr_audit(const __u32 reqmode)
53638+{
53639+ /* masks off auditable permission flags, then shifts them to create
53640+ auditing flags, and adds the special case of append auditing if
53641+ we're requesting write */
53642+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
53643+}
53644+
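+/* translate the userspace address of a subject label into the kernel's
+   copy of it, via the subject map built up during policy load
+*/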
53645+struct acl_subject_label *
53646+lookup_subject_map(const struct acl_subject_label *userp)
53647+{
53648+ unsigned int index = gr_shash(userp, subj_map_set.s_size);
53649+ struct subject_map *match;
53650+
53651+ match = subj_map_set.s_hash[index];
53652+
53653+ while (match && match->user != userp)
53654+ match = match->next;
53655+
53656+ if (match != NULL)
53657+ return match->kernel;
53658+ else
53659+ return NULL;
53660+}
53661+
53662+static void
53663+insert_subj_map_entry(struct subject_map *subjmap)
53664+{
53665+ unsigned int index = gr_shash(subjmap->user, subj_map_set.s_size);
53666+ struct subject_map **curr;
53667+
53668+ subjmap->prev = NULL;
53669+
53670+ curr = &subj_map_set.s_hash[index];
53671+ if (*curr != NULL)
53672+ (*curr)->prev = subjmap;
53673+
53674+ subjmap->next = *curr;
53675+ *curr = subjmap;
53676+
53677+ return;
53678+}
53679+
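+/* resolve a task's role: try a user (or user-domain) role for the uid,
+   then a group (or group-domain) role for the gid, falling back to the
+   default role; a role carrying an allowed_ips list only matches if the
+   task's source IP falls within one of its netmasks
+*/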
53680+static struct acl_role_label *
53681+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
53682+ const gid_t gid)
53683+{
53684+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
53685+ struct acl_role_label *match;
53686+ struct role_allowed_ip *ipp;
53687+ unsigned int x;
53688+ u32 curr_ip = task->signal->curr_ip;
53689+
53690+ task->signal->saved_ip = curr_ip;
53691+
53692+ match = acl_role_set.r_hash[index];
53693+
53694+ while (match) {
53695+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
53696+ for (x = 0; x < match->domain_child_num; x++) {
53697+ if (match->domain_children[x] == uid)
53698+ goto found;
53699+ }
53700+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
53701+ break;
53702+ match = match->next;
53703+ }
53704+found:
53705+ if (match == NULL) {
53706+ try_group:
53707+ index = gr_rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
53708+ match = acl_role_set.r_hash[index];
53709+
53710+ while (match) {
53711+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
53712+ for (x = 0; x < match->domain_child_num; x++) {
53713+ if (match->domain_children[x] == gid)
53714+ goto found2;
53715+ }
53716+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
53717+ break;
53718+ match = match->next;
53719+ }
53720+found2:
53721+ if (match == NULL)
53722+ match = default_role;
53723+ if (match->allowed_ips == NULL)
53724+ return match;
53725+ else {
53726+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
53727+ if (likely
53728+ ((ntohl(curr_ip) & ipp->netmask) ==
53729+ (ntohl(ipp->addr) & ipp->netmask)))
53730+ return match;
53731+ }
53732+ match = default_role;
53733+ }
53734+ } else if (match->allowed_ips == NULL) {
53735+ return match;
53736+ } else {
53737+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
53738+ if (likely
53739+ ((ntohl(curr_ip) & ipp->netmask) ==
53740+ (ntohl(ipp->addr) & ipp->netmask)))
53741+ return match;
53742+ }
53743+ goto try_group;
53744+ }
53745+
53746+ return match;
53747+}
53748+
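+/* the next two lookups differ only in deletion state: the first returns
+   a live (non-GR_DELETED) subject label for the inode/device pair, the
+   second only one already marked GR_DELETED
+*/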
53749+struct acl_subject_label *
53750+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
53751+ const struct acl_role_label *role)
53752+{
53753+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
53754+ struct acl_subject_label *match;
53755+
53756+ match = role->subj_hash[index];
53757+
53758+ while (match && (match->inode != ino || match->device != dev ||
53759+ (match->mode & GR_DELETED))) {
53760+ match = match->next;
53761+ }
53762+
53763+ if (match && !(match->mode & GR_DELETED))
53764+ return match;
53765+ else
53766+ return NULL;
53767+}
53768+
53769+struct acl_subject_label *
53770+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
53771+ const struct acl_role_label *role)
53772+{
53773+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
53774+ struct acl_subject_label *match;
53775+
53776+ match = role->subj_hash[index];
53777+
53778+ while (match && (match->inode != ino || match->device != dev ||
53779+ !(match->mode & GR_DELETED))) {
53780+ match = match->next;
53781+ }
53782+
53783+ if (match && (match->mode & GR_DELETED))
53784+ return match;
53785+ else
53786+ return NULL;
53787+}
53788+
53789+static struct acl_object_label *
53790+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
53791+ const struct acl_subject_label *subj)
53792+{
53793+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
53794+ struct acl_object_label *match;
53795+
53796+ match = subj->obj_hash[index];
53797+
53798+ while (match && (match->inode != ino || match->device != dev ||
53799+ (match->mode & GR_DELETED))) {
53800+ match = match->next;
53801+ }
53802+
53803+ if (match && !(match->mode & GR_DELETED))
53804+ return match;
53805+ else
53806+ return NULL;
53807+}
53808+
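+/* create-time lookup: prefer an object label marked GR_DELETED (the name
+   is presumably being reused), otherwise fall back to a live label for
+   the inode/device pair
+*/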
53809+static struct acl_object_label *
53810+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
53811+ const struct acl_subject_label *subj)
53812+{
53813+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
53814+ struct acl_object_label *match;
53815+
53816+ match = subj->obj_hash[index];
53817+
53818+ while (match && (match->inode != ino || match->device != dev ||
53819+ !(match->mode & GR_DELETED))) {
53820+ match = match->next;
53821+ }
53822+
53823+ if (match && (match->mode & GR_DELETED))
53824+ return match;
53825+
53826+ match = subj->obj_hash[index];
53827+
53828+ while (match && (match->inode != ino || match->device != dev ||
53829+ (match->mode & GR_DELETED))) {
53830+ match = match->next;
53831+ }
53832+
53833+ if (match && !(match->mode & GR_DELETED))
53834+ return match;
53835+ else
53836+ return NULL;
53837+}
53838+
53839+static struct name_entry *
53840+lookup_name_entry(const char *name)
53841+{
53842+ unsigned int len = strlen(name);
53843+ unsigned int key = full_name_hash(name, len);
53844+ unsigned int index = key % name_set.n_size;
53845+ struct name_entry *match;
53846+
53847+ match = name_set.n_hash[index];
53848+
53849+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
53850+ match = match->next;
53851+
53852+ return match;
53853+}
53854+
53855+static struct name_entry *
53856+lookup_name_entry_create(const char *name)
53857+{
53858+ unsigned int len = strlen(name);
53859+ unsigned int key = full_name_hash(name, len);
53860+ unsigned int index = key % name_set.n_size;
53861+ struct name_entry *match;
53862+
53863+ match = name_set.n_hash[index];
53864+
53865+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
53866+ !match->deleted))
53867+ match = match->next;
53868+
53869+ if (match && match->deleted)
53870+ return match;
53871+
53872+ match = name_set.n_hash[index];
53873+
53874+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
53875+ match->deleted))
53876+ match = match->next;
53877+
53878+ if (match && !match->deleted)
53879+ return match;
53880+ else
53881+ return NULL;
53882+}
53883+
53884+static struct inodev_entry *
53885+lookup_inodev_entry(const ino_t ino, const dev_t dev)
53886+{
53887+ unsigned int index = gr_fhash(ino, dev, inodev_set.i_size);
53888+ struct inodev_entry *match;
53889+
53890+ match = inodev_set.i_hash[index];
53891+
53892+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
53893+ match = match->next;
53894+
53895+ return match;
53896+}
53897+
53898+static void
53899+insert_inodev_entry(struct inodev_entry *entry)
53900+{
53901+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
53902+ inodev_set.i_size);
53903+ struct inodev_entry **curr;
53904+
53905+ entry->prev = NULL;
53906+
53907+ curr = &inodev_set.i_hash[index];
53908+ if (*curr != NULL)
53909+ (*curr)->prev = entry;
53910+
53911+ entry->next = *curr;
53912+ *curr = entry;
53913+
53914+ return;
53915+}
53916+
53917+static void
53918+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
53919+{
53920+ unsigned int index =
53921+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
53922+ struct acl_role_label **curr;
53923+ struct acl_role_label *tmp, *tmp2;
53924+
53925+ curr = &acl_role_set.r_hash[index];
53926+
53927+ /* simple case, slot is empty, just set it to our role */
53928+ if (*curr == NULL) {
53929+ *curr = role;
53930+ } else {
53931+ /* example:
53932+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
53933+ 2 -> 3
53934+ */
53935+ /* first check to see if we can already be reached via this slot */
53936+ tmp = *curr;
53937+ while (tmp && tmp != role)
53938+ tmp = tmp->next;
53939+ if (tmp == role) {
53940+ /* we don't need to add ourselves to this slot's chain */
53941+ return;
53942+ }
53943+ /* we need to add ourselves to this chain, two cases */
53944+ if (role->next == NULL) {
53945+ /* simple case, append the current chain to our role */
53946+ role->next = *curr;
53947+ *curr = role;
53948+ } else {
53949+ /* 1 -> 2 -> 3 -> 4
53950+ 2 -> 3 -> 4
53951+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
53952+ */
53953+ /* trickier case: walk our role's chain until we find
53954+ the role for the start of the current slot's chain */
53955+ tmp = role;
53956+ tmp2 = *curr;
53957+ while (tmp->next && tmp->next != tmp2)
53958+ tmp = tmp->next;
53959+ if (tmp->next == tmp2) {
53960+ /* from example above, we found 3, so just
53961+ replace this slot's chain with ours */
53962+ *curr = role;
53963+ } else {
53964+ /* we didn't find a subset of our role's chain
53965+ in the current slot's chain, so append their
53966+ chain to ours, and set us as the first role in
53967+ the slot's chain
53968+
53969+ we could fold this case with the case above,
53970+ but making it explicit for clarity
53971+ */
53972+ tmp->next = tmp2;
53973+ *curr = role;
53974+ }
53975+ }
53976+ }
53977+
53978+ return;
53979+}
53980+
53981+static void
53982+insert_acl_role_label(struct acl_role_label *role)
53983+{
53984+ int i;
53985+
53986+ if (role_list == NULL) {
53987+ role_list = role;
53988+ role->prev = NULL;
53989+ } else {
53990+ role->prev = role_list;
53991+ role_list = role;
53992+ }
53993+
53994+ /* used for hash chains */
53995+ role->next = NULL;
53996+
53997+ if (role->roletype & GR_ROLE_DOMAIN) {
53998+ for (i = 0; i < role->domain_child_num; i++)
53999+ __insert_acl_role_label(role, role->domain_children[i]);
54000+ } else
54001+ __insert_acl_role_label(role, role->uidgid);
54002+}
54003+
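+/* returns 1 if the name is already present or was inserted successfully,
+   0 on allocation failure
+*/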
54004+static int
54005+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
54006+{
54007+ struct name_entry **curr, *nentry;
54008+ struct inodev_entry *ientry;
54009+ unsigned int len = strlen(name);
54010+ unsigned int key = full_name_hash(name, len);
54011+ unsigned int index = key % name_set.n_size;
54012+
54013+ curr = &name_set.n_hash[index];
54014+
54015+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
54016+ curr = &((*curr)->next);
54017+
54018+ if (*curr != NULL)
54019+ return 1;
54020+
54021+ nentry = acl_alloc(sizeof (struct name_entry));
54022+ if (nentry == NULL)
54023+ return 0;
54024+ ientry = acl_alloc(sizeof (struct inodev_entry));
54025+ if (ientry == NULL)
54026+ return 0;
54027+ ientry->nentry = nentry;
54028+
54029+ nentry->key = key;
54030+ nentry->name = name;
54031+ nentry->inode = inode;
54032+ nentry->device = device;
54033+ nentry->len = len;
54034+ nentry->deleted = deleted;
54035+
54036+ nentry->prev = NULL;
54037+ curr = &name_set.n_hash[index];
54038+ if (*curr != NULL)
54039+ (*curr)->prev = nentry;
54040+ nentry->next = *curr;
54041+ *curr = nentry;
54042+
54043+ /* insert us into the table searchable by inode/dev */
54044+ insert_inodev_entry(ientry);
54045+
54046+ return 1;
54047+}
54048+
54049+static void
54050+insert_acl_obj_label(struct acl_object_label *obj,
54051+ struct acl_subject_label *subj)
54052+{
54053+ unsigned int index =
54054+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
54055+ struct acl_object_label **curr;
54056+
54057+
54058+ obj->prev = NULL;
54059+
54060+ curr = &subj->obj_hash[index];
54061+ if (*curr != NULL)
54062+ (*curr)->prev = obj;
54063+
54064+ obj->next = *curr;
54065+ *curr = obj;
54066+
54067+ return;
54068+}
54069+
54070+static void
54071+insert_acl_subj_label(struct acl_subject_label *obj,
54072+ struct acl_role_label *role)
54073+{
54074+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
54075+ struct acl_subject_label **curr;
54076+
54077+ obj->prev = NULL;
54078+
54079+ curr = &role->subj_hash[index];
54080+ if (*curr != NULL)
54081+ (*curr)->prev = obj;
54082+
54083+ obj->next = *curr;
54084+ *curr = obj;
54085+
54086+ return;
54087+}
54088+
54089+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
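+/* table_sizes[] below holds primes just under successive powers of two;
+   create_table() rounds the requested size up to the next such prime, so
+   the expected chain length stays near one entry per bucket
+*/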
54090+
54091+static void *
54092+create_table(__u32 * len, int elementsize)
54093+{
54094+ unsigned int table_sizes[] = {
54095+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
54096+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
54097+ 4194301, 8388593, 16777213, 33554393, 67108859
54098+ };
54099+ void *newtable = NULL;
54100+ unsigned int pwr = 0;
54101+
54102+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
54103+ table_sizes[pwr] <= *len)
54104+ pwr++;
54105+
54106+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
54107+ return newtable;
54108+
54109+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
54110+ newtable =
54111+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
54112+ else
54113+ newtable = vmalloc(table_sizes[pwr] * elementsize);
54114+
54115+ *len = table_sizes[pwr];
54116+
54117+ return newtable;
54118+}
54119+
54120+static int
54121+init_variables(const struct gr_arg *arg)
54122+{
54123+ struct task_struct *reaper = init_pid_ns.child_reaper;
54124+ unsigned int stacksize;
54125+
54126+ subj_map_set.s_size = arg->role_db.num_subjects;
54127+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
54128+ name_set.n_size = arg->role_db.num_objects;
54129+ inodev_set.i_size = arg->role_db.num_objects;
54130+
54131+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
54132+ !name_set.n_size || !inodev_set.i_size)
54133+ return 1;
54134+
54135+ if (!gr_init_uidset())
54136+ return 1;
54137+
54138+ /* set up the stack that holds allocation info */
54139+
54140+ stacksize = arg->role_db.num_pointers + 5;
54141+
54142+ if (!acl_alloc_stack_init(stacksize))
54143+ return 1;
54144+
54145+ /* grab reference for the real root dentry and vfsmount */
54146+ get_fs_root(reaper->fs, &real_root);
54147+
54148+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54149+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
54150+#endif
54151+
54152+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
54153+ if (fakefs_obj_rw == NULL)
54154+ return 1;
54155+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
54156+
54157+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
54158+ if (fakefs_obj_rwx == NULL)
54159+ return 1;
54160+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
54161+
54162+ subj_map_set.s_hash =
54163+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
54164+ acl_role_set.r_hash =
54165+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
54166+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
54167+ inodev_set.i_hash =
54168+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
54169+
54170+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
54171+ !name_set.n_hash || !inodev_set.i_hash)
54172+ return 1;
54173+
54174+ memset(subj_map_set.s_hash, 0,
54175+ sizeof(struct subject_map *) * subj_map_set.s_size);
54176+ memset(acl_role_set.r_hash, 0,
54177+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
54178+ memset(name_set.n_hash, 0,
54179+ sizeof (struct name_entry *) * name_set.n_size);
54180+ memset(inodev_set.i_hash, 0,
54181+ sizeof (struct inodev_entry *) * inodev_set.i_size);
54182+
54183+ return 0;
54184+}
54185+
54186+/* free information not needed after startup
54187+ currently contains user->kernel pointer mappings for subjects
54188+*/
54189+
54190+static void
54191+free_init_variables(void)
54192+{
54193+ __u32 i;
54194+
54195+ if (subj_map_set.s_hash) {
54196+ for (i = 0; i < subj_map_set.s_size; i++) {
54197+ if (subj_map_set.s_hash[i]) {
54198+ kfree(subj_map_set.s_hash[i]);
54199+ subj_map_set.s_hash[i] = NULL;
54200+ }
54201+ }
54202+
54203+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
54204+ PAGE_SIZE)
54205+ kfree(subj_map_set.s_hash);
54206+ else
54207+ vfree(subj_map_set.s_hash);
54208+ }
54209+
54210+ return;
54211+}
54212+
54213+static void
54214+free_variables(void)
54215+{
54216+ struct acl_subject_label *s;
54217+ struct acl_role_label *r;
54218+ struct task_struct *task, *task2;
54219+ unsigned int x;
54220+
54221+ gr_clear_learn_entries();
54222+
54223+ read_lock(&tasklist_lock);
54224+ do_each_thread(task2, task) {
54225+ task->acl_sp_role = 0;
54226+ task->acl_role_id = 0;
54227+ task->acl = NULL;
54228+ task->role = NULL;
54229+ } while_each_thread(task2, task);
54230+ read_unlock(&tasklist_lock);
54231+
54232+ /* release the reference to the real root dentry and vfsmount */
54233+ path_put(&real_root);
54234+ memset(&real_root, 0, sizeof(real_root));
54235+
54236+ /* free all object hash tables */
54237+
54238+ FOR_EACH_ROLE_START(r)
54239+ if (r->subj_hash == NULL)
54240+ goto next_role;
54241+ FOR_EACH_SUBJECT_START(r, s, x)
54242+ if (s->obj_hash == NULL)
54243+ break;
54244+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
54245+ kfree(s->obj_hash);
54246+ else
54247+ vfree(s->obj_hash);
54248+ FOR_EACH_SUBJECT_END(s, x)
54249+ FOR_EACH_NESTED_SUBJECT_START(r, s)
54250+ if (s->obj_hash == NULL)
54251+ break;
54252+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
54253+ kfree(s->obj_hash);
54254+ else
54255+ vfree(s->obj_hash);
54256+ FOR_EACH_NESTED_SUBJECT_END(s)
54257+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
54258+ kfree(r->subj_hash);
54259+ else
54260+ vfree(r->subj_hash);
54261+ r->subj_hash = NULL;
54262+next_role:
54263+ FOR_EACH_ROLE_END(r)
54264+
54265+ acl_free_all();
54266+
54267+ if (acl_role_set.r_hash) {
54268+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
54269+ PAGE_SIZE)
54270+ kfree(acl_role_set.r_hash);
54271+ else
54272+ vfree(acl_role_set.r_hash);
54273+ }
54274+ if (name_set.n_hash) {
54275+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
54276+ PAGE_SIZE)
54277+ kfree(name_set.n_hash);
54278+ else
54279+ vfree(name_set.n_hash);
54280+ }
54281+
54282+ if (inodev_set.i_hash) {
54283+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
54284+ PAGE_SIZE)
54285+ kfree(inodev_set.i_hash);
54286+ else
54287+ vfree(inodev_set.i_hash);
54288+ }
54289+
54290+ gr_free_uidset();
54291+
54292+ memset(&name_set, 0, sizeof (struct name_db));
54293+ memset(&inodev_set, 0, sizeof (struct inodev_db));
54294+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
54295+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
54296+
54297+ default_role = NULL;
54298+ kernel_role = NULL;
54299+ role_list = NULL;
54300+
54301+ return;
54302+}
54303+
54304+static __u32
54305+count_user_objs(struct acl_object_label *userp)
54306+{
54307+ struct acl_object_label o_tmp;
54308+ __u32 num = 0;
54309+
54310+ while (userp) {
54311+ if (copy_from_user(&o_tmp, userp,
54312+ sizeof (struct acl_object_label)))
54313+ break;
54314+
54315+ userp = o_tmp.prev;
54316+ num++;
54317+ }
54318+
54319+ return num;
54320+}
54321+
54322+static struct acl_subject_label *
54323+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
54324+
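+/* copy an object's chain of glob-pattern labels in from userspace,
+   replacing each userspace filename pointer with a kernel copy
+*/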
54325+static int
54326+copy_user_glob(struct acl_object_label *obj)
54327+{
54328+ struct acl_object_label *g_tmp, **guser;
54329+ unsigned int len;
54330+ char *tmp;
54331+
54332+ if (obj->globbed == NULL)
54333+ return 0;
54334+
54335+ guser = &obj->globbed;
54336+ while (*guser) {
54337+ g_tmp = (struct acl_object_label *)
54338+ acl_alloc(sizeof (struct acl_object_label));
54339+ if (g_tmp == NULL)
54340+ return -ENOMEM;
54341+
54342+ if (copy_from_user(g_tmp, *guser,
54343+ sizeof (struct acl_object_label)))
54344+ return -EFAULT;
54345+
54346+ len = strnlen_user(g_tmp->filename, PATH_MAX);
54347+
54348+ if (!len || len >= PATH_MAX)
54349+ return -EINVAL;
54350+
54351+ if ((tmp = (char *) acl_alloc(len)) == NULL)
54352+ return -ENOMEM;
54353+
54354+ if (copy_from_user(tmp, g_tmp->filename, len))
54355+ return -EFAULT;
54356+ tmp[len-1] = '\0';
54357+ g_tmp->filename = tmp;
54358+
54359+ *guser = g_tmp;
54360+ guser = &(g_tmp->next);
54361+ }
54362+
54363+ return 0;
54364+}
54365+
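+/* copy a subject's object labels in from userspace (user lists are linked
+   through ->prev); each object is hashed under the subject, added to the
+   name table, and may pull in a glob chain and a nested subject
+*/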
54366+static int
54367+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
54368+ struct acl_role_label *role)
54369+{
54370+ struct acl_object_label *o_tmp;
54371+ unsigned int len;
54372+ int ret;
54373+ char *tmp;
54374+
54375+ while (userp) {
54376+ if ((o_tmp = (struct acl_object_label *)
54377+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
54378+ return -ENOMEM;
54379+
54380+ if (copy_from_user(o_tmp, userp,
54381+ sizeof (struct acl_object_label)))
54382+ return -EFAULT;
54383+
54384+ userp = o_tmp->prev;
54385+
54386+ len = strnlen_user(o_tmp->filename, PATH_MAX);
54387+
54388+ if (!len || len >= PATH_MAX)
54389+ return -EINVAL;
54390+
54391+ if ((tmp = (char *) acl_alloc(len)) == NULL)
54392+ return -ENOMEM;
54393+
54394+ if (copy_from_user(tmp, o_tmp->filename, len))
54395+ return -EFAULT;
54396+ tmp[len-1] = '\0';
54397+ o_tmp->filename = tmp;
54398+
54399+ insert_acl_obj_label(o_tmp, subj);
54400+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
54401+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
54402+ return -ENOMEM;
54403+
54404+ ret = copy_user_glob(o_tmp);
54405+ if (ret)
54406+ return ret;
54407+
54408+ if (o_tmp->nested) {
54409+ int already_copied;
54410+
54411+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
54412+ if (IS_ERR(o_tmp->nested))
54413+ return PTR_ERR(o_tmp->nested);
54414+
54415+ /* insert into nested subject list if we haven't copied this one yet
54416+ to prevent duplicate entries */
54417+ if (!already_copied) {
54418+ o_tmp->nested->next = role->hash->first;
54419+ role->hash->first = o_tmp->nested;
54420+ }
54421+ }
54422+ }
54423+
54424+ return 0;
54425+}
54426+
54427+static __u32
54428+count_user_subjs(struct acl_subject_label *userp)
54429+{
54430+ struct acl_subject_label s_tmp;
54431+ __u32 num = 0;
54432+
54433+ while (userp) {
54434+ if (copy_from_user(&s_tmp, userp,
54435+ sizeof (struct acl_subject_label)))
54436+ break;
54437+
54438+ userp = s_tmp.prev;
+ num++;
54439+ }
54440+
54441+ return num;
54442+}
54443+
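+/* the next two helpers rebuild userspace ->prev chains (allowed source
+   IPs and role transitions) as kernel-side doubly linked lists
+*/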
54444+static int
54445+copy_user_allowedips(struct acl_role_label *rolep)
54446+{
54447+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
54448+
54449+ ruserip = rolep->allowed_ips;
54450+
54451+ while (ruserip) {
54452+ rlast = rtmp;
54453+
54454+ if ((rtmp = (struct role_allowed_ip *)
54455+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
54456+ return -ENOMEM;
54457+
54458+ if (copy_from_user(rtmp, ruserip,
54459+ sizeof (struct role_allowed_ip)))
54460+ return -EFAULT;
54461+
54462+ ruserip = rtmp->prev;
54463+
54464+ if (!rlast) {
54465+ rtmp->prev = NULL;
54466+ rolep->allowed_ips = rtmp;
54467+ } else {
54468+ rlast->next = rtmp;
54469+ rtmp->prev = rlast;
54470+ }
54471+
54472+ if (!ruserip)
54473+ rtmp->next = NULL;
54474+ }
54475+
54476+ return 0;
54477+}
54478+
54479+static int
54480+copy_user_transitions(struct acl_role_label *rolep)
54481+{
54482+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
54483+
54484+ unsigned int len;
54485+ char *tmp;
54486+
54487+ rusertp = rolep->transitions;
54488+
54489+ while (rusertp) {
54490+ rlast = rtmp;
54491+
54492+ if ((rtmp = (struct role_transition *)
54493+ acl_alloc(sizeof (struct role_transition))) == NULL)
54494+ return -ENOMEM;
54495+
54496+ if (copy_from_user(rtmp, rusertp,
54497+ sizeof (struct role_transition)))
54498+ return -EFAULT;
54499+
54500+ rusertp = rtmp->prev;
54501+
54502+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
54503+
54504+ if (!len || len >= GR_SPROLE_LEN)
54505+ return -EINVAL;
54506+
54507+ if ((tmp = (char *) acl_alloc(len)) == NULL)
54508+ return -ENOMEM;
54509+
54510+ if (copy_from_user(tmp, rtmp->rolename, len))
54511+ return -EFAULT;
54512+ tmp[len-1] = '\0';
54513+ rtmp->rolename = tmp;
54514+
54515+ if (!rlast) {
54516+ rtmp->prev = NULL;
54517+ rolep->transitions = rtmp;
54518+ } else {
54519+ rlast->next = rtmp;
54520+ rtmp->prev = rlast;
54521+ }
54522+
54523+ if (!rusertp)
54524+ rtmp->next = NULL;
54525+ }
54526+
54527+ return 0;
54528+}
54529+
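+/* copy a single subject and everything reachable from it: filename,
+   uid/gid transition tables, object hash, parent subject, and ip acls.
+   results are memoized in the subject map so shared and nested subjects
+   are copied only once (already_copied reports such a cache hit)
+*/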
54530+static struct acl_subject_label *
54531+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
54532+{
54533+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
54534+ unsigned int len;
54535+ char *tmp;
54536+ __u32 num_objs;
54537+ struct acl_ip_label **i_tmp, *i_utmp2;
54538+ struct gr_hash_struct ghash;
54539+ struct subject_map *subjmap;
54540+ unsigned int i_num;
54541+ int err;
54542+
54543+ if (already_copied != NULL)
54544+ *already_copied = 0;
54545+
54546+ s_tmp = lookup_subject_map(userp);
54547+
54548+ /* we've already copied this subject into the kernel, just return
54549+ the reference to it, and don't copy it over again
54550+ */
54551+ if (s_tmp) {
54552+ if (already_copied != NULL)
54553+ *already_copied = 1;
54554+ return(s_tmp);
54555+ }
54556+
54557+ if ((s_tmp = (struct acl_subject_label *)
54558+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
54559+ return ERR_PTR(-ENOMEM);
54560+
54561+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
54562+ if (subjmap == NULL)
54563+ return ERR_PTR(-ENOMEM);
54564+
54565+ subjmap->user = userp;
54566+ subjmap->kernel = s_tmp;
54567+ insert_subj_map_entry(subjmap);
54568+
54569+ if (copy_from_user(s_tmp, userp,
54570+ sizeof (struct acl_subject_label)))
54571+ return ERR_PTR(-EFAULT);
54572+
54573+ len = strnlen_user(s_tmp->filename, PATH_MAX);
54574+
54575+ if (!len || len >= PATH_MAX)
54576+ return ERR_PTR(-EINVAL);
54577+
54578+ if ((tmp = (char *) acl_alloc(len)) == NULL)
54579+ return ERR_PTR(-ENOMEM);
54580+
54581+ if (copy_from_user(tmp, s_tmp->filename, len))
54582+ return ERR_PTR(-EFAULT);
54583+ tmp[len-1] = '\0';
54584+ s_tmp->filename = tmp;
54585+
54586+ if (!strcmp(s_tmp->filename, "/"))
54587+ role->root_label = s_tmp;
54588+
54589+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
54590+ return ERR_PTR(-EFAULT);
54591+
54592+ /* copy user and group transition tables */
54593+
54594+ if (s_tmp->user_trans_num) {
54595+ uid_t *uidlist;
54596+
54597+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
54598+ if (uidlist == NULL)
54599+ return ERR_PTR(-ENOMEM);
54600+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
54601+ return ERR_PTR(-EFAULT);
54602+
54603+ s_tmp->user_transitions = uidlist;
54604+ }
54605+
54606+ if (s_tmp->group_trans_num) {
54607+ gid_t *gidlist;
54608+
54609+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
54610+ if (gidlist == NULL)
54611+ return ERR_PTR(-ENOMEM);
54612+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
54613+ return ERR_PTR(-EFAULT);
54614+
54615+ s_tmp->group_transitions = gidlist;
54616+ }
54617+
54618+ /* set up object hash table */
54619+ num_objs = count_user_objs(ghash.first);
54620+
54621+ s_tmp->obj_hash_size = num_objs;
54622+ s_tmp->obj_hash =
54623+ (struct acl_object_label **)
54624+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
54625+
54626+ if (!s_tmp->obj_hash)
54627+ return ERR_PTR(-ENOMEM);
54628+
54629+ memset(s_tmp->obj_hash, 0,
54630+ s_tmp->obj_hash_size *
54631+ sizeof (struct acl_object_label *));
54632+
54633+ /* add in objects */
54634+ err = copy_user_objs(ghash.first, s_tmp, role);
54635+
54636+ if (err)
54637+ return ERR_PTR(err);
54638+
54639+ /* set pointer for parent subject */
54640+ if (s_tmp->parent_subject) {
54641+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
54642+
54643+ if (IS_ERR(s_tmp2))
54644+ return s_tmp2;
54645+
54646+ s_tmp->parent_subject = s_tmp2;
54647+ }
54648+
54649+ /* add in ip acls */
54650+
54651+ if (!s_tmp->ip_num) {
54652+ s_tmp->ips = NULL;
54653+ goto insert;
54654+ }
54655+
54656+ i_tmp =
54657+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
54658+ sizeof (struct acl_ip_label *));
54659+
54660+ if (!i_tmp)
54661+ return ERR_PTR(-ENOMEM);
54662+
54663+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
54664+ *(i_tmp + i_num) =
54665+ (struct acl_ip_label *)
54666+ acl_alloc(sizeof (struct acl_ip_label));
54667+ if (!*(i_tmp + i_num))
54668+ return ERR_PTR(-ENOMEM);
54669+
54670+ if (copy_from_user
54671+ (&i_utmp2, s_tmp->ips + i_num,
54672+ sizeof (struct acl_ip_label *)))
54673+ return ERR_PTR(-EFAULT);
54674+
54675+ if (copy_from_user
54676+ (*(i_tmp + i_num), i_utmp2,
54677+ sizeof (struct acl_ip_label)))
54678+ return ERR_PTR(-EFAULT);
54679+
54680+ if ((*(i_tmp + i_num))->iface == NULL)
54681+ continue;
54682+
54683+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
54684+ if (!len || len >= IFNAMSIZ)
54685+ return ERR_PTR(-EINVAL);
54686+ tmp = acl_alloc(len);
54687+ if (tmp == NULL)
54688+ return ERR_PTR(-ENOMEM);
54689+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
54690+ return ERR_PTR(-EFAULT);
54691+ (*(i_tmp + i_num))->iface = tmp;
54692+ }
54693+
54694+ s_tmp->ips = i_tmp;
54695+
54696+insert:
54697+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
54698+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
54699+ return ERR_PTR(-ENOMEM);
54700+
54701+ return s_tmp;
54702+}
54703+
54704+static int
54705+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
54706+{
54707+ struct acl_subject_label s_pre;
54708+ struct acl_subject_label * ret;
54709+ int err;
54710+
54711+ while (userp) {
54712+ if (copy_from_user(&s_pre, userp,
54713+ sizeof (struct acl_subject_label)))
54714+ return -EFAULT;
54715+
54716+ ret = do_copy_user_subj(userp, role, NULL);
54717+
54718+ err = PTR_ERR(ret);
54719+ if (IS_ERR(ret))
54720+ return err;
54721+
54722+ insert_acl_subj_label(ret, role);
54723+
54724+ userp = s_pre.prev;
54725+ }
54726+
54727+ return 0;
54728+}
54729+
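+/* top-level copy of the RBAC policy from userspace: special role
+   passwords first, then each role along with its allowed IPs, domain
+   children, transitions, and subject tree.  both a default role and the
+   kernel role must be present
+*/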
54730+static int
54731+copy_user_acl(struct gr_arg *arg)
54732+{
54733+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
54734+ struct acl_subject_label *subj_list;
54735+ struct sprole_pw *sptmp;
54736+ struct gr_hash_struct *ghash;
54737+ uid_t *domainlist;
54738+ unsigned int r_num;
54739+ unsigned int len;
54740+ char *tmp;
54741+ int err = 0;
54742+ __u16 i;
54743+ __u32 num_subjs;
54744+
54745+ /* we need a default and kernel role */
54746+ if (arg->role_db.num_roles < 2)
54747+ return -EINVAL;
54748+
54749+ /* copy special role authentication info from userspace */
54750+
54751+ num_sprole_pws = arg->num_sprole_pws;
54752+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
54753+
54754+ if (!acl_special_roles && num_sprole_pws)
54755+ return -ENOMEM;
54756+
54757+ for (i = 0; i < num_sprole_pws; i++) {
54758+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
54759+ if (!sptmp)
54760+ return -ENOMEM;
54761+ if (copy_from_user(sptmp, arg->sprole_pws + i,
54762+ sizeof (struct sprole_pw)))
54763+ return -EFAULT;
54764+
54765+ len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
54766+
54767+ if (!len || len >= GR_SPROLE_LEN)
54768+ return -EINVAL;
54769+
54770+ if ((tmp = (char *) acl_alloc(len)) == NULL)
54771+ return -ENOMEM;
54772+
54773+ if (copy_from_user(tmp, sptmp->rolename, len))
54774+ return -EFAULT;
54775+
54776+ tmp[len-1] = '\0';
54777+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54778+ printk(KERN_ALERT "Copying special role %s\n", tmp);
54779+#endif
54780+ sptmp->rolename = tmp;
54781+ acl_special_roles[i] = sptmp;
54782+ }
54783+
54784+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
54785+
54786+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
54787+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
54788+
54789+ if (!r_tmp)
54790+ return -ENOMEM;
54791+
54792+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
54793+ sizeof (struct acl_role_label *)))
54794+ return -EFAULT;
54795+
54796+ if (copy_from_user(r_tmp, r_utmp2,
54797+ sizeof (struct acl_role_label)))
54798+ return -EFAULT;
54799+
54800+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
54801+
54802+ if (!len || len >= GR_SPROLE_LEN)
54803+ return -EINVAL;
54804+
54805+ if ((tmp = (char *) acl_alloc(len)) == NULL)
54806+ return -ENOMEM;
54807+
54808+ if (copy_from_user(tmp, r_tmp->rolename, len))
54809+ return -EFAULT;
54810+
54811+ tmp[len-1] = '\0';
54812+ r_tmp->rolename = tmp;
54813+
54814+ if (!strcmp(r_tmp->rolename, "default")
54815+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
54816+ default_role = r_tmp;
54817+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
54818+ kernel_role = r_tmp;
54819+ }
54820+
54821+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
54822+ return -ENOMEM;
54823+
54824+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
54825+ return -EFAULT;
54826+
54827+ r_tmp->hash = ghash;
54828+
54829+ num_subjs = count_user_subjs(r_tmp->hash->first);
54830+
54831+ r_tmp->subj_hash_size = num_subjs;
54832+ r_tmp->subj_hash =
54833+ (struct acl_subject_label **)
54834+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
54835+
54836+ if (!r_tmp->subj_hash)
54837+ return -ENOMEM;
54838+
54839+ err = copy_user_allowedips(r_tmp);
54840+ if (err)
54841+ return err;
54842+
54843+ /* copy domain info */
54844+ if (r_tmp->domain_children != NULL) {
54845+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
54846+ if (domainlist == NULL)
54847+ return -ENOMEM;
54848+
54849+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
54850+ return -EFAULT;
54851+
54852+ r_tmp->domain_children = domainlist;
54853+ }
54854+
54855+ err = copy_user_transitions(r_tmp);
54856+ if (err)
54857+ return err;
54858+
54859+ memset(r_tmp->subj_hash, 0,
54860+ r_tmp->subj_hash_size *
54861+ sizeof (struct acl_subject_label *));
54862+
54863+ /* acquire the list of subjects, then NULL out
54864+ the list prior to parsing the subjects for this role,
54865+ as during this parsing the list is replaced with a list
54866+ of *nested* subjects for the role
54867+ */
54868+ subj_list = r_tmp->hash->first;
54869+
54870+ /* set nested subject list to null */
54871+ r_tmp->hash->first = NULL;
54872+
54873+ err = copy_user_subjs(subj_list, r_tmp);
54874+
54875+ if (err)
54876+ return err;
54877+
54878+ insert_acl_role_label(r_tmp);
54879+ }
54880+
54881+ if (default_role == NULL || kernel_role == NULL)
54882+ return -EINVAL;
54883+
54884+ return err;
54885+}
54886+
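+/* bring the RBAC system up from a userspace policy: set up the hash
+   tables, copy the policy in, have gr_set_acls() apply it to existing
+   tasks, and only then flip GR_READY; any failure tears the state down
+*/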
54887+static int
54888+gracl_init(struct gr_arg *args)
54889+{
54890+ int error = 0;
54891+
54892+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
54893+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
54894+
54895+ if (init_variables(args)) {
54896+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
54897+ error = -ENOMEM;
54898+ free_variables();
54899+ goto out;
54900+ }
54901+
54902+ error = copy_user_acl(args);
54903+ free_init_variables();
54904+ if (error) {
54905+ free_variables();
54906+ goto out;
54907+ }
54908+
54909+ if ((error = gr_set_acls(0))) {
54910+ free_variables();
54911+ goto out;
54912+ }
54913+
54914+ pax_open_kernel();
54915+ gr_status |= GR_READY;
54916+ pax_close_kernel();
54917+
54918+ out:
54919+ return error;
54920+}
54921+
54922+/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
54923+
54924+static int
54925+glob_match(const char *p, const char *n)
54926+{
54927+ char c;
54928+
54929+ while ((c = *p++) != '\0') {
54930+ switch (c) {
54931+ case '?':
54932+ if (*n == '\0')
54933+ return 1;
54934+ else if (*n == '/')
54935+ return 1;
54936+ break;
54937+ case '\\':
54938+ if (*n != c)
54939+ return 1;
54940+ break;
54941+ case '*':
54942+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
54943+ if (*n == '/')
54944+ return 1;
54945+ else if (c == '?') {
54946+ if (*n == '\0')
54947+ return 1;
54948+ else
54949+ ++n;
54950+ }
54951+ }
54952+ if (c == '\0') {
54953+ return 0;
54954+ } else {
54955+ const char *endp;
54956+
54957+ if ((endp = strchr(n, '/')) == NULL)
54958+ endp = n + strlen(n);
54959+
54960+ if (c == '[') {
54961+ for (--p; n < endp; ++n)
54962+ if (!glob_match(p, n))
54963+ return 0;
54964+ } else if (c == '/') {
54965+ while (*n != '\0' && *n != '/')
54966+ ++n;
54967+ if (*n == '/' && !glob_match(p, n + 1))
54968+ return 0;
54969+ } else {
54970+ for (--p; n < endp; ++n)
54971+ if (*n == c && !glob_match(p, n))
54972+ return 0;
54973+ }
54974+
54975+ return 1;
54976+ }
54977+ case '[':
54978+ {
54979+ int not;
54980+ char cold;
54981+
54982+ if (*n == '\0' || *n == '/')
54983+ return 1;
54984+
54985+ not = (*p == '!' || *p == '^');
54986+ if (not)
54987+ ++p;
54988+
54989+ c = *p++;
54990+ for (;;) {
54991+ unsigned char fn = (unsigned char)*n;
54992+
54993+ if (c == '\0')
54994+ return 1;
54995+ else {
54996+ if (c == fn)
54997+ goto matched;
54998+ cold = c;
54999+ c = *p++;
55000+
55001+ if (c == '-' && *p != ']') {
55002+ unsigned char cend = *p++;
55003+
55004+ if (cend == '\0')
55005+ return 1;
55006+
55007+ if (cold <= fn && fn <= cend)
55008+ goto matched;
55009+
55010+ c = *p++;
55011+ }
55012+ }
55013+
55014+ if (c == ']')
55015+ break;
55016+ }
55017+ if (!not)
55018+ return 1;
55019+ break;
55020+ matched:
55021+ while (c != ']') {
55022+ if (c == '\0')
55023+ return 1;
55024+
55025+ c = *p++;
55026+ }
55027+ if (not)
55028+ return 1;
55029+ }
55030+ break;
55031+ default:
55032+ if (c != *n)
55033+ return 1;
55034+ }
55035+
55036+ ++n;
55037+ }
55038+
55039+ if (*n == '\0')
55040+ return 0;
55041+
55042+ if (*n == '/')
55043+ return 0;
55044+
55045+ return 1;
55046+}
55047+
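+/* return the first glob label in policy order whose pattern matches the
+   canonical path of the dentry, computing that path lazily
+*/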
55048+static struct acl_object_label *
55049+chk_glob_label(struct acl_object_label *globbed,
55050+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
55051+{
55052+ struct acl_object_label *tmp;
55053+
55054+ if (*path == NULL)
55055+ *path = gr_to_filename_nolock(dentry, mnt);
55056+
55057+ tmp = globbed;
55058+
55059+ while (tmp) {
55060+ if (!glob_match(tmp->filename, *path))
55061+ return tmp;
55062+ tmp = tmp->next;
55063+ }
55064+
55065+ return NULL;
55066+}
55067+
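+/* look up an object label for the inode/device against this subject,
+   then against each parent subject in turn; a glob match on the original
+   path refines the result when requested
+*/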
55068+static struct acl_object_label *
55069+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
55070+ const ino_t curr_ino, const dev_t curr_dev,
55071+ const struct acl_subject_label *subj, char **path, const int checkglob)
55072+{
55073+ struct acl_subject_label *tmpsubj;
55074+ struct acl_object_label *retval;
55075+ struct acl_object_label *retval2;
55076+
55077+ tmpsubj = (struct acl_subject_label *) subj;
55078+ read_lock(&gr_inode_lock);
55079+ do {
55080+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
55081+ if (retval) {
55082+ if (checkglob && retval->globbed) {
55083+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
55084+ if (retval2)
55085+ retval = retval2;
55086+ }
55087+ break;
55088+ }
55089+ } while ((tmpsubj = tmpsubj->parent_subject));
55090+ read_unlock(&gr_inode_lock);
55091+
55092+ return retval;
55093+}
55094+
55095+static __inline__ struct acl_object_label *
55096+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
55097+ struct dentry *curr_dentry,
55098+ const struct acl_subject_label *subj, char **path, const int checkglob)
55099+{
55100+ int newglob = checkglob;
55101+ ino_t inode;
55102+ dev_t device;
55103+
55104+ /* if we aren't yet checking a subdirectory of the original path, don't do glob checking,
55105+    as we don't want a / * rule to match instead of the / object itself.
55106+    don't do this for create lookups that call this function, though, since they look up
55107+    on the parent and thus need glob checks on all paths
55108+ */
55109+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
55110+ newglob = GR_NO_GLOB;
55111+
55112+ spin_lock(&curr_dentry->d_lock);
55113+ inode = curr_dentry->d_inode->i_ino;
55114+ device = __get_dev(curr_dentry);
55115+ spin_unlock(&curr_dentry->d_lock);
55116+
55117+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
55118+}
55119+
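+/* find the object label governing a path: unlinked shmem/hugetlbfs
+   inodes, pipes, sockets, and private inodes get a fake label, otherwise
+   walk from the dentry toward the real root and take the deepest (most
+   specific) label found
+*/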
55120+static struct acl_object_label *
55121+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
55122+ const struct acl_subject_label *subj, char *path, const int checkglob)
55123+{
55124+ struct dentry *dentry = (struct dentry *) l_dentry;
55125+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
55126+ struct mount *real_mnt = real_mount(mnt);
55127+ struct acl_object_label *retval;
55128+ struct dentry *parent;
55129+
55130+ write_seqlock(&rename_lock);
55131+ br_read_lock(&vfsmount_lock);
55132+
55133+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
55134+#ifdef CONFIG_NET
55135+ mnt == sock_mnt ||
55136+#endif
55137+#ifdef CONFIG_HUGETLBFS
55138+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
55139+#endif
55140+ /* ignore Eric Biederman */
55141+ IS_PRIVATE(l_dentry->d_inode))) {
55142+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
55143+ goto out;
55144+ }
55145+
55146+ for (;;) {
55147+ if (dentry == real_root.dentry && mnt == real_root.mnt)
55148+ break;
55149+
55150+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
55151+ if (!mnt_has_parent(real_mnt))
55152+ break;
55153+
55154+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
55155+ if (retval != NULL)
55156+ goto out;
55157+
55158+ dentry = real_mnt->mnt_mountpoint;
55159+ real_mnt = real_mnt->mnt_parent;
55160+ mnt = &real_mnt->mnt;
55161+ continue;
55162+ }
55163+
55164+ parent = dentry->d_parent;
55165+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
55166+ if (retval != NULL)
55167+ goto out;
55168+
55169+ dentry = parent;
55170+ }
55171+
55172+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
55173+
55174+ /* real_root is pinned so we don't have to hold a reference */
55175+ if (retval == NULL)
55176+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
55177+out:
55178+ br_read_unlock(&vfsmount_lock);
55179+ write_sequnlock(&rename_lock);
55180+
55181+ BUG_ON(retval == NULL);
55182+
55183+ return retval;
55184+}
55185+
55186+static __inline__ struct acl_object_label *
55187+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
55188+ const struct acl_subject_label *subj)
55189+{
55190+ char *path = NULL;
55191+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
55192+}
55193+
55194+static __inline__ struct acl_object_label *
55195+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
55196+ const struct acl_subject_label *subj)
55197+{
55198+ char *path = NULL;
55199+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
55200+}
55201+
55202+static __inline__ struct acl_object_label *
55203+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
55204+ const struct acl_subject_label *subj, char *path)
55205+{
55206+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
55207+}
55208+
55209+static struct acl_subject_label *
55210+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
55211+ const struct acl_role_label *role)
55212+{
55213+ struct dentry *dentry = (struct dentry *) l_dentry;
55214+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
55215+ struct mount *real_mnt = real_mount(mnt);
55216+ struct acl_subject_label *retval;
55217+ struct dentry *parent;
55218+
55219+ write_seqlock(&rename_lock);
55220+ br_read_lock(&vfsmount_lock);
55221+
55222+ for (;;) {
55223+ if (dentry == real_root.dentry && mnt == real_root.mnt)
55224+ break;
55225+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
55226+ if (!mnt_has_parent(real_mnt))
55227+ break;
55228+
55229+ spin_lock(&dentry->d_lock);
55230+ read_lock(&gr_inode_lock);
55231+ retval =
55232+ lookup_acl_subj_label(dentry->d_inode->i_ino,
55233+ __get_dev(dentry), role);
55234+ read_unlock(&gr_inode_lock);
55235+ spin_unlock(&dentry->d_lock);
55236+ if (retval != NULL)
55237+ goto out;
55238+
55239+ dentry = real_mnt->mnt_mountpoint;
55240+ real_mnt = real_mnt->mnt_parent;
55241+ mnt = &real_mnt->mnt;
55242+ continue;
55243+ }
55244+
55245+ spin_lock(&dentry->d_lock);
55246+ read_lock(&gr_inode_lock);
55247+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
55248+ __get_dev(dentry), role);
55249+ read_unlock(&gr_inode_lock);
55250+ parent = dentry->d_parent;
55251+ spin_unlock(&dentry->d_lock);
55252+
55253+ if (retval != NULL)
55254+ goto out;
55255+
55256+ dentry = parent;
55257+ }
55258+
55259+ spin_lock(&dentry->d_lock);
55260+ read_lock(&gr_inode_lock);
55261+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
55262+ __get_dev(dentry), role);
55263+ read_unlock(&gr_inode_lock);
55264+ spin_unlock(&dentry->d_lock);
55265+
55266+ if (unlikely(retval == NULL)) {
55267+ /* real_root is pinned, we don't need to hold a reference */
55268+ read_lock(&gr_inode_lock);
55269+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
55270+ __get_dev(real_root.dentry), role);
55271+ read_unlock(&gr_inode_lock);
55272+ }
55273+out:
55274+ br_read_unlock(&vfsmount_lock);
55275+ write_sequnlock(&rename_lock);
55276+
55277+ BUG_ON(retval == NULL);
55278+
55279+ return retval;
55280+}
55281+
55282+static void
55283+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
55284+{
55285+ struct task_struct *task = current;
55286+ const struct cred *cred = current_cred();
55287+
55288+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
55289+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
55290+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
55291+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
55292+
55293+ return;
55294+}
55295+
55296+static void
55297+gr_log_learn_id_change(const char type, const unsigned int real,
55298+ const unsigned int effective, const unsigned int fs)
55299+{
55300+ struct task_struct *task = current;
55301+ const struct cred *cred = current_cred();
55302+
55303+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
55304+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
55305+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
55306+ type, real, effective, fs, &task->signal->saved_ip);
55307+
55308+ return;
55309+}
55310+
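+/* central file access check: mask the requested mode against the matched
+   object's mode.  in learning mode the access is granted and logged
+   rather than denied
+*/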
55311+__u32
55312+gr_search_file(const struct dentry * dentry, const __u32 mode,
55313+ const struct vfsmount * mnt)
55314+{
55315+ __u32 retval = mode;
55316+ struct acl_subject_label *curracl;
55317+ struct acl_object_label *currobj;
55318+
55319+ if (unlikely(!(gr_status & GR_READY)))
55320+ return (mode & ~GR_AUDITS);
55321+
55322+ curracl = current->acl;
55323+
55324+ currobj = chk_obj_label(dentry, mnt, curracl);
55325+ retval = currobj->mode & mode;
55326+
55327+ /* if we're opening a specified transfer file for writing
55328+ (e.g. /dev/initctl), then transfer our role to init
55329+ */
55330+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
55331+ current->role->roletype & GR_ROLE_PERSIST)) {
55332+ struct task_struct *task = init_pid_ns.child_reaper;
55333+
55334+ if (task->role != current->role) {
55335+ task->acl_sp_role = 0;
55336+ task->acl_role_id = current->acl_role_id;
55337+ task->role = current->role;
55338+ rcu_read_lock();
55339+ read_lock(&grsec_exec_file_lock);
55340+ gr_apply_subject_to_task(task);
55341+ read_unlock(&grsec_exec_file_lock);
55342+ rcu_read_unlock();
55343+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
55344+ }
55345+ }
55346+
55347+ if (unlikely
55348+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
55349+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
55350+ __u32 new_mode = mode;
55351+
55352+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
55353+
55354+ retval = new_mode;
55355+
55356+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
55357+ new_mode |= GR_INHERIT;
55358+
55359+ if (!(mode & GR_NOLEARN))
55360+ gr_log_learn(dentry, mnt, new_mode);
55361+ }
55362+
55363+ return retval;
55364+}
55365+
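+/* for creates the new name may not exist in the policy yet: try a name
+   table lookup on the full path first, then fall back to the parent
+   directory's label with create-style glob checks
+*/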
55366+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
55367+ const struct dentry *parent,
55368+ const struct vfsmount *mnt)
55369+{
55370+ struct name_entry *match;
55371+ struct acl_object_label *matchpo;
55372+ struct acl_subject_label *curracl;
55373+ char *path;
55374+
55375+ if (unlikely(!(gr_status & GR_READY)))
55376+ return NULL;
55377+
55378+ preempt_disable();
55379+ path = gr_to_filename_rbac(new_dentry, mnt);
55380+ match = lookup_name_entry_create(path);
55381+
55382+ curracl = current->acl;
55383+
55384+ if (match) {
55385+ read_lock(&gr_inode_lock);
55386+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
55387+ read_unlock(&gr_inode_lock);
55388+
55389+ if (matchpo) {
55390+ preempt_enable();
55391+ return matchpo;
55392+ }
55393+ }
55394+
55395+ // lookup parent
55396+
55397+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
55398+
55399+ preempt_enable();
55400+ return matchpo;
55401+}
55402+
55403+__u32
55404+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
55405+ const struct vfsmount * mnt, const __u32 mode)
55406+{
55407+ struct acl_object_label *matchpo;
55408+ __u32 retval;
55409+
55410+ if (unlikely(!(gr_status & GR_READY)))
55411+ return (mode & ~GR_AUDITS);
55412+
55413+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
55414+
55415+ retval = matchpo->mode & mode;
55416+
55417+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
55418+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
55419+ __u32 new_mode = mode;
55420+
55421+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
55422+
55423+ gr_log_learn(new_dentry, mnt, new_mode);
55424+ return new_mode;
55425+ }
55426+
55427+ return retval;
55428+}
55429+
55430+__u32
55431+gr_check_link(const struct dentry * new_dentry,
55432+ const struct dentry * parent_dentry,
55433+ const struct vfsmount * parent_mnt,
55434+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
55435+{
55436+ struct acl_object_label *obj;
55437+ __u32 oldmode, newmode;
55438+ __u32 needmode;
55439+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
55440+ GR_DELETE | GR_INHERIT;
55441+
55442+ if (unlikely(!(gr_status & GR_READY)))
55443+ return (GR_CREATE | GR_LINK);
55444+
55445+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
55446+ oldmode = obj->mode;
55447+
55448+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
55449+ newmode = obj->mode;
55450+
55451+ needmode = newmode & checkmodes;
55452+
55453+ // old name for hardlink must have at least the permissions of the new name
55454+ if ((oldmode & needmode) != needmode)
55455+ goto bad;
55456+
55457+ // if old name had restrictions/auditing, make sure the new name does as well
55458+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
55459+
55460+ // don't allow hardlinking of suid/sgid/fcapped files without permission
55461+ if (is_privileged_binary(old_dentry))
55462+ needmode |= GR_SETID;
55463+
55464+ if ((newmode & needmode) != needmode)
55465+ goto bad;
55466+
55467+ // enforce minimum permissions
55468+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
55469+ return newmode;
55470+bad:
55471+ needmode = oldmode;
55472+ if (is_privileged_binary(old_dentry))
55473+ needmode |= GR_SETID;
55474+
55475+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
55476+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
55477+ return (GR_CREATE | GR_LINK);
55478+ } else if (newmode & GR_SUPPRESS)
55479+ return GR_SUPPRESS;
55480+ else
55481+ return 0;
55482+}
55483+
55484+int
55485+gr_check_hidden_task(const struct task_struct *task)
55486+{
55487+ if (unlikely(!(gr_status & GR_READY)))
55488+ return 0;
55489+
55490+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
55491+ return 1;
55492+
55493+ return 0;
55494+}
55495+
55496+int
55497+gr_check_protected_task(const struct task_struct *task)
55498+{
55499+ if (unlikely(!(gr_status & GR_READY) || !task))
55500+ return 0;
55501+
55502+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
55503+ task->acl != current->acl)
55504+ return 1;
55505+
55506+ return 0;
55507+}
55508+
55509+int
55510+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
55511+{
55512+ struct task_struct *p;
55513+ int ret = 0;
55514+
55515+ if (unlikely(!(gr_status & GR_READY) || !pid))
55516+ return ret;
55517+
55518+ read_lock(&tasklist_lock);
55519+ do_each_pid_task(pid, type, p) {
55520+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
55521+ p->acl != current->acl) {
55522+ ret = 1;
55523+ goto out;
55524+ }
55525+ } while_each_pid_task(pid, type, p);
55526+out:
55527+ read_unlock(&tasklist_lock);
55528+
55529+ return ret;
55530+}
55531+
55532+void
55533+gr_copy_label(struct task_struct *tsk)
55534+{
55535+ tsk->signal->used_accept = 0;
55536+ tsk->acl_sp_role = 0;
55537+ tsk->acl_role_id = current->acl_role_id;
55538+ tsk->acl = current->acl;
55539+ tsk->role = current->role;
55540+ tsk->signal->curr_ip = current->signal->curr_ip;
55541+ tsk->signal->saved_ip = current->signal->saved_ip;
55542+ if (current->exec_file)
55543+ get_file(current->exec_file);
55544+ tsk->exec_file = current->exec_file;
55545+ tsk->is_writable = current->is_writable;
55546+ if (unlikely(current->signal->used_accept)) {
55547+ current->signal->curr_ip = 0;
55548+ current->signal->saved_ip = 0;
55549+ }
55550+
55551+ return;
55552+}
55553+
55554+static void
55555+gr_set_proc_res(struct task_struct *task)
55556+{
55557+ struct acl_subject_label *proc;
55558+ unsigned short i;
55559+
55560+ proc = task->acl;
55561+
55562+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
55563+ return;
55564+
55565+ for (i = 0; i < RLIM_NLIMITS; i++) {
55566+ if (!(proc->resmask & (1 << i)))
55567+ continue;
55568+
55569+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
55570+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
55571+ }
55572+
55573+ return;
55574+}
55575+
55576+extern int __gr_process_user_ban(struct user_struct *user);
55577+
55578+int
55579+gr_check_user_change(int real, int effective, int fs)
55580+{
55581+ unsigned int i;
55582+ __u16 num;
55583+ uid_t *uidlist;
55584+ int curuid;
55585+ int realok = 0;
55586+ int effectiveok = 0;
55587+ int fsok = 0;
55588+
55589+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
55590+ struct user_struct *user;
55591+
55592+ if (real == -1)
55593+ goto skipit;
55594+
55595+ user = find_user(real);
55596+ if (user == NULL)
55597+ goto skipit;
55598+
55599+ if (__gr_process_user_ban(user)) {
55600+ /* for find_user */
55601+ free_uid(user);
55602+ return 1;
55603+ }
55604+
55605+ /* for find_user */
55606+ free_uid(user);
55607+
55608+skipit:
55609+#endif
55610+
55611+ if (unlikely(!(gr_status & GR_READY)))
55612+ return 0;
55613+
55614+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
55615+ gr_log_learn_id_change('u', real, effective, fs);
55616+
55617+ num = current->acl->user_trans_num;
55618+ uidlist = current->acl->user_transitions;
55619+
55620+ if (uidlist == NULL)
55621+ return 0;
55622+
55623+ if (real == -1)
55624+ realok = 1;
55625+ if (effective == -1)
55626+ effectiveok = 1;
55627+ if (fs == -1)
55628+ fsok = 1;
55629+
55630+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
55631+ for (i = 0; i < num; i++) {
55632+ curuid = (int)uidlist[i];
55633+ if (real == curuid)
55634+ realok = 1;
55635+ if (effective == curuid)
55636+ effectiveok = 1;
55637+ if (fs == curuid)
55638+ fsok = 1;
55639+ }
55640+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
55641+ for (i = 0; i < num; i++) {
55642+ curuid = (int)uidlist[i];
55643+ if (real == curuid)
55644+ break;
55645+ if (effective == curuid)
55646+ break;
55647+ if (fs == curuid)
55648+ break;
55649+ }
55650+ /* not in deny list */
55651+ if (i == num) {
55652+ realok = 1;
55653+ effectiveok = 1;
55654+ fsok = 1;
55655+ }
55656+ }
55657+
55658+ if (realok && effectiveok && fsok)
55659+ return 0;
55660+ else {
55661+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
55662+ return 1;
55663+ }
55664+}
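/*
 * Editorial sketch, not part of the patch: the allow/deny transition walk
 * used by gr_check_user_change() above (and by its group twin below),
 * modeled in plain userspace C.  An ALLOW list must contain every
 * requested id; a DENY list must contain none of them; -1 means "leave
 * unchanged" and always passes.  Hypothetical names throughout.
 */
#include <stdio.h>

enum trans_type { ID_ALLOW, ID_DENY };

static int id_change_ok(enum trans_type type, const int *list, int num,
			int real, int effective, int fs)
{
	int realok = (real == -1), effok = (effective == -1), fsok = (fs == -1);
	int i;

	if (type == ID_ALLOW) {
		for (i = 0; i < num; i++) {
			if (real == list[i])
				realok = 1;
			if (effective == list[i])
				effok = 1;
			if (fs == list[i])
				fsok = 1;
		}
		return realok && effok && fsok;
	}
	/* ID_DENY: any hit fails the whole request */
	for (i = 0; i < num; i++)
		if (real == list[i] || effective == list[i] || fs == list[i])
			return 0;
	return 1;
}

int main(void)
{
	int allow[] = { 1000, 33 };

	/* setuid to 1000 with unchanged euid/fsuid: permitted */
	printf("%d\n", id_change_ok(ID_ALLOW, allow, 2, 1000, -1, -1)); /* 1 */
	/* setuid to 0: not in the allow list, denied */
	printf("%d\n", id_change_ok(ID_ALLOW, allow, 2, 0, -1, -1));    /* 0 */
	return 0;
}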
55665+
55666+int
55667+gr_check_group_change(int real, int effective, int fs)
55668+{
55669+ unsigned int i;
55670+ __u16 num;
55671+ gid_t *gidlist;
55672+ int curgid;
55673+ int realok = 0;
55674+ int effectiveok = 0;
55675+ int fsok = 0;
55676+
55677+ if (unlikely(!(gr_status & GR_READY)))
55678+ return 0;
55679+
55680+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
55681+ gr_log_learn_id_change('g', real, effective, fs);
55682+
55683+ num = current->acl->group_trans_num;
55684+ gidlist = current->acl->group_transitions;
55685+
55686+ if (gidlist == NULL)
55687+ return 0;
55688+
55689+ if (real == -1)
55690+ realok = 1;
55691+ if (effective == -1)
55692+ effectiveok = 1;
55693+ if (fs == -1)
55694+ fsok = 1;
55695+
55696+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
55697+ for (i = 0; i < num; i++) {
55698+ curgid = (int)gidlist[i];
55699+ if (real == curgid)
55700+ realok = 1;
55701+ if (effective == curgid)
55702+ effectiveok = 1;
55703+ if (fs == curgid)
55704+ fsok = 1;
55705+ }
55706+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
55707+ for (i = 0; i < num; i++) {
55708+ curgid = (int)gidlist[i];
55709+ if (real == curgid)
55710+ break;
55711+ if (effective == curgid)
55712+ break;
55713+ if (fs == curgid)
55714+ break;
55715+ }
55716+ /* not in deny list */
55717+ if (i == num) {
55718+ realok = 1;
55719+ effectiveok = 1;
55720+ fsok = 1;
55721+ }
55722+ }
55723+
55724+ if (realok && effectiveok && fsok)
55725+ return 0;
55726+ else {
55727+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
55728+ return 1;
55729+ }
55730+}
55731+
55732+extern int gr_acl_is_capable(const int cap);
55733+
55734+void
55735+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
55736+{
55737+ struct acl_role_label *role = task->role;
55738+ struct acl_subject_label *subj = NULL;
55739+ struct acl_object_label *obj;
55740+ struct file *filp;
55741+
55742+ if (unlikely(!(gr_status & GR_READY)))
55743+ return;
55744+
55745+ filp = task->exec_file;
55746+
55747+ /* kernel process, we'll give them the kernel role */
55748+ if (unlikely(!filp)) {
55749+ task->role = kernel_role;
55750+ task->acl = kernel_role->root_label;
55751+ return;
55752+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
55753+ role = lookup_acl_role_label(task, uid, gid);
55754+
55755+ /* don't change the role if we're not a privileged process */
55756+ if (role && task->role != role &&
55757+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
55758+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
55759+ return;
55760+
55761+ /* perform the subject lookup in the possibly new role;
55762+ we can reuse this result below in the case where role == task->role
55763+ */
55764+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
55765+
55766+ /* if we changed uid/gid but ended up in the same role,
55767+ and we're using inheritance, don't lose the inherited subject:
55768+ if the current subject differs from what a normal lookup
55769+ would return, we arrived at it via inheritance, so
55770+ keep that subject
55771+ */
55772+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
55773+ (subj == task->acl)))
55774+ task->acl = subj;
55775+
55776+ task->role = role;
55777+
55778+ task->is_writable = 0;
55779+
55780+ /* ignore additional mmap checks for processes that are writable
55781+ by the default ACL */
55782+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
55783+ if (unlikely(obj->mode & GR_WRITE))
55784+ task->is_writable = 1;
55785+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
55786+ if (unlikely(obj->mode & GR_WRITE))
55787+ task->is_writable = 1;
55788+
55789+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
55790+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
55791+#endif
55792+
55793+ gr_set_proc_res(task);
55794+
55795+ return;
55796+}
55797+
55798+int
55799+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
55800+ const int unsafe_flags)
55801+{
55802+ struct task_struct *task = current;
55803+ struct acl_subject_label *newacl;
55804+ struct acl_object_label *obj;
55805+ __u32 retmode;
55806+
55807+ if (unlikely(!(gr_status & GR_READY)))
55808+ return 0;
55809+
55810+ newacl = chk_subj_label(dentry, mnt, task->role);
55811+
55812+ /* special handling for the case where we did an strace -f -p <pid> from an admin role,
55813+ and <pid> then did an exec
55814+ */
55815+ rcu_read_lock();
55816+ read_lock(&tasklist_lock);
55817+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
55818+ (task->parent->acl->mode & GR_POVERRIDE))) {
55819+ read_unlock(&tasklist_lock);
55820+ rcu_read_unlock();
55821+ goto skip_check;
55822+ }
55823+ read_unlock(&tasklist_lock);
55824+ rcu_read_unlock();
55825+
55826+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
55827+ !(task->role->roletype & GR_ROLE_GOD) &&
55828+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
55829+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
55830+ if (unsafe_flags & LSM_UNSAFE_SHARE)
55831+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
55832+ else
55833+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
55834+ return -EACCES;
55835+ }
55836+
55837+skip_check:
55838+
55839+ obj = chk_obj_label(dentry, mnt, task->acl);
55840+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
55841+
55842+ if (!(task->acl->mode & GR_INHERITLEARN) &&
55843+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
55844+ if (obj->nested)
55845+ task->acl = obj->nested;
55846+ else
55847+ task->acl = newacl;
55848+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
55849+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
55850+
55851+ task->is_writable = 0;
55852+
55853+ /* ignore additional mmap checks for processes that are writable
55854+ by the default ACL */
55855+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
55856+ if (unlikely(obj->mode & GR_WRITE))
55857+ task->is_writable = 1;
55858+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
55859+ if (unlikely(obj->mode & GR_WRITE))
55860+ task->is_writable = 1;
55861+
55862+ gr_set_proc_res(task);
55863+
55864+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
55865+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
55866+#endif
55867+ return 0;
55868+}
55869+
55870+/* always called with valid inodev ptr */
55871+static void
55872+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
55873+{
55874+ struct acl_object_label *matchpo;
55875+ struct acl_subject_label *matchps;
55876+ struct acl_subject_label *subj;
55877+ struct acl_role_label *role;
55878+ unsigned int x;
55879+
55880+ FOR_EACH_ROLE_START(role)
55881+ FOR_EACH_SUBJECT_START(role, subj, x)
55882+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
55883+ matchpo->mode |= GR_DELETED;
55884+ FOR_EACH_SUBJECT_END(subj,x)
55885+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
55886+ /* nested subjects aren't in the role's subj_hash table */
55887+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
55888+ matchpo->mode |= GR_DELETED;
55889+ FOR_EACH_NESTED_SUBJECT_END(subj)
55890+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
55891+ matchps->mode |= GR_DELETED;
55892+ FOR_EACH_ROLE_END(role)
55893+
55894+ inodev->nentry->deleted = 1;
55895+
55896+ return;
55897+}
55898+
55899+void
55900+gr_handle_delete(const ino_t ino, const dev_t dev)
55901+{
55902+ struct inodev_entry *inodev;
55903+
55904+ if (unlikely(!(gr_status & GR_READY)))
55905+ return;
55906+
55907+ write_lock(&gr_inode_lock);
55908+ inodev = lookup_inodev_entry(ino, dev);
55909+ if (inodev != NULL)
55910+ do_handle_delete(inodev, ino, dev);
55911+ write_unlock(&gr_inode_lock);
55912+
55913+ return;
55914+}
55915+
55916+static void
55917+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
55918+ const ino_t newinode, const dev_t newdevice,
55919+ struct acl_subject_label *subj)
55920+{
55921+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
55922+ struct acl_object_label *match;
55923+
55924+ match = subj->obj_hash[index];
55925+
55926+ while (match && (match->inode != oldinode ||
55927+ match->device != olddevice ||
55928+ !(match->mode & GR_DELETED)))
55929+ match = match->next;
55930+
55931+ if (match && (match->inode == oldinode)
55932+ && (match->device == olddevice)
55933+ && (match->mode & GR_DELETED)) {
55934+ if (match->prev == NULL) {
55935+ subj->obj_hash[index] = match->next;
55936+ if (match->next != NULL)
55937+ match->next->prev = NULL;
55938+ } else {
55939+ match->prev->next = match->next;
55940+ if (match->next != NULL)
55941+ match->next->prev = match->prev;
55942+ }
55943+ match->prev = NULL;
55944+ match->next = NULL;
55945+ match->inode = newinode;
55946+ match->device = newdevice;
55947+ match->mode &= ~GR_DELETED;
55948+
55949+ insert_acl_obj_label(match, subj);
55950+ }
55951+
55952+ return;
55953+}
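/*
 * Editorial sketch, not part of the patch: update_acl_obj_label() above
 * and the two functions that follow all use the same unlink/relabel/
 * reinsert pattern on a doubly linked hash bucket -- find the stale entry
 * by its old key, splice it out of its chain, rewrite the key, and insert
 * it into the chain for the new key.  Reduced here to a generic node with
 * hypothetical types.
 */
#include <stdio.h>

struct node {
	unsigned long key;
	struct node *prev, *next;
};

#define NBUCKETS 8
static struct node *bucket[NBUCKETS];

static unsigned int hashfn(unsigned long key) { return key % NBUCKETS; }

static void insert(struct node *n)
{
	unsigned int i = hashfn(n->key);

	n->prev = NULL;
	n->next = bucket[i];
	if (bucket[i])
		bucket[i]->prev = n;
	bucket[i] = n;
}

/* find a node by its old key and move it to the chain for its new key */
static void rekey(unsigned long oldkey, unsigned long newkey)
{
	unsigned int i = hashfn(oldkey);
	struct node *m = bucket[i];

	while (m && m->key != oldkey)
		m = m->next;
	if (!m)
		return;

	if (m->prev == NULL) {		/* head of chain */
		bucket[i] = m->next;
		if (m->next)
			m->next->prev = NULL;
	} else {
		m->prev->next = m->next;
		if (m->next)
			m->next->prev = m->prev;
	}
	m->key = newkey;
	insert(m);
}

int main(void)
{
	struct node n = { .key = 42 };

	insert(&n);
	rekey(42, 7);
	printf("bucket[%u] holds key %lu\n", hashfn(7), bucket[hashfn(7)]->key);
	return 0;
}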
55954+
55955+static void
55956+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
55957+ const ino_t newinode, const dev_t newdevice,
55958+ struct acl_role_label *role)
55959+{
55960+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
55961+ struct acl_subject_label *match;
55962+
55963+ match = role->subj_hash[index];
55964+
55965+ while (match && (match->inode != oldinode ||
55966+ match->device != olddevice ||
55967+ !(match->mode & GR_DELETED)))
55968+ match = match->next;
55969+
55970+ if (match && (match->inode == oldinode)
55971+ && (match->device == olddevice)
55972+ && (match->mode & GR_DELETED)) {
55973+ if (match->prev == NULL) {
55974+ role->subj_hash[index] = match->next;
55975+ if (match->next != NULL)
55976+ match->next->prev = NULL;
55977+ } else {
55978+ match->prev->next = match->next;
55979+ if (match->next != NULL)
55980+ match->next->prev = match->prev;
55981+ }
55982+ match->prev = NULL;
55983+ match->next = NULL;
55984+ match->inode = newinode;
55985+ match->device = newdevice;
55986+ match->mode &= ~GR_DELETED;
55987+
55988+ insert_acl_subj_label(match, role);
55989+ }
55990+
55991+ return;
55992+}
55993+
55994+static void
55995+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
55996+ const ino_t newinode, const dev_t newdevice)
55997+{
55998+ unsigned int index = gr_fhash(oldinode, olddevice, inodev_set.i_size);
55999+ struct inodev_entry *match;
56000+
56001+ match = inodev_set.i_hash[index];
56002+
56003+ while (match && (match->nentry->inode != oldinode ||
56004+ match->nentry->device != olddevice || !match->nentry->deleted))
56005+ match = match->next;
56006+
56007+ if (match && (match->nentry->inode == oldinode)
56008+ && (match->nentry->device == olddevice) &&
56009+ match->nentry->deleted) {
56010+ if (match->prev == NULL) {
56011+ inodev_set.i_hash[index] = match->next;
56012+ if (match->next != NULL)
56013+ match->next->prev = NULL;
56014+ } else {
56015+ match->prev->next = match->next;
56016+ if (match->next != NULL)
56017+ match->next->prev = match->prev;
56018+ }
56019+ match->prev = NULL;
56020+ match->next = NULL;
56021+ match->nentry->inode = newinode;
56022+ match->nentry->device = newdevice;
56023+ match->nentry->deleted = 0;
56024+
56025+ insert_inodev_entry(match);
56026+ }
56027+
56028+ return;
56029+}
56030+
56031+static void
56032+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
56033+{
56034+ struct acl_subject_label *subj;
56035+ struct acl_role_label *role;
56036+ unsigned int x;
56037+
56038+ FOR_EACH_ROLE_START(role)
56039+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
56040+
56041+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
56042+ if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
56043+ subj->inode = ino;
56044+ subj->device = dev;
56045+ }
56046+ /* nested subjects aren't in the role's subj_hash table */
56047+ update_acl_obj_label(matchn->inode, matchn->device,
56048+ ino, dev, subj);
56049+ FOR_EACH_NESTED_SUBJECT_END(subj)
56050+ FOR_EACH_SUBJECT_START(role, subj, x)
56051+ update_acl_obj_label(matchn->inode, matchn->device,
56052+ ino, dev, subj);
56053+ FOR_EACH_SUBJECT_END(subj,x)
56054+ FOR_EACH_ROLE_END(role)
56055+
56056+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
56057+
56058+ return;
56059+}
56060+
56061+static void
56062+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
56063+ const struct vfsmount *mnt)
56064+{
56065+ ino_t ino = dentry->d_inode->i_ino;
56066+ dev_t dev = __get_dev(dentry);
56067+
56068+ __do_handle_create(matchn, ino, dev);
56069+
56070+ return;
56071+}
56072+
56073+void
56074+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
56075+{
56076+ struct name_entry *matchn;
56077+
56078+ if (unlikely(!(gr_status & GR_READY)))
56079+ return;
56080+
56081+ preempt_disable();
56082+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
56083+
56084+ if (unlikely((unsigned long)matchn)) {
56085+ write_lock(&gr_inode_lock);
56086+ do_handle_create(matchn, dentry, mnt);
56087+ write_unlock(&gr_inode_lock);
56088+ }
56089+ preempt_enable();
56090+
56091+ return;
56092+}
56093+
56094+void
56095+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
56096+{
56097+ struct name_entry *matchn;
56098+
56099+ if (unlikely(!(gr_status & GR_READY)))
56100+ return;
56101+
56102+ preempt_disable();
56103+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
56104+
56105+ if (unlikely((unsigned long)matchn)) {
56106+ write_lock(&gr_inode_lock);
56107+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
56108+ write_unlock(&gr_inode_lock);
56109+ }
56110+ preempt_enable();
56111+
56112+ return;
56113+}
56114+
56115+void
56116+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
56117+ struct dentry *old_dentry,
56118+ struct dentry *new_dentry,
56119+ struct vfsmount *mnt, const __u8 replace)
56120+{
56121+ struct name_entry *matchn;
56122+ struct inodev_entry *inodev;
56123+ struct inode *inode = new_dentry->d_inode;
56124+ ino_t old_ino = old_dentry->d_inode->i_ino;
56125+ dev_t old_dev = __get_dev(old_dentry);
56126+
56127+ /* vfs_rename swaps the name and parent link for old_dentry and
56128+ new_dentry.
56129+ at this point, old_dentry has the new name, parent link, and inode
56130+ for the renamed file.
56131+ if a file is being replaced by a rename, new_dentry has the inode
56132+ and name for the replaced file.
56133+ */
56134+
56135+ if (unlikely(!(gr_status & GR_READY)))
56136+ return;
56137+
56138+ preempt_disable();
56139+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
56140+
56141+ /* we wouldn't have to check d_inode if it weren't for
56142+ NFS silly-renaming
56143+ */
56144+
56145+ write_lock(&gr_inode_lock);
56146+ if (unlikely(replace && inode)) {
56147+ ino_t new_ino = inode->i_ino;
56148+ dev_t new_dev = __get_dev(new_dentry);
56149+
56150+ inodev = lookup_inodev_entry(new_ino, new_dev);
56151+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
56152+ do_handle_delete(inodev, new_ino, new_dev);
56153+ }
56154+
56155+ inodev = lookup_inodev_entry(old_ino, old_dev);
56156+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
56157+ do_handle_delete(inodev, old_ino, old_dev);
56158+
56159+ if (unlikely((unsigned long)matchn))
56160+ do_handle_create(matchn, old_dentry, mnt);
56161+
56162+ write_unlock(&gr_inode_lock);
56163+ preempt_enable();
56164+
56165+ return;
56166+}
56167+
56168+static int
56169+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
56170+ unsigned char **sum)
56171+{
56172+ struct acl_role_label *r;
56173+ struct role_allowed_ip *ipp;
56174+ struct role_transition *trans;
56175+ unsigned int i;
56176+ int found = 0;
56177+ u32 curr_ip = current->signal->curr_ip;
56178+
56179+ current->signal->saved_ip = curr_ip;
56180+
56181+ /* check transition table */
56182+
56183+ for (trans = current->role->transitions; trans; trans = trans->next) {
56184+ if (!strcmp(rolename, trans->rolename)) {
56185+ found = 1;
56186+ break;
56187+ }
56188+ }
56189+
56190+ if (!found)
56191+ return 0;
56192+
56193+ /* handle special roles that do not require authentication
56194+ and check ip */
56195+
56196+ FOR_EACH_ROLE_START(r)
56197+ if (!strcmp(rolename, r->rolename) &&
56198+ (r->roletype & GR_ROLE_SPECIAL)) {
56199+ found = 0;
56200+ if (r->allowed_ips != NULL) {
56201+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
56202+ if ((ntohl(curr_ip) & ipp->netmask) ==
56203+ (ntohl(ipp->addr) & ipp->netmask))
56204+ found = 1;
56205+ }
56206+ } else
56207+ found = 2;
56208+ if (!found)
56209+ return 0;
56210+
56211+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
56212+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
56213+ *salt = NULL;
56214+ *sum = NULL;
56215+ return 1;
56216+ }
56217+ }
56218+ FOR_EACH_ROLE_END(r)
56219+
56220+ for (i = 0; i < num_sprole_pws; i++) {
56221+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
56222+ *salt = acl_special_roles[i]->salt;
56223+ *sum = acl_special_roles[i]->sum;
56224+ return 1;
56225+ }
56226+ }
56227+
56228+ return 0;
56229+}
56230+
56231+static void
56232+assign_special_role(char *rolename)
56233+{
56234+ struct acl_object_label *obj;
56235+ struct acl_role_label *r;
56236+ struct acl_role_label *assigned = NULL;
56237+ struct task_struct *tsk;
56238+ struct file *filp;
56239+
56240+ FOR_EACH_ROLE_START(r)
56241+ if (!strcmp(rolename, r->rolename) &&
56242+ (r->roletype & GR_ROLE_SPECIAL)) {
56243+ assigned = r;
56244+ break;
56245+ }
56246+ FOR_EACH_ROLE_END(r)
56247+
56248+ if (!assigned)
56249+ return;
56250+
56251+ read_lock(&tasklist_lock);
56252+ read_lock(&grsec_exec_file_lock);
56253+
56254+ tsk = current->real_parent;
56255+ if (tsk == NULL)
56256+ goto out_unlock;
56257+
56258+ filp = tsk->exec_file;
56259+ if (filp == NULL)
56260+ goto out_unlock;
56261+
56262+ tsk->is_writable = 0;
56263+
56264+ tsk->acl_sp_role = 1;
56265+ tsk->acl_role_id = ++acl_sp_role_value;
56266+ tsk->role = assigned;
56267+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
56268+
56269+ /* ignore additional mmap checks for processes that are writable
56270+ by the default ACL */
56271+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
56272+ if (unlikely(obj->mode & GR_WRITE))
56273+ tsk->is_writable = 1;
56274+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
56275+ if (unlikely(obj->mode & GR_WRITE))
56276+ tsk->is_writable = 1;
56277+
56278+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
56279+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
56280+#endif
56281+
56282+out_unlock:
56283+ read_unlock(&grsec_exec_file_lock);
56284+ read_unlock(&tasklist_lock);
56285+ return;
56286+}
56287+
56288+int gr_check_secure_terminal(struct task_struct *task)
56289+{
56290+ struct task_struct *p, *p2, *p3;
56291+ struct files_struct *files;
56292+ struct fdtable *fdt;
56293+ struct file *our_file = NULL, *file;
56294+ int i;
56295+
56296+ if (task->signal->tty == NULL)
56297+ return 1;
56298+
56299+ files = get_files_struct(task);
56300+ if (files != NULL) {
56301+ rcu_read_lock();
56302+ fdt = files_fdtable(files);
56303+ for (i=0; i < fdt->max_fds; i++) {
56304+ file = fcheck_files(files, i);
56305+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
56306+ get_file(file);
56307+ our_file = file;
56308+ }
56309+ }
56310+ rcu_read_unlock();
56311+ put_files_struct(files);
56312+ }
56313+
56314+ if (our_file == NULL)
56315+ return 1;
56316+
56317+ read_lock(&tasklist_lock);
56318+ do_each_thread(p2, p) {
56319+ files = get_files_struct(p);
56320+ if (files == NULL ||
56321+ (p->signal && p->signal->tty == task->signal->tty)) {
56322+ if (files != NULL)
56323+ put_files_struct(files);
56324+ continue;
56325+ }
56326+ rcu_read_lock();
56327+ fdt = files_fdtable(files);
56328+ for (i=0; i < fdt->max_fds; i++) {
56329+ file = fcheck_files(files, i);
56330+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
56331+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
56332+ p3 = task;
56333+ while (p3->pid > 0) {
56334+ if (p3 == p)
56335+ break;
56336+ p3 = p3->real_parent;
56337+ }
56338+ if (p3 == p)
56339+ break;
56340+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
56341+ gr_handle_alertkill(p);
56342+ rcu_read_unlock();
56343+ put_files_struct(files);
56344+ read_unlock(&tasklist_lock);
56345+ fput(our_file);
56346+ return 0;
56347+ }
56348+ }
56349+ rcu_read_unlock();
56350+ put_files_struct(files);
56351+ } while_each_thread(p2, p);
56352+ read_unlock(&tasklist_lock);
56353+
56354+ fput(our_file);
56355+ return 1;
56356+}
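/*
 * Editorial sketch, not part of the patch: the tty-sniffing check above,
 * and the ptrace checks later in this file, all walk real_parent links to
 * ask "is p the task itself or one of its ancestors?", stopping at the
 * idle task (pid 0).  Minimal model with a hypothetical struct.
 */
#include <stdio.h>

struct proc {
	int pid;
	struct proc *real_parent;
};

static int is_ancestor(const struct proc *task, const struct proc *p)
{
	while (task->pid > 0) {
		if (task == p)
			return 1;
		task = task->real_parent;
	}
	return 0;
}

int main(void)
{
	struct proc idle  = { 0, &idle };
	struct proc init  = { 1, &idle };
	struct proc shell = { 100, &init };
	struct proc child = { 200, &shell };

	printf("%d %d\n", is_ancestor(&child, &shell),	/* 1 */
	       is_ancestor(&shell, &child));		/* 0 */
	return 0;
}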
56357+
56358+static int gr_rbac_disable(void *unused)
56359+{
56360+ pax_open_kernel();
56361+ gr_status &= ~GR_READY;
56362+ pax_close_kernel();
56363+
56364+ return 0;
56365+}
56366+
56367+ssize_t
56368+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
56369+{
56370+ struct gr_arg_wrapper uwrap;
56371+ unsigned char *sprole_salt = NULL;
56372+ unsigned char *sprole_sum = NULL;
56373+ int error = sizeof (struct gr_arg_wrapper);
56374+ int error2 = 0;
56375+
56376+ mutex_lock(&gr_dev_mutex);
56377+
56378+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
56379+ error = -EPERM;
56380+ goto out;
56381+ }
56382+
56383+ if (count != sizeof (struct gr_arg_wrapper)) {
56384+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
56385+ error = -EINVAL;
56386+ goto out;
56387+ }
56388+
56389+
56390+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
56391+ gr_auth_expires = 0;
56392+ gr_auth_attempts = 0;
56393+ }
56394+
56395+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
56396+ error = -EFAULT;
56397+ goto out;
56398+ }
56399+
56400+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
56401+ error = -EINVAL;
56402+ goto out;
56403+ }
56404+
56405+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
56406+ error = -EFAULT;
56407+ goto out;
56408+ }
56409+
56410+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
56411+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
56412+ time_after(gr_auth_expires, get_seconds())) {
56413+ error = -EBUSY;
56414+ goto out;
56415+ }
56416+
56417+ /* if a non-root user is trying to do anything other than use a special role,
56418+ do not attempt authentication, and do not count it towards authentication
56419+ locking
56420+ */
56421+
56422+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
56423+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
56424+ !uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
56425+ error = -EPERM;
56426+ goto out;
56427+ }
56428+
56429+ /* ensure pw and special role name are null terminated */
56430+
56431+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
56432+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
56433+
56434+ /* Okay.
56435+ * We have enough of the argument structure (we have yet
56436+ * to copy_from_user the tables themselves). Copy the tables
56437+ * only if we need them, i.e. for loading operations. */
56438+
56439+ switch (gr_usermode->mode) {
56440+ case GR_STATUS:
56441+ if (gr_status & GR_READY) {
56442+ error = 1;
56443+ if (!gr_check_secure_terminal(current))
56444+ error = 3;
56445+ } else
56446+ error = 2;
56447+ goto out;
56448+ case GR_SHUTDOWN:
56449+ if ((gr_status & GR_READY)
56450+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
56451+ stop_machine(gr_rbac_disable, NULL, NULL);
56452+ free_variables();
56453+ memset(gr_usermode, 0, sizeof (struct gr_arg));
56454+ memset(gr_system_salt, 0, GR_SALT_LEN);
56455+ memset(gr_system_sum, 0, GR_SHA_LEN);
56456+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
56457+ } else if (gr_status & GR_READY) {
56458+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
56459+ error = -EPERM;
56460+ } else {
56461+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
56462+ error = -EAGAIN;
56463+ }
56464+ break;
56465+ case GR_ENABLE:
56466+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
56467+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
56468+ else {
56469+ if (gr_status & GR_READY)
56470+ error = -EAGAIN;
56471+ else
56472+ error = error2;
56473+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
56474+ }
56475+ break;
56476+ case GR_RELOAD:
56477+ if (!(gr_status & GR_READY)) {
56478+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
56479+ error = -EAGAIN;
56480+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
56481+ stop_machine(gr_rbac_disable, NULL, NULL);
56482+ free_variables();
56483+ error2 = gracl_init(gr_usermode);
56484+ if (!error2)
56485+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
56486+ else {
56487+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
56488+ error = error2;
56489+ }
56490+ } else {
56491+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
56492+ error = -EPERM;
56493+ }
56494+ break;
56495+ case GR_SEGVMOD:
56496+ if (unlikely(!(gr_status & GR_READY))) {
56497+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
56498+ error = -EAGAIN;
56499+ break;
56500+ }
56501+
56502+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
56503+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
56504+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
56505+ struct acl_subject_label *segvacl;
56506+ segvacl =
56507+ lookup_acl_subj_label(gr_usermode->segv_inode,
56508+ gr_usermode->segv_device,
56509+ current->role);
56510+ if (segvacl) {
56511+ segvacl->crashes = 0;
56512+ segvacl->expires = 0;
56513+ }
56514+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
56515+ gr_remove_uid(gr_usermode->segv_uid);
56516+ }
56517+ } else {
56518+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
56519+ error = -EPERM;
56520+ }
56521+ break;
56522+ case GR_SPROLE:
56523+ case GR_SPROLEPAM:
56524+ if (unlikely(!(gr_status & GR_READY))) {
56525+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
56526+ error = -EAGAIN;
56527+ break;
56528+ }
56529+
56530+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
56531+ current->role->expires = 0;
56532+ current->role->auth_attempts = 0;
56533+ }
56534+
56535+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
56536+ time_after(current->role->expires, get_seconds())) {
56537+ error = -EBUSY;
56538+ goto out;
56539+ }
56540+
56541+ if (lookup_special_role_auth
56542+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
56543+ && ((!sprole_salt && !sprole_sum)
56544+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
56545+ char *p = "";
56546+ assign_special_role(gr_usermode->sp_role);
56547+ read_lock(&tasklist_lock);
56548+ if (current->real_parent)
56549+ p = current->real_parent->role->rolename;
56550+ read_unlock(&tasklist_lock);
56551+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
56552+ p, acl_sp_role_value);
56553+ } else {
56554+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
56555+ error = -EPERM;
56556+ if(!(current->role->auth_attempts++))
56557+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
56558+
56559+ goto out;
56560+ }
56561+ break;
56562+ case GR_UNSPROLE:
56563+ if (unlikely(!(gr_status & GR_READY))) {
56564+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
56565+ error = -EAGAIN;
56566+ break;
56567+ }
56568+
56569+ if (current->role->roletype & GR_ROLE_SPECIAL) {
56570+ char *p = "";
56571+ int i = 0;
56572+
56573+ read_lock(&tasklist_lock);
56574+ if (current->real_parent) {
56575+ p = current->real_parent->role->rolename;
56576+ i = current->real_parent->acl_role_id;
56577+ }
56578+ read_unlock(&tasklist_lock);
56579+
56580+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
56581+ gr_set_acls(1);
56582+ } else {
56583+ error = -EPERM;
56584+ goto out;
56585+ }
56586+ break;
56587+ default:
56588+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
56589+ error = -EINVAL;
56590+ break;
56591+ }
56592+
56593+ if (error != -EPERM)
56594+ goto out;
56595+
56596+ if(!(gr_auth_attempts++))
56597+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
56598+
56599+ out:
56600+ mutex_unlock(&gr_dev_mutex);
56601+ return error;
56602+}
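/*
 * Editorial sketch, not part of the patch: the authentication lockout
 * threaded through write_grsec_handler() above.  The very first failure
 * arms a timeout window; once MAXTRIES failures have accumulated and the
 * window has not yet expired, further attempts are refused outright.
 * time_now() and the two constants stand in for get_seconds() and the
 * CONFIG_GRKERNSEC_ACL_MAXTRIES/CONFIG_GRKERNSEC_ACL_TIMEOUT options.
 */
#include <stdio.h>
#include <time.h>

#define MAXTRIES 3
#define TIMEOUT  30	/* seconds */

static unsigned long auth_attempts;
static unsigned long auth_expires;

static unsigned long time_now(void) { return (unsigned long)time(NULL); }

/* returns 0 if an attempt may proceed, -1 if locked out */
static int auth_gate(void)
{
	if (auth_expires && time_now() >= auth_expires) {
		auth_expires = 0;	/* window elapsed: reset the counter */
		auth_attempts = 0;
	}
	if (auth_attempts >= MAXTRIES && time_now() < auth_expires)
		return -1;
	return 0;
}

static void auth_failed(void)
{
	if (!auth_attempts++)	/* first failure arms the timeout window */
		auth_expires = time_now() + TIMEOUT;
}

int main(void)
{
	int i;

	for (i = 0; i < 5; i++) {
		if (auth_gate()) {
			printf("attempt %d: locked out\n", i);
		} else {
			printf("attempt %d: allowed (and fails)\n", i);
			auth_failed();
		}
	}
	return 0;
}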
56603+
56604+/* must be called with
56605+ rcu_read_lock();
56606+ read_lock(&tasklist_lock);
56607+ read_lock(&grsec_exec_file_lock);
56608+*/
56609+int gr_apply_subject_to_task(struct task_struct *task)
56610+{
56611+ struct acl_object_label *obj;
56612+ char *tmpname;
56613+ struct acl_subject_label *tmpsubj;
56614+ struct file *filp;
56615+ struct name_entry *nmatch;
56616+
56617+ filp = task->exec_file;
56618+ if (filp == NULL)
56619+ return 0;
56620+
56621+ /* the following is to apply the correct subject
56622+ to binaries that are running when the RBAC system
56623+ is enabled and that have been replaced or deleted
56624+ since their execution
56625+ -----
56626+ when the RBAC system starts, the inode/dev
56627+ from exec_file will be one that the RBAC system
56628+ is unaware of. It only knows the inode/dev
56629+ of the present file on disk, or the absence
56630+ of it.
56631+ */
56632+ preempt_disable();
56633+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
56634+
56635+ nmatch = lookup_name_entry(tmpname);
56636+ preempt_enable();
56637+ tmpsubj = NULL;
56638+ if (nmatch) {
56639+ if (nmatch->deleted)
56640+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
56641+ else
56642+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
56643+ if (tmpsubj != NULL)
56644+ task->acl = tmpsubj;
56645+ }
56646+ if (tmpsubj == NULL)
56647+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
56648+ task->role);
56649+ if (task->acl) {
56650+ task->is_writable = 0;
56651+ /* ignore additional mmap checks for processes that are writable
56652+ by the default ACL */
56653+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
56654+ if (unlikely(obj->mode & GR_WRITE))
56655+ task->is_writable = 1;
56656+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
56657+ if (unlikely(obj->mode & GR_WRITE))
56658+ task->is_writable = 1;
56659+
56660+ gr_set_proc_res(task);
56661+
56662+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
56663+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
56664+#endif
56665+ } else {
56666+ return 1;
56667+ }
56668+
56669+ return 0;
56670+}
56671+
56672+int
56673+gr_set_acls(const int type)
56674+{
56675+ struct task_struct *task, *task2;
56676+ struct acl_role_label *role = current->role;
56677+ __u16 acl_role_id = current->acl_role_id;
56678+ const struct cred *cred;
56679+ int ret;
56680+
56681+ rcu_read_lock();
56682+ read_lock(&tasklist_lock);
56683+ read_lock(&grsec_exec_file_lock);
56684+ do_each_thread(task2, task) {
56685+ /* check to see if we're called from the exit handler,
56686+ if so, only replace ACLs that have inherited the admin
56687+ ACL */
56688+
56689+ if (type && (task->role != role ||
56690+ task->acl_role_id != acl_role_id))
56691+ continue;
56692+
56693+ task->acl_role_id = 0;
56694+ task->acl_sp_role = 0;
56695+
56696+ if (task->exec_file) {
56697+ cred = __task_cred(task);
56698+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
56699+ ret = gr_apply_subject_to_task(task);
56700+ if (ret) {
56701+ read_unlock(&grsec_exec_file_lock);
56702+ read_unlock(&tasklist_lock);
56703+ rcu_read_unlock();
56704+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
56705+ return ret;
56706+ }
56707+ } else {
56708+ // it's a kernel process
56709+ task->role = kernel_role;
56710+ task->acl = kernel_role->root_label;
56711+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
56712+ task->acl->mode &= ~GR_PROCFIND;
56713+#endif
56714+ }
56715+ } while_each_thread(task2, task);
56716+ read_unlock(&grsec_exec_file_lock);
56717+ read_unlock(&tasklist_lock);
56718+ rcu_read_unlock();
56719+
56720+ return 0;
56721+}
56722+
56723+void
56724+gr_learn_resource(const struct task_struct *task,
56725+ const int res, const unsigned long wanted, const int gt)
56726+{
56727+ struct acl_subject_label *acl;
56728+ const struct cred *cred;
56729+
56730+ if (unlikely((gr_status & GR_READY) &&
56731+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
56732+ goto skip_reslog;
56733+
56734+#ifdef CONFIG_GRKERNSEC_RESLOG
56735+ gr_log_resource(task, res, wanted, gt);
56736+#endif
56737+ skip_reslog:
56738+
56739+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
56740+ return;
56741+
56742+ acl = task->acl;
56743+
56744+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
56745+ !(acl->resmask & (1 << (unsigned short) res))))
56746+ return;
56747+
56748+ if (wanted >= acl->res[res].rlim_cur) {
56749+ unsigned long res_add;
56750+
56751+ res_add = wanted;
56752+ switch (res) {
56753+ case RLIMIT_CPU:
56754+ res_add += GR_RLIM_CPU_BUMP;
56755+ break;
56756+ case RLIMIT_FSIZE:
56757+ res_add += GR_RLIM_FSIZE_BUMP;
56758+ break;
56759+ case RLIMIT_DATA:
56760+ res_add += GR_RLIM_DATA_BUMP;
56761+ break;
56762+ case RLIMIT_STACK:
56763+ res_add += GR_RLIM_STACK_BUMP;
56764+ break;
56765+ case RLIMIT_CORE:
56766+ res_add += GR_RLIM_CORE_BUMP;
56767+ break;
56768+ case RLIMIT_RSS:
56769+ res_add += GR_RLIM_RSS_BUMP;
56770+ break;
56771+ case RLIMIT_NPROC:
56772+ res_add += GR_RLIM_NPROC_BUMP;
56773+ break;
56774+ case RLIMIT_NOFILE:
56775+ res_add += GR_RLIM_NOFILE_BUMP;
56776+ break;
56777+ case RLIMIT_MEMLOCK:
56778+ res_add += GR_RLIM_MEMLOCK_BUMP;
56779+ break;
56780+ case RLIMIT_AS:
56781+ res_add += GR_RLIM_AS_BUMP;
56782+ break;
56783+ case RLIMIT_LOCKS:
56784+ res_add += GR_RLIM_LOCKS_BUMP;
56785+ break;
56786+ case RLIMIT_SIGPENDING:
56787+ res_add += GR_RLIM_SIGPENDING_BUMP;
56788+ break;
56789+ case RLIMIT_MSGQUEUE:
56790+ res_add += GR_RLIM_MSGQUEUE_BUMP;
56791+ break;
56792+ case RLIMIT_NICE:
56793+ res_add += GR_RLIM_NICE_BUMP;
56794+ break;
56795+ case RLIMIT_RTPRIO:
56796+ res_add += GR_RLIM_RTPRIO_BUMP;
56797+ break;
56798+ case RLIMIT_RTTIME:
56799+ res_add += GR_RLIM_RTTIME_BUMP;
56800+ break;
56801+ }
56802+
56803+ acl->res[res].rlim_cur = res_add;
56804+
56805+ if (wanted > acl->res[res].rlim_max)
56806+ acl->res[res].rlim_max = res_add;
56807+
56808+ /* only log the subject filename, since resource logging is supported for
56809+ single-subject learning only */
56810+ rcu_read_lock();
56811+ cred = __task_cred(task);
56812+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
56813+ task->role->roletype, cred->uid, cred->gid, acl->filename,
56814+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
56815+ "", (unsigned long) res, &task->signal->saved_ip);
56816+ rcu_read_unlock();
56817+ }
56818+
56819+ return;
56820+}
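/*
 * Editorial sketch, not part of the patch: in learning mode,
 * gr_learn_resource() above grows a subject's limit to the wanted value
 * plus a per-resource headroom "bump", so a single overshoot doesn't
 * immediately force another relearn.  This is a table-driven variant of
 * the switch above, with the guard checks (GR_READY, learn flags,
 * resmask) omitted; resource ids and bump values here are made up for
 * illustration.
 */
#include <stdio.h>

enum { R_CPU, R_FSIZE, R_NOFILE, R_NLIMITS };

static const unsigned long bump[R_NLIMITS] = {
	[R_CPU]    = 60,	/* seconds  */
	[R_FSIZE]  = 50000,	/* bytes    */
	[R_NOFILE] = 5,		/* open fds */
};

struct rlimit_pair { unsigned long cur, max; };

static void learn_resource(struct rlimit_pair *res, int r, unsigned long wanted)
{
	if (wanted < res[r].cur)
		return;
	res[r].cur = wanted + bump[r];
	if (wanted > res[r].max)
		res[r].max = wanted + bump[r];
}

int main(void)
{
	struct rlimit_pair res[R_NLIMITS] = { { 10, 10 }, { 0, 0 }, { 64, 64 } };

	learn_resource(res, R_NOFILE, 100);
	printf("nofile: cur=%lu max=%lu\n", res[R_NOFILE].cur, res[R_NOFILE].max);
	return 0;
}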
56821+
56822+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
56823+void
56824+pax_set_initial_flags(struct linux_binprm *bprm)
56825+{
56826+ struct task_struct *task = current;
56827+ struct acl_subject_label *proc;
56828+ unsigned long flags;
56829+
56830+ if (unlikely(!(gr_status & GR_READY)))
56831+ return;
56832+
56833+ flags = pax_get_flags(task);
56834+
56835+ proc = task->acl;
56836+
56837+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
56838+ flags &= ~MF_PAX_PAGEEXEC;
56839+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
56840+ flags &= ~MF_PAX_SEGMEXEC;
56841+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
56842+ flags &= ~MF_PAX_RANDMMAP;
56843+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
56844+ flags &= ~MF_PAX_EMUTRAMP;
56845+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
56846+ flags &= ~MF_PAX_MPROTECT;
56847+
56848+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
56849+ flags |= MF_PAX_PAGEEXEC;
56850+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
56851+ flags |= MF_PAX_SEGMEXEC;
56852+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
56853+ flags |= MF_PAX_RANDMMAP;
56854+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
56855+ flags |= MF_PAX_EMUTRAMP;
56856+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
56857+ flags |= MF_PAX_MPROTECT;
56858+
56859+ pax_set_flags(task, flags);
56860+
56861+ return;
56862+}
56863+#endif
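/*
 * Editorial sketch, not part of the patch: pax_set_initial_flags() above
 * is a clear-then-set override.  Each subject carries independent
 * DISABLE_* and ENABLE_* bits; disables are applied first, then enables,
 * so a subject can force a flag either way while leaving unmentioned
 * flags at their inherited values.  Hypothetical flag names.
 */
#include <stdio.h>

#define F_PAGEEXEC (1u << 0)
#define F_MPROTECT (1u << 1)

static unsigned int apply_override(unsigned int flags,
				   unsigned int disable, unsigned int enable)
{
	flags &= ~disable;	/* explicit disables clear inherited state */
	flags |= enable;	/* then explicit enables are layered on top */
	return flags;
}

int main(void)
{
	unsigned int flags = F_PAGEEXEC;	/* inherited */

	flags = apply_override(flags, F_PAGEEXEC, F_MPROTECT);
	printf("pageexec=%d mprotect=%d\n",
	       !!(flags & F_PAGEEXEC), !!(flags & F_MPROTECT));
	return 0;
}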
56864+
56865+int
56866+gr_handle_proc_ptrace(struct task_struct *task)
56867+{
56868+ struct file *filp;
56869+ struct task_struct *tmp = task;
56870+ struct task_struct *curtemp = current;
56871+ __u32 retmode;
56872+
56873+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
56874+ if (unlikely(!(gr_status & GR_READY)))
56875+ return 0;
56876+#endif
56877+
56878+ read_lock(&tasklist_lock);
56879+ read_lock(&grsec_exec_file_lock);
56880+ filp = task->exec_file;
56881+
56882+ while (tmp->pid > 0) {
56883+ if (tmp == curtemp)
56884+ break;
56885+ tmp = tmp->real_parent;
56886+ }
56887+
56888+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && !uid_eq(current_uid(), GLOBAL_ROOT_UID) && !(gr_status & GR_READY)) ||
56889+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
56890+ read_unlock(&grsec_exec_file_lock);
56891+ read_unlock(&tasklist_lock);
56892+ return 1;
56893+ }
56894+
56895+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
56896+ if (!(gr_status & GR_READY)) {
56897+ read_unlock(&grsec_exec_file_lock);
56898+ read_unlock(&tasklist_lock);
56899+ return 0;
56900+ }
56901+#endif
56902+
56903+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
56904+ read_unlock(&grsec_exec_file_lock);
56905+ read_unlock(&tasklist_lock);
56906+
56907+ if (retmode & GR_NOPTRACE)
56908+ return 1;
56909+
56910+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
56911+ && (current->acl != task->acl || (current->acl != current->role->root_label
56912+ && current->pid != task->pid)))
56913+ return 1;
56914+
56915+ return 0;
56916+}
56917+
56918+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
56919+{
56920+ if (unlikely(!(gr_status & GR_READY)))
56921+ return;
56922+
56923+ if (!(current->role->roletype & GR_ROLE_GOD))
56924+ return;
56925+
56926+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
56927+ p->role->rolename, gr_task_roletype_to_char(p),
56928+ p->acl->filename);
56929+}
56930+
56931+int
56932+gr_handle_ptrace(struct task_struct *task, const long request)
56933+{
56934+ struct task_struct *tmp = task;
56935+ struct task_struct *curtemp = current;
56936+ __u32 retmode;
56937+
56938+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
56939+ if (unlikely(!(gr_status & GR_READY)))
56940+ return 0;
56941+#endif
56942+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
56943+ read_lock(&tasklist_lock);
56944+ while (tmp->pid > 0) {
56945+ if (tmp == curtemp)
56946+ break;
56947+ tmp = tmp->real_parent;
56948+ }
56949+
56950+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && !uid_eq(current_uid(), GLOBAL_ROOT_UID) && !(gr_status & GR_READY)) ||
56951+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
56952+ read_unlock(&tasklist_lock);
56953+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
56954+ return 1;
56955+ }
56956+ read_unlock(&tasklist_lock);
56957+ }
56958+
56959+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
56960+ if (!(gr_status & GR_READY))
56961+ return 0;
56962+#endif
56963+
56964+ read_lock(&grsec_exec_file_lock);
56965+ if (unlikely(!task->exec_file)) {
56966+ read_unlock(&grsec_exec_file_lock);
56967+ return 0;
56968+ }
56969+
56970+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
56971+ read_unlock(&grsec_exec_file_lock);
56972+
56973+ if (retmode & GR_NOPTRACE) {
56974+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
56975+ return 1;
56976+ }
56977+
56978+ if (retmode & GR_PTRACERD) {
56979+ switch (request) {
56980+ case PTRACE_SEIZE:
56981+ case PTRACE_POKETEXT:
56982+ case PTRACE_POKEDATA:
56983+ case PTRACE_POKEUSR:
56984+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
56985+ case PTRACE_SETREGS:
56986+ case PTRACE_SETFPREGS:
56987+#endif
56988+#ifdef CONFIG_X86
56989+ case PTRACE_SETFPXREGS:
56990+#endif
56991+#ifdef CONFIG_ALTIVEC
56992+ case PTRACE_SETVRREGS:
56993+#endif
56994+ return 1;
56995+ default:
56996+ return 0;
56997+ }
56998+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
56999+ !(current->role->roletype & GR_ROLE_GOD) &&
57000+ (current->acl != task->acl)) {
57001+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
57002+ return 1;
57003+ }
57004+
57005+ return 0;
57006+}
57007+
57008+static int is_writable_mmap(const struct file *filp)
57009+{
57010+ struct task_struct *task = current;
57011+ struct acl_object_label *obj, *obj2;
57012+
57013+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
57014+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
57015+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
57016+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
57017+ task->role->root_label);
57018+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
57019+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
57020+ return 1;
57021+ }
57022+ }
57023+ return 0;
57024+}
57025+
57026+int
57027+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
57028+{
57029+ __u32 mode;
57030+
57031+ if (unlikely(!file || !(prot & PROT_EXEC)))
57032+ return 1;
57033+
57034+ if (is_writable_mmap(file))
57035+ return 0;
57036+
57037+ mode =
57038+ gr_search_file(file->f_path.dentry,
57039+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
57040+ file->f_path.mnt);
57041+
57042+ if (!gr_tpe_allow(file))
57043+ return 0;
57044+
57045+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
57046+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
57047+ return 0;
57048+ } else if (unlikely(!(mode & GR_EXEC))) {
57049+ return 0;
57050+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
57051+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
57052+ return 1;
57053+ }
57054+
57055+ return 1;
57056+}
57057+
57058+int
57059+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
57060+{
57061+ __u32 mode;
57062+
57063+ if (unlikely(!file || !(prot & PROT_EXEC)))
57064+ return 1;
57065+
57066+ if (is_writable_mmap(file))
57067+ return 0;
57068+
57069+ mode =
57070+ gr_search_file(file->f_path.dentry,
57071+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
57072+ file->f_path.mnt);
57073+
57074+ if (!gr_tpe_allow(file))
57075+ return 0;
57076+
57077+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
57078+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
57079+ return 0;
57080+ } else if (unlikely(!(mode & GR_EXEC))) {
57081+ return 0;
57082+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
57083+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
57084+ return 1;
57085+ }
57086+
57087+ return 1;
57088+}
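/*
 * Editorial sketch, not part of the patch: the decision ladder shared by
 * gr_acl_handle_mmap() and gr_acl_handle_mprotect() above.  It is a
 * requested-vs-granted mode check with two side channels: GR_SUPPRESS
 * silences the denial log, GR_AUDIT_EXEC logs successes.  Return 0 =
 * deny, 1 = allow; printf stands in for the gr_log_* calls.
 */
#include <stdio.h>

#define GR_EXEC       (1u << 0)
#define GR_AUDIT_EXEC (1u << 1)
#define GR_SUPPRESS   (1u << 2)

static int exec_mapping_ok(unsigned int mode)
{
	if (!(mode & GR_EXEC) && !(mode & GR_SUPPRESS)) {
		printf("denied (logged)\n");
		return 0;
	} else if (!(mode & GR_EXEC)) {
		return 0;	/* denied silently */
	} else if (mode & GR_AUDIT_EXEC) {
		printf("allowed (audited)\n");
		return 1;
	}
	return 1;		/* allowed quietly */
}

int main(void)
{
	exec_mapping_ok(0);				/* denied, logged   */
	exec_mapping_ok(GR_SUPPRESS);			/* denied, silent   */
	exec_mapping_ok(GR_EXEC | GR_AUDIT_EXEC);	/* allowed, audited */
	return exec_mapping_ok(GR_EXEC) ? 0 : 1;	/* allowed, quiet   */
}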
57089+
57090+void
57091+gr_acl_handle_psacct(struct task_struct *task, const long code)
57092+{
57093+ unsigned long runtime;
57094+ unsigned long cputime;
57095+ unsigned int wday, cday;
57096+ __u8 whr, chr;
57097+ __u8 wmin, cmin;
57098+ __u8 wsec, csec;
57099+ struct timespec timeval;
57100+
57101+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
57102+ !(task->acl->mode & GR_PROCACCT)))
57103+ return;
57104+
57105+ do_posix_clock_monotonic_gettime(&timeval);
57106+ runtime = timeval.tv_sec - task->start_time.tv_sec;
57107+ wday = runtime / (3600 * 24);
57108+ runtime -= wday * (3600 * 24);
57109+ whr = runtime / 3600;
57110+ runtime -= whr * 3600;
57111+ wmin = runtime / 60;
57112+ runtime -= wmin * 60;
57113+ wsec = runtime;
57114+
57115+ cputime = (task->utime + task->stime) / HZ;
57116+ cday = cputime / (3600 * 24);
57117+ cputime -= cday * (3600 * 24);
57118+ chr = cputime / 3600;
57119+ cputime -= chr * 3600;
57120+ cmin = cputime / 60;
57121+ cputime -= cmin * 60;
57122+ csec = cputime;
57123+
57124+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
57125+
57126+ return;
57127+}
57128+
57129+void gr_set_kernel_label(struct task_struct *task)
57130+{
57131+ if (gr_status & GR_READY) {
57132+ task->role = kernel_role;
57133+ task->acl = kernel_role->root_label;
57134+ }
57135+ return;
57136+}
57137+
57138+#ifdef CONFIG_TASKSTATS
57139+int gr_is_taskstats_denied(int pid)
57140+{
57141+ struct task_struct *task;
57142+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
57143+ const struct cred *cred;
57144+#endif
57145+ int ret = 0;
57146+
57147+ /* restrict taskstats viewing to un-chrooted root users
57148+ who have the 'view' subject flag if the RBAC system is enabled
57149+ */
57150+
57151+ rcu_read_lock();
57152+ read_lock(&tasklist_lock);
57153+ task = find_task_by_vpid(pid);
57154+ if (task) {
57155+#ifdef CONFIG_GRKERNSEC_CHROOT
57156+ if (proc_is_chrooted(task))
57157+ ret = -EACCES;
57158+#endif
57159+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
57160+ cred = __task_cred(task);
57161+#ifdef CONFIG_GRKERNSEC_PROC_USER
57162+ if (!uid_eq(cred->uid, GLOBAL_ROOT_UID))
57163+ ret = -EACCES;
57164+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
57165+ if (!uid_eq(cred->uid, GLOBAL_ROOT_UID) && !groups_search(cred->group_info, grsec_proc_gid))
57166+ ret = -EACCES;
57167+#endif
57168+#endif
57169+ if (gr_status & GR_READY) {
57170+ if (!(task->acl->mode & GR_VIEW))
57171+ ret = -EACCES;
57172+ }
57173+ } else
57174+ ret = -ENOENT;
57175+
57176+ read_unlock(&tasklist_lock);
57177+ rcu_read_unlock();
57178+
57179+ return ret;
57180+}
57181+#endif
57182+
57183+/* AUXV entries are filled via a descendant of search_binary_handler
57184+ after we've already applied the subject for the target
57185+*/
57186+int gr_acl_enable_at_secure(void)
57187+{
57188+ if (unlikely(!(gr_status & GR_READY)))
57189+ return 0;
57190+
57191+ if (current->acl->mode & GR_ATSECURE)
57192+ return 1;
57193+
57194+ return 0;
57195+}
57196+
57197+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
57198+{
57199+ struct task_struct *task = current;
57200+ struct dentry *dentry = file->f_path.dentry;
57201+ struct vfsmount *mnt = file->f_path.mnt;
57202+ struct acl_object_label *obj, *tmp;
57203+ struct acl_subject_label *subj;
57204+ unsigned int bufsize;
57205+ int is_not_root;
57206+ char *path;
57207+ dev_t dev = __get_dev(dentry);
57208+
57209+ if (unlikely(!(gr_status & GR_READY)))
57210+ return 1;
57211+
57212+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
57213+ return 1;
57214+
57215+ /* ignore Eric Biederman */
57216+ if (IS_PRIVATE(dentry->d_inode))
57217+ return 1;
57218+
57219+ subj = task->acl;
57220+ read_lock(&gr_inode_lock);
57221+ do {
57222+ obj = lookup_acl_obj_label(ino, dev, subj);
57223+ if (obj != NULL) {
57224+ read_unlock(&gr_inode_lock);
57225+ return (obj->mode & GR_FIND) ? 1 : 0;
57226+ }
57227+ } while ((subj = subj->parent_subject));
57228+ read_unlock(&gr_inode_lock);
57229+
57230+ /* this is purely an optimization since we're looking for an object
57231+ for the directory we're doing a readdir on
57232+ if it's possible for any globbed object to match the entry we're
57233+ filling into the directory, then the object we find here will be
57234+ an anchor point with attached globbed objects
57235+ */
57236+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
57237+ if (obj->globbed == NULL)
57238+ return (obj->mode & GR_FIND) ? 1 : 0;
57239+
57240+ is_not_root = ((obj->filename[0] == '/') &&
57241+ (obj->filename[1] == '\0')) ? 0 : 1;
57242+ bufsize = PAGE_SIZE - namelen - is_not_root;
57243+
57244+ /* rejects bufsize > PAGE_SIZE and bufsize == 0 in one unsigned comparison */
57245+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
57246+ return 1;
57247+
57248+ preempt_disable();
57249+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
57250+ bufsize);
57251+
57252+ bufsize = strlen(path);
57253+
57254+ /* if base is "/", don't append an additional slash */
57255+ if (is_not_root)
57256+ *(path + bufsize) = '/';
57257+ memcpy(path + bufsize + is_not_root, name, namelen);
57258+ *(path + bufsize + namelen + is_not_root) = '\0';
57259+
57260+ tmp = obj->globbed;
57261+ while (tmp) {
57262+ if (!glob_match(tmp->filename, path)) {
57263+ preempt_enable();
57264+ return (tmp->mode & GR_FIND) ? 1 : 0;
57265+ }
57266+ tmp = tmp->next;
57267+ }
57268+ preempt_enable();
57269+ return (obj->mode & GR_FIND) ? 1 : 0;
57270+}
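/*
 * Editorial sketch, not part of the patch: the path assembly inside
 * gr_acl_handle_filldir() above -- append the directory entry name to the
 * anchor object's path, avoiding a double slash when the anchor is the
 * root directory (the is_not_root trick).  build_entry_path() is a
 * hypothetical helper for illustration.
 */
#include <stdio.h>
#include <string.h>

static void build_entry_path(char *buf, size_t bufsz,
			     const char *dir, const char *name)
{
	int is_not_root = !(dir[0] == '/' && dir[1] == '\0');
	size_t len = strlen(dir);

	memcpy(buf, dir, len);
	if (is_not_root)
		buf[len++] = '/';	/* only non-root bases get a separator */
	snprintf(buf + len, bufsz - len, "%s", name);
}

int main(void)
{
	char buf[64];

	build_entry_path(buf, sizeof(buf), "/", "etc");
	printf("%s\n", buf);	/* /etc        */
	build_entry_path(buf, sizeof(buf), "/etc", "passwd");
	printf("%s\n", buf);	/* /etc/passwd */
	return 0;
}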
57271+
57272+void gr_put_exec_file(struct task_struct *task)
57273+{
57274+ struct file *filp;
57275+
57276+ write_lock(&grsec_exec_file_lock);
57277+ filp = task->exec_file;
57278+ task->exec_file = NULL;
57279+ write_unlock(&grsec_exec_file_lock);
57280+
57281+ if (filp)
57282+ fput(filp);
57283+
57284+ return;
57285+}
57286+
57287+
57288+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
57289+EXPORT_SYMBOL(gr_acl_is_enabled);
57290+#endif
57291+EXPORT_SYMBOL(gr_learn_resource);
57292+EXPORT_SYMBOL(gr_set_kernel_label);
57293+#ifdef CONFIG_SECURITY
57294+EXPORT_SYMBOL(gr_check_user_change);
57295+EXPORT_SYMBOL(gr_check_group_change);
57296+#endif
57297+
57298diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
57299new file mode 100644
57300index 0000000..34fefda
57301--- /dev/null
57302+++ b/grsecurity/gracl_alloc.c
57303@@ -0,0 +1,105 @@
57304+#include <linux/kernel.h>
57305+#include <linux/mm.h>
57306+#include <linux/slab.h>
57307+#include <linux/vmalloc.h>
57308+#include <linux/gracl.h>
57309+#include <linux/grsecurity.h>
57310+
57311+static unsigned long alloc_stack_next = 1;
57312+static unsigned long alloc_stack_size = 1;
57313+static void **alloc_stack;
57314+
57315+static __inline__ int
57316+alloc_pop(void)
57317+{
57318+ if (alloc_stack_next == 1)
57319+ return 0;
57320+
57321+ kfree(alloc_stack[alloc_stack_next - 2]);
57322+
57323+ alloc_stack_next--;
57324+
57325+ return 1;
57326+}
57327+
57328+static __inline__ int
57329+alloc_push(void *buf)
57330+{
57331+ if (alloc_stack_next >= alloc_stack_size)
57332+ return 1;
57333+
57334+ alloc_stack[alloc_stack_next - 1] = buf;
57335+
57336+ alloc_stack_next++;
57337+
57338+ return 0;
57339+}
57340+
57341+void *
57342+acl_alloc(unsigned long len)
57343+{
57344+ void *ret = NULL;
57345+
57346+ if (!len || len > PAGE_SIZE)
57347+ goto out;
57348+
57349+ ret = kmalloc(len, GFP_KERNEL);
57350+
57351+ if (ret) {
57352+ if (alloc_push(ret)) {
57353+ kfree(ret);
57354+ ret = NULL;
57355+ }
57356+ }
57357+
57358+out:
57359+ return ret;
57360+}
57361+
57362+void *
57363+acl_alloc_num(unsigned long num, unsigned long len)
57364+{
57365+ if (!len || (num > (PAGE_SIZE / len)))
57366+ return NULL;
57367+
57368+ return acl_alloc(num * len);
57369+}
57370+
57371+void
57372+acl_free_all(void)
57373+{
57374+ if (gr_acl_is_enabled() || !alloc_stack)
57375+ return;
57376+
57377+ while (alloc_pop()) ;
57378+
57379+ if (alloc_stack) {
57380+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
57381+ kfree(alloc_stack);
57382+ else
57383+ vfree(alloc_stack);
57384+ }
57385+
57386+ alloc_stack = NULL;
57387+ alloc_stack_size = 1;
57388+ alloc_stack_next = 1;
57389+
57390+ return;
57391+}
57392+
57393+int
57394+acl_alloc_stack_init(unsigned long size)
57395+{
57396+ if ((size * sizeof (void *)) <= PAGE_SIZE)
57397+ alloc_stack =
57398+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
57399+ else
57400+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
57401+
57402+ alloc_stack_size = size;
57403+
57404+ if (!alloc_stack)
57405+ return 0;
57406+ else
57407+ return 1;
57408+}
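
gracl_alloc.c implements a one-shot arena: every acl_alloc() is recorded on a pointer stack sized up front by acl_alloc_stack_init(), so acl_free_all() can tear the whole policy heap down without tracking individual lifetimes. A minimal userspace model of the same bookkeeping, with standard malloc/free standing in for kmalloc/vmalloc (names here are illustrative, not from the patch):

    #include <stdlib.h>

    static unsigned long stack_next = 1;   /* next free slot, 1-based like alloc_stack_next */
    static unsigned long stack_size = 1;
    static void **stack;

    static int pop_one(void)
    {
        if (stack_next == 1)               /* empty: nothing left to free */
            return 0;
        free(stack[stack_next - 2]);       /* top element lives at index next-2 */
        stack_next--;
        return 1;
    }

    static int push_one(void *buf)
    {
        if (stack_next >= stack_size)      /* full: caller must free buf itself */
            return 1;
        stack[stack_next - 1] = buf;
        stack_next++;
        return 0;
    }

    int main(void)
    {
        stack_size = 8;                    /* like acl_alloc_stack_init(8) */
        stack = malloc(stack_size * sizeof(void *));
        if (!stack)
            return 1;

        void *a = malloc(64);              /* like acl_alloc(64) */
        if (a && push_one(a)) {            /* on push failure the buffer is dropped */
            free(a);
            a = NULL;
        }

        while (pop_one())                  /* like acl_free_all(): drain the stack */
            ;
        free(stack);
        return 0;
    }
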
57409diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
57410new file mode 100644
57411index 0000000..6d21049
57412--- /dev/null
57413+++ b/grsecurity/gracl_cap.c
57414@@ -0,0 +1,110 @@
57415+#include <linux/kernel.h>
57416+#include <linux/module.h>
57417+#include <linux/sched.h>
57418+#include <linux/gracl.h>
57419+#include <linux/grsecurity.h>
57420+#include <linux/grinternal.h>
57421+
57422+extern const char *captab_log[];
57423+extern int captab_log_entries;
57424+
57425+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
57426+{
57427+ struct acl_subject_label *curracl;
57428+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
57429+ kernel_cap_t cap_audit = __cap_empty_set;
57430+
57431+ if (!gr_acl_is_enabled())
57432+ return 1;
57433+
57434+ curracl = task->acl;
57435+
57436+ cap_drop = curracl->cap_lower;
57437+ cap_mask = curracl->cap_mask;
57438+ cap_audit = curracl->cap_invert_audit;
57439+
57440+ while ((curracl = curracl->parent_subject)) {
57441+		/* if the cap isn't yet specified in the computed mask but is specified in the
57442+		   current level subject, record it in the computed mask; if the current level
57443+		   subject also lowers the cap, add it to the set of dropped capabilities
57444+		   (inverted audit status is tracked the same way)
57445+		*/
57446+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
57447+ cap_raise(cap_mask, cap);
57448+ if (cap_raised(curracl->cap_lower, cap))
57449+ cap_raise(cap_drop, cap);
57450+ if (cap_raised(curracl->cap_invert_audit, cap))
57451+ cap_raise(cap_audit, cap);
57452+ }
57453+ }
57454+
57455+ if (!cap_raised(cap_drop, cap)) {
57456+ if (cap_raised(cap_audit, cap))
57457+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
57458+ return 1;
57459+ }
57460+
57461+ curracl = task->acl;
57462+
57463+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
57464+ && cap_raised(cred->cap_effective, cap)) {
57465+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
57466+ task->role->roletype, cred->uid,
57467+ cred->gid, task->exec_file ?
57468+ gr_to_filename(task->exec_file->f_path.dentry,
57469+ task->exec_file->f_path.mnt) : curracl->filename,
57470+ curracl->filename, 0UL,
57471+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
57472+ return 1;
57473+ }
57474+
57475+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
57476+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
57477+
57478+ return 0;
57479+}
57480+
57481+int
57482+gr_acl_is_capable(const int cap)
57483+{
57484+ return gr_task_acl_is_capable(current, current_cred(), cap);
57485+}
57486+
57487+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
57488+{
57489+ struct acl_subject_label *curracl;
57490+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
57491+
57492+ if (!gr_acl_is_enabled())
57493+ return 1;
57494+
57495+ curracl = task->acl;
57496+
57497+ cap_drop = curracl->cap_lower;
57498+ cap_mask = curracl->cap_mask;
57499+
57500+ while ((curracl = curracl->parent_subject)) {
57501+		/* if the cap isn't yet specified in the computed mask but is specified in the
57502+		   current level subject, record it in the computed mask; if the current level
57503+		   subject also lowers the cap, add it to the set of dropped capabilities
57504+		   (this variant skips audit tracking)
57505+		*/
57506+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
57507+ cap_raise(cap_mask, cap);
57508+ if (cap_raised(curracl->cap_lower, cap))
57509+ cap_raise(cap_drop, cap);
57510+ }
57511+ }
57512+
57513+ if (!cap_raised(cap_drop, cap))
57514+ return 1;
57515+
57516+ return 0;
57517+}
57518+
57519+int
57520+gr_acl_is_capable_nolog(const int cap)
57521+{
57522+ return gr_task_acl_is_capable_nolog(current, cap);
57523+}
57524+
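The capability walk in gr_task_acl_is_capable() and its _nolog twin resolves each capability by the first subject in the parent chain that mentions it: once a level's cap_mask covers the bit, deeper ancestors can no longer change the verdict. A hedged userspace model of that first-mention-wins rule, with plain 64-bit masks standing in for kernel_cap_t and audit tracking omitted (struct and function names are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    struct subject {
        uint64_t cap_mask;   /* caps this level says anything about */
        uint64_t cap_lower;  /* caps this level drops */
        struct subject *parent;
    };

    /* 1 = capability allowed, 0 = dropped */
    static int cap_allowed(const struct subject *s, int cap)
    {
        uint64_t bit  = 1ULL << cap;
        uint64_t mask = s->cap_mask;   /* caps decided so far */
        uint64_t drop = s->cap_lower;  /* caps dropped so far */

        for (s = s->parent; s; s = s->parent) {
            /* first level to mention the cap decides it */
            if (!(mask & bit) && (s->cap_mask & bit)) {
                mask |= bit;
                if (s->cap_lower & bit)
                    drop |= bit;
            }
        }
        return !(drop & bit);
    }

    int main(void)
    {
        /* parent drops cap 21; child is silent about it and inherits the drop */
        struct subject parent = { .cap_mask = 1ULL << 21, .cap_lower = 1ULL << 21 };
        struct subject child  = { .cap_mask = 0, .cap_lower = 0, .parent = &parent };

        printf("cap 21 allowed: %d\n", cap_allowed(&child, 21)); /* 0: inherited drop */
        printf("cap 7  allowed: %d\n", cap_allowed(&child, 7));  /* 1: never mentioned */
        return 0;
    }
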
57525diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
57526new file mode 100644
57527index 0000000..a340c17
57528--- /dev/null
57529+++ b/grsecurity/gracl_fs.c
57530@@ -0,0 +1,431 @@
57531+#include <linux/kernel.h>
57532+#include <linux/sched.h>
57533+#include <linux/types.h>
57534+#include <linux/fs.h>
57535+#include <linux/file.h>
57536+#include <linux/stat.h>
57537+#include <linux/grsecurity.h>
57538+#include <linux/grinternal.h>
57539+#include <linux/gracl.h>
57540+
57541+umode_t
57542+gr_acl_umask(void)
57543+{
57544+ if (unlikely(!gr_acl_is_enabled()))
57545+ return 0;
57546+
57547+ return current->role->umask;
57548+}
57549+
57550+__u32
57551+gr_acl_handle_hidden_file(const struct dentry * dentry,
57552+ const struct vfsmount * mnt)
57553+{
57554+ __u32 mode;
57555+
57556+ if (unlikely(!dentry->d_inode))
57557+ return GR_FIND;
57558+
57559+ mode =
57560+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
57561+
57562+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
57563+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
57564+ return mode;
57565+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
57566+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
57567+ return 0;
57568+ } else if (unlikely(!(mode & GR_FIND)))
57569+ return 0;
57570+
57571+ return GR_FIND;
57572+}
57573+
57574+__u32
57575+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
57576+ int acc_mode)
57577+{
57578+ __u32 reqmode = GR_FIND;
57579+ __u32 mode;
57580+
57581+ if (unlikely(!dentry->d_inode))
57582+ return reqmode;
57583+
57584+ if (acc_mode & MAY_APPEND)
57585+ reqmode |= GR_APPEND;
57586+ else if (acc_mode & MAY_WRITE)
57587+ reqmode |= GR_WRITE;
57588+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
57589+ reqmode |= GR_READ;
57590+
57591+ mode =
57592+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
57593+ mnt);
57594+
57595+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
57596+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
57597+ reqmode & GR_READ ? " reading" : "",
57598+ reqmode & GR_WRITE ? " writing" : reqmode &
57599+ GR_APPEND ? " appending" : "");
57600+ return reqmode;
57601+ } else
57602+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
57603+ {
57604+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
57605+ reqmode & GR_READ ? " reading" : "",
57606+ reqmode & GR_WRITE ? " writing" : reqmode &
57607+ GR_APPEND ? " appending" : "");
57608+ return 0;
57609+ } else if (unlikely((mode & reqmode) != reqmode))
57610+ return 0;
57611+
57612+ return reqmode;
57613+}
57614+
57615+__u32
57616+gr_acl_handle_creat(const struct dentry * dentry,
57617+ const struct dentry * p_dentry,
57618+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
57619+ const int imode)
57620+{
57621+ __u32 reqmode = GR_WRITE | GR_CREATE;
57622+ __u32 mode;
57623+
57624+ if (acc_mode & MAY_APPEND)
57625+ reqmode |= GR_APPEND;
57626+ // if a directory was required or the directory already exists, then
57627+ // don't count this open as a read
57628+ if ((acc_mode & MAY_READ) &&
57629+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
57630+ reqmode |= GR_READ;
57631+ if ((open_flags & O_CREAT) &&
57632+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
57633+ reqmode |= GR_SETID;
57634+
57635+ mode =
57636+ gr_check_create(dentry, p_dentry, p_mnt,
57637+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
57638+
57639+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
57640+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
57641+ reqmode & GR_READ ? " reading" : "",
57642+ reqmode & GR_WRITE ? " writing" : reqmode &
57643+ GR_APPEND ? " appending" : "");
57644+ return reqmode;
57645+ } else
57646+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
57647+ {
57648+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
57649+ reqmode & GR_READ ? " reading" : "",
57650+ reqmode & GR_WRITE ? " writing" : reqmode &
57651+ GR_APPEND ? " appending" : "");
57652+ return 0;
57653+ } else if (unlikely((mode & reqmode) != reqmode))
57654+ return 0;
57655+
57656+ return reqmode;
57657+}
57658+
57659+__u32
57660+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
57661+ const int fmode)
57662+{
57663+ __u32 mode, reqmode = GR_FIND;
57664+
57665+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
57666+ reqmode |= GR_EXEC;
57667+ if (fmode & S_IWOTH)
57668+ reqmode |= GR_WRITE;
57669+ if (fmode & S_IROTH)
57670+ reqmode |= GR_READ;
57671+
57672+ mode =
57673+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
57674+ mnt);
57675+
57676+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
57677+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
57678+ reqmode & GR_READ ? " reading" : "",
57679+ reqmode & GR_WRITE ? " writing" : "",
57680+ reqmode & GR_EXEC ? " executing" : "");
57681+ return reqmode;
57682+ } else
57683+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
57684+ {
57685+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
57686+ reqmode & GR_READ ? " reading" : "",
57687+ reqmode & GR_WRITE ? " writing" : "",
57688+ reqmode & GR_EXEC ? " executing" : "");
57689+ return 0;
57690+ } else if (unlikely((mode & reqmode) != reqmode))
57691+ return 0;
57692+
57693+ return reqmode;
57694+}
57695+
57696+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
57697+{
57698+ __u32 mode;
57699+
57700+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
57701+
57702+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
57703+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
57704+ return mode;
57705+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
57706+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
57707+ return 0;
57708+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
57709+ return 0;
57710+
57711+ return (reqmode);
57712+}
57713+
57714+__u32
57715+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
57716+{
57717+	return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE, GR_RMDIR_ACL_MSG);
57718+}
57719+
57720+__u32
57721+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
57722+{
57723+	return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE, GR_UNLINK_ACL_MSG);
57724+}
57725+
57726+__u32
57727+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
57728+{
57729+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
57730+}
57731+
57732+__u32
57733+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
57734+{
57735+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
57736+}
57737+
57738+__u32
57739+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
57740+ umode_t *modeptr)
57741+{
57742+ umode_t mode;
57743+
57744+ *modeptr &= ~gr_acl_umask();
57745+ mode = *modeptr;
57746+
57747+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
57748+ return 1;
57749+
57750+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
57751+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
57752+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
57753+ GR_CHMOD_ACL_MSG);
57754+ } else {
57755+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
57756+ }
57757+}
57758+
57759+__u32
57760+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
57761+{
57762+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
57763+}
57764+
57765+__u32
57766+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
57767+{
57768+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
57769+}
57770+
57771+__u32
57772+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
57773+{
57774+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
57775+}
57776+
57777+__u32
57778+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
57779+{
57780+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
57781+ GR_UNIXCONNECT_ACL_MSG);
57782+}
57783+
57784+/* hardlinks require at minimum create and link permission;
57785+   any additional privilege required depends on the
57786+ privilege of the file being linked to
57787+*/
57788+__u32
57789+gr_acl_handle_link(const struct dentry * new_dentry,
57790+ const struct dentry * parent_dentry,
57791+ const struct vfsmount * parent_mnt,
57792+ const struct dentry * old_dentry,
57793+ const struct vfsmount * old_mnt, const struct filename *to)
57794+{
57795+ __u32 mode;
57796+ __u32 needmode = GR_CREATE | GR_LINK;
57797+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
57798+
57799+ mode =
57800+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
57801+ old_mnt);
57802+
57803+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
57804+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
57805+ return mode;
57806+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
57807+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
57808+ return 0;
57809+ } else if (unlikely((mode & needmode) != needmode))
57810+ return 0;
57811+
57812+ return 1;
57813+}
57814+
57815+__u32
57816+gr_acl_handle_symlink(const struct dentry * new_dentry,
57817+ const struct dentry * parent_dentry,
57818+ const struct vfsmount * parent_mnt, const struct filename *from)
57819+{
57820+ __u32 needmode = GR_WRITE | GR_CREATE;
57821+ __u32 mode;
57822+
57823+ mode =
57824+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
57825+ GR_CREATE | GR_AUDIT_CREATE |
57826+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
57827+
57828+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
57829+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
57830+ return mode;
57831+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
57832+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
57833+ return 0;
57834+ } else if (unlikely((mode & needmode) != needmode))
57835+ return 0;
57836+
57837+ return (GR_WRITE | GR_CREATE);
57838+}
57839+
57840+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
57841+{
57842+ __u32 mode;
57843+
57844+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
57845+
57846+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
57847+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
57848+ return mode;
57849+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
57850+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
57851+ return 0;
57852+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
57853+ return 0;
57854+
57855+ return (reqmode);
57856+}
57857+
57858+__u32
57859+gr_acl_handle_mknod(const struct dentry * new_dentry,
57860+ const struct dentry * parent_dentry,
57861+ const struct vfsmount * parent_mnt,
57862+ const int mode)
57863+{
57864+ __u32 reqmode = GR_WRITE | GR_CREATE;
57865+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
57866+ reqmode |= GR_SETID;
57867+
57868+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
57869+ reqmode, GR_MKNOD_ACL_MSG);
57870+}
57871+
57872+__u32
57873+gr_acl_handle_mkdir(const struct dentry *new_dentry,
57874+ const struct dentry *parent_dentry,
57875+ const struct vfsmount *parent_mnt)
57876+{
57877+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
57878+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
57879+}
57880+
57881+#define RENAME_CHECK_SUCCESS(old, new) \
57882+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
57883+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
57884+
57885+int
57886+gr_acl_handle_rename(struct dentry *new_dentry,
57887+ struct dentry *parent_dentry,
57888+ const struct vfsmount *parent_mnt,
57889+ struct dentry *old_dentry,
57890+ struct inode *old_parent_inode,
57891+ struct vfsmount *old_mnt, const struct filename *newname)
57892+{
57893+ __u32 comp1, comp2;
57894+ int error = 0;
57895+
57896+ if (unlikely(!gr_acl_is_enabled()))
57897+ return 0;
57898+
57899+ if (!new_dentry->d_inode) {
57900+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
57901+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
57902+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
57903+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
57904+ GR_DELETE | GR_AUDIT_DELETE |
57905+ GR_AUDIT_READ | GR_AUDIT_WRITE |
57906+ GR_SUPPRESS, old_mnt);
57907+ } else {
57908+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
57909+ GR_CREATE | GR_DELETE |
57910+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
57911+ GR_AUDIT_READ | GR_AUDIT_WRITE |
57912+ GR_SUPPRESS, parent_mnt);
57913+ comp2 =
57914+ gr_search_file(old_dentry,
57915+ GR_READ | GR_WRITE | GR_AUDIT_READ |
57916+ GR_DELETE | GR_AUDIT_DELETE |
57917+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
57918+ }
57919+
57920+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
57921+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
57922+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
57923+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
57924+ && !(comp2 & GR_SUPPRESS)) {
57925+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
57926+ error = -EACCES;
57927+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
57928+ error = -EACCES;
57929+
57930+ return error;
57931+}
57932+
57933+void
57934+gr_acl_handle_exit(void)
57935+{
57936+ u16 id;
57937+ char *rolename;
57938+
57939+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
57940+ !(current->role->roletype & GR_ROLE_PERSIST))) {
57941+ id = current->acl_role_id;
57942+ rolename = current->role->rolename;
57943+ gr_set_acls(1);
57944+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
57945+ }
57946+
57947+ gr_put_exec_file(current);
57948+ return;
57949+}
57950+
57951+int
57952+gr_acl_handle_procpidmem(const struct task_struct *task)
57953+{
57954+ if (unlikely(!gr_acl_is_enabled()))
57955+ return 0;
57956+
57957+ if (task != current && task->acl->mode & GR_PROTPROCFD)
57958+ return -EACCES;
57959+
57960+ return 0;
57961+}
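
All of the handlers above funnel through the same three-way pattern that generic_fs_handler() spells out: grant and audit-log when every required bit is present and an audit bit is set, deny and log when bits are missing and GR_SUPPRESS is clear, and deny silently otherwise. A compact userspace model of that decision (the bit values below are illustrative placeholders, not the patch's real GR_* constants):

    #include <stdio.h>

    #define GR_SUPPRESS_  0x1000u
    #define GR_AUDITS_    0x2000u   /* stand-in for the audit bit group */

    /* returns reqmode on success, 0 on denial; *log_audit / *log_deny model
       the gr_log_fs_rbac_* calls in generic_fs_handler() */
    static unsigned check(unsigned mode, unsigned reqmode,
                          int *log_audit, int *log_deny)
    {
        *log_audit = *log_deny = 0;

        if ((mode & reqmode) == reqmode) {      /* access granted */
            if (mode & GR_AUDITS_)
                *log_audit = 1;                 /* successful-access audit */
            return reqmode;
        }
        if (!(mode & GR_SUPPRESS_))             /* denial, not suppressed */
            *log_deny = 1;
        return 0;                               /* denial (possibly silent) */
    }

    int main(void)
    {
        int a, d;
        unsigned req = 0x2 | 0x4;               /* e.g. write | delete */

        check(req | GR_AUDITS_, req, &a, &d);
        printf("grant+audit: audit=%d deny_log=%d\n", a, d);  /* 1 0 */
        check(0x2, req, &a, &d);
        printf("plain deny:  audit=%d deny_log=%d\n", a, d);  /* 0 1 */
        check(0x2 | GR_SUPPRESS_, req, &a, &d);
        printf("silent deny: audit=%d deny_log=%d\n", a, d);  /* 0 0 */
        return 0;
    }
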
57962diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
57963new file mode 100644
57964index 0000000..58800a7
57965--- /dev/null
57966+++ b/grsecurity/gracl_ip.c
57967@@ -0,0 +1,384 @@
57968+#include <linux/kernel.h>
57969+#include <asm/uaccess.h>
57970+#include <asm/errno.h>
57971+#include <net/sock.h>
57972+#include <linux/file.h>
57973+#include <linux/fs.h>
57974+#include <linux/net.h>
57975+#include <linux/in.h>
57976+#include <linux/skbuff.h>
57977+#include <linux/ip.h>
57978+#include <linux/udp.h>
57979+#include <linux/types.h>
57980+#include <linux/sched.h>
57981+#include <linux/netdevice.h>
57982+#include <linux/inetdevice.h>
57983+#include <linux/gracl.h>
57984+#include <linux/grsecurity.h>
57985+#include <linux/grinternal.h>
57986+
57987+#define GR_BIND 0x01
57988+#define GR_CONNECT 0x02
57989+#define GR_INVERT 0x04
57990+#define GR_BINDOVERRIDE 0x08
57991+#define GR_CONNECTOVERRIDE 0x10
57992+#define GR_SOCK_FAMILY 0x20
57993+
57994+static const char * gr_protocols[IPPROTO_MAX] = {
57995+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
57996+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
57997+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
57998+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
57999+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
58000+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
58001+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
58002+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
58003+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
58004+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
58005+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
58006+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
58007+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
58008+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
58009+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
58010+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
58011+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
58012+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
58013+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
58014+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
58015+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
58016+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
58017+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
58018+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
58019+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
58020+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
58021+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
58022+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
58023+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
58024+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
58025+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
58026+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
58027+ };
58028+
58029+static const char * gr_socktypes[SOCK_MAX] = {
58030+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
58031+ "unknown:7", "unknown:8", "unknown:9", "packet"
58032+ };
58033+
58034+static const char * gr_sockfamilies[AF_MAX+1] = {
58035+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
58036+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
58037+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
58038+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
58039+ };
58040+
58041+const char *
58042+gr_proto_to_name(unsigned char proto)
58043+{
58044+ return gr_protocols[proto];
58045+}
58046+
58047+const char *
58048+gr_socktype_to_name(unsigned char type)
58049+{
58050+ return gr_socktypes[type];
58051+}
58052+
58053+const char *
58054+gr_sockfamily_to_name(unsigned char family)
58055+{
58056+ return gr_sockfamilies[family];
58057+}
58058+
58059+int
58060+gr_search_socket(const int domain, const int type, const int protocol)
58061+{
58062+ struct acl_subject_label *curr;
58063+ const struct cred *cred = current_cred();
58064+
58065+ if (unlikely(!gr_acl_is_enabled()))
58066+ goto exit;
58067+
58068+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
58069+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
58070+ goto exit; // let the kernel handle it
58071+
58072+ curr = current->acl;
58073+
58074+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
58075+		/* the family is allowed; if this is PF_INET, allow it only if
58076+		   the extra sock type/protocol checks pass */
58077+ if (domain == PF_INET)
58078+ goto inet_check;
58079+ goto exit;
58080+ } else {
58081+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
58082+ __u32 fakeip = 0;
58083+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
58084+ current->role->roletype, cred->uid,
58085+ cred->gid, current->exec_file ?
58086+ gr_to_filename(current->exec_file->f_path.dentry,
58087+ current->exec_file->f_path.mnt) :
58088+ curr->filename, curr->filename,
58089+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
58090+ &current->signal->saved_ip);
58091+ goto exit;
58092+ }
58093+ goto exit_fail;
58094+ }
58095+
58096+inet_check:
58097+ /* the rest of this checking is for IPv4 only */
58098+ if (!curr->ips)
58099+ goto exit;
58100+
58101+ if ((curr->ip_type & (1 << type)) &&
58102+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
58103+ goto exit;
58104+
58105+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
58106+		/* we don't place acls on raw sockets, and sometimes
58107+ dgram/ip sockets are opened for ioctl and not
58108+ bind/connect, so we'll fake a bind learn log */
58109+ if (type == SOCK_RAW || type == SOCK_PACKET) {
58110+ __u32 fakeip = 0;
58111+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
58112+ current->role->roletype, cred->uid,
58113+ cred->gid, current->exec_file ?
58114+ gr_to_filename(current->exec_file->f_path.dentry,
58115+ current->exec_file->f_path.mnt) :
58116+ curr->filename, curr->filename,
58117+ &fakeip, 0, type,
58118+ protocol, GR_CONNECT, &current->signal->saved_ip);
58119+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
58120+ __u32 fakeip = 0;
58121+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
58122+ current->role->roletype, cred->uid,
58123+ cred->gid, current->exec_file ?
58124+ gr_to_filename(current->exec_file->f_path.dentry,
58125+ current->exec_file->f_path.mnt) :
58126+ curr->filename, curr->filename,
58127+ &fakeip, 0, type,
58128+ protocol, GR_BIND, &current->signal->saved_ip);
58129+ }
58130+ /* we'll log when they use connect or bind */
58131+ goto exit;
58132+ }
58133+
58134+exit_fail:
58135+ if (domain == PF_INET)
58136+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
58137+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
58138+ else
58139+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
58140+ gr_socktype_to_name(type), protocol);
58141+
58142+ return 0;
58143+exit:
58144+ return 1;
58145+}
58146+
58147+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
58148+{
58149+ if ((ip->mode & mode) &&
58150+ (ip_port >= ip->low) &&
58151+ (ip_port <= ip->high) &&
58152+ ((ntohl(ip_addr) & our_netmask) ==
58153+ (ntohl(our_addr) & our_netmask))
58154+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
58155+ && (ip->type & (1 << type))) {
58156+ if (ip->mode & GR_INVERT)
58157+ return 2; // specifically denied
58158+ else
58159+ return 1; // allowed
58160+ }
58161+
58162+ return 0; // not specifically allowed, may continue parsing
58163+}
58164+
58165+static int
58166+gr_search_connectbind(const int full_mode, struct sock *sk,
58167+ struct sockaddr_in *addr, const int type)
58168+{
58169+ char iface[IFNAMSIZ] = {0};
58170+ struct acl_subject_label *curr;
58171+ struct acl_ip_label *ip;
58172+ struct inet_sock *isk;
58173+ struct net_device *dev;
58174+ struct in_device *idev;
58175+ unsigned long i;
58176+ int ret;
58177+ int mode = full_mode & (GR_BIND | GR_CONNECT);
58178+ __u32 ip_addr = 0;
58179+ __u32 our_addr;
58180+ __u32 our_netmask;
58181+ char *p;
58182+ __u16 ip_port = 0;
58183+ const struct cred *cred = current_cred();
58184+
58185+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
58186+ return 0;
58187+
58188+ curr = current->acl;
58189+ isk = inet_sk(sk);
58190+
58191+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
58192+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
58193+ addr->sin_addr.s_addr = curr->inaddr_any_override;
58194+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
58195+ struct sockaddr_in saddr;
58196+ int err;
58197+
58198+ saddr.sin_family = AF_INET;
58199+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
58200+ saddr.sin_port = isk->inet_sport;
58201+
58202+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
58203+ if (err)
58204+ return err;
58205+
58206+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
58207+ if (err)
58208+ return err;
58209+ }
58210+
58211+ if (!curr->ips)
58212+ return 0;
58213+
58214+ ip_addr = addr->sin_addr.s_addr;
58215+ ip_port = ntohs(addr->sin_port);
58216+
58217+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
58218+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
58219+ current->role->roletype, cred->uid,
58220+ cred->gid, current->exec_file ?
58221+ gr_to_filename(current->exec_file->f_path.dentry,
58222+ current->exec_file->f_path.mnt) :
58223+ curr->filename, curr->filename,
58224+ &ip_addr, ip_port, type,
58225+ sk->sk_protocol, mode, &current->signal->saved_ip);
58226+ return 0;
58227+ }
58228+
58229+ for (i = 0; i < curr->ip_num; i++) {
58230+ ip = *(curr->ips + i);
58231+ if (ip->iface != NULL) {
58232+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
58233+ p = strchr(iface, ':');
58234+ if (p != NULL)
58235+ *p = '\0';
58236+ dev = dev_get_by_name(sock_net(sk), iface);
58237+ if (dev == NULL)
58238+ continue;
58239+ idev = in_dev_get(dev);
58240+ if (idev == NULL) {
58241+ dev_put(dev);
58242+ continue;
58243+ }
58244+ rcu_read_lock();
58245+ for_ifa(idev) {
58246+ if (!strcmp(ip->iface, ifa->ifa_label)) {
58247+ our_addr = ifa->ifa_address;
58248+ our_netmask = 0xffffffff;
58249+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
58250+ if (ret == 1) {
58251+ rcu_read_unlock();
58252+ in_dev_put(idev);
58253+ dev_put(dev);
58254+ return 0;
58255+ } else if (ret == 2) {
58256+ rcu_read_unlock();
58257+ in_dev_put(idev);
58258+ dev_put(dev);
58259+ goto denied;
58260+ }
58261+ }
58262+ } endfor_ifa(idev);
58263+ rcu_read_unlock();
58264+ in_dev_put(idev);
58265+ dev_put(dev);
58266+ } else {
58267+ our_addr = ip->addr;
58268+ our_netmask = ip->netmask;
58269+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
58270+ if (ret == 1)
58271+ return 0;
58272+ else if (ret == 2)
58273+ goto denied;
58274+ }
58275+ }
58276+
58277+denied:
58278+ if (mode == GR_BIND)
58279+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
58280+ else if (mode == GR_CONNECT)
58281+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
58282+
58283+ return -EACCES;
58284+}
58285+
58286+int
58287+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
58288+{
58289+ /* always allow disconnection of dgram sockets with connect */
58290+ if (addr->sin_family == AF_UNSPEC)
58291+ return 0;
58292+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
58293+}
58294+
58295+int
58296+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
58297+{
58298+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
58299+}
58300+
58301+int gr_search_listen(struct socket *sock)
58302+{
58303+ struct sock *sk = sock->sk;
58304+ struct sockaddr_in addr;
58305+
58306+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
58307+ addr.sin_port = inet_sk(sk)->inet_sport;
58308+
58309+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
58310+}
58311+
58312+int gr_search_accept(struct socket *sock)
58313+{
58314+ struct sock *sk = sock->sk;
58315+ struct sockaddr_in addr;
58316+
58317+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
58318+ addr.sin_port = inet_sk(sk)->inet_sport;
58319+
58320+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
58321+}
58322+
58323+int
58324+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
58325+{
58326+ if (addr)
58327+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
58328+ else {
58329+ struct sockaddr_in sin;
58330+ const struct inet_sock *inet = inet_sk(sk);
58331+
58332+ sin.sin_addr.s_addr = inet->inet_daddr;
58333+ sin.sin_port = inet->inet_dport;
58334+
58335+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
58336+ }
58337+}
58338+
58339+int
58340+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
58341+{
58342+ struct sockaddr_in sin;
58343+
58344+ if (unlikely(skb->len < sizeof (struct udphdr)))
58345+ return 0; // skip this packet
58346+
58347+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
58348+ sin.sin_port = udp_hdr(skb)->source;
58349+
58350+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
58351+}
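
check_ip_policy() is the core match used by gr_search_connectbind(): a rule fires only when the requested mode, the port range, the netmask-masked address, the protocol bitmap and the socket-type bitmap all agree, and GR_INVERT turns that match into an explicit deny. A standalone model of the same predicate, trimmed to the fields the check reads and kept in host byte order for brevity (the kernel code applies ntohl() first):

    #include <stdio.h>
    #include <stdint.h>

    #define GR_INVERT_ 0x04

    struct ip_rule {
        unsigned mode;
        uint16_t low, high;        /* allowed port range */
        uint32_t addr, netmask;    /* rule address/mask (host order here) */
        uint32_t proto[8];         /* 256-bit protocol bitmap */
        unsigned type;             /* socket-type bitmap */
    };

    /* 0 = no opinion, 1 = allowed, 2 = explicitly denied (GR_INVERT) */
    static int match(const struct ip_rule *r, uint32_t addr, uint16_t port,
                     uint8_t proto, unsigned mode, int type)
    {
        if ((r->mode & mode) &&
            port >= r->low && port <= r->high &&
            (addr & r->netmask) == (r->addr & r->netmask) &&
            (r->proto[proto / 32] & (1u << (proto % 32))) &&
            (r->type & (1u << type)))
            return (r->mode & GR_INVERT_) ? 2 : 1;
        return 0;
    }

    int main(void)
    {
        struct ip_rule r = {
            .mode = 0x02 /* connect */, .low = 80, .high = 80,
            .addr = 0x0a000001, .netmask = 0xffffffff,
            .type = 1u << 1 /* stream */,
        };
        r.proto[6 / 32] |= 1u << (6 % 32);      /* allow tcp (protocol 6) */

        printf("%d\n", match(&r, 0x0a000001, 80, 6, 0x02, 1)); /* 1: allowed */
        printf("%d\n", match(&r, 0x0a000001, 81, 6, 0x02, 1)); /* 0: port miss */
        return 0;
    }
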
58352diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
58353new file mode 100644
58354index 0000000..25f54ef
58355--- /dev/null
58356+++ b/grsecurity/gracl_learn.c
58357@@ -0,0 +1,207 @@
58358+#include <linux/kernel.h>
58359+#include <linux/mm.h>
58360+#include <linux/sched.h>
58361+#include <linux/poll.h>
58362+#include <linux/string.h>
58363+#include <linux/file.h>
58364+#include <linux/types.h>
58365+#include <linux/vmalloc.h>
58366+#include <linux/grinternal.h>
58367+
58368+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
58369+ size_t count, loff_t *ppos);
58370+extern int gr_acl_is_enabled(void);
58371+
58372+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
58373+static int gr_learn_attached;
58374+
58375+/* use a 512k buffer */
58376+#define LEARN_BUFFER_SIZE (512 * 1024)
58377+
58378+static DEFINE_SPINLOCK(gr_learn_lock);
58379+static DEFINE_MUTEX(gr_learn_user_mutex);
58380+
58381+/* we need to maintain two buffers, so that the grlearn reader context
58382+   can hold a mutex around the copy to userspace, while the other kernel
58383+   contexts, which cannot sleep, take a spinlock when copying into the buffer
58384+*/
58385+static char *learn_buffer;
58386+static char *learn_buffer_user;
58387+static int learn_buffer_len;
58388+static int learn_buffer_user_len;
58389+
58390+static ssize_t
58391+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
58392+{
58393+ DECLARE_WAITQUEUE(wait, current);
58394+ ssize_t retval = 0;
58395+
58396+ add_wait_queue(&learn_wait, &wait);
58397+ set_current_state(TASK_INTERRUPTIBLE);
58398+ do {
58399+ mutex_lock(&gr_learn_user_mutex);
58400+ spin_lock(&gr_learn_lock);
58401+ if (learn_buffer_len)
58402+ break;
58403+ spin_unlock(&gr_learn_lock);
58404+ mutex_unlock(&gr_learn_user_mutex);
58405+ if (file->f_flags & O_NONBLOCK) {
58406+ retval = -EAGAIN;
58407+ goto out;
58408+ }
58409+ if (signal_pending(current)) {
58410+ retval = -ERESTARTSYS;
58411+ goto out;
58412+ }
58413+
58414+ schedule();
58415+ } while (1);
58416+
58417+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
58418+ learn_buffer_user_len = learn_buffer_len;
58419+ retval = learn_buffer_len;
58420+ learn_buffer_len = 0;
58421+
58422+ spin_unlock(&gr_learn_lock);
58423+
58424+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
58425+ retval = -EFAULT;
58426+
58427+ mutex_unlock(&gr_learn_user_mutex);
58428+out:
58429+ set_current_state(TASK_RUNNING);
58430+ remove_wait_queue(&learn_wait, &wait);
58431+ return retval;
58432+}
58433+
58434+static unsigned int
58435+poll_learn(struct file * file, poll_table * wait)
58436+{
58437+ poll_wait(file, &learn_wait, wait);
58438+
58439+ if (learn_buffer_len)
58440+ return (POLLIN | POLLRDNORM);
58441+
58442+ return 0;
58443+}
58444+
58445+void
58446+gr_clear_learn_entries(void)
58447+{
58448+ char *tmp;
58449+
58450+ mutex_lock(&gr_learn_user_mutex);
58451+ spin_lock(&gr_learn_lock);
58452+ tmp = learn_buffer;
58453+ learn_buffer = NULL;
58454+ spin_unlock(&gr_learn_lock);
58455+ if (tmp)
58456+ vfree(tmp);
58457+ if (learn_buffer_user != NULL) {
58458+ vfree(learn_buffer_user);
58459+ learn_buffer_user = NULL;
58460+ }
58461+ learn_buffer_len = 0;
58462+ mutex_unlock(&gr_learn_user_mutex);
58463+
58464+ return;
58465+}
58466+
58467+void
58468+gr_add_learn_entry(const char *fmt, ...)
58469+{
58470+ va_list args;
58471+ unsigned int len;
58472+
58473+ if (!gr_learn_attached)
58474+ return;
58475+
58476+ spin_lock(&gr_learn_lock);
58477+
58478+ /* leave a gap at the end so we know when it's "full" but don't have to
58479+ compute the exact length of the string we're trying to append
58480+ */
58481+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
58482+ spin_unlock(&gr_learn_lock);
58483+ wake_up_interruptible(&learn_wait);
58484+ return;
58485+ }
58486+ if (learn_buffer == NULL) {
58487+ spin_unlock(&gr_learn_lock);
58488+ return;
58489+ }
58490+
58491+ va_start(args, fmt);
58492+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
58493+ va_end(args);
58494+
58495+ learn_buffer_len += len + 1;
58496+
58497+ spin_unlock(&gr_learn_lock);
58498+ wake_up_interruptible(&learn_wait);
58499+
58500+ return;
58501+}
58502+
58503+static int
58504+open_learn(struct inode *inode, struct file *file)
58505+{
58506+ if (file->f_mode & FMODE_READ && gr_learn_attached)
58507+ return -EBUSY;
58508+ if (file->f_mode & FMODE_READ) {
58509+ int retval = 0;
58510+ mutex_lock(&gr_learn_user_mutex);
58511+ if (learn_buffer == NULL)
58512+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
58513+ if (learn_buffer_user == NULL)
58514+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
58515+ if (learn_buffer == NULL) {
58516+ retval = -ENOMEM;
58517+ goto out_error;
58518+ }
58519+ if (learn_buffer_user == NULL) {
58520+ retval = -ENOMEM;
58521+ goto out_error;
58522+ }
58523+ learn_buffer_len = 0;
58524+ learn_buffer_user_len = 0;
58525+ gr_learn_attached = 1;
58526+out_error:
58527+ mutex_unlock(&gr_learn_user_mutex);
58528+ return retval;
58529+ }
58530+ return 0;
58531+}
58532+
58533+static int
58534+close_learn(struct inode *inode, struct file *file)
58535+{
58536+ if (file->f_mode & FMODE_READ) {
58537+ char *tmp = NULL;
58538+ mutex_lock(&gr_learn_user_mutex);
58539+ spin_lock(&gr_learn_lock);
58540+ tmp = learn_buffer;
58541+ learn_buffer = NULL;
58542+ spin_unlock(&gr_learn_lock);
58543+ if (tmp)
58544+ vfree(tmp);
58545+ if (learn_buffer_user != NULL) {
58546+ vfree(learn_buffer_user);
58547+ learn_buffer_user = NULL;
58548+ }
58549+ learn_buffer_len = 0;
58550+ learn_buffer_user_len = 0;
58551+ gr_learn_attached = 0;
58552+ mutex_unlock(&gr_learn_user_mutex);
58553+ }
58554+
58555+ return 0;
58556+}
58557+
58558+const struct file_operations grsec_fops = {
58559+ .read = read_learn,
58560+ .write = write_grsec_handler,
58561+ .open = open_learn,
58562+ .release = close_learn,
58563+ .poll = poll_learn,
58564+};
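
From userspace, a grlearn-style consumer just polls the device and drains the learn buffer; the kernel side wakes it from gr_add_learn_entry(). A hedged sketch of such a reader, assuming the fops above are bound to a character device node such as /dev/grsec (the actual node and its permissions come from the rest of the patch and the gradm userland, not from this hunk):

    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    static char buf[512 * 1024];   /* sized to match LEARN_BUFFER_SIZE */

    int main(void)
    {
        /* assumption: the grsec fops are bound to this node elsewhere in the patch */
        int fd = open("/dev/grsec", O_RDONLY);
        if (fd < 0) {
            perror("open /dev/grsec");
            return 1;
        }

        struct pollfd pfd = { .fd = fd, .events = POLLIN };

        for (;;) {
            if (poll(&pfd, 1, -1) < 0)     /* wakes when gr_add_learn_entry() signals */
                break;
            ssize_t n = read(fd, buf, sizeof(buf));
            if (n <= 0)                    /* blocking read: no O_NONBLOCK set here */
                break;
            fwrite(buf, 1, (size_t)n, stdout);  /* records are NUL-separated strings */
        }
        close(fd);
        return 0;
    }
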
58565diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
58566new file mode 100644
58567index 0000000..39645c9
58568--- /dev/null
58569+++ b/grsecurity/gracl_res.c
58570@@ -0,0 +1,68 @@
58571+#include <linux/kernel.h>
58572+#include <linux/sched.h>
58573+#include <linux/gracl.h>
58574+#include <linux/grinternal.h>
58575+
58576+static const char *restab_log[] = {
58577+ [RLIMIT_CPU] = "RLIMIT_CPU",
58578+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
58579+ [RLIMIT_DATA] = "RLIMIT_DATA",
58580+ [RLIMIT_STACK] = "RLIMIT_STACK",
58581+ [RLIMIT_CORE] = "RLIMIT_CORE",
58582+ [RLIMIT_RSS] = "RLIMIT_RSS",
58583+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
58584+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
58585+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
58586+ [RLIMIT_AS] = "RLIMIT_AS",
58587+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
58588+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
58589+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
58590+ [RLIMIT_NICE] = "RLIMIT_NICE",
58591+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
58592+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
58593+ [GR_CRASH_RES] = "RLIMIT_CRASH"
58594+};
58595+
58596+void
58597+gr_log_resource(const struct task_struct *task,
58598+ const int res, const unsigned long wanted, const int gt)
58599+{
58600+ const struct cred *cred;
58601+ unsigned long rlim;
58602+
58603+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
58604+ return;
58605+
58606+	// resource not yet supported (no restab_log entry)
58607+ if (unlikely(!restab_log[res]))
58608+ return;
58609+
58610+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
58611+ rlim = task_rlimit_max(task, res);
58612+ else
58613+ rlim = task_rlimit(task, res);
58614+
58615+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
58616+ return;
58617+
58618+ rcu_read_lock();
58619+ cred = __task_cred(task);
58620+
58621+ if (res == RLIMIT_NPROC &&
58622+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
58623+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
58624+ goto out_rcu_unlock;
58625+ else if (res == RLIMIT_MEMLOCK &&
58626+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
58627+ goto out_rcu_unlock;
58628+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
58629+ goto out_rcu_unlock;
58630+ rcu_read_unlock();
58631+
58632+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
58633+
58634+ return;
58635+out_rcu_unlock:
58636+ rcu_read_unlock();
58637+ return;
58638+}
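
The gt flag in gr_log_resource() selects strict versus non-strict comparison: with gt set, only wanted > rlim counts as an overstep; without it, wanted == rlim already logs, and RLIM_INFINITY never does. A tiny model of just that early-return condition (hypothetical helper name, underscore suffix to avoid the system RLIM_INFINITY):

    #include <stdio.h>

    #define RLIM_INFINITY_ (~0UL)   /* stand-in for the kernel's RLIM_INFINITY */

    /* 1 = gr_log_resource() would proceed to log, 0 = early return */
    static int oversteps(unsigned long wanted, unsigned long rlim, int gt)
    {
        if (rlim == RLIM_INFINITY_)
            return 0;
        return gt ? wanted > rlim : wanted >= rlim;
    }

    int main(void)
    {
        printf("%d\n", oversteps(100, 100, 1));           /* 0: gt, equal is allowed */
        printf("%d\n", oversteps(100, 100, 0));           /* 1: !gt, equal oversteps */
        printf("%d\n", oversteps(50, RLIM_INFINITY_, 0)); /* 0: unlimited never logs */
        return 0;
    }
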
58639diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
58640new file mode 100644
58641index 0000000..25197e9
58642--- /dev/null
58643+++ b/grsecurity/gracl_segv.c
58644@@ -0,0 +1,299 @@
58645+#include <linux/kernel.h>
58646+#include <linux/mm.h>
58647+#include <asm/uaccess.h>
58648+#include <asm/errno.h>
58649+#include <asm/mman.h>
58650+#include <net/sock.h>
58651+#include <linux/file.h>
58652+#include <linux/fs.h>
58653+#include <linux/net.h>
58654+#include <linux/in.h>
58655+#include <linux/slab.h>
58656+#include <linux/types.h>
58657+#include <linux/sched.h>
58658+#include <linux/timer.h>
58659+#include <linux/gracl.h>
58660+#include <linux/grsecurity.h>
58661+#include <linux/grinternal.h>
58662+
58663+static struct crash_uid *uid_set;
58664+static unsigned short uid_used;
58665+static DEFINE_SPINLOCK(gr_uid_lock);
58666+extern rwlock_t gr_inode_lock;
58667+extern struct acl_subject_label *
58668+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
58669+ struct acl_role_label *role);
58670+
58671+#ifdef CONFIG_BTRFS_FS
58672+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
58673+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
58674+#endif
58675+
58676+static inline dev_t __get_dev(const struct dentry *dentry)
58677+{
58678+#ifdef CONFIG_BTRFS_FS
58679+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
58680+ return get_btrfs_dev_from_inode(dentry->d_inode);
58681+ else
58682+#endif
58683+ return dentry->d_inode->i_sb->s_dev;
58684+}
58685+
58686+int
58687+gr_init_uidset(void)
58688+{
58689+ uid_set =
58690+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
58691+ uid_used = 0;
58692+
58693+ return uid_set ? 1 : 0;
58694+}
58695+
58696+void
58697+gr_free_uidset(void)
58698+{
58699+ if (uid_set)
58700+ kfree(uid_set);
58701+
58702+ return;
58703+}
58704+
58705+int
58706+gr_find_uid(const uid_t uid)
58707+{
58708+ struct crash_uid *tmp = uid_set;
58709+ uid_t buid;
58710+ int low = 0, high = uid_used - 1, mid;
58711+
58712+ while (high >= low) {
58713+ mid = (low + high) >> 1;
58714+ buid = tmp[mid].uid;
58715+ if (buid == uid)
58716+ return mid;
58717+ if (buid > uid)
58718+ high = mid - 1;
58719+ if (buid < uid)
58720+ low = mid + 1;
58721+ }
58722+
58723+ return -1;
58724+}
58725+
58726+static __inline__ void
58727+gr_insertsort(void)
58728+{
58729+ unsigned short i, j;
58730+ struct crash_uid index;
58731+
58732+ for (i = 1; i < uid_used; i++) {
58733+ index = uid_set[i];
58734+ j = i;
58735+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
58736+ uid_set[j] = uid_set[j - 1];
58737+ j--;
58738+ }
58739+ uid_set[j] = index;
58740+ }
58741+
58742+ return;
58743+}
58744+
58745+static __inline__ void
58746+gr_insert_uid(const uid_t uid, const unsigned long expires)
58747+{
58748+ int loc;
58749+
58750+ if (uid_used == GR_UIDTABLE_MAX)
58751+ return;
58752+
58753+ loc = gr_find_uid(uid);
58754+
58755+ if (loc >= 0) {
58756+ uid_set[loc].expires = expires;
58757+ return;
58758+ }
58759+
58760+ uid_set[uid_used].uid = uid;
58761+ uid_set[uid_used].expires = expires;
58762+ uid_used++;
58763+
58764+ gr_insertsort();
58765+
58766+ return;
58767+}
58768+
58769+void
58770+gr_remove_uid(const unsigned short loc)
58771+{
58772+ unsigned short i;
58773+
58774+ for (i = loc + 1; i < uid_used; i++)
58775+ uid_set[i - 1] = uid_set[i];
58776+
58777+ uid_used--;
58778+
58779+ return;
58780+}
58781+
58782+int
58783+gr_check_crash_uid(const uid_t uid)
58784+{
58785+ int loc;
58786+ int ret = 0;
58787+
58788+ if (unlikely(!gr_acl_is_enabled()))
58789+ return 0;
58790+
58791+ spin_lock(&gr_uid_lock);
58792+ loc = gr_find_uid(uid);
58793+
58794+ if (loc < 0)
58795+ goto out_unlock;
58796+
58797+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
58798+ gr_remove_uid(loc);
58799+ else
58800+ ret = 1;
58801+
58802+out_unlock:
58803+ spin_unlock(&gr_uid_lock);
58804+ return ret;
58805+}
58806+
58807+static __inline__ int
58808+proc_is_setxid(const struct cred *cred)
58809+{
58810+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
58811+ !uid_eq(cred->uid, cred->fsuid))
58812+ return 1;
58813+ if (!uid_eq(cred->gid, cred->egid) || !uid_eq(cred->gid, cred->sgid) ||
58814+ !uid_eq(cred->gid, cred->fsgid))
58815+ return 1;
58816+
58817+ return 0;
58818+}
58819+
58820+extern int gr_fake_force_sig(int sig, struct task_struct *t);
58821+
58822+void
58823+gr_handle_crash(struct task_struct *task, const int sig)
58824+{
58825+ struct acl_subject_label *curr;
58826+ struct task_struct *tsk, *tsk2;
58827+ const struct cred *cred;
58828+ const struct cred *cred2;
58829+
58830+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
58831+ return;
58832+
58833+ if (unlikely(!gr_acl_is_enabled()))
58834+ return;
58835+
58836+ curr = task->acl;
58837+
58838+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
58839+ return;
58840+
58841+ if (time_before_eq(curr->expires, get_seconds())) {
58842+ curr->expires = 0;
58843+ curr->crashes = 0;
58844+ }
58845+
58846+ curr->crashes++;
58847+
58848+ if (!curr->expires)
58849+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
58850+
58851+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
58852+ time_after(curr->expires, get_seconds())) {
58853+ rcu_read_lock();
58854+ cred = __task_cred(task);
58855+ if (!uid_eq(cred->uid, GLOBAL_ROOT_UID) && proc_is_setxid(cred)) {
58856+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
58857+ spin_lock(&gr_uid_lock);
58858+ gr_insert_uid(cred->uid, curr->expires);
58859+ spin_unlock(&gr_uid_lock);
58860+ curr->expires = 0;
58861+ curr->crashes = 0;
58862+ read_lock(&tasklist_lock);
58863+ do_each_thread(tsk2, tsk) {
58864+ cred2 = __task_cred(tsk);
58865+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
58866+ gr_fake_force_sig(SIGKILL, tsk);
58867+ } while_each_thread(tsk2, tsk);
58868+ read_unlock(&tasklist_lock);
58869+ } else {
58870+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
58871+ read_lock(&tasklist_lock);
58872+ read_lock(&grsec_exec_file_lock);
58873+ do_each_thread(tsk2, tsk) {
58874+ if (likely(tsk != task)) {
58875+ // if this thread has the same subject as the one that triggered
58876+ // RES_CRASH and it's the same binary, kill it
58877+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
58878+ gr_fake_force_sig(SIGKILL, tsk);
58879+ }
58880+ } while_each_thread(tsk2, tsk);
58881+ read_unlock(&grsec_exec_file_lock);
58882+ read_unlock(&tasklist_lock);
58883+ }
58884+ rcu_read_unlock();
58885+ }
58886+
58887+ return;
58888+}
58889+
58890+int
58891+gr_check_crash_exec(const struct file *filp)
58892+{
58893+ struct acl_subject_label *curr;
58894+
58895+ if (unlikely(!gr_acl_is_enabled()))
58896+ return 0;
58897+
58898+ read_lock(&gr_inode_lock);
58899+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
58900+ __get_dev(filp->f_path.dentry),
58901+ current->role);
58902+ read_unlock(&gr_inode_lock);
58903+
58904+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
58905+ (!curr->crashes && !curr->expires))
58906+ return 0;
58907+
58908+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
58909+ time_after(curr->expires, get_seconds()))
58910+ return 1;
58911+ else if (time_before_eq(curr->expires, get_seconds())) {
58912+ curr->crashes = 0;
58913+ curr->expires = 0;
58914+ }
58915+
58916+ return 0;
58917+}
58918+
58919+void
58920+gr_handle_alertkill(struct task_struct *task)
58921+{
58922+ struct acl_subject_label *curracl;
58923+ __u32 curr_ip;
58924+ struct task_struct *p, *p2;
58925+
58926+ if (unlikely(!gr_acl_is_enabled()))
58927+ return;
58928+
58929+ curracl = task->acl;
58930+ curr_ip = task->signal->curr_ip;
58931+
58932+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
58933+ read_lock(&tasklist_lock);
58934+ do_each_thread(p2, p) {
58935+ if (p->signal->curr_ip == curr_ip)
58936+ gr_fake_force_sig(SIGKILL, p);
58937+ } while_each_thread(p2, p);
58938+ read_unlock(&tasklist_lock);
58939+ } else if (curracl->mode & GR_KILLPROC)
58940+ gr_fake_force_sig(SIGKILL, task);
58941+
58942+ return;
58943+}
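
The crash-uid table is kept sorted so gr_check_crash_uid() can binary-search it on every exec; inserts append a new entry and then run one insertion-sort pass, exactly as gr_insert_uid() and gr_insertsort() do. A self-contained userspace model of that pair (fixed-size table, expiry handling omitted):

    #include <stdio.h>

    struct crash_uid { unsigned uid; unsigned long expires; };

    static struct crash_uid set[8];
    static unsigned short used;

    /* binary search over the sorted table; -1 if absent (mirrors gr_find_uid) */
    static int find_uid(unsigned uid)
    {
        int low = 0, high = (int)used - 1;
        while (high >= low) {
            int mid = (low + high) >> 1;
            if (set[mid].uid == uid)
                return mid;
            if (set[mid].uid > uid)
                high = mid - 1;
            else
                low = mid + 1;
        }
        return -1;
    }

    /* append then restore order, as gr_insert_uid() + gr_insertsort() do */
    static void insert_uid(unsigned uid, unsigned long expires)
    {
        int loc = find_uid(uid);
        if (loc >= 0) {                    /* already present: refresh the ban */
            set[loc].expires = expires;
            return;
        }
        set[used].uid = uid;
        set[used].expires = expires;
        used++;
        for (unsigned short i = 1; i < used; i++) {  /* insertion-sort pass */
            struct crash_uid key = set[i];
            unsigned short j = i;
            while (j > 0 && set[j - 1].uid > key.uid) {
                set[j] = set[j - 1];
                j--;
            }
            set[j] = key;
        }
    }

    int main(void)
    {
        insert_uid(1000, 60);
        insert_uid(33, 60);
        insert_uid(500, 60);
        printf("uid 33 at index %d\n", find_uid(33));   /* 0: table is sorted */
        printf("uid 77 at index %d\n", find_uid(77));   /* -1: not banned */
        return 0;
    }
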
58944diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
58945new file mode 100644
58946index 0000000..9d83a69
58947--- /dev/null
58948+++ b/grsecurity/gracl_shm.c
58949@@ -0,0 +1,40 @@
58950+#include <linux/kernel.h>
58951+#include <linux/mm.h>
58952+#include <linux/sched.h>
58953+#include <linux/file.h>
58954+#include <linux/ipc.h>
58955+#include <linux/gracl.h>
58956+#include <linux/grsecurity.h>
58957+#include <linux/grinternal.h>
58958+
58959+int
58960+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
58961+ const time_t shm_createtime, const uid_t cuid, const int shmid)
58962+{
58963+ struct task_struct *task;
58964+
58965+ if (!gr_acl_is_enabled())
58966+ return 1;
58967+
58968+ rcu_read_lock();
58969+ read_lock(&tasklist_lock);
58970+
58971+ task = find_task_by_vpid(shm_cprid);
58972+
58973+ if (unlikely(!task))
58974+ task = find_task_by_vpid(shm_lapid);
58975+
58976+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
58977+ (task->pid == shm_lapid)) &&
58978+ (task->acl->mode & GR_PROTSHM) &&
58979+ (task->acl != current->acl))) {
58980+ read_unlock(&tasklist_lock);
58981+ rcu_read_unlock();
58982+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
58983+ return 0;
58984+ }
58985+ read_unlock(&tasklist_lock);
58986+ rcu_read_unlock();
58987+
58988+ return 1;
58989+}
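
The attach check above boils down to one predicate: deny when the task found by pid looks like the segment's creator (it started no later than the segment was created, which guards against pid reuse) or is the last attacher, is marked GR_PROTSHM, and runs under a different subject than the caller. A condensed restatement (hypothetical helper, plain integers in place of time_t/time_before_eq):

    #include <stdio.h>

    /* deny when the candidate task plausibly owns the segment (started no
       later than it was created, or is the last attacher), is shm-protected,
       and runs under a different subject than the caller */
    static int shmat_denied(unsigned long task_start, unsigned long shm_created,
                            int is_last_attacher, int protshm, int same_subject)
    {
        return (task_start <= shm_created || is_last_attacher) &&
               protshm && !same_subject;
    }

    int main(void)
    {
        printf("%d\n", shmat_denied(100, 200, 0, 1, 0)); /* 1: creator, other subject */
        printf("%d\n", shmat_denied(300, 200, 0, 1, 0)); /* 0: started later: pid reuse */
        printf("%d\n", shmat_denied(100, 200, 0, 1, 1)); /* 0: same subject is fine */
        return 0;
    }
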
58990diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
58991new file mode 100644
58992index 0000000..bc0be01
58993--- /dev/null
58994+++ b/grsecurity/grsec_chdir.c
58995@@ -0,0 +1,19 @@
58996+#include <linux/kernel.h>
58997+#include <linux/sched.h>
58998+#include <linux/fs.h>
58999+#include <linux/file.h>
59000+#include <linux/grsecurity.h>
59001+#include <linux/grinternal.h>
59002+
59003+void
59004+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
59005+{
59006+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
59007+ if ((grsec_enable_chdir && grsec_enable_group &&
59008+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
59009+ !grsec_enable_group)) {
59010+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
59011+ }
59012+#endif
59013+ return;
59014+}
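
The condition in gr_log_chdir() distributes grsec_enable_chdir across both arms of the ||; it is logically equivalent to the shorter form enable && (!group || in_group_p(gid)), which the brute-force check below confirms over all inputs (a restatement for readability, not a proposed change to the patch):

    #include <stdio.h>

    /* original shape of the CONFIG_GRKERNSEC_AUDIT_CHDIR condition */
    static int orig_form(int enable, int group, int in_group)
    {
        return (enable && group && in_group) || (enable && !group);
    }

    /* factored form: audit if enabled, unless group gating excludes us */
    static int short_form(int enable, int group, int in_group)
    {
        return enable && (!group || in_group);
    }

    int main(void)
    {
        for (int e = 0; e < 2; e++)
            for (int g = 0; g < 2; g++)
                for (int i = 0; i < 2; i++)
                    if (orig_form(e, g, i) != short_form(e, g, i))
                        return puts("mismatch"), 1;
        puts("forms are equivalent for all inputs");
        return 0;
    }
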
59015diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
59016new file mode 100644
59017index 0000000..9807ee2
59018--- /dev/null
59019+++ b/grsecurity/grsec_chroot.c
59020@@ -0,0 +1,368 @@
59021+#include <linux/kernel.h>
59022+#include <linux/module.h>
59023+#include <linux/sched.h>
59024+#include <linux/file.h>
59025+#include <linux/fs.h>
59026+#include <linux/mount.h>
59027+#include <linux/types.h>
59028+#include "../fs/mount.h"
59029+#include <linux/grsecurity.h>
59030+#include <linux/grinternal.h>
59031+
59032+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
59033+{
59034+#ifdef CONFIG_GRKERNSEC
59035+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
59036+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
59037+ task->gr_is_chrooted = 1;
59038+ else
59039+ task->gr_is_chrooted = 0;
59040+
59041+ task->gr_chroot_dentry = path->dentry;
59042+#endif
59043+ return;
59044+}
59045+
59046+void gr_clear_chroot_entries(struct task_struct *task)
59047+{
59048+#ifdef CONFIG_GRKERNSEC
59049+ task->gr_is_chrooted = 0;
59050+ task->gr_chroot_dentry = NULL;
59051+#endif
59052+ return;
59053+}
59054+
59055+int
59056+gr_handle_chroot_unix(const pid_t pid)
59057+{
59058+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
59059+ struct task_struct *p;
59060+
59061+ if (unlikely(!grsec_enable_chroot_unix))
59062+ return 1;
59063+
59064+ if (likely(!proc_is_chrooted(current)))
59065+ return 1;
59066+
59067+ rcu_read_lock();
59068+ read_lock(&tasklist_lock);
59069+ p = find_task_by_vpid_unrestricted(pid);
59070+ if (unlikely(p && !have_same_root(current, p))) {
59071+ read_unlock(&tasklist_lock);
59072+ rcu_read_unlock();
59073+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
59074+ return 0;
59075+ }
59076+ read_unlock(&tasklist_lock);
59077+ rcu_read_unlock();
59078+#endif
59079+ return 1;
59080+}
59081+
59082+int
59083+gr_handle_chroot_nice(void)
59084+{
59085+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
59086+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
59087+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
59088+ return -EPERM;
59089+ }
59090+#endif
59091+ return 0;
59092+}
59093+
59094+int
59095+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
59096+{
59097+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
59098+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
59099+ && proc_is_chrooted(current)) {
59100+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
59101+ return -EACCES;
59102+ }
59103+#endif
59104+ return 0;
59105+}
59106+
59107+int
59108+gr_handle_chroot_rawio(const struct inode *inode)
59109+{
59110+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
59111+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
59112+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
59113+ return 1;
59114+#endif
59115+ return 0;
59116+}
59117+
59118+int
59119+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
59120+{
59121+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59122+ struct task_struct *p;
59123+ int ret = 0;
59124+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
59125+ return ret;
59126+
59127+ read_lock(&tasklist_lock);
59128+ do_each_pid_task(pid, type, p) {
59129+ if (!have_same_root(current, p)) {
59130+ ret = 1;
59131+ goto out;
59132+ }
59133+ } while_each_pid_task(pid, type, p);
59134+out:
59135+ read_unlock(&tasklist_lock);
59136+ return ret;
59137+#endif
59138+ return 0;
59139+}
59140+
59141+int
59142+gr_pid_is_chrooted(struct task_struct *p)
59143+{
59144+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59145+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
59146+ return 0;
59147+
59148+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
59149+ !have_same_root(current, p)) {
59150+ return 1;
59151+ }
59152+#endif
59153+ return 0;
59154+}
59155+
59156+EXPORT_SYMBOL(gr_pid_is_chrooted);
59157+
59158+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
59159+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
59160+{
59161+ struct path path, currentroot;
59162+ int ret = 0;
59163+
59164+ path.dentry = (struct dentry *)u_dentry;
59165+ path.mnt = (struct vfsmount *)u_mnt;
59166+ get_fs_root(current->fs, &currentroot);
59167+ if (path_is_under(&path, &currentroot))
59168+ ret = 1;
59169+ path_put(&currentroot);
59170+
59171+ return ret;
59172+}
59173+#endif
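
gr_is_outside_chroot() above reduces to a path-containment test. A minimal userspace analogue of the same idea, using realpath(3) in place of the kernel's path_is_under() — a sketch only; the kernel walks dentries and honours mount boundaries, which string comparison cannot fully reproduce:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* 1 if 'path' canonicalises to somewhere at or below 'root' */
static int path_is_under_approx(const char *path, const char *root)
{
	char rp[PATH_MAX], rr[PATH_MAX];
	size_t n;

	if (!realpath(path, rp) || !realpath(root, rr))
		return 0;
	if (strcmp(rr, "/") == 0)	/* everything is under the real root */
		return 1;
	n = strlen(rr);
	/* prefix match must end exactly at a component boundary */
	return strncmp(rp, rr, n) == 0 && (rp[n] == '\0' || rp[n] == '/');
}

int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s path root\n", argv[0]);
		return 2;
	}
	puts(path_is_under_approx(argv[1], argv[2]) ? "inside" : "outside");
	return 0;
}
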
59174+
59175+int
59176+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
59177+{
59178+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
59179+ if (!grsec_enable_chroot_fchdir)
59180+ return 1;
59181+
59182+ if (!proc_is_chrooted(current))
59183+ return 1;
59184+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
59185+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
59186+ return 0;
59187+ }
59188+#endif
59189+ return 1;
59190+}
59191+
59192+int
59193+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
59194+ const time_t shm_createtime)
59195+{
59196+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
59197+ struct task_struct *p;
59198+ time_t starttime;
59199+
59200+ if (unlikely(!grsec_enable_chroot_shmat))
59201+ return 1;
59202+
59203+ if (likely(!proc_is_chrooted(current)))
59204+ return 1;
59205+
59206+ rcu_read_lock();
59207+ read_lock(&tasklist_lock);
59208+
59209+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
59210+ starttime = p->start_time.tv_sec;
59211+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
59212+ if (have_same_root(current, p)) {
59213+ goto allow;
59214+ } else {
59215+ read_unlock(&tasklist_lock);
59216+ rcu_read_unlock();
59217+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
59218+ return 0;
59219+ }
59220+ }
59221+ /* creator exited, pid reuse, fall through to next check */
59222+ }
59223+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
59224+ if (unlikely(!have_same_root(current, p))) {
59225+ read_unlock(&tasklist_lock);
59226+ rcu_read_unlock();
59227+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
59228+ return 0;
59229+ }
59230+ }
59231+
59232+allow:
59233+ read_unlock(&tasklist_lock);
59234+ rcu_read_unlock();
59235+#endif
59236+ return 1;
59237+}
59238+
59239+void
59240+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
59241+{
59242+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
59243+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
59244+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
59245+#endif
59246+ return;
59247+}
59248+
59249+int
59250+gr_handle_chroot_mknod(const struct dentry *dentry,
59251+ const struct vfsmount *mnt, const int mode)
59252+{
59253+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
59254+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
59255+ proc_is_chrooted(current)) {
59256+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
59257+ return -EPERM;
59258+ }
59259+#endif
59260+ return 0;
59261+}
59262+
59263+int
59264+gr_handle_chroot_mount(const struct dentry *dentry,
59265+ const struct vfsmount *mnt, const char *dev_name)
59266+{
59267+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
59268+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
59269+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
59270+ return -EPERM;
59271+ }
59272+#endif
59273+ return 0;
59274+}
59275+
59276+int
59277+gr_handle_chroot_pivot(void)
59278+{
59279+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
59280+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
59281+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
59282+ return -EPERM;
59283+ }
59284+#endif
59285+ return 0;
59286+}
59287+
59288+int
59289+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
59290+{
59291+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
59292+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
59293+ !gr_is_outside_chroot(dentry, mnt)) {
59294+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
59295+ return -EPERM;
59296+ }
59297+#endif
59298+ return 0;
59299+}
59300+
59301+extern const char *captab_log[];
59302+extern int captab_log_entries;
59303+
59304+int
59305+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
59306+{
59307+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
59308+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
59309+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
59310+ if (cap_raised(chroot_caps, cap)) {
59311+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
59312+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
59313+ }
59314+ return 0;
59315+ }
59316+ }
59317+#endif
59318+ return 1;
59319+}
59320+
59321+int
59322+gr_chroot_is_capable(const int cap)
59323+{
59324+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
59325+ return gr_task_chroot_is_capable(current, current_cred(), cap);
59326+#endif
59327+ return 1;
59328+}
59329+
59330+int
59331+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
59332+{
59333+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
59334+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
59335+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
59336+ if (cap_raised(chroot_caps, cap)) {
59337+ return 0;
59338+ }
59339+ }
59340+#endif
59341+ return 1;
59342+}
59343+
59344+int
59345+gr_chroot_is_capable_nolog(const int cap)
59346+{
59347+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
59348+ return gr_task_chroot_is_capable_nolog(current, cap);
59349+#endif
59350+ return 1;
59351+}
59352+
59353+int
59354+gr_handle_chroot_sysctl(const int op)
59355+{
59356+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
59357+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
59358+ proc_is_chrooted(current))
59359+ return -EACCES;
59360+#endif
59361+ return 0;
59362+}
59363+
59364+void
59365+gr_handle_chroot_chdir(struct path *path)
59366+{
59367+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
59368+ if (grsec_enable_chroot_chdir)
59369+ set_fs_pwd(current->fs, path);
59370+#endif
59371+ return;
59372+}
59373+
59374+int
59375+gr_handle_chroot_chmod(const struct dentry *dentry,
59376+ const struct vfsmount *mnt, const int mode)
59377+{
59378+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
59379+ /* allow chmod +s on directories, but not files */
59380+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
59381+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
59382+ proc_is_chrooted(current)) {
59383+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
59384+ return -EPERM;
59385+ }
59386+#endif
59387+ return 0;
59388+}
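
The mode test in gr_handle_chroot_chmod() above blocks adding setuid, or setgid-plus-group-execute, to non-directories while chrooted; setgid without group execute is left alone because that bit combination means mandatory locking, not privilege. A userspace restatement of just the mode predicate (a sketch):

#include <stdio.h>
#include <sys/stat.h>

/* 1 if 'mode' would grant privilege on execution */
static int mode_grants_privilege(mode_t mode)
{
	return (mode & S_ISUID) ||
	       ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP));
}

int main(void)
{
	printf("04755 -> %d\n", mode_grants_privilege(04755)); /* setuid: blocked */
	printf("02755 -> %d\n", mode_grants_privilege(02755)); /* setgid+x: blocked */
	printf("02644 -> %d\n", mode_grants_privilege(02644)); /* mandatory locking: allowed */
	printf("00755 -> %d\n", mode_grants_privilege(00755)); /* plain exec: allowed */
	return 0;
}
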
59389diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
59390new file mode 100644
59391index 0000000..7de2055
59392--- /dev/null
59393+++ b/grsecurity/grsec_disabled.c
59394@@ -0,0 +1,442 @@
59395+#include <linux/kernel.h>
59396+#include <linux/module.h>
59397+#include <linux/sched.h>
59398+#include <linux/file.h>
59399+#include <linux/fs.h>
59400+#include <linux/kdev_t.h>
59401+#include <linux/net.h>
59402+#include <linux/in.h>
59403+#include <linux/ip.h>
59404+#include <linux/skbuff.h>
59405+#include <linux/sysctl.h>
59406+
59407+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
59408+void
59409+pax_set_initial_flags(struct linux_binprm *bprm)
59410+{
59411+ return;
59412+}
59413+#endif
59414+
59415+#ifdef CONFIG_SYSCTL
59416+__u32
59417+gr_handle_sysctl(const struct ctl_table * table, const int op)
59418+{
59419+ return 0;
59420+}
59421+#endif
59422+
59423+#ifdef CONFIG_TASKSTATS
59424+int gr_is_taskstats_denied(int pid)
59425+{
59426+ return 0;
59427+}
59428+#endif
59429+
59430+int
59431+gr_acl_is_enabled(void)
59432+{
59433+ return 0;
59434+}
59435+
59436+void
59437+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
59438+{
59439+ return;
59440+}
59441+
59442+int
59443+gr_handle_rawio(const struct inode *inode)
59444+{
59445+ return 0;
59446+}
59447+
59448+void
59449+gr_acl_handle_psacct(struct task_struct *task, const long code)
59450+{
59451+ return;
59452+}
59453+
59454+int
59455+gr_handle_ptrace(struct task_struct *task, const long request)
59456+{
59457+ return 0;
59458+}
59459+
59460+int
59461+gr_handle_proc_ptrace(struct task_struct *task)
59462+{
59463+ return 0;
59464+}
59465+
59466+void
59467+gr_learn_resource(const struct task_struct *task,
59468+ const int res, const unsigned long wanted, const int gt)
59469+{
59470+ return;
59471+}
59472+
59473+int
59474+gr_set_acls(const int type)
59475+{
59476+ return 0;
59477+}
59478+
59479+int
59480+gr_check_hidden_task(const struct task_struct *tsk)
59481+{
59482+ return 0;
59483+}
59484+
59485+int
59486+gr_check_protected_task(const struct task_struct *task)
59487+{
59488+ return 0;
59489+}
59490+
59491+int
59492+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
59493+{
59494+ return 0;
59495+}
59496+
59497+void
59498+gr_copy_label(struct task_struct *tsk)
59499+{
59500+ return;
59501+}
59502+
59503+void
59504+gr_set_pax_flags(struct task_struct *task)
59505+{
59506+ return;
59507+}
59508+
59509+int
59510+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
59511+ const int unsafe_share)
59512+{
59513+ return 0;
59514+}
59515+
59516+void
59517+gr_handle_delete(const ino_t ino, const dev_t dev)
59518+{
59519+ return;
59520+}
59521+
59522+void
59523+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
59524+{
59525+ return;
59526+}
59527+
59528+void
59529+gr_handle_crash(struct task_struct *task, const int sig)
59530+{
59531+ return;
59532+}
59533+
59534+int
59535+gr_check_crash_exec(const struct file *filp)
59536+{
59537+ return 0;
59538+}
59539+
59540+int
59541+gr_check_crash_uid(const uid_t uid)
59542+{
59543+ return 0;
59544+}
59545+
59546+void
59547+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59548+ struct dentry *old_dentry,
59549+ struct dentry *new_dentry,
59550+ struct vfsmount *mnt, const __u8 replace)
59551+{
59552+ return;
59553+}
59554+
59555+int
59556+gr_search_socket(const int family, const int type, const int protocol)
59557+{
59558+ return 1;
59559+}
59560+
59561+int
59562+gr_search_connectbind(const int mode, const struct socket *sock,
59563+ const struct sockaddr_in *addr)
59564+{
59565+ return 0;
59566+}
59567+
59568+void
59569+gr_handle_alertkill(struct task_struct *task)
59570+{
59571+ return;
59572+}
59573+
59574+__u32
59575+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
59576+{
59577+ return 1;
59578+}
59579+
59580+__u32
59581+gr_acl_handle_hidden_file(const struct dentry * dentry,
59582+ const struct vfsmount * mnt)
59583+{
59584+ return 1;
59585+}
59586+
59587+__u32
59588+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
59589+ int acc_mode)
59590+{
59591+ return 1;
59592+}
59593+
59594+__u32
59595+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
59596+{
59597+ return 1;
59598+}
59599+
59600+__u32
59601+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
59602+{
59603+ return 1;
59604+}
59605+
59606+int
59607+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
59608+ unsigned int *vm_flags)
59609+{
59610+ return 1;
59611+}
59612+
59613+__u32
59614+gr_acl_handle_truncate(const struct dentry * dentry,
59615+ const struct vfsmount * mnt)
59616+{
59617+ return 1;
59618+}
59619+
59620+__u32
59621+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
59622+{
59623+ return 1;
59624+}
59625+
59626+__u32
59627+gr_acl_handle_access(const struct dentry * dentry,
59628+ const struct vfsmount * mnt, const int fmode)
59629+{
59630+ return 1;
59631+}
59632+
59633+__u32
59634+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
59635+ umode_t *mode)
59636+{
59637+ return 1;
59638+}
59639+
59640+__u32
59641+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
59642+{
59643+ return 1;
59644+}
59645+
59646+__u32
59647+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
59648+{
59649+ return 1;
59650+}
59651+
59652+void
59653+grsecurity_init(void)
59654+{
59655+ return;
59656+}
59657+
59658+umode_t gr_acl_umask(void)
59659+{
59660+ return 0;
59661+}
59662+
59663+__u32
59664+gr_acl_handle_mknod(const struct dentry * new_dentry,
59665+ const struct dentry * parent_dentry,
59666+ const struct vfsmount * parent_mnt,
59667+ const int mode)
59668+{
59669+ return 1;
59670+}
59671+
59672+__u32
59673+gr_acl_handle_mkdir(const struct dentry * new_dentry,
59674+ const struct dentry * parent_dentry,
59675+ const struct vfsmount * parent_mnt)
59676+{
59677+ return 1;
59678+}
59679+
59680+__u32
59681+gr_acl_handle_symlink(const struct dentry * new_dentry,
59682+ const struct dentry * parent_dentry,
59683+ const struct vfsmount * parent_mnt, const struct filename *from)
59684+{
59685+ return 1;
59686+}
59687+
59688+__u32
59689+gr_acl_handle_link(const struct dentry * new_dentry,
59690+ const struct dentry * parent_dentry,
59691+ const struct vfsmount * parent_mnt,
59692+ const struct dentry * old_dentry,
59693+ const struct vfsmount * old_mnt, const struct filename *to)
59694+{
59695+ return 1;
59696+}
59697+
59698+int
59699+gr_acl_handle_rename(const struct dentry *new_dentry,
59700+ const struct dentry *parent_dentry,
59701+ const struct vfsmount *parent_mnt,
59702+ const struct dentry *old_dentry,
59703+ const struct inode *old_parent_inode,
59704+ const struct vfsmount *old_mnt, const struct filename *newname)
59705+{
59706+ return 0;
59707+}
59708+
59709+int
59710+gr_acl_handle_filldir(const struct file *file, const char *name,
59711+ const int namelen, const ino_t ino)
59712+{
59713+ return 1;
59714+}
59715+
59716+int
59717+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
59718+ const time_t shm_createtime, const uid_t cuid, const int shmid)
59719+{
59720+ return 1;
59721+}
59722+
59723+int
59724+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
59725+{
59726+ return 0;
59727+}
59728+
59729+int
59730+gr_search_accept(const struct socket *sock)
59731+{
59732+ return 0;
59733+}
59734+
59735+int
59736+gr_search_listen(const struct socket *sock)
59737+{
59738+ return 0;
59739+}
59740+
59741+int
59742+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
59743+{
59744+ return 0;
59745+}
59746+
59747+__u32
59748+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
59749+{
59750+ return 1;
59751+}
59752+
59753+__u32
59754+gr_acl_handle_creat(const struct dentry * dentry,
59755+ const struct dentry * p_dentry,
59756+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
59757+ const int imode)
59758+{
59759+ return 1;
59760+}
59761+
59762+void
59763+gr_acl_handle_exit(void)
59764+{
59765+ return;
59766+}
59767+
59768+int
59769+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
59770+{
59771+ return 1;
59772+}
59773+
59774+void
59775+gr_set_role_label(const uid_t uid, const gid_t gid)
59776+{
59777+ return;
59778+}
59779+
59780+int
59781+gr_acl_handle_procpidmem(const struct task_struct *task)
59782+{
59783+ return 0;
59784+}
59785+
59786+int
59787+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
59788+{
59789+ return 0;
59790+}
59791+
59792+int
59793+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
59794+{
59795+ return 0;
59796+}
59797+
59798+void
59799+gr_set_kernel_label(struct task_struct *task)
59800+{
59801+ return;
59802+}
59803+
59804+int
59805+gr_check_user_change(int real, int effective, int fs)
59806+{
59807+ return 0;
59808+}
59809+
59810+int
59811+gr_check_group_change(int real, int effective, int fs)
59812+{
59813+ return 0;
59814+}
59815+
59816+int gr_acl_enable_at_secure(void)
59817+{
59818+ return 0;
59819+}
59820+
59821+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
59822+{
59823+ return dentry->d_inode->i_sb->s_dev;
59824+}
59825+
59826+void gr_put_exec_file(struct task_struct *task)
59827+{
59828+ return;
59829+}
59830+
59831+EXPORT_SYMBOL(gr_learn_resource);
59832+EXPORT_SYMBOL(gr_set_kernel_label);
59833+#ifdef CONFIG_SECURITY
59834+EXPORT_SYMBOL(gr_check_user_change);
59835+EXPORT_SYMBOL(gr_check_group_change);
59836+#endif
59837diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
59838new file mode 100644
59839index 0000000..abfa971
59840--- /dev/null
59841+++ b/grsecurity/grsec_exec.c
59842@@ -0,0 +1,174 @@
59843+#include <linux/kernel.h>
59844+#include <linux/sched.h>
59845+#include <linux/file.h>
59846+#include <linux/binfmts.h>
59847+#include <linux/fs.h>
59848+#include <linux/types.h>
59849+#include <linux/grdefs.h>
59850+#include <linux/grsecurity.h>
59851+#include <linux/grinternal.h>
59852+#include <linux/capability.h>
59853+#include <linux/module.h>
59854+
59855+#include <asm/uaccess.h>
59856+
59857+#ifdef CONFIG_GRKERNSEC_EXECLOG
59858+static char gr_exec_arg_buf[132];
59859+static DEFINE_MUTEX(gr_exec_arg_mutex);
59860+#endif
59861+
59862+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
59863+
59864+void
59865+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
59866+{
59867+#ifdef CONFIG_GRKERNSEC_EXECLOG
59868+ char *grarg = gr_exec_arg_buf;
59869+ unsigned int i, x, execlen = 0;
59870+ char c;
59871+
59872+ if (!((grsec_enable_execlog && grsec_enable_group &&
59873+ in_group_p(grsec_audit_gid))
59874+ || (grsec_enable_execlog && !grsec_enable_group)))
59875+ return;
59876+
59877+ mutex_lock(&gr_exec_arg_mutex);
59878+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
59879+
59880+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
59881+ const char __user *p;
59882+ unsigned int len;
59883+
59884+ p = get_user_arg_ptr(argv, i);
59885+ if (IS_ERR(p))
59886+ goto log;
59887+
59888+ len = strnlen_user(p, 128 - execlen);
59889+ if (len > 128 - execlen)
59890+ len = 128 - execlen;
59891+ else if (len > 0)
59892+ len--;
59893+ if (copy_from_user(grarg + execlen, p, len))
59894+ goto log;
59895+
59896+ /* rewrite unprintable characters */
59897+ for (x = 0; x < len; x++) {
59898+ c = *(grarg + execlen + x);
59899+ if (c < 32 || c > 126)
59900+ *(grarg + execlen + x) = ' ';
59901+ }
59902+
59903+ execlen += len;
59904+ *(grarg + execlen) = ' ';
59905+ *(grarg + execlen + 1) = '\0';
59906+ execlen++;
59907+ }
59908+
59909+ log:
59910+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
59911+ bprm->file->f_path.mnt, grarg);
59912+ mutex_unlock(&gr_exec_arg_mutex);
59913+#endif
59914+ return;
59915+}
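
The loop above packs argv into a 128-byte window of the 132-byte static buffer, truncating as needed, rewriting unprintable bytes to spaces, and terminating each argument with a single space. The same formatting logic as a runnable userspace sketch (buffer sizes mirror the kernel's; the copy_from_user/strnlen_user handling is simplified away):

#include <stdio.h>
#include <string.h>

static void format_args(char *out, size_t outsz, int argc, char **argv)
{
	size_t execlen = 0;
	int i;

	memset(out, 0, outsz);
	for (i = 0; i < argc && execlen < outsz - 4; i++) {
		size_t len = strnlen(argv[i], outsz - 4 - execlen);
		size_t x;

		memcpy(out + execlen, argv[i], len);
		/* rewrite unprintable characters, as the kernel loop does */
		for (x = 0; x < len; x++)
			if (out[execlen + x] < 32 || out[execlen + x] > 126)
				out[execlen + x] = ' ';
		execlen += len;
		out[execlen++] = ' ';
	}
	out[execlen] = '\0';
}

int main(int argc, char **argv)
{
	char buf[132];

	format_args(buf, sizeof(buf), argc, argv);
	printf("exec log: %s\n", buf);
	return 0;
}
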
59916+
59917+#ifdef CONFIG_GRKERNSEC
59918+extern int gr_acl_is_capable(const int cap);
59919+extern int gr_acl_is_capable_nolog(const int cap);
59920+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
59921+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
59922+extern int gr_chroot_is_capable(const int cap);
59923+extern int gr_chroot_is_capable_nolog(const int cap);
59924+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
59925+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
59926+#endif
59927+
59928+const char *captab_log[] = {
59929+ "CAP_CHOWN",
59930+ "CAP_DAC_OVERRIDE",
59931+ "CAP_DAC_READ_SEARCH",
59932+ "CAP_FOWNER",
59933+ "CAP_FSETID",
59934+ "CAP_KILL",
59935+ "CAP_SETGID",
59936+ "CAP_SETUID",
59937+ "CAP_SETPCAP",
59938+ "CAP_LINUX_IMMUTABLE",
59939+ "CAP_NET_BIND_SERVICE",
59940+ "CAP_NET_BROADCAST",
59941+ "CAP_NET_ADMIN",
59942+ "CAP_NET_RAW",
59943+ "CAP_IPC_LOCK",
59944+ "CAP_IPC_OWNER",
59945+ "CAP_SYS_MODULE",
59946+ "CAP_SYS_RAWIO",
59947+ "CAP_SYS_CHROOT",
59948+ "CAP_SYS_PTRACE",
59949+ "CAP_SYS_PACCT",
59950+ "CAP_SYS_ADMIN",
59951+ "CAP_SYS_BOOT",
59952+ "CAP_SYS_NICE",
59953+ "CAP_SYS_RESOURCE",
59954+ "CAP_SYS_TIME",
59955+ "CAP_SYS_TTY_CONFIG",
59956+ "CAP_MKNOD",
59957+ "CAP_LEASE",
59958+ "CAP_AUDIT_WRITE",
59959+ "CAP_AUDIT_CONTROL",
59960+ "CAP_SETFCAP",
59961+ "CAP_MAC_OVERRIDE",
59962+ "CAP_MAC_ADMIN",
59963+ "CAP_SYSLOG",
59964+ "CAP_WAKE_ALARM"
59965+};
59966+
59967+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
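
captab_log_entries exists so lookups stay bounds-checked: a capability number newer than this table (cap >= captab_log_entries) must never be used as an index, which is why the logging sites above guard with "cap < captab_log_entries" before dereferencing. A small helper in the same spirit (a sketch, not from the patch):

extern const char *captab_log[];
extern int captab_log_entries;

/* map a capability number to its name, tolerating newer capabilities */
static const char *cap_name(const int cap)
{
	if (cap >= 0 && cap < captab_log_entries)
		return captab_log[cap];
	return "CAP_<unknown>";
}
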
59968+
59969+int gr_is_capable(const int cap)
59970+{
59971+#ifdef CONFIG_GRKERNSEC
59972+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
59973+ return 1;
59974+ return 0;
59975+#else
59976+ return 1;
59977+#endif
59978+}
59979+
59980+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
59981+{
59982+#ifdef CONFIG_GRKERNSEC
59983+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
59984+ return 1;
59985+ return 0;
59986+#else
59987+ return 1;
59988+#endif
59989+}
59990+
59991+int gr_is_capable_nolog(const int cap)
59992+{
59993+#ifdef CONFIG_GRKERNSEC
59994+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
59995+ return 1;
59996+ return 0;
59997+#else
59998+ return 1;
59999+#endif
60000+}
60001+
60002+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
60003+{
60004+#ifdef CONFIG_GRKERNSEC
60005+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
60006+ return 1;
60007+ return 0;
60008+#else
60009+ return 1;
60010+#endif
60011+}
60012+
60013+EXPORT_SYMBOL(gr_is_capable);
60014+EXPORT_SYMBOL(gr_is_capable_nolog);
60015+EXPORT_SYMBOL(gr_task_is_capable);
60016+EXPORT_SYMBOL(gr_task_is_capable_nolog);
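
The pattern throughout this file is conjunction: a capability survives only if both the RBAC policy (gr_*acl_is_capable*) and the chroot restrictions (gr_*chroot_is_capable*) permit it, and without CONFIG_GRKERNSEC everything compiles down to "return 1". A hypothetical consumer, sketched — my_capable and its shape are illustrative, not a call site from the patch:

/* hypothetical call site: credentials must grant the capability first,
 * then grsecurity gets a chance to veto (and log) it */
static int my_capable(const struct task_struct *task,
		      const struct cred *cred, const int cap)
{
	if (!cap_raised(cred->cap_effective, cap))
		return 0;	/* never granted at all: nothing to veto */
	return gr_task_is_capable(task, cred, cap);
}
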
60017diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
60018new file mode 100644
60019index 0000000..d3ee748
60020--- /dev/null
60021+++ b/grsecurity/grsec_fifo.c
60022@@ -0,0 +1,24 @@
60023+#include <linux/kernel.h>
60024+#include <linux/sched.h>
60025+#include <linux/fs.h>
60026+#include <linux/file.h>
60027+#include <linux/grinternal.h>
60028+
60029+int
60030+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
60031+ const struct dentry *dir, const int flag, const int acc_mode)
60032+{
60033+#ifdef CONFIG_GRKERNSEC_FIFO
60034+ const struct cred *cred = current_cred();
60035+
60036+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
60037+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
60038+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
60039+ (cred->fsuid != dentry->d_inode->i_uid)) {
60040+ if (!inode_permission(dentry->d_inode, acc_mode))
60041+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
60042+ return -EACCES;
60043+ }
60044+#endif
60045+ return 0;
60046+}
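
Stated from userspace, gr_handle_fifo() refuses to open a FIFO sitting in a sticky directory when the FIFO is owned by neither the directory's owner nor the opener; the kernel additionally exempts O_EXCL opens and logs only when plain DAC would have allowed the access. A stat(2)-based restatement of the ownership predicate (a sketch):

#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

/* 1 = the open would be refused under the rule above (userspace view) */
static int fifo_open_denied(const char *fifo, const char *dir)
{
	struct stat fs, ds;

	if (stat(fifo, &fs) || stat(dir, &ds))
		return 0;
	return S_ISFIFO(fs.st_mode) &&
	       (ds.st_mode & S_ISVTX) &&
	       fs.st_uid != ds.st_uid &&
	       fs.st_uid != geteuid();
}

int main(int argc, char **argv)
{
	if (argc != 3)
		return 2;
	puts(fifo_open_denied(argv[1], argv[2]) ? "denied" : "allowed");
	return 0;
}
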
60047diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
60048new file mode 100644
60049index 0000000..8ca18bf
60050--- /dev/null
60051+++ b/grsecurity/grsec_fork.c
60052@@ -0,0 +1,23 @@
60053+#include <linux/kernel.h>
60054+#include <linux/sched.h>
60055+#include <linux/grsecurity.h>
60056+#include <linux/grinternal.h>
60057+#include <linux/errno.h>
60058+
60059+void
60060+gr_log_forkfail(const int retval)
60061+{
60062+#ifdef CONFIG_GRKERNSEC_FORKFAIL
60063+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
60064+ switch (retval) {
60065+ case -EAGAIN:
60066+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
60067+ break;
60068+ case -ENOMEM:
60069+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
60070+ break;
60071+ }
60072+ }
60073+#endif
60074+ return;
60075+}
60076diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
60077new file mode 100644
60078index 0000000..05a6015
60079--- /dev/null
60080+++ b/grsecurity/grsec_init.c
60081@@ -0,0 +1,283 @@
60082+#include <linux/kernel.h>
60083+#include <linux/sched.h>
60084+#include <linux/mm.h>
60085+#include <linux/gracl.h>
60086+#include <linux/slab.h>
60087+#include <linux/vmalloc.h>
60088+#include <linux/percpu.h>
60089+#include <linux/module.h>
60090+
60091+int grsec_enable_ptrace_readexec;
60092+int grsec_enable_setxid;
60093+int grsec_enable_symlinkown;
60094+int grsec_symlinkown_gid;
60095+int grsec_enable_brute;
60096+int grsec_enable_link;
60097+int grsec_enable_dmesg;
60098+int grsec_enable_harden_ptrace;
60099+int grsec_enable_fifo;
60100+int grsec_enable_execlog;
60101+int grsec_enable_signal;
60102+int grsec_enable_forkfail;
60103+int grsec_enable_audit_ptrace;
60104+int grsec_enable_time;
60105+int grsec_enable_audit_textrel;
60106+int grsec_enable_group;
60107+int grsec_audit_gid;
60108+int grsec_enable_chdir;
60109+int grsec_enable_mount;
60110+int grsec_enable_rofs;
60111+int grsec_enable_chroot_findtask;
60112+int grsec_enable_chroot_mount;
60113+int grsec_enable_chroot_shmat;
60114+int grsec_enable_chroot_fchdir;
60115+int grsec_enable_chroot_double;
60116+int grsec_enable_chroot_pivot;
60117+int grsec_enable_chroot_chdir;
60118+int grsec_enable_chroot_chmod;
60119+int grsec_enable_chroot_mknod;
60120+int grsec_enable_chroot_nice;
60121+int grsec_enable_chroot_execlog;
60122+int grsec_enable_chroot_caps;
60123+int grsec_enable_chroot_sysctl;
60124+int grsec_enable_chroot_unix;
60125+int grsec_enable_tpe;
60126+int grsec_tpe_gid;
60127+int grsec_enable_blackhole;
60128+#ifdef CONFIG_IPV6_MODULE
60129+EXPORT_SYMBOL(grsec_enable_blackhole);
60130+#endif
60131+int grsec_lastack_retries;
60132+int grsec_enable_tpe_all;
60133+int grsec_enable_tpe_invert;
60134+int grsec_enable_socket_all;
60135+int grsec_socket_all_gid;
60136+int grsec_enable_socket_client;
60137+int grsec_socket_client_gid;
60138+int grsec_enable_socket_server;
60139+int grsec_socket_server_gid;
60140+int grsec_resource_logging;
60141+int grsec_disable_privio;
60142+int grsec_enable_log_rwxmaps;
60143+int grsec_lock;
60144+
60145+DEFINE_SPINLOCK(grsec_alert_lock);
60146+unsigned long grsec_alert_wtime = 0;
60147+unsigned long grsec_alert_fyet = 0;
60148+
60149+DEFINE_SPINLOCK(grsec_audit_lock);
60150+
60151+DEFINE_RWLOCK(grsec_exec_file_lock);
60152+
60153+char *gr_shared_page[4];
60154+
60155+char *gr_alert_log_fmt;
60156+char *gr_audit_log_fmt;
60157+char *gr_alert_log_buf;
60158+char *gr_audit_log_buf;
60159+
60160+extern struct gr_arg *gr_usermode;
60161+extern unsigned char *gr_system_salt;
60162+extern unsigned char *gr_system_sum;
60163+
60164+void __init
60165+grsecurity_init(void)
60166+{
60167+ int j;
60168+ /* create the per-cpu shared pages */
60169+
60170+#ifdef CONFIG_X86
60171+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
60172+#endif
60173+
60174+ for (j = 0; j < 4; j++) {
60175+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
60176+ if (gr_shared_page[j] == NULL) {
60177+ panic("Unable to allocate grsecurity shared page");
60178+ return;
60179+ }
60180+ }
60181+
60182+ /* allocate log buffers */
60183+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
60184+ if (!gr_alert_log_fmt) {
60185+ panic("Unable to allocate grsecurity alert log format buffer");
60186+ return;
60187+ }
60188+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
60189+ if (!gr_audit_log_fmt) {
60190+ panic("Unable to allocate grsecurity audit log format buffer");
60191+ return;
60192+ }
60193+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
60194+ if (!gr_alert_log_buf) {
60195+ panic("Unable to allocate grsecurity alert log buffer");
60196+ return;
60197+ }
60198+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
60199+ if (!gr_audit_log_buf) {
60200+ panic("Unable to allocate grsecurity audit log buffer");
60201+ return;
60202+ }
60203+
60204+ /* allocate memory for authentication structure */
60205+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
60206+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
60207+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
60208+
60209+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
60210+ panic("Unable to allocate grsecurity authentication structure");
60211+ return;
60212+ }
60213+
60214+
60215+#ifdef CONFIG_GRKERNSEC_IO
60216+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
60217+ grsec_disable_privio = 1;
60218+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
60219+ grsec_disable_privio = 1;
60220+#else
60221+ grsec_disable_privio = 0;
60222+#endif
60223+#endif
60224+
60225+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
60226+	/* for backward compatibility, tpe_invert always defaults to on
60227+	   when support for it is enabled in the kernel config
60228+	*/
60229+ grsec_enable_tpe_invert = 1;
60230+#endif
60231+
60232+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
60233+#ifndef CONFIG_GRKERNSEC_SYSCTL
60234+ grsec_lock = 1;
60235+#endif
60236+
60237+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
60238+ grsec_enable_audit_textrel = 1;
60239+#endif
60240+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
60241+ grsec_enable_log_rwxmaps = 1;
60242+#endif
60243+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
60244+ grsec_enable_group = 1;
60245+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
60246+#endif
60247+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
60248+ grsec_enable_ptrace_readexec = 1;
60249+#endif
60250+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
60251+ grsec_enable_chdir = 1;
60252+#endif
60253+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
60254+ grsec_enable_harden_ptrace = 1;
60255+#endif
60256+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
60257+ grsec_enable_mount = 1;
60258+#endif
60259+#ifdef CONFIG_GRKERNSEC_LINK
60260+ grsec_enable_link = 1;
60261+#endif
60262+#ifdef CONFIG_GRKERNSEC_BRUTE
60263+ grsec_enable_brute = 1;
60264+#endif
60265+#ifdef CONFIG_GRKERNSEC_DMESG
60266+ grsec_enable_dmesg = 1;
60267+#endif
60268+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
60269+ grsec_enable_blackhole = 1;
60270+ grsec_lastack_retries = 4;
60271+#endif
60272+#ifdef CONFIG_GRKERNSEC_FIFO
60273+ grsec_enable_fifo = 1;
60274+#endif
60275+#ifdef CONFIG_GRKERNSEC_EXECLOG
60276+ grsec_enable_execlog = 1;
60277+#endif
60278+#ifdef CONFIG_GRKERNSEC_SETXID
60279+ grsec_enable_setxid = 1;
60280+#endif
60281+#ifdef CONFIG_GRKERNSEC_SIGNAL
60282+ grsec_enable_signal = 1;
60283+#endif
60284+#ifdef CONFIG_GRKERNSEC_FORKFAIL
60285+ grsec_enable_forkfail = 1;
60286+#endif
60287+#ifdef CONFIG_GRKERNSEC_TIME
60288+ grsec_enable_time = 1;
60289+#endif
60290+#ifdef CONFIG_GRKERNSEC_RESLOG
60291+ grsec_resource_logging = 1;
60292+#endif
60293+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
60294+ grsec_enable_chroot_findtask = 1;
60295+#endif
60296+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
60297+ grsec_enable_chroot_unix = 1;
60298+#endif
60299+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
60300+ grsec_enable_chroot_mount = 1;
60301+#endif
60302+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
60303+ grsec_enable_chroot_fchdir = 1;
60304+#endif
60305+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
60306+ grsec_enable_chroot_shmat = 1;
60307+#endif
60308+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
60309+ grsec_enable_audit_ptrace = 1;
60310+#endif
60311+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
60312+ grsec_enable_chroot_double = 1;
60313+#endif
60314+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
60315+ grsec_enable_chroot_pivot = 1;
60316+#endif
60317+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
60318+ grsec_enable_chroot_chdir = 1;
60319+#endif
60320+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
60321+ grsec_enable_chroot_chmod = 1;
60322+#endif
60323+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
60324+ grsec_enable_chroot_mknod = 1;
60325+#endif
60326+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
60327+ grsec_enable_chroot_nice = 1;
60328+#endif
60329+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
60330+ grsec_enable_chroot_execlog = 1;
60331+#endif
60332+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
60333+ grsec_enable_chroot_caps = 1;
60334+#endif
60335+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
60336+ grsec_enable_chroot_sysctl = 1;
60337+#endif
60338+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
60339+ grsec_enable_symlinkown = 1;
60340+ grsec_symlinkown_gid = CONFIG_GRKERNSEC_SYMLINKOWN_GID;
60341+#endif
60342+#ifdef CONFIG_GRKERNSEC_TPE
60343+ grsec_enable_tpe = 1;
60344+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
60345+#ifdef CONFIG_GRKERNSEC_TPE_ALL
60346+ grsec_enable_tpe_all = 1;
60347+#endif
60348+#endif
60349+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
60350+ grsec_enable_socket_all = 1;
60351+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
60352+#endif
60353+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
60354+ grsec_enable_socket_client = 1;
60355+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
60356+#endif
60357+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
60358+ grsec_enable_socket_server = 1;
60359+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
60360+#endif
60361+#endif
60362+
60363+ return;
60364+}
60365diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
60366new file mode 100644
60367index 0000000..6095407
60368--- /dev/null
60369+++ b/grsecurity/grsec_link.c
60370@@ -0,0 +1,58 @@
60371+#include <linux/kernel.h>
60372+#include <linux/sched.h>
60373+#include <linux/fs.h>
60374+#include <linux/file.h>
60375+#include <linux/grinternal.h>
60376+
60377+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
60378+{
60379+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
60380+ const struct inode *link_inode = link->dentry->d_inode;
60381+
60382+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
60383+ /* ignore root-owned links, e.g. /proc/self */
60384+ !uid_eq(link_inode->i_uid, GLOBAL_ROOT_UID) && target &&
60385+ !uid_eq(link_inode->i_uid, target->i_uid)) {
60386+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
60387+ return 1;
60388+ }
60389+#endif
60390+ return 0;
60391+}
60392+
60393+int
60394+gr_handle_follow_link(const struct inode *parent,
60395+ const struct inode *inode,
60396+ const struct dentry *dentry, const struct vfsmount *mnt)
60397+{
60398+#ifdef CONFIG_GRKERNSEC_LINK
60399+ const struct cred *cred = current_cred();
60400+
60401+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
60402+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
60403+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
60404+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
60405+ return -EACCES;
60406+ }
60407+#endif
60408+ return 0;
60409+}
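
This is the classic sticky-symlink protection (the policy mainline later shipped as fs.protected_symlinks): in a world-writable sticky directory, a symlink is followed only if its owner matches the directory owner or the follower's fsuid; the hardlink veto below applies similar ownership reasoning. A userspace restatement, with geteuid() approximating the kernel's fsuid test (a sketch):

#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

/* 1 = following 'link' inside 'dir' would be refused under the rule above */
static int follow_denied(const char *link, const char *dir)
{
	struct stat ls, ds;

	if (lstat(link, &ls) || stat(dir, &ds) || !S_ISLNK(ls.st_mode))
		return 0;
	return (ds.st_mode & S_ISVTX) && (ds.st_mode & S_IWOTH) &&
	       ls.st_uid != ds.st_uid && ls.st_uid != geteuid();
}

int main(int argc, char **argv)
{
	if (argc != 3)
		return 2;
	puts(follow_denied(argv[1], argv[2]) ? "denied" : "allowed");
	return 0;
}
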
60410+
60411+int
60412+gr_handle_hardlink(const struct dentry *dentry,
60413+ const struct vfsmount *mnt,
60414+ struct inode *inode, const int mode, const struct filename *to)
60415+{
60416+#ifdef CONFIG_GRKERNSEC_LINK
60417+ const struct cred *cred = current_cred();
60418+
60419+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
60420+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
60421+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
60422+ !capable(CAP_FOWNER) && !uid_eq(cred->uid, GLOBAL_ROOT_UID)) {
60423+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
60424+ return -EPERM;
60425+ }
60426+#endif
60427+ return 0;
60428+}
60429diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
60430new file mode 100644
60431index 0000000..a45d2e9
60432--- /dev/null
60433+++ b/grsecurity/grsec_log.c
60434@@ -0,0 +1,322 @@
60435+#include <linux/kernel.h>
60436+#include <linux/sched.h>
60437+#include <linux/file.h>
60438+#include <linux/tty.h>
60439+#include <linux/fs.h>
60440+#include <linux/grinternal.h>
60441+
60442+#ifdef CONFIG_TREE_PREEMPT_RCU
60443+#define DISABLE_PREEMPT() preempt_disable()
60444+#define ENABLE_PREEMPT() preempt_enable()
60445+#else
60446+#define DISABLE_PREEMPT()
60447+#define ENABLE_PREEMPT()
60448+#endif
60449+
60450+#define BEGIN_LOCKS(x) \
60451+ DISABLE_PREEMPT(); \
60452+ rcu_read_lock(); \
60453+ read_lock(&tasklist_lock); \
60454+ read_lock(&grsec_exec_file_lock); \
60455+ if (x != GR_DO_AUDIT) \
60456+ spin_lock(&grsec_alert_lock); \
60457+ else \
60458+ spin_lock(&grsec_audit_lock)
60459+
60460+#define END_LOCKS(x) \
60461+ if (x != GR_DO_AUDIT) \
60462+ spin_unlock(&grsec_alert_lock); \
60463+ else \
60464+ spin_unlock(&grsec_audit_lock); \
60465+ read_unlock(&grsec_exec_file_lock); \
60466+ read_unlock(&tasklist_lock); \
60467+ rcu_read_unlock(); \
60468+ ENABLE_PREEMPT(); \
60469+ if (x == GR_DONT_AUDIT) \
60470+ gr_handle_alertkill(current)
60471+
60472+enum {
60473+ FLOODING,
60474+ NO_FLOODING
60475+};
60476+
60477+extern char *gr_alert_log_fmt;
60478+extern char *gr_audit_log_fmt;
60479+extern char *gr_alert_log_buf;
60480+extern char *gr_audit_log_buf;
60481+
60482+static int gr_log_start(int audit)
60483+{
60484+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
60485+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
60486+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
60487+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
60488+ unsigned long curr_secs = get_seconds();
60489+
60490+ if (audit == GR_DO_AUDIT)
60491+ goto set_fmt;
60492+
60493+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
60494+ grsec_alert_wtime = curr_secs;
60495+ grsec_alert_fyet = 0;
60496+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
60497+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
60498+ grsec_alert_fyet++;
60499+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
60500+ grsec_alert_wtime = curr_secs;
60501+ grsec_alert_fyet++;
60502+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
60503+ return FLOODING;
60504+ }
60505+ else return FLOODING;
60506+
60507+set_fmt:
60508+#endif
60509+ memset(buf, 0, PAGE_SIZE);
60510+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
60511+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
60512+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
60513+ } else if (current->signal->curr_ip) {
60514+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
60515+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
60516+ } else if (gr_acl_is_enabled()) {
60517+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
60518+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
60519+ } else {
60520+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
60521+ strcpy(buf, fmt);
60522+ }
60523+
60524+ return NO_FLOODING;
60525+}
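
gr_log_start() above is a burst limiter: within each CONFIG_GRKERNSEC_FLOODTIME-second window at most CONFIG_GRKERNSEC_FLOODBURST alerts pass, the first suppressed alert renews the window and announces the suppression exactly once, and audit messages bypass the limiter entirely. A self-contained model of that state machine with assumed constants (a sketch):

#include <stdio.h>
#include <time.h>

#define FLOODTIME  10	/* assumed window, seconds */
#define FLOODBURST 6	/* assumed burst budget */

static time_t wtime;
static unsigned long fyet;

static int log_allowed(time_t now)
{
	if (!wtime || now > wtime + FLOODTIME) {
		wtime = now;		/* fresh window */
		fyet = 0;
		return 1;
	}
	if (fyet < FLOODBURST) {	/* still inside the burst budget */
		fyet++;
		return 1;
	}
	if (fyet == FLOODBURST) {	/* first overflow: announce once */
		wtime = now;
		fyet++;
		printf("more alerts, logging disabled for %d seconds\n", FLOODTIME);
	}
	return 0;
}

int main(void)
{
	int i;

	for (i = 0; i < 10; i++)
		printf("msg %d: %s\n", i, log_allowed(time(NULL)) ? "logged" : "dropped");
	return 0;
}
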
60526+
60527+static void gr_log_middle(int audit, const char *msg, va_list ap)
60528+ __attribute__ ((format (printf, 2, 0)));
60529+
60530+static void gr_log_middle(int audit, const char *msg, va_list ap)
60531+{
60532+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
60533+ unsigned int len = strlen(buf);
60534+
60535+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
60536+
60537+ return;
60538+}
60539+
60540+static void gr_log_middle_varargs(int audit, const char *msg, ...)
60541+ __attribute__ ((format (printf, 2, 3)));
60542+
60543+static void gr_log_middle_varargs(int audit, const char *msg, ...)
60544+{
60545+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
60546+ unsigned int len = strlen(buf);
60547+ va_list ap;
60548+
60549+ va_start(ap, msg);
60550+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
60551+ va_end(ap);
60552+
60553+ return;
60554+}
60555+
60556+static void gr_log_end(int audit, int append_default)
60557+{
60558+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
60559+
60560+ if (append_default) {
60561+ unsigned int len = strlen(buf);
60562+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
60563+ }
60564+
60565+ printk("%s\n", buf);
60566+
60567+ return;
60568+}
60569+
60570+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
60571+{
60572+ int logtype;
60573+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
60574+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
60575+ void *voidptr = NULL;
60576+ int num1 = 0, num2 = 0;
60577+ unsigned long ulong1 = 0, ulong2 = 0;
60578+ struct dentry *dentry = NULL;
60579+ struct vfsmount *mnt = NULL;
60580+ struct file *file = NULL;
60581+ struct task_struct *task = NULL;
60582+ const struct cred *cred, *pcred;
60583+ va_list ap;
60584+
60585+ BEGIN_LOCKS(audit);
60586+ logtype = gr_log_start(audit);
60587+ if (logtype == FLOODING) {
60588+ END_LOCKS(audit);
60589+ return;
60590+ }
60591+ va_start(ap, argtypes);
60592+ switch (argtypes) {
60593+ case GR_TTYSNIFF:
60594+ task = va_arg(ap, struct task_struct *);
60595+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
60596+ break;
60597+ case GR_SYSCTL_HIDDEN:
60598+ str1 = va_arg(ap, char *);
60599+ gr_log_middle_varargs(audit, msg, result, str1);
60600+ break;
60601+ case GR_RBAC:
60602+ dentry = va_arg(ap, struct dentry *);
60603+ mnt = va_arg(ap, struct vfsmount *);
60604+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
60605+ break;
60606+ case GR_RBAC_STR:
60607+ dentry = va_arg(ap, struct dentry *);
60608+ mnt = va_arg(ap, struct vfsmount *);
60609+ str1 = va_arg(ap, char *);
60610+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
60611+ break;
60612+ case GR_STR_RBAC:
60613+ str1 = va_arg(ap, char *);
60614+ dentry = va_arg(ap, struct dentry *);
60615+ mnt = va_arg(ap, struct vfsmount *);
60616+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
60617+ break;
60618+ case GR_RBAC_MODE2:
60619+ dentry = va_arg(ap, struct dentry *);
60620+ mnt = va_arg(ap, struct vfsmount *);
60621+ str1 = va_arg(ap, char *);
60622+ str2 = va_arg(ap, char *);
60623+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
60624+ break;
60625+ case GR_RBAC_MODE3:
60626+ dentry = va_arg(ap, struct dentry *);
60627+ mnt = va_arg(ap, struct vfsmount *);
60628+ str1 = va_arg(ap, char *);
60629+ str2 = va_arg(ap, char *);
60630+ str3 = va_arg(ap, char *);
60631+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
60632+ break;
60633+ case GR_FILENAME:
60634+ dentry = va_arg(ap, struct dentry *);
60635+ mnt = va_arg(ap, struct vfsmount *);
60636+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
60637+ break;
60638+ case GR_STR_FILENAME:
60639+ str1 = va_arg(ap, char *);
60640+ dentry = va_arg(ap, struct dentry *);
60641+ mnt = va_arg(ap, struct vfsmount *);
60642+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
60643+ break;
60644+ case GR_FILENAME_STR:
60645+ dentry = va_arg(ap, struct dentry *);
60646+ mnt = va_arg(ap, struct vfsmount *);
60647+ str1 = va_arg(ap, char *);
60648+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
60649+ break;
60650+ case GR_FILENAME_TWO_INT:
60651+ dentry = va_arg(ap, struct dentry *);
60652+ mnt = va_arg(ap, struct vfsmount *);
60653+ num1 = va_arg(ap, int);
60654+ num2 = va_arg(ap, int);
60655+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
60656+ break;
60657+ case GR_FILENAME_TWO_INT_STR:
60658+ dentry = va_arg(ap, struct dentry *);
60659+ mnt = va_arg(ap, struct vfsmount *);
60660+ num1 = va_arg(ap, int);
60661+ num2 = va_arg(ap, int);
60662+ str1 = va_arg(ap, char *);
60663+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
60664+ break;
60665+ case GR_TEXTREL:
60666+ file = va_arg(ap, struct file *);
60667+ ulong1 = va_arg(ap, unsigned long);
60668+ ulong2 = va_arg(ap, unsigned long);
60669+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
60670+ break;
60671+ case GR_PTRACE:
60672+ task = va_arg(ap, struct task_struct *);
60673+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
60674+ break;
60675+ case GR_RESOURCE:
60676+ task = va_arg(ap, struct task_struct *);
60677+ cred = __task_cred(task);
60678+ pcred = __task_cred(task->real_parent);
60679+ ulong1 = va_arg(ap, unsigned long);
60680+ str1 = va_arg(ap, char *);
60681+ ulong2 = va_arg(ap, unsigned long);
60682+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
60683+ break;
60684+ case GR_CAP:
60685+ task = va_arg(ap, struct task_struct *);
60686+ cred = __task_cred(task);
60687+ pcred = __task_cred(task->real_parent);
60688+ str1 = va_arg(ap, char *);
60689+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
60690+ break;
60691+ case GR_SIG:
60692+ str1 = va_arg(ap, char *);
60693+ voidptr = va_arg(ap, void *);
60694+ gr_log_middle_varargs(audit, msg, str1, voidptr);
60695+ break;
60696+ case GR_SIG2:
60697+ task = va_arg(ap, struct task_struct *);
60698+ cred = __task_cred(task);
60699+ pcred = __task_cred(task->real_parent);
60700+ num1 = va_arg(ap, int);
60701+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
60702+ break;
60703+ case GR_CRASH1:
60704+ task = va_arg(ap, struct task_struct *);
60705+ cred = __task_cred(task);
60706+ pcred = __task_cred(task->real_parent);
60707+ ulong1 = va_arg(ap, unsigned long);
60708+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
60709+ break;
60710+ case GR_CRASH2:
60711+ task = va_arg(ap, struct task_struct *);
60712+ cred = __task_cred(task);
60713+ pcred = __task_cred(task->real_parent);
60714+ ulong1 = va_arg(ap, unsigned long);
60715+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
60716+ break;
60717+ case GR_RWXMAP:
60718+ file = va_arg(ap, struct file *);
60719+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
60720+ break;
60721+ case GR_PSACCT:
60722+ {
60723+ unsigned int wday, cday;
60724+ __u8 whr, chr;
60725+ __u8 wmin, cmin;
60726+ __u8 wsec, csec;
60727+ char cur_tty[64] = { 0 };
60728+ char parent_tty[64] = { 0 };
60729+
60730+ task = va_arg(ap, struct task_struct *);
60731+ wday = va_arg(ap, unsigned int);
60732+ cday = va_arg(ap, unsigned int);
60733+ whr = va_arg(ap, int);
60734+ chr = va_arg(ap, int);
60735+ wmin = va_arg(ap, int);
60736+ cmin = va_arg(ap, int);
60737+ wsec = va_arg(ap, int);
60738+ csec = va_arg(ap, int);
60739+ ulong1 = va_arg(ap, unsigned long);
60740+ cred = __task_cred(task);
60741+ pcred = __task_cred(task->real_parent);
60742+
60743+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
60744+ }
60745+ break;
60746+ default:
60747+ gr_log_middle(audit, msg, ap);
60748+ }
60749+ va_end(ap);
60750+ // these don't need DEFAULTSECARGS printed on the end
60751+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
60752+ gr_log_end(audit, 0);
60753+ else
60754+ gr_log_end(audit, 1);
60755+ END_LOCKS(audit);
60756+}
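
The dispatcher above relies on a contract: each GR_*_MSG constant is a printf-style format defined elsewhere in the patch, and the argtypes case naming it must pull va_args in exactly the order the format consumes them; any mismatch is undefined behaviour. A miniature model of that pairing, with hypothetical names (a sketch):

#include <stdarg.h>
#include <stdio.h>

enum { MY_STR_FILENAME };	/* hypothetical argtypes value */

/* hypothetical table pairing an argument layout with its format string */
static const char *fmt_for[] = {
	[MY_STR_FILENAME] = "denied %s of %s",
};

static void my_log(int argtypes, ...)
{
	va_list ap;

	va_start(ap, argtypes);
	vprintf(fmt_for[argtypes], ap);	/* layout must match the format */
	va_end(ap);
	putchar('\n');
}

int main(void)
{
	my_log(MY_STR_FILENAME, "chmod", "/tmp/x");
	return 0;
}
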
60757diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
60758new file mode 100644
60759index 0000000..f536303
60760--- /dev/null
60761+++ b/grsecurity/grsec_mem.c
60762@@ -0,0 +1,40 @@
60763+#include <linux/kernel.h>
60764+#include <linux/sched.h>
60765+#include <linux/mm.h>
60766+#include <linux/mman.h>
60767+#include <linux/grinternal.h>
60768+
60769+void
60770+gr_handle_ioperm(void)
60771+{
60772+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
60773+ return;
60774+}
60775+
60776+void
60777+gr_handle_iopl(void)
60778+{
60779+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
60780+ return;
60781+}
60782+
60783+void
60784+gr_handle_mem_readwrite(u64 from, u64 to)
60785+{
60786+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
60787+ return;
60788+}
60789+
60790+void
60791+gr_handle_vm86(void)
60792+{
60793+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
60794+ return;
60795+}
60796+
60797+void
60798+gr_log_badprocpid(const char *entry)
60799+{
60800+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
60801+ return;
60802+}
60803diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
60804new file mode 100644
60805index 0000000..2131422
60806--- /dev/null
60807+++ b/grsecurity/grsec_mount.c
60808@@ -0,0 +1,62 @@
60809+#include <linux/kernel.h>
60810+#include <linux/sched.h>
60811+#include <linux/mount.h>
60812+#include <linux/grsecurity.h>
60813+#include <linux/grinternal.h>
60814+
60815+void
60816+gr_log_remount(const char *devname, const int retval)
60817+{
60818+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
60819+ if (grsec_enable_mount && (retval >= 0))
60820+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
60821+#endif
60822+ return;
60823+}
60824+
60825+void
60826+gr_log_unmount(const char *devname, const int retval)
60827+{
60828+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
60829+ if (grsec_enable_mount && (retval >= 0))
60830+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
60831+#endif
60832+ return;
60833+}
60834+
60835+void
60836+gr_log_mount(const char *from, const char *to, const int retval)
60837+{
60838+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
60839+ if (grsec_enable_mount && (retval >= 0))
60840+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
60841+#endif
60842+ return;
60843+}
60844+
60845+int
60846+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
60847+{
60848+#ifdef CONFIG_GRKERNSEC_ROFS
60849+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
60850+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
60851+ return -EPERM;
60852+ } else
60853+ return 0;
60854+#endif
60855+ return 0;
60856+}
60857+
60858+int
60859+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
60860+{
60861+#ifdef CONFIG_GRKERNSEC_ROFS
60862+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
60863+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
60864+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
60865+ return -EPERM;
60866+ } else
60867+ return 0;
60868+#endif
60869+ return 0;
60870+}
60871diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
60872new file mode 100644
60873index 0000000..a3b12a0
60874--- /dev/null
60875+++ b/grsecurity/grsec_pax.c
60876@@ -0,0 +1,36 @@
60877+#include <linux/kernel.h>
60878+#include <linux/sched.h>
60879+#include <linux/mm.h>
60880+#include <linux/file.h>
60881+#include <linux/grinternal.h>
60882+#include <linux/grsecurity.h>
60883+
60884+void
60885+gr_log_textrel(struct vm_area_struct * vma)
60886+{
60887+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
60888+ if (grsec_enable_audit_textrel)
60889+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
60890+#endif
60891+ return;
60892+}
60893+
60894+void
60895+gr_log_rwxmmap(struct file *file)
60896+{
60897+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
60898+ if (grsec_enable_log_rwxmaps)
60899+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
60900+#endif
60901+ return;
60902+}
60903+
60904+void
60905+gr_log_rwxmprotect(struct file *file)
60906+{
60907+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
60908+ if (grsec_enable_log_rwxmaps)
60909+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
60910+#endif
60911+ return;
60912+}
60913diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
60914new file mode 100644
60915index 0000000..f7f29aa
60916--- /dev/null
60917+++ b/grsecurity/grsec_ptrace.c
60918@@ -0,0 +1,30 @@
60919+#include <linux/kernel.h>
60920+#include <linux/sched.h>
60921+#include <linux/grinternal.h>
60922+#include <linux/security.h>
60923+
60924+void
60925+gr_audit_ptrace(struct task_struct *task)
60926+{
60927+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
60928+ if (grsec_enable_audit_ptrace)
60929+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
60930+#endif
60931+ return;
60932+}
60933+
60934+int
60935+gr_ptrace_readexec(struct file *file, int unsafe_flags)
60936+{
60937+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
60938+ const struct dentry *dentry = file->f_path.dentry;
60939+ const struct vfsmount *mnt = file->f_path.mnt;
60940+
60941+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
60942+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
60943+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
60944+ return -EACCES;
60945+ }
60946+#endif
60947+ return 0;
60948+}
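
The readexec check above closes a disclosure hole: without it, a user who may execute but not read a binary (mode --x) could ptrace their own process and dump the code from memory. A userspace approximation of what gets refused, with access(2) standing in for the kernel's inode_permission() call (a sketch):

#include <stdio.h>
#include <unistd.h>

/* 1 = ptracing a process running 'path' should be refused (userspace view) */
static int ptrace_readexec_denied(const char *path)
{
	/* exec-only binaries are the interesting case: the tracer could
	 * otherwise recover code it is not permitted to read from disk */
	return access(path, R_OK) != 0 && access(path, X_OK) == 0;
}

int main(int argc, char **argv)
{
	if (argc != 2)
		return 2;
	puts(ptrace_readexec_denied(argv[1]) ? "denied" : "allowed");
	return 0;
}
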
60949diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
60950new file mode 100644
60951index 0000000..5c00416
60952--- /dev/null
60953+++ b/grsecurity/grsec_sig.c
60954@@ -0,0 +1,222 @@
60955+#include <linux/kernel.h>
60956+#include <linux/sched.h>
60957+#include <linux/delay.h>
60958+#include <linux/grsecurity.h>
60959+#include <linux/grinternal.h>
60960+#include <linux/hardirq.h>
60961+
60962+char *signames[] = {
60963+ [SIGSEGV] = "Segmentation fault",
60964+ [SIGILL] = "Illegal instruction",
60965+ [SIGABRT] = "Abort",
60966+ [SIGBUS] = "Invalid alignment/Bus error"
60967+};
60968+
60969+void
60970+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
60971+{
60972+#ifdef CONFIG_GRKERNSEC_SIGNAL
60973+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
60974+ (sig == SIGABRT) || (sig == SIGBUS))) {
60975+ if (t->pid == current->pid) {
60976+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
60977+ } else {
60978+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
60979+ }
60980+ }
60981+#endif
60982+ return;
60983+}
60984+
60985+int
60986+gr_handle_signal(const struct task_struct *p, const int sig)
60987+{
60988+#ifdef CONFIG_GRKERNSEC
60989+ /* ignore the 0 signal for protected task checks */
60990+ if (current->pid > 1 && sig && gr_check_protected_task(p)) {
60991+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
60992+ return -EPERM;
60993+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
60994+ return -EPERM;
60995+ }
60996+#endif
60997+ return 0;
60998+}
60999+
61000+#ifdef CONFIG_GRKERNSEC
61001+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
61002+
61003+int gr_fake_force_sig(int sig, struct task_struct *t)
61004+{
61005+ unsigned long int flags;
61006+ int ret, blocked, ignored;
61007+ struct k_sigaction *action;
61008+
61009+ spin_lock_irqsave(&t->sighand->siglock, flags);
61010+ action = &t->sighand->action[sig-1];
61011+ ignored = action->sa.sa_handler == SIG_IGN;
61012+ blocked = sigismember(&t->blocked, sig);
61013+ if (blocked || ignored) {
61014+ action->sa.sa_handler = SIG_DFL;
61015+ if (blocked) {
61016+ sigdelset(&t->blocked, sig);
61017+ recalc_sigpending_and_wake(t);
61018+ }
61019+ }
61020+ if (action->sa.sa_handler == SIG_DFL)
61021+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
61022+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
61023+
61024+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
61025+
61026+ return ret;
61027+}
61028+#endif
61029+
61030+#ifdef CONFIG_GRKERNSEC_BRUTE
61031+#define GR_USER_BAN_TIME (15 * 60)
61032+#define GR_DAEMON_BRUTE_TIME (30 * 60)
61033+
61034+static int __get_dumpable(unsigned long mm_flags)
61035+{
61036+ int ret;
61037+
61038+ ret = mm_flags & MMF_DUMPABLE_MASK;
61039+ return (ret >= 2) ? 2 : ret;
61040+}
61041+#endif
61042+
61043+void gr_handle_brute_attach(unsigned long mm_flags)
61044+{
61045+#ifdef CONFIG_GRKERNSEC_BRUTE
61046+ struct task_struct *p = current;
61047+ kuid_t uid = GLOBAL_ROOT_UID;
61048+ int daemon = 0;
61049+
61050+ if (!grsec_enable_brute)
61051+ return;
61052+
61053+ rcu_read_lock();
61054+ read_lock(&tasklist_lock);
61055+ read_lock(&grsec_exec_file_lock);
61056+ if (p->real_parent && p->real_parent->exec_file == p->exec_file) {
61057+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
61058+ p->real_parent->brute = 1;
61059+ daemon = 1;
61060+ } else {
61061+ const struct cred *cred = __task_cred(p), *cred2;
61062+ struct task_struct *tsk, *tsk2;
61063+
61064+ if (!__get_dumpable(mm_flags) && !uid_eq(cred->uid, GLOBAL_ROOT_UID)) {
61065+ struct user_struct *user;
61066+
61067+ uid = cred->uid;
61068+
61069+ /* this is put upon execution past expiration */
61070+ user = find_user(uid);
61071+ if (user == NULL)
61072+ goto unlock;
61073+ user->banned = 1;
61074+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
61075+ if (user->ban_expires == ~0UL)
61076+ user->ban_expires--;
61077+
61078+ do_each_thread(tsk2, tsk) {
61079+ cred2 = __task_cred(tsk);
61080+ if (tsk != p && uid_eq(cred2->uid, uid))
61081+ gr_fake_force_sig(SIGKILL, tsk);
61082+ } while_each_thread(tsk2, tsk);
61083+ }
61084+ }
61085+unlock:
61086+ read_unlock(&grsec_exec_file_lock);
61087+ read_unlock(&tasklist_lock);
61088+ rcu_read_unlock();
61089+
61090+ if (!uid_eq(uid, GLOBAL_ROOT_UID))
61091+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n",
61092+ from_kuid_munged(&init_user_ns, uid), GR_USER_BAN_TIME / 60);
61093+ else if (daemon)
61094+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
61095+
61096+#endif
61097+ return;
61098+}
61099+
61100+void gr_handle_brute_check(void)
61101+{
61102+#ifdef CONFIG_GRKERNSEC_BRUTE
61103+ struct task_struct *p = current;
61104+
61105+ if (unlikely(p->brute)) {
61106+ if (!grsec_enable_brute)
61107+ p->brute = 0;
61108+ else if (time_before(get_seconds(), p->brute_expires))
61109+ msleep(30 * 1000);
61110+ }
61111+#endif
61112+ return;
61113+}
61114+
61115+void gr_handle_kernel_exploit(void)
61116+{
61117+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
61118+ const struct cred *cred;
61119+ struct task_struct *tsk, *tsk2;
61120+ struct user_struct *user;
61121+ kuid_t uid;
61122+
61123+ if (in_irq() || in_serving_softirq() || in_nmi())
61124+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
61125+
61126+ uid = current_uid();
61127+
61128+ if (uid_eq(uid, GLOBAL_ROOT_UID))
61129+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
61130+ else {
61131+ /* kill all the processes of this user, hold a reference
61132+ to their creds struct, and prevent them from creating
61133+ another process until system reset
61134+ */
61135+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
61136+ from_kuid_munged(&init_user_ns, uid));
61137+ /* we intentionally leak this ref */
61138+ user = get_uid(current->cred->user);
61139+ if (user) {
61140+ user->banned = 1;
61141+ user->ban_expires = ~0UL;
61142+ }
61143+
61144+ read_lock(&tasklist_lock);
61145+ do_each_thread(tsk2, tsk) {
61146+ cred = __task_cred(tsk);
61147+ if (uid_eq(cred->uid, uid))
61148+ gr_fake_force_sig(SIGKILL, tsk);
61149+ } while_each_thread(tsk2, tsk);
61150+ read_unlock(&tasklist_lock);
61151+ }
61152+#endif
61153+}
61154+
61155+int __gr_process_user_ban(struct user_struct *user)
61156+{
61157+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
61158+ if (unlikely(user->banned)) {
61159+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
61160+ user->banned = 0;
61161+ user->ban_expires = 0;
61162+ free_uid(user);
61163+ } else
61164+ return -EPERM;
61165+ }
61166+#endif
61167+ return 0;
61168+}
61169+
61170+int gr_process_user_ban(void)
61171+{
61172+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
61173+ return __gr_process_user_ban(current->cred->user);
61174+#endif
61175+ return 0;
61176+}
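
The brute-force deterrence above bans the offending uid for GR_USER_BAN_TIME seconds (or flags a forking daemon for a 30-minute slow-down window) and reserves ~0UL to mean "banned until reboot", which is why gr_handle_brute_attach() decrements an expiry that happens to land on ~0UL. A minimal sketch of that bookkeeping, assuming time(NULL) in place of get_seconds():

/* Illustrative sketch only, not part of the patch: models the
 * user_struct ban fields used above. */
#include <stdio.h>
#include <time.h>

#define GR_USER_BAN_TIME (15 * 60)

struct fake_user { unsigned long ban_expires; int banned; };

static void ban(struct fake_user *u)
{
	u->banned = 1;
	u->ban_expires = (unsigned long)time(NULL) + GR_USER_BAN_TIME;
	if (u->ban_expires == ~0UL)	/* ~0UL is reserved for "until reboot" */
		u->ban_expires--;
}

static int banned_now(const struct fake_user *u)
{
	return u->banned &&
	       (u->ban_expires == ~0UL ||
	        (unsigned long)time(NULL) < u->ban_expires);
}

int main(void)
{
	struct fake_user u = { 0, 0 };

	ban(&u);
	printf("banned: %d\n", banned_now(&u));
	return 0;
}
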
61177diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
61178new file mode 100644
61179index 0000000..4030d57
61180--- /dev/null
61181+++ b/grsecurity/grsec_sock.c
61182@@ -0,0 +1,244 @@
61183+#include <linux/kernel.h>
61184+#include <linux/module.h>
61185+#include <linux/sched.h>
61186+#include <linux/file.h>
61187+#include <linux/net.h>
61188+#include <linux/in.h>
61189+#include <linux/ip.h>
61190+#include <net/sock.h>
61191+#include <net/inet_sock.h>
61192+#include <linux/grsecurity.h>
61193+#include <linux/grinternal.h>
61194+#include <linux/gracl.h>
61195+
61196+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
61197+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
61198+
61199+EXPORT_SYMBOL(gr_search_udp_recvmsg);
61200+EXPORT_SYMBOL(gr_search_udp_sendmsg);
61201+
61202+#ifdef CONFIG_UNIX_MODULE
61203+EXPORT_SYMBOL(gr_acl_handle_unix);
61204+EXPORT_SYMBOL(gr_acl_handle_mknod);
61205+EXPORT_SYMBOL(gr_handle_chroot_unix);
61206+EXPORT_SYMBOL(gr_handle_create);
61207+#endif
61208+
61209+#ifdef CONFIG_GRKERNSEC
61210+#define gr_conn_table_size 32749
61211+struct conn_table_entry {
61212+ struct conn_table_entry *next;
61213+ struct signal_struct *sig;
61214+};
61215+
61216+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
61217+DEFINE_SPINLOCK(gr_conn_table_lock);
61218+
61219+extern const char * gr_socktype_to_name(unsigned char type);
61220+extern const char * gr_proto_to_name(unsigned char proto);
61221+extern const char * gr_sockfamily_to_name(unsigned char family);
61222+
61223+static __inline__ int
61224+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
61225+{
61226+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
61227+}
61228+
61229+static __inline__ int
61230+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
61231+ __u16 sport, __u16 dport)
61232+{
61233+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
61234+ sig->gr_sport == sport && sig->gr_dport == dport))
61235+ return 1;
61236+ else
61237+ return 0;
61238+}
61239+
61240+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
61241+{
61242+ struct conn_table_entry **match;
61243+ unsigned int index;
61244+
61245+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
61246+ sig->gr_sport, sig->gr_dport,
61247+ gr_conn_table_size);
61248+
61249+ newent->sig = sig;
61250+
61251+ match = &gr_conn_table[index];
61252+ newent->next = *match;
61253+ *match = newent;
61254+
61255+ return;
61256+}
61257+
61258+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
61259+{
61260+ struct conn_table_entry *match, *last = NULL;
61261+ unsigned int index;
61262+
61263+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
61264+ sig->gr_sport, sig->gr_dport,
61265+ gr_conn_table_size);
61266+
61267+ match = gr_conn_table[index];
61268+ while (match && !conn_match(match->sig,
61269+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
61270+ sig->gr_dport)) {
61271+ last = match;
61272+ match = match->next;
61273+ }
61274+
61275+ if (match) {
61276+ if (last)
61277+ last->next = match->next;
61278+ else
61279+ gr_conn_table[index] = NULL;
61280+ kfree(match);
61281+ }
61282+
61283+ return;
61284+}
61285+
61286+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
61287+ __u16 sport, __u16 dport)
61288+{
61289+ struct conn_table_entry *match;
61290+ unsigned int index;
61291+
61292+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
61293+
61294+ match = gr_conn_table[index];
61295+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
61296+ match = match->next;
61297+
61298+ if (match)
61299+ return match->sig;
61300+ else
61301+ return NULL;
61302+}
61303+
61304+#endif
61305+
61306+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
61307+{
61308+#ifdef CONFIG_GRKERNSEC
61309+ struct signal_struct *sig = task->signal;
61310+ struct conn_table_entry *newent;
61311+
61312+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
61313+ if (newent == NULL)
61314+ return;
61315+ /* no bh lock needed since we are called with bh disabled */
61316+ spin_lock(&gr_conn_table_lock);
61317+ gr_del_task_from_ip_table_nolock(sig);
61318+ sig->gr_saddr = inet->inet_rcv_saddr;
61319+ sig->gr_daddr = inet->inet_daddr;
61320+ sig->gr_sport = inet->inet_sport;
61321+ sig->gr_dport = inet->inet_dport;
61322+ gr_add_to_task_ip_table_nolock(sig, newent);
61323+ spin_unlock(&gr_conn_table_lock);
61324+#endif
61325+ return;
61326+}
61327+
61328+void gr_del_task_from_ip_table(struct task_struct *task)
61329+{
61330+#ifdef CONFIG_GRKERNSEC
61331+ spin_lock_bh(&gr_conn_table_lock);
61332+ gr_del_task_from_ip_table_nolock(task->signal);
61333+ spin_unlock_bh(&gr_conn_table_lock);
61334+#endif
61335+ return;
61336+}
61337+
61338+void
61339+gr_attach_curr_ip(const struct sock *sk)
61340+{
61341+#ifdef CONFIG_GRKERNSEC
61342+ struct signal_struct *p, *set;
61343+ const struct inet_sock *inet = inet_sk(sk);
61344+
61345+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
61346+ return;
61347+
61348+ set = current->signal;
61349+
61350+ spin_lock_bh(&gr_conn_table_lock);
61351+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
61352+ inet->inet_dport, inet->inet_sport);
61353+ if (unlikely(p != NULL)) {
61354+ set->curr_ip = p->curr_ip;
61355+ set->used_accept = 1;
61356+ gr_del_task_from_ip_table_nolock(p);
61357+ spin_unlock_bh(&gr_conn_table_lock);
61358+ return;
61359+ }
61360+ spin_unlock_bh(&gr_conn_table_lock);
61361+
61362+ set->curr_ip = inet->inet_daddr;
61363+ set->used_accept = 1;
61364+#endif
61365+ return;
61366+}
61367+
61368+int
61369+gr_handle_sock_all(const int family, const int type, const int protocol)
61370+{
61371+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
61372+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
61373+ (family != AF_UNIX)) {
61374+ if (family == AF_INET)
61375+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
61376+ else
61377+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
61378+ return -EACCES;
61379+ }
61380+#endif
61381+ return 0;
61382+}
61383+
61384+int
61385+gr_handle_sock_server(const struct sockaddr *sck)
61386+{
61387+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
61388+ if (grsec_enable_socket_server &&
61389+ in_group_p(grsec_socket_server_gid) &&
61390+ sck && (sck->sa_family != AF_UNIX) &&
61391+ (sck->sa_family != AF_LOCAL)) {
61392+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
61393+ return -EACCES;
61394+ }
61395+#endif
61396+ return 0;
61397+}
61398+
61399+int
61400+gr_handle_sock_server_other(const struct sock *sck)
61401+{
61402+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
61403+ if (grsec_enable_socket_server &&
61404+ in_group_p(grsec_socket_server_gid) &&
61405+ sck && (sck->sk_family != AF_UNIX) &&
61406+ (sck->sk_family != AF_LOCAL)) {
61407+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
61408+ return -EACCES;
61409+ }
61410+#endif
61411+ return 0;
61412+}
61413+
61414+int
61415+gr_handle_sock_client(const struct sockaddr *sck)
61416+{
61417+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
61418+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
61419+ sck && (sck->sa_family != AF_UNIX) &&
61420+ (sck->sa_family != AF_LOCAL)) {
61421+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
61422+ return -EACCES;
61423+ }
61424+#endif
61425+ return 0;
61426+}
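
conn_hash() above folds the TCP 4-tuple into one of 32749 buckets; the table size is prime, so the modulo spreads addresses and ports reasonably even though the mix is only a sum of shifted fields. A stand-alone sketch, assuming host-byte-order inputs for readability (the kernel simply passes the inet_sock fields as stored):

/* Illustrative sketch only, not part of the patch. */
#include <stdio.h>
#include <stdint.h>

#define GR_CONN_TABLE_SIZE 32749	/* mirrors gr_conn_table_size */

static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
			      uint16_t sport, uint16_t dport)
{
	return (daddr + saddr + ((uint32_t)sport << 8) +
		((uint32_t)dport << 16)) % GR_CONN_TABLE_SIZE;
}

int main(void)
{
	/* 192.168.1.10:40000 -> 10.0.0.1:80, host byte order for brevity */
	printf("bucket: %u\n",
	       conn_hash(0xc0a8010a, 0x0a000001, 40000, 80));
	return 0;
}
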
61427diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
61428new file mode 100644
61429index 0000000..f55ef0f
61430--- /dev/null
61431+++ b/grsecurity/grsec_sysctl.c
61432@@ -0,0 +1,469 @@
61433+#include <linux/kernel.h>
61434+#include <linux/sched.h>
61435+#include <linux/sysctl.h>
61436+#include <linux/grsecurity.h>
61437+#include <linux/grinternal.h>
61438+
61439+int
61440+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
61441+{
61442+#ifdef CONFIG_GRKERNSEC_SYSCTL
61443+ if (dirname == NULL || name == NULL)
61444+ return 0;
61445+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
61446+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
61447+ return -EACCES;
61448+ }
61449+#endif
61450+ return 0;
61451+}
61452+
61453+#ifdef CONFIG_GRKERNSEC_ROFS
61454+static int __maybe_unused one = 1;
61455+#endif
61456+
61457+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
61458+struct ctl_table grsecurity_table[] = {
61459+#ifdef CONFIG_GRKERNSEC_SYSCTL
61460+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
61461+#ifdef CONFIG_GRKERNSEC_IO
61462+ {
61463+ .procname = "disable_priv_io",
61464+ .data = &grsec_disable_privio,
61465+ .maxlen = sizeof(int),
61466+ .mode = 0600,
61467+ .proc_handler = &proc_dointvec,
61468+ },
61469+#endif
61470+#endif
61471+#ifdef CONFIG_GRKERNSEC_LINK
61472+ {
61473+ .procname = "linking_restrictions",
61474+ .data = &grsec_enable_link,
61475+ .maxlen = sizeof(int),
61476+ .mode = 0600,
61477+ .proc_handler = &proc_dointvec,
61478+ },
61479+#endif
61480+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
61481+ {
61482+ .procname = "enforce_symlinksifowner",
61483+ .data = &grsec_enable_symlinkown,
61484+ .maxlen = sizeof(int),
61485+ .mode = 0600,
61486+ .proc_handler = &proc_dointvec,
61487+ },
61488+ {
61489+ .procname = "symlinkown_gid",
61490+ .data = &grsec_symlinkown_gid,
61491+ .maxlen = sizeof(int),
61492+ .mode = 0600,
61493+ .proc_handler = &proc_dointvec,
61494+ },
61495+#endif
61496+#ifdef CONFIG_GRKERNSEC_BRUTE
61497+ {
61498+ .procname = "deter_bruteforce",
61499+ .data = &grsec_enable_brute,
61500+ .maxlen = sizeof(int),
61501+ .mode = 0600,
61502+ .proc_handler = &proc_dointvec,
61503+ },
61504+#endif
61505+#ifdef CONFIG_GRKERNSEC_FIFO
61506+ {
61507+ .procname = "fifo_restrictions",
61508+ .data = &grsec_enable_fifo,
61509+ .maxlen = sizeof(int),
61510+ .mode = 0600,
61511+ .proc_handler = &proc_dointvec,
61512+ },
61513+#endif
61514+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
61515+ {
61516+ .procname = "ptrace_readexec",
61517+ .data = &grsec_enable_ptrace_readexec,
61518+ .maxlen = sizeof(int),
61519+ .mode = 0600,
61520+ .proc_handler = &proc_dointvec,
61521+ },
61522+#endif
61523+#ifdef CONFIG_GRKERNSEC_SETXID
61524+ {
61525+ .procname = "consistent_setxid",
61526+ .data = &grsec_enable_setxid,
61527+ .maxlen = sizeof(int),
61528+ .mode = 0600,
61529+ .proc_handler = &proc_dointvec,
61530+ },
61531+#endif
61532+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
61533+ {
61534+ .procname = "ip_blackhole",
61535+ .data = &grsec_enable_blackhole,
61536+ .maxlen = sizeof(int),
61537+ .mode = 0600,
61538+ .proc_handler = &proc_dointvec,
61539+ },
61540+ {
61541+ .procname = "lastack_retries",
61542+ .data = &grsec_lastack_retries,
61543+ .maxlen = sizeof(int),
61544+ .mode = 0600,
61545+ .proc_handler = &proc_dointvec,
61546+ },
61547+#endif
61548+#ifdef CONFIG_GRKERNSEC_EXECLOG
61549+ {
61550+ .procname = "exec_logging",
61551+ .data = &grsec_enable_execlog,
61552+ .maxlen = sizeof(int),
61553+ .mode = 0600,
61554+ .proc_handler = &proc_dointvec,
61555+ },
61556+#endif
61557+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
61558+ {
61559+ .procname = "rwxmap_logging",
61560+ .data = &grsec_enable_log_rwxmaps,
61561+ .maxlen = sizeof(int),
61562+ .mode = 0600,
61563+ .proc_handler = &proc_dointvec,
61564+ },
61565+#endif
61566+#ifdef CONFIG_GRKERNSEC_SIGNAL
61567+ {
61568+ .procname = "signal_logging",
61569+ .data = &grsec_enable_signal,
61570+ .maxlen = sizeof(int),
61571+ .mode = 0600,
61572+ .proc_handler = &proc_dointvec,
61573+ },
61574+#endif
61575+#ifdef CONFIG_GRKERNSEC_FORKFAIL
61576+ {
61577+ .procname = "forkfail_logging",
61578+ .data = &grsec_enable_forkfail,
61579+ .maxlen = sizeof(int),
61580+ .mode = 0600,
61581+ .proc_handler = &proc_dointvec,
61582+ },
61583+#endif
61584+#ifdef CONFIG_GRKERNSEC_TIME
61585+ {
61586+ .procname = "timechange_logging",
61587+ .data = &grsec_enable_time,
61588+ .maxlen = sizeof(int),
61589+ .mode = 0600,
61590+ .proc_handler = &proc_dointvec,
61591+ },
61592+#endif
61593+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
61594+ {
61595+ .procname = "chroot_deny_shmat",
61596+ .data = &grsec_enable_chroot_shmat,
61597+ .maxlen = sizeof(int),
61598+ .mode = 0600,
61599+ .proc_handler = &proc_dointvec,
61600+ },
61601+#endif
61602+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
61603+ {
61604+ .procname = "chroot_deny_unix",
61605+ .data = &grsec_enable_chroot_unix,
61606+ .maxlen = sizeof(int),
61607+ .mode = 0600,
61608+ .proc_handler = &proc_dointvec,
61609+ },
61610+#endif
61611+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
61612+ {
61613+ .procname = "chroot_deny_mount",
61614+ .data = &grsec_enable_chroot_mount,
61615+ .maxlen = sizeof(int),
61616+ .mode = 0600,
61617+ .proc_handler = &proc_dointvec,
61618+ },
61619+#endif
61620+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
61621+ {
61622+ .procname = "chroot_deny_fchdir",
61623+ .data = &grsec_enable_chroot_fchdir,
61624+ .maxlen = sizeof(int),
61625+ .mode = 0600,
61626+ .proc_handler = &proc_dointvec,
61627+ },
61628+#endif
61629+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
61630+ {
61631+ .procname = "chroot_deny_chroot",
61632+ .data = &grsec_enable_chroot_double,
61633+ .maxlen = sizeof(int),
61634+ .mode = 0600,
61635+ .proc_handler = &proc_dointvec,
61636+ },
61637+#endif
61638+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
61639+ {
61640+ .procname = "chroot_deny_pivot",
61641+ .data = &grsec_enable_chroot_pivot,
61642+ .maxlen = sizeof(int),
61643+ .mode = 0600,
61644+ .proc_handler = &proc_dointvec,
61645+ },
61646+#endif
61647+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
61648+ {
61649+ .procname = "chroot_enforce_chdir",
61650+ .data = &grsec_enable_chroot_chdir,
61651+ .maxlen = sizeof(int),
61652+ .mode = 0600,
61653+ .proc_handler = &proc_dointvec,
61654+ },
61655+#endif
61656+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
61657+ {
61658+ .procname = "chroot_deny_chmod",
61659+ .data = &grsec_enable_chroot_chmod,
61660+ .maxlen = sizeof(int),
61661+ .mode = 0600,
61662+ .proc_handler = &proc_dointvec,
61663+ },
61664+#endif
61665+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
61666+ {
61667+ .procname = "chroot_deny_mknod",
61668+ .data = &grsec_enable_chroot_mknod,
61669+ .maxlen = sizeof(int),
61670+ .mode = 0600,
61671+ .proc_handler = &proc_dointvec,
61672+ },
61673+#endif
61674+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
61675+ {
61676+ .procname = "chroot_restrict_nice",
61677+ .data = &grsec_enable_chroot_nice,
61678+ .maxlen = sizeof(int),
61679+ .mode = 0600,
61680+ .proc_handler = &proc_dointvec,
61681+ },
61682+#endif
61683+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
61684+ {
61685+ .procname = "chroot_execlog",
61686+ .data = &grsec_enable_chroot_execlog,
61687+ .maxlen = sizeof(int),
61688+ .mode = 0600,
61689+ .proc_handler = &proc_dointvec,
61690+ },
61691+#endif
61692+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
61693+ {
61694+ .procname = "chroot_caps",
61695+ .data = &grsec_enable_chroot_caps,
61696+ .maxlen = sizeof(int),
61697+ .mode = 0600,
61698+ .proc_handler = &proc_dointvec,
61699+ },
61700+#endif
61701+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
61702+ {
61703+ .procname = "chroot_deny_sysctl",
61704+ .data = &grsec_enable_chroot_sysctl,
61705+ .maxlen = sizeof(int),
61706+ .mode = 0600,
61707+ .proc_handler = &proc_dointvec,
61708+ },
61709+#endif
61710+#ifdef CONFIG_GRKERNSEC_TPE
61711+ {
61712+ .procname = "tpe",
61713+ .data = &grsec_enable_tpe,
61714+ .maxlen = sizeof(int),
61715+ .mode = 0600,
61716+ .proc_handler = &proc_dointvec,
61717+ },
61718+ {
61719+ .procname = "tpe_gid",
61720+ .data = &grsec_tpe_gid,
61721+ .maxlen = sizeof(int),
61722+ .mode = 0600,
61723+ .proc_handler = &proc_dointvec,
61724+ },
61725+#endif
61726+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
61727+ {
61728+ .procname = "tpe_invert",
61729+ .data = &grsec_enable_tpe_invert,
61730+ .maxlen = sizeof(int),
61731+ .mode = 0600,
61732+ .proc_handler = &proc_dointvec,
61733+ },
61734+#endif
61735+#ifdef CONFIG_GRKERNSEC_TPE_ALL
61736+ {
61737+ .procname = "tpe_restrict_all",
61738+ .data = &grsec_enable_tpe_all,
61739+ .maxlen = sizeof(int),
61740+ .mode = 0600,
61741+ .proc_handler = &proc_dointvec,
61742+ },
61743+#endif
61744+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
61745+ {
61746+ .procname = "socket_all",
61747+ .data = &grsec_enable_socket_all,
61748+ .maxlen = sizeof(int),
61749+ .mode = 0600,
61750+ .proc_handler = &proc_dointvec,
61751+ },
61752+ {
61753+ .procname = "socket_all_gid",
61754+ .data = &grsec_socket_all_gid,
61755+ .maxlen = sizeof(int),
61756+ .mode = 0600,
61757+ .proc_handler = &proc_dointvec,
61758+ },
61759+#endif
61760+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
61761+ {
61762+ .procname = "socket_client",
61763+ .data = &grsec_enable_socket_client,
61764+ .maxlen = sizeof(int),
61765+ .mode = 0600,
61766+ .proc_handler = &proc_dointvec,
61767+ },
61768+ {
61769+ .procname = "socket_client_gid",
61770+ .data = &grsec_socket_client_gid,
61771+ .maxlen = sizeof(int),
61772+ .mode = 0600,
61773+ .proc_handler = &proc_dointvec,
61774+ },
61775+#endif
61776+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
61777+ {
61778+ .procname = "socket_server",
61779+ .data = &grsec_enable_socket_server,
61780+ .maxlen = sizeof(int),
61781+ .mode = 0600,
61782+ .proc_handler = &proc_dointvec,
61783+ },
61784+ {
61785+ .procname = "socket_server_gid",
61786+ .data = &grsec_socket_server_gid,
61787+ .maxlen = sizeof(int),
61788+ .mode = 0600,
61789+ .proc_handler = &proc_dointvec,
61790+ },
61791+#endif
61792+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
61793+ {
61794+ .procname = "audit_group",
61795+ .data = &grsec_enable_group,
61796+ .maxlen = sizeof(int),
61797+ .mode = 0600,
61798+ .proc_handler = &proc_dointvec,
61799+ },
61800+ {
61801+ .procname = "audit_gid",
61802+ .data = &grsec_audit_gid,
61803+ .maxlen = sizeof(int),
61804+ .mode = 0600,
61805+ .proc_handler = &proc_dointvec,
61806+ },
61807+#endif
61808+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
61809+ {
61810+ .procname = "audit_chdir",
61811+ .data = &grsec_enable_chdir,
61812+ .maxlen = sizeof(int),
61813+ .mode = 0600,
61814+ .proc_handler = &proc_dointvec,
61815+ },
61816+#endif
61817+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
61818+ {
61819+ .procname = "audit_mount",
61820+ .data = &grsec_enable_mount,
61821+ .maxlen = sizeof(int),
61822+ .mode = 0600,
61823+ .proc_handler = &proc_dointvec,
61824+ },
61825+#endif
61826+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
61827+ {
61828+ .procname = "audit_textrel",
61829+ .data = &grsec_enable_audit_textrel,
61830+ .maxlen = sizeof(int),
61831+ .mode = 0600,
61832+ .proc_handler = &proc_dointvec,
61833+ },
61834+#endif
61835+#ifdef CONFIG_GRKERNSEC_DMESG
61836+ {
61837+ .procname = "dmesg",
61838+ .data = &grsec_enable_dmesg,
61839+ .maxlen = sizeof(int),
61840+ .mode = 0600,
61841+ .proc_handler = &proc_dointvec,
61842+ },
61843+#endif
61844+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61845+ {
61846+ .procname = "chroot_findtask",
61847+ .data = &grsec_enable_chroot_findtask,
61848+ .maxlen = sizeof(int),
61849+ .mode = 0600,
61850+ .proc_handler = &proc_dointvec,
61851+ },
61852+#endif
61853+#ifdef CONFIG_GRKERNSEC_RESLOG
61854+ {
61855+ .procname = "resource_logging",
61856+ .data = &grsec_resource_logging,
61857+ .maxlen = sizeof(int),
61858+ .mode = 0600,
61859+ .proc_handler = &proc_dointvec,
61860+ },
61861+#endif
61862+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
61863+ {
61864+ .procname = "audit_ptrace",
61865+ .data = &grsec_enable_audit_ptrace,
61866+ .maxlen = sizeof(int),
61867+ .mode = 0600,
61868+ .proc_handler = &proc_dointvec,
61869+ },
61870+#endif
61871+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
61872+ {
61873+ .procname = "harden_ptrace",
61874+ .data = &grsec_enable_harden_ptrace,
61875+ .maxlen = sizeof(int),
61876+ .mode = 0600,
61877+ .proc_handler = &proc_dointvec,
61878+ },
61879+#endif
61880+ {
61881+ .procname = "grsec_lock",
61882+ .data = &grsec_lock,
61883+ .maxlen = sizeof(int),
61884+ .mode = 0600,
61885+ .proc_handler = &proc_dointvec,
61886+ },
61887+#endif
61888+#ifdef CONFIG_GRKERNSEC_ROFS
61889+ {
61890+ .procname = "romount_protect",
61891+ .data = &grsec_enable_rofs,
61892+ .maxlen = sizeof(int),
61893+ .mode = 0600,
61894+ .proc_handler = &proc_dointvec_minmax,
61895+ .extra1 = &one,
61896+ .extra2 = &one,
61897+ },
61898+#endif
61899+ { }
61900+};
61901+#endif
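
Every tunable above is an int exposed at mode 0600 through proc_dointvec; romount_protect alone uses proc_dointvec_minmax with extra1 == extra2 == &one, so the only value a write can store is 1 and the protection cannot be switched back off through this handler. A sketch of that clamp, assuming a simplified handler signature:

/* Illustrative sketch only, not part of the patch: models the
 * min/max clamp that proc_dointvec_minmax applies. */
#include <stdio.h>

static int dointvec_minmax(int *val, int newval, int min, int max)
{
	if (newval < min || newval > max)
		return -1;	/* -EINVAL in the kernel */
	*val = newval;
	return 0;
}

int main(void)
{
	int rofs = 0, one = 1;

	printf("write 1: %d\n", dointvec_minmax(&rofs, 1, one, one)); /* accepted */
	printf("write 0: %d\n", dointvec_minmax(&rofs, 0, one, one)); /* rejected */
	return 0;
}
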
61902diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
61903new file mode 100644
61904index 0000000..0dc13c3
61905--- /dev/null
61906+++ b/grsecurity/grsec_time.c
61907@@ -0,0 +1,16 @@
61908+#include <linux/kernel.h>
61909+#include <linux/sched.h>
61910+#include <linux/grinternal.h>
61911+#include <linux/module.h>
61912+
61913+void
61914+gr_log_timechange(void)
61915+{
61916+#ifdef CONFIG_GRKERNSEC_TIME
61917+ if (grsec_enable_time)
61918+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
61919+#endif
61920+ return;
61921+}
61922+
61923+EXPORT_SYMBOL(gr_log_timechange);
61924diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
61925new file mode 100644
61926index 0000000..07e0dc0
61927--- /dev/null
61928+++ b/grsecurity/grsec_tpe.c
61929@@ -0,0 +1,73 @@
61930+#include <linux/kernel.h>
61931+#include <linux/sched.h>
61932+#include <linux/file.h>
61933+#include <linux/fs.h>
61934+#include <linux/grinternal.h>
61935+
61936+extern int gr_acl_tpe_check(void);
61937+
61938+int
61939+gr_tpe_allow(const struct file *file)
61940+{
61941+#ifdef CONFIG_GRKERNSEC
61942+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
61943+ const struct cred *cred = current_cred();
61944+ char *msg = NULL;
61945+ char *msg2 = NULL;
61946+
61947+ // never restrict root
61948+ if (!cred->uid)
61949+ return 1;
61950+
61951+ if (grsec_enable_tpe) {
61952+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
61953+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
61954+ msg = "not being in trusted group";
61955+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
61956+ msg = "being in untrusted group";
61957+#else
61958+ if (in_group_p(grsec_tpe_gid))
61959+ msg = "being in untrusted group";
61960+#endif
61961+ }
61962+ if (!msg && gr_acl_tpe_check())
61963+ msg = "being in untrusted role";
61964+
61965+ // not in any affected group/role
61966+ if (!msg)
61967+ goto next_check;
61968+
61969+ if (inode->i_uid)
61970+ msg2 = "file in non-root-owned directory";
61971+ else if (inode->i_mode & S_IWOTH)
61972+ msg2 = "file in world-writable directory";
61973+ else if (inode->i_mode & S_IWGRP)
61974+ msg2 = "file in group-writable directory";
61975+
61976+ if (msg && msg2) {
61977+ char fullmsg[70] = {0};
61978+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
61979+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
61980+ return 0;
61981+ }
61982+ msg = NULL;
61983+next_check:
61984+#ifdef CONFIG_GRKERNSEC_TPE_ALL
61985+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
61986+ return 1;
61987+
61988+ if (inode->i_uid && (inode->i_uid != cred->uid))
61989+ msg = "directory not owned by user";
61990+ else if (inode->i_mode & S_IWOTH)
61991+ msg = "file in world-writable directory";
61992+ else if (inode->i_mode & S_IWGRP)
61993+ msg = "file in group-writable directory";
61994+
61995+ if (msg) {
61996+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
61997+ return 0;
61998+ }
61999+#endif
62000+#endif
62001+ return 1;
62002+}
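
gr_tpe_allow() above exempts root, then denies execution for users in the untrusted group or role unless the containing directory is root-owned and neither group- nor world-writable; with TPE_ALL a weaker ownership check applies to everyone else. A condensed user-space model of that flow, assuming plain flags for group/role membership (it approximates, not reproduces, the kernel checks):

/* Illustrative sketch only, not part of the patch. */
#include <stdio.h>

struct dir_info { unsigned int uid; int group_writable, world_writable; };

static int tpe_allow(const struct dir_info *d, int affected, unsigned int uid)
{
	if (uid == 0)
		return 1;			/* never restrict root */
	if (affected &&
	    (d->uid != 0 || d->world_writable || d->group_writable))
		return 0;			/* untrusted user, unsafe dir */
	/* TPE_ALL-style fallback: still require a directory owned by the
	 * user (or root) that nobody else can write to */
	if ((d->uid && d->uid != uid) ||
	    d->world_writable || d->group_writable)
		return 0;
	return 1;
}

int main(void)
{
	struct dir_info tmp = { 0, 0, 1 };	/* root-owned, world-writable */

	printf("exec from /tmp-like dir: %d\n", tpe_allow(&tmp, 1, 1000));
	return 0;
}
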
62003diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
62004new file mode 100644
62005index 0000000..9f7b1ac
62006--- /dev/null
62007+++ b/grsecurity/grsum.c
62008@@ -0,0 +1,61 @@
62009+#include <linux/err.h>
62010+#include <linux/kernel.h>
62011+#include <linux/sched.h>
62012+#include <linux/mm.h>
62013+#include <linux/scatterlist.h>
62014+#include <linux/crypto.h>
62015+#include <linux/gracl.h>
62016+
62017+
62018+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
62019+#error "crypto and sha256 must be built into the kernel"
62020+#endif
62021+
62022+int
62023+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
62024+{
62025+ char *p;
62026+ struct crypto_hash *tfm;
62027+ struct hash_desc desc;
62028+ struct scatterlist sg;
62029+ unsigned char temp_sum[GR_SHA_LEN];
62030+ volatile int retval = 0;
62031+ volatile int dummy = 0;
62032+ unsigned int i;
62033+
62034+ sg_init_table(&sg, 1);
62035+
62036+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
62037+ if (IS_ERR(tfm)) {
62038+ /* should never happen, since sha256 should be built in */
62039+ return 1;
62040+ }
62041+
62042+ desc.tfm = tfm;
62043+ desc.flags = 0;
62044+
62045+ crypto_hash_init(&desc);
62046+
62047+ p = salt;
62048+ sg_set_buf(&sg, p, GR_SALT_LEN);
62049+ crypto_hash_update(&desc, &sg, sg.length);
62050+
62051+ p = entry->pw;
62052+ sg_set_buf(&sg, p, strlen(p));
62053+
62054+ crypto_hash_update(&desc, &sg, sg.length);
62055+
62056+ crypto_hash_final(&desc, temp_sum);
62057+
62058+ memset(entry->pw, 0, GR_PW_LEN);
62059+
62060+ for (i = 0; i < GR_SHA_LEN; i++)
62061+ if (sum[i] != temp_sum[i])
62062+ retval = 1;
62063+ else
62064+ dummy = 1; // waste a cycle
62065+
62066+ crypto_free_hash(tfm);
62067+
62068+ return retval;
62069+}
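
The compare loop in chkpw() always walks all GR_SHA_LEN bytes and performs a dummy write on matching bytes, so comparison time does not reveal how many leading bytes of the hash matched. The same property is more commonly obtained branch-free; a minimal sketch:

/* Illustrative sketch only, not part of the patch: the usual
 * constant-time comparison, accumulating differences with XOR/OR so
 * there is no data-dependent branch at all. */
#include <stdio.h>
#include <stddef.h>

static int const_time_diff(const unsigned char *a, const unsigned char *b,
			   size_t len)
{
	unsigned char acc = 0;
	size_t i;

	for (i = 0; i < len; i++)
		acc |= a[i] ^ b[i];
	return acc != 0;
}

int main(void)
{
	unsigned char x[4] = { 1, 2, 3, 4 }, y[4] = { 1, 2, 3, 5 };

	printf("differs: %d\n", const_time_diff(x, y, sizeof(x)));
	return 0;
}
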
62070diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
62071index 77ff547..181834f 100644
62072--- a/include/asm-generic/4level-fixup.h
62073+++ b/include/asm-generic/4level-fixup.h
62074@@ -13,8 +13,10 @@
62075 #define pmd_alloc(mm, pud, address) \
62076 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
62077 NULL: pmd_offset(pud, address))
62078+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
62079
62080 #define pud_alloc(mm, pgd, address) (pgd)
62081+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
62082 #define pud_offset(pgd, start) (pgd)
62083 #define pud_none(pud) 0
62084 #define pud_bad(pud) 0
62085diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
62086index b7babf0..04ad282 100644
62087--- a/include/asm-generic/atomic-long.h
62088+++ b/include/asm-generic/atomic-long.h
62089@@ -22,6 +22,12 @@
62090
62091 typedef atomic64_t atomic_long_t;
62092
62093+#ifdef CONFIG_PAX_REFCOUNT
62094+typedef atomic64_unchecked_t atomic_long_unchecked_t;
62095+#else
62096+typedef atomic64_t atomic_long_unchecked_t;
62097+#endif
62098+
62099 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
62100
62101 static inline long atomic_long_read(atomic_long_t *l)
62102@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
62103 return (long)atomic64_read(v);
62104 }
62105
62106+#ifdef CONFIG_PAX_REFCOUNT
62107+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
62108+{
62109+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
62110+
62111+ return (long)atomic64_read_unchecked(v);
62112+}
62113+#endif
62114+
62115 static inline void atomic_long_set(atomic_long_t *l, long i)
62116 {
62117 atomic64_t *v = (atomic64_t *)l;
62118@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
62119 atomic64_set(v, i);
62120 }
62121
62122+#ifdef CONFIG_PAX_REFCOUNT
62123+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
62124+{
62125+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
62126+
62127+ atomic64_set_unchecked(v, i);
62128+}
62129+#endif
62130+
62131 static inline void atomic_long_inc(atomic_long_t *l)
62132 {
62133 atomic64_t *v = (atomic64_t *)l;
62134@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
62135 atomic64_inc(v);
62136 }
62137
62138+#ifdef CONFIG_PAX_REFCOUNT
62139+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
62140+{
62141+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
62142+
62143+ atomic64_inc_unchecked(v);
62144+}
62145+#endif
62146+
62147 static inline void atomic_long_dec(atomic_long_t *l)
62148 {
62149 atomic64_t *v = (atomic64_t *)l;
62150@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
62151 atomic64_dec(v);
62152 }
62153
62154+#ifdef CONFIG_PAX_REFCOUNT
62155+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
62156+{
62157+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
62158+
62159+ atomic64_dec_unchecked(v);
62160+}
62161+#endif
62162+
62163 static inline void atomic_long_add(long i, atomic_long_t *l)
62164 {
62165 atomic64_t *v = (atomic64_t *)l;
62166@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
62167 atomic64_add(i, v);
62168 }
62169
62170+#ifdef CONFIG_PAX_REFCOUNT
62171+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
62172+{
62173+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
62174+
62175+ atomic64_add_unchecked(i, v);
62176+}
62177+#endif
62178+
62179 static inline void atomic_long_sub(long i, atomic_long_t *l)
62180 {
62181 atomic64_t *v = (atomic64_t *)l;
62182@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
62183 atomic64_sub(i, v);
62184 }
62185
62186+#ifdef CONFIG_PAX_REFCOUNT
62187+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
62188+{
62189+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
62190+
62191+ atomic64_sub_unchecked(i, v);
62192+}
62193+#endif
62194+
62195 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
62196 {
62197 atomic64_t *v = (atomic64_t *)l;
62198@@ -101,6 +161,15 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
62199 return (long)atomic64_add_return(i, v);
62200 }
62201
62202+#ifdef CONFIG_PAX_REFCOUNT
62203+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
62204+{
62205+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
62206+
62207+ return (long)atomic64_add_return_unchecked(i, v);
62208+}
62209+#endif
62210+
62211 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
62212 {
62213 atomic64_t *v = (atomic64_t *)l;
62214@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
62215 return (long)atomic64_inc_return(v);
62216 }
62217
62218+#ifdef CONFIG_PAX_REFCOUNT
62219+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
62220+{
62221+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
62222+
62223+ return (long)atomic64_inc_return_unchecked(v);
62224+}
62225+#endif
62226+
62227 static inline long atomic_long_dec_return(atomic_long_t *l)
62228 {
62229 atomic64_t *v = (atomic64_t *)l;
62230@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
62231
62232 typedef atomic_t atomic_long_t;
62233
62234+#ifdef CONFIG_PAX_REFCOUNT
62235+typedef atomic_unchecked_t atomic_long_unchecked_t;
62236+#else
62237+typedef atomic_t atomic_long_unchecked_t;
62238+#endif
62239+
62240 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
62241 static inline long atomic_long_read(atomic_long_t *l)
62242 {
62243@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
62244 return (long)atomic_read(v);
62245 }
62246
62247+#ifdef CONFIG_PAX_REFCOUNT
62248+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
62249+{
62250+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
62251+
62252+ return (long)atomic_read_unchecked(v);
62253+}
62254+#endif
62255+
62256 static inline void atomic_long_set(atomic_long_t *l, long i)
62257 {
62258 atomic_t *v = (atomic_t *)l;
62259@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
62260 atomic_set(v, i);
62261 }
62262
62263+#ifdef CONFIG_PAX_REFCOUNT
62264+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
62265+{
62266+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
62267+
62268+ atomic_set_unchecked(v, i);
62269+}
62270+#endif
62271+
62272 static inline void atomic_long_inc(atomic_long_t *l)
62273 {
62274 atomic_t *v = (atomic_t *)l;
62275@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
62276 atomic_inc(v);
62277 }
62278
62279+#ifdef CONFIG_PAX_REFCOUNT
62280+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
62281+{
62282+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
62283+
62284+ atomic_inc_unchecked(v);
62285+}
62286+#endif
62287+
62288 static inline void atomic_long_dec(atomic_long_t *l)
62289 {
62290 atomic_t *v = (atomic_t *)l;
62291@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
62292 atomic_dec(v);
62293 }
62294
62295+#ifdef CONFIG_PAX_REFCOUNT
62296+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
62297+{
62298+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
62299+
62300+ atomic_dec_unchecked(v);
62301+}
62302+#endif
62303+
62304 static inline void atomic_long_add(long i, atomic_long_t *l)
62305 {
62306 atomic_t *v = (atomic_t *)l;
62307@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
62308 atomic_add(i, v);
62309 }
62310
62311+#ifdef CONFIG_PAX_REFCOUNT
62312+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
62313+{
62314+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
62315+
62316+ atomic_add_unchecked(i, v);
62317+}
62318+#endif
62319+
62320 static inline void atomic_long_sub(long i, atomic_long_t *l)
62321 {
62322 atomic_t *v = (atomic_t *)l;
62323@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
62324 atomic_sub(i, v);
62325 }
62326
62327+#ifdef CONFIG_PAX_REFCOUNT
62328+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
62329+{
62330+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
62331+
62332+ atomic_sub_unchecked(i, v);
62333+}
62334+#endif
62335+
62336 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
62337 {
62338 atomic_t *v = (atomic_t *)l;
62339@@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
62340 return (long)atomic_add_return(i, v);
62341 }
62342
62343+#ifdef CONFIG_PAX_REFCOUNT
62344+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
62345+{
62346+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
62347+
62348+ return (long)atomic_add_return_unchecked(i, v);
62349+}
62350+
62351+#endif
62352+
62353 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
62354 {
62355 atomic_t *v = (atomic_t *)l;
62356@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
62357 return (long)atomic_inc_return(v);
62358 }
62359
62360+#ifdef CONFIG_PAX_REFCOUNT
62361+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
62362+{
62363+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
62364+
62365+ return (long)atomic_inc_return_unchecked(v);
62366+}
62367+#endif
62368+
62369 static inline long atomic_long_dec_return(atomic_long_t *l)
62370 {
62371 atomic_t *v = (atomic_t *)l;
62372@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
62373
62374 #endif /* BITS_PER_LONG == 64 */
62375
62376+#ifdef CONFIG_PAX_REFCOUNT
62377+static inline void pax_refcount_needs_these_functions(void)
62378+{
62379+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
62380+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
62381+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
62382+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
62383+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
62384+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
62385+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
62386+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
62387+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
62388+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
62389+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
62390+#ifdef CONFIG_X86
62391+ atomic_clear_mask_unchecked(0, NULL);
62392+ atomic_set_mask_unchecked(0, NULL);
62393+#endif
62394+
62395+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
62396+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
62397+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
62398+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
62399+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
62400+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
62401+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
62402+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
62403+}
62404+#else
62405+#define atomic_read_unchecked(v) atomic_read(v)
62406+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
62407+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
62408+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
62409+#define atomic_inc_unchecked(v) atomic_inc(v)
62410+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
62411+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
62412+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
62413+#define atomic_dec_unchecked(v) atomic_dec(v)
62414+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
62415+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
62416+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
62417+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
62418+
62419+#define atomic_long_read_unchecked(v) atomic_long_read(v)
62420+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
62421+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
62422+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
62423+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
62424+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
62425+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
62426+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
62427+#endif
62428+
62429 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
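
The _unchecked wrappers above give PAX_REFCOUNT an opt-out: ordinary atomics gain overflow detection for reference counts, while counters that may legitimately wrap (statistics, sequence numbers) use the unchecked flavour, which collapses back to the plain ops when the feature is off. A rough stand-alone model of the distinction, assuming a saturation check stands in for the real trap-on-overflow:

/* Illustrative sketch only, not part of the patch. */
#include <stdio.h>
#include <limits.h>

static int checked_inc(int *v)
{
	if (*v == INT_MAX)
		return -1;	/* refcount overflow: would be detected */
	(*v)++;
	return 0;
}

static void unchecked_inc(unsigned int *v)
{
	(*v)++;			/* unsigned wrap is well-defined, like
				 * atomic_inc_unchecked for statistics */
}

int main(void)
{
	int refs = INT_MAX;
	unsigned int stats = UINT_MAX;

	printf("checked: %d\n", checked_inc(&refs));
	unchecked_inc(&stats);
	printf("unchecked wrapped to: %u\n", stats);
	return 0;
}
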
62430diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
62431index 1ced641..c896ee8 100644
62432--- a/include/asm-generic/atomic.h
62433+++ b/include/asm-generic/atomic.h
62434@@ -159,7 +159,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
62435 * Atomically clears the bits set in @mask from @v
62436 */
62437 #ifndef atomic_clear_mask
62438-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
62439+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
62440 {
62441 unsigned long flags;
62442
62443diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
62444index b18ce4f..2ee2843 100644
62445--- a/include/asm-generic/atomic64.h
62446+++ b/include/asm-generic/atomic64.h
62447@@ -16,6 +16,8 @@ typedef struct {
62448 long long counter;
62449 } atomic64_t;
62450
62451+typedef atomic64_t atomic64_unchecked_t;
62452+
62453 #define ATOMIC64_INIT(i) { (i) }
62454
62455 extern long long atomic64_read(const atomic64_t *v);
62456@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
62457 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
62458 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
62459
62460+#define atomic64_read_unchecked(v) atomic64_read(v)
62461+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
62462+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
62463+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
62464+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
62465+#define atomic64_inc_unchecked(v) atomic64_inc(v)
62466+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
62467+#define atomic64_dec_unchecked(v) atomic64_dec(v)
62468+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
62469+
62470 #endif /* _ASM_GENERIC_ATOMIC64_H */
62471diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
62472index 1bfcfe5..e04c5c9 100644
62473--- a/include/asm-generic/cache.h
62474+++ b/include/asm-generic/cache.h
62475@@ -6,7 +6,7 @@
62476 * cache lines need to provide their own cache.h.
62477 */
62478
62479-#define L1_CACHE_SHIFT 5
62480-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
62481+#define L1_CACHE_SHIFT 5UL
62482+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
62483
62484 #endif /* __ASM_GENERIC_CACHE_H */
62485diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
62486index 0d68a1e..b74a761 100644
62487--- a/include/asm-generic/emergency-restart.h
62488+++ b/include/asm-generic/emergency-restart.h
62489@@ -1,7 +1,7 @@
62490 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
62491 #define _ASM_GENERIC_EMERGENCY_RESTART_H
62492
62493-static inline void machine_emergency_restart(void)
62494+static inline __noreturn void machine_emergency_restart(void)
62495 {
62496 machine_restart(NULL);
62497 }
62498diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
62499index 90f99c7..00ce236 100644
62500--- a/include/asm-generic/kmap_types.h
62501+++ b/include/asm-generic/kmap_types.h
62502@@ -2,9 +2,9 @@
62503 #define _ASM_GENERIC_KMAP_TYPES_H
62504
62505 #ifdef __WITH_KM_FENCE
62506-# define KM_TYPE_NR 41
62507+# define KM_TYPE_NR 42
62508 #else
62509-# define KM_TYPE_NR 20
62510+# define KM_TYPE_NR 21
62511 #endif
62512
62513 #endif
62514diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
62515index 9ceb03b..62b0b8f 100644
62516--- a/include/asm-generic/local.h
62517+++ b/include/asm-generic/local.h
62518@@ -23,24 +23,37 @@ typedef struct
62519 atomic_long_t a;
62520 } local_t;
62521
62522+typedef struct {
62523+ atomic_long_unchecked_t a;
62524+} local_unchecked_t;
62525+
62526 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
62527
62528 #define local_read(l) atomic_long_read(&(l)->a)
62529+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
62530 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
62531+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
62532 #define local_inc(l) atomic_long_inc(&(l)->a)
62533+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
62534 #define local_dec(l) atomic_long_dec(&(l)->a)
62535+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
62536 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
62537+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
62538 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
62539+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
62540
62541 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
62542 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
62543 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
62544 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
62545 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
62546+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
62547 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
62548 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
62549+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
62550
62551 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
62552+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
62553 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
62554 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
62555 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
62556diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
62557index 725612b..9cc513a 100644
62558--- a/include/asm-generic/pgtable-nopmd.h
62559+++ b/include/asm-generic/pgtable-nopmd.h
62560@@ -1,14 +1,19 @@
62561 #ifndef _PGTABLE_NOPMD_H
62562 #define _PGTABLE_NOPMD_H
62563
62564-#ifndef __ASSEMBLY__
62565-
62566 #include <asm-generic/pgtable-nopud.h>
62567
62568-struct mm_struct;
62569-
62570 #define __PAGETABLE_PMD_FOLDED
62571
62572+#define PMD_SHIFT PUD_SHIFT
62573+#define PTRS_PER_PMD 1
62574+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
62575+#define PMD_MASK (~(PMD_SIZE-1))
62576+
62577+#ifndef __ASSEMBLY__
62578+
62579+struct mm_struct;
62580+
62581 /*
62582 * Having the pmd type consist of a pud gets the size right, and allows
62583 * us to conceptually access the pud entry that this pmd is folded into
62584@@ -16,11 +21,6 @@ struct mm_struct;
62585 */
62586 typedef struct { pud_t pud; } pmd_t;
62587
62588-#define PMD_SHIFT PUD_SHIFT
62589-#define PTRS_PER_PMD 1
62590-#define PMD_SIZE (1UL << PMD_SHIFT)
62591-#define PMD_MASK (~(PMD_SIZE-1))
62592-
62593 /*
62594 * The "pud_xxx()" functions here are trivial for a folded two-level
62595 * setup: the pmd is never bad, and a pmd always exists (as it's folded
62596diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
62597index 810431d..0ec4804f 100644
62598--- a/include/asm-generic/pgtable-nopud.h
62599+++ b/include/asm-generic/pgtable-nopud.h
62600@@ -1,10 +1,15 @@
62601 #ifndef _PGTABLE_NOPUD_H
62602 #define _PGTABLE_NOPUD_H
62603
62604-#ifndef __ASSEMBLY__
62605-
62606 #define __PAGETABLE_PUD_FOLDED
62607
62608+#define PUD_SHIFT PGDIR_SHIFT
62609+#define PTRS_PER_PUD 1
62610+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
62611+#define PUD_MASK (~(PUD_SIZE-1))
62612+
62613+#ifndef __ASSEMBLY__
62614+
62615 /*
62616 * Having the pud type consist of a pgd gets the size right, and allows
62617 * us to conceptually access the pgd entry that this pud is folded into
62618@@ -12,11 +17,6 @@
62619 */
62620 typedef struct { pgd_t pgd; } pud_t;
62621
62622-#define PUD_SHIFT PGDIR_SHIFT
62623-#define PTRS_PER_PUD 1
62624-#define PUD_SIZE (1UL << PUD_SHIFT)
62625-#define PUD_MASK (~(PUD_SIZE-1))
62626-
62627 /*
62628 * The "pgd_xxx()" functions here are trivial for a folded two-level
62629 * setup: the pud is never bad, and a pud always exists (as it's folded
62630@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
62631 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
62632
62633 #define pgd_populate(mm, pgd, pud) do { } while (0)
62634+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
62635 /*
62636 * (puds are folded into pgds so this doesn't get actually called,
62637 * but the define is needed for a generic inline function.)
62638diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
62639index b36ce40..019426d 100644
62640--- a/include/asm-generic/pgtable.h
62641+++ b/include/asm-generic/pgtable.h
62642@@ -554,6 +554,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
62643 #endif
62644 }
62645
62646+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
62647+static inline unsigned long pax_open_kernel(void) { return 0; }
62648+#endif
62649+
62650+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
62651+static inline unsigned long pax_close_kernel(void) { return 0; }
62652+#endif
62653+
62654 #endif /* CONFIG_MMU */
62655
62656 #endif /* !__ASSEMBLY__ */
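
The pax_open_kernel()/pax_close_kernel() stubs above let code that briefly writes to otherwise read-only kernel data (for example, tables placed in .data..read_only) compile on every architecture; KERNEXEC-capable ports override them to actually toggle write protection. A minimal sketch of the call-site idiom, assuming no-op stand-ins for the hooks:

/* Illustrative sketch only, not part of the patch. */
#include <stdio.h>

/* stand-ins for the generic stubs above */
static unsigned long pax_open_kernel(void) { return 0; }
static unsigned long pax_close_kernel(void) { return 0; }

static int some_readonly_table[4];	/* would live in .data..read_only */

int main(void)
{
	pax_open_kernel();		/* arch hook: allow the write */
	some_readonly_table[0] = 42;
	pax_close_kernel();		/* arch hook: write-protect again */
	printf("%d\n", some_readonly_table[0]);
	return 0;
}
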
62657diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
62658index d1ea7ce..b1ebf2a 100644
62659--- a/include/asm-generic/vmlinux.lds.h
62660+++ b/include/asm-generic/vmlinux.lds.h
62661@@ -218,6 +218,7 @@
62662 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
62663 VMLINUX_SYMBOL(__start_rodata) = .; \
62664 *(.rodata) *(.rodata.*) \
62665+ *(.data..read_only) \
62666 *(__vermagic) /* Kernel version magic */ \
62667 . = ALIGN(8); \
62668 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
62669@@ -725,17 +726,18 @@
62670 * section in the linker script will go there too. @phdr should have
62671 * a leading colon.
62672 *
62673- * Note that this macros defines __per_cpu_load as an absolute symbol.
62674+ * Note that this macros defines per_cpu_load as an absolute symbol.
62675 * If there is no need to put the percpu section at a predetermined
62676 * address, use PERCPU_SECTION.
62677 */
62678 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
62679- VMLINUX_SYMBOL(__per_cpu_load) = .; \
62680- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
62681+ per_cpu_load = .; \
62682+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
62683 - LOAD_OFFSET) { \
62684+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
62685 PERCPU_INPUT(cacheline) \
62686 } phdr \
62687- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
62688+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
62689
62690 /**
62691 * PERCPU_SECTION - define output section for percpu area, simple version
62692diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
62693index 418d270..bfd2794 100644
62694--- a/include/crypto/algapi.h
62695+++ b/include/crypto/algapi.h
62696@@ -34,7 +34,7 @@ struct crypto_type {
62697 unsigned int maskclear;
62698 unsigned int maskset;
62699 unsigned int tfmsize;
62700-};
62701+} __do_const;
62702
62703 struct crypto_instance {
62704 struct crypto_alg alg;
62705diff --git a/include/drm/drmP.h b/include/drm/drmP.h
62706index 3fd8280..2b3c415 100644
62707--- a/include/drm/drmP.h
62708+++ b/include/drm/drmP.h
62709@@ -72,6 +72,7 @@
62710 #include <linux/workqueue.h>
62711 #include <linux/poll.h>
62712 #include <asm/pgalloc.h>
62713+#include <asm/local.h>
62714 #include <drm/drm.h>
62715 #include <drm/drm_sarea.h>
62716
62717@@ -1068,7 +1069,7 @@ struct drm_device {
62718
62719 /** \name Usage Counters */
62720 /*@{ */
62721- int open_count; /**< Outstanding files open */
62722+ local_t open_count; /**< Outstanding files open */
62723 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
62724 atomic_t vma_count; /**< Outstanding vma areas open */
62725 int buf_use; /**< Buffers in use -- cannot alloc */
62726@@ -1079,7 +1080,7 @@ struct drm_device {
62727 /*@{ */
62728 unsigned long counters;
62729 enum drm_stat_type types[15];
62730- atomic_t counts[15];
62731+ atomic_unchecked_t counts[15];
62732 /*@} */
62733
62734 struct list_head filelist;
62735diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
62736index e01cc80..6fb6f25 100644
62737--- a/include/drm/drm_crtc_helper.h
62738+++ b/include/drm/drm_crtc_helper.h
62739@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
62740 struct drm_connector *connector);
62741 /* disable encoder when not in use - more explicit than dpms off */
62742 void (*disable)(struct drm_encoder *encoder);
62743-};
62744+} __no_const;
62745
62746 /**
62747 * drm_connector_helper_funcs - helper operations for connectors
62748diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
62749index d6d1da4..fdd1ac5 100644
62750--- a/include/drm/ttm/ttm_memory.h
62751+++ b/include/drm/ttm/ttm_memory.h
62752@@ -48,7 +48,7 @@
62753
62754 struct ttm_mem_shrink {
62755 int (*do_shrink) (struct ttm_mem_shrink *);
62756-};
62757+} __no_const;
62758
62759 /**
62760 * struct ttm_mem_global - Global memory accounting structure.
62761diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
62762index 22ef21c..75904ba 100644
62763--- a/include/linux/atmdev.h
62764+++ b/include/linux/atmdev.h
62765@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
62766 #endif
62767
62768 struct k_atm_aal_stats {
62769-#define __HANDLE_ITEM(i) atomic_t i
62770+#define __HANDLE_ITEM(i) atomic_unchecked_t i
62771 __AAL_STAT_ITEMS
62772 #undef __HANDLE_ITEM
62773 };
62774diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
62775index de0628e..38f42eb 100644
62776--- a/include/linux/binfmts.h
62777+++ b/include/linux/binfmts.h
62778@@ -75,6 +75,7 @@ struct linux_binfmt {
62779 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
62780 int (*load_shlib)(struct file *);
62781 int (*core_dump)(struct coredump_params *cprm);
62782+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
62783 unsigned long min_coredump; /* minimal dump size */
62784 };
62785
62786diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
62787index 1756001..ab117ec 100644
62788--- a/include/linux/blkdev.h
62789+++ b/include/linux/blkdev.h
62790@@ -1478,7 +1478,7 @@ struct block_device_operations {
62791 /* this callback is with swap_lock and sometimes page table lock held */
62792 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
62793 struct module *owner;
62794-};
62795+} __do_const;
62796
62797 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
62798 unsigned long);
62799diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
62800index 7c2e030..b72475d 100644
62801--- a/include/linux/blktrace_api.h
62802+++ b/include/linux/blktrace_api.h
62803@@ -23,7 +23,7 @@ struct blk_trace {
62804 struct dentry *dir;
62805 struct dentry *dropped_file;
62806 struct dentry *msg_file;
62807- atomic_t dropped;
62808+ atomic_unchecked_t dropped;
62809 };
62810
62811 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
62812diff --git a/include/linux/cache.h b/include/linux/cache.h
62813index 4c57065..4307975 100644
62814--- a/include/linux/cache.h
62815+++ b/include/linux/cache.h
62816@@ -16,6 +16,10 @@
62817 #define __read_mostly
62818 #endif
62819
62820+#ifndef __read_only
62821+#define __read_only __read_mostly
62822+#endif
62823+
62824 #ifndef ____cacheline_aligned
62825 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
62826 #endif
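
The fallback above lets code annotate write-once data as __read_only even on configurations without KERNEXEC support, where it degrades to plain __read_mostly placement. A kernel-style sketch of the intended usage, with a hypothetical boot knob (not runnable outside a kernel tree):

/* written exactly once, during early boot, then never again */
static int my_feature_enabled __read_only = 1;

static int __init my_feature_setup(char *str)
{
	my_feature_enabled = 0;	/* the single legitimate write */
	return 1;
}
__setup("nomyfeature", my_feature_setup);
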
62827diff --git a/include/linux/capability.h b/include/linux/capability.h
62828index 98503b7..cc36d18 100644
62829--- a/include/linux/capability.h
62830+++ b/include/linux/capability.h
62831@@ -211,8 +211,13 @@ extern bool capable(int cap);
62832 extern bool ns_capable(struct user_namespace *ns, int cap);
62833 extern bool nsown_capable(int cap);
62834 extern bool inode_capable(const struct inode *inode, int cap);
62835+extern bool capable_nolog(int cap);
62836+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
62837+extern bool inode_capable_nolog(const struct inode *inode, int cap);
62838
62839 /* audit system wants to get cap info from files as well */
62840 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
62841
62842+extern int is_privileged_binary(const struct dentry *dentry);
62843+
62844 #endif /* !_LINUX_CAPABILITY_H */
62845diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
62846index 8609d57..86e4d79 100644
62847--- a/include/linux/cdrom.h
62848+++ b/include/linux/cdrom.h
62849@@ -87,7 +87,6 @@ struct cdrom_device_ops {
62850
62851 /* driver specifications */
62852 const int capability; /* capability flags */
62853- int n_minors; /* number of active minor devices */
62854 /* handle uniform packets for scsi type devices (scsi,atapi) */
62855 int (*generic_packet) (struct cdrom_device_info *,
62856 struct packet_command *);
62857diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
62858index 42e55de..1cd0e66 100644
62859--- a/include/linux/cleancache.h
62860+++ b/include/linux/cleancache.h
62861@@ -31,7 +31,7 @@ struct cleancache_ops {
62862 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
62863 void (*invalidate_inode)(int, struct cleancache_filekey);
62864 void (*invalidate_fs)(int);
62865-};
62866+} __no_const;
62867
62868 extern struct cleancache_ops
62869 cleancache_register_ops(struct cleancache_ops *ops);
62870diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
62871index 412bc6c..c31666e 100644
62872--- a/include/linux/compiler-gcc4.h
62873+++ b/include/linux/compiler-gcc4.h
62874@@ -32,6 +32,21 @@
62875 #define __linktime_error(message) __attribute__((__error__(message)))
62876
62877 #if __GNUC_MINOR__ >= 5
62878+
62879+#ifdef CONSTIFY_PLUGIN
62880+#define __no_const __attribute__((no_const))
62881+#define __do_const __attribute__((do_const))
62882+#endif
62883+
62884+#ifdef SIZE_OVERFLOW_PLUGIN
62885+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
62886+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
62887+#endif
62888+
62889+#ifdef LATENT_ENTROPY_PLUGIN
62890+#define __latent_entropy __attribute__((latent_entropy))
62891+#endif
62892+
62893 /*
62894 * Mark a position in code as unreachable. This can be used to
62895 * suppress control flow warnings after asm blocks that transfer
62896@@ -47,6 +62,11 @@
62897 #define __noclone __attribute__((__noclone__))
62898
62899 #endif
62900+
62901+#define __alloc_size(...) __attribute__((alloc_size(__VA_ARGS__)))
62902+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
62903+#define __bos0(ptr) __bos((ptr), 0)
62904+#define __bos1(ptr) __bos((ptr), 1)
62905 #endif
62906
62907 #if __GNUC_MINOR__ >= 6
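
The block above publishes the attributes that grsecurity's GCC plugins key on (constify, size_overflow, latent_entropy) plus the __alloc_size/__bos helpers that feed GCC's object-size machinery. A small compile-and-run sketch of the alloc_size mechanism, assuming only stock GCC (my_alloc is hypothetical):

#include <stdio.h>
#include <stdlib.h>

/* tell GCC that the return value points to exactly n bytes */
__attribute__((alloc_size(1)))
static void *my_alloc(size_t n)
{
	return malloc(n);
}

int main(void)
{
	void *p = my_alloc(64);

	/* under optimization GCC folds this to 64, which is what lets
	 * fortified copy routines reject oversized writes at build time;
	 * without optimization it reports (size_t)-1, i.e. unknown */
	printf("%zu\n", __builtin_object_size(p, 0));
	free(p);
	return 0;
}
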
62908diff --git a/include/linux/compiler.h b/include/linux/compiler.h
62909index f430e41..38be90f 100644
62910--- a/include/linux/compiler.h
62911+++ b/include/linux/compiler.h
62912@@ -5,31 +5,62 @@
62913
62914 #ifdef __CHECKER__
62915 # define __user __attribute__((noderef, address_space(1)))
62916+# define __force_user __force __user
62917 # define __kernel __attribute__((address_space(0)))
62918+# define __force_kernel __force __kernel
62919 # define __safe __attribute__((safe))
62920 # define __force __attribute__((force))
62921 # define __nocast __attribute__((nocast))
62922 # define __iomem __attribute__((noderef, address_space(2)))
62923+# define __force_iomem __force __iomem
62924 # define __acquires(x) __attribute__((context(x,0,1)))
62925 # define __releases(x) __attribute__((context(x,1,0)))
62926 # define __acquire(x) __context__(x,1)
62927 # define __release(x) __context__(x,-1)
62928 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
62929 # define __percpu __attribute__((noderef, address_space(3)))
62930+# define __force_percpu __force __percpu
62931 #ifdef CONFIG_SPARSE_RCU_POINTER
62932 # define __rcu __attribute__((noderef, address_space(4)))
62933+# define __force_rcu __force __rcu
62934 #else
62935 # define __rcu
62936+# define __force_rcu
62937 #endif
62938 extern void __chk_user_ptr(const volatile void __user *);
62939 extern void __chk_io_ptr(const volatile void __iomem *);
62940+#elif defined(CHECKER_PLUGIN)
62941+//# define __user
62942+//# define __force_user
62943+//# define __kernel
62944+//# define __force_kernel
62945+# define __safe
62946+# define __force
62947+# define __nocast
62948+# define __iomem
62949+# define __force_iomem
62950+# define __chk_user_ptr(x) (void)0
62951+# define __chk_io_ptr(x) (void)0
62952+# define __builtin_warning(x, y...) (1)
62953+# define __acquires(x)
62954+# define __releases(x)
62955+# define __acquire(x) (void)0
62956+# define __release(x) (void)0
62957+# define __cond_lock(x,c) (c)
62958+# define __percpu
62959+# define __force_percpu
62960+# define __rcu
62961+# define __force_rcu
62962 #else
62963 # define __user
62964+# define __force_user
62965 # define __kernel
62966+# define __force_kernel
62967 # define __safe
62968 # define __force
62969 # define __nocast
62970 # define __iomem
62971+# define __force_iomem
62972 # define __chk_user_ptr(x) (void)0
62973 # define __chk_io_ptr(x) (void)0
62974 # define __builtin_warning(x, y...) (1)
62975@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
62976 # define __release(x) (void)0
62977 # define __cond_lock(x,c) (c)
62978 # define __percpu
62979+# define __force_percpu
62980 # define __rcu
62981+# define __force_rcu
62982 #endif
62983
62984 #ifdef __KERNEL__
62985@@ -264,6 +297,26 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
62986 # define __attribute_const__ /* unimplemented */
62987 #endif
62988
62989+#ifndef __no_const
62990+# define __no_const
62991+#endif
62992+
62993+#ifndef __do_const
62994+# define __do_const
62995+#endif
62996+
62997+#ifndef __size_overflow
62998+# define __size_overflow(...)
62999+#endif
63000+
63001+#ifndef __intentional_overflow
63002+# define __intentional_overflow(...)
63003+#endif
63004+
63005+#ifndef __latent_entropy
63006+# define __latent_entropy
63007+#endif
63008+
63009 /*
63010 * Tell gcc if a function is cold. The compiler will assume any path
63011 * directly leading to the call is unlikely.
63012@@ -273,6 +326,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
63013 #define __cold
63014 #endif
63015
63016+#ifndef __alloc_size
63017+#define __alloc_size(...)
63018+#endif
63019+
63020+#ifndef __bos
63021+#define __bos(ptr, arg)
63022+#endif
63023+
63024+#ifndef __bos0
63025+#define __bos0(ptr)
63026+#endif
63027+
63028+#ifndef __bos1
63029+#define __bos1(ptr)
63030+#endif
63031+
63032 /* Simple shorthand for a section definition */
63033 #ifndef __section
63034 # define __section(S) __attribute__ ((__section__(#S)))
63035@@ -312,6 +381,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
63036 * use is to mediate communication between process-level code and irq/NMI
63037 * handlers, all running on the same CPU.
63038 */
63039-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
63040+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
63041+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
63042
63043 #endif /* __LINUX_COMPILER_H */
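
Const-qualifying ACCESS_ONCE() above turns any write through it into a compile error, so code that genuinely needs a volatile store must now use ACCESS_ONCE_RW(); this keeps the constify plugin's guarantees from being bypassed through the old read-write cast. A minimal sketch of the resulting idiom, with the two macros copied from the hunk above:

#define ACCESS_ONCE(x)		(*(volatile const typeof(x) *)&(x))
#define ACCESS_ONCE_RW(x)	(*(volatile typeof(x) *)&(x))

static int shared_flag;

int reader(void)
{
	return ACCESS_ONCE(shared_flag);	/* reads still compile */
}

void writer(void)
{
	/* ACCESS_ONCE(shared_flag) = 1; would now fail to compile:
	 * assignment to a const-qualified lvalue */
	ACCESS_ONCE_RW(shared_flag) = 1;
}
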
63044diff --git a/include/linux/cred.h b/include/linux/cred.h
63045index ebbed2c..908cc2c 100644
63046--- a/include/linux/cred.h
63047+++ b/include/linux/cred.h
63048@@ -208,6 +208,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
63049 static inline void validate_process_creds(void)
63050 {
63051 }
63052+static inline void validate_task_creds(struct task_struct *task)
63053+{
63054+}
63055 #endif
63056
63057 /**
63058diff --git a/include/linux/crypto.h b/include/linux/crypto.h
63059index b92eadf..b4ecdc1 100644
63060--- a/include/linux/crypto.h
63061+++ b/include/linux/crypto.h
63062@@ -373,7 +373,7 @@ struct cipher_tfm {
63063 const u8 *key, unsigned int keylen);
63064 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
63065 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
63066-};
63067+} __no_const;
63068
63069 struct hash_tfm {
63070 int (*init)(struct hash_desc *desc);
63071@@ -394,13 +394,13 @@ struct compress_tfm {
63072 int (*cot_decompress)(struct crypto_tfm *tfm,
63073 const u8 *src, unsigned int slen,
63074 u8 *dst, unsigned int *dlen);
63075-};
63076+} __no_const;
63077
63078 struct rng_tfm {
63079 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
63080 unsigned int dlen);
63081 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
63082-};
63083+} __no_const;
63084
63085 #define crt_ablkcipher crt_u.ablkcipher
63086 #define crt_aead crt_u.aead
63087diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
63088index 7925bf0..d5143d2 100644
63089--- a/include/linux/decompress/mm.h
63090+++ b/include/linux/decompress/mm.h
63091@@ -77,7 +77,7 @@ static void free(void *where)
63092 * warnings when not needed (indeed large_malloc / large_free are not
63093 * needed by inflate */
63094
63095-#define malloc(a) kmalloc(a, GFP_KERNEL)
63096+#define malloc(a) kmalloc((a), GFP_KERNEL)
63097 #define free(a) kfree(a)
63098
63099 #define large_malloc(a) vmalloc(a)
63100diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
63101index 94af418..b1ca7a2 100644
63102--- a/include/linux/dma-mapping.h
63103+++ b/include/linux/dma-mapping.h
63104@@ -54,7 +54,7 @@ struct dma_map_ops {
63105 u64 (*get_required_mask)(struct device *dev);
63106 #endif
63107 int is_phys;
63108-};
63109+} __do_const;
63110
63111 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
63112
63113diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
63114index d3201e4..8281e63 100644
63115--- a/include/linux/dmaengine.h
63116+++ b/include/linux/dmaengine.h
63117@@ -1018,9 +1018,9 @@ struct dma_pinned_list {
63118 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
63119 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
63120
63121-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
63122+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
63123 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
63124-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
63125+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
63126 struct dma_pinned_list *pinned_list, struct page *page,
63127 unsigned int offset, size_t len);
63128
63129diff --git a/include/linux/elf.h b/include/linux/elf.h
63130index 8c9048e..16a4665 100644
63131--- a/include/linux/elf.h
63132+++ b/include/linux/elf.h
63133@@ -20,6 +20,7 @@ extern Elf32_Dyn _DYNAMIC [];
63134 #define elf_note elf32_note
63135 #define elf_addr_t Elf32_Off
63136 #define Elf_Half Elf32_Half
63137+#define elf_dyn Elf32_Dyn
63138
63139 #else
63140
63141@@ -30,6 +31,7 @@ extern Elf64_Dyn _DYNAMIC [];
63142 #define elf_note elf64_note
63143 #define elf_addr_t Elf64_Off
63144 #define Elf_Half Elf64_Half
63145+#define elf_dyn Elf64_Dyn
63146
63147 #endif
63148
63149diff --git a/include/linux/filter.h b/include/linux/filter.h
63150index 24d251f..7afb83d 100644
63151--- a/include/linux/filter.h
63152+++ b/include/linux/filter.h
63153@@ -20,6 +20,7 @@ struct compat_sock_fprog {
63154
63155 struct sk_buff;
63156 struct sock;
63157+struct bpf_jit_work;
63158
63159 struct sk_filter
63160 {
63161@@ -27,6 +28,9 @@ struct sk_filter
63162 unsigned int len; /* Number of filter blocks */
63163 unsigned int (*bpf_func)(const struct sk_buff *skb,
63164 const struct sock_filter *filter);
63165+#ifdef CONFIG_BPF_JIT
63166+ struct bpf_jit_work *work;
63167+#endif
63168 struct rcu_head rcu;
63169 struct sock_filter insns[0];
63170 };
63171diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
63172index 3044254..9767f41 100644
63173--- a/include/linux/frontswap.h
63174+++ b/include/linux/frontswap.h
63175@@ -11,7 +11,7 @@ struct frontswap_ops {
63176 int (*load)(unsigned, pgoff_t, struct page *);
63177 void (*invalidate_page)(unsigned, pgoff_t);
63178 void (*invalidate_area)(unsigned);
63179-};
63180+} __no_const;
63181
63182 extern bool frontswap_enabled;
63183 extern struct frontswap_ops
63184diff --git a/include/linux/fs.h b/include/linux/fs.h
63185index 75fe9a1..8417cac 100644
63186--- a/include/linux/fs.h
63187+++ b/include/linux/fs.h
63188@@ -1543,7 +1543,8 @@ struct file_operations {
63189 int (*setlease)(struct file *, long, struct file_lock **);
63190 long (*fallocate)(struct file *file, int mode, loff_t offset,
63191 loff_t len);
63192-};
63193+} __do_const;
63194+typedef struct file_operations __no_const file_operations_no_const;
63195
63196 struct inode_operations {
63197 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
63198@@ -2667,4 +2668,14 @@ static inline void inode_has_no_xattr(struct inode *inode)
63199 inode->i_flags |= S_NOSEC;
63200 }
63201
63202+static inline bool is_sidechannel_device(const struct inode *inode)
63203+{
63204+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
63205+ umode_t mode = inode->i_mode;
63206+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
63207+#else
63208+ return false;
63209+#endif
63210+}
63211+
63212 #endif /* _LINUX_FS_H */
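
is_sidechannel_device() above flags world-readable or world-writable character and block devices, whose read/write activity could otherwise leak timing information to unprivileged observers; the fsnotify hooks later in this patch consume it to suppress events on such nodes. A standalone check of the mode predicate (userspace stand-in, same logic as the inline above):

#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>

static bool is_sidechannel_mode(mode_t mode)
{
	return (S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH));
}

int main(void)
{
	printf("%d\n", is_sidechannel_mode(S_IFCHR | 0666));	/* 1: world-rw char dev */
	printf("%d\n", is_sidechannel_mode(S_IFREG | 0666));	/* 0: regular file */
	printf("%d\n", is_sidechannel_mode(S_IFBLK | 0600));	/* 0: not world-accessible */
	return 0;
}
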
63213diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
63214index 003dc0f..3c4ea97 100644
63215--- a/include/linux/fs_struct.h
63216+++ b/include/linux/fs_struct.h
63217@@ -6,7 +6,7 @@
63218 #include <linux/seqlock.h>
63219
63220 struct fs_struct {
63221- int users;
63222+ atomic_t users;
63223 spinlock_t lock;
63224 seqcount_t seq;
63225 int umask;
63226diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
63227index ce31408..b1ad003 100644
63228--- a/include/linux/fscache-cache.h
63229+++ b/include/linux/fscache-cache.h
63230@@ -102,7 +102,7 @@ struct fscache_operation {
63231 fscache_operation_release_t release;
63232 };
63233
63234-extern atomic_t fscache_op_debug_id;
63235+extern atomic_unchecked_t fscache_op_debug_id;
63236 extern void fscache_op_work_func(struct work_struct *work);
63237
63238 extern void fscache_enqueue_operation(struct fscache_operation *);
63239@@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
63240 {
63241 INIT_WORK(&op->work, fscache_op_work_func);
63242 atomic_set(&op->usage, 1);
63243- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
63244+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
63245 op->processor = processor;
63246 op->release = release;
63247 INIT_LIST_HEAD(&op->pend_link);
63248diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
63249index 0fbfb46..508eb0d 100644
63250--- a/include/linux/fsnotify.h
63251+++ b/include/linux/fsnotify.h
63252@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
63253 struct inode *inode = path->dentry->d_inode;
63254 __u32 mask = FS_ACCESS;
63255
63256+ if (is_sidechannel_device(inode))
63257+ return;
63258+
63259 if (S_ISDIR(inode->i_mode))
63260 mask |= FS_ISDIR;
63261
63262@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
63263 struct inode *inode = path->dentry->d_inode;
63264 __u32 mask = FS_MODIFY;
63265
63266+ if (is_sidechannel_device(inode))
63267+ return;
63268+
63269 if (S_ISDIR(inode->i_mode))
63270 mask |= FS_ISDIR;
63271
63272@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
63273 */
63274 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
63275 {
63276- return kstrdup(name, GFP_KERNEL);
63277+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
63278 }
63279
63280 /*
63281diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
63282index 642928c..93afe6a 100644
63283--- a/include/linux/ftrace_event.h
63284+++ b/include/linux/ftrace_event.h
63285@@ -266,7 +266,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
63286 extern int trace_add_event_call(struct ftrace_event_call *call);
63287 extern void trace_remove_event_call(struct ftrace_event_call *call);
63288
63289-#define is_signed_type(type) (((type)(-1)) < 0)
63290+#define is_signed_type(type) (((type)(-1)) < (type)1)
63291
63292 int trace_set_clr_event(const char *system, const char *event, int set);
63293
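
The is_signed_type() rewrite above preserves the macro's truth table while comparing against (type)1 instead of 0, a form that also behaves sanely for bool and avoids "comparison is always false" warnings on unsigned types (a plausible motivation; the patch itself states none). A quick check:

#include <stdbool.h>
#include <stdio.h>

#define is_signed_type(type) (((type)(-1)) < (type)1)

int main(void)
{
	printf("int      -> %d\n", is_signed_type(int));	/* 1 */
	printf("unsigned -> %d\n", is_signed_type(unsigned));	/* 0 */
	printf("bool     -> %d\n", is_signed_type(bool));	/* 0: (bool)-1 == 1 */
	return 0;
}
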
63294diff --git a/include/linux/genhd.h b/include/linux/genhd.h
63295index 4f440b3..342233a 100644
63296--- a/include/linux/genhd.h
63297+++ b/include/linux/genhd.h
63298@@ -190,7 +190,7 @@ struct gendisk {
63299 struct kobject *slave_dir;
63300
63301 struct timer_rand_state *random;
63302- atomic_t sync_io; /* RAID */
63303+ atomic_unchecked_t sync_io; /* RAID */
63304 struct disk_events *ev;
63305 #ifdef CONFIG_BLK_DEV_INTEGRITY
63306 struct blk_integrity *integrity;
63307diff --git a/include/linux/gfp.h b/include/linux/gfp.h
63308index d0a7967..63c4c47 100644
63309--- a/include/linux/gfp.h
63310+++ b/include/linux/gfp.h
63311@@ -35,6 +35,12 @@ struct vm_area_struct;
63312 #define ___GFP_OTHER_NODE 0x800000u
63313 #define ___GFP_WRITE 0x1000000u
63314
63315+#ifdef CONFIG_PAX_USERCOPY_SLABS
63316+#define ___GFP_USERCOPY 0x2000000u
63317+#else
63318+#define ___GFP_USERCOPY 0
63319+#endif
63320+
63321 /*
63322 * GFP bitmasks..
63323 *
63324@@ -89,6 +95,7 @@ struct vm_area_struct;
63325 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
63326 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
63327 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
63328+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
63329
63330 /*
63331 * This may seem redundant, but it's a way of annotating false positives vs.
63332@@ -96,7 +103,7 @@ struct vm_area_struct;
63333 */
63334 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
63335
63336-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
63337+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
63338 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
63339
63340 /* This equals 0, but use constants in case they ever change */
63341@@ -150,6 +157,8 @@ struct vm_area_struct;
63342 /* 4GB DMA on some platforms */
63343 #define GFP_DMA32 __GFP_DMA32
63344
63345+#define GFP_USERCOPY __GFP_USERCOPY
63346+
63347 /* Convert GFP flags to their corresponding migrate type */
63348 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
63349 {
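
The new ___GFP_USERCOPY flag above claims bit 25 (0x2000000), which is exactly why __GFP_BITS_SHIFT must grow from 25 to 26: the mask of valid GFP bits is computed from the shift, and without the bump the new flag would fall outside __GFP_BITS_MASK. The arithmetic, spelled out as a standalone check:

#include <stdio.h>

#define MY_GFP_USERCOPY   0x2000000u	/* values copied from the hunk above */
#define MY_GFP_BITS_SHIFT 26
#define MY_GFP_BITS_MASK  ((1u << MY_GFP_BITS_SHIFT) - 1)

int main(void)
{
	printf("%d\n", MY_GFP_USERCOPY == (1u << 25));			/* 1 */
	printf("%d\n", (MY_GFP_USERCOPY & MY_GFP_BITS_MASK) != 0);	/* 1: inside the new mask */
	printf("%d\n", (MY_GFP_USERCOPY & ((1u << 25) - 1)) != 0);	/* 0: outside the old mask */
	return 0;
}
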
63350diff --git a/include/linux/gracl.h b/include/linux/gracl.h
63351new file mode 100644
63352index 0000000..ebe6d72
63353--- /dev/null
63354+++ b/include/linux/gracl.h
63355@@ -0,0 +1,319 @@
63356+#ifndef GR_ACL_H
63357+#define GR_ACL_H
63358+
63359+#include <linux/grdefs.h>
63360+#include <linux/resource.h>
63361+#include <linux/capability.h>
63362+#include <linux/dcache.h>
63363+#include <asm/resource.h>
63364+
63365+/* Major status information */
63366+
63367+#define GR_VERSION "grsecurity 2.9.1"
63368+#define GRSECURITY_VERSION 0x2901
63369+
63370+enum {
63371+ GR_SHUTDOWN = 0,
63372+ GR_ENABLE = 1,
63373+ GR_SPROLE = 2,
63374+ GR_RELOAD = 3,
63375+ GR_SEGVMOD = 4,
63376+ GR_STATUS = 5,
63377+ GR_UNSPROLE = 6,
63378+ GR_PASSSET = 7,
63379+ GR_SPROLEPAM = 8,
63380+};
63381+
63382+/* Password setup definitions
63383+ * kernel/grhash.c */
63384+enum {
63385+ GR_PW_LEN = 128,
63386+ GR_SALT_LEN = 16,
63387+ GR_SHA_LEN = 32,
63388+};
63389+
63390+enum {
63391+ GR_SPROLE_LEN = 64,
63392+};
63393+
63394+enum {
63395+ GR_NO_GLOB = 0,
63396+ GR_REG_GLOB,
63397+ GR_CREATE_GLOB
63398+};
63399+
63400+#define GR_NLIMITS 32
63401+
63402+/* Begin Data Structures */
63403+
63404+struct sprole_pw {
63405+ unsigned char *rolename;
63406+ unsigned char salt[GR_SALT_LEN];
63407+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
63408+};
63409+
63410+struct name_entry {
63411+ __u32 key;
63412+ ino_t inode;
63413+ dev_t device;
63414+ char *name;
63415+ __u16 len;
63416+ __u8 deleted;
63417+ struct name_entry *prev;
63418+ struct name_entry *next;
63419+};
63420+
63421+struct inodev_entry {
63422+ struct name_entry *nentry;
63423+ struct inodev_entry *prev;
63424+ struct inodev_entry *next;
63425+};
63426+
63427+struct acl_role_db {
63428+ struct acl_role_label **r_hash;
63429+ __u32 r_size;
63430+};
63431+
63432+struct inodev_db {
63433+ struct inodev_entry **i_hash;
63434+ __u32 i_size;
63435+};
63436+
63437+struct name_db {
63438+ struct name_entry **n_hash;
63439+ __u32 n_size;
63440+};
63441+
63442+struct crash_uid {
63443+ uid_t uid;
63444+ unsigned long expires;
63445+};
63446+
63447+struct gr_hash_struct {
63448+ void **table;
63449+ void **nametable;
63450+ void *first;
63451+ __u32 table_size;
63452+ __u32 used_size;
63453+ int type;
63454+};
63455+
63456+/* Userspace Grsecurity ACL data structures */
63457+
63458+struct acl_subject_label {
63459+ char *filename;
63460+ ino_t inode;
63461+ dev_t device;
63462+ __u32 mode;
63463+ kernel_cap_t cap_mask;
63464+ kernel_cap_t cap_lower;
63465+ kernel_cap_t cap_invert_audit;
63466+
63467+ struct rlimit res[GR_NLIMITS];
63468+ __u32 resmask;
63469+
63470+ __u8 user_trans_type;
63471+ __u8 group_trans_type;
63472+ uid_t *user_transitions;
63473+ gid_t *group_transitions;
63474+ __u16 user_trans_num;
63475+ __u16 group_trans_num;
63476+
63477+ __u32 sock_families[2];
63478+ __u32 ip_proto[8];
63479+ __u32 ip_type;
63480+ struct acl_ip_label **ips;
63481+ __u32 ip_num;
63482+ __u32 inaddr_any_override;
63483+
63484+ __u32 crashes;
63485+ unsigned long expires;
63486+
63487+ struct acl_subject_label *parent_subject;
63488+ struct gr_hash_struct *hash;
63489+ struct acl_subject_label *prev;
63490+ struct acl_subject_label *next;
63491+
63492+ struct acl_object_label **obj_hash;
63493+ __u32 obj_hash_size;
63494+ __u16 pax_flags;
63495+};
63496+
63497+struct role_allowed_ip {
63498+ __u32 addr;
63499+ __u32 netmask;
63500+
63501+ struct role_allowed_ip *prev;
63502+ struct role_allowed_ip *next;
63503+};
63504+
63505+struct role_transition {
63506+ char *rolename;
63507+
63508+ struct role_transition *prev;
63509+ struct role_transition *next;
63510+};
63511+
63512+struct acl_role_label {
63513+ char *rolename;
63514+ uid_t uidgid;
63515+ __u16 roletype;
63516+
63517+ __u16 auth_attempts;
63518+ unsigned long expires;
63519+
63520+ struct acl_subject_label *root_label;
63521+ struct gr_hash_struct *hash;
63522+
63523+ struct acl_role_label *prev;
63524+ struct acl_role_label *next;
63525+
63526+ struct role_transition *transitions;
63527+ struct role_allowed_ip *allowed_ips;
63528+ uid_t *domain_children;
63529+ __u16 domain_child_num;
63530+
63531+ umode_t umask;
63532+
63533+ struct acl_subject_label **subj_hash;
63534+ __u32 subj_hash_size;
63535+};
63536+
63537+struct user_acl_role_db {
63538+ struct acl_role_label **r_table;
63539+ __u32 num_pointers; /* Number of allocations to track */
63540+ __u32 num_roles; /* Number of roles */
63541+ __u32 num_domain_children; /* Number of domain children */
63542+ __u32 num_subjects; /* Number of subjects */
63543+ __u32 num_objects; /* Number of objects */
63544+};
63545+
63546+struct acl_object_label {
63547+ char *filename;
63548+ ino_t inode;
63549+ dev_t device;
63550+ __u32 mode;
63551+
63552+ struct acl_subject_label *nested;
63553+ struct acl_object_label *globbed;
63554+
63555+ /* next two structures not used */
63556+
63557+ struct acl_object_label *prev;
63558+ struct acl_object_label *next;
63559+};
63560+
63561+struct acl_ip_label {
63562+ char *iface;
63563+ __u32 addr;
63564+ __u32 netmask;
63565+ __u16 low, high;
63566+ __u8 mode;
63567+ __u32 type;
63568+ __u32 proto[8];
63569+
63570+ /* next two structures not used */
63571+
63572+ struct acl_ip_label *prev;
63573+ struct acl_ip_label *next;
63574+};
63575+
63576+struct gr_arg {
63577+ struct user_acl_role_db role_db;
63578+ unsigned char pw[GR_PW_LEN];
63579+ unsigned char salt[GR_SALT_LEN];
63580+ unsigned char sum[GR_SHA_LEN];
63581+ unsigned char sp_role[GR_SPROLE_LEN];
63582+ struct sprole_pw *sprole_pws;
63583+ dev_t segv_device;
63584+ ino_t segv_inode;
63585+ uid_t segv_uid;
63586+ __u16 num_sprole_pws;
63587+ __u16 mode;
63588+};
63589+
63590+struct gr_arg_wrapper {
63591+ struct gr_arg *arg;
63592+ __u32 version;
63593+ __u32 size;
63594+};
63595+
63596+struct subject_map {
63597+ struct acl_subject_label *user;
63598+ struct acl_subject_label *kernel;
63599+ struct subject_map *prev;
63600+ struct subject_map *next;
63601+};
63602+
63603+struct acl_subj_map_db {
63604+ struct subject_map **s_hash;
63605+ __u32 s_size;
63606+};
63607+
63608+/* End Data Structures */
63609+
63610+/* Hash functions generated by empirical testing by Brad Spengler.
63611+   Makes good use of the low bits of the inode: generally 0-1 probes
63612+   in the loop for a successful match, 0-3 for an unsuccessful one.
63613+   Shift/add algorithm with modulus of table size and an XOR. */
63614+
63615+static __inline__ unsigned int
63616+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
63617+{
63618+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
63619+}
63620+
63621+static __inline__ unsigned int
63622+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
63623+{
63624+ return ((const unsigned long)userp % sz);
63625+}
63626+
63627+static __inline__ unsigned int
63628+gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
63629+{
63630+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
63631+}
63632+
63633+static __inline__ unsigned int
63634+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
63635+{
63636+ return full_name_hash((const unsigned char *)name, len) % sz;
63637+}
63638+
63639+#define FOR_EACH_ROLE_START(role) \
63640+ role = role_list; \
63641+ while (role) {
63642+
63643+#define FOR_EACH_ROLE_END(role) \
63644+ role = role->prev; \
63645+ }
63646+
63647+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
63648+ subj = NULL; \
63649+ iter = 0; \
63650+ while (iter < role->subj_hash_size) { \
63651+ if (subj == NULL) \
63652+ subj = role->subj_hash[iter]; \
63653+ if (subj == NULL) { \
63654+ iter++; \
63655+ continue; \
63656+ }
63657+
63658+#define FOR_EACH_SUBJECT_END(subj,iter) \
63659+ subj = subj->next; \
63660+ if (subj == NULL) \
63661+ iter++; \
63662+ }
63663+
63664+
63665+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
63666+ subj = role->hash->first; \
63667+ while (subj != NULL) {
63668+
63669+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
63670+ subj = subj->next; \
63671+ }
63672+
63673+#endif
63674+
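
The gr_fhash() routine above mixes the inode and device numbers with shifts and an XOR before reducing modulo the table size; because every shifted term is a multiple of 256 or more, the low bits of the inode dominate bucket selection for small tables, matching the "low bits" remark in the comment. A standalone reproduction of the arithmetic (the inode/device values are arbitrary):

#include <stdio.h>

static unsigned int
gr_fhash(unsigned long ino, unsigned long dev, unsigned int sz)
{
	return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
}

int main(void)
{
	/* adjacent inodes on one device land in adjacent buckets */
	printf("%u\n", gr_fhash(131077UL, 2049UL, 256));	/* 6 */
	printf("%u\n", gr_fhash(131078UL, 2049UL, 256));	/* 7 */
	return 0;
}
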
63675diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
63676new file mode 100644
63677index 0000000..323ecf2
63678--- /dev/null
63679+++ b/include/linux/gralloc.h
63680@@ -0,0 +1,9 @@
63681+#ifndef __GRALLOC_H
63682+#define __GRALLOC_H
63683+
63684+void acl_free_all(void);
63685+int acl_alloc_stack_init(unsigned long size);
63686+void *acl_alloc(unsigned long len);
63687+void *acl_alloc_num(unsigned long num, unsigned long len);
63688+
63689+#endif
63690diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
63691new file mode 100644
63692index 0000000..be66033
63693--- /dev/null
63694+++ b/include/linux/grdefs.h
63695@@ -0,0 +1,140 @@
63696+#ifndef GRDEFS_H
63697+#define GRDEFS_H
63698+
63699+/* Begin grsecurity status declarations */
63700+
63701+enum {
63702+ GR_READY = 0x01,
63703+ GR_STATUS_INIT = 0x00 // disabled state
63704+};
63705+
63706+/* Begin ACL declarations */
63707+
63708+/* Role flags */
63709+
63710+enum {
63711+ GR_ROLE_USER = 0x0001,
63712+ GR_ROLE_GROUP = 0x0002,
63713+ GR_ROLE_DEFAULT = 0x0004,
63714+ GR_ROLE_SPECIAL = 0x0008,
63715+ GR_ROLE_AUTH = 0x0010,
63716+ GR_ROLE_NOPW = 0x0020,
63717+ GR_ROLE_GOD = 0x0040,
63718+ GR_ROLE_LEARN = 0x0080,
63719+ GR_ROLE_TPE = 0x0100,
63720+ GR_ROLE_DOMAIN = 0x0200,
63721+ GR_ROLE_PAM = 0x0400,
63722+ GR_ROLE_PERSIST = 0x0800
63723+};
63724+
63725+/* ACL Subject and Object mode flags */
63726+enum {
63727+ GR_DELETED = 0x80000000
63728+};
63729+
63730+/* ACL Object-only mode flags */
63731+enum {
63732+ GR_READ = 0x00000001,
63733+ GR_APPEND = 0x00000002,
63734+ GR_WRITE = 0x00000004,
63735+ GR_EXEC = 0x00000008,
63736+ GR_FIND = 0x00000010,
63737+ GR_INHERIT = 0x00000020,
63738+ GR_SETID = 0x00000040,
63739+ GR_CREATE = 0x00000080,
63740+ GR_DELETE = 0x00000100,
63741+ GR_LINK = 0x00000200,
63742+ GR_AUDIT_READ = 0x00000400,
63743+ GR_AUDIT_APPEND = 0x00000800,
63744+ GR_AUDIT_WRITE = 0x00001000,
63745+ GR_AUDIT_EXEC = 0x00002000,
63746+ GR_AUDIT_FIND = 0x00004000,
63747+ GR_AUDIT_INHERIT= 0x00008000,
63748+ GR_AUDIT_SETID = 0x00010000,
63749+ GR_AUDIT_CREATE = 0x00020000,
63750+ GR_AUDIT_DELETE = 0x00040000,
63751+ GR_AUDIT_LINK = 0x00080000,
63752+ GR_PTRACERD = 0x00100000,
63753+ GR_NOPTRACE = 0x00200000,
63754+ GR_SUPPRESS = 0x00400000,
63755+ GR_NOLEARN = 0x00800000,
63756+ GR_INIT_TRANSFER= 0x01000000
63757+};
63758+
63759+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
63760+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
63761+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
63762+
63763+/* ACL subject-only mode flags */
63764+enum {
63765+ GR_KILL = 0x00000001,
63766+ GR_VIEW = 0x00000002,
63767+ GR_PROTECTED = 0x00000004,
63768+ GR_LEARN = 0x00000008,
63769+ GR_OVERRIDE = 0x00000010,
63770+ /* just a placeholder; this mode is only used in userspace */
63771+ GR_DUMMY = 0x00000020,
63772+ GR_PROTSHM = 0x00000040,
63773+ GR_KILLPROC = 0x00000080,
63774+ GR_KILLIPPROC = 0x00000100,
63775+ /* just a placeholder; this mode is only used in userspace */
63776+ GR_NOTROJAN = 0x00000200,
63777+ GR_PROTPROCFD = 0x00000400,
63778+ GR_PROCACCT = 0x00000800,
63779+ GR_RELAXPTRACE = 0x00001000,
63780+ //GR_NESTED = 0x00002000,
63781+ GR_INHERITLEARN = 0x00004000,
63782+ GR_PROCFIND = 0x00008000,
63783+ GR_POVERRIDE = 0x00010000,
63784+ GR_KERNELAUTH = 0x00020000,
63785+ GR_ATSECURE = 0x00040000,
63786+ GR_SHMEXEC = 0x00080000
63787+};
63788+
63789+enum {
63790+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
63791+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
63792+ GR_PAX_ENABLE_MPROTECT = 0x0004,
63793+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
63794+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
63795+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
63796+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
63797+ GR_PAX_DISABLE_MPROTECT = 0x0400,
63798+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
63799+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
63800+};
63801+
63802+enum {
63803+ GR_ID_USER = 0x01,
63804+ GR_ID_GROUP = 0x02,
63805+};
63806+
63807+enum {
63808+ GR_ID_ALLOW = 0x01,
63809+ GR_ID_DENY = 0x02,
63810+};
63811+
63812+#define GR_CRASH_RES 31
63813+#define GR_UIDTABLE_MAX 500
63814+
63815+/* begin resource learning section */
63816+enum {
63817+ GR_RLIM_CPU_BUMP = 60,
63818+ GR_RLIM_FSIZE_BUMP = 50000,
63819+ GR_RLIM_DATA_BUMP = 10000,
63820+ GR_RLIM_STACK_BUMP = 1000,
63821+ GR_RLIM_CORE_BUMP = 10000,
63822+ GR_RLIM_RSS_BUMP = 500000,
63823+ GR_RLIM_NPROC_BUMP = 1,
63824+ GR_RLIM_NOFILE_BUMP = 5,
63825+ GR_RLIM_MEMLOCK_BUMP = 50000,
63826+ GR_RLIM_AS_BUMP = 500000,
63827+ GR_RLIM_LOCKS_BUMP = 2,
63828+ GR_RLIM_SIGPENDING_BUMP = 5,
63829+ GR_RLIM_MSGQUEUE_BUMP = 10000,
63830+ GR_RLIM_NICE_BUMP = 1,
63831+ GR_RLIM_RTPRIO_BUMP = 1,
63832+ GR_RLIM_RTTIME_BUMP = 1000000
63833+};
63834+
63835+#endif
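
The role, object, and subject modes above are all independent bit flags, so a single role can combine several types (e.g. a special role that is also password-less) and checks reduce to plain mask tests. A trivial illustration with values copied from the role-flag enum (MY_* names are placeholders):

#include <stdio.h>

enum {
	MY_ROLE_USER    = 0x0001,
	MY_ROLE_SPECIAL = 0x0008,
	MY_ROLE_NOPW    = 0x0020,
};

int main(void)
{
	unsigned short roletype = MY_ROLE_SPECIAL | MY_ROLE_NOPW;

	printf("%d\n", !!(roletype & MY_ROLE_SPECIAL));	/* 1 */
	printf("%d\n", !!(roletype & MY_ROLE_USER));	/* 0 */
	return 0;
}
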
63836diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
63837new file mode 100644
63838index 0000000..c9292f7
63839--- /dev/null
63840+++ b/include/linux/grinternal.h
63841@@ -0,0 +1,223 @@
63842+#ifndef __GRINTERNAL_H
63843+#define __GRINTERNAL_H
63844+
63845+#ifdef CONFIG_GRKERNSEC
63846+
63847+#include <linux/fs.h>
63848+#include <linux/mnt_namespace.h>
63849+#include <linux/nsproxy.h>
63850+#include <linux/gracl.h>
63851+#include <linux/grdefs.h>
63852+#include <linux/grmsg.h>
63853+
63854+void gr_add_learn_entry(const char *fmt, ...)
63855+ __attribute__ ((format (printf, 1, 2)));
63856+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
63857+ const struct vfsmount *mnt);
63858+__u32 gr_check_create(const struct dentry *new_dentry,
63859+ const struct dentry *parent,
63860+ const struct vfsmount *mnt, const __u32 mode);
63861+int gr_check_protected_task(const struct task_struct *task);
63862+__u32 to_gr_audit(const __u32 reqmode);
63863+int gr_set_acls(const int type);
63864+int gr_apply_subject_to_task(struct task_struct *task);
63865+int gr_acl_is_enabled(void);
63866+char gr_roletype_to_char(void);
63867+
63868+void gr_handle_alertkill(struct task_struct *task);
63869+char *gr_to_filename(const struct dentry *dentry,
63870+ const struct vfsmount *mnt);
63871+char *gr_to_filename1(const struct dentry *dentry,
63872+ const struct vfsmount *mnt);
63873+char *gr_to_filename2(const struct dentry *dentry,
63874+ const struct vfsmount *mnt);
63875+char *gr_to_filename3(const struct dentry *dentry,
63876+ const struct vfsmount *mnt);
63877+
63878+extern int grsec_enable_ptrace_readexec;
63879+extern int grsec_enable_harden_ptrace;
63880+extern int grsec_enable_link;
63881+extern int grsec_enable_fifo;
63882+extern int grsec_enable_execve;
63883+extern int grsec_enable_shm;
63884+extern int grsec_enable_execlog;
63885+extern int grsec_enable_signal;
63886+extern int grsec_enable_audit_ptrace;
63887+extern int grsec_enable_forkfail;
63888+extern int grsec_enable_time;
63889+extern int grsec_enable_rofs;
63890+extern int grsec_enable_chroot_shmat;
63891+extern int grsec_enable_chroot_mount;
63892+extern int grsec_enable_chroot_double;
63893+extern int grsec_enable_chroot_pivot;
63894+extern int grsec_enable_chroot_chdir;
63895+extern int grsec_enable_chroot_chmod;
63896+extern int grsec_enable_chroot_mknod;
63897+extern int grsec_enable_chroot_fchdir;
63898+extern int grsec_enable_chroot_nice;
63899+extern int grsec_enable_chroot_execlog;
63900+extern int grsec_enable_chroot_caps;
63901+extern int grsec_enable_chroot_sysctl;
63902+extern int grsec_enable_chroot_unix;
63903+extern int grsec_enable_symlinkown;
63904+extern int grsec_symlinkown_gid;
63905+extern int grsec_enable_tpe;
63906+extern int grsec_tpe_gid;
63907+extern int grsec_enable_tpe_all;
63908+extern int grsec_enable_tpe_invert;
63909+extern int grsec_enable_socket_all;
63910+extern int grsec_socket_all_gid;
63911+extern int grsec_enable_socket_client;
63912+extern int grsec_socket_client_gid;
63913+extern int grsec_enable_socket_server;
63914+extern int grsec_socket_server_gid;
63915+extern int grsec_audit_gid;
63916+extern int grsec_enable_group;
63917+extern int grsec_enable_audit_textrel;
63918+extern int grsec_enable_log_rwxmaps;
63919+extern int grsec_enable_mount;
63920+extern int grsec_enable_chdir;
63921+extern int grsec_resource_logging;
63922+extern int grsec_enable_blackhole;
63923+extern int grsec_lastack_retries;
63924+extern int grsec_enable_brute;
63925+extern int grsec_lock;
63926+
63927+extern spinlock_t grsec_alert_lock;
63928+extern unsigned long grsec_alert_wtime;
63929+extern unsigned long grsec_alert_fyet;
63930+
63931+extern spinlock_t grsec_audit_lock;
63932+
63933+extern rwlock_t grsec_exec_file_lock;
63934+
63935+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
63936+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
63937+ (tsk)->exec_file->f_vfsmnt) : "/")
63938+
63939+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
63940+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
63941+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
63942+
63943+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
63944+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
63945+ (tsk)->exec_file->f_vfsmnt) : "/")
63946+
63947+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
63948+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
63949+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
63950+
63951+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
63952+
63953+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
63954+
63955+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
63956+ (task)->pid, (cred)->uid, \
63957+ (cred)->euid, (cred)->gid, (cred)->egid, \
63958+ gr_parent_task_fullpath(task), \
63959+ (task)->real_parent->comm, (task)->real_parent->pid, \
63960+ (pcred)->uid, (pcred)->euid, \
63961+ (pcred)->gid, (pcred)->egid
63962+
63963+#define GR_CHROOT_CAPS {{ \
63964+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
63965+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
63966+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
63967+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
63968+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
63969+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
63970+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
63971+
63972+#define security_learn(normal_msg,args...) \
63973+({ \
63974+ read_lock(&grsec_exec_file_lock); \
63975+ gr_add_learn_entry(normal_msg "\n", ## args); \
63976+ read_unlock(&grsec_exec_file_lock); \
63977+})
63978+
63979+enum {
63980+ GR_DO_AUDIT,
63981+ GR_DONT_AUDIT,
63982+ /* used for non-audit messages for which the task shouldn't be killed */
63983+ GR_DONT_AUDIT_GOOD
63984+};
63985+
63986+enum {
63987+ GR_TTYSNIFF,
63988+ GR_RBAC,
63989+ GR_RBAC_STR,
63990+ GR_STR_RBAC,
63991+ GR_RBAC_MODE2,
63992+ GR_RBAC_MODE3,
63993+ GR_FILENAME,
63994+ GR_SYSCTL_HIDDEN,
63995+ GR_NOARGS,
63996+ GR_ONE_INT,
63997+ GR_ONE_INT_TWO_STR,
63998+ GR_ONE_STR,
63999+ GR_STR_INT,
64000+ GR_TWO_STR_INT,
64001+ GR_TWO_INT,
64002+ GR_TWO_U64,
64003+ GR_THREE_INT,
64004+ GR_FIVE_INT_TWO_STR,
64005+ GR_TWO_STR,
64006+ GR_THREE_STR,
64007+ GR_FOUR_STR,
64008+ GR_STR_FILENAME,
64009+ GR_FILENAME_STR,
64010+ GR_FILENAME_TWO_INT,
64011+ GR_FILENAME_TWO_INT_STR,
64012+ GR_TEXTREL,
64013+ GR_PTRACE,
64014+ GR_RESOURCE,
64015+ GR_CAP,
64016+ GR_SIG,
64017+ GR_SIG2,
64018+ GR_CRASH1,
64019+ GR_CRASH2,
64020+ GR_PSACCT,
64021+ GR_RWXMAP
64022+};
64023+
64024+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
64025+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
64026+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
64027+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
64028+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
64029+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
64030+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
64031+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
64032+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
64033+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
64034+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
64035+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
64036+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
64037+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
64038+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
64039+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
64040+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
64041+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
64042+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
64043+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
64044+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
64045+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
64046+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
64047+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
64048+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
64049+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
64050+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
64051+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
64052+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
64053+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
64054+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
64055+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
64056+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
64057+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
64058+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
64059+
64060+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
64061+
64062+#endif
64063+
64064+#endif
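
Each gr_log_* wrapper above encodes its argument shape in one of the GR_* enum tags so that the single varargs back end, gr_log_varargs(), can re-read the arguments with the right types; the wrappers keep call sites terse while the enum keeps va_arg() extraction honest. The dispatch pattern in miniature (all names hypothetical, printf standing in for the kernel logger):

#include <stdarg.h>
#include <stdio.h>

enum { MY_ONE_INT, MY_TWO_STR };

static void my_log_varargs(const char *msg, int argtypes, ...)
{
	va_list ap;

	va_start(ap, argtypes);
	switch (argtypes) {
	case MY_ONE_INT:
		printf(msg, va_arg(ap, int));
		break;
	case MY_TWO_STR: {
		const char *a = va_arg(ap, const char *);
		const char *b = va_arg(ap, const char *);
		printf(msg, a, b);
		break;
	}
	}
	va_end(ap);
}

#define my_log_int(msg, num)		my_log_varargs(msg, MY_ONE_INT, num)
#define my_log_str_str(msg, s1, s2)	my_log_varargs(msg, MY_TWO_STR, s1, s2)

int main(void)
{
	my_log_int("value: %d\n", 42);
	my_log_str_str("mount %s on %s\n", "/dev/sda1", "/mnt");
	return 0;
}
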
64065diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
64066new file mode 100644
64067index 0000000..2bd4c8d
64068--- /dev/null
64069+++ b/include/linux/grmsg.h
64070@@ -0,0 +1,111 @@
64071+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
64072+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
64073+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
64074+#define GR_STOPMOD_MSG "denied modification of module state by "
64075+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
64076+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
64077+#define GR_IOPERM_MSG "denied use of ioperm() by "
64078+#define GR_IOPL_MSG "denied use of iopl() by "
64079+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
64080+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
64081+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
64082+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
64083+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
64084+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
64085+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
64086+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
64087+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
64088+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
64089+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
64090+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
64091+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
64092+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
64093+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
64094+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
64095+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
64096+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
64097+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
64098+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
64099+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
64100+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
64101+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
64102+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
64103+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
64104+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
64105+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
64106+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
64107+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
64108+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
64109+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
64110+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
64111+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
64112+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
64113+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
64114+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
64115+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
64116+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
64117+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
64118+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
64119+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
64120+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
64121+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
64122+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
64123+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
64124+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
64125+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
64126+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
64127+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
64128+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
64129+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
64130+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
64131+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
64132+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
64133+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
64134+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
64135+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
64136+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
64137+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
64138+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
64139+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
64140+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
64141+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
64142+#define GR_FAILFORK_MSG "failed fork with errno %s by "
64143+#define GR_NICE_CHROOT_MSG "denied priority change by "
64144+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
64145+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
64146+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
64147+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
64148+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
64149+#define GR_TIME_MSG "time set by "
64150+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
64151+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
64152+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
64153+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
64154+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
64155+#define GR_BIND_MSG "denied bind() by "
64156+#define GR_CONNECT_MSG "denied connect() by "
64157+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
64158+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
64159+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
64160+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
64161+#define GR_CAP_ACL_MSG "use of %s denied for "
64162+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
64163+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
64164+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
64165+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
64166+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
64167+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
64168+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
64169+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
64170+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
64171+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
64172+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
64173+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
64174+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
64175+#define GR_VM86_MSG "denied use of vm86 by "
64176+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
64177+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
64178+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
64179+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
64180+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
64181+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
64182diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
64183new file mode 100644
64184index 0000000..f2f5d5b
64185--- /dev/null
64186+++ b/include/linux/grsecurity.h
64187@@ -0,0 +1,239 @@
64188+#ifndef GR_SECURITY_H
64189+#define GR_SECURITY_H
64190+#include <linux/fs.h>
64191+#include <linux/fs_struct.h>
64192+#include <linux/binfmts.h>
64193+#include <linux/gracl.h>
64194+
64195+/* notify of brain-dead configs */
64196+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64197+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
64198+#endif
64199+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
64200+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
64201+#endif
64202+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
64203+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
64204+#endif
64205+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
64206+#error "CONFIG_PAX enabled, but no PaX options are enabled."
64207+#endif
64208+
64209+#include <linux/compat.h>
64210+
64211+struct user_arg_ptr {
64212+#ifdef CONFIG_COMPAT
64213+ bool is_compat;
64214+#endif
64215+ union {
64216+ const char __user *const __user *native;
64217+#ifdef CONFIG_COMPAT
64218+ const compat_uptr_t __user *compat;
64219+#endif
64220+ } ptr;
64221+};
64222+
64223+void gr_handle_brute_attach(unsigned long mm_flags);
64224+void gr_handle_brute_check(void);
64225+void gr_handle_kernel_exploit(void);
64226+int gr_process_user_ban(void);
64227+
64228+char gr_roletype_to_char(void);
64229+
64230+int gr_acl_enable_at_secure(void);
64231+
64232+int gr_check_user_change(int real, int effective, int fs);
64233+int gr_check_group_change(int real, int effective, int fs);
64234+
64235+void gr_del_task_from_ip_table(struct task_struct *p);
64236+
64237+int gr_pid_is_chrooted(struct task_struct *p);
64238+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
64239+int gr_handle_chroot_nice(void);
64240+int gr_handle_chroot_sysctl(const int op);
64241+int gr_handle_chroot_setpriority(struct task_struct *p,
64242+ const int niceval);
64243+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
64244+int gr_handle_chroot_chroot(const struct dentry *dentry,
64245+ const struct vfsmount *mnt);
64246+void gr_handle_chroot_chdir(struct path *path);
64247+int gr_handle_chroot_chmod(const struct dentry *dentry,
64248+ const struct vfsmount *mnt, const int mode);
64249+int gr_handle_chroot_mknod(const struct dentry *dentry,
64250+ const struct vfsmount *mnt, const int mode);
64251+int gr_handle_chroot_mount(const struct dentry *dentry,
64252+ const struct vfsmount *mnt,
64253+ const char *dev_name);
64254+int gr_handle_chroot_pivot(void);
64255+int gr_handle_chroot_unix(const pid_t pid);
64256+
64257+int gr_handle_rawio(const struct inode *inode);
64258+
64259+void gr_handle_ioperm(void);
64260+void gr_handle_iopl(void);
64261+
64262+umode_t gr_acl_umask(void);
64263+
64264+int gr_tpe_allow(const struct file *file);
64265+
64266+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
64267+void gr_clear_chroot_entries(struct task_struct *task);
64268+
64269+void gr_log_forkfail(const int retval);
64270+void gr_log_timechange(void);
64271+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
64272+void gr_log_chdir(const struct dentry *dentry,
64273+ const struct vfsmount *mnt);
64274+void gr_log_chroot_exec(const struct dentry *dentry,
64275+ const struct vfsmount *mnt);
64276+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
64277+void gr_log_remount(const char *devname, const int retval);
64278+void gr_log_unmount(const char *devname, const int retval);
64279+void gr_log_mount(const char *from, const char *to, const int retval);
64280+void gr_log_textrel(struct vm_area_struct *vma);
64281+void gr_log_rwxmmap(struct file *file);
64282+void gr_log_rwxmprotect(struct file *file);
64283+
64284+int gr_handle_follow_link(const struct inode *parent,
64285+ const struct inode *inode,
64286+ const struct dentry *dentry,
64287+ const struct vfsmount *mnt);
64288+int gr_handle_fifo(const struct dentry *dentry,
64289+ const struct vfsmount *mnt,
64290+ const struct dentry *dir, const int flag,
64291+ const int acc_mode);
64292+int gr_handle_hardlink(const struct dentry *dentry,
64293+ const struct vfsmount *mnt,
64294+ struct inode *inode,
64295+ const int mode, const struct filename *to);
64296+
64297+int gr_is_capable(const int cap);
64298+int gr_is_capable_nolog(const int cap);
64299+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
64300+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
64301+
64302+void gr_learn_resource(const struct task_struct *task, const int limit,
64303+ const unsigned long wanted, const int gt);
64304+void gr_copy_label(struct task_struct *tsk);
64305+void gr_handle_crash(struct task_struct *task, const int sig);
64306+int gr_handle_signal(const struct task_struct *p, const int sig);
64307+int gr_check_crash_uid(const uid_t uid);
64308+int gr_check_protected_task(const struct task_struct *task);
64309+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
64310+int gr_acl_handle_mmap(const struct file *file,
64311+ const unsigned long prot);
64312+int gr_acl_handle_mprotect(const struct file *file,
64313+ const unsigned long prot);
64314+int gr_check_hidden_task(const struct task_struct *tsk);
64315+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
64316+ const struct vfsmount *mnt);
64317+__u32 gr_acl_handle_utime(const struct dentry *dentry,
64318+ const struct vfsmount *mnt);
64319+__u32 gr_acl_handle_access(const struct dentry *dentry,
64320+ const struct vfsmount *mnt, const int fmode);
64321+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
64322+ const struct vfsmount *mnt, umode_t *mode);
64323+__u32 gr_acl_handle_chown(const struct dentry *dentry,
64324+ const struct vfsmount *mnt);
64325+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
64326+ const struct vfsmount *mnt);
64327+int gr_handle_ptrace(struct task_struct *task, const long request);
64328+int gr_handle_proc_ptrace(struct task_struct *task);
64329+__u32 gr_acl_handle_execve(const struct dentry *dentry,
64330+ const struct vfsmount *mnt);
64331+int gr_check_crash_exec(const struct file *filp);
64332+int gr_acl_is_enabled(void);
64333+void gr_set_kernel_label(struct task_struct *task);
64334+void gr_set_role_label(struct task_struct *task, const uid_t uid,
64335+ const gid_t gid);
64336+int gr_set_proc_label(const struct dentry *dentry,
64337+ const struct vfsmount *mnt,
64338+ const int unsafe_flags);
64339+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
64340+ const struct vfsmount *mnt);
64341+__u32 gr_acl_handle_open(const struct dentry *dentry,
64342+ const struct vfsmount *mnt, int acc_mode);
64343+__u32 gr_acl_handle_creat(const struct dentry *dentry,
64344+ const struct dentry *p_dentry,
64345+ const struct vfsmount *p_mnt,
64346+ int open_flags, int acc_mode, const int imode);
64347+void gr_handle_create(const struct dentry *dentry,
64348+ const struct vfsmount *mnt);
64349+void gr_handle_proc_create(const struct dentry *dentry,
64350+ const struct inode *inode);
64351+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
64352+ const struct dentry *parent_dentry,
64353+ const struct vfsmount *parent_mnt,
64354+ const int mode);
64355+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
64356+ const struct dentry *parent_dentry,
64357+ const struct vfsmount *parent_mnt);
64358+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
64359+ const struct vfsmount *mnt);
64360+void gr_handle_delete(const ino_t ino, const dev_t dev);
64361+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
64362+ const struct vfsmount *mnt);
64363+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
64364+ const struct dentry *parent_dentry,
64365+ const struct vfsmount *parent_mnt,
64366+ const struct filename *from);
64367+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
64368+ const struct dentry *parent_dentry,
64369+ const struct vfsmount *parent_mnt,
64370+ const struct dentry *old_dentry,
64371+ const struct vfsmount *old_mnt, const struct filename *to);
64372+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
64373+int gr_acl_handle_rename(struct dentry *new_dentry,
64374+ struct dentry *parent_dentry,
64375+ const struct vfsmount *parent_mnt,
64376+ struct dentry *old_dentry,
64377+ struct inode *old_parent_inode,
64378+ struct vfsmount *old_mnt, const struct filename *newname);
64379+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
64380+ struct dentry *old_dentry,
64381+ struct dentry *new_dentry,
64382+ struct vfsmount *mnt, const __u8 replace);
64383+__u32 gr_check_link(const struct dentry *new_dentry,
64384+ const struct dentry *parent_dentry,
64385+ const struct vfsmount *parent_mnt,
64386+ const struct dentry *old_dentry,
64387+ const struct vfsmount *old_mnt);
64388+int gr_acl_handle_filldir(const struct file *file, const char *name,
64389+ const unsigned int namelen, const ino_t ino);
64390+
64391+__u32 gr_acl_handle_unix(const struct dentry *dentry,
64392+ const struct vfsmount *mnt);
64393+void gr_acl_handle_exit(void);
64394+void gr_acl_handle_psacct(struct task_struct *task, const long code);
64395+int gr_acl_handle_procpidmem(const struct task_struct *task);
64396+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
64397+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
64398+void gr_audit_ptrace(struct task_struct *task);
64399+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
64400+void gr_put_exec_file(struct task_struct *task);
64401+
64402+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
64403+
64404+#ifdef CONFIG_GRKERNSEC
64405+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
64406+void gr_handle_vm86(void);
64407+void gr_handle_mem_readwrite(u64 from, u64 to);
64408+
64409+void gr_log_badprocpid(const char *entry);
64410+
64411+extern int grsec_enable_dmesg;
64412+extern int grsec_disable_privio;
64413+
64414+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
64415+extern int grsec_proc_gid;
64416+#endif
64417+
64418+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
64419+extern int grsec_enable_chroot_findtask;
64420+#endif
64421+#ifdef CONFIG_GRKERNSEC_SETXID
64422+extern int grsec_enable_setxid;
64423+#endif
64424+#endif
64425+
64426+#endif
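
The user_arg_ptr union above abstracts over native and compat argv layouts so gr_handle_exec_args() can log arguments for both ABIs. For reference, this is roughly how such a union is dereferenced, modeled on the mainline get_user_arg_ptr() helper in fs/exec.c (illustrative; the grsecurity logging loop itself lives in the grsec/ hunks of this patch):

	static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
	{
		const char __user *native;

	#ifdef CONFIG_COMPAT
		if (unlikely(argv.is_compat)) {
			compat_uptr_t compat;

			/* 32-bit userland stored 32-bit pointers */
			if (get_user(compat, argv.ptr.compat + nr))
				return ERR_PTR(-EFAULT);
			return compat_ptr(compat);
		}
	#endif

		if (get_user(native, argv.ptr.native + nr))
			return ERR_PTR(-EFAULT);
		return native;
	}
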
64427diff --git a/include/linux/grsock.h b/include/linux/grsock.h
64428new file mode 100644
64429index 0000000..e7ffaaf
64430--- /dev/null
64431+++ b/include/linux/grsock.h
64432@@ -0,0 +1,19 @@
64433+#ifndef __GRSOCK_H
64434+#define __GRSOCK_H
64435+
64436+extern void gr_attach_curr_ip(const struct sock *sk);
64437+extern int gr_handle_sock_all(const int family, const int type,
64438+ const int protocol);
64439+extern int gr_handle_sock_server(const struct sockaddr *sck);
64440+extern int gr_handle_sock_server_other(const struct sock *sck);
64441+extern int gr_handle_sock_client(const struct sockaddr *sck);
64442+extern int gr_search_connect(struct socket * sock,
64443+ struct sockaddr_in * addr);
64444+extern int gr_search_bind(struct socket * sock,
64445+ struct sockaddr_in * addr);
64446+extern int gr_search_listen(struct socket * sock);
64447+extern int gr_search_accept(struct socket * sock);
64448+extern int gr_search_socket(const int domain, const int type,
64449+ const int protocol);
64450+
64451+#endif
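
A hedged sketch of how these hooks gate socket creation, assuming the usual grsecurity conventions (gr_search_* return nonzero to allow, gr_handle_* return nonzero to deny); the authoritative call sites are the net/socket.c hunks later in this patch:

	static int example_socket_create_check(int family, int type, int protocol)
	{
		if (!gr_search_socket(family, type, protocol))
			return -EACCES;	/* denied by RBAC policy */
		if (gr_handle_sock_all(family, type, protocol))
			return -EACCES;	/* denied by kernel-wide socket restriction */
		return 0;
	}
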
64452diff --git a/include/linux/highmem.h b/include/linux/highmem.h
64453index ef788b5..ac41b7b 100644
64454--- a/include/linux/highmem.h
64455+++ b/include/linux/highmem.h
64456@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
64457 kunmap_atomic(kaddr);
64458 }
64459
64460+static inline void sanitize_highpage(struct page *page)
64461+{
64462+ void *kaddr;
64463+ unsigned long flags;
64464+
64465+ local_irq_save(flags);
64466+ kaddr = kmap_atomic(page);
64467+ clear_page(kaddr);
64468+ kunmap_atomic(kaddr);
64469+ local_irq_restore(flags);
64470+}
64471+
64472 static inline void zero_user_segments(struct page *page,
64473 unsigned start1, unsigned end1,
64474 unsigned start2, unsigned end2)
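
sanitize_highpage() differs from clear_highpage() above only in wrapping the atomic kmap in local_irq_save()/restore(), so a page can be scrubbed from free paths regardless of IRQ state. A sketch of an assumed caller (the real one is the PAX_MEMORY_SANITIZE code in mm/page_alloc.c elsewhere in this patch):

	static void example_scrub_freed_pages(struct page *page, unsigned int order)
	{
		unsigned int i;

		/* wipe stale data before the pages return to the allocator */
		for (i = 0; i < (1U << order); i++)
			sanitize_highpage(page + i);
	}
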
64475diff --git a/include/linux/i2c.h b/include/linux/i2c.h
64476index 800de22..7a2fa46 100644
64477--- a/include/linux/i2c.h
64478+++ b/include/linux/i2c.h
64479@@ -367,6 +367,7 @@ struct i2c_algorithm {
64480 /* To determine what the adapter supports */
64481 u32 (*functionality) (struct i2c_adapter *);
64482 };
64483+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
64484
64485 /*
64486 * i2c_adapter is the structure used to identify a physical i2c bus along
64487diff --git a/include/linux/i2o.h b/include/linux/i2o.h
64488index d23c3c2..eb63c81 100644
64489--- a/include/linux/i2o.h
64490+++ b/include/linux/i2o.h
64491@@ -565,7 +565,7 @@ struct i2o_controller {
64492 struct i2o_device *exec; /* Executive */
64493 #if BITS_PER_LONG == 64
64494 spinlock_t context_list_lock; /* lock for context_list */
64495- atomic_t context_list_counter; /* needed for unique contexts */
64496+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
64497 struct list_head context_list; /* list of context id's
64498 and pointers */
64499 #endif
64500diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
64501index aff7ad8..3942bbd 100644
64502--- a/include/linux/if_pppox.h
64503+++ b/include/linux/if_pppox.h
64504@@ -76,7 +76,7 @@ struct pppox_proto {
64505 int (*ioctl)(struct socket *sock, unsigned int cmd,
64506 unsigned long arg);
64507 struct module *owner;
64508-};
64509+} __do_const;
64510
64511 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
64512 extern void unregister_pppox_proto(int proto_num);
64513diff --git a/include/linux/init.h b/include/linux/init.h
64514index e59041e..df0a975 100644
64515--- a/include/linux/init.h
64516+++ b/include/linux/init.h
64517@@ -39,9 +39,36 @@
64518 * Also note, that this data cannot be "const".
64519 */
64520
64521+#ifdef MODULE
64522+#define add_init_latent_entropy
64523+#define add_devinit_latent_entropy
64524+#define add_cpuinit_latent_entropy
64525+#define add_meminit_latent_entropy
64526+#else
64527+#define add_init_latent_entropy __latent_entropy
64528+
64529+#ifdef CONFIG_HOTPLUG
64530+#define add_devinit_latent_entropy
64531+#else
64532+#define add_devinit_latent_entropy __latent_entropy
64533+#endif
64534+
64535+#ifdef CONFIG_HOTPLUG_CPU
64536+#define add_cpuinit_latent_entropy
64537+#else
64538+#define add_cpuinit_latent_entropy __latent_entropy
64539+#endif
64540+
64541+#ifdef CONFIG_MEMORY_HOTPLUG
64542+#define add_meminit_latent_entropy
64543+#else
64544+#define add_meminit_latent_entropy __latent_entropy
64545+#endif
64546+#endif
64547+
64548 /* These are for everybody (although not all archs will actually
64549 discard it in modules) */
64550-#define __init __section(.init.text) __cold notrace
64551+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
64552 #define __initdata __section(.init.data)
64553 #define __initconst __constsection(.init.rodata)
64554 #define __exitdata __section(.exit.data)
64555@@ -94,7 +121,7 @@
64556 #define __exit __section(.exit.text) __exitused __cold notrace
64557
64558 /* Used for HOTPLUG */
64559-#define __devinit __section(.devinit.text) __cold notrace
64560+#define __devinit __section(.devinit.text) __cold notrace add_devinit_latent_entropy
64561 #define __devinitdata __section(.devinit.data)
64562 #define __devinitconst __constsection(.devinit.rodata)
64563 #define __devexit __section(.devexit.text) __exitused __cold notrace
64564@@ -102,7 +129,7 @@
64565 #define __devexitconst __constsection(.devexit.rodata)
64566
64567 /* Used for HOTPLUG_CPU */
64568-#define __cpuinit __section(.cpuinit.text) __cold notrace
64569+#define __cpuinit __section(.cpuinit.text) __cold notrace add_cpuinit_latent_entropy
64570 #define __cpuinitdata __section(.cpuinit.data)
64571 #define __cpuinitconst __constsection(.cpuinit.rodata)
64572 #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
64573@@ -110,7 +137,7 @@
64574 #define __cpuexitconst __constsection(.cpuexit.rodata)
64575
64576 /* Used for MEMORY_HOTPLUG */
64577-#define __meminit __section(.meminit.text) __cold notrace
64578+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
64579 #define __meminitdata __section(.meminit.data)
64580 #define __meminitconst __constsection(.meminit.rodata)
64581 #define __memexit __section(.memexit.text) __exitused __cold notrace
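
The add_*_latent_entropy macros apply the latent_entropy gcc-plugin attribute only to init code that is built in and runs once at boot; for modules, or when hotplug keeps a section live past boot, they expand to nothing. The same conditionally-empty-attribute pattern in miniature, as standalone C (ONCE_ONLY_CODE and maybe_cold are invented for this demo):

	#include <stdio.h>

	#ifdef ONCE_ONLY_CODE
	# define maybe_cold __attribute__((cold))	/* attribute applies */
	#else
	# define maybe_cold				/* expands to nothing */
	#endif

	static maybe_cold void boot_setup(void)
	{
		puts("runs once at startup");
	}

	int main(void)
	{
		boot_setup();
		return 0;
	}
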
64582diff --git a/include/linux/init_task.h b/include/linux/init_task.h
64583index 6d087c5..401cab8 100644
64584--- a/include/linux/init_task.h
64585+++ b/include/linux/init_task.h
64586@@ -143,6 +143,12 @@ extern struct task_group root_task_group;
64587
64588 #define INIT_TASK_COMM "swapper"
64589
64590+#ifdef CONFIG_X86
64591+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
64592+#else
64593+#define INIT_TASK_THREAD_INFO
64594+#endif
64595+
64596 /*
64597 * INIT_TASK is used to set up the first task table, touch at
64598 * your own risk!. Base=0, limit=0x1fffff (=2MB)
64599@@ -182,6 +188,7 @@ extern struct task_group root_task_group;
64600 RCU_POINTER_INITIALIZER(cred, &init_cred), \
64601 .comm = INIT_TASK_COMM, \
64602 .thread = INIT_THREAD, \
64603+ INIT_TASK_THREAD_INFO \
64604 .fs = &init_fs, \
64605 .files = &init_files, \
64606 .signal = &init_signals, \
64607diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
64608index 5e4e617..eee383d 100644
64609--- a/include/linux/interrupt.h
64610+++ b/include/linux/interrupt.h
64611@@ -435,7 +435,7 @@ enum
64612 /* map softirq index to softirq name. update 'softirq_to_name' in
64613 * kernel/softirq.c when adding a new softirq.
64614 */
64615-extern char *softirq_to_name[NR_SOFTIRQS];
64616+extern const char * const softirq_to_name[NR_SOFTIRQS];
64617
64618 /* softirq mask and active fields moved to irq_cpustat_t in
64619 * asm/hardirq.h to get better cache usage. KAO
64620@@ -443,12 +443,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
64621
64622 struct softirq_action
64623 {
64624- void (*action)(struct softirq_action *);
64625+ void (*action)(void);
64626 };
64627
64628 asmlinkage void do_softirq(void);
64629 asmlinkage void __do_softirq(void);
64630-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
64631+extern void open_softirq(int nr, void (*action)(void));
64632 extern void softirq_init(void);
64633 extern void __raise_softirq_irqoff(unsigned int nr);
64634
64635diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
64636index 6883e19..06992b1 100644
64637--- a/include/linux/kallsyms.h
64638+++ b/include/linux/kallsyms.h
64639@@ -15,7 +15,8 @@
64640
64641 struct module;
64642
64643-#ifdef CONFIG_KALLSYMS
64644+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
64645+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
64646 /* Lookup the address for a symbol. Returns 0 if not found. */
64647 unsigned long kallsyms_lookup_name(const char *name);
64648
64649@@ -106,6 +107,17 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
64650 /* Stupid that this does nothing, but I didn't create this mess. */
64651 #define __print_symbol(fmt, addr)
64652 #endif /*CONFIG_KALLSYMS*/
64653+#else /* when included by kallsyms.c, vsnprintf.c, or
64654+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
64655+extern void __print_symbol(const char *fmt, unsigned long address);
64656+extern int sprint_backtrace(char *buffer, unsigned long address);
64657+extern int sprint_symbol(char *buffer, unsigned long address);
64658+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
64659+const char *kallsyms_lookup(unsigned long addr,
64660+ unsigned long *symbolsize,
64661+ unsigned long *offset,
64662+ char **modname, char *namebuf);
64663+#endif
64664
64665 /* This macro allows us to keep printk typechecking */
64666 static __printf(1, 2)
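
With GRKERNSEC_HIDESYM the inline no-op stubs would otherwise hide symbol resolution from everyone; the three trusted translation units named in the comment above opt back in by defining __INCLUDED_BY_HIDESYM before the include:

	/* at the top of kallsyms.c, vsnprintf.c or dumpstack.c */
	#define __INCLUDED_BY_HIDESYM 1
	#include <linux/kallsyms.h>
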
64667diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
64668index 4dff0c6..1ca9b72 100644
64669--- a/include/linux/kgdb.h
64670+++ b/include/linux/kgdb.h
64671@@ -53,7 +53,7 @@ extern int kgdb_connected;
64672 extern int kgdb_io_module_registered;
64673
64674 extern atomic_t kgdb_setting_breakpoint;
64675-extern atomic_t kgdb_cpu_doing_single_step;
64676+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
64677
64678 extern struct task_struct *kgdb_usethread;
64679 extern struct task_struct *kgdb_contthread;
64680@@ -255,7 +255,7 @@ struct kgdb_arch {
64681 void (*correct_hw_break)(void);
64682
64683 void (*enable_nmi)(bool on);
64684-};
64685+} __do_const;
64686
64687 /**
64688 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
64689@@ -280,7 +280,7 @@ struct kgdb_io {
64690 void (*pre_exception) (void);
64691 void (*post_exception) (void);
64692 int is_console;
64693-};
64694+} __do_const;
64695
64696 extern struct kgdb_arch arch_kgdb_ops;
64697
64698diff --git a/include/linux/kmod.h b/include/linux/kmod.h
64699index 5398d58..5883a34 100644
64700--- a/include/linux/kmod.h
64701+++ b/include/linux/kmod.h
64702@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
64703 * usually useless though. */
64704 extern __printf(2, 3)
64705 int __request_module(bool wait, const char *name, ...);
64706+extern __printf(3, 4)
64707+int ___request_module(bool wait, char *param_name, const char *name, ...);
64708 #define request_module(mod...) __request_module(true, mod)
64709 #define request_module_nowait(mod...) __request_module(false, mod)
64710 #define try_then_request_module(x, mod...) \
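
The new ___request_module() carries __printf(3, 4): argument 3 is the format string and checking starts at argument 4, so a mismatched module-name format fails at compile time. A standalone illustration of what that annotation buys (demo_request is hypothetical):

	#include <stdio.h>
	#include <stdarg.h>

	__attribute__((format(printf, 3, 4)))
	static int demo_request(int wait, const char *param_name, const char *fmt, ...)
	{
		va_list ap;
		int n;

		va_start(ap, fmt);
		n = vprintf(fmt, ap);
		va_end(ap);
		return n;
	}

	int main(void)
	{
		demo_request(1, "param", "module-%d\n", 42);	/* ok */
		/* demo_request(1, "param", "module-%d\n", "x");  -- would warn */
		return 0;
	}
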
64711diff --git a/include/linux/kobject.h b/include/linux/kobject.h
64712index 1e57449..4fede7b 100644
64713--- a/include/linux/kobject.h
64714+++ b/include/linux/kobject.h
64715@@ -111,7 +111,7 @@ struct kobj_type {
64716 struct attribute **default_attrs;
64717 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
64718 const void *(*namespace)(struct kobject *kobj);
64719-};
64720+} __do_const;
64721
64722 struct kobj_uevent_env {
64723 char *envp[UEVENT_NUM_ENVP];
64724diff --git a/include/linux/kref.h b/include/linux/kref.h
64725index 65af688..0592677 100644
64726--- a/include/linux/kref.h
64727+++ b/include/linux/kref.h
64728@@ -64,7 +64,7 @@ static inline void kref_get(struct kref *kref)
64729 static inline int kref_sub(struct kref *kref, unsigned int count,
64730 void (*release)(struct kref *kref))
64731 {
64732- WARN_ON(release == NULL);
64733+ BUG_ON(release == NULL);
64734
64735 if (atomic_sub_and_test((int) count, &kref->refcount)) {
64736 release(kref);
64737diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
64738index ecc5543..0e96bcc 100644
64739--- a/include/linux/kvm_host.h
64740+++ b/include/linux/kvm_host.h
64741@@ -403,7 +403,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
64742 int __must_check vcpu_load(struct kvm_vcpu *vcpu);
64743 void vcpu_put(struct kvm_vcpu *vcpu);
64744
64745-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
64746+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
64747 struct module *module);
64748 void kvm_exit(void);
64749
64750@@ -558,7 +558,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
64751 struct kvm_guest_debug *dbg);
64752 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
64753
64754-int kvm_arch_init(void *opaque);
64755+int kvm_arch_init(const void *opaque);
64756 void kvm_arch_exit(void);
64757
64758 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
64759diff --git a/include/linux/libata.h b/include/linux/libata.h
64760index e931c9a..7aa8f6f 100644
64761--- a/include/linux/libata.h
64762+++ b/include/linux/libata.h
64763@@ -915,7 +915,7 @@ struct ata_port_operations {
64764 * fields must be pointers.
64765 */
64766 const struct ata_port_operations *inherits;
64767-};
64768+} __do_const;
64769
64770 struct ata_port_info {
64771 unsigned long flags;
64772diff --git a/include/linux/list.h b/include/linux/list.h
64773index cc6d2aa..71febca 100644
64774--- a/include/linux/list.h
64775+++ b/include/linux/list.h
64776@@ -112,6 +112,9 @@ extern void __list_del_entry(struct list_head *entry);
64777 extern void list_del(struct list_head *entry);
64778 #endif
64779
64780+extern void pax_list_add_tail(struct list_head *new, struct list_head *head);
64781+extern void pax_list_del(struct list_head *entry);
64782+
64783 /**
64784 * list_replace - replace old entry by new one
64785 * @old : the element to be replaced
64786diff --git a/include/linux/mm.h b/include/linux/mm.h
64787index 280dae5..39046ec 100644
64788--- a/include/linux/mm.h
64789+++ b/include/linux/mm.h
64790@@ -101,6 +101,11 @@ extern unsigned int kobjsize(const void *objp);
64791 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
64792 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
64793 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
64794+
64795+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64796+#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
64797+#endif
64798+
64799 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
64800
64801 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
64802@@ -1039,34 +1044,6 @@ int set_page_dirty(struct page *page);
64803 int set_page_dirty_lock(struct page *page);
64804 int clear_page_dirty_for_io(struct page *page);
64805
64806-/* Is the vma a continuation of the stack vma above it? */
64807-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
64808-{
64809- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
64810-}
64811-
64812-static inline int stack_guard_page_start(struct vm_area_struct *vma,
64813- unsigned long addr)
64814-{
64815- return (vma->vm_flags & VM_GROWSDOWN) &&
64816- (vma->vm_start == addr) &&
64817- !vma_growsdown(vma->vm_prev, addr);
64818-}
64819-
64820-/* Is the vma a continuation of the stack vma below it? */
64821-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
64822-{
64823- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
64824-}
64825-
64826-static inline int stack_guard_page_end(struct vm_area_struct *vma,
64827- unsigned long addr)
64828-{
64829- return (vma->vm_flags & VM_GROWSUP) &&
64830- (vma->vm_end == addr) &&
64831- !vma_growsup(vma->vm_next, addr);
64832-}
64833-
64834 extern pid_t
64835 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
64836
64837@@ -1166,6 +1143,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
64838 }
64839 #endif
64840
64841+#ifdef CONFIG_MMU
64842+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
64843+#else
64844+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
64845+{
64846+ return __pgprot(0);
64847+}
64848+#endif
64849+
64850 int vma_wants_writenotify(struct vm_area_struct *vma);
64851
64852 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
64853@@ -1184,8 +1170,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
64854 {
64855 return 0;
64856 }
64857+
64858+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
64859+ unsigned long address)
64860+{
64861+ return 0;
64862+}
64863 #else
64864 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
64865+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
64866 #endif
64867
64868 #ifdef __PAGETABLE_PMD_FOLDED
64869@@ -1194,8 +1187,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
64870 {
64871 return 0;
64872 }
64873+
64874+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
64875+ unsigned long address)
64876+{
64877+ return 0;
64878+}
64879 #else
64880 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
64881+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
64882 #endif
64883
64884 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
64885@@ -1213,11 +1213,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
64886 NULL: pud_offset(pgd, address);
64887 }
64888
64889+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
64890+{
64891+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
64892+ NULL: pud_offset(pgd, address);
64893+}
64894+
64895 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
64896 {
64897 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
64898 NULL: pmd_offset(pud, address);
64899 }
64900+
64901+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
64902+{
64903+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
64904+ NULL: pmd_offset(pud, address);
64905+}
64906 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
64907
64908 #if USE_SPLIT_PTLOCKS
64909@@ -1447,6 +1459,7 @@ extern unsigned long do_mmap_pgoff(struct file *, unsigned long,
64910 unsigned long, unsigned long,
64911 unsigned long, unsigned long);
64912 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
64913+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
64914
64915 /* These take the mm semaphore themselves */
64916 extern unsigned long vm_brk(unsigned long, unsigned long);
64917@@ -1510,6 +1523,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
64918 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
64919 struct vm_area_struct **pprev);
64920
64921+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
64922+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
64923+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
64924+
64925 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
64926 NULL if none. Assume start_addr < end_addr. */
64927 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
64928@@ -1538,15 +1555,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
64929 return vma;
64930 }
64931
64932-#ifdef CONFIG_MMU
64933-pgprot_t vm_get_page_prot(unsigned long vm_flags);
64934-#else
64935-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
64936-{
64937- return __pgprot(0);
64938-}
64939-#endif
64940-
64941 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
64942 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
64943 unsigned long pfn, unsigned long size, pgprot_t);
64944@@ -1652,7 +1660,7 @@ extern int unpoison_memory(unsigned long pfn);
64945 extern int sysctl_memory_failure_early_kill;
64946 extern int sysctl_memory_failure_recovery;
64947 extern void shake_page(struct page *p, int access);
64948-extern atomic_long_t mce_bad_pages;
64949+extern atomic_long_unchecked_t mce_bad_pages;
64950 extern int soft_offline_page(struct page *page, int flags);
64951
64952 extern void dump_page(struct page *page);
64953@@ -1683,5 +1691,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
64954 static inline bool page_is_guard(struct page *page) { return false; }
64955 #endif /* CONFIG_DEBUG_PAGEALLOC */
64956
64957+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
64958+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
64959+#else
64960+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
64961+#endif
64962+
64963 #endif /* __KERNEL__ */
64964 #endif /* _LINUX_MM_H */
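
The new __pud_alloc_kernel()/__pmd_alloc_kernel() pair mirrors the user-mm allocators so KERNEXEC can populate kernel page tables, which need different protection handling, through the same folded/non-folded logic. A hedged sketch of an assumed walk over init_mm using them:

	static pte_t *example_walk_kernel(unsigned long address)
	{
		pgd_t *pgd = pgd_offset_k(address);
		pud_t *pud = pud_alloc_kernel(&init_mm, pgd, address);
		pmd_t *pmd;

		if (!pud)
			return NULL;
		pmd = pmd_alloc_kernel(&init_mm, pud, address);
		if (!pmd)
			return NULL;
		return pte_offset_kernel(pmd, address);
	}
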
64965diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
64966index 31f8a3a..499f1db 100644
64967--- a/include/linux/mm_types.h
64968+++ b/include/linux/mm_types.h
64969@@ -275,6 +275,8 @@ struct vm_area_struct {
64970 #ifdef CONFIG_NUMA
64971 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
64972 #endif
64973+
64974+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
64975 };
64976
64977 struct core_thread {
64978@@ -348,7 +350,7 @@ struct mm_struct {
64979 unsigned long def_flags;
64980 unsigned long nr_ptes; /* Page table pages */
64981 unsigned long start_code, end_code, start_data, end_data;
64982- unsigned long start_brk, brk, start_stack;
64983+ unsigned long brk_gap, start_brk, brk, start_stack;
64984 unsigned long arg_start, arg_end, env_start, env_end;
64985
64986 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
64987@@ -399,6 +401,24 @@ struct mm_struct {
64988 struct cpumask cpumask_allocation;
64989 #endif
64990 struct uprobes_state uprobes_state;
64991+
64992+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
64993+ unsigned long pax_flags;
64994+#endif
64995+
64996+#ifdef CONFIG_PAX_DLRESOLVE
64997+ unsigned long call_dl_resolve;
64998+#endif
64999+
65000+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
65001+ unsigned long call_syscall;
65002+#endif
65003+
65004+#ifdef CONFIG_PAX_ASLR
65005+ unsigned long delta_mmap; /* randomized offset */
65006+ unsigned long delta_stack; /* randomized offset */
65007+#endif
65008+
65009 };
65010
65011 static inline void mm_init_cpumask(struct mm_struct *mm)
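
delta_mmap and delta_stack are chosen once per execve() and then folded into layout decisions. An illustrative (assumed) consumer, modeled on the arch_pick_mmap_layout() changes elsewhere in this patch:

	static void example_pick_mmap_base(struct mm_struct *mm)
	{
		mm->mmap_base = TASK_UNMAPPED_BASE;

	#ifdef CONFIG_PAX_RANDMMAP
		if (mm->pax_flags & MF_PAX_RANDMMAP)
			mm->mmap_base += mm->delta_mmap;	/* randomized offset */
	#endif
	}
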
65012diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
65013index c5d5278..f0b68c8 100644
65014--- a/include/linux/mmiotrace.h
65015+++ b/include/linux/mmiotrace.h
65016@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
65017 /* Called from ioremap.c */
65018 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
65019 void __iomem *addr);
65020-extern void mmiotrace_iounmap(volatile void __iomem *addr);
65021+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
65022
65023 /* For anyone to insert markers. Remember trailing newline. */
65024 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
65025@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
65026 {
65027 }
65028
65029-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
65030+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
65031 {
65032 }
65033
65034diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
65035index a23923b..073fee4 100644
65036--- a/include/linux/mmzone.h
65037+++ b/include/linux/mmzone.h
65038@@ -421,7 +421,7 @@ struct zone {
65039 unsigned long flags; /* zone flags, see below */
65040
65041 /* Zone statistics */
65042- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
65043+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
65044
65045 /*
65046 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
65047diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
65048index fed3def..7cc3f93 100644
65049--- a/include/linux/mod_devicetable.h
65050+++ b/include/linux/mod_devicetable.h
65051@@ -12,7 +12,7 @@
65052 typedef unsigned long kernel_ulong_t;
65053 #endif
65054
65055-#define PCI_ANY_ID (~0)
65056+#define PCI_ANY_ID ((__u16)~0)
65057
65058 struct pci_device_id {
65059 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
65060@@ -139,7 +139,7 @@ struct usb_device_id {
65061 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
65062 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
65063
65064-#define HID_ANY_ID (~0)
65065+#define HID_ANY_ID (~0U)
65066 #define HID_BUS_ANY 0xffff
65067 #define HID_GROUP_ANY 0x0000
65068
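
The (~0) to (__u16)~0 / (~0U) changes matter because of integer promotion: a 16-bit wildcard field promotes to a non-negative int, while the plain (~0) is the int -1, so the comparison silently never matches. Standalone demonstration:

	#include <stdio.h>

	typedef unsigned short __u16;

	int main(void)
	{
		__u16 bus = 0xffff;	/* "match anything" wildcard value */

		/* bus promotes to int 0x0000ffff; (~0) is int 0xffffffff (-1) */
		printf("bus == ~0        : %s\n", bus == ~0        ? "match" : "NO match");
		printf("bus == (__u16)~0 : %s\n", bus == (__u16)~0 ? "match" : "NO match");
		return 0;
	}
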
65069diff --git a/include/linux/module.h b/include/linux/module.h
65070index 7760c6d..983ee18 100644
65071--- a/include/linux/module.h
65072+++ b/include/linux/module.h
65073@@ -17,9 +17,11 @@
65074 #include <linux/moduleparam.h>
65075 #include <linux/tracepoint.h>
65076 #include <linux/export.h>
65077+#include <linux/fs.h>
65078
65079 #include <linux/percpu.h>
65080 #include <asm/module.h>
65081+#include <asm/pgtable.h>
65082
65083 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
65084 #define MODULE_SIG_STRING "~Module signature appended~\n"
65085@@ -281,19 +283,16 @@ struct module
65086 int (*init)(void);
65087
65088 /* If this is non-NULL, vfree after init() returns */
65089- void *module_init;
65090+ void *module_init_rx, *module_init_rw;
65091
65092 /* Here is the actual code + data, vfree'd on unload. */
65093- void *module_core;
65094+ void *module_core_rx, *module_core_rw;
65095
65096 /* Here are the sizes of the init and core sections */
65097- unsigned int init_size, core_size;
65098+ unsigned int init_size_rw, core_size_rw;
65099
65100 /* The size of the executable code in each section. */
65101- unsigned int init_text_size, core_text_size;
65102-
65103- /* Size of RO sections of the module (text+rodata) */
65104- unsigned int init_ro_size, core_ro_size;
65105+ unsigned int init_size_rx, core_size_rx;
65106
65107 /* Arch-specific module values */
65108 struct mod_arch_specific arch;
65109@@ -349,6 +348,10 @@ struct module
65110 #ifdef CONFIG_EVENT_TRACING
65111 struct ftrace_event_call **trace_events;
65112 unsigned int num_trace_events;
65113+ struct file_operations trace_id;
65114+ struct file_operations trace_enable;
65115+ struct file_operations trace_format;
65116+ struct file_operations trace_filter;
65117 #endif
65118 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
65119 unsigned int num_ftrace_callsites;
65120@@ -396,16 +399,46 @@ bool is_module_address(unsigned long addr);
65121 bool is_module_percpu_address(unsigned long addr);
65122 bool is_module_text_address(unsigned long addr);
65123
65124+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
65125+{
65126+
65127+#ifdef CONFIG_PAX_KERNEXEC
65128+ if (ktla_ktva(addr) >= (unsigned long)start &&
65129+ ktla_ktva(addr) < (unsigned long)start + size)
65130+ return 1;
65131+#endif
65132+
65133+ return ((void *)addr >= start && (void *)addr < start + size);
65134+}
65135+
65136+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
65137+{
65138+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
65139+}
65140+
65141+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
65142+{
65143+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
65144+}
65145+
65146+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
65147+{
65148+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
65149+}
65150+
65151+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
65152+{
65153+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
65154+}
65155+
65156 static inline int within_module_core(unsigned long addr, struct module *mod)
65157 {
65158- return (unsigned long)mod->module_core <= addr &&
65159- addr < (unsigned long)mod->module_core + mod->core_size;
65160+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
65161 }
65162
65163 static inline int within_module_init(unsigned long addr, struct module *mod)
65164 {
65165- return (unsigned long)mod->module_init <= addr &&
65166- addr < (unsigned long)mod->module_init + mod->init_size;
65167+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
65168 }
65169
65170 /* Search for module by name: must hold module_mutex. */
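
With the core/init regions split into RX and RW halves, any address classification has to test both; the ktla_ktva() case in within_module_range() additionally accounts for the KERNEXEC alternate text mapping. A sketch of how a caller such as is_module_text_address() would narrow a lookup to executable bytes only (illustrative):

	static int example_is_module_text(unsigned long addr, struct module *mod)
	{
		return within_module_core_rx(addr, mod) ||
		       within_module_init_rx(addr, mod);
	}
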
65171diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
65172index 560ca53..5ee8d73 100644
65173--- a/include/linux/moduleloader.h
65174+++ b/include/linux/moduleloader.h
65175@@ -23,11 +23,23 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
65176
65177 /* Allocator used for allocating struct module, core sections and init
65178 sections. Returns NULL on failure. */
65179-void *module_alloc(unsigned long size);
65180+void *module_alloc(unsigned long size) __size_overflow(1);
65181+
65182+#ifdef CONFIG_PAX_KERNEXEC
65183+void *module_alloc_exec(unsigned long size) __size_overflow(1);
65184+#else
65185+#define module_alloc_exec(x) module_alloc(x)
65186+#endif
65187
65188 /* Free memory returned from module_alloc. */
65189 void module_free(struct module *mod, void *module_region);
65190
65191+#ifdef CONFIG_PAX_KERNEXEC
65192+void module_free_exec(struct module *mod, void *module_region);
65193+#else
65194+#define module_free_exec(x, y) module_free((x), (y))
65195+#endif
65196+
65197 /*
65198 * Apply the given relocation to the (simplified) ELF. Return -error
65199 * or 0.
65200@@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
65201 unsigned int relsec,
65202 struct module *me)
65203 {
65204+#ifdef CONFIG_MODULES
65205 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
65206+#endif
65207 return -ENOEXEC;
65208 }
65209 #endif
65210@@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
65211 unsigned int relsec,
65212 struct module *me)
65213 {
65214+#ifdef CONFIG_MODULES
65215 printk(KERN_ERR "module %s: RELA relocation unsupported\n", me->name);
65216+#endif
65217 return -ENOEXEC;
65218 }
65219 #endif
65220diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
65221index d6a5806..7c13347 100644
65222--- a/include/linux/moduleparam.h
65223+++ b/include/linux/moduleparam.h
65224@@ -286,7 +286,7 @@ static inline void __kernel_param_unlock(void)
65225 * @len is usually just sizeof(string).
65226 */
65227 #define module_param_string(name, string, len, perm) \
65228- static const struct kparam_string __param_string_##name \
65229+ static const struct kparam_string __param_string_##name __used \
65230 = { len, string }; \
65231 __module_param_call(MODULE_PARAM_PREFIX, name, \
65232 &param_ops_string, \
65233@@ -425,7 +425,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
65234 */
65235 #define module_param_array_named(name, array, type, nump, perm) \
65236 param_check_##type(name, &(array)[0]); \
65237- static const struct kparam_array __param_arr_##name \
65238+ static const struct kparam_array __param_arr_##name __used \
65239 = { .max = ARRAY_SIZE(array), .num = nump, \
65240 .ops = &param_ops_##type, \
65241 .elemsize = sizeof(array[0]), .elem = array }; \
65242diff --git a/include/linux/namei.h b/include/linux/namei.h
65243index 4bf19d8..5268cea 100644
65244--- a/include/linux/namei.h
65245+++ b/include/linux/namei.h
65246@@ -18,7 +18,7 @@ struct nameidata {
65247 unsigned seq;
65248 int last_type;
65249 unsigned depth;
65250- char *saved_names[MAX_NESTED_LINKS + 1];
65251+ const char *saved_names[MAX_NESTED_LINKS + 1];
65252 };
65253
65254 /*
65255@@ -83,12 +83,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
65256
65257 extern void nd_jump_link(struct nameidata *nd, struct path *path);
65258
65259-static inline void nd_set_link(struct nameidata *nd, char *path)
65260+static inline void nd_set_link(struct nameidata *nd, const char *path)
65261 {
65262 nd->saved_names[nd->depth] = path;
65263 }
65264
65265-static inline char *nd_get_link(struct nameidata *nd)
65266+static inline const char *nd_get_link(const struct nameidata *nd)
65267 {
65268 return nd->saved_names[nd->depth];
65269 }
65270diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
65271index a848ffc..3bbbaee 100644
65272--- a/include/linux/netdevice.h
65273+++ b/include/linux/netdevice.h
65274@@ -999,6 +999,7 @@ struct net_device_ops {
65275 struct net_device *dev,
65276 int idx);
65277 };
65278+typedef struct net_device_ops __no_const net_device_ops_no_const;
65279
65280 /*
65281 * The DEVICE structure.
65282@@ -1059,7 +1060,7 @@ struct net_device {
65283 int iflink;
65284
65285 struct net_device_stats stats;
65286- atomic_long_t rx_dropped; /* dropped packets by core network
65287+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
65288 * Do not use this in drivers.
65289 */
65290
65291diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
65292index 7958e84..ed74d7a 100644
65293--- a/include/linux/netfilter/ipset/ip_set.h
65294+++ b/include/linux/netfilter/ipset/ip_set.h
65295@@ -98,7 +98,7 @@ struct ip_set_type_variant {
65296 /* Return true if "b" set is the same as "a"
65297 * according to the create set parameters */
65298 bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
65299-};
65300+} __do_const;
65301
65302 /* The core set type structure */
65303 struct ip_set_type {
65304diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
65305index 4966dde..7d8ce06 100644
65306--- a/include/linux/netfilter/nfnetlink.h
65307+++ b/include/linux/netfilter/nfnetlink.h
65308@@ -16,7 +16,7 @@ struct nfnl_callback {
65309 const struct nlattr * const cda[]);
65310 const struct nla_policy *policy; /* netlink attribute policy */
65311 const u_int16_t attr_count; /* number of nlattr's */
65312-};
65313+} __do_const;
65314
65315 struct nfnetlink_subsystem {
65316 const char *name;
65317diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
65318new file mode 100644
65319index 0000000..33f4af8
65320--- /dev/null
65321+++ b/include/linux/netfilter/xt_gradm.h
65322@@ -0,0 +1,9 @@
65323+#ifndef _LINUX_NETFILTER_XT_GRADM_H
65324+#define _LINUX_NETFILTER_XT_GRADM_H 1
65325+
65326+struct xt_gradm_mtinfo {
65327+ __u16 flags;
65328+ __u16 invflags;
65329+};
65330+
65331+#endif
65332diff --git a/include/linux/notifier.h b/include/linux/notifier.h
65333index d65746e..62e72c2 100644
65334--- a/include/linux/notifier.h
65335+++ b/include/linux/notifier.h
65336@@ -51,7 +51,8 @@ struct notifier_block {
65337 int (*notifier_call)(struct notifier_block *, unsigned long, void *);
65338 struct notifier_block __rcu *next;
65339 int priority;
65340-};
65341+} __do_const;
65342+typedef struct notifier_block __no_const notifier_block_no_const;
65343
65344 struct atomic_notifier_head {
65345 spinlock_t lock;
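
__do_const and the _no_const typedef are the two halves of the PaX constify scheme: ops-style structures become read-only after init, and the typedef is the opt-out for the rare instance that must stay writable. Sketch (assumes the constify gcc plugin is active):

	static int demo_cb(struct notifier_block *nb, unsigned long event, void *data)
	{
		return 0;	/* NOTIFY_DONE */
	}

	static struct notifier_block demo_nb = {	/* constified: read-only after init */
		.notifier_call = demo_cb,
	};

	static notifier_block_no_const runtime_nb;	/* escape hatch: stays writable */
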
65346diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
65347index a4c5624..79d6d88 100644
65348--- a/include/linux/oprofile.h
65349+++ b/include/linux/oprofile.h
65350@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
65351 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
65352 char const * name, ulong * val);
65353
65354-/** Create a file for read-only access to an atomic_t. */
65355+/** Create a file for read-only access to an atomic_unchecked_t. */
65356 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
65357- char const * name, atomic_t * val);
65358+ char const * name, atomic_unchecked_t * val);
65359
65360 /** create a directory */
65361 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
65362diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
65363index 6bfb2faa..1204767 100644
65364--- a/include/linux/perf_event.h
65365+++ b/include/linux/perf_event.h
65366@@ -328,8 +328,8 @@ struct perf_event {
65367
65368 enum perf_event_active_state state;
65369 unsigned int attach_state;
65370- local64_t count;
65371- atomic64_t child_count;
65372+ local64_t count; /* PaX: fix it one day */
65373+ atomic64_unchecked_t child_count;
65374
65375 /*
65376 * These are the total time in nanoseconds that the event
65377@@ -380,8 +380,8 @@ struct perf_event {
65378 * These accumulate total time (in nanoseconds) that children
65379 * events have been enabled and running, respectively.
65380 */
65381- atomic64_t child_total_time_enabled;
65382- atomic64_t child_total_time_running;
65383+ atomic64_unchecked_t child_total_time_enabled;
65384+ atomic64_unchecked_t child_total_time_running;
65385
65386 /*
65387 * Protect attach/detach and child_list:
65388diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
65389index ad1a427..6419649 100644
65390--- a/include/linux/pipe_fs_i.h
65391+++ b/include/linux/pipe_fs_i.h
65392@@ -45,9 +45,9 @@ struct pipe_buffer {
65393 struct pipe_inode_info {
65394 wait_queue_head_t wait;
65395 unsigned int nrbufs, curbuf, buffers;
65396- unsigned int readers;
65397- unsigned int writers;
65398- unsigned int waiting_writers;
65399+ atomic_t readers;
65400+ atomic_t writers;
65401+ atomic_t waiting_writers;
65402 unsigned int r_counter;
65403 unsigned int w_counter;
65404 struct page *tmp_page;
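
With readers/writers/waiting_writers converted to atomic_t, every open-coded counter access in fs/pipe.c becomes an atomic op (the matching hunks are elsewhere in this patch). Illustration of the call-site change:

	static void example_pipe_read_open(struct pipe_inode_info *pipe)
	{
		atomic_inc(&pipe->readers);		/* was: pipe->readers++; */
	}

	static int example_pipe_has_writers(struct pipe_inode_info *pipe)
	{
		return atomic_read(&pipe->writers);	/* was: return pipe->writers; */
	}
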
65405diff --git a/include/linux/platform_data/usb-ehci-s5p.h b/include/linux/platform_data/usb-ehci-s5p.h
65406index 5f28cae..3d23723 100644
65407--- a/include/linux/platform_data/usb-ehci-s5p.h
65408+++ b/include/linux/platform_data/usb-ehci-s5p.h
65409@@ -14,7 +14,7 @@
65410 struct s5p_ehci_platdata {
65411 int (*phy_init)(struct platform_device *pdev, int type);
65412 int (*phy_exit)(struct platform_device *pdev, int type);
65413-};
65414+} __no_const;
65415
65416 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
65417
65418diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
65419index f271860..6b3bec5 100644
65420--- a/include/linux/pm_runtime.h
65421+++ b/include/linux/pm_runtime.h
65422@@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
65423
65424 static inline void pm_runtime_mark_last_busy(struct device *dev)
65425 {
65426- ACCESS_ONCE(dev->power.last_busy) = jiffies;
65427+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
65428 }
65429
65430 #else /* !CONFIG_PM_RUNTIME */
65431diff --git a/include/linux/poison.h b/include/linux/poison.h
65432index 2110a81..13a11bb 100644
65433--- a/include/linux/poison.h
65434+++ b/include/linux/poison.h
65435@@ -19,8 +19,8 @@
65436 * under normal circumstances, used to verify that nobody uses
65437 * non-initialized list entries.
65438 */
65439-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
65440-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
65441+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
65442+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
65443
65444 /********** include/linux/timer.h **********/
65445 /*
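
The poison constants move from low addresses, which userland could conceivably map and so turn a stale list pointer into an attacker-controlled read, to the top of the 32-bit address space, where a dereference always faults. Sketch of where the constants bite, mirroring the kernel's list_del() (demo types, not the real list API):

	struct list_head_demo { struct list_head_demo *next, *prev; };

	#define LIST_POISON1_DEMO ((void *) (long)0xFFFFFF01)
	#define LIST_POISON2_DEMO ((void *) (long)0xFFFFFF02)

	static void example_list_del(struct list_head_demo *entry)
	{
		entry->prev->next = entry->next;
		entry->next->prev = entry->prev;
		entry->next = LIST_POISON1_DEMO;	/* later ->next walks now fault */
		entry->prev = LIST_POISON2_DEMO;	/* later ->prev walks now fault */
	}
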
65446diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
65447index 4a496eb..d9c5659 100644
65448--- a/include/linux/power/smartreflex.h
65449+++ b/include/linux/power/smartreflex.h
65450@@ -238,7 +238,7 @@ struct omap_sr_class_data {
65451 int (*notify)(struct omap_sr *sr, u32 status);
65452 u8 notify_flags;
65453 u8 class_type;
65454-};
65455+} __do_const;
65456
65457 /**
65458 * struct omap_sr_nvalue_table - Smartreflex n-target value info
65459diff --git a/include/linux/printk.h b/include/linux/printk.h
65460index 9afc01e..92c32e8 100644
65461--- a/include/linux/printk.h
65462+++ b/include/linux/printk.h
65463@@ -101,6 +101,8 @@ void early_printk(const char *fmt, ...);
65464 extern int printk_needs_cpu(int cpu);
65465 extern void printk_tick(void);
65466
65467+extern int kptr_restrict;
65468+
65469 #ifdef CONFIG_PRINTK
65470 asmlinkage __printf(5, 0)
65471 int vprintk_emit(int facility, int level,
65472@@ -135,7 +137,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
65473
65474 extern int printk_delay_msec;
65475 extern int dmesg_restrict;
65476-extern int kptr_restrict;
65477
65478 void log_buf_kexec_setup(void);
65479 void __init setup_log_buf(int early);
65480diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
65481index 3fd2e87..75db910 100644
65482--- a/include/linux/proc_fs.h
65483+++ b/include/linux/proc_fs.h
65484@@ -155,6 +155,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
65485 return proc_create_data(name, mode, parent, proc_fops, NULL);
65486 }
65487
65488+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
65489+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
65490+{
65491+#ifdef CONFIG_GRKERNSEC_PROC_USER
65492+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
65493+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65494+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
65495+#else
65496+ return proc_create_data(name, mode, parent, proc_fops, NULL);
65497+#endif
65498+}
65499+
65500 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
65501 umode_t mode, struct proc_dir_entry *base,
65502 read_proc_t *read_proc, void * data)
65503diff --git a/include/linux/random.h b/include/linux/random.h
65504index 6330ed4..419c6c3 100644
65505--- a/include/linux/random.h
65506+++ b/include/linux/random.h
65507@@ -30,12 +30,17 @@ void srandom32(u32 seed);
65508
65509 u32 prandom32(struct rnd_state *);
65510
65511+static inline unsigned long pax_get_random_long(void)
65512+{
65513+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
65514+}
65515+
65516 /*
65517 * Handle minimum values for seeds
65518 */
65519 static inline u32 __seed(u32 x, u32 m)
65520 {
65521- return (x < m) ? x + m : x;
65522+ return (x <= m) ? x + m + 1 : x;
65523 }
65524
65525 /**
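
Two things change here: __seed() is tightened so the result always strictly exceeds the minimum m (the old form could return exactly m), and pax_get_random_long() composes two 32-bit draws into a fully random long on 64-bit. A userspace model of the latter (rand32 is a stand-in xorshift, demo quality only):

	#include <stdio.h>

	static unsigned int rand32(void)
	{
		static unsigned int state = 0x12345678u;

		state ^= state << 13;	/* xorshift32: fine for a demo, */
		state ^= state >> 17;	/* not a substitute for random32() */
		state ^= state << 5;
		return state;
	}

	static unsigned long long get_random_long_demo(void)
	{
		/* low 32 bits + high 32 bits, as pax_get_random_long()
		 * does when sizeof(long) > 4 */
		return rand32() + ((unsigned long long)rand32() << 32);
	}

	int main(void)
	{
		printf("%#llx\n", get_random_long_demo());
		return 0;
	}
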
65526diff --git a/include/linux/reboot.h b/include/linux/reboot.h
65527index 23b3630..e1bc12b 100644
65528--- a/include/linux/reboot.h
65529+++ b/include/linux/reboot.h
65530@@ -18,9 +18,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
65531 * Architecture-specific implementations of sys_reboot commands.
65532 */
65533
65534-extern void machine_restart(char *cmd);
65535-extern void machine_halt(void);
65536-extern void machine_power_off(void);
65537+extern void machine_restart(char *cmd) __noreturn;
65538+extern void machine_halt(void) __noreturn;
65539+extern void machine_power_off(void) __noreturn;
65540
65541 extern void machine_shutdown(void);
65542 struct pt_regs;
65543@@ -31,9 +31,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
65544 */
65545
65546 extern void kernel_restart_prepare(char *cmd);
65547-extern void kernel_restart(char *cmd);
65548-extern void kernel_halt(void);
65549-extern void kernel_power_off(void);
65550+extern void kernel_restart(char *cmd) __noreturn;
65551+extern void kernel_halt(void) __noreturn;
65552+extern void kernel_power_off(void) __noreturn;
65553
65554 extern int C_A_D; /* for sysctl */
65555 void ctrl_alt_del(void);
65556@@ -47,7 +47,7 @@ extern int orderly_poweroff(bool force);
65557 * Emergency restart, callable from an interrupt handler.
65558 */
65559
65560-extern void emergency_restart(void);
65561+extern void emergency_restart(void) __noreturn;
65562 #include <asm/emergency-restart.h>
65563
65564 #endif /* _LINUX_REBOOT_H */
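
Marking the halt/restart entry points __noreturn lets the compiler treat everything after a call as dead code and silences spurious missing-return warnings in callers. A standalone miniature (halt_demo is hypothetical):

	#include <stdio.h>
	#include <stdlib.h>

	__attribute__((noreturn))
	static void halt_demo(const char *why)
	{
		fprintf(stderr, "halting: %s\n", why);
		exit(1);
	}

	static int pick(int ok)
	{
		if (ok)
			return 0;
		halt_demo("bad state");
		/* no return needed: halt_demo() never comes back */
	}

	int main(void)
	{
		return pick(1);
	}
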
65565diff --git a/include/linux/regset.h b/include/linux/regset.h
65566index 8e0c9fe..fdb64bc 100644
65567--- a/include/linux/regset.h
65568+++ b/include/linux/regset.h
65569@@ -161,7 +161,7 @@ struct user_regset {
65570 unsigned int align;
65571 unsigned int bias;
65572 unsigned int core_note_type;
65573-};
65574+} __do_const;
65575
65576 /**
65577 * struct user_regset_view - available regsets
65578diff --git a/include/linux/relay.h b/include/linux/relay.h
65579index 91cacc3..b55ff74 100644
65580--- a/include/linux/relay.h
65581+++ b/include/linux/relay.h
65582@@ -160,7 +160,7 @@ struct rchan_callbacks
65583 * The callback should return 0 if successful, negative if not.
65584 */
65585 int (*remove_buf_file)(struct dentry *dentry);
65586-};
65587+} __no_const;
65588
65589 /*
65590 * CONFIG_RELAY kernel API, kernel/relay.c
65591diff --git a/include/linux/rio.h b/include/linux/rio.h
65592index a3e7842..d973ca6 100644
65593--- a/include/linux/rio.h
65594+++ b/include/linux/rio.h
65595@@ -339,7 +339,7 @@ struct rio_ops {
65596 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
65597 u64 rstart, u32 size, u32 flags);
65598 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
65599-};
65600+} __no_const;
65601
65602 #define RIO_RESOURCE_MEM 0x00000100
65603 #define RIO_RESOURCE_DOORBELL 0x00000200
65604diff --git a/include/linux/rmap.h b/include/linux/rmap.h
65605index bfe1f47..6a33ee3 100644
65606--- a/include/linux/rmap.h
65607+++ b/include/linux/rmap.h
65608@@ -134,8 +134,8 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
65609 void anon_vma_init(void); /* create anon_vma_cachep */
65610 int anon_vma_prepare(struct vm_area_struct *);
65611 void unlink_anon_vmas(struct vm_area_struct *);
65612-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
65613-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
65614+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
65615+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
65616
65617 static inline void anon_vma_merge(struct vm_area_struct *vma,
65618 struct vm_area_struct *next)
65619diff --git a/include/linux/sched.h b/include/linux/sched.h
65620index 0dd42a0..f5dc099 100644
65621--- a/include/linux/sched.h
65622+++ b/include/linux/sched.h
65623@@ -61,6 +61,7 @@ struct bio_list;
65624 struct fs_struct;
65625 struct perf_event_context;
65626 struct blk_plug;
65627+struct linux_binprm;
65628
65629 /*
65630 * List of flags we want to share for kernel threads,
65631@@ -344,10 +345,23 @@ struct user_namespace;
65632 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
65633
65634 extern int sysctl_max_map_count;
65635+extern unsigned long sysctl_heap_stack_gap;
65636
65637 #include <linux/aio.h>
65638
65639 #ifdef CONFIG_MMU
65640+
65641+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
65642+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
65643+#else
65644+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
65645+{
65646+ return 0;
65647+}
65648+#endif
65649+
65650+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
65651+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
65652 extern void arch_pick_mmap_layout(struct mm_struct *mm);
65653 extern unsigned long
65654 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
65655@@ -614,6 +628,17 @@ struct signal_struct {
65656 #ifdef CONFIG_TASKSTATS
65657 struct taskstats *stats;
65658 #endif
65659+
65660+#ifdef CONFIG_GRKERNSEC
65661+ u32 curr_ip;
65662+ u32 saved_ip;
65663+ u32 gr_saddr;
65664+ u32 gr_daddr;
65665+ u16 gr_sport;
65666+ u16 gr_dport;
65667+ u8 used_accept:1;
65668+#endif
65669+
65670 #ifdef CONFIG_AUDIT
65671 unsigned audit_tty;
65672 struct tty_audit_buf *tty_audit_buf;
65673@@ -691,6 +716,11 @@ struct user_struct {
65674 struct key *session_keyring; /* UID's default session keyring */
65675 #endif
65676
65677+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
65678+ unsigned int banned;
65679+ unsigned long ban_expires;
65680+#endif
65681+
65682 /* Hash table maintenance information */
65683 struct hlist_node uidhash_node;
65684 kuid_t uid;
65685@@ -1312,8 +1342,8 @@ struct task_struct {
65686 struct list_head thread_group;
65687
65688 struct completion *vfork_done; /* for vfork() */
65689- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
65690- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
65691+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
65692+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
65693
65694 cputime_t utime, stime, utimescaled, stimescaled;
65695 cputime_t gtime;
65696@@ -1329,11 +1359,6 @@ struct task_struct {
65697 struct task_cputime cputime_expires;
65698 struct list_head cpu_timers[3];
65699
65700-/* process credentials */
65701- const struct cred __rcu *real_cred; /* objective and real subjective task
65702- * credentials (COW) */
65703- const struct cred __rcu *cred; /* effective (overridable) subjective task
65704- * credentials (COW) */
65705 char comm[TASK_COMM_LEN]; /* executable name excluding path
65706 - access with [gs]et_task_comm (which lock
65707 it with task_lock())
65708@@ -1350,6 +1375,10 @@ struct task_struct {
65709 #endif
65710 /* CPU-specific state of this task */
65711 struct thread_struct thread;
65712+/* thread_info moved to task_struct */
65713+#ifdef CONFIG_X86
65714+ struct thread_info tinfo;
65715+#endif
65716 /* filesystem information */
65717 struct fs_struct *fs;
65718 /* open file information */
65719@@ -1423,6 +1452,10 @@ struct task_struct {
65720 gfp_t lockdep_reclaim_gfp;
65721 #endif
65722
65723+/* process credentials */
65724+ const struct cred __rcu *real_cred; /* objective and real subjective task
65725+ * credentials (COW) */
65726+
65727 /* journalling filesystem info */
65728 void *journal_info;
65729
65730@@ -1461,6 +1494,10 @@ struct task_struct {
65731 /* cg_list protected by css_set_lock and tsk->alloc_lock */
65732 struct list_head cg_list;
65733 #endif
65734+
65735+ const struct cred __rcu *cred; /* effective (overridable) subjective task
65736+ * credentials (COW) */
65737+
65738 #ifdef CONFIG_FUTEX
65739 struct robust_list_head __user *robust_list;
65740 #ifdef CONFIG_COMPAT
65741@@ -1548,8 +1585,75 @@ struct task_struct {
65742 #ifdef CONFIG_UPROBES
65743 struct uprobe_task *utask;
65744 #endif
65745+
65746+#ifdef CONFIG_GRKERNSEC
65747+ /* grsecurity */
65748+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65749+ u64 exec_id;
65750+#endif
65751+#ifdef CONFIG_GRKERNSEC_SETXID
65752+ const struct cred *delayed_cred;
65753+#endif
65754+ struct dentry *gr_chroot_dentry;
65755+ struct acl_subject_label *acl;
65756+ struct acl_role_label *role;
65757+ struct file *exec_file;
65758+ unsigned long brute_expires;
65759+ u16 acl_role_id;
65760+ /* is this the task that authenticated to the special role */
65761+ u8 acl_sp_role;
65762+ u8 is_writable;
65763+ u8 brute;
65764+ u8 gr_is_chrooted;
65765+#endif
65766+
65767 };
65768
65769+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
65770+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
65771+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
65772+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
65773+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
65774+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
65775+
65776+#ifdef CONFIG_PAX_SOFTMODE
65777+extern int pax_softmode;
65778+#endif
65779+
65780+extern int pax_check_flags(unsigned long *);
65781+
65782+/* if tsk != current then task_lock must be held on it */
65783+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
65784+static inline unsigned long pax_get_flags(struct task_struct *tsk)
65785+{
65786+ if (likely(tsk->mm))
65787+ return tsk->mm->pax_flags;
65788+ else
65789+ return 0UL;
65790+}
65791+
65792+/* if tsk != current then task_lock must be held on it */
65793+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
65794+{
65795+ if (likely(tsk->mm)) {
65796+ tsk->mm->pax_flags = flags;
65797+ return 0;
65798+ }
65799+ return -EINVAL;
65800+}
65801+#endif
65802+
65803+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
65804+extern void pax_set_initial_flags(struct linux_binprm *bprm);
65805+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
65806+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
65807+#endif
65808+
65809+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
65810+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
65811+extern void pax_report_refcount_overflow(struct pt_regs *regs);
65812+extern void check_object_size(const void *ptr, unsigned long n, bool to);
65813+
65814 /* Future-safe accessor for struct task_struct's cpus_allowed. */
65815 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
65816
65817@@ -2092,7 +2196,9 @@ void yield(void);
65818 extern struct exec_domain default_exec_domain;
65819
65820 union thread_union {
65821+#ifndef CONFIG_X86
65822 struct thread_info thread_info;
65823+#endif
65824 unsigned long stack[THREAD_SIZE/sizeof(long)];
65825 };
65826
65827@@ -2125,6 +2231,7 @@ extern struct pid_namespace init_pid_ns;
65828 */
65829
65830 extern struct task_struct *find_task_by_vpid(pid_t nr);
65831+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
65832 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
65833 struct pid_namespace *ns);
65834
65835@@ -2281,7 +2388,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
65836 extern void exit_itimers(struct signal_struct *);
65837 extern void flush_itimer_signals(void);
65838
65839-extern void do_group_exit(int);
65840+extern __noreturn void do_group_exit(int);
65841
65842 extern void daemonize(const char *, ...);
65843 extern int allow_signal(int);
65844@@ -2485,9 +2592,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
65845
65846 #endif
65847
65848-static inline int object_is_on_stack(void *obj)
65849+static inline int object_starts_on_stack(void *obj)
65850 {
65851- void *stack = task_stack_page(current);
65852+ const void *stack = task_stack_page(current);
65853
65854 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
65855 }
65856diff --git a/include/linux/security.h b/include/linux/security.h
65857index 05e88bd..5cda002 100644
65858--- a/include/linux/security.h
65859+++ b/include/linux/security.h
65860@@ -26,6 +26,7 @@
65861 #include <linux/capability.h>
65862 #include <linux/slab.h>
65863 #include <linux/err.h>
65864+#include <linux/grsecurity.h>
65865
65866 struct linux_binprm;
65867 struct cred;
65868diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
65869index 68a04a3..866e6a1 100644
65870--- a/include/linux/seq_file.h
65871+++ b/include/linux/seq_file.h
65872@@ -26,6 +26,9 @@ struct seq_file {
65873 struct mutex lock;
65874 const struct seq_operations *op;
65875 int poll_event;
65876+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65877+ u64 exec_id;
65878+#endif
65879 #ifdef CONFIG_USER_NS
65880 struct user_namespace *user_ns;
65881 #endif
65882@@ -38,6 +41,7 @@ struct seq_operations {
65883 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
65884 int (*show) (struct seq_file *m, void *v);
65885 };
65886+typedef struct seq_operations __no_const seq_operations_no_const;
65887
65888 #define SEQ_SKIP 1
65889
65890diff --git a/include/linux/shm.h b/include/linux/shm.h
65891index bcf8a6a..4d0af77 100644
65892--- a/include/linux/shm.h
65893+++ b/include/linux/shm.h
65894@@ -21,6 +21,10 @@ struct shmid_kernel /* private to the kernel */
65895
65896 /* The task created the shm object. NULL if the task is dead. */
65897 struct task_struct *shm_creator;
65898+#ifdef CONFIG_GRKERNSEC
65899+ time_t shm_createtime;
65900+ pid_t shm_lapid;
65901+#endif
65902 };
65903
65904 /* shm_mode upper byte flags */
65905diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
65906index 6a2c34e..a1f320f 100644
65907--- a/include/linux/skbuff.h
65908+++ b/include/linux/skbuff.h
65909@@ -577,7 +577,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
65910 extern struct sk_buff *__alloc_skb(unsigned int size,
65911 gfp_t priority, int flags, int node);
65912 extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
65913-static inline struct sk_buff *alloc_skb(unsigned int size,
65914+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
65915 gfp_t priority)
65916 {
65917 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
65918@@ -687,7 +687,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
65919 */
65920 static inline int skb_queue_empty(const struct sk_buff_head *list)
65921 {
65922- return list->next == (struct sk_buff *)list;
65923+ return list->next == (const struct sk_buff *)list;
65924 }
65925
65926 /**
65927@@ -700,7 +700,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
65928 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
65929 const struct sk_buff *skb)
65930 {
65931- return skb->next == (struct sk_buff *)list;
65932+ return skb->next == (const struct sk_buff *)list;
65933 }
65934
65935 /**
65936@@ -713,7 +713,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
65937 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
65938 const struct sk_buff *skb)
65939 {
65940- return skb->prev == (struct sk_buff *)list;
65941+ return skb->prev == (const struct sk_buff *)list;
65942 }
65943
65944 /**
65945@@ -1626,7 +1626,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
65946 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
65947 */
65948 #ifndef NET_SKB_PAD
65949-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
65950+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
65951 #endif
65952
65953 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
65954@@ -2204,7 +2204,7 @@ extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
65955 int noblock, int *err);
65956 extern unsigned int datagram_poll(struct file *file, struct socket *sock,
65957 struct poll_table_struct *wait);
65958-extern int skb_copy_datagram_iovec(const struct sk_buff *from,
65959+extern int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from,
65960 int offset, struct iovec *to,
65961 int size);
65962 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
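
The NET_SKB_PAD change above swaps a bare 32 for _AC(32,UL). The kernel's max() is a type-checking macro, so on configurations where L1_CACHE_BYTES expands to an unsigned long the mixed int/unsigned-long comparison draws a warning; token-pasting the UL suffix keeps both operands the same type. A minimal userspace sketch of that mechanism (the 64UL cache-line size and the simplified macros are assumptions for the demo, not the kernel's actual headers):

#include <stdio.h>

#define __AC(X, Y) (X##Y)
#define _AC(X, Y)  __AC(X, Y)   /* token-pastes the UL suffix, as in const.h */

/* simplified kernel-style max(): single evaluation plus a type check */
#define max(x, y) ({                                            \
	__typeof__(x) _mx = (x);                                \
	__typeof__(y) _my = (y);                                \
	(void)(&_mx == &_my);   /* warns if the types differ */ \
	_mx > _my ? _mx : _my; })

#define L1_CACHE_BYTES 64UL     /* assumption: an arch where this is UL */

int main(void)
{
	/* max(32, L1_CACHE_BYTES) would compare int against unsigned long
	 * and trip the pointer-type check; the patched operand does not: */
	unsigned long pad = max(_AC(32,UL), L1_CACHE_BYTES);
	printf("NET_SKB_PAD = %lu\n", pad);
	return 0;
}
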
65963diff --git a/include/linux/slab.h b/include/linux/slab.h
65964index 83d1a14..209e1a6 100644
65965--- a/include/linux/slab.h
65966+++ b/include/linux/slab.h
65967@@ -11,12 +11,20 @@
65968
65969 #include <linux/gfp.h>
65970 #include <linux/types.h>
65971+#include <linux/err.h>
65972
65973 /*
65974 * Flags to pass to kmem_cache_create().
65975 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
65976 */
65977 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
65978+
65979+#ifdef CONFIG_PAX_USERCOPY_SLABS
65980+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
65981+#else
65982+#define SLAB_USERCOPY 0x00000000UL
65983+#endif
65984+
65985 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
65986 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
65987 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
65988@@ -87,10 +95,13 @@
65989 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
65990 * Both make kfree a no-op.
65991 */
65992-#define ZERO_SIZE_PTR ((void *)16)
65993+#define ZERO_SIZE_PTR \
65994+({ \
65995+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
65996+ (void *)(-MAX_ERRNO-1L); \
65997+})
65998
65999-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
66000- (unsigned long)ZERO_SIZE_PTR)
66001+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
66002
66003 /*
66004 * Common fields provided in kmem_cache by all slab allocators
66005@@ -110,7 +121,7 @@ struct kmem_cache {
66006 unsigned int align; /* Alignment as calculated */
66007 unsigned long flags; /* Active flags on the slab */
66008 const char *name; /* Slab name for sysfs */
66009- int refcount; /* Use counter */
66010+ atomic_t refcount; /* Use counter */
66011 void (*ctor)(void *); /* Called on object slot creation */
66012 struct list_head list; /* List of all slab caches on the system */
66013 };
66014@@ -185,6 +196,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
66015 void kfree(const void *);
66016 void kzfree(const void *);
66017 size_t ksize(const void *);
66018+const char *check_heap_object(const void *ptr, unsigned long n);
66019+bool is_usercopy_object(const void *ptr);
66020
66021 /*
66022 * Allocator specific definitions. These are mainly used to establish optimized
66023@@ -323,7 +336,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
66024 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
66025 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
66026 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
66027-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
66028+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
66029 #define kmalloc_track_caller(size, flags) \
66030 __kmalloc_track_caller(size, flags, _RET_IP_)
66031 #else
66032@@ -343,7 +356,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
66033 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
66034 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
66035 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
66036-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
66037+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
66038 #define kmalloc_node_track_caller(size, flags, node) \
66039 __kmalloc_node_track_caller(size, flags, node, \
66040 _RET_IP_)
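
The slab.h hunks above move ZERO_SIZE_PTR from (void *)16 to one byte below the ERR_PTR range, which is what lets the rewritten ZERO_OR_NULL_PTR() fold NULL, ZERO_SIZE_PTR and every error pointer into a single unsigned comparison. A userspace sketch of the arithmetic, assuming MAX_ERRNO == 4095 as in include/linux/err.h:

#include <stdio.h>

#define MAX_ERRNO 4095
#define ZERO_SIZE_PTR ((void *)(-MAX_ERRNO-1L)) /* one byte below ERR_PTRs */
#define ZERO_OR_NULL_PTR(x) \
	((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)

int main(void)
{
	void *samples[] = {
		NULL,                   /* 0 - 1 wraps to ULONG_MAX: caught */
		ZERO_SIZE_PTR,          /* lower bound of the range: caught */
		(void *)-12L,           /* ERR_PTR(-ENOMEM): also caught    */
		(void *)0x1000,         /* an ordinary pointer: passes      */
	};
	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%p -> %d\n", samples[i],
		       (int)ZERO_OR_NULL_PTR(samples[i]));
	return 0;       /* prints 1 1 1 0 */
}
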
66041diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
66042index cc290f0..0ba60931 100644
66043--- a/include/linux/slab_def.h
66044+++ b/include/linux/slab_def.h
66045@@ -52,7 +52,7 @@ struct kmem_cache {
66046 /* 4) cache creation/removal */
66047 const char *name;
66048 struct list_head list;
66049- int refcount;
66050+ atomic_t refcount;
66051 int object_size;
66052 int align;
66053
66054@@ -68,10 +68,10 @@ struct kmem_cache {
66055 unsigned long node_allocs;
66056 unsigned long node_frees;
66057 unsigned long node_overflow;
66058- atomic_t allochit;
66059- atomic_t allocmiss;
66060- atomic_t freehit;
66061- atomic_t freemiss;
66062+ atomic_unchecked_t allochit;
66063+ atomic_unchecked_t allocmiss;
66064+ atomic_unchecked_t freehit;
66065+ atomic_unchecked_t freemiss;
66066
66067 /*
66068 * If debugging is enabled, then the allocator can add additional
66069@@ -104,11 +104,16 @@ struct cache_sizes {
66070 #ifdef CONFIG_ZONE_DMA
66071 struct kmem_cache *cs_dmacachep;
66072 #endif
66073+
66074+#ifdef CONFIG_PAX_USERCOPY_SLABS
66075+ struct kmem_cache *cs_usercopycachep;
66076+#endif
66077+
66078 };
66079 extern struct cache_sizes malloc_sizes[];
66080
66081 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
66082-void *__kmalloc(size_t size, gfp_t flags);
66083+void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
66084
66085 #ifdef CONFIG_TRACING
66086 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
66087@@ -145,6 +150,13 @@ found:
66088 cachep = malloc_sizes[i].cs_dmacachep;
66089 else
66090 #endif
66091+
66092+#ifdef CONFIG_PAX_USERCOPY_SLABS
66093+ if (flags & GFP_USERCOPY)
66094+ cachep = malloc_sizes[i].cs_usercopycachep;
66095+ else
66096+#endif
66097+
66098 cachep = malloc_sizes[i].cs_cachep;
66099
66100 ret = kmem_cache_alloc_trace(cachep, flags, size);
66101@@ -155,7 +167,7 @@ found:
66102 }
66103
66104 #ifdef CONFIG_NUMA
66105-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
66106+extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
66107 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
66108
66109 #ifdef CONFIG_TRACING
66110@@ -198,6 +210,13 @@ found:
66111 cachep = malloc_sizes[i].cs_dmacachep;
66112 else
66113 #endif
66114+
66115+#ifdef CONFIG_PAX_USERCOPY_SLABS
66116+ if (flags & GFP_USERCOPY)
66117+ cachep = malloc_sizes[i].cs_usercopycachep;
66118+ else
66119+#endif
66120+
66121 cachep = malloc_sizes[i].cs_cachep;
66122
66123 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
66124diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
66125index f28e14a..7831211 100644
66126--- a/include/linux/slob_def.h
66127+++ b/include/linux/slob_def.h
66128@@ -11,7 +11,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
66129 return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
66130 }
66131
66132-void *__kmalloc_node(size_t size, gfp_t flags, int node);
66133+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
66134
66135 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
66136 {
66137@@ -31,7 +31,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
66138 return __kmalloc_node(size, flags, NUMA_NO_NODE);
66139 }
66140
66141-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
66142+static __always_inline __size_overflow(1) void *__kmalloc(size_t size, gfp_t flags)
66143 {
66144 return kmalloc(size, flags);
66145 }
66146diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
66147index df448ad..b99e7f6 100644
66148--- a/include/linux/slub_def.h
66149+++ b/include/linux/slub_def.h
66150@@ -91,7 +91,7 @@ struct kmem_cache {
66151 struct kmem_cache_order_objects max;
66152 struct kmem_cache_order_objects min;
66153 gfp_t allocflags; /* gfp flags to use on each alloc */
66154- int refcount; /* Refcount for slab cache destroy */
66155+ atomic_t refcount; /* Refcount for slab cache destroy */
66156 void (*ctor)(void *);
66157 int inuse; /* Offset to metadata */
66158 int align; /* Alignment */
66159@@ -152,7 +152,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
66160 * Sorry that the following has to be that ugly but some versions of GCC
66161 * have trouble with constant propagation and loops.
66162 */
66163-static __always_inline int kmalloc_index(size_t size)
66164+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
66165 {
66166 if (!size)
66167 return 0;
66168@@ -217,7 +217,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
66169 }
66170
66171 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
66172-void *__kmalloc(size_t size, gfp_t flags);
66173+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
66174
66175 static __always_inline void *
66176 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
66177@@ -258,7 +258,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
66178 }
66179 #endif
66180
66181-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
66182+static __always_inline __size_overflow(1) void *kmalloc_large(size_t size, gfp_t flags)
66183 {
66184 unsigned int order = get_order(size);
66185 return kmalloc_order_trace(size, flags, order);
66186@@ -283,7 +283,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
66187 }
66188
66189 #ifdef CONFIG_NUMA
66190-void *__kmalloc_node(size_t size, gfp_t flags, int node);
66191+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
66192 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
66193
66194 #ifdef CONFIG_TRACING
66195diff --git a/include/linux/sonet.h b/include/linux/sonet.h
66196index 680f9a3..f13aeb0 100644
66197--- a/include/linux/sonet.h
66198+++ b/include/linux/sonet.h
66199@@ -7,7 +7,7 @@
66200 #include <uapi/linux/sonet.h>
66201
66202 struct k_sonet_stats {
66203-#define __HANDLE_ITEM(i) atomic_t i
66204+#define __HANDLE_ITEM(i) atomic_unchecked_t i
66205 __SONET_ITEMS
66206 #undef __HANDLE_ITEM
66207 };
66208diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
66209index 34206b8..f019e06 100644
66210--- a/include/linux/sunrpc/clnt.h
66211+++ b/include/linux/sunrpc/clnt.h
66212@@ -176,9 +176,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
66213 {
66214 switch (sap->sa_family) {
66215 case AF_INET:
66216- return ntohs(((struct sockaddr_in *)sap)->sin_port);
66217+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
66218 case AF_INET6:
66219- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
66220+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
66221 }
66222 return 0;
66223 }
66224@@ -211,7 +211,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
66225 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
66226 const struct sockaddr *src)
66227 {
66228- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
66229+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
66230 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
66231
66232 dsin->sin_family = ssin->sin_family;
66233@@ -314,7 +314,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
66234 if (sa->sa_family != AF_INET6)
66235 return 0;
66236
66237- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
66238+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
66239 }
66240
66241 #endif /* __KERNEL__ */
66242diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
66243index 0b8e3e6..33e0a01 100644
66244--- a/include/linux/sunrpc/svc_rdma.h
66245+++ b/include/linux/sunrpc/svc_rdma.h
66246@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
66247 extern unsigned int svcrdma_max_requests;
66248 extern unsigned int svcrdma_max_req_size;
66249
66250-extern atomic_t rdma_stat_recv;
66251-extern atomic_t rdma_stat_read;
66252-extern atomic_t rdma_stat_write;
66253-extern atomic_t rdma_stat_sq_starve;
66254-extern atomic_t rdma_stat_rq_starve;
66255-extern atomic_t rdma_stat_rq_poll;
66256-extern atomic_t rdma_stat_rq_prod;
66257-extern atomic_t rdma_stat_sq_poll;
66258-extern atomic_t rdma_stat_sq_prod;
66259+extern atomic_unchecked_t rdma_stat_recv;
66260+extern atomic_unchecked_t rdma_stat_read;
66261+extern atomic_unchecked_t rdma_stat_write;
66262+extern atomic_unchecked_t rdma_stat_sq_starve;
66263+extern atomic_unchecked_t rdma_stat_rq_starve;
66264+extern atomic_unchecked_t rdma_stat_rq_poll;
66265+extern atomic_unchecked_t rdma_stat_rq_prod;
66266+extern atomic_unchecked_t rdma_stat_sq_poll;
66267+extern atomic_unchecked_t rdma_stat_sq_prod;
66268
66269 #define RPCRDMA_VERSION 1
66270
66271diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
66272index cd844a6..3ca3592 100644
66273--- a/include/linux/sysctl.h
66274+++ b/include/linux/sysctl.h
66275@@ -41,6 +41,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
66276
66277 extern int proc_dostring(struct ctl_table *, int,
66278 void __user *, size_t *, loff_t *);
66279+extern int proc_dostring_modpriv(struct ctl_table *, int,
66280+ void __user *, size_t *, loff_t *);
66281 extern int proc_dointvec(struct ctl_table *, int,
66282 void __user *, size_t *, loff_t *);
66283 extern int proc_dointvec_minmax(struct ctl_table *, int,
66284diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
66285index 7faf933..eb6f5e3 100644
66286--- a/include/linux/sysrq.h
66287+++ b/include/linux/sysrq.h
66288@@ -36,7 +36,7 @@ struct sysrq_key_op {
66289 char *help_msg;
66290 char *action_msg;
66291 int enable_mask;
66292-};
66293+} __do_const;
66294
66295 #ifdef CONFIG_MAGIC_SYSRQ
66296
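
The __do_const annotation applied to struct sysrq_key_op (and to the other ops tables further below) comes from grsecurity's constify gcc plugin: it forces such structures to be treated as const so the function-pointer tables land in read-only memory. A hedged userspace sketch of the effect, using plain const in place of the plugin attribute:

#include <stdio.h>

struct sysrq_key_op_demo {
	void (*handler)(int key);
	const char *help_msg;
};

static void demo_handler(int key)
{
	printf("sysrq key %c\n", key);
}

/* const places the table in .rodata: overwriting ->handler at runtime
 * faults, and assigning to it does not even compile */
static const struct sysrq_key_op_demo demo_op = {
	.handler  = demo_handler,
	.help_msg = "demo(x)",
};

int main(void)
{
	demo_op.handler('x');
	/* demo_op.handler = NULL;  -- error: assignment of read-only member */
	return 0;
}
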
66297diff --git a/include/linux/tty.h b/include/linux/tty.h
66298index f0b4eb4..1c4854e 100644
66299--- a/include/linux/tty.h
66300+++ b/include/linux/tty.h
66301@@ -192,7 +192,7 @@ struct tty_port {
66302 const struct tty_port_operations *ops; /* Port operations */
66303 spinlock_t lock; /* Lock protecting tty field */
66304 int blocked_open; /* Waiting to open */
66305- int count; /* Usage count */
66306+ atomic_t count; /* Usage count */
66307 wait_queue_head_t open_wait; /* Open waiters */
66308 wait_queue_head_t close_wait; /* Close waiters */
66309 wait_queue_head_t delta_msr_wait; /* Modem status change */
66310@@ -513,7 +513,7 @@ extern int tty_port_open(struct tty_port *port,
66311 struct tty_struct *tty, struct file *filp);
66312 static inline int tty_port_users(struct tty_port *port)
66313 {
66314- return port->count + port->blocked_open;
66315+ return atomic_read(&port->count) + port->blocked_open;
66316 }
66317
66318 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
66319diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
66320index dd976cf..e272742 100644
66321--- a/include/linux/tty_driver.h
66322+++ b/include/linux/tty_driver.h
66323@@ -284,7 +284,7 @@ struct tty_operations {
66324 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
66325 #endif
66326 const struct file_operations *proc_fops;
66327-};
66328+} __do_const;
66329
66330 struct tty_driver {
66331 int magic; /* magic number for this structure */
66332diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
66333index fb79dd8d..07d4773 100644
66334--- a/include/linux/tty_ldisc.h
66335+++ b/include/linux/tty_ldisc.h
66336@@ -149,7 +149,7 @@ struct tty_ldisc_ops {
66337
66338 struct module *owner;
66339
66340- int refcount;
66341+ atomic_t refcount;
66342 };
66343
66344 struct tty_ldisc {
66345diff --git a/include/linux/types.h b/include/linux/types.h
66346index 1cc0e4b..0d50edf 100644
66347--- a/include/linux/types.h
66348+++ b/include/linux/types.h
66349@@ -175,10 +175,26 @@ typedef struct {
66350 int counter;
66351 } atomic_t;
66352
66353+#ifdef CONFIG_PAX_REFCOUNT
66354+typedef struct {
66355+ int counter;
66356+} atomic_unchecked_t;
66357+#else
66358+typedef atomic_t atomic_unchecked_t;
66359+#endif
66360+
66361 #ifdef CONFIG_64BIT
66362 typedef struct {
66363 long counter;
66364 } atomic64_t;
66365+
66366+#ifdef CONFIG_PAX_REFCOUNT
66367+typedef struct {
66368+ long counter;
66369+} atomic64_unchecked_t;
66370+#else
66371+typedef atomic64_t atomic64_unchecked_t;
66372+#endif
66373 #endif
66374
66375 struct list_head {
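
The atomic_unchecked_t/atomic64_unchecked_t typedefs above are the heart of PAX_REFCOUNT: the ordinary atomic types become overflow-checked, while the _unchecked variants keep wrapping semantics for counters (statistics, generation numbers, IDs) where wraparound is harmless. A rough userspace analogy, where abort() stands in for the kernel's overflow handler:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

typedef struct { int counter; } atomic_t;
typedef struct { int counter; } atomic_unchecked_t;

static void atomic_inc(atomic_t *v)     /* checked flavour */
{
	int old = __atomic_fetch_add(&v->counter, 1, __ATOMIC_SEQ_CST);
	if (old == INT_MAX) {           /* stand-in for the PaX overflow trap */
		fprintf(stderr, "refcount overflow caught\n");
		abort();
	}
}

static void atomic_inc_unchecked(atomic_unchecked_t *v) /* wrapping flavour */
{
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_SEQ_CST);
}

int main(void)
{
	atomic_unchecked_t stat = { INT_MAX };
	atomic_inc_unchecked(&stat);            /* statistics may wrap freely */
	printf("stat wrapped to %d\n", stat.counter);

	atomic_t ref = { INT_MAX };
	atomic_inc(&ref);                       /* aborts instead of wrapping */
	return 0;
}
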
66376diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
66377index 5ca0951..ab496a5 100644
66378--- a/include/linux/uaccess.h
66379+++ b/include/linux/uaccess.h
66380@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
66381 long ret; \
66382 mm_segment_t old_fs = get_fs(); \
66383 \
66384- set_fs(KERNEL_DS); \
66385 pagefault_disable(); \
66386- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
66387- pagefault_enable(); \
66388+ set_fs(KERNEL_DS); \
66389+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
66390 set_fs(old_fs); \
66391+ pagefault_enable(); \
66392 ret; \
66393 })
66394
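
The uaccess.h hunk reorders the probe macro so the KERNEL_DS address-limit override is nested strictly inside the pagefault-disabled region, restoring state in reverse order of acquisition; presumably this closes the window in which a serviced fault could observe the widened limit. A userspace sketch of the bracketing discipline, with tracing stubs standing in for the real primitives:

#include <stdio.h>

typedef int mm_segment_t;
#define USER_DS   0
#define KERNEL_DS 1

static mm_segment_t fs = USER_DS;       /* per-task address limit, faked */
static int pagefaults_disabled;

static mm_segment_t get_fs(void) { return fs; }
static void set_fs(mm_segment_t seg) { fs = seg; printf("set_fs(%d)\n", seg); }
static void pagefault_disable(void) { pagefaults_disabled++; puts("pagefault_disable"); }
static void pagefault_enable(void)  { pagefaults_disabled--; puts("pagefault_enable"); }

int main(void)
{
	mm_segment_t old_fs = get_fs();

	pagefault_disable();    /* outer bracket opens first ...           */
	set_fs(KERNEL_DS);      /* ... inner override strictly inside it   */
	/* __copy_from_user_inatomic(...) would run here */
	set_fs(old_fs);         /* restore in reverse order of acquisition */
	pagefault_enable();

	return pagefaults_disabled || fs != USER_DS;    /* 0 when balanced */
}
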
66395diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
66396index 99c1b4d..bb94261 100644
66397--- a/include/linux/unaligned/access_ok.h
66398+++ b/include/linux/unaligned/access_ok.h
66399@@ -6,32 +6,32 @@
66400
66401 static inline u16 get_unaligned_le16(const void *p)
66402 {
66403- return le16_to_cpup((__le16 *)p);
66404+ return le16_to_cpup((const __le16 *)p);
66405 }
66406
66407 static inline u32 get_unaligned_le32(const void *p)
66408 {
66409- return le32_to_cpup((__le32 *)p);
66410+ return le32_to_cpup((const __le32 *)p);
66411 }
66412
66413 static inline u64 get_unaligned_le64(const void *p)
66414 {
66415- return le64_to_cpup((__le64 *)p);
66416+ return le64_to_cpup((const __le64 *)p);
66417 }
66418
66419 static inline u16 get_unaligned_be16(const void *p)
66420 {
66421- return be16_to_cpup((__be16 *)p);
66422+ return be16_to_cpup((const __be16 *)p);
66423 }
66424
66425 static inline u32 get_unaligned_be32(const void *p)
66426 {
66427- return be32_to_cpup((__be32 *)p);
66428+ return be32_to_cpup((const __be32 *)p);
66429 }
66430
66431 static inline u64 get_unaligned_be64(const void *p)
66432 {
66433- return be64_to_cpup((__be64 *)p);
66434+ return be64_to_cpup((const __be64 *)p);
66435 }
66436
66437 static inline void put_unaligned_le16(u16 val, void *p)
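
The access_ok.h hunks are pure const-correctness: each helper already receives a const void *, so the intermediate cast should not strip the qualifier (with -Wcast-qual the old casts would warn). A small userspace sketch of the patched shape; the type stand-in and the little-endian host are assumptions of the demo:

#include <stdio.h>
#include <stdint.h>

typedef uint16_t __le16;        /* stand-in for the kernel's bitwise type */

static inline uint16_t le16_to_cpup(const __le16 *p)
{
	return *p;              /* little-endian host assumed */
}

static inline uint16_t get_unaligned_le16(const void *p)
{
	return le16_to_cpup((const __le16 *)p); /* qualifier preserved */
}

int main(void)
{
	unsigned char buf[2] = { 0x34, 0x12 };
	printf("0x%04x\n", (unsigned int)get_unaligned_le16(buf)); /* 0x1234 */
	return 0;
}
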
66438diff --git a/include/linux/usb.h b/include/linux/usb.h
66439index 10278d1..e21ec3c 100644
66440--- a/include/linux/usb.h
66441+++ b/include/linux/usb.h
66442@@ -551,7 +551,7 @@ struct usb_device {
66443 int maxchild;
66444
66445 u32 quirks;
66446- atomic_t urbnum;
66447+ atomic_unchecked_t urbnum;
66448
66449 unsigned long active_duration;
66450
66451diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
66452index c5d36c6..108f4f9 100644
66453--- a/include/linux/usb/renesas_usbhs.h
66454+++ b/include/linux/usb/renesas_usbhs.h
66455@@ -39,7 +39,7 @@ enum {
66456 */
66457 struct renesas_usbhs_driver_callback {
66458 int (*notify_hotplug)(struct platform_device *pdev);
66459-};
66460+} __no_const;
66461
66462 /*
66463 * callback functions for platform
66464diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
66465index 6f8fbcf..8259001 100644
66466--- a/include/linux/vermagic.h
66467+++ b/include/linux/vermagic.h
66468@@ -25,9 +25,35 @@
66469 #define MODULE_ARCH_VERMAGIC ""
66470 #endif
66471
66472+#ifdef CONFIG_PAX_REFCOUNT
66473+#define MODULE_PAX_REFCOUNT "REFCOUNT "
66474+#else
66475+#define MODULE_PAX_REFCOUNT ""
66476+#endif
66477+
66478+#ifdef CONSTIFY_PLUGIN
66479+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
66480+#else
66481+#define MODULE_CONSTIFY_PLUGIN ""
66482+#endif
66483+
66484+#ifdef STACKLEAK_PLUGIN
66485+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
66486+#else
66487+#define MODULE_STACKLEAK_PLUGIN ""
66488+#endif
66489+
66490+#ifdef CONFIG_GRKERNSEC
66491+#define MODULE_GRSEC "GRSEC "
66492+#else
66493+#define MODULE_GRSEC ""
66494+#endif
66495+
66496 #define VERMAGIC_STRING \
66497 UTS_RELEASE " " \
66498 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
66499 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
66500- MODULE_ARCH_VERMAGIC
66501+ MODULE_ARCH_VERMAGIC \
66502+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
66503+ MODULE_GRSEC
66504
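
The vermagic.h change works by C's adjacent-string-literal concatenation: each MODULE_* macro expands to either a feature tag or the empty string, so kernels built with different hardening options produce different vermagic strings and refuse each other's modules. A compilable sketch with hand-toggled config macros (the release string is an assumption):

#include <stdio.h>

#define UTS_RELEASE "3.7.4-grsec"       /* assumption for the demo */

#define CONFIG_PAX_REFCOUNT             /* pretend Kconfig set this ... */
#ifdef CONFIG_PAX_REFCOUNT
#define MODULE_PAX_REFCOUNT "REFCOUNT "
#else
#define MODULE_PAX_REFCOUNT ""
#endif

#undef CONFIG_GRKERNSEC                 /* ... but not this one */
#ifdef CONFIG_GRKERNSEC
#define MODULE_GRSEC "GRSEC "
#else
#define MODULE_GRSEC ""
#endif

/* adjacent string literals concatenate at compile time */
#define VERMAGIC_STRING UTS_RELEASE " " MODULE_PAX_REFCOUNT MODULE_GRSEC

int main(void)
{
	/* a module whose vermagic lacks "REFCOUNT " was built against a
	 * differently hardened kernel, so module loading would reject it */
	puts(VERMAGIC_STRING);          /* "3.7.4-grsec REFCOUNT " */
	return 0;
}
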
66505diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
66506index 6071e91..ca6a489 100644
66507--- a/include/linux/vmalloc.h
66508+++ b/include/linux/vmalloc.h
66509@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
66510 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
66511 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
66512 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
66513+
66514+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
66515+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
66516+#endif
66517+
66518 /* bits [20..32] reserved for arch specific ioremap internals */
66519
66520 /*
66521@@ -62,7 +67,7 @@ extern void *vmalloc_32_user(unsigned long size);
66522 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
66523 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
66524 unsigned long start, unsigned long end, gfp_t gfp_mask,
66525- pgprot_t prot, int node, const void *caller);
66526+ pgprot_t prot, int node, const void *caller) __size_overflow(1);
66527 extern void vfree(const void *addr);
66528
66529 extern void *vmap(struct page **pages, unsigned int count,
66530@@ -124,8 +129,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
66531 extern void free_vm_area(struct vm_struct *area);
66532
66533 /* for /dev/kmem */
66534-extern long vread(char *buf, char *addr, unsigned long count);
66535-extern long vwrite(char *buf, char *addr, unsigned long count);
66536+extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
66537+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
66538
66539 /*
66540 * Internals. Dont't use..
66541diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
66542index 92a86b2..1d9eb3c 100644
66543--- a/include/linux/vmstat.h
66544+++ b/include/linux/vmstat.h
66545@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
66546 /*
66547 * Zone based page accounting with per cpu differentials.
66548 */
66549-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
66550+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
66551
66552 static inline void zone_page_state_add(long x, struct zone *zone,
66553 enum zone_stat_item item)
66554 {
66555- atomic_long_add(x, &zone->vm_stat[item]);
66556- atomic_long_add(x, &vm_stat[item]);
66557+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
66558+ atomic_long_add_unchecked(x, &vm_stat[item]);
66559 }
66560
66561 static inline unsigned long global_page_state(enum zone_stat_item item)
66562 {
66563- long x = atomic_long_read(&vm_stat[item]);
66564+ long x = atomic_long_read_unchecked(&vm_stat[item]);
66565 #ifdef CONFIG_SMP
66566 if (x < 0)
66567 x = 0;
66568@@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
66569 static inline unsigned long zone_page_state(struct zone *zone,
66570 enum zone_stat_item item)
66571 {
66572- long x = atomic_long_read(&zone->vm_stat[item]);
66573+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
66574 #ifdef CONFIG_SMP
66575 if (x < 0)
66576 x = 0;
66577@@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
66578 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
66579 enum zone_stat_item item)
66580 {
66581- long x = atomic_long_read(&zone->vm_stat[item]);
66582+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
66583
66584 #ifdef CONFIG_SMP
66585 int cpu;
66586@@ -218,8 +218,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
66587
66588 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
66589 {
66590- atomic_long_inc(&zone->vm_stat[item]);
66591- atomic_long_inc(&vm_stat[item]);
66592+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
66593+ atomic_long_inc_unchecked(&vm_stat[item]);
66594 }
66595
66596 static inline void __inc_zone_page_state(struct page *page,
66597@@ -230,8 +230,8 @@ static inline void __inc_zone_page_state(struct page *page,
66598
66599 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
66600 {
66601- atomic_long_dec(&zone->vm_stat[item]);
66602- atomic_long_dec(&vm_stat[item]);
66603+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
66604+ atomic_long_dec_unchecked(&vm_stat[item]);
66605 }
66606
66607 static inline void __dec_zone_page_state(struct page *page,
66608diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
66609index 95d1c91..6798cca 100644
66610--- a/include/media/v4l2-dev.h
66611+++ b/include/media/v4l2-dev.h
66612@@ -76,7 +76,7 @@ struct v4l2_file_operations {
66613 int (*mmap) (struct file *, struct vm_area_struct *);
66614 int (*open) (struct file *);
66615 int (*release) (struct file *);
66616-};
66617+} __do_const;
66618
66619 /*
66620 * Newer version of video_device, handled by videodev2.c
66621diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
66622index e48b571..7e40de4 100644
66623--- a/include/media/v4l2-ioctl.h
66624+++ b/include/media/v4l2-ioctl.h
66625@@ -282,7 +282,6 @@ struct v4l2_ioctl_ops {
66626 bool valid_prio, int cmd, void *arg);
66627 };
66628
66629-
66630 /* v4l debugging and diagnostics */
66631
66632 /* Debug bitmask flags to be used on V4L2 */
66633diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
66634index 9e5425b..8136ffc 100644
66635--- a/include/net/caif/cfctrl.h
66636+++ b/include/net/caif/cfctrl.h
66637@@ -52,7 +52,7 @@ struct cfctrl_rsp {
66638 void (*radioset_rsp)(void);
66639 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
66640 struct cflayer *client_layer);
66641-};
66642+} __no_const;
66643
66644 /* Link Setup Parameters for CAIF-Links. */
66645 struct cfctrl_link_param {
66646@@ -101,8 +101,8 @@ struct cfctrl_request_info {
66647 struct cfctrl {
66648 struct cfsrvl serv;
66649 struct cfctrl_rsp res;
66650- atomic_t req_seq_no;
66651- atomic_t rsp_seq_no;
66652+ atomic_unchecked_t req_seq_no;
66653+ atomic_unchecked_t rsp_seq_no;
66654 struct list_head list;
66655 /* Protects from simultaneous access to first_req list */
66656 spinlock_t info_list_lock;
66657diff --git a/include/net/flow.h b/include/net/flow.h
66658index 628e11b..4c475df 100644
66659--- a/include/net/flow.h
66660+++ b/include/net/flow.h
66661@@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
66662
66663 extern void flow_cache_flush(void);
66664 extern void flow_cache_flush_deferred(void);
66665-extern atomic_t flow_cache_genid;
66666+extern atomic_unchecked_t flow_cache_genid;
66667
66668 #endif
66669diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
66670index e5062c9..e709988 100644
66671--- a/include/net/gro_cells.h
66672+++ b/include/net/gro_cells.h
66673@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
66674 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
66675
66676 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
66677- atomic_long_inc(&dev->rx_dropped);
66678+ atomic_long_inc_unchecked(&dev->rx_dropped);
66679 kfree_skb(skb);
66680 return;
66681 }
66682diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
66683index 1832927..ce39aea 100644
66684--- a/include/net/inet_connection_sock.h
66685+++ b/include/net/inet_connection_sock.h
66686@@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
66687 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
66688 int (*bind_conflict)(const struct sock *sk,
66689 const struct inet_bind_bucket *tb, bool relax);
66690-};
66691+} __do_const;
66692
66693 /** inet_connection_sock - INET connection oriented sock
66694 *
66695diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
66696index 53f464d..ba76aaa 100644
66697--- a/include/net/inetpeer.h
66698+++ b/include/net/inetpeer.h
66699@@ -47,8 +47,8 @@ struct inet_peer {
66700 */
66701 union {
66702 struct {
66703- atomic_t rid; /* Frag reception counter */
66704- atomic_t ip_id_count; /* IP ID for the next packet */
66705+ atomic_unchecked_t rid; /* Frag reception counter */
66706+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
66707 };
66708 struct rcu_head rcu;
66709 struct inet_peer *gc_next;
66710@@ -182,11 +182,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
66711 more++;
66712 inet_peer_refcheck(p);
66713 do {
66714- old = atomic_read(&p->ip_id_count);
66715+ old = atomic_read_unchecked(&p->ip_id_count);
66716 new = old + more;
66717 if (!new)
66718 new = 1;
66719- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
66720+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
66721 return new;
66722 }
66723
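
The inet_getid() hunk above only substitutes the _unchecked primitives; the logic is the usual compare-and-swap retry loop that hands out IP IDs and skips 0, and an ID counter is expected to wrap, so an overflow check would misfire here. A userspace rendering of the same loop using GCC's __atomic builtins:

#include <stdio.h>

static int ip_id_count;         /* the per-peer counter, simplified */

static int inet_getid(int more)
{
	int old, new;

	more++;
	do {
		old = __atomic_load_n(&ip_id_count, __ATOMIC_SEQ_CST);
		new = old + more;
		if (!new)
			new = 1;        /* 0 is reserved: skip it on wrap */
	} while (!__atomic_compare_exchange_n(&ip_id_count, &old, new,
					      0, __ATOMIC_SEQ_CST,
					      __ATOMIC_SEQ_CST));
	return new;
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("id=%d\n", inet_getid(0));       /* id=1 id=2 id=3 */
	return 0;
}
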
66724diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
66725index 9497be1..5a4fafe 100644
66726--- a/include/net/ip_fib.h
66727+++ b/include/net/ip_fib.h
66728@@ -169,7 +169,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
66729
66730 #define FIB_RES_SADDR(net, res) \
66731 ((FIB_RES_NH(res).nh_saddr_genid == \
66732- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
66733+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
66734 FIB_RES_NH(res).nh_saddr : \
66735 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
66736 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
66737diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
66738index ee75ccd..2cc2b95 100644
66739--- a/include/net/ip_vs.h
66740+++ b/include/net/ip_vs.h
66741@@ -510,7 +510,7 @@ struct ip_vs_conn {
66742 struct ip_vs_conn *control; /* Master control connection */
66743 atomic_t n_control; /* Number of controlled ones */
66744 struct ip_vs_dest *dest; /* real server */
66745- atomic_t in_pkts; /* incoming packet counter */
66746+ atomic_unchecked_t in_pkts; /* incoming packet counter */
66747
66748 /* packet transmitter for different forwarding methods. If it
66749 mangles the packet, it must return NF_DROP or better NF_STOLEN,
66750@@ -648,7 +648,7 @@ struct ip_vs_dest {
66751 __be16 port; /* port number of the server */
66752 union nf_inet_addr addr; /* IP address of the server */
66753 volatile unsigned int flags; /* dest status flags */
66754- atomic_t conn_flags; /* flags to copy to conn */
66755+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
66756 atomic_t weight; /* server weight */
66757
66758 atomic_t refcnt; /* reference counter */
66759diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
66760index 80ffde3..968b0f4 100644
66761--- a/include/net/irda/ircomm_tty.h
66762+++ b/include/net/irda/ircomm_tty.h
66763@@ -35,6 +35,7 @@
66764 #include <linux/termios.h>
66765 #include <linux/timer.h>
66766 #include <linux/tty.h> /* struct tty_struct */
66767+#include <asm/local.h>
66768
66769 #include <net/irda/irias_object.h>
66770 #include <net/irda/ircomm_core.h>
66771diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
66772index cc7c197..9f2da2a 100644
66773--- a/include/net/iucv/af_iucv.h
66774+++ b/include/net/iucv/af_iucv.h
66775@@ -141,7 +141,7 @@ struct iucv_sock {
66776 struct iucv_sock_list {
66777 struct hlist_head head;
66778 rwlock_t lock;
66779- atomic_t autobind_name;
66780+ atomic_unchecked_t autobind_name;
66781 };
66782
66783 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
66784diff --git a/include/net/neighbour.h b/include/net/neighbour.h
66785index 0dab173..1b76af0 100644
66786--- a/include/net/neighbour.h
66787+++ b/include/net/neighbour.h
66788@@ -123,7 +123,7 @@ struct neigh_ops {
66789 void (*error_report)(struct neighbour *, struct sk_buff *);
66790 int (*output)(struct neighbour *, struct sk_buff *);
66791 int (*connected_output)(struct neighbour *, struct sk_buff *);
66792-};
66793+} __do_const;
66794
66795 struct pneigh_entry {
66796 struct pneigh_entry *next;
66797diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
66798index 95e6466..251016d 100644
66799--- a/include/net/net_namespace.h
66800+++ b/include/net/net_namespace.h
66801@@ -110,7 +110,7 @@ struct net {
66802 #endif
66803 struct netns_ipvs *ipvs;
66804 struct sock *diag_nlsk;
66805- atomic_t rt_genid;
66806+ atomic_unchecked_t rt_genid;
66807 };
66808
66809 /*
66810@@ -320,12 +320,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
66811
66812 static inline int rt_genid(struct net *net)
66813 {
66814- return atomic_read(&net->rt_genid);
66815+ return atomic_read_unchecked(&net->rt_genid);
66816 }
66817
66818 static inline void rt_genid_bump(struct net *net)
66819 {
66820- atomic_inc(&net->rt_genid);
66821+ atomic_inc_unchecked(&net->rt_genid);
66822 }
66823
66824 #endif /* __NET_NET_NAMESPACE_H */
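
rt_genid()/rt_genid_bump() above implement the generation-counter pattern: cached routes remember the genid they were created under, and bumping the counter invalidates every cached entry at once without walking them. Wraparound of the counter is benign, hence the _unchecked primitives. A userspace sketch of the pattern:

#include <stdio.h>

static int rt_genid_counter;

static int rt_genid(void)
{
	return __atomic_load_n(&rt_genid_counter, __ATOMIC_RELAXED);
}

static void rt_genid_bump(void)
{
	__atomic_add_fetch(&rt_genid_counter, 1, __ATOMIC_RELAXED);
}

struct cached_route {
	int genid;              /* generation the entry was created under */
	const char *dst;
};

static int route_is_valid(const struct cached_route *rt)
{
	return rt->genid == rt_genid();
}

int main(void)
{
	struct cached_route rt = { rt_genid(), "192.0.2.1" };
	printf("valid before bump: %d\n", route_is_valid(&rt)); /* 1 */
	rt_genid_bump();        /* e.g. an address or route changed */
	printf("valid after bump:  %d\n", route_is_valid(&rt)); /* 0 */
	return 0;
}
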
66825diff --git a/include/net/netdma.h b/include/net/netdma.h
66826index 8ba8ce2..99b7fff 100644
66827--- a/include/net/netdma.h
66828+++ b/include/net/netdma.h
66829@@ -24,7 +24,7 @@
66830 #include <linux/dmaengine.h>
66831 #include <linux/skbuff.h>
66832
66833-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
66834+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
66835 struct sk_buff *skb, int offset, struct iovec *to,
66836 size_t len, struct dma_pinned_list *pinned_list);
66837
66838diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
66839index 252fd10..aa1421f 100644
66840--- a/include/net/netfilter/nf_queue.h
66841+++ b/include/net/netfilter/nf_queue.h
66842@@ -22,7 +22,7 @@ struct nf_queue_handler {
66843 int (*outfn)(struct nf_queue_entry *entry,
66844 unsigned int queuenum);
66845 char *name;
66846-};
66847+} __do_const;
66848
66849 extern int nf_register_queue_handler(u_int8_t pf,
66850 const struct nf_queue_handler *qh);
66851diff --git a/include/net/netlink.h b/include/net/netlink.h
66852index 9690b0f..87aded7 100644
66853--- a/include/net/netlink.h
66854+++ b/include/net/netlink.h
66855@@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
66856 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
66857 {
66858 if (mark)
66859- skb_trim(skb, (unsigned char *) mark - skb->data);
66860+ skb_trim(skb, (const unsigned char *) mark - skb->data);
66861 }
66862
66863 /**
66864diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
66865index 2ae2b83..dbdc85e 100644
66866--- a/include/net/netns/ipv4.h
66867+++ b/include/net/netns/ipv4.h
66868@@ -64,7 +64,7 @@ struct netns_ipv4 {
66869 kgid_t sysctl_ping_group_range[2];
66870 long sysctl_tcp_mem[3];
66871
66872- atomic_t dev_addr_genid;
66873+ atomic_unchecked_t dev_addr_genid;
66874
66875 #ifdef CONFIG_IP_MROUTE
66876 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
66877diff --git a/include/net/protocol.h b/include/net/protocol.h
66878index 929528c..c84d4f6 100644
66879--- a/include/net/protocol.h
66880+++ b/include/net/protocol.h
66881@@ -48,7 +48,7 @@ struct net_protocol {
66882 int (*gro_complete)(struct sk_buff *skb);
66883 unsigned int no_policy:1,
66884 netns_ok:1;
66885-};
66886+} __do_const;
66887
66888 #if IS_ENABLED(CONFIG_IPV6)
66889 struct inet6_protocol {
66890@@ -69,7 +69,7 @@ struct inet6_protocol {
66891 int (*gro_complete)(struct sk_buff *skb);
66892
66893 unsigned int flags; /* INET6_PROTO_xxx */
66894-};
66895+} __do_const;
66896
66897 #define INET6_PROTO_NOPOLICY 0x1
66898 #define INET6_PROTO_FINAL 0x2
66899diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
66900index 9c6414f..fbd0524 100644
66901--- a/include/net/sctp/sctp.h
66902+++ b/include/net/sctp/sctp.h
66903@@ -318,9 +318,9 @@ do { \
66904
66905 #else /* SCTP_DEBUG */
66906
66907-#define SCTP_DEBUG_PRINTK(whatever...)
66908-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
66909-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
66910+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
66911+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
66912+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
66913 #define SCTP_ENABLE_DEBUG
66914 #define SCTP_DISABLE_DEBUG
66915 #define SCTP_ASSERT(expr, str, func)
66916diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
66917index 64158aa..b65533c 100644
66918--- a/include/net/sctp/structs.h
66919+++ b/include/net/sctp/structs.h
66920@@ -496,7 +496,7 @@ struct sctp_af {
66921 int sockaddr_len;
66922 sa_family_t sa_family;
66923 struct list_head list;
66924-};
66925+} __do_const;
66926
66927 struct sctp_af *sctp_get_af_specific(sa_family_t);
66928 int sctp_register_af(struct sctp_af *);
66929@@ -516,7 +516,7 @@ struct sctp_pf {
66930 struct sctp_association *asoc);
66931 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
66932 struct sctp_af *af;
66933-};
66934+} __do_const;
66935
66936
66937 /* Structure to track chunk fragments that have been acked, but peer
66938diff --git a/include/net/sock.h b/include/net/sock.h
66939index c945fba..e162e56 100644
66940--- a/include/net/sock.h
66941+++ b/include/net/sock.h
66942@@ -304,7 +304,7 @@ struct sock {
66943 #ifdef CONFIG_RPS
66944 __u32 sk_rxhash;
66945 #endif
66946- atomic_t sk_drops;
66947+ atomic_unchecked_t sk_drops;
66948 int sk_rcvbuf;
66949
66950 struct sk_filter __rcu *sk_filter;
66951@@ -1763,7 +1763,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
66952 }
66953
66954 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
66955- char __user *from, char *to,
66956+ char __user *from, unsigned char *to,
66957 int copy, int offset)
66958 {
66959 if (skb->ip_summed == CHECKSUM_NONE) {
66960@@ -2022,7 +2022,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
66961 }
66962 }
66963
66964-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
66965+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
66966
66967 /**
66968 * sk_page_frag - return an appropriate page_frag
66969diff --git a/include/net/tcp.h b/include/net/tcp.h
66970index 4af45e3..af97861 100644
66971--- a/include/net/tcp.h
66972+++ b/include/net/tcp.h
66973@@ -531,7 +531,7 @@ extern void tcp_retransmit_timer(struct sock *sk);
66974 extern void tcp_xmit_retransmit_queue(struct sock *);
66975 extern void tcp_simple_retransmit(struct sock *);
66976 extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
66977-extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
66978+extern int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
66979
66980 extern void tcp_send_probe0(struct sock *);
66981 extern void tcp_send_partial(struct sock *);
66982@@ -702,8 +702,8 @@ struct tcp_skb_cb {
66983 struct inet6_skb_parm h6;
66984 #endif
66985 } header; /* For incoming frames */
66986- __u32 seq; /* Starting sequence number */
66987- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
66988+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
66989+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
66990 __u32 when; /* used to compute rtt's */
66991 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
66992
66993@@ -717,7 +717,7 @@ struct tcp_skb_cb {
66994
66995 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
66996 /* 1 byte hole */
66997- __u32 ack_seq; /* Sequence number ACK'd */
66998+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
66999 };
67000
67001 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
67002diff --git a/include/net/xfrm.h b/include/net/xfrm.h
67003index 63445ed..74ef61d 100644
67004--- a/include/net/xfrm.h
67005+++ b/include/net/xfrm.h
67006@@ -423,7 +423,7 @@ struct xfrm_mode {
67007 struct module *owner;
67008 unsigned int encap;
67009 int flags;
67010-};
67011+} __do_const;
67012
67013 /* Flags for xfrm_mode. */
67014 enum {
67015@@ -514,7 +514,7 @@ struct xfrm_policy {
67016 struct timer_list timer;
67017
67018 struct flow_cache_object flo;
67019- atomic_t genid;
67020+ atomic_unchecked_t genid;
67021 u32 priority;
67022 u32 index;
67023 struct xfrm_mark mark;
67024diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
67025index 1a046b1..ee0bef0 100644
67026--- a/include/rdma/iw_cm.h
67027+++ b/include/rdma/iw_cm.h
67028@@ -122,7 +122,7 @@ struct iw_cm_verbs {
67029 int backlog);
67030
67031 int (*destroy_listen)(struct iw_cm_id *cm_id);
67032-};
67033+} __no_const;
67034
67035 /**
67036 * iw_create_cm_id - Create an IW CM identifier.
67037diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
67038index 399162b..b337f1a 100644
67039--- a/include/scsi/libfc.h
67040+++ b/include/scsi/libfc.h
67041@@ -762,6 +762,7 @@ struct libfc_function_template {
67042 */
67043 void (*disc_stop_final) (struct fc_lport *);
67044 };
67045+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
67046
67047 /**
67048 * struct fc_disc - Discovery context
67049@@ -866,7 +867,7 @@ struct fc_lport {
67050 struct fc_vport *vport;
67051
67052 /* Operational Information */
67053- struct libfc_function_template tt;
67054+ libfc_function_template_no_const tt;
67055 u8 link_up;
67056 u8 qfull;
67057 enum fc_lport_state state;
67058diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
67059index 55367b0..d97bd2a 100644
67060--- a/include/scsi/scsi_device.h
67061+++ b/include/scsi/scsi_device.h
67062@@ -169,9 +169,9 @@ struct scsi_device {
67063 unsigned int max_device_blocked; /* what device_blocked counts down from */
67064 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
67065
67066- atomic_t iorequest_cnt;
67067- atomic_t iodone_cnt;
67068- atomic_t ioerr_cnt;
67069+ atomic_unchecked_t iorequest_cnt;
67070+ atomic_unchecked_t iodone_cnt;
67071+ atomic_unchecked_t ioerr_cnt;
67072
67073 struct device sdev_gendev,
67074 sdev_dev;
67075diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
67076index b797e8f..8e2c3aa 100644
67077--- a/include/scsi/scsi_transport_fc.h
67078+++ b/include/scsi/scsi_transport_fc.h
67079@@ -751,7 +751,8 @@ struct fc_function_template {
67080 unsigned long show_host_system_hostname:1;
67081
67082 unsigned long disable_target_scan:1;
67083-};
67084+} __do_const;
67085+typedef struct fc_function_template __no_const fc_function_template_no_const;
67086
67087
67088 /**
67089diff --git a/include/sound/soc.h b/include/sound/soc.h
67090index 91244a0..89ca1a7 100644
67091--- a/include/sound/soc.h
67092+++ b/include/sound/soc.h
67093@@ -769,7 +769,7 @@ struct snd_soc_codec_driver {
67094 /* probe ordering - for components with runtime dependencies */
67095 int probe_order;
67096 int remove_order;
67097-};
67098+} __do_const;
67099
67100 /* SoC platform interface */
67101 struct snd_soc_platform_driver {
67102@@ -815,7 +815,7 @@ struct snd_soc_platform_driver {
67103 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
67104 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
67105 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
67106-};
67107+} __do_const;
67108
67109 struct snd_soc_platform {
67110 const char *name;
67111diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
67112index fca8bbe..c0242ea 100644
67113--- a/include/target/target_core_base.h
67114+++ b/include/target/target_core_base.h
67115@@ -760,7 +760,7 @@ struct se_device {
67116 spinlock_t stats_lock;
67117 /* Active commands on this virtual SE device */
67118 atomic_t simple_cmds;
67119- atomic_t dev_ordered_id;
67120+ atomic_unchecked_t dev_ordered_id;
67121 atomic_t dev_ordered_sync;
67122 atomic_t dev_qf_count;
67123 struct se_obj dev_obj;
67124diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
67125new file mode 100644
67126index 0000000..fb634b7
67127--- /dev/null
67128+++ b/include/trace/events/fs.h
67129@@ -0,0 +1,53 @@
67130+#undef TRACE_SYSTEM
67131+#define TRACE_SYSTEM fs
67132+
67133+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
67134+#define _TRACE_FS_H
67135+
67136+#include <linux/fs.h>
67137+#include <linux/tracepoint.h>
67138+
67139+TRACE_EVENT(do_sys_open,
67140+
67141+ TP_PROTO(const char *filename, int flags, int mode),
67142+
67143+ TP_ARGS(filename, flags, mode),
67144+
67145+ TP_STRUCT__entry(
67146+ __string( filename, filename )
67147+ __field( int, flags )
67148+ __field( int, mode )
67149+ ),
67150+
67151+ TP_fast_assign(
67152+ __assign_str(filename, filename);
67153+ __entry->flags = flags;
67154+ __entry->mode = mode;
67155+ ),
67156+
67157+ TP_printk("\"%s\" %x %o",
67158+ __get_str(filename), __entry->flags, __entry->mode)
67159+);
67160+
67161+TRACE_EVENT(open_exec,
67162+
67163+ TP_PROTO(const char *filename),
67164+
67165+ TP_ARGS(filename),
67166+
67167+ TP_STRUCT__entry(
67168+ __string( filename, filename )
67169+ ),
67170+
67171+ TP_fast_assign(
67172+ __assign_str(filename, filename);
67173+ ),
67174+
67175+ TP_printk("\"%s\"",
67176+ __get_str(filename))
67177+);
67178+
67179+#endif /* _TRACE_FS_H */
67180+
67181+/* This part must be outside protection */
67182+#include <trace/define_trace.h>
67183diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
67184index 1c09820..7f5ec79 100644
67185--- a/include/trace/events/irq.h
67186+++ b/include/trace/events/irq.h
67187@@ -36,7 +36,7 @@ struct softirq_action;
67188 */
67189 TRACE_EVENT(irq_handler_entry,
67190
67191- TP_PROTO(int irq, struct irqaction *action),
67192+ TP_PROTO(int irq, const struct irqaction *action),
67193
67194 TP_ARGS(irq, action),
67195
67196@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
67197 */
67198 TRACE_EVENT(irq_handler_exit,
67199
67200- TP_PROTO(int irq, struct irqaction *action, int ret),
67201+ TP_PROTO(int irq, const struct irqaction *action, int ret),
67202
67203 TP_ARGS(irq, action, ret),
67204
67205diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
67206index 7caf44c..23c6f27 100644
67207--- a/include/uapi/linux/a.out.h
67208+++ b/include/uapi/linux/a.out.h
67209@@ -39,6 +39,14 @@ enum machine_type {
67210 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
67211 };
67212
67213+/* Constants for the N_FLAGS field */
67214+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
67215+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
67216+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
67217+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
67218+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
67219+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
67220+
67221 #if !defined (N_MAGIC)
67222 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
67223 #endif
67224diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
67225index d876736..b36014e 100644
67226--- a/include/uapi/linux/byteorder/little_endian.h
67227+++ b/include/uapi/linux/byteorder/little_endian.h
67228@@ -42,51 +42,51 @@
67229
67230 static inline __le64 __cpu_to_le64p(const __u64 *p)
67231 {
67232- return (__force __le64)*p;
67233+ return (__force const __le64)*p;
67234 }
67235 static inline __u64 __le64_to_cpup(const __le64 *p)
67236 {
67237- return (__force __u64)*p;
67238+ return (__force const __u64)*p;
67239 }
67240 static inline __le32 __cpu_to_le32p(const __u32 *p)
67241 {
67242- return (__force __le32)*p;
67243+ return (__force const __le32)*p;
67244 }
67245 static inline __u32 __le32_to_cpup(const __le32 *p)
67246 {
67247- return (__force __u32)*p;
67248+ return (__force const __u32)*p;
67249 }
67250 static inline __le16 __cpu_to_le16p(const __u16 *p)
67251 {
67252- return (__force __le16)*p;
67253+ return (__force const __le16)*p;
67254 }
67255 static inline __u16 __le16_to_cpup(const __le16 *p)
67256 {
67257- return (__force __u16)*p;
67258+ return (__force const __u16)*p;
67259 }
67260 static inline __be64 __cpu_to_be64p(const __u64 *p)
67261 {
67262- return (__force __be64)__swab64p(p);
67263+ return (__force const __be64)__swab64p(p);
67264 }
67265 static inline __u64 __be64_to_cpup(const __be64 *p)
67266 {
67267- return __swab64p((__u64 *)p);
67268+ return __swab64p((const __u64 *)p);
67269 }
67270 static inline __be32 __cpu_to_be32p(const __u32 *p)
67271 {
67272- return (__force __be32)__swab32p(p);
67273+ return (__force const __be32)__swab32p(p);
67274 }
67275 static inline __u32 __be32_to_cpup(const __be32 *p)
67276 {
67277- return __swab32p((__u32 *)p);
67278+ return __swab32p((const __u32 *)p);
67279 }
67280 static inline __be16 __cpu_to_be16p(const __u16 *p)
67281 {
67282- return (__force __be16)__swab16p(p);
67283+ return (__force const __be16)__swab16p(p);
67284 }
67285 static inline __u16 __be16_to_cpup(const __be16 *p)
67286 {
67287- return __swab16p((__u16 *)p);
67288+ return __swab16p((const __u16 *)p);
67289 }
67290 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
67291 #define __le64_to_cpus(x) do { (void)(x); } while (0)
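Behaviour is unchanged here: on the value casts the added const is a no-op, and on the pointer casts feeding __swab*p() it stops the helpers from silently stripping the caller's const qualifier under sparse. For reference, a small kernel-style usage sketch:

/* Sketch: pulling a little-endian on-wire field out of a read-only
 * buffer. On LE hosts this is a plain load; on BE hosts
 * __le32_to_cpup() byte-swaps via __swab32p(). The header is normally
 * reached through <asm/byteorder.h> rather than included directly. */
#include <linux/types.h>

static u32 read_le32_field(const void *buf)
{
	const __le32 *field = buf;
	return __le32_to_cpup(field);	/* host byte order */
}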
67292diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
67293index 126a817..d522bd1 100644
67294--- a/include/uapi/linux/elf.h
67295+++ b/include/uapi/linux/elf.h
67296@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
67297 #define PT_GNU_EH_FRAME 0x6474e550
67298
67299 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
67300+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
67301+
67302+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
67303+
67304+/* Constants for the e_flags field */
67305+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
67306+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
67307+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
67308+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
67309+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
67310+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
67311
67312 /*
67313 * Extended Numbering
67314@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
67315 #define DT_DEBUG 21
67316 #define DT_TEXTREL 22
67317 #define DT_JMPREL 23
67318+#define DT_FLAGS 30
67319+ #define DF_TEXTREL 0x00000004
67320 #define DT_ENCODING 32
67321 #define OLD_DT_LOOS 0x60000000
67322 #define DT_LOOS 0x6000000d
67323@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
67324 #define PF_W 0x2
67325 #define PF_X 0x1
67326
67327+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
67328+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
67329+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
67330+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
67331+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
67332+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
67333+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
67334+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
67335+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
67336+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
67337+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
67338+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
67339+
67340 typedef struct elf32_phdr{
67341 Elf32_Word p_type;
67342 Elf32_Off p_offset;
67343@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
67344 #define EI_OSABI 7
67345 #define EI_PAD 8
67346
67347+#define EI_PAX 14
67348+
67349 #define ELFMAG0 0x7f /* EI_MAG */
67350 #define ELFMAG1 'E'
67351 #define ELFMAG2 'L'
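PT_PAX_FLAGS names a dedicated program header whose p_flags word carries the PF_*/PF_NO* pairs above (paired enable and disable bits, so "unmarked" stays distinguishable from "off"). A structural sketch of how a loader finds it; the kernel's real parser in binfmt_elf elsewhere in this patch also consults the EI_PAX marking and, with the xattr support below, user.pax.flags:

/* Sketch: scan already-validated program headers for PT_PAX_FLAGS. */
#include <linux/elf.h>

static unsigned long find_pax_flags(const struct elf_phdr *phdr, int phnum)
{
	int i;

	for (i = 0; i < phnum; i++)
		if (phdr[i].p_type == PT_PAX_FLAGS)
			return phdr[i].p_flags;	/* PF_PAGEEXEC etc. */
	return 0;				/* no PAX header present */
}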
67352diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
67353index aa169c4..6a2771d 100644
67354--- a/include/uapi/linux/personality.h
67355+++ b/include/uapi/linux/personality.h
67356@@ -30,6 +30,7 @@ enum {
67357 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
67358 ADDR_NO_RANDOMIZE | \
67359 ADDR_COMPAT_LAYOUT | \
67360+ ADDR_LIMIT_3GB | \
67361 MMAP_PAGE_ZERO)
67362
67363 /*
67364diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
67365index 7530e74..e714828 100644
67366--- a/include/uapi/linux/screen_info.h
67367+++ b/include/uapi/linux/screen_info.h
67368@@ -43,7 +43,8 @@ struct screen_info {
67369 __u16 pages; /* 0x32 */
67370 __u16 vesa_attributes; /* 0x34 */
67371 __u32 capabilities; /* 0x36 */
67372- __u8 _reserved[6]; /* 0x3a */
67373+ __u16 vesapm_size; /* 0x3a */
67374+ __u8 _reserved[4]; /* 0x3c */
67375 } __attribute__((packed));
67376
67377 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
67378diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
67379index 6d67213..8dab561 100644
67380--- a/include/uapi/linux/sysctl.h
67381+++ b/include/uapi/linux/sysctl.h
67382@@ -155,7 +155,11 @@ enum
67383 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
67384 };
67385
67386-
67387+#ifdef CONFIG_PAX_SOFTMODE
67388+enum {
67389+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
67390+};
67391+#endif
67392
67393 /* CTL_VM names: */
67394 enum
67395diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
67396index 26607bd..588b65f 100644
67397--- a/include/uapi/linux/xattr.h
67398+++ b/include/uapi/linux/xattr.h
67399@@ -60,5 +60,9 @@
67400 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
67401 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
67402
67403+/* User namespace */
67404+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
67405+#define XATTR_PAX_FLAGS_SUFFIX "flags"
67406+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
67407
67408 #endif /* _UAPI_LINUX_XATTR_H */
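The three macros compose to the literal name "user.pax.flags", so the marking is an ordinary user xattr that setfattr/getfattr (or paxctl-style tools) can manage without any kernel-specific interface. A small userspace sketch, error handling trimmed:

/* Userspace sketch: read a binary's PaX flags xattr.
 * Equivalent to: getfattr -n user.pax.flags <file>
 * The value string is whatever the marking tool wrote. */
#include <stdio.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
	char value[64];
	ssize_t len;

	if (argc < 2)
		return 1;
	len = getxattr(argv[1], "user.pax.flags", value, sizeof(value) - 1);
	if (len < 0) {
		perror("getxattr");
		return 1;
	}
	value[len] = '\0';
	printf("%s: %s\n", argv[1], value);
	return 0;
}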
67409diff --git a/include/video/omapdss.h b/include/video/omapdss.h
67410index 3729173..7b2df7e 100644
67411--- a/include/video/omapdss.h
67412+++ b/include/video/omapdss.h
67413@@ -323,7 +323,7 @@ struct omap_dss_board_info {
67414 int (*dsi_enable_pads)(int dsi_id, unsigned lane_mask);
67415 void (*dsi_disable_pads)(int dsi_id, unsigned lane_mask);
67416 int (*set_min_bus_tput)(struct device *dev, unsigned long r);
67417-};
67418+} __do_const;
67419
67420 /* Init with the board info */
67421 extern int omap_display_init(struct omap_dss_board_info *board_data);
67422diff --git a/include/video/udlfb.h b/include/video/udlfb.h
67423index f9466fa..f4e2b81 100644
67424--- a/include/video/udlfb.h
67425+++ b/include/video/udlfb.h
67426@@ -53,10 +53,10 @@ struct dlfb_data {
67427 u32 pseudo_palette[256];
67428 int blank_mode; /*one of FB_BLANK_ */
67429 /* blit-only rendering path metrics, exposed through sysfs */
67430- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
67431- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
67432- atomic_t bytes_sent; /* to usb, after compression including overhead */
67433- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
67434+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
67435+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
67436+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
67437+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
67438 };
67439
67440 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
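These four fields are write-mostly statistics, so under PaX's REFCOUNT hardening (added elsewhere in this patch) they switch to atomic_unchecked_t: the checked atomic_t would trap on signed overflow, which for a byte counter is expected wraparound rather than a refcount bug. The pattern, as a minimal sketch:

/* Sketch: a statistics counter that may legitimately wrap.
 * With CONFIG_PAX_REFCOUNT, atomic_inc()/atomic_add() on atomic_t
 * detect overflow; the *_unchecked variants deliberately do not. */
static atomic_unchecked_t bytes_rendered = ATOMIC_INIT(0);

static void account_render(unsigned int len)
{
	atomic_add_unchecked(len, &bytes_rendered);
}

static int bytes_rendered_show(void)
{
	return atomic_read_unchecked(&bytes_rendered);
}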
67441diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
67442index 0993a22..32ba2fe 100644
67443--- a/include/video/uvesafb.h
67444+++ b/include/video/uvesafb.h
67445@@ -177,6 +177,7 @@ struct uvesafb_par {
67446 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
67447 u8 pmi_setpal; /* PMI for palette changes */
67448 u16 *pmi_base; /* protected mode interface location */
67449+ u8 *pmi_code; /* protected mode code location */
67450 void *pmi_start;
67451 void *pmi_pal;
67452 u8 *vbe_state_orig; /*
67453diff --git a/init/Kconfig b/init/Kconfig
67454index 6fdd6e3..5b01610 100644
67455--- a/init/Kconfig
67456+++ b/init/Kconfig
67457@@ -925,6 +925,7 @@ endif # CGROUPS
67458
67459 config CHECKPOINT_RESTORE
67460 bool "Checkpoint/restore support" if EXPERT
67461+ depends on !GRKERNSEC
67462 default n
67463 help
67464 Enables additional kernel features in a sake of checkpoint/restore.
67465@@ -1016,6 +1017,8 @@ config UIDGID_CONVERTED
67466 depends on OCFS2_FS = n
67467 depends on XFS_FS = n
67468
67469+ depends on GRKERNSEC = n
67470+
67471 config UIDGID_STRICT_TYPE_CHECKS
67472 bool "Require conversions between uid/gids and their internal representation"
67473 depends on UIDGID_CONVERTED
67474@@ -1405,7 +1408,7 @@ config SLUB_DEBUG
67475
67476 config COMPAT_BRK
67477 bool "Disable heap randomization"
67478- default y
67479+ default n
67480 help
67481 Randomizing heap placement makes heap exploits harder, but it
67482 also breaks ancient binaries (including anything libc5 based).
67483@@ -1648,7 +1651,7 @@ config INIT_ALL_POSSIBLE
67484 config STOP_MACHINE
67485 bool
67486 default y
67487- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
67488+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
67489 help
67490 Need stop_machine() primitive.
67491
67492diff --git a/init/Makefile b/init/Makefile
67493index 7bc47ee..6da2dc7 100644
67494--- a/init/Makefile
67495+++ b/init/Makefile
67496@@ -2,6 +2,9 @@
67497 # Makefile for the linux kernel.
67498 #
67499
67500+ccflags-y := $(GCC_PLUGINS_CFLAGS)
67501+asflags-y := $(GCC_PLUGINS_AFLAGS)
67502+
67503 obj-y := main.o version.o mounts.o
67504 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
67505 obj-y += noinitramfs.o
67506diff --git a/init/do_mounts.c b/init/do_mounts.c
67507index f8a6642..4e5ee1b 100644
67508--- a/init/do_mounts.c
67509+++ b/init/do_mounts.c
67510@@ -336,11 +336,11 @@ static void __init get_fs_names(char *page)
67511 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
67512 {
67513 struct super_block *s;
67514- int err = sys_mount(name, "/root", fs, flags, data);
67515+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
67516 if (err)
67517 return err;
67518
67519- sys_chdir("/root");
67520+ sys_chdir((const char __force_user *)"/root");
67521 s = current->fs->pwd.dentry->d_sb;
67522 ROOT_DEV = s->s_dev;
67523 printk(KERN_INFO
67524@@ -461,18 +461,18 @@ void __init change_floppy(char *fmt, ...)
67525 va_start(args, fmt);
67526 vsprintf(buf, fmt, args);
67527 va_end(args);
67528- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
67529+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
67530 if (fd >= 0) {
67531 sys_ioctl(fd, FDEJECT, 0);
67532 sys_close(fd);
67533 }
67534 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
67535- fd = sys_open("/dev/console", O_RDWR, 0);
67536+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
67537 if (fd >= 0) {
67538 sys_ioctl(fd, TCGETS, (long)&termios);
67539 termios.c_lflag &= ~ICANON;
67540 sys_ioctl(fd, TCSETSF, (long)&termios);
67541- sys_read(fd, &c, 1);
67542+ sys_read(fd, (char __user *)&c, 1);
67543 termios.c_lflag |= ICANON;
67544 sys_ioctl(fd, TCSETSF, (long)&termios);
67545 sys_close(fd);
67546@@ -566,6 +566,6 @@ void __init prepare_namespace(void)
67547 mount_root();
67548 out:
67549 devtmpfs_mount("dev");
67550- sys_mount(".", "/", NULL, MS_MOVE, NULL);
67551- sys_chroot(".");
67552+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
67553+ sys_chroot((const char __force_user *)".");
67554 }
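The casts here are the mechanical half of PAX_MEMORY_UDEREF: once user and kernel pointers live in disjoint sparse address spaces, every early-boot call that feeds a kernel string to a syscall taking a __user pointer needs a __force_user annotation to state that the mismatch is intentional. The recurring shape, as a sketch:

/* Sketch: early boot runs with no userspace, so kernel buffers are
 * legitimately passed to __user-typed syscalls; __force_user tells
 * sparse the address-space "violation" is deliberate. */
static void __init example_early_setup(void)
{
	sys_mkdir((const char __force_user *)"/mnt", 0700);
	sys_chdir((const char __force_user *)"/mnt");
}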
67555diff --git a/init/do_mounts.h b/init/do_mounts.h
67556index f5b978a..69dbfe8 100644
67557--- a/init/do_mounts.h
67558+++ b/init/do_mounts.h
67559@@ -15,15 +15,15 @@ extern int root_mountflags;
67560
67561 static inline int create_dev(char *name, dev_t dev)
67562 {
67563- sys_unlink(name);
67564- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
67565+ sys_unlink((char __force_user *)name);
67566+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
67567 }
67568
67569 #if BITS_PER_LONG == 32
67570 static inline u32 bstat(char *name)
67571 {
67572 struct stat64 stat;
67573- if (sys_stat64(name, &stat) != 0)
67574+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
67575 return 0;
67576 if (!S_ISBLK(stat.st_mode))
67577 return 0;
67578@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
67579 static inline u32 bstat(char *name)
67580 {
67581 struct stat stat;
67582- if (sys_newstat(name, &stat) != 0)
67583+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
67584 return 0;
67585 if (!S_ISBLK(stat.st_mode))
67586 return 0;
67587diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
67588index 5e4ded5..aa3cd7e 100644
67589--- a/init/do_mounts_initrd.c
67590+++ b/init/do_mounts_initrd.c
67591@@ -54,8 +54,8 @@ static void __init handle_initrd(void)
67592 create_dev("/dev/root.old", Root_RAM0);
67593 /* mount initrd on rootfs' /root */
67594 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
67595- sys_mkdir("/old", 0700);
67596- sys_chdir("/old");
67597+ sys_mkdir((const char __force_user *)"/old", 0700);
67598+ sys_chdir((const char __force_user *)"/old");
67599
67600 /*
67601 * In case that a resume from disk is carried out by linuxrc or one of
67602@@ -69,31 +69,31 @@ static void __init handle_initrd(void)
67603 current->flags &= ~PF_FREEZER_SKIP;
67604
67605 /* move initrd to rootfs' /old */
67606- sys_mount("..", ".", NULL, MS_MOVE, NULL);
67607+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
67608 /* switch root and cwd back to / of rootfs */
67609- sys_chroot("..");
67610+ sys_chroot((const char __force_user *)"..");
67611
67612 if (new_decode_dev(real_root_dev) == Root_RAM0) {
67613- sys_chdir("/old");
67614+ sys_chdir((const char __force_user *)"/old");
67615 return;
67616 }
67617
67618- sys_chdir("/");
67619+ sys_chdir((const char __force_user *)"/");
67620 ROOT_DEV = new_decode_dev(real_root_dev);
67621 mount_root();
67622
67623 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
67624- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
67625+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
67626 if (!error)
67627 printk("okay\n");
67628 else {
67629- int fd = sys_open("/dev/root.old", O_RDWR, 0);
67630+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
67631 if (error == -ENOENT)
67632 printk("/initrd does not exist. Ignored.\n");
67633 else
67634 printk("failed\n");
67635 printk(KERN_NOTICE "Unmounting old root\n");
67636- sys_umount("/old", MNT_DETACH);
67637+ sys_umount((char __force_user *)"/old", MNT_DETACH);
67638 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
67639 if (fd < 0) {
67640 error = fd;
67641@@ -116,11 +116,11 @@ int __init initrd_load(void)
67642 * mounted in the normal path.
67643 */
67644 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
67645- sys_unlink("/initrd.image");
67646+ sys_unlink((const char __force_user *)"/initrd.image");
67647 handle_initrd();
67648 return 1;
67649 }
67650 }
67651- sys_unlink("/initrd.image");
67652+ sys_unlink((const char __force_user *)"/initrd.image");
67653 return 0;
67654 }
67655diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
67656index 8cb6db5..d729f50 100644
67657--- a/init/do_mounts_md.c
67658+++ b/init/do_mounts_md.c
67659@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
67660 partitioned ? "_d" : "", minor,
67661 md_setup_args[ent].device_names);
67662
67663- fd = sys_open(name, 0, 0);
67664+ fd = sys_open((char __force_user *)name, 0, 0);
67665 if (fd < 0) {
67666 printk(KERN_ERR "md: open failed - cannot start "
67667 "array %s\n", name);
67668@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
67669 * array without it
67670 */
67671 sys_close(fd);
67672- fd = sys_open(name, 0, 0);
67673+ fd = sys_open((char __force_user *)name, 0, 0);
67674 sys_ioctl(fd, BLKRRPART, 0);
67675 }
67676 sys_close(fd);
67677@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
67678
67679 wait_for_device_probe();
67680
67681- fd = sys_open("/dev/md0", 0, 0);
67682+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
67683 if (fd >= 0) {
67684 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
67685 sys_close(fd);
67686diff --git a/init/init_task.c b/init/init_task.c
67687index 8b2f399..f0797c9 100644
67688--- a/init/init_task.c
67689+++ b/init/init_task.c
67690@@ -20,5 +20,9 @@ EXPORT_SYMBOL(init_task);
67691 * Initial thread structure. Alignment of this is handled by a special
67692 * linker map entry.
67693 */
67694+#ifdef CONFIG_X86
67695+union thread_union init_thread_union __init_task_data;
67696+#else
67697 union thread_union init_thread_union __init_task_data =
67698 { INIT_THREAD_INFO(init_task) };
67699+#endif
67700diff --git a/init/initramfs.c b/init/initramfs.c
67701index 84c6bf1..8899338 100644
67702--- a/init/initramfs.c
67703+++ b/init/initramfs.c
67704@@ -84,7 +84,7 @@ static void __init free_hash(void)
67705 }
67706 }
67707
67708-static long __init do_utime(char *filename, time_t mtime)
67709+static long __init do_utime(char __force_user *filename, time_t mtime)
67710 {
67711 struct timespec t[2];
67712
67713@@ -119,7 +119,7 @@ static void __init dir_utime(void)
67714 struct dir_entry *de, *tmp;
67715 list_for_each_entry_safe(de, tmp, &dir_list, list) {
67716 list_del(&de->list);
67717- do_utime(de->name, de->mtime);
67718+ do_utime((char __force_user *)de->name, de->mtime);
67719 kfree(de->name);
67720 kfree(de);
67721 }
67722@@ -281,7 +281,7 @@ static int __init maybe_link(void)
67723 if (nlink >= 2) {
67724 char *old = find_link(major, minor, ino, mode, collected);
67725 if (old)
67726- return (sys_link(old, collected) < 0) ? -1 : 1;
67727+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
67728 }
67729 return 0;
67730 }
67731@@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
67732 {
67733 struct stat st;
67734
67735- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
67736+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
67737 if (S_ISDIR(st.st_mode))
67738- sys_rmdir(path);
67739+ sys_rmdir((char __force_user *)path);
67740 else
67741- sys_unlink(path);
67742+ sys_unlink((char __force_user *)path);
67743 }
67744 }
67745
67746@@ -315,7 +315,7 @@ static int __init do_name(void)
67747 int openflags = O_WRONLY|O_CREAT;
67748 if (ml != 1)
67749 openflags |= O_TRUNC;
67750- wfd = sys_open(collected, openflags, mode);
67751+ wfd = sys_open((char __force_user *)collected, openflags, mode);
67752
67753 if (wfd >= 0) {
67754 sys_fchown(wfd, uid, gid);
67755@@ -327,17 +327,17 @@ static int __init do_name(void)
67756 }
67757 }
67758 } else if (S_ISDIR(mode)) {
67759- sys_mkdir(collected, mode);
67760- sys_chown(collected, uid, gid);
67761- sys_chmod(collected, mode);
67762+ sys_mkdir((char __force_user *)collected, mode);
67763+ sys_chown((char __force_user *)collected, uid, gid);
67764+ sys_chmod((char __force_user *)collected, mode);
67765 dir_add(collected, mtime);
67766 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
67767 S_ISFIFO(mode) || S_ISSOCK(mode)) {
67768 if (maybe_link() == 0) {
67769- sys_mknod(collected, mode, rdev);
67770- sys_chown(collected, uid, gid);
67771- sys_chmod(collected, mode);
67772- do_utime(collected, mtime);
67773+ sys_mknod((char __force_user *)collected, mode, rdev);
67774+ sys_chown((char __force_user *)collected, uid, gid);
67775+ sys_chmod((char __force_user *)collected, mode);
67776+ do_utime((char __force_user *)collected, mtime);
67777 }
67778 }
67779 return 0;
67780@@ -346,15 +346,15 @@ static int __init do_name(void)
67781 static int __init do_copy(void)
67782 {
67783 if (count >= body_len) {
67784- sys_write(wfd, victim, body_len);
67785+ sys_write(wfd, (char __force_user *)victim, body_len);
67786 sys_close(wfd);
67787- do_utime(vcollected, mtime);
67788+ do_utime((char __force_user *)vcollected, mtime);
67789 kfree(vcollected);
67790 eat(body_len);
67791 state = SkipIt;
67792 return 0;
67793 } else {
67794- sys_write(wfd, victim, count);
67795+ sys_write(wfd, (char __force_user *)victim, count);
67796 body_len -= count;
67797 eat(count);
67798 return 1;
67799@@ -365,9 +365,9 @@ static int __init do_symlink(void)
67800 {
67801 collected[N_ALIGN(name_len) + body_len] = '\0';
67802 clean_path(collected, 0);
67803- sys_symlink(collected + N_ALIGN(name_len), collected);
67804- sys_lchown(collected, uid, gid);
67805- do_utime(collected, mtime);
67806+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
67807+ sys_lchown((char __force_user *)collected, uid, gid);
67808+ do_utime((char __force_user *)collected, mtime);
67809 state = SkipIt;
67810 next_state = Reset;
67811 return 0;
67812diff --git a/init/main.c b/init/main.c
67813index e33e09d..b699703 100644
67814--- a/init/main.c
67815+++ b/init/main.c
67816@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
67817 extern void tc_init(void);
67818 #endif
67819
67820+extern void grsecurity_init(void);
67821+
67822 /*
67823 * Debug helper: via this flag we know that we are in 'early bootup code'
67824 * where only the boot processor is running with IRQ disabled. This means
67825@@ -149,6 +151,61 @@ static int __init set_reset_devices(char *str)
67826
67827 __setup("reset_devices", set_reset_devices);
67828
67829+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67830+int grsec_proc_gid = CONFIG_GRKERNSEC_PROC_GID;
67831+static int __init setup_grsec_proc_gid(char *str)
67832+{
67833+ grsec_proc_gid = (int)simple_strtol(str, NULL, 0);
67834+ return 1;
67835+}
67836+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
67837+#endif
67838+
67839+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
67840+extern char pax_enter_kernel_user[];
67841+extern char pax_exit_kernel_user[];
67842+extern pgdval_t clone_pgd_mask;
67843+#endif
67844+
67845+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
67846+static int __init setup_pax_nouderef(char *str)
67847+{
67848+#ifdef CONFIG_X86_32
67849+ unsigned int cpu;
67850+ struct desc_struct *gdt;
67851+
67852+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
67853+ gdt = get_cpu_gdt_table(cpu);
67854+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
67855+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
67856+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
67857+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
67858+ }
67859+ loadsegment(ds, __KERNEL_DS);
67860+ loadsegment(es, __KERNEL_DS);
67861+ loadsegment(ss, __KERNEL_DS);
67862+#else
67863+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
67864+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
67865+ clone_pgd_mask = ~(pgdval_t)0UL;
67866+#endif
67867+
67868+ return 0;
67869+}
67870+early_param("pax_nouderef", setup_pax_nouderef);
67871+#endif
67872+
67873+#ifdef CONFIG_PAX_SOFTMODE
67874+int pax_softmode;
67875+
67876+static int __init setup_pax_softmode(char *str)
67877+{
67878+ get_option(&str, &pax_softmode);
67879+ return 1;
67880+}
67881+__setup("pax_softmode=", setup_pax_softmode);
67882+#endif
67883+
67884 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
67885 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
67886 static const char *panic_later, *panic_param;
67887@@ -681,6 +738,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
67888 {
67889 int count = preempt_count();
67890 int ret;
67891+ const char *msg1 = "", *msg2 = "";
67892
67893 if (initcall_debug)
67894 ret = do_one_initcall_debug(fn);
67895@@ -693,15 +751,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
67896 sprintf(msgbuf, "error code %d ", ret);
67897
67898 if (preempt_count() != count) {
67899- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
67900+ msg1 = " preemption imbalance";
67901 preempt_count() = count;
67902 }
67903 if (irqs_disabled()) {
67904- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
67905+ msg2 = " disabled interrupts";
67906 local_irq_enable();
67907 }
67908- if (msgbuf[0]) {
67909- printk("initcall %pF returned with %s\n", fn, msgbuf);
67910+ if (msgbuf[0] || *msg1 || *msg2) {
67911+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
67912 }
67913
67914 return ret;
67915@@ -743,6 +801,10 @@ static char *initcall_level_names[] __initdata = {
67916 "late",
67917 };
67918
67919+#ifdef CONFIG_PAX_LATENT_ENTROPY
67920+u64 latent_entropy;
67921+#endif
67922+
67923 static void __init do_initcall_level(int level)
67924 {
67925 extern const struct kernel_param __start___param[], __stop___param[];
67926@@ -755,8 +817,14 @@ static void __init do_initcall_level(int level)
67927 level, level,
67928 &repair_env_string);
67929
67930- for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
67931+ for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) {
67932 do_one_initcall(*fn);
67933+
67934+#ifdef CONFIG_PAX_LATENT_ENTROPY
67935+ add_device_randomness(&latent_entropy, sizeof(latent_entropy));
67936+#endif
67937+
67938+ }
67939 }
67940
67941 static void __init do_initcalls(void)
67942@@ -790,8 +858,14 @@ static void __init do_pre_smp_initcalls(void)
67943 {
67944 initcall_t *fn;
67945
67946- for (fn = __initcall_start; fn < __initcall0_start; fn++)
67947+ for (fn = __initcall_start; fn < __initcall0_start; fn++) {
67948 do_one_initcall(*fn);
67949+
67950+#ifdef CONFIG_PAX_LATENT_ENTROPY
67951+ add_device_randomness(&latent_entropy, sizeof(latent_entropy));
67952+#endif
67953+
67954+ }
67955 }
67956
67957 static int run_init_process(const char *init_filename)
67958@@ -876,7 +950,7 @@ static void __init kernel_init_freeable(void)
67959 do_basic_setup();
67960
67961 /* Open the /dev/console on the rootfs, this should never fail */
67962- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
67963+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
67964 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
67965
67966 (void) sys_dup(0);
67967@@ -889,11 +963,13 @@ static void __init kernel_init_freeable(void)
67968 if (!ramdisk_execute_command)
67969 ramdisk_execute_command = "/init";
67970
67971- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
67972+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
67973 ramdisk_execute_command = NULL;
67974 prepare_namespace();
67975 }
67976
67977+ grsecurity_init();
67978+
67979 /*
67980 * Ok, we have completed the initial bootup, and
67981 * we're essentially up and running. Get rid of the
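The boot-parameter block added above uses both kernel hook flavors: grsec_proc_gid= and pax_softmode= go through __setup() (parsed with the ordinary options; the handler returns 1 to mark the option consumed), while pax_nouderef uses early_param() so the GDT/PGD fixups run before the rest of early init depends on them. The generic shape, names illustrative:

/* Sketch: the two command-line hook styles used above. */
static int example_knob __read_mostly;

static int __init setup_example_knob(char *str)
{
	get_option(&str, &example_knob);
	return 1;			/* option handled */
}
__setup("example_knob=", setup_example_knob);

static int __init example_early(char *str)
{
	/* runs during early param parsing, before initcalls */
	return 0;			/* 0 = success for early_param */
}
early_param("example_early", example_early);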
67982diff --git a/ipc/mqueue.c b/ipc/mqueue.c
67983index 71a3ca1..cc330ee 100644
67984--- a/ipc/mqueue.c
67985+++ b/ipc/mqueue.c
67986@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
67987 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
67988 info->attr.mq_msgsize);
67989
67990+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
67991 spin_lock(&mq_lock);
67992 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
67993 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
67994diff --git a/ipc/msg.c b/ipc/msg.c
67995index a71af5a..a90a110 100644
67996--- a/ipc/msg.c
67997+++ b/ipc/msg.c
67998@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
67999 return security_msg_queue_associate(msq, msgflg);
68000 }
68001
68002+static struct ipc_ops msg_ops = {
68003+ .getnew = newque,
68004+ .associate = msg_security,
68005+ .more_checks = NULL
68006+};
68007+
68008 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
68009 {
68010 struct ipc_namespace *ns;
68011- struct ipc_ops msg_ops;
68012 struct ipc_params msg_params;
68013
68014 ns = current->nsproxy->ipc_ns;
68015
68016- msg_ops.getnew = newque;
68017- msg_ops.associate = msg_security;
68018- msg_ops.more_checks = NULL;
68019-
68020 msg_params.key = key;
68021 msg_params.flg = msgflg;
68022
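Hoisting msg_ops (and sem_ops/shm_ops below) from the syscall stack frame to file scope gives the function-pointer table a fixed address that the patch's constification machinery can then make read-only, removing a writable set of kernel function pointers. The unchanged tail of the syscall simply takes its address; sketched here with the upstream ipcget() call shape:

/* Sketch: the call shape after the conversion. ipcget() receives a
 * pointer to the now-static ops table instead of an on-stack copy. */
static long example_msgget(struct ipc_namespace *ns, key_t key, int msgflg)
{
	struct ipc_params msg_params = { .key = key, .flg = msgflg };

	return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params);
}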
68023diff --git a/ipc/sem.c b/ipc/sem.c
68024index 58d31f1..cce7a55 100644
68025--- a/ipc/sem.c
68026+++ b/ipc/sem.c
68027@@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
68028 return 0;
68029 }
68030
68031+static struct ipc_ops sem_ops = {
68032+ .getnew = newary,
68033+ .associate = sem_security,
68034+ .more_checks = sem_more_checks
68035+};
68036+
68037 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
68038 {
68039 struct ipc_namespace *ns;
68040- struct ipc_ops sem_ops;
68041 struct ipc_params sem_params;
68042
68043 ns = current->nsproxy->ipc_ns;
68044@@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
68045 if (nsems < 0 || nsems > ns->sc_semmsl)
68046 return -EINVAL;
68047
68048- sem_ops.getnew = newary;
68049- sem_ops.associate = sem_security;
68050- sem_ops.more_checks = sem_more_checks;
68051-
68052 sem_params.key = key;
68053 sem_params.flg = semflg;
68054 sem_params.u.nsems = nsems;
68055diff --git a/ipc/shm.c b/ipc/shm.c
68056index dff40c9..9450e27 100644
68057--- a/ipc/shm.c
68058+++ b/ipc/shm.c
68059@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
68060 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
68061 #endif
68062
68063+#ifdef CONFIG_GRKERNSEC
68064+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
68065+ const time_t shm_createtime, const uid_t cuid,
68066+ const int shmid);
68067+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
68068+ const time_t shm_createtime);
68069+#endif
68070+
68071 void shm_init_ns(struct ipc_namespace *ns)
68072 {
68073 ns->shm_ctlmax = SHMMAX;
68074@@ -520,6 +528,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
68075 shp->shm_lprid = 0;
68076 shp->shm_atim = shp->shm_dtim = 0;
68077 shp->shm_ctim = get_seconds();
68078+#ifdef CONFIG_GRKERNSEC
68079+ {
68080+ struct timespec timeval;
68081+ do_posix_clock_monotonic_gettime(&timeval);
68082+
68083+ shp->shm_createtime = timeval.tv_sec;
68084+ }
68085+#endif
68086 shp->shm_segsz = size;
68087 shp->shm_nattch = 0;
68088 shp->shm_file = file;
68089@@ -571,18 +587,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
68090 return 0;
68091 }
68092
68093+static struct ipc_ops shm_ops = {
68094+ .getnew = newseg,
68095+ .associate = shm_security,
68096+ .more_checks = shm_more_checks
68097+};
68098+
68099 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
68100 {
68101 struct ipc_namespace *ns;
68102- struct ipc_ops shm_ops;
68103 struct ipc_params shm_params;
68104
68105 ns = current->nsproxy->ipc_ns;
68106
68107- shm_ops.getnew = newseg;
68108- shm_ops.associate = shm_security;
68109- shm_ops.more_checks = shm_more_checks;
68110-
68111 shm_params.key = key;
68112 shm_params.flg = shmflg;
68113 shm_params.u.size = size;
68114@@ -1003,6 +1020,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
68115 f_mode = FMODE_READ | FMODE_WRITE;
68116 }
68117 if (shmflg & SHM_EXEC) {
68118+
68119+#ifdef CONFIG_PAX_MPROTECT
68120+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
68121+ goto out;
68122+#endif
68123+
68124 prot |= PROT_EXEC;
68125 acc_mode |= S_IXUGO;
68126 }
68127@@ -1026,9 +1049,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
68128 if (err)
68129 goto out_unlock;
68130
68131+#ifdef CONFIG_GRKERNSEC
68132+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
68133+ shp->shm_perm.cuid, shmid) ||
68134+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
68135+ err = -EACCES;
68136+ goto out_unlock;
68137+ }
68138+#endif
68139+
68140 path = shp->shm_file->f_path;
68141 path_get(&path);
68142 shp->shm_nattch++;
68143+#ifdef CONFIG_GRKERNSEC
68144+ shp->shm_lapid = current->pid;
68145+#endif
68146 size = i_size_read(path.dentry->d_inode);
68147 shm_unlock(shp);
68148
68149diff --git a/kernel/acct.c b/kernel/acct.c
68150index 051e071..15e0920 100644
68151--- a/kernel/acct.c
68152+++ b/kernel/acct.c
68153@@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
68154 */
68155 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
68156 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
68157- file->f_op->write(file, (char *)&ac,
68158+ file->f_op->write(file, (char __force_user *)&ac,
68159 sizeof(acct_t), &file->f_pos);
68160 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
68161 set_fs(fs);
68162diff --git a/kernel/audit.c b/kernel/audit.c
68163index 40414e9..c920b72 100644
68164--- a/kernel/audit.c
68165+++ b/kernel/audit.c
68166@@ -116,7 +116,7 @@ u32 audit_sig_sid = 0;
68167 3) suppressed due to audit_rate_limit
68168 4) suppressed due to audit_backlog_limit
68169 */
68170-static atomic_t audit_lost = ATOMIC_INIT(0);
68171+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
68172
68173 /* The netlink socket. */
68174 static struct sock *audit_sock;
68175@@ -238,7 +238,7 @@ void audit_log_lost(const char *message)
68176 unsigned long now;
68177 int print;
68178
68179- atomic_inc(&audit_lost);
68180+ atomic_inc_unchecked(&audit_lost);
68181
68182 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
68183
68184@@ -257,7 +257,7 @@ void audit_log_lost(const char *message)
68185 printk(KERN_WARNING
68186 "audit: audit_lost=%d audit_rate_limit=%d "
68187 "audit_backlog_limit=%d\n",
68188- atomic_read(&audit_lost),
68189+ atomic_read_unchecked(&audit_lost),
68190 audit_rate_limit,
68191 audit_backlog_limit);
68192 audit_panic(message);
68193@@ -677,7 +677,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
68194 status_set.pid = audit_pid;
68195 status_set.rate_limit = audit_rate_limit;
68196 status_set.backlog_limit = audit_backlog_limit;
68197- status_set.lost = atomic_read(&audit_lost);
68198+ status_set.lost = atomic_read_unchecked(&audit_lost);
68199 status_set.backlog = skb_queue_len(&audit_skb_queue);
68200 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
68201 &status_set, sizeof(status_set));
68202diff --git a/kernel/auditsc.c b/kernel/auditsc.c
68203index 157e989..b28b365 100644
68204--- a/kernel/auditsc.c
68205+++ b/kernel/auditsc.c
68206@@ -2352,7 +2352,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
68207 }
68208
68209 /* global counter which is incremented every time something logs in */
68210-static atomic_t session_id = ATOMIC_INIT(0);
68211+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
68212
68213 /**
68214 * audit_set_loginuid - set current task's audit_context loginuid
68215@@ -2376,7 +2376,7 @@ int audit_set_loginuid(kuid_t loginuid)
68216 return -EPERM;
68217 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
68218
68219- sessionid = atomic_inc_return(&session_id);
68220+ sessionid = atomic_inc_return_unchecked(&session_id);
68221 if (context && context->in_syscall) {
68222 struct audit_buffer *ab;
68223
68224diff --git a/kernel/capability.c b/kernel/capability.c
68225index 493d972..ea17248 100644
68226--- a/kernel/capability.c
68227+++ b/kernel/capability.c
68228@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
68229 * before modification is attempted and the application
68230 * fails.
68231 */
68232+ if (tocopy > ARRAY_SIZE(kdata))
68233+ return -EFAULT;
68234+
68235 if (copy_to_user(dataptr, kdata, tocopy
68236 * sizeof(struct __user_cap_data_struct))) {
68237 return -EFAULT;
68238@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
68239 int ret;
68240
68241 rcu_read_lock();
68242- ret = security_capable(__task_cred(t), ns, cap);
68243+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
68244+ gr_task_is_capable(t, __task_cred(t), cap);
68245 rcu_read_unlock();
68246
68247- return (ret == 0);
68248+ return ret;
68249 }
68250
68251 /**
68252@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
68253 int ret;
68254
68255 rcu_read_lock();
68256- ret = security_capable_noaudit(__task_cred(t), ns, cap);
68257+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
68258 rcu_read_unlock();
68259
68260- return (ret == 0);
68261+ return ret;
68262 }
68263
68264 /**
68265@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
68266 BUG();
68267 }
68268
68269- if (security_capable(current_cred(), ns, cap) == 0) {
68270+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
68271 current->flags |= PF_SUPERPRIV;
68272 return true;
68273 }
68274@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
68275 }
68276 EXPORT_SYMBOL(ns_capable);
68277
68278+bool ns_capable_nolog(struct user_namespace *ns, int cap)
68279+{
68280+ if (unlikely(!cap_valid(cap))) {
68281+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
68282+ BUG();
68283+ }
68284+
68285+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
68286+ current->flags |= PF_SUPERPRIV;
68287+ return true;
68288+ }
68289+ return false;
68290+}
68291+EXPORT_SYMBOL(ns_capable_nolog);
68292+
68293 /**
68294 * capable - Determine if the current task has a superior capability in effect
68295 * @cap: The capability to be tested for
68296@@ -408,6 +427,12 @@ bool capable(int cap)
68297 }
68298 EXPORT_SYMBOL(capable);
68299
68300+bool capable_nolog(int cap)
68301+{
68302+ return ns_capable_nolog(&init_user_ns, cap);
68303+}
68304+EXPORT_SYMBOL(capable_nolog);
68305+
68306 /**
68307 * nsown_capable - Check superior capability to one's own user_ns
68308 * @cap: The capability in question
68309@@ -440,3 +465,10 @@ bool inode_capable(const struct inode *inode, int cap)
68310
68311 return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
68312 }
68313+
68314+bool inode_capable_nolog(const struct inode *inode, int cap)
68315+{
68316+ struct user_namespace *ns = current_user_ns();
68317+
68318+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
68319+}
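The _nolog family mirrors capable()/ns_capable()/inode_capable() but routes the grsecurity check through gr_is_capable_nolog(), so kernel-internal probes can test a capability without emitting an RBAC audit message for each expected denial. Choice of variant, sketched with illustrative call sites:

/* Sketch: audited vs. silent capability checks. */
#include <linux/capability.h>
#include <linux/errno.h>

static int example_op(bool user_requested)
{
	if (user_requested) {
		if (!capable(CAP_SYS_ADMIN))		/* log the denial */
			return -EPERM;
	} else {
		if (!capable_nolog(CAP_SYS_ADMIN))	/* quiet probe */
			return -EPERM;
	}
	return 0;
}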
68320diff --git a/kernel/cgroup.c b/kernel/cgroup.c
68321index ad99830..992d8a7 100644
68322--- a/kernel/cgroup.c
68323+++ b/kernel/cgroup.c
68324@@ -5514,7 +5514,7 @@ static int cgroup_css_links_read(struct cgroup *cont,
68325 struct css_set *cg = link->cg;
68326 struct task_struct *task;
68327 int count = 0;
68328- seq_printf(seq, "css_set %p\n", cg);
68329+ seq_printf(seq, "css_set %pK\n", cg);
68330 list_for_each_entry(task, &cg->tasks, cg_list) {
68331 if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
68332 seq_puts(seq, " ...\n");
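One character, but it is the whole fix: %pK honors the mainline kptr_restrict sysctl, printing zeros for unprivileged readers (hashed values on much later kernels) instead of leaking the real css_set address through the cgroup debug file. The printk contract:

/* Sketch: %p vs. %pK. */
static void example_dump(const void *obj)
{
	pr_info("raw:     %p\n", obj);	/* always the real pointer */
	pr_info("guarded: %pK\n", obj);	/* zeroed when kptr_restrict
					 * denies the reader */
}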
68333diff --git a/kernel/compat.c b/kernel/compat.c
68334index c28a306..b4d0cf3 100644
68335--- a/kernel/compat.c
68336+++ b/kernel/compat.c
68337@@ -13,6 +13,7 @@
68338
68339 #include <linux/linkage.h>
68340 #include <linux/compat.h>
68341+#include <linux/module.h>
68342 #include <linux/errno.h>
68343 #include <linux/time.h>
68344 #include <linux/signal.h>
68345@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
68346 mm_segment_t oldfs;
68347 long ret;
68348
68349- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
68350+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
68351 oldfs = get_fs();
68352 set_fs(KERNEL_DS);
68353 ret = hrtimer_nanosleep_restart(restart);
68354@@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
68355 oldfs = get_fs();
68356 set_fs(KERNEL_DS);
68357 ret = hrtimer_nanosleep(&tu,
68358- rmtp ? (struct timespec __user *)&rmt : NULL,
68359+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
68360 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
68361 set_fs(oldfs);
68362
68363@@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
68364 mm_segment_t old_fs = get_fs();
68365
68366 set_fs(KERNEL_DS);
68367- ret = sys_sigpending((old_sigset_t __user *) &s);
68368+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
68369 set_fs(old_fs);
68370 if (ret == 0)
68371 ret = put_user(s, set);
68372@@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
68373 mm_segment_t old_fs = get_fs();
68374
68375 set_fs(KERNEL_DS);
68376- ret = sys_old_getrlimit(resource, &r);
68377+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
68378 set_fs(old_fs);
68379
68380 if (!ret) {
68381@@ -523,7 +524,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
68382 mm_segment_t old_fs = get_fs();
68383
68384 set_fs(KERNEL_DS);
68385- ret = sys_getrusage(who, (struct rusage __user *) &r);
68386+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
68387 set_fs(old_fs);
68388
68389 if (ret)
68390@@ -550,8 +551,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
68391 set_fs (KERNEL_DS);
68392 ret = sys_wait4(pid,
68393 (stat_addr ?
68394- (unsigned int __user *) &status : NULL),
68395- options, (struct rusage __user *) &r);
68396+ (unsigned int __force_user *) &status : NULL),
68397+ options, (struct rusage __force_user *) &r);
68398 set_fs (old_fs);
68399
68400 if (ret > 0) {
68401@@ -576,8 +577,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
68402 memset(&info, 0, sizeof(info));
68403
68404 set_fs(KERNEL_DS);
68405- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
68406- uru ? (struct rusage __user *)&ru : NULL);
68407+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
68408+ uru ? (struct rusage __force_user *)&ru : NULL);
68409 set_fs(old_fs);
68410
68411 if ((ret < 0) || (info.si_signo == 0))
68412@@ -707,8 +708,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
68413 oldfs = get_fs();
68414 set_fs(KERNEL_DS);
68415 err = sys_timer_settime(timer_id, flags,
68416- (struct itimerspec __user *) &newts,
68417- (struct itimerspec __user *) &oldts);
68418+ (struct itimerspec __force_user *) &newts,
68419+ (struct itimerspec __force_user *) &oldts);
68420 set_fs(oldfs);
68421 if (!err && old && put_compat_itimerspec(old, &oldts))
68422 return -EFAULT;
68423@@ -725,7 +726,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
68424 oldfs = get_fs();
68425 set_fs(KERNEL_DS);
68426 err = sys_timer_gettime(timer_id,
68427- (struct itimerspec __user *) &ts);
68428+ (struct itimerspec __force_user *) &ts);
68429 set_fs(oldfs);
68430 if (!err && put_compat_itimerspec(setting, &ts))
68431 return -EFAULT;
68432@@ -744,7 +745,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
68433 oldfs = get_fs();
68434 set_fs(KERNEL_DS);
68435 err = sys_clock_settime(which_clock,
68436- (struct timespec __user *) &ts);
68437+ (struct timespec __force_user *) &ts);
68438 set_fs(oldfs);
68439 return err;
68440 }
68441@@ -759,7 +760,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
68442 oldfs = get_fs();
68443 set_fs(KERNEL_DS);
68444 err = sys_clock_gettime(which_clock,
68445- (struct timespec __user *) &ts);
68446+ (struct timespec __force_user *) &ts);
68447 set_fs(oldfs);
68448 if (!err && put_compat_timespec(&ts, tp))
68449 return -EFAULT;
68450@@ -779,7 +780,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
68451
68452 oldfs = get_fs();
68453 set_fs(KERNEL_DS);
68454- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
68455+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
68456 set_fs(oldfs);
68457
68458 err = compat_put_timex(utp, &txc);
68459@@ -799,7 +800,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
68460 oldfs = get_fs();
68461 set_fs(KERNEL_DS);
68462 err = sys_clock_getres(which_clock,
68463- (struct timespec __user *) &ts);
68464+ (struct timespec __force_user *) &ts);
68465 set_fs(oldfs);
68466 if (!err && tp && put_compat_timespec(&ts, tp))
68467 return -EFAULT;
68468@@ -811,9 +812,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
68469 long err;
68470 mm_segment_t oldfs;
68471 struct timespec tu;
68472- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
68473+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
68474
68475- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
68476+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
68477 oldfs = get_fs();
68478 set_fs(KERNEL_DS);
68479 err = clock_nanosleep_restart(restart);
68480@@ -845,8 +846,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
68481 oldfs = get_fs();
68482 set_fs(KERNEL_DS);
68483 err = sys_clock_nanosleep(which_clock, flags,
68484- (struct timespec __user *) &in,
68485- (struct timespec __user *) &out);
68486+ (struct timespec __force_user *) &in,
68487+ (struct timespec __force_user *) &out);
68488 set_fs(oldfs);
68489
68490 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
68491diff --git a/kernel/configs.c b/kernel/configs.c
68492index 42e8fa0..9e7406b 100644
68493--- a/kernel/configs.c
68494+++ b/kernel/configs.c
68495@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
68496 struct proc_dir_entry *entry;
68497
68498 /* create the current config file */
68499+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
68500+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
68501+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
68502+ &ikconfig_file_ops);
68503+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68504+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
68505+ &ikconfig_file_ops);
68506+#endif
68507+#else
68508 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
68509 &ikconfig_file_ops);
68510+#endif
68511+
68512 if (!entry)
68513 return -ENOMEM;
68514
68515diff --git a/kernel/cred.c b/kernel/cred.c
68516index 48cea3d..3476734 100644
68517--- a/kernel/cred.c
68518+++ b/kernel/cred.c
68519@@ -207,6 +207,16 @@ void exit_creds(struct task_struct *tsk)
68520 validate_creds(cred);
68521 alter_cred_subscribers(cred, -1);
68522 put_cred(cred);
68523+
68524+#ifdef CONFIG_GRKERNSEC_SETXID
68525+ cred = (struct cred *) tsk->delayed_cred;
68526+ if (cred != NULL) {
68527+ tsk->delayed_cred = NULL;
68528+ validate_creds(cred);
68529+ alter_cred_subscribers(cred, -1);
68530+ put_cred(cred);
68531+ }
68532+#endif
68533 }
68534
68535 /**
68536@@ -469,7 +479,7 @@ error_put:
68537 * Always returns 0 thus allowing this function to be tail-called at the end
68538 * of, say, sys_setgid().
68539 */
68540-int commit_creds(struct cred *new)
68541+static int __commit_creds(struct cred *new)
68542 {
68543 struct task_struct *task = current;
68544 const struct cred *old = task->real_cred;
68545@@ -488,6 +498,8 @@ int commit_creds(struct cred *new)
68546
68547 get_cred(new); /* we will require a ref for the subj creds too */
68548
68549+ gr_set_role_label(task, new->uid, new->gid);
68550+
68551 /* dumpability changes */
68552 if (!uid_eq(old->euid, new->euid) ||
68553 !gid_eq(old->egid, new->egid) ||
68554@@ -537,6 +549,101 @@ int commit_creds(struct cred *new)
68555 put_cred(old);
68556 return 0;
68557 }
68558+#ifdef CONFIG_GRKERNSEC_SETXID
68559+extern int set_user(struct cred *new);
68560+
68561+void gr_delayed_cred_worker(void)
68562+{
68563+ const struct cred *new = current->delayed_cred;
68564+ struct cred *ncred;
68565+
68566+ current->delayed_cred = NULL;
68567+
68568+ if (current_uid() && new != NULL) {
68569+ // from doing get_cred on it when queueing this
68570+ put_cred(new);
68571+ return;
68572+ } else if (new == NULL)
68573+ return;
68574+
68575+ ncred = prepare_creds();
68576+ if (!ncred)
68577+ goto die;
68578+ // uids
68579+ ncred->uid = new->uid;
68580+ ncred->euid = new->euid;
68581+ ncred->suid = new->suid;
68582+ ncred->fsuid = new->fsuid;
68583+ // gids
68584+ ncred->gid = new->gid;
68585+ ncred->egid = new->egid;
68586+ ncred->sgid = new->sgid;
68587+ ncred->fsgid = new->fsgid;
68588+ // groups
68589+ if (set_groups(ncred, new->group_info) < 0) {
68590+ abort_creds(ncred);
68591+ goto die;
68592+ }
68593+ // caps
68594+ ncred->securebits = new->securebits;
68595+ ncred->cap_inheritable = new->cap_inheritable;
68596+ ncred->cap_permitted = new->cap_permitted;
68597+ ncred->cap_effective = new->cap_effective;
68598+ ncred->cap_bset = new->cap_bset;
68599+
68600+ if (set_user(ncred)) {
68601+ abort_creds(ncred);
68602+ goto die;
68603+ }
68604+
68605+ // from doing get_cred on it when queueing this
68606+ put_cred(new);
68607+
68608+ __commit_creds(ncred);
68609+ return;
68610+die:
68611+ // from doing get_cred on it when queueing this
68612+ put_cred(new);
68613+ do_group_exit(SIGKILL);
68614+}
68615+#endif
68616+
68617+int commit_creds(struct cred *new)
68618+{
68619+#ifdef CONFIG_GRKERNSEC_SETXID
68620+ int ret;
68621+ int schedule_it = 0;
68622+ struct task_struct *t;
68623+
68624+ /* we won't get called with tasklist_lock held for writing
68625+ and interrupts disabled as the cred struct in that case is
68626+ init_cred
68627+ */
68628+ if (grsec_enable_setxid && !current_is_single_threaded() &&
68629+ !current_uid() && new->uid) {
68630+ schedule_it = 1;
68631+ }
68632+ ret = __commit_creds(new);
68633+ if (schedule_it) {
68634+ rcu_read_lock();
68635+ read_lock(&tasklist_lock);
68636+ for (t = next_thread(current); t != current;
68637+ t = next_thread(t)) {
68638+ if (t->delayed_cred == NULL) {
68639+ t->delayed_cred = get_cred(new);
68640+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
68641+ set_tsk_need_resched(t);
68642+ }
68643+ }
68644+ read_unlock(&tasklist_lock);
68645+ rcu_read_unlock();
68646+ }
68647+ return ret;
68648+#else
68649+ return __commit_creds(new);
68650+#endif
68651+}
68652+
68653 EXPORT_SYMBOL(commit_creds);
68654
68655 /**
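This closes the classic setuid-versus-threads race: __commit_creds() changes only the calling thread, so when a privileged multithreaded process drops identity, commit_creds() now queues a reference to the new credentials on every sibling (delayed_cred plus TIF_GRSEC_SETXID plus a forced reschedule), and each sibling applies them via gr_delayed_cred_worker() on its next kernel exit. The consumption side is a small hook on the return-to-user path; sketched here, since the actual wiring lives in arch code elsewhere in the patch:

/* Sketch: per-thread pickup of credentials queued by commit_creds().
 * gr_delayed_cred_worker() consumes current->delayed_cred itself. */
static void example_resume_userspace(void)
{
	if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
		gr_delayed_cred_worker();
}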
68656diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
68657index 9a61738..c5c8f3a 100644
68658--- a/kernel/debug/debug_core.c
68659+++ b/kernel/debug/debug_core.c
68660@@ -122,7 +122,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
68661 */
68662 static atomic_t masters_in_kgdb;
68663 static atomic_t slaves_in_kgdb;
68664-static atomic_t kgdb_break_tasklet_var;
68665+static atomic_unchecked_t kgdb_break_tasklet_var;
68666 atomic_t kgdb_setting_breakpoint;
68667
68668 struct task_struct *kgdb_usethread;
68669@@ -132,7 +132,7 @@ int kgdb_single_step;
68670 static pid_t kgdb_sstep_pid;
68671
68672 /* to keep track of the CPU which is doing the single stepping*/
68673-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
68674+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
68675
68676 /*
68677 * If you are debugging a problem where roundup (the collection of
68678@@ -540,7 +540,7 @@ return_normal:
68679 * kernel will only try for the value of sstep_tries before
68680 * giving up and continuing on.
68681 */
68682- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
68683+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
68684 (kgdb_info[cpu].task &&
68685 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
68686 atomic_set(&kgdb_active, -1);
68687@@ -634,8 +634,8 @@ cpu_master_loop:
68688 }
68689
68690 kgdb_restore:
68691- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
68692- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
68693+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
68694+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
68695 if (kgdb_info[sstep_cpu].task)
68696 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
68697 else
68698@@ -887,18 +887,18 @@ static void kgdb_unregister_callbacks(void)
68699 static void kgdb_tasklet_bpt(unsigned long ing)
68700 {
68701 kgdb_breakpoint();
68702- atomic_set(&kgdb_break_tasklet_var, 0);
68703+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
68704 }
68705
68706 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
68707
68708 void kgdb_schedule_breakpoint(void)
68709 {
68710- if (atomic_read(&kgdb_break_tasklet_var) ||
68711+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
68712 atomic_read(&kgdb_active) != -1 ||
68713 atomic_read(&kgdb_setting_breakpoint))
68714 return;
68715- atomic_inc(&kgdb_break_tasklet_var);
68716+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
68717 tasklet_schedule(&kgdb_tasklet_breakpoint);
68718 }
68719 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
68720diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
68721index 4d5f8d5..4743f33 100644
68722--- a/kernel/debug/kdb/kdb_main.c
68723+++ b/kernel/debug/kdb/kdb_main.c
68724@@ -1972,7 +1972,7 @@ static int kdb_lsmod(int argc, const char **argv)
68725 list_for_each_entry(mod, kdb_modules, list) {
68726
68727 kdb_printf("%-20s%8u 0x%p ", mod->name,
68728- mod->core_size, (void *)mod);
68729+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
68730 #ifdef CONFIG_MODULE_UNLOAD
68731 kdb_printf("%4ld ", module_refcount(mod));
68732 #endif
68733@@ -1982,7 +1982,7 @@ static int kdb_lsmod(int argc, const char **argv)
68734 kdb_printf(" (Loading)");
68735 else
68736 kdb_printf(" (Live)");
68737- kdb_printf(" 0x%p", mod->module_core);
68738+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
68739
68740 #ifdef CONFIG_MODULE_UNLOAD
68741 {
68742diff --git a/kernel/events/core.c b/kernel/events/core.c
68743index dbccf83..8c66482 100644
68744--- a/kernel/events/core.c
68745+++ b/kernel/events/core.c
68746@@ -182,7 +182,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
68747 return 0;
68748 }
68749
68750-static atomic64_t perf_event_id;
68751+static atomic64_unchecked_t perf_event_id;
68752
68753 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
68754 enum event_type_t event_type);
68755@@ -2668,7 +2668,7 @@ static void __perf_event_read(void *info)
68756
68757 static inline u64 perf_event_count(struct perf_event *event)
68758 {
68759- return local64_read(&event->count) + atomic64_read(&event->child_count);
68760+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
68761 }
68762
68763 static u64 perf_event_read(struct perf_event *event)
68764@@ -2998,9 +2998,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
68765 mutex_lock(&event->child_mutex);
68766 total += perf_event_read(event);
68767 *enabled += event->total_time_enabled +
68768- atomic64_read(&event->child_total_time_enabled);
68769+ atomic64_read_unchecked(&event->child_total_time_enabled);
68770 *running += event->total_time_running +
68771- atomic64_read(&event->child_total_time_running);
68772+ atomic64_read_unchecked(&event->child_total_time_running);
68773
68774 list_for_each_entry(child, &event->child_list, child_list) {
68775 total += perf_event_read(child);
68776@@ -3403,10 +3403,10 @@ void perf_event_update_userpage(struct perf_event *event)
68777 userpg->offset -= local64_read(&event->hw.prev_count);
68778
68779 userpg->time_enabled = enabled +
68780- atomic64_read(&event->child_total_time_enabled);
68781+ atomic64_read_unchecked(&event->child_total_time_enabled);
68782
68783 userpg->time_running = running +
68784- atomic64_read(&event->child_total_time_running);
68785+ atomic64_read_unchecked(&event->child_total_time_running);
68786
68787 arch_perf_update_userpage(userpg, now);
68788
68789@@ -3965,11 +3965,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
68790 values[n++] = perf_event_count(event);
68791 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
68792 values[n++] = enabled +
68793- atomic64_read(&event->child_total_time_enabled);
68794+ atomic64_read_unchecked(&event->child_total_time_enabled);
68795 }
68796 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
68797 values[n++] = running +
68798- atomic64_read(&event->child_total_time_running);
68799+ atomic64_read_unchecked(&event->child_total_time_running);
68800 }
68801 if (read_format & PERF_FORMAT_ID)
68802 values[n++] = primary_event_id(event);
68803@@ -4712,12 +4712,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
68804 * need to add enough zero bytes after the string to handle
68805 * the 64bit alignment we do later.
68806 */
68807- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
68808+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
68809 if (!buf) {
68810 name = strncpy(tmp, "//enomem", sizeof(tmp));
68811 goto got_name;
68812 }
68813- name = d_path(&file->f_path, buf, PATH_MAX);
68814+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
68815 if (IS_ERR(name)) {
68816 name = strncpy(tmp, "//toolong", sizeof(tmp));
68817 goto got_name;
68818@@ -6156,7 +6156,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
68819 event->parent = parent_event;
68820
68821 event->ns = get_pid_ns(current->nsproxy->pid_ns);
68822- event->id = atomic64_inc_return(&perf_event_id);
68823+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
68824
68825 event->state = PERF_EVENT_STATE_INACTIVE;
68826
68827@@ -6774,10 +6774,10 @@ static void sync_child_event(struct perf_event *child_event,
68828 /*
68829 * Add back the child's count to the parent's count:
68830 */
68831- atomic64_add(child_val, &parent_event->child_count);
68832- atomic64_add(child_event->total_time_enabled,
68833+ atomic64_add_unchecked(child_val, &parent_event->child_count);
68834+ atomic64_add_unchecked(child_event->total_time_enabled,
68835 &parent_event->child_total_time_enabled);
68836- atomic64_add(child_event->total_time_running,
68837+ atomic64_add_unchecked(child_event->total_time_running,
68838 &parent_event->child_total_time_running);
68839
68840 /*
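
Two distinct changes run through the perf hunks above. The atomic64_*_unchecked conversions opt pure statistics counters (perf_event_id, child_count, the child time totals) out of the PaX REFCOUNT overflow trap, on the reasoning that wraparound of a counter is harmless. Separately, the kzalloc/d_path pair resizes the pathname buffer: instead of allocating PATH_MAX plus sizeof(u64), the patch allocates exactly PATH_MAX and asks d_path() for at most PATH_MAX - sizeof(u64), so the 8-byte alignment padding applied later can never run past the allocation. A userspace sketch of that sizing argument, with fake_d_path() standing in for the kernel's d_path():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define DEMO_PATH_MAX 64

/* d_path() builds the name at the END of the caller's buffer; mimic that */
static char *fake_d_path(const char *path, char *buf, size_t buflen)
{
    size_t len = strlen(path) + 1;
    if (len > buflen)
        return NULL;                     /* kernel: ERR_PTR(-ENAMETOOLONG) */
    return memcpy(buf + buflen - len, path, len);
}

int main(void)
{
    char *buf = calloc(1, DEMO_PATH_MAX);
    if (!buf)
        return 1;
    /* reserve sizeof(u64) so padding to an 8-byte boundary stays in bounds */
    char *name = fake_d_path("/usr/lib/demo.so", buf,
                             DEMO_PATH_MAX - sizeof(uint64_t));
    size_t padded = name ? ((strlen(name) + 1 + 7) & ~(size_t)7) : 0;
    printf("name=%s padded=%zu of %d\n", name ? name : "?", padded, DEMO_PATH_MAX);
    free(buf);
    return 0;
}
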
68841diff --git a/kernel/exit.c b/kernel/exit.c
68842index 346616c..f103b28 100644
68843--- a/kernel/exit.c
68844+++ b/kernel/exit.c
68845@@ -182,6 +182,10 @@ void release_task(struct task_struct * p)
68846 struct task_struct *leader;
68847 int zap_leader;
68848 repeat:
68849+#ifdef CONFIG_NET
68850+ gr_del_task_from_ip_table(p);
68851+#endif
68852+
68853 /* don't need to get the RCU readlock here - the process is dead and
68854 * can't be modifying its own credentials. But shut RCU-lockdep up */
68855 rcu_read_lock();
68856@@ -394,7 +398,7 @@ int allow_signal(int sig)
68857 * know it'll be handled, so that they don't get converted to
68858 * SIGKILL or just silently dropped.
68859 */
68860- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
68861+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
68862 recalc_sigpending();
68863 spin_unlock_irq(&current->sighand->siglock);
68864 return 0;
68865@@ -430,6 +434,9 @@ void daemonize(const char *name, ...)
68866 vsnprintf(current->comm, sizeof(current->comm), name, args);
68867 va_end(args);
68868
68869+ gr_put_exec_file(current);
68870+ gr_set_kernel_label(current);
68871+
68872 /*
68873 * If we were started as result of loading a module, close all of the
68874 * user space pages. We don't need them, and if we didn't close them
68875@@ -812,6 +819,8 @@ void do_exit(long code)
68876 struct task_struct *tsk = current;
68877 int group_dead;
68878
68879+ set_fs(USER_DS);
68880+
68881 profile_task_exit(tsk);
68882
68883 WARN_ON(blk_needs_flush_plug(tsk));
68884@@ -828,7 +837,6 @@ void do_exit(long code)
68885 * mm_release()->clear_child_tid() from writing to a user-controlled
68886 * kernel address.
68887 */
68888- set_fs(USER_DS);
68889
68890 ptrace_event(PTRACE_EVENT_EXIT, code);
68891
68892@@ -887,6 +895,9 @@ void do_exit(long code)
68893 tsk->exit_code = code;
68894 taskstats_exit(tsk, group_dead);
68895
68896+ gr_acl_handle_psacct(tsk, code);
68897+ gr_acl_handle_exit();
68898+
68899 exit_mm(tsk);
68900
68901 if (group_dead)
68902@@ -1007,7 +1018,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
68903 * Take down every thread in the group. This is called by fatal signals
68904 * as well as by sys_exit_group (below).
68905 */
68906-void
68907+__noreturn void
68908 do_group_exit(int exit_code)
68909 {
68910 struct signal_struct *sig = current->signal;
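
In the exit.c hunks, set_fs(USER_DS) moves from the middle of do_exit() to its first statement, so no later step of the exit path (including the grsecurity hooks added here) runs with a stale KERNEL_DS address limit. The other change is declarative: do_group_exit() gains __noreturn. A sketch of what that attribute buys, using a stand-in function rather than the kernel's:

#include <stdio.h>
#include <stdlib.h>

__attribute__((noreturn))
static void demo_group_exit(int code)
{
    fprintf(stderr, "exiting with %d\n", code);
    exit(code);            /* must not return; the attribute makes that a contract */
}

int main(void)
{
    demo_group_exit(0);
    /* unreachable: the compiler may drop any code here, and it will warn
       if the function above could ever fall off the end */
}
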
68911diff --git a/kernel/fork.c b/kernel/fork.c
68912index acc4cb6..b524cb5 100644
68913--- a/kernel/fork.c
68914+++ b/kernel/fork.c
68915@@ -318,7 +318,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
68916 *stackend = STACK_END_MAGIC; /* for overflow detection */
68917
68918 #ifdef CONFIG_CC_STACKPROTECTOR
68919- tsk->stack_canary = get_random_int();
68920+ tsk->stack_canary = pax_get_random_long();
68921 #endif
68922
68923 /*
68924@@ -344,13 +344,81 @@ free_tsk:
68925 }
68926
68927 #ifdef CONFIG_MMU
68928+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
68929+{
68930+ struct vm_area_struct *tmp;
68931+ unsigned long charge;
68932+ struct mempolicy *pol;
68933+ struct file *file;
68934+
68935+ charge = 0;
68936+ if (mpnt->vm_flags & VM_ACCOUNT) {
68937+ unsigned long len = vma_pages(mpnt);
68938+
68939+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
68940+ goto fail_nomem;
68941+ charge = len;
68942+ }
68943+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
68944+ if (!tmp)
68945+ goto fail_nomem;
68946+ *tmp = *mpnt;
68947+ tmp->vm_mm = mm;
68948+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
68949+ pol = mpol_dup(vma_policy(mpnt));
68950+ if (IS_ERR(pol))
68951+ goto fail_nomem_policy;
68952+ vma_set_policy(tmp, pol);
68953+ if (anon_vma_fork(tmp, mpnt))
68954+ goto fail_nomem_anon_vma_fork;
68955+ tmp->vm_flags &= ~VM_LOCKED;
68956+ tmp->vm_next = tmp->vm_prev = NULL;
68957+ tmp->vm_mirror = NULL;
68958+ file = tmp->vm_file;
68959+ if (file) {
68960+ struct inode *inode = file->f_path.dentry->d_inode;
68961+ struct address_space *mapping = file->f_mapping;
68962+
68963+ get_file(file);
68964+ if (tmp->vm_flags & VM_DENYWRITE)
68965+ atomic_dec(&inode->i_writecount);
68966+ mutex_lock(&mapping->i_mmap_mutex);
68967+ if (tmp->vm_flags & VM_SHARED)
68968+ mapping->i_mmap_writable++;
68969+ flush_dcache_mmap_lock(mapping);
68970+ /* insert tmp into the share list, just after mpnt */
68971+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
68972+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
68973+ else
68974+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
68975+ flush_dcache_mmap_unlock(mapping);
68976+ mutex_unlock(&mapping->i_mmap_mutex);
68977+ }
68978+
68979+ /*
68980+ * Clear hugetlb-related page reserves for children. This only
68981+ * affects MAP_PRIVATE mappings. Faults generated by the child
68982+ * are not guaranteed to succeed, even if read-only
68983+ */
68984+ if (is_vm_hugetlb_page(tmp))
68985+ reset_vma_resv_huge_pages(tmp);
68986+
68987+ return tmp;
68988+
68989+fail_nomem_anon_vma_fork:
68990+ mpol_put(pol);
68991+fail_nomem_policy:
68992+ kmem_cache_free(vm_area_cachep, tmp);
68993+fail_nomem:
68994+ vm_unacct_memory(charge);
68995+ return NULL;
68996+}
68997+
68998 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
68999 {
69000 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
69001 struct rb_node **rb_link, *rb_parent;
69002 int retval;
69003- unsigned long charge;
69004- struct mempolicy *pol;
69005
69006 down_write(&oldmm->mmap_sem);
69007 flush_cache_dup_mm(oldmm);
69008@@ -363,8 +431,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
69009 mm->locked_vm = 0;
69010 mm->mmap = NULL;
69011 mm->mmap_cache = NULL;
69012- mm->free_area_cache = oldmm->mmap_base;
69013- mm->cached_hole_size = ~0UL;
69014+ mm->free_area_cache = oldmm->free_area_cache;
69015+ mm->cached_hole_size = oldmm->cached_hole_size;
69016 mm->map_count = 0;
69017 cpumask_clear(mm_cpumask(mm));
69018 mm->mm_rb = RB_ROOT;
69019@@ -380,57 +448,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
69020
69021 prev = NULL;
69022 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
69023- struct file *file;
69024-
69025 if (mpnt->vm_flags & VM_DONTCOPY) {
69026 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
69027 -vma_pages(mpnt));
69028 continue;
69029 }
69030- charge = 0;
69031- if (mpnt->vm_flags & VM_ACCOUNT) {
69032- unsigned long len = vma_pages(mpnt);
69033-
69034- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
69035- goto fail_nomem;
69036- charge = len;
69037- }
69038- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69039- if (!tmp)
69040- goto fail_nomem;
69041- *tmp = *mpnt;
69042- INIT_LIST_HEAD(&tmp->anon_vma_chain);
69043- pol = mpol_dup(vma_policy(mpnt));
69044- retval = PTR_ERR(pol);
69045- if (IS_ERR(pol))
69046- goto fail_nomem_policy;
69047- vma_set_policy(tmp, pol);
69048- tmp->vm_mm = mm;
69049- if (anon_vma_fork(tmp, mpnt))
69050- goto fail_nomem_anon_vma_fork;
69051- tmp->vm_flags &= ~VM_LOCKED;
69052- tmp->vm_next = tmp->vm_prev = NULL;
69053- file = tmp->vm_file;
69054- if (file) {
69055- struct inode *inode = file->f_path.dentry->d_inode;
69056- struct address_space *mapping = file->f_mapping;
69057-
69058- get_file(file);
69059- if (tmp->vm_flags & VM_DENYWRITE)
69060- atomic_dec(&inode->i_writecount);
69061- mutex_lock(&mapping->i_mmap_mutex);
69062- if (tmp->vm_flags & VM_SHARED)
69063- mapping->i_mmap_writable++;
69064- flush_dcache_mmap_lock(mapping);
69065- /* insert tmp into the share list, just after mpnt */
69066- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
69067- vma_nonlinear_insert(tmp,
69068- &mapping->i_mmap_nonlinear);
69069- else
69070- vma_interval_tree_insert_after(tmp, mpnt,
69071- &mapping->i_mmap);
69072- flush_dcache_mmap_unlock(mapping);
69073- mutex_unlock(&mapping->i_mmap_mutex);
69074+ tmp = dup_vma(mm, oldmm, mpnt);
69075+ if (!tmp) {
69076+ retval = -ENOMEM;
69077+ goto out;
69078 }
69079
69080 /*
69081@@ -462,6 +488,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
69082 if (retval)
69083 goto out;
69084 }
69085+
69086+#ifdef CONFIG_PAX_SEGMEXEC
69087+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
69088+ struct vm_area_struct *mpnt_m;
69089+
69090+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
69091+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
69092+
69093+ if (!mpnt->vm_mirror)
69094+ continue;
69095+
69096+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
69097+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
69098+ mpnt->vm_mirror = mpnt_m;
69099+ } else {
69100+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
69101+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
69102+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
69103+ mpnt->vm_mirror->vm_mirror = mpnt;
69104+ }
69105+ }
69106+ BUG_ON(mpnt_m);
69107+ }
69108+#endif
69109+
69110 /* a new mm has just been created */
69111 arch_dup_mmap(oldmm, mm);
69112 retval = 0;
69113@@ -470,14 +521,6 @@ out:
69114 flush_tlb_mm(oldmm);
69115 up_write(&oldmm->mmap_sem);
69116 return retval;
69117-fail_nomem_anon_vma_fork:
69118- mpol_put(pol);
69119-fail_nomem_policy:
69120- kmem_cache_free(vm_area_cachep, tmp);
69121-fail_nomem:
69122- retval = -ENOMEM;
69123- vm_unacct_memory(charge);
69124- goto out;
69125 }
69126
69127 static inline int mm_alloc_pgd(struct mm_struct *mm)
69128@@ -692,8 +735,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
69129 return ERR_PTR(err);
69130
69131 mm = get_task_mm(task);
69132- if (mm && mm != current->mm &&
69133- !ptrace_may_access(task, mode)) {
69134+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
69135+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
69136 mmput(mm);
69137 mm = ERR_PTR(-EACCES);
69138 }
69139@@ -912,13 +955,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
69140 spin_unlock(&fs->lock);
69141 return -EAGAIN;
69142 }
69143- fs->users++;
69144+ atomic_inc(&fs->users);
69145 spin_unlock(&fs->lock);
69146 return 0;
69147 }
69148 tsk->fs = copy_fs_struct(fs);
69149 if (!tsk->fs)
69150 return -ENOMEM;
69151+	/* Carry through gr_chroot_dentry and is_chrooted instead of
69152+	   recomputing them here; they were already copied when the task
69153+	   struct was duplicated. This keeps pivot_root from being treated
69154+	   as a chroot.
69155+	*/
69156+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
69157+
69158 return 0;
69159 }
69160
69161@@ -1183,6 +1233,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
69162 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
69163 #endif
69164 retval = -EAGAIN;
69165+
69166+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
69167+
69168 if (atomic_read(&p->real_cred->user->processes) >=
69169 task_rlimit(p, RLIMIT_NPROC)) {
69170 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
69171@@ -1422,6 +1475,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
69172 goto bad_fork_free_pid;
69173 }
69174
69175+	/* Synchronizes with gr_set_acls(); we need to call this past
69176+	   the point of no return for fork().
69177+	*/
69178+ gr_copy_label(p);
69179+
69180 if (clone_flags & CLONE_THREAD) {
69181 current->signal->nr_threads++;
69182 atomic_inc(&current->signal->live);
69183@@ -1505,6 +1563,8 @@ bad_fork_cleanup_count:
69184 bad_fork_free:
69185 free_task(p);
69186 fork_out:
69187+ gr_log_forkfail(retval);
69188+
69189 return ERR_PTR(retval);
69190 }
69191
69192@@ -1605,6 +1665,8 @@ long do_fork(unsigned long clone_flags,
69193 if (clone_flags & CLONE_PARENT_SETTID)
69194 put_user(nr, parent_tidptr);
69195
69196+ gr_handle_brute_check();
69197+
69198 if (clone_flags & CLONE_VFORK) {
69199 p->vfork_done = &vfork;
69200 init_completion(&vfork);
69201@@ -1714,7 +1776,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
69202 return 0;
69203
69204 /* don't need lock here; in the worst case we'll do useless copy */
69205- if (fs->users == 1)
69206+ if (atomic_read(&fs->users) == 1)
69207 return 0;
69208
69209 *new_fsp = copy_fs_struct(fs);
69210@@ -1803,7 +1865,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
69211 fs = current->fs;
69212 spin_lock(&fs->lock);
69213 current->fs = new_fs;
69214- if (--fs->users)
69215+ gr_set_chroot_entries(current, &current->fs->root);
69216+ if (atomic_dec_return(&fs->users))
69217 new_fs = NULL;
69218 else
69219 new_fs = fs;
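
The large fork.c change is a refactor: the per-VMA copy in dup_mmap(), together with its goto-based error unwinding, moves into a helper (dup_vma()) that returns NULL on failure, which makes room for the SEGMEXEC mirror fix-up pass after the loop. fs->users also becomes an atomic_t, apparently so the chroot bookkeeping can manipulate it without relying on fs->lock. A sketch of the refactor's shape, with toy types in place of vm_area_struct:

#include <stdio.h>
#include <stdlib.h>

struct vma_demo { const char *tag; struct vma_demo *next; };

/* one element's copy, plus all of its error unwinding, out of the loop */
static struct vma_demo *dup_one(const struct vma_demo *src)
{
    struct vma_demo *tmp = malloc(sizeof(*tmp));
    if (!tmp)
        goto fail_nomem;       /* mirrors the labels moved into dup_vma() */
    *tmp = *src;
    tmp->next = NULL;
    return tmp;
fail_nomem:
    return NULL;
}

int main(void)
{
    struct vma_demo b = { "data", NULL }, a = { "text", &b };
    for (struct vma_demo *v = &a; v; v = v->next) {
        struct vma_demo *copy = dup_one(v);
        if (!copy)             /* the caller just maps NULL to -ENOMEM */
            return 12;
        printf("copied %s\n", copy->tag);
        free(copy);
    }
    return 0;
}
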
69220diff --git a/kernel/futex.c b/kernel/futex.c
69221index 19eb089..b8c65ea 100644
69222--- a/kernel/futex.c
69223+++ b/kernel/futex.c
69224@@ -54,6 +54,7 @@
69225 #include <linux/mount.h>
69226 #include <linux/pagemap.h>
69227 #include <linux/syscalls.h>
69228+#include <linux/ptrace.h>
69229 #include <linux/signal.h>
69230 #include <linux/export.h>
69231 #include <linux/magic.h>
69232@@ -239,6 +240,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
69233 struct page *page, *page_head;
69234 int err, ro = 0;
69235
69236+#ifdef CONFIG_PAX_SEGMEXEC
69237+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
69238+ return -EFAULT;
69239+#endif
69240+
69241 /*
69242 * The futex address must be "naturally" aligned.
69243 */
69244@@ -2733,6 +2739,7 @@ static int __init futex_init(void)
69245 {
69246 u32 curval;
69247 int i;
69248+ mm_segment_t oldfs;
69249
69250 /*
69251 * This will fail and we want it. Some arch implementations do
69252@@ -2744,8 +2751,11 @@ static int __init futex_init(void)
69253 * implementation, the non-functional ones will return
69254 * -ENOSYS.
69255 */
69256+ oldfs = get_fs();
69257+ set_fs(USER_DS);
69258 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
69259 futex_cmpxchg_enabled = 1;
69260+ set_fs(oldfs);
69261
69262 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
69263 plist_head_init(&futex_queues[i].chain);
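
The futex_init() hunk brackets the deliberate-fault cmpxchg probe with get_fs()/set_fs(USER_DS)/set_fs(oldfs), so the probe's access to a NULL user address is judged under the user address limit. The save/override/restore pattern, modeled here with a plain global instead of the real segment state:

#include <stdio.h>

static int current_limit = 1;            /* 1 ~ KERNEL_DS, 0 ~ USER_DS */

static int get_limit(void) { return current_limit; }
static void set_limit(int l) { current_limit = l; }

static int probe_op(void)
{
    /* runs under whatever limit is in force; the boot probe EXPECTS -EFAULT */
    return current_limit == 0 ? -14 /* -EFAULT */ : 0;
}

int main(void)
{
    int oldfs = get_limit();             /* oldfs = get_fs();  */
    set_limit(0);                        /* set_fs(USER_DS);   */
    int ret = probe_op();
    set_limit(oldfs);                    /* set_fs(oldfs);     */
    printf("probe=%d limit restored=%d\n", ret, get_limit());
    return 0;
}
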
69264diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
69265index 9b22d03..6295b62 100644
69266--- a/kernel/gcov/base.c
69267+++ b/kernel/gcov/base.c
69268@@ -102,11 +102,6 @@ void gcov_enable_events(void)
69269 }
69270
69271 #ifdef CONFIG_MODULES
69272-static inline int within(void *addr, void *start, unsigned long size)
69273-{
69274- return ((addr >= start) && (addr < start + size));
69275-}
69276-
69277 /* Update list and generate events when modules are unloaded. */
69278 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
69279 void *data)
69280@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
69281 prev = NULL;
69282 /* Remove entries located in module from linked list. */
69283 for (info = gcov_info_head; info; info = info->next) {
69284- if (within(info, mod->module_core, mod->core_size)) {
69285+ if (within_module_core_rw((unsigned long)info, mod)) {
69286 if (prev)
69287 prev->next = info->next;
69288 else
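
gcov drops its private within() helper in favor of within_module_core_rw(), since gcov_info objects live in the module's writable half after the RX/RW split. For reference, the half-open interval test the removed helper performed:

#include <stdio.h>

static inline int within(void *addr, void *start, unsigned long size)
{
    return (char *)addr >= (char *)start &&
           (char *)addr <  (char *)start + size;
}

int main(void)
{
    static char region[128];
    printf("inside: %d, one-past-end: %d\n",
           within(region + 64, region, sizeof region),
           within(region + 128, region, sizeof region));
    return 0;
}
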
69289diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
69290index 6db7a5e..25b6648 100644
69291--- a/kernel/hrtimer.c
69292+++ b/kernel/hrtimer.c
69293@@ -1407,7 +1407,7 @@ void hrtimer_peek_ahead_timers(void)
69294 local_irq_restore(flags);
69295 }
69296
69297-static void run_hrtimer_softirq(struct softirq_action *h)
69298+static void run_hrtimer_softirq(void)
69299 {
69300 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
69301
69302diff --git a/kernel/jump_label.c b/kernel/jump_label.c
69303index 60f48fa..7f3a770 100644
69304--- a/kernel/jump_label.c
69305+++ b/kernel/jump_label.c
69306@@ -13,6 +13,7 @@
69307 #include <linux/sort.h>
69308 #include <linux/err.h>
69309 #include <linux/static_key.h>
69310+#include <linux/mm.h>
69311
69312 #ifdef HAVE_JUMP_LABEL
69313
69314@@ -50,7 +51,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
69315
69316 size = (((unsigned long)stop - (unsigned long)start)
69317 / sizeof(struct jump_entry));
69318+ pax_open_kernel();
69319 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
69320+ pax_close_kernel();
69321 }
69322
69323 static void jump_label_update(struct static_key *key, int enable);
69324@@ -357,10 +360,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
69325 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
69326 struct jump_entry *iter;
69327
69328+ pax_open_kernel();
69329 for (iter = iter_start; iter < iter_stop; iter++) {
69330 if (within_module_init(iter->code, mod))
69331 iter->code = 0;
69332 }
69333+ pax_close_kernel();
69334 }
69335
69336 static int
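
jump_label's table sort and init invalidation now write through pax_open_kernel()/pax_close_kernel(), because under KERNEXEC the jump tables sit in read-only memory. A userspace analogue of that open/write/close discipline using mprotect(); the kernel toggles CR0.WP instead, so this only illustrates the shape:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return 1;
    strcpy(p, "initial");
    mprotect(p, pagesz, PROT_READ);               /* normally read-only */

    mprotect(p, pagesz, PROT_READ | PROT_WRITE);  /* ~ pax_open_kernel()  */
    strcpy(p, "patched");                         /* the guarded write    */
    mprotect(p, pagesz, PROT_READ);               /* ~ pax_close_kernel() */

    printf("%s\n", p);
    munmap(p, pagesz);
    return 0;
}
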
69337diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
69338index 2169fee..45c017a 100644
69339--- a/kernel/kallsyms.c
69340+++ b/kernel/kallsyms.c
69341@@ -11,6 +11,9 @@
69342 * Changed the compression method from stem compression to "table lookup"
69343 * compression (see scripts/kallsyms.c for a more complete description)
69344 */
69345+#ifdef CONFIG_GRKERNSEC_HIDESYM
69346+#define __INCLUDED_BY_HIDESYM 1
69347+#endif
69348 #include <linux/kallsyms.h>
69349 #include <linux/module.h>
69350 #include <linux/init.h>
69351@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
69352
69353 static inline int is_kernel_inittext(unsigned long addr)
69354 {
69355+ if (system_state != SYSTEM_BOOTING)
69356+ return 0;
69357+
69358 if (addr >= (unsigned long)_sinittext
69359 && addr <= (unsigned long)_einittext)
69360 return 1;
69361 return 0;
69362 }
69363
69364+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
69365+#ifdef CONFIG_MODULES
69366+static inline int is_module_text(unsigned long addr)
69367+{
69368+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
69369+ return 1;
69370+
69371+ addr = ktla_ktva(addr);
69372+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
69373+}
69374+#else
69375+static inline int is_module_text(unsigned long addr)
69376+{
69377+ return 0;
69378+}
69379+#endif
69380+#endif
69381+
69382 static inline int is_kernel_text(unsigned long addr)
69383 {
69384 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
69385@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
69386
69387 static inline int is_kernel(unsigned long addr)
69388 {
69389+
69390+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
69391+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
69392+ return 1;
69393+
69394+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
69395+#else
69396 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
69397+#endif
69398+
69399 return 1;
69400 return in_gate_area_no_mm(addr);
69401 }
69402
69403 static int is_ksym_addr(unsigned long addr)
69404 {
69405+
69406+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
69407+ if (is_module_text(addr))
69408+ return 0;
69409+#endif
69410+
69411 if (all_var)
69412 return is_kernel(addr);
69413
69414@@ -470,7 +509,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
69415
69416 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
69417 {
69418- iter->name[0] = '\0';
69419 iter->nameoff = get_symbol_offset(new_pos);
69420 iter->pos = new_pos;
69421 }
69422@@ -518,6 +556,11 @@ static int s_show(struct seq_file *m, void *p)
69423 {
69424 struct kallsym_iter *iter = m->private;
69425
69426+#ifdef CONFIG_GRKERNSEC_HIDESYM
69427+ if (current_uid())
69428+ return 0;
69429+#endif
69430+
69431 /* Some debugging symbols have no name. Ignore them. */
69432 if (!iter->name[0])
69433 return 0;
69434@@ -531,6 +574,7 @@ static int s_show(struct seq_file *m, void *p)
69435 */
69436 type = iter->exported ? toupper(iter->type) :
69437 tolower(iter->type);
69438+
69439 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
69440 type, iter->name, iter->module_name);
69441 } else
69442@@ -556,7 +600,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
69443 struct kallsym_iter *iter;
69444 int ret;
69445
69446- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
69447+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
69448 if (!iter)
69449 return -ENOMEM;
69450 reset_iter(iter, 0);
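
Besides the HIDESYM gating of s_show() to root, kallsyms_open() switches from kmalloc() to kzalloc() for the iterator, which lets reset_iter() stop clearing name[0] by hand: a conditionally-filled buffer that starts zeroed cannot leak stale heap bytes to the reader. The same argument in miniature, with calloc() playing kzalloc():

#include <stdio.h>
#include <stdlib.h>

struct iter_demo { char name[32]; long pos; };

int main(void)
{
    struct iter_demo *it = calloc(1, sizeof(*it));   /* kzalloc() analogue */
    if (!it)
        return 1;
    /* the reset path no longer needs an explicit it->name[0] = '\0' */
    if (!it->name[0])
        puts("empty name detected without any uninitialized read");
    free(it);
    return 0;
}
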
69451diff --git a/kernel/kcmp.c b/kernel/kcmp.c
69452index 30b7b22..c726387 100644
69453--- a/kernel/kcmp.c
69454+++ b/kernel/kcmp.c
69455@@ -98,6 +98,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
69456 struct task_struct *task1, *task2;
69457 int ret;
69458
69459+#ifdef CONFIG_GRKERNSEC
69460+ return -ENOSYS;
69461+#endif
69462+
69463 rcu_read_lock();
69464
69465 /*
69466diff --git a/kernel/kexec.c b/kernel/kexec.c
69467index 5e4bd78..00c5b91 100644
69468--- a/kernel/kexec.c
69469+++ b/kernel/kexec.c
69470@@ -1045,7 +1045,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
69471 unsigned long flags)
69472 {
69473 struct compat_kexec_segment in;
69474- struct kexec_segment out, __user *ksegments;
69475+ struct kexec_segment out;
69476+ struct kexec_segment __user *ksegments;
69477 unsigned long i, result;
69478
69479 /* Don't allow clients that don't understand the native
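
The kexec hunk is purely a declaration split: with the __user address-space qualifier in play, "struct kexec_segment out, __user *ksegments;" is easy to misread, and one declarator per line keeps the qualifier visibly attached to the pointer alone. Shown with __user as a no-op macro, since sparse is not in the picture here:

#include <stdio.h>

#define __user /* sparse would see address_space(1) here */

struct seg { unsigned long len; };

int main(void)
{
    struct seg out;                 /* plain kernel-space value  */
    struct seg __user *ksegments;   /* pointer into userspace    */

    ksegments = (struct seg __user *)0;
    out.len = 0;
    printf("out.len=%lu ksegments=%p\n", out.len, (void *)ksegments);
    return 0;
}
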
69480diff --git a/kernel/kmod.c b/kernel/kmod.c
69481index 1c317e3..4a92a55 100644
69482--- a/kernel/kmod.c
69483+++ b/kernel/kmod.c
69484@@ -74,7 +74,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
69485 kfree(info->argv);
69486 }
69487
69488-static int call_modprobe(char *module_name, int wait)
69489+static int call_modprobe(char *module_name, char *module_param, int wait)
69490 {
69491 static char *envp[] = {
69492 "HOME=/",
69493@@ -83,7 +83,7 @@ static int call_modprobe(char *module_name, int wait)
69494 NULL
69495 };
69496
69497- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
69498+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
69499 if (!argv)
69500 goto out;
69501
69502@@ -95,7 +95,8 @@ static int call_modprobe(char *module_name, int wait)
69503 argv[1] = "-q";
69504 argv[2] = "--";
69505 argv[3] = module_name; /* check free_modprobe_argv() */
69506- argv[4] = NULL;
69507+ argv[4] = module_param;
69508+ argv[5] = NULL;
69509
69510 return call_usermodehelper_fns(modprobe_path, argv, envp,
69511 wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
69512@@ -120,9 +121,8 @@ out:
69513 * If module auto-loading support is disabled then this function
69514 * becomes a no-operation.
69515 */
69516-int __request_module(bool wait, const char *fmt, ...)
69517+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
69518 {
69519- va_list args;
69520 char module_name[MODULE_NAME_LEN];
69521 unsigned int max_modprobes;
69522 int ret;
69523@@ -130,9 +130,7 @@ int __request_module(bool wait, const char *fmt, ...)
69524 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
69525 static int kmod_loop_msg;
69526
69527- va_start(args, fmt);
69528- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
69529- va_end(args);
69530+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
69531 if (ret >= MODULE_NAME_LEN)
69532 return -ENAMETOOLONG;
69533
69534@@ -140,6 +138,20 @@ int __request_module(bool wait, const char *fmt, ...)
69535 if (ret)
69536 return ret;
69537
69538+#ifdef CONFIG_GRKERNSEC_MODHARDEN
69539+ if (!current_uid()) {
69540+ /* hack to workaround consolekit/udisks stupidity */
69541+ read_lock(&tasklist_lock);
69542+ if (!strcmp(current->comm, "mount") &&
69543+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
69544+ read_unlock(&tasklist_lock);
69545+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
69546+ return -EPERM;
69547+ }
69548+ read_unlock(&tasklist_lock);
69549+ }
69550+#endif
69551+
69552 /* If modprobe needs a service that is in a module, we get a recursive
69553 * loop. Limit the number of running kmod threads to max_threads/2 or
69554 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
69555@@ -168,11 +180,52 @@ int __request_module(bool wait, const char *fmt, ...)
69556
69557 trace_module_request(module_name, wait, _RET_IP_);
69558
69559- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
69560+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
69561
69562 atomic_dec(&kmod_concurrent);
69563 return ret;
69564 }
69565+
69566+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
69567+{
69568+ va_list args;
69569+ int ret;
69570+
69571+ va_start(args, fmt);
69572+ ret = ____request_module(wait, module_param, fmt, args);
69573+ va_end(args);
69574+
69575+ return ret;
69576+}
69577+
69578+int __request_module(bool wait, const char *fmt, ...)
69579+{
69580+ va_list args;
69581+ int ret;
69582+
69583+#ifdef CONFIG_GRKERNSEC_MODHARDEN
69584+ if (current_uid()) {
69585+ char module_param[MODULE_NAME_LEN];
69586+
69587+ memset(module_param, 0, sizeof(module_param));
69588+
69589+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
69590+
69591+ va_start(args, fmt);
69592+ ret = ____request_module(wait, module_param, fmt, args);
69593+ va_end(args);
69594+
69595+ return ret;
69596+ }
69597+#endif
69598+
69599+ va_start(args, fmt);
69600+ ret = ____request_module(wait, NULL, fmt, args);
69601+ va_end(args);
69602+
69603+ return ret;
69604+}
69605+
69606 EXPORT_SYMBOL(__request_module);
69607 #endif /* CONFIG_MODULES */
69608
69609@@ -283,7 +336,7 @@ static int wait_for_helper(void *data)
69610 *
69611 * Thus the __user pointer cast is valid here.
69612 */
69613- sys_wait4(pid, (int __user *)&ret, 0, NULL);
69614+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
69615
69616 /*
69617 * If ret is 0, either ____call_usermodehelper failed and the
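
For MODHARDEN, __request_module() is split into a va_list core (____request_module()) plus thin varargs wrappers, so an extra modprobe argument (the grsec_modharden_normal<uid>_ tag, or a caller-supplied parameter via ___request_module()) can be appended without duplicating the formatting and throttling logic. The wrapper-around-a-core pattern, sketched with made-up names:

#include <stdarg.h>
#include <stdio.h>

static int request_core(const char *extra, const char *fmt, va_list ap)
{
    char name[64];
    vsnprintf(name, sizeof(name), fmt, ap);
    printf("modprobe -q -- %s %s\n", name, extra ? extra : "");
    return 0;
}

static int request_with_param(const char *extra, const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    int ret = request_core(extra, fmt, ap);   /* shared core, extra argv slot */
    va_end(ap);
    return ret;
}

static int request(const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    int ret = request_core(NULL, fmt, ap);
    va_end(ap);
    return ret;
}

int main(void)
{
    request("fs-%s", "ext4");
    request_with_param("grsec_modharden_normal1000_", "char-major-%d", 10);
    return 0;
}
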
69618diff --git a/kernel/kprobes.c b/kernel/kprobes.c
69619index 098f396..fe85ff1 100644
69620--- a/kernel/kprobes.c
69621+++ b/kernel/kprobes.c
69622@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
69623 * kernel image and loaded module images reside. This is required
69624 * so x86_64 can correctly handle the %rip-relative fixups.
69625 */
69626- kip->insns = module_alloc(PAGE_SIZE);
69627+ kip->insns = module_alloc_exec(PAGE_SIZE);
69628 if (!kip->insns) {
69629 kfree(kip);
69630 return NULL;
69631@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
69632 */
69633 if (!list_is_singular(&kip->list)) {
69634 list_del(&kip->list);
69635- module_free(NULL, kip->insns);
69636+ module_free_exec(NULL, kip->insns);
69637 kfree(kip);
69638 }
69639 return 1;
69640@@ -2063,7 +2063,7 @@ static int __init init_kprobes(void)
69641 {
69642 int i, err = 0;
69643 unsigned long offset = 0, size = 0;
69644- char *modname, namebuf[128];
69645+ char *modname, namebuf[KSYM_NAME_LEN];
69646 const char *symbol_name;
69647 void *addr;
69648 struct kprobe_blackpoint *kb;
69649@@ -2148,11 +2148,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
69650 kprobe_type = "k";
69651
69652 if (sym)
69653- seq_printf(pi, "%p %s %s+0x%x %s ",
69654+ seq_printf(pi, "%pK %s %s+0x%x %s ",
69655 p->addr, kprobe_type, sym, offset,
69656 (modname ? modname : " "));
69657 else
69658- seq_printf(pi, "%p %s %p ",
69659+ seq_printf(pi, "%pK %s %pK ",
69660 p->addr, kprobe_type, p->addr);
69661
69662 if (!pp)
69663@@ -2190,7 +2190,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
69664 const char *sym = NULL;
69665 unsigned int i = *(loff_t *) v;
69666 unsigned long offset = 0;
69667- char *modname, namebuf[128];
69668+ char *modname, namebuf[KSYM_NAME_LEN];
69669
69670 head = &kprobe_table[i];
69671 preempt_disable();
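
The kprobes hunks swap %p for %pK in the seq output and resize the symbol-lookup buffers from a magic 128 to KSYM_NAME_LEN, tying the buffer to the limit it must actually accommodate. The sizing idiom, with a stand-in constant:

#include <stdio.h>

#define DEMO_KSYM_NAME_LEN 128   /* stand-in for the kernel's KSYM_NAME_LEN */

int main(void)
{
    char namebuf[DEMO_KSYM_NAME_LEN];
    const char *sym = "some_rather_long_generated_symbol_name";
    snprintf(namebuf, sizeof(namebuf), "%s", sym);   /* sized by the limit */
    printf("%s (buffer holds up to %zu chars)\n", namebuf, sizeof(namebuf) - 1);
    return 0;
}
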
69672diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
69673index 4e316e1..5501eef 100644
69674--- a/kernel/ksysfs.c
69675+++ b/kernel/ksysfs.c
69676@@ -47,6 +47,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
69677 {
69678 if (count+1 > UEVENT_HELPER_PATH_LEN)
69679 return -ENOENT;
69680+ if (!capable(CAP_SYS_ADMIN))
69681+ return -EPERM;
69682 memcpy(uevent_helper, buf, count);
69683 uevent_helper[count] = '\0';
69684 if (count && uevent_helper[count-1] == '\n')
69685diff --git a/kernel/lockdep.c b/kernel/lockdep.c
69686index 7981e5b..7f2105c 100644
69687--- a/kernel/lockdep.c
69688+++ b/kernel/lockdep.c
69689@@ -590,6 +590,10 @@ static int static_obj(void *obj)
69690 end = (unsigned long) &_end,
69691 addr = (unsigned long) obj;
69692
69693+#ifdef CONFIG_PAX_KERNEXEC
69694+ start = ktla_ktva(start);
69695+#endif
69696+
69697 /*
69698 * static variable?
69699 */
69700@@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
69701 if (!static_obj(lock->key)) {
69702 debug_locks_off();
69703 printk("INFO: trying to register non-static key.\n");
69704+ printk("lock:%pS key:%pS.\n", lock, lock->key);
69705 printk("the code is fine but needs lockdep annotation.\n");
69706 printk("turning off the locking correctness validator.\n");
69707 dump_stack();
69708@@ -3078,7 +3083,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
69709 if (!class)
69710 return 0;
69711 }
69712- atomic_inc((atomic_t *)&class->ops);
69713+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
69714 if (very_verbose(class)) {
69715 printk("\nacquire class [%p] %s", class->key, class->name);
69716 if (class->name_version > 1)
69717diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
69718index 91c32a0..7b88d63 100644
69719--- a/kernel/lockdep_proc.c
69720+++ b/kernel/lockdep_proc.c
69721@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
69722
69723 static void print_name(struct seq_file *m, struct lock_class *class)
69724 {
69725- char str[128];
69726+ char str[KSYM_NAME_LEN];
69727 const char *name = class->name;
69728
69729 if (!name) {
69730@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
69731 return 0;
69732 }
69733
69734- seq_printf(m, "%p", class->key);
69735+ seq_printf(m, "%pK", class->key);
69736 #ifdef CONFIG_DEBUG_LOCKDEP
69737 seq_printf(m, " OPS:%8ld", class->ops);
69738 #endif
69739@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
69740
69741 list_for_each_entry(entry, &class->locks_after, entry) {
69742 if (entry->distance == 1) {
69743- seq_printf(m, " -> [%p] ", entry->class->key);
69744+ seq_printf(m, " -> [%pK] ", entry->class->key);
69745 print_name(m, entry->class);
69746 seq_puts(m, "\n");
69747 }
69748@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
69749 if (!class->key)
69750 continue;
69751
69752- seq_printf(m, "[%p] ", class->key);
69753+ seq_printf(m, "[%pK] ", class->key);
69754 print_name(m, class);
69755 seq_puts(m, "\n");
69756 }
69757@@ -495,7 +495,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
69758 if (!i)
69759 seq_line(m, '-', 40-namelen, namelen);
69760
69761- snprintf(ip, sizeof(ip), "[<%p>]",
69762+ snprintf(ip, sizeof(ip), "[<%pK>]",
69763 (void *)class->contention_point[i]);
69764 seq_printf(m, "%40s %14lu %29s %pS\n",
69765 name, stats->contention_point[i],
69766@@ -510,7 +510,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
69767 if (!i)
69768 seq_line(m, '-', 40-namelen, namelen);
69769
69770- snprintf(ip, sizeof(ip), "[<%p>]",
69771+ snprintf(ip, sizeof(ip), "[<%pK>]",
69772 (void *)class->contending_point[i]);
69773 seq_printf(m, "%40s %14lu %29s %pS\n",
69774 name, stats->contending_point[i],
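
lockdep and its proc interface likewise move from %p to %pK, so lock-class keys and contention addresses are censored for unprivileged readers according to kptr_restrict. A toy formatter with the same gating shape; the real policy lives in the kernel's printk pointer() handling:

#include <stdio.h>

static int kptr_restrict_demo = 1;   /* 1: hide from non-root, like the sysctl */
static int reader_is_root = 0;

static void print_kptr(const void *p)
{
    if (kptr_restrict_demo && !reader_is_root)
        printf("%p", (void *)0);     /* restricted %pK renders as zeros */
    else
        printf("%p", p);
}

int main(void)
{
    int x;
    print_kptr(&x);
    putchar('\n');
    return 0;
}
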
69775diff --git a/kernel/module.c b/kernel/module.c
69776index 6e48c3a..ac2ef5b 100644
69777--- a/kernel/module.c
69778+++ b/kernel/module.c
69779@@ -59,6 +59,7 @@
69780 #include <linux/pfn.h>
69781 #include <linux/bsearch.h>
69782 #include <linux/fips.h>
69783+#include <linux/grsecurity.h>
69784 #include "module-internal.h"
69785
69786 #define CREATE_TRACE_POINTS
69787@@ -153,7 +154,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
69788
69789 /* Bounds of module allocation, for speeding __module_address.
69790 * Protected by module_mutex. */
69791-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
69792+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
69793+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
69794
69795 int register_module_notifier(struct notifier_block * nb)
69796 {
69797@@ -318,7 +320,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
69798 return true;
69799
69800 list_for_each_entry_rcu(mod, &modules, list) {
69801- struct symsearch arr[] = {
69802+ struct symsearch modarr[] = {
69803 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
69804 NOT_GPL_ONLY, false },
69805 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
69806@@ -340,7 +342,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
69807 #endif
69808 };
69809
69810- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
69811+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
69812 return true;
69813 }
69814 return false;
69815@@ -472,7 +474,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
69816 static int percpu_modalloc(struct module *mod,
69817 unsigned long size, unsigned long align)
69818 {
69819- if (align > PAGE_SIZE) {
69820+ if (align-1 >= PAGE_SIZE) {
69821 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
69822 mod->name, align, PAGE_SIZE);
69823 align = PAGE_SIZE;
69824@@ -1072,7 +1074,7 @@ struct module_attribute module_uevent =
69825 static ssize_t show_coresize(struct module_attribute *mattr,
69826 struct module_kobject *mk, char *buffer)
69827 {
69828- return sprintf(buffer, "%u\n", mk->mod->core_size);
69829+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
69830 }
69831
69832 static struct module_attribute modinfo_coresize =
69833@@ -1081,7 +1083,7 @@ static struct module_attribute modinfo_coresize =
69834 static ssize_t show_initsize(struct module_attribute *mattr,
69835 struct module_kobject *mk, char *buffer)
69836 {
69837- return sprintf(buffer, "%u\n", mk->mod->init_size);
69838+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
69839 }
69840
69841 static struct module_attribute modinfo_initsize =
69842@@ -1295,7 +1297,7 @@ resolve_symbol_wait(struct module *mod,
69843 */
69844 #ifdef CONFIG_SYSFS
69845
69846-#ifdef CONFIG_KALLSYMS
69847+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
69848 static inline bool sect_empty(const Elf_Shdr *sect)
69849 {
69850 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
69851@@ -1761,21 +1763,21 @@ static void set_section_ro_nx(void *base,
69852
69853 static void unset_module_core_ro_nx(struct module *mod)
69854 {
69855- set_page_attributes(mod->module_core + mod->core_text_size,
69856- mod->module_core + mod->core_size,
69857+ set_page_attributes(mod->module_core_rw,
69858+ mod->module_core_rw + mod->core_size_rw,
69859 set_memory_x);
69860- set_page_attributes(mod->module_core,
69861- mod->module_core + mod->core_ro_size,
69862+ set_page_attributes(mod->module_core_rx,
69863+ mod->module_core_rx + mod->core_size_rx,
69864 set_memory_rw);
69865 }
69866
69867 static void unset_module_init_ro_nx(struct module *mod)
69868 {
69869- set_page_attributes(mod->module_init + mod->init_text_size,
69870- mod->module_init + mod->init_size,
69871+ set_page_attributes(mod->module_init_rw,
69872+ mod->module_init_rw + mod->init_size_rw,
69873 set_memory_x);
69874- set_page_attributes(mod->module_init,
69875- mod->module_init + mod->init_ro_size,
69876+ set_page_attributes(mod->module_init_rx,
69877+ mod->module_init_rx + mod->init_size_rx,
69878 set_memory_rw);
69879 }
69880
69881@@ -1786,14 +1788,14 @@ void set_all_modules_text_rw(void)
69882
69883 mutex_lock(&module_mutex);
69884 list_for_each_entry_rcu(mod, &modules, list) {
69885- if ((mod->module_core) && (mod->core_text_size)) {
69886- set_page_attributes(mod->module_core,
69887- mod->module_core + mod->core_text_size,
69888+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
69889+ set_page_attributes(mod->module_core_rx,
69890+ mod->module_core_rx + mod->core_size_rx,
69891 set_memory_rw);
69892 }
69893- if ((mod->module_init) && (mod->init_text_size)) {
69894- set_page_attributes(mod->module_init,
69895- mod->module_init + mod->init_text_size,
69896+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
69897+ set_page_attributes(mod->module_init_rx,
69898+ mod->module_init_rx + mod->init_size_rx,
69899 set_memory_rw);
69900 }
69901 }
69902@@ -1807,14 +1809,14 @@ void set_all_modules_text_ro(void)
69903
69904 mutex_lock(&module_mutex);
69905 list_for_each_entry_rcu(mod, &modules, list) {
69906- if ((mod->module_core) && (mod->core_text_size)) {
69907- set_page_attributes(mod->module_core,
69908- mod->module_core + mod->core_text_size,
69909+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
69910+ set_page_attributes(mod->module_core_rx,
69911+ mod->module_core_rx + mod->core_size_rx,
69912 set_memory_ro);
69913 }
69914- if ((mod->module_init) && (mod->init_text_size)) {
69915- set_page_attributes(mod->module_init,
69916- mod->module_init + mod->init_text_size,
69917+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
69918+ set_page_attributes(mod->module_init_rx,
69919+ mod->module_init_rx + mod->init_size_rx,
69920 set_memory_ro);
69921 }
69922 }
69923@@ -1860,16 +1862,19 @@ static void free_module(struct module *mod)
69924
69925 /* This may be NULL, but that's OK */
69926 unset_module_init_ro_nx(mod);
69927- module_free(mod, mod->module_init);
69928+ module_free(mod, mod->module_init_rw);
69929+ module_free_exec(mod, mod->module_init_rx);
69930 kfree(mod->args);
69931 percpu_modfree(mod);
69932
69933 /* Free lock-classes: */
69934- lockdep_free_key_range(mod->module_core, mod->core_size);
69935+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
69936+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
69937
69938 /* Finally, free the core (containing the module structure) */
69939 unset_module_core_ro_nx(mod);
69940- module_free(mod, mod->module_core);
69941+ module_free_exec(mod, mod->module_core_rx);
69942+ module_free(mod, mod->module_core_rw);
69943
69944 #ifdef CONFIG_MPU
69945 update_protections(current->mm);
69946@@ -1939,9 +1944,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
69947 int ret = 0;
69948 const struct kernel_symbol *ksym;
69949
69950+#ifdef CONFIG_GRKERNSEC_MODHARDEN
69951+ int is_fs_load = 0;
69952+ int register_filesystem_found = 0;
69953+ char *p;
69954+
69955+ p = strstr(mod->args, "grsec_modharden_fs");
69956+ if (p) {
69957+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
69958+ /* copy \0 as well */
69959+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
69960+ is_fs_load = 1;
69961+ }
69962+#endif
69963+
69964 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
69965 const char *name = info->strtab + sym[i].st_name;
69966
69967+#ifdef CONFIG_GRKERNSEC_MODHARDEN
69968+ /* it's a real shame this will never get ripped and copied
69969+ upstream! ;(
69970+ */
69971+ if (is_fs_load && !strcmp(name, "register_filesystem"))
69972+ register_filesystem_found = 1;
69973+#endif
69974+
69975 switch (sym[i].st_shndx) {
69976 case SHN_COMMON:
69977 /* We compiled with -fno-common. These are not
69978@@ -1962,7 +1989,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
69979 ksym = resolve_symbol_wait(mod, info, name);
69980 /* Ok if resolved. */
69981 if (ksym && !IS_ERR(ksym)) {
69982+ pax_open_kernel();
69983 sym[i].st_value = ksym->value;
69984+ pax_close_kernel();
69985 break;
69986 }
69987
69988@@ -1981,11 +2010,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
69989 secbase = (unsigned long)mod_percpu(mod);
69990 else
69991 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
69992+ pax_open_kernel();
69993 sym[i].st_value += secbase;
69994+ pax_close_kernel();
69995 break;
69996 }
69997 }
69998
69999+#ifdef CONFIG_GRKERNSEC_MODHARDEN
70000+ if (is_fs_load && !register_filesystem_found) {
70001+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
70002+ ret = -EPERM;
70003+ }
70004+#endif
70005+
70006 return ret;
70007 }
70008
70009@@ -2069,22 +2107,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
70010 || s->sh_entsize != ~0UL
70011 || strstarts(sname, ".init"))
70012 continue;
70013- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
70014+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
70015+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
70016+ else
70017+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
70018 pr_debug("\t%s\n", sname);
70019 }
70020- switch (m) {
70021- case 0: /* executable */
70022- mod->core_size = debug_align(mod->core_size);
70023- mod->core_text_size = mod->core_size;
70024- break;
70025- case 1: /* RO: text and ro-data */
70026- mod->core_size = debug_align(mod->core_size);
70027- mod->core_ro_size = mod->core_size;
70028- break;
70029- case 3: /* whole core */
70030- mod->core_size = debug_align(mod->core_size);
70031- break;
70032- }
70033 }
70034
70035 pr_debug("Init section allocation order:\n");
70036@@ -2098,23 +2126,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
70037 || s->sh_entsize != ~0UL
70038 || !strstarts(sname, ".init"))
70039 continue;
70040- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
70041- | INIT_OFFSET_MASK);
70042+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
70043+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
70044+ else
70045+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
70046+ s->sh_entsize |= INIT_OFFSET_MASK;
70047 pr_debug("\t%s\n", sname);
70048 }
70049- switch (m) {
70050- case 0: /* executable */
70051- mod->init_size = debug_align(mod->init_size);
70052- mod->init_text_size = mod->init_size;
70053- break;
70054- case 1: /* RO: text and ro-data */
70055- mod->init_size = debug_align(mod->init_size);
70056- mod->init_ro_size = mod->init_size;
70057- break;
70058- case 3: /* whole init */
70059- mod->init_size = debug_align(mod->init_size);
70060- break;
70061- }
70062 }
70063 }
70064
70065@@ -2286,7 +2304,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
70066
70067 /* Put symbol section at end of init part of module. */
70068 symsect->sh_flags |= SHF_ALLOC;
70069- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
70070+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
70071 info->index.sym) | INIT_OFFSET_MASK;
70072 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
70073
70074@@ -2306,13 +2324,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
70075 }
70076
70077 /* Append room for core symbols at end of core part. */
70078- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
70079- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
70080- mod->core_size += strtab_size;
70081+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
70082+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
70083+ mod->core_size_rx += strtab_size;
70084
70085 /* Put string table section at end of init part of module. */
70086 strsect->sh_flags |= SHF_ALLOC;
70087- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
70088+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
70089 info->index.str) | INIT_OFFSET_MASK;
70090 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
70091 }
70092@@ -2330,12 +2348,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
70093 /* Make sure we get permanent strtab: don't use info->strtab. */
70094 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
70095
70096+ pax_open_kernel();
70097+
70098 /* Set types up while we still have access to sections. */
70099 for (i = 0; i < mod->num_symtab; i++)
70100 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
70101
70102- mod->core_symtab = dst = mod->module_core + info->symoffs;
70103- mod->core_strtab = s = mod->module_core + info->stroffs;
70104+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
70105+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
70106 src = mod->symtab;
70107 *s++ = 0;
70108 for (ndst = i = 0; i < mod->num_symtab; i++) {
70109@@ -2348,6 +2368,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
70110 }
70111 }
70112 mod->core_num_syms = ndst;
70113+
70114+ pax_close_kernel();
70115 }
70116 #else
70117 static inline void layout_symtab(struct module *mod, struct load_info *info)
70118@@ -2381,17 +2403,33 @@ void * __weak module_alloc(unsigned long size)
70119 return size == 0 ? NULL : vmalloc_exec(size);
70120 }
70121
70122-static void *module_alloc_update_bounds(unsigned long size)
70123+static void *module_alloc_update_bounds_rw(unsigned long size)
70124 {
70125 void *ret = module_alloc(size);
70126
70127 if (ret) {
70128 mutex_lock(&module_mutex);
70129 /* Update module bounds. */
70130- if ((unsigned long)ret < module_addr_min)
70131- module_addr_min = (unsigned long)ret;
70132- if ((unsigned long)ret + size > module_addr_max)
70133- module_addr_max = (unsigned long)ret + size;
70134+ if ((unsigned long)ret < module_addr_min_rw)
70135+ module_addr_min_rw = (unsigned long)ret;
70136+ if ((unsigned long)ret + size > module_addr_max_rw)
70137+ module_addr_max_rw = (unsigned long)ret + size;
70138+ mutex_unlock(&module_mutex);
70139+ }
70140+ return ret;
70141+}
70142+
70143+static void *module_alloc_update_bounds_rx(unsigned long size)
70144+{
70145+ void *ret = module_alloc_exec(size);
70146+
70147+ if (ret) {
70148+ mutex_lock(&module_mutex);
70149+ /* Update module bounds. */
70150+ if ((unsigned long)ret < module_addr_min_rx)
70151+ module_addr_min_rx = (unsigned long)ret;
70152+ if ((unsigned long)ret + size > module_addr_max_rx)
70153+ module_addr_max_rx = (unsigned long)ret + size;
70154 mutex_unlock(&module_mutex);
70155 }
70156 return ret;
70157@@ -2610,8 +2648,14 @@ static struct module *setup_load_info(struct load_info *info)
70158 static int check_modinfo(struct module *mod, struct load_info *info)
70159 {
70160 const char *modmagic = get_modinfo(info, "vermagic");
70161+ const char *license = get_modinfo(info, "license");
70162 int err;
70163
70164+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
70165+ if (!license || !license_is_gpl_compatible(license))
70166+ return -ENOEXEC;
70167+#endif
70168+
70169 /* This is allowed: modprobe --force will invalidate it. */
70170 if (!modmagic) {
70171 err = try_to_force_load(mod, "bad vermagic");
70172@@ -2634,7 +2678,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
70173 }
70174
70175 /* Set up license info based on the info section */
70176- set_license(mod, get_modinfo(info, "license"));
70177+ set_license(mod, license);
70178
70179 return 0;
70180 }
70181@@ -2728,7 +2772,7 @@ static int move_module(struct module *mod, struct load_info *info)
70182 void *ptr;
70183
70184 /* Do the allocs. */
70185- ptr = module_alloc_update_bounds(mod->core_size);
70186+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
70187 /*
70188 * The pointer to this block is stored in the module structure
70189 * which is inside the block. Just mark it as not being a
70190@@ -2738,23 +2782,50 @@ static int move_module(struct module *mod, struct load_info *info)
70191 if (!ptr)
70192 return -ENOMEM;
70193
70194- memset(ptr, 0, mod->core_size);
70195- mod->module_core = ptr;
70196+ memset(ptr, 0, mod->core_size_rw);
70197+ mod->module_core_rw = ptr;
70198
70199- ptr = module_alloc_update_bounds(mod->init_size);
70200+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
70201 /*
70202 * The pointer to this block is stored in the module structure
70203 * which is inside the block. This block doesn't need to be
70204 * scanned as it contains data and code that will be freed
70205 * after the module is initialized.
70206 */
70207- kmemleak_ignore(ptr);
70208- if (!ptr && mod->init_size) {
70209- module_free(mod, mod->module_core);
70210+ kmemleak_not_leak(ptr);
70211+ if (!ptr && mod->init_size_rw) {
70212+ module_free(mod, mod->module_core_rw);
70213 return -ENOMEM;
70214 }
70215- memset(ptr, 0, mod->init_size);
70216- mod->module_init = ptr;
70217+ memset(ptr, 0, mod->init_size_rw);
70218+ mod->module_init_rw = ptr;
70219+
70220+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
70221+ kmemleak_not_leak(ptr);
70222+ if (!ptr) {
70223+ module_free(mod, mod->module_init_rw);
70224+ module_free(mod, mod->module_core_rw);
70225+ return -ENOMEM;
70226+ }
70227+
70228+ pax_open_kernel();
70229+ memset(ptr, 0, mod->core_size_rx);
70230+ pax_close_kernel();
70231+ mod->module_core_rx = ptr;
70232+
70233+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
70234+ kmemleak_not_leak(ptr);
70235+ if (!ptr && mod->init_size_rx) {
70236+ module_free_exec(mod, mod->module_core_rx);
70237+ module_free(mod, mod->module_init_rw);
70238+ module_free(mod, mod->module_core_rw);
70239+ return -ENOMEM;
70240+ }
70241+
70242+ pax_open_kernel();
70243+ memset(ptr, 0, mod->init_size_rx);
70244+ pax_close_kernel();
70245+ mod->module_init_rx = ptr;
70246
70247 /* Transfer each section which specifies SHF_ALLOC */
70248 pr_debug("final section addresses:\n");
70249@@ -2765,16 +2836,45 @@ static int move_module(struct module *mod, struct load_info *info)
70250 if (!(shdr->sh_flags & SHF_ALLOC))
70251 continue;
70252
70253- if (shdr->sh_entsize & INIT_OFFSET_MASK)
70254- dest = mod->module_init
70255- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
70256- else
70257- dest = mod->module_core + shdr->sh_entsize;
70258+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
70259+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
70260+ dest = mod->module_init_rw
70261+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
70262+ else
70263+ dest = mod->module_init_rx
70264+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
70265+ } else {
70266+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
70267+ dest = mod->module_core_rw + shdr->sh_entsize;
70268+ else
70269+ dest = mod->module_core_rx + shdr->sh_entsize;
70270+ }
70271+
70272+ if (shdr->sh_type != SHT_NOBITS) {
70273+
70274+#ifdef CONFIG_PAX_KERNEXEC
70275+#ifdef CONFIG_X86_64
70276+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
70277+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
70278+#endif
70279+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
70280+ pax_open_kernel();
70281+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
70282+ pax_close_kernel();
70283+ } else
70284+#endif
70285
70286- if (shdr->sh_type != SHT_NOBITS)
70287 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
70288+ }
70289 /* Update sh_addr to point to copy in image. */
70290- shdr->sh_addr = (unsigned long)dest;
70291+
70292+#ifdef CONFIG_PAX_KERNEXEC
70293+ if (shdr->sh_flags & SHF_EXECINSTR)
70294+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
70295+ else
70296+#endif
70297+
70298+ shdr->sh_addr = (unsigned long)dest;
70299 pr_debug("\t0x%lx %s\n",
70300 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
70301 }
70302@@ -2829,12 +2929,12 @@ static void flush_module_icache(const struct module *mod)
70303 * Do it before processing of module parameters, so the module
70304 * can provide parameter accessor functions of its own.
70305 */
70306- if (mod->module_init)
70307- flush_icache_range((unsigned long)mod->module_init,
70308- (unsigned long)mod->module_init
70309- + mod->init_size);
70310- flush_icache_range((unsigned long)mod->module_core,
70311- (unsigned long)mod->module_core + mod->core_size);
70312+ if (mod->module_init_rx)
70313+ flush_icache_range((unsigned long)mod->module_init_rx,
70314+ (unsigned long)mod->module_init_rx
70315+ + mod->init_size_rx);
70316+ flush_icache_range((unsigned long)mod->module_core_rx,
70317+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
70318
70319 set_fs(old_fs);
70320 }
70321@@ -2904,8 +3004,10 @@ out:
70322 static void module_deallocate(struct module *mod, struct load_info *info)
70323 {
70324 percpu_modfree(mod);
70325- module_free(mod, mod->module_init);
70326- module_free(mod, mod->module_core);
70327+ module_free_exec(mod, mod->module_init_rx);
70328+ module_free_exec(mod, mod->module_core_rx);
70329+ module_free(mod, mod->module_init_rw);
70330+ module_free(mod, mod->module_core_rw);
70331 }
70332
70333 int __weak module_finalize(const Elf_Ehdr *hdr,
70334@@ -2918,7 +3020,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
70335 static int post_relocation(struct module *mod, const struct load_info *info)
70336 {
70337 /* Sort exception table now relocations are done. */
70338+ pax_open_kernel();
70339 sort_extable(mod->extable, mod->extable + mod->num_exentries);
70340+ pax_close_kernel();
70341
70342 /* Copy relocated percpu area over. */
70343 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
70344@@ -2989,9 +3093,38 @@ static struct module *load_module(void __user *umod,
70345 if (err)
70346 goto free_unload;
70347
70348+ /* Now copy in args */
70349+ mod->args = strndup_user(uargs, ~0UL >> 1);
70350+ if (IS_ERR(mod->args)) {
70351+ err = PTR_ERR(mod->args);
70352+ goto free_unload;
70353+ }
70354+
70355 /* Set up MODINFO_ATTR fields */
70356 setup_modinfo(mod, &info);
70357
70358+#ifdef CONFIG_GRKERNSEC_MODHARDEN
70359+ {
70360+ char *p, *p2;
70361+
70362+ if (strstr(mod->args, "grsec_modharden_netdev")) {
70363+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
70364+ err = -EPERM;
70365+ goto free_modinfo;
70366+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
70367+ p += sizeof("grsec_modharden_normal") - 1;
70368+ p2 = strstr(p, "_");
70369+ if (p2) {
70370+ *p2 = '\0';
70371+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
70372+ *p2 = '_';
70373+ }
70374+ err = -EPERM;
70375+ goto free_modinfo;
70376+ }
70377+ }
70378+#endif
70379+
70380 /* Fix up syms, so that st_value is a pointer to location. */
70381 err = simplify_symbols(mod, &info);
70382 if (err < 0)
70383@@ -3007,13 +3140,6 @@ static struct module *load_module(void __user *umod,
70384
70385 flush_module_icache(mod);
70386
70387- /* Now copy in args */
70388- mod->args = strndup_user(uargs, ~0UL >> 1);
70389- if (IS_ERR(mod->args)) {
70390- err = PTR_ERR(mod->args);
70391- goto free_arch_cleanup;
70392- }
70393-
70394 /* Mark state as coming so strong_try_module_get() ignores us. */
70395 mod->state = MODULE_STATE_COMING;
70396
70397@@ -3081,11 +3207,11 @@ again:
70398 unlock:
70399 mutex_unlock(&module_mutex);
70400 synchronize_sched();
70401- kfree(mod->args);
70402 free_arch_cleanup:
70403 module_arch_cleanup(mod);
70404 free_modinfo:
70405 free_modinfo(mod);
70406+ kfree(mod->args);
70407 free_unload:
70408 module_unload_free(mod);
70409 free_module:
70410@@ -3126,16 +3252,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
70411 MODULE_STATE_COMING, mod);
70412
70413 /* Set RO and NX regions for core */
70414- set_section_ro_nx(mod->module_core,
70415- mod->core_text_size,
70416- mod->core_ro_size,
70417- mod->core_size);
70418+ set_section_ro_nx(mod->module_core_rx,
70419+ mod->core_size_rx,
70420+ mod->core_size_rx,
70421+ mod->core_size_rx);
70422
70423 /* Set RO and NX regions for init */
70424- set_section_ro_nx(mod->module_init,
70425- mod->init_text_size,
70426- mod->init_ro_size,
70427- mod->init_size);
70428+ set_section_ro_nx(mod->module_init_rx,
70429+ mod->init_size_rx,
70430+ mod->init_size_rx,
70431+ mod->init_size_rx);
70432
70433 do_mod_ctors(mod);
70434 /* Start the module */
70435@@ -3180,11 +3306,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
70436 mod->strtab = mod->core_strtab;
70437 #endif
70438 unset_module_init_ro_nx(mod);
70439- module_free(mod, mod->module_init);
70440- mod->module_init = NULL;
70441- mod->init_size = 0;
70442- mod->init_ro_size = 0;
70443- mod->init_text_size = 0;
70444+ module_free(mod, mod->module_init_rw);
70445+ module_free_exec(mod, mod->module_init_rx);
70446+ mod->module_init_rw = NULL;
70447+ mod->module_init_rx = NULL;
70448+ mod->init_size_rw = 0;
70449+ mod->init_size_rx = 0;
70450 mutex_unlock(&module_mutex);
70451 wake_up_all(&module_wq);
70452
70453@@ -3216,10 +3343,16 @@ static const char *get_ksymbol(struct module *mod,
70454 unsigned long nextval;
70455
70456 /* At worst, next value is at end of module */
70457- if (within_module_init(addr, mod))
70458- nextval = (unsigned long)mod->module_init+mod->init_text_size;
70459+ if (within_module_init_rx(addr, mod))
70460+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
70461+ else if (within_module_init_rw(addr, mod))
70462+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
70463+ else if (within_module_core_rx(addr, mod))
70464+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
70465+ else if (within_module_core_rw(addr, mod))
70466+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
70467 else
70468- nextval = (unsigned long)mod->module_core+mod->core_text_size;
70469+ return NULL;
70470
70471 /* Scan for closest preceding symbol, and next symbol. (ELF
70472 starts real symbols at 1). */
70473@@ -3454,7 +3587,7 @@ static int m_show(struct seq_file *m, void *p)
70474 char buf[8];
70475
70476 seq_printf(m, "%s %u",
70477- mod->name, mod->init_size + mod->core_size);
70478+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
70479 print_unload_info(m, mod);
70480
70481 /* Informative for users. */
70482@@ -3463,7 +3596,7 @@ static int m_show(struct seq_file *m, void *p)
70483 mod->state == MODULE_STATE_COMING ? "Loading":
70484 "Live");
70485 /* Used by oprofile and other similar tools. */
70486- seq_printf(m, " 0x%pK", mod->module_core);
70487+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
70488
70489 /* Taints info */
70490 if (mod->taints)
70491@@ -3499,7 +3632,17 @@ static const struct file_operations proc_modules_operations = {
70492
70493 static int __init proc_modules_init(void)
70494 {
70495+#ifndef CONFIG_GRKERNSEC_HIDESYM
70496+#ifdef CONFIG_GRKERNSEC_PROC_USER
70497+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
70498+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
70499+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
70500+#else
70501 proc_create("modules", 0, NULL, &proc_modules_operations);
70502+#endif
70503+#else
70504+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
70505+#endif
70506 return 0;
70507 }
70508 module_init(proc_modules_init);
70509@@ -3558,12 +3701,12 @@ struct module *__module_address(unsigned long addr)
70510 {
70511 struct module *mod;
70512
70513- if (addr < module_addr_min || addr > module_addr_max)
70514+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
70515+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
70516 return NULL;
70517
70518 list_for_each_entry_rcu(mod, &modules, list)
70519- if (within_module_core(addr, mod)
70520- || within_module_init(addr, mod))
70521+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
70522 return mod;
70523 return NULL;
70524 }
70525@@ -3597,11 +3740,20 @@ bool is_module_text_address(unsigned long addr)
70526 */
70527 struct module *__module_text_address(unsigned long addr)
70528 {
70529- struct module *mod = __module_address(addr);
70530+ struct module *mod;
70531+
70532+#ifdef CONFIG_X86_32
70533+ addr = ktla_ktva(addr);
70534+#endif
70535+
70536+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
70537+ return NULL;
70538+
70539+ mod = __module_address(addr);
70540+
70541 if (mod) {
70542 /* Make sure it's within the text section. */
70543- if (!within(addr, mod->module_init, mod->init_text_size)
70544- && !within(addr, mod->module_core, mod->core_text_size))
70545+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
70546 mod = NULL;
70547 }
70548 return mod;
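
The kernel/module.c hunks above implement the PaX-style split of each module into an executable read-only mapping (module_*_rx) and a writable non-executable mapping (module_*_rw), so every address-range check has to name the region it means. A minimal compilable sketch of the containment tests, assuming a stand-in type (struct module_layout below is illustrative, not the kernel's struct module):

#include <stdbool.h>

/* Stand-in for the rx/rw fields this patch adds to struct module. */
struct module_layout {
	void *module_core_rx, *module_core_rw;
	unsigned long core_size_rx, core_size_rw;
};

static bool within(unsigned long addr, const void *start, unsigned long size)
{
	return addr >= (unsigned long)start &&
	       addr - (unsigned long)start < size;
}

static bool within_module_core_rx(unsigned long addr, const struct module_layout *m)
{
	return within(addr, m->module_core_rx, m->core_size_rx);  /* text */
}

static bool within_module_core_rw(unsigned long addr, const struct module_layout *m)
{
	return within(addr, m->module_core_rw, m->core_size_rw);  /* data */
}

__module_text_address() above now answers NULL unless the address lands in an rx region, which is what makes the rw mappings non-executable in practice.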
70549diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
70550index 7e3443f..b2a1e6b 100644
70551--- a/kernel/mutex-debug.c
70552+++ b/kernel/mutex-debug.c
70553@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
70554 }
70555
70556 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
70557- struct thread_info *ti)
70558+ struct task_struct *task)
70559 {
70560 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
70561
70562 /* Mark the current thread as blocked on the lock: */
70563- ti->task->blocked_on = waiter;
70564+ task->blocked_on = waiter;
70565 }
70566
70567 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
70568- struct thread_info *ti)
70569+ struct task_struct *task)
70570 {
70571 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
70572- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
70573- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
70574- ti->task->blocked_on = NULL;
70575+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
70576+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
70577+ task->blocked_on = NULL;
70578
70579 list_del_init(&waiter->list);
70580 waiter->task = NULL;
70581diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
70582index 0799fd3..d06ae3b 100644
70583--- a/kernel/mutex-debug.h
70584+++ b/kernel/mutex-debug.h
70585@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
70586 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
70587 extern void debug_mutex_add_waiter(struct mutex *lock,
70588 struct mutex_waiter *waiter,
70589- struct thread_info *ti);
70590+ struct task_struct *task);
70591 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
70592- struct thread_info *ti);
70593+ struct task_struct *task);
70594 extern void debug_mutex_unlock(struct mutex *lock);
70595 extern void debug_mutex_init(struct mutex *lock, const char *name,
70596 struct lock_class_key *key);
70597diff --git a/kernel/mutex.c b/kernel/mutex.c
70598index a307cc9..27fd2e9 100644
70599--- a/kernel/mutex.c
70600+++ b/kernel/mutex.c
70601@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
70602 spin_lock_mutex(&lock->wait_lock, flags);
70603
70604 debug_mutex_lock_common(lock, &waiter);
70605- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
70606+ debug_mutex_add_waiter(lock, &waiter, task);
70607
70608 /* add waiting tasks to the end of the waitqueue (FIFO): */
70609 list_add_tail(&waiter.list, &lock->wait_list);
70610@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
70611 * TASK_UNINTERRUPTIBLE case.)
70612 */
70613 if (unlikely(signal_pending_state(state, task))) {
70614- mutex_remove_waiter(lock, &waiter,
70615- task_thread_info(task));
70616+ mutex_remove_waiter(lock, &waiter, task);
70617 mutex_release(&lock->dep_map, 1, ip);
70618 spin_unlock_mutex(&lock->wait_lock, flags);
70619
70620@@ -247,7 +246,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
70621 done:
70622 lock_acquired(&lock->dep_map, ip);
70623 /* got the lock - rejoice! */
70624- mutex_remove_waiter(lock, &waiter, current_thread_info());
70625+ mutex_remove_waiter(lock, &waiter, task);
70626 mutex_set_owner(lock);
70627
70628 /* set it to 0 if there are no waiters left: */
70629diff --git a/kernel/notifier.c b/kernel/notifier.c
70630index 2d5cc4c..d9ea600 100644
70631--- a/kernel/notifier.c
70632+++ b/kernel/notifier.c
70633@@ -5,6 +5,7 @@
70634 #include <linux/rcupdate.h>
70635 #include <linux/vmalloc.h>
70636 #include <linux/reboot.h>
70637+#include <linux/mm.h>
70638
70639 /*
70640 * Notifier list for kernel code which wants to be called
70641@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
70642 while ((*nl) != NULL) {
70643 if (n->priority > (*nl)->priority)
70644 break;
70645- nl = &((*nl)->next);
70646+ nl = (struct notifier_block **)&((*nl)->next);
70647 }
70648- n->next = *nl;
70649+ pax_open_kernel();
70650+ *(const void **)&n->next = *nl;
70651 rcu_assign_pointer(*nl, n);
70652+ pax_close_kernel();
70653 return 0;
70654 }
70655
70656@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
70657 return 0;
70658 if (n->priority > (*nl)->priority)
70659 break;
70660- nl = &((*nl)->next);
70661+ nl = (struct notifier_block **)&((*nl)->next);
70662 }
70663- n->next = *nl;
70664+ pax_open_kernel();
70665+ *(const void **)&n->next = *nl;
70666 rcu_assign_pointer(*nl, n);
70667+ pax_close_kernel();
70668 return 0;
70669 }
70670
70671@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
70672 {
70673 while ((*nl) != NULL) {
70674 if ((*nl) == n) {
70675+ pax_open_kernel();
70676 rcu_assign_pointer(*nl, n->next);
70677+ pax_close_kernel();
70678 return 0;
70679 }
70680- nl = &((*nl)->next);
70681+ nl = (struct notifier_block **)&((*nl)->next);
70682 }
70683 return -ENOENT;
70684 }
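
The kernel/notifier.c hunks apply the recurring pattern for structures this patch moves into read-only memory: chain pointers are written only inside a pax_open_kernel()/pax_close_kernel() window, with a cast to discard const for that single store. A compilable sketch of the idiom, with no-op stand-ins for the PaX primitives (the #defines below are assumptions for illustration, not the real implementation):

#define pax_open_kernel()  ((void)0)   /* assumed no-op stand-in */
#define pax_close_kernel() ((void)0)   /* real ones toggle write protection */

struct nb_sketch {
	int (*notifier_call)(void *data);
	const struct nb_sketch *next;      /* points into RO-protected data */
};

static void chain_insert_head(const struct nb_sketch **head, struct nb_sketch *n)
{
	pax_open_kernel();                 /* lift write protection */
	*(const void **)&n->next = *head;  /* the patch's cast-away-const store */
	*head = n;
	pax_close_kernel();                /* restore write protection */
}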
70685diff --git a/kernel/panic.c b/kernel/panic.c
70686index e1b2822..5edc1d9 100644
70687--- a/kernel/panic.c
70688+++ b/kernel/panic.c
70689@@ -410,7 +410,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
70690 const char *board;
70691
70692 printk(KERN_WARNING "------------[ cut here ]------------\n");
70693- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
70694+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
70695 board = dmi_get_system_info(DMI_PRODUCT_NAME);
70696 if (board)
70697 printk(KERN_WARNING "Hardware name: %s\n", board);
70698@@ -465,7 +465,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
70699 */
70700 void __stack_chk_fail(void)
70701 {
70702- panic("stack-protector: Kernel stack is corrupted in: %p\n",
70703+ dump_stack();
70704+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
70705 __builtin_return_address(0));
70706 }
70707 EXPORT_SYMBOL(__stack_chk_fail);
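
The %pA specifier used here is a printk extension added elsewhere in this patch for printing code addresses, and __stack_chk_fail() now dumps the stack before panicking. A userspace analogue of what the stack protector catches; building with -fstack-protector-all makes the compiler emit a canary check whose failure path is __stack_chk_fail (a deliberately buggy demo, not for reuse):

#include <string.h>

static void overflow(const char *src)
{
	char buf[8];
	strcpy(buf, src);   /* deliberate overflow past buf[7] */
}

int main(void)
{
	overflow("this string is far longer than eight bytes");
	return 0;
}

At runtime the overflow clobbers the canary and the process aborts on return from overflow(), mirroring the kernel panic path above.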
70708diff --git a/kernel/pid.c b/kernel/pid.c
70709index aebd4f5..1693c13 100644
70710--- a/kernel/pid.c
70711+++ b/kernel/pid.c
70712@@ -33,6 +33,7 @@
70713 #include <linux/rculist.h>
70714 #include <linux/bootmem.h>
70715 #include <linux/hash.h>
70716+#include <linux/security.h>
70717 #include <linux/pid_namespace.h>
70718 #include <linux/init_task.h>
70719 #include <linux/syscalls.h>
70720@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
70721
70722 int pid_max = PID_MAX_DEFAULT;
70723
70724-#define RESERVED_PIDS 300
70725+#define RESERVED_PIDS 500
70726
70727 int pid_max_min = RESERVED_PIDS + 1;
70728 int pid_max_max = PID_MAX_LIMIT;
70729@@ -420,10 +421,18 @@ EXPORT_SYMBOL(pid_task);
70730 */
70731 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
70732 {
70733+ struct task_struct *task;
70734+
70735 rcu_lockdep_assert(rcu_read_lock_held(),
70736 "find_task_by_pid_ns() needs rcu_read_lock()"
70737 " protection");
70738- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
70739+
70740+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
70741+
70742+ if (gr_pid_is_chrooted(task))
70743+ return NULL;
70744+
70745+ return task;
70746 }
70747
70748 struct task_struct *find_task_by_vpid(pid_t vnr)
70749@@ -431,6 +440,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
70750 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
70751 }
70752
70753+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
70754+{
70755+ rcu_lockdep_assert(rcu_read_lock_held(),
70756+ "find_task_by_pid_ns() needs rcu_read_lock()"
70757+ " protection");
70758+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
70759+}
70760+
70761 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
70762 {
70763 struct pid *pid;
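
find_task_by_pid_ns() now filters lookups through gr_pid_is_chrooted(), hiding tasks outside the caller's chroot, while the new find_task_by_vpid_unrestricted() bypasses the filter for the tgkill carve-out in kernel/signal.c below. A compilable sketch of the filtered path, with extern stand-ins for the kernel helpers:

struct task_struct;                                  /* opaque stand-in */
extern struct task_struct *pid_lookup(int nr);       /* illustrative name */
extern int gr_pid_is_chrooted(struct task_struct *task);

static struct task_struct *find_task_filtered(int nr)
{
	struct task_struct *task = pid_lookup(nr);

	if (task && gr_pid_is_chrooted(task))
		return NULL;   /* chrooted caller: hide the outside task */
	return task;
}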
70764diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
70765index 125cb67..2e5c8ad 100644
70766--- a/kernel/posix-cpu-timers.c
70767+++ b/kernel/posix-cpu-timers.c
70768@@ -6,9 +6,11 @@
70769 #include <linux/posix-timers.h>
70770 #include <linux/errno.h>
70771 #include <linux/math64.h>
70772+#include <linux/security.h>
70773 #include <asm/uaccess.h>
70774 #include <linux/kernel_stat.h>
70775 #include <trace/events/timer.h>
70776+#include <linux/random.h>
70777
70778 /*
70779 * Called after updating RLIMIT_CPU to run cpu timer and update
70780@@ -494,6 +496,8 @@ static void cleanup_timers(struct list_head *head,
70781 */
70782 void posix_cpu_timers_exit(struct task_struct *tsk)
70783 {
70784+ add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
70785+ sizeof(unsigned long long));
70786 cleanup_timers(tsk->cpu_timers,
70787 tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);
70788
70789@@ -1578,14 +1582,14 @@ struct k_clock clock_posix_cpu = {
70790
70791 static __init int init_posix_cpu_timers(void)
70792 {
70793- struct k_clock process = {
70794+ static struct k_clock process = {
70795 .clock_getres = process_cpu_clock_getres,
70796 .clock_get = process_cpu_clock_get,
70797 .timer_create = process_cpu_timer_create,
70798 .nsleep = process_cpu_nsleep,
70799 .nsleep_restart = process_cpu_nsleep_restart,
70800 };
70801- struct k_clock thread = {
70802+ static struct k_clock thread = {
70803 .clock_getres = thread_cpu_clock_getres,
70804 .clock_get = thread_cpu_clock_get,
70805 .timer_create = thread_cpu_timer_create,
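
The k_clock initializers above become static because, with the kernel/posix-timers.c change below, posix_clocks[] stores pointers rather than copies; the address of an automatic variable would dangle once the __init function returned. A small sketch of that lifetime issue (all names are illustrative):

struct kclock_sketch { int (*clock_getres)(void); };

static struct kclock_sketch *registry[4];

static void register_clock(unsigned int id, struct kclock_sketch *kc)
{
	registry[id] = kc;                 /* stores the pointer, not a copy */
}

static void init_clocks(void)
{
	static struct kclock_sketch process_clock = { 0 };  /* outlives init */

	register_clock(0, &process_clock);
	/* were process_clock automatic, registry[0] would dangle on return */
}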
70806diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
70807index 69185ae..cc2847a 100644
70808--- a/kernel/posix-timers.c
70809+++ b/kernel/posix-timers.c
70810@@ -43,6 +43,7 @@
70811 #include <linux/idr.h>
70812 #include <linux/posix-clock.h>
70813 #include <linux/posix-timers.h>
70814+#include <linux/grsecurity.h>
70815 #include <linux/syscalls.h>
70816 #include <linux/wait.h>
70817 #include <linux/workqueue.h>
70818@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
70819 * which we beg off on and pass to do_sys_settimeofday().
70820 */
70821
70822-static struct k_clock posix_clocks[MAX_CLOCKS];
70823+static struct k_clock *posix_clocks[MAX_CLOCKS];
70824
70825 /*
70826 * These ones are defined below.
70827@@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
70828 */
70829 static __init int init_posix_timers(void)
70830 {
70831- struct k_clock clock_realtime = {
70832+ static struct k_clock clock_realtime = {
70833 .clock_getres = hrtimer_get_res,
70834 .clock_get = posix_clock_realtime_get,
70835 .clock_set = posix_clock_realtime_set,
70836@@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
70837 .timer_get = common_timer_get,
70838 .timer_del = common_timer_del,
70839 };
70840- struct k_clock clock_monotonic = {
70841+ static struct k_clock clock_monotonic = {
70842 .clock_getres = hrtimer_get_res,
70843 .clock_get = posix_ktime_get_ts,
70844 .nsleep = common_nsleep,
70845@@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
70846 .timer_get = common_timer_get,
70847 .timer_del = common_timer_del,
70848 };
70849- struct k_clock clock_monotonic_raw = {
70850+ static struct k_clock clock_monotonic_raw = {
70851 .clock_getres = hrtimer_get_res,
70852 .clock_get = posix_get_monotonic_raw,
70853 };
70854- struct k_clock clock_realtime_coarse = {
70855+ static struct k_clock clock_realtime_coarse = {
70856 .clock_getres = posix_get_coarse_res,
70857 .clock_get = posix_get_realtime_coarse,
70858 };
70859- struct k_clock clock_monotonic_coarse = {
70860+ static struct k_clock clock_monotonic_coarse = {
70861 .clock_getres = posix_get_coarse_res,
70862 .clock_get = posix_get_monotonic_coarse,
70863 };
70864- struct k_clock clock_boottime = {
70865+ static struct k_clock clock_boottime = {
70866 .clock_getres = hrtimer_get_res,
70867 .clock_get = posix_get_boottime,
70868 .nsleep = common_nsleep,
70869@@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
70870 return;
70871 }
70872
70873- posix_clocks[clock_id] = *new_clock;
70874+ posix_clocks[clock_id] = new_clock;
70875 }
70876 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
70877
70878@@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
70879 return (id & CLOCKFD_MASK) == CLOCKFD ?
70880 &clock_posix_dynamic : &clock_posix_cpu;
70881
70882- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
70883+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
70884 return NULL;
70885- return &posix_clocks[id];
70886+ return posix_clocks[id];
70887 }
70888
70889 static int common_timer_create(struct k_itimer *new_timer)
70890@@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
70891 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
70892 return -EFAULT;
70893
70894+ /* Only the CLOCK_REALTIME clock can be set; all other clocks
70895+ have their clock_set fptr set to a nosettime dummy function.
70896+ CLOCK_REALTIME's clock_set, posix_clock_realtime_set (wired up
70897+ in init_posix_timers above), calls do_sys_settimeofday, which
70898+ we hook.
70899+ */
70900+
70901 return kc->clock_set(which_clock, &new_tp);
70902 }
70903
70904diff --git a/kernel/power/process.c b/kernel/power/process.c
70905index 87da817..30ddd13 100644
70906--- a/kernel/power/process.c
70907+++ b/kernel/power/process.c
70908@@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
70909 u64 elapsed_csecs64;
70910 unsigned int elapsed_csecs;
70911 bool wakeup = false;
70912+ bool timedout = false;
70913
70914 do_gettimeofday(&start);
70915
70916@@ -43,6 +44,8 @@ static int try_to_freeze_tasks(bool user_only)
70917
70918 while (true) {
70919 todo = 0;
70920+ if (time_after(jiffies, end_time))
70921+ timedout = true;
70922 read_lock(&tasklist_lock);
70923 do_each_thread(g, p) {
70924 if (p == current || !freeze_task(p))
70925@@ -58,9 +61,13 @@ static int try_to_freeze_tasks(bool user_only)
70926 * guaranteed that TASK_STOPPED/TRACED -> TASK_RUNNING
70927 * transition can't race with task state testing here.
70928 */
70929- if (!task_is_stopped_or_traced(p) &&
70930- !freezer_should_skip(p))
70931+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
70932 todo++;
70933+ if (timedout) {
70934+ printk(KERN_ERR "Task refusing to freeze:\n");
70935+ sched_show_task(p);
70936+ }
70937+ }
70938 } while_each_thread(g, p);
70939 read_unlock(&tasklist_lock);
70940
70941@@ -69,7 +76,7 @@ static int try_to_freeze_tasks(bool user_only)
70942 todo += wq_busy;
70943 }
70944
70945- if (!todo || time_after(jiffies, end_time))
70946+ if (!todo || timedout)
70947 break;
70948
70949 if (pm_wakeup_pending()) {
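
try_to_freeze_tasks() latches the timeout once per scan so the pass in which the deadline expires reports every task still refusing to freeze, rather than breaking out after the first check. A compilable sketch of the latch, assuming callback stand-ins for the task walk:

#include <stdbool.h>
#include <stddef.h>

static bool freeze_scan_once(unsigned long now, unsigned long end_time,
			     size_t ntasks, bool (*frozen)(size_t),
			     void (*report)(size_t))
{
	bool timedout = (long)(now - end_time) > 0;  /* time_after() analogue */
	size_t todo = 0;

	for (size_t i = 0; i < ntasks; i++) {
		if (frozen(i))
			continue;
		todo++;
		if (timedout)
			report(i);  /* every straggler named in one pass */
	}
	return todo == 0 || timedout;  /* done, or gave up with a full report */
}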
70950diff --git a/kernel/printk.c b/kernel/printk.c
70951index f8e0b5a..dda2a5c 100644
70952--- a/kernel/printk.c
70953+++ b/kernel/printk.c
70954@@ -817,6 +817,11 @@ static int check_syslog_permissions(int type, bool from_file)
70955 if (from_file && type != SYSLOG_ACTION_OPEN)
70956 return 0;
70957
70958+#ifdef CONFIG_GRKERNSEC_DMESG
70959+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
70960+ return -EPERM;
70961+#endif
70962+
70963 if (syslog_action_restricted(type)) {
70964 if (capable(CAP_SYSLOG))
70965 return 0;
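
With CONFIG_GRKERNSEC_DMESG and the grsec_enable_dmesg toggle, unprivileged readers of the kernel log are refused before the stock syslog_action_restricted() checks run. A userspace probe of the effect via glibc's klogctl(3); expect EPERM without CAP_SYSLOG when the restriction is active:

#include <errno.h>
#include <stdio.h>
#include <sys/klog.h>

int main(void)
{
	char buf[4096];
	int n = klogctl(3 /* SYSLOG_ACTION_READ_ALL */, buf, sizeof(buf));

	if (n < 0)
		perror("klogctl");   /* EPERM expected under the restriction */
	else
		printf("read %d bytes of kernel log\n", n);
	return 0;
}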
70966diff --git a/kernel/profile.c b/kernel/profile.c
70967index 76b8e77..a2930e8 100644
70968--- a/kernel/profile.c
70969+++ b/kernel/profile.c
70970@@ -39,7 +39,7 @@ struct profile_hit {
70971 /* Oprofile timer tick hook */
70972 static int (*timer_hook)(struct pt_regs *) __read_mostly;
70973
70974-static atomic_t *prof_buffer;
70975+static atomic_unchecked_t *prof_buffer;
70976 static unsigned long prof_len, prof_shift;
70977
70978 int prof_on __read_mostly;
70979@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
70980 hits[i].pc = 0;
70981 continue;
70982 }
70983- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
70984+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
70985 hits[i].hits = hits[i].pc = 0;
70986 }
70987 }
70988@@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
70989 * Add the current hit(s) and flush the write-queue out
70990 * to the global buffer:
70991 */
70992- atomic_add(nr_hits, &prof_buffer[pc]);
70993+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
70994 for (i = 0; i < NR_PROFILE_HIT; ++i) {
70995- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
70996+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
70997 hits[i].pc = hits[i].hits = 0;
70998 }
70999 out:
71000@@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
71001 {
71002 unsigned long pc;
71003 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
71004- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
71005+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
71006 }
71007 #endif /* !CONFIG_SMP */
71008
71009@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
71010 return -EFAULT;
71011 buf++; p++; count--; read++;
71012 }
71013- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
71014+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
71015 if (copy_to_user(buf, (void *)pnt, count))
71016 return -EFAULT;
71017 read += count;
71018@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
71019 }
71020 #endif
71021 profile_discard_flip_buffers();
71022- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
71023+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
71024 return count;
71025 }
71026
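The profile-buffer conversions above are an instance of the pattern that dominates this patch: under PaX REFCOUNT, plain atomic_t operations detect and refuse overflow, so counters that may legitimately wrap (pure statistics such as prof_buffer hits) move to the *_unchecked variants. A sketch of the distinction; the early-return shown stands in for PaX's actual trap-and-saturate handler:

#include <limits.h>

typedef struct { volatile int counter; } atomic_sketch_t;
typedef struct { volatile int counter; } atomic_unchecked_sketch_t;

static void atomic_inc_sketch(atomic_sketch_t *v)
{
	if (v->counter == INT_MAX)
		return;           /* real PaX traps and saturates instead */
	v->counter++;
}

static void atomic_inc_unchecked_sketch(atomic_unchecked_sketch_t *v)
{
	v->counter++;             /* statistics may wrap without harm */
}

Reference counts stay checked, because a wrapped refcount is an exploitable use-after-free; a wrapped statistic is just a wrong number.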
71027diff --git a/kernel/ptrace.c b/kernel/ptrace.c
71028index 1f5e55d..8b8f969 100644
71029--- a/kernel/ptrace.c
71030+++ b/kernel/ptrace.c
71031@@ -280,7 +280,7 @@ static int ptrace_attach(struct task_struct *task, long request,
71032
71033 if (seize)
71034 flags |= PT_SEIZED;
71035- if (ns_capable(task_user_ns(task), CAP_SYS_PTRACE))
71036+ if (ns_capable_nolog(task_user_ns(task), CAP_SYS_PTRACE))
71037 flags |= PT_PTRACE_CAP;
71038 task->ptrace = flags;
71039
71040@@ -487,7 +487,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
71041 break;
71042 return -EIO;
71043 }
71044- if (copy_to_user(dst, buf, retval))
71045+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
71046 return -EFAULT;
71047 copied += retval;
71048 src += retval;
71049@@ -672,7 +672,7 @@ int ptrace_request(struct task_struct *child, long request,
71050 bool seized = child->ptrace & PT_SEIZED;
71051 int ret = -EIO;
71052 siginfo_t siginfo, *si;
71053- void __user *datavp = (void __user *) data;
71054+ void __user *datavp = (__force void __user *) data;
71055 unsigned long __user *datalp = datavp;
71056 unsigned long flags;
71057
71058@@ -874,14 +874,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
71059 goto out;
71060 }
71061
71062+ if (gr_handle_ptrace(child, request)) {
71063+ ret = -EPERM;
71064+ goto out_put_task_struct;
71065+ }
71066+
71067 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
71068 ret = ptrace_attach(child, request, addr, data);
71069 /*
71070 * Some architectures need to do book-keeping after
71071 * a ptrace attach.
71072 */
71073- if (!ret)
71074+ if (!ret) {
71075 arch_ptrace_attach(child);
71076+ gr_audit_ptrace(child);
71077+ }
71078 goto out_put_task_struct;
71079 }
71080
71081@@ -907,7 +914,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
71082 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
71083 if (copied != sizeof(tmp))
71084 return -EIO;
71085- return put_user(tmp, (unsigned long __user *)data);
71086+ return put_user(tmp, (__force unsigned long __user *)data);
71087 }
71088
71089 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
71090@@ -1017,14 +1024,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
71091 goto out;
71092 }
71093
71094+ if (gr_handle_ptrace(child, request)) {
71095+ ret = -EPERM;
71096+ goto out_put_task_struct;
71097+ }
71098+
71099 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
71100 ret = ptrace_attach(child, request, addr, data);
71101 /*
71102 * Some architectures need to do book-keeping after
71103 * a ptrace attach.
71104 */
71105- if (!ret)
71106+ if (!ret) {
71107 arch_ptrace_attach(child);
71108+ gr_audit_ptrace(child);
71109+ }
71110 goto out_put_task_struct;
71111 }
71112
71113diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
71114index e4c6a59..c86621a 100644
71115--- a/kernel/rcutiny.c
71116+++ b/kernel/rcutiny.c
71117@@ -46,7 +46,7 @@
71118 struct rcu_ctrlblk;
71119 static void invoke_rcu_callbacks(void);
71120 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
71121-static void rcu_process_callbacks(struct softirq_action *unused);
71122+static void rcu_process_callbacks(void);
71123 static void __call_rcu(struct rcu_head *head,
71124 void (*func)(struct rcu_head *rcu),
71125 struct rcu_ctrlblk *rcp);
71126@@ -310,7 +310,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
71127 rcu_is_callbacks_kthread()));
71128 }
71129
71130-static void rcu_process_callbacks(struct softirq_action *unused)
71131+static void rcu_process_callbacks(void)
71132 {
71133 __rcu_process_callbacks(&rcu_sched_ctrlblk);
71134 __rcu_process_callbacks(&rcu_bh_ctrlblk);
71135diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
71136index 3d01902..afbf46e 100644
71137--- a/kernel/rcutiny_plugin.h
71138+++ b/kernel/rcutiny_plugin.h
71139@@ -893,7 +893,7 @@ static int rcu_kthread(void *arg)
71140 have_rcu_kthread_work = morework;
71141 local_irq_restore(flags);
71142 if (work)
71143- rcu_process_callbacks(NULL);
71144+ rcu_process_callbacks();
71145 schedule_timeout_interruptible(1); /* Leave CPU for others. */
71146 }
71147
71148diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
71149index aaa7b9f..055ff1e 100644
71150--- a/kernel/rcutorture.c
71151+++ b/kernel/rcutorture.c
71152@@ -163,12 +163,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
71153 { 0 };
71154 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
71155 { 0 };
71156-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
71157-static atomic_t n_rcu_torture_alloc;
71158-static atomic_t n_rcu_torture_alloc_fail;
71159-static atomic_t n_rcu_torture_free;
71160-static atomic_t n_rcu_torture_mberror;
71161-static atomic_t n_rcu_torture_error;
71162+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
71163+static atomic_unchecked_t n_rcu_torture_alloc;
71164+static atomic_unchecked_t n_rcu_torture_alloc_fail;
71165+static atomic_unchecked_t n_rcu_torture_free;
71166+static atomic_unchecked_t n_rcu_torture_mberror;
71167+static atomic_unchecked_t n_rcu_torture_error;
71168 static long n_rcu_torture_barrier_error;
71169 static long n_rcu_torture_boost_ktrerror;
71170 static long n_rcu_torture_boost_rterror;
71171@@ -272,11 +272,11 @@ rcu_torture_alloc(void)
71172
71173 spin_lock_bh(&rcu_torture_lock);
71174 if (list_empty(&rcu_torture_freelist)) {
71175- atomic_inc(&n_rcu_torture_alloc_fail);
71176+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
71177 spin_unlock_bh(&rcu_torture_lock);
71178 return NULL;
71179 }
71180- atomic_inc(&n_rcu_torture_alloc);
71181+ atomic_inc_unchecked(&n_rcu_torture_alloc);
71182 p = rcu_torture_freelist.next;
71183 list_del_init(p);
71184 spin_unlock_bh(&rcu_torture_lock);
71185@@ -289,7 +289,7 @@ rcu_torture_alloc(void)
71186 static void
71187 rcu_torture_free(struct rcu_torture *p)
71188 {
71189- atomic_inc(&n_rcu_torture_free);
71190+ atomic_inc_unchecked(&n_rcu_torture_free);
71191 spin_lock_bh(&rcu_torture_lock);
71192 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
71193 spin_unlock_bh(&rcu_torture_lock);
71194@@ -410,7 +410,7 @@ rcu_torture_cb(struct rcu_head *p)
71195 i = rp->rtort_pipe_count;
71196 if (i > RCU_TORTURE_PIPE_LEN)
71197 i = RCU_TORTURE_PIPE_LEN;
71198- atomic_inc(&rcu_torture_wcount[i]);
71199+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
71200 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
71201 rp->rtort_mbtest = 0;
71202 rcu_torture_free(rp);
71203@@ -459,7 +459,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
71204 i = rp->rtort_pipe_count;
71205 if (i > RCU_TORTURE_PIPE_LEN)
71206 i = RCU_TORTURE_PIPE_LEN;
71207- atomic_inc(&rcu_torture_wcount[i]);
71208+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
71209 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
71210 rp->rtort_mbtest = 0;
71211 list_del(&rp->rtort_free);
71212@@ -1002,7 +1002,7 @@ rcu_torture_writer(void *arg)
71213 i = old_rp->rtort_pipe_count;
71214 if (i > RCU_TORTURE_PIPE_LEN)
71215 i = RCU_TORTURE_PIPE_LEN;
71216- atomic_inc(&rcu_torture_wcount[i]);
71217+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
71218 old_rp->rtort_pipe_count++;
71219 cur_ops->deferred_free(old_rp);
71220 }
71221@@ -1087,7 +1087,7 @@ static void rcu_torture_timer(unsigned long unused)
71222 }
71223 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
71224 if (p->rtort_mbtest == 0)
71225- atomic_inc(&n_rcu_torture_mberror);
71226+ atomic_inc_unchecked(&n_rcu_torture_mberror);
71227 spin_lock(&rand_lock);
71228 cur_ops->read_delay(&rand);
71229 n_rcu_torture_timers++;
71230@@ -1151,7 +1151,7 @@ rcu_torture_reader(void *arg)
71231 }
71232 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
71233 if (p->rtort_mbtest == 0)
71234- atomic_inc(&n_rcu_torture_mberror);
71235+ atomic_inc_unchecked(&n_rcu_torture_mberror);
71236 cur_ops->read_delay(&rand);
71237 preempt_disable();
71238 pipe_count = p->rtort_pipe_count;
71239@@ -1210,11 +1210,11 @@ rcu_torture_printk(char *page)
71240 rcu_torture_current,
71241 rcu_torture_current_version,
71242 list_empty(&rcu_torture_freelist),
71243- atomic_read(&n_rcu_torture_alloc),
71244- atomic_read(&n_rcu_torture_alloc_fail),
71245- atomic_read(&n_rcu_torture_free));
71246+ atomic_read_unchecked(&n_rcu_torture_alloc),
71247+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
71248+ atomic_read_unchecked(&n_rcu_torture_free));
71249 cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
71250- atomic_read(&n_rcu_torture_mberror),
71251+ atomic_read_unchecked(&n_rcu_torture_mberror),
71252 n_rcu_torture_boost_ktrerror,
71253 n_rcu_torture_boost_rterror);
71254 cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
71255@@ -1233,14 +1233,14 @@ rcu_torture_printk(char *page)
71256 n_barrier_attempts,
71257 n_rcu_torture_barrier_error);
71258 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
71259- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
71260+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
71261 n_rcu_torture_barrier_error != 0 ||
71262 n_rcu_torture_boost_ktrerror != 0 ||
71263 n_rcu_torture_boost_rterror != 0 ||
71264 n_rcu_torture_boost_failure != 0 ||
71265 i > 1) {
71266 cnt += sprintf(&page[cnt], "!!! ");
71267- atomic_inc(&n_rcu_torture_error);
71268+ atomic_inc_unchecked(&n_rcu_torture_error);
71269 WARN_ON_ONCE(1);
71270 }
71271 cnt += sprintf(&page[cnt], "Reader Pipe: ");
71272@@ -1254,7 +1254,7 @@ rcu_torture_printk(char *page)
71273 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
71274 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
71275 cnt += sprintf(&page[cnt], " %d",
71276- atomic_read(&rcu_torture_wcount[i]));
71277+ atomic_read_unchecked(&rcu_torture_wcount[i]));
71278 }
71279 cnt += sprintf(&page[cnt], "\n");
71280 if (cur_ops->stats)
71281@@ -1938,7 +1938,7 @@ rcu_torture_cleanup(void)
71282
71283 if (cur_ops->cleanup)
71284 cur_ops->cleanup();
71285- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
71286+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
71287 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
71288 else if (n_online_successes != n_online_attempts ||
71289 n_offline_successes != n_offline_attempts)
71290@@ -2007,18 +2007,18 @@ rcu_torture_init(void)
71291
71292 rcu_torture_current = NULL;
71293 rcu_torture_current_version = 0;
71294- atomic_set(&n_rcu_torture_alloc, 0);
71295- atomic_set(&n_rcu_torture_alloc_fail, 0);
71296- atomic_set(&n_rcu_torture_free, 0);
71297- atomic_set(&n_rcu_torture_mberror, 0);
71298- atomic_set(&n_rcu_torture_error, 0);
71299+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
71300+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
71301+ atomic_set_unchecked(&n_rcu_torture_free, 0);
71302+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
71303+ atomic_set_unchecked(&n_rcu_torture_error, 0);
71304 n_rcu_torture_barrier_error = 0;
71305 n_rcu_torture_boost_ktrerror = 0;
71306 n_rcu_torture_boost_rterror = 0;
71307 n_rcu_torture_boost_failure = 0;
71308 n_rcu_torture_boosts = 0;
71309 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
71310- atomic_set(&rcu_torture_wcount[i], 0);
71311+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
71312 for_each_possible_cpu(cpu) {
71313 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
71314 per_cpu(rcu_torture_count, cpu)[i] = 0;
71315diff --git a/kernel/rcutree.c b/kernel/rcutree.c
71316index 2682295..0f2297e 100644
71317--- a/kernel/rcutree.c
71318+++ b/kernel/rcutree.c
71319@@ -348,9 +348,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
71320 rcu_prepare_for_idle(smp_processor_id());
71321 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
71322 smp_mb__before_atomic_inc(); /* See above. */
71323- atomic_inc(&rdtp->dynticks);
71324+ atomic_inc_unchecked(&rdtp->dynticks);
71325 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
71326- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
71327+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
71328
71329 /*
71330 * It is illegal to enter an extended quiescent state while
71331@@ -508,10 +508,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
71332 int user)
71333 {
71334 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
71335- atomic_inc(&rdtp->dynticks);
71336+ atomic_inc_unchecked(&rdtp->dynticks);
71337 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
71338 smp_mb__after_atomic_inc(); /* See above. */
71339- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
71340+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
71341 rcu_cleanup_after_idle(smp_processor_id());
71342 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
71343 if (!user && !is_idle_task(current)) {
71344@@ -670,14 +670,14 @@ void rcu_nmi_enter(void)
71345 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
71346
71347 if (rdtp->dynticks_nmi_nesting == 0 &&
71348- (atomic_read(&rdtp->dynticks) & 0x1))
71349+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
71350 return;
71351 rdtp->dynticks_nmi_nesting++;
71352 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
71353- atomic_inc(&rdtp->dynticks);
71354+ atomic_inc_unchecked(&rdtp->dynticks);
71355 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
71356 smp_mb__after_atomic_inc(); /* See above. */
71357- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
71358+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
71359 }
71360
71361 /**
71362@@ -696,9 +696,9 @@ void rcu_nmi_exit(void)
71363 return;
71364 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
71365 smp_mb__before_atomic_inc(); /* See above. */
71366- atomic_inc(&rdtp->dynticks);
71367+ atomic_inc_unchecked(&rdtp->dynticks);
71368 smp_mb__after_atomic_inc(); /* Force delay to next write. */
71369- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
71370+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
71371 }
71372
71373 /**
71374@@ -712,7 +712,7 @@ int rcu_is_cpu_idle(void)
71375 int ret;
71376
71377 preempt_disable();
71378- ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
71379+ ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
71380 preempt_enable();
71381 return ret;
71382 }
71383@@ -795,7 +795,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
71384 */
71385 static int dyntick_save_progress_counter(struct rcu_data *rdp)
71386 {
71387- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
71388+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
71389 return (rdp->dynticks_snap & 0x1) == 0;
71390 }
71391
71392@@ -810,7 +810,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
71393 unsigned int curr;
71394 unsigned int snap;
71395
71396- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
71397+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
71398 snap = (unsigned int)rdp->dynticks_snap;
71399
71400 /*
71401@@ -858,10 +858,10 @@ static int jiffies_till_stall_check(void)
71402 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
71403 */
71404 if (till_stall_check < 3) {
71405- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
71406+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
71407 till_stall_check = 3;
71408 } else if (till_stall_check > 300) {
71409- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
71410+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
71411 till_stall_check = 300;
71412 }
71413 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
71414@@ -1589,7 +1589,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
71415 rsp->qlen += rdp->qlen;
71416 rdp->n_cbs_orphaned += rdp->qlen;
71417 rdp->qlen_lazy = 0;
71418- ACCESS_ONCE(rdp->qlen) = 0;
71419+ ACCESS_ONCE_RW(rdp->qlen) = 0;
71420 }
71421
71422 /*
71423@@ -1831,7 +1831,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
71424 }
71425 smp_mb(); /* List handling before counting for rcu_barrier(). */
71426 rdp->qlen_lazy -= count_lazy;
71427- ACCESS_ONCE(rdp->qlen) -= count;
71428+ ACCESS_ONCE_RW(rdp->qlen) -= count;
71429 rdp->n_cbs_invoked += count;
71430
71431 /* Reinstate batch limit if we have worked down the excess. */
71432@@ -2024,7 +2024,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
71433 /*
71434 * Do RCU core processing for the current CPU.
71435 */
71436-static void rcu_process_callbacks(struct softirq_action *unused)
71437+static void rcu_process_callbacks(void)
71438 {
71439 struct rcu_state *rsp;
71440
71441@@ -2136,7 +2136,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
71442 local_irq_restore(flags);
71443 return;
71444 }
71445- ACCESS_ONCE(rdp->qlen)++;
71446+ ACCESS_ONCE_RW(rdp->qlen)++;
71447 if (lazy)
71448 rdp->qlen_lazy++;
71449 else
71450@@ -2250,8 +2250,8 @@ void synchronize_rcu_bh(void)
71451 }
71452 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
71453
71454-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
71455-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
71456+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
71457+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
71458
71459 static int synchronize_sched_expedited_cpu_stop(void *data)
71460 {
71461@@ -2312,7 +2312,7 @@ void synchronize_sched_expedited(void)
71462 int firstsnap, s, snap, trycount = 0;
71463
71464 /* Note that atomic_inc_return() implies full memory barrier. */
71465- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
71466+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
71467 get_online_cpus();
71468 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
71469
71470@@ -2334,7 +2334,7 @@ void synchronize_sched_expedited(void)
71471 }
71472
71473 /* Check to see if someone else did our work for us. */
71474- s = atomic_read(&sync_sched_expedited_done);
71475+ s = atomic_read_unchecked(&sync_sched_expedited_done);
71476 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
71477 smp_mb(); /* ensure test happens before caller kfree */
71478 return;
71479@@ -2349,7 +2349,7 @@ void synchronize_sched_expedited(void)
71480 * grace period works for us.
71481 */
71482 get_online_cpus();
71483- snap = atomic_read(&sync_sched_expedited_started);
71484+ snap = atomic_read_unchecked(&sync_sched_expedited_started);
71485 smp_mb(); /* ensure read is before try_stop_cpus(). */
71486 }
71487
71488@@ -2360,12 +2360,12 @@ void synchronize_sched_expedited(void)
71489 * than we did beat us to the punch.
71490 */
71491 do {
71492- s = atomic_read(&sync_sched_expedited_done);
71493+ s = atomic_read_unchecked(&sync_sched_expedited_done);
71494 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
71495 smp_mb(); /* ensure test happens before caller kfree */
71496 break;
71497 }
71498- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
71499+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
71500
71501 put_online_cpus();
71502 }
71503@@ -2539,7 +2539,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
71504 * ACCESS_ONCE() to prevent the compiler from speculating
71505 * the increment to precede the early-exit check.
71506 */
71507- ACCESS_ONCE(rsp->n_barrier_done)++;
71508+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
71509 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
71510 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
71511 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
71512@@ -2581,7 +2581,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
71513
71514 /* Increment ->n_barrier_done to prevent duplicate work. */
71515 smp_mb(); /* Keep increment after above mechanism. */
71516- ACCESS_ONCE(rsp->n_barrier_done)++;
71517+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
71518 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
71519 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
71520 smp_mb(); /* Keep increment before caller's subsequent code. */
71521@@ -2626,10 +2626,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
71522 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
71523 init_callback_list(rdp);
71524 rdp->qlen_lazy = 0;
71525- ACCESS_ONCE(rdp->qlen) = 0;
71526+ ACCESS_ONCE_RW(rdp->qlen) = 0;
71527 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
71528 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
71529- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
71530+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
71531 #ifdef CONFIG_RCU_USER_QS
71532 WARN_ON_ONCE(rdp->dynticks->in_user);
71533 #endif
71534@@ -2664,8 +2664,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
71535 rdp->blimit = blimit;
71536 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
71537 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
71538- atomic_set(&rdp->dynticks->dynticks,
71539- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
71540+ atomic_set_unchecked(&rdp->dynticks->dynticks,
71541+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
71542 rcu_prepare_for_idle_init(cpu);
71543 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
71544
71545diff --git a/kernel/rcutree.h b/kernel/rcutree.h
71546index a240f03..d469618 100644
71547--- a/kernel/rcutree.h
71548+++ b/kernel/rcutree.h
71549@@ -86,7 +86,7 @@ struct rcu_dynticks {
71550 long long dynticks_nesting; /* Track irq/process nesting level. */
71551 /* Process level is worth LLONG_MAX/2. */
71552 int dynticks_nmi_nesting; /* Track NMI nesting level. */
71553- atomic_t dynticks; /* Even value for idle, else odd. */
71554+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
71555 #ifdef CONFIG_RCU_FAST_NO_HZ
71556 int dyntick_drain; /* Prepare-for-idle state variable. */
71557 unsigned long dyntick_holdoff;
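
dynticks can safely become atomic_unchecked_t because it encodes state in its parity (incremented on every idle transition: even means dyntick-idle, odd means active), which the WARN_ON_ONCE checks above rely on; overflow is harmless by construction. A one-line sketch of the parity test:

static int in_dyntick_idle(int dynticks)
{
	return (dynticks & 0x1) == 0;   /* even: idle, odd: active */
}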
71558diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
71559index f921154..34c4873 100644
71560--- a/kernel/rcutree_plugin.h
71561+++ b/kernel/rcutree_plugin.h
71562@@ -865,7 +865,7 @@ void synchronize_rcu_expedited(void)
71563
71564 /* Clean up and exit. */
71565 smp_mb(); /* ensure expedited GP seen before counter increment. */
71566- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
71567+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
71568 unlock_mb_ret:
71569 mutex_unlock(&sync_rcu_preempt_exp_mutex);
71570 mb_ret:
71571@@ -2040,7 +2040,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
71572 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
71573 printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n",
71574 cpu, ticks_value, ticks_title,
71575- atomic_read(&rdtp->dynticks) & 0xfff,
71576+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
71577 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
71578 fast_no_hz);
71579 }
71580diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
71581index 693513b..b9f1d63 100644
71582--- a/kernel/rcutree_trace.c
71583+++ b/kernel/rcutree_trace.c
71584@@ -92,7 +92,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
71585 rdp->completed, rdp->gpnum,
71586 rdp->passed_quiesce, rdp->qs_pending);
71587 seq_printf(m, " dt=%d/%llx/%d df=%lu",
71588- atomic_read(&rdp->dynticks->dynticks),
71589+ atomic_read_unchecked(&rdp->dynticks->dynticks),
71590 rdp->dynticks->dynticks_nesting,
71591 rdp->dynticks->dynticks_nmi_nesting,
71592 rdp->dynticks_fqs);
71593@@ -154,7 +154,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
71594 rdp->completed, rdp->gpnum,
71595 rdp->passed_quiesce, rdp->qs_pending);
71596 seq_printf(m, ",%d,%llx,%d,%lu",
71597- atomic_read(&rdp->dynticks->dynticks),
71598+ atomic_read_unchecked(&rdp->dynticks->dynticks),
71599 rdp->dynticks->dynticks_nesting,
71600 rdp->dynticks->dynticks_nmi_nesting,
71601 rdp->dynticks_fqs);
71602diff --git a/kernel/resource.c b/kernel/resource.c
71603index 73f35d4..4684fc4 100644
71604--- a/kernel/resource.c
71605+++ b/kernel/resource.c
71606@@ -143,8 +143,18 @@ static const struct file_operations proc_iomem_operations = {
71607
71608 static int __init ioresources_init(void)
71609 {
71610+#ifdef CONFIG_GRKERNSEC_PROC_ADD
71611+#ifdef CONFIG_GRKERNSEC_PROC_USER
71612+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
71613+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
71614+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71615+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
71616+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
71617+#endif
71618+#else
71619 proc_create("ioports", 0, NULL, &proc_ioports_operations);
71620 proc_create("iomem", 0, NULL, &proc_iomem_operations);
71621+#endif
71622 return 0;
71623 }
71624 __initcall(ioresources_init);
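
ioresources_init() gets the same permission ladder as proc_modules_init() in kernel/module.c above. A sketch collapsing the two #ifdef ladders into the resulting modes (the helper is illustrative; the kernel selects these at compile time):

#include <sys/stat.h>

typedef unsigned short umode_t;   /* stand-in for a userspace build */

static umode_t grsec_proc_mode(int proc_user, int proc_usergroup)
{
	if (proc_user)
		return S_IRUSR;             /* 0400: root only */
	if (proc_usergroup)
		return S_IRUSR | S_IRGRP;   /* 0440: root + configured group */
	return 0;                           /* proc_create() default, 0444 */
}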
71625diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
71626index 98ec494..4241d6d 100644
71627--- a/kernel/rtmutex-tester.c
71628+++ b/kernel/rtmutex-tester.c
71629@@ -20,7 +20,7 @@
71630 #define MAX_RT_TEST_MUTEXES 8
71631
71632 static spinlock_t rttest_lock;
71633-static atomic_t rttest_event;
71634+static atomic_unchecked_t rttest_event;
71635
71636 struct test_thread_data {
71637 int opcode;
71638@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
71639
71640 case RTTEST_LOCKCONT:
71641 td->mutexes[td->opdata] = 1;
71642- td->event = atomic_add_return(1, &rttest_event);
71643+ td->event = atomic_add_return_unchecked(1, &rttest_event);
71644 return 0;
71645
71646 case RTTEST_RESET:
71647@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
71648 return 0;
71649
71650 case RTTEST_RESETEVENT:
71651- atomic_set(&rttest_event, 0);
71652+ atomic_set_unchecked(&rttest_event, 0);
71653 return 0;
71654
71655 default:
71656@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
71657 return ret;
71658
71659 td->mutexes[id] = 1;
71660- td->event = atomic_add_return(1, &rttest_event);
71661+ td->event = atomic_add_return_unchecked(1, &rttest_event);
71662 rt_mutex_lock(&mutexes[id]);
71663- td->event = atomic_add_return(1, &rttest_event);
71664+ td->event = atomic_add_return_unchecked(1, &rttest_event);
71665 td->mutexes[id] = 4;
71666 return 0;
71667
71668@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
71669 return ret;
71670
71671 td->mutexes[id] = 1;
71672- td->event = atomic_add_return(1, &rttest_event);
71673+ td->event = atomic_add_return_unchecked(1, &rttest_event);
71674 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
71675- td->event = atomic_add_return(1, &rttest_event);
71676+ td->event = atomic_add_return_unchecked(1, &rttest_event);
71677 td->mutexes[id] = ret ? 0 : 4;
71678 return ret ? -EINTR : 0;
71679
71680@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
71681 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
71682 return ret;
71683
71684- td->event = atomic_add_return(1, &rttest_event);
71685+ td->event = atomic_add_return_unchecked(1, &rttest_event);
71686 rt_mutex_unlock(&mutexes[id]);
71687- td->event = atomic_add_return(1, &rttest_event);
71688+ td->event = atomic_add_return_unchecked(1, &rttest_event);
71689 td->mutexes[id] = 0;
71690 return 0;
71691
71692@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
71693 break;
71694
71695 td->mutexes[dat] = 2;
71696- td->event = atomic_add_return(1, &rttest_event);
71697+ td->event = atomic_add_return_unchecked(1, &rttest_event);
71698 break;
71699
71700 default:
71701@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
71702 return;
71703
71704 td->mutexes[dat] = 3;
71705- td->event = atomic_add_return(1, &rttest_event);
71706+ td->event = atomic_add_return_unchecked(1, &rttest_event);
71707 break;
71708
71709 case RTTEST_LOCKNOWAIT:
71710@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
71711 return;
71712
71713 td->mutexes[dat] = 1;
71714- td->event = atomic_add_return(1, &rttest_event);
71715+ td->event = atomic_add_return_unchecked(1, &rttest_event);
71716 return;
71717
71718 default:
71719diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
71720index 15f60d0..7e50319 100644
71721--- a/kernel/sched/auto_group.c
71722+++ b/kernel/sched/auto_group.c
71723@@ -11,7 +11,7 @@
71724
71725 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
71726 static struct autogroup autogroup_default;
71727-static atomic_t autogroup_seq_nr;
71728+static atomic_unchecked_t autogroup_seq_nr;
71729
71730 void __init autogroup_init(struct task_struct *init_task)
71731 {
71732@@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
71733
71734 kref_init(&ag->kref);
71735 init_rwsem(&ag->lock);
71736- ag->id = atomic_inc_return(&autogroup_seq_nr);
71737+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
71738 ag->tg = tg;
71739 #ifdef CONFIG_RT_GROUP_SCHED
71740 /*
71741diff --git a/kernel/sched/core.c b/kernel/sched/core.c
71742index 2d8927f..f617765 100644
71743--- a/kernel/sched/core.c
71744+++ b/kernel/sched/core.c
71745@@ -3562,6 +3562,8 @@ int can_nice(const struct task_struct *p, const int nice)
71746 /* convert nice value [19,-20] to rlimit style value [1,40] */
71747 int nice_rlim = 20 - nice;
71748
71749+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
71750+
71751 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
71752 capable(CAP_SYS_NICE));
71753 }
71754@@ -3595,7 +3597,8 @@ SYSCALL_DEFINE1(nice, int, increment)
71755 if (nice > 19)
71756 nice = 19;
71757
71758- if (increment < 0 && !can_nice(current, nice))
71759+ if (increment < 0 && (!can_nice(current, nice) ||
71760+ gr_handle_chroot_nice()))
71761 return -EPERM;
71762
71763 retval = security_task_setnice(current, nice);
71764@@ -3749,6 +3752,7 @@ recheck:
71765 unsigned long rlim_rtprio =
71766 task_rlimit(p, RLIMIT_RTPRIO);
71767
71768+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
71769 /* can't set/change the rt policy */
71770 if (policy != p->policy && !rlim_rtprio)
71771 return -EPERM;
71772diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
71773index 6b800a1..0c36227 100644
71774--- a/kernel/sched/fair.c
71775+++ b/kernel/sched/fair.c
71776@@ -4890,7 +4890,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
71777 * run_rebalance_domains is triggered when needed from the scheduler tick.
71778 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
71779 */
71780-static void run_rebalance_domains(struct softirq_action *h)
71781+static void run_rebalance_domains(void)
71782 {
71783 int this_cpu = smp_processor_id();
71784 struct rq *this_rq = cpu_rq(this_cpu);
71785diff --git a/kernel/signal.c b/kernel/signal.c
71786index e4d4014..76cf5dd 100644
71787--- a/kernel/signal.c
71788+++ b/kernel/signal.c
71789@@ -49,12 +49,12 @@ static struct kmem_cache *sigqueue_cachep;
71790
71791 int print_fatal_signals __read_mostly;
71792
71793-static void __user *sig_handler(struct task_struct *t, int sig)
71794+static __sighandler_t sig_handler(struct task_struct *t, int sig)
71795 {
71796 return t->sighand->action[sig - 1].sa.sa_handler;
71797 }
71798
71799-static int sig_handler_ignored(void __user *handler, int sig)
71800+static int sig_handler_ignored(__sighandler_t handler, int sig)
71801 {
71802 /* Is it explicitly or implicitly ignored? */
71803 return handler == SIG_IGN ||
71804@@ -63,7 +63,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
71805
71806 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
71807 {
71808- void __user *handler;
71809+ __sighandler_t handler;
71810
71811 handler = sig_handler(t, sig);
71812
71813@@ -367,6 +367,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
71814 atomic_inc(&user->sigpending);
71815 rcu_read_unlock();
71816
71817+ if (!override_rlimit)
71818+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
71819+
71820 if (override_rlimit ||
71821 atomic_read(&user->sigpending) <=
71822 task_rlimit(t, RLIMIT_SIGPENDING)) {
71823@@ -491,7 +494,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
71824
71825 int unhandled_signal(struct task_struct *tsk, int sig)
71826 {
71827- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
71828+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
71829 if (is_global_init(tsk))
71830 return 1;
71831 if (handler != SIG_IGN && handler != SIG_DFL)
71832@@ -817,6 +820,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
71833 }
71834 }
71835
71836+ /* allow glibc communication via tgkill to other threads in our
71837+ thread group */
71838+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
71839+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
71840+ && gr_handle_signal(t, sig))
71841+ return -EPERM;
71842+
71843 return security_task_kill(t, info, sig, 0);
71844 }
71845
71846@@ -1198,7 +1208,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
71847 return send_signal(sig, info, p, 1);
71848 }
71849
71850-static int
71851+int
71852 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
71853 {
71854 return send_signal(sig, info, t, 0);
71855@@ -1235,6 +1245,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
71856 unsigned long int flags;
71857 int ret, blocked, ignored;
71858 struct k_sigaction *action;
71859+ int is_unhandled = 0;
71860
71861 spin_lock_irqsave(&t->sighand->siglock, flags);
71862 action = &t->sighand->action[sig-1];
71863@@ -1249,9 +1260,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
71864 }
71865 if (action->sa.sa_handler == SIG_DFL)
71866 t->signal->flags &= ~SIGNAL_UNKILLABLE;
71867+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
71868+ is_unhandled = 1;
71869 ret = specific_send_sig_info(sig, info, t);
71870 spin_unlock_irqrestore(&t->sighand->siglock, flags);
71871
71872+ /* Only deal with unhandled signals; Java and other runtimes
71873+ trigger SIGSEGV during normal operation. */
71874+ if (is_unhandled) {
71875+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
71876+ gr_handle_crash(t, sig);
71877+ }
71878+
71879 return ret;
71880 }
71881
71882@@ -1318,8 +1338,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
71883 ret = check_kill_permission(sig, info, p);
71884 rcu_read_unlock();
71885
71886- if (!ret && sig)
71887+ if (!ret && sig) {
71888 ret = do_send_sig_info(sig, info, p, true);
71889+ if (!ret)
71890+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
71891+ }
71892
71893 return ret;
71894 }
71895@@ -2864,7 +2887,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
71896 int error = -ESRCH;
71897
71898 rcu_read_lock();
71899- p = find_task_by_vpid(pid);
71900+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
71901+ /* allow glibc communication via tgkill to other threads in our
71902+ thread group */
71903+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
71904+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
71905+ p = find_task_by_vpid_unrestricted(pid);
71906+ else
71907+#endif
71908+ p = find_task_by_vpid(pid);
71909 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
71910 error = check_kill_permission(sig, info, p);
71911 /*
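
The kernel/signal.c hunks above carve one pattern out of the new gr_handle_signal() and chroot-findtask restrictions: glibc's setxid machinery, which broadcasts SIGRTMIN+1 via tgkill to every thread in the process so that a setuid() in one thread applies to all of them. A minimal user-space sketch of that exempted pattern, for illustration only (this program is not part of the patch):

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static void noop(int sig) { (void)sig; }

int main(void)
{
	pid_t tgid = getpid();
	pid_t tid = (pid_t)syscall(SYS_gettid);

	/* Without a handler, the RT signal would terminate the process. */
	signal(SIGRTMIN + 1, noop);

	/* Same thread group, SI_TKILL, SIGRTMIN+1: exactly the case the
	 * hunks above exempt from the grsecurity signal checks. */
	if (syscall(SYS_tgkill, tgid, tid, SIGRTMIN + 1) != 0)
		perror("tgkill");
	else
		printf("signalled tid %d in tgid %d\n", (int)tid, (int)tgid);
	return 0;
}
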
71912diff --git a/kernel/softirq.c b/kernel/softirq.c
71913index cc96bdc..8bb9750 100644
71914--- a/kernel/softirq.c
71915+++ b/kernel/softirq.c
71916@@ -57,7 +57,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
71917
71918 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
71919
71920-char *softirq_to_name[NR_SOFTIRQS] = {
71921+const char * const softirq_to_name[NR_SOFTIRQS] = {
71922 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
71923 "TASKLET", "SCHED", "HRTIMER", "RCU"
71924 };
71925@@ -244,7 +244,7 @@ restart:
71926 kstat_incr_softirqs_this_cpu(vec_nr);
71927
71928 trace_softirq_entry(vec_nr);
71929- h->action(h);
71930+ h->action();
71931 trace_softirq_exit(vec_nr);
71932 if (unlikely(prev_count != preempt_count())) {
71933 printk(KERN_ERR "huh, entered softirq %u %s %p"
71934@@ -391,9 +391,11 @@ void __raise_softirq_irqoff(unsigned int nr)
71935 or_softirq_pending(1UL << nr);
71936 }
71937
71938-void open_softirq(int nr, void (*action)(struct softirq_action *))
71939+void open_softirq(int nr, void (*action)(void))
71940 {
71941- softirq_vec[nr].action = action;
71942+ pax_open_kernel();
71943+ *(void **)&softirq_vec[nr].action = action;
71944+ pax_close_kernel();
71945 }
71946
71947 /*
71948@@ -447,7 +449,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
71949
71950 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
71951
71952-static void tasklet_action(struct softirq_action *a)
71953+static void tasklet_action(void)
71954 {
71955 struct tasklet_struct *list;
71956
71957@@ -482,7 +484,7 @@ static void tasklet_action(struct softirq_action *a)
71958 }
71959 }
71960
71961-static void tasklet_hi_action(struct softirq_action *a)
71962+static void tasklet_hi_action(void)
71963 {
71964 struct tasklet_struct *list;
71965
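
In kernel/softirq.c the action callback drops its never-used struct softirq_action * argument, and softirq_vec becomes write-protected, so open_softirq() must go through pax_open_kernel()/pax_close_kernel() to install a handler. A rough user-space analogue of that unlock-write-relock idiom, assuming mprotect() stands in for the PaX helpers:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

typedef void (*action_t)(void);

static void timer_action(void) { puts("softirq-style handler ran"); }

int main(void)
{
	size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);
	action_t *vec = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
			     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (vec == MAP_FAILED)
		return 1;
	mprotect(vec, pagesz, PROT_READ);         /* table starts read-only */

	/* "pax_open_kernel": briefly writable, install, relock. */
	mprotect(vec, pagesz, PROT_READ | PROT_WRITE);
	vec[0] = timer_action;
	mprotect(vec, pagesz, PROT_READ);         /* "pax_close_kernel"     */

	vec[0]();                                 /* dispatch still works   */
	return 0;
}
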
71966diff --git a/kernel/srcu.c b/kernel/srcu.c
71967index 97c465e..d83f3bb 100644
71968--- a/kernel/srcu.c
71969+++ b/kernel/srcu.c
71970@@ -302,9 +302,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
71971 preempt_disable();
71972 idx = rcu_dereference_index_check(sp->completed,
71973 rcu_read_lock_sched_held()) & 0x1;
71974- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
71975+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
71976 smp_mb(); /* B */ /* Avoid leaking the critical section. */
71977- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
71978+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
71979 preempt_enable();
71980 return idx;
71981 }
71982@@ -320,7 +320,7 @@ void __srcu_read_unlock(struct srcu_struct *sp, int idx)
71983 {
71984 preempt_disable();
71985 smp_mb(); /* C */ /* Avoid leaking the critical section. */
71986- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
71987+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
71988 preempt_enable();
71989 }
71990 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
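
ACCESS_ONCE_RW is the PaX counterpart of ACCESS_ONCE: under the constification changes, ACCESS_ONCE yields a const-qualified lvalue so that stray writes fail to compile, and sites such as these SRCU per-CPU counters that genuinely write must say so. A sketch of the pair as commonly defined (the exact PaX definitions may differ):

#include <stdio.h>

/* Read-only variant: the const qualifier rejects accidental stores. */
#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
/* Writable variant: for counters that are legitimately updated. */
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

int main(void)
{
	unsigned long c = 0;

	ACCESS_ONCE_RW(c) += 1;    /* compiles: the write is explicit      */
	/* ACCESS_ONCE(c) += 1; */ /* would fail: assignment to const      */
	printf("c = %lu\n", ACCESS_ONCE(c));
	return 0;
}
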
71991diff --git a/kernel/sys.c b/kernel/sys.c
71992index e6e0ece..1f2e413 100644
71993--- a/kernel/sys.c
71994+++ b/kernel/sys.c
71995@@ -157,6 +157,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
71996 error = -EACCES;
71997 goto out;
71998 }
71999+
72000+ if (gr_handle_chroot_setpriority(p, niceval)) {
72001+ error = -EACCES;
72002+ goto out;
72003+ }
72004+
72005 no_nice = security_task_setnice(p, niceval);
72006 if (no_nice) {
72007 error = no_nice;
72008@@ -595,6 +601,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
72009 goto error;
72010 }
72011
72012+ if (gr_check_group_change(new->gid, new->egid, -1))
72013+ goto error;
72014+
72015 if (rgid != (gid_t) -1 ||
72016 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
72017 new->sgid = new->egid;
72018@@ -630,6 +639,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
72019 old = current_cred();
72020
72021 retval = -EPERM;
72022+
72023+ if (gr_check_group_change(kgid, kgid, kgid))
72024+ goto error;
72025+
72026 if (nsown_capable(CAP_SETGID))
72027 new->gid = new->egid = new->sgid = new->fsgid = kgid;
72028 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
72029@@ -647,7 +660,7 @@ error:
72030 /*
72031 * change the user struct in a credentials set to match the new UID
72032 */
72033-static int set_user(struct cred *new)
72034+int set_user(struct cred *new)
72035 {
72036 struct user_struct *new_user;
72037
72038@@ -727,6 +740,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
72039 goto error;
72040 }
72041
72042+ if (gr_check_user_change(new->uid, new->euid, -1))
72043+ goto error;
72044+
72045 if (!uid_eq(new->uid, old->uid)) {
72046 retval = set_user(new);
72047 if (retval < 0)
72048@@ -777,6 +793,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
72049 old = current_cred();
72050
72051 retval = -EPERM;
72052+
72053+ if (gr_check_crash_uid(kuid))
72054+ goto error;
72055+ if (gr_check_user_change(kuid, kuid, kuid))
72056+ goto error;
72057+
72058 if (nsown_capable(CAP_SETUID)) {
72059 new->suid = new->uid = kuid;
72060 if (!uid_eq(kuid, old->uid)) {
72061@@ -846,6 +868,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
72062 goto error;
72063 }
72064
72065+ if (gr_check_user_change(kruid, keuid, -1))
72066+ goto error;
72067+
72068 if (ruid != (uid_t) -1) {
72069 new->uid = kruid;
72070 if (!uid_eq(kruid, old->uid)) {
72071@@ -928,6 +953,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
72072 goto error;
72073 }
72074
72075+ if (gr_check_group_change(krgid, kegid, -1))
72076+ goto error;
72077+
72078 if (rgid != (gid_t) -1)
72079 new->gid = krgid;
72080 if (egid != (gid_t) -1)
72081@@ -981,6 +1009,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
72082 if (!uid_valid(kuid))
72083 return old_fsuid;
72084
72085+ if (gr_check_user_change(-1, -1, kuid))
72086+ goto error;
72087+
72088 new = prepare_creds();
72089 if (!new)
72090 return old_fsuid;
72091@@ -995,6 +1026,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
72092 }
72093 }
72094
72095+error:
72096 abort_creds(new);
72097 return old_fsuid;
72098
72099@@ -1027,12 +1059,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
72100 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
72101 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
72102 nsown_capable(CAP_SETGID)) {
72103+ if (gr_check_group_change(-1, -1, kgid))
72104+ goto error;
72105+
72106 if (!gid_eq(kgid, old->fsgid)) {
72107 new->fsgid = kgid;
72108 goto change_okay;
72109 }
72110 }
72111
72112+error:
72113 abort_creds(new);
72114 return old_fsgid;
72115
72116@@ -1340,19 +1376,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
72117 return -EFAULT;
72118
72119 down_read(&uts_sem);
72120- error = __copy_to_user(&name->sysname, &utsname()->sysname,
72121+ error = __copy_to_user(name->sysname, &utsname()->sysname,
72122 __OLD_UTS_LEN);
72123 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
72124- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
72125+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
72126 __OLD_UTS_LEN);
72127 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
72128- error |= __copy_to_user(&name->release, &utsname()->release,
72129+ error |= __copy_to_user(name->release, &utsname()->release,
72130 __OLD_UTS_LEN);
72131 error |= __put_user(0, name->release + __OLD_UTS_LEN);
72132- error |= __copy_to_user(&name->version, &utsname()->version,
72133+ error |= __copy_to_user(name->version, &utsname()->version,
72134 __OLD_UTS_LEN);
72135 error |= __put_user(0, name->version + __OLD_UTS_LEN);
72136- error |= __copy_to_user(&name->machine, &utsname()->machine,
72137+ error |= __copy_to_user(name->machine, &utsname()->machine,
72138 __OLD_UTS_LEN);
72139 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
72140 up_read(&uts_sem);
72141@@ -2026,7 +2062,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
72142 error = get_dumpable(me->mm);
72143 break;
72144 case PR_SET_DUMPABLE:
72145- if (arg2 < 0 || arg2 > 1) {
72146+ if (arg2 > 1) {
72147 error = -EINVAL;
72148 break;
72149 }
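
Most of the kernel/sys.c hunks insert gr_check_user_change()/gr_check_group_change() gates ahead of the credential-changing syscalls. The PR_SET_DUMPABLE hunk is an unrelated cleanup: arg2 is an unsigned long, so the removed arg2 < 0 test could never fire. The same dead test in miniature:

#include <stdio.h>

int main(void)
{
	unsigned long arg2 = (unsigned long)-1;  /* e.g. a caller passing -1 */

	if (arg2 < 0)                 /* always false for unsigned types;   */
		puts("unreachable");  /* gcc -Wextra flags this comparison  */
	if (arg2 > 1)
		puts("rejected with -EINVAL: arg2 > 1 suffices");
	return 0;
}
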
72150diff --git a/kernel/sysctl.c b/kernel/sysctl.c
72151index 26f65ea..df8e5ad 100644
72152--- a/kernel/sysctl.c
72153+++ b/kernel/sysctl.c
72154@@ -92,7 +92,6 @@
72155
72156
72157 #if defined(CONFIG_SYSCTL)
72158-
72159 /* External variables not in a header file. */
72160 extern int sysctl_overcommit_memory;
72161 extern int sysctl_overcommit_ratio;
72162@@ -172,10 +171,8 @@ static int proc_taint(struct ctl_table *table, int write,
72163 void __user *buffer, size_t *lenp, loff_t *ppos);
72164 #endif
72165
72166-#ifdef CONFIG_PRINTK
72167 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
72168 void __user *buffer, size_t *lenp, loff_t *ppos);
72169-#endif
72170
72171 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
72172 void __user *buffer, size_t *lenp, loff_t *ppos);
72173@@ -206,6 +203,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
72174
72175 #endif
72176
72177+extern struct ctl_table grsecurity_table[];
72178+
72179 static struct ctl_table kern_table[];
72180 static struct ctl_table vm_table[];
72181 static struct ctl_table fs_table[];
72182@@ -220,6 +219,20 @@ extern struct ctl_table epoll_table[];
72183 int sysctl_legacy_va_layout;
72184 #endif
72185
72186+#ifdef CONFIG_PAX_SOFTMODE
72187+static ctl_table pax_table[] = {
72188+ {
72189+ .procname = "softmode",
72190+ .data = &pax_softmode,
72191+ .maxlen = sizeof(unsigned int),
72192+ .mode = 0600,
72193+ .proc_handler = &proc_dointvec,
72194+ },
72195+
72196+ { }
72197+};
72198+#endif
72199+
72200 /* The default sysctl tables: */
72201
72202 static struct ctl_table sysctl_base_table[] = {
72203@@ -266,6 +279,22 @@ static int max_extfrag_threshold = 1000;
72204 #endif
72205
72206 static struct ctl_table kern_table[] = {
72207+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
72208+ {
72209+ .procname = "grsecurity",
72210+ .mode = 0500,
72211+ .child = grsecurity_table,
72212+ },
72213+#endif
72214+
72215+#ifdef CONFIG_PAX_SOFTMODE
72216+ {
72217+ .procname = "pax",
72218+ .mode = 0500,
72219+ .child = pax_table,
72220+ },
72221+#endif
72222+
72223 {
72224 .procname = "sched_child_runs_first",
72225 .data = &sysctl_sched_child_runs_first,
72226@@ -552,7 +581,7 @@ static struct ctl_table kern_table[] = {
72227 .data = &modprobe_path,
72228 .maxlen = KMOD_PATH_LEN,
72229 .mode = 0644,
72230- .proc_handler = proc_dostring,
72231+ .proc_handler = proc_dostring_modpriv,
72232 },
72233 {
72234 .procname = "modules_disabled",
72235@@ -719,16 +748,20 @@ static struct ctl_table kern_table[] = {
72236 .extra1 = &zero,
72237 .extra2 = &one,
72238 },
72239+#endif
72240 {
72241 .procname = "kptr_restrict",
72242 .data = &kptr_restrict,
72243 .maxlen = sizeof(int),
72244 .mode = 0644,
72245 .proc_handler = proc_dointvec_minmax_sysadmin,
72246+#ifdef CONFIG_GRKERNSEC_HIDESYM
72247+ .extra1 = &two,
72248+#else
72249 .extra1 = &zero,
72250+#endif
72251 .extra2 = &two,
72252 },
72253-#endif
72254 {
72255 .procname = "ngroups_max",
72256 .data = &ngroups_max,
72257@@ -1225,6 +1258,13 @@ static struct ctl_table vm_table[] = {
72258 .proc_handler = proc_dointvec_minmax,
72259 .extra1 = &zero,
72260 },
72261+ {
72262+ .procname = "heap_stack_gap",
72263+ .data = &sysctl_heap_stack_gap,
72264+ .maxlen = sizeof(sysctl_heap_stack_gap),
72265+ .mode = 0644,
72266+ .proc_handler = proc_doulongvec_minmax,
72267+ },
72268 #else
72269 {
72270 .procname = "nr_trim_pages",
72271@@ -1675,6 +1715,16 @@ int proc_dostring(struct ctl_table *table, int write,
72272 buffer, lenp, ppos);
72273 }
72274
72275+int proc_dostring_modpriv(struct ctl_table *table, int write,
72276+ void __user *buffer, size_t *lenp, loff_t *ppos)
72277+{
72278+ if (write && !capable(CAP_SYS_MODULE))
72279+ return -EPERM;
72280+
72281+ return _proc_do_string(table->data, table->maxlen, write,
72282+ buffer, lenp, ppos);
72283+}
72284+
72285 static size_t proc_skip_spaces(char **buf)
72286 {
72287 size_t ret;
72288@@ -1780,6 +1830,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
72289 len = strlen(tmp);
72290 if (len > *size)
72291 len = *size;
72292+ if (len > sizeof(tmp))
72293+ len = sizeof(tmp);
72294 if (copy_to_user(*buf, tmp, len))
72295 return -EFAULT;
72296 *size -= len;
72297@@ -1972,7 +2024,6 @@ static int proc_taint(struct ctl_table *table, int write,
72298 return err;
72299 }
72300
72301-#ifdef CONFIG_PRINTK
72302 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
72303 void __user *buffer, size_t *lenp, loff_t *ppos)
72304 {
72305@@ -1981,7 +2032,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
72306
72307 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
72308 }
72309-#endif
72310
72311 struct do_proc_dointvec_minmax_conv_param {
72312 int *min;
72313@@ -2128,8 +2178,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
72314 *i = val;
72315 } else {
72316 val = convdiv * (*i) / convmul;
72317- if (!first)
72318+ if (!first) {
72319 err = proc_put_char(&buffer, &left, '\t');
72320+ if (err)
72321+ break;
72322+ }
72323 err = proc_put_long(&buffer, &left, val, false);
72324 if (err)
72325 break;
72326@@ -2521,6 +2574,12 @@ int proc_dostring(struct ctl_table *table, int write,
72327 return -ENOSYS;
72328 }
72329
72330+int proc_dostring_modpriv(struct ctl_table *table, int write,
72331+ void __user *buffer, size_t *lenp, loff_t *ppos)
72332+{
72333+ return -ENOSYS;
72334+}
72335+
72336 int proc_dointvec(struct ctl_table *table, int write,
72337 void __user *buffer, size_t *lenp, loff_t *ppos)
72338 {
72339@@ -2577,5 +2636,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
72340 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
72341 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
72342 EXPORT_SYMBOL(proc_dostring);
72343+EXPORT_SYMBOL(proc_dostring_modpriv);
72344 EXPORT_SYMBOL(proc_doulongvec_minmax);
72345 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
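
proc_dostring_modpriv() leaves kernel.modprobe world-readable but requires CAP_SYS_MODULE to change it, cutting off a classic escalation path (pointing the usermode helper at an attacker-controlled binary). Assuming a kernel built with this patch, the effect is visible from an unprivileged shell:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/sys/kernel/modprobe", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Even writing back the default value is refused without the cap. */
	if (write(fd, "/sbin/modprobe\n", 15) < 0 && errno == EPERM)
		puts("write rejected: caller lacks CAP_SYS_MODULE");
	close(fd);
	return 0;
}
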
72346diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
72347index 65bdcf1..21eb831 100644
72348--- a/kernel/sysctl_binary.c
72349+++ b/kernel/sysctl_binary.c
72350@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
72351 int i;
72352
72353 set_fs(KERNEL_DS);
72354- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
72355+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
72356 set_fs(old_fs);
72357 if (result < 0)
72358 goto out_kfree;
72359@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
72360 }
72361
72362 set_fs(KERNEL_DS);
72363- result = vfs_write(file, buffer, str - buffer, &pos);
72364+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
72365 set_fs(old_fs);
72366 if (result < 0)
72367 goto out_kfree;
72368@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
72369 int i;
72370
72371 set_fs(KERNEL_DS);
72372- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
72373+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
72374 set_fs(old_fs);
72375 if (result < 0)
72376 goto out_kfree;
72377@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
72378 }
72379
72380 set_fs(KERNEL_DS);
72381- result = vfs_write(file, buffer, str - buffer, &pos);
72382+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
72383 set_fs(old_fs);
72384 if (result < 0)
72385 goto out_kfree;
72386@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
72387 int i;
72388
72389 set_fs(KERNEL_DS);
72390- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
72391+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
72392 set_fs(old_fs);
72393 if (result < 0)
72394 goto out;
72395@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
72396 __le16 dnaddr;
72397
72398 set_fs(KERNEL_DS);
72399- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
72400+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
72401 set_fs(old_fs);
72402 if (result < 0)
72403 goto out;
72404@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
72405 le16_to_cpu(dnaddr) & 0x3ff);
72406
72407 set_fs(KERNEL_DS);
72408- result = vfs_write(file, buf, len, &pos);
72409+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
72410 set_fs(old_fs);
72411 if (result < 0)
72412 goto out;
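
The sysctl_binary.c casts change no behavior: under set_fs(KERNEL_DS) these vfs_read()/vfs_write() calls deliberately pass kernel buffers, and __force_user tells the sparse/PaX address-space checker that the cast is intentional. The annotations plausibly reduce to nothing in a normal build, roughly as follows (the definitions here are an assumption, not copied from PaX):

#include <stdio.h>

#ifdef __CHECKER__          /* sparse defines this; gcc does not */
# define __user   __attribute__((noderef, address_space(1)))
# define __force  __attribute__((force))
#else
# define __user
# define __force
#endif
/* Combined annotation used above: "this cast to __user is on purpose". */
#define __force_user __force __user

static void take_user_buf(const char __user *buf) { (void)buf; }

int main(void)
{
	char kbuf[8] = "ok";

	/* Mirrors the patched call sites: a kernel-side buffer handed to an
	 * API expecting a user pointer, with the checker explicitly waved off. */
	take_user_buf((const char __force_user *)kbuf);
	puts(kbuf);
	return 0;
}
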
72413diff --git a/kernel/taskstats.c b/kernel/taskstats.c
72414index 145bb4d..b2aa969 100644
72415--- a/kernel/taskstats.c
72416+++ b/kernel/taskstats.c
72417@@ -28,9 +28,12 @@
72418 #include <linux/fs.h>
72419 #include <linux/file.h>
72420 #include <linux/pid_namespace.h>
72421+#include <linux/grsecurity.h>
72422 #include <net/genetlink.h>
72423 #include <linux/atomic.h>
72424
72425+extern int gr_is_taskstats_denied(int pid);
72426+
72427 /*
72428 * Maximum length of a cpumask that can be specified in
72429 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
72430@@ -570,6 +573,9 @@ err:
72431
72432 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
72433 {
72434+ if (gr_is_taskstats_denied(current->pid))
72435+ return -EACCES;
72436+
72437 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
72438 return cmd_attr_register_cpumask(info);
72439 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
72440diff --git a/kernel/time.c b/kernel/time.c
72441index d226c6a..c7c0960 100644
72442--- a/kernel/time.c
72443+++ b/kernel/time.c
72444@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
72445 return error;
72446
72447 if (tz) {
72448+ /* do_settimeofday(), called below, already logs, so don't log twice
72449+ */
72450+ if (!tv)
72451+ gr_log_timechange();
72452+
72453 sys_tz = *tz;
72454 update_vsyscall_tz();
72455 if (firsttime) {
72456diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
72457index f11d83b..d016d91 100644
72458--- a/kernel/time/alarmtimer.c
72459+++ b/kernel/time/alarmtimer.c
72460@@ -750,7 +750,7 @@ static int __init alarmtimer_init(void)
72461 struct platform_device *pdev;
72462 int error = 0;
72463 int i;
72464- struct k_clock alarm_clock = {
72465+ static struct k_clock alarm_clock = {
72466 .clock_getres = alarm_clock_getres,
72467 .clock_get = alarm_clock_get,
72468 .timer_create = alarm_timer_create,
72469diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
72470index f113755..ec24223 100644
72471--- a/kernel/time/tick-broadcast.c
72472+++ b/kernel/time/tick-broadcast.c
72473@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
72474 * then clear the broadcast bit.
72475 */
72476 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
72477- int cpu = smp_processor_id();
72478+ cpu = smp_processor_id();
72479
72480 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
72481 tick_broadcast_clear_oneshot(cpu);
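
The tick-broadcast.c change fixes a shadowed variable: the inner int cpu declaration hid the function's cpu parameter instead of updating it, the kind of bug gcc -Wshadow exists to catch:

#include <stdio.h>

static void configure(int cpu)
{
	{
		int cpu = 7;                      /* shadows the parameter */
		printf("inner sees cpu=%d\n", cpu);
	}
	printf("outer still has cpu=%d\n", cpu); /* never updated */
}

int main(void)
{
	configure(3);
	return 0;
}
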
72482diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
72483index e424970..4c7962b 100644
72484--- a/kernel/time/timekeeping.c
72485+++ b/kernel/time/timekeeping.c
72486@@ -15,6 +15,7 @@
72487 #include <linux/init.h>
72488 #include <linux/mm.h>
72489 #include <linux/sched.h>
72490+#include <linux/grsecurity.h>
72491 #include <linux/syscore_ops.h>
72492 #include <linux/clocksource.h>
72493 #include <linux/jiffies.h>
72494@@ -368,6 +369,8 @@ int do_settimeofday(const struct timespec *tv)
72495 if (!timespec_valid_strict(tv))
72496 return -EINVAL;
72497
72498+ gr_log_timechange();
72499+
72500 write_seqlock_irqsave(&tk->lock, flags);
72501
72502 timekeeping_forward_now(tk);
72503diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
72504index af5a7e9..715611a 100644
72505--- a/kernel/time/timer_list.c
72506+++ b/kernel/time/timer_list.c
72507@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
72508
72509 static void print_name_offset(struct seq_file *m, void *sym)
72510 {
72511+#ifdef CONFIG_GRKERNSEC_HIDESYM
72512+ SEQ_printf(m, "<%p>", NULL);
72513+#else
72514 char symname[KSYM_NAME_LEN];
72515
72516 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
72517 SEQ_printf(m, "<%pK>", sym);
72518 else
72519 SEQ_printf(m, "%s", symname);
72520+#endif
72521 }
72522
72523 static void
72524@@ -112,7 +116,11 @@ next_one:
72525 static void
72526 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
72527 {
72528+#ifdef CONFIG_GRKERNSEC_HIDESYM
72529+ SEQ_printf(m, " .base: %p\n", NULL);
72530+#else
72531 SEQ_printf(m, " .base: %pK\n", base);
72532+#endif
72533 SEQ_printf(m, " .index: %d\n",
72534 base->index);
72535 SEQ_printf(m, " .resolution: %Lu nsecs\n",
72536@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
72537 {
72538 struct proc_dir_entry *pe;
72539
72540+#ifdef CONFIG_GRKERNSEC_PROC_ADD
72541+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
72542+#else
72543 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
72544+#endif
72545 if (!pe)
72546 return -ENOMEM;
72547 return 0;
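
Under CONFIG_GRKERNSEC_HIDESYM, timer_list.c stops resolving and printing kernel addresses entirely (emitting a NULL placeholder instead) and the proc entry tightens from 0444 to 0400; this pairs with the kernel/sysctl.c hunk earlier that raises the kptr_restrict floor to 2. Assuming the patched kernel, the knob can be inspected from user space:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/kptr_restrict", "r");
	int v;

	if (f && fscanf(f, "%d", &v) == 1)
		printf("kptr_restrict = %d (2 hides %%pK values from everyone)\n", v);
	if (f)
		fclose(f);
	return 0;
}
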
72548diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
72549index 0b537f2..40d6c20 100644
72550--- a/kernel/time/timer_stats.c
72551+++ b/kernel/time/timer_stats.c
72552@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
72553 static unsigned long nr_entries;
72554 static struct entry entries[MAX_ENTRIES];
72555
72556-static atomic_t overflow_count;
72557+static atomic_unchecked_t overflow_count;
72558
72559 /*
72560 * The entries are in a hash-table, for fast lookup:
72561@@ -140,7 +140,7 @@ static void reset_entries(void)
72562 nr_entries = 0;
72563 memset(entries, 0, sizeof(entries));
72564 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
72565- atomic_set(&overflow_count, 0);
72566+ atomic_set_unchecked(&overflow_count, 0);
72567 }
72568
72569 static struct entry *alloc_entry(void)
72570@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
72571 if (likely(entry))
72572 entry->count++;
72573 else
72574- atomic_inc(&overflow_count);
72575+ atomic_inc_unchecked(&overflow_count);
72576
72577 out_unlock:
72578 raw_spin_unlock_irqrestore(lock, flags);
72579@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
72580
72581 static void print_name_offset(struct seq_file *m, unsigned long addr)
72582 {
72583+#ifdef CONFIG_GRKERNSEC_HIDESYM
72584+ seq_printf(m, "<%p>", NULL);
72585+#else
72586 char symname[KSYM_NAME_LEN];
72587
72588 if (lookup_symbol_name(addr, symname) < 0)
72589- seq_printf(m, "<%p>", (void *)addr);
72590+ seq_printf(m, "<%pK>", (void *)addr);
72591 else
72592 seq_printf(m, "%s", symname);
72593+#endif
72594 }
72595
72596 static int tstats_show(struct seq_file *m, void *v)
72597@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
72598
72599 seq_puts(m, "Timer Stats Version: v0.2\n");
72600 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
72601- if (atomic_read(&overflow_count))
72602+ if (atomic_read_unchecked(&overflow_count))
72603 seq_printf(m, "Overflow: %d entries\n",
72604- atomic_read(&overflow_count));
72605+ atomic_read_unchecked(&overflow_count));
72606
72607 for (i = 0; i < nr_entries; i++) {
72608 entry = entries + i;
72609@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
72610 {
72611 struct proc_dir_entry *pe;
72612
72613+#ifdef CONFIG_GRKERNSEC_PROC_ADD
72614+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
72615+#else
72616 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
72617+#endif
72618 if (!pe)
72619 return -ENOMEM;
72620 return 0;
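
atomic_unchecked_t is the opt-out from PaX's REFCOUNT hardening: ordinary atomic_t arithmetic traps on overflow to stop reference-count wraps, so a pure statistics counter like overflow_count, which may wrap harmlessly, moves to the unchecked variant. The distinction, approximated in user space:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	int refcount = INT_MAX;       /* stands in for a checked atomic_t */
	unsigned int stat = UINT_MAX; /* stands in for *_unchecked_t      */
	int r;

	if (__builtin_add_overflow(refcount, 1, &r))
		puts("checked counter: overflow detected, increment refused");

	stat++;                       /* wraps to 0, and that is fine     */
	printf("unchecked stat counter wrapped to %u\n", stat);
	return 0;
}
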
72621diff --git a/kernel/timer.c b/kernel/timer.c
72622index 367d008..46857a0 100644
72623--- a/kernel/timer.c
72624+++ b/kernel/timer.c
72625@@ -1363,7 +1363,7 @@ void update_process_times(int user_tick)
72626 /*
72627 * This function runs timers and the timer-tq in bottom half context.
72628 */
72629-static void run_timer_softirq(struct softirq_action *h)
72630+static void run_timer_softirq(void)
72631 {
72632 struct tvec_base *base = __this_cpu_read(tvec_bases);
72633
72634@@ -1772,7 +1772,7 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
72635 return NOTIFY_OK;
72636 }
72637
72638-static struct notifier_block __cpuinitdata timers_nb = {
72639+static struct notifier_block __cpuinitconst timers_nb = {
72640 .notifier_call = timer_cpu_notify,
72641 };
72642
72643diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
72644index c0bd030..62a1927 100644
72645--- a/kernel/trace/blktrace.c
72646+++ b/kernel/trace/blktrace.c
72647@@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
72648 struct blk_trace *bt = filp->private_data;
72649 char buf[16];
72650
72651- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
72652+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
72653
72654 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
72655 }
72656@@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
72657 return 1;
72658
72659 bt = buf->chan->private_data;
72660- atomic_inc(&bt->dropped);
72661+ atomic_inc_unchecked(&bt->dropped);
72662 return 0;
72663 }
72664
72665@@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
72666
72667 bt->dir = dir;
72668 bt->dev = dev;
72669- atomic_set(&bt->dropped, 0);
72670+ atomic_set_unchecked(&bt->dropped, 0);
72671
72672 ret = -EIO;
72673 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
72674diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
72675index 51b7159..18137d6 100644
72676--- a/kernel/trace/ftrace.c
72677+++ b/kernel/trace/ftrace.c
72678@@ -1874,12 +1874,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
72679 if (unlikely(ftrace_disabled))
72680 return 0;
72681
72682+ ret = ftrace_arch_code_modify_prepare();
72683+ FTRACE_WARN_ON(ret);
72684+ if (ret)
72685+ return 0;
72686+
72687 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
72688+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
72689 if (ret) {
72690 ftrace_bug(ret, ip);
72691- return 0;
72692 }
72693- return 1;
72694+ return ret ? 0 : 1;
72695 }
72696
72697 /*
72698@@ -2965,7 +2970,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
72699
72700 int
72701 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
72702- void *data)
72703+ void *data)
72704 {
72705 struct ftrace_func_probe *entry;
72706 struct ftrace_page *pg;
72707@@ -3832,8 +3837,10 @@ static int ftrace_process_locs(struct module *mod,
72708 if (!count)
72709 return 0;
72710
72711+ pax_open_kernel();
72712 sort(start, count, sizeof(*start),
72713 ftrace_cmp_ips, ftrace_swap_ips);
72714+ pax_close_kernel();
72715
72716 start_pg = ftrace_allocate_pages(count);
72717 if (!start_pg)
72718@@ -4541,8 +4548,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
72719 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
72720
72721 static int ftrace_graph_active;
72722-static struct notifier_block ftrace_suspend_notifier;
72723-
72724 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
72725 {
72726 return 0;
72727@@ -4686,6 +4691,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
72728 return NOTIFY_DONE;
72729 }
72730
72731+static struct notifier_block ftrace_suspend_notifier = {
72732+ .notifier_call = ftrace_suspend_notifier_call
72733+};
72734+
72735 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
72736 trace_func_graph_ent_t entryfunc)
72737 {
72738@@ -4699,7 +4708,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
72739 goto out;
72740 }
72741
72742- ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
72743 register_pm_notifier(&ftrace_suspend_notifier);
72744
72745 ftrace_graph_active++;
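
Two independent ftrace.c fixes: ftrace_code_disable() now brackets ftrace_make_nop() with ftrace_arch_code_modify_prepare()/post_process(), which is required once kernel text is write-protected, and ftrace_suspend_notifier gets a static initializer so its callback pointer is never assigned at run time, letting such structures be constified. The initializer pattern, with illustrative names:

#include <stdio.h>

struct notifier { int (*call)(unsigned long event); };

static int suspend_cb(unsigned long event)
{
	printf("event %lu\n", event);
	return 0;
}

/* Before: a mutable struct plus "nb.call = suspend_cb;" at run time.
 * After: fully initialized at compile time, so it can live in rodata. */
static const struct notifier nb = { .call = suspend_cb };

int main(void)
{
	return nb.call(1UL);
}
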
72746diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
72747index 4cb5e51..e7e05d9 100644
72748--- a/kernel/trace/ring_buffer.c
72749+++ b/kernel/trace/ring_buffer.c
72750@@ -346,9 +346,9 @@ struct buffer_data_page {
72751 */
72752 struct buffer_page {
72753 struct list_head list; /* list of buffer pages */
72754- local_t write; /* index for next write */
72755+ local_unchecked_t write; /* index for next write */
72756 unsigned read; /* index for next read */
72757- local_t entries; /* entries on this page */
72758+ local_unchecked_t entries; /* entries on this page */
72759 unsigned long real_end; /* real end of data */
72760 struct buffer_data_page *page; /* Actual data page */
72761 };
72762@@ -460,8 +460,8 @@ struct ring_buffer_per_cpu {
72763 unsigned long lost_events;
72764 unsigned long last_overrun;
72765 local_t entries_bytes;
72766- local_t commit_overrun;
72767- local_t overrun;
72768+ local_unchecked_t commit_overrun;
72769+ local_unchecked_t overrun;
72770 local_t entries;
72771 local_t committing;
72772 local_t commits;
72773@@ -860,8 +860,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
72774 *
72775 * We add a counter to the write field to denote this.
72776 */
72777- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
72778- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
72779+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
72780+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
72781
72782 /*
72783 * Just make sure we have seen our old_write and synchronize
72784@@ -889,8 +889,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
72785 * cmpxchg to only update if an interrupt did not already
72786 * do it for us. If the cmpxchg fails, we don't care.
72787 */
72788- (void)local_cmpxchg(&next_page->write, old_write, val);
72789- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
72790+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
72791+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
72792
72793 /*
72794 * No need to worry about races with clearing out the commit.
72795@@ -1249,12 +1249,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
72796
72797 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
72798 {
72799- return local_read(&bpage->entries) & RB_WRITE_MASK;
72800+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
72801 }
72802
72803 static inline unsigned long rb_page_write(struct buffer_page *bpage)
72804 {
72805- return local_read(&bpage->write) & RB_WRITE_MASK;
72806+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
72807 }
72808
72809 static int
72810@@ -1349,7 +1349,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
72811 * bytes consumed in ring buffer from here.
72812 * Increment overrun to account for the lost events.
72813 */
72814- local_add(page_entries, &cpu_buffer->overrun);
72815+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
72816 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
72817 }
72818
72819@@ -1905,7 +1905,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
72820 * it is our responsibility to update
72821 * the counters.
72822 */
72823- local_add(entries, &cpu_buffer->overrun);
72824+ local_add_unchecked(entries, &cpu_buffer->overrun);
72825 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
72826
72827 /*
72828@@ -2055,7 +2055,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
72829 if (tail == BUF_PAGE_SIZE)
72830 tail_page->real_end = 0;
72831
72832- local_sub(length, &tail_page->write);
72833+ local_sub_unchecked(length, &tail_page->write);
72834 return;
72835 }
72836
72837@@ -2090,7 +2090,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
72838 rb_event_set_padding(event);
72839
72840 /* Set the write back to the previous setting */
72841- local_sub(length, &tail_page->write);
72842+ local_sub_unchecked(length, &tail_page->write);
72843 return;
72844 }
72845
72846@@ -2102,7 +2102,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
72847
72848 /* Set write to end of buffer */
72849 length = (tail + length) - BUF_PAGE_SIZE;
72850- local_sub(length, &tail_page->write);
72851+ local_sub_unchecked(length, &tail_page->write);
72852 }
72853
72854 /*
72855@@ -2128,7 +2128,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
72856 * about it.
72857 */
72858 if (unlikely(next_page == commit_page)) {
72859- local_inc(&cpu_buffer->commit_overrun);
72860+ local_inc_unchecked(&cpu_buffer->commit_overrun);
72861 goto out_reset;
72862 }
72863
72864@@ -2182,7 +2182,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
72865 cpu_buffer->tail_page) &&
72866 (cpu_buffer->commit_page ==
72867 cpu_buffer->reader_page))) {
72868- local_inc(&cpu_buffer->commit_overrun);
72869+ local_inc_unchecked(&cpu_buffer->commit_overrun);
72870 goto out_reset;
72871 }
72872 }
72873@@ -2230,7 +2230,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
72874 length += RB_LEN_TIME_EXTEND;
72875
72876 tail_page = cpu_buffer->tail_page;
72877- write = local_add_return(length, &tail_page->write);
72878+ write = local_add_return_unchecked(length, &tail_page->write);
72879
72880 /* set write to only the index of the write */
72881 write &= RB_WRITE_MASK;
72882@@ -2247,7 +2247,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
72883 kmemcheck_annotate_bitfield(event, bitfield);
72884 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
72885
72886- local_inc(&tail_page->entries);
72887+ local_inc_unchecked(&tail_page->entries);
72888
72889 /*
72890 * If this is the first commit on the page, then update
72891@@ -2280,7 +2280,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
72892
72893 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
72894 unsigned long write_mask =
72895- local_read(&bpage->write) & ~RB_WRITE_MASK;
72896+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
72897 unsigned long event_length = rb_event_length(event);
72898 /*
72899 * This is on the tail page. It is possible that
72900@@ -2290,7 +2290,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
72901 */
72902 old_index += write_mask;
72903 new_index += write_mask;
72904- index = local_cmpxchg(&bpage->write, old_index, new_index);
72905+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
72906 if (index == old_index) {
72907 /* update counters */
72908 local_sub(event_length, &cpu_buffer->entries_bytes);
72909@@ -2629,7 +2629,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
72910
72911 /* Do the likely case first */
72912 if (likely(bpage->page == (void *)addr)) {
72913- local_dec(&bpage->entries);
72914+ local_dec_unchecked(&bpage->entries);
72915 return;
72916 }
72917
72918@@ -2641,7 +2641,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
72919 start = bpage;
72920 do {
72921 if (bpage->page == (void *)addr) {
72922- local_dec(&bpage->entries);
72923+ local_dec_unchecked(&bpage->entries);
72924 return;
72925 }
72926 rb_inc_page(cpu_buffer, &bpage);
72927@@ -2923,7 +2923,7 @@ static inline unsigned long
72928 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
72929 {
72930 return local_read(&cpu_buffer->entries) -
72931- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
72932+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
72933 }
72934
72935 /**
72936@@ -3011,7 +3011,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
72937 return 0;
72938
72939 cpu_buffer = buffer->buffers[cpu];
72940- ret = local_read(&cpu_buffer->overrun);
72941+ ret = local_read_unchecked(&cpu_buffer->overrun);
72942
72943 return ret;
72944 }
72945@@ -3032,7 +3032,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
72946 return 0;
72947
72948 cpu_buffer = buffer->buffers[cpu];
72949- ret = local_read(&cpu_buffer->commit_overrun);
72950+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
72951
72952 return ret;
72953 }
72954@@ -3077,7 +3077,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
72955 /* if you care about this being correct, lock the buffer */
72956 for_each_buffer_cpu(buffer, cpu) {
72957 cpu_buffer = buffer->buffers[cpu];
72958- overruns += local_read(&cpu_buffer->overrun);
72959+ overruns += local_read_unchecked(&cpu_buffer->overrun);
72960 }
72961
72962 return overruns;
72963@@ -3253,8 +3253,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
72964 /*
72965 * Reset the reader page to size zero.
72966 */
72967- local_set(&cpu_buffer->reader_page->write, 0);
72968- local_set(&cpu_buffer->reader_page->entries, 0);
72969+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
72970+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
72971 local_set(&cpu_buffer->reader_page->page->commit, 0);
72972 cpu_buffer->reader_page->real_end = 0;
72973
72974@@ -3288,7 +3288,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
72975 * want to compare with the last_overrun.
72976 */
72977 smp_mb();
72978- overwrite = local_read(&(cpu_buffer->overrun));
72979+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
72980
72981 /*
72982 * Here's the tricky part.
72983@@ -3858,8 +3858,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
72984
72985 cpu_buffer->head_page
72986 = list_entry(cpu_buffer->pages, struct buffer_page, list);
72987- local_set(&cpu_buffer->head_page->write, 0);
72988- local_set(&cpu_buffer->head_page->entries, 0);
72989+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
72990+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
72991 local_set(&cpu_buffer->head_page->page->commit, 0);
72992
72993 cpu_buffer->head_page->read = 0;
72994@@ -3869,14 +3869,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
72995
72996 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
72997 INIT_LIST_HEAD(&cpu_buffer->new_pages);
72998- local_set(&cpu_buffer->reader_page->write, 0);
72999- local_set(&cpu_buffer->reader_page->entries, 0);
73000+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
73001+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
73002 local_set(&cpu_buffer->reader_page->page->commit, 0);
73003 cpu_buffer->reader_page->read = 0;
73004
73005- local_set(&cpu_buffer->commit_overrun, 0);
73006+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
73007 local_set(&cpu_buffer->entries_bytes, 0);
73008- local_set(&cpu_buffer->overrun, 0);
73009+ local_set_unchecked(&cpu_buffer->overrun, 0);
73010 local_set(&cpu_buffer->entries, 0);
73011 local_set(&cpu_buffer->committing, 0);
73012 local_set(&cpu_buffer->commits, 0);
73013@@ -4279,8 +4279,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
73014 rb_init_page(bpage);
73015 bpage = reader->page;
73016 reader->page = *data_page;
73017- local_set(&reader->write, 0);
73018- local_set(&reader->entries, 0);
73019+ local_set_unchecked(&reader->write, 0);
73020+ local_set_unchecked(&reader->entries, 0);
73021 reader->read = 0;
73022 *data_page = bpage;
73023
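
The ring-buffer counters move to local_unchecked_t for the same reason as the timer_stats.c counters above: write, entries, and the overrun statistics wrap by design, with the useful bits recovered by masking against RB_WRITE_MASK, so overflow trapping would only produce false positives. The masking idiom in miniature (the mask value here is illustrative):

#include <stdio.h>

#define RB_WRITE_MASK 0xfffffUL   /* only the low bits index the page */

int main(void)
{
	unsigned long write = RB_WRITE_MASK - 1;

	write += 5;   /* the raw counter may pass the mask, or even wrap */
	printf("usable index = %lu\n", write & RB_WRITE_MASK);
	return 0;
}
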
73024diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
73025index 31e4f55..62da00f 100644
73026--- a/kernel/trace/trace.c
73027+++ b/kernel/trace/trace.c
73028@@ -4436,10 +4436,9 @@ static const struct file_operations tracing_dyn_info_fops = {
73029 };
73030 #endif
73031
73032-static struct dentry *d_tracer;
73033-
73034 struct dentry *tracing_init_dentry(void)
73035 {
73036+ static struct dentry *d_tracer;
73037 static int once;
73038
73039 if (d_tracer)
73040@@ -4459,10 +4458,9 @@ struct dentry *tracing_init_dentry(void)
73041 return d_tracer;
73042 }
73043
73044-static struct dentry *d_percpu;
73045-
73046 struct dentry *tracing_dentry_percpu(void)
73047 {
73048+ static struct dentry *d_percpu;
73049 static int once;
73050 struct dentry *d_tracer;
73051
73052diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
73053index d608d09..7eddcb1 100644
73054--- a/kernel/trace/trace_events.c
73055+++ b/kernel/trace/trace_events.c
73056@@ -1320,10 +1320,6 @@ static LIST_HEAD(ftrace_module_file_list);
73057 struct ftrace_module_file_ops {
73058 struct list_head list;
73059 struct module *mod;
73060- struct file_operations id;
73061- struct file_operations enable;
73062- struct file_operations format;
73063- struct file_operations filter;
73064 };
73065
73066 static struct ftrace_module_file_ops *
73067@@ -1344,17 +1340,12 @@ trace_create_file_ops(struct module *mod)
73068
73069 file_ops->mod = mod;
73070
73071- file_ops->id = ftrace_event_id_fops;
73072- file_ops->id.owner = mod;
73073-
73074- file_ops->enable = ftrace_enable_fops;
73075- file_ops->enable.owner = mod;
73076-
73077- file_ops->filter = ftrace_event_filter_fops;
73078- file_ops->filter.owner = mod;
73079-
73080- file_ops->format = ftrace_event_format_fops;
73081- file_ops->format.owner = mod;
73082+ pax_open_kernel();
73083+ *(void **)&mod->trace_id.owner = mod;
73084+ *(void **)&mod->trace_enable.owner = mod;
73085+ *(void **)&mod->trace_filter.owner = mod;
73086+ *(void **)&mod->trace_format.owner = mod;
73087+ pax_close_kernel();
73088
73089 list_add(&file_ops->list, &ftrace_module_file_list);
73090
73091@@ -1378,8 +1369,8 @@ static void trace_module_add_events(struct module *mod)
73092
73093 for_each_event(call, start, end) {
73094 __trace_add_event_call(*call, mod,
73095- &file_ops->id, &file_ops->enable,
73096- &file_ops->filter, &file_ops->format);
73097+ &mod->trace_id, &mod->trace_enable,
73098+ &mod->trace_filter, &mod->trace_format);
73099 }
73100 }
73101
73102diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
73103index fd3c8aa..5f324a6 100644
73104--- a/kernel/trace/trace_mmiotrace.c
73105+++ b/kernel/trace/trace_mmiotrace.c
73106@@ -24,7 +24,7 @@ struct header_iter {
73107 static struct trace_array *mmio_trace_array;
73108 static bool overrun_detected;
73109 static unsigned long prev_overruns;
73110-static atomic_t dropped_count;
73111+static atomic_unchecked_t dropped_count;
73112
73113 static void mmio_reset_data(struct trace_array *tr)
73114 {
73115@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
73116
73117 static unsigned long count_overruns(struct trace_iterator *iter)
73118 {
73119- unsigned long cnt = atomic_xchg(&dropped_count, 0);
73120+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
73121 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
73122
73123 if (over > prev_overruns)
73124@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
73125 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
73126 sizeof(*entry), 0, pc);
73127 if (!event) {
73128- atomic_inc(&dropped_count);
73129+ atomic_inc_unchecked(&dropped_count);
73130 return;
73131 }
73132 entry = ring_buffer_event_data(event);
73133@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
73134 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
73135 sizeof(*entry), 0, pc);
73136 if (!event) {
73137- atomic_inc(&dropped_count);
73138+ atomic_inc_unchecked(&dropped_count);
73139 return;
73140 }
73141 entry = ring_buffer_event_data(event);
73142diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
73143index 123b189..1e9e2a6 100644
73144--- a/kernel/trace/trace_output.c
73145+++ b/kernel/trace/trace_output.c
73146@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
73147
73148 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
73149 if (!IS_ERR(p)) {
73150- p = mangle_path(s->buffer + s->len, p, "\n");
73151+ p = mangle_path(s->buffer + s->len, p, "\n\\");
73152 if (p) {
73153 s->len = p - s->buffer;
73154 return 1;
73155@@ -824,14 +824,16 @@ int register_ftrace_event(struct trace_event *event)
73156 goto out;
73157 }
73158
73159+ pax_open_kernel();
73160 if (event->funcs->trace == NULL)
73161- event->funcs->trace = trace_nop_print;
73162+ *(void **)&event->funcs->trace = trace_nop_print;
73163 if (event->funcs->raw == NULL)
73164- event->funcs->raw = trace_nop_print;
73165+ *(void **)&event->funcs->raw = trace_nop_print;
73166 if (event->funcs->hex == NULL)
73167- event->funcs->hex = trace_nop_print;
73168+ *(void **)&event->funcs->hex = trace_nop_print;
73169 if (event->funcs->binary == NULL)
73170- event->funcs->binary = trace_nop_print;
73171+ *(void **)&event->funcs->binary = trace_nop_print;
73172+ pax_close_kernel();
73173
73174 key = event->type & (EVENT_HASHSIZE - 1);
73175
73176diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
73177index 0c1b1657..95337e9 100644
73178--- a/kernel/trace/trace_stack.c
73179+++ b/kernel/trace/trace_stack.c
73180@@ -53,7 +53,7 @@ static inline void check_stack(void)
73181 return;
73182
73183 /* we do not handle interrupt stacks yet */
73184- if (!object_is_on_stack(&this_size))
73185+ if (!object_starts_on_stack(&this_size))
73186 return;
73187
73188 local_irq_save(flags);
73189diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
73190index 28e9d6c9..50381bd 100644
73191--- a/lib/Kconfig.debug
73192+++ b/lib/Kconfig.debug
73193@@ -1278,6 +1278,7 @@ config LATENCYTOP
73194 depends on DEBUG_KERNEL
73195 depends on STACKTRACE_SUPPORT
73196 depends on PROC_FS
73197+ depends on !GRKERNSEC_HIDESYM
73198 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
73199 select KALLSYMS
73200 select KALLSYMS_ALL
73201@@ -1306,7 +1307,7 @@ config INTERVAL_TREE_TEST
73202
73203 config PROVIDE_OHCI1394_DMA_INIT
73204 bool "Remote debugging over FireWire early on boot"
73205- depends on PCI && X86
73206+ depends on PCI && X86 && !GRKERNSEC
73207 help
73208 If you want to debug problems which hang or crash the kernel early
73209 on boot and the crashing machine has a FireWire port, you can use
73210@@ -1335,7 +1336,7 @@ config PROVIDE_OHCI1394_DMA_INIT
73211
73212 config FIREWIRE_OHCI_REMOTE_DMA
73213 bool "Remote debugging over FireWire with firewire-ohci"
73214- depends on FIREWIRE_OHCI
73215+ depends on FIREWIRE_OHCI && !GRKERNSEC
73216 help
73217 This option lets you use the FireWire bus for remote debugging
73218 with help of the firewire-ohci driver. It enables unfiltered
73219diff --git a/lib/Makefile b/lib/Makefile
73220index a08b791..a3ff1eb 100644
73221--- a/lib/Makefile
73222+++ b/lib/Makefile
73223@@ -46,7 +46,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
73224
73225 obj-$(CONFIG_BTREE) += btree.o
73226 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
73227-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
73228+obj-y += list_debug.o
73229 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
73230
73231 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
73232diff --git a/lib/bitmap.c b/lib/bitmap.c
73233index 06fdfa1..97c5c7d 100644
73234--- a/lib/bitmap.c
73235+++ b/lib/bitmap.c
73236@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
73237 {
73238 int c, old_c, totaldigits, ndigits, nchunks, nbits;
73239 u32 chunk;
73240- const char __user __force *ubuf = (const char __user __force *)buf;
73241+ const char __user *ubuf = (const char __force_user *)buf;
73242
73243 bitmap_zero(maskp, nmaskbits);
73244
73245@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
73246 {
73247 if (!access_ok(VERIFY_READ, ubuf, ulen))
73248 return -EFAULT;
73249- return __bitmap_parse((const char __force *)ubuf,
73250+ return __bitmap_parse((const char __force_kernel *)ubuf,
73251 ulen, 1, maskp, nmaskbits);
73252
73253 }
73254@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
73255 {
73256 unsigned a, b;
73257 int c, old_c, totaldigits;
73258- const char __user __force *ubuf = (const char __user __force *)buf;
73259+ const char __user *ubuf = (const char __force_user *)buf;
73260 int exp_digit, in_range;
73261
73262 totaldigits = c = 0;
73263@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
73264 {
73265 if (!access_ok(VERIFY_READ, ubuf, ulen))
73266 return -EFAULT;
73267- return __bitmap_parselist((const char __force *)ubuf,
73268+ return __bitmap_parselist((const char __force_kernel *)ubuf,
73269 ulen, 1, maskp, nmaskbits);
73270 }
73271 EXPORT_SYMBOL(bitmap_parselist_user);
73272diff --git a/lib/bug.c b/lib/bug.c
73273index a28c141..2bd3d95 100644
73274--- a/lib/bug.c
73275+++ b/lib/bug.c
73276@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
73277 return BUG_TRAP_TYPE_NONE;
73278
73279 bug = find_bug(bugaddr);
73280+ if (!bug)
73281+ return BUG_TRAP_TYPE_NONE;
73282
73283 file = NULL;
73284 line = 0;
73285diff --git a/lib/debugobjects.c b/lib/debugobjects.c
73286index d11808c..dc2d6f8 100644
73287--- a/lib/debugobjects.c
73288+++ b/lib/debugobjects.c
73289@@ -287,7 +287,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
73290 if (limit > 4)
73291 return;
73292
73293- is_on_stack = object_is_on_stack(addr);
73294+ is_on_stack = object_starts_on_stack(addr);
73295 if (is_on_stack == onstack)
73296 return;
73297
73298diff --git a/lib/devres.c b/lib/devres.c
73299index 80b9c76..9e32279 100644
73300--- a/lib/devres.c
73301+++ b/lib/devres.c
73302@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
73303 void devm_iounmap(struct device *dev, void __iomem *addr)
73304 {
73305 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
73306- (void *)addr));
73307+ (void __force *)addr));
73308 iounmap(addr);
73309 }
73310 EXPORT_SYMBOL(devm_iounmap);
73311@@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
73312 {
73313 ioport_unmap(addr);
73314 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
73315- devm_ioport_map_match, (void *)addr));
73316+ devm_ioport_map_match, (void __force *)addr));
73317 }
73318 EXPORT_SYMBOL(devm_ioport_unmap);
73319
73320diff --git a/lib/dma-debug.c b/lib/dma-debug.c
73321index d84beb9..da44791 100644
73322--- a/lib/dma-debug.c
73323+++ b/lib/dma-debug.c
73324@@ -754,7 +754,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
73325
73326 void dma_debug_add_bus(struct bus_type *bus)
73327 {
73328- struct notifier_block *nb;
73329+ notifier_block_no_const *nb;
73330
73331 if (global_disable)
73332 return;
73333@@ -919,7 +919,7 @@ out:
73334
73335 static void check_for_stack(struct device *dev, void *addr)
73336 {
73337- if (object_is_on_stack(addr))
73338+ if (object_starts_on_stack(addr))
73339 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
73340 "stack [addr=%p]\n", addr);
73341 }
73342diff --git a/lib/inflate.c b/lib/inflate.c
73343index 013a761..c28f3fc 100644
73344--- a/lib/inflate.c
73345+++ b/lib/inflate.c
73346@@ -269,7 +269,7 @@ static void free(void *where)
73347 malloc_ptr = free_mem_ptr;
73348 }
73349 #else
73350-#define malloc(a) kmalloc(a, GFP_KERNEL)
73351+#define malloc(a) kmalloc((a), GFP_KERNEL)
73352 #define free(a) kfree(a)
73353 #endif
73354
73355diff --git a/lib/ioremap.c b/lib/ioremap.c
73356index 0c9216c..863bd89 100644
73357--- a/lib/ioremap.c
73358+++ b/lib/ioremap.c
73359@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
73360 unsigned long next;
73361
73362 phys_addr -= addr;
73363- pmd = pmd_alloc(&init_mm, pud, addr);
73364+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
73365 if (!pmd)
73366 return -ENOMEM;
73367 do {
73368@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
73369 unsigned long next;
73370
73371 phys_addr -= addr;
73372- pud = pud_alloc(&init_mm, pgd, addr);
73373+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
73374 if (!pud)
73375 return -ENOMEM;
73376 do {
73377diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
73378index bd2bea9..6b3c95e 100644
73379--- a/lib/is_single_threaded.c
73380+++ b/lib/is_single_threaded.c
73381@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
73382 struct task_struct *p, *t;
73383 bool ret;
73384
73385+ if (!mm)
73386+ return true;
73387+
73388 if (atomic_read(&task->signal->live) != 1)
73389 return false;
73390
73391diff --git a/lib/list_debug.c b/lib/list_debug.c
73392index c24c2f7..3fc5da0 100644
73393--- a/lib/list_debug.c
73394+++ b/lib/list_debug.c
73395@@ -11,7 +11,9 @@
73396 #include <linux/bug.h>
73397 #include <linux/kernel.h>
73398 #include <linux/rculist.h>
73399+#include <linux/mm.h>
73400
73401+#ifdef CONFIG_DEBUG_LIST
73402 /*
73403 * Insert a new entry between two known consecutive entries.
73404 *
73405@@ -19,21 +21,32 @@
73406 * the prev/next entries already!
73407 */
73408
73409-void __list_add(struct list_head *new,
73410- struct list_head *prev,
73411- struct list_head *next)
73412+static bool __list_add_debug(struct list_head *new,
73413+ struct list_head *prev,
73414+ struct list_head *next)
73415 {
73416- WARN(next->prev != prev,
73417+ if (WARN(next->prev != prev,
73418 "list_add corruption. next->prev should be "
73419 "prev (%p), but was %p. (next=%p).\n",
73420- prev, next->prev, next);
73421- WARN(prev->next != next,
73422+ prev, next->prev, next) ||
73423+ WARN(prev->next != next,
73424 "list_add corruption. prev->next should be "
73425 "next (%p), but was %p. (prev=%p).\n",
73426- next, prev->next, prev);
73427- WARN(new == prev || new == next,
73428+ next, prev->next, prev) ||
73429+ WARN(new == prev || new == next,
73430 "list_add double add: new=%p, prev=%p, next=%p.\n",
73431- new, prev, next);
73432+ new, prev, next))
73433+ return false;
73434+ return true;
73435+}
73436+
73437+void __list_add(struct list_head *new,
73438+ struct list_head *prev,
73439+ struct list_head *next)
73440+{
73441+ if (!__list_add_debug(new, prev, next))
73442+ return;
73443+
73444 next->prev = new;
73445 new->next = next;
73446 new->prev = prev;
73447@@ -41,7 +54,7 @@ void __list_add(struct list_head *new,
73448 }
73449 EXPORT_SYMBOL(__list_add);
73450
73451-void __list_del_entry(struct list_head *entry)
73452+static bool __list_del_entry_debug(struct list_head *entry)
73453 {
73454 struct list_head *prev, *next;
73455
73456@@ -60,9 +73,16 @@ void __list_del_entry(struct list_head *entry)
73457 WARN(next->prev != entry,
73458 "list_del corruption. next->prev should be %p, "
73459 "but was %p\n", entry, next->prev))
73460+ return false;
73461+ return true;
73462+}
73463+
73464+void __list_del_entry(struct list_head *entry)
73465+{
73466+ if (!__list_del_entry_debug(entry))
73467 return;
73468
73469- __list_del(prev, next);
73470+ __list_del(entry->prev, entry->next);
73471 }
73472 EXPORT_SYMBOL(__list_del_entry);
73473
73474@@ -86,15 +106,54 @@ EXPORT_SYMBOL(list_del);
73475 void __list_add_rcu(struct list_head *new,
73476 struct list_head *prev, struct list_head *next)
73477 {
73478- WARN(next->prev != prev,
73479+ if (WARN(next->prev != prev,
73480 "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
73481- prev, next->prev, next);
73482- WARN(prev->next != next,
73483+ prev, next->prev, next) ||
73484+ WARN(prev->next != next,
73485 "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
73486- next, prev->next, prev);
73487+ next, prev->next, prev))
73488+ return;
73489+
73490 new->next = next;
73491 new->prev = prev;
73492 rcu_assign_pointer(list_next_rcu(prev), new);
73493 next->prev = new;
73494 }
73495 EXPORT_SYMBOL(__list_add_rcu);
73496+#endif
73497+
73498+void pax_list_add_tail(struct list_head *new, struct list_head *head)
73499+{
73500+ struct list_head *prev, *next;
73501+
73502+ prev = head->prev;
73503+ next = head;
73504+
73505+#ifdef CONFIG_DEBUG_LIST
73506+ if (!__list_add_debug(new, prev, next))
73507+ return;
73508+#endif
73509+
73510+ pax_open_kernel();
73511+ next->prev = new;
73512+ new->next = next;
73513+ new->prev = prev;
73514+ prev->next = new;
73515+ pax_close_kernel();
73516+}
73517+EXPORT_SYMBOL(pax_list_add_tail);
73518+
73519+void pax_list_del(struct list_head *entry)
73520+{
73521+#ifdef CONFIG_DEBUG_LIST
73522+ if (!__list_del_entry_debug(entry))
73523+ return;
73524+#endif
73525+
73526+ pax_open_kernel();
73527+ __list_del(entry->prev, entry->next);
73528+ entry->next = LIST_POISON1;
73529+ entry->prev = LIST_POISON2;
73530+ pax_close_kernel();
73531+}
73532+EXPORT_SYMBOL(pax_list_del);
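The lib/list_debug.c change above splits the corruption checks out of __list_add()/__list_del_entry() into boolean helpers so that both the stock primitives and the new pax_list_add_tail()/pax_list_del() (which write through otherwise read-only kernel memory inside a pax_open_kernel()/pax_close_kernel() bracket) can share them, and so that a corrupted list now aborts the operation instead of linking through bad pointers after the WARN fires. A minimal userspace sketch of that validate-then-link pattern (names are ours, not the kernel's):

    #include <stdbool.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static bool add_is_sane(struct list_head *new,
                            struct list_head *prev, struct list_head *next)
    {
        if (next->prev != prev || prev->next != next ||
            new == prev || new == next) {
            fprintf(stderr, "list corruption detected, add refused\n");
            return false;
        }
        return true;
    }

    static void list_add_checked(struct list_head *new,
                                 struct list_head *prev, struct list_head *next)
    {
        if (!add_is_sane(new, prev, next))
            return;                /* leave the list untouched on corruption */
        next->prev = new;
        new->next = next;
        new->prev = prev;
        prev->next = new;
    }

    int main(void)
    {
        struct list_head head = { &head, &head }, a, b;

        list_add_checked(&a, &head, head.next);  /* ok: head <-> a */
        head.next->prev = &b;                    /* simulate corruption */
        list_add_checked(&b, &head, head.next);  /* detected and refused */
        return 0;
    }

With the old code the WARNs fired but the links were rewritten anyway; returning early keeps the damaged list observable for debugging instead of compounding the corruption.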
73533diff --git a/lib/radix-tree.c b/lib/radix-tree.c
73534index e796429..6e38f9f 100644
73535--- a/lib/radix-tree.c
73536+++ b/lib/radix-tree.c
73537@@ -92,7 +92,7 @@ struct radix_tree_preload {
73538 int nr;
73539 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
73540 };
73541-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
73542+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
73543
73544 static inline void *ptr_to_indirect(void *ptr)
73545 {
73546diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
73547index bb2b201..46abaf9 100644
73548--- a/lib/strncpy_from_user.c
73549+++ b/lib/strncpy_from_user.c
73550@@ -21,7 +21,7 @@
73551 */
73552 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
73553 {
73554- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
73555+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
73556 long res = 0;
73557
73558 /*
73559diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
73560index a28df52..3d55877 100644
73561--- a/lib/strnlen_user.c
73562+++ b/lib/strnlen_user.c
73563@@ -26,7 +26,7 @@
73564 */
73565 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
73566 {
73567- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
73568+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
73569 long align, res = 0;
73570 unsigned long c;
73571
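Both do_strncpy_from_user() and do_strnlen_user() get the same one-word change: making the word-at-a-time constants static const lets WORD_AT_A_TIME_CONSTANTS be materialized once in .rodata instead of being rebuilt in every call frame. A userspace sketch of the difference, with illustrative field names (the real struct word_at_a_time is per-architecture):

    struct word_constants { unsigned long low_bits, high_bits; };
    #define WORD_CONSTANTS { 0x0101010101010101UL, 0x8080808080808080UL }

    /* classic zero-byte detector, shown only to give the struct a use */
    unsigned long per_call(unsigned long w)
    {
        const struct word_constants c = WORD_CONSTANTS;   /* built each call */
        return (w - c.low_bits) & ~w & c.high_bits;
    }

    unsigned long shared(unsigned long w)
    {
        static const struct word_constants c = WORD_CONSTANTS; /* one .rodata copy */
        return (w - c.low_bits) & ~w & c.high_bits;
    }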
73572diff --git a/lib/vsprintf.c b/lib/vsprintf.c
73573index 39c99fe..18f060b 100644
73574--- a/lib/vsprintf.c
73575+++ b/lib/vsprintf.c
73576@@ -16,6 +16,9 @@
73577 * - scnprintf and vscnprintf
73578 */
73579
73580+#ifdef CONFIG_GRKERNSEC_HIDESYM
73581+#define __INCLUDED_BY_HIDESYM 1
73582+#endif
73583 #include <stdarg.h>
73584 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
73585 #include <linux/types.h>
73586@@ -533,7 +536,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
73587 char sym[KSYM_SYMBOL_LEN];
73588 if (ext == 'B')
73589 sprint_backtrace(sym, value);
73590- else if (ext != 'f' && ext != 's')
73591+ else if (ext != 'f' && ext != 's' && ext != 'a')
73592 sprint_symbol(sym, value);
73593 else
73594 sprint_symbol_no_offset(sym, value);
73595@@ -966,7 +969,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
73596 return number(buf, end, *(const netdev_features_t *)addr, spec);
73597 }
73598
73599+#ifdef CONFIG_GRKERNSEC_HIDESYM
73600+int kptr_restrict __read_mostly = 2;
73601+#else
73602 int kptr_restrict __read_mostly;
73603+#endif
73604
73605 /*
73606 * Show a '%p' thing. A kernel extension is that the '%p' is followed
73607@@ -980,6 +987,8 @@ int kptr_restrict __read_mostly;
73608 * - 'S' For symbolic direct pointers with offset
73609 * - 's' For symbolic direct pointers without offset
73610 * - 'B' For backtraced symbolic direct pointers with offset
73611+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
73612+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
73613 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
73614 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
73615 * - 'M' For a 6-byte MAC address, it prints the address in the
73616@@ -1035,12 +1044,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
73617
73618 if (!ptr && *fmt != 'K') {
73619 /*
73620- * Print (null) with the same width as a pointer so it makes
73621+ * Print (nil) with the same width as a pointer so it makes
73622 * tabular output look nice.
73623 */
73624 if (spec.field_width == -1)
73625 spec.field_width = default_width;
73626- return string(buf, end, "(null)", spec);
73627+ return string(buf, end, "(nil)", spec);
73628 }
73629
73630 switch (*fmt) {
73631@@ -1050,6 +1059,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
73632 /* Fallthrough */
73633 case 'S':
73634 case 's':
73635+#ifdef CONFIG_GRKERNSEC_HIDESYM
73636+ break;
73637+#else
73638+ return symbol_string(buf, end, ptr, spec, *fmt);
73639+#endif
73640+ case 'A':
73641+ case 'a':
73642 case 'B':
73643 return symbol_string(buf, end, ptr, spec, *fmt);
73644 case 'R':
73645@@ -1090,6 +1106,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
73646 va_end(va);
73647 return buf;
73648 }
73649+ case 'P':
73650+ break;
73651 case 'K':
73652 /*
73653 * %pK cannot be used in IRQ context because its test
73654@@ -1113,6 +1131,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
73655 }
73656 break;
73657 }
73658+
73659+#ifdef CONFIG_GRKERNSEC_HIDESYM
73660+ /* 'P' = approved pointers to copy to userland,
73661+	   as in the /proc/kallsyms case, where we make it display nothing
73662+	   for non-root users and the real contents for root users.
73663+	   Also ignore 'K' pointers, since we force their NULLing for
73664+	   non-root users above.
73665+	 */
73666+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
73667+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
73668+ dump_stack();
73669+ ptr = NULL;
73670+ }
73671+#endif
73672+
73673 spec.flags |= SMALL;
73674 if (spec.field_width == -1) {
73675 spec.field_width = default_width;
73676@@ -1831,11 +1864,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
73677 typeof(type) value; \
73678 if (sizeof(type) == 8) { \
73679 args = PTR_ALIGN(args, sizeof(u32)); \
73680- *(u32 *)&value = *(u32 *)args; \
73681- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
73682+ *(u32 *)&value = *(const u32 *)args; \
73683+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
73684 } else { \
73685 args = PTR_ALIGN(args, sizeof(type)); \
73686- value = *(typeof(type) *)args; \
73687+ value = *(const typeof(type) *)args; \
73688 } \
73689 args += sizeof(type); \
73690 value; \
73691@@ -1898,7 +1931,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
73692 case FORMAT_TYPE_STR: {
73693 const char *str_arg = args;
73694 args += strlen(str_arg) + 1;
73695- str = string(str, end, (char *)str_arg, spec);
73696+ str = string(str, end, str_arg, spec);
73697 break;
73698 }
73699
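The vsprintf.c changes implement the printing side of GRKERNSEC_HIDESYM: kptr_restrict now defaults to 2, '%p' output headed for a userland-reachable buffer is scanned and NULLed with an infoleak alert if it still carries a kernel address, and '%pS'/'%ps' fall through to that censored raw-address path, while the two new specifiers 'A' (symbol plus offset) and 'a' (symbol only) mark call sites whose symbol output is explicitly approved. The mm/kmemleak.c hunk later in this patch switches its backtrace dump from %pS to %pA for exactly this reason. A hypothetical call site opting in (ip/fn are our stand-ins, semantics as documented in the hunk above):

    /* diagnostics that should keep printing real symbols under
     * GRKERNSEC_HIDESYM opt in via the new specifiers */
    pr_info("stall at [<%p>] %pA\n", ip, ip);    /* 'A': symbol + offset */
    pr_info("handler %pa registered\n", fn);     /* 'a': symbol, no offset */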
73700diff --git a/localversion-grsec b/localversion-grsec
73701new file mode 100644
73702index 0000000..7cd6065
73703--- /dev/null
73704+++ b/localversion-grsec
73705@@ -0,0 +1 @@
73706+-grsec
73707diff --git a/mm/Kconfig b/mm/Kconfig
73708index a3f8ddd..f31e92e 100644
73709--- a/mm/Kconfig
73710+++ b/mm/Kconfig
73711@@ -252,10 +252,10 @@ config KSM
73712 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
73713
73714 config DEFAULT_MMAP_MIN_ADDR
73715- int "Low address space to protect from user allocation"
73716+ int "Low address space to protect from user allocation"
73717 depends on MMU
73718- default 4096
73719- help
73720+ default 65536
73721+ help
73722 This is the portion of low virtual memory which should be protected
73723 from userspace allocation. Keeping a user from writing to low pages
73724 can help reduce the impact of kernel NULL pointer bugs.
73725@@ -286,7 +286,7 @@ config MEMORY_FAILURE
73726
73727 config HWPOISON_INJECT
73728 tristate "HWPoison pages injector"
73729- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
73730+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
73731 select PROC_PAGE_MONITOR
73732
73733 config NOMMU_INITIAL_TRIM_EXCESS
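Two hardening knobs in mm/Kconfig: the default mmap_min_addr grows from one page to 64 KiB, so the low pages an attacker needs for a NULL-pointer-dereference exploit can never be mapped by default, and the hwpoison injector becomes unselectable on grsecurity kernels. A quick userspace probe of the new default's effect (the exact errno can vary by LSM; EPERM is typical):

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        /* 0x1000 is below the new 65536-byte floor */
        void *p = mmap((void *)0x1000, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
        if (p == MAP_FAILED)
            perror("mmap below mmap_min_addr");  /* expected on this config */
        return 0;
    }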
73734diff --git a/mm/filemap.c b/mm/filemap.c
73735index 83efee7..3f99381 100644
73736--- a/mm/filemap.c
73737+++ b/mm/filemap.c
73738@@ -1747,7 +1747,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
73739 struct address_space *mapping = file->f_mapping;
73740
73741 if (!mapping->a_ops->readpage)
73742- return -ENOEXEC;
73743+ return -ENODEV;
73744 file_accessed(file);
73745 vma->vm_ops = &generic_file_vm_ops;
73746 return 0;
73747@@ -2087,6 +2087,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
73748 *pos = i_size_read(inode);
73749
73750 if (limit != RLIM_INFINITY) {
73751+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
73752 if (*pos >= limit) {
73753 send_sig(SIGXFSZ, current, 0);
73754 return -EFBIG;
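Besides switching the no-readpage mmap error to -ENODEV, the filemap.c hunk adds the first of many gr_learn_resource() hooks (RLIMIT_FSIZE here; RLIMIT_MEMLOCK and RLIMIT_DATA appear in the mm/mlock.c and mm/mmap.c diffs below). The hook feeds grsecurity's RBAC learning mode, which records the largest value of each resource a task was observed to need so that tight per-subject limits can be proposed later. A hypothetical shape of that hook and a toy model of the recorder (the real implementation lives in grsecurity's RBAC code, not in this excerpt):

    struct task_struct;

    void gr_learn_resource(const struct task_struct *task, int res,
                           unsigned long wanted, int gt);

    /* toy model of what such a recorder keeps per subject/resource */
    static unsigned long observed_max[16];

    static void learn(int res, unsigned long wanted)
    {
        if (wanted > observed_max[res])
            observed_max[res] = wanted;
    }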
73755diff --git a/mm/fremap.c b/mm/fremap.c
73756index a0aaf0e..20325c3 100644
73757--- a/mm/fremap.c
73758+++ b/mm/fremap.c
73759@@ -157,6 +157,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
73760 retry:
73761 vma = find_vma(mm, start);
73762
73763+#ifdef CONFIG_PAX_SEGMEXEC
73764+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
73765+ goto out;
73766+#endif
73767+
73768 /*
73769 * Make sure the vma is shared, that it supports prefaulting,
73770 * and that the remapped range is valid and fully within
73771diff --git a/mm/highmem.c b/mm/highmem.c
73772index 09fc744..3936897 100644
73773--- a/mm/highmem.c
73774+++ b/mm/highmem.c
73775@@ -138,9 +138,10 @@ static void flush_all_zero_pkmaps(void)
73776 * So no dangers, even with speculative execution.
73777 */
73778 page = pte_page(pkmap_page_table[i]);
73779+ pax_open_kernel();
73780 pte_clear(&init_mm, (unsigned long)page_address(page),
73781 &pkmap_page_table[i]);
73782-
73783+ pax_close_kernel();
73784 set_page_address(page, NULL);
73785 need_flush = 1;
73786 }
73787@@ -199,9 +200,11 @@ start:
73788 }
73789 }
73790 vaddr = PKMAP_ADDR(last_pkmap_nr);
73791+
73792+ pax_open_kernel();
73793 set_pte_at(&init_mm, vaddr,
73794 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
73795-
73796+ pax_close_kernel();
73797 pkmap_count[last_pkmap_nr] = 1;
73798 set_page_address(page, (void *)vaddr);
73799
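The highmem.c hunks bracket the two places that rewrite pkmap_page_table entries with pax_open_kernel()/pax_close_kernel(): under PaX's KERNEXEC the kernel page tables are kept read-only, and writes to them are legal only inside such a bracket. A userspace analogue of the pattern using mprotect(2):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        char *tbl = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (tbl == MAP_FAILED)
            return 1;

        strcpy(tbl, "initial");
        mprotect(tbl, pagesz, PROT_READ);               /* table is now RO */

        mprotect(tbl, pagesz, PROT_READ | PROT_WRITE);  /* "open" */
        strcpy(tbl, "updated");                         /* sanctioned write */
        mprotect(tbl, pagesz, PROT_READ);               /* "close" */

        puts(tbl);
        return 0;
    }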
73800diff --git a/mm/huge_memory.c b/mm/huge_memory.c
73801index 40f17c3..c1cc011 100644
73802--- a/mm/huge_memory.c
73803+++ b/mm/huge_memory.c
73804@@ -710,7 +710,7 @@ out:
73805 * run pte_offset_map on the pmd, if an huge pmd could
73806 * materialize from under us from a different thread.
73807 */
73808- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
73809+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
73810 return VM_FAULT_OOM;
73811 /* if an huge pmd materialized from under us just retry later */
73812 if (unlikely(pmd_trans_huge(*pmd)))
73813diff --git a/mm/hugetlb.c b/mm/hugetlb.c
73814index f198aca..a19a5a5 100644
73815--- a/mm/hugetlb.c
73816+++ b/mm/hugetlb.c
73817@@ -2509,6 +2509,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
73818 return 1;
73819 }
73820
73821+#ifdef CONFIG_PAX_SEGMEXEC
73822+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
73823+{
73824+ struct mm_struct *mm = vma->vm_mm;
73825+ struct vm_area_struct *vma_m;
73826+ unsigned long address_m;
73827+ pte_t *ptep_m;
73828+
73829+ vma_m = pax_find_mirror_vma(vma);
73830+ if (!vma_m)
73831+ return;
73832+
73833+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
73834+ address_m = address + SEGMEXEC_TASK_SIZE;
73835+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
73836+ get_page(page_m);
73837+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
73838+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
73839+}
73840+#endif
73841+
73842 /*
73843 * Hugetlb_cow() should be called with page lock of the original hugepage held.
73844 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
73845@@ -2627,6 +2648,11 @@ retry_avoidcopy:
73846 make_huge_pte(vma, new_page, 1));
73847 page_remove_rmap(old_page);
73848 hugepage_add_new_anon_rmap(new_page, vma, address);
73849+
73850+#ifdef CONFIG_PAX_SEGMEXEC
73851+ pax_mirror_huge_pte(vma, address, new_page);
73852+#endif
73853+
73854 /* Make the old page be freed below */
73855 new_page = old_page;
73856 }
73857@@ -2786,6 +2812,10 @@ retry:
73858 && (vma->vm_flags & VM_SHARED)));
73859 set_huge_pte_at(mm, address, ptep, new_pte);
73860
73861+#ifdef CONFIG_PAX_SEGMEXEC
73862+ pax_mirror_huge_pte(vma, address, page);
73863+#endif
73864+
73865 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
73866 /* Optimization, do the COW without a second fault */
73867 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
73868@@ -2815,6 +2845,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
73869 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
73870 struct hstate *h = hstate_vma(vma);
73871
73872+#ifdef CONFIG_PAX_SEGMEXEC
73873+ struct vm_area_struct *vma_m;
73874+#endif
73875+
73876 address &= huge_page_mask(h);
73877
73878 ptep = huge_pte_offset(mm, address);
73879@@ -2828,6 +2862,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
73880 VM_FAULT_SET_HINDEX(hstate_index(h));
73881 }
73882
73883+#ifdef CONFIG_PAX_SEGMEXEC
73884+ vma_m = pax_find_mirror_vma(vma);
73885+ if (vma_m) {
73886+ unsigned long address_m;
73887+
73888+ if (vma->vm_start > vma_m->vm_start) {
73889+ address_m = address;
73890+ address -= SEGMEXEC_TASK_SIZE;
73891+ vma = vma_m;
73892+ h = hstate_vma(vma);
73893+ } else
73894+ address_m = address + SEGMEXEC_TASK_SIZE;
73895+
73896+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
73897+ return VM_FAULT_OOM;
73898+ address_m &= HPAGE_MASK;
73899+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
73900+ }
73901+#endif
73902+
73903 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
73904 if (!ptep)
73905 return VM_FAULT_OOM;
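The hugetlb.c additions extend PaX's SEGMEXEC mirroring to huge pages: every mapping in the lower half of the split address space has a mirror at a fixed offset above it, pax_mirror_huge_pte() replays a freshly installed huge PTE into that mirror, and hugetlb_fault() first normalizes a fault in the upper half back to the lower vma before pre-allocating and clearing the mirror slot. The arithmetic, as we read it for i386 with the default 3 GiB split (the constant is illustrative, not quoted from this excerpt):

    #define SEGMEXEC_TASK_SIZE 0x60000000UL   /* TASK_SIZE / 2 on i386 */

    /* every mapping below the split has a mirror at a fixed offset above */
    static unsigned long mirror_address(unsigned long addr)
    {
        return addr + SEGMEXEC_TASK_SIZE;  /* caller guarantees addr < split,
                                              cf. the BUG_ON in the hunk */
    }

    /* fault handling normalizes to the lower vma first, cf. hugetlb_fault() */
    static unsigned long normalize_address(unsigned long addr)
    {
        return addr >= SEGMEXEC_TASK_SIZE ? addr - SEGMEXEC_TASK_SIZE : addr;
    }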
73906diff --git a/mm/internal.h b/mm/internal.h
73907index 3c5197d..08d0065 100644
73908--- a/mm/internal.h
73909+++ b/mm/internal.h
73910@@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
73911 * in mm/page_alloc.c
73912 */
73913 extern void __free_pages_bootmem(struct page *page, unsigned int order);
73914+extern void free_compound_page(struct page *page);
73915 extern void prep_compound_page(struct page *page, unsigned long order);
73916 #ifdef CONFIG_MEMORY_FAILURE
73917 extern bool is_free_buddy_page(struct page *page);
73918diff --git a/mm/kmemleak.c b/mm/kmemleak.c
73919index a217cc5..74c9ec0 100644
73920--- a/mm/kmemleak.c
73921+++ b/mm/kmemleak.c
73922@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
73923
73924 for (i = 0; i < object->trace_len; i++) {
73925 void *ptr = (void *)object->trace[i];
73926- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
73927+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
73928 }
73929 }
73930
73931diff --git a/mm/maccess.c b/mm/maccess.c
73932index d53adf9..03a24bf 100644
73933--- a/mm/maccess.c
73934+++ b/mm/maccess.c
73935@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
73936 set_fs(KERNEL_DS);
73937 pagefault_disable();
73938 ret = __copy_from_user_inatomic(dst,
73939- (__force const void __user *)src, size);
73940+ (const void __force_user *)src, size);
73941 pagefault_enable();
73942 set_fs(old_fs);
73943
73944@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
73945
73946 set_fs(KERNEL_DS);
73947 pagefault_disable();
73948- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
73949+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
73950 pagefault_enable();
73951 set_fs(old_fs);
73952
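The maccess.c change is typical of many hunks in this patch: the bare __force cast is replaced by __force_user/__force_kernel so the direction of each deliberate address-space crossing is recorded and sparse can still flag accidental mixes. Our reading of how grsecurity's compiler.h spells these annotations (an assumption; the definitions are not part of this excerpt):

    #ifdef __CHECKER__
    # define __force_user   __force __user    /* kernel ptr used as user ptr */
    # define __force_kernel __force __kernel  /* user ptr used as kernel ptr */
    #else
    # define __force_user
    # define __force_kernel
    #endif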
73953diff --git a/mm/madvise.c b/mm/madvise.c
73954index 03dfa5c..b032917 100644
73955--- a/mm/madvise.c
73956+++ b/mm/madvise.c
73957@@ -48,6 +48,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
73958 pgoff_t pgoff;
73959 unsigned long new_flags = vma->vm_flags;
73960
73961+#ifdef CONFIG_PAX_SEGMEXEC
73962+ struct vm_area_struct *vma_m;
73963+#endif
73964+
73965 switch (behavior) {
73966 case MADV_NORMAL:
73967 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
73968@@ -123,6 +127,13 @@ success:
73969 /*
73970 * vm_flags is protected by the mmap_sem held in write mode.
73971 */
73972+
73973+#ifdef CONFIG_PAX_SEGMEXEC
73974+ vma_m = pax_find_mirror_vma(vma);
73975+ if (vma_m)
73976+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
73977+#endif
73978+
73979 vma->vm_flags = new_flags;
73980
73981 out:
73982@@ -181,6 +192,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
73983 struct vm_area_struct ** prev,
73984 unsigned long start, unsigned long end)
73985 {
73986+
73987+#ifdef CONFIG_PAX_SEGMEXEC
73988+ struct vm_area_struct *vma_m;
73989+#endif
73990+
73991 *prev = vma;
73992 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
73993 return -EINVAL;
73994@@ -193,6 +209,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
73995 zap_page_range(vma, start, end - start, &details);
73996 } else
73997 zap_page_range(vma, start, end - start, NULL);
73998+
73999+#ifdef CONFIG_PAX_SEGMEXEC
74000+ vma_m = pax_find_mirror_vma(vma);
74001+ if (vma_m) {
74002+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
74003+ struct zap_details details = {
74004+ .nonlinear_vma = vma_m,
74005+ .last_index = ULONG_MAX,
74006+ };
74007+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
74008+ } else
74009+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
74010+ }
74011+#endif
74012+
74013 return 0;
74014 }
74015
74016@@ -397,6 +428,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
74017 if (end < start)
74018 goto out;
74019
74020+#ifdef CONFIG_PAX_SEGMEXEC
74021+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
74022+ if (end > SEGMEXEC_TASK_SIZE)
74023+ goto out;
74024+ } else
74025+#endif
74026+
74027+ if (end > TASK_SIZE)
74028+ goto out;
74029+
74030 error = 0;
74031 if (end == start)
74032 goto out;
74033diff --git a/mm/memory-failure.c b/mm/memory-failure.c
74034index 8b20278..05dac18 100644
74035--- a/mm/memory-failure.c
74036+++ b/mm/memory-failure.c
74037@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
74038
74039 int sysctl_memory_failure_recovery __read_mostly = 1;
74040
74041-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
74042+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
74043
74044 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
74045
74046@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
74047 pfn, t->comm, t->pid);
74048 si.si_signo = SIGBUS;
74049 si.si_errno = 0;
74050- si.si_addr = (void *)addr;
74051+ si.si_addr = (void __user *)addr;
74052 #ifdef __ARCH_SI_TRAPNO
74053 si.si_trapno = trapno;
74054 #endif
74055@@ -1040,7 +1040,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
74056 }
74057
74058 nr_pages = 1 << compound_trans_order(hpage);
74059- atomic_long_add(nr_pages, &mce_bad_pages);
74060+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
74061
74062 /*
74063 * We need/can do nothing about count=0 pages.
74064@@ -1070,7 +1070,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
74065 if (!PageHWPoison(hpage)
74066 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
74067 || (p != hpage && TestSetPageHWPoison(hpage))) {
74068- atomic_long_sub(nr_pages, &mce_bad_pages);
74069+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
74070 return 0;
74071 }
74072 set_page_hwpoison_huge_page(hpage);
74073@@ -1128,7 +1128,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
74074 }
74075 if (hwpoison_filter(p)) {
74076 if (TestClearPageHWPoison(p))
74077- atomic_long_sub(nr_pages, &mce_bad_pages);
74078+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
74079 unlock_page(hpage);
74080 put_page(hpage);
74081 return 0;
74082@@ -1323,7 +1323,7 @@ int unpoison_memory(unsigned long pfn)
74083 return 0;
74084 }
74085 if (TestClearPageHWPoison(p))
74086- atomic_long_sub(nr_pages, &mce_bad_pages);
74087+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
74088 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
74089 return 0;
74090 }
74091@@ -1337,7 +1337,7 @@ int unpoison_memory(unsigned long pfn)
74092 */
74093 if (TestClearPageHWPoison(page)) {
74094 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
74095- atomic_long_sub(nr_pages, &mce_bad_pages);
74096+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
74097 freeit = 1;
74098 if (PageHuge(page))
74099 clear_page_hwpoison_huge_page(page);
74100@@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
74101 }
74102 done:
74103 if (!PageHWPoison(hpage))
74104- atomic_long_add(1 << compound_trans_order(hpage),
74105+ atomic_long_add_unchecked(1 << compound_trans_order(hpage),
74106 &mce_bad_pages);
74107 set_page_hwpoison_huge_page(hpage);
74108 dequeue_hwpoisoned_huge_page(hpage);
74109@@ -1582,7 +1582,7 @@ int soft_offline_page(struct page *page, int flags)
74110 return ret;
74111
74112 done:
74113- atomic_long_add(1, &mce_bad_pages);
74114+ atomic_long_add_unchecked(1, &mce_bad_pages);
74115 SetPageHWPoison(page);
74116 /* keep elevated page count for bad page */
74117 return ret;
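memory-failure.c shows the flip side of PaX's REFCOUNT hardening: atomics are instrumented to trap on overflow, so counters that are mere statistics and may legitimately grow without bound, such as mce_bad_pages, are converted to the *_unchecked variants to opt out. A userspace sketch of the distinction:

    #include <stdio.h>
    #include <stdlib.h>

    static long checked_add(long *v, long d)
    {
        long r;
        if (__builtin_add_overflow(*v, d, &r)) {
            fprintf(stderr, "refcount overflow trapped\n");
            abort();             /* roughly what the PaX trap amounts to */
        }
        return *v = r;
    }

    static unsigned long unchecked_add(unsigned long *v, unsigned long d)
    {
        return *v += d;          /* statistics: modular wrap is harmless */
    }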
74118diff --git a/mm/memory.c b/mm/memory.c
74119index f2973b2..fd020a7 100644
74120--- a/mm/memory.c
74121+++ b/mm/memory.c
74122@@ -431,6 +431,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
74123 free_pte_range(tlb, pmd, addr);
74124 } while (pmd++, addr = next, addr != end);
74125
74126+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
74127 start &= PUD_MASK;
74128 if (start < floor)
74129 return;
74130@@ -445,6 +446,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
74131 pmd = pmd_offset(pud, start);
74132 pud_clear(pud);
74133 pmd_free_tlb(tlb, pmd, start);
74134+#endif
74135+
74136 }
74137
74138 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
74139@@ -464,6 +467,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
74140 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
74141 } while (pud++, addr = next, addr != end);
74142
74143+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
74144 start &= PGDIR_MASK;
74145 if (start < floor)
74146 return;
74147@@ -478,6 +482,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
74148 pud = pud_offset(pgd, start);
74149 pgd_clear(pgd);
74150 pud_free_tlb(tlb, pud, start);
74151+#endif
74152+
74153 }
74154
74155 /*
74156@@ -1626,12 +1632,6 @@ no_page_table:
74157 return page;
74158 }
74159
74160-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
74161-{
74162- return stack_guard_page_start(vma, addr) ||
74163- stack_guard_page_end(vma, addr+PAGE_SIZE);
74164-}
74165-
74166 /**
74167 * __get_user_pages() - pin user pages in memory
74168 * @tsk: task_struct of target task
74169@@ -1704,10 +1704,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
74170 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
74171 i = 0;
74172
74173- do {
74174+ while (nr_pages) {
74175 struct vm_area_struct *vma;
74176
74177- vma = find_extend_vma(mm, start);
74178+ vma = find_vma(mm, start);
74179 if (!vma && in_gate_area(mm, start)) {
74180 unsigned long pg = start & PAGE_MASK;
74181 pgd_t *pgd;
74182@@ -1755,7 +1755,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
74183 goto next_page;
74184 }
74185
74186- if (!vma ||
74187+ if (!vma || start < vma->vm_start ||
74188 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
74189 !(vm_flags & vma->vm_flags))
74190 return i ? : -EFAULT;
74191@@ -1782,11 +1782,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
74192 int ret;
74193 unsigned int fault_flags = 0;
74194
74195- /* For mlock, just skip the stack guard page. */
74196- if (foll_flags & FOLL_MLOCK) {
74197- if (stack_guard_page(vma, start))
74198- goto next_page;
74199- }
74200 if (foll_flags & FOLL_WRITE)
74201 fault_flags |= FAULT_FLAG_WRITE;
74202 if (nonblocking)
74203@@ -1860,7 +1855,7 @@ next_page:
74204 start += PAGE_SIZE;
74205 nr_pages--;
74206 } while (nr_pages && start < vma->vm_end);
74207- } while (nr_pages);
74208+ }
74209 return i;
74210 }
74211 EXPORT_SYMBOL(__get_user_pages);
74212@@ -2067,6 +2062,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
74213 page_add_file_rmap(page);
74214 set_pte_at(mm, addr, pte, mk_pte(page, prot));
74215
74216+#ifdef CONFIG_PAX_SEGMEXEC
74217+ pax_mirror_file_pte(vma, addr, page, ptl);
74218+#endif
74219+
74220 retval = 0;
74221 pte_unmap_unlock(pte, ptl);
74222 return retval;
74223@@ -2111,9 +2110,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
74224 if (!page_count(page))
74225 return -EINVAL;
74226 if (!(vma->vm_flags & VM_MIXEDMAP)) {
74227+
74228+#ifdef CONFIG_PAX_SEGMEXEC
74229+ struct vm_area_struct *vma_m;
74230+#endif
74231+
74232 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
74233 BUG_ON(vma->vm_flags & VM_PFNMAP);
74234 vma->vm_flags |= VM_MIXEDMAP;
74235+
74236+#ifdef CONFIG_PAX_SEGMEXEC
74237+ vma_m = pax_find_mirror_vma(vma);
74238+ if (vma_m)
74239+ vma_m->vm_flags |= VM_MIXEDMAP;
74240+#endif
74241+
74242 }
74243 return insert_page(vma, addr, page, vma->vm_page_prot);
74244 }
74245@@ -2196,6 +2207,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
74246 unsigned long pfn)
74247 {
74248 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
74249+ BUG_ON(vma->vm_mirror);
74250
74251 if (addr < vma->vm_start || addr >= vma->vm_end)
74252 return -EFAULT;
74253@@ -2396,7 +2408,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
74254
74255 BUG_ON(pud_huge(*pud));
74256
74257- pmd = pmd_alloc(mm, pud, addr);
74258+ pmd = (mm == &init_mm) ?
74259+ pmd_alloc_kernel(mm, pud, addr) :
74260+ pmd_alloc(mm, pud, addr);
74261 if (!pmd)
74262 return -ENOMEM;
74263 do {
74264@@ -2416,7 +2430,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
74265 unsigned long next;
74266 int err;
74267
74268- pud = pud_alloc(mm, pgd, addr);
74269+ pud = (mm == &init_mm) ?
74270+ pud_alloc_kernel(mm, pgd, addr) :
74271+ pud_alloc(mm, pgd, addr);
74272 if (!pud)
74273 return -ENOMEM;
74274 do {
74275@@ -2504,6 +2520,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
74276 copy_user_highpage(dst, src, va, vma);
74277 }
74278
74279+#ifdef CONFIG_PAX_SEGMEXEC
74280+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
74281+{
74282+ struct mm_struct *mm = vma->vm_mm;
74283+ spinlock_t *ptl;
74284+ pte_t *pte, entry;
74285+
74286+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
74287+ entry = *pte;
74288+ if (!pte_present(entry)) {
74289+ if (!pte_none(entry)) {
74290+ BUG_ON(pte_file(entry));
74291+ free_swap_and_cache(pte_to_swp_entry(entry));
74292+ pte_clear_not_present_full(mm, address, pte, 0);
74293+ }
74294+ } else {
74295+ struct page *page;
74296+
74297+ flush_cache_page(vma, address, pte_pfn(entry));
74298+ entry = ptep_clear_flush(vma, address, pte);
74299+ BUG_ON(pte_dirty(entry));
74300+ page = vm_normal_page(vma, address, entry);
74301+ if (page) {
74302+ update_hiwater_rss(mm);
74303+ if (PageAnon(page))
74304+ dec_mm_counter_fast(mm, MM_ANONPAGES);
74305+ else
74306+ dec_mm_counter_fast(mm, MM_FILEPAGES);
74307+ page_remove_rmap(page);
74308+ page_cache_release(page);
74309+ }
74310+ }
74311+ pte_unmap_unlock(pte, ptl);
74312+}
74313+
74314+/* PaX: if vma is mirrored, synchronize the mirror's PTE
74315+ *
74316+ * the ptl of the lower mapped page is held on entry and is not released on exit
74317+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
74318+ */
74319+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
74320+{
74321+ struct mm_struct *mm = vma->vm_mm;
74322+ unsigned long address_m;
74323+ spinlock_t *ptl_m;
74324+ struct vm_area_struct *vma_m;
74325+ pmd_t *pmd_m;
74326+ pte_t *pte_m, entry_m;
74327+
74328+ BUG_ON(!page_m || !PageAnon(page_m));
74329+
74330+ vma_m = pax_find_mirror_vma(vma);
74331+ if (!vma_m)
74332+ return;
74333+
74334+ BUG_ON(!PageLocked(page_m));
74335+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
74336+ address_m = address + SEGMEXEC_TASK_SIZE;
74337+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
74338+ pte_m = pte_offset_map(pmd_m, address_m);
74339+ ptl_m = pte_lockptr(mm, pmd_m);
74340+ if (ptl != ptl_m) {
74341+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
74342+ if (!pte_none(*pte_m))
74343+ goto out;
74344+ }
74345+
74346+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
74347+ page_cache_get(page_m);
74348+ page_add_anon_rmap(page_m, vma_m, address_m);
74349+ inc_mm_counter_fast(mm, MM_ANONPAGES);
74350+ set_pte_at(mm, address_m, pte_m, entry_m);
74351+ update_mmu_cache(vma_m, address_m, entry_m);
74352+out:
74353+ if (ptl != ptl_m)
74354+ spin_unlock(ptl_m);
74355+ pte_unmap(pte_m);
74356+ unlock_page(page_m);
74357+}
74358+
74359+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
74360+{
74361+ struct mm_struct *mm = vma->vm_mm;
74362+ unsigned long address_m;
74363+ spinlock_t *ptl_m;
74364+ struct vm_area_struct *vma_m;
74365+ pmd_t *pmd_m;
74366+ pte_t *pte_m, entry_m;
74367+
74368+ BUG_ON(!page_m || PageAnon(page_m));
74369+
74370+ vma_m = pax_find_mirror_vma(vma);
74371+ if (!vma_m)
74372+ return;
74373+
74374+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
74375+ address_m = address + SEGMEXEC_TASK_SIZE;
74376+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
74377+ pte_m = pte_offset_map(pmd_m, address_m);
74378+ ptl_m = pte_lockptr(mm, pmd_m);
74379+ if (ptl != ptl_m) {
74380+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
74381+ if (!pte_none(*pte_m))
74382+ goto out;
74383+ }
74384+
74385+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
74386+ page_cache_get(page_m);
74387+ page_add_file_rmap(page_m);
74388+ inc_mm_counter_fast(mm, MM_FILEPAGES);
74389+ set_pte_at(mm, address_m, pte_m, entry_m);
74390+ update_mmu_cache(vma_m, address_m, entry_m);
74391+out:
74392+ if (ptl != ptl_m)
74393+ spin_unlock(ptl_m);
74394+ pte_unmap(pte_m);
74395+}
74396+
74397+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
74398+{
74399+ struct mm_struct *mm = vma->vm_mm;
74400+ unsigned long address_m;
74401+ spinlock_t *ptl_m;
74402+ struct vm_area_struct *vma_m;
74403+ pmd_t *pmd_m;
74404+ pte_t *pte_m, entry_m;
74405+
74406+ vma_m = pax_find_mirror_vma(vma);
74407+ if (!vma_m)
74408+ return;
74409+
74410+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
74411+ address_m = address + SEGMEXEC_TASK_SIZE;
74412+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
74413+ pte_m = pte_offset_map(pmd_m, address_m);
74414+ ptl_m = pte_lockptr(mm, pmd_m);
74415+ if (ptl != ptl_m) {
74416+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
74417+ if (!pte_none(*pte_m))
74418+ goto out;
74419+ }
74420+
74421+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
74422+ set_pte_at(mm, address_m, pte_m, entry_m);
74423+out:
74424+ if (ptl != ptl_m)
74425+ spin_unlock(ptl_m);
74426+ pte_unmap(pte_m);
74427+}
74428+
74429+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
74430+{
74431+ struct page *page_m;
74432+ pte_t entry;
74433+
74434+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
74435+ goto out;
74436+
74437+ entry = *pte;
74438+ page_m = vm_normal_page(vma, address, entry);
74439+ if (!page_m)
74440+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
74441+ else if (PageAnon(page_m)) {
74442+ if (pax_find_mirror_vma(vma)) {
74443+ pte_unmap_unlock(pte, ptl);
74444+ lock_page(page_m);
74445+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
74446+ if (pte_same(entry, *pte))
74447+ pax_mirror_anon_pte(vma, address, page_m, ptl);
74448+ else
74449+ unlock_page(page_m);
74450+ }
74451+ } else
74452+ pax_mirror_file_pte(vma, address, page_m, ptl);
74453+
74454+out:
74455+ pte_unmap_unlock(pte, ptl);
74456+}
74457+#endif
74458+
74459 /*
74460 * This routine handles present pages, when users try to write
74461 * to a shared page. It is done by copying the page to a new address
74462@@ -2720,6 +2916,12 @@ gotten:
74463 */
74464 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
74465 if (likely(pte_same(*page_table, orig_pte))) {
74466+
74467+#ifdef CONFIG_PAX_SEGMEXEC
74468+ if (pax_find_mirror_vma(vma))
74469+ BUG_ON(!trylock_page(new_page));
74470+#endif
74471+
74472 if (old_page) {
74473 if (!PageAnon(old_page)) {
74474 dec_mm_counter_fast(mm, MM_FILEPAGES);
74475@@ -2771,6 +2973,10 @@ gotten:
74476 page_remove_rmap(old_page);
74477 }
74478
74479+#ifdef CONFIG_PAX_SEGMEXEC
74480+ pax_mirror_anon_pte(vma, address, new_page, ptl);
74481+#endif
74482+
74483 /* Free the old page.. */
74484 new_page = old_page;
74485 ret |= VM_FAULT_WRITE;
74486@@ -3051,6 +3257,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
74487 swap_free(entry);
74488 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
74489 try_to_free_swap(page);
74490+
74491+#ifdef CONFIG_PAX_SEGMEXEC
74492+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
74493+#endif
74494+
74495 unlock_page(page);
74496 if (swapcache) {
74497 /*
74498@@ -3074,6 +3285,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
74499
74500 /* No need to invalidate - it was non-present before */
74501 update_mmu_cache(vma, address, page_table);
74502+
74503+#ifdef CONFIG_PAX_SEGMEXEC
74504+ pax_mirror_anon_pte(vma, address, page, ptl);
74505+#endif
74506+
74507 unlock:
74508 pte_unmap_unlock(page_table, ptl);
74509 out:
74510@@ -3093,40 +3309,6 @@ out_release:
74511 }
74512
74513 /*
74514- * This is like a special single-page "expand_{down|up}wards()",
74515- * except we must first make sure that 'address{-|+}PAGE_SIZE'
74516- * doesn't hit another vma.
74517- */
74518-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
74519-{
74520- address &= PAGE_MASK;
74521- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
74522- struct vm_area_struct *prev = vma->vm_prev;
74523-
74524- /*
74525- * Is there a mapping abutting this one below?
74526- *
74527- * That's only ok if it's the same stack mapping
74528- * that has gotten split..
74529- */
74530- if (prev && prev->vm_end == address)
74531- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
74532-
74533- expand_downwards(vma, address - PAGE_SIZE);
74534- }
74535- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
74536- struct vm_area_struct *next = vma->vm_next;
74537-
74538- /* As VM_GROWSDOWN but s/below/above/ */
74539- if (next && next->vm_start == address + PAGE_SIZE)
74540- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
74541-
74542- expand_upwards(vma, address + PAGE_SIZE);
74543- }
74544- return 0;
74545-}
74546-
74547-/*
74548 * We enter with non-exclusive mmap_sem (to exclude vma changes,
74549 * but allow concurrent faults), and pte mapped but not yet locked.
74550 * We return with mmap_sem still held, but pte unmapped and unlocked.
74551@@ -3135,27 +3317,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
74552 unsigned long address, pte_t *page_table, pmd_t *pmd,
74553 unsigned int flags)
74554 {
74555- struct page *page;
74556+ struct page *page = NULL;
74557 spinlock_t *ptl;
74558 pte_t entry;
74559
74560- pte_unmap(page_table);
74561-
74562- /* Check if we need to add a guard page to the stack */
74563- if (check_stack_guard_page(vma, address) < 0)
74564- return VM_FAULT_SIGBUS;
74565-
74566- /* Use the zero-page for reads */
74567 if (!(flags & FAULT_FLAG_WRITE)) {
74568 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
74569 vma->vm_page_prot));
74570- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
74571+ ptl = pte_lockptr(mm, pmd);
74572+ spin_lock(ptl);
74573 if (!pte_none(*page_table))
74574 goto unlock;
74575 goto setpte;
74576 }
74577
74578 /* Allocate our own private page. */
74579+ pte_unmap(page_table);
74580+
74581 if (unlikely(anon_vma_prepare(vma)))
74582 goto oom;
74583 page = alloc_zeroed_user_highpage_movable(vma, address);
74584@@ -3174,6 +3352,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
74585 if (!pte_none(*page_table))
74586 goto release;
74587
74588+#ifdef CONFIG_PAX_SEGMEXEC
74589+ if (pax_find_mirror_vma(vma))
74590+ BUG_ON(!trylock_page(page));
74591+#endif
74592+
74593 inc_mm_counter_fast(mm, MM_ANONPAGES);
74594 page_add_new_anon_rmap(page, vma, address);
74595 setpte:
74596@@ -3181,6 +3364,12 @@ setpte:
74597
74598 /* No need to invalidate - it was non-present before */
74599 update_mmu_cache(vma, address, page_table);
74600+
74601+#ifdef CONFIG_PAX_SEGMEXEC
74602+ if (page)
74603+ pax_mirror_anon_pte(vma, address, page, ptl);
74604+#endif
74605+
74606 unlock:
74607 pte_unmap_unlock(page_table, ptl);
74608 return 0;
74609@@ -3324,6 +3513,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
74610 */
74611 /* Only go through if we didn't race with anybody else... */
74612 if (likely(pte_same(*page_table, orig_pte))) {
74613+
74614+#ifdef CONFIG_PAX_SEGMEXEC
74615+ if (anon && pax_find_mirror_vma(vma))
74616+ BUG_ON(!trylock_page(page));
74617+#endif
74618+
74619 flush_icache_page(vma, page);
74620 entry = mk_pte(page, vma->vm_page_prot);
74621 if (flags & FAULT_FLAG_WRITE)
74622@@ -3343,6 +3538,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
74623
74624 /* no need to invalidate: a not-present page won't be cached */
74625 update_mmu_cache(vma, address, page_table);
74626+
74627+#ifdef CONFIG_PAX_SEGMEXEC
74628+ if (anon)
74629+ pax_mirror_anon_pte(vma, address, page, ptl);
74630+ else
74631+ pax_mirror_file_pte(vma, address, page, ptl);
74632+#endif
74633+
74634 } else {
74635 if (cow_page)
74636 mem_cgroup_uncharge_page(cow_page);
74637@@ -3497,6 +3700,12 @@ int handle_pte_fault(struct mm_struct *mm,
74638 if (flags & FAULT_FLAG_WRITE)
74639 flush_tlb_fix_spurious_fault(vma, address);
74640 }
74641+
74642+#ifdef CONFIG_PAX_SEGMEXEC
74643+ pax_mirror_pte(vma, address, pte, pmd, ptl);
74644+ return 0;
74645+#endif
74646+
74647 unlock:
74648 pte_unmap_unlock(pte, ptl);
74649 return 0;
74650@@ -3513,6 +3722,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
74651 pmd_t *pmd;
74652 pte_t *pte;
74653
74654+#ifdef CONFIG_PAX_SEGMEXEC
74655+ struct vm_area_struct *vma_m;
74656+#endif
74657+
74658 __set_current_state(TASK_RUNNING);
74659
74660 count_vm_event(PGFAULT);
74661@@ -3524,6 +3737,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
74662 if (unlikely(is_vm_hugetlb_page(vma)))
74663 return hugetlb_fault(mm, vma, address, flags);
74664
74665+#ifdef CONFIG_PAX_SEGMEXEC
74666+ vma_m = pax_find_mirror_vma(vma);
74667+ if (vma_m) {
74668+ unsigned long address_m;
74669+ pgd_t *pgd_m;
74670+ pud_t *pud_m;
74671+ pmd_t *pmd_m;
74672+
74673+ if (vma->vm_start > vma_m->vm_start) {
74674+ address_m = address;
74675+ address -= SEGMEXEC_TASK_SIZE;
74676+ vma = vma_m;
74677+ } else
74678+ address_m = address + SEGMEXEC_TASK_SIZE;
74679+
74680+ pgd_m = pgd_offset(mm, address_m);
74681+ pud_m = pud_alloc(mm, pgd_m, address_m);
74682+ if (!pud_m)
74683+ return VM_FAULT_OOM;
74684+ pmd_m = pmd_alloc(mm, pud_m, address_m);
74685+ if (!pmd_m)
74686+ return VM_FAULT_OOM;
74687+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
74688+ return VM_FAULT_OOM;
74689+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
74690+ }
74691+#endif
74692+
74693 retry:
74694 pgd = pgd_offset(mm, address);
74695 pud = pud_alloc(mm, pgd, address);
74696@@ -3565,7 +3806,7 @@ retry:
74697 * run pte_offset_map on the pmd, if an huge pmd could
74698 * materialize from under us from a different thread.
74699 */
74700- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
74701+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
74702 return VM_FAULT_OOM;
74703 /* if an huge pmd materialized from under us just retry later */
74704 if (unlikely(pmd_trans_huge(*pmd)))
74705@@ -3602,6 +3843,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
74706 spin_unlock(&mm->page_table_lock);
74707 return 0;
74708 }
74709+
74710+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
74711+{
74712+ pud_t *new = pud_alloc_one(mm, address);
74713+ if (!new)
74714+ return -ENOMEM;
74715+
74716+ smp_wmb(); /* See comment in __pte_alloc */
74717+
74718+ spin_lock(&mm->page_table_lock);
74719+ if (pgd_present(*pgd)) /* Another has populated it */
74720+ pud_free(mm, new);
74721+ else
74722+ pgd_populate_kernel(mm, pgd, new);
74723+ spin_unlock(&mm->page_table_lock);
74724+ return 0;
74725+}
74726 #endif /* __PAGETABLE_PUD_FOLDED */
74727
74728 #ifndef __PAGETABLE_PMD_FOLDED
74729@@ -3632,6 +3890,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
74730 spin_unlock(&mm->page_table_lock);
74731 return 0;
74732 }
74733+
74734+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
74735+{
74736+ pmd_t *new = pmd_alloc_one(mm, address);
74737+ if (!new)
74738+ return -ENOMEM;
74739+
74740+ smp_wmb(); /* See comment in __pte_alloc */
74741+
74742+ spin_lock(&mm->page_table_lock);
74743+#ifndef __ARCH_HAS_4LEVEL_HACK
74744+ if (pud_present(*pud)) /* Another has populated it */
74745+ pmd_free(mm, new);
74746+ else
74747+ pud_populate_kernel(mm, pud, new);
74748+#else
74749+ if (pgd_present(*pud)) /* Another has populated it */
74750+ pmd_free(mm, new);
74751+ else
74752+ pgd_populate_kernel(mm, pud, new);
74753+#endif /* __ARCH_HAS_4LEVEL_HACK */
74754+ spin_unlock(&mm->page_table_lock);
74755+ return 0;
74756+}
74757 #endif /* __PAGETABLE_PMD_FOLDED */
74758
74759 int make_pages_present(unsigned long addr, unsigned long end)
74760@@ -3669,7 +3951,7 @@ static int __init gate_vma_init(void)
74761 gate_vma.vm_start = FIXADDR_USER_START;
74762 gate_vma.vm_end = FIXADDR_USER_END;
74763 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
74764- gate_vma.vm_page_prot = __P101;
74765+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
74766
74767 return 0;
74768 }
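The memory.c diff does two independent things. First, it removes the single-guard-page heuristics (check_stack_guard_page(), the FOLL_MLOCK skip, and find_extend_vma() in __get_user_pages(), which now refuses addresses outside the vma instead of growing stacks), since grsecurity enforces a configurable heap/stack gap instead; see sysctl_heap_stack_gap in the mm/mmap.c diff below. Second, it adds the SEGMEXEC mirroring core: after a fault is resolved, pax_mirror_pte() copies the resulting anonymous, file, or pfn PTE into the mirror half, and the new __pud_alloc_kernel()/__pmd_alloc_kernel() helpers give init_mm its own populate path under PAX_PER_CPU_PGD. The subtle step in pax_mirror_pte() is locking the page without sleeping under the PTE lock; a runnable userspace analogue of that unlock-sleep-relock-revalidate dance:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long pte;          /* stands in for the watched PTE */

    static void sleeping_lock_page(void) { /* e.g. lock_page(): may block */ }

    /* called with ptl held, like pax_mirror_pte() */
    static bool mirror_if_unchanged(unsigned long entry)
    {
        pthread_mutex_unlock(&ptl);    /* cannot sleep while holding it */
        sleeping_lock_page();          /* the operation that may sleep */
        pthread_mutex_lock(&ptl);      /* retake ... */
        if (pte != entry)              /* ... and revalidate (pte_same()) */
            return false;              /* raced with another thread: bail */
        /* safe: state unchanged, do the mirroring here */
        return true;
    }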
74769diff --git a/mm/mempolicy.c b/mm/mempolicy.c
74770index 002c281..9429765 100644
74771--- a/mm/mempolicy.c
74772+++ b/mm/mempolicy.c
74773@@ -655,6 +655,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
74774 unsigned long vmstart;
74775 unsigned long vmend;
74776
74777+#ifdef CONFIG_PAX_SEGMEXEC
74778+ struct vm_area_struct *vma_m;
74779+#endif
74780+
74781 vma = find_vma(mm, start);
74782 if (!vma || vma->vm_start > start)
74783 return -EFAULT;
74784@@ -691,9 +695,20 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
74785 if (err)
74786 goto out;
74787 }
74788+
74789 err = vma_replace_policy(vma, new_pol);
74790 if (err)
74791 goto out;
74792+
74793+#ifdef CONFIG_PAX_SEGMEXEC
74794+ vma_m = pax_find_mirror_vma(vma);
74795+ if (vma_m) {
74796+ err = vma_replace_policy(vma_m, new_pol);
74797+ if (err)
74798+ goto out;
74799+ }
74800+#endif
74801+
74802 }
74803
74804 out:
74805@@ -1150,6 +1165,17 @@ static long do_mbind(unsigned long start, unsigned long len,
74806
74807 if (end < start)
74808 return -EINVAL;
74809+
74810+#ifdef CONFIG_PAX_SEGMEXEC
74811+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
74812+ if (end > SEGMEXEC_TASK_SIZE)
74813+ return -EINVAL;
74814+ } else
74815+#endif
74816+
74817+ if (end > TASK_SIZE)
74818+ return -EINVAL;
74819+
74820 if (end == start)
74821 return 0;
74822
74823@@ -1373,8 +1399,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
74824 */
74825 tcred = __task_cred(task);
74826 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
74827- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
74828- !capable(CAP_SYS_NICE)) {
74829+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
74830 rcu_read_unlock();
74831 err = -EPERM;
74832 goto out_put;
74833@@ -1405,6 +1430,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
74834 goto out;
74835 }
74836
74837+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
74838+ if (mm != current->mm &&
74839+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
74840+ mmput(mm);
74841+ err = -EPERM;
74842+ goto out;
74843+ }
74844+#endif
74845+
74846 err = do_migrate_pages(mm, old, new,
74847 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
74848
74849diff --git a/mm/migrate.c b/mm/migrate.c
74850index 77ed2d7..317d528 100644
74851--- a/mm/migrate.c
74852+++ b/mm/migrate.c
74853@@ -1350,8 +1350,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
74854 */
74855 tcred = __task_cred(task);
74856 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
74857- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
74858- !capable(CAP_SYS_NICE)) {
74859+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
74860 rcu_read_unlock();
74861 err = -EPERM;
74862 goto out;
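mm/mempolicy.c and mm/migrate.c receive the same credential fix for migrate_pages(2)/move_pages(2): the clause that accepted a bare real-UID match is dropped, so the caller must now match the target's saved or real UID with its effective UID, match the target's saved UID with its real UID, or hold CAP_SYS_NICE (mempolicy.c additionally refuses cross-process migration when the target mm is randomized or mirrored, under GRKERNSEC_PROC_MEMMAP). The tightened predicate as a standalone function:

    #include <stdbool.h>
    #include <sys/types.h>

    struct cred_ids { uid_t uid, euid, suid; };

    static bool may_migrate(struct cred_ids me, struct cred_ids target,
                            bool cap_sys_nice)
    {
        /* the dropped clause was: me.uid == target.uid */
        return me.euid == target.suid || me.euid == target.uid ||
               me.uid  == target.suid || cap_sys_nice;
    }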
74863diff --git a/mm/mlock.c b/mm/mlock.c
74864index f0b9ce5..da8d069 100644
74865--- a/mm/mlock.c
74866+++ b/mm/mlock.c
74867@@ -13,6 +13,7 @@
74868 #include <linux/pagemap.h>
74869 #include <linux/mempolicy.h>
74870 #include <linux/syscalls.h>
74871+#include <linux/security.h>
74872 #include <linux/sched.h>
74873 #include <linux/export.h>
74874 #include <linux/rmap.h>
74875@@ -369,7 +370,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
74876 {
74877 unsigned long nstart, end, tmp;
74878 struct vm_area_struct * vma, * prev;
74879- int error;
74880+ int error = 0;
74881
74882 VM_BUG_ON(start & ~PAGE_MASK);
74883 VM_BUG_ON(len != PAGE_ALIGN(len));
74884@@ -378,6 +379,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
74885 return -EINVAL;
74886 if (end == start)
74887 return 0;
74888+ if (end > TASK_SIZE)
74889+ return -EINVAL;
74890+
74891 vma = find_vma(current->mm, start);
74892 if (!vma || vma->vm_start > start)
74893 return -ENOMEM;
74894@@ -389,6 +393,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
74895 for (nstart = start ; ; ) {
74896 vm_flags_t newflags;
74897
74898+#ifdef CONFIG_PAX_SEGMEXEC
74899+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
74900+ break;
74901+#endif
74902+
74903 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
74904
74905 newflags = vma->vm_flags | VM_LOCKED;
74906@@ -494,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
74907 lock_limit >>= PAGE_SHIFT;
74908
74909 /* check against resource limits */
74910+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
74911 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
74912 error = do_mlock(start, len, 1);
74913 up_write(&current->mm->mmap_sem);
74914@@ -517,17 +527,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
74915 static int do_mlockall(int flags)
74916 {
74917 struct vm_area_struct * vma, * prev = NULL;
74918- unsigned int def_flags = 0;
74919
74920 if (flags & MCL_FUTURE)
74921- def_flags = VM_LOCKED;
74922- current->mm->def_flags = def_flags;
74923+ current->mm->def_flags |= VM_LOCKED;
74924+ else
74925+ current->mm->def_flags &= ~VM_LOCKED;
74926 if (flags == MCL_FUTURE)
74927 goto out;
74928
74929 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
74930 vm_flags_t newflags;
74931
74932+#ifdef CONFIG_PAX_SEGMEXEC
74933+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
74934+ break;
74935+#endif
74936+
74937+ BUG_ON(vma->vm_end > TASK_SIZE);
74938 newflags = vma->vm_flags | VM_LOCKED;
74939 if (!(flags & MCL_CURRENT))
74940 newflags &= ~VM_LOCKED;
74941@@ -560,6 +576,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
74942 lock_limit >>= PAGE_SHIFT;
74943
74944 ret = -ENOMEM;
74945+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
74946 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
74947 capable(CAP_IPC_LOCK))
74948 ret = do_mlockall(flags);
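Besides the gr_learn_resource() hooks (the same learning-mode pattern as in mm/filemap.c above) and the TASK_SIZE/SEGMEXEC bounds checks, the notable mlock.c fix is in do_mlockall(): the old code overwrote mm->def_flags wholesale, wiping any other inherited default flags, while the new code toggles only VM_LOCKED. In isolation (flag value from this kernel era):

    #define VM_LOCKED 0x00002000UL

    static void set_future_locking(unsigned long *def_flags, int on)
    {
        if (on)
            *def_flags |= VM_LOCKED;
        else
            *def_flags &= ~VM_LOCKED;  /* old: *def_flags = on ? VM_LOCKED : 0 */
    }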
74949diff --git a/mm/mmap.c b/mm/mmap.c
74950index 9a796c4..21f8e50 100644
74951--- a/mm/mmap.c
74952+++ b/mm/mmap.c
74953@@ -31,6 +31,7 @@
74954 #include <linux/audit.h>
74955 #include <linux/khugepaged.h>
74956 #include <linux/uprobes.h>
74957+#include <linux/random.h>
74958
74959 #include <asm/uaccess.h>
74960 #include <asm/cacheflush.h>
74961@@ -47,6 +48,16 @@
74962 #define arch_rebalance_pgtables(addr, len) (addr)
74963 #endif
74964
74965+static inline void verify_mm_writelocked(struct mm_struct *mm)
74966+{
74967+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
74968+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
74969+ up_read(&mm->mmap_sem);
74970+ BUG();
74971+ }
74972+#endif
74973+}
74974+
74975 static void unmap_region(struct mm_struct *mm,
74976 struct vm_area_struct *vma, struct vm_area_struct *prev,
74977 unsigned long start, unsigned long end);
74978@@ -66,22 +77,32 @@ static void unmap_region(struct mm_struct *mm,
74979 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
74980 *
74981 */
74982-pgprot_t protection_map[16] = {
74983+pgprot_t protection_map[16] __read_only = {
74984 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
74985 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
74986 };
74987
74988-pgprot_t vm_get_page_prot(unsigned long vm_flags)
74989+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
74990 {
74991- return __pgprot(pgprot_val(protection_map[vm_flags &
74992+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
74993 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
74994 pgprot_val(arch_vm_get_page_prot(vm_flags)));
74995+
74996+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
74997+ if (!(__supported_pte_mask & _PAGE_NX) &&
74998+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
74999+ (vm_flags & (VM_READ | VM_WRITE)))
75000+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
75001+#endif
75002+
75003+ return prot;
75004 }
75005 EXPORT_SYMBOL(vm_get_page_prot);
75006
75007 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
75008 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
75009 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
75010+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
75011 /*
75012 * Make sure vm_committed_as in one cacheline and not cacheline shared with
75013 * other variables. It can be updated by several CPUs frequently.
75014@@ -223,6 +244,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
75015 struct vm_area_struct *next = vma->vm_next;
75016
75017 might_sleep();
75018+ BUG_ON(vma->vm_mirror);
75019 if (vma->vm_ops && vma->vm_ops->close)
75020 vma->vm_ops->close(vma);
75021 if (vma->vm_file)
75022@@ -266,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
75023 * not page aligned -Ram Gupta
75024 */
75025 rlim = rlimit(RLIMIT_DATA);
75026+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
75027 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
75028 (mm->end_data - mm->start_data) > rlim)
75029 goto out;
75030@@ -736,6 +759,12 @@ static int
75031 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
75032 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
75033 {
75034+
75035+#ifdef CONFIG_PAX_SEGMEXEC
75036+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
75037+ return 0;
75038+#endif
75039+
75040 if (is_mergeable_vma(vma, file, vm_flags) &&
75041 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
75042 if (vma->vm_pgoff == vm_pgoff)
75043@@ -755,6 +784,12 @@ static int
75044 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
75045 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
75046 {
75047+
75048+#ifdef CONFIG_PAX_SEGMEXEC
75049+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
75050+ return 0;
75051+#endif
75052+
75053 if (is_mergeable_vma(vma, file, vm_flags) &&
75054 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
75055 pgoff_t vm_pglen;
75056@@ -797,13 +832,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
75057 struct vm_area_struct *vma_merge(struct mm_struct *mm,
75058 struct vm_area_struct *prev, unsigned long addr,
75059 unsigned long end, unsigned long vm_flags,
75060- struct anon_vma *anon_vma, struct file *file,
75061+ struct anon_vma *anon_vma, struct file *file,
75062 pgoff_t pgoff, struct mempolicy *policy)
75063 {
75064 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
75065 struct vm_area_struct *area, *next;
75066 int err;
75067
75068+#ifdef CONFIG_PAX_SEGMEXEC
75069+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
75070+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
75071+
75072+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
75073+#endif
75074+
75075 /*
75076 * We later require that vma->vm_flags == vm_flags,
75077 * so this tests vma->vm_flags & VM_SPECIAL, too.
75078@@ -819,6 +861,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
75079 if (next && next->vm_end == end) /* cases 6, 7, 8 */
75080 next = next->vm_next;
75081
75082+#ifdef CONFIG_PAX_SEGMEXEC
75083+ if (prev)
75084+ prev_m = pax_find_mirror_vma(prev);
75085+ if (area)
75086+ area_m = pax_find_mirror_vma(area);
75087+ if (next)
75088+ next_m = pax_find_mirror_vma(next);
75089+#endif
75090+
75091 /*
75092 * Can it merge with the predecessor?
75093 */
75094@@ -838,9 +889,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
75095 /* cases 1, 6 */
75096 err = vma_adjust(prev, prev->vm_start,
75097 next->vm_end, prev->vm_pgoff, NULL);
75098- } else /* cases 2, 5, 7 */
75099+
75100+#ifdef CONFIG_PAX_SEGMEXEC
75101+ if (!err && prev_m)
75102+ err = vma_adjust(prev_m, prev_m->vm_start,
75103+ next_m->vm_end, prev_m->vm_pgoff, NULL);
75104+#endif
75105+
75106+ } else { /* cases 2, 5, 7 */
75107 err = vma_adjust(prev, prev->vm_start,
75108 end, prev->vm_pgoff, NULL);
75109+
75110+#ifdef CONFIG_PAX_SEGMEXEC
75111+ if (!err && prev_m)
75112+ err = vma_adjust(prev_m, prev_m->vm_start,
75113+ end_m, prev_m->vm_pgoff, NULL);
75114+#endif
75115+
75116+ }
75117 if (err)
75118 return NULL;
75119 khugepaged_enter_vma_merge(prev);
75120@@ -854,12 +920,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
75121 mpol_equal(policy, vma_policy(next)) &&
75122 can_vma_merge_before(next, vm_flags,
75123 anon_vma, file, pgoff+pglen)) {
75124- if (prev && addr < prev->vm_end) /* case 4 */
75125+ if (prev && addr < prev->vm_end) { /* case 4 */
75126 err = vma_adjust(prev, prev->vm_start,
75127 addr, prev->vm_pgoff, NULL);
75128- else /* cases 3, 8 */
75129+
75130+#ifdef CONFIG_PAX_SEGMEXEC
75131+ if (!err && prev_m)
75132+ err = vma_adjust(prev_m, prev_m->vm_start,
75133+ addr_m, prev_m->vm_pgoff, NULL);
75134+#endif
75135+
75136+ } else { /* cases 3, 8 */
75137 err = vma_adjust(area, addr, next->vm_end,
75138 next->vm_pgoff - pglen, NULL);
75139+
75140+#ifdef CONFIG_PAX_SEGMEXEC
75141+ if (!err && area_m)
75142+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
75143+ next_m->vm_pgoff - pglen, NULL);
75144+#endif
75145+
75146+ }
75147 if (err)
75148 return NULL;
75149 khugepaged_enter_vma_merge(area);
75150@@ -968,16 +1049,13 @@ none:
75151 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
75152 struct file *file, long pages)
75153 {
75154- const unsigned long stack_flags
75155- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
75156-
75157 mm->total_vm += pages;
75158
75159 if (file) {
75160 mm->shared_vm += pages;
75161 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
75162 mm->exec_vm += pages;
75163- } else if (flags & stack_flags)
75164+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
75165 mm->stack_vm += pages;
75166 }
75167 #endif /* CONFIG_PROC_FS */
75168@@ -1013,7 +1091,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
75169 * (the exception is when the underlying filesystem is noexec
75170 * mounted, in which case we dont add PROT_EXEC.)
75171 */
75172- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
75173+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
75174 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
75175 prot |= PROT_EXEC;
75176
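The hunk above widens the legacy READ_IMPLIES_EXEC personality test so that write-only mappings are promoted to executable as well, not just readable ones. A minimal userspace sketch of the flag in action; which permission bits actually appear depends on the kernel this runs on:

/* read_implies_exec_demo.c: map a write-only page under the
 * READ_IMPLIES_EXEC personality and inspect /proc/self/maps.
 * On a vanilla kernel only PROT_READ mappings gain exec; the
 * hunk above extends the promotion to PROT_WRITE.
 */
#include <stdio.h>
#include <sys/mman.h>
#include <sys/personality.h>

int main(void)
{
	personality(personality(0xffffffff) | READ_IMPLIES_EXEC);

	void *p = mmap(NULL, 4096, PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	printf("mapped %p; check its perms in /proc/self/maps\n", p);
	getchar();	/* pause so the mapping can be inspected */
	return 0;
}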
75177@@ -1039,7 +1117,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
75178 /* Obtain the address to map to. we verify (or select) it and ensure
75179 * that it represents a valid section of the address space.
75180 */
75181- addr = get_unmapped_area(file, addr, len, pgoff, flags);
75182+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
75183 if (addr & ~PAGE_MASK)
75184 return addr;
75185
75186@@ -1050,6 +1128,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
75187 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
75188 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
75189
75190+#ifdef CONFIG_PAX_MPROTECT
75191+ if (mm->pax_flags & MF_PAX_MPROTECT) {
75192+#ifndef CONFIG_PAX_MPROTECT_COMPAT
75193+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
75194+ gr_log_rwxmmap(file);
75195+
75196+#ifdef CONFIG_PAX_EMUPLT
75197+ vm_flags &= ~VM_EXEC;
75198+#else
75199+ return -EPERM;
75200+#endif
75201+
75202+ }
75203+
75204+ if (!(vm_flags & VM_EXEC))
75205+ vm_flags &= ~VM_MAYEXEC;
75206+#else
75207+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
75208+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
75209+#endif
75210+ else
75211+ vm_flags &= ~VM_MAYWRITE;
75212+ }
75213+#endif
75214+
75215+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
75216+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
75217+ vm_flags &= ~VM_PAGEEXEC;
75218+#endif
75219+
75220 if (flags & MAP_LOCKED)
75221 if (!can_do_mlock())
75222 return -EPERM;
75223@@ -1061,6 +1169,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
75224 locked += mm->locked_vm;
75225 lock_limit = rlimit(RLIMIT_MEMLOCK);
75226 lock_limit >>= PAGE_SHIFT;
75227+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
75228 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
75229 return -EAGAIN;
75230 }
75231@@ -1127,6 +1236,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
75232 }
75233 }
75234
75235+ if (!gr_acl_handle_mmap(file, prot))
75236+ return -EACCES;
75237+
75238 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
75239 }
75240
75241@@ -1203,7 +1315,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
75242 vm_flags_t vm_flags = vma->vm_flags;
75243
75244 /* If it was private or non-writable, the write bit is already clear */
75245- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
75246+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
75247 return 0;
75248
75249 /* The backer wishes to know when pages are first written to? */
75250@@ -1252,13 +1364,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
75251 unsigned long charged = 0;
75252 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
75253
75254+#ifdef CONFIG_PAX_SEGMEXEC
75255+ struct vm_area_struct *vma_m = NULL;
75256+#endif
75257+
75258+ /*
75259+ * mm->mmap_sem is required to protect against another thread
75260+ * changing the mappings in case we sleep.
75261+ */
75262+ verify_mm_writelocked(mm);
75263+
75264 /* Clear old maps */
75265 error = -ENOMEM;
75266-munmap_back:
75267 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
75268 if (do_munmap(mm, addr, len))
75269 return -ENOMEM;
75270- goto munmap_back;
75271+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
75272 }
75273
75274 /* Check against address space limit. */
75275@@ -1307,6 +1428,16 @@ munmap_back:
75276 goto unacct_error;
75277 }
75278
75279+#ifdef CONFIG_PAX_SEGMEXEC
75280+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
75281+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
75282+ if (!vma_m) {
75283+ error = -ENOMEM;
75284+ goto free_vma;
75285+ }
75286+ }
75287+#endif
75288+
75289 vma->vm_mm = mm;
75290 vma->vm_start = addr;
75291 vma->vm_end = addr + len;
75292@@ -1331,6 +1462,13 @@ munmap_back:
75293 if (error)
75294 goto unmap_and_free_vma;
75295
75296+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
75297+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
75298+ vma->vm_flags |= VM_PAGEEXEC;
75299+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
75300+ }
75301+#endif
75302+
75303 /* Can addr have changed??
75304 *
75305 * Answer: Yes, several device drivers can do it in their
75306@@ -1365,6 +1503,11 @@ munmap_back:
75307 vma_link(mm, vma, prev, rb_link, rb_parent);
75308 file = vma->vm_file;
75309
75310+#ifdef CONFIG_PAX_SEGMEXEC
75311+ if (vma_m)
75312+ BUG_ON(pax_mirror_vma(vma_m, vma));
75313+#endif
75314+
75315 /* Once vma denies write, undo our temporary denial count */
75316 if (correct_wcount)
75317 atomic_inc(&inode->i_writecount);
75318@@ -1372,6 +1515,7 @@ out:
75319 perf_event_mmap(vma);
75320
75321 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
75322+ track_exec_limit(mm, addr, addr + len, vm_flags);
75323 if (vm_flags & VM_LOCKED) {
75324 if (!mlock_vma_pages_range(vma, addr, addr + len))
75325 mm->locked_vm += (len >> PAGE_SHIFT);
75326@@ -1393,6 +1537,12 @@ unmap_and_free_vma:
75327 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
75328 charged = 0;
75329 free_vma:
75330+
75331+#ifdef CONFIG_PAX_SEGMEXEC
75332+ if (vma_m)
75333+ kmem_cache_free(vm_area_cachep, vma_m);
75334+#endif
75335+
75336 kmem_cache_free(vm_area_cachep, vma);
75337 unacct_error:
75338 if (charged)
75339@@ -1400,6 +1550,62 @@ unacct_error:
75340 return error;
75341 }
75342
75343+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
75344+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
75345+{
75346+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
75347+ return (random32() & 0xFF) << PAGE_SHIFT;
75348+
75349+ return 0;
75350+}
75351+#endif
75352+
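/*
 * Scale of the offset above: with 4 KiB pages it is a random multiple
 * of the page size in [0, 255], i.e. up to ~1 MiB of slack inserted
 * before a new thread stack. Hypothetical userspace rendering of the
 * same arithmetic; rand() stands in for the kernel's random32().
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages */

static unsigned long rand_threadstack_offset(void)
{
	return ((unsigned long)rand() & 0xFF) << PAGE_SHIFT;
}

int main(void)
{
	srand(42);
	for (int i = 0; i < 4; i++)
		printf("stack gap: %#lx\n", rand_threadstack_offset());
	return 0;
}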
75353+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
75354+{
75355+ if (!vma) {
75356+#ifdef CONFIG_STACK_GROWSUP
75357+ if (addr > sysctl_heap_stack_gap)
75358+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
75359+ else
75360+ vma = find_vma(current->mm, 0);
75361+ if (vma && (vma->vm_flags & VM_GROWSUP))
75362+ return false;
75363+#endif
75364+ return true;
75365+ }
75366+
75367+ if (addr + len > vma->vm_start)
75368+ return false;
75369+
75370+ if (vma->vm_flags & VM_GROWSDOWN)
75371+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
75372+#ifdef CONFIG_STACK_GROWSUP
75373+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
75374+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
75375+#endif
75376+ else if (offset)
75377+ return offset <= vma->vm_start - addr - len;
75378+
75379+ return true;
75380+}
75381+
75382+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
75383+{
75384+ if (vma->vm_start < len)
75385+ return -ENOMEM;
75386+
75387+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
75388+ if (offset <= vma->vm_start - len)
75389+ return vma->vm_start - len - offset;
75390+ else
75391+ return -ENOMEM;
75392+ }
75393+
75394+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
75395+ return vma->vm_start - len - sysctl_heap_stack_gap;
75396+ return -ENOMEM;
75397+}
75398+
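/*
 * The policy encoded by check_heap_stack_gap()/skip_heap_stack_gap()
 * above: a candidate range may never touch a stack VMA, and for a
 * grows-down stack it must also end at least sysctl_heap_stack_gap
 * bytes below the stack's current bottom. A userspace rendering of
 * the grows-down branch; the names and the 64-page gap are
 * illustrative only, not part of the patch.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned long heap_stack_gap = 64UL << 12;	/* assumed 64 pages */

static bool fits_below_stack(unsigned long vma_start,
			     unsigned long addr, unsigned long len)
{
	if (addr + len > vma_start)		/* would overlap the stack */
		return false;
	return heap_stack_gap <= vma_start - addr - len;
}

int main(void)
{
	unsigned long stack_bottom = 0x7f0000000000UL;

	/* far below the stack: accepted */
	printf("%d\n", fits_below_stack(stack_bottom,
					stack_bottom - (128UL << 12), 4096));
	/* flush against the stack: rejected */
	printf("%d\n", fits_below_stack(stack_bottom,
					stack_bottom - 4096, 4096));
	return 0;
}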
75399 /* Get an address range which is currently unmapped.
75400 * For shmat() with addr=0.
75401 *
75402@@ -1426,18 +1632,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
75403 if (flags & MAP_FIXED)
75404 return addr;
75405
75406+#ifdef CONFIG_PAX_RANDMMAP
75407+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
75408+#endif
75409+
75410 if (addr) {
75411 addr = PAGE_ALIGN(addr);
75412- vma = find_vma(mm, addr);
75413- if (TASK_SIZE - len >= addr &&
75414- (!vma || addr + len <= vma->vm_start))
75415- return addr;
75416+ if (TASK_SIZE - len >= addr) {
75417+ vma = find_vma(mm, addr);
75418+ if (check_heap_stack_gap(vma, addr, len))
75419+ return addr;
75420+ }
75421 }
75422 if (len > mm->cached_hole_size) {
75423- start_addr = addr = mm->free_area_cache;
75424+ start_addr = addr = mm->free_area_cache;
75425 } else {
75426- start_addr = addr = TASK_UNMAPPED_BASE;
75427- mm->cached_hole_size = 0;
75428+ start_addr = addr = mm->mmap_base;
75429+ mm->cached_hole_size = 0;
75430 }
75431
75432 full_search:
75433@@ -1448,34 +1659,40 @@ full_search:
75434 * Start a new search - just in case we missed
75435 * some holes.
75436 */
75437- if (start_addr != TASK_UNMAPPED_BASE) {
75438- addr = TASK_UNMAPPED_BASE;
75439- start_addr = addr;
75440+ if (start_addr != mm->mmap_base) {
75441+ start_addr = addr = mm->mmap_base;
75442 mm->cached_hole_size = 0;
75443 goto full_search;
75444 }
75445 return -ENOMEM;
75446 }
75447- if (!vma || addr + len <= vma->vm_start) {
75448- /*
75449- * Remember the place where we stopped the search:
75450- */
75451- mm->free_area_cache = addr + len;
75452- return addr;
75453- }
75454+ if (check_heap_stack_gap(vma, addr, len))
75455+ break;
75456 if (addr + mm->cached_hole_size < vma->vm_start)
75457 mm->cached_hole_size = vma->vm_start - addr;
75458 addr = vma->vm_end;
75459 }
75460+
75461+ /*
75462+ * Remember the place where we stopped the search:
75463+ */
75464+ mm->free_area_cache = addr + len;
75465+ return addr;
75466 }
75467 #endif
75468
75469 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
75470 {
75471+
75472+#ifdef CONFIG_PAX_SEGMEXEC
75473+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
75474+ return;
75475+#endif
75476+
75477 /*
75478 * Is this a new hole at the lowest possible address?
75479 */
75480- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
75481+ if (addr >= mm->mmap_base && addr < mm->free_area_cache)
75482 mm->free_area_cache = addr;
75483 }
75484
75485@@ -1491,7 +1708,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
75486 {
75487 struct vm_area_struct *vma;
75488 struct mm_struct *mm = current->mm;
75489- unsigned long addr = addr0, start_addr;
75490+ unsigned long base = mm->mmap_base, addr = addr0, start_addr;
75491
75492 /* requested length too big for entire address space */
75493 if (len > TASK_SIZE)
75494@@ -1500,13 +1717,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
75495 if (flags & MAP_FIXED)
75496 return addr;
75497
75498+#ifdef CONFIG_PAX_RANDMMAP
75499+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
75500+#endif
75501+
75502 /* requesting a specific address */
75503 if (addr) {
75504 addr = PAGE_ALIGN(addr);
75505- vma = find_vma(mm, addr);
75506- if (TASK_SIZE - len >= addr &&
75507- (!vma || addr + len <= vma->vm_start))
75508- return addr;
75509+ if (TASK_SIZE - len >= addr) {
75510+ vma = find_vma(mm, addr);
75511+ if (check_heap_stack_gap(vma, addr, len))
75512+ return addr;
75513+ }
75514 }
75515
75516 /* check if free_area_cache is useful for us */
75517@@ -1530,7 +1752,7 @@ try_again:
75518 * return with success:
75519 */
75520 vma = find_vma(mm, addr);
75521- if (!vma || addr+len <= vma->vm_start)
75522+ if (check_heap_stack_gap(vma, addr, len))
75523 /* remember the address as a hint for next time */
75524 return (mm->free_area_cache = addr);
75525
75526@@ -1539,8 +1761,8 @@ try_again:
75527 mm->cached_hole_size = vma->vm_start - addr;
75528
75529 /* try just below the current vma->vm_start */
75530- addr = vma->vm_start-len;
75531- } while (len < vma->vm_start);
75532+ addr = skip_heap_stack_gap(vma, len);
75533+ } while (!IS_ERR_VALUE(addr));
75534
75535 fail:
75536 /*
75537@@ -1563,13 +1785,21 @@ fail:
75538 * can happen with large stack limits and large mmap()
75539 * allocations.
75540 */
75541+ mm->mmap_base = TASK_UNMAPPED_BASE;
75542+
75543+#ifdef CONFIG_PAX_RANDMMAP
75544+ if (mm->pax_flags & MF_PAX_RANDMMAP)
75545+ mm->mmap_base += mm->delta_mmap;
75546+#endif
75547+
75548+ mm->free_area_cache = mm->mmap_base;
75549 mm->cached_hole_size = ~0UL;
75550- mm->free_area_cache = TASK_UNMAPPED_BASE;
75551 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
75552 /*
75553 * Restore the topdown base:
75554 */
75555- mm->free_area_cache = mm->mmap_base;
75556+ mm->mmap_base = base;
75557+ mm->free_area_cache = base;
75558 mm->cached_hole_size = ~0UL;
75559
75560 return addr;
75561@@ -1578,6 +1808,12 @@ fail:
75562
75563 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
75564 {
75565+
75566+#ifdef CONFIG_PAX_SEGMEXEC
75567+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
75568+ return;
75569+#endif
75570+
75571 /*
75572 * Is this a new hole at the highest possible address?
75573 */
75574@@ -1585,8 +1821,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
75575 mm->free_area_cache = addr;
75576
75577 /* dont allow allocations above current base */
75578- if (mm->free_area_cache > mm->mmap_base)
75579+ if (mm->free_area_cache > mm->mmap_base) {
75580 mm->free_area_cache = mm->mmap_base;
75581+ mm->cached_hole_size = ~0UL;
75582+ }
75583 }
75584
75585 unsigned long
75586@@ -1685,6 +1923,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
75587 return vma;
75588 }
75589
75590+#ifdef CONFIG_PAX_SEGMEXEC
75591+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
75592+{
75593+ struct vm_area_struct *vma_m;
75594+
75595+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
75596+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
75597+ BUG_ON(vma->vm_mirror);
75598+ return NULL;
75599+ }
75600+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
75601+ vma_m = vma->vm_mirror;
75602+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
75603+ BUG_ON(vma->vm_file != vma_m->vm_file);
75604+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
75605+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
75606+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
75607+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
75608+ return vma_m;
75609+}
75610+#endif
75611+
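/*
 * Under SEGMEXEC the address space is split in half: every executable
 * mapping exists once as a data view in the lower half and once as a
 * code view SEGMEXEC_TASK_SIZE higher, linked through vm_mirror; the
 * BUG_ON()s above assert that the two views stay structurally
 * identical. A loose userspace analogue of such mirroring, one memfd
 * mapped at two addresses (illustrative only, not the kernel
 * mechanism itself):
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = memfd_create("mirror", 0);
	if (fd < 0 || ftruncate(fd, 4096) < 0)
		return 1;

	char *lo = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	char *hi = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	if (lo == MAP_FAILED || hi == MAP_FAILED)
		return 1;

	strcpy(lo, "written through the low view");
	printf("high view reads: %s\n", hi);	/* same pages, two addresses */
	return 0;
}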
75612 /*
75613 * Verify that the stack growth is acceptable and
75614 * update accounting. This is shared with both the
75615@@ -1701,6 +1961,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
75616 return -ENOMEM;
75617
75618 /* Stack limit test */
75619+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
75620 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
75621 return -ENOMEM;
75622
75623@@ -1711,6 +1972,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
75624 locked = mm->locked_vm + grow;
75625 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
75626 limit >>= PAGE_SHIFT;
75627+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
75628 if (locked > limit && !capable(CAP_IPC_LOCK))
75629 return -ENOMEM;
75630 }
75631@@ -1740,37 +2002,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
75632 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
75633 * vma is the last one with address > vma->vm_end. Have to extend vma.
75634 */
75635+#ifndef CONFIG_IA64
75636+static
75637+#endif
75638 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
75639 {
75640 int error;
75641+ bool locknext;
75642
75643 if (!(vma->vm_flags & VM_GROWSUP))
75644 return -EFAULT;
75645
75646+ /* Also guard against wrapping around to address 0. */
75647+ if (address < PAGE_ALIGN(address+1))
75648+ address = PAGE_ALIGN(address+1);
75649+ else
75650+ return -ENOMEM;
75651+
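/*
 * The guard above hoists the wrap check before any locking:
 * PAGE_ALIGN(address + 1) is smaller than address exactly when
 * rounding up would wrap past the top of the address space.
 * Standalone check of that identity (4 KiB pages assumed):
 */
#include <limits.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long ok = 0x7fffff000000UL;
	unsigned long edge = ULONG_MAX - 100;	/* within a page of the top */

	printf("normal: %d\n", ok < PAGE_ALIGN(ok + 1));	/* 1: no wrap */
	printf("edge:   %d\n", edge < PAGE_ALIGN(edge + 1));	/* 0: wrapped */
	return 0;
}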
75652 /*
75653 * We must make sure the anon_vma is allocated
75654 * so that the anon_vma locking is not a noop.
75655 */
75656 if (unlikely(anon_vma_prepare(vma)))
75657 return -ENOMEM;
75658+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
75659+ if (locknext && anon_vma_prepare(vma->vm_next))
75660+ return -ENOMEM;
75661 vma_lock_anon_vma(vma);
75662+ if (locknext)
75663+ vma_lock_anon_vma(vma->vm_next);
75664
75665 /*
75666 * vma->vm_start/vm_end cannot change under us because the caller
75667 * is required to hold the mmap_sem in read mode. We need the
75668- * anon_vma lock to serialize against concurrent expand_stacks.
75669- * Also guard against wrapping around to address 0.
75670+ * anon_vma locks to serialize against concurrent expand_stacks
75671+ * and expand_upwards.
75672 */
75673- if (address < PAGE_ALIGN(address+4))
75674- address = PAGE_ALIGN(address+4);
75675- else {
75676- vma_unlock_anon_vma(vma);
75677- return -ENOMEM;
75678- }
75679 error = 0;
75680
75681 /* Somebody else might have raced and expanded it already */
75682- if (address > vma->vm_end) {
75683+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
75684+ error = -ENOMEM;
75685+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
75686 unsigned long size, grow;
75687
75688 size = address - vma->vm_start;
75689@@ -1787,6 +2060,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
75690 }
75691 }
75692 }
75693+ if (locknext)
75694+ vma_unlock_anon_vma(vma->vm_next);
75695 vma_unlock_anon_vma(vma);
75696 khugepaged_enter_vma_merge(vma);
75697 validate_mm(vma->vm_mm);
75698@@ -1801,6 +2076,8 @@ int expand_downwards(struct vm_area_struct *vma,
75699 unsigned long address)
75700 {
75701 int error;
75702+ bool lockprev = false;
75703+ struct vm_area_struct *prev;
75704
75705 /*
75706 * We must make sure the anon_vma is allocated
75707@@ -1814,6 +2091,15 @@ int expand_downwards(struct vm_area_struct *vma,
75708 if (error)
75709 return error;
75710
75711+ prev = vma->vm_prev;
75712+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
75713+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
75714+#endif
75715+ if (lockprev && anon_vma_prepare(prev))
75716+ return -ENOMEM;
75717+ if (lockprev)
75718+ vma_lock_anon_vma(prev);
75719+
75720 vma_lock_anon_vma(vma);
75721
75722 /*
75723@@ -1823,9 +2109,17 @@ int expand_downwards(struct vm_area_struct *vma,
75724 */
75725
75726 /* Somebody else might have raced and expanded it already */
75727- if (address < vma->vm_start) {
75728+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
75729+ error = -ENOMEM;
75730+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
75731 unsigned long size, grow;
75732
75733+#ifdef CONFIG_PAX_SEGMEXEC
75734+ struct vm_area_struct *vma_m;
75735+
75736+ vma_m = pax_find_mirror_vma(vma);
75737+#endif
75738+
75739 size = vma->vm_end - address;
75740 grow = (vma->vm_start - address) >> PAGE_SHIFT;
75741
75742@@ -1837,6 +2131,17 @@ int expand_downwards(struct vm_area_struct *vma,
75743 vma->vm_start = address;
75744 vma->vm_pgoff -= grow;
75745 anon_vma_interval_tree_post_update_vma(vma);
75746+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
75747+
75748+#ifdef CONFIG_PAX_SEGMEXEC
75749+ if (vma_m) {
75750+ anon_vma_interval_tree_pre_update_vma(vma_m);
75751+ vma_m->vm_start -= grow << PAGE_SHIFT;
75752+ vma_m->vm_pgoff -= grow;
75753+ anon_vma_interval_tree_post_update_vma(vma_m);
75754+ }
75755+#endif
75756+
75757 perf_event_mmap(vma);
75758 }
75759 }
75760@@ -1914,6 +2219,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
75761 do {
75762 long nrpages = vma_pages(vma);
75763
75764+#ifdef CONFIG_PAX_SEGMEXEC
75765+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
75766+ vma = remove_vma(vma);
75767+ continue;
75768+ }
75769+#endif
75770+
75771 if (vma->vm_flags & VM_ACCOUNT)
75772 nr_accounted += nrpages;
75773 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
75774@@ -1959,6 +2271,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
75775 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
75776 vma->vm_prev = NULL;
75777 do {
75778+
75779+#ifdef CONFIG_PAX_SEGMEXEC
75780+ if (vma->vm_mirror) {
75781+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
75782+ vma->vm_mirror->vm_mirror = NULL;
75783+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
75784+ vma->vm_mirror = NULL;
75785+ }
75786+#endif
75787+
75788 rb_erase(&vma->vm_rb, &mm->mm_rb);
75789 mm->map_count--;
75790 tail_vma = vma;
75791@@ -1987,14 +2309,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
75792 struct vm_area_struct *new;
75793 int err = -ENOMEM;
75794
75795+#ifdef CONFIG_PAX_SEGMEXEC
75796+ struct vm_area_struct *vma_m, *new_m = NULL;
75797+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
75798+#endif
75799+
75800 if (is_vm_hugetlb_page(vma) && (addr &
75801 ~(huge_page_mask(hstate_vma(vma)))))
75802 return -EINVAL;
75803
75804+#ifdef CONFIG_PAX_SEGMEXEC
75805+ vma_m = pax_find_mirror_vma(vma);
75806+#endif
75807+
75808 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
75809 if (!new)
75810 goto out_err;
75811
75812+#ifdef CONFIG_PAX_SEGMEXEC
75813+ if (vma_m) {
75814+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
75815+ if (!new_m) {
75816+ kmem_cache_free(vm_area_cachep, new);
75817+ goto out_err;
75818+ }
75819+ }
75820+#endif
75821+
75822 /* most fields are the same, copy all, and then fixup */
75823 *new = *vma;
75824
75825@@ -2007,6 +2348,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
75826 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
75827 }
75828
75829+#ifdef CONFIG_PAX_SEGMEXEC
75830+ if (vma_m) {
75831+ *new_m = *vma_m;
75832+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
75833+ new_m->vm_mirror = new;
75834+ new->vm_mirror = new_m;
75835+
75836+ if (new_below)
75837+ new_m->vm_end = addr_m;
75838+ else {
75839+ new_m->vm_start = addr_m;
75840+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
75841+ }
75842+ }
75843+#endif
75844+
75845 pol = mpol_dup(vma_policy(vma));
75846 if (IS_ERR(pol)) {
75847 err = PTR_ERR(pol);
75848@@ -2029,6 +2386,36 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
75849 else
75850 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
75851
75852+#ifdef CONFIG_PAX_SEGMEXEC
75853+ if (!err && vma_m) {
75854+ if (anon_vma_clone(new_m, vma_m))
75855+ goto out_free_mpol;
75856+
75857+ mpol_get(pol);
75858+ vma_set_policy(new_m, pol);
75859+
75860+ if (new_m->vm_file)
75861+ get_file(new_m->vm_file);
75862+
75863+ if (new_m->vm_ops && new_m->vm_ops->open)
75864+ new_m->vm_ops->open(new_m);
75865+
75866+ if (new_below)
75867+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
75868+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
75869+ else
75870+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
75871+
75872+ if (err) {
75873+ if (new_m->vm_ops && new_m->vm_ops->close)
75874+ new_m->vm_ops->close(new_m);
75875+ if (new_m->vm_file)
75876+ fput(new_m->vm_file);
75877+ mpol_put(pol);
75878+ }
75879+ }
75880+#endif
75881+
75882 /* Success. */
75883 if (!err)
75884 return 0;
75885@@ -2038,10 +2425,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
75886 new->vm_ops->close(new);
75887 if (new->vm_file)
75888 fput(new->vm_file);
75889- unlink_anon_vmas(new);
75890 out_free_mpol:
75891 mpol_put(pol);
75892 out_free_vma:
75893+
75894+#ifdef CONFIG_PAX_SEGMEXEC
75895+ if (new_m) {
75896+ unlink_anon_vmas(new_m);
75897+ kmem_cache_free(vm_area_cachep, new_m);
75898+ }
75899+#endif
75900+
75901+ unlink_anon_vmas(new);
75902 kmem_cache_free(vm_area_cachep, new);
75903 out_err:
75904 return err;
75905@@ -2054,6 +2449,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
75906 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
75907 unsigned long addr, int new_below)
75908 {
75909+
75910+#ifdef CONFIG_PAX_SEGMEXEC
75911+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
75912+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
75913+ if (mm->map_count >= sysctl_max_map_count-1)
75914+ return -ENOMEM;
75915+ } else
75916+#endif
75917+
75918 if (mm->map_count >= sysctl_max_map_count)
75919 return -ENOMEM;
75920
75921@@ -2065,11 +2469,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
75922 * work. This now handles partial unmappings.
75923 * Jeremy Fitzhardinge <jeremy@goop.org>
75924 */
75925+#ifdef CONFIG_PAX_SEGMEXEC
75926 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
75927 {
75928+ int ret = __do_munmap(mm, start, len);
75929+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
75930+ return ret;
75931+
75932+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
75933+}
75934+
75935+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
75936+#else
75937+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
75938+#endif
75939+{
75940 unsigned long end;
75941 struct vm_area_struct *vma, *prev, *last;
75942
75943+ /*
75944+ * mm->mmap_sem is required to protect against another thread
75945+ * changing the mappings in case we sleep.
75946+ */
75947+ verify_mm_writelocked(mm);
75948+
75949 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
75950 return -EINVAL;
75951
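The wrapper pattern above is how SEGMEXEC keeps the two halves of the address space coherent: perform the unmap on the requested range, and if that succeeded and the mm is SEGMEXEC, repeat it on the mirror range one half-space higher. The shape of that control flow as a hedged sketch; the constant and the callee are stand-ins, not kernel values:

#include <stdio.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL	/* assumption: half of 3 GiB x86-32 userland */

static int apply_range(unsigned long start, unsigned long len)
{
	printf("unmap [%#lx, %#lx)\n", start, start + len);
	return 0;	/* stand-in for __do_munmap() */
}

static int apply_mirrored(unsigned long start, unsigned long len, int segmexec)
{
	int ret = apply_range(start, len);
	if (ret || !segmexec)
		return ret;
	return apply_range(start + SEGMEXEC_TASK_SIZE, len);
}

int main(void)
{
	return apply_mirrored(0x10000000UL, 0x1000UL, 1);
}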
75952@@ -2144,6 +2567,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
75953 /* Fix up all other VM information */
75954 remove_vma_list(mm, vma);
75955
75956+ track_exec_limit(mm, start, end, 0UL);
75957+
75958 return 0;
75959 }
75960
75961@@ -2152,6 +2577,13 @@ int vm_munmap(unsigned long start, size_t len)
75962 int ret;
75963 struct mm_struct *mm = current->mm;
75964
75965+
75966+#ifdef CONFIG_PAX_SEGMEXEC
75967+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
75968+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
75969+ return -EINVAL;
75970+#endif
75971+
75972 down_write(&mm->mmap_sem);
75973 ret = do_munmap(mm, start, len);
75974 up_write(&mm->mmap_sem);
75975@@ -2165,16 +2597,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
75976 return vm_munmap(addr, len);
75977 }
75978
75979-static inline void verify_mm_writelocked(struct mm_struct *mm)
75980-{
75981-#ifdef CONFIG_DEBUG_VM
75982- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
75983- WARN_ON(1);
75984- up_read(&mm->mmap_sem);
75985- }
75986-#endif
75987-}
75988-
75989 /*
75990 * this is really a simplified "do_mmap". it only handles
75991 * anonymous maps. eventually we may be able to do some
75992@@ -2188,6 +2610,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
75993 struct rb_node ** rb_link, * rb_parent;
75994 pgoff_t pgoff = addr >> PAGE_SHIFT;
75995 int error;
75996+ unsigned long charged;
75997
75998 len = PAGE_ALIGN(len);
75999 if (!len)
76000@@ -2195,16 +2618,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
76001
76002 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
76003
76004+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
76005+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
76006+ flags &= ~VM_EXEC;
76007+
76008+#ifdef CONFIG_PAX_MPROTECT
76009+ if (mm->pax_flags & MF_PAX_MPROTECT)
76010+ flags &= ~VM_MAYEXEC;
76011+#endif
76012+
76013+ }
76014+#endif
76015+
76016 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
76017 if (error & ~PAGE_MASK)
76018 return error;
76019
76020+ charged = len >> PAGE_SHIFT;
76021+
76022 /*
76023 * mlock MCL_FUTURE?
76024 */
76025 if (mm->def_flags & VM_LOCKED) {
76026 unsigned long locked, lock_limit;
76027- locked = len >> PAGE_SHIFT;
76028+ locked = charged;
76029 locked += mm->locked_vm;
76030 lock_limit = rlimit(RLIMIT_MEMLOCK);
76031 lock_limit >>= PAGE_SHIFT;
76032@@ -2221,21 +2658,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
76033 /*
76034 * Clear old maps. this also does some error checking for us
76035 */
76036- munmap_back:
76037 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
76038 if (do_munmap(mm, addr, len))
76039 return -ENOMEM;
76040- goto munmap_back;
76041+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
76042 }
76043
76044 /* Check against address space limits *after* clearing old maps... */
76045- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
76046+ if (!may_expand_vm(mm, charged))
76047 return -ENOMEM;
76048
76049 if (mm->map_count > sysctl_max_map_count)
76050 return -ENOMEM;
76051
76052- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
76053+ if (security_vm_enough_memory_mm(mm, charged))
76054 return -ENOMEM;
76055
76056 /* Can we just expand an old private anonymous mapping? */
76057@@ -2249,7 +2685,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
76058 */
76059 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
76060 if (!vma) {
76061- vm_unacct_memory(len >> PAGE_SHIFT);
76062+ vm_unacct_memory(charged);
76063 return -ENOMEM;
76064 }
76065
76066@@ -2263,11 +2699,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
76067 vma_link(mm, vma, prev, rb_link, rb_parent);
76068 out:
76069 perf_event_mmap(vma);
76070- mm->total_vm += len >> PAGE_SHIFT;
76071+ mm->total_vm += charged;
76072 if (flags & VM_LOCKED) {
76073 if (!mlock_vma_pages_range(vma, addr, addr + len))
76074- mm->locked_vm += (len >> PAGE_SHIFT);
76075+ mm->locked_vm += charged;
76076 }
76077+ track_exec_limit(mm, addr, addr + len, flags);
76078 return addr;
76079 }
76080
76081@@ -2325,6 +2762,7 @@ void exit_mmap(struct mm_struct *mm)
76082 while (vma) {
76083 if (vma->vm_flags & VM_ACCOUNT)
76084 nr_accounted += vma_pages(vma);
76085+ vma->vm_mirror = NULL;
76086 vma = remove_vma(vma);
76087 }
76088 vm_unacct_memory(nr_accounted);
76089@@ -2341,6 +2779,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
76090 struct vm_area_struct *prev;
76091 struct rb_node **rb_link, *rb_parent;
76092
76093+#ifdef CONFIG_PAX_SEGMEXEC
76094+ struct vm_area_struct *vma_m = NULL;
76095+#endif
76096+
76097+ if (security_mmap_addr(vma->vm_start))
76098+ return -EPERM;
76099+
76100 /*
76101 * The vm_pgoff of a purely anonymous vma should be irrelevant
76102 * until its first write fault, when page's anon_vma and index
76103@@ -2364,7 +2809,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
76104 security_vm_enough_memory_mm(mm, vma_pages(vma)))
76105 return -ENOMEM;
76106
76107+#ifdef CONFIG_PAX_SEGMEXEC
76108+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
76109+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
76110+ if (!vma_m)
76111+ return -ENOMEM;
76112+ }
76113+#endif
76114+
76115 vma_link(mm, vma, prev, rb_link, rb_parent);
76116+
76117+#ifdef CONFIG_PAX_SEGMEXEC
76118+ if (vma_m)
76119+ BUG_ON(pax_mirror_vma(vma_m, vma));
76120+#endif
76121+
76122 return 0;
76123 }
76124
76125@@ -2384,6 +2843,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
76126 struct mempolicy *pol;
76127 bool faulted_in_anon_vma = true;
76128
76129+ BUG_ON(vma->vm_mirror);
76130+
76131 /*
76132 * If anonymous vma has not yet been faulted, update new pgoff
76133 * to match new location, to increase its chance of merging.
76134@@ -2450,6 +2911,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
76135 return NULL;
76136 }
76137
76138+#ifdef CONFIG_PAX_SEGMEXEC
76139+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
76140+{
76141+ struct vm_area_struct *prev_m;
76142+ struct rb_node **rb_link_m, *rb_parent_m;
76143+ struct mempolicy *pol_m;
76144+
76145+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
76146+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
76147+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
76148+ *vma_m = *vma;
76149+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
76150+ if (anon_vma_clone(vma_m, vma))
76151+ return -ENOMEM;
76152+ pol_m = vma_policy(vma_m);
76153+ mpol_get(pol_m);
76154+ vma_set_policy(vma_m, pol_m);
76155+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
76156+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
76157+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
76158+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
76159+ if (vma_m->vm_file)
76160+ get_file(vma_m->vm_file);
76161+ if (vma_m->vm_ops && vma_m->vm_ops->open)
76162+ vma_m->vm_ops->open(vma_m);
76163+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
76164+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
76165+ vma_m->vm_mirror = vma;
76166+ vma->vm_mirror = vma_m;
76167+ return 0;
76168+}
76169+#endif
76170+
76171 /*
76172 * Return true if the calling process may expand its vm space by the passed
76173 * number of pages
76174@@ -2461,6 +2955,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
76175
76176 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
76177
76178+#ifdef CONFIG_PAX_RANDMMAP
76179+ if (mm->pax_flags & MF_PAX_RANDMMAP)
76180+ cur -= mm->brk_gap;
76181+#endif
76182+
76183+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
76184 if (cur + npages > lim)
76185 return 0;
76186 return 1;
76187@@ -2531,6 +3031,22 @@ int install_special_mapping(struct mm_struct *mm,
76188 vma->vm_start = addr;
76189 vma->vm_end = addr + len;
76190
76191+#ifdef CONFIG_PAX_MPROTECT
76192+ if (mm->pax_flags & MF_PAX_MPROTECT) {
76193+#ifndef CONFIG_PAX_MPROTECT_COMPAT
76194+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
76195+ return -EPERM;
76196+ if (!(vm_flags & VM_EXEC))
76197+ vm_flags &= ~VM_MAYEXEC;
76198+#else
76199+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
76200+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
76201+#endif
76202+ else
76203+ vm_flags &= ~VM_MAYWRITE;
76204+ }
76205+#endif
76206+
76207 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
76208 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
76209
76210diff --git a/mm/mprotect.c b/mm/mprotect.c
76211index a409926..8b32e6d 100644
76212--- a/mm/mprotect.c
76213+++ b/mm/mprotect.c
76214@@ -23,10 +23,17 @@
76215 #include <linux/mmu_notifier.h>
76216 #include <linux/migrate.h>
76217 #include <linux/perf_event.h>
76218+
76219+#ifdef CONFIG_PAX_MPROTECT
76220+#include <linux/elf.h>
76221+#include <linux/binfmts.h>
76222+#endif
76223+
76224 #include <asm/uaccess.h>
76225 #include <asm/pgtable.h>
76226 #include <asm/cacheflush.h>
76227 #include <asm/tlbflush.h>
76228+#include <asm/mmu_context.h>
76229
76230 #ifndef pgprot_modify
76231 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
76232@@ -141,6 +148,48 @@ static void change_protection(struct vm_area_struct *vma,
76233 flush_tlb_range(vma, start, end);
76234 }
76235
76236+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
76237+/* called while holding the mmap semaphore for writing, except during stack expansion */
76238+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
76239+{
76240+ unsigned long oldlimit, newlimit = 0UL;
76241+
76242+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
76243+ return;
76244+
76245+ spin_lock(&mm->page_table_lock);
76246+ oldlimit = mm->context.user_cs_limit;
76247+ if ((prot & VM_EXEC) && oldlimit < end)
76248+ /* USER_CS limit moved up */
76249+ newlimit = end;
76250+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
76251+ /* USER_CS limit moved down */
76252+ newlimit = start;
76253+
76254+ if (newlimit) {
76255+ mm->context.user_cs_limit = newlimit;
76256+
76257+#ifdef CONFIG_SMP
76258+ wmb();
76259+ cpus_clear(mm->context.cpu_user_cs_mask);
76260+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
76261+#endif
76262+
76263+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
76264+ }
76265+ spin_unlock(&mm->page_table_lock);
76266+ if (newlimit == end) {
76267+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
76268+
76269+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
76270+ if (is_vm_hugetlb_page(vma))
76271+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
76272+ else
76273+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
76274+ }
76275+}
76276+#endif
76277+
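/*
 * Heart of x86-32 PAGEEXEC on CPUs without hardware NX: the user code
 * segment limit is pinned to the highest executable byte, so
 * instruction fetches beyond it fault. The limit moves in exactly two
 * cases, captured below as a standalone sketch of the decision above;
 * locking and the per-CPU segment reload are omitted, and the VM_EXEC
 * value is an assumption.
 */
#include <stdio.h>

#define VM_EXEC 0x4UL	/* assumption: matches the kernel's flag value */

static unsigned long new_cs_limit(unsigned long oldlimit, unsigned long start,
				  unsigned long end, unsigned long prot)
{
	if ((prot & VM_EXEC) && oldlimit < end)
		return end;	/* executable range appeared above the limit */
	if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
		return start;	/* the range holding the limit lost exec */
	return 0;		/* 0 means "limit unchanged" */
}

int main(void)
{
	printf("%#lx\n", new_cs_limit(0x8000000, 0x8000000, 0x8001000, VM_EXEC));
	printf("%#lx\n", new_cs_limit(0x8001000, 0x8000000, 0x8001000, 0));
	return 0;
}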
76278 int
76279 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
76280 unsigned long start, unsigned long end, unsigned long newflags)
76281@@ -153,11 +202,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
76282 int error;
76283 int dirty_accountable = 0;
76284
76285+#ifdef CONFIG_PAX_SEGMEXEC
76286+ struct vm_area_struct *vma_m = NULL;
76287+ unsigned long start_m, end_m;
76288+
76289+ start_m = start + SEGMEXEC_TASK_SIZE;
76290+ end_m = end + SEGMEXEC_TASK_SIZE;
76291+#endif
76292+
76293 if (newflags == oldflags) {
76294 *pprev = vma;
76295 return 0;
76296 }
76297
76298+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
76299+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
76300+
76301+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
76302+ return -ENOMEM;
76303+
76304+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
76305+ return -ENOMEM;
76306+ }
76307+
76308 /*
76309 * If we make a private mapping writable we increase our commit;
76310 * but (without finer accounting) cannot reduce our commit if we
76311@@ -174,6 +241,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
76312 }
76313 }
76314
76315+#ifdef CONFIG_PAX_SEGMEXEC
76316+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
76317+ if (start != vma->vm_start) {
76318+ error = split_vma(mm, vma, start, 1);
76319+ if (error)
76320+ goto fail;
76321+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
76322+ *pprev = (*pprev)->vm_next;
76323+ }
76324+
76325+ if (end != vma->vm_end) {
76326+ error = split_vma(mm, vma, end, 0);
76327+ if (error)
76328+ goto fail;
76329+ }
76330+
76331+ if (pax_find_mirror_vma(vma)) {
76332+ error = __do_munmap(mm, start_m, end_m - start_m);
76333+ if (error)
76334+ goto fail;
76335+ } else {
76336+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
76337+ if (!vma_m) {
76338+ error = -ENOMEM;
76339+ goto fail;
76340+ }
76341+ vma->vm_flags = newflags;
76342+ error = pax_mirror_vma(vma_m, vma);
76343+ if (error) {
76344+ vma->vm_flags = oldflags;
76345+ goto fail;
76346+ }
76347+ }
76348+ }
76349+#endif
76350+
76351 /*
76352 * First try to merge with previous and/or next vma.
76353 */
76354@@ -204,9 +307,21 @@ success:
76355 * vm_flags and vm_page_prot are protected by the mmap_sem
76356 * held in write mode.
76357 */
76358+
76359+#ifdef CONFIG_PAX_SEGMEXEC
76360+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
76361+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
76362+#endif
76363+
76364 vma->vm_flags = newflags;
76365+
76366+#ifdef CONFIG_PAX_MPROTECT
76367+ if (mm->binfmt && mm->binfmt->handle_mprotect)
76368+ mm->binfmt->handle_mprotect(vma, newflags);
76369+#endif
76370+
76371 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
76372- vm_get_page_prot(newflags));
76373+ vm_get_page_prot(vma->vm_flags));
76374
76375 if (vma_wants_writenotify(vma)) {
76376 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
76377@@ -248,6 +363,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
76378 end = start + len;
76379 if (end <= start)
76380 return -ENOMEM;
76381+
76382+#ifdef CONFIG_PAX_SEGMEXEC
76383+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
76384+ if (end > SEGMEXEC_TASK_SIZE)
76385+ return -EINVAL;
76386+ } else
76387+#endif
76388+
76389+ if (end > TASK_SIZE)
76390+ return -EINVAL;
76391+
76392 if (!arch_validate_prot(prot))
76393 return -EINVAL;
76394
76395@@ -255,7 +381,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
76396 /*
76397 * Does the application expect PROT_READ to imply PROT_EXEC:
76398 */
76399- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
76400+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
76401 prot |= PROT_EXEC;
76402
76403 vm_flags = calc_vm_prot_bits(prot);
76404@@ -288,6 +414,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
76405 if (start > vma->vm_start)
76406 prev = vma;
76407
76408+#ifdef CONFIG_PAX_MPROTECT
76409+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
76410+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
76411+#endif
76412+
76413 for (nstart = start ; ; ) {
76414 unsigned long newflags;
76415
76416@@ -297,6 +428,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
76417
76418 /* newflags >> 4 shift VM_MAY% in place of VM_% */
76419 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
76420+ if (prot & (PROT_WRITE | PROT_EXEC))
76421+ gr_log_rwxmprotect(vma->vm_file);
76422+
76423+ error = -EACCES;
76424+ goto out;
76425+ }
76426+
76427+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
76428 error = -EACCES;
76429 goto out;
76430 }
76431@@ -311,6 +450,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
76432 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
76433 if (error)
76434 goto out;
76435+
76436+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
76437+
76438 nstart = tmp;
76439
76440 if (nstart < prev->vm_end)
76441diff --git a/mm/mremap.c b/mm/mremap.c
76442index 1b61c2d..1cc0e3c 100644
76443--- a/mm/mremap.c
76444+++ b/mm/mremap.c
76445@@ -125,6 +125,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
76446 continue;
76447 pte = ptep_get_and_clear(mm, old_addr, old_pte);
76448 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
76449+
76450+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
76451+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
76452+ pte = pte_exprotect(pte);
76453+#endif
76454+
76455 set_pte_at(mm, new_addr, new_pte, pte);
76456 }
76457
76458@@ -319,6 +325,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
76459 if (is_vm_hugetlb_page(vma))
76460 goto Einval;
76461
76462+#ifdef CONFIG_PAX_SEGMEXEC
76463+ if (pax_find_mirror_vma(vma))
76464+ goto Einval;
76465+#endif
76466+
76467 /* We can't remap across vm area boundaries */
76468 if (old_len > vma->vm_end - addr)
76469 goto Efault;
76470@@ -375,20 +386,25 @@ static unsigned long mremap_to(unsigned long addr,
76471 unsigned long ret = -EINVAL;
76472 unsigned long charged = 0;
76473 unsigned long map_flags;
76474+ unsigned long pax_task_size = TASK_SIZE;
76475
76476 if (new_addr & ~PAGE_MASK)
76477 goto out;
76478
76479- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
76480+#ifdef CONFIG_PAX_SEGMEXEC
76481+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
76482+ pax_task_size = SEGMEXEC_TASK_SIZE;
76483+#endif
76484+
76485+ pax_task_size -= PAGE_SIZE;
76486+
76487+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
76488 goto out;
76489
76490 /* Check if the location we're moving into overlaps the
76491 * old location at all, and fail if it does.
76492 */
76493- if ((new_addr <= addr) && (new_addr+new_len) > addr)
76494- goto out;
76495-
76496- if ((addr <= new_addr) && (addr+old_len) > new_addr)
76497+ if (addr + old_len > new_addr && new_addr + new_len > addr)
76498 goto out;
76499
76500 ret = do_munmap(mm, new_addr, new_len);
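The folded condition above is the standard overlap predicate for half-open intervals: [a, a+al) and [b, b+bl) intersect iff each begins before the other ends. A small self-check with illustrative values:

#include <assert.h>
#include <stdio.h>

static int overlaps(unsigned long a, unsigned long al,
		    unsigned long b, unsigned long bl)
{
	return a + al > b && b + bl > a;
}

int main(void)
{
	assert(overlaps(0x1000, 0x2000, 0x2000, 0x1000));	/* crossing ranges */
	assert(!overlaps(0x1000, 0x1000, 0x2000, 0x1000));	/* merely adjacent */
	printf("overlap predicate OK\n");
	return 0;
}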
76501@@ -456,6 +472,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
76502 struct vm_area_struct *vma;
76503 unsigned long ret = -EINVAL;
76504 unsigned long charged = 0;
76505+ unsigned long pax_task_size = TASK_SIZE;
76506
76507 down_write(&current->mm->mmap_sem);
76508
76509@@ -476,6 +493,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
76510 if (!new_len)
76511 goto out;
76512
76513+#ifdef CONFIG_PAX_SEGMEXEC
76514+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
76515+ pax_task_size = SEGMEXEC_TASK_SIZE;
76516+#endif
76517+
76518+ pax_task_size -= PAGE_SIZE;
76519+
76520+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
76521+ old_len > pax_task_size || addr > pax_task_size-old_len)
76522+ goto out;
76523+
76524 if (flags & MREMAP_FIXED) {
76525 if (flags & MREMAP_MAYMOVE)
76526 ret = mremap_to(addr, old_len, new_addr, new_len);
76527@@ -524,6 +552,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
76528 addr + new_len);
76529 }
76530 ret = addr;
76531+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
76532 goto out;
76533 }
76534 }
76535@@ -547,7 +576,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
76536 goto out;
76537 }
76538
76539+ map_flags = vma->vm_flags;
76540 ret = move_vma(vma, addr, old_len, new_len, new_addr);
76541+ if (!(ret & ~PAGE_MASK)) {
76542+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
76543+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
76544+ }
76545 }
76546 out:
76547 if (ret & ~PAGE_MASK)
76548diff --git a/mm/nommu.c b/mm/nommu.c
76549index 45131b4..c521665 100644
76550--- a/mm/nommu.c
76551+++ b/mm/nommu.c
76552@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
76553 int sysctl_overcommit_ratio = 50; /* default is 50% */
76554 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
76555 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
76556-int heap_stack_gap = 0;
76557
76558 atomic_long_t mmap_pages_allocated;
76559
76560@@ -824,15 +823,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
76561 EXPORT_SYMBOL(find_vma);
76562
76563 /*
76564- * find a VMA
76565- * - we don't extend stack VMAs under NOMMU conditions
76566- */
76567-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
76568-{
76569- return find_vma(mm, addr);
76570-}
76571-
76572-/*
76573 * expand a stack to a given address
76574 * - not supported under NOMMU conditions
76575 */
76576@@ -1540,6 +1530,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
76577
76578 /* most fields are the same, copy all, and then fixup */
76579 *new = *vma;
76580+ INIT_LIST_HEAD(&new->anon_vma_chain);
76581 *region = *vma->vm_region;
76582 new->vm_region = region;
76583
76584diff --git a/mm/page_alloc.c b/mm/page_alloc.c
76585index ceb4168..d7774f2 100644
76586--- a/mm/page_alloc.c
76587+++ b/mm/page_alloc.c
76588@@ -340,7 +340,7 @@ out:
76589 * This usage means that zero-order pages may not be compound.
76590 */
76591
76592-static void free_compound_page(struct page *page)
76593+void free_compound_page(struct page *page)
76594 {
76595 __free_pages_ok(page, compound_order(page));
76596 }
76597@@ -693,6 +693,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
76598 int i;
76599 int bad = 0;
76600
76601+#ifdef CONFIG_PAX_MEMORY_SANITIZE
76602+ unsigned long index = 1UL << order;
76603+#endif
76604+
76605 trace_mm_page_free(page, order);
76606 kmemcheck_free_shadow(page, order);
76607
76608@@ -708,6 +712,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
76609 debug_check_no_obj_freed(page_address(page),
76610 PAGE_SIZE << order);
76611 }
76612+
76613+#ifdef CONFIG_PAX_MEMORY_SANITIZE
76614+ for (; index; --index)
76615+ sanitize_highpage(page + index - 1);
76616+#endif
76617+
76618 arch_free_page(page, order);
76619 kernel_map_pages(page, 1 << order, 0);
76620
76621@@ -849,8 +859,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
76622 arch_alloc_page(page, order);
76623 kernel_map_pages(page, 1 << order, 1);
76624
76625+#ifndef CONFIG_PAX_MEMORY_SANITIZE
76626 if (gfp_flags & __GFP_ZERO)
76627 prep_zero_page(page, order, gfp_flags);
76628+#endif
76629
76630 if (order && (gfp_flags & __GFP_COMP))
76631 prep_compound_page(page, order);
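With PAX_MEMORY_SANITIZE, pages are scrubbed as they are freed rather than zeroed on allocation, so the __GFP_ZERO work above becomes redundant and stale data never sits on the free lists. A userspace analogue of the sanitize-on-free discipline; explicit_bzero() is glibc >= 2.25, and the wrapper names are invented for the sketch:

#include <stddef.h>
#include <stdlib.h>
#include <string.h>

struct sized_buf { size_t len; unsigned char data[]; };

static void *sbuf_alloc(size_t len)
{
	struct sized_buf *b = malloc(sizeof(*b) + len);
	if (!b)
		return NULL;
	b->len = len;
	return b->data;
}

static void sbuf_free(void *p)
{
	struct sized_buf *b;
	if (!p)
		return;
	b = (struct sized_buf *)((char *)p - offsetof(struct sized_buf, data));
	explicit_bzero(b->data, b->len);	/* the sanitize step */
	free(b);
}

int main(void)
{
	char *secret = sbuf_alloc(32);
	if (!secret)
		return 1;
	strcpy(secret, "ephemeral");
	sbuf_free(secret);
	return 0;
}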
76632@@ -3684,7 +3696,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
76633 unsigned long pfn;
76634
76635 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
76636+#ifdef CONFIG_X86_32
76637+ /* pfn_valid_within() caused boot failures in VMware 8
76638+ on 32-bit vanilla kernels, so use pfn_valid() here */
76639+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
76640+#else
76641 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
76642+#endif
76643 return 1;
76644 }
76645 return 0;
76646diff --git a/mm/percpu.c b/mm/percpu.c
76647index ddc5efb..f632d2c 100644
76648--- a/mm/percpu.c
76649+++ b/mm/percpu.c
76650@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
76651 static unsigned int pcpu_high_unit_cpu __read_mostly;
76652
76653 /* the address of the first chunk which starts with the kernel static area */
76654-void *pcpu_base_addr __read_mostly;
76655+void *pcpu_base_addr __read_only;
76656 EXPORT_SYMBOL_GPL(pcpu_base_addr);
76657
76658 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
76659diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
76660index 926b466..b23df53 100644
76661--- a/mm/process_vm_access.c
76662+++ b/mm/process_vm_access.c
76663@@ -13,6 +13,7 @@
76664 #include <linux/uio.h>
76665 #include <linux/sched.h>
76666 #include <linux/highmem.h>
76667+#include <linux/security.h>
76668 #include <linux/ptrace.h>
76669 #include <linux/slab.h>
76670 #include <linux/syscalls.h>
76671@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
76672 size_t iov_l_curr_offset = 0;
76673 ssize_t iov_len;
76674
76675+ return -ENOSYS; /* PaX: until properly audited */
76676+
76677 /*
76678 * Work out how many pages of struct pages we're going to need
76679 * when eventually calling get_user_pages
76680 */
76681 for (i = 0; i < riovcnt; i++) {
76682 iov_len = rvec[i].iov_len;
76683- if (iov_len > 0) {
76684- nr_pages_iov = ((unsigned long)rvec[i].iov_base
76685- + iov_len)
76686- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
76687- / PAGE_SIZE + 1;
76688- nr_pages = max(nr_pages, nr_pages_iov);
76689- }
76690+ if (iov_len <= 0)
76691+ continue;
76692+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
76693+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
76694+ nr_pages = max(nr_pages, nr_pages_iov);
76695 }
76696
76697 if (nr_pages == 0)
76698@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
76699 goto free_proc_pages;
76700 }
76701
76702+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
76703+ rc = -EPERM;
76704+ goto put_task_struct;
76705+ }
76706+
76707 mm = mm_access(task, PTRACE_MODE_ATTACH);
76708 if (!mm || IS_ERR(mm)) {
76709 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
76710diff --git a/mm/rmap.c b/mm/rmap.c
76711index 2ee1ef0..2e175ba 100644
76712--- a/mm/rmap.c
76713+++ b/mm/rmap.c
76714@@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
76715 struct anon_vma *anon_vma = vma->anon_vma;
76716 struct anon_vma_chain *avc;
76717
76718+#ifdef CONFIG_PAX_SEGMEXEC
76719+ struct anon_vma_chain *avc_m = NULL;
76720+#endif
76721+
76722 might_sleep();
76723 if (unlikely(!anon_vma)) {
76724 struct mm_struct *mm = vma->vm_mm;
76725@@ -172,6 +176,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
76726 if (!avc)
76727 goto out_enomem;
76728
76729+#ifdef CONFIG_PAX_SEGMEXEC
76730+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
76731+ if (!avc_m)
76732+ goto out_enomem_free_avc;
76733+#endif
76734+
76735 anon_vma = find_mergeable_anon_vma(vma);
76736 allocated = NULL;
76737 if (!anon_vma) {
76738@@ -185,6 +195,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
76739 /* page_table_lock to protect against threads */
76740 spin_lock(&mm->page_table_lock);
76741 if (likely(!vma->anon_vma)) {
76742+
76743+#ifdef CONFIG_PAX_SEGMEXEC
76744+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
76745+
76746+ if (vma_m) {
76747+ BUG_ON(vma_m->anon_vma);
76748+ vma_m->anon_vma = anon_vma;
76749+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
76750+ avc_m = NULL;
76751+ }
76752+#endif
76753+
76754 vma->anon_vma = anon_vma;
76755 anon_vma_chain_link(vma, avc, anon_vma);
76756 allocated = NULL;
76757@@ -195,12 +217,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
76758
76759 if (unlikely(allocated))
76760 put_anon_vma(allocated);
76761+
76762+#ifdef CONFIG_PAX_SEGMEXEC
76763+ if (unlikely(avc_m))
76764+ anon_vma_chain_free(avc_m);
76765+#endif
76766+
76767 if (unlikely(avc))
76768 anon_vma_chain_free(avc);
76769 }
76770 return 0;
76771
76772 out_enomem_free_avc:
76773+
76774+#ifdef CONFIG_PAX_SEGMEXEC
76775+ if (avc_m)
76776+ anon_vma_chain_free(avc_m);
76777+#endif
76778+
76779 anon_vma_chain_free(avc);
76780 out_enomem:
76781 return -ENOMEM;
76782@@ -236,7 +270,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
76783 * Attach the anon_vmas from src to dst.
76784 * Returns 0 on success, -ENOMEM on failure.
76785 */
76786-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
76787+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
76788 {
76789 struct anon_vma_chain *avc, *pavc;
76790 struct anon_vma *root = NULL;
76791@@ -269,7 +303,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
76792 * the corresponding VMA in the parent process is attached to.
76793 * Returns 0 on success, non-zero on failure.
76794 */
76795-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
76796+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
76797 {
76798 struct anon_vma_chain *avc;
76799 struct anon_vma *anon_vma;
76800diff --git a/mm/shmem.c b/mm/shmem.c
76801index 50c5b8f..0bc87f7 100644
76802--- a/mm/shmem.c
76803+++ b/mm/shmem.c
76804@@ -31,7 +31,7 @@
76805 #include <linux/export.h>
76806 #include <linux/swap.h>
76807
76808-static struct vfsmount *shm_mnt;
76809+struct vfsmount *shm_mnt;
76810
76811 #ifdef CONFIG_SHMEM
76812 /*
76813@@ -75,7 +75,7 @@ static struct vfsmount *shm_mnt;
76814 #define BOGO_DIRENT_SIZE 20
76815
76816 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
76817-#define SHORT_SYMLINK_LEN 128
76818+#define SHORT_SYMLINK_LEN 64
76819
76820 /*
76821 * shmem_fallocate and shmem_writepage communicate via inode->i_private
76822@@ -2112,6 +2112,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
76823 static int shmem_xattr_validate(const char *name)
76824 {
76825 struct { const char *prefix; size_t len; } arr[] = {
76826+
76827+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
76828+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
76829+#endif
76830+
76831 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
76832 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
76833 };
76834@@ -2167,6 +2172,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
76835 if (err)
76836 return err;
76837
76838+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
76839+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
76840+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
76841+ return -EOPNOTSUPP;
76842+ if (size > 8)
76843+ return -EINVAL;
76844+ }
76845+#endif
76846+
76847 return simple_xattr_set(&info->xattrs, name, value, size, flags);
76848 }
76849
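The effect of the two shmem hunks above: on tmpfs, exactly one attribute under the user. prefix is accepted, user.pax.flags, and its value is capped at 8 bytes. A hedged example of setting it via setxattr(2); the flag string "em" is illustrative, consult the PaX documentation for the letters your kernel accepts:

#include <stdio.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
	if (argc != 2) {
		fprintf(stderr, "usage: %s <file-on-tmpfs>\n", argv[0]);
		return 1;
	}
	if (setxattr(argv[1], "user.pax.flags", "em", 2, 0) < 0) {
		perror("setxattr");	/* EOPNOTSUPP for other user.* names */
		return 1;
	}
	return 0;
}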
76850@@ -2466,8 +2480,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
76851 int err = -ENOMEM;
76852
76853 /* Round up to L1_CACHE_BYTES to resist false sharing */
76854- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
76855- L1_CACHE_BYTES), GFP_KERNEL);
76856+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
76857 if (!sbinfo)
76858 return -ENOMEM;
76859
76860diff --git a/mm/slab.c b/mm/slab.c
76861index 33d3363..93c6810 100644
76862--- a/mm/slab.c
76863+++ b/mm/slab.c
76864@@ -164,7 +164,7 @@ static bool pfmemalloc_active __read_mostly;
76865
76866 /* Legal flag mask for kmem_cache_create(). */
76867 #if DEBUG
76868-# define CREATE_MASK (SLAB_RED_ZONE | \
76869+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
76870 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
76871 SLAB_CACHE_DMA | \
76872 SLAB_STORE_USER | \
76873@@ -172,7 +172,7 @@ static bool pfmemalloc_active __read_mostly;
76874 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
76875 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
76876 #else
76877-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
76878+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
76879 SLAB_CACHE_DMA | \
76880 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
76881 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
76882@@ -322,7 +322,7 @@ struct kmem_list3 {
76883 * Need this for bootstrapping a per node allocator.
76884 */
76885 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
76886-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
76887+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
76888 #define CACHE_CACHE 0
76889 #define SIZE_AC MAX_NUMNODES
76890 #define SIZE_L3 (2 * MAX_NUMNODES)
76891@@ -423,10 +423,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
76892 if ((x)->max_freeable < i) \
76893 (x)->max_freeable = i; \
76894 } while (0)
76895-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
76896-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
76897-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
76898-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
76899+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
76900+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
76901+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
76902+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
76903 #else
76904 #define STATS_INC_ACTIVE(x) do { } while (0)
76905 #define STATS_DEC_ACTIVE(x) do { } while (0)
76906@@ -534,7 +534,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
76907 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
76908 */
76909 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
76910- const struct slab *slab, void *obj)
76911+ const struct slab *slab, const void *obj)
76912 {
76913 u32 offset = (obj - slab->s_mem);
76914 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
76915@@ -555,12 +555,13 @@ EXPORT_SYMBOL(malloc_sizes);
76916 struct cache_names {
76917 char *name;
76918 char *name_dma;
76919+ char *name_usercopy;
76920 };
76921
76922 static struct cache_names __initdata cache_names[] = {
76923-#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
76924+#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)", .name_usercopy = "size-" #x "(USERCOPY)" },
76925 #include <linux/kmalloc_sizes.h>
76926- {NULL,}
76927+ {NULL}
76928 #undef CACHE
76929 };
76930
76931@@ -721,6 +722,12 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
76932 if (unlikely(gfpflags & GFP_DMA))
76933 return csizep->cs_dmacachep;
76934 #endif
76935+
76936+#ifdef CONFIG_PAX_USERCOPY_SLABS
76937+ if (unlikely(gfpflags & GFP_USERCOPY))
76938+ return csizep->cs_usercopycachep;
76939+#endif
76940+
76941 return csizep->cs_cachep;
76942 }
76943
76944@@ -1676,7 +1683,7 @@ void __init kmem_cache_init(void)
76945 sizes[INDEX_AC].cs_cachep->size = sizes[INDEX_AC].cs_size;
76946 sizes[INDEX_AC].cs_cachep->object_size = sizes[INDEX_AC].cs_size;
76947 sizes[INDEX_AC].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
76948- __kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
76949+ __kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY);
76950 list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches);
76951
76952 if (INDEX_AC != INDEX_L3) {
76953@@ -1685,7 +1692,7 @@ void __init kmem_cache_init(void)
76954 sizes[INDEX_L3].cs_cachep->size = sizes[INDEX_L3].cs_size;
76955 sizes[INDEX_L3].cs_cachep->object_size = sizes[INDEX_L3].cs_size;
76956 sizes[INDEX_L3].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
76957- __kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
76958+ __kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY);
76959 list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches);
76960 }
76961
76962@@ -1705,7 +1712,7 @@ void __init kmem_cache_init(void)
76963 sizes->cs_cachep->size = sizes->cs_size;
76964 sizes->cs_cachep->object_size = sizes->cs_size;
76965 sizes->cs_cachep->align = ARCH_KMALLOC_MINALIGN;
76966- __kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
76967+ __kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY);
76968 list_add(&sizes->cs_cachep->list, &slab_caches);
76969 }
76970 #ifdef CONFIG_ZONE_DMA
76971@@ -1718,6 +1725,17 @@ void __init kmem_cache_init(void)
76972 ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC);
76973 list_add(&sizes->cs_dmacachep->list, &slab_caches);
76974 #endif
76975+
76976+#ifdef CONFIG_PAX_USERCOPY_SLABS
76977+ sizes->cs_usercopycachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
76978+	sizes->cs_usercopycachep->name = names->name_usercopy;
76979+ sizes->cs_usercopycachep->size = sizes->cs_size;
76980+ sizes->cs_usercopycachep->object_size = sizes->cs_size;
76981+ sizes->cs_usercopycachep->align = ARCH_KMALLOC_MINALIGN;
76982+	__kmem_cache_create(sizes->cs_usercopycachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY);
76983+ list_add(&sizes->cs_usercopycachep->list, &slab_caches);
76984+#endif
76985+
76986 sizes++;
76987 names++;
76988 }
76989@@ -4405,10 +4423,10 @@ static int s_show(struct seq_file *m, void *p)
76990 }
76991 /* cpu stats */
76992 {
76993- unsigned long allochit = atomic_read(&cachep->allochit);
76994- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
76995- unsigned long freehit = atomic_read(&cachep->freehit);
76996- unsigned long freemiss = atomic_read(&cachep->freemiss);
76997+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
76998+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
76999+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
77000+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
77001
77002 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
77003 allochit, allocmiss, freehit, freemiss);
77004@@ -4667,13 +4685,71 @@ static int __init slab_proc_init(void)
77005 {
77006 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
77007 #ifdef CONFIG_DEBUG_SLAB_LEAK
77008- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
77009+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
77010 #endif
77011 return 0;
77012 }
77013 module_init(slab_proc_init);
77014 #endif
77015
77016+bool is_usercopy_object(const void *ptr)
77017+{
77018+ struct page *page;
77019+ struct kmem_cache *cachep;
77020+
77021+ if (ZERO_OR_NULL_PTR(ptr))
77022+ return false;
77023+
77024+ if (!slab_is_available())
77025+ return false;
77026+
77027+ if (!virt_addr_valid(ptr))
77028+ return false;
77029+
77030+ page = virt_to_head_page(ptr);
77031+
77032+ if (!PageSlab(page))
77033+ return false;
77034+
77035+ cachep = page->slab_cache;
77036+ return cachep->flags & SLAB_USERCOPY;
77037+}
77038+
77039+#ifdef CONFIG_PAX_USERCOPY
77040+const char *check_heap_object(const void *ptr, unsigned long n)
77041+{
77042+ struct page *page;
77043+ struct kmem_cache *cachep;
77044+ struct slab *slabp;
77045+ unsigned int objnr;
77046+ unsigned long offset;
77047+
77048+ if (ZERO_OR_NULL_PTR(ptr))
77049+ return "<null>";
77050+
77051+ if (!virt_addr_valid(ptr))
77052+ return NULL;
77053+
77054+ page = virt_to_head_page(ptr);
77055+
77056+ if (!PageSlab(page))
77057+ return NULL;
77058+
77059+ cachep = page->slab_cache;
77060+ if (!(cachep->flags & SLAB_USERCOPY))
77061+ return cachep->name;
77062+
77063+ slabp = page->slab_page;
77064+ objnr = obj_to_index(cachep, slabp, ptr);
77065+ BUG_ON(objnr >= cachep->num);
77066+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
77067+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
77068+ return NULL;
77069+
77070+ return cachep->name;
77071+}
77072+#endif
77073+
77074 /**
77075 * ksize - get the actual amount of memory allocated for a given object
77076 * @objp: Pointer to the object
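
check_heap_object() above is the SLAB backend of the PAX_USERCOPY heap check: it locates the slab object containing ptr and rejects any copy that would cross the object's end. The arithmetic reduces to the classic overflow-safe span test, shown here as a self-contained userspace model; obj_base and obj_size stand in for the values the kernel derives via obj_to_index()/index_to_obj():

#include <stdbool.h>
#include <stddef.h>

/* A copy of n bytes starting at ptr is allowed only if it stays
 * inside the object [obj_base, obj_base + obj_size). Checking
 * offset first guarantees the subtraction cannot wrap. */
static bool usercopy_span_ok(const char *obj_base, size_t obj_size,
			     const char *ptr, size_t n)
{
	size_t offset = (size_t)(ptr - obj_base);

	return offset <= obj_size && n <= obj_size - offset;
}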
77077diff --git a/mm/slab_common.c b/mm/slab_common.c
77078index 069a24e6..226a310 100644
77079--- a/mm/slab_common.c
77080+++ b/mm/slab_common.c
77081@@ -127,7 +127,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
77082 err = __kmem_cache_create(s, flags);
77083 if (!err) {
77084
77085- s->refcount = 1;
77086+ atomic_set(&s->refcount, 1);
77087 list_add(&s->list, &slab_caches);
77088
77089 } else {
77090@@ -163,8 +163,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
77091 {
77092 get_online_cpus();
77093 mutex_lock(&slab_mutex);
77094- s->refcount--;
77095- if (!s->refcount) {
77096+ if (atomic_dec_and_test(&s->refcount)) {
77097 list_del(&s->list);
77098
77099 if (!__kmem_cache_shutdown(s)) {
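
The slab_common.c hunk converts kmem_cache::refcount from a plain int to an atomic_t so the final drop is decided by a single atomic_dec_and_test() rather than a decrement-then-test under the mutex. A minimal C11 model of the same pattern; struct cache and the helper names are illustrative:

#include <stdatomic.h>
#include <stdbool.h>

struct cache {
	atomic_int refcount;
};

static void cache_get(struct cache *c)
{
	atomic_fetch_add(&c->refcount, 1);
}

/* Returns true exactly once: for the caller that drops the last
 * reference, mirroring atomic_dec_and_test() in the patch. */
static bool cache_put(struct cache *c)
{
	return atomic_fetch_sub(&c->refcount, 1) == 1;
}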
77100diff --git a/mm/slob.c b/mm/slob.c
77101index 1e921c5..1ce12c2 100644
77102--- a/mm/slob.c
77103+++ b/mm/slob.c
77104@@ -159,7 +159,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
77105 /*
77106 * Return the size of a slob block.
77107 */
77108-static slobidx_t slob_units(slob_t *s)
77109+static slobidx_t slob_units(const slob_t *s)
77110 {
77111 if (s->units > 0)
77112 return s->units;
77113@@ -169,7 +169,7 @@ static slobidx_t slob_units(slob_t *s)
77114 /*
77115 * Return the next free slob block pointer after this one.
77116 */
77117-static slob_t *slob_next(slob_t *s)
77118+static slob_t *slob_next(const slob_t *s)
77119 {
77120 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
77121 slobidx_t next;
77122@@ -184,14 +184,14 @@ static slob_t *slob_next(slob_t *s)
77123 /*
77124 * Returns true if s is the last free block in its page.
77125 */
77126-static int slob_last(slob_t *s)
77127+static int slob_last(const slob_t *s)
77128 {
77129 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
77130 }
77131
77132-static void *slob_new_pages(gfp_t gfp, int order, int node)
77133+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
77134 {
77135- void *page;
77136+ struct page *page;
77137
77138 #ifdef CONFIG_NUMA
77139 if (node != NUMA_NO_NODE)
77140@@ -203,14 +203,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
77141 if (!page)
77142 return NULL;
77143
77144- return page_address(page);
77145+ __SetPageSlab(page);
77146+ return page;
77147 }
77148
77149-static void slob_free_pages(void *b, int order)
77150+static void slob_free_pages(struct page *sp, int order)
77151 {
77152 if (current->reclaim_state)
77153 current->reclaim_state->reclaimed_slab += 1 << order;
77154- free_pages((unsigned long)b, order);
77155+ __ClearPageSlab(sp);
77156+ reset_page_mapcount(sp);
77157+ sp->private = 0;
77158+ __free_pages(sp, order);
77159 }
77160
77161 /*
77162@@ -315,15 +319,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
77163
77164 /* Not enough space: must allocate a new page */
77165 if (!b) {
77166- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
77167- if (!b)
77168+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
77169+ if (!sp)
77170 return NULL;
77171- sp = virt_to_page(b);
77172- __SetPageSlab(sp);
77173+ b = page_address(sp);
77174
77175 spin_lock_irqsave(&slob_lock, flags);
77176 sp->units = SLOB_UNITS(PAGE_SIZE);
77177 sp->freelist = b;
77178+ sp->private = 0;
77179 INIT_LIST_HEAD(&sp->list);
77180 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
77181 set_slob_page_free(sp, slob_list);
77182@@ -361,9 +365,7 @@ static void slob_free(void *block, int size)
77183 if (slob_page_free(sp))
77184 clear_slob_page_free(sp);
77185 spin_unlock_irqrestore(&slob_lock, flags);
77186- __ClearPageSlab(sp);
77187- reset_page_mapcount(sp);
77188- slob_free_pages(b, 0);
77189+ slob_free_pages(sp, 0);
77190 return;
77191 }
77192
77193@@ -426,11 +428,10 @@ out:
77194 */
77195
77196 static __always_inline void *
77197-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
77198+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
77199 {
77200- unsigned int *m;
77201- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
77202- void *ret;
77203+ slob_t *m;
77204+ void *ret = NULL;
77205
77206 gfp &= gfp_allowed_mask;
77207
77208@@ -444,20 +445,23 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
77209
77210 if (!m)
77211 return NULL;
77212- *m = size;
77213+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
77214+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
77215+ m[0].units = size;
77216+ m[1].units = align;
77217 ret = (void *)m + align;
77218
77219 trace_kmalloc_node(caller, ret,
77220 size, size + align, gfp, node);
77221 } else {
77222 unsigned int order = get_order(size);
77223+ struct page *page;
77224
77225 if (likely(order))
77226 gfp |= __GFP_COMP;
77227- ret = slob_new_pages(gfp, order, node);
77228- if (ret) {
77229- struct page *page;
77230- page = virt_to_page(ret);
77231+ page = slob_new_pages(gfp, order, node);
77232+ if (page) {
77233+ ret = page_address(page);
77234 page->private = size;
77235 }
77236
77237@@ -465,7 +469,17 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
77238 size, PAGE_SIZE << order, gfp, node);
77239 }
77240
77241- kmemleak_alloc(ret, size, 1, gfp);
77242+ return ret;
77243+}
77244+
77245+static __always_inline void *
77246+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
77247+{
77248+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
77249+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
77250+
77251+ if (!ZERO_OR_NULL_PTR(ret))
77252+ kmemleak_alloc(ret, size, 1, gfp);
77253 return ret;
77254 }
77255
77256@@ -501,15 +515,91 @@ void kfree(const void *block)
77257 kmemleak_free(block);
77258
77259 sp = virt_to_page(block);
77260- if (PageSlab(sp)) {
77261+ VM_BUG_ON(!PageSlab(sp));
77262+ if (!sp->private) {
77263 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
77264- unsigned int *m = (unsigned int *)(block - align);
77265- slob_free(m, *m + align);
77266- } else
77267+ slob_t *m = (slob_t *)(block - align);
77268+ slob_free(m, m[0].units + align);
77269+ } else {
77270+ __ClearPageSlab(sp);
77271+ reset_page_mapcount(sp);
77272+ sp->private = 0;
77273 put_page(sp);
77274+ }
77275 }
77276 EXPORT_SYMBOL(kfree);
77277
77278+bool is_usercopy_object(const void *ptr)
77279+{
77280+ if (!slab_is_available())
77281+ return false;
77282+
77283+	/* PAX: TODO */
77284+
77285+ return false;
77286+}
77287+
77288+#ifdef CONFIG_PAX_USERCOPY
77289+const char *check_heap_object(const void *ptr, unsigned long n)
77290+{
77291+ struct page *page;
77292+ const slob_t *free;
77293+ const void *base;
77294+ unsigned long flags;
77295+
77296+ if (ZERO_OR_NULL_PTR(ptr))
77297+ return "<null>";
77298+
77299+ if (!virt_addr_valid(ptr))
77300+ return NULL;
77301+
77302+ page = virt_to_head_page(ptr);
77303+ if (!PageSlab(page))
77304+ return NULL;
77305+
77306+ if (page->private) {
77307+ base = page;
77308+ if (base <= ptr && n <= page->private - (ptr - base))
77309+ return NULL;
77310+ return "<slob>";
77311+ }
77312+
77313+ /* some tricky double walking to find the chunk */
77314+ spin_lock_irqsave(&slob_lock, flags);
77315+ base = (void *)((unsigned long)ptr & PAGE_MASK);
77316+ free = page->freelist;
77317+
77318+ while (!slob_last(free) && (void *)free <= ptr) {
77319+ base = free + slob_units(free);
77320+ free = slob_next(free);
77321+ }
77322+
77323+ while (base < (void *)free) {
77324+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
77325+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
77326+ int offset;
77327+
77328+ if (ptr < base + align)
77329+ break;
77330+
77331+ offset = ptr - base - align;
77332+ if (offset >= m) {
77333+ base += size;
77334+ continue;
77335+ }
77336+
77337+ if (n > m - offset)
77338+ break;
77339+
77340+ spin_unlock_irqrestore(&slob_lock, flags);
77341+ return NULL;
77342+ }
77343+
77344+ spin_unlock_irqrestore(&slob_lock, flags);
77345+ return "<slob>";
77346+}
77347+#endif
77348+
77349 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
77350 size_t ksize(const void *block)
77351 {
77352@@ -520,10 +610,11 @@ size_t ksize(const void *block)
77353 return 0;
77354
77355 sp = virt_to_page(block);
77356- if (PageSlab(sp)) {
77357+ VM_BUG_ON(!PageSlab(sp));
77358+ if (!sp->private) {
77359 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
77360- unsigned int *m = (unsigned int *)(block - align);
77361- return SLOB_UNITS(*m) * SLOB_UNIT;
77362+ slob_t *m = (slob_t *)(block - align);
77363+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
77364 } else
77365 return sp->private;
77366 }
77367@@ -550,23 +641,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
77368
77369 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
77370 {
77371- void *b;
77372+ void *b = NULL;
77373
77374 flags &= gfp_allowed_mask;
77375
77376 lockdep_trace_alloc(flags);
77377
77378+#ifdef CONFIG_PAX_USERCOPY_SLABS
77379+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
77380+#else
77381 if (c->size < PAGE_SIZE) {
77382 b = slob_alloc(c->size, flags, c->align, node);
77383 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
77384 SLOB_UNITS(c->size) * SLOB_UNIT,
77385 flags, node);
77386 } else {
77387- b = slob_new_pages(flags, get_order(c->size), node);
77388+ struct page *sp;
77389+
77390+ sp = slob_new_pages(flags, get_order(c->size), node);
77391+ if (sp) {
77392+ b = page_address(sp);
77393+ sp->private = c->size;
77394+ }
77395 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
77396 PAGE_SIZE << get_order(c->size),
77397 flags, node);
77398 }
77399+#endif
77400
77401 if (c->ctor)
77402 c->ctor(b);
77403@@ -578,10 +679,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
77404
77405 static void __kmem_cache_free(void *b, int size)
77406 {
77407- if (size < PAGE_SIZE)
77408+ struct page *sp;
77409+
77410+ sp = virt_to_page(b);
77411+ BUG_ON(!PageSlab(sp));
77412+ if (!sp->private)
77413 slob_free(b, size);
77414 else
77415- slob_free_pages(b, get_order(size));
77416+ slob_free_pages(sp, get_order(size));
77417 }
77418
77419 static void kmem_rcu_free(struct rcu_head *head)
77420@@ -594,17 +699,31 @@ static void kmem_rcu_free(struct rcu_head *head)
77421
77422 void kmem_cache_free(struct kmem_cache *c, void *b)
77423 {
77424+ int size = c->size;
77425+
77426+#ifdef CONFIG_PAX_USERCOPY_SLABS
77427+ if (size + c->align < PAGE_SIZE) {
77428+ size += c->align;
77429+ b -= c->align;
77430+ }
77431+#endif
77432+
77433 kmemleak_free_recursive(b, c->flags);
77434 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
77435 struct slob_rcu *slob_rcu;
77436- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
77437- slob_rcu->size = c->size;
77438+ slob_rcu = b + (size - sizeof(struct slob_rcu));
77439+ slob_rcu->size = size;
77440 call_rcu(&slob_rcu->head, kmem_rcu_free);
77441 } else {
77442- __kmem_cache_free(b, c->size);
77443+ __kmem_cache_free(b, size);
77444 }
77445
77446+#ifdef CONFIG_PAX_USERCOPY_SLABS
77447+ trace_kfree(_RET_IP_, b);
77448+#else
77449 trace_kmem_cache_free(_RET_IP_, b);
77450+#endif
77451+
77452 }
77453 EXPORT_SYMBOL(kmem_cache_free);
77454
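
The SLOB rework above grows the per-allocation header from one word (the size) to two slob_t units holding size and alignment, which is what lets kfree(), ksize() and the new check_heap_object() walk from a payload pointer back to its block without trusting the caller. A userspace sketch of that layout; the type and helper names are illustrative:

#include <stddef.h>

typedef struct { long units; } slob_unit;

/* m[0] records the requested size, m[1] the alignment gap between
 * the header and the payload, exactly as in the patched
 * __do_kmalloc_node_align(). */
static void stamp_header(slob_unit *m, long size, long align)
{
	m[0].units = size;
	m[1].units = align;
}

static void *payload_of(slob_unit *m, long align)
{
	return (char *)m + align;
}

static slob_unit *header_of(void *p, long align)
{
	return (slob_unit *)((char *)p - align);
}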
77455diff --git a/mm/slub.c b/mm/slub.c
77456index a0d6984..e280e5d 100644
77457--- a/mm/slub.c
77458+++ b/mm/slub.c
77459@@ -201,7 +201,7 @@ struct track {
77460
77461 enum track_item { TRACK_ALLOC, TRACK_FREE };
77462
77463-#ifdef CONFIG_SYSFS
77464+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
77465 static int sysfs_slab_add(struct kmem_cache *);
77466 static int sysfs_slab_alias(struct kmem_cache *, const char *);
77467 static void sysfs_slab_remove(struct kmem_cache *);
77468@@ -521,7 +521,7 @@ static void print_track(const char *s, struct track *t)
77469 if (!t->addr)
77470 return;
77471
77472- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
77473+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
77474 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
77475 #ifdef CONFIG_STACKTRACE
77476 {
77477@@ -2623,6 +2623,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
77478
77479 page = virt_to_head_page(x);
77480
77481+ BUG_ON(!PageSlab(page));
77482+
77483 if (kmem_cache_debug(s) && page->slab != s) {
77484 pr_err("kmem_cache_free: Wrong slab cache. %s but object"
77485 " is from %s\n", page->slab->name, s->name);
77486@@ -2663,7 +2665,7 @@ static int slub_min_objects;
77487 * Merge control. If this is set then no merging of slab caches will occur.
77488 * (Could be removed. This was introduced to pacify the merge skeptics.)
77489 */
77490-static int slub_nomerge;
77491+static int slub_nomerge = 1;
77492
77493 /*
77494  * Calculate the order of allocation given a slab object size.
77495@@ -3225,6 +3227,10 @@ EXPORT_SYMBOL(kmalloc_caches);
77496 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
77497 #endif
77498
77499+#ifdef CONFIG_PAX_USERCOPY_SLABS
77500+static struct kmem_cache *kmalloc_usercopy_caches[SLUB_PAGE_SHIFT];
77501+#endif
77502+
77503 static int __init setup_slub_min_order(char *str)
77504 {
77505 get_option(&str, &slub_min_order);
77506@@ -3342,6 +3348,13 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
77507 return kmalloc_dma_caches[index];
77508
77509 #endif
77510+
77511+#ifdef CONFIG_PAX_USERCOPY_SLABS
77512+ if (flags & SLAB_USERCOPY)
77513+ return kmalloc_usercopy_caches[index];
77514+
77515+#endif
77516+
77517 return kmalloc_caches[index];
77518 }
77519
77520@@ -3410,6 +3423,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
77521 EXPORT_SYMBOL(__kmalloc_node);
77522 #endif
77523
77524+bool is_usercopy_object(const void *ptr)
77525+{
77526+ struct page *page;
77527+ struct kmem_cache *s;
77528+
77529+ if (ZERO_OR_NULL_PTR(ptr))
77530+ return false;
77531+
77532+ if (!slab_is_available())
77533+ return false;
77534+
77535+ if (!virt_addr_valid(ptr))
77536+ return false;
77537+
77538+ page = virt_to_head_page(ptr);
77539+
77540+ if (!PageSlab(page))
77541+ return false;
77542+
77543+ s = page->slab;
77544+ return s->flags & SLAB_USERCOPY;
77545+}
77546+
77547+#ifdef CONFIG_PAX_USERCOPY
77548+const char *check_heap_object(const void *ptr, unsigned long n)
77549+{
77550+ struct page *page;
77551+ struct kmem_cache *s;
77552+ unsigned long offset;
77553+
77554+ if (ZERO_OR_NULL_PTR(ptr))
77555+ return "<null>";
77556+
77557+ if (!virt_addr_valid(ptr))
77558+ return NULL;
77559+
77560+ page = virt_to_head_page(ptr);
77561+
77562+ if (!PageSlab(page))
77563+ return NULL;
77564+
77565+ s = page->slab;
77566+ if (!(s->flags & SLAB_USERCOPY))
77567+ return s->name;
77568+
77569+ offset = (ptr - page_address(page)) % s->size;
77570+ if (offset <= s->object_size && n <= s->object_size - offset)
77571+ return NULL;
77572+
77573+ return s->name;
77574+}
77575+#endif
77576+
77577 size_t ksize(const void *object)
77578 {
77579 struct page *page;
77580@@ -3684,7 +3750,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
77581 int node;
77582
77583 list_add(&s->list, &slab_caches);
77584- s->refcount = -1;
77585+ atomic_set(&s->refcount, -1);
77586
77587 for_each_node_state(node, N_NORMAL_MEMORY) {
77588 struct kmem_cache_node *n = get_node(s, node);
77589@@ -3807,17 +3873,17 @@ void __init kmem_cache_init(void)
77590
77591 	/* Caches that are not of power-of-two size */
77592 if (KMALLOC_MIN_SIZE <= 32) {
77593- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
77594+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
77595 caches++;
77596 }
77597
77598 if (KMALLOC_MIN_SIZE <= 64) {
77599- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
77600+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
77601 caches++;
77602 }
77603
77604 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
77605- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
77606+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
77607 caches++;
77608 }
77609
77610@@ -3859,6 +3925,22 @@ void __init kmem_cache_init(void)
77611 }
77612 }
77613 #endif
77614+
77615+#ifdef CONFIG_PAX_USERCOPY_SLABS
77616+ for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
77617+ struct kmem_cache *s = kmalloc_caches[i];
77618+
77619+ if (s && s->size) {
77620+ char *name = kasprintf(GFP_NOWAIT,
77621+ "usercopy-kmalloc-%d", s->object_size);
77622+
77623+ BUG_ON(!name);
77624+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(name,
77625+ s->object_size, SLAB_USERCOPY);
77626+ }
77627+ }
77628+#endif
77629+
77630 printk(KERN_INFO
77631 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
77632 " CPUs=%d, Nodes=%d\n",
77633@@ -3885,7 +3967,7 @@ static int slab_unmergeable(struct kmem_cache *s)
77634 /*
77635 * We may have set a slab to be unmergeable during bootstrap.
77636 */
77637- if (s->refcount < 0)
77638+ if (atomic_read(&s->refcount) < 0)
77639 return 1;
77640
77641 return 0;
77642@@ -3939,7 +4021,7 @@ struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
77643
77644 s = find_mergeable(size, align, flags, name, ctor);
77645 if (s) {
77646- s->refcount++;
77647+ atomic_inc(&s->refcount);
77648 /*
77649 * Adjust the object sizes so that we clear
77650 * the complete object on kzalloc.
77651@@ -3948,7 +4030,7 @@ struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
77652 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
77653
77654 if (sysfs_slab_alias(s, name)) {
77655- s->refcount--;
77656+ atomic_dec(&s->refcount);
77657 s = NULL;
77658 }
77659 }
77660@@ -4063,7 +4145,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
77661 }
77662 #endif
77663
77664-#ifdef CONFIG_SYSFS
77665+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
77666 static int count_inuse(struct page *page)
77667 {
77668 return page->inuse;
77669@@ -4450,12 +4532,12 @@ static void resiliency_test(void)
77670 validate_slab_cache(kmalloc_caches[9]);
77671 }
77672 #else
77673-#ifdef CONFIG_SYSFS
77674+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
77675 static void resiliency_test(void) {};
77676 #endif
77677 #endif
77678
77679-#ifdef CONFIG_SYSFS
77680+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
77681 enum slab_stat_type {
77682 SL_ALL, /* All slabs */
77683 SL_PARTIAL, /* Only partially allocated slabs */
77684@@ -4699,7 +4781,7 @@ SLAB_ATTR_RO(ctor);
77685
77686 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
77687 {
77688- return sprintf(buf, "%d\n", s->refcount - 1);
77689+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
77690 }
77691 SLAB_ATTR_RO(aliases);
77692
77693@@ -5261,6 +5343,7 @@ static char *create_unique_id(struct kmem_cache *s)
77694 return name;
77695 }
77696
77697+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
77698 static int sysfs_slab_add(struct kmem_cache *s)
77699 {
77700 int err;
77701@@ -5323,6 +5406,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
77702 kobject_del(&s->kobj);
77703 kobject_put(&s->kobj);
77704 }
77705+#endif
77706
77707 /*
77708 * Need to buffer aliases during bootup until sysfs becomes
77709@@ -5336,6 +5420,7 @@ struct saved_alias {
77710
77711 static struct saved_alias *alias_list;
77712
77713+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
77714 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
77715 {
77716 struct saved_alias *al;
77717@@ -5358,6 +5443,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
77718 alias_list = al;
77719 return 0;
77720 }
77721+#endif
77722
77723 static int __init slab_sysfs_init(void)
77724 {
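
SLUB's check_heap_object() above is simpler than SLAB's because SLUB packs equal-sized objects back to back: the offset of ptr inside its object is just the distance from the slab base taken modulo the object stride. A userspace model; stride corresponds to s->size and usable to s->object_size:

#include <stdbool.h>
#include <stddef.h>

static bool slub_span_ok(const char *slab_base, size_t stride,
			 size_t usable, const char *ptr, size_t n)
{
	/* position of ptr inside its own object */
	size_t offset = (size_t)(ptr - slab_base) % stride;

	return offset <= usable && n <= usable - offset;
}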
77725diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
77726index 1b7e22a..3fcd4f3 100644
77727--- a/mm/sparse-vmemmap.c
77728+++ b/mm/sparse-vmemmap.c
77729@@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
77730 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
77731 if (!p)
77732 return NULL;
77733- pud_populate(&init_mm, pud, p);
77734+ pud_populate_kernel(&init_mm, pud, p);
77735 }
77736 return pud;
77737 }
77738@@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
77739 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
77740 if (!p)
77741 return NULL;
77742- pgd_populate(&init_mm, pgd, p);
77743+ pgd_populate_kernel(&init_mm, pgd, p);
77744 }
77745 return pgd;
77746 }
77747diff --git a/mm/swap.c b/mm/swap.c
77748index 6310dc2..3662b3f 100644
77749--- a/mm/swap.c
77750+++ b/mm/swap.c
77751@@ -30,6 +30,7 @@
77752 #include <linux/backing-dev.h>
77753 #include <linux/memcontrol.h>
77754 #include <linux/gfp.h>
77755+#include <linux/hugetlb.h>
77756
77757 #include "internal.h"
77758
77759@@ -72,6 +73,8 @@ static void __put_compound_page(struct page *page)
77760
77761 __page_cache_release(page);
77762 dtor = get_compound_page_dtor(page);
77763+ if (!PageHuge(page))
77764+ BUG_ON(dtor != free_compound_page);
77765 (*dtor)(page);
77766 }
77767
77768diff --git a/mm/swapfile.c b/mm/swapfile.c
77769index f91a255..9dcac21 100644
77770--- a/mm/swapfile.c
77771+++ b/mm/swapfile.c
77772@@ -64,7 +64,7 @@ static DEFINE_MUTEX(swapon_mutex);
77773
77774 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
77775 /* Activity counter to indicate that a swapon or swapoff has occurred */
77776-static atomic_t proc_poll_event = ATOMIC_INIT(0);
77777+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
77778
77779 static inline unsigned char swap_count(unsigned char ent)
77780 {
77781@@ -1601,7 +1601,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
77782 }
77783 filp_close(swap_file, NULL);
77784 err = 0;
77785- atomic_inc(&proc_poll_event);
77786+ atomic_inc_unchecked(&proc_poll_event);
77787 wake_up_interruptible(&proc_poll_wait);
77788
77789 out_dput:
77790@@ -1618,8 +1618,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
77791
77792 poll_wait(file, &proc_poll_wait, wait);
77793
77794- if (seq->poll_event != atomic_read(&proc_poll_event)) {
77795- seq->poll_event = atomic_read(&proc_poll_event);
77796+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
77797+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
77798 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
77799 }
77800
77801@@ -1717,7 +1717,7 @@ static int swaps_open(struct inode *inode, struct file *file)
77802 return ret;
77803
77804 seq = file->private_data;
77805- seq->poll_event = atomic_read(&proc_poll_event);
77806+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
77807 return 0;
77808 }
77809
77810@@ -2059,7 +2059,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
77811 (frontswap_map) ? "FS" : "");
77812
77813 mutex_unlock(&swapon_mutex);
77814- atomic_inc(&proc_poll_event);
77815+ atomic_inc_unchecked(&proc_poll_event);
77816 wake_up_interruptible(&proc_poll_wait);
77817
77818 if (S_ISREG(inode->i_mode))
77819diff --git a/mm/util.c b/mm/util.c
77820index dc3036c..b6c7c9d 100644
77821--- a/mm/util.c
77822+++ b/mm/util.c
77823@@ -292,6 +292,12 @@ done:
77824 void arch_pick_mmap_layout(struct mm_struct *mm)
77825 {
77826 mm->mmap_base = TASK_UNMAPPED_BASE;
77827+
77828+#ifdef CONFIG_PAX_RANDMMAP
77829+ if (mm->pax_flags & MF_PAX_RANDMMAP)
77830+ mm->mmap_base += mm->delta_mmap;
77831+#endif
77832+
77833 mm->get_unmapped_area = arch_get_unmapped_area;
77834 mm->unmap_area = arch_unmap_area;
77835 }
77836diff --git a/mm/vmalloc.c b/mm/vmalloc.c
77837index 78e0830..bc6bbd8 100644
77838--- a/mm/vmalloc.c
77839+++ b/mm/vmalloc.c
77840@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
77841
77842 pte = pte_offset_kernel(pmd, addr);
77843 do {
77844- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
77845- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
77846+
77847+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
77848+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
77849+ BUG_ON(!pte_exec(*pte));
77850+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
77851+ continue;
77852+ }
77853+#endif
77854+
77855+ {
77856+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
77857+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
77858+ }
77859 } while (pte++, addr += PAGE_SIZE, addr != end);
77860 }
77861
77862@@ -100,16 +111,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
77863 pte = pte_alloc_kernel(pmd, addr);
77864 if (!pte)
77865 return -ENOMEM;
77866+
77867+ pax_open_kernel();
77868 do {
77869 struct page *page = pages[*nr];
77870
77871- if (WARN_ON(!pte_none(*pte)))
77872+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
77873+ if (pgprot_val(prot) & _PAGE_NX)
77874+#endif
77875+
77876+ if (!pte_none(*pte)) {
77877+ pax_close_kernel();
77878+ WARN_ON(1);
77879 return -EBUSY;
77880- if (WARN_ON(!page))
77881+ }
77882+ if (!page) {
77883+ pax_close_kernel();
77884+ WARN_ON(1);
77885 return -ENOMEM;
77886+ }
77887 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
77888 (*nr)++;
77889 } while (pte++, addr += PAGE_SIZE, addr != end);
77890+ pax_close_kernel();
77891 return 0;
77892 }
77893
77894@@ -119,7 +143,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
77895 pmd_t *pmd;
77896 unsigned long next;
77897
77898- pmd = pmd_alloc(&init_mm, pud, addr);
77899+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
77900 if (!pmd)
77901 return -ENOMEM;
77902 do {
77903@@ -136,7 +160,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
77904 pud_t *pud;
77905 unsigned long next;
77906
77907- pud = pud_alloc(&init_mm, pgd, addr);
77908+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
77909 if (!pud)
77910 return -ENOMEM;
77911 do {
77912@@ -191,11 +215,20 @@ int is_vmalloc_or_module_addr(const void *x)
77913 * and fall back on vmalloc() if that fails. Others
77914 * just put it in the vmalloc space.
77915 */
77916-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
77917+#ifdef CONFIG_MODULES
77918+#ifdef MODULES_VADDR
77919 unsigned long addr = (unsigned long)x;
77920 if (addr >= MODULES_VADDR && addr < MODULES_END)
77921 return 1;
77922 #endif
77923+
77924+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
77925+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
77926+ return 1;
77927+#endif
77928+
77929+#endif
77930+
77931 return is_vmalloc_addr(x);
77932 }
77933
77934@@ -216,8 +249,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
77935
77936 if (!pgd_none(*pgd)) {
77937 pud_t *pud = pud_offset(pgd, addr);
77938+#ifdef CONFIG_X86
77939+ if (!pud_large(*pud))
77940+#endif
77941 if (!pud_none(*pud)) {
77942 pmd_t *pmd = pmd_offset(pud, addr);
77943+#ifdef CONFIG_X86
77944+ if (!pmd_large(*pmd))
77945+#endif
77946 if (!pmd_none(*pmd)) {
77947 pte_t *ptep, pte;
77948
77949@@ -329,7 +368,7 @@ static void purge_vmap_area_lazy(void);
77950 * Allocate a region of KVA of the specified size and alignment, within the
77951 * vstart and vend.
77952 */
77953-static struct vmap_area *alloc_vmap_area(unsigned long size,
77954+static __size_overflow(1) struct vmap_area *alloc_vmap_area(unsigned long size,
77955 unsigned long align,
77956 unsigned long vstart, unsigned long vend,
77957 int node, gfp_t gfp_mask)
77958@@ -1328,6 +1367,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
77959 struct vm_struct *area;
77960
77961 BUG_ON(in_interrupt());
77962+
77963+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
77964+ if (flags & VM_KERNEXEC) {
77965+ if (start != VMALLOC_START || end != VMALLOC_END)
77966+ return NULL;
77967+ start = (unsigned long)MODULES_EXEC_VADDR;
77968+ end = (unsigned long)MODULES_EXEC_END;
77969+ }
77970+#endif
77971+
77972 if (flags & VM_IOREMAP) {
77973 int bit = fls(size);
77974
77975@@ -1568,6 +1617,11 @@ void *vmap(struct page **pages, unsigned int count,
77976 if (count > totalram_pages)
77977 return NULL;
77978
77979+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
77980+ if (!(pgprot_val(prot) & _PAGE_NX))
77981+ flags |= VM_KERNEXEC;
77982+#endif
77983+
77984 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
77985 __builtin_return_address(0));
77986 if (!area)
77987@@ -1669,6 +1723,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
77988 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
77989 goto fail;
77990
77991+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
77992+ if (!(pgprot_val(prot) & _PAGE_NX))
77993+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
77994+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
77995+ else
77996+#endif
77997+
77998 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
77999 start, end, node, gfp_mask, caller);
78000 if (!area)
78001@@ -1842,10 +1903,9 @@ EXPORT_SYMBOL(vzalloc_node);
78002 * For tight control over page level allocator and protection flags
78003 * use __vmalloc() instead.
78004 */
78005-
78006 void *vmalloc_exec(unsigned long size)
78007 {
78008- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
78009+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
78010 -1, __builtin_return_address(0));
78011 }
78012
78013@@ -2136,6 +2196,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
78014 unsigned long uaddr = vma->vm_start;
78015 unsigned long usize = vma->vm_end - vma->vm_start;
78016
78017+ BUG_ON(vma->vm_mirror);
78018+
78019 if ((PAGE_SIZE-1) & (unsigned long)addr)
78020 return -EINVAL;
78021
78022@@ -2575,7 +2637,11 @@ static int s_show(struct seq_file *m, void *p)
78023 v->addr, v->addr + v->size, v->size);
78024
78025 if (v->caller)
78026+#ifdef CONFIG_GRKERNSEC_HIDESYM
78027+ seq_printf(m, " %pK", v->caller);
78028+#else
78029 seq_printf(m, " %pS", v->caller);
78030+#endif
78031
78032 if (v->nr_pages)
78033 seq_printf(m, " pages=%d", v->nr_pages);
78034diff --git a/mm/vmstat.c b/mm/vmstat.c
78035index c737057..a49753a 100644
78036--- a/mm/vmstat.c
78037+++ b/mm/vmstat.c
78038@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
78039 *
78040 * vm_stat contains the global counters
78041 */
78042-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
78043+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
78044 EXPORT_SYMBOL(vm_stat);
78045
78046 #ifdef CONFIG_SMP
78047@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
78048 v = p->vm_stat_diff[i];
78049 p->vm_stat_diff[i] = 0;
78050 local_irq_restore(flags);
78051- atomic_long_add(v, &zone->vm_stat[i]);
78052+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
78053 global_diff[i] += v;
78054 #ifdef CONFIG_NUMA
78055 /* 3 seconds idle till flush */
78056@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
78057
78058 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
78059 if (global_diff[i])
78060- atomic_long_add(global_diff[i], &vm_stat[i]);
78061+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
78062 }
78063
78064 void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
78065@@ -503,8 +503,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
78066 if (pset->vm_stat_diff[i]) {
78067 int v = pset->vm_stat_diff[i];
78068 pset->vm_stat_diff[i] = 0;
78069- atomic_long_add(v, &zone->vm_stat[i]);
78070- atomic_long_add(v, &vm_stat[i]);
78071+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
78072+ atomic_long_add_unchecked(v, &vm_stat[i]);
78073 }
78074 }
78075 #endif
78076@@ -1224,10 +1224,20 @@ static int __init setup_vmstat(void)
78077 start_cpu_timer(cpu);
78078 #endif
78079 #ifdef CONFIG_PROC_FS
78080- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
78081- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
78082- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
78083- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
78084+ {
78085+ mode_t gr_mode = S_IRUGO;
78086+#ifdef CONFIG_GRKERNSEC_PROC_ADD
78087+ gr_mode = S_IRUSR;
78088+#endif
78089+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
78090+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
78091+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
78092+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
78093+#else
78094+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
78095+#endif
78096+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
78097+ }
78098 #endif
78099 return 0;
78100 }
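
The setup_vmstat() hunk replaces the fixed S_IRUGO on the /proc statistics files with a mode computed from the grsecurity options: owner-only under GRKERNSEC_PROC_ADD, with vmstat optionally re-opened to the trusted group under GRKERNSEC_PROC_USERGROUP. A compact model of that selection, with plain ints standing in for the Kconfig symbols:

#include <sys/stat.h>

static mode_t vmstat_proc_mode(int proc_add, int usergroup)
{
	/* S_IRUGO == S_IRUSR | S_IRGRP | S_IROTH */
	mode_t mode = proc_add ? S_IRUSR : (S_IRUSR | S_IRGRP | S_IROTH);

	if (usergroup)
		mode |= S_IRGRP;	/* keep vmstat group-readable */

	return mode;
}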
78101diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
78102index ee07072..593e3fd 100644
78103--- a/net/8021q/vlan.c
78104+++ b/net/8021q/vlan.c
78105@@ -484,7 +484,7 @@ out:
78106 return NOTIFY_DONE;
78107 }
78108
78109-static struct notifier_block vlan_notifier_block __read_mostly = {
78110+static struct notifier_block vlan_notifier_block = {
78111 .notifier_call = vlan_device_event,
78112 };
78113
78114@@ -559,8 +559,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
78115 err = -EPERM;
78116 if (!capable(CAP_NET_ADMIN))
78117 break;
78118- if ((args.u.name_type >= 0) &&
78119- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
78120+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
78121 struct vlan_net *vn;
78122
78123 vn = net_generic(net, vlan_net_id);
78124diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
78125index 02efb25..41541a9 100644
78126--- a/net/9p/trans_fd.c
78127+++ b/net/9p/trans_fd.c
78128@@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
78129 oldfs = get_fs();
78130 set_fs(get_ds());
78131 /* The cast to a user pointer is valid due to the set_fs() */
78132- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
78133+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
78134 set_fs(oldfs);
78135
78136 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
78137diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
78138index 876fbe8..8bbea9f 100644
78139--- a/net/atm/atm_misc.c
78140+++ b/net/atm/atm_misc.c
78141@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
78142 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
78143 return 1;
78144 atm_return(vcc, truesize);
78145- atomic_inc(&vcc->stats->rx_drop);
78146+ atomic_inc_unchecked(&vcc->stats->rx_drop);
78147 return 0;
78148 }
78149 EXPORT_SYMBOL(atm_charge);
78150@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
78151 }
78152 }
78153 atm_return(vcc, guess);
78154- atomic_inc(&vcc->stats->rx_drop);
78155+ atomic_inc_unchecked(&vcc->stats->rx_drop);
78156 return NULL;
78157 }
78158 EXPORT_SYMBOL(atm_alloc_charge);
78159@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
78160
78161 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
78162 {
78163-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
78164+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
78165 __SONET_ITEMS
78166 #undef __HANDLE_ITEM
78167 }
78168@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
78169
78170 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
78171 {
78172-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
78173+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
78174 __SONET_ITEMS
78175 #undef __HANDLE_ITEM
78176 }
78177diff --git a/net/atm/lec.h b/net/atm/lec.h
78178index a86aff9..3a0d6f6 100644
78179--- a/net/atm/lec.h
78180+++ b/net/atm/lec.h
78181@@ -48,7 +48,7 @@ struct lane2_ops {
78182 const u8 *tlvs, u32 sizeoftlvs);
78183 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
78184 const u8 *tlvs, u32 sizeoftlvs);
78185-};
78186+} __no_const;
78187
78188 /*
78189 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
78190diff --git a/net/atm/proc.c b/net/atm/proc.c
78191index 0d020de..011c7bb 100644
78192--- a/net/atm/proc.c
78193+++ b/net/atm/proc.c
78194@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
78195 const struct k_atm_aal_stats *stats)
78196 {
78197 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
78198- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
78199- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
78200- atomic_read(&stats->rx_drop));
78201+		   atomic_read_unchecked(&stats->tx), atomic_read_unchecked(&stats->tx_err),
78202+		   atomic_read_unchecked(&stats->rx), atomic_read_unchecked(&stats->rx_err),
78203+ atomic_read_unchecked(&stats->rx_drop));
78204 }
78205
78206 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
78207diff --git a/net/atm/resources.c b/net/atm/resources.c
78208index 0447d5d..3cf4728 100644
78209--- a/net/atm/resources.c
78210+++ b/net/atm/resources.c
78211@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
78212 static void copy_aal_stats(struct k_atm_aal_stats *from,
78213 struct atm_aal_stats *to)
78214 {
78215-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
78216+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
78217 __AAL_STAT_ITEMS
78218 #undef __HANDLE_ITEM
78219 }
78220@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
78221 static void subtract_aal_stats(struct k_atm_aal_stats *from,
78222 struct atm_aal_stats *to)
78223 {
78224-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
78225+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
78226 __AAL_STAT_ITEMS
78227 #undef __HANDLE_ITEM
78228 }
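
The ATM stats helpers above rely on an X-macro: __SONET_ITEMS / __AAL_STAT_ITEMS name each counter once, and __HANDLE_ITEM is redefined around every expansion. The patch only swaps the per-item operation for its _unchecked variant, which opts these statistics out of PaX's atomic overflow detection. A self-contained C11 illustration of the pattern; the field names are illustrative:

#include <stdatomic.h>

#define STAT_ITEMS \
	__HANDLE_ITEM(tx) \
	__HANDLE_ITEM(rx) \
	__HANDLE_ITEM(rx_drop)

struct k_stats { atomic_long tx, rx, rx_drop; };
struct stats { long tx, rx, rx_drop; };

static void copy_stats(struct k_stats *from, struct stats *to)
{
#define __HANDLE_ITEM(i) to->i = atomic_load(&from->i);
	STAT_ITEMS
#undef __HANDLE_ITEM
}

static void subtract_stats(struct stats *to, struct k_stats *from)
{
#define __HANDLE_ITEM(i) atomic_fetch_sub(&from->i, to->i);
	STAT_ITEMS
#undef __HANDLE_ITEM
}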
78229diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
78230index c6fcc76..1270d14 100644
78231--- a/net/batman-adv/bat_iv_ogm.c
78232+++ b/net/batman-adv/bat_iv_ogm.c
78233@@ -62,7 +62,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
78234
78235 /* randomize initial seqno to avoid collision */
78236 get_random_bytes(&random_seqno, sizeof(random_seqno));
78237- atomic_set(&hard_iface->seqno, random_seqno);
78238+ atomic_set_unchecked(&hard_iface->seqno, random_seqno);
78239
78240 hard_iface->packet_len = BATADV_OGM_HLEN;
78241 hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
78242@@ -608,9 +608,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
78243 batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
78244
78245 /* change sequence number to network order */
78246- seqno = (uint32_t)atomic_read(&hard_iface->seqno);
78247+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->seqno);
78248 batadv_ogm_packet->seqno = htonl(seqno);
78249- atomic_inc(&hard_iface->seqno);
78250+ atomic_inc_unchecked(&hard_iface->seqno);
78251
78252 batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
78253 batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
78254@@ -1015,7 +1015,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
78255 return;
78256
78257 /* could be changed by schedule_own_packet() */
78258- if_incoming_seqno = atomic_read(&if_incoming->seqno);
78259+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
78260
78261 if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
78262 has_directlink_flag = 1;
78263diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
78264index d112fd6..686a447 100644
78265--- a/net/batman-adv/hard-interface.c
78266+++ b/net/batman-adv/hard-interface.c
78267@@ -327,7 +327,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
78268 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
78269 dev_add_pack(&hard_iface->batman_adv_ptype);
78270
78271- atomic_set(&hard_iface->frag_seqno, 1);
78272+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
78273 batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
78274 hard_iface->net_dev->name);
78275
78276@@ -450,7 +450,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
78277 /* This can't be called via a bat_priv callback because
78278 * we have no bat_priv yet.
78279 */
78280- atomic_set(&hard_iface->seqno, 1);
78281+ atomic_set_unchecked(&hard_iface->seqno, 1);
78282 hard_iface->packet_buff = NULL;
78283
78284 return hard_iface;
78285diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
78286index ce0684a..4a0cbf1 100644
78287--- a/net/batman-adv/soft-interface.c
78288+++ b/net/batman-adv/soft-interface.c
78289@@ -234,7 +234,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
78290 primary_if->net_dev->dev_addr, ETH_ALEN);
78291
78292 /* set broadcast sequence number */
78293- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
78294+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
78295 bcast_packet->seqno = htonl(seqno);
78296
78297 batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
78298@@ -427,7 +427,7 @@ struct net_device *batadv_softif_create(const char *name)
78299 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
78300
78301 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
78302- atomic_set(&bat_priv->bcast_seqno, 1);
78303+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
78304 atomic_set(&bat_priv->tt.vn, 0);
78305 atomic_set(&bat_priv->tt.local_changes, 0);
78306 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
78307diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
78308index ac1e07a..4c846e2 100644
78309--- a/net/batman-adv/types.h
78310+++ b/net/batman-adv/types.h
78311@@ -33,8 +33,8 @@ struct batadv_hard_iface {
78312 int16_t if_num;
78313 char if_status;
78314 struct net_device *net_dev;
78315- atomic_t seqno;
78316- atomic_t frag_seqno;
78317+ atomic_unchecked_t seqno;
78318+ atomic_unchecked_t frag_seqno;
78319 unsigned char *packet_buff;
78320 int packet_len;
78321 struct kobject *hardif_obj;
78322@@ -244,7 +244,7 @@ struct batadv_priv {
78323 atomic_t orig_interval; /* uint */
78324 atomic_t hop_penalty; /* uint */
78325 atomic_t log_level; /* uint */
78326- atomic_t bcast_seqno;
78327+ atomic_unchecked_t bcast_seqno;
78328 atomic_t bcast_queue_left;
78329 atomic_t batman_queue_left;
78330 char num_ifaces;
78331diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
78332index f397232..3206a33 100644
78333--- a/net/batman-adv/unicast.c
78334+++ b/net/batman-adv/unicast.c
78335@@ -272,7 +272,7 @@ int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
78336 frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
78337 frag2->flags = large_tail;
78338
78339- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
78340+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
78341 frag1->seqno = htons(seqno - 1);
78342 frag2->seqno = htons(seqno);
78343
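
batadv_frag_send_skb() above reserves two sequence numbers for a fragment pair with one atomic_add_return(), so the pair stays consecutive even when several CPUs fragment packets at once; the patch merely switches to the _unchecked counter type. A userspace model of the reservation:

#include <stdatomic.h>
#include <stdint.h>

static void assign_frag_seqnos(atomic_uint *ctr, uint16_t *s1, uint16_t *s2)
{
	/* atomic_fetch_add returns the old value; +2 yields the new
	 * value, matching atomic_add_return(2, ...) in the kernel */
	unsigned int seqno = atomic_fetch_add(ctr, 2) + 2;

	*s1 = (uint16_t)(seqno - 1);
	*s2 = (uint16_t)seqno;
}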
78344diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
78345index 07f0739..3c42e34 100644
78346--- a/net/bluetooth/hci_sock.c
78347+++ b/net/bluetooth/hci_sock.c
78348@@ -934,7 +934,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
78349 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
78350 }
78351
78352- len = min_t(unsigned int, len, sizeof(uf));
78353+ len = min((size_t)len, sizeof(uf));
78354 if (copy_from_user(&uf, optval, len)) {
78355 err = -EFAULT;
78356 break;
78357diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
78358index a91239d..d7ed533 100644
78359--- a/net/bluetooth/l2cap_core.c
78360+++ b/net/bluetooth/l2cap_core.c
78361@@ -3183,8 +3183,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
78362 break;
78363
78364 case L2CAP_CONF_RFC:
78365- if (olen == sizeof(rfc))
78366- memcpy(&rfc, (void *)val, olen);
78367+ if (olen != sizeof(rfc))
78368+ break;
78369+
78370+ memcpy(&rfc, (void *)val, olen);
78371
78372 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
78373 rfc.mode != chan->mode)
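
The l2cap_parse_conf_rsp() change above is a hardening fix in its own right: the old code copied the RFC option only when the advertised length happened to match, then fell through and used rfc regardless, so a malformed response could leave it uninitialized. The patch rejects the option outright on a length mismatch. A userspace model of the corrected parse step:

#include <string.h>

/* Copy an option only when its advertised length matches the
 * destination exactly; otherwise skip it, so stale or uninitialized
 * contents are never acted upon. */
static int parse_fixed_opt(void *dst, size_t dst_size,
			   const void *val, size_t olen)
{
	if (olen != dst_size)
		return -1;

	memcpy(dst, val, olen);
	return 0;
}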
78374diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
78375index 083f2bf..799f9448 100644
78376--- a/net/bluetooth/l2cap_sock.c
78377+++ b/net/bluetooth/l2cap_sock.c
78378@@ -471,7 +471,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
78379 struct sock *sk = sock->sk;
78380 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
78381 struct l2cap_options opts;
78382- int len, err = 0;
78383+ int err = 0;
78384+ size_t len = optlen;
78385 u32 opt;
78386
78387 BT_DBG("sk %p", sk);
78388@@ -493,7 +494,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
78389 opts.max_tx = chan->max_tx;
78390 opts.txwin_size = chan->tx_win;
78391
78392- len = min_t(unsigned int, sizeof(opts), optlen);
78393+ len = min(sizeof(opts), len);
78394 if (copy_from_user((char *) &opts, optval, len)) {
78395 err = -EFAULT;
78396 break;
78397@@ -571,7 +572,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
78398 struct bt_security sec;
78399 struct bt_power pwr;
78400 struct l2cap_conn *conn;
78401- int len, err = 0;
78402+ int err = 0;
78403+ size_t len = optlen;
78404 u32 opt;
78405
78406 BT_DBG("sk %p", sk);
78407@@ -594,7 +596,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
78408
78409 sec.level = BT_SECURITY_LOW;
78410
78411- len = min_t(unsigned int, sizeof(sec), optlen);
78412+ len = min(sizeof(sec), len);
78413 if (copy_from_user((char *) &sec, optval, len)) {
78414 err = -EFAULT;
78415 break;
78416@@ -691,7 +693,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
78417
78418 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
78419
78420- len = min_t(unsigned int, sizeof(pwr), optlen);
78421+ len = min(sizeof(pwr), len);
78422 if (copy_from_user((char *) &pwr, optval, len)) {
78423 err = -EFAULT;
78424 break;
78425diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
78426index 868a909..d044bc3 100644
78427--- a/net/bluetooth/rfcomm/sock.c
78428+++ b/net/bluetooth/rfcomm/sock.c
78429@@ -667,7 +667,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
78430 struct sock *sk = sock->sk;
78431 struct bt_security sec;
78432 int err = 0;
78433- size_t len;
78434+ size_t len = optlen;
78435 u32 opt;
78436
78437 BT_DBG("sk %p", sk);
78438@@ -689,7 +689,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
78439
78440 sec.level = BT_SECURITY_LOW;
78441
78442- len = min_t(unsigned int, sizeof(sec), optlen);
78443+ len = min(sizeof(sec), len);
78444 if (copy_from_user((char *) &sec, optval, len)) {
78445 err = -EFAULT;
78446 break;
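
The Bluetooth setsockopt hunks above all make the same change: the user-supplied option length is held in a size_t from the moment it enters the function, so the min()-style clamp against the kernel buffer size cannot be bypassed by a value that is negative when viewed as an int. The resulting copy discipline, as a userspace model:

#include <stddef.h>
#include <string.h>

/* Clamp the user-controlled length to the destination size before
 * copying; with size_t there is no signed value to go negative. */
static size_t clamped_copy(void *dst, size_t dst_size,
			   const void *src, size_t optlen)
{
	size_t len = optlen < dst_size ? optlen : dst_size;

	memcpy(dst, src, len);
	return len;
}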
78447diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
78448index ccc2487..921073d 100644
78449--- a/net/bluetooth/rfcomm/tty.c
78450+++ b/net/bluetooth/rfcomm/tty.c
78451@@ -309,7 +309,7 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev)
78452 BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags));
78453
78454 spin_lock_irqsave(&dev->port.lock, flags);
78455- if (dev->port.count > 0) {
78456+ if (atomic_read(&dev->port.count) > 0) {
78457 spin_unlock_irqrestore(&dev->port.lock, flags);
78458 return;
78459 }
78460@@ -664,10 +664,10 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
78461 return -ENODEV;
78462
78463 BT_DBG("dev %p dst %s channel %d opened %d", dev, batostr(&dev->dst),
78464- dev->channel, dev->port.count);
78465+ dev->channel, atomic_read(&dev->port.count));
78466
78467 spin_lock_irqsave(&dev->port.lock, flags);
78468- if (++dev->port.count > 1) {
78469+ if (atomic_inc_return(&dev->port.count) > 1) {
78470 spin_unlock_irqrestore(&dev->port.lock, flags);
78471 return 0;
78472 }
78473@@ -732,10 +732,10 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
78474 return;
78475
78476 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
78477- dev->port.count);
78478+ atomic_read(&dev->port.count));
78479
78480 spin_lock_irqsave(&dev->port.lock, flags);
78481- if (!--dev->port.count) {
78482+ if (!atomic_dec_return(&dev->port.count)) {
78483 spin_unlock_irqrestore(&dev->port.lock, flags);
78484 if (dev->tty_dev->parent)
78485 device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
78486diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
78487index 5fe2ff3..121d696 100644
78488--- a/net/bridge/netfilter/ebtables.c
78489+++ b/net/bridge/netfilter/ebtables.c
78490@@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
78491 tmp.valid_hooks = t->table->valid_hooks;
78492 }
78493 mutex_unlock(&ebt_mutex);
78494- if (copy_to_user(user, &tmp, *len) != 0){
78495+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
78496 BUGPRINT("c2u Didn't work\n");
78497 ret = -EFAULT;
78498 break;
78499@@ -2327,7 +2327,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
78500 goto out;
78501 tmp.valid_hooks = t->valid_hooks;
78502
78503- if (copy_to_user(user, &tmp, *len) != 0) {
78504+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
78505 ret = -EFAULT;
78506 break;
78507 }
78508@@ -2338,7 +2338,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
78509 tmp.entries_size = t->table->entries_size;
78510 tmp.valid_hooks = t->table->valid_hooks;
78511
78512- if (copy_to_user(user, &tmp, *len) != 0) {
78513+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
78514 ret = -EFAULT;
78515 break;
78516 }
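
Each ebtables reply above gains a "*len > sizeof(tmp)" guard before copy_to_user(): *len comes from userland, so honoring it verbatim would leak whatever sits next to tmp on the kernel stack. The same guard in isolation, as a generic sketch:

#include <linux/uaccess.h>

static int reply_to_user(void __user *user, const void *obj,
			 size_t objsize, int ulen)
{
	if (ulen < 0 || (size_t)ulen > objsize)
		return -EINVAL;		/* never copy past the kernel object */
	if (copy_to_user(user, obj, ulen))
		return -EFAULT;
	return 0;
}
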
78517diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
78518index 44f270f..1f5602d 100644
78519--- a/net/caif/cfctrl.c
78520+++ b/net/caif/cfctrl.c
78521@@ -10,6 +10,7 @@
78522 #include <linux/spinlock.h>
78523 #include <linux/slab.h>
78524 #include <linux/pkt_sched.h>
78525+#include <linux/sched.h>
78526 #include <net/caif/caif_layer.h>
78527 #include <net/caif/cfpkt.h>
78528 #include <net/caif/cfctrl.h>
78529@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
78530 memset(&dev_info, 0, sizeof(dev_info));
78531 dev_info.id = 0xff;
78532 cfsrvl_init(&this->serv, 0, &dev_info, false);
78533- atomic_set(&this->req_seq_no, 1);
78534- atomic_set(&this->rsp_seq_no, 1);
78535+ atomic_set_unchecked(&this->req_seq_no, 1);
78536+ atomic_set_unchecked(&this->rsp_seq_no, 1);
78537 this->serv.layer.receive = cfctrl_recv;
78538 sprintf(this->serv.layer.name, "ctrl");
78539 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
78540@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
78541 struct cfctrl_request_info *req)
78542 {
78543 spin_lock_bh(&ctrl->info_list_lock);
78544- atomic_inc(&ctrl->req_seq_no);
78545- req->sequence_no = atomic_read(&ctrl->req_seq_no);
78546+ atomic_inc_unchecked(&ctrl->req_seq_no);
78547+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
78548 list_add_tail(&req->list, &ctrl->list);
78549 spin_unlock_bh(&ctrl->info_list_lock);
78550 }
78551@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
78552 if (p != first)
78553 pr_warn("Requests are not received in order\n");
78554
78555- atomic_set(&ctrl->rsp_seq_no,
78556+ atomic_set_unchecked(&ctrl->rsp_seq_no,
78557 p->sequence_no);
78558 list_del(&p->list);
78559 goto out;
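
The cfctrl sequence numbers are counters that may legitimately wrap, so the patch moves them to atomic_unchecked_t, the PaX type exempt from REFCOUNT overflow detection; atomic_*_unchecked() are PaX additions, not vanilla kernel API. A sketch of the intended use:

/* assumes a PaX/grsecurity kernel providing atomic_unchecked_t */
static atomic_unchecked_t seq_no = ATOMIC_INIT(1);

static u32 next_seq(void)
{
	/* wrap-around is harmless here, hence the _unchecked variant */
	return atomic_inc_return_unchecked(&seq_no);
}
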
78560diff --git a/net/can/af_can.c b/net/can/af_can.c
78561index ddac1ee..3ee0a78 100644
78562--- a/net/can/af_can.c
78563+++ b/net/can/af_can.c
78564@@ -872,7 +872,7 @@ static const struct net_proto_family can_family_ops = {
78565 };
78566
78567 /* notifier block for netdevice event */
78568-static struct notifier_block can_netdev_notifier __read_mostly = {
78569+static struct notifier_block can_netdev_notifier = {
78570 .notifier_call = can_notifier,
78571 };
78572
78573diff --git a/net/can/gw.c b/net/can/gw.c
78574index 1f5c978..ef714c7 100644
78575--- a/net/can/gw.c
78576+++ b/net/can/gw.c
78577@@ -67,7 +67,6 @@ MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
78578 MODULE_ALIAS("can-gw");
78579
78580 static HLIST_HEAD(cgw_list);
78581-static struct notifier_block notifier;
78582
78583 static struct kmem_cache *cgw_cache __read_mostly;
78584
78585@@ -887,6 +886,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
78586 return err;
78587 }
78588
78589+static struct notifier_block notifier = {
78590+ .notifier_call = cgw_notifier
78591+};
78592+
78593 static __init int cgw_module_init(void)
78594 {
78595 printk(banner);
78596@@ -898,7 +901,6 @@ static __init int cgw_module_init(void)
78597 return -ENOMEM;
78598
78599 /* set notifier */
78600- notifier.notifier_call = cgw_notifier;
78601 register_netdevice_notifier(&notifier);
78602
78603 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
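
In gw.c the notifier_block is now fully initialized at compile time instead of having .notifier_call assigned inside the init function; a static initializer lets constification plugins place the structure in read-only memory. The same shape in miniature:

#include <linux/notifier.h>

static int my_notifier_call(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	return NOTIFY_DONE;
}

static struct notifier_block my_notifier = {
	.notifier_call = my_notifier_call,	/* set once, never rewritten */
};
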
78604diff --git a/net/compat.c b/net/compat.c
78605index 79ae884..17c5c09 100644
78606--- a/net/compat.c
78607+++ b/net/compat.c
78608@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
78609 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
78610 __get_user(kmsg->msg_flags, &umsg->msg_flags))
78611 return -EFAULT;
78612- kmsg->msg_name = compat_ptr(tmp1);
78613- kmsg->msg_iov = compat_ptr(tmp2);
78614- kmsg->msg_control = compat_ptr(tmp3);
78615+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
78616+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
78617+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
78618 return 0;
78619 }
78620
78621@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
78622
78623 if (kern_msg->msg_namelen) {
78624 if (mode == VERIFY_READ) {
78625- int err = move_addr_to_kernel(kern_msg->msg_name,
78626+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
78627 kern_msg->msg_namelen,
78628 kern_address);
78629 if (err < 0)
78630@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
78631 kern_msg->msg_name = NULL;
78632
78633 tot_len = iov_from_user_compat_to_kern(kern_iov,
78634- (struct compat_iovec __user *)kern_msg->msg_iov,
78635+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
78636 kern_msg->msg_iovlen);
78637 if (tot_len >= 0)
78638 kern_msg->msg_iov = kern_iov;
78639@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
78640
78641 #define CMSG_COMPAT_FIRSTHDR(msg) \
78642 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
78643- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
78644+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
78645 (struct compat_cmsghdr __user *)NULL)
78646
78647 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
78648 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
78649 (ucmlen) <= (unsigned long) \
78650 ((mhdr)->msg_controllen - \
78651- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
78652+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
78653
78654 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
78655 struct compat_cmsghdr __user *cmsg, int cmsg_len)
78656 {
78657 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
78658- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
78659+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
78660 msg->msg_controllen)
78661 return NULL;
78662 return (struct compat_cmsghdr __user *)ptr;
78663@@ -219,7 +219,7 @@ Efault:
78664
78665 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
78666 {
78667- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
78668+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
78669 struct compat_cmsghdr cmhdr;
78670 struct compat_timeval ctv;
78671 struct compat_timespec cts[3];
78672@@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
78673
78674 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
78675 {
78676- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
78677+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
78678 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
78679 int fdnum = scm->fp->count;
78680 struct file **fp = scm->fp->fp;
78681@@ -363,7 +363,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
78682 return -EFAULT;
78683 old_fs = get_fs();
78684 set_fs(KERNEL_DS);
78685- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
78686+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
78687 set_fs(old_fs);
78688
78689 return err;
78690@@ -424,7 +424,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
78691 len = sizeof(ktime);
78692 old_fs = get_fs();
78693 set_fs(KERNEL_DS);
78694- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
78695+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
78696 set_fs(old_fs);
78697
78698 if (!err) {
78699@@ -567,7 +567,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
78700 case MCAST_JOIN_GROUP:
78701 case MCAST_LEAVE_GROUP:
78702 {
78703- struct compat_group_req __user *gr32 = (void *)optval;
78704+ struct compat_group_req __user *gr32 = (void __user *)optval;
78705 struct group_req __user *kgr =
78706 compat_alloc_user_space(sizeof(struct group_req));
78707 u32 interface;
78708@@ -588,7 +588,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
78709 case MCAST_BLOCK_SOURCE:
78710 case MCAST_UNBLOCK_SOURCE:
78711 {
78712- struct compat_group_source_req __user *gsr32 = (void *)optval;
78713+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
78714 struct group_source_req __user *kgsr = compat_alloc_user_space(
78715 sizeof(struct group_source_req));
78716 u32 interface;
78717@@ -609,7 +609,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
78718 }
78719 case MCAST_MSFILTER:
78720 {
78721- struct compat_group_filter __user *gf32 = (void *)optval;
78722+ struct compat_group_filter __user *gf32 = (void __user *)optval;
78723 struct group_filter __user *kgf;
78724 u32 interface, fmode, numsrc;
78725
78726@@ -647,7 +647,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
78727 char __user *optval, int __user *optlen,
78728 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
78729 {
78730- struct compat_group_filter __user *gf32 = (void *)optval;
78731+ struct compat_group_filter __user *gf32 = (void __user *)optval;
78732 struct group_filter __user *kgf;
78733 int __user *koptlen;
78734 u32 interface, fmode, numsrc;
78735@@ -796,7 +796,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
78736
78737 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
78738 return -EINVAL;
78739- if (copy_from_user(a, args, nas[call]))
78740+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
78741 return -EFAULT;
78742 a0 = a[0];
78743 a1 = a[1];
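
compat_sys_socketcall() copies nas[call] bytes of arguments onto the stack; the added "nas[call] > sizeof a" check guarantees the per-call size can never exceed the buffer even if the table and the buffer drift apart in a future edit. A sketch with an assumed three-call table:

#include <linux/uaccess.h>

static const unsigned char nargs_bytes[3] = { 8, 12, 16 };

static long demo_socketcall(int call, const u32 __user *args)
{
	u32 a[6];

	if (call < 0 || call > 2)
		return -EINVAL;
	if (nargs_bytes[call] > sizeof(a) ||	/* belt-and-braces bound */
	    copy_from_user(a, args, nargs_bytes[call]))
		return -EFAULT;
	/* ...dispatch on call using a[0..] ... */
	return 0;
}
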
78744diff --git a/net/core/datagram.c b/net/core/datagram.c
78745index 0337e2b..47914a0 100644
78746--- a/net/core/datagram.c
78747+++ b/net/core/datagram.c
78748@@ -289,7 +289,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
78749 }
78750
78751 kfree_skb(skb);
78752- atomic_inc(&sk->sk_drops);
78753+ atomic_inc_unchecked(&sk->sk_drops);
78754 sk_mem_reclaim_partial(sk);
78755
78756 return err;
78757diff --git a/net/core/dev.c b/net/core/dev.c
78758index e5942bf..25998c3 100644
78759--- a/net/core/dev.c
78760+++ b/net/core/dev.c
78761@@ -1162,9 +1162,13 @@ void dev_load(struct net *net, const char *name)
78762 if (no_module && capable(CAP_NET_ADMIN))
78763 no_module = request_module("netdev-%s", name);
78764 if (no_module && capable(CAP_SYS_MODULE)) {
78765+#ifdef CONFIG_GRKERNSEC_MODHARDEN
78766+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
78767+#else
78768 if (!request_module("%s", name))
78769 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
78770 name);
78771+#endif
78772 }
78773 }
78774 EXPORT_SYMBOL(dev_load);
78775@@ -1627,7 +1631,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
78776 {
78777 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
78778 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
78779- atomic_long_inc(&dev->rx_dropped);
78780+ atomic_long_inc_unchecked(&dev->rx_dropped);
78781 kfree_skb(skb);
78782 return NET_RX_DROP;
78783 }
78784@@ -1637,7 +1641,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
78785 nf_reset(skb);
78786
78787 if (unlikely(!is_skb_forwardable(dev, skb))) {
78788- atomic_long_inc(&dev->rx_dropped);
78789+ atomic_long_inc_unchecked(&dev->rx_dropped);
78790 kfree_skb(skb);
78791 return NET_RX_DROP;
78792 }
78793@@ -2093,7 +2097,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
78794
78795 struct dev_gso_cb {
78796 void (*destructor)(struct sk_buff *skb);
78797-};
78798+} __no_const;
78799
78800 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
78801
78802@@ -2955,7 +2959,7 @@ enqueue:
78803
78804 local_irq_restore(flags);
78805
78806- atomic_long_inc(&skb->dev->rx_dropped);
78807+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
78808 kfree_skb(skb);
78809 return NET_RX_DROP;
78810 }
78811@@ -3027,7 +3031,7 @@ int netif_rx_ni(struct sk_buff *skb)
78812 }
78813 EXPORT_SYMBOL(netif_rx_ni);
78814
78815-static void net_tx_action(struct softirq_action *h)
78816+static void net_tx_action(void)
78817 {
78818 struct softnet_data *sd = &__get_cpu_var(softnet_data);
78819
78820@@ -3358,7 +3362,7 @@ ncls:
78821 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
78822 } else {
78823 drop:
78824- atomic_long_inc(&skb->dev->rx_dropped);
78825+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
78826 kfree_skb(skb);
78827 /* Jamal, now you will not able to escape explaining
78828 * me how you were going to use this. :-)
78829@@ -3944,7 +3948,7 @@ void netif_napi_del(struct napi_struct *napi)
78830 }
78831 EXPORT_SYMBOL(netif_napi_del);
78832
78833-static void net_rx_action(struct softirq_action *h)
78834+static void net_rx_action(void)
78835 {
78836 struct softnet_data *sd = &__get_cpu_var(softnet_data);
78837 unsigned long time_limit = jiffies + 2;
78838@@ -4423,8 +4427,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
78839 else
78840 seq_printf(seq, "%04x", ntohs(pt->type));
78841
78842+#ifdef CONFIG_GRKERNSEC_HIDESYM
78843+ seq_printf(seq, " %-8s %p\n",
78844+ pt->dev ? pt->dev->name : "", NULL);
78845+#else
78846 seq_printf(seq, " %-8s %pF\n",
78847 pt->dev ? pt->dev->name : "", pt->func);
78848+#endif
78849 }
78850
78851 return 0;
78852@@ -5987,7 +5996,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
78853 } else {
78854 netdev_stats_to_stats64(storage, &dev->stats);
78855 }
78856- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
78857+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
78858 return storage;
78859 }
78860 EXPORT_SYMBOL(dev_get_stats);
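
Under CONFIG_GRKERNSEC_HIDESYM the ptype hunk prints a NULL instead of the handler's address, so /proc/net/ptype stops disclosing kernel text pointers. The conditional print, reduced to its core:

#include <linux/seq_file.h>

static void show_handler(struct seq_file *seq, void *func)
{
#ifdef CONFIG_GRKERNSEC_HIDESYM
	seq_printf(seq, "%p\n", NULL);		/* hide the symbol */
#else
	seq_printf(seq, "%pF\n", func);		/* symbol name + offset */
#endif
}
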
78861diff --git a/net/core/flow.c b/net/core/flow.c
78862index e318c7e..168b1d0 100644
78863--- a/net/core/flow.c
78864+++ b/net/core/flow.c
78865@@ -61,7 +61,7 @@ struct flow_cache {
78866 struct timer_list rnd_timer;
78867 };
78868
78869-atomic_t flow_cache_genid = ATOMIC_INIT(0);
78870+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
78871 EXPORT_SYMBOL(flow_cache_genid);
78872 static struct flow_cache flow_cache_global;
78873 static struct kmem_cache *flow_cachep __read_mostly;
78874@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
78875
78876 static int flow_entry_valid(struct flow_cache_entry *fle)
78877 {
78878- if (atomic_read(&flow_cache_genid) != fle->genid)
78879+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
78880 return 0;
78881 if (fle->object && !fle->object->ops->check(fle->object))
78882 return 0;
78883@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
78884 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
78885 fcp->hash_count++;
78886 }
78887- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
78888+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
78889 flo = fle->object;
78890 if (!flo)
78891 goto ret_object;
78892@@ -280,7 +280,7 @@ nocache:
78893 }
78894 flo = resolver(net, key, family, dir, flo, ctx);
78895 if (fle) {
78896- fle->genid = atomic_read(&flow_cache_genid);
78897+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
78898 if (!IS_ERR(flo))
78899 fle->object = flo;
78900 else
78901diff --git a/net/core/iovec.c b/net/core/iovec.c
78902index 7e7aeb0..2a998cb 100644
78903--- a/net/core/iovec.c
78904+++ b/net/core/iovec.c
78905@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
78906 if (m->msg_namelen) {
78907 if (mode == VERIFY_READ) {
78908 void __user *namep;
78909- namep = (void __user __force *) m->msg_name;
78910+ namep = (void __force_user *) m->msg_name;
78911 err = move_addr_to_kernel(namep, m->msg_namelen,
78912 address);
78913 if (err < 0)
78914@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
78915 }
78916
78917 size = m->msg_iovlen * sizeof(struct iovec);
78918- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
78919+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
78920 return -EFAULT;
78921
78922 m->msg_iov = iov;
78923diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
78924index fad649a..f2fdac4 100644
78925--- a/net/core/rtnetlink.c
78926+++ b/net/core/rtnetlink.c
78927@@ -198,14 +198,16 @@ int __rtnl_register(int protocol, int msgtype,
78928 rtnl_msg_handlers[protocol] = tab;
78929 }
78930
78931+ pax_open_kernel();
78932 if (doit)
78933- tab[msgindex].doit = doit;
78934+ *(void **)&tab[msgindex].doit = doit;
78935
78936 if (dumpit)
78937- tab[msgindex].dumpit = dumpit;
78938+ *(void **)&tab[msgindex].dumpit = dumpit;
78939
78940 if (calcit)
78941- tab[msgindex].calcit = calcit;
78942+ *(void **)&tab[msgindex].calcit = calcit;
78943+ pax_close_kernel();
78944
78945 return 0;
78946 }
78947@@ -248,8 +250,10 @@ int rtnl_unregister(int protocol, int msgtype)
78948 if (rtnl_msg_handlers[protocol] == NULL)
78949 return -ENOENT;
78950
78951- rtnl_msg_handlers[protocol][msgindex].doit = NULL;
78952- rtnl_msg_handlers[protocol][msgindex].dumpit = NULL;
78953+ pax_open_kernel();
78954+ *(void **)&rtnl_msg_handlers[protocol][msgindex].doit = NULL;
78955+ *(void **)&rtnl_msg_handlers[protocol][msgindex].dumpit = NULL;
78956+ pax_close_kernel();
78957
78958 return 0;
78959 }
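
With the constify plugin the rtnl handler table becomes read-only, so __rtnl_register()/rtnl_unregister() bracket their pointer stores with pax_open_kernel()/pax_close_kernel() (PaX primitives, not vanilla API) and write through a void ** to defeat the const qualifier. Sketch:

/* assumes a PaX kernel providing pax_open_kernel()/pax_close_kernel() */
struct ops_table {
	int (*doit)(void);
};

static struct ops_table table;	/* read-only at runtime under constify */

static void set_doit(int (*fn)(void))
{
	pax_open_kernel();		/* temporarily permit the write */
	*(void **)&table.doit = fn;
	pax_close_kernel();
}
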
78960diff --git a/net/core/scm.c b/net/core/scm.c
78961index ab57084..0190c8f 100644
78962--- a/net/core/scm.c
78963+++ b/net/core/scm.c
78964@@ -223,7 +223,7 @@ EXPORT_SYMBOL(__scm_send);
78965 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
78966 {
78967 struct cmsghdr __user *cm
78968- = (__force struct cmsghdr __user *)msg->msg_control;
78969+ = (struct cmsghdr __force_user *)msg->msg_control;
78970 struct cmsghdr cmhdr;
78971 int cmlen = CMSG_LEN(len);
78972 int err;
78973@@ -246,7 +246,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
78974 err = -EFAULT;
78975 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
78976 goto out;
78977- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
78978+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
78979 goto out;
78980 cmlen = CMSG_SPACE(len);
78981 if (msg->msg_controllen < cmlen)
78982@@ -262,7 +262,7 @@ EXPORT_SYMBOL(put_cmsg);
78983 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
78984 {
78985 struct cmsghdr __user *cm
78986- = (__force struct cmsghdr __user*)msg->msg_control;
78987+ = (struct cmsghdr __force_user *)msg->msg_control;
78988
78989 int fdmax = 0;
78990 int fdnum = scm->fp->count;
78991@@ -282,7 +282,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
78992 if (fdnum < fdmax)
78993 fdmax = fdnum;
78994
78995- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
78996+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
78997 i++, cmfptr++)
78998 {
78999 struct socket *sock;
79000diff --git a/net/core/sock.c b/net/core/sock.c
79001index 8a146cf..ee08914d 100644
79002--- a/net/core/sock.c
79003+++ b/net/core/sock.c
79004@@ -388,7 +388,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
79005 struct sk_buff_head *list = &sk->sk_receive_queue;
79006
79007 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
79008- atomic_inc(&sk->sk_drops);
79009+ atomic_inc_unchecked(&sk->sk_drops);
79010 trace_sock_rcvqueue_full(sk, skb);
79011 return -ENOMEM;
79012 }
79013@@ -398,7 +398,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
79014 return err;
79015
79016 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
79017- atomic_inc(&sk->sk_drops);
79018+ atomic_inc_unchecked(&sk->sk_drops);
79019 return -ENOBUFS;
79020 }
79021
79022@@ -418,7 +418,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
79023 skb_dst_force(skb);
79024
79025 spin_lock_irqsave(&list->lock, flags);
79026- skb->dropcount = atomic_read(&sk->sk_drops);
79027+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
79028 __skb_queue_tail(list, skb);
79029 spin_unlock_irqrestore(&list->lock, flags);
79030
79031@@ -438,7 +438,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
79032 skb->dev = NULL;
79033
79034 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
79035- atomic_inc(&sk->sk_drops);
79036+ atomic_inc_unchecked(&sk->sk_drops);
79037 goto discard_and_relse;
79038 }
79039 if (nested)
79040@@ -456,7 +456,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
79041 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
79042 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
79043 bh_unlock_sock(sk);
79044- atomic_inc(&sk->sk_drops);
79045+ atomic_inc_unchecked(&sk->sk_drops);
79046 goto discard_and_relse;
79047 }
79048
79049@@ -875,12 +875,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
79050 struct timeval tm;
79051 } v;
79052
79053- int lv = sizeof(int);
79054- int len;
79055+ unsigned int lv = sizeof(int);
79056+ unsigned int len;
79057
79058 if (get_user(len, optlen))
79059 return -EFAULT;
79060- if (len < 0)
79061+ if (len > INT_MAX)
79062 return -EINVAL;
79063
79064 memset(&v, 0, sizeof(v));
79065@@ -1028,11 +1028,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
79066
79067 case SO_PEERNAME:
79068 {
79069- char address[128];
79070+ char address[_K_SS_MAXSIZE];
79071
79072 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
79073 return -ENOTCONN;
79074- if (lv < len)
79075+ if (lv < len || sizeof address < len)
79076 return -EINVAL;
79077 if (copy_to_user(optval, address, len))
79078 return -EFAULT;
79079@@ -1080,7 +1080,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
79080
79081 if (len > lv)
79082 len = lv;
79083- if (copy_to_user(optval, &v, len))
79084+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
79085 return -EFAULT;
79086 lenout:
79087 if (put_user(len, optlen))
79088@@ -2212,7 +2212,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
79089 */
79090 smp_wmb();
79091 atomic_set(&sk->sk_refcnt, 1);
79092- atomic_set(&sk->sk_drops, 0);
79093+ atomic_set_unchecked(&sk->sk_drops, 0);
79094 }
79095 EXPORT_SYMBOL(sock_init_data);
79096
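
sock_getsockopt() now keeps len unsigned, so a negative length from userland appears as a value above INT_MAX and is rejected with a single comparison, and the final copy is additionally capped at sizeof(v). The unsigned-length idiom on its own:

#include <linux/kernel.h>
#include <linux/uaccess.h>

static int read_opt_len(int __user *optlen, unsigned int *lenp)
{
	unsigned int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len > INT_MAX)		/* was negative as a signed int */
		return -EINVAL;
	*lenp = len;
	return 0;
}
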
79097diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
79098index 602cd63..05c6c60 100644
79099--- a/net/core/sock_diag.c
79100+++ b/net/core/sock_diag.c
79101@@ -15,20 +15,27 @@ static DEFINE_MUTEX(sock_diag_table_mutex);
79102
79103 int sock_diag_check_cookie(void *sk, __u32 *cookie)
79104 {
79105+#ifndef CONFIG_GRKERNSEC_HIDESYM
79106 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
79107 cookie[1] != INET_DIAG_NOCOOKIE) &&
79108 ((u32)(unsigned long)sk != cookie[0] ||
79109 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
79110 return -ESTALE;
79111 else
79112+#endif
79113 return 0;
79114 }
79115 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
79116
79117 void sock_diag_save_cookie(void *sk, __u32 *cookie)
79118 {
79119+#ifdef CONFIG_GRKERNSEC_HIDESYM
79120+ cookie[0] = 0;
79121+ cookie[1] = 0;
79122+#else
79123 cookie[0] = (u32)(unsigned long)sk;
79124 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
79125+#endif
79126 }
79127 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
79128
79129diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
79130index a55eecc..dd8428c 100644
79131--- a/net/decnet/sysctl_net_decnet.c
79132+++ b/net/decnet/sysctl_net_decnet.c
79133@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
79134
79135 if (len > *lenp) len = *lenp;
79136
79137- if (copy_to_user(buffer, addr, len))
79138+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
79139 return -EFAULT;
79140
79141 *lenp = len;
79142@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
79143
79144 if (len > *lenp) len = *lenp;
79145
79146- if (copy_to_user(buffer, devname, len))
79147+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
79148 return -EFAULT;
79149
79150 *lenp = len;
79151diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
79152index 2a6abc1..c379ba7 100644
79153--- a/net/ipv4/devinet.c
79154+++ b/net/ipv4/devinet.c
79155@@ -822,9 +822,9 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
79156 if (!ifa) {
79157 ret = -ENOBUFS;
79158 ifa = inet_alloc_ifa();
79159+ if (!ifa)
79160+ break;
79161 INIT_HLIST_NODE(&ifa->hash);
79162- if (!ifa)
79163- break;
79164 if (colon)
79165 memcpy(ifa->ifa_label, ifr.ifr_name, IFNAMSIZ);
79166 else
79167diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
79168index 825c608..750ff29 100644
79169--- a/net/ipv4/fib_frontend.c
79170+++ b/net/ipv4/fib_frontend.c
79171@@ -1020,12 +1020,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
79172 #ifdef CONFIG_IP_ROUTE_MULTIPATH
79173 fib_sync_up(dev);
79174 #endif
79175- atomic_inc(&net->ipv4.dev_addr_genid);
79176+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
79177 rt_cache_flush(dev_net(dev));
79178 break;
79179 case NETDEV_DOWN:
79180 fib_del_ifaddr(ifa, NULL);
79181- atomic_inc(&net->ipv4.dev_addr_genid);
79182+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
79183 if (ifa->ifa_dev->ifa_list == NULL) {
79184 /* Last address was deleted from this interface.
79185 * Disable IP.
79186@@ -1061,7 +1061,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
79187 #ifdef CONFIG_IP_ROUTE_MULTIPATH
79188 fib_sync_up(dev);
79189 #endif
79190- atomic_inc(&net->ipv4.dev_addr_genid);
79191+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
79192 rt_cache_flush(net);
79193 break;
79194 case NETDEV_DOWN:
79195diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
79196index 71b125c..f4c70b0 100644
79197--- a/net/ipv4/fib_semantics.c
79198+++ b/net/ipv4/fib_semantics.c
79199@@ -767,7 +767,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
79200 nh->nh_saddr = inet_select_addr(nh->nh_dev,
79201 nh->nh_gw,
79202 nh->nh_parent->fib_scope);
79203- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
79204+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
79205
79206 return nh->nh_saddr;
79207 }
79208diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
79209index 7880af9..70f92a3 100644
79210--- a/net/ipv4/inet_hashtables.c
79211+++ b/net/ipv4/inet_hashtables.c
79212@@ -18,12 +18,15 @@
79213 #include <linux/sched.h>
79214 #include <linux/slab.h>
79215 #include <linux/wait.h>
79216+#include <linux/security.h>
79217
79218 #include <net/inet_connection_sock.h>
79219 #include <net/inet_hashtables.h>
79220 #include <net/secure_seq.h>
79221 #include <net/ip.h>
79222
79223+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
79224+
79225 /*
79226 * Allocate and initialize a new local port bind bucket.
79227 * The bindhash mutex for snum's hash chain must be held here.
79228@@ -530,6 +533,8 @@ ok:
79229 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
79230 spin_unlock(&head->lock);
79231
79232+ gr_update_task_in_ip_table(current, inet_sk(sk));
79233+
79234 if (tw) {
79235 inet_twsk_deschedule(tw, death_row);
79236 while (twrefcnt) {
79237diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
79238index 000e3d2..5472da3 100644
79239--- a/net/ipv4/inetpeer.c
79240+++ b/net/ipv4/inetpeer.c
79241@@ -503,8 +503,8 @@ relookup:
79242 if (p) {
79243 p->daddr = *daddr;
79244 atomic_set(&p->refcnt, 1);
79245- atomic_set(&p->rid, 0);
79246- atomic_set(&p->ip_id_count,
79247+ atomic_set_unchecked(&p->rid, 0);
79248+ atomic_set_unchecked(&p->ip_id_count,
79249 (daddr->family == AF_INET) ?
79250 secure_ip_id(daddr->addr.a4) :
79251 secure_ipv6_id(daddr->addr.a6));
79252diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
79253index 8d5cc75..821fd11 100644
79254--- a/net/ipv4/ip_fragment.c
79255+++ b/net/ipv4/ip_fragment.c
79256@@ -322,7 +322,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
79257 return 0;
79258
79259 start = qp->rid;
79260- end = atomic_inc_return(&peer->rid);
79261+ end = atomic_inc_return_unchecked(&peer->rid);
79262 qp->rid = end;
79263
79264 rc = qp->q.fragments && (end - start) > max;
79265diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
79266index 14bbfcf..644f472 100644
79267--- a/net/ipv4/ip_sockglue.c
79268+++ b/net/ipv4/ip_sockglue.c
79269@@ -1151,7 +1151,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
79270 len = min_t(unsigned int, len, opt->optlen);
79271 if (put_user(len, optlen))
79272 return -EFAULT;
79273- if (copy_to_user(optval, opt->__data, len))
79274+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
79275+ copy_to_user(optval, opt->__data, len))
79276 return -EFAULT;
79277 return 0;
79278 }
79279@@ -1282,7 +1283,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
79280 if (sk->sk_type != SOCK_STREAM)
79281 return -ENOPROTOOPT;
79282
79283- msg.msg_control = optval;
79284+ msg.msg_control = (void __force_kernel *)optval;
79285 msg.msg_controllen = len;
79286 msg.msg_flags = flags;
79287
79288diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
79289index 798358b..73570b7 100644
79290--- a/net/ipv4/ipconfig.c
79291+++ b/net/ipv4/ipconfig.c
79292@@ -321,7 +321,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
79293
79294 mm_segment_t oldfs = get_fs();
79295 set_fs(get_ds());
79296- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
79297+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
79298 set_fs(oldfs);
79299 return res;
79300 }
79301@@ -332,7 +332,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
79302
79303 mm_segment_t oldfs = get_fs();
79304 set_fs(get_ds());
79305- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
79306+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
79307 set_fs(oldfs);
79308 return res;
79309 }
79310@@ -343,7 +343,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
79311
79312 mm_segment_t oldfs = get_fs();
79313 set_fs(get_ds());
79314- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
79315+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
79316 set_fs(oldfs);
79317 return res;
79318 }
79319diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
79320index 97e61ea..cac1bbb 100644
79321--- a/net/ipv4/netfilter/arp_tables.c
79322+++ b/net/ipv4/netfilter/arp_tables.c
79323@@ -879,14 +879,14 @@ static int compat_table_info(const struct xt_table_info *info,
79324 #endif
79325
79326 static int get_info(struct net *net, void __user *user,
79327- const int *len, int compat)
79328+ int len, int compat)
79329 {
79330 char name[XT_TABLE_MAXNAMELEN];
79331 struct xt_table *t;
79332 int ret;
79333
79334- if (*len != sizeof(struct arpt_getinfo)) {
79335- duprintf("length %u != %Zu\n", *len,
79336+ if (len != sizeof(struct arpt_getinfo)) {
79337+ duprintf("length %u != %Zu\n", len,
79338 sizeof(struct arpt_getinfo));
79339 return -EINVAL;
79340 }
79341@@ -923,7 +923,7 @@ static int get_info(struct net *net, void __user *user,
79342 info.size = private->size;
79343 strcpy(info.name, name);
79344
79345- if (copy_to_user(user, &info, *len) != 0)
79346+ if (copy_to_user(user, &info, len) != 0)
79347 ret = -EFAULT;
79348 else
79349 ret = 0;
79350@@ -1682,7 +1682,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
79351
79352 switch (cmd) {
79353 case ARPT_SO_GET_INFO:
79354- ret = get_info(sock_net(sk), user, len, 1);
79355+ ret = get_info(sock_net(sk), user, *len, 1);
79356 break;
79357 case ARPT_SO_GET_ENTRIES:
79358 ret = compat_get_entries(sock_net(sk), user, len);
79359@@ -1727,7 +1727,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
79360
79361 switch (cmd) {
79362 case ARPT_SO_GET_INFO:
79363- ret = get_info(sock_net(sk), user, len, 0);
79364+ ret = get_info(sock_net(sk), user, *len, 0);
79365 break;
79366
79367 case ARPT_SO_GET_ENTRIES:
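
get_info() now takes len by value: the caller dereferences the user-supplied length exactly once, so the value that is validated is provably the value later handed to copy_to_user() (no double fetch). Reduced to a sketch:

#include <linux/uaccess.h>

struct demo_getinfo {
	char name[32];
	unsigned int size;
};

static int get_info_fixed(void __user *user, int len)
{
	struct demo_getinfo info = { .name = "demo", .size = 0 };

	if (len != sizeof(info))
		return -EINVAL;		/* exact-size replies only */
	return copy_to_user(user, &info, len) ? -EFAULT : 0;
}
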
79368diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
79369index 170b1fd..6105b91 100644
79370--- a/net/ipv4/netfilter/ip_tables.c
79371+++ b/net/ipv4/netfilter/ip_tables.c
79372@@ -1068,14 +1068,14 @@ static int compat_table_info(const struct xt_table_info *info,
79373 #endif
79374
79375 static int get_info(struct net *net, void __user *user,
79376- const int *len, int compat)
79377+ int len, int compat)
79378 {
79379 char name[XT_TABLE_MAXNAMELEN];
79380 struct xt_table *t;
79381 int ret;
79382
79383- if (*len != sizeof(struct ipt_getinfo)) {
79384- duprintf("length %u != %zu\n", *len,
79385+ if (len != sizeof(struct ipt_getinfo)) {
79386+ duprintf("length %u != %zu\n", len,
79387 sizeof(struct ipt_getinfo));
79388 return -EINVAL;
79389 }
79390@@ -1112,7 +1112,7 @@ static int get_info(struct net *net, void __user *user,
79391 info.size = private->size;
79392 strcpy(info.name, name);
79393
79394- if (copy_to_user(user, &info, *len) != 0)
79395+ if (copy_to_user(user, &info, len) != 0)
79396 ret = -EFAULT;
79397 else
79398 ret = 0;
79399@@ -1966,7 +1966,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
79400
79401 switch (cmd) {
79402 case IPT_SO_GET_INFO:
79403- ret = get_info(sock_net(sk), user, len, 1);
79404+ ret = get_info(sock_net(sk), user, *len, 1);
79405 break;
79406 case IPT_SO_GET_ENTRIES:
79407 ret = compat_get_entries(sock_net(sk), user, len);
79408@@ -2013,7 +2013,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
79409
79410 switch (cmd) {
79411 case IPT_SO_GET_INFO:
79412- ret = get_info(sock_net(sk), user, len, 0);
79413+ ret = get_info(sock_net(sk), user, *len, 0);
79414 break;
79415
79416 case IPT_SO_GET_ENTRIES:
79417diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
79418index 8f3d054..c58d05d 100644
79419--- a/net/ipv4/ping.c
79420+++ b/net/ipv4/ping.c
79421@@ -843,7 +843,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
79422 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
79423 0, sock_i_ino(sp),
79424 atomic_read(&sp->sk_refcnt), sp,
79425- atomic_read(&sp->sk_drops), len);
79426+ atomic_read_unchecked(&sp->sk_drops), len);
79427 }
79428
79429 static int ping_seq_show(struct seq_file *seq, void *v)
79430diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
79431index 73d1e4d..3af0e8f 100644
79432--- a/net/ipv4/raw.c
79433+++ b/net/ipv4/raw.c
79434@@ -311,7 +311,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
79435 int raw_rcv(struct sock *sk, struct sk_buff *skb)
79436 {
79437 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
79438- atomic_inc(&sk->sk_drops);
79439+ atomic_inc_unchecked(&sk->sk_drops);
79440 kfree_skb(skb);
79441 return NET_RX_DROP;
79442 }
79443@@ -747,16 +747,20 @@ static int raw_init(struct sock *sk)
79444
79445 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
79446 {
79447+ struct icmp_filter filter;
79448+
79449 if (optlen > sizeof(struct icmp_filter))
79450 optlen = sizeof(struct icmp_filter);
79451- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
79452+ if (copy_from_user(&filter, optval, optlen))
79453 return -EFAULT;
79454+ raw_sk(sk)->filter = filter;
79455 return 0;
79456 }
79457
79458 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
79459 {
79460 int len, ret = -EFAULT;
79461+ struct icmp_filter filter;
79462
79463 if (get_user(len, optlen))
79464 goto out;
79465@@ -766,8 +770,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
79466 if (len > sizeof(struct icmp_filter))
79467 len = sizeof(struct icmp_filter);
79468 ret = -EFAULT;
79469- if (put_user(len, optlen) ||
79470- copy_to_user(optval, &raw_sk(sk)->filter, len))
79471+ filter = raw_sk(sk)->filter;
79472+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
79473 goto out;
79474 ret = 0;
79475 out: return ret;
79476@@ -997,7 +1001,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
79477 0, 0L, 0,
79478 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
79479 0, sock_i_ino(sp),
79480- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
79481+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
79482 }
79483
79484 static int raw_seq_show(struct seq_file *seq, void *v)
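
The raw socket filter paths now stage the ICMP filter in a zeroed on-stack copy: a faulting user copy leaves the socket's live filter untouched, and the get path never exposes more than the staged struct. Sketch of the set side:

#include <linux/string.h>
#include <linux/uaccess.h>

struct demo_filter {
	u32 data[8];
};

static int set_filter(struct demo_filter *dst, const char __user *optval,
		      unsigned int optlen)
{
	struct demo_filter tmp = { };	/* zeroed so a short copy is benign */

	if (optlen > sizeof(tmp))
		optlen = sizeof(tmp);
	if (copy_from_user(&tmp, optval, optlen))
		return -EFAULT;		/* dst untouched on failure */
	*dst = tmp;			/* commit in one struct assignment */
	return 0;
}
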
79485diff --git a/net/ipv4/route.c b/net/ipv4/route.c
79486index df25142..e92a82a 100644
79487--- a/net/ipv4/route.c
79488+++ b/net/ipv4/route.c
79489@@ -2529,7 +2529,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
79490
79491 static __net_init int rt_genid_init(struct net *net)
79492 {
79493- atomic_set(&net->rt_genid, 0);
79494+ atomic_set_unchecked(&net->rt_genid, 0);
79495 get_random_bytes(&net->ipv4.dev_addr_genid,
79496 sizeof(net->ipv4.dev_addr_genid));
79497 return 0;
79498diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
79499index 181fc82..cc95f8c 100644
79500--- a/net/ipv4/tcp_input.c
79501+++ b/net/ipv4/tcp_input.c
79502@@ -4704,7 +4704,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
79503 * simplifies code)
79504 */
79505 static void
79506-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
79507+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
79508 struct sk_buff *head, struct sk_buff *tail,
79509 u32 start, u32 end)
79510 {
79511@@ -5536,6 +5536,9 @@ slow_path:
79512 if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
79513 goto csum_error;
79514
79515+ if (!th->ack)
79516+ goto discard;
79517+
79518 /*
79519 * Standard slow path.
79520 */
79521@@ -5544,7 +5547,7 @@ slow_path:
79522 return 0;
79523
79524 step5:
79525- if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
79526+ if (tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
79527 goto discard;
79528
79529 /* ts_recent update must be made after we are sure that the packet
79530@@ -5836,6 +5839,7 @@ discard:
79531 tcp_paws_reject(&tp->rx_opt, 0))
79532 goto discard_and_undo;
79533
79534+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
79535 if (th->syn) {
79536 /* We see SYN without ACK. It is attempt of
79537 * simultaneous connect with crossed SYNs.
79538@@ -5886,6 +5890,7 @@ discard:
79539 goto discard;
79540 #endif
79541 }
79542+#endif
79543 /* "fifth, if neither of the SYN or RST bits is set then
79544 * drop the segment and return."
79545 */
79546@@ -5930,7 +5935,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
79547 goto discard;
79548
79549 if (th->syn) {
79550- if (th->fin)
79551+ if (th->fin || th->urg || th->psh)
79552 goto discard;
79553 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
79554 return 1;
79555@@ -5977,11 +5982,15 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
79556 if (tcp_check_req(sk, skb, req, NULL, true) == NULL)
79557 goto discard;
79558 }
79559+
79560+ if (!th->ack)
79561+ goto discard;
79562+
79563 if (!tcp_validate_incoming(sk, skb, th, 0))
79564 return 0;
79565
79566 /* step 5: check the ACK field */
79567- if (th->ack) {
79568+ if (true) {
79569 int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
79570
79571 switch (sk->sk_state) {
79572@@ -6131,8 +6140,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
79573 }
79574 break;
79575 }
79576- } else
79577- goto discard;
79578+ }
79579
79580 /* ts_recent update must be made after we are sure that the packet
79581 * is in window.
79582diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
79583index bc3cb46..815ccd6 100644
79584--- a/net/ipv4/tcp_ipv4.c
79585+++ b/net/ipv4/tcp_ipv4.c
79586@@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
79587 EXPORT_SYMBOL(sysctl_tcp_low_latency);
79588
79589
79590+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79591+extern int grsec_enable_blackhole;
79592+#endif
79593+
79594 #ifdef CONFIG_TCP_MD5SIG
79595 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
79596 __be32 daddr, __be32 saddr, const struct tcphdr *th);
79597@@ -1899,6 +1903,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
79598 return 0;
79599
79600 reset:
79601+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79602+ if (!grsec_enable_blackhole)
79603+#endif
79604 tcp_v4_send_reset(rsk, skb);
79605 discard:
79606 kfree_skb(skb);
79607@@ -1999,12 +2006,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
79608 TCP_SKB_CB(skb)->sacked = 0;
79609
79610 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
79611- if (!sk)
79612+ if (!sk) {
79613+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79614+ ret = 1;
79615+#endif
79616 goto no_tcp_socket;
79617-
79618+ }
79619 process:
79620- if (sk->sk_state == TCP_TIME_WAIT)
79621+ if (sk->sk_state == TCP_TIME_WAIT) {
79622+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79623+ ret = 2;
79624+#endif
79625 goto do_time_wait;
79626+ }
79627
79628 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
79629 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
79630@@ -2055,6 +2069,10 @@ no_tcp_socket:
79631 bad_packet:
79632 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
79633 } else {
79634+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79635+ if (!grsec_enable_blackhole || (ret == 1 &&
79636+ (skb->dev->flags & IFF_LOOPBACK)))
79637+#endif
79638 tcp_v4_send_reset(NULL, skb);
79639 }
79640
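
The blackhole hunks make every tcp_v4_send_reset() for unowned or errant packets conditional on the grsec_enable_blackhole sysctl, with ret recording whether the lookup found no socket so that loopback traffic keeps normal RST behaviour. A sketch of the decision (my reading of the flag logic):

#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

extern int grsec_enable_blackhole;	/* grsecurity sysctl, assumed */

static bool should_send_reset(const struct sk_buff *skb, bool no_socket)
{
	if (!grsec_enable_blackhole)
		return true;		/* feature off: behave normally */
	/* with blackholing on, only loopback lookup misses still get a RST */
	return no_socket && (skb->dev->flags & IFF_LOOPBACK);
}
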
79641diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
79642index a7302d9..e3ec754 100644
79643--- a/net/ipv4/tcp_minisocks.c
79644+++ b/net/ipv4/tcp_minisocks.c
79645@@ -27,6 +27,10 @@
79646 #include <net/inet_common.h>
79647 #include <net/xfrm.h>
79648
79649+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79650+extern int grsec_enable_blackhole;
79651+#endif
79652+
79653 int sysctl_tcp_syncookies __read_mostly = 1;
79654 EXPORT_SYMBOL(sysctl_tcp_syncookies);
79655
79656@@ -742,7 +746,10 @@ embryonic_reset:
79657 * avoid becoming vulnerable to outside attack aiming at
79658 * resetting legit local connections.
79659 */
79660- req->rsk_ops->send_reset(sk, skb);
79661+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79662+ if (!grsec_enable_blackhole)
79663+#endif
79664+ req->rsk_ops->send_reset(sk, skb);
79665 } else if (fastopen) { /* received a valid RST pkt */
79666 reqsk_fastopen_remove(sk, req, true);
79667 tcp_reset(sk);
79668diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
79669index 4526fe6..1a34e43 100644
79670--- a/net/ipv4/tcp_probe.c
79671+++ b/net/ipv4/tcp_probe.c
79672@@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
79673 if (cnt + width >= len)
79674 break;
79675
79676- if (copy_to_user(buf + cnt, tbuf, width))
79677+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
79678 return -EFAULT;
79679 cnt += width;
79680 }
79681diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
79682index d47c1b4..b0584de 100644
79683--- a/net/ipv4/tcp_timer.c
79684+++ b/net/ipv4/tcp_timer.c
79685@@ -22,6 +22,10 @@
79686 #include <linux/gfp.h>
79687 #include <net/tcp.h>
79688
79689+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79690+extern int grsec_lastack_retries;
79691+#endif
79692+
79693 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
79694 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
79695 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
79696@@ -185,6 +189,13 @@ static int tcp_write_timeout(struct sock *sk)
79697 }
79698 }
79699
79700+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79701+ if ((sk->sk_state == TCP_LAST_ACK) &&
79702+ (grsec_lastack_retries > 0) &&
79703+ (grsec_lastack_retries < retry_until))
79704+ retry_until = grsec_lastack_retries;
79705+#endif
79706+
79707 if (retransmits_timed_out(sk, retry_until,
79708 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
79709 /* Has it gone just too far? */
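
In TCP_LAST_ACK the retransmit budget is clamped to the grsec_lastack_retries sysctl, so half-closed connections kept alive by an unresponsive peer time out sooner than the global retry limit would allow. The clamp as a pure function:

#include <net/tcp_states.h>

static int effective_retries(int sk_state, int retry_until,
			     int lastack_retries)
{
	if (sk_state == TCP_LAST_ACK &&
	    lastack_retries > 0 && lastack_retries < retry_until)
		return lastack_retries;	/* the tighter bound wins */
	return retry_until;
}
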
79710diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
79711index 79c8dbe..aceb1b6 100644
79712--- a/net/ipv4/udp.c
79713+++ b/net/ipv4/udp.c
79714@@ -87,6 +87,7 @@
79715 #include <linux/types.h>
79716 #include <linux/fcntl.h>
79717 #include <linux/module.h>
79718+#include <linux/security.h>
79719 #include <linux/socket.h>
79720 #include <linux/sockios.h>
79721 #include <linux/igmp.h>
79722@@ -111,6 +112,10 @@
79723 #include <trace/events/skb.h>
79724 #include "udp_impl.h"
79725
79726+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79727+extern int grsec_enable_blackhole;
79728+#endif
79729+
79730 struct udp_table udp_table __read_mostly;
79731 EXPORT_SYMBOL(udp_table);
79732
79733@@ -569,6 +574,9 @@ found:
79734 return s;
79735 }
79736
79737+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
79738+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
79739+
79740 /*
79741 * This routine is called by the ICMP module when it gets some
79742 * sort of error condition. If err < 0 then the socket should
79743@@ -864,9 +872,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
79744 dport = usin->sin_port;
79745 if (dport == 0)
79746 return -EINVAL;
79747+
79748+ err = gr_search_udp_sendmsg(sk, usin);
79749+ if (err)
79750+ return err;
79751 } else {
79752 if (sk->sk_state != TCP_ESTABLISHED)
79753 return -EDESTADDRREQ;
79754+
79755+ err = gr_search_udp_sendmsg(sk, NULL);
79756+ if (err)
79757+ return err;
79758+
79759 daddr = inet->inet_daddr;
79760 dport = inet->inet_dport;
79761 /* Open fast path for connected socket.
79762@@ -1108,7 +1125,7 @@ static unsigned int first_packet_length(struct sock *sk)
79763 udp_lib_checksum_complete(skb)) {
79764 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
79765 IS_UDPLITE(sk));
79766- atomic_inc(&sk->sk_drops);
79767+ atomic_inc_unchecked(&sk->sk_drops);
79768 __skb_unlink(skb, rcvq);
79769 __skb_queue_tail(&list_kill, skb);
79770 }
79771@@ -1194,6 +1211,10 @@ try_again:
79772 if (!skb)
79773 goto out;
79774
79775+ err = gr_search_udp_recvmsg(sk, skb);
79776+ if (err)
79777+ goto out_free;
79778+
79779 ulen = skb->len - sizeof(struct udphdr);
79780 copied = len;
79781 if (copied > ulen)
79782@@ -1227,7 +1248,7 @@ try_again:
79783 if (unlikely(err)) {
79784 trace_kfree_skb(skb, udp_recvmsg);
79785 if (!peeked) {
79786- atomic_inc(&sk->sk_drops);
79787+ atomic_inc_unchecked(&sk->sk_drops);
79788 UDP_INC_STATS_USER(sock_net(sk),
79789 UDP_MIB_INERRORS, is_udplite);
79790 }
79791@@ -1510,7 +1531,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
79792
79793 drop:
79794 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
79795- atomic_inc(&sk->sk_drops);
79796+ atomic_inc_unchecked(&sk->sk_drops);
79797 kfree_skb(skb);
79798 return -1;
79799 }
79800@@ -1529,7 +1550,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
79801 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
79802
79803 if (!skb1) {
79804- atomic_inc(&sk->sk_drops);
79805+ atomic_inc_unchecked(&sk->sk_drops);
79806 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
79807 IS_UDPLITE(sk));
79808 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
79809@@ -1698,6 +1719,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
79810 goto csum_error;
79811
79812 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
79813+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79814+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
79815+#endif
79816 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
79817
79818 /*
79819@@ -2119,7 +2143,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
79820 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
79821 0, sock_i_ino(sp),
79822 atomic_read(&sp->sk_refcnt), sp,
79823- atomic_read(&sp->sk_drops), len);
79824+ atomic_read_unchecked(&sp->sk_drops), len);
79825 }
79826
79827 int udp4_seq_show(struct seq_file *seq, void *v)
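
udp_sendmsg()/udp_recvmsg() gain grsecurity RBAC hooks that can veto a datagram by peer address before any real work is done; the hooks return 0 to allow or a negative errno to deny. The call pattern, with the hook body left hypothetical:

#include <linux/in.h>

/* hypothetical stand-in for gr_search_udp_sendmsg() from the patch */
static int gr_check_udp_peer(const struct sockaddr_in *addr)
{
	/* consult the RBAC policy here; 0 means allowed */
	return 0;
}

static int demo_sendmsg(const struct sockaddr_in *usin)
{
	int err = gr_check_udp_peer(usin);

	if (err)
		return err;	/* policy veto propagates to the caller */
	/* ...normal send path... */
	return 0;
}
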
79828diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
79829index 0424e4e..308dd43 100644
79830--- a/net/ipv6/addrconf.c
79831+++ b/net/ipv6/addrconf.c
79832@@ -2121,7 +2121,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
79833 p.iph.ihl = 5;
79834 p.iph.protocol = IPPROTO_IPV6;
79835 p.iph.ttl = 64;
79836- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
79837+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
79838
79839 if (ops->ndo_do_ioctl) {
79840 mm_segment_t oldfs = get_fs();
79841diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
79842index d5cb3c4..b3e38d0 100644
79843--- a/net/ipv6/ip6_gre.c
79844+++ b/net/ipv6/ip6_gre.c
79845@@ -1353,7 +1353,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
79846 }
79847
79848
79849-static struct inet6_protocol ip6gre_protocol __read_mostly = {
79850+static struct inet6_protocol ip6gre_protocol = {
79851 .handler = ip6gre_rcv,
79852 .err_handler = ip6gre_err,
79853 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
79854diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
79855index e02faed..9780f28 100644
79856--- a/net/ipv6/ipv6_sockglue.c
79857+++ b/net/ipv6/ipv6_sockglue.c
79858@@ -990,7 +990,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
79859 if (sk->sk_type != SOCK_STREAM)
79860 return -ENOPROTOOPT;
79861
79862- msg.msg_control = optval;
79863+ msg.msg_control = (void __force_kernel *)optval;
79864 msg.msg_controllen = len;
79865 msg.msg_flags = flags;
79866
79867diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
79868index d7cb045..8c0ded6 100644
79869--- a/net/ipv6/netfilter/ip6_tables.c
79870+++ b/net/ipv6/netfilter/ip6_tables.c
79871@@ -1078,14 +1078,14 @@ static int compat_table_info(const struct xt_table_info *info,
79872 #endif
79873
79874 static int get_info(struct net *net, void __user *user,
79875- const int *len, int compat)
79876+ int len, int compat)
79877 {
79878 char name[XT_TABLE_MAXNAMELEN];
79879 struct xt_table *t;
79880 int ret;
79881
79882- if (*len != sizeof(struct ip6t_getinfo)) {
79883- duprintf("length %u != %zu\n", *len,
79884+ if (len != sizeof(struct ip6t_getinfo)) {
79885+ duprintf("length %u != %zu\n", len,
79886 sizeof(struct ip6t_getinfo));
79887 return -EINVAL;
79888 }
79889@@ -1122,7 +1122,7 @@ static int get_info(struct net *net, void __user *user,
79890 info.size = private->size;
79891 strcpy(info.name, name);
79892
79893- if (copy_to_user(user, &info, *len) != 0)
79894+ if (copy_to_user(user, &info, len) != 0)
79895 ret = -EFAULT;
79896 else
79897 ret = 0;
79898@@ -1976,7 +1976,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
79899
79900 switch (cmd) {
79901 case IP6T_SO_GET_INFO:
79902- ret = get_info(sock_net(sk), user, len, 1);
79903+ ret = get_info(sock_net(sk), user, *len, 1);
79904 break;
79905 case IP6T_SO_GET_ENTRIES:
79906 ret = compat_get_entries(sock_net(sk), user, len);
79907@@ -2023,7 +2023,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
79908
79909 switch (cmd) {
79910 case IP6T_SO_GET_INFO:
79911- ret = get_info(sock_net(sk), user, len, 0);
79912+ ret = get_info(sock_net(sk), user, *len, 0);
79913 break;
79914
79915 case IP6T_SO_GET_ENTRIES:
79916diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
79917index d8e95c7..81422bc 100644
79918--- a/net/ipv6/raw.c
79919+++ b/net/ipv6/raw.c
79920@@ -379,7 +379,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
79921 {
79922 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
79923 skb_checksum_complete(skb)) {
79924- atomic_inc(&sk->sk_drops);
79925+ atomic_inc_unchecked(&sk->sk_drops);
79926 kfree_skb(skb);
79927 return NET_RX_DROP;
79928 }
79929@@ -407,7 +407,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
79930 struct raw6_sock *rp = raw6_sk(sk);
79931
79932 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
79933- atomic_inc(&sk->sk_drops);
79934+ atomic_inc_unchecked(&sk->sk_drops);
79935 kfree_skb(skb);
79936 return NET_RX_DROP;
79937 }
79938@@ -431,7 +431,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
79939
79940 if (inet->hdrincl) {
79941 if (skb_checksum_complete(skb)) {
79942- atomic_inc(&sk->sk_drops);
79943+ atomic_inc_unchecked(&sk->sk_drops);
79944 kfree_skb(skb);
79945 return NET_RX_DROP;
79946 }
79947@@ -604,7 +604,7 @@ out:
79948 return err;
79949 }
79950
79951-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
79952+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
79953 struct flowi6 *fl6, struct dst_entry **dstp,
79954 unsigned int flags)
79955 {
79956@@ -916,12 +916,15 @@ do_confirm:
79957 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
79958 char __user *optval, int optlen)
79959 {
79960+ struct icmp6_filter filter;
79961+
79962 switch (optname) {
79963 case ICMPV6_FILTER:
79964 if (optlen > sizeof(struct icmp6_filter))
79965 optlen = sizeof(struct icmp6_filter);
79966- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
79967+ if (copy_from_user(&filter, optval, optlen))
79968 return -EFAULT;
79969+ raw6_sk(sk)->filter = filter;
79970 return 0;
79971 default:
79972 return -ENOPROTOOPT;
79973@@ -934,6 +937,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
79974 char __user *optval, int __user *optlen)
79975 {
79976 int len;
79977+ struct icmp6_filter filter;
79978
79979 switch (optname) {
79980 case ICMPV6_FILTER:
79981@@ -945,7 +949,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
79982 len = sizeof(struct icmp6_filter);
79983 if (put_user(len, optlen))
79984 return -EFAULT;
79985- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
79986+ filter = raw6_sk(sk)->filter;
79987+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
79988 return -EFAULT;
79989 return 0;
79990 default:
79991@@ -1253,7 +1258,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
79992 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
79993 0,
79994 sock_i_ino(sp),
79995- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
79996+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
79997 }
79998
79999 static int raw6_seq_show(struct seq_file *seq, void *v)
80000diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
80001index 73f2a6b..f8049a1 100644
80002--- a/net/ipv6/tcp_ipv6.c
80003+++ b/net/ipv6/tcp_ipv6.c
80004@@ -106,6 +106,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
80005 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
80006 }
80007
80008+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80009+extern int grsec_enable_blackhole;
80010+#endif
80011+
80012 static void tcp_v6_hash(struct sock *sk)
80013 {
80014 if (sk->sk_state != TCP_CLOSE) {
80015@@ -1525,6 +1529,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
80016 return 0;
80017
80018 reset:
80019+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80020+ if (!grsec_enable_blackhole)
80021+#endif
80022 tcp_v6_send_reset(sk, skb);
80023 discard:
80024 if (opt_skb)
80025@@ -1606,12 +1613,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
80026 TCP_SKB_CB(skb)->sacked = 0;
80027
80028 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
80029- if (!sk)
80030+ if (!sk) {
80031+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80032+ ret = 1;
80033+#endif
80034 goto no_tcp_socket;
80035+ }
80036
80037 process:
80038- if (sk->sk_state == TCP_TIME_WAIT)
80039+ if (sk->sk_state == TCP_TIME_WAIT) {
80040+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80041+ ret = 2;
80042+#endif
80043 goto do_time_wait;
80044+ }
80045
80046 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
80047 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
80048@@ -1660,6 +1675,10 @@ no_tcp_socket:
80049 bad_packet:
80050 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
80051 } else {
80052+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80053+ if (!grsec_enable_blackhole || (ret == 1 &&
80054+ (skb->dev->flags & IFF_LOOPBACK)))
80055+#endif
80056 tcp_v6_send_reset(NULL, skb);
80057 }
80058
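The tcp_ipv6.c hunks implement the GRKERNSEC_BLACKHOLE policy for IPv6 TCP: when the grsec_enable_blackhole sysctl is on, segments that reach no socket are dropped silently instead of answered with a RST, and `ret` records why the lookup failed (1 = no socket, 2 = TIME_WAIT) so resets are still sent for loopback traffic that merely found no listener. A condensed control-flow sketch, with the decision folded into one hypothetical helper:

```c
/* Condensed control-flow sketch of the blackhole gating above;
 * example_reply_reset() is illustrative, grsec_enable_blackhole is the
 * sysctl this patch exports. lookup_result mirrors `ret`: 1 = no
 * socket, 2 = TIME_WAIT. */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if.h>

#ifdef CONFIG_GRKERNSEC_BLACKHOLE
extern int grsec_enable_blackhole;
#endif

static void example_reply_reset(struct sk_buff *skb, int lookup_result)
{
#ifdef CONFIG_GRKERNSEC_BLACKHOLE
	/* stay silent unless blackholing is off, or the packet arrived
	 * on loopback and merely found no listening socket */
	if (grsec_enable_blackhole &&
	    !(lookup_result == 1 && (skb->dev->flags & IFF_LOOPBACK)))
		return;
#endif
	/* ... fall through to tcp_v6_send_reset(NULL, skb) as before ... */
}
```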
80059diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
80060index fc99972..69397e8 100644
80061--- a/net/ipv6/udp.c
80062+++ b/net/ipv6/udp.c
80063@@ -51,6 +51,10 @@
80064 #include <trace/events/skb.h>
80065 #include "udp_impl.h"
80066
80067+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80068+extern int grsec_enable_blackhole;
80069+#endif
80070+
80071 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
80072 {
80073 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
80074@@ -395,7 +399,7 @@ try_again:
80075 if (unlikely(err)) {
80076 trace_kfree_skb(skb, udpv6_recvmsg);
80077 if (!peeked) {
80078- atomic_inc(&sk->sk_drops);
80079+ atomic_inc_unchecked(&sk->sk_drops);
80080 if (is_udp4)
80081 UDP_INC_STATS_USER(sock_net(sk),
80082 UDP_MIB_INERRORS,
80083@@ -633,7 +637,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
80084 return rc;
80085 drop:
80086 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
80087- atomic_inc(&sk->sk_drops);
80088+ atomic_inc_unchecked(&sk->sk_drops);
80089 kfree_skb(skb);
80090 return -1;
80091 }
80092@@ -691,7 +695,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
80093 if (likely(skb1 == NULL))
80094 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
80095 if (!skb1) {
80096- atomic_inc(&sk->sk_drops);
80097+ atomic_inc_unchecked(&sk->sk_drops);
80098 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
80099 IS_UDPLITE(sk));
80100 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
80101@@ -862,6 +866,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
80102 goto discard;
80103
80104 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
80105+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80106+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
80107+#endif
80108 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
80109
80110 kfree_skb(skb);
80111@@ -1473,7 +1480,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
80112 0,
80113 sock_i_ino(sp),
80114 atomic_read(&sp->sk_refcnt), sp,
80115- atomic_read(&sp->sk_drops));
80116+ atomic_read_unchecked(&sp->sk_drops));
80117 }
80118
80119 int udp6_seq_show(struct seq_file *seq, void *v)
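The udp.c hunks apply the same blackhole gate to the ICMPv6 port-unreachable reply and convert the sk_drops bumps to atomic_inc_unchecked(). Under the PAX_REFCOUNT instrumentation this patch adds elsewhere, atomic_t overflow traps as a refcount bug; counters that are pure statistics and may legitimately wrap, such as sk_drops, are therefore moved to the unchecked variants. An illustrative split between the two counter kinds (the atomic_unchecked_t type and helpers exist only with the patch applied):

```c
/* Illustrative split between checked and unchecked counters; the
 * atomic_unchecked_t API is provided by this patch itself. */
#include <linux/atomic.h>

struct example_stats {
	atomic_t refs;			/* lifetime count: overflow is a bug */
	atomic_unchecked_t drops;	/* pure statistic: free to wrap */
};

static void example_drop(struct example_stats *st)
{
	/* bypasses the PAX_REFCOUNT overflow trap on purpose */
	atomic_inc_unchecked(&st->drops);
}
```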
80120diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
80121index 496ce2c..f79fac8 100644
80122--- a/net/irda/ircomm/ircomm_tty.c
80123+++ b/net/irda/ircomm/ircomm_tty.c
80124@@ -311,12 +311,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
80125 add_wait_queue(&port->open_wait, &wait);
80126
80127 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
80128- __FILE__, __LINE__, tty->driver->name, port->count);
80129+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
80130
80131 spin_lock_irqsave(&port->lock, flags);
80132 if (!tty_hung_up_p(filp)) {
80133 extra_count = 1;
80134- port->count--;
80135+ atomic_dec(&port->count);
80136 }
80137 spin_unlock_irqrestore(&port->lock, flags);
80138 port->blocked_open++;
80139@@ -352,7 +352,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
80140 }
80141
80142 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
80143- __FILE__, __LINE__, tty->driver->name, port->count);
80144+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
80145
80146 schedule();
80147 }
80148@@ -363,13 +363,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
80149 if (extra_count) {
80150 /* ++ is not atomic, so this should be protected - Jean II */
80151 spin_lock_irqsave(&port->lock, flags);
80152- port->count++;
80153+ atomic_inc(&port->count);
80154 spin_unlock_irqrestore(&port->lock, flags);
80155 }
80156 port->blocked_open--;
80157
80158 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
80159- __FILE__, __LINE__, tty->driver->name, port->count);
80160+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
80161
80162 if (!retval)
80163 port->flags |= ASYNC_NORMAL_ACTIVE;
80164@@ -443,12 +443,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
80165
80166 /* ++ is not atomic, so this should be protected - Jean II */
80167 spin_lock_irqsave(&self->port.lock, flags);
80168- self->port.count++;
80169+ atomic_inc(&self->port.count);
80170 spin_unlock_irqrestore(&self->port.lock, flags);
80171 tty_port_tty_set(&self->port, tty);
80172
80173 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
80174- self->line, self->port.count);
80175+ self->line, atomic_read(&self->port.count));
80176
80177 /* Not really used by us, but lets do it anyway */
80178 tty->low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
80179@@ -985,7 +985,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
80180 tty_kref_put(port->tty);
80181 }
80182 port->tty = NULL;
80183- port->count = 0;
80184+ atomic_set(&port->count, 0);
80185 spin_unlock_irqrestore(&port->lock, flags);
80186
80187 wake_up_interruptible(&port->open_wait);
80188@@ -1342,7 +1342,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
80189 seq_putc(m, '\n');
80190
80191 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
80192- seq_printf(m, "Open count: %d\n", self->port.count);
80193+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
80194 seq_printf(m, "Max data size: %d\n", self->max_data_size);
80195 seq_printf(m, "Max header size: %d\n", self->max_header_size);
80196
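Throughout ircomm_tty.c the plain int tty_port count becomes an atomic_t: as the retained Jean II comments note, `++` is not atomic, and some paths touched the counter without holding the port lock, so all accesses now go through atomic_read()/atomic_inc()/atomic_dec()/atomic_set(). The mechanical shape of the conversion, on a trimmed-down struct:

```c
/* Mechanical shape of the int -> atomic_t conversion; the struct is
 * trimmed down from tty_port for illustration. */
#include <linux/atomic.h>

struct example_port {
	atomic_t count;		/* was: int count */
};

static void example_open(struct example_port *port)
{
	atomic_inc(&port->count);		/* was: port->count++ */
}

static int example_open_count(struct example_port *port)
{
	return atomic_read(&port->count);	/* was: port->count */
}
```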
80197diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
80198index cd6f7a9..e63fe89 100644
80199--- a/net/iucv/af_iucv.c
80200+++ b/net/iucv/af_iucv.c
80201@@ -782,10 +782,10 @@ static int iucv_sock_autobind(struct sock *sk)
80202
80203 write_lock_bh(&iucv_sk_list.lock);
80204
80205- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
80206+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
80207 while (__iucv_get_sock_by_name(name)) {
80208 sprintf(name, "%08x",
80209- atomic_inc_return(&iucv_sk_list.autobind_name));
80210+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
80211 }
80212
80213 write_unlock_bh(&iucv_sk_list.lock);
80214diff --git a/net/key/af_key.c b/net/key/af_key.c
80215index 08897a3..0b812ab 100644
80216--- a/net/key/af_key.c
80217+++ b/net/key/af_key.c
80218@@ -3019,10 +3019,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
80219 static u32 get_acqseq(void)
80220 {
80221 u32 res;
80222- static atomic_t acqseq;
80223+ static atomic_unchecked_t acqseq;
80224
80225 do {
80226- res = atomic_inc_return(&acqseq);
80227+ res = atomic_inc_return_unchecked(&acqseq);
80228 } while (!res);
80229 return res;
80230 }
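Both iucv_sk_list.autobind_name and af_key's acqseq are sequence generators that are expected to wrap around, so the patch moves them to atomic_unchecked_t; with PAX_REFCOUNT active, leaving them as atomic_t would eventually trip the overflow handler on a perfectly legitimate wrap. A sketch of the get_acqseq()-style generator after conversion:

```c
/* Wrap-tolerant sequence generator in the style of get_acqseq() after
 * conversion; atomic_unchecked_t comes from this patch. */
#include <linux/types.h>
#include <linux/atomic.h>

static u32 example_next_seq(void)
{
	static atomic_unchecked_t seq;	/* wraps by design, so unchecked */
	u32 res;

	do {
		res = atomic_inc_return_unchecked(&seq);
	} while (!res);			/* 0 is reserved, skip it on wrap */
	return res;
}
```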
80231diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
80232index 7371f67..9897314 100644
80233--- a/net/mac80211/cfg.c
80234+++ b/net/mac80211/cfg.c
80235@@ -2594,7 +2594,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
80236 else
80237 local->probe_req_reg--;
80238
80239- if (!local->open_count)
80240+ if (!local_read(&local->open_count))
80241 break;
80242
80243 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
80244diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
80245index 3da215c..497a6e3 100644
80246--- a/net/mac80211/ieee80211_i.h
80247+++ b/net/mac80211/ieee80211_i.h
80248@@ -28,6 +28,7 @@
80249 #include <net/ieee80211_radiotap.h>
80250 #include <net/cfg80211.h>
80251 #include <net/mac80211.h>
80252+#include <asm/local.h>
80253 #include "key.h"
80254 #include "sta_info.h"
80255 #include "debug.h"
80256@@ -852,7 +853,7 @@ struct ieee80211_local {
80257 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
80258 spinlock_t queue_stop_reason_lock;
80259
80260- int open_count;
80261+ local_t open_count;
80262 int monitors, cooked_mntrs;
80263 /* number of interfaces with corresponding FIF_ flags */
80264 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
80265diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
80266index 0f5af91..4dba9e7 100644
80267--- a/net/mac80211/iface.c
80268+++ b/net/mac80211/iface.c
80269@@ -465,7 +465,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
80270 break;
80271 }
80272
80273- if (local->open_count == 0) {
80274+ if (local_read(&local->open_count) == 0) {
80275 res = drv_start(local);
80276 if (res)
80277 goto err_del_bss;
80278@@ -508,7 +508,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
80279 break;
80280 }
80281
80282- if (local->monitors == 0 && local->open_count == 0) {
80283+ if (local->monitors == 0 && local_read(&local->open_count) == 0) {
80284 res = ieee80211_add_virtual_monitor(local);
80285 if (res)
80286 goto err_stop;
80287@@ -616,7 +616,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
80288 mutex_unlock(&local->mtx);
80289
80290 if (coming_up)
80291- local->open_count++;
80292+ local_inc(&local->open_count);
80293
80294 if (hw_reconf_flags)
80295 ieee80211_hw_config(local, hw_reconf_flags);
80296@@ -630,7 +630,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
80297 err_del_interface:
80298 drv_remove_interface(local, sdata);
80299 err_stop:
80300- if (!local->open_count)
80301+ if (!local_read(&local->open_count))
80302 drv_stop(local);
80303 err_del_bss:
80304 sdata->bss = NULL;
80305@@ -762,7 +762,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
80306 }
80307
80308 if (going_down)
80309- local->open_count--;
80310+ local_dec(&local->open_count);
80311
80312 switch (sdata->vif.type) {
80313 case NL80211_IFTYPE_AP_VLAN:
80314@@ -818,7 +818,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
80315
80316 ieee80211_recalc_ps(local, -1);
80317
80318- if (local->open_count == 0) {
80319+ if (local_read(&local->open_count) == 0) {
80320 if (local->ops->napi_poll)
80321 napi_disable(&local->napi);
80322 ieee80211_clear_tx_pending(local);
80323@@ -850,7 +850,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
80324 }
80325 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
80326
80327- if (local->monitors == local->open_count && local->monitors > 0)
80328+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
80329 ieee80211_add_virtual_monitor(local);
80330 }
80331
80332diff --git a/net/mac80211/main.c b/net/mac80211/main.c
80333index f57f597..e0a7c03 100644
80334--- a/net/mac80211/main.c
80335+++ b/net/mac80211/main.c
80336@@ -164,7 +164,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
80337 local->hw.conf.power_level = power;
80338 }
80339
80340- if (changed && local->open_count) {
80341+ if (changed && local_read(&local->open_count)) {
80342 ret = drv_config(local, changed);
80343 /*
80344 * Goal:
80345diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
80346index 5c572e7..ecf75ce 100644
80347--- a/net/mac80211/pm.c
80348+++ b/net/mac80211/pm.c
80349@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
80350 struct ieee80211_sub_if_data *sdata;
80351 struct sta_info *sta;
80352
80353- if (!local->open_count)
80354+ if (!local_read(&local->open_count))
80355 goto suspend;
80356
80357 ieee80211_scan_cancel(local);
80358@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
80359 cancel_work_sync(&local->dynamic_ps_enable_work);
80360 del_timer_sync(&local->dynamic_ps_timer);
80361
80362- local->wowlan = wowlan && local->open_count;
80363+ local->wowlan = wowlan && local_read(&local->open_count);
80364 if (local->wowlan) {
80365 int err = drv_suspend(local, wowlan);
80366 if (err < 0) {
80367@@ -143,7 +143,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
80368 drv_remove_interface(local, sdata);
80369
80370 /* stop hardware - this must stop RX */
80371- if (local->open_count)
80372+ if (local_read(&local->open_count))
80373 ieee80211_stop_device(local);
80374
80375 suspend:
80376diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
80377index 3313c11..bec9f17 100644
80378--- a/net/mac80211/rate.c
80379+++ b/net/mac80211/rate.c
80380@@ -494,7 +494,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
80381
80382 ASSERT_RTNL();
80383
80384- if (local->open_count)
80385+ if (local_read(&local->open_count))
80386 return -EBUSY;
80387
80388 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
80389diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
80390index c97a065..ff61928 100644
80391--- a/net/mac80211/rc80211_pid_debugfs.c
80392+++ b/net/mac80211/rc80211_pid_debugfs.c
80393@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
80394
80395 spin_unlock_irqrestore(&events->lock, status);
80396
80397- if (copy_to_user(buf, pb, p))
80398+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
80399 return -EFAULT;
80400
80401 return p;
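This one-line hunk hardens a debugfs read: in rate_control_pid_events_read(), `p` comes from snprintf(), which returns the length it would have written, so on truncation `p` can exceed sizeof(pb) and the unguarded copy_to_user() would leak adjacent stack memory. The idiom, in an illustrative reader:

```c
/* Illustrative reader showing the length check added above; pb and p
 * mirror the names in the hunk, the function itself is hypothetical. */
#include <linux/kernel.h>
#include <linux/uaccess.h>

static ssize_t example_events_read(char __user *buf, const char *line)
{
	char pb[104];
	size_t p;

	/* snprintf() reports the length it *wanted* to write, which can
	 * exceed sizeof(pb) on truncation -- hence the explicit bound */
	p = snprintf(pb, sizeof(pb), "%s\n", line);
	if (p > sizeof(pb) || copy_to_user(buf, pb, p))
		return -EFAULT;
	return p;
}
```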
80402diff --git a/net/mac80211/util.c b/net/mac80211/util.c
80403index 0151ae3..26709d3 100644
80404--- a/net/mac80211/util.c
80405+++ b/net/mac80211/util.c
80406@@ -1332,7 +1332,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
80407 }
80408 #endif
80409 /* everything else happens only if HW was up & running */
80410- if (!local->open_count)
80411+ if (!local_read(&local->open_count))
80412 goto wake_up;
80413
80414 /*
80415diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
80416index fefa514..0755f23 100644
80417--- a/net/netfilter/Kconfig
80418+++ b/net/netfilter/Kconfig
80419@@ -929,6 +929,16 @@ config NETFILTER_XT_MATCH_ESP
80420
80421 To compile it as a module, choose M here. If unsure, say N.
80422
80423+config NETFILTER_XT_MATCH_GRADM
80424+ tristate '"gradm" match support'
80425+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
80426+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
80427+ ---help---
80428+	  The gradm match allows matching on whether grsecurity RBAC is enabled.
80429+	  It is useful when iptables rules are applied early during boot to
80430+ prevent connections to the machine (except from a trusted host)
80431+ while the RBAC system is disabled.
80432+
80433 config NETFILTER_XT_MATCH_HASHLIMIT
80434 tristate '"hashlimit" match support'
80435 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
80436diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
80437index 3259697..54d5393 100644
80438--- a/net/netfilter/Makefile
80439+++ b/net/netfilter/Makefile
80440@@ -109,6 +109,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
80441 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
80442 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
80443 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
80444+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
80445 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
80446 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
80447 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
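The Kconfig entry and Makefile rule above wire the new xt_gradm match into the netfilter build; the module source itself is added at the end of this patch (net/netfilter/xt_gradm.c). For orientation, a generic xtables match skeleton showing what such a module registers; this is not the actual xt_gradm.c body:

```c
/* Generic xtables match skeleton; illustrative only -- the real
 * xt_gradm.c that this Kconfig entry builds is added further below. */
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>

static bool example_mt(const struct sk_buff *skb,
		       struct xt_action_param *par)
{
	/* a real match inspects skb/par state here */
	return true;
}

static struct xt_match example_mt_reg __read_mostly = {
	.name     = "example",
	.revision = 0,
	.family   = NFPROTO_UNSPEC,
	.match    = example_mt,
	.me       = THIS_MODULE,
};

static int __init example_mt_init(void)
{
	return xt_register_match(&example_mt_reg);
}

static void __exit example_mt_exit(void)
{
	xt_unregister_match(&example_mt_reg);
}

module_init(example_mt_init);
module_exit(example_mt_exit);
MODULE_LICENSE("GPL");
```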
80448diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
80449index 1548df9..98ad9b4 100644
80450--- a/net/netfilter/ipvs/ip_vs_conn.c
80451+++ b/net/netfilter/ipvs/ip_vs_conn.c
80452@@ -557,7 +557,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
80453 /* Increase the refcnt counter of the dest */
80454 atomic_inc(&dest->refcnt);
80455
80456- conn_flags = atomic_read(&dest->conn_flags);
80457+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
80458 if (cp->protocol != IPPROTO_UDP)
80459 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
80460 flags = cp->flags;
80461@@ -902,7 +902,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
80462 atomic_set(&cp->refcnt, 1);
80463
80464 atomic_set(&cp->n_control, 0);
80465- atomic_set(&cp->in_pkts, 0);
80466+ atomic_set_unchecked(&cp->in_pkts, 0);
80467
80468 atomic_inc(&ipvs->conn_count);
80469 if (flags & IP_VS_CONN_F_NO_CPORT)
80470@@ -1183,7 +1183,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
80471
80472 /* Don't drop the entry if its number of incoming packets is not
80473 located in [0, 8] */
80474- i = atomic_read(&cp->in_pkts);
80475+ i = atomic_read_unchecked(&cp->in_pkts);
80476 if (i > 8 || i < 0) return 0;
80477
80478 if (!todrop_rate[i]) return 0;
80479diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
80480index 58918e2..4d177a9 100644
80481--- a/net/netfilter/ipvs/ip_vs_core.c
80482+++ b/net/netfilter/ipvs/ip_vs_core.c
80483@@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
80484 ret = cp->packet_xmit(skb, cp, pd->pp);
80485 /* do not touch skb anymore */
80486
80487- atomic_inc(&cp->in_pkts);
80488+ atomic_inc_unchecked(&cp->in_pkts);
80489 ip_vs_conn_put(cp);
80490 return ret;
80491 }
80492@@ -1681,7 +1681,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
80493 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
80494 pkts = sysctl_sync_threshold(ipvs);
80495 else
80496- pkts = atomic_add_return(1, &cp->in_pkts);
80497+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
80498
80499 if (ipvs->sync_state & IP_VS_STATE_MASTER)
80500 ip_vs_sync_conn(net, cp, pkts);
80501diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
80502index c4ee437..a774a74 100644
80503--- a/net/netfilter/ipvs/ip_vs_ctl.c
80504+++ b/net/netfilter/ipvs/ip_vs_ctl.c
80505@@ -787,7 +787,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
80506 ip_vs_rs_hash(ipvs, dest);
80507 write_unlock_bh(&ipvs->rs_lock);
80508 }
80509- atomic_set(&dest->conn_flags, conn_flags);
80510+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
80511
80512 /* bind the service */
80513 if (!dest->svc) {
80514@@ -2081,7 +2081,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
80515 " %-7s %-6d %-10d %-10d\n",
80516 &dest->addr.in6,
80517 ntohs(dest->port),
80518- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
80519+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
80520 atomic_read(&dest->weight),
80521 atomic_read(&dest->activeconns),
80522 atomic_read(&dest->inactconns));
80523@@ -2092,7 +2092,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
80524 "%-7s %-6d %-10d %-10d\n",
80525 ntohl(dest->addr.ip),
80526 ntohs(dest->port),
80527- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
80528+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
80529 atomic_read(&dest->weight),
80530 atomic_read(&dest->activeconns),
80531 atomic_read(&dest->inactconns));
80532@@ -2562,7 +2562,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
80533
80534 entry.addr = dest->addr.ip;
80535 entry.port = dest->port;
80536- entry.conn_flags = atomic_read(&dest->conn_flags);
80537+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
80538 entry.weight = atomic_read(&dest->weight);
80539 entry.u_threshold = dest->u_threshold;
80540 entry.l_threshold = dest->l_threshold;
80541@@ -3098,7 +3098,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
80542 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
80543 nla_put_u16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
80544 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
80545- (atomic_read(&dest->conn_flags) &
80546+ (atomic_read_unchecked(&dest->conn_flags) &
80547 IP_VS_CONN_F_FWD_MASK)) ||
80548 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
80549 atomic_read(&dest->weight)) ||
80550diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
80551index effa10c..9058928 100644
80552--- a/net/netfilter/ipvs/ip_vs_sync.c
80553+++ b/net/netfilter/ipvs/ip_vs_sync.c
80554@@ -596,7 +596,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
80555 cp = cp->control;
80556 if (cp) {
80557 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
80558- pkts = atomic_add_return(1, &cp->in_pkts);
80559+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
80560 else
80561 pkts = sysctl_sync_threshold(ipvs);
80562 ip_vs_sync_conn(net, cp->control, pkts);
80563@@ -758,7 +758,7 @@ control:
80564 if (!cp)
80565 return;
80566 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
80567- pkts = atomic_add_return(1, &cp->in_pkts);
80568+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
80569 else
80570 pkts = sysctl_sync_threshold(ipvs);
80571 goto sloop;
80572@@ -885,7 +885,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
80573
80574 if (opt)
80575 memcpy(&cp->in_seq, opt, sizeof(*opt));
80576- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
80577+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
80578 cp->state = state;
80579 cp->old_state = cp->state;
80580 /*
80581diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
80582index cc4c809..50f8fe5 100644
80583--- a/net/netfilter/ipvs/ip_vs_xmit.c
80584+++ b/net/netfilter/ipvs/ip_vs_xmit.c
80585@@ -1202,7 +1202,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
80586 else
80587 rc = NF_ACCEPT;
80588 /* do not touch skb anymore */
80589- atomic_inc(&cp->in_pkts);
80590+ atomic_inc_unchecked(&cp->in_pkts);
80591 goto out;
80592 }
80593
80594@@ -1323,7 +1323,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
80595 else
80596 rc = NF_ACCEPT;
80597 /* do not touch skb anymore */
80598- atomic_inc(&cp->in_pkts);
80599+ atomic_inc_unchecked(&cp->in_pkts);
80600 goto out;
80601 }
80602
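The IPVS hunks in ip_vs_conn.c, ip_vs_core.c, ip_vs_ctl.c, ip_vs_sync.c and ip_vs_xmit.c convert cp->in_pkts and dest->conn_flags to unchecked atomics for the same PAX_REFCOUNT reason as sk_drops above: they carry per-connection packet statistics and forwarding-flag bits, not object lifetimes, so wrapping or rewriting them is harmless. The accounting pattern after conversion:

```c
/* Accounting pattern after the IPVS conversion; field names follow the
 * hunks, the struct is trimmed down from ip_vs_conn. */
#include <linux/atomic.h>

struct example_conn {
	atomic_unchecked_t in_pkts;	/* was: atomic_t in_pkts */
};

static int example_account_packet(struct example_conn *cp)
{
	/* every forwarded packet bumps the counter; wrapping is harmless,
	 * so the overflow instrumentation is deliberately bypassed */
	return atomic_add_return_unchecked(1, &cp->in_pkts);
}
```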
80603diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
80604index 0f241be..2c9be6d 100644
80605--- a/net/netfilter/nf_conntrack_core.c
80606+++ b/net/netfilter/nf_conntrack_core.c
80607@@ -1532,6 +1532,10 @@ err_extend:
80608 #define UNCONFIRMED_NULLS_VAL ((1<<30)+0)
80609 #define DYING_NULLS_VAL ((1<<30)+1)
80610
80611+#ifdef CONFIG_GRKERNSEC_HIDESYM
80612+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
80613+#endif
80614+
80615 static int nf_conntrack_init_net(struct net *net)
80616 {
80617 int ret;
80618@@ -1545,7 +1549,11 @@ static int nf_conntrack_init_net(struct net *net)
80619 goto err_stat;
80620 }
80621
80622+#ifdef CONFIG_GRKERNSEC_HIDESYM
80623+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
80624+#else
80625 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
80626+#endif
80627 if (!net->ct.slabname) {
80628 ret = -ENOMEM;
80629 goto err_slabname;
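Under GRKERNSEC_HIDESYM, the per-netns conntrack slab cache name no longer embeds the net pointer ("nf_conntrack_%p"), which would leak a kernel address through /proc/slabinfo; a monotonically increasing counter keeps the names unique without revealing anything. A sketch of the naming helper (the cast is added here for printf-format cleanliness and is not in the patch):

```c
/* Sketch of the HIDESYM-safe cache naming; example_cache_id stands in
 * for conntrack_cache_id. */
#include <linux/slab.h>
#include <linux/atomic.h>

static atomic_unchecked_t example_cache_id = ATOMIC_INIT(0);

static char *example_slabname(void)
{
	/* a counter keeps per-netns names unique without exposing the
	 * net pointer the old "nf_conntrack_%p" format leaked */
	return kasprintf(GFP_KERNEL, "nf_conntrack_%08lx",
			 (unsigned long)atomic_inc_return_unchecked(&example_cache_id));
}
```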
80630diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
80631index 9f199f2..719ad23 100644
80632--- a/net/netfilter/nfnetlink_log.c
80633+++ b/net/netfilter/nfnetlink_log.c
80634@@ -71,7 +71,7 @@ struct nfulnl_instance {
80635 };
80636
80637 static DEFINE_SPINLOCK(instances_lock);
80638-static atomic_t global_seq;
80639+static atomic_unchecked_t global_seq;
80640
80641 #define INSTANCE_BUCKETS 16
80642 static struct hlist_head instance_table[INSTANCE_BUCKETS];
80643@@ -527,7 +527,7 @@ __build_packet_message(struct nfulnl_instance *inst,
80644 /* global sequence number */
80645 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
80646 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
80647- htonl(atomic_inc_return(&global_seq))))
80648+ htonl(atomic_inc_return_unchecked(&global_seq))))
80649 goto nla_put_failure;
80650
80651 if (data_len) {
80652diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
80653new file mode 100644
80654index 0000000..c566332
80655--- /dev/null
80656+++ b/net/netfilter/xt_gradm.c
80657@@ -0,0 +1,51 @@
80658+/*
80659+ * gradm match for netfilter
80660