grsecurity-2.9.1-3.8.4-201303252035.patch
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index b89a739..b47493f 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -2,9 +2,11 @@
 *.aux
 *.bin
 *.bz2
+*.c.[012]*.*
 *.cis
 *.cpio
 *.csp
+*.dbg
 *.dsp
 *.dvi
 *.elf
@@ -14,6 +16,7 @@
 *.gcov
 *.gen.S
 *.gif
+*.gmo
 *.grep
 *.grp
 *.gz
@@ -48,14 +51,17 @@
 *.tab.h
 *.tex
 *.ver
+*.vim
 *.xml
 *.xz
 *_MODULES
+*_reg_safe.h
 *_vga16.c
 *~
 \#*#
 *.9
-.*
+.[^g]*
+.gen*
 .*.d
 .mm
 53c700_d.h
@@ -69,6 +75,7 @@ Image
 Module.markers
 Module.symvers
 PENDING
+PERF*
 SCCS
 System.map*
 TAGS
@@ -80,6 +87,7 @@ aic7*seq.h*
 aicasm
 aicdb.h*
 altivec*.c
+ashldi3.S
 asm-offsets.h
 asm_offsets.h
 autoconf.h*
@@ -92,19 +100,24 @@ bounds.h
 bsetup
 btfixupprep
 build
+builtin-policy.h
 bvmlinux
 bzImage*
 capability_names.h
 capflags.c
 classlist.h*
+clut_vga16.c
+common-cmds.h
 comp*.log
 compile.h*
 conf
 config
 config-*
 config_data.h*
+config.c
 config.mak
 config.mak.autogen
+config.tmp
 conmakehash
 consolemap_deftbl.c*
 cpustr.h
@@ -115,9 +128,11 @@ devlist.h*
 dnotify_test
 docproc
 dslm
+dtc-lexer.lex.c
 elf2ecoff
 elfconfig.h*
 evergreen_reg_safe.h
+exception_policy.conf
 fixdep
 flask.h
 fore200e_mkfirm
@@ -125,12 +140,15 @@ fore200e_pca_fw.c*
 gconf
 gconf.glade.h
 gen-devlist
+gen-kdb_cmds.c
 gen_crc32table
 gen_init_cpio
 generated
 genheaders
 genksyms
 *_gray256.c
+hash
+hid-example
 hpet_example
 hugepage-mmap
 hugepage-shm
@@ -145,14 +163,14 @@ int32.c
 int4.c
 int8.c
 kallsyms
-kconfig
+kern_constants.h
 keywords.c
 ksym.c*
 ksym.h*
 kxgettext
 lex.c
 lex.*.c
-linux
+lib1funcs.S
 logo_*.c
 logo_*_clut224.c
 logo_*_mono.c
@@ -162,14 +180,15 @@ mach-types.h
 machtypes.h
 map
 map_hugetlb
-media
 mconf
+mdp
 miboot*
 mk_elfconfig
 mkboot
 mkbugboot
 mkcpustr
 mkdep
+mkpiggy
 mkprep
 mkregtable
 mktables
@@ -185,6 +204,8 @@ oui.c*
 page-types
 parse.c
 parse.h
+parse-events*
+pasyms.h
 patches*
 pca200e.bin
 pca200e_ecd.bin2
@@ -194,6 +215,7 @@ perf-archive
 piggyback
 piggy.gzip
 piggy.S
+pmu-*
 pnmtologo
 ppc_defs.h*
 pss_boot.h
@@ -203,7 +225,10 @@ r200_reg_safe.h
 r300_reg_safe.h
 r420_reg_safe.h
 r600_reg_safe.h
+realmode.lds
+realmode.relocs
 recordmcount
+regdb.c
 relocs
 rlim_names.h
 rn50_reg_safe.h
@@ -213,8 +238,12 @@ series
 setup
 setup.bin
 setup.elf
+signing_key*
+size_overflow_hash.h
 sImage
+slabinfo
 sm_tbl*
+sortextable
 split-include
 syscalltab.h
 tables.c
@@ -224,6 +253,7 @@ tftpboot.img
 timeconst.h
 times.h*
 trix_boot.h
+user_constants.h
 utsrelease.h*
 vdso-syms.lds
 vdso.lds
@@ -235,13 +265,17 @@ vdso32.lds
 vdso32.so.dbg
 vdso64.lds
 vdso64.so.dbg
+vdsox32.lds
+vdsox32-syms.lds
 version.h*
 vmImage
 vmlinux
 vmlinux-*
 vmlinux.aout
 vmlinux.bin.all
+vmlinux.bin.bz2
 vmlinux.lds
+vmlinux.relocs
 vmlinuz
 voffset.h
 vsyscall.lds
@@ -249,9 +283,12 @@ vsyscall_32.lds
 wanxlfw.inc
 uImage
 unifdef
+utsrelease.h
 wakeup.bin
 wakeup.elf
 wakeup.lds
+x509*
 zImage*
 zconf.hash.c
+zconf.lex.c
 zoffset.h
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 986614d..e8bfedc 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -922,6 +922,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
 Default: 1024

+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
+ ignore grsecurity's /proc restrictions
+
+
 hashdist= [KNL,NUMA] Large hashes allocated during boot
 are distributed across NUMA nodes. Defaults on
 for 64-bit NUMA, off otherwise.
@@ -2121,6 +2125,18 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 the specified number of seconds. This is to be used if
 your oopses keep scrolling off the screen.

+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
+ virtualization environments that don't cope well with the
+ expand down segment used by UDEREF on X86-32 or the frequent
+ page table updates on X86-64.
+
+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
+
+ pax_extra_latent_entropy
+ Enable a very simple form of latent entropy extraction
+ from the first 4GB of memory as the bootmem allocator
+ passes the memory pages to the buddy allocator.
+
 pcbit= [HW,ISDN]

 pcd. [PARIDE]
diff --git a/Makefile b/Makefile
index e20f162..7090f4a 100644
--- a/Makefile
+++ b/Makefile
@@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \

 HOSTCC = gcc
 HOSTCXX = g++
-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
-HOSTCXXFLAGS = -O2
+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks

 # Decide whether to build built-in, modular, or both.
 # Normally, just do built-in.
@@ -414,8 +415,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
 # Rules shared between *config targets and build targets

 # Basic helpers built in scripts/
-PHONY += scripts_basic
-scripts_basic:
+PHONY += scripts_basic gcc-plugins
+scripts_basic: gcc-plugins
 $(Q)$(MAKE) $(build)=scripts/basic
 $(Q)rm -f .tmp_quiet_recordmcount

@@ -575,6 +576,62 @@ else
 KBUILD_CFLAGS += -O2
 endif

+ifndef DISABLE_PAX_PLUGINS
+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
+else
+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
+endif
+ifneq ($(PLUGINCC),)
+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
+endif
+ifdef CONFIG_PAX_MEMORY_STACKLEAK
+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
+endif
+ifdef CONFIG_KALLOCSTAT_PLUGIN
+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
+endif
+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
+endif
+ifdef CONFIG_CHECKER_PLUGIN
+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
+endif
+endif
+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
+ifdef CONFIG_PAX_SIZE_OVERFLOW
+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
+endif
+ifdef CONFIG_PAX_LATENT_ENTROPY
+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
+endif
+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS)
+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN
+ifeq ($(KBUILD_EXTMOD),)
+gcc-plugins:
+ $(Q)$(MAKE) $(build)=tools/gcc
+else
+gcc-plugins: ;
+endif
+else
+gcc-plugins:
+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.)
+else
+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
+endif
+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
+endif
+endif
+
 include $(srctree)/arch/$(SRCARCH)/Makefile

 ifdef CONFIG_READABLE_ASM
@@ -731,7 +788,7 @@ export mod_sign_cmd


 ifeq ($(KBUILD_EXTMOD),)
-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/

 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
@@ -778,6 +835,8 @@ endif

 # The actual objects are generated when descending,
 # make sure no implicit rule kicks in
+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;

 # Handle descending into subdirectories listed in $(vmlinux-dirs)
@@ -787,7 +846,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
 # Error messages still appears in the original language

 PHONY += $(vmlinux-dirs)
-$(vmlinux-dirs): prepare scripts
+$(vmlinux-dirs): gcc-plugins prepare scripts
 $(Q)$(MAKE) $(build)=$@

 # Store (new) KERNELRELASE string in include/config/kernel.release
@@ -831,6 +890,7 @@ prepare0: archprepare FORCE
 $(Q)$(MAKE) $(build)=.

 # All the preparing..
+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
 prepare: prepare0

 # Generate some files
@@ -938,6 +998,8 @@ all: modules
 # using awk while concatenating to the final file.

 PHONY += modules
+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
 @$(kecho) ' Building modules, stage 2.';
@@ -953,7 +1015,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)

 # Target to prepare building external modules
 PHONY += modules_prepare
-modules_prepare: prepare scripts
+modules_prepare: gcc-plugins prepare scripts

 # Target to install modules
 PHONY += modules_install
@@ -1019,7 +1081,7 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
 signing_key.priv signing_key.x509 x509.genkey \
 extra_certificates signing_key.x509.keyid \
- signing_key.x509.signer
+ signing_key.x509.signer tools/gcc/size_overflow_hash.h

 # clean - Delete most, but leave enough to build external modules
 #
@@ -1059,6 +1121,7 @@ distclean: mrproper
 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
 -o -name '.*.rej' \
+ -o -name '.*.rej' -o -name '*.so' \
 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
 -type f -print | xargs rm -f

@@ -1219,6 +1282,8 @@ PHONY += $(module-dirs) modules
 $(module-dirs): crmodverdir $(objtree)/Module.symvers
 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)

+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 modules: $(module-dirs)
 @$(kecho) ' Building modules, stage 2.';
 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
@@ -1355,17 +1420,21 @@ else
 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
 endif

-%.s: %.c prepare scripts FORCE
+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.s: %.c gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.i: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.o: %.c prepare scripts FORCE
+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.o: %.c gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.lst: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.s: %.S prepare scripts FORCE
+%.s: %.S gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.o: %.S prepare scripts FORCE
+%.o: %.S gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.symtypes: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
@@ -1375,11 +1444,15 @@ endif
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir)
-%/: prepare scripts FORCE
+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%/: gcc-plugins prepare scripts FORCE
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir)
-%.ko: prepare scripts FORCE
+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.ko: gcc-plugins prepare scripts FORCE
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir) $(@:.ko=.o)
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index c2cbe4f..f7264b4 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 #define atomic_dec(v) atomic_sub(1,(v))
 #define atomic64_dec(v) atomic64_sub(1,(v))

+#define atomic64_read_unchecked(v) atomic64_read(v)
+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
+#define atomic64_inc_unchecked(v) atomic64_inc(v)
+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
+#define atomic64_dec_unchecked(v) atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
+
 #define smp_mb__before_atomic_dec() smp_mb()
 #define smp_mb__after_atomic_dec() smp_mb()
 #define smp_mb__before_atomic_inc() smp_mb()
diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
index ad368a9..fbe0f25 100644
--- a/arch/alpha/include/asm/cache.h
+++ b/arch/alpha/include/asm/cache.h
@@ -4,19 +4,19 @@
 #ifndef __ARCH_ALPHA_CACHE_H
 #define __ARCH_ALPHA_CACHE_H

+#include <linux/const.h>

 /* Bytes per L1 (data) cache line. */
 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
-# define L1_CACHE_BYTES 64
 # define L1_CACHE_SHIFT 6
 #else
 /* Both EV4 and EV5 are write-through, read-allocate,
 direct-mapped, physical.
 */
-# define L1_CACHE_BYTES 32
 # define L1_CACHE_SHIFT 5
 #endif

+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
 #define SMP_CACHE_BYTES L1_CACHE_BYTES

 #endif
diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
index 968d999..d36b2df 100644
--- a/arch/alpha/include/asm/elf.h
+++ b/arch/alpha/include/asm/elf.h
@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];

 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)

+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
+
+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
+#endif
+
 /* $0 is set by ld.so to a pointer to a function which might be
 registered using atexit. This provides a mean for the dynamic
 linker to call DT_FINI functions for shared libraries that have
diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
index bc2a0da..8ad11ee 100644
--- a/arch/alpha/include/asm/pgalloc.h
+++ b/arch/alpha/include/asm/pgalloc.h
@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
 pgd_set(pgd, pmd);
 }

+static inline void
+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+{
+ pgd_populate(mm, pgd, pmd);
+}
+
 extern pgd_t *pgd_alloc(struct mm_struct *mm);

 static inline void
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index 81a4342..348b927 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -102,6 +102,17 @@ struct vm_area_struct;
 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+
+#ifdef CONFIG_PAX_PAGEEXEC
+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
+#else
+# define PAGE_SHARED_NOEXEC PAGE_SHARED
+# define PAGE_COPY_NOEXEC PAGE_COPY
+# define PAGE_READONLY_NOEXEC PAGE_READONLY
+#endif
+
 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)

 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
index 2fd00b7..cfd5069 100644
--- a/arch/alpha/kernel/module.c
+++ b/arch/alpha/kernel/module.c
@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,

 /* The small sections were sorted to the end of the segment.
 The following should definitely cover them. */
- gp = (u64)me->module_core + me->core_size - 0x8000;
+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
 got = sechdrs[me->arch.gotsecindex].sh_addr;

 for (i = 0; i < n; i++) {
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 14db93e..47bed62 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1295,16 +1295,16 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
 generic version except that we know how to honor ADDR_LIMIT_32BIT. */

 static unsigned long
-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
- unsigned long limit)
+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
+ unsigned long limit, unsigned long flags)
 {
 struct vm_area_struct *vma = find_vma(current->mm, addr);
-
+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
 while (1) {
 /* At this point: (!vma || addr < vma->vm_end). */
 if (limit - len < addr)
 return -ENOMEM;
- if (!vma || addr + len <= vma->vm_start)
+ if (check_heap_stack_gap(vma, addr, len, offset))
 return addr;
 addr = vma->vm_end;
 vma = vma->vm_next;
@@ -1340,20 +1340,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 merely specific addresses, but regions of memory -- perhaps
 this feature should be incorporated into all ports? */

+#ifdef CONFIG_PAX_RANDMMAP
+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 if (addr) {
- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
 if (addr != (unsigned long) -ENOMEM)
 return addr;
 }

 /* Next, try allocating at TASK_UNMAPPED_BASE. */
- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
- len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
+
 if (addr != (unsigned long) -ENOMEM)
 return addr;

 /* Finally, try allocating in low memory. */
- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);

 return addr;
 }
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 0c4132d..88f0d53 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
 __reload_thread(pcb);
 }

+#ifdef CONFIG_PAX_PAGEEXEC
+/*
+ * PaX: decide what to do with offenders (regs->pc = fault address)
+ *
+ * returns 1 when task should be killed
+ * 2 when patched PLT trampoline was detected
+ * 3 when unpatched PLT trampoline was detected
+ */
+static int pax_handle_fetch_fault(struct pt_regs *regs)
+{
+
+#ifdef CONFIG_PAX_EMUPLT
+ int err;
+
+ do { /* PaX: patched PLT emulation #1 */
+ unsigned int ldah, ldq, jmp;
+
+ err = get_user(ldah, (unsigned int *)regs->pc);
+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
+
+ if (err)
+ break;
+
+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
+ jmp == 0x6BFB0000U)
+ {
+ unsigned long r27, addr;
+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
+
+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
+ err = get_user(r27, (unsigned long *)addr);
+ if (err)
+ break;
+
+ regs->r27 = r27;
+ regs->pc = r27;
+ return 2;
+ }
+ } while (0);
+
+ do { /* PaX: patched PLT emulation #2 */
+ unsigned int ldah, lda, br;
+
+ err = get_user(ldah, (unsigned int *)regs->pc);
+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
+ err |= get_user(br, (unsigned int *)(regs->pc+8));
+
+ if (err)
+ break;
+
+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
+ (br & 0xFFE00000U) == 0xC3E00000U)
+ {
+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
+
+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
+ return 2;
+ }
+ } while (0);
+
+ do { /* PaX: unpatched PLT emulation */
+ unsigned int br;
+
+ err = get_user(br, (unsigned int *)regs->pc);
+
+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
+ unsigned int br2, ldq, nop, jmp;
+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
+
+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
+ err = get_user(br2, (unsigned int *)addr);
+ err |= get_user(ldq, (unsigned int *)(addr+4));
+ err |= get_user(nop, (unsigned int *)(addr+8));
+ err |= get_user(jmp, (unsigned int *)(addr+12));
+ err |= get_user(resolver, (unsigned long *)(addr+16));
+
+ if (err)
+ break;
+
+ if (br2 == 0xC3600000U &&
+ ldq == 0xA77B000CU &&
+ nop == 0x47FF041FU &&
+ jmp == 0x6B7B0000U)
+ {
+ regs->r28 = regs->pc+4;
+ regs->r27 = addr+16;
+ regs->pc = resolver;
+ return 3;
+ }
+ }
+ } while (0);
+#endif
+
+ return 1;
+}
+
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+ unsigned long i;
+
+ printk(KERN_ERR "PAX: bytes at PC: ");
+ for (i = 0; i < 5; i++) {
+ unsigned int c;
+ if (get_user(c, (unsigned int *)pc+i))
+ printk(KERN_CONT "???????? ");
+ else
+ printk(KERN_CONT "%08x ", c);
+ }
+ printk("\n");
+}
+#endif

 /*
 * This routine handles page faults. It determines the address,
@@ -133,8 +251,29 @@ retry:
 good_area:
 si_code = SEGV_ACCERR;
 if (cause < 0) {
- if (!(vma->vm_flags & VM_EXEC))
+ if (!(vma->vm_flags & VM_EXEC)) {
+
+#ifdef CONFIG_PAX_PAGEEXEC
+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
+ goto bad_area;
+
+ up_read(&mm->mmap_sem);
+ switch (pax_handle_fetch_fault(regs)) {
+
+#ifdef CONFIG_PAX_EMUPLT
+ case 2:
+ case 3:
+ return;
+#endif
+
+ }
+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
+ do_group_exit(SIGKILL);
+#else
 goto bad_area;
+#endif
+
+ }
 } else if (!cause) {
 /* Allow reads even for write-only mappings */
 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 67874b8..0e40765 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1813,7 +1813,7 @@ config ALIGNMENT_TRAP

 config UACCESS_WITH_MEMCPY
 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
- depends on MMU
+ depends on MMU && !PAX_MEMORY_UDEREF
 default y if CPU_FEROCEON
 help
 Implement faster copy_to_user and clear_user methods for CPU
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 87dfa902..3a523fc 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -81,7 +81,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
 * Supported arch specific GIC irq extension.
 * Default make them NULL.
 */
-struct irq_chip gic_arch_extn = {
+irq_chip_no_const gic_arch_extn __read_only = {
 .irq_eoi = NULL,
 .irq_mask = NULL,
 .irq_unmask = NULL,
@@ -329,7 +329,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
 chained_irq_exit(chip, desc);
 }

-static struct irq_chip gic_chip = {
+static irq_chip_no_const gic_chip __read_only = {
 .name = "GIC",
 .irq_mask = gic_mask_irq,
 .irq_unmask = gic_unmask_irq,
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index c79f61f..9ac0642 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -17,17 +17,35 @@
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>

+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
+
 #define ATOMIC_INIT(i) { (i) }

 #ifdef __KERNEL__

+#define _ASM_EXTABLE(from, to) \
+" .pushsection __ex_table,\"a\"\n"\
+" .align 3\n" \
+" .long " #from ", " #to"\n" \
+" .popsection"
+
 /*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
 #define atomic_read(v) (*(volatile int *)&(v)->counter)
+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
+{
+ return v->counter;
+}
 #define atomic_set(v,i) (((v)->counter) = (i))
+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
+{
+ v->counter = i;
+}

 #if __LINUX_ARM_ARCH__ >= 6

@@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
 int result;

 __asm__ __volatile__("@ atomic_add\n"
+"1: ldrex %1, [%3]\n"
+" adds %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strex %1, %0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+}
+
+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__("@ atomic_add_unchecked\n"
 "1: ldrex %0, [%3]\n"
 " add %0, %0, %4\n"
 " strex %1, %0, [%3]\n"
@@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
 smp_mb();

 __asm__ __volatile__("@ atomic_add_return\n"
+"1: ldrex %1, [%3]\n"
+" adds %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strex %1, %0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+
+ smp_mb();
+
+ return result;
+}
+
+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
 "1: ldrex %0, [%3]\n"
 " add %0, %0, %4\n"
 " strex %1, %0, [%3]\n"
@@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
 int result;

 __asm__ __volatile__("@ atomic_sub\n"
+"1: ldrex %1, [%3]\n"
+" subs %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strex %1, %0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+}
+
+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
 "1: ldrex %0, [%3]\n"
 " sub %0, %0, %4\n"
 " strex %1, %0, [%3]\n"
@@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 smp_mb();

 __asm__ __volatile__("@ atomic_sub_return\n"
-"1: ldrex %0, [%3]\n"
-" sub %0, %0, %4\n"
+"1: ldrex %1, [%3]\n"
+" subs %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
 " strex %1, %0, [%3]\n"
 " teq %1, #0\n"
 " bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter), "Ir" (i)
 : "cc");
@@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 return oldval;
 }

+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
+{
+ unsigned long oldval, res;
+
+ smp_mb();
+
+ do {
+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
+ "ldrex %1, [%3]\n"
+ "mov %0, #0\n"
+ "teq %1, %4\n"
+ "strexeq %0, %5, [%3]\n"
+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
+ : "cc");
+ } while (res);
+
+ smp_mb();
+
+ return oldval;
+}
+
 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 {
 unsigned long tmp, tmp2;
@@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)

 return val;
 }
+
+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
+{
+ return atomic_add_return(i, v);
+}
+
 #define atomic_add(i, v) (void) atomic_add_return(i, v)
+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
+{
+ (void) atomic_add_return(i, v);
+}

 static inline int atomic_sub_return(int i, atomic_t *v)
 {
@@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 return val;
 }
 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
+{
+ (void) atomic_sub_return(i, v);
+}

 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
@@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 return ret;
 }

+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
+{
+ return atomic_cmpxchg(v, old, new);
+}
+
 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 {
 unsigned long flags;
@@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 #endif /* __LINUX_ARM_ARCH__ */

 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
+{
+ return xchg(&v->counter, new);
+}

 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
@@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 }

 #define atomic_inc(v) atomic_add(1, v)
+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
+{
+ atomic_add_unchecked(1, v);
+}
 #define atomic_dec(v) atomic_sub(1, v)
+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
+{
+ atomic_sub_unchecked(1, v);
+}

 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
+{
+ return atomic_add_return_unchecked(1, v) == 0;
+}
 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
 #define atomic_inc_return(v) (atomic_add_return(1, v))
+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
+{
+ return atomic_add_return_unchecked(1, v);
+}
 #define atomic_dec_return(v) (atomic_sub_return(1, v))
 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

@@ -241,6 +428,14 @@ typedef struct {
 u64 __aligned(8) counter;
 } atomic64_t;

+#ifdef CONFIG_PAX_REFCOUNT
+typedef struct {
+ u64 __aligned(8) counter;
+} atomic64_unchecked_t;
+#else
+typedef atomic64_t atomic64_unchecked_t;
+#endif
+
 #define ATOMIC64_INIT(i) { (i) }

 static inline u64 atomic64_read(const atomic64_t *v)
@@ -256,6 +451,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
 return result;
 }

+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
+{
+ u64 result;
+
+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
+" ldrexd %0, %H0, [%1]"
+ : "=&r" (result)
+ : "r" (&v->counter), "Qo" (v->counter)
+ );
+
+ return result;
+}
+
 static inline void atomic64_set(atomic64_t *v, u64 i)
 {
 u64 tmp;
@@ -270,6 +478,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 : "cc");
 }

+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
+{
+ u64 tmp;
+
+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
+"1: ldrexd %0, %H0, [%2]\n"
+" strexd %0, %3, %H3, [%2]\n"
+" teq %0, #0\n"
+" bne 1b"
+ : "=&r" (tmp), "=Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+}
+
 static inline void atomic64_add(u64 i, atomic64_t *v)
 {
 u64 result;
@@ -278,6 +500,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
 __asm__ __volatile__("@ atomic64_add\n"
 "1: ldrexd %0, %H0, [%3]\n"
 " adds %0, %0, %4\n"
+" adcs %H0, %H0, %H4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strexd %1, %0, %H0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+}
+
+static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
+{
+ u64 result;
+ unsigned long tmp;
+
+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
+"1: ldrexd %0, %H0, [%3]\n"
+" adds %0, %0, %4\n"
 " adc %H0, %H0, %H4\n"
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
@@ -289,12 +541,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)

 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
 {
- u64 result;
- unsigned long tmp;
+ u64 result, tmp;

 smp_mb();

 __asm__ __volatile__("@ atomic64_add_return\n"
+"1: ldrexd %1, %H1, [%3]\n"
+" adds %0, %1, %4\n"
+" adcs %H0, %H1, %H4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+" mov %H0, %H1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strexd %1, %0, %H0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+
+ smp_mb();
+
+ return result;
+}
+
+static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
+{
+ u64 result;
+ unsigned long tmp;
+
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
 "1: ldrexd %0, %H0, [%3]\n"
 " adds %0, %0, %4\n"
 " adc %H0, %H0, %H4\n"
@@ -318,6 +607,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
 __asm__ __volatile__("@ atomic64_sub\n"
 "1: ldrexd %0, %H0, [%3]\n"
 " subs %0, %0, %4\n"
+" sbcs %H0, %H0, %H4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strexd %1, %0, %H0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+}
+
+static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
+{
+ u64 result;
+ unsigned long tmp;
+
+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
+"1: ldrexd %0, %H0, [%3]\n"
+" subs %0, %0, %4\n"
 " sbc %H0, %H0, %H4\n"
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
@@ -329,18 +648,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)

 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
 {
- u64 result;
- unsigned long tmp;
+ u64 result, tmp;

 smp_mb();

 __asm__ __volatile__("@ atomic64_sub_return\n"
-"1: ldrexd %0, %H0, [%3]\n"
-" subs %0, %0, %4\n"
-" sbc %H0, %H0, %H4\n"
+"1: ldrexd %1, %H1, [%3]\n"
+" subs %0, %1, %4\n"
+" sbcs %H0, %H1, %H4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+" mov %H0, %H1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
 " bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter), "r" (i)
 : "cc");
@@ -374,6 +707,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
 return oldval;
 }

+static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
+{
+ u64 oldval;
+ unsigned long res;
+
+ smp_mb();
+
+ do {
+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
+ "ldrexd %1, %H1, [%3]\n"
+ "mov %0, #0\n"
+ "teq %1, %4\n"
+ "teqeq %H1, %H4\n"
+ "strexdeq %0, %5, %H5, [%3]"
+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+ : "r" (&ptr->counter), "r" (old), "r" (new)
+ : "cc");
+ } while (res);
+
+ smp_mb();
+
+ return oldval;
+}
+
 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
 {
 u64 result;
@@ -397,21 +754,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)

 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
 {
- u64 result;
- unsigned long tmp;
+ u64 result, tmp;

 smp_mb();

 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
-"1: ldrexd %0, %H0, [%3]\n"
-" subs %0, %0, #1\n"
-" sbc %H0, %H0, #0\n"
+"1: ldrexd %1, %H1, [%3]\n"
+" subs %0, %1, #1\n"
+" sbcs %H0, %H1, #0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+" mov %H0, %H1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
 " teq %H0, #0\n"
-" bmi 2f\n"
+" bmi 4f\n"
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
 " bne 1b\n"
-"2:"
+"4:\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter)
 : "cc");
@@ -434,13 +804,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
 " teq %0, %5\n"
 " teqeq %H0, %H5\n"
 " moveq %1, #0\n"
-" beq 2f\n"
+" beq 4f\n"
 " adds %0, %0, %6\n"
-" adc %H0, %H0, %H6\n"
+" adcs %H0, %H0, %H6\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
 " strexd %2, %0, %H0, [%4]\n"
 " teq %2, #0\n"
 " bne 1b\n"
-"2:"
+"4:\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter), "r" (u), "r" (a)
 : "cc");
@@ -453,10 +835,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)

 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
 #define atomic64_inc(v) atomic64_add(1LL, (v))
+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
 #define atomic64_dec(v) atomic64_sub(1LL, (v))
+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index 75fe66b..ba3dee4 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -4,8 +4,10 @@
 #ifndef __ASMARM_CACHE_H
 #define __ASMARM_CACHE_H

+#include <linux/const.h>
+
 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)

 /*
 * Memory returned by kmalloc() may be used for DMA, so we must make
@@ -24,5 +26,6 @@
 #endif

 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
+#define __read_only __attribute__ ((__section__(".data..read_only")))

 #endif
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index e1489c5..d418304 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -116,7 +116,7 @@ struct cpu_cache_fns {
 void (*dma_unmap_area)(const void *, size_t, int);

 void (*dma_flush_range)(const void *, const void *);
-};
+} __no_const;

 /*
 * Select the calling method
diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
index 6dcc164..b14d917 100644
--- a/arch/arm/include/asm/checksum.h
+++ b/arch/arm/include/asm/checksum.h
@@ -37,7 +37,19 @@ __wsum
 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);

 __wsum
-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
+
+static inline __wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
+{
+ __wsum ret;
+ pax_open_userland();
+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
+ pax_close_userland();
+ return ret;
+}
+
+

 /*
 * Fold a partial checksum without adding pseudo headers
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 7eb18c1..e38b6d2 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size

 #define xchg(ptr,x) \
 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define xchg_unchecked(ptr,x) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

 #include <asm-generic/cmpxchg-local.h>

diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
index 720799f..2f67631 100644
--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
@@ -25,9 +25,9 @@ extern struct arm_delay_ops {
 void (*const_udelay)(unsigned long);
 void (*udelay)(unsigned long);
 bool const_clock;
-} arm_delay_ops;
+} *arm_delay_ops;

-#define __delay(n) arm_delay_ops.delay(n)
+#define __delay(n) arm_delay_ops->delay(n)

 /*
 * This function intentionally does not exist; if you see references to
@@ -48,8 +48,8 @@ extern void __bad_udelay(void);
 * first constant multiplications gets optimized away if the delay is
 * a constant)
 */
-#define __udelay(n) arm_delay_ops.udelay(n)
-#define __const_udelay(n) arm_delay_ops.const_udelay(n)
+#define __udelay(n) arm_delay_ops->udelay(n)
+#define __const_udelay(n) arm_delay_ops->const_udelay(n)

 #define udelay(n) \
 (__builtin_constant_p(n) ? \
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 6ddbe44..b5e38b1 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -48,18 +48,37 @@
 * Domain types
 */
 #define DOMAIN_NOACCESS 0
-#define DOMAIN_CLIENT 1
 #ifdef CONFIG_CPU_USE_DOMAINS
+#define DOMAIN_USERCLIENT 1
+#define DOMAIN_KERNELCLIENT 1
 #define DOMAIN_MANAGER 3
+#define DOMAIN_VECTORS DOMAIN_USER
 #else
+
+#ifdef CONFIG_PAX_KERNEXEC
 #define DOMAIN_MANAGER 1
+#define DOMAIN_KERNEXEC 3
+#else
+#define DOMAIN_MANAGER 1
+#endif
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+#define DOMAIN_USERCLIENT 0
+#define DOMAIN_UDEREF 1
+#define DOMAIN_VECTORS DOMAIN_KERNEL
+#else
+#define DOMAIN_USERCLIENT 1
+#define DOMAIN_VECTORS DOMAIN_USER
+#endif
+#define DOMAIN_KERNELCLIENT 1
+
 #endif

 #define domain_val(dom,type) ((type) << (2*(dom)))

 #ifndef __ASSEMBLY__

-#ifdef CONFIG_CPU_USE_DOMAINS
+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
 static inline void set_domain(unsigned val)
 {
 asm volatile(
@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
 isb();
 }

-#define modify_domain(dom,type) \
- do { \
- struct thread_info *thread = current_thread_info(); \
- unsigned int domain = thread->cpu_domain; \
- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
- thread->cpu_domain = domain | domain_val(dom, type); \
- set_domain(thread->cpu_domain); \
- } while (0)
-
+extern void modify_domain(unsigned int dom, unsigned int type);
 #else
 static inline void set_domain(unsigned val) { }
 static inline void modify_domain(unsigned dom, unsigned type) { }
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 38050b1..9d90e8b 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
 the loader. We need to make sure that it is out of the way of the program
 that it will "exec", and that there is sufficient room for the brk. */

-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
+
+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
+#endif

 /* When the program starts, a1 contains a pointer to a function to be
 registered with atexit, as per the SVR4 ABI. A value of 0 means we
@@ -126,8 +133,4 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
 extern void elf_set_personality(const struct elf32_hdr *);
 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))

-struct mm_struct;
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
 #endif
diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
index de53547..52b9a28 100644
--- a/arch/arm/include/asm/fncpy.h
+++ b/arch/arm/include/asm/fncpy.h
@@ -81,7 +81,9 @@
 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
 \
+ pax_open_kernel(); \
 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
+ pax_close_kernel(); \
 flush_icache_range((unsigned long)(dest_buf), \
 (unsigned long)(dest_buf) + (size)); \
 \
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index e42cf59..7b94b8f 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -50,6 +50,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 return -EFAULT;

+ pax_open_userland();
+
 smp_mb();
 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 "1: ldrex %1, [%4]\n"
@@ -65,6 +67,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 : "cc", "memory");
 smp_mb();

+ pax_close_userland();
+
 *uval = val;
 return ret;
 }
@@ -95,6 +99,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 return -EFAULT;

+ pax_open_userland();
+
 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 "1: " TUSER(ldr) " %1, [%4]\n"
 " teq %1, %2\n"
@@ -105,6 +111,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
 : "cc", "memory");

+ pax_close_userland();
+
 *uval = val;
 return ret;
 }
@@ -127,6 +135,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 return -EFAULT;

 pagefault_disable(); /* implies preempt_disable() */
+ pax_open_userland();

 switch (op) {
 case FUTEX_OP_SET:
@@ -148,6 +157,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 ret = -ENOSYS;
 }

+ pax_close_userland();
 pagefault_enable(); /* subsumes preempt_enable() */

 if (!ret) {
diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
index 4b1ce6c..bea3f73 100644
--- a/arch/arm/include/asm/hardware/gic.h
+++ b/arch/arm/include/asm/hardware/gic.h
@@ -34,9 +34,10 @@

 #ifndef __ASSEMBLY__
 #include <linux/irqdomain.h>
+#include <linux/irq.h>
 struct device_node;

-extern struct irq_chip gic_arch_extn;
+extern irq_chip_no_const gic_arch_extn;

 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
 u32 offset, struct device_node *);
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index 83eb2f7..ed77159 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -4,6 +4,6 @@
 /*
 * This is the "bare minimum". AIO seems to require this.
 */
-#define KM_TYPE_NR 16
+#define KM_TYPE_NR 17

 #endif
diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
index 9e614a1..3302cca 100644
--- a/arch/arm/include/asm/mach/dma.h
+++ b/arch/arm/include/asm/mach/dma.h
@@ -22,7 +22,7 @@ struct dma_ops {
 int (*residue)(unsigned int, dma_t *); /* optional */
 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
 const char *type;
-};
+} __do_const;

 struct dma_struct {
 void *addr; /* single DMA address */
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index 2fe141f..192dc01 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -27,13 +27,16 @@ struct map_desc {
 #define MT_MINICLEAN 6
 #define MT_LOW_VECTORS 7
 #define MT_HIGH_VECTORS 8
-#define MT_MEMORY 9
+#define MT_MEMORY_RWX 9
 #define MT_ROM 10
-#define MT_MEMORY_NONCACHED 11
+#define MT_MEMORY_NONCACHED_RX 11
 #define MT_MEMORY_DTCM 12
 #define MT_MEMORY_ITCM 13
 #define MT_MEMORY_SO 14
 #define MT_MEMORY_DMA_READY 15
+#define MT_MEMORY_RW 16
+#define MT_MEMORY_RX 17
+#define MT_MEMORY_NONCACHED_RW 18

 #ifdef CONFIG_MMU
 extern void iotable_init(struct map_desc *, int);
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index 53426c6..c7baff3 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -35,7 +35,7 @@ struct outer_cache_fns {
 #endif
 void (*set_debug)(unsigned long);
 void (*resume)(void);
-};
+} __no_const;

 #ifdef CONFIG_OUTER_CACHE

diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 812a494..71fc0b6 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -114,7 +114,7 @@ struct cpu_user_fns {
 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
 unsigned long vaddr, struct vm_area_struct *vma);
-};
+} __no_const;

 #ifdef MULTI_USER
 extern struct cpu_user_fns cpu_user;
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 943504f..c37a730 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -17,6 +17,7 @@
 #include <asm/processor.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
+#include <asm/system_info.h>

 #define check_pgt_cache() do { } while (0)

@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
 }

+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+ pud_populate(mm, pud, pmd);
+}
+
 #else /* !CONFIG_ARM_LPAE */

 /*
@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
 #define pmd_free(mm, pmd) do { } while (0)
 #define pud_populate(mm,pmd,pte) BUG()
+#define pud_populate_kernel(mm,pmd,pte) BUG()

 #endif /* CONFIG_ARM_LPAE */

@@ -126,6 +133,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
 __free_page(pte);
 }

+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
+{
+#ifdef CONFIG_ARM_LPAE
+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
+#else
+ if (addr & SECTION_SIZE)
+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
+ else
+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
+#endif
+ flush_pmd_entry(pmdp);
+}
+
 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
 pmdval_t prot)
 {
@@ -155,7 +175,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
 static inline void
 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
 {
- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
+ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
 }
 #define pmd_pgtable(pmd) pmd_page(pmd)

diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
index 5cfba15..f415e1a 100644
--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
@@ -20,12 +20,15 @@
 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
+
 /*
 * - section
 */
+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
@@ -37,6 +40,7 @@
 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
 #define PMD_SECT_AF (_AT(pmdval_t, 0))
+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))

 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
@@ -66,6 +70,7 @@
 * - extended small page/tiny page
 */
 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index f97ee02..07f1be5 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -125,6 +125,7 @@
 #define L_PTE_XN (_AT(pteval_t, 1) << 9)
 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1923 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1924+#define L_PTE_PXN (_AT(pteval_t, 1) << 12) /* v7*/
1925
1926 /*
1927 * These are the memory types, defined to be compatible with
1928diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
1929index d795282..a43ea90 100644
1930--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
1931+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
1932@@ -32,15 +32,18 @@
1933 #define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0)
1934 #define PMD_BIT4 (_AT(pmdval_t, 0))
1935 #define PMD_DOMAIN(x) (_AT(pmdval_t, 0))
1936+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 59) /* PXNTable */
1937
1938 /*
1939 * - section
1940 */
1941 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1942 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1943+#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7)
1944 #define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
1945 #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
1946 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
1947+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53)
1948 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 54)
1949 #define PMD_SECT_AP_WRITE (_AT(pmdval_t, 0))
1950 #define PMD_SECT_AP_READ (_AT(pmdval_t, 0))
1951@@ -66,6 +69,7 @@
1952 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1953 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
1954 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
1955+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1956 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
1957
1958 /*
1959diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1960index a3f3792..7b932a6 100644
1961--- a/arch/arm/include/asm/pgtable-3level.h
1962+++ b/arch/arm/include/asm/pgtable-3level.h
1963@@ -74,6 +74,7 @@
1964 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
1965 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1966 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1967+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1968 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1969 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
1970 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
1971@@ -82,6 +83,7 @@
1972 /*
1973 * To be used in assembly code with the upper page attributes.
1974 */
1975+#define L_PTE_PXN_HIGH (1 << (53 - 32))
1976 #define L_PTE_XN_HIGH (1 << (54 - 32))
1977 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1978
1979diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1980index c094749..fd8272e 100644
1981--- a/arch/arm/include/asm/pgtable.h
1982+++ b/arch/arm/include/asm/pgtable.h
1983@@ -30,6 +30,9 @@
1984 #include <asm/pgtable-2level.h>
1985 #endif
1986
1987+#define ktla_ktva(addr) (addr)
1988+#define ktva_ktla(addr) (addr)
1989+
1990 /*
1991 * Just any arbitrary offset to the start of the vmalloc VM area: the
1992 * current 8MB value just means that there will be a 8MB "hole" after the
1993@@ -45,6 +48,9 @@
1994 #define LIBRARY_TEXT_START 0x0c000000
1995
1996 #ifndef __ASSEMBLY__
1997+extern pteval_t __supported_pte_mask;
1998+extern pmdval_t __supported_pmd_mask;
1999+
2000 extern void __pte_error(const char *file, int line, pte_t);
2001 extern void __pmd_error(const char *file, int line, pmd_t);
2002 extern void __pgd_error(const char *file, int line, pgd_t);
2003@@ -53,6 +59,50 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2004 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2005 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2006
2007+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2008+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2009+
2010+#ifdef CONFIG_PAX_KERNEXEC
2011+#include <asm/domain.h>
2012+#include <linux/thread_info.h>
2013+#include <linux/preempt.h>
2014+#endif
2015+
2016+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2017+static inline int test_domain(int domain, int domaintype)
2018+{
2019+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2020+}
2021+#endif
2022+
2023+#ifdef CONFIG_PAX_KERNEXEC
2024+static inline unsigned long pax_open_kernel(void) {
2025+#ifdef CONFIG_ARM_LPAE
2026+ /* TODO */
2027+#else
2028+ preempt_disable();
2029+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2030+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2031+#endif
2032+ return 0;
2033+}
2034+
2035+static inline unsigned long pax_close_kernel(void) {
2036+#ifdef CONFIG_ARM_LPAE
2037+ /* TODO */
2038+#else
2039+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2040+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2041+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2042+ preempt_enable_no_resched();
2043+#endif
2044+ return 0;
2045+}
2046+#else
2047+static inline unsigned long pax_open_kernel(void) { return 0; }
2048+static inline unsigned long pax_close_kernel(void) { return 0; }
2049+#endif
2050+
2051 /*
2052 * This is the lowest virtual address we can permit any user space
2053 * mapping to be mapped at. This is particularly important for
2054@@ -63,8 +113,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2055 /*
2056 * The pgprot_* and protection_map entries will be fixed up in runtime
2057 * to include the cachable and bufferable bits based on memory policy,
2058- * as well as any architecture dependent bits like global/ASID and SMP
2059- * shared mapping bits.
2060+ * as well as any architecture dependent bits like global/ASID, PXN,
2061+ * and SMP shared mapping bits.
2062 */
2063 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2064
2065@@ -240,8 +290,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
2066
2067 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2068 {
2069- const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2070- L_PTE_NONE | L_PTE_VALID;
2071+ const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2072 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2073 return pte;
2074 }
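
[Editor's note — commentary between file diffs, not patch content. The
pax_open_kernel()/pax_close_kernel() pair introduced above temporarily
retypes DOMAIN_KERNEL in the DACR so that a store to otherwise
read-only kernel memory succeeds, with preemption held off in between.
A minimal sketch of the intended call pattern, mirroring the fiq.c,
patch.c and traps.c hunks further down; kernexec_poke() is a
hypothetical caller, not a kernel symbol.]

static void kernexec_poke(unsigned int *ro_addr, unsigned int insn)
{
        pax_open_kernel();      /* preempt_disable() + DOMAIN_KERNEL -> DOMAIN_KERNEXEC */
        *ro_addr = insn;        /* the DACR override makes this store legal */
        pax_close_kernel();     /* back to the client domain, preemption re-enabled */
}
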
2075diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
2076index f3628fb..a0672dd 100644
2077--- a/arch/arm/include/asm/proc-fns.h
2078+++ b/arch/arm/include/asm/proc-fns.h
2079@@ -75,7 +75,7 @@ extern struct processor {
2080 unsigned int suspend_size;
2081 void (*do_suspend)(void *);
2082 void (*do_resume)(void *);
2083-} processor;
2084+} __do_const processor;
2085
2086 #ifndef MULTI_CPU
2087 extern void cpu_proc_init(void);
2088diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
2089index 06e7d50..8a8e251 100644
2090--- a/arch/arm/include/asm/processor.h
2091+++ b/arch/arm/include/asm/processor.h
2092@@ -65,9 +65,8 @@ struct thread_struct {
2093 regs->ARM_cpsr |= PSR_ENDSTATE; \
2094 regs->ARM_pc = pc & ~1; /* pc */ \
2095 regs->ARM_sp = sp; /* sp */ \
2096- regs->ARM_r2 = stack[2]; /* r2 (envp) */ \
2097- regs->ARM_r1 = stack[1]; /* r1 (argv) */ \
2098- regs->ARM_r0 = stack[0]; /* r0 (argc) */ \
2099+ /* r2 (envp), r1 (argv), r0 (argc) */ \
2100+ (void)copy_from_user(&regs->ARM_r0, (const char __user *)stack, 3 * sizeof(unsigned long)); \
2101 nommu_start_thread(regs); \
2102 })
2103
2104diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2105index d3a22be..3a69ad5 100644
2106--- a/arch/arm/include/asm/smp.h
2107+++ b/arch/arm/include/asm/smp.h
2108@@ -107,7 +107,7 @@ struct smp_operations {
2109 int (*cpu_disable)(unsigned int cpu);
2110 #endif
2111 #endif
2112-};
2113+} __no_const;
2114
2115 /*
2116 * set platform specific SMP operations
2117diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2118index cddda1f..ff357f7 100644
2119--- a/arch/arm/include/asm/thread_info.h
2120+++ b/arch/arm/include/asm/thread_info.h
2121@@ -77,9 +77,9 @@ struct thread_info {
2122 .flags = 0, \
2123 .preempt_count = INIT_PREEMPT_COUNT, \
2124 .addr_limit = KERNEL_DS, \
2125- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2126- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2127- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2128+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2129+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2130+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2131 .restart_block = { \
2132 .fn = do_no_restart_syscall, \
2133 }, \
2134@@ -152,6 +152,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2135 #define TIF_SYSCALL_AUDIT 9
2136 #define TIF_SYSCALL_TRACEPOINT 10
2137 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2138+
2139+/* within 8 bits of TIF_SYSCALL_TRACE
2140+ * to meet flexible second operand requirements
2141+ */
2142+#define TIF_GRSEC_SETXID 12
2143+
2144 #define TIF_USING_IWMMXT 17
2145 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2146 #define TIF_RESTORE_SIGMASK 20
2147@@ -165,10 +171,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2148 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2149 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2150 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2151+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2152
2153 /* Checks for any syscall work in entry-common.S */
2154 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2155- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2156+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2157
2158 /*
2159 * Change these and you break ASM code in entry-common.S
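
[Editor's note — commentary, not patch content. The "flexible second
operand" remark above refers to ARM data-processing immediates: an
8-bit value rotated right by an even amount. _TIF_SYSCALL_WORK is
tested with a single tst in entry-common.S, so all of its bits must fit
one such window. Keeping TIF_GRSEC_SETXID at bit 12 preserves that:

        bits 8..12          -> mask 0x1f00, i.e. 0x1f ror 24: one valid immediate
        bits 8..11 | bit 17 -> mask 0x20f00: spans 10 bits, not encodable]
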
2160diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2161index 7e1f760..752fcb7 100644
2162--- a/arch/arm/include/asm/uaccess.h
2163+++ b/arch/arm/include/asm/uaccess.h
2164@@ -18,6 +18,7 @@
2165 #include <asm/domain.h>
2166 #include <asm/unified.h>
2167 #include <asm/compiler.h>
2168+#include <asm/pgtable.h>
2169
2170 #define VERIFY_READ 0
2171 #define VERIFY_WRITE 1
2172@@ -60,10 +61,34 @@ extern int __put_user_bad(void);
2173 #define USER_DS TASK_SIZE
2174 #define get_fs() (current_thread_info()->addr_limit)
2175
2176+static inline void pax_open_userland(void)
2177+{
2178+
2179+#ifdef CONFIG_PAX_MEMORY_UDEREF
2180+ if (get_fs() == USER_DS) {
2181+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2182+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2183+ }
2184+#endif
2185+
2186+}
2187+
2188+static inline void pax_close_userland(void)
2189+{
2190+
2191+#ifdef CONFIG_PAX_MEMORY_UDEREF
2192+ if (get_fs() == USER_DS) {
2193+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2194+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2195+ }
2196+#endif
2197+
2198+}
2199+
2200 static inline void set_fs(mm_segment_t fs)
2201 {
2202 current_thread_info()->addr_limit = fs;
2203- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2204+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2205 }
2206
2207 #define segment_eq(a,b) ((a) == (b))
2208@@ -143,8 +168,12 @@ extern int __get_user_4(void *);
2209
2210 #define get_user(x,p) \
2211 ({ \
2212+ int __e; \
2213 might_fault(); \
2214- __get_user_check(x,p); \
2215+ pax_open_userland(); \
2216+ __e = __get_user_check(x,p); \
2217+ pax_close_userland(); \
2218+ __e; \
2219 })
2220
2221 extern int __put_user_1(void *, unsigned int);
2222@@ -188,8 +217,12 @@ extern int __put_user_8(void *, unsigned long long);
2223
2224 #define put_user(x,p) \
2225 ({ \
2226+ int __e; \
2227 might_fault(); \
2228- __put_user_check(x,p); \
2229+ pax_open_userland(); \
2230+ __e = __put_user_check(x,p); \
2231+ pax_close_userland(); \
2232+ __e; \
2233 })
2234
2235 #else /* CONFIG_MMU */
2236@@ -230,13 +263,17 @@ static inline void set_fs(mm_segment_t fs)
2237 #define __get_user(x,ptr) \
2238 ({ \
2239 long __gu_err = 0; \
2240+ pax_open_userland(); \
2241 __get_user_err((x),(ptr),__gu_err); \
2242+ pax_close_userland(); \
2243 __gu_err; \
2244 })
2245
2246 #define __get_user_error(x,ptr,err) \
2247 ({ \
2248+ pax_open_userland(); \
2249 __get_user_err((x),(ptr),err); \
2250+ pax_close_userland(); \
2251 (void) 0; \
2252 })
2253
2254@@ -312,13 +349,17 @@ do { \
2255 #define __put_user(x,ptr) \
2256 ({ \
2257 long __pu_err = 0; \
2258+ pax_open_userland(); \
2259 __put_user_err((x),(ptr),__pu_err); \
2260+ pax_close_userland(); \
2261 __pu_err; \
2262 })
2263
2264 #define __put_user_error(x,ptr,err) \
2265 ({ \
2266+ pax_open_userland(); \
2267 __put_user_err((x),(ptr),err); \
2268+ pax_close_userland(); \
2269 (void) 0; \
2270 })
2271
2272@@ -418,11 +459,44 @@ do { \
2273
2274
2275 #ifdef CONFIG_MMU
2276-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2277-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2278+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2279+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2280+
2281+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2282+{
2283+ unsigned long ret;
2284+
2285+ check_object_size(to, n, false);
2286+ pax_open_userland();
2287+ ret = ___copy_from_user(to, from, n);
2288+ pax_close_userland();
2289+ return ret;
2290+}
2291+
2292+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2293+{
2294+ unsigned long ret;
2295+
2296+ check_object_size(from, n, true);
2297+ pax_open_userland();
2298+ ret = ___copy_to_user(to, from, n);
2299+ pax_close_userland();
2300+ return ret;
2301+}
2302+
2303 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2304-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2305+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2306 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2307+
2308+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2309+{
2310+ unsigned long ret;
2311+ pax_open_userland();
2312+ ret = ___clear_user(addr, n);
2313+ pax_close_userland();
2314+ return ret;
2315+}
2316+
2317 #else
2318 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2319 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2320@@ -431,6 +505,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2321
2322 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2323 {
2324+ if ((long)n < 0)
2325+ return n;
2326+
2327 if (access_ok(VERIFY_READ, from, n))
2328 n = __copy_from_user(to, from, n);
2329 else /* security hole - plug it */
2330@@ -440,6 +517,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2331
2332 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2333 {
2334+ if ((long)n < 0)
2335+ return n;
2336+
2337 if (access_ok(VERIFY_WRITE, to, n))
2338 n = __copy_to_user(to, from, n);
2339 return n;
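
[Editor's note — commentary, not patch content. The new (long)n < 0
guard in copy_from_user()/copy_to_user() rejects any length with the
sign bit set before access_ok() is even consulted. A sketch of the bug
class it blunts; broken_len(), user_len and hdr_size are hypothetical:]

static unsigned long broken_len(unsigned long user_len, unsigned long hdr_size)
{
        /* if user_len < hdr_size this wraps: e.g. 16 - 64 = 0xffffffd0
         * on 32-bit ARM, which then reaches copy_from_user() as a huge
         * "n"; (long)n < 0 catches exactly this shape of value */
        return user_len - hdr_size;
}
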
2340diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2341index 96ee092..37f1844 100644
2342--- a/arch/arm/include/uapi/asm/ptrace.h
2343+++ b/arch/arm/include/uapi/asm/ptrace.h
2344@@ -73,7 +73,7 @@
2345 * ARMv7 groups of PSR bits
2346 */
2347 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2348-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2349+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2350 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2351 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2352
2353diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2354index 60d3b73..d27ee09 100644
2355--- a/arch/arm/kernel/armksyms.c
2356+++ b/arch/arm/kernel/armksyms.c
2357@@ -89,9 +89,9 @@ EXPORT_SYMBOL(__memzero);
2358 #ifdef CONFIG_MMU
2359 EXPORT_SYMBOL(copy_page);
2360
2361-EXPORT_SYMBOL(__copy_from_user);
2362-EXPORT_SYMBOL(__copy_to_user);
2363-EXPORT_SYMBOL(__clear_user);
2364+EXPORT_SYMBOL(___copy_from_user);
2365+EXPORT_SYMBOL(___copy_to_user);
2366+EXPORT_SYMBOL(___clear_user);
2367
2368 EXPORT_SYMBOL(__get_user_1);
2369 EXPORT_SYMBOL(__get_user_2);
2370diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2371index 0f82098..3dbd3ee 100644
2372--- a/arch/arm/kernel/entry-armv.S
2373+++ b/arch/arm/kernel/entry-armv.S
2374@@ -47,6 +47,87 @@
2375 9997:
2376 .endm
2377
2378+ .macro pax_enter_kernel
2379+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2380+ @ make aligned space for saved DACR
2381+ sub sp, sp, #8
2382+ @ save regs
2383+ stmdb sp!, {r1, r2}
2384+ @ read DACR from cpu_domain into r1
2385+ mov r2, sp
2386+ @ assume 8K pages, since we have to split the immediate in two
2387+ bic r2, r2, #(0x1fc0)
2388+ bic r2, r2, #(0x3f)
2389+ ldr r1, [r2, #TI_CPU_DOMAIN]
2390+ @ store old DACR on stack
2391+ str r1, [sp, #8]
2392+#ifdef CONFIG_PAX_KERNEXEC
2393+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2394+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2395+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2396+#endif
2397+#ifdef CONFIG_PAX_MEMORY_UDEREF
2398+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2399+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2400+#endif
2401+ @ write r1 to current_thread_info()->cpu_domain
2402+ str r1, [r2, #TI_CPU_DOMAIN]
2403+ @ write r1 to DACR
2404+ mcr p15, 0, r1, c3, c0, 0
2405+ @ instruction sync
2406+ instr_sync
2407+ @ restore regs
2408+ ldmia sp!, {r1, r2}
2409+#endif
2410+ .endm
2411+
2412+ .macro pax_open_userland
2413+#ifdef CONFIG_PAX_MEMORY_UDEREF
2414+ @ save regs
2415+ stmdb sp!, {r0, r1}
2416+ @ read DACR from cpu_domain into r1
2417+ mov r0, sp
2418+ @ assume 8K pages, since we have to split the immediate in two
2419+ bic r0, r0, #(0x1fc0)
2420+ bic r0, r0, #(0x3f)
2421+ ldr r1, [r0, #TI_CPU_DOMAIN]
2422+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2423+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2424+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2425+ @ write r1 to current_thread_info()->cpu_domain
2426+ str r1, [r0, #TI_CPU_DOMAIN]
2427+ @ write r1 to DACR
2428+ mcr p15, 0, r1, c3, c0, 0
2429+ @ instruction sync
2430+ instr_sync
2431+ @ restore regs
2432+ ldmia sp!, {r0, r1}
2433+#endif
2434+ .endm
2435+
2436+ .macro pax_close_userland
2437+#ifdef CONFIG_PAX_MEMORY_UDEREF
2438+ @ save regs
2439+ stmdb sp!, {r0, r1}
2440+ @ read DACR from cpu_domain into r1
2441+ mov r0, sp
2442+ @ assume 8K pages, since we have to split the immediate in two
2443+ bic r0, r0, #(0x1fc0)
2444+ bic r0, r0, #(0x3f)
2445+ ldr r1, [r0, #TI_CPU_DOMAIN]
2446+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2447+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2448+ @ write r1 to current_thread_info()->cpu_domain
2449+ str r1, [r0, #TI_CPU_DOMAIN]
2450+ @ write r1 to DACR
2451+ mcr p15, 0, r1, c3, c0, 0
2452+ @ instruction sync
2453+ instr_sync
2454+ @ restore regs
2455+ ldmia sp!, {r0, r1}
2456+#endif
2457+ .endm
2458+
2459 .macro pabt_helper
2460 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2461 #ifdef MULTI_PABORT
2462@@ -89,11 +170,15 @@
2463 * Invalid mode handlers
2464 */
2465 .macro inv_entry, reason
2466+
2467+ pax_enter_kernel
2468+
2469 sub sp, sp, #S_FRAME_SIZE
2470 ARM( stmib sp, {r1 - lr} )
2471 THUMB( stmia sp, {r0 - r12} )
2472 THUMB( str sp, [sp, #S_SP] )
2473 THUMB( str lr, [sp, #S_LR] )
2474+
2475 mov r1, #\reason
2476 .endm
2477
2478@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2479 .macro svc_entry, stack_hole=0
2480 UNWIND(.fnstart )
2481 UNWIND(.save {r0 - pc} )
2482+
2483+ pax_enter_kernel
2484+
2485 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2486+
2487 #ifdef CONFIG_THUMB2_KERNEL
2488 SPFIX( str r0, [sp] ) @ temporarily saved
2489 SPFIX( mov r0, sp )
2490@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2491 ldmia r0, {r3 - r5}
2492 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2493 mov r6, #-1 @ "" "" "" ""
2494+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2495+ @ offset sp by 8 as done in pax_enter_kernel
2496+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2497+#else
2498 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2499+#endif
2500 SPFIX( addeq r2, r2, #4 )
2501 str r3, [sp, #-4]! @ save the "real" r0 copied
2502 @ from the exception stack
2503@@ -359,6 +453,9 @@ ENDPROC(__pabt_svc)
2504 .macro usr_entry
2505 UNWIND(.fnstart )
2506 UNWIND(.cantunwind ) @ don't unwind the user space
2507+
2508+ pax_enter_kernel_user
2509+
2510 sub sp, sp, #S_FRAME_SIZE
2511 ARM( stmib sp, {r1 - r12} )
2512 THUMB( stmia sp, {r0 - r12} )
2513@@ -456,7 +553,9 @@ __und_usr:
2514 tst r3, #PSR_T_BIT @ Thumb mode?
2515 bne __und_usr_thumb
2516 sub r4, r2, #4 @ ARM instr at LR - 4
2517+ pax_open_userland
2518 1: ldrt r0, [r4]
2519+ pax_close_userland
2520 #ifdef CONFIG_CPU_ENDIAN_BE8
2521 rev r0, r0 @ little endian instruction
2522 #endif
2523@@ -491,10 +590,14 @@ __und_usr_thumb:
2524 */
2525 .arch armv6t2
2526 #endif
2527+ pax_open_userland
2528 2: ldrht r5, [r4]
2529+ pax_close_userland
2530 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2531 blo __und_usr_fault_16 @ 16bit undefined instruction
2532+ pax_open_userland
2533 3: ldrht r0, [r2]
2534+ pax_close_userland
2535 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2536 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2537 orr r0, r0, r5, lsl #16
2538@@ -733,7 +836,7 @@ ENTRY(__switch_to)
2539 THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
2540 THUMB( str sp, [ip], #4 )
2541 THUMB( str lr, [ip], #4 )
2542-#ifdef CONFIG_CPU_USE_DOMAINS
2543+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC)
2544 ldr r6, [r2, #TI_CPU_DOMAIN]
2545 #endif
2546 set_tls r3, r4, r5
2547@@ -742,7 +845,7 @@ ENTRY(__switch_to)
2548 ldr r8, =__stack_chk_guard
2549 ldr r7, [r7, #TSK_STACK_CANARY]
2550 #endif
2551-#ifdef CONFIG_CPU_USE_DOMAINS
2552+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC)
2553 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2554 #endif
2555 mov r5, r0
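
[Editor's note — commentary, not patch content. The repeated
"bic rN, rN, #0x1fc0; bic rN, rN, #0x3f" sequence in the macros above
clears the low 13 bits of sp (0x1fc0 | 0x3f == 0x1fff); with the 8 KiB
kernel stacks the "assume 8K pages" comments refer to, that yields the
thread_info base. 0x1fff itself is not encodable as a single ARM
immediate, hence the split into two bic instructions. Equivalent C,
assuming 8 KiB stacks:]

static inline struct thread_info *ti_from_sp(unsigned long sp)
{
        return (struct thread_info *)(sp & ~0x1fffUL);  /* sp & ~(THREAD_SIZE - 1) */
}
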
2556diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2557index a6c301e..908821b 100644
2558--- a/arch/arm/kernel/entry-common.S
2559+++ b/arch/arm/kernel/entry-common.S
2560@@ -10,18 +10,46 @@
2561
2562 #include <asm/unistd.h>
2563 #include <asm/ftrace.h>
2564+#include <asm/domain.h>
2565 #include <asm/unwind.h>
2566
2567+#include "entry-header.S"
2568+
2569 #ifdef CONFIG_NEED_RET_TO_USER
2570 #include <mach/entry-macro.S>
2571 #else
2572 .macro arch_ret_to_user, tmp1, tmp2
2573+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2574+ @ save regs
2575+ stmdb sp!, {r1, r2}
2576+ @ read DACR from cpu_domain into r1
2577+ mov r2, sp
2578+ @ assume 8K pages, since we have to split the immediate in two
2579+ bic r2, r2, #(0x1fc0)
2580+ bic r2, r2, #(0x3f)
2581+ ldr r1, [r2, #TI_CPU_DOMAIN]
2582+#ifdef CONFIG_PAX_KERNEXEC
2583+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2584+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2585+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2586+#endif
2587+#ifdef CONFIG_PAX_MEMORY_UDEREF
2588+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2589+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2590+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2591+#endif
2592+ @ write r1 to current_thread_info()->cpu_domain
2593+ str r1, [r2, #TI_CPU_DOMAIN]
2594+ @ write r1 to DACR
2595+ mcr p15, 0, r1, c3, c0, 0
2596+ @ instruction sync
2597+ instr_sync
2598+ @ restore regs
2599+ ldmia sp!, {r1, r2}
2600+#endif
2601 .endm
2602 #endif
2603
2604-#include "entry-header.S"
2605-
2606-
2607 .align 5
2608 /*
2609 * This is the fast syscall return path. We do as little as
2610@@ -339,6 +367,7 @@ ENDPROC(ftrace_stub)
2611
2612 .align 5
2613 ENTRY(vector_swi)
2614+
2615 sub sp, sp, #S_FRAME_SIZE
2616 stmia sp, {r0 - r12} @ Calling r0 - r12
2617 ARM( add r8, sp, #S_PC )
2618@@ -388,6 +417,12 @@ ENTRY(vector_swi)
2619 ldr scno, [lr, #-4] @ get SWI instruction
2620 #endif
2621
2622+ /*
2623+ * do this here to avoid a performance hit of wrapping the code above
2624+ * that directly dereferences userland to parse the SWI instruction
2625+ */
2626+ pax_enter_kernel_user
2627+
2628 #ifdef CONFIG_ALIGNMENT_TRAP
2629 ldr ip, __cr_alignment
2630 ldr ip, [ip]
2631diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2632index 9a8531e..812e287 100644
2633--- a/arch/arm/kernel/entry-header.S
2634+++ b/arch/arm/kernel/entry-header.S
2635@@ -73,9 +73,66 @@
2636 msr cpsr_c, \rtemp @ switch back to the SVC mode
2637 .endm
2638
2639+ .macro pax_enter_kernel_user
2640+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2641+ @ save regs
2642+ stmdb sp!, {r0, r1}
2643+ @ read DACR from cpu_domain into r1
2644+ mov r0, sp
2645+ @ assume 8K pages, since we have to split the immediate in two
2646+ bic r0, r0, #(0x1fc0)
2647+ bic r0, r0, #(0x3f)
2648+ ldr r1, [r0, #TI_CPU_DOMAIN]
2649+#ifdef CONFIG_PAX_MEMORY_UDEREF
2650+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2651+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2652+#endif
2653+#ifdef CONFIG_PAX_KERNEXEC
2654+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2655+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2656+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2657+#endif
2658+ @ write r1 to current_thread_info()->cpu_domain
2659+ str r1, [r0, #TI_CPU_DOMAIN]
2660+ @ write r1 to DACR
2661+ mcr p15, 0, r1, c3, c0, 0
2662+ @ instruction sync
2663+ instr_sync
2664+ @ restore regs
2665+ ldmia sp!, {r0, r1}
2666+#endif
2667+ .endm
2668+
2669+ .macro pax_exit_kernel
2670+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2671+ @ save regs
2672+ stmdb sp!, {r0, r1}
2673+ @ read old DACR from stack into r1
2674+ ldr r1, [sp, #(8 + S_SP)]
2675+ sub r1, r1, #8
2676+ ldr r1, [r1]
2677+
2678+ @ write r1 to current_thread_info()->cpu_domain
2679+ mov r0, sp
2680+ @ assume 8K pages, since we have to split the immediate in two
2681+ bic r0, r0, #(0x1fc0)
2682+ bic r0, r0, #(0x3f)
2683+ str r1, [r0, #TI_CPU_DOMAIN]
2684+ @ write r1 to DACR
2685+ mcr p15, 0, r1, c3, c0, 0
2686+ @ instruction sync
2687+ instr_sync
2688+ @ restore regs
2689+ ldmia sp!, {r0, r1}
2690+#endif
2691+ .endm
2692+
2693 #ifndef CONFIG_THUMB2_KERNEL
2694 .macro svc_exit, rpsr
2695 msr spsr_cxsf, \rpsr
2696+
2697+ pax_exit_kernel
2698+
2699 #if defined(CONFIG_CPU_V6)
2700 ldr r0, [sp]
2701 strex r1, r2, [sp] @ clear the exclusive monitor
2702@@ -121,6 +178,9 @@
2703 .endm
2704 #else /* CONFIG_THUMB2_KERNEL */
2705 .macro svc_exit, rpsr
2706+
2707+ pax_exit_kernel
2708+
2709 ldr lr, [sp, #S_SP] @ top of the stack
2710 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2711 clrex @ clear the exclusive monitor
2712diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2713index 2adda11..7fbe958 100644
2714--- a/arch/arm/kernel/fiq.c
2715+++ b/arch/arm/kernel/fiq.c
2716@@ -82,7 +82,9 @@ void set_fiq_handler(void *start, unsigned int length)
2717 #if defined(CONFIG_CPU_USE_DOMAINS)
2718 memcpy((void *)0xffff001c, start, length);
2719 #else
2720+ pax_open_kernel();
2721 memcpy(vectors_page + 0x1c, start, length);
2722+ pax_close_kernel();
2723 #endif
2724 flush_icache_range(0xffff001c, 0xffff001c + length);
2725 if (!vectors_high())
2726diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2727index e0eb9a1..c7d74a3 100644
2728--- a/arch/arm/kernel/head.S
2729+++ b/arch/arm/kernel/head.S
2730@@ -52,7 +52,9 @@
2731 .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
2732
2733 .macro pgtbl, rd, phys
2734- add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
2735+ mov \rd, #TEXT_OFFSET
2736+ sub \rd, #PG_DIR_SIZE
2737+ add \rd, \rd, \phys
2738 .endm
2739
2740 /*
2741@@ -434,7 +436,7 @@ __enable_mmu:
2742 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2743 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2744 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2745- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2746+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2747 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2748 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2749 #endif
2750diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
2751index 5ff2e77..556d030 100644
2752--- a/arch/arm/kernel/hw_breakpoint.c
2753+++ b/arch/arm/kernel/hw_breakpoint.c
2754@@ -1011,7 +1011,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
2755 return NOTIFY_OK;
2756 }
2757
2758-static struct notifier_block __cpuinitdata dbg_reset_nb = {
2759+static struct notifier_block dbg_reset_nb = {
2760 .notifier_call = dbg_reset_notify,
2761 };
2762
2763diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2764index 1e9be5d..03edbc2 100644
2765--- a/arch/arm/kernel/module.c
2766+++ b/arch/arm/kernel/module.c
2767@@ -37,12 +37,37 @@
2768 #endif
2769
2770 #ifdef CONFIG_MMU
2771-void *module_alloc(unsigned long size)
2772+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2773 {
2774+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2775+ return NULL;
2776 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2777- GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
2778+ GFP_KERNEL, prot, -1,
2779 __builtin_return_address(0));
2780 }
2781+
2782+void *module_alloc(unsigned long size)
2783+{
2784+
2785+#ifdef CONFIG_PAX_KERNEXEC
2786+ return __module_alloc(size, PAGE_KERNEL);
2787+#else
2788+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2789+#endif
2790+
2791+}
2792+
2793+#ifdef CONFIG_PAX_KERNEXEC
2794+void module_free_exec(struct module *mod, void *module_region)
2795+{
2796+ module_free(mod, module_region);
2797+}
2798+
2799+void *module_alloc_exec(unsigned long size)
2800+{
2801+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2802+}
2803+#endif
2804 #endif
2805
2806 int
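
[Editor's note — commentary, not patch content. With
CONFIG_PAX_KERNEXEC, module_alloc() now returns non-executable
PAGE_KERNEL memory and module_alloc_exec() supplies the executable
region, so module code and data end up in separate mappings. A rough
sketch of the split a loader would perform; load_sections() is
hypothetical, and the core_size/core_text_size fields follow struct
module of this kernel era:]

static int load_sections(struct module *mod)
{
        void *core_data = module_alloc(mod->core_size);            /* RW, non-exec */
        void *core_text = module_alloc_exec(mod->core_text_size);  /* RX */

        return (core_data && core_text) ? 0 : -ENOMEM;
}
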
2807diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2808index 07314af..c46655c 100644
2809--- a/arch/arm/kernel/patch.c
2810+++ b/arch/arm/kernel/patch.c
2811@@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2812 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
2813 int size;
2814
2815+ pax_open_kernel();
2816 if (thumb2 && __opcode_is_thumb16(insn)) {
2817 *(u16 *)addr = __opcode_to_mem_thumb16(insn);
2818 size = sizeof(u16);
2819@@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2820 *(u32 *)addr = insn;
2821 size = sizeof(u32);
2822 }
2823+ pax_close_kernel();
2824
2825 flush_icache_range((uintptr_t)(addr),
2826 (uintptr_t)(addr) + size);
2827diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
2828index 5f66206..dce492f 100644
2829--- a/arch/arm/kernel/perf_event_cpu.c
2830+++ b/arch/arm/kernel/perf_event_cpu.c
2831@@ -171,7 +171,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
2832 return NOTIFY_OK;
2833 }
2834
2835-static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
2836+static struct notifier_block cpu_pmu_hotplug_notifier = {
2837 .notifier_call = cpu_pmu_notify,
2838 };
2839
2840diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2841index c6dec5f..e0fddd1 100644
2842--- a/arch/arm/kernel/process.c
2843+++ b/arch/arm/kernel/process.c
2844@@ -28,7 +28,6 @@
2845 #include <linux/tick.h>
2846 #include <linux/utsname.h>
2847 #include <linux/uaccess.h>
2848-#include <linux/random.h>
2849 #include <linux/hw_breakpoint.h>
2850 #include <linux/cpuidle.h>
2851 #include <linux/leds.h>
2852@@ -256,9 +255,10 @@ void machine_power_off(void)
2853 machine_shutdown();
2854 if (pm_power_off)
2855 pm_power_off();
2856+ BUG();
2857 }
2858
2859-void machine_restart(char *cmd)
2860+__noreturn void machine_restart(char *cmd)
2861 {
2862 machine_shutdown();
2863
2864@@ -283,8 +283,8 @@ void __show_regs(struct pt_regs *regs)
2865 init_utsname()->release,
2866 (int)strcspn(init_utsname()->version, " "),
2867 init_utsname()->version);
2868- print_symbol("PC is at %s\n", instruction_pointer(regs));
2869- print_symbol("LR is at %s\n", regs->ARM_lr);
2870+ printk("PC is at %pA\n", instruction_pointer(regs));
2871+ printk("LR is at %pA\n", regs->ARM_lr);
2872 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2873 "sp : %08lx ip : %08lx fp : %08lx\n",
2874 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2875@@ -452,12 +452,6 @@ unsigned long get_wchan(struct task_struct *p)
2876 return 0;
2877 }
2878
2879-unsigned long arch_randomize_brk(struct mm_struct *mm)
2880-{
2881- unsigned long range_end = mm->brk + 0x02000000;
2882- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2883-}
2884-
2885 #ifdef CONFIG_MMU
2886 /*
2887 * The vectors page is always readable from user space for the
2888@@ -470,9 +464,8 @@ static int __init gate_vma_init(void)
2889 {
2890 gate_vma.vm_start = 0xffff0000;
2891 gate_vma.vm_end = 0xffff0000 + PAGE_SIZE;
2892- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2893- gate_vma.vm_flags = VM_READ | VM_EXEC |
2894- VM_MAYREAD | VM_MAYEXEC;
2895+ gate_vma.vm_flags = VM_NONE;
2896+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2897 return 0;
2898 }
2899 arch_initcall(gate_vma_init);
2900diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
2901index 03deeff..741ce88 100644
2902--- a/arch/arm/kernel/ptrace.c
2903+++ b/arch/arm/kernel/ptrace.c
2904@@ -937,10 +937,19 @@ static int tracehook_report_syscall(struct pt_regs *regs,
2905 return current_thread_info()->syscall;
2906 }
2907
2908+#ifdef CONFIG_GRKERNSEC_SETXID
2909+extern void gr_delayed_cred_worker(void);
2910+#endif
2911+
2912 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
2913 {
2914 current_thread_info()->syscall = scno;
2915
2916+#ifdef CONFIG_GRKERNSEC_SETXID
2917+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
2918+ gr_delayed_cred_worker();
2919+#endif
2920+
2921 /* Do the secure computing check first; failures should be fast. */
2922 if (secure_computing(scno) == -1)
2923 return -1;
2924diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
2925index 3f6cbb2..6d856f5 100644
2926--- a/arch/arm/kernel/setup.c
2927+++ b/arch/arm/kernel/setup.c
2928@@ -97,21 +97,23 @@ EXPORT_SYMBOL(system_serial_high);
2929 unsigned int elf_hwcap __read_mostly;
2930 EXPORT_SYMBOL(elf_hwcap);
2931
2932+pteval_t __supported_pte_mask __read_only;
2933+pmdval_t __supported_pmd_mask __read_only;
2934
2935 #ifdef MULTI_CPU
2936-struct processor processor __read_mostly;
2937+struct processor processor;
2938 #endif
2939 #ifdef MULTI_TLB
2940-struct cpu_tlb_fns cpu_tlb __read_mostly;
2941+struct cpu_tlb_fns cpu_tlb __read_only;
2942 #endif
2943 #ifdef MULTI_USER
2944-struct cpu_user_fns cpu_user __read_mostly;
2945+struct cpu_user_fns cpu_user __read_only;
2946 #endif
2947 #ifdef MULTI_CACHE
2948-struct cpu_cache_fns cpu_cache __read_mostly;
2949+struct cpu_cache_fns cpu_cache __read_only;
2950 #endif
2951 #ifdef CONFIG_OUTER_CACHE
2952-struct outer_cache_fns outer_cache __read_mostly;
2953+struct outer_cache_fns outer_cache __read_only;
2954 EXPORT_SYMBOL(outer_cache);
2955 #endif
2956
2957@@ -236,9 +238,13 @@ static int __get_cpu_architecture(void)
2958 asm("mrc p15, 0, %0, c0, c1, 4"
2959 : "=r" (mmfr0));
2960 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
2961- (mmfr0 & 0x000000f0) >= 0x00000030)
2962+ (mmfr0 & 0x000000f0) >= 0x00000030) {
2963 cpu_arch = CPU_ARCH_ARMv7;
2964- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
2965+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
2966+ __supported_pte_mask |= L_PTE_PXN;
2967+ __supported_pmd_mask |= PMD_PXNTABLE;
2968+ }
2969+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
2970 (mmfr0 & 0x000000f0) == 0x00000020)
2971 cpu_arch = CPU_ARCH_ARMv6;
2972 else
2973@@ -462,7 +468,7 @@ static void __init setup_processor(void)
2974 __cpu_architecture = __get_cpu_architecture();
2975
2976 #ifdef MULTI_CPU
2977- processor = *list->proc;
2978+ memcpy((void *)&processor, list->proc, sizeof processor);
2979 #endif
2980 #ifdef MULTI_TLB
2981 cpu_tlb = *list->tlb;
2982diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
2983index 56f72d2..6924200 100644
2984--- a/arch/arm/kernel/signal.c
2985+++ b/arch/arm/kernel/signal.c
2986@@ -433,22 +433,14 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
2987 __put_user(sigreturn_codes[idx+1], rc+1))
2988 return 1;
2989
2990- if (cpsr & MODE32_BIT) {
2991- /*
2992- * 32-bit code can use the new high-page
2993- * signal return code support.
2994- */
2995- retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
2996- } else {
2997- /*
2998- * Ensure that the instruction cache sees
2999- * the return code written onto the stack.
3000- */
3001- flush_icache_range((unsigned long)rc,
3002- (unsigned long)(rc + 2));
3003+ /*
3004+ * Ensure that the instruction cache sees
3005+ * the return code written onto the stack.
3006+ */
3007+ flush_icache_range((unsigned long)rc,
3008+ (unsigned long)(rc + 2));
3009
3010- retcode = ((unsigned long)rc) + thumb;
3011- }
3012+ retcode = ((unsigned long)rc) + thumb;
3013 }
3014
3015 regs->ARM_r0 = usig;
3016diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3017index 58af91c..343ce99 100644
3018--- a/arch/arm/kernel/smp.c
3019+++ b/arch/arm/kernel/smp.c
3020@@ -70,7 +70,7 @@ enum ipi_msg_type {
3021
3022 static DECLARE_COMPLETION(cpu_running);
3023
3024-static struct smp_operations smp_ops;
3025+static struct smp_operations smp_ops __read_only;
3026
3027 void __init smp_set_ops(struct smp_operations *ops)
3028 {
3029diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3030index b0179b8..829510e 100644
3031--- a/arch/arm/kernel/traps.c
3032+++ b/arch/arm/kernel/traps.c
3033@@ -57,7 +57,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3034 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3035 {
3036 #ifdef CONFIG_KALLSYMS
3037- printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3038+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3039 #else
3040 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3041 #endif
3042@@ -266,6 +266,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3043 static int die_owner = -1;
3044 static unsigned int die_nest_count;
3045
3046+extern void gr_handle_kernel_exploit(void);
3047+
3048 static unsigned long oops_begin(void)
3049 {
3050 int cpu;
3051@@ -308,6 +310,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3052 panic("Fatal exception in interrupt");
3053 if (panic_on_oops)
3054 panic("Fatal exception");
3055+
3056+ gr_handle_kernel_exploit();
3057+
3058 if (signr)
3059 do_exit(signr);
3060 }
3061@@ -601,7 +606,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
3062 * The user helper at 0xffff0fe0 must be used instead.
3063 * (see entry-armv.S for details)
3064 */
3065+ pax_open_kernel();
3066 *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
3067+ pax_close_kernel();
3068 }
3069 return 0;
3070
3071@@ -841,13 +848,10 @@ void __init early_trap_init(void *vectors_base)
3072 */
3073 kuser_get_tls_init(vectors);
3074
3075- /*
3076- * Copy signal return handlers into the vector page, and
3077- * set sigreturn to be a pointer to these.
3078- */
3079- memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
3080- sigreturn_codes, sizeof(sigreturn_codes));
3081-
3082 flush_icache_range(vectors, vectors + PAGE_SIZE);
3083- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3084+
3085+#ifndef CONFIG_PAX_MEMORY_UDEREF
3086+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3087+#endif
3088+
3089 }
3090diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3091index 11c1785..c67d54c 100644
3092--- a/arch/arm/kernel/vmlinux.lds.S
3093+++ b/arch/arm/kernel/vmlinux.lds.S
3094@@ -8,7 +8,11 @@
3095 #include <asm/thread_info.h>
3096 #include <asm/memory.h>
3097 #include <asm/page.h>
3098-
3099+
3100+#ifdef CONFIG_PAX_KERNEXEC
3101+#include <asm/pgtable.h>
3102+#endif
3103+
3104 #define PROC_INFO \
3105 . = ALIGN(4); \
3106 VMLINUX_SYMBOL(__proc_info_begin) = .; \
3107@@ -90,6 +94,11 @@ SECTIONS
3108 _text = .;
3109 HEAD_TEXT
3110 }
3111+
3112+#ifdef CONFIG_PAX_KERNEXEC
3113+ . = ALIGN(1<<SECTION_SHIFT);
3114+#endif
3115+
3116 .text : { /* Real text segment */
3117 _stext = .; /* Text and read-only data */
3118 __exception_text_start = .;
3119@@ -144,6 +153,10 @@ SECTIONS
3120
3121 _etext = .; /* End of text and rodata section */
3122
3123+#ifdef CONFIG_PAX_KERNEXEC
3124+ . = ALIGN(1<<SECTION_SHIFT);
3125+#endif
3126+
3127 #ifndef CONFIG_XIP_KERNEL
3128 . = ALIGN(PAGE_SIZE);
3129 __init_begin = .;
3130@@ -203,6 +216,11 @@ SECTIONS
3131 . = PAGE_OFFSET + TEXT_OFFSET;
3132 #else
3133 __init_end = .;
3134+
3135+#ifdef CONFIG_PAX_KERNEXEC
3136+ . = ALIGN(1<<SECTION_SHIFT);
3137+#endif
3138+
3139 . = ALIGN(THREAD_SIZE);
3140 __data_loc = .;
3141 #endif
3142diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3143index 14a0d98..7771a7d 100644
3144--- a/arch/arm/lib/clear_user.S
3145+++ b/arch/arm/lib/clear_user.S
3146@@ -12,14 +12,14 @@
3147
3148 .text
3149
3150-/* Prototype: int __clear_user(void *addr, size_t sz)
3151+/* Prototype: int ___clear_user(void *addr, size_t sz)
3152 * Purpose : clear some user memory
3153 * Params : addr - user memory address to clear
3154 * : sz - number of bytes to clear
3155 * Returns : number of bytes NOT cleared
3156 */
3157 ENTRY(__clear_user_std)
3158-WEAK(__clear_user)
3159+WEAK(___clear_user)
3160 stmfd sp!, {r1, lr}
3161 mov r2, #0
3162 cmp r1, #4
3163@@ -44,7 +44,7 @@ WEAK(__clear_user)
3164 USER( strnebt r2, [r0])
3165 mov r0, #0
3166 ldmfd sp!, {r1, pc}
3167-ENDPROC(__clear_user)
3168+ENDPROC(___clear_user)
3169 ENDPROC(__clear_user_std)
3170
3171 .pushsection .fixup,"ax"
3172diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3173index 66a477a..bee61d3 100644
3174--- a/arch/arm/lib/copy_from_user.S
3175+++ b/arch/arm/lib/copy_from_user.S
3176@@ -16,7 +16,7 @@
3177 /*
3178 * Prototype:
3179 *
3180- * size_t __copy_from_user(void *to, const void *from, size_t n)
3181+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3182 *
3183 * Purpose:
3184 *
3185@@ -84,11 +84,11 @@
3186
3187 .text
3188
3189-ENTRY(__copy_from_user)
3190+ENTRY(___copy_from_user)
3191
3192 #include "copy_template.S"
3193
3194-ENDPROC(__copy_from_user)
3195+ENDPROC(___copy_from_user)
3196
3197 .pushsection .fixup,"ax"
3198 .align 0
3199diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3200index 6ee2f67..d1cce76 100644
3201--- a/arch/arm/lib/copy_page.S
3202+++ b/arch/arm/lib/copy_page.S
3203@@ -10,6 +10,7 @@
3204 * ASM optimised string functions
3205 */
3206 #include <linux/linkage.h>
3207+#include <linux/const.h>
3208 #include <asm/assembler.h>
3209 #include <asm/asm-offsets.h>
3210 #include <asm/cache.h>
3211diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3212index d066df6..df28194 100644
3213--- a/arch/arm/lib/copy_to_user.S
3214+++ b/arch/arm/lib/copy_to_user.S
3215@@ -16,7 +16,7 @@
3216 /*
3217 * Prototype:
3218 *
3219- * size_t __copy_to_user(void *to, const void *from, size_t n)
3220+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3221 *
3222 * Purpose:
3223 *
3224@@ -88,11 +88,11 @@
3225 .text
3226
3227 ENTRY(__copy_to_user_std)
3228-WEAK(__copy_to_user)
3229+WEAK(___copy_to_user)
3230
3231 #include "copy_template.S"
3232
3233-ENDPROC(__copy_to_user)
3234+ENDPROC(___copy_to_user)
3235 ENDPROC(__copy_to_user_std)
3236
3237 .pushsection .fixup,"ax"
3238diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3239index 7d08b43..f7ca7ea 100644
3240--- a/arch/arm/lib/csumpartialcopyuser.S
3241+++ b/arch/arm/lib/csumpartialcopyuser.S
3242@@ -57,8 +57,8 @@
3243 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3244 */
3245
3246-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3247-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3248+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3249+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3250
3251 #include "csumpartialcopygeneric.S"
3252
3253diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3254index 6b93f6a..88d9b64 100644
3255--- a/arch/arm/lib/delay.c
3256+++ b/arch/arm/lib/delay.c
3257@@ -28,12 +28,14 @@
3258 /*
3259 * Default to the loop-based delay implementation.
3260 */
3261-struct arm_delay_ops arm_delay_ops = {
3262+static struct arm_delay_ops arm_loop_delay_ops = {
3263 .delay = __loop_delay,
3264 .const_udelay = __loop_const_udelay,
3265 .udelay = __loop_udelay,
3266 };
3267
3268+struct arm_delay_ops *arm_delay_ops __read_only = &arm_loop_delay_ops;
3269+
3270 static const struct delay_timer *delay_timer;
3271 static bool delay_calibrated;
3272
3273@@ -67,6 +69,13 @@ static void __timer_udelay(unsigned long usecs)
3274 __timer_const_udelay(usecs * UDELAY_MULT);
3275 }
3276
3277+static struct arm_delay_ops arm_timer_delay_ops = {
3278+ .delay = __timer_delay,
3279+ .const_udelay = __timer_const_udelay,
3280+ .udelay = __timer_udelay,
3281+ .const_clock = true,
3282+};
3283+
3284 void __init register_current_timer_delay(const struct delay_timer *timer)
3285 {
3286 if (!delay_calibrated) {
3287@@ -74,10 +83,7 @@ void __init register_current_timer_delay(const struct delay_timer *timer)
3288 delay_timer = timer;
3289 lpj_fine = timer->freq / HZ;
3290 loops_per_jiffy = lpj_fine;
3291- arm_delay_ops.delay = __timer_delay;
3292- arm_delay_ops.const_udelay = __timer_const_udelay;
3293- arm_delay_ops.udelay = __timer_udelay;
3294- arm_delay_ops.const_clock = true;
3295+ arm_delay_ops = &arm_timer_delay_ops;
3296 delay_calibrated = true;
3297 } else {
3298 pr_info("Ignoring duplicate/late registration of read_current_timer delay\n");
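
[Editor's note — commentary, not patch content. The delay.c rework is
one instance of a pattern this patch applies repeatedly (see also the
mach-kirkwood and gpmc.c hunks below): instead of filling in a writable
ops structure field by field at runtime, each variant becomes a fully
initialised static object and only a single pointer is ever swapped.
A minimal sketch with hypothetical names:]

struct ops { void (*run)(void); };
static void run_loop(void)  { /* calibrated busy-wait */ }
static void run_timer(void) { /* timer-backed delay   */ }
static const struct ops loop_ops  = { .run = run_loop  };
static const struct ops timer_ops = { .run = run_timer };
static const struct ops *cur_ops = &loop_ops;

static void switch_to_timer(void)
{
        cur_ops = &timer_ops;   /* flip the pointer, never individual fields */
}
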
3299diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3300index 025f742..8432b08 100644
3301--- a/arch/arm/lib/uaccess_with_memcpy.c
3302+++ b/arch/arm/lib/uaccess_with_memcpy.c
3303@@ -104,7 +104,7 @@ out:
3304 }
3305
3306 unsigned long
3307-__copy_to_user(void __user *to, const void *from, unsigned long n)
3308+___copy_to_user(void __user *to, const void *from, unsigned long n)
3309 {
3310 /*
3311 * This test is stubbed out of the main function above to keep
3312diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
3313index bac21a5..b67ef8e 100644
3314--- a/arch/arm/mach-kirkwood/common.c
3315+++ b/arch/arm/mach-kirkwood/common.c
3316@@ -150,7 +150,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
3317 clk_gate_ops.disable(hw);
3318 }
3319
3320-static struct clk_ops clk_gate_fn_ops;
3321+static int clk_gate_fn_is_enabled(struct clk_hw *hw)
3322+{
3323+ return clk_gate_ops.is_enabled(hw);
3324+}
3325+
3326+static struct clk_ops clk_gate_fn_ops = {
3327+ .enable = clk_gate_fn_enable,
3328+ .disable = clk_gate_fn_disable,
3329+ .is_enabled = clk_gate_fn_is_enabled,
3330+};
3331
3332 static struct clk __init *clk_register_gate_fn(struct device *dev,
3333 const char *name,
3334@@ -184,14 +193,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
3335 gate_fn->fn_en = fn_en;
3336 gate_fn->fn_dis = fn_dis;
3337
3338- /* ops is the gate ops, but with our enable/disable functions */
3339- if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
3340- clk_gate_fn_ops.disable != clk_gate_fn_disable) {
3341- clk_gate_fn_ops = clk_gate_ops;
3342- clk_gate_fn_ops.enable = clk_gate_fn_enable;
3343- clk_gate_fn_ops.disable = clk_gate_fn_disable;
3344- }
3345-
3346 clk = clk_register(dev, &gate_fn->gate.hw);
3347
3348 if (IS_ERR(clk))
3349diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3350index 0abb30f..54064da 100644
3351--- a/arch/arm/mach-omap2/board-n8x0.c
3352+++ b/arch/arm/mach-omap2/board-n8x0.c
3353@@ -631,7 +631,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3354 }
3355 #endif
3356
3357-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3358+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3359 .late_init = n8x0_menelaus_late_init,
3360 };
3361
3362diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
3363index 8033cb7..2f7cb62 100644
3364--- a/arch/arm/mach-omap2/gpmc.c
3365+++ b/arch/arm/mach-omap2/gpmc.c
3366@@ -139,7 +139,6 @@ struct omap3_gpmc_regs {
3367 };
3368
3369 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
3370-static struct irq_chip gpmc_irq_chip;
3371 static unsigned gpmc_irq_start;
3372
3373 static struct resource gpmc_mem_root;
3374@@ -700,6 +699,18 @@ static void gpmc_irq_noop(struct irq_data *data) { }
3375
3376 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
3377
3378+static struct irq_chip gpmc_irq_chip = {
3379+ .name = "gpmc",
3380+ .irq_startup = gpmc_irq_noop_ret,
3381+ .irq_enable = gpmc_irq_enable,
3382+ .irq_disable = gpmc_irq_disable,
3383+ .irq_shutdown = gpmc_irq_noop,
3384+ .irq_ack = gpmc_irq_noop,
3385+ .irq_mask = gpmc_irq_noop,
3386+ .irq_unmask = gpmc_irq_noop,
3387+
3388+};
3389+
3390 static int gpmc_setup_irq(void)
3391 {
3392 int i;
3393@@ -714,15 +725,6 @@ static int gpmc_setup_irq(void)
3394 return gpmc_irq_start;
3395 }
3396
3397- gpmc_irq_chip.name = "gpmc";
3398- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
3399- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
3400- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
3401- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
3402- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
3403- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
3404- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
3405-
3406 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
3407 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
3408
3409diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3410index 5d3b4f4..ddba3c0 100644
3411--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3412+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3413@@ -340,7 +340,7 @@ static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self,
3414 return NOTIFY_OK;
3415 }
3416
3417-static struct notifier_block __refdata irq_hotplug_notifier = {
3418+static struct notifier_block irq_hotplug_notifier = {
3419 .notifier_call = irq_cpu_hotplug_notify,
3420 };
3421
3422diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3423index e065daa..7b1ad9b 100644
3424--- a/arch/arm/mach-omap2/omap_device.c
3425+++ b/arch/arm/mach-omap2/omap_device.c
3426@@ -686,7 +686,7 @@ void omap_device_delete(struct omap_device *od)
3427 * passes along the return value of omap_device_build_ss().
3428 */
3429 struct platform_device __init *omap_device_build(const char *pdev_name, int pdev_id,
3430- struct omap_hwmod *oh, void *pdata,
3431+ struct omap_hwmod *oh, const void *pdata,
3432 int pdata_len,
3433 struct omap_device_pm_latency *pm_lats,
3434 int pm_lats_cnt, int is_early_device)
3435@@ -720,7 +720,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name, int pdev
3436 */
3437 struct platform_device __init *omap_device_build_ss(const char *pdev_name, int pdev_id,
3438 struct omap_hwmod **ohs, int oh_cnt,
3439- void *pdata, int pdata_len,
3440+ const void *pdata, int pdata_len,
3441 struct omap_device_pm_latency *pm_lats,
3442 int pm_lats_cnt, int is_early_device)
3443 {
3444diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3445index 0933c59..42b8e2d 100644
3446--- a/arch/arm/mach-omap2/omap_device.h
3447+++ b/arch/arm/mach-omap2/omap_device.h
3448@@ -91,14 +91,14 @@ int omap_device_shutdown(struct platform_device *pdev);
3449 /* Core code interface */
3450
3451 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3452- struct omap_hwmod *oh, void *pdata,
3453+ struct omap_hwmod *oh, const void *pdata,
3454 int pdata_len,
3455 struct omap_device_pm_latency *pm_lats,
3456 int pm_lats_cnt, int is_early_device);
3457
3458 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3459 struct omap_hwmod **oh, int oh_cnt,
3460- void *pdata, int pdata_len,
3461+ const void *pdata, int pdata_len,
3462 struct omap_device_pm_latency *pm_lats,
3463 int pm_lats_cnt, int is_early_device);
3464
3465diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3466index 4653efb..8c60bf7 100644
3467--- a/arch/arm/mach-omap2/omap_hwmod.c
3468+++ b/arch/arm/mach-omap2/omap_hwmod.c
3469@@ -189,10 +189,10 @@ struct omap_hwmod_soc_ops {
3470 int (*init_clkdm)(struct omap_hwmod *oh);
3471 void (*update_context_lost)(struct omap_hwmod *oh);
3472 int (*get_context_lost)(struct omap_hwmod *oh);
3473-};
3474+} __no_const;
3475
3476 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3477-static struct omap_hwmod_soc_ops soc_ops;
3478+static struct omap_hwmod_soc_ops soc_ops __read_only;
3479
3480 /* omap_hwmod_list contains all registered struct omap_hwmods */
3481 static LIST_HEAD(omap_hwmod_list);
3482diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3483index 7c2b4ed..b2ea51f 100644
3484--- a/arch/arm/mach-omap2/wd_timer.c
3485+++ b/arch/arm/mach-omap2/wd_timer.c
3486@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3487 struct omap_hwmod *oh;
3488 char *oh_name = "wd_timer2";
3489 char *dev_name = "omap_wdt";
3490- struct omap_wd_timer_platform_data pdata;
3491+ static struct omap_wd_timer_platform_data pdata = {
3492+ .read_reset_sources = prm_read_reset_sources
3493+ };
3494
3495 if (!cpu_class_is_omap2() || of_have_populated_dt())
3496 return 0;
3497@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3498 return -EINVAL;
3499 }
3500
3501- pdata.read_reset_sources = prm_read_reset_sources;
3502-
3503 pdev = omap_device_build(dev_name, id, oh, &pdata,
3504 sizeof(struct omap_wd_timer_platform_data),
3505 NULL, 0, 0);
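
Note: the wd_timer hunk above replaces an on-stack pdata with a static, designated-initializer struct, which pairs with the const void *pdata parameter introduced in the omap_device hunks. Below is a minimal userspace sketch of that pattern; all names (wd_pdata, build_device, fake_reset_sources) are illustrative, not kernel API.

/* Sketch: why static platform data pairs with a const void *pdata
 * parameter. Names are stand-ins, not the kernel's. */
#include <stdio.h>

struct wd_pdata {
	unsigned (*read_reset_sources)(void);
};

static unsigned fake_reset_sources(void) { return 0x2; }

/* The callee only reads pdata, so it can take const void *. */
static void build_device(const void *pdata, unsigned len)
{
	const struct wd_pdata *p = pdata;
	printf("reset sources: %#x (len %u)\n", p->read_reset_sources(), len);
}

int main(void)
{
	/* static + designated initializer: the data is set up once at a
	 * fixed address instead of being assigned field-by-field on the
	 * stack at runtime, which is what the hunk above removes. */
	static struct wd_pdata pdata = {
		.read_reset_sources = fake_reset_sources,
	};
	build_device(&pdata, sizeof(pdata));
	return 0;
}
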
3506diff --git a/arch/arm/mach-ux500/include/mach/setup.h b/arch/arm/mach-ux500/include/mach/setup.h
3507index 6be4c4d..32ac32a 100644
3508--- a/arch/arm/mach-ux500/include/mach/setup.h
3509+++ b/arch/arm/mach-ux500/include/mach/setup.h
3510@@ -38,13 +38,6 @@ extern struct sys_timer ux500_timer;
3511 .type = MT_DEVICE, \
3512 }
3513
3514-#define __MEM_DEV_DESC(x, sz) { \
3515- .virtual = IO_ADDRESS(x), \
3516- .pfn = __phys_to_pfn(x), \
3517- .length = sz, \
3518- .type = MT_MEMORY, \
3519-}
3520-
3521 extern struct smp_operations ux500_smp_ops;
3522 extern void ux500_cpu_die(unsigned int cpu);
3523
3524diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3525index 3fd629d..8b1aca9 100644
3526--- a/arch/arm/mm/Kconfig
3527+++ b/arch/arm/mm/Kconfig
3528@@ -425,7 +425,7 @@ config CPU_32v5
3529
3530 config CPU_32v6
3531 bool
3532- select CPU_USE_DOMAINS if CPU_V6 && MMU
3533+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC
3534 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3535
3536 config CPU_32v6K
3537@@ -577,6 +577,7 @@ config CPU_CP15_MPU
3538
3539 config CPU_USE_DOMAINS
3540 bool
3541+ depends on !ARM_LPAE && !PAX_KERNEXEC
3542 help
3543 This option enables or disables the use of domain switching
3544 via the set_fs() function.
3545diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3546index db26e2e..ee44569 100644
3547--- a/arch/arm/mm/alignment.c
3548+++ b/arch/arm/mm/alignment.c
3549@@ -211,10 +211,12 @@ union offset_union {
3550 #define __get16_unaligned_check(ins,val,addr) \
3551 do { \
3552 unsigned int err = 0, v, a = addr; \
3553+ pax_open_userland(); \
3554 __get8_unaligned_check(ins,v,a,err); \
3555 val = v << ((BE) ? 8 : 0); \
3556 __get8_unaligned_check(ins,v,a,err); \
3557 val |= v << ((BE) ? 0 : 8); \
3558+ pax_close_userland(); \
3559 if (err) \
3560 goto fault; \
3561 } while (0)
3562@@ -228,6 +230,7 @@ union offset_union {
3563 #define __get32_unaligned_check(ins,val,addr) \
3564 do { \
3565 unsigned int err = 0, v, a = addr; \
3566+ pax_open_userland(); \
3567 __get8_unaligned_check(ins,v,a,err); \
3568 val = v << ((BE) ? 24 : 0); \
3569 __get8_unaligned_check(ins,v,a,err); \
3570@@ -236,6 +239,7 @@ union offset_union {
3571 val |= v << ((BE) ? 8 : 16); \
3572 __get8_unaligned_check(ins,v,a,err); \
3573 val |= v << ((BE) ? 0 : 24); \
3574+ pax_close_userland(); \
3575 if (err) \
3576 goto fault; \
3577 } while (0)
3578@@ -249,6 +253,7 @@ union offset_union {
3579 #define __put16_unaligned_check(ins,val,addr) \
3580 do { \
3581 unsigned int err = 0, v = val, a = addr; \
3582+ pax_open_userland(); \
3583 __asm__( FIRST_BYTE_16 \
3584 ARM( "1: "ins" %1, [%2], #1\n" ) \
3585 THUMB( "1: "ins" %1, [%2]\n" ) \
3586@@ -268,6 +273,7 @@ union offset_union {
3587 " .popsection\n" \
3588 : "=r" (err), "=&r" (v), "=&r" (a) \
3589 : "0" (err), "1" (v), "2" (a)); \
3590+ pax_close_userland(); \
3591 if (err) \
3592 goto fault; \
3593 } while (0)
3594@@ -281,6 +287,7 @@ union offset_union {
3595 #define __put32_unaligned_check(ins,val,addr) \
3596 do { \
3597 unsigned int err = 0, v = val, a = addr; \
3598+ pax_open_userland(); \
3599 __asm__( FIRST_BYTE_32 \
3600 ARM( "1: "ins" %1, [%2], #1\n" ) \
3601 THUMB( "1: "ins" %1, [%2]\n" ) \
3602@@ -310,6 +317,7 @@ union offset_union {
3603 " .popsection\n" \
3604 : "=r" (err), "=&r" (v), "=&r" (a) \
3605 : "0" (err), "1" (v), "2" (a)); \
3606+ pax_close_userland(); \
3607 if (err) \
3608 goto fault; \
3609 } while (0)
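
Note: the alignment fixup hunks above bracket every byte-wise user access between pax_open_userland() and pax_close_userland(); under PAX_MEMORY_UDEREF these temporarily re-enable kernel access to the userland domain for exactly the span of the emulated load/store. A minimal userspace sketch of the bracketing pattern follows, with the helpers stubbed as a depth counter (the real ones reprogram ARM domain permissions) and a little-endian byte order assumed.

/* Sketch of the open/close bracketing pattern; helpers are stand-ins. */
#include <stdio.h>

static int userland_open;
static void pax_open_userland(void)  { userland_open++; }
static void pax_close_userland(void) { userland_open--; }

static int get8(const unsigned char *a, unsigned *v)
{
	*v = *a;		/* the kernel version can fault here */
	return 0;
}

static int get16_unaligned(const unsigned char *a, unsigned *val)
{
	unsigned v, err = 0;
	pax_open_userland();	/* grant access for the whole sequence */
	err |= get8(a, &v);	*val = v;
	err |= get8(a + 1, &v);	*val |= v << 8;
	pax_close_userland();	/* revoke before returning */
	return err ? -1 : 0;
}

int main(void)
{
	unsigned char buf[3] = { 0x34, 0x12 };
	unsigned val;
	if (!get16_unaligned(buf, &val))
		printf("read %#x, open depth now %d\n", val, userland_open);
	return 0;
}
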
3610diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3611index 5dbf13f..1a60561 100644
3612--- a/arch/arm/mm/fault.c
3613+++ b/arch/arm/mm/fault.c
3614@@ -25,6 +25,7 @@
3615 #include <asm/system_misc.h>
3616 #include <asm/system_info.h>
3617 #include <asm/tlbflush.h>
3618+#include <asm/sections.h>
3619
3620 #include "fault.h"
3621
3622@@ -138,6 +139,20 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3623 if (fixup_exception(regs))
3624 return;
3625
3626+#ifdef CONFIG_PAX_KERNEXEC
3627+ if ((fsr & FSR_WRITE) &&
3628+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3629+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3630+ {
3631+ if (current->signal->curr_ip)
3632+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3633+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3634+ else
3635+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3636+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3637+ }
3638+#endif
3639+
3640 /*
3641 * No handler, we'll have to terminate things with extreme prejudice.
3642 */
3643@@ -174,6 +189,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3644 }
3645 #endif
3646
3647+#ifdef CONFIG_PAX_PAGEEXEC
3648+ if (fsr & FSR_LNX_PF) {
3649+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3650+ do_group_exit(SIGKILL);
3651+ }
3652+#endif
3653+
3654 tsk->thread.address = addr;
3655 tsk->thread.error_code = fsr;
3656 tsk->thread.trap_no = 14;
3657@@ -398,6 +420,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3658 }
3659 #endif /* CONFIG_MMU */
3660
3661+#ifdef CONFIG_PAX_PAGEEXEC
3662+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3663+{
3664+ long i;
3665+
3666+ printk(KERN_ERR "PAX: bytes at PC: ");
3667+ for (i = 0; i < 20; i++) {
3668+ unsigned char c;
3669+ if (get_user(c, (__force unsigned char __user *)pc+i))
3670+ printk(KERN_CONT "?? ");
3671+ else
3672+ printk(KERN_CONT "%02x ", c);
3673+ }
3674+ printk("\n");
3675+
3676+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3677+ for (i = -1; i < 20; i++) {
3678+ unsigned long c;
3679+ if (get_user(c, (__force unsigned long __user *)sp+i))
3680+ printk(KERN_CONT "???????? ");
3681+ else
3682+ printk(KERN_CONT "%08lx ", c);
3683+ }
3684+ printk("\n");
3685+}
3686+#endif
3687+
3688 /*
3689 * First Level Translation Fault Handler
3690 *
3691@@ -543,9 +592,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3692 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3693 struct siginfo info;
3694
3695+#ifdef CONFIG_PAX_MEMORY_UDEREF
3696+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
3697+ if (current->signal->curr_ip)
3698+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3699+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3700+ else
3701+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3702+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3703+ goto die;
3704+ }
3705+#endif
3706+
3707 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
3708 return;
3709
3710+die:
3711 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
3712 inf->name, fsr, addr);
3713
3714@@ -575,9 +637,49 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
3715 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
3716 struct siginfo info;
3717
3718+ if (user_mode(regs)) {
3719+ if (addr == 0xffff0fe0UL) {
3720+ /*
3721+ * PaX: __kuser_get_tls emulation
3722+ */
3723+ regs->ARM_r0 = current_thread_info()->tp_value;
3724+ regs->ARM_pc = regs->ARM_lr;
3725+ return;
3726+ }
3727+ }
3728+
3729+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3730+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
3731+ if (current->signal->curr_ip)
3732+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3733+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
3734+ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
3735+ else
3736+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
3737+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
3738+ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
3739+ goto die;
3740+ }
3741+#endif
3742+
3743+#ifdef CONFIG_PAX_REFCOUNT
3744+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
3745+ unsigned int bkpt;
3746+
3747+ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
3748+ current->thread.error_code = ifsr;
3749+ current->thread.trap_no = 0;
3750+ pax_report_refcount_overflow(regs);
3751+ fixup_exception(regs);
3752+ return;
3753+ }
3754+ }
3755+#endif
3756+
3757 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
3758 return;
3759
3760+die:
3761 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
3762 inf->name, ifsr, addr);
3763
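
Note: pax_report_insns() above dumps 20 bytes at the faulting PC and 21 words starting at SP-4, printing placeholders whenever get_user() faults. A userspace analogue of the PC dump, with get_user() replaced by a direct read from a canned buffer (the byte values are real ARM encodings, chosen only for illustration):

#include <stdio.h>

static void report_insns(const unsigned char *pc)
{
	long i;
	printf("bytes at PC: ");
	for (i = 0; i < 20; i++)
		printf("%02x ", pc[i]);	/* kernel prints "?? " on fault */
	printf("\n");
}

int main(void)
{
	/* pretend these are the bytes fetched at the faulting PC:
	 * e1a01001 (mov r1, r1) and e12fff1e (bx lr), little-endian */
	static const unsigned char text[20] = {
		0x01, 0x10, 0xa0, 0xe1, 0x1e, 0xff, 0x2f, 0xe1,
	};
	report_insns(text);
	return 0;
}
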
3764diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
3765index cf08bdf..772656c 100644
3766--- a/arch/arm/mm/fault.h
3767+++ b/arch/arm/mm/fault.h
3768@@ -3,6 +3,7 @@
3769
3770 /*
3771 * Fault status register encodings. We steal bit 31 for our own purposes.
3772+ * Set when the FSR value is from an instruction fault.
3773 */
3774 #define FSR_LNX_PF (1 << 31)
3775 #define FSR_WRITE (1 << 11)
3776@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
3777 }
3778 #endif
3779
3780+/* valid for LPAE and !LPAE */
3781+static inline int is_xn_fault(unsigned int fsr)
3782+{
3783+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
3784+}
3785+
3786+static inline int is_domain_fault(unsigned int fsr)
3787+{
3788+ return ((fsr_fs(fsr) & 0xD) == 0x9);
3789+}
3790+
3791 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
3792 unsigned long search_exception_table(unsigned long addr);
3793
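
Note: the two predicates added above decode the FSR status field: execute-never faults fall in the 0x0c..0x0f group (hence the 0x3c mask), and domain faults match the 0x9 pattern under the 0xd mask. A tiny harness checking the bit math, with fsr_fs() simplified to the low status bits (the kernel version also folds in the extra status bit on non-LPAE):

#include <stdio.h>

static unsigned fsr_fs(unsigned fsr) { return fsr & 0x3f; } /* simplified */

static int is_xn_fault(unsigned fsr)     { return (fsr_fs(fsr) & 0x3c) == 0xc; }
static int is_domain_fault(unsigned fsr) { return (fsr_fs(fsr) & 0xd) == 0x9; }

int main(void)
{
	/* 0x0d: permission fault, section -> in the XN-reporting group */
	printf("0x0d: xn=%d dom=%d\n", is_xn_fault(0x0d), is_domain_fault(0x0d));
	/* 0x09: domain fault, section */
	printf("0x09: xn=%d dom=%d\n", is_xn_fault(0x09), is_domain_fault(0x09));
	return 0;
}
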
3794diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
3795index ad722f1..763fdd3 100644
3796--- a/arch/arm/mm/init.c
3797+++ b/arch/arm/mm/init.c
3798@@ -30,6 +30,8 @@
3799 #include <asm/setup.h>
3800 #include <asm/tlb.h>
3801 #include <asm/fixmap.h>
3802+#include <asm/system_info.h>
3803+#include <asm/cp15.h>
3804
3805 #include <asm/mach/arch.h>
3806 #include <asm/mach/map.h>
3807@@ -736,7 +738,46 @@ void free_initmem(void)
3808 {
3809 #ifdef CONFIG_HAVE_TCM
3810 extern char __tcm_start, __tcm_end;
3811+#endif
3812
3813+#ifdef CONFIG_PAX_KERNEXEC
3814+ unsigned long addr;
3815+ pgd_t *pgd;
3816+ pud_t *pud;
3817+ pmd_t *pmd;
3818+ int cpu_arch = cpu_architecture();
3819+ unsigned int cr = get_cr();
3820+
3821+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
3822+		/* make page tables, etc. before .text NX */
3823+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
3824+ pgd = pgd_offset_k(addr);
3825+ pud = pud_offset(pgd, addr);
3826+ pmd = pmd_offset(pud, addr);
3827+ __section_update(pmd, addr, PMD_SECT_XN);
3828+ }
3829+ /* make init NX */
3830+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
3831+ pgd = pgd_offset_k(addr);
3832+ pud = pud_offset(pgd, addr);
3833+ pmd = pmd_offset(pud, addr);
3834+ __section_update(pmd, addr, PMD_SECT_XN);
3835+ }
3836+ /* make kernel code/rodata RX */
3837+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
3838+ pgd = pgd_offset_k(addr);
3839+ pud = pud_offset(pgd, addr);
3840+ pmd = pmd_offset(pud, addr);
3841+#ifdef CONFIG_ARM_LPAE
3842+ __section_update(pmd, addr, PMD_SECT_RDONLY);
3843+#else
3844+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
3845+#endif
3846+ }
3847+ }
3848+#endif
3849+
3850+#ifdef CONFIG_HAVE_TCM
3851 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
3852 totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
3853 __phys_to_pfn(__pa(&__tcm_end)),
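
Note: the free_initmem() hunk above walks the kernel mapping in SECTION_SIZE (1 MiB) steps and flips the XN/read-only bits per section once init memory is released. A sketch of the range walk only, with a print callback standing in for the kernel's __section_update()/pmd machinery:

#include <stdio.h>

#define SECTION_SIZE (1UL << 20)

static void section_update(unsigned long addr, const char *what)
{
	printf("section @ %#lx: set %s\n", addr, what);
}

static void walk(unsigned long start, unsigned long end, const char *what)
{
	unsigned long addr;
	for (addr = start; addr < end; addr += SECTION_SIZE)
		section_update(addr, what);
}

int main(void)
{
	/* illustrative addresses only */
	walk(0xc0000000UL, 0xc0300000UL, "XN");		/* data before .text */
	walk(0xc0300000UL, 0xc0500000UL, "RO (RX)");	/* kernel text/rodata */
	return 0;
}
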
3854diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
3855index 88fd86c..7a224ce 100644
3856--- a/arch/arm/mm/ioremap.c
3857+++ b/arch/arm/mm/ioremap.c
3858@@ -335,9 +335,9 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
3859 unsigned int mtype;
3860
3861 if (cached)
3862- mtype = MT_MEMORY;
3863+ mtype = MT_MEMORY_RX;
3864 else
3865- mtype = MT_MEMORY_NONCACHED;
3866+ mtype = MT_MEMORY_NONCACHED_RX;
3867
3868 return __arm_ioremap_caller(phys_addr, size, mtype,
3869 __builtin_return_address(0));
3870diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
3871index 10062ce..aa96dd7 100644
3872--- a/arch/arm/mm/mmap.c
3873+++ b/arch/arm/mm/mmap.c
3874@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
3875 struct vm_area_struct *vma;
3876 int do_align = 0;
3877 int aliasing = cache_is_vipt_aliasing();
3878+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
3879 struct vm_unmapped_area_info info;
3880
3881 /*
3882@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
3883 if (len > TASK_SIZE)
3884 return -ENOMEM;
3885
3886+#ifdef CONFIG_PAX_RANDMMAP
3887+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3888+#endif
3889+
3890 if (addr) {
3891 if (do_align)
3892 addr = COLOUR_ALIGN(addr, pgoff);
3893@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
3894 addr = PAGE_ALIGN(addr);
3895
3896 vma = find_vma(mm, addr);
3897- if (TASK_SIZE - len >= addr &&
3898- (!vma || addr + len <= vma->vm_start))
3899+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
3900 return addr;
3901 }
3902
3903@@ -112,6 +116,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3904 unsigned long addr = addr0;
3905 int do_align = 0;
3906 int aliasing = cache_is_vipt_aliasing();
3907+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
3908 struct vm_unmapped_area_info info;
3909
3910 /*
3911@@ -132,6 +137,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3912 return addr;
3913 }
3914
3915+#ifdef CONFIG_PAX_RANDMMAP
3916+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3917+#endif
3918+
3919 /* requesting a specific address */
3920 if (addr) {
3921 if (do_align)
3922@@ -139,8 +148,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3923 else
3924 addr = PAGE_ALIGN(addr);
3925 vma = find_vma(mm, addr);
3926- if (TASK_SIZE - len >= addr &&
3927- (!vma || addr + len <= vma->vm_start))
3928+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
3929 return addr;
3930 }
3931
3932@@ -162,6 +170,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3933 VM_BUG_ON(addr != -ENOMEM);
3934 info.flags = 0;
3935 info.low_limit = mm->mmap_base;
3936+
3937+#ifdef CONFIG_PAX_RANDMMAP
3938+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3939+ info.low_limit += mm->delta_mmap;
3940+#endif
3941+
3942 info.high_limit = TASK_SIZE;
3943 addr = vm_unmapped_area(&info);
3944 }
3945@@ -173,6 +187,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3946 {
3947 unsigned long random_factor = 0UL;
3948
3949+#ifdef CONFIG_PAX_RANDMMAP
3950+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3951+#endif
3952+
3953 /* 8 bits of randomness in 20 address space bits */
3954 if ((current->flags & PF_RANDOMIZE) &&
3955 !(current->personality & ADDR_NO_RANDOMIZE))
3956@@ -180,10 +198,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3957
3958 if (mmap_is_legacy()) {
3959 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3960+
3961+#ifdef CONFIG_PAX_RANDMMAP
3962+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3963+ mm->mmap_base += mm->delta_mmap;
3964+#endif
3965+
3966 mm->get_unmapped_area = arch_get_unmapped_area;
3967 mm->unmap_area = arch_unmap_area;
3968 } else {
3969 mm->mmap_base = mmap_base(random_factor);
3970+
3971+#ifdef CONFIG_PAX_RANDMMAP
3972+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3973+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3974+#endif
3975+
3976 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3977 mm->unmap_area = arch_unmap_area_topdown;
3978 }
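
Note: the mmap.c hunks above do two things: under PAX_RANDMMAP they skip the caller's address hint and shift the mmap base by a per-exec delta, and they replace the raw "addr + len <= vma->vm_start" test with check_heap_stack_gap(). A sketch of the gap test, assuming the semantics "fits below the next VMA with an extra randomized offset" (the real helper also accounts for the stack guard gap):

#include <stdbool.h>
#include <stdio.h>

struct vma { unsigned long vm_start, vm_end; };

static bool gap_ok(const struct vma *vma, unsigned long addr,
		   unsigned long len, unsigned long offset)
{
	if (!vma)		/* nothing above us: the request fits */
		return true;
	/* [addr, addr+len) plus the extra offset must stay below the
	 * next mapping */
	return addr + len + offset <= vma->vm_start;
}

int main(void)
{
	struct vma next = { 0x40100000UL, 0x40200000UL };
	printf("%d\n", gap_ok(&next, 0x40000000UL, 0x80000UL, 0x1000UL));  /* 1 */
	printf("%d\n", gap_ok(&next, 0x40000000UL, 0x100000UL, 0x1000UL)); /* 0 */
	return 0;
}
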
3979diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
3980index ce328c7..35b88dc 100644
3981--- a/arch/arm/mm/mmu.c
3982+++ b/arch/arm/mm/mmu.c
3983@@ -35,6 +35,23 @@
3984
3985 #include "mm.h"
3986
3987+
3988+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3989+void modify_domain(unsigned int dom, unsigned int type)
3990+{
3991+ struct thread_info *thread = current_thread_info();
3992+ unsigned int domain = thread->cpu_domain;
3993+ /*
3994+ * DOMAIN_MANAGER might be defined to some other value,
3995+ * use the arch-defined constant
3996+ */
3997+ domain &= ~domain_val(dom, 3);
3998+ thread->cpu_domain = domain | domain_val(dom, type);
3999+ set_domain(thread->cpu_domain);
4000+}
4001+EXPORT_SYMBOL(modify_domain);
4002+#endif
4003+
4004 /*
4005 * empty_zero_page is a special page that is used for
4006 * zero-initialized data and COW.
4007@@ -195,10 +212,18 @@ void adjust_cr(unsigned long mask, unsigned long set)
4008 }
4009 #endif
4010
4011-#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
4012+#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY
4013 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4014
4015-static struct mem_type mem_types[] = {
4016+#ifdef CONFIG_PAX_KERNEXEC
4017+#define L_PTE_KERNEXEC L_PTE_RDONLY
4018+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4019+#else
4020+#define L_PTE_KERNEXEC L_PTE_DIRTY
4021+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4022+#endif
4023+
4024+static struct mem_type mem_types[] __read_only = {
4025 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4026 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4027 L_PTE_SHARED,
4028@@ -227,16 +252,16 @@ static struct mem_type mem_types[] = {
4029 [MT_UNCACHED] = {
4030 .prot_pte = PROT_PTE_DEVICE,
4031 .prot_l1 = PMD_TYPE_TABLE,
4032- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4033+ .prot_sect = PROT_SECT_DEVICE,
4034 .domain = DOMAIN_IO,
4035 },
4036 [MT_CACHECLEAN] = {
4037- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4038+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4039 .domain = DOMAIN_KERNEL,
4040 },
4041 #ifndef CONFIG_ARM_LPAE
4042 [MT_MINICLEAN] = {
4043- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4044+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_RDONLY,
4045 .domain = DOMAIN_KERNEL,
4046 },
4047 #endif
4048@@ -244,36 +269,54 @@ static struct mem_type mem_types[] = {
4049 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4050 L_PTE_RDONLY,
4051 .prot_l1 = PMD_TYPE_TABLE,
4052- .domain = DOMAIN_USER,
4053+ .domain = DOMAIN_VECTORS,
4054 },
4055 [MT_HIGH_VECTORS] = {
4056 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4057- L_PTE_USER | L_PTE_RDONLY,
4058+ L_PTE_RDONLY,
4059 .prot_l1 = PMD_TYPE_TABLE,
4060- .domain = DOMAIN_USER,
4061+ .domain = DOMAIN_VECTORS,
4062 },
4063- [MT_MEMORY] = {
4064+ [MT_MEMORY_RWX] = {
4065 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4066 .prot_l1 = PMD_TYPE_TABLE,
4067 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4068 .domain = DOMAIN_KERNEL,
4069 },
4070+ [MT_MEMORY_RW] = {
4071+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4072+ .prot_l1 = PMD_TYPE_TABLE,
4073+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4074+ .domain = DOMAIN_KERNEL,
4075+ },
4076+ [MT_MEMORY_RX] = {
4077+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4078+ .prot_l1 = PMD_TYPE_TABLE,
4079+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4080+ .domain = DOMAIN_KERNEL,
4081+ },
4082 [MT_ROM] = {
4083- .prot_sect = PMD_TYPE_SECT,
4084+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4085 .domain = DOMAIN_KERNEL,
4086 },
4087- [MT_MEMORY_NONCACHED] = {
4088+ [MT_MEMORY_NONCACHED_RW] = {
4089 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4090 L_PTE_MT_BUFFERABLE,
4091 .prot_l1 = PMD_TYPE_TABLE,
4092 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4093 .domain = DOMAIN_KERNEL,
4094 },
4095+ [MT_MEMORY_NONCACHED_RX] = {
4096+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4097+ L_PTE_MT_BUFFERABLE,
4098+ .prot_l1 = PMD_TYPE_TABLE,
4099+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4100+ .domain = DOMAIN_KERNEL,
4101+ },
4102 [MT_MEMORY_DTCM] = {
4103- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4104- L_PTE_XN,
4105+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4106 .prot_l1 = PMD_TYPE_TABLE,
4107- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4108+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4109 .domain = DOMAIN_KERNEL,
4110 },
4111 [MT_MEMORY_ITCM] = {
4112@@ -283,10 +326,10 @@ static struct mem_type mem_types[] = {
4113 },
4114 [MT_MEMORY_SO] = {
4115 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4116- L_PTE_MT_UNCACHED | L_PTE_XN,
4117+ L_PTE_MT_UNCACHED,
4118 .prot_l1 = PMD_TYPE_TABLE,
4119 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
4120- PMD_SECT_UNCACHED | PMD_SECT_XN,
4121+ PMD_SECT_UNCACHED,
4122 .domain = DOMAIN_KERNEL,
4123 },
4124 [MT_MEMORY_DMA_READY] = {
4125@@ -371,9 +414,35 @@ static void __init build_mem_type_table(void)
4126 * to prevent speculative instruction fetches.
4127 */
4128 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
4129+ mem_types[MT_DEVICE].prot_pte |= L_PTE_XN;
4130 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
4131+ mem_types[MT_DEVICE_NONSHARED].prot_pte |= L_PTE_XN;
4132 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
4133+ mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_XN;
4134 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
4135+ mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_XN;
4136+
4137+ /* Mark other regions on ARMv6+ as execute-never */
4138+
4139+#ifdef CONFIG_PAX_KERNEXEC
4140+ mem_types[MT_UNCACHED].prot_sect |= PMD_SECT_XN;
4141+ mem_types[MT_UNCACHED].prot_pte |= L_PTE_XN;
4142+ mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_XN;
4143+ mem_types[MT_CACHECLEAN].prot_pte |= L_PTE_XN;
4144+#ifndef CONFIG_ARM_LPAE
4145+ mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_XN;
4146+ mem_types[MT_MINICLEAN].prot_pte |= L_PTE_XN;
4147+#endif
4148+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
4149+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_XN;
4150+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_XN;
4151+	mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_XN;
4152+ mem_types[MT_MEMORY_DTCM].prot_sect |= PMD_SECT_XN;
4153+ mem_types[MT_MEMORY_DTCM].prot_pte |= L_PTE_XN;
4154+#endif
4155+
4156+ mem_types[MT_MEMORY_SO].prot_sect |= PMD_SECT_XN;
4157+ mem_types[MT_MEMORY_SO].prot_pte |= L_PTE_XN;
4158 }
4159 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4160 /*
4161@@ -432,6 +501,9 @@ static void __init build_mem_type_table(void)
4162 * from SVC mode and no access from userspace.
4163 */
4164 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4165+#ifdef CONFIG_PAX_KERNEXEC
4166+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4167+#endif
4168 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4169 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4170 #endif
4171@@ -448,11 +520,17 @@ static void __init build_mem_type_table(void)
4172 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4173 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4174 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4175- mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
4176- mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
4177+ mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4178+ mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4179+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4180+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4181+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4182+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4183 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4184- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
4185- mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
4186+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_S;
4187+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_SHARED;
4188+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_S;
4189+ mem_types[MT_MEMORY_NONCACHED_RX].prot_pte |= L_PTE_SHARED;
4190 }
4191 }
4192
4193@@ -463,15 +541,20 @@ static void __init build_mem_type_table(void)
4194 if (cpu_arch >= CPU_ARCH_ARMv6) {
4195 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4196 /* Non-cacheable Normal is XCB = 001 */
4197- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4198+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4199+ PMD_SECT_BUFFERED;
4200+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4201 PMD_SECT_BUFFERED;
4202 } else {
4203 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4204- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4205+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4206+ PMD_SECT_TEX(1);
4207+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4208 PMD_SECT_TEX(1);
4209 }
4210 } else {
4211- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4212+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_BUFFERABLE;
4213+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_BUFFERABLE;
4214 }
4215
4216 #ifdef CONFIG_ARM_LPAE
4217@@ -487,6 +570,8 @@ static void __init build_mem_type_table(void)
4218 vecs_pgprot |= PTE_EXT_AF;
4219 #endif
4220
4221+ user_pgprot |= __supported_pte_mask;
4222+
4223 for (i = 0; i < 16; i++) {
4224 pteval_t v = pgprot_val(protection_map[i]);
4225 protection_map[i] = __pgprot(v | user_pgprot);
4226@@ -501,10 +586,15 @@ static void __init build_mem_type_table(void)
4227
4228 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4229 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4230- mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
4231- mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
4232+ mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4233+ mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4234+ mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4235+ mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4236+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4237+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4238 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4239- mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
4240+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= ecc_mask;
4241+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= ecc_mask;
4242 mem_types[MT_ROM].prot_sect |= cp->pmd;
4243
4244 switch (cp->pmd) {
4245@@ -1105,18 +1195,15 @@ void __init arm_mm_memblock_reserve(void)
4246 * called function. This means you can't use any function or debugging
4247 * method which may touch any device, otherwise the kernel _will_ crash.
4248 */
4249+
4250+static char vectors[PAGE_SIZE] __read_only __aligned(PAGE_SIZE);
4251+
4252 static void __init devicemaps_init(struct machine_desc *mdesc)
4253 {
4254 struct map_desc map;
4255 unsigned long addr;
4256- void *vectors;
4257
4258- /*
4259- * Allocate the vector page early.
4260- */
4261- vectors = early_alloc(PAGE_SIZE);
4262-
4263- early_trap_init(vectors);
4264+ early_trap_init(&vectors);
4265
4266 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4267 pmd_clear(pmd_off_k(addr));
4268@@ -1156,7 +1243,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
4269 * location (0xffff0000). If we aren't using high-vectors, also
4270 * create a mapping at the low-vectors virtual address.
4271 */
4272- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4273+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4274 map.virtual = 0xffff0000;
4275 map.length = PAGE_SIZE;
4276 map.type = MT_HIGH_VECTORS;
4277@@ -1214,8 +1301,39 @@ static void __init map_lowmem(void)
4278 map.pfn = __phys_to_pfn(start);
4279 map.virtual = __phys_to_virt(start);
4280 map.length = end - start;
4281- map.type = MT_MEMORY;
4282
4283+#ifdef CONFIG_PAX_KERNEXEC
4284+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4285+ struct map_desc kernel;
4286+ struct map_desc initmap;
4287+
4288+ /* when freeing initmem we will make this RW */
4289+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4290+ initmap.virtual = (unsigned long)__init_begin;
4291+ initmap.length = _sdata - __init_begin;
4292+ initmap.type = MT_MEMORY_RWX;
4293+ create_mapping(&initmap);
4294+
4295+ /* when freeing initmem we will make this RX */
4296+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4297+ kernel.virtual = (unsigned long)_stext;
4298+ kernel.length = __init_begin - _stext;
4299+ kernel.type = MT_MEMORY_RWX;
4300+ create_mapping(&kernel);
4301+
4302+ if (map.virtual < (unsigned long)_stext) {
4303+ map.length = (unsigned long)_stext - map.virtual;
4304+ map.type = MT_MEMORY_RWX;
4305+ create_mapping(&map);
4306+ }
4307+
4308+ map.pfn = __phys_to_pfn(__pa(_sdata));
4309+ map.virtual = (unsigned long)_sdata;
4310+ map.length = end - __pa(_sdata);
4311+ }
4312+#endif
4313+
4314+ map.type = MT_MEMORY_RW;
4315 create_mapping(&map);
4316 }
4317 }
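
Note: the mmu.c diff above opens with modify_domain(), which rewrites one 2-bit field of the cached domain access control value and reloads it. A sketch of that field update; domain_val() mirrors the kernel macro ((type) << (2*(dom))), and the type encodings are the architectural ones (0 = no access, 1 = client, 3 = manager):

#include <stdio.h>

#define domain_val(dom, type) ((unsigned)(type) << (2 * (dom)))

static unsigned modify_domain(unsigned dacr, unsigned dom, unsigned type)
{
	dacr &= ~domain_val(dom, 3);		/* clear the 2-bit field */
	return dacr | domain_val(dom, type);	/* install the new type */
}

int main(void)
{
	unsigned dacr = 0;
	dacr = modify_domain(dacr, 0, 1);	/* kernel domain: client */
	dacr = modify_domain(dacr, 1, 1);	/* user domain: client */
	dacr = modify_domain(dacr, 1, 0);	/* user: no access (UDEREF-style) */
	printf("DACR = %#x\n", dacr);		/* prints 0x1 */
	return 0;
}
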
4318diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
4319index 6d98c13..3cfb174 100644
4320--- a/arch/arm/mm/proc-v7-2level.S
4321+++ b/arch/arm/mm/proc-v7-2level.S
4322@@ -99,6 +99,9 @@ ENTRY(cpu_v7_set_pte_ext)
4323 tst r1, #L_PTE_XN
4324 orrne r3, r3, #PTE_EXT_XN
4325
4326+ tst r1, #L_PTE_PXN
4327+ orrne r3, r3, #PTE_EXT_PXN
4328+
4329 tst r1, #L_PTE_YOUNG
4330 tstne r1, #L_PTE_VALID
4331 #ifndef CONFIG_CPU_USE_DOMAINS
4332diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4333index a5bc92d..0bb4730 100644
4334--- a/arch/arm/plat-omap/sram.c
4335+++ b/arch/arm/plat-omap/sram.c
4336@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4337 * Looks like we need to preserve some bootloader code at the
4338 * beginning of SRAM for jumping to flash for reboot to work...
4339 */
4340+ pax_open_kernel();
4341 memset_io(omap_sram_base + omap_sram_skip, 0,
4342 omap_sram_size - omap_sram_skip);
4343+ pax_close_kernel();
4344 }
4345diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4346index f5144cd..71f6d1f 100644
4347--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4348+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4349@@ -47,7 +47,7 @@ struct samsung_dma_ops {
4350 int (*started)(unsigned ch);
4351 int (*flush)(unsigned ch);
4352 int (*stop)(unsigned ch);
4353-};
4354+} __no_const;
4355
4356 extern void *samsung_dmadev_get_ops(void);
4357 extern void *s3c_dma_get_ops(void);
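
Note: the __no_const annotations added here and in the omap_hwmod hunk earlier serve grsecurity's constify plugin: ops tables are made const by default so their function pointers live in read-only memory, and __no_const opts a type back out when its fields genuinely are assigned at runtime. A sketch of the idea with the attribute stubbed out:

/* __no_const is a grsecurity plugin attribute; stubbed to nothing here. */
#include <stdio.h>

#define __no_const

struct dma_ops {
	int (*request)(unsigned ch);
	int (*release)(unsigned ch);
} __no_const;

static int req(unsigned ch) { (void)ch; return 0; }
static int rel(unsigned ch) { (void)ch; return 0; }

/* The common case: fully const, so the pointers land in .rodata. */
static const struct dma_ops fixed_ops = { .request = req, .release = rel };

int main(void)
{
	struct dma_ops runtime_ops;	/* filled in late, hence __no_const */
	runtime_ops = fixed_ops;
	printf("%d\n", runtime_ops.request(0));
	return 0;
}
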
4358diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
4359index 0c3ba9f..95722b3 100644
4360--- a/arch/arm64/kernel/debug-monitors.c
4361+++ b/arch/arm64/kernel/debug-monitors.c
4362@@ -151,7 +151,7 @@ static int __cpuinit os_lock_notify(struct notifier_block *self,
4363 return NOTIFY_OK;
4364 }
4365
4366-static struct notifier_block __cpuinitdata os_lock_nb = {
4367+static struct notifier_block os_lock_nb = {
4368 .notifier_call = os_lock_notify,
4369 };
4370
4371diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
4372index 5ab825c..96aaec8 100644
4373--- a/arch/arm64/kernel/hw_breakpoint.c
4374+++ b/arch/arm64/kernel/hw_breakpoint.c
4375@@ -831,7 +831,7 @@ static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self,
4376 return NOTIFY_OK;
4377 }
4378
4379-static struct notifier_block __cpuinitdata hw_breakpoint_reset_nb = {
4380+static struct notifier_block hw_breakpoint_reset_nb = {
4381 .notifier_call = hw_breakpoint_reset_notify,
4382 };
4383
4384diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4385index c3a58a1..78fbf54 100644
4386--- a/arch/avr32/include/asm/cache.h
4387+++ b/arch/avr32/include/asm/cache.h
4388@@ -1,8 +1,10 @@
4389 #ifndef __ASM_AVR32_CACHE_H
4390 #define __ASM_AVR32_CACHE_H
4391
4392+#include <linux/const.h>
4393+
4394 #define L1_CACHE_SHIFT 5
4395-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4396+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4397
4398 /*
4399 * Memory returned by kmalloc() may be used for DMA, so we must make
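
Note: the recurring cache.h change (here and on blackfin, cris, frv, hexagon, ia64 below) rewrites L1_CACHE_BYTES as _AC(1,UL) << L1_CACHE_SHIFT so the constant is unsigned long in C but a bare literal in assembly. The _AC() definitions below paraphrase include/uapi/linux/const.h:

#include <stdio.h>

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* assembler: no type suffix */
#else
#define __AC(X, Y)	(X##Y)		/* C: paste the suffix on */
#define _AC(X, Y)	__AC(X, Y)
#endif

#define L1_CACHE_SHIFT	5
#define L1_CACHE_BYTES	(_AC(1,UL) << L1_CACHE_SHIFT)

int main(void)
{
	/* with a plain int 1, mask arithmetic against 64-bit values can
	 * sign-extend; 1UL keeps the whole expression unsigned long */
	unsigned long mask = ~(L1_CACHE_BYTES - 1);
	printf("L1_CACHE_BYTES = %lu, align mask = %#lx\n",
	       (unsigned long)L1_CACHE_BYTES, mask);
	return 0;
}
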
4400diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4401index e2c3287..6c4f98c 100644
4402--- a/arch/avr32/include/asm/elf.h
4403+++ b/arch/avr32/include/asm/elf.h
4404@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4405 the loader. We need to make sure that it is out of the way of the program
4406 that it will "exec", and that there is sufficient room for the brk. */
4407
4408-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4409+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4410
4411+#ifdef CONFIG_PAX_ASLR
4412+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4413+
4414+#define PAX_DELTA_MMAP_LEN 15
4415+#define PAX_DELTA_STACK_LEN 15
4416+#endif
4417
4418 /* This yields a mask that user programs can use to figure out what
4419 instruction set this CPU supports. This could be done in user space,
4420diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4421index 479330b..53717a8 100644
4422--- a/arch/avr32/include/asm/kmap_types.h
4423+++ b/arch/avr32/include/asm/kmap_types.h
4424@@ -2,9 +2,9 @@
4425 #define __ASM_AVR32_KMAP_TYPES_H
4426
4427 #ifdef CONFIG_DEBUG_HIGHMEM
4428-# define KM_TYPE_NR 29
4429+# define KM_TYPE_NR 30
4430 #else
4431-# define KM_TYPE_NR 14
4432+# define KM_TYPE_NR 15
4433 #endif
4434
4435 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4436diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4437index b2f2d2d..d1c85cb 100644
4438--- a/arch/avr32/mm/fault.c
4439+++ b/arch/avr32/mm/fault.c
4440@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4441
4442 int exception_trace = 1;
4443
4444+#ifdef CONFIG_PAX_PAGEEXEC
4445+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4446+{
4447+ unsigned long i;
4448+
4449+ printk(KERN_ERR "PAX: bytes at PC: ");
4450+ for (i = 0; i < 20; i++) {
4451+ unsigned char c;
4452+ if (get_user(c, (unsigned char *)pc+i))
4453+			printk(KERN_CONT "?? ");
4454+ else
4455+ printk(KERN_CONT "%02x ", c);
4456+ }
4457+ printk("\n");
4458+}
4459+#endif
4460+
4461 /*
4462 * This routine handles page faults. It determines the address and the
4463 * problem, and then passes it off to one of the appropriate routines.
4464@@ -174,6 +191,16 @@ bad_area:
4465 up_read(&mm->mmap_sem);
4466
4467 if (user_mode(regs)) {
4468+
4469+#ifdef CONFIG_PAX_PAGEEXEC
4470+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4471+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4472+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4473+ do_group_exit(SIGKILL);
4474+ }
4475+ }
4476+#endif
4477+
4478 if (exception_trace && printk_ratelimit())
4479 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4480 "sp %08lx ecr %lu\n",
4481diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4482index 568885a..f8008df 100644
4483--- a/arch/blackfin/include/asm/cache.h
4484+++ b/arch/blackfin/include/asm/cache.h
4485@@ -7,6 +7,7 @@
4486 #ifndef __ARCH_BLACKFIN_CACHE_H
4487 #define __ARCH_BLACKFIN_CACHE_H
4488
4489+#include <linux/const.h>
4490 #include <linux/linkage.h> /* for asmlinkage */
4491
4492 /*
4493@@ -14,7 +15,7 @@
4494 * Blackfin loads 32 bytes for cache
4495 */
4496 #define L1_CACHE_SHIFT 5
4497-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4498+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4499 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4500
4501 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4502diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4503index aea2718..3639a60 100644
4504--- a/arch/cris/include/arch-v10/arch/cache.h
4505+++ b/arch/cris/include/arch-v10/arch/cache.h
4506@@ -1,8 +1,9 @@
4507 #ifndef _ASM_ARCH_CACHE_H
4508 #define _ASM_ARCH_CACHE_H
4509
4510+#include <linux/const.h>
4511 /* Etrax 100LX have 32-byte cache-lines. */
4512-#define L1_CACHE_BYTES 32
4513 #define L1_CACHE_SHIFT 5
4514+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4515
4516 #endif /* _ASM_ARCH_CACHE_H */
4517diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4518index 7caf25d..ee65ac5 100644
4519--- a/arch/cris/include/arch-v32/arch/cache.h
4520+++ b/arch/cris/include/arch-v32/arch/cache.h
4521@@ -1,11 +1,12 @@
4522 #ifndef _ASM_CRIS_ARCH_CACHE_H
4523 #define _ASM_CRIS_ARCH_CACHE_H
4524
4525+#include <linux/const.h>
4526 #include <arch/hwregs/dma.h>
4527
4528 /* A cache-line is 32 bytes. */
4529-#define L1_CACHE_BYTES 32
4530 #define L1_CACHE_SHIFT 5
4531+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4532
4533 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4534
4535diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
4536index b86329d..6709906 100644
4537--- a/arch/frv/include/asm/atomic.h
4538+++ b/arch/frv/include/asm/atomic.h
4539@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
4540 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
4541 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
4542
4543+#define atomic64_read_unchecked(v) atomic64_read(v)
4544+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4545+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4546+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4547+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4548+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4549+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4550+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4551+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4552+
4553 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
4554 {
4555 int c, old;
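
Note: under PAX_REFCOUNT the plain atomic ops are instrumented to trap on overflow, and the *_unchecked variants exist for counters that may legitimately wrap; on frv (and ia64 below) there is no instrumented build, so the unchecked names simply alias the plain ops, as the #defines above show. A portable sketch of the checked/unchecked split, assuming GCC/Clang's __builtin_add_overflow and using abort() in place of the kernel's trap-and-report:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int counter; } atomic_t;

static void atomic_add(int i, atomic_t *v)		/* checked */
{
	int sum;
	if (__builtin_add_overflow(v->counter, i, &sum))
		abort();	/* kernel: report the overflow and contain it */
	v->counter = sum;
}

static void atomic_add_unchecked(int i, atomic_t *v)	/* may wrap */
{
	v->counter = (int)((unsigned)v->counter + (unsigned)i);
}

int main(void)
{
	atomic_t stat = { INT_MAX };
	atomic_add_unchecked(1, &stat);	/* fine: statistics may wrap */
	printf("wrapped to %d\n", stat.counter);
	atomic_add(1, &stat);		/* would abort if it overflowed */
	return 0;
}
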
4556diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
4557index 2797163..c2a401d 100644
4558--- a/arch/frv/include/asm/cache.h
4559+++ b/arch/frv/include/asm/cache.h
4560@@ -12,10 +12,11 @@
4561 #ifndef __ASM_CACHE_H
4562 #define __ASM_CACHE_H
4563
4564+#include <linux/const.h>
4565
4566 /* bytes per L1 cache line */
4567 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
4568-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4569+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4570
4571 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4572 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4573diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
4574index 43901f2..0d8b865 100644
4575--- a/arch/frv/include/asm/kmap_types.h
4576+++ b/arch/frv/include/asm/kmap_types.h
4577@@ -2,6 +2,6 @@
4578 #ifndef _ASM_KMAP_TYPES_H
4579 #define _ASM_KMAP_TYPES_H
4580
4581-#define KM_TYPE_NR 17
4582+#define KM_TYPE_NR 18
4583
4584 #endif
4585diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
4586index 385fd30..3aaf4fe 100644
4587--- a/arch/frv/mm/elf-fdpic.c
4588+++ b/arch/frv/mm/elf-fdpic.c
4589@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4590 {
4591 struct vm_area_struct *vma;
4592 unsigned long limit;
4593+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
4594
4595 if (len > TASK_SIZE)
4596 return -ENOMEM;
4597@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4598 if (addr) {
4599 addr = PAGE_ALIGN(addr);
4600 vma = find_vma(current->mm, addr);
4601- if (TASK_SIZE - len >= addr &&
4602- (!vma || addr + len <= vma->vm_start))
4603+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4604 goto success;
4605 }
4606
4607@@ -89,7 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4608 for (; vma; vma = vma->vm_next) {
4609 if (addr > limit)
4610 break;
4611- if (addr + len <= vma->vm_start)
4612+ if (check_heap_stack_gap(vma, addr, len, offset))
4613 goto success;
4614 addr = vma->vm_end;
4615 }
4616@@ -104,7 +104,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4617 for (; vma; vma = vma->vm_next) {
4618 if (addr > limit)
4619 break;
4620- if (addr + len <= vma->vm_start)
4621+ if (check_heap_stack_gap(vma, addr, len, offset))
4622 goto success;
4623 addr = vma->vm_end;
4624 }
4625diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
4626index f4ca594..adc72fd6 100644
4627--- a/arch/hexagon/include/asm/cache.h
4628+++ b/arch/hexagon/include/asm/cache.h
4629@@ -21,9 +21,11 @@
4630 #ifndef __ASM_CACHE_H
4631 #define __ASM_CACHE_H
4632
4633+#include <linux/const.h>
4634+
4635 /* Bytes per L1 cache line */
4636-#define L1_CACHE_SHIFT (5)
4637-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4638+#define L1_CACHE_SHIFT 5
4639+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4640
4641 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
4642 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
4643diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
4644index 6e6fe18..a6ae668 100644
4645--- a/arch/ia64/include/asm/atomic.h
4646+++ b/arch/ia64/include/asm/atomic.h
4647@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
4648 #define atomic64_inc(v) atomic64_add(1, (v))
4649 #define atomic64_dec(v) atomic64_sub(1, (v))
4650
4651+#define atomic64_read_unchecked(v) atomic64_read(v)
4652+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4653+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4654+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4655+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4656+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4657+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4658+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4659+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4660+
4661 /* Atomic operations are already serializing */
4662 #define smp_mb__before_atomic_dec() barrier()
4663 #define smp_mb__after_atomic_dec() barrier()
4664diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
4665index 988254a..e1ee885 100644
4666--- a/arch/ia64/include/asm/cache.h
4667+++ b/arch/ia64/include/asm/cache.h
4668@@ -1,6 +1,7 @@
4669 #ifndef _ASM_IA64_CACHE_H
4670 #define _ASM_IA64_CACHE_H
4671
4672+#include <linux/const.h>
4673
4674 /*
4675 * Copyright (C) 1998-2000 Hewlett-Packard Co
4676@@ -9,7 +10,7 @@
4677
4678 /* Bytes per L1 (data) cache line. */
4679 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
4680-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4681+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4682
4683 #ifdef CONFIG_SMP
4684 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
4685diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
4686index b5298eb..67c6e62 100644
4687--- a/arch/ia64/include/asm/elf.h
4688+++ b/arch/ia64/include/asm/elf.h
4689@@ -42,6 +42,13 @@
4690 */
4691 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
4692
4693+#ifdef CONFIG_PAX_ASLR
4694+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
4695+
4696+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4697+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4698+#endif
4699+
4700 #define PT_IA_64_UNWIND 0x70000001
4701
4702 /* IA-64 relocations: */
4703diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
4704index 96a8d92..617a1cf 100644
4705--- a/arch/ia64/include/asm/pgalloc.h
4706+++ b/arch/ia64/include/asm/pgalloc.h
4707@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4708 pgd_val(*pgd_entry) = __pa(pud);
4709 }
4710
4711+static inline void
4712+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4713+{
4714+ pgd_populate(mm, pgd_entry, pud);
4715+}
4716+
4717 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
4718 {
4719 return quicklist_alloc(0, GFP_KERNEL, NULL);
4720@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4721 pud_val(*pud_entry) = __pa(pmd);
4722 }
4723
4724+static inline void
4725+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4726+{
4727+ pud_populate(mm, pud_entry, pmd);
4728+}
4729+
4730 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
4731 {
4732 return quicklist_alloc(0, GFP_KERNEL, NULL);
4733diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
4734index 815810c..d60bd4c 100644
4735--- a/arch/ia64/include/asm/pgtable.h
4736+++ b/arch/ia64/include/asm/pgtable.h
4737@@ -12,7 +12,7 @@
4738 * David Mosberger-Tang <davidm@hpl.hp.com>
4739 */
4740
4741-
4742+#include <linux/const.h>
4743 #include <asm/mman.h>
4744 #include <asm/page.h>
4745 #include <asm/processor.h>
4746@@ -142,6 +142,17 @@
4747 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4748 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4749 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
4750+
4751+#ifdef CONFIG_PAX_PAGEEXEC
4752+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
4753+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4754+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4755+#else
4756+# define PAGE_SHARED_NOEXEC PAGE_SHARED
4757+# define PAGE_READONLY_NOEXEC PAGE_READONLY
4758+# define PAGE_COPY_NOEXEC PAGE_COPY
4759+#endif
4760+
4761 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
4762 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
4763 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
4764diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
4765index 54ff557..70c88b7 100644
4766--- a/arch/ia64/include/asm/spinlock.h
4767+++ b/arch/ia64/include/asm/spinlock.h
4768@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
4769 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
4770
4771 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
4772- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
4773+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
4774 }
4775
4776 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
4777diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
4778index 449c8c0..50cdf87 100644
4779--- a/arch/ia64/include/asm/uaccess.h
4780+++ b/arch/ia64/include/asm/uaccess.h
4781@@ -42,6 +42,8 @@
4782 #include <asm/pgtable.h>
4783 #include <asm/io.h>
4784
4785+extern void check_object_size(const void *ptr, unsigned long n, bool to);
4786+
4787 /*
4788 * For historical reasons, the following macros are grossly misnamed:
4789 */
4790@@ -240,12 +242,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
4791 static inline unsigned long
4792 __copy_to_user (void __user *to, const void *from, unsigned long count)
4793 {
4794+ if (count > INT_MAX)
4795+ return count;
4796+
4797+ if (!__builtin_constant_p(count))
4798+ check_object_size(from, count, true);
4799+
4800 return __copy_user(to, (__force void __user *) from, count);
4801 }
4802
4803 static inline unsigned long
4804 __copy_from_user (void *to, const void __user *from, unsigned long count)
4805 {
4806+ if (count > INT_MAX)
4807+ return count;
4808+
4809+ if (!__builtin_constant_p(count))
4810+ check_object_size(to, count, false);
4811+
4812 return __copy_user((__force void __user *) to, from, count);
4813 }
4814
4815@@ -255,10 +269,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
4816 ({ \
4817 void __user *__cu_to = (to); \
4818 const void *__cu_from = (from); \
4819- long __cu_len = (n); \
4820+ unsigned long __cu_len = (n); \
4821 \
4822- if (__access_ok(__cu_to, __cu_len, get_fs())) \
4823+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
4824+ if (!__builtin_constant_p(n)) \
4825+ check_object_size(__cu_from, __cu_len, true); \
4826 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
4827+ } \
4828 __cu_len; \
4829 })
4830
4831@@ -266,11 +283,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
4832 ({ \
4833 void *__cu_to = (to); \
4834 const void __user *__cu_from = (from); \
4835- long __cu_len = (n); \
4836+ unsigned long __cu_len = (n); \
4837 \
4838 __chk_user_ptr(__cu_from); \
4839- if (__access_ok(__cu_from, __cu_len, get_fs())) \
4840+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
4841+ if (!__builtin_constant_p(n)) \
4842+ check_object_size(__cu_to, __cu_len, false); \
4843 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
4844+ } \
4845 __cu_len; \
4846 })
4847
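
Note: the uaccess.h hunks above add two layers: counts above INT_MAX fail fast (catching a negative length cast to unsigned long), and non-constant sizes go through check_object_size(), the usercopy object-bounds check. A sketch of the wrapper pattern with the bounds check stubbed to a single known object:

#include <limits.h>
#include <stdio.h>
#include <string.h>

static char obj[64];

static int check_object_size(const void *p, unsigned long n)
{
	const char *c = p;	/* stub: stay within the one known object */
	return c >= obj && c + n <= obj + sizeof(obj);
}

static unsigned long copy_from_obj(void *to, const void *from,
				   unsigned long count)
{
	if (count > INT_MAX)	/* e.g. (size_t)-1 from a bad caller */
		return count;	/* report everything as uncopied */
	if (!check_object_size(from, count))
		return count;
	memcpy(to, from, count);
	return 0;		/* 0 bytes left uncopied */
}

int main(void)
{
	char dst[64];
	printf("%lu\n", copy_from_obj(dst, obj, 64));			/* 0 */
	printf("%lu\n", copy_from_obj(dst, obj, (unsigned long)-1));	/* huge */
	return 0;
}
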
4848diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
4849index 2d67317..07d8bfa 100644
4850--- a/arch/ia64/kernel/err_inject.c
4851+++ b/arch/ia64/kernel/err_inject.c
4852@@ -256,7 +256,7 @@ static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
4853 return NOTIFY_OK;
4854 }
4855
4856-static struct notifier_block __cpuinitdata err_inject_cpu_notifier =
4857+static struct notifier_block err_inject_cpu_notifier =
4858 {
4859 .notifier_call = err_inject_cpu_callback,
4860 };
4861diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
4862index 65bf9cd..794f06b 100644
4863--- a/arch/ia64/kernel/mca.c
4864+++ b/arch/ia64/kernel/mca.c
4865@@ -1922,7 +1922,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
4866 return NOTIFY_OK;
4867 }
4868
4869-static struct notifier_block mca_cpu_notifier __cpuinitdata = {
4870+static struct notifier_block mca_cpu_notifier = {
4871 .notifier_call = mca_cpu_callback
4872 };
4873
4874diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
4875index 24603be..948052d 100644
4876--- a/arch/ia64/kernel/module.c
4877+++ b/arch/ia64/kernel/module.c
4878@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
4879 void
4880 module_free (struct module *mod, void *module_region)
4881 {
4882- if (mod && mod->arch.init_unw_table &&
4883- module_region == mod->module_init) {
4884+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
4885 unw_remove_unwind_table(mod->arch.init_unw_table);
4886 mod->arch.init_unw_table = NULL;
4887 }
4888@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
4889 }
4890
4891 static inline int
4892+in_init_rx (const struct module *mod, uint64_t addr)
4893+{
4894+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
4895+}
4896+
4897+static inline int
4898+in_init_rw (const struct module *mod, uint64_t addr)
4899+{
4900+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
4901+}
4902+
4903+static inline int
4904 in_init (const struct module *mod, uint64_t addr)
4905 {
4906- return addr - (uint64_t) mod->module_init < mod->init_size;
4907+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
4908+}
4909+
4910+static inline int
4911+in_core_rx (const struct module *mod, uint64_t addr)
4912+{
4913+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
4914+}
4915+
4916+static inline int
4917+in_core_rw (const struct module *mod, uint64_t addr)
4918+{
4919+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
4920 }
4921
4922 static inline int
4923 in_core (const struct module *mod, uint64_t addr)
4924 {
4925- return addr - (uint64_t) mod->module_core < mod->core_size;
4926+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
4927 }
4928
4929 static inline int
4930@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
4931 break;
4932
4933 case RV_BDREL:
4934- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
4935+ if (in_init_rx(mod, val))
4936+ val -= (uint64_t) mod->module_init_rx;
4937+ else if (in_init_rw(mod, val))
4938+ val -= (uint64_t) mod->module_init_rw;
4939+ else if (in_core_rx(mod, val))
4940+ val -= (uint64_t) mod->module_core_rx;
4941+ else if (in_core_rw(mod, val))
4942+ val -= (uint64_t) mod->module_core_rw;
4943 break;
4944
4945 case RV_LTV:
4946@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
4947 * addresses have been selected...
4948 */
4949 uint64_t gp;
4950- if (mod->core_size > MAX_LTOFF)
4951+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
4952 /*
4953 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
4954 * at the end of the module.
4955 */
4956- gp = mod->core_size - MAX_LTOFF / 2;
4957+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
4958 else
4959- gp = mod->core_size / 2;
4960- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
4961+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
4962+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
4963 mod->arch.gp = gp;
4964 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
4965 }
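
Note: the in_init_rx()/in_core_rw() helpers above all rely on the unsigned-subtraction idiom addr - base < size, which folds the two-sided test base <= addr < base + size into one comparison: if addr < base, the subtraction wraps to a huge value and fails the < test. A quick demonstration:

#include <stdint.h>
#include <stdio.h>

static int in_range(uint64_t addr, uint64_t base, uint64_t size)
{
	return addr - base < size;	/* single compare, both bounds */
}

int main(void)
{
	uint64_t base = 0xffffffff80000000ULL, size = 0x100000;
	printf("%d\n", in_range(base + 0x10, base, size));	/* 1 */
	printf("%d\n", in_range(base - 1, base, size));		/* 0: wraps */
	printf("%d\n", in_range(base + size, base, size));	/* 0: one past */
	return 0;
}
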
4966diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
4967index 77597e5..6f28f3f 100644
4968--- a/arch/ia64/kernel/palinfo.c
4969+++ b/arch/ia64/kernel/palinfo.c
4970@@ -1045,7 +1045,7 @@ static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
4971 return NOTIFY_OK;
4972 }
4973
4974-static struct notifier_block __refdata palinfo_cpu_notifier =
4975+static struct notifier_block palinfo_cpu_notifier =
4976 {
4977 .notifier_call = palinfo_cpu_callback,
4978 .priority = 0,
4979diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
4980index 79802e5..1a89ec5 100644
4981--- a/arch/ia64/kernel/salinfo.c
4982+++ b/arch/ia64/kernel/salinfo.c
4983@@ -616,7 +616,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
4984 return NOTIFY_OK;
4985 }
4986
4987-static struct notifier_block salinfo_cpu_notifier __cpuinitdata =
4988+static struct notifier_block salinfo_cpu_notifier =
4989 {
4990 .notifier_call = salinfo_cpu_callback,
4991 .priority = 0,
4992diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
4993index d9439ef..d0cac6b 100644
4994--- a/arch/ia64/kernel/sys_ia64.c
4995+++ b/arch/ia64/kernel/sys_ia64.c
4996@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
4997 unsigned long start_addr, align_mask = PAGE_SIZE - 1;
4998 struct mm_struct *mm = current->mm;
4999 struct vm_area_struct *vma;
5000+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5001
5002 if (len > RGN_MAP_LIMIT)
5003 return -ENOMEM;
5004@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5005 if (REGION_NUMBER(addr) == RGN_HPAGE)
5006 addr = 0;
5007 #endif
5008+
5009+#ifdef CONFIG_PAX_RANDMMAP
5010+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5011+ addr = mm->free_area_cache;
5012+ else
5013+#endif
5014+
5015 if (!addr)
5016 addr = mm->free_area_cache;
5017
5018@@ -61,14 +69,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5019 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
5020 /* At this point: (!vma || addr < vma->vm_end). */
5021 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
5022- if (start_addr != TASK_UNMAPPED_BASE) {
5023+ if (start_addr != mm->mmap_base) {
5024 /* Start a new search --- just in case we missed some holes. */
5025- addr = TASK_UNMAPPED_BASE;
5026+ addr = mm->mmap_base;
5027 goto full_search;
5028 }
5029 return -ENOMEM;
5030 }
5031- if (!vma || addr + len <= vma->vm_start) {
5032+ if (check_heap_stack_gap(vma, addr, len, offset)) {
5033 /* Remember the address where we stopped this search: */
5034 mm->free_area_cache = addr + len;
5035 return addr;
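
Both hunks in sys_ia64.c replace the open-coded "!vma || addr + len <= vma->vm_start" test with check_heap_stack_gap(), which additionally reserves the slack returned by gr_rand_threadstack_offset(). The helper is defined elsewhere in this patch; a minimal user-space stand-in for the shape of the test (stack guard handling omitted):

#include <stdbool.h>
#include <stdio.h>

struct vm_area_sketch {
	unsigned long vm_start;
	unsigned long vm_end;
};

/* stand-in only: the real helper also handles stack-guard semantics */
static bool gap_ok(const struct vm_area_sketch *vma, unsigned long addr,
		   unsigned long len, unsigned long offset)
{
	if (!vma)
		return true;			/* no mapping above: fits */
	return addr + len + offset <= vma->vm_start;
}

int main(void)
{
	struct vm_area_sketch next = { 0x40000000UL, 0x40010000UL };

	printf("%d\n", gap_ok(&next, 0x3fff0000UL, 0x8000, 0x1000));
	return 0;
}
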
5036diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
5037index dc00b2c..cce53c2 100644
5038--- a/arch/ia64/kernel/topology.c
5039+++ b/arch/ia64/kernel/topology.c
5040@@ -445,7 +445,7 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
5041 return NOTIFY_OK;
5042 }
5043
5044-static struct notifier_block __cpuinitdata cache_cpu_notifier =
5045+static struct notifier_block cache_cpu_notifier =
5046 {
5047 .notifier_call = cache_cpu_callback
5048 };
5049diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5050index 0ccb28f..8992469 100644
5051--- a/arch/ia64/kernel/vmlinux.lds.S
5052+++ b/arch/ia64/kernel/vmlinux.lds.S
5053@@ -198,7 +198,7 @@ SECTIONS {
5054 /* Per-cpu data: */
5055 . = ALIGN(PERCPU_PAGE_SIZE);
5056 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5057- __phys_per_cpu_start = __per_cpu_load;
5058+ __phys_per_cpu_start = per_cpu_load;
5059 /*
5060 * ensure percpu data fits
5061 * into percpu page size
5062diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5063index 6cf0341..d352594 100644
5064--- a/arch/ia64/mm/fault.c
5065+++ b/arch/ia64/mm/fault.c
5066@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5067 return pte_present(pte);
5068 }
5069
5070+#ifdef CONFIG_PAX_PAGEEXEC
5071+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5072+{
5073+ unsigned long i;
5074+
5075+ printk(KERN_ERR "PAX: bytes at PC: ");
5076+ for (i = 0; i < 8; i++) {
5077+ unsigned int c;
5078+ if (get_user(c, (unsigned int *)pc+i))
5079+ printk(KERN_CONT "???????? ");
5080+ else
5081+ printk(KERN_CONT "%08x ", c);
5082+ }
5083+ printk("\n");
5084+}
5085+#endif
5086+
5087 # define VM_READ_BIT 0
5088 # define VM_WRITE_BIT 1
5089 # define VM_EXEC_BIT 2
5090@@ -149,8 +166,21 @@ retry:
5091 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5092 goto bad_area;
5093
5094- if ((vma->vm_flags & mask) != mask)
5095+ if ((vma->vm_flags & mask) != mask) {
5096+
5097+#ifdef CONFIG_PAX_PAGEEXEC
5098+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5099+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5100+ goto bad_area;
5101+
5102+ up_read(&mm->mmap_sem);
5103+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5104+ do_group_exit(SIGKILL);
5105+ }
5106+#endif
5107+
5108 goto bad_area;
5109+ }
5110
5111 /*
5112 * If for any reason at all we couldn't handle the fault, make
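
pax_report_insns() above hexdumps the instruction words around the faulting PC so a PAGEEXEC violation can be triaged from the log; get_user() tolerates unmapped addresses and "????????" is printed in their place. A user-space analogue (memcpy stands in for get_user here, so this sketch must only be pointed at readable memory):

#include <stdio.h>
#include <string.h>

static void report_insns(const void *pc, unsigned long nwords)
{
	unsigned long i;

	fprintf(stderr, "bytes at PC: ");
	for (i = 0; i < nwords; i++) {
		unsigned int c;

		/* the kernel uses get_user() and prints ???????? on fault */
		memcpy(&c, (const unsigned int *)pc + i, sizeof(c));
		fprintf(stderr, "%08x ", c);
	}
	fprintf(stderr, "\n");
}

int main(void)
{
	report_insns((const void *)(unsigned long)main, 8);
	return 0;
}
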
5113diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5114index 5ca674b..127c3cb 100644
5115--- a/arch/ia64/mm/hugetlbpage.c
5116+++ b/arch/ia64/mm/hugetlbpage.c
5117@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5118 unsigned long pgoff, unsigned long flags)
5119 {
5120 struct vm_area_struct *vmm;
5121+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5122
5123 if (len > RGN_MAP_LIMIT)
5124 return -ENOMEM;
5125@@ -171,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5126 /* At this point: (!vmm || addr < vmm->vm_end). */
5127 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
5128 return -ENOMEM;
5129- if (!vmm || (addr + len) <= vmm->vm_start)
5130+ if (check_heap_stack_gap(vmm, addr, len, offset))
5131 return addr;
5132 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
5133 }
5134diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5135index b755ea9..b9a969e 100644
5136--- a/arch/ia64/mm/init.c
5137+++ b/arch/ia64/mm/init.c
5138@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5139 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5140 vma->vm_end = vma->vm_start + PAGE_SIZE;
5141 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5142+
5143+#ifdef CONFIG_PAX_PAGEEXEC
5144+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5145+ vma->vm_flags &= ~VM_EXEC;
5146+
5147+#ifdef CONFIG_PAX_MPROTECT
5148+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5149+ vma->vm_flags &= ~VM_MAYEXEC;
5150+#endif
5151+
5152+ }
5153+#endif
5154+
5155 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5156 down_write(&current->mm->mmap_sem);
5157 if (insert_vm_struct(current->mm, vma)) {
5158diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5159index 40b3ee9..8c2c112 100644
5160--- a/arch/m32r/include/asm/cache.h
5161+++ b/arch/m32r/include/asm/cache.h
5162@@ -1,8 +1,10 @@
5163 #ifndef _ASM_M32R_CACHE_H
5164 #define _ASM_M32R_CACHE_H
5165
5166+#include <linux/const.h>
5167+
5168 /* L1 cache line size */
5169 #define L1_CACHE_SHIFT 4
5170-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5171+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5172
5173 #endif /* _ASM_M32R_CACHE_H */
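
This and the other cache.h hunks below make the same change: L1_CACHE_BYTES is derived from L1_CACHE_SHIFT via _AC(1,UL), so the constant is unsigned long in C but remains a bare integer in assembly, where a UL suffix would not assemble. _AC() lives in include/uapi/linux/const.h; a self-contained demonstration of the mechanism:

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)
#endif

#define L1_CACHE_SHIFT	4
#define L1_CACHE_BYTES	(_AC(1, UL) << L1_CACHE_SHIFT)

#include <stdio.h>
int main(void)
{
	/* unsigned long arithmetic: no sign-extension surprises in C */
	printf("%lu\n", (unsigned long)L1_CACHE_BYTES);	/* prints 16 */
	return 0;
}
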
5174diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5175index 82abd15..d95ae5d 100644
5176--- a/arch/m32r/lib/usercopy.c
5177+++ b/arch/m32r/lib/usercopy.c
5178@@ -14,6 +14,9 @@
5179 unsigned long
5180 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5181 {
5182+ if ((long)n < 0)
5183+ return n;
5184+
5185 prefetch(from);
5186 if (access_ok(VERIFY_WRITE, to, n))
5187 __copy_user(to,from,n);
5188@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5189 unsigned long
5190 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5191 {
5192+ if ((long)n < 0)
5193+ return n;
5194+
5195 prefetchw(to);
5196 if (access_ok(VERIFY_READ, from, n))
5197 __copy_user_zeroing(to,from,n);
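
The (long)n < 0 guard added to both helpers rejects lengths whose sign bit is set, i.e. sizes produced by an underflowed subtraction, before access_ok() or the copy loop can act on them. A small demonstration of what such a length looks like:

#include <stdio.h>

static unsigned long guarded_copy(unsigned long n)
{
	if ((long)n < 0)
		return n;	/* refuse: caller computed a bogus length */
	/* ... the copy would run here ... */
	return 0;
}

int main(void)
{
	unsigned long end = 16, start = 32;
	unsigned long n = end - start;	/* underflows to 2^64 - 16 */

	printf("n = %lu, refused = %lu\n", n, guarded_copy(n));
	return 0;
}
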
5198diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5199index 0395c51..5f26031 100644
5200--- a/arch/m68k/include/asm/cache.h
5201+++ b/arch/m68k/include/asm/cache.h
5202@@ -4,9 +4,11 @@
5203 #ifndef __ARCH_M68K_CACHE_H
5204 #define __ARCH_M68K_CACHE_H
5205
5206+#include <linux/const.h>
5207+
5208 /* bytes per L1 cache line */
5209 #define L1_CACHE_SHIFT 4
5210-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5211+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5212
5213 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5214
5215diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5216index 4efe96a..60e8699 100644
5217--- a/arch/microblaze/include/asm/cache.h
5218+++ b/arch/microblaze/include/asm/cache.h
5219@@ -13,11 +13,12 @@
5220 #ifndef _ASM_MICROBLAZE_CACHE_H
5221 #define _ASM_MICROBLAZE_CACHE_H
5222
5223+#include <linux/const.h>
5224 #include <asm/registers.h>
5225
5226 #define L1_CACHE_SHIFT 5
5227 /* word-granular cache in microblaze */
5228-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5229+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5230
5231 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5232
5233diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5234index 01cc6ba..bcb7a5d 100644
5235--- a/arch/mips/include/asm/atomic.h
5236+++ b/arch/mips/include/asm/atomic.h
5237@@ -21,6 +21,10 @@
5238 #include <asm/cmpxchg.h>
5239 #include <asm/war.h>
5240
5241+#ifdef CONFIG_GENERIC_ATOMIC64
5242+#include <asm-generic/atomic64.h>
5243+#endif
5244+
5245 #define ATOMIC_INIT(i) { (i) }
5246
5247 /*
5248@@ -759,6 +763,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
5249 */
5250 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
5251
5252+#define atomic64_read_unchecked(v) atomic64_read(v)
5253+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5254+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5255+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5256+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5257+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5258+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5259+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5260+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5261+
5262 #endif /* CONFIG_64BIT */
5263
5264 /*
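
The *_unchecked macros give common code a single spelling that works whether or not the architecture carries PaX REFCOUNT overflow instrumentation; here, with no instrumented variants, they simply alias the plain operations. A C11 sketch of the aliasing (all _sketch names are stand-ins):

#include <stdatomic.h>
#include <stdio.h>

typedef _Atomic long atomic64_sketch;

#define atomic64_add_sketch(i, p)	((void)atomic_fetch_add((p), (i)))
/* no overflow-checked variant on this arch: unchecked == plain */
#define atomic64_add_unchecked_sketch(i, p)	atomic64_add_sketch((i), (p))

int main(void)
{
	atomic64_sketch v = 40;

	atomic64_add_unchecked_sketch(2, &v);
	printf("%ld\n", atomic_load(&v));	/* prints 42 */
	return 0;
}
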
5265diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
5266index b4db69f..8f3b093 100644
5267--- a/arch/mips/include/asm/cache.h
5268+++ b/arch/mips/include/asm/cache.h
5269@@ -9,10 +9,11 @@
5270 #ifndef _ASM_CACHE_H
5271 #define _ASM_CACHE_H
5272
5273+#include <linux/const.h>
5274 #include <kmalloc.h>
5275
5276 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
5277-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5278+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5279
5280 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5281 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5282diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
5283index 455c0ac..ad65fbe 100644
5284--- a/arch/mips/include/asm/elf.h
5285+++ b/arch/mips/include/asm/elf.h
5286@@ -372,13 +372,16 @@ extern const char *__elf_platform;
5287 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
5288 #endif
5289
5290+#ifdef CONFIG_PAX_ASLR
5291+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5292+
5293+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5294+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5295+#endif
5296+
5297 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
5298 struct linux_binprm;
5299 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
5300 int uses_interp);
5301
5302-struct mm_struct;
5303-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
5304-#define arch_randomize_brk arch_randomize_brk
5305-
5306 #endif /* _ASM_ELF_H */
5307diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
5308index c1f6afa..38cc6e9 100644
5309--- a/arch/mips/include/asm/exec.h
5310+++ b/arch/mips/include/asm/exec.h
5311@@ -12,6 +12,6 @@
5312 #ifndef _ASM_EXEC_H
5313 #define _ASM_EXEC_H
5314
5315-extern unsigned long arch_align_stack(unsigned long sp);
5316+#define arch_align_stack(x) ((x) & ~0xfUL)
5317
5318 #endif /* _ASM_EXEC_H */
5319diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
5320index dbaec94..6a14935 100644
5321--- a/arch/mips/include/asm/page.h
5322+++ b/arch/mips/include/asm/page.h
5323@@ -96,7 +96,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
5324 #ifdef CONFIG_CPU_MIPS32
5325 typedef struct { unsigned long pte_low, pte_high; } pte_t;
5326 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
5327- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
5328+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
5329 #else
5330 typedef struct { unsigned long long pte; } pte_t;
5331 #define pte_val(x) ((x).pte)
5332diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
5333index 881d18b..cea38bc 100644
5334--- a/arch/mips/include/asm/pgalloc.h
5335+++ b/arch/mips/include/asm/pgalloc.h
5336@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5337 {
5338 set_pud(pud, __pud((unsigned long)pmd));
5339 }
5340+
5341+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5342+{
5343+ pud_populate(mm, pud, pmd);
5344+}
5345 #endif
5346
5347 /*
5348diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
5349index b2050b9..d71bb1b 100644
5350--- a/arch/mips/include/asm/thread_info.h
5351+++ b/arch/mips/include/asm/thread_info.h
5352@@ -111,6 +111,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
5353 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
5354 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
5355 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
5356+/* li takes a 32bit immediate */
5357+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
5358 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
5359
5360 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
5361@@ -126,15 +128,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
5362 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
5363 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
5364 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
5365+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
5366+
5367+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
5368
5369 /* work to do in syscall_trace_leave() */
5370-#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
5371+#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
5372
5373 /* work to do on interrupt/exception return */
5374 #define _TIF_WORK_MASK \
5375 (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)
5376 /* work to do on any return to u-space */
5377-#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT)
5378+#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT | _TIF_GRSEC_SETXID)
5379
5380 #endif /* __KERNEL__ */
5381
5382diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
5383index 9fdd8bc..4bd7f1a 100644
5384--- a/arch/mips/kernel/binfmt_elfn32.c
5385+++ b/arch/mips/kernel/binfmt_elfn32.c
5386@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
5387 #undef ELF_ET_DYN_BASE
5388 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
5389
5390+#ifdef CONFIG_PAX_ASLR
5391+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5392+
5393+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5394+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5395+#endif
5396+
5397 #include <asm/processor.h>
5398 #include <linux/module.h>
5399 #include <linux/elfcore.h>
5400diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
5401index ff44823..97f8906 100644
5402--- a/arch/mips/kernel/binfmt_elfo32.c
5403+++ b/arch/mips/kernel/binfmt_elfo32.c
5404@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
5405 #undef ELF_ET_DYN_BASE
5406 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
5407
5408+#ifdef CONFIG_PAX_ASLR
5409+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5410+
5411+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5412+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5413+#endif
5414+
5415 #include <asm/processor.h>
5416
5417 /*
5418diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
5419index a11c6f9..be5e164 100644
5420--- a/arch/mips/kernel/process.c
5421+++ b/arch/mips/kernel/process.c
5422@@ -460,15 +460,3 @@ unsigned long get_wchan(struct task_struct *task)
5423 out:
5424 return pc;
5425 }
5426-
5427-/*
5428- * Don't forget that the stack pointer must be aligned on a 8 bytes
5429- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
5430- */
5431-unsigned long arch_align_stack(unsigned long sp)
5432-{
5433- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5434- sp -= get_random_int() & ~PAGE_MASK;
5435-
5436- return sp & ALMASK;
5437-}
5438diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
5439index 4812c6d..2069554 100644
5440--- a/arch/mips/kernel/ptrace.c
5441+++ b/arch/mips/kernel/ptrace.c
5442@@ -528,6 +528,10 @@ static inline int audit_arch(void)
5443 return arch;
5444 }
5445
5446+#ifdef CONFIG_GRKERNSEC_SETXID
5447+extern void gr_delayed_cred_worker(void);
5448+#endif
5449+
5450 /*
5451 * Notification of system call entry/exit
5452 * - triggered by current->work.syscall_trace
5453@@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
5454 /* do the secure computing check first */
5455 secure_computing_strict(regs->regs[2]);
5456
5457+#ifdef CONFIG_GRKERNSEC_SETXID
5458+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5459+ gr_delayed_cred_worker();
5460+#endif
5461+
5462 if (!(current->ptrace & PT_PTRACED))
5463 goto out;
5464
5465diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
5466index d20a4bc..7096ae5 100644
5467--- a/arch/mips/kernel/scall32-o32.S
5468+++ b/arch/mips/kernel/scall32-o32.S
5469@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
5470
5471 stack_done:
5472 lw t0, TI_FLAGS($28) # syscall tracing enabled?
5473- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5474+ li t1, _TIF_SYSCALL_WORK
5475 and t0, t1
5476 bnez t0, syscall_trace_entry # -> yes
5477
5478diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
5479index b64f642..0fe6eab 100644
5480--- a/arch/mips/kernel/scall64-64.S
5481+++ b/arch/mips/kernel/scall64-64.S
5482@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
5483
5484 sd a3, PT_R26(sp) # save a3 for syscall restarting
5485
5486- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5487+ li t1, _TIF_SYSCALL_WORK
5488 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5489 and t0, t1, t0
5490 bnez t0, syscall_trace_entry
5491diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
5492index c29ac19..c592d05 100644
5493--- a/arch/mips/kernel/scall64-n32.S
5494+++ b/arch/mips/kernel/scall64-n32.S
5495@@ -47,7 +47,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
5496
5497 sd a3, PT_R26(sp) # save a3 for syscall restarting
5498
5499- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5500+ li t1, _TIF_SYSCALL_WORK
5501 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5502 and t0, t1, t0
5503 bnez t0, n32_syscall_trace_entry
5504diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
5505index cf3e75e..72e93fe 100644
5506--- a/arch/mips/kernel/scall64-o32.S
5507+++ b/arch/mips/kernel/scall64-o32.S
5508@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
5509 PTR 4b, bad_stack
5510 .previous
5511
5512- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5513+ li t1, _TIF_SYSCALL_WORK
5514 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5515 and t0, t1, t0
5516 bnez t0, trace_a_syscall
5517diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
5518index ddcec1e..c7f983e 100644
5519--- a/arch/mips/mm/fault.c
5520+++ b/arch/mips/mm/fault.c
5521@@ -27,6 +27,23 @@
5522 #include <asm/highmem.h> /* For VMALLOC_END */
5523 #include <linux/kdebug.h>
5524
5525+#ifdef CONFIG_PAX_PAGEEXEC
5526+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5527+{
5528+ unsigned long i;
5529+
5530+ printk(KERN_ERR "PAX: bytes at PC: ");
5531+ for (i = 0; i < 5; i++) {
5532+ unsigned int c;
5533+ if (get_user(c, (unsigned int *)pc+i))
5534+ printk(KERN_CONT "???????? ");
5535+ else
5536+ printk(KERN_CONT "%08x ", c);
5537+ }
5538+ printk("\n");
5539+}
5540+#endif
5541+
5542 /*
5543 * This routine handles page faults. It determines the address,
5544 * and the problem, and then passes it off to one of the appropriate
5545diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
5546index 7e5fe27..479a219 100644
5547--- a/arch/mips/mm/mmap.c
5548+++ b/arch/mips/mm/mmap.c
5549@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5550 struct vm_area_struct *vma;
5551 unsigned long addr = addr0;
5552 int do_color_align;
5553+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5554 struct vm_unmapped_area_info info;
5555
5556 if (unlikely(len > TASK_SIZE))
5557@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5558 do_color_align = 1;
5559
5560 /* requesting a specific address */
5561+
5562+#ifdef CONFIG_PAX_RANDMMAP
5563+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
5564+#endif
5565+
5566 if (addr) {
5567 if (do_color_align)
5568 addr = COLOUR_ALIGN(addr, pgoff);
5569@@ -91,8 +97,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5570 addr = PAGE_ALIGN(addr);
5571
5572 vma = find_vma(mm, addr);
5573- if (TASK_SIZE - len >= addr &&
5574- (!vma || addr + len <= vma->vm_start))
5575+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5576 return addr;
5577 }
5578
5579@@ -146,6 +151,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5580 {
5581 unsigned long random_factor = 0UL;
5582
5583+#ifdef CONFIG_PAX_RANDMMAP
5584+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5585+#endif
5586+
5587 if (current->flags & PF_RANDOMIZE) {
5588 random_factor = get_random_int();
5589 random_factor = random_factor << PAGE_SHIFT;
5590@@ -157,42 +166,27 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5591
5592 if (mmap_is_legacy()) {
5593 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5594+
5595+#ifdef CONFIG_PAX_RANDMMAP
5596+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5597+ mm->mmap_base += mm->delta_mmap;
5598+#endif
5599+
5600 mm->get_unmapped_area = arch_get_unmapped_area;
5601 mm->unmap_area = arch_unmap_area;
5602 } else {
5603 mm->mmap_base = mmap_base(random_factor);
5604+
5605+#ifdef CONFIG_PAX_RANDMMAP
5606+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5607+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5608+#endif
5609+
5610 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5611 mm->unmap_area = arch_unmap_area_topdown;
5612 }
5613 }
5614
5615-static inline unsigned long brk_rnd(void)
5616-{
5617- unsigned long rnd = get_random_int();
5618-
5619- rnd = rnd << PAGE_SHIFT;
5620- /* 8MB for 32bit, 256MB for 64bit */
5621- if (TASK_IS_32BIT_ADDR)
5622- rnd = rnd & 0x7ffffful;
5623- else
5624- rnd = rnd & 0xffffffful;
5625-
5626- return rnd;
5627-}
5628-
5629-unsigned long arch_randomize_brk(struct mm_struct *mm)
5630-{
5631- unsigned long base = mm->brk;
5632- unsigned long ret;
5633-
5634- ret = PAGE_ALIGN(base + brk_rnd());
5635-
5636- if (ret < mm->brk)
5637- return mm->brk;
5638-
5639- return ret;
5640-}
5641-
5642 int __virt_addr_valid(const volatile void *kaddr)
5643 {
5644 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
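
When RANDMMAP is active, the arch_pick_mmap_layout() hunk above skips the stock PF_RANDOMIZE random_factor and instead shifts the base by the PaX deltas: up by delta_mmap for the bottom-up legacy layout, down by delta_mmap + delta_stack for the top-down layout. A stand-alone sketch of that adjustment with made-up numbers:

#include <stdio.h>

#define MF_PAX_RANDMMAP 0x1

struct mm_sketch {
	unsigned long pax_flags, mmap_base, delta_mmap, delta_stack;
};

static void pick_layout(struct mm_sketch *mm, int legacy, unsigned long base)
{
	mm->mmap_base = base;
	if (!(mm->pax_flags & MF_PAX_RANDMMAP))
		return;
	if (legacy)
		mm->mmap_base += mm->delta_mmap;	/* bottom-up: move up */
	else
		mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
}

int main(void)
{
	struct mm_sketch mm = { MF_PAX_RANDMMAP, 0, 0x100000, 0x40000 };

	pick_layout(&mm, 0, 0x7fff00000000UL);
	printf("top-down base: %#lx\n", mm.mmap_base);
	return 0;
}
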
5645diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
5646index 967d144..db12197 100644
5647--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
5648+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
5649@@ -11,12 +11,14 @@
5650 #ifndef _ASM_PROC_CACHE_H
5651 #define _ASM_PROC_CACHE_H
5652
5653+#include <linux/const.h>
5654+
5655 /* L1 cache */
5656
5657 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
5658 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
5659-#define L1_CACHE_BYTES 16 /* bytes per entry */
5660 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
5661+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
5662 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
5663
5664 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
5665diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5666index bcb5df2..84fabd2 100644
5667--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5668+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5669@@ -16,13 +16,15 @@
5670 #ifndef _ASM_PROC_CACHE_H
5671 #define _ASM_PROC_CACHE_H
5672
5673+#include <linux/const.h>
5674+
5675 /*
5676 * L1 cache
5677 */
5678 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
5679 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
5680-#define L1_CACHE_BYTES 32 /* bytes per entry */
5681 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
5682+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
5683 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
5684
5685 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
5686diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
5687index 4ce7a01..449202a 100644
5688--- a/arch/openrisc/include/asm/cache.h
5689+++ b/arch/openrisc/include/asm/cache.h
5690@@ -19,11 +19,13 @@
5691 #ifndef __ASM_OPENRISC_CACHE_H
5692 #define __ASM_OPENRISC_CACHE_H
5693
5694+#include <linux/const.h>
5695+
5696 /* FIXME: How can we replace these with values from the CPU...
5697 * they shouldn't be hard-coded!
5698 */
5699
5700-#define L1_CACHE_BYTES 16
5701 #define L1_CACHE_SHIFT 4
5702+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5703
5704 #endif /* __ASM_OPENRISC_CACHE_H */
5705diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
5706index af9cf30..2aae9b2 100644
5707--- a/arch/parisc/include/asm/atomic.h
5708+++ b/arch/parisc/include/asm/atomic.h
5709@@ -229,6 +229,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
5710
5711 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5712
5713+#define atomic64_read_unchecked(v) atomic64_read(v)
5714+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5715+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5716+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5717+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5718+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5719+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5720+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5721+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5722+
5723 #endif /* !CONFIG_64BIT */
5724
5725
5726diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
5727index 47f11c7..3420df2 100644
5728--- a/arch/parisc/include/asm/cache.h
5729+++ b/arch/parisc/include/asm/cache.h
5730@@ -5,6 +5,7 @@
5731 #ifndef __ARCH_PARISC_CACHE_H
5732 #define __ARCH_PARISC_CACHE_H
5733
5734+#include <linux/const.h>
5735
5736 /*
5737 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
5738@@ -15,13 +16,13 @@
5739 * just ruin performance.
5740 */
5741 #ifdef CONFIG_PA20
5742-#define L1_CACHE_BYTES 64
5743 #define L1_CACHE_SHIFT 6
5744 #else
5745-#define L1_CACHE_BYTES 32
5746 #define L1_CACHE_SHIFT 5
5747 #endif
5748
5749+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5750+
5751 #ifndef __ASSEMBLY__
5752
5753 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5754diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
5755index 19f6cb1..6c78cf2 100644
5756--- a/arch/parisc/include/asm/elf.h
5757+++ b/arch/parisc/include/asm/elf.h
5758@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
5759
5760 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
5761
5762+#ifdef CONFIG_PAX_ASLR
5763+#define PAX_ELF_ET_DYN_BASE 0x10000UL
5764+
5765+#define PAX_DELTA_MMAP_LEN 16
5766+#define PAX_DELTA_STACK_LEN 16
5767+#endif
5768+
5769 /* This yields a mask that user programs can use to figure out what
5770 instruction set this CPU supports. This could be done in user space,
5771 but it's not easy, and we've already done it here. */
5772diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
5773index fc987a1..6e068ef 100644
5774--- a/arch/parisc/include/asm/pgalloc.h
5775+++ b/arch/parisc/include/asm/pgalloc.h
5776@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
5777 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
5778 }
5779
5780+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
5781+{
5782+ pgd_populate(mm, pgd, pmd);
5783+}
5784+
5785 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
5786 {
5787 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
5788@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
5789 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
5790 #define pmd_free(mm, x) do { } while (0)
5791 #define pgd_populate(mm, pmd, pte) BUG()
5792+#define pgd_populate_kernel(mm, pmd, pte) BUG()
5793
5794 #endif
5795
5796diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
5797index 7df49fa..38b62bf 100644
5798--- a/arch/parisc/include/asm/pgtable.h
5799+++ b/arch/parisc/include/asm/pgtable.h
5800@@ -218,6 +218,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
5801 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
5802 #define PAGE_COPY PAGE_EXECREAD
5803 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
5804+
5805+#ifdef CONFIG_PAX_PAGEEXEC
5806+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
5807+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
5808+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
5809+#else
5810+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5811+# define PAGE_COPY_NOEXEC PAGE_COPY
5812+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5813+#endif
5814+
5815 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
5816 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
5817 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
5818diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
5819index 4ba2c93..f5e3974 100644
5820--- a/arch/parisc/include/asm/uaccess.h
5821+++ b/arch/parisc/include/asm/uaccess.h
5822@@ -251,10 +251,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
5823 const void __user *from,
5824 unsigned long n)
5825 {
5826- int sz = __compiletime_object_size(to);
5827+ size_t sz = __compiletime_object_size(to);
5828 int ret = -EFAULT;
5829
5830- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
5831+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
5832 ret = __copy_from_user(to, from, n);
5833 else
5834 copy_from_user_overflow();
5835diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
5836index 2a625fb..9908930 100644
5837--- a/arch/parisc/kernel/module.c
5838+++ b/arch/parisc/kernel/module.c
5839@@ -98,16 +98,38 @@
5840
5841 /* three functions to determine where in the module core
5842 * or init pieces the location is */
5843+static inline int in_init_rx(struct module *me, void *loc)
5844+{
5845+ return (loc >= me->module_init_rx &&
5846+ loc < (me->module_init_rx + me->init_size_rx));
5847+}
5848+
5849+static inline int in_init_rw(struct module *me, void *loc)
5850+{
5851+ return (loc >= me->module_init_rw &&
5852+ loc < (me->module_init_rw + me->init_size_rw));
5853+}
5854+
5855 static inline int in_init(struct module *me, void *loc)
5856 {
5857- return (loc >= me->module_init &&
5858- loc <= (me->module_init + me->init_size));
5859+ return in_init_rx(me, loc) || in_init_rw(me, loc);
5860+}
5861+
5862+static inline int in_core_rx(struct module *me, void *loc)
5863+{
5864+ return (loc >= me->module_core_rx &&
5865+ loc < (me->module_core_rx + me->core_size_rx));
5866+}
5867+
5868+static inline int in_core_rw(struct module *me, void *loc)
5869+{
5870+ return (loc >= me->module_core_rw &&
5871+ loc < (me->module_core_rw + me->core_size_rw));
5872 }
5873
5874 static inline int in_core(struct module *me, void *loc)
5875 {
5876- return (loc >= me->module_core &&
5877- loc <= (me->module_core + me->core_size));
5878+ return in_core_rx(me, loc) || in_core_rw(me, loc);
5879 }
5880
5881 static inline int in_local(struct module *me, void *loc)
5882@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
5883 }
5884
5885 /* align things a bit */
5886- me->core_size = ALIGN(me->core_size, 16);
5887- me->arch.got_offset = me->core_size;
5888- me->core_size += gots * sizeof(struct got_entry);
5889+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
5890+ me->arch.got_offset = me->core_size_rw;
5891+ me->core_size_rw += gots * sizeof(struct got_entry);
5892
5893- me->core_size = ALIGN(me->core_size, 16);
5894- me->arch.fdesc_offset = me->core_size;
5895- me->core_size += fdescs * sizeof(Elf_Fdesc);
5896+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
5897+ me->arch.fdesc_offset = me->core_size_rw;
5898+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
5899
5900 me->arch.got_max = gots;
5901 me->arch.fdesc_max = fdescs;
5902@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
5903
5904 BUG_ON(value == 0);
5905
5906- got = me->module_core + me->arch.got_offset;
5907+ got = me->module_core_rw + me->arch.got_offset;
5908 for (i = 0; got[i].addr; i++)
5909 if (got[i].addr == value)
5910 goto out;
5911@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
5912 #ifdef CONFIG_64BIT
5913 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
5914 {
5915- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
5916+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
5917
5918 if (!value) {
5919 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
5920@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
5921
5922 /* Create new one */
5923 fdesc->addr = value;
5924- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
5925+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
5926 return (Elf_Addr)fdesc;
5927 }
5928 #endif /* CONFIG_64BIT */
5929@@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
5930
5931 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
5932 end = table + sechdrs[me->arch.unwind_section].sh_size;
5933- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
5934+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
5935
5936 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
5937 me->arch.unwind_section, table, end, gp);
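
Besides splitting the single core/init region into RX and RW halves, the new in_init*/in_core* helpers also tighten the upper bound from <= to <, making each range properly half-open. A generic sketch of the membership test:

#include <stdbool.h>
#include <stdio.h>

struct range { char *base; unsigned long size; };

static bool in_range(const struct range *r, const void *loc)
{
	const char *p = loc;

	return p >= r->base && p < r->base + r->size;	/* half-open */
}

int main(void)
{
	static char rx[64], rw[32];
	struct range core_rx = { rx, sizeof(rx) };
	struct range core_rw = { rw, sizeof(rw) };
	void *loc = rw + 8;

	printf("in core: %d\n",
	       in_range(&core_rx, loc) || in_range(&core_rw, loc));
	return 0;
}
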
5938diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
5939index f76c108..92bad82 100644
5940--- a/arch/parisc/kernel/sys_parisc.c
5941+++ b/arch/parisc/kernel/sys_parisc.c
5942@@ -33,9 +33,11 @@
5943 #include <linux/utsname.h>
5944 #include <linux/personality.h>
5945
5946-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
5947+static unsigned long get_unshared_area(struct file *filp, unsigned long addr, unsigned long len,
5948+ unsigned long flags)
5949 {
5950 struct vm_area_struct *vma;
5951+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5952
5953 addr = PAGE_ALIGN(addr);
5954
5955@@ -43,7 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
5956 /* At this point: (!vma || addr < vma->vm_end). */
5957 if (TASK_SIZE - len < addr)
5958 return -ENOMEM;
5959- if (!vma || addr + len <= vma->vm_start)
5960+ if (check_heap_stack_gap(vma, addr, len, offset))
5961 return addr;
5962 addr = vma->vm_end;
5963 }
5964@@ -67,11 +69,12 @@ static int get_offset(struct address_space *mapping)
5965 return offset & 0x3FF000;
5966 }
5967
5968-static unsigned long get_shared_area(struct address_space *mapping,
5969- unsigned long addr, unsigned long len, unsigned long pgoff)
5970+static unsigned long get_shared_area(struct file *filp, struct address_space *mapping,
5971+ unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
5972 {
5973 struct vm_area_struct *vma;
5974 int offset = mapping ? get_offset(mapping) : 0;
5975+ unsigned long rand_offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5976
5977 offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;
5978
5979@@ -81,7 +84,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
5980 /* At this point: (!vma || addr < vma->vm_end). */
5981 if (TASK_SIZE - len < addr)
5982 return -ENOMEM;
5983- if (!vma || addr + len <= vma->vm_start)
5984+ if (check_heap_stack_gap(vma, addr, len, rand_offset))
5985 return addr;
5986 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
5987 if (addr < vma->vm_end) /* handle wraparound */
5988@@ -100,14 +103,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
5989 if (flags & MAP_FIXED)
5990 return addr;
5991 if (!addr)
5992- addr = TASK_UNMAPPED_BASE;
5993+ addr = current->mm->mmap_base;
5994
5995 if (filp) {
5996- addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
5997+ addr = get_shared_area(filp, filp->f_mapping, addr, len, pgoff, flags);
5998 } else if(flags & MAP_SHARED) {
5999- addr = get_shared_area(NULL, addr, len, pgoff);
6000+ addr = get_shared_area(filp, NULL, addr, len, pgoff, flags);
6001 } else {
6002- addr = get_unshared_area(addr, len);
6003+ addr = get_unshared_area(filp, addr, len, flags);
6004 }
6005 return addr;
6006 }
6007diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
6008index 45ba99f..8e22c33 100644
6009--- a/arch/parisc/kernel/traps.c
6010+++ b/arch/parisc/kernel/traps.c
6011@@ -732,9 +732,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
6012
6013 down_read(&current->mm->mmap_sem);
6014 vma = find_vma(current->mm,regs->iaoq[0]);
6015- if (vma && (regs->iaoq[0] >= vma->vm_start)
6016- && (vma->vm_flags & VM_EXEC)) {
6017-
6018+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
6019 fault_address = regs->iaoq[0];
6020 fault_space = regs->iasq[0];
6021
6022diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
6023index 18162ce..94de376 100644
6024--- a/arch/parisc/mm/fault.c
6025+++ b/arch/parisc/mm/fault.c
6026@@ -15,6 +15,7 @@
6027 #include <linux/sched.h>
6028 #include <linux/interrupt.h>
6029 #include <linux/module.h>
6030+#include <linux/unistd.h>
6031
6032 #include <asm/uaccess.h>
6033 #include <asm/traps.h>
6034@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
6035 static unsigned long
6036 parisc_acctyp(unsigned long code, unsigned int inst)
6037 {
6038- if (code == 6 || code == 16)
6039+ if (code == 6 || code == 7 || code == 16)
6040 return VM_EXEC;
6041
6042 switch (inst & 0xf0000000) {
6043@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
6044 }
6045 #endif
6046
6047+#ifdef CONFIG_PAX_PAGEEXEC
6048+/*
6049+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
6050+ *
6051+ * returns 1 when task should be killed
6052+ * 2 when rt_sigreturn trampoline was detected
6053+ * 3 when unpatched PLT trampoline was detected
6054+ */
6055+static int pax_handle_fetch_fault(struct pt_regs *regs)
6056+{
6057+
6058+#ifdef CONFIG_PAX_EMUPLT
6059+ int err;
6060+
6061+ do { /* PaX: unpatched PLT emulation */
6062+ unsigned int bl, depwi;
6063+
6064+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
6065+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
6066+
6067+ if (err)
6068+ break;
6069+
6070+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
6071+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
6072+
6073+ err = get_user(ldw, (unsigned int *)addr);
6074+ err |= get_user(bv, (unsigned int *)(addr+4));
6075+ err |= get_user(ldw2, (unsigned int *)(addr+8));
6076+
6077+ if (err)
6078+ break;
6079+
6080+ if (ldw == 0x0E801096U &&
6081+ bv == 0xEAC0C000U &&
6082+ ldw2 == 0x0E881095U)
6083+ {
6084+ unsigned int resolver, map;
6085+
6086+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
6087+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
6088+ if (err)
6089+ break;
6090+
6091+ regs->gr[20] = instruction_pointer(regs)+8;
6092+ regs->gr[21] = map;
6093+ regs->gr[22] = resolver;
6094+ regs->iaoq[0] = resolver | 3UL;
6095+ regs->iaoq[1] = regs->iaoq[0] + 4;
6096+ return 3;
6097+ }
6098+ }
6099+ } while (0);
6100+#endif
6101+
6102+#ifdef CONFIG_PAX_EMUTRAMP
6103+
6104+#ifndef CONFIG_PAX_EMUSIGRT
6105+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
6106+ return 1;
6107+#endif
6108+
6109+ do { /* PaX: rt_sigreturn emulation */
6110+ unsigned int ldi1, ldi2, bel, nop;
6111+
6112+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
6113+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
6114+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
6115+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
6116+
6117+ if (err)
6118+ break;
6119+
6120+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
6121+ ldi2 == 0x3414015AU &&
6122+ bel == 0xE4008200U &&
6123+ nop == 0x08000240U)
6124+ {
6125+ regs->gr[25] = (ldi1 & 2) >> 1;
6126+ regs->gr[20] = __NR_rt_sigreturn;
6127+ regs->gr[31] = regs->iaoq[1] + 16;
6128+ regs->sr[0] = regs->iasq[1];
6129+ regs->iaoq[0] = 0x100UL;
6130+ regs->iaoq[1] = regs->iaoq[0] + 4;
6131+ regs->iasq[0] = regs->sr[2];
6132+ regs->iasq[1] = regs->sr[2];
6133+ return 2;
6134+ }
6135+ } while (0);
6136+#endif
6137+
6138+ return 1;
6139+}
6140+
6141+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6142+{
6143+ unsigned long i;
6144+
6145+ printk(KERN_ERR "PAX: bytes at PC: ");
6146+ for (i = 0; i < 5; i++) {
6147+ unsigned int c;
6148+ if (get_user(c, (unsigned int *)pc+i))
6149+ printk(KERN_CONT "???????? ");
6150+ else
6151+ printk(KERN_CONT "%08x ", c);
6152+ }
6153+ printk("\n");
6154+}
6155+#endif
6156+
6157 int fixup_exception(struct pt_regs *regs)
6158 {
6159 const struct exception_table_entry *fix;
6160@@ -192,8 +303,33 @@ good_area:
6161
6162 acc_type = parisc_acctyp(code,regs->iir);
6163
6164- if ((vma->vm_flags & acc_type) != acc_type)
6165+ if ((vma->vm_flags & acc_type) != acc_type) {
6166+
6167+#ifdef CONFIG_PAX_PAGEEXEC
6168+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
6169+ (address & ~3UL) == instruction_pointer(regs))
6170+ {
6171+ up_read(&mm->mmap_sem);
6172+ switch (pax_handle_fetch_fault(regs)) {
6173+
6174+#ifdef CONFIG_PAX_EMUPLT
6175+ case 3:
6176+ return;
6177+#endif
6178+
6179+#ifdef CONFIG_PAX_EMUTRAMP
6180+ case 2:
6181+ return;
6182+#endif
6183+
6184+ }
6185+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
6186+ do_group_exit(SIGKILL);
6187+ }
6188+#endif
6189+
6190 goto bad_area;
6191+ }
6192
6193 /*
6194 * If for any reason at all we couldn't handle the fault, make
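
pax_handle_fetch_fault() recognizes known trampolines by fetching instruction words at the faulting PC with get_user() and comparing them against fixed encodings, emulating the sequence rather than killing the task. The matching itself is plain integer comparison; a user-space sketch reusing the patch's two PLT opcodes as data:

#include <stdint.h>
#include <stdio.h>

static int looks_like_unpatched_plt(const uint32_t *pc)
{
	/* bl; depwi pair from the unpatched PLT detection above */
	return pc[0] == 0xEA9F1FDDU && pc[1] == 0xD6801C1EU;
}

int main(void)
{
	uint32_t fake[2] = { 0xEA9F1FDDU, 0xD6801C1EU };

	printf("%d\n", looks_like_unpatched_plt(fake));
	return 0;
}
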
6195diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
6196index e3b1d41..8e81edf 100644
6197--- a/arch/powerpc/include/asm/atomic.h
6198+++ b/arch/powerpc/include/asm/atomic.h
6199@@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
6200 return t1;
6201 }
6202
6203+#define atomic64_read_unchecked(v) atomic64_read(v)
6204+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
6205+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
6206+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
6207+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
6208+#define atomic64_inc_unchecked(v) atomic64_inc(v)
6209+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
6210+#define atomic64_dec_unchecked(v) atomic64_dec(v)
6211+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
6212+
6213 #endif /* __powerpc64__ */
6214
6215 #endif /* __KERNEL__ */
6216diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
6217index 9e495c9..b6878e5 100644
6218--- a/arch/powerpc/include/asm/cache.h
6219+++ b/arch/powerpc/include/asm/cache.h
6220@@ -3,6 +3,7 @@
6221
6222 #ifdef __KERNEL__
6223
6224+#include <linux/const.h>
6225
6226 /* bytes per L1 cache line */
6227 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
6228@@ -22,7 +23,7 @@
6229 #define L1_CACHE_SHIFT 7
6230 #endif
6231
6232-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6233+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6234
6235 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6236
6237diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
6238index 6abf0a1..459d0f1 100644
6239--- a/arch/powerpc/include/asm/elf.h
6240+++ b/arch/powerpc/include/asm/elf.h
6241@@ -28,8 +28,19 @@
6242 the loader. We need to make sure that it is out of the way of the program
6243 that it will "exec", and that there is sufficient room for the brk. */
6244
6245-extern unsigned long randomize_et_dyn(unsigned long base);
6246-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
6247+#define ELF_ET_DYN_BASE (0x20000000)
6248+
6249+#ifdef CONFIG_PAX_ASLR
6250+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
6251+
6252+#ifdef __powerpc64__
6253+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
6254+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
6255+#else
6256+#define PAX_DELTA_MMAP_LEN 15
6257+#define PAX_DELTA_STACK_LEN 15
6258+#endif
6259+#endif
6260
6261 /*
6262 * Our registers are always unsigned longs, whether we're a 32 bit
6263@@ -124,10 +135,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6264 (0x7ff >> (PAGE_SHIFT - 12)) : \
6265 (0x3ffff >> (PAGE_SHIFT - 12)))
6266
6267-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6268-#define arch_randomize_brk arch_randomize_brk
6269-
6270-
6271 #ifdef CONFIG_SPU_BASE
6272 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
6273 #define NT_SPU 1
6274diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
6275index 8196e9c..d83a9f3 100644
6276--- a/arch/powerpc/include/asm/exec.h
6277+++ b/arch/powerpc/include/asm/exec.h
6278@@ -4,6 +4,6 @@
6279 #ifndef _ASM_POWERPC_EXEC_H
6280 #define _ASM_POWERPC_EXEC_H
6281
6282-extern unsigned long arch_align_stack(unsigned long sp);
6283+#define arch_align_stack(x) ((x) & ~0xfUL)
6284
6285 #endif /* _ASM_POWERPC_EXEC_H */
6286diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
6287index 5acabbd..7ea14fa 100644
6288--- a/arch/powerpc/include/asm/kmap_types.h
6289+++ b/arch/powerpc/include/asm/kmap_types.h
6290@@ -10,7 +10,7 @@
6291 * 2 of the License, or (at your option) any later version.
6292 */
6293
6294-#define KM_TYPE_NR 16
6295+#define KM_TYPE_NR 17
6296
6297 #endif /* __KERNEL__ */
6298 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
6299diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
6300index 8565c25..2865190 100644
6301--- a/arch/powerpc/include/asm/mman.h
6302+++ b/arch/powerpc/include/asm/mman.h
6303@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
6304 }
6305 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
6306
6307-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
6308+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
6309 {
6310 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
6311 }
6312diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
6313index f072e97..b436dee 100644
6314--- a/arch/powerpc/include/asm/page.h
6315+++ b/arch/powerpc/include/asm/page.h
6316@@ -220,8 +220,9 @@ extern long long virt_phys_offset;
6317 * and needs to be executable. This means the whole heap ends
6318 * up being executable.
6319 */
6320-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
6321- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6322+#define VM_DATA_DEFAULT_FLAGS32 \
6323+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
6324+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6325
6326 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
6327 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6328@@ -249,6 +250,9 @@ extern long long virt_phys_offset;
6329 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
6330 #endif
6331
6332+#define ktla_ktva(addr) (addr)
6333+#define ktva_ktla(addr) (addr)
6334+
6335 /*
6336 * Use the top bit of the higher-level page table entries to indicate whether
6337 * the entries we point to contain hugepages. This works because we know that
6338diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
6339index cd915d6..c10cee8 100644
6340--- a/arch/powerpc/include/asm/page_64.h
6341+++ b/arch/powerpc/include/asm/page_64.h
6342@@ -154,15 +154,18 @@ do { \
6343 * stack by default, so in the absence of a PT_GNU_STACK program header
6344 * we turn execute permission off.
6345 */
6346-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
6347- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6348+#define VM_STACK_DEFAULT_FLAGS32 \
6349+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
6350+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6351
6352 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
6353 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6354
6355+#ifndef CONFIG_PAX_PAGEEXEC
6356 #define VM_STACK_DEFAULT_FLAGS \
6357 (is_32bit_task() ? \
6358 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
6359+#endif
6360
6361 #include <asm-generic/getorder.h>
6362
6363diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
6364index 292725c..f87ae14 100644
6365--- a/arch/powerpc/include/asm/pgalloc-64.h
6366+++ b/arch/powerpc/include/asm/pgalloc-64.h
6367@@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
6368 #ifndef CONFIG_PPC_64K_PAGES
6369
6370 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
6371+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
6372
6373 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
6374 {
6375@@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6376 pud_set(pud, (unsigned long)pmd);
6377 }
6378
6379+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6380+{
6381+ pud_populate(mm, pud, pmd);
6382+}
6383+
6384 #define pmd_populate(mm, pmd, pte_page) \
6385 pmd_populate_kernel(mm, pmd, page_address(pte_page))
6386 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
6387@@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6388 #else /* CONFIG_PPC_64K_PAGES */
6389
6390 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
6391+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
6392
6393 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
6394 pte_t *pte)
6395diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
6396index a9cbd3b..3b67efa 100644
6397--- a/arch/powerpc/include/asm/pgtable.h
6398+++ b/arch/powerpc/include/asm/pgtable.h
6399@@ -2,6 +2,7 @@
6400 #define _ASM_POWERPC_PGTABLE_H
6401 #ifdef __KERNEL__
6402
6403+#include <linux/const.h>
6404 #ifndef __ASSEMBLY__
6405 #include <asm/processor.h> /* For TASK_SIZE */
6406 #include <asm/mmu.h>
6407diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
6408index 4aad413..85d86bf 100644
6409--- a/arch/powerpc/include/asm/pte-hash32.h
6410+++ b/arch/powerpc/include/asm/pte-hash32.h
6411@@ -21,6 +21,7 @@
6412 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
6413 #define _PAGE_USER 0x004 /* usermode access allowed */
6414 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
6415+#define _PAGE_EXEC _PAGE_GUARDED
6416 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
6417 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
6418 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
6419diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
6420index 3d5c9dc..62f8414 100644
6421--- a/arch/powerpc/include/asm/reg.h
6422+++ b/arch/powerpc/include/asm/reg.h
6423@@ -215,6 +215,7 @@
6424 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
6425 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
6426 #define DSISR_NOHPTE 0x40000000 /* no translation found */
6427+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
6428 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
6429 #define DSISR_ISSTORE 0x02000000 /* access was a store */
6430 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
6431diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
6432index 406b7b9..af63426 100644
6433--- a/arch/powerpc/include/asm/thread_info.h
6434+++ b/arch/powerpc/include/asm/thread_info.h
6435@@ -97,7 +97,6 @@ static inline struct thread_info *current_thread_info(void)
6436 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
6437 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
6438 #define TIF_SINGLESTEP 8 /* singlestepping active */
6439-#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
6440 #define TIF_SECCOMP 10 /* secure computing */
6441 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
6442 #define TIF_NOERROR 12 /* Force successful syscall return */
6443@@ -106,6 +105,9 @@ static inline struct thread_info *current_thread_info(void)
6444 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
6445 #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
6446 for stack store? */
6447+#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
6448+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
6449+#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
6450
6451 /* as above, but as bit values */
6452 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
6453@@ -124,8 +126,10 @@ static inline struct thread_info *current_thread_info(void)
6454 #define _TIF_UPROBE (1<<TIF_UPROBE)
6455 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6456 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
6457+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6458 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
6459- _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
6460+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
6461+ _TIF_GRSEC_SETXID)
6462
6463 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
6464 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
6465diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
6466index 4db4959..335e00c 100644
6467--- a/arch/powerpc/include/asm/uaccess.h
6468+++ b/arch/powerpc/include/asm/uaccess.h
6469@@ -13,6 +13,8 @@
6470 #define VERIFY_READ 0
6471 #define VERIFY_WRITE 1
6472
6473+extern void check_object_size(const void *ptr, unsigned long n, bool to);
6474+
6475 /*
6476 * The fs value determines whether argument validity checking should be
6477 * performed or not. If get_fs() == USER_DS, checking is performed, with
6478@@ -318,52 +320,6 @@ do { \
6479 extern unsigned long __copy_tofrom_user(void __user *to,
6480 const void __user *from, unsigned long size);
6481
6482-#ifndef __powerpc64__
6483-
6484-static inline unsigned long copy_from_user(void *to,
6485- const void __user *from, unsigned long n)
6486-{
6487- unsigned long over;
6488-
6489- if (access_ok(VERIFY_READ, from, n))
6490- return __copy_tofrom_user((__force void __user *)to, from, n);
6491- if ((unsigned long)from < TASK_SIZE) {
6492- over = (unsigned long)from + n - TASK_SIZE;
6493- return __copy_tofrom_user((__force void __user *)to, from,
6494- n - over) + over;
6495- }
6496- return n;
6497-}
6498-
6499-static inline unsigned long copy_to_user(void __user *to,
6500- const void *from, unsigned long n)
6501-{
6502- unsigned long over;
6503-
6504- if (access_ok(VERIFY_WRITE, to, n))
6505- return __copy_tofrom_user(to, (__force void __user *)from, n);
6506- if ((unsigned long)to < TASK_SIZE) {
6507- over = (unsigned long)to + n - TASK_SIZE;
6508- return __copy_tofrom_user(to, (__force void __user *)from,
6509- n - over) + over;
6510- }
6511- return n;
6512-}
6513-
6514-#else /* __powerpc64__ */
6515-
6516-#define __copy_in_user(to, from, size) \
6517- __copy_tofrom_user((to), (from), (size))
6518-
6519-extern unsigned long copy_from_user(void *to, const void __user *from,
6520- unsigned long n);
6521-extern unsigned long copy_to_user(void __user *to, const void *from,
6522- unsigned long n);
6523-extern unsigned long copy_in_user(void __user *to, const void __user *from,
6524- unsigned long n);
6525-
6526-#endif /* __powerpc64__ */
6527-
6528 static inline unsigned long __copy_from_user_inatomic(void *to,
6529 const void __user *from, unsigned long n)
6530 {
6531@@ -387,6 +343,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
6532 if (ret == 0)
6533 return 0;
6534 }
6535+
6536+ if (!__builtin_constant_p(n))
6537+ check_object_size(to, n, false);
6538+
6539 return __copy_tofrom_user((__force void __user *)to, from, n);
6540 }
6541
6542@@ -413,6 +373,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
6543 if (ret == 0)
6544 return 0;
6545 }
6546+
6547+ if (!__builtin_constant_p(n))
6548+ check_object_size(from, n, true);
6549+
6550 return __copy_tofrom_user(to, (__force const void __user *)from, n);
6551 }
6552
6553@@ -430,6 +394,92 @@ static inline unsigned long __copy_to_user(void __user *to,
6554 return __copy_to_user_inatomic(to, from, size);
6555 }
6556
6557+#ifndef __powerpc64__
6558+
6559+static inline unsigned long __must_check copy_from_user(void *to,
6560+ const void __user *from, unsigned long n)
6561+{
6562+ unsigned long over;
6563+
6564+ if ((long)n < 0)
6565+ return n;
6566+
6567+ if (access_ok(VERIFY_READ, from, n)) {
6568+ if (!__builtin_constant_p(n))
6569+ check_object_size(to, n, false);
6570+ return __copy_tofrom_user((__force void __user *)to, from, n);
6571+ }
6572+ if ((unsigned long)from < TASK_SIZE) {
6573+ over = (unsigned long)from + n - TASK_SIZE;
6574+ if (!__builtin_constant_p(n - over))
6575+ check_object_size(to, n - over, false);
6576+ return __copy_tofrom_user((__force void __user *)to, from,
6577+ n - over) + over;
6578+ }
6579+ return n;
6580+}
6581+
6582+static inline unsigned long __must_check copy_to_user(void __user *to,
6583+ const void *from, unsigned long n)
6584+{
6585+ unsigned long over;
6586+
6587+ if ((long)n < 0)
6588+ return n;
6589+
6590+ if (access_ok(VERIFY_WRITE, to, n)) {
6591+ if (!__builtin_constant_p(n))
6592+ check_object_size(from, n, true);
6593+ return __copy_tofrom_user(to, (__force void __user *)from, n);
6594+ }
6595+ if ((unsigned long)to < TASK_SIZE) {
6596+ over = (unsigned long)to + n - TASK_SIZE;
6597+ if (!__builtin_constant_p(n))
6598+ check_object_size(from, n - over, true);
6599+ return __copy_tofrom_user(to, (__force void __user *)from,
6600+ n - over) + over;
6601+ }
6602+ return n;
6603+}
6604+
6605+#else /* __powerpc64__ */
6606+
6607+#define __copy_in_user(to, from, size) \
6608+ __copy_tofrom_user((to), (from), (size))
6609+
6610+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
6611+{
6612+ if ((long)n < 0 || n > INT_MAX)
6613+ return n;
6614+
6615+ if (!__builtin_constant_p(n))
6616+ check_object_size(to, n, false);
6617+
6618+ if (likely(access_ok(VERIFY_READ, from, n)))
6619+ n = __copy_from_user(to, from, n);
6620+ else
6621+ memset(to, 0, n);
6622+ return n;
6623+}
6624+
6625+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
6626+{
6627+ if ((long)n < 0 || n > INT_MAX)
6628+ return n;
6629+
6630+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
6631+ if (!__builtin_constant_p(n))
6632+ check_object_size(from, n, true);
6633+ n = __copy_to_user(to, from, n);
6634+ }
6635+ return n;
6636+}
6637+
6638+extern unsigned long copy_in_user(void __user *to, const void __user *from,
6639+ unsigned long n);
6640+
6641+#endif /* __powerpc64__ */
6642+
6643 extern unsigned long __clear_user(void __user *addr, unsigned long size);
6644
6645 static inline unsigned long clear_user(void __user *addr, unsigned long size)
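
Editor's sketch for the uaccess.h hunks above: the patch moves copy_from_user()/copy_to_user() out of the 64-bit C file and into the header, and adds two guards on the way — a check_object_size() call feeding the usercopy checker, and a (long)n < 0 sanity test. The sketch below models only the sign test; checked_copy() is an illustrative name, not kernel API. A length produced by signed underflow (e.g. len - 1 with len == 0) becomes a huge unsigned value, and the cast rejects it before any copying starts.

#include <stdio.h>

static unsigned long checked_copy(void *to, const void *from, unsigned long n)
{
	(void)to; (void)from;
	if ((long)n < 0)	/* absurd size: report everything uncopied */
		return n;
	/* ... a real implementation would bounds-check and copy here ... */
	return 0;		/* 0 bytes left uncopied */
}

int main(void)
{
	char buf[16];
	unsigned long bad = (unsigned long)0 - 1;	/* len - 1, len == 0 */
	printf("left uncopied: %lu\n", checked_copy(buf, "x", bad));
	return 0;
}
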
6646diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
6647index 4684e33..acc4d19e 100644
6648--- a/arch/powerpc/kernel/exceptions-64e.S
6649+++ b/arch/powerpc/kernel/exceptions-64e.S
6650@@ -715,6 +715,7 @@ storage_fault_common:
6651 std r14,_DAR(r1)
6652 std r15,_DSISR(r1)
6653 addi r3,r1,STACK_FRAME_OVERHEAD
6654+ bl .save_nvgprs
6655 mr r4,r14
6656 mr r5,r15
6657 ld r14,PACA_EXGEN+EX_R14(r13)
6658@@ -723,8 +724,7 @@ storage_fault_common:
6659 cmpdi r3,0
6660 bne- 1f
6661 b .ret_from_except_lite
6662-1: bl .save_nvgprs
6663- mr r5,r3
6664+1: mr r5,r3
6665 addi r3,r1,STACK_FRAME_OVERHEAD
6666 ld r4,_DAR(r1)
6667 bl .bad_page_fault
6668diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
6669index 3684cbd..bc89eab 100644
6670--- a/arch/powerpc/kernel/exceptions-64s.S
6671+++ b/arch/powerpc/kernel/exceptions-64s.S
6672@@ -1206,10 +1206,10 @@ handle_page_fault:
6673 11: ld r4,_DAR(r1)
6674 ld r5,_DSISR(r1)
6675 addi r3,r1,STACK_FRAME_OVERHEAD
6676+ bl .save_nvgprs
6677 bl .do_page_fault
6678 cmpdi r3,0
6679 beq+ 12f
6680- bl .save_nvgprs
6681 mr r5,r3
6682 addi r3,r1,STACK_FRAME_OVERHEAD
6683 lwz r4,_DAR(r1)
6684diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
6685index 2e3200c..72095ce 100644
6686--- a/arch/powerpc/kernel/module_32.c
6687+++ b/arch/powerpc/kernel/module_32.c
6688@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
6689 me->arch.core_plt_section = i;
6690 }
6691 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
6692- printk("Module doesn't contain .plt or .init.plt sections.\n");
6693+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
6694 return -ENOEXEC;
6695 }
6696
6697@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
6698
6699 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
6700 /* Init, or core PLT? */
6701- if (location >= mod->module_core
6702- && location < mod->module_core + mod->core_size)
6703+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
6704+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
6705 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
6706- else
6707+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
6708+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
6709 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
6710+ else {
6711+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
6712+ return ~0UL;
6713+ }
6714
6715 /* Find this entry, or if that fails, the next avail. entry */
6716 while (entry->jump[0]) {
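
The do_plt_call() change above follows from PaX splitting a module's core and init images into separate RX (code) and RW (data) mappings: classifying a call site now means testing four ranges instead of two, with a hard failure for addresses in none of them. A compilable sketch of the containment test — the struct and names below are illustrative, not the kernel's:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct mod_layout {
	char *core_rx; size_t size_rx;	/* executable, read-only */
	char *core_rw; size_t size_rw;	/* writable, non-exec    */
};

static bool in_core(const struct mod_layout *m, uintptr_t p)
{
	uintptr_t rx = (uintptr_t)m->core_rx, rw = (uintptr_t)m->core_rw;
	return (p >= rx && p < rx + m->size_rx) ||
	       (p >= rw && p < rw + m->size_rw);
}

int main(void)
{
	static char rx[64], rw[64];
	struct mod_layout m = { rx, sizeof rx, rw, sizeof rw };
	printf("%d %d\n", in_core(&m, (uintptr_t)(rx + 10)),
	       in_core(&m, (uintptr_t)rw + 70));	/* 1 0 */
	return 0;
}
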
6717diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
6718index 8143067..21ae55b 100644
6719--- a/arch/powerpc/kernel/process.c
6720+++ b/arch/powerpc/kernel/process.c
6721@@ -680,8 +680,8 @@ void show_regs(struct pt_regs * regs)
6722 * Lookup NIP late so we have the best chance of getting the
6723 * above info out without failing
6724 */
6725- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
6726- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
6727+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
6728+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
6729 #endif
6730 show_stack(current, (unsigned long *) regs->gpr[1]);
6731 if (!user_mode(regs))
6732@@ -1129,10 +1129,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
6733 newsp = stack[0];
6734 ip = stack[STACK_FRAME_LR_SAVE];
6735 if (!firstframe || ip != lr) {
6736- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
6737+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
6738 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6739 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
6740- printk(" (%pS)",
6741+ printk(" (%pA)",
6742 (void *)current->ret_stack[curr_frame].ret);
6743 curr_frame--;
6744 }
6745@@ -1152,7 +1152,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
6746 struct pt_regs *regs = (struct pt_regs *)
6747 (sp + STACK_FRAME_OVERHEAD);
6748 lr = regs->link;
6749- printk("--- Exception: %lx at %pS\n LR = %pS\n",
6750+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
6751 regs->trap, (void *)regs->nip, (void *)lr);
6752 firstframe = 1;
6753 }
6754@@ -1194,58 +1194,3 @@ void __ppc64_runlatch_off(void)
6755 mtspr(SPRN_CTRLT, ctrl);
6756 }
6757 #endif /* CONFIG_PPC64 */
6758-
6759-unsigned long arch_align_stack(unsigned long sp)
6760-{
6761- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6762- sp -= get_random_int() & ~PAGE_MASK;
6763- return sp & ~0xf;
6764-}
6765-
6766-static inline unsigned long brk_rnd(void)
6767-{
6768- unsigned long rnd = 0;
6769-
6770- /* 8MB for 32bit, 1GB for 64bit */
6771- if (is_32bit_task())
6772- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
6773- else
6774- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
6775-
6776- return rnd << PAGE_SHIFT;
6777-}
6778-
6779-unsigned long arch_randomize_brk(struct mm_struct *mm)
6780-{
6781- unsigned long base = mm->brk;
6782- unsigned long ret;
6783-
6784-#ifdef CONFIG_PPC_STD_MMU_64
6785- /*
6786- * If we are using 1TB segments and we are allowed to randomise
6787- * the heap, we can put it above 1TB so it is backed by a 1TB
6788- * segment. Otherwise the heap will be in the bottom 1TB
6789- * which always uses 256MB segments and this may result in a
6790- * performance penalty.
6791- */
6792- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
6793- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
6794-#endif
6795-
6796- ret = PAGE_ALIGN(base + brk_rnd());
6797-
6798- if (ret < mm->brk)
6799- return mm->brk;
6800-
6801- return ret;
6802-}
6803-
6804-unsigned long randomize_et_dyn(unsigned long base)
6805-{
6806- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
6807-
6808- if (ret < base)
6809- return base;
6810-
6811- return ret;
6812-}
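
The deleted brk_rnd()/arch_randomize_brk() pair above — presumably superseded by the PaX ASLR machinery this patch set adds elsewhere — encodes the "8MB for 32bit, 1GB for 64bit" comment as shift arithmetic. A small worked example, assuming PAGE_SHIFT == 12:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long pages32 = 1UL << (23 - PAGE_SHIFT);	/* 2048 pages */
	unsigned long pages64 = 1UL << (30 - PAGE_SHIFT);

	printf("32-bit brk window: %lu MiB\n", (pages32 << PAGE_SHIFT) >> 20); /* 8    */
	printf("64-bit brk window: %lu MiB\n", (pages64 << PAGE_SHIFT) >> 20); /* 1024 */
	return 0;
}
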
6813diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
6814index c497000..8fde506 100644
6815--- a/arch/powerpc/kernel/ptrace.c
6816+++ b/arch/powerpc/kernel/ptrace.c
6817@@ -1737,6 +1737,10 @@ long arch_ptrace(struct task_struct *child, long request,
6818 return ret;
6819 }
6820
6821+#ifdef CONFIG_GRKERNSEC_SETXID
6822+extern void gr_delayed_cred_worker(void);
6823+#endif
6824+
6825 /*
6826 * We must return the syscall number to actually look up in the table.
6827 * This can be -1L to skip running any syscall at all.
6828@@ -1747,6 +1751,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
6829
6830 secure_computing_strict(regs->gpr[0]);
6831
6832+#ifdef CONFIG_GRKERNSEC_SETXID
6833+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6834+ gr_delayed_cred_worker();
6835+#endif
6836+
6837 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
6838 tracehook_report_syscall_entry(regs))
6839 /*
6840@@ -1781,6 +1790,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
6841 {
6842 int step;
6843
6844+#ifdef CONFIG_GRKERNSEC_SETXID
6845+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6846+ gr_delayed_cred_worker();
6847+#endif
6848+
6849 audit_syscall_exit(regs);
6850
6851 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
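
The ptrace hooks above are the consumer side of grsecurity's delayed-credential mechanism: a setter flags the task with TIF_GRSEC_SETXID, and the syscall entry/exit paths atomically consume the flag at most once before running gr_delayed_cred_worker(). A userspace analogue of the test-and-clear handshake, with illustrative names:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int setxid_pending;	/* stands in for TIF_GRSEC_SETXID */

static void syscall_entry_hook(void)
{
	/* test-and-clear: exactly one path consumes the flag */
	if (atomic_exchange(&setxid_pending, 0))
		printf("applying delayed credential change\n");
}

int main(void)
{
	atomic_store(&setxid_pending, 1);	/* setter marks the task   */
	syscall_entry_hook();			/* applied once            */
	syscall_entry_hook();			/* no-op on the next entry */
	return 0;
}
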
6852diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
6853index 804e323..79181c1 100644
6854--- a/arch/powerpc/kernel/signal_32.c
6855+++ b/arch/powerpc/kernel/signal_32.c
6856@@ -851,7 +851,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
6857 /* Save user registers on the stack */
6858 frame = &rt_sf->uc.uc_mcontext;
6859 addr = frame;
6860- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
6861+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
6862 if (save_user_regs(regs, frame, 0, 1))
6863 goto badframe;
6864 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
6865diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
6866index 1ca045d..139c3f7 100644
6867--- a/arch/powerpc/kernel/signal_64.c
6868+++ b/arch/powerpc/kernel/signal_64.c
6869@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
6870 current->thread.fpscr.val = 0;
6871
6872 /* Set up to return from userspace. */
6873- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
6874+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
6875 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
6876 } else {
6877 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
6878diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
6879index 3ce1f86..c30e629 100644
6880--- a/arch/powerpc/kernel/sysfs.c
6881+++ b/arch/powerpc/kernel/sysfs.c
6882@@ -522,7 +522,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
6883 return NOTIFY_OK;
6884 }
6885
6886-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
6887+static struct notifier_block sysfs_cpu_nb = {
6888 .notifier_call = sysfs_cpu_notify,
6889 };
6890
6891diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
6892index 3251840..3f7c77a 100644
6893--- a/arch/powerpc/kernel/traps.c
6894+++ b/arch/powerpc/kernel/traps.c
6895@@ -133,6 +133,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
6896 return flags;
6897 }
6898
6899+extern void gr_handle_kernel_exploit(void);
6900+
6901 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
6902 int signr)
6903 {
6904@@ -182,6 +184,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
6905 panic("Fatal exception in interrupt");
6906 if (panic_on_oops)
6907 panic("Fatal exception");
6908+
6909+ gr_handle_kernel_exploit();
6910+
6911 do_exit(signr);
6912 }
6913
6914diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
6915index 1b2076f..835e4be 100644
6916--- a/arch/powerpc/kernel/vdso.c
6917+++ b/arch/powerpc/kernel/vdso.c
6918@@ -34,6 +34,7 @@
6919 #include <asm/firmware.h>
6920 #include <asm/vdso.h>
6921 #include <asm/vdso_datapage.h>
6922+#include <asm/mman.h>
6923
6924 #include "setup.h"
6925
6926@@ -218,7 +219,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
6927 vdso_base = VDSO32_MBASE;
6928 #endif
6929
6930- current->mm->context.vdso_base = 0;
6931+ current->mm->context.vdso_base = ~0UL;
6932
6933 /* vDSO has a problem and was disabled, just don't "enable" it for the
6934 * process
6935@@ -238,7 +239,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
6936 vdso_base = get_unmapped_area(NULL, vdso_base,
6937 (vdso_pages << PAGE_SHIFT) +
6938 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
6939- 0, 0);
6940+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
6941 if (IS_ERR_VALUE(vdso_base)) {
6942 rc = vdso_base;
6943 goto fail_mmapsem;
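
The vdso.c hunk above changes the "no vDSO" marker from 0 to ~0UL, and the signal_32.c/signal_64.c checks earlier are updated to match: an unset base must never alias a mappable low address, and address 0 is a legal mapping target, whereas ~0UL can never be a valid page-aligned base. A tiny sketch of the sentinel logic:

#include <stdio.h>

#define NO_VDSO	(~0UL)	/* never a valid, page-aligned mapping base */

static const char *vdso_state(unsigned long base)
{
	return base != NO_VDSO ? "vDSO mapped" : "vDSO absent";
}

int main(void)
{
	printf("%s\n", vdso_state(NO_VDSO));	/* vDSO absent             */
	printf("%s\n", vdso_state(0));		/* base 0 now means mapped */
	return 0;
}
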
6944diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
6945index 5eea6f3..5d10396 100644
6946--- a/arch/powerpc/lib/usercopy_64.c
6947+++ b/arch/powerpc/lib/usercopy_64.c
6948@@ -9,22 +9,6 @@
6949 #include <linux/module.h>
6950 #include <asm/uaccess.h>
6951
6952-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
6953-{
6954- if (likely(access_ok(VERIFY_READ, from, n)))
6955- n = __copy_from_user(to, from, n);
6956- else
6957- memset(to, 0, n);
6958- return n;
6959-}
6960-
6961-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
6962-{
6963- if (likely(access_ok(VERIFY_WRITE, to, n)))
6964- n = __copy_to_user(to, from, n);
6965- return n;
6966-}
6967-
6968 unsigned long copy_in_user(void __user *to, const void __user *from,
6969 unsigned long n)
6970 {
6971@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
6972 return n;
6973 }
6974
6975-EXPORT_SYMBOL(copy_from_user);
6976-EXPORT_SYMBOL(copy_to_user);
6977 EXPORT_SYMBOL(copy_in_user);
6978
6979diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
6980index 3a8489a..6a63b3b 100644
6981--- a/arch/powerpc/mm/fault.c
6982+++ b/arch/powerpc/mm/fault.c
6983@@ -32,6 +32,10 @@
6984 #include <linux/perf_event.h>
6985 #include <linux/magic.h>
6986 #include <linux/ratelimit.h>
6987+#include <linux/slab.h>
6988+#include <linux/pagemap.h>
6989+#include <linux/compiler.h>
6990+#include <linux/unistd.h>
6991
6992 #include <asm/firmware.h>
6993 #include <asm/page.h>
6994@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
6995 }
6996 #endif
6997
6998+#ifdef CONFIG_PAX_PAGEEXEC
6999+/*
7000+ * PaX: decide what to do with offenders (regs->nip = fault address)
7001+ *
7002+ * returns 1 when task should be killed
7003+ */
7004+static int pax_handle_fetch_fault(struct pt_regs *regs)
7005+{
7006+ return 1;
7007+}
7008+
7009+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7010+{
7011+ unsigned long i;
7012+
7013+ printk(KERN_ERR "PAX: bytes at PC: ");
7014+ for (i = 0; i < 5; i++) {
7015+ unsigned int c;
7016+ if (get_user(c, (unsigned int __user *)pc+i))
7017+ printk(KERN_CONT "???????? ");
7018+ else
7019+ printk(KERN_CONT "%08x ", c);
7020+ }
7021+ printk("\n");
7022+}
7023+#endif
7024+
7025 /*
7026 * Check whether the instruction at regs->nip is a store using
7027 * an update addressing form which will update r1.
7028@@ -213,7 +244,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
7029 * indicate errors in DSISR but can validly be set in SRR1.
7030 */
7031 if (trap == 0x400)
7032- error_code &= 0x48200000;
7033+ error_code &= 0x58200000;
7034 else
7035 is_write = error_code & DSISR_ISSTORE;
7036 #else
7037@@ -364,7 +395,7 @@ good_area:
7038 * "undefined". Of those that can be set, this is the only
7039 * one which seems bad.
7040 */
7041- if (error_code & 0x10000000)
7042+ if (error_code & DSISR_GUARDED)
7043 /* Guarded storage error. */
7044 goto bad_area;
7045 #endif /* CONFIG_8xx */
7046@@ -379,7 +410,7 @@ good_area:
7047 * processors use the same I/D cache coherency mechanism
7048 * as embedded.
7049 */
7050- if (error_code & DSISR_PROTFAULT)
7051+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
7052 goto bad_area;
7053 #endif /* CONFIG_PPC_STD_MMU */
7054
7055@@ -462,6 +493,23 @@ bad_area:
7056 bad_area_nosemaphore:
7057 /* User mode accesses cause a SIGSEGV */
7058 if (user_mode(regs)) {
7059+
7060+#ifdef CONFIG_PAX_PAGEEXEC
7061+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
7062+#ifdef CONFIG_PPC_STD_MMU
7063+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
7064+#else
7065+ if (is_exec && regs->nip == address) {
7066+#endif
7067+ switch (pax_handle_fetch_fault(regs)) {
7068+ }
7069+
7070+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
7071+ do_group_exit(SIGKILL);
7072+ }
7073+ }
7074+#endif
7075+
7076 _exception(SIGSEGV, regs, code, address);
7077 return 0;
7078 }
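
pax_report_insns() added above dumps the five 32-bit words at the faulting PC so a PAGEEXEC kill leaves forensic context in the log, printing "????????" for any word that cannot be read back. A userspace sketch of the same output format, with a NULL pc standing in for a failing get_user():

#include <stdio.h>

static void report_insns(const unsigned int *pc)
{
	printf("PAX: bytes at PC: ");
	for (int i = 0; i < 5; i++) {
		if (!pc)			/* models get_user() faulting */
			printf("???????? ");
		else
			printf("%08x ", pc[i]);
	}
	printf("\n");
}

int main(void)
{
	unsigned int insns[5] = { 0x38600000, 0x4e800020, 0, 0, 0 };
	report_insns(insns);
	report_insns(NULL);
	return 0;
}
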
7079diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
7080index 67a42ed..cd463e0 100644
7081--- a/arch/powerpc/mm/mmap_64.c
7082+++ b/arch/powerpc/mm/mmap_64.c
7083@@ -57,6 +57,10 @@ static unsigned long mmap_rnd(void)
7084 {
7085 unsigned long rnd = 0;
7086
7087+#ifdef CONFIG_PAX_RANDMMAP
7088+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7089+#endif
7090+
7091 if (current->flags & PF_RANDOMIZE) {
7092 /* 8MB for 32bit, 1GB for 64bit */
7093 if (is_32bit_task())
7094@@ -91,10 +95,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7095 */
7096 if (mmap_is_legacy()) {
7097 mm->mmap_base = TASK_UNMAPPED_BASE;
7098+
7099+#ifdef CONFIG_PAX_RANDMMAP
7100+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7101+ mm->mmap_base += mm->delta_mmap;
7102+#endif
7103+
7104 mm->get_unmapped_area = arch_get_unmapped_area;
7105 mm->unmap_area = arch_unmap_area;
7106 } else {
7107 mm->mmap_base = mmap_base();
7108+
7109+#ifdef CONFIG_PAX_RANDMMAP
7110+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7111+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7112+#endif
7113+
7114 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7115 mm->unmap_area = arch_unmap_area_topdown;
7116 }
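
The arch_pick_mmap_layout() hunk above (and the matching s390 hunks later) applies the same PAX_RANDMMAP rule everywhere: bottom-up ("legacy") layouts shift mmap_base up by delta_mmap, top-down layouts shift it down by delta_mmap + delta_stack. The arithmetic, with illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned long mmap_base   = 0x40000000UL;	/* like TASK_UNMAPPED_BASE  */
	unsigned long delta_mmap  = 0x00230000UL;	/* randomized once per exec */
	unsigned long delta_stack = 0x00010000UL;

	printf("legacy (bottom-up) base: %#lx\n", mmap_base + delta_mmap);
	printf("top-down base:           %#lx\n",
	       mmap_base - (delta_mmap + delta_stack));
	return 0;
}
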
7117diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
7118index e779642..e5bb889 100644
7119--- a/arch/powerpc/mm/mmu_context_nohash.c
7120+++ b/arch/powerpc/mm/mmu_context_nohash.c
7121@@ -363,7 +363,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
7122 return NOTIFY_OK;
7123 }
7124
7125-static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
7126+static struct notifier_block mmu_context_cpu_nb = {
7127 .notifier_call = mmu_context_cpu_notify,
7128 };
7129
7130diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
7131index bba87ca..c346a33 100644
7132--- a/arch/powerpc/mm/numa.c
7133+++ b/arch/powerpc/mm/numa.c
7134@@ -932,7 +932,7 @@ static void __init *careful_zallocation(int nid, unsigned long size,
7135 return ret;
7136 }
7137
7138-static struct notifier_block __cpuinitdata ppc64_numa_nb = {
7139+static struct notifier_block ppc64_numa_nb = {
7140 .notifier_call = cpu_numa_callback,
7141 .priority = 1 /* Must run before sched domains notifier. */
7142 };
7143diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
7144index cf9dada..241529f 100644
7145--- a/arch/powerpc/mm/slice.c
7146+++ b/arch/powerpc/mm/slice.c
7147@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
7148 if ((mm->task_size - len) < addr)
7149 return 0;
7150 vma = find_vma(mm, addr);
7151- return (!vma || (addr + len) <= vma->vm_start);
7152+ return check_heap_stack_gap(vma, addr, len, 0);
7153 }
7154
7155 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
7156@@ -272,7 +272,7 @@ full_search:
7157 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
7158 continue;
7159 }
7160- if (!vma || addr + len <= vma->vm_start) {
7161+ if (check_heap_stack_gap(vma, addr, len, 0)) {
7162 /*
7163 * Remember the place where we stopped the search:
7164 */
7165@@ -329,10 +329,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
7166 }
7167 }
7168
7169- addr = mm->mmap_base;
7170- while (addr > len) {
7171+ if (mm->mmap_base < len)
7172+ addr = -ENOMEM;
7173+ else
7174+ addr = mm->mmap_base - len;
7175+
7176+ while (!IS_ERR_VALUE(addr)) {
7177 /* Go down by chunk size */
7178- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
7179+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
7180
7181 /* Check for hit with different page size */
7182 mask = slice_range_to_mask(addr, len);
7183@@ -352,7 +356,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
7184 * return with success:
7185 */
7186 vma = find_vma(mm, addr);
7187- if (!vma || (addr + len) <= vma->vm_start) {
7188+ if (check_heap_stack_gap(vma, addr, len, 0)) {
7189 /* remember the address as a hint for next time */
7190 if (use_cache)
7191 mm->free_area_cache = addr;
7192@@ -364,7 +368,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
7193 mm->cached_hole_size = vma->vm_start - addr;
7194
7195 /* try just below the current vma->vm_start */
7196- addr = vma->vm_start;
7197+ addr = skip_heap_stack_gap(vma, len, 0);
7198 }
7199
7200 /*
7201@@ -442,6 +446,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
7202 if (fixed && addr > (mm->task_size - len))
7203 return -EINVAL;
7204
7205+#ifdef CONFIG_PAX_RANDMMAP
7206+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
7207+ addr = 0;
7208+#endif
7209+
7210 /* If hint, make sure it matches our alignment restrictions */
7211 if (!fixed && addr) {
7212 addr = _ALIGN_UP(addr, 1ul << pshift);
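
check_heap_stack_gap(), defined elsewhere in this patch set, replaces the bare "!vma || addr + len <= vma->vm_start" test throughout these allocators; the point is to also keep a guard gap below a downward-growing stack VMA. A minimal sketch of that behaviour — the struct and guard parameter below are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

struct vma { unsigned long vm_start; bool grows_down; };

static bool gap_ok(const struct vma *vma, unsigned long addr,
		   unsigned long len, unsigned long guard)
{
	if (!vma)
		return true;				/* no VMA above: fine */
	if (!vma->grows_down)
		return addr + len <= vma->vm_start;	/* the old test       */
	return addr + len + guard <= vma->vm_start;	/* keep a stack gap   */
}

int main(void)
{
	struct vma stack = { 0x40000000UL, true };
	printf("%d\n", gap_ok(&stack, 0x3fff0000UL, 0x10000, 0));	/* 1 */
	printf("%d\n", gap_ok(&stack, 0x3fff0000UL, 0x10000, 0x1000));	/* 0 */
	return 0;
}
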
7213diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
7214index bdb738a..49c9f95 100644
7215--- a/arch/powerpc/platforms/powermac/smp.c
7216+++ b/arch/powerpc/platforms/powermac/smp.c
7217@@ -885,7 +885,7 @@ static int smp_core99_cpu_notify(struct notifier_block *self,
7218 return NOTIFY_OK;
7219 }
7220
7221-static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
7222+static struct notifier_block smp_core99_cpu_nb = {
7223 .notifier_call = smp_core99_cpu_notify,
7224 };
7225 #endif /* CONFIG_HOTPLUG_CPU */
7226diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
7227index c797832..ce575c8 100644
7228--- a/arch/s390/include/asm/atomic.h
7229+++ b/arch/s390/include/asm/atomic.h
7230@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
7231 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
7232 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7233
7234+#define atomic64_read_unchecked(v) atomic64_read(v)
7235+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7236+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7237+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7238+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7239+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7240+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7241+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7242+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7243+
7244 #define smp_mb__before_atomic_dec() smp_mb()
7245 #define smp_mb__after_atomic_dec() smp_mb()
7246 #define smp_mb__before_atomic_inc() smp_mb()
7247diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
7248index 4d7ccac..d03d0ad 100644
7249--- a/arch/s390/include/asm/cache.h
7250+++ b/arch/s390/include/asm/cache.h
7251@@ -9,8 +9,10 @@
7252 #ifndef __ARCH_S390_CACHE_H
7253 #define __ARCH_S390_CACHE_H
7254
7255-#define L1_CACHE_BYTES 256
7256+#include <linux/const.h>
7257+
7258 #define L1_CACHE_SHIFT 8
7259+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7260 #define NET_SKB_PAD 32
7261
7262 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
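
Several cache.h hunks in this patch (s390 here, score/sh/sparc below) replace a bare numeric L1_CACHE_BYTES with _AC(1,UL) << L1_CACHE_SHIFT. The _AC() macro, from include/uapi/linux/const.h, appends the UL suffix only when compiled as C — the suffix is not valid in assembly — so one header serves both consumers. A self-contained sketch:

/* mirrors include/uapi/linux/const.h */
#ifdef __ASSEMBLY__
#define _AC(X, Y)	X
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)
#endif

#define L1_CACHE_SHIFT	8
#define L1_CACHE_BYTES	(_AC(1,UL) << L1_CACHE_SHIFT)

#include <stdio.h>

int main(void)
{
	printf("L1_CACHE_BYTES = %lu\n", L1_CACHE_BYTES);	/* 256 */
	return 0;
}
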
7263diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
7264index 178ff96..8c93bd1 100644
7265--- a/arch/s390/include/asm/elf.h
7266+++ b/arch/s390/include/asm/elf.h
7267@@ -160,8 +160,14 @@ extern unsigned int vdso_enabled;
7268 the loader. We need to make sure that it is out of the way of the program
7269 that it will "exec", and that there is sufficient room for the brk. */
7270
7271-extern unsigned long randomize_et_dyn(unsigned long base);
7272-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
7273+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
7274+
7275+#ifdef CONFIG_PAX_ASLR
7276+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
7277+
7278+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
7279+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
7280+#endif
7281
7282 /* This yields a mask that user programs can use to figure out what
7283 instruction set this CPU supports. */
7284@@ -210,9 +216,6 @@ struct linux_binprm;
7285 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
7286 int arch_setup_additional_pages(struct linux_binprm *, int);
7287
7288-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7289-#define arch_randomize_brk arch_randomize_brk
7290-
7291 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
7292
7293 #endif
7294diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
7295index c4a93d6..4d2a9b4 100644
7296--- a/arch/s390/include/asm/exec.h
7297+++ b/arch/s390/include/asm/exec.h
7298@@ -7,6 +7,6 @@
7299 #ifndef __ASM_EXEC_H
7300 #define __ASM_EXEC_H
7301
7302-extern unsigned long arch_align_stack(unsigned long sp);
7303+#define arch_align_stack(x) ((x) & ~0xfUL)
7304
7305 #endif /* __ASM_EXEC_H */
7306diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
7307index 34268df..ea97318 100644
7308--- a/arch/s390/include/asm/uaccess.h
7309+++ b/arch/s390/include/asm/uaccess.h
7310@@ -252,6 +252,10 @@ static inline unsigned long __must_check
7311 copy_to_user(void __user *to, const void *from, unsigned long n)
7312 {
7313 might_fault();
7314+
7315+ if ((long)n < 0)
7316+ return n;
7317+
7318 if (access_ok(VERIFY_WRITE, to, n))
7319 n = __copy_to_user(to, from, n);
7320 return n;
7321@@ -277,6 +281,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
7322 static inline unsigned long __must_check
7323 __copy_from_user(void *to, const void __user *from, unsigned long n)
7324 {
7325+ if ((long)n < 0)
7326+ return n;
7327+
7328 if (__builtin_constant_p(n) && (n <= 256))
7329 return uaccess.copy_from_user_small(n, from, to);
7330 else
7331@@ -308,10 +315,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
7332 static inline unsigned long __must_check
7333 copy_from_user(void *to, const void __user *from, unsigned long n)
7334 {
7335- unsigned int sz = __compiletime_object_size(to);
7336+ size_t sz = __compiletime_object_size(to);
7337
7338 might_fault();
7339- if (unlikely(sz != -1 && sz < n)) {
7340+
7341+ if ((long)n < 0)
7342+ return n;
7343+
7344+ if (unlikely(sz != (size_t)-1 && sz < n)) {
7345 copy_from_user_overflow();
7346 return n;
7347 }
7348diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
7349index 4610dea..cf0af21 100644
7350--- a/arch/s390/kernel/module.c
7351+++ b/arch/s390/kernel/module.c
7352@@ -171,11 +171,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
7353
7354 /* Increase core size by size of got & plt and set start
7355 offsets for got and plt. */
7356- me->core_size = ALIGN(me->core_size, 4);
7357- me->arch.got_offset = me->core_size;
7358- me->core_size += me->arch.got_size;
7359- me->arch.plt_offset = me->core_size;
7360- me->core_size += me->arch.plt_size;
7361+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
7362+ me->arch.got_offset = me->core_size_rw;
7363+ me->core_size_rw += me->arch.got_size;
7364+ me->arch.plt_offset = me->core_size_rx;
7365+ me->core_size_rx += me->arch.plt_size;
7366 return 0;
7367 }
7368
7369@@ -252,7 +252,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7370 if (info->got_initialized == 0) {
7371 Elf_Addr *gotent;
7372
7373- gotent = me->module_core + me->arch.got_offset +
7374+ gotent = me->module_core_rw + me->arch.got_offset +
7375 info->got_offset;
7376 *gotent = val;
7377 info->got_initialized = 1;
7378@@ -276,7 +276,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7379 else if (r_type == R_390_GOTENT ||
7380 r_type == R_390_GOTPLTENT)
7381 *(unsigned int *) loc =
7382- (val + (Elf_Addr) me->module_core - loc) >> 1;
7383+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
7384 else if (r_type == R_390_GOT64 ||
7385 r_type == R_390_GOTPLT64)
7386 *(unsigned long *) loc = val;
7387@@ -290,7 +290,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7388 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
7389 if (info->plt_initialized == 0) {
7390 unsigned int *ip;
7391- ip = me->module_core + me->arch.plt_offset +
7392+ ip = me->module_core_rx + me->arch.plt_offset +
7393 info->plt_offset;
7394 #ifndef CONFIG_64BIT
7395 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
7396@@ -315,7 +315,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7397 val - loc + 0xffffUL < 0x1ffffeUL) ||
7398 (r_type == R_390_PLT32DBL &&
7399 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
7400- val = (Elf_Addr) me->module_core +
7401+ val = (Elf_Addr) me->module_core_rx +
7402 me->arch.plt_offset +
7403 info->plt_offset;
7404 val += rela->r_addend - loc;
7405@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7406 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
7407 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
7408 val = val + rela->r_addend -
7409- ((Elf_Addr) me->module_core + me->arch.got_offset);
7410+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
7411 if (r_type == R_390_GOTOFF16)
7412 *(unsigned short *) loc = val;
7413 else if (r_type == R_390_GOTOFF32)
7414@@ -347,7 +347,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7415 break;
7416 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
7417 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
7418- val = (Elf_Addr) me->module_core + me->arch.got_offset +
7419+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
7420 rela->r_addend - loc;
7421 if (r_type == R_390_GOTPC)
7422 *(unsigned int *) loc = val;
7423diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
7424index 536d645..4a5bd9e 100644
7425--- a/arch/s390/kernel/process.c
7426+++ b/arch/s390/kernel/process.c
7427@@ -250,39 +250,3 @@ unsigned long get_wchan(struct task_struct *p)
7428 }
7429 return 0;
7430 }
7431-
7432-unsigned long arch_align_stack(unsigned long sp)
7433-{
7434- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7435- sp -= get_random_int() & ~PAGE_MASK;
7436- return sp & ~0xf;
7437-}
7438-
7439-static inline unsigned long brk_rnd(void)
7440-{
7441- /* 8MB for 32bit, 1GB for 64bit */
7442- if (is_32bit_task())
7443- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
7444- else
7445- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
7446-}
7447-
7448-unsigned long arch_randomize_brk(struct mm_struct *mm)
7449-{
7450- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
7451-
7452- if (ret < mm->brk)
7453- return mm->brk;
7454- return ret;
7455-}
7456-
7457-unsigned long randomize_et_dyn(unsigned long base)
7458-{
7459- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
7460-
7461- if (!(current->flags & PF_RANDOMIZE))
7462- return base;
7463- if (ret < base)
7464- return base;
7465- return ret;
7466-}
7467diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
7468index c59a5ef..3fae59c 100644
7469--- a/arch/s390/mm/mmap.c
7470+++ b/arch/s390/mm/mmap.c
7471@@ -90,10 +90,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7472 */
7473 if (mmap_is_legacy()) {
7474 mm->mmap_base = TASK_UNMAPPED_BASE;
7475+
7476+#ifdef CONFIG_PAX_RANDMMAP
7477+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7478+ mm->mmap_base += mm->delta_mmap;
7479+#endif
7480+
7481 mm->get_unmapped_area = arch_get_unmapped_area;
7482 mm->unmap_area = arch_unmap_area;
7483 } else {
7484 mm->mmap_base = mmap_base();
7485+
7486+#ifdef CONFIG_PAX_RANDMMAP
7487+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7488+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7489+#endif
7490+
7491 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7492 mm->unmap_area = arch_unmap_area_topdown;
7493 }
7494@@ -172,10 +184,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7495 */
7496 if (mmap_is_legacy()) {
7497 mm->mmap_base = TASK_UNMAPPED_BASE;
7498+
7499+#ifdef CONFIG_PAX_RANDMMAP
7500+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7501+ mm->mmap_base += mm->delta_mmap;
7502+#endif
7503+
7504 mm->get_unmapped_area = s390_get_unmapped_area;
7505 mm->unmap_area = arch_unmap_area;
7506 } else {
7507 mm->mmap_base = mmap_base();
7508+
7509+#ifdef CONFIG_PAX_RANDMMAP
7510+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7511+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7512+#endif
7513+
7514 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
7515 mm->unmap_area = arch_unmap_area_topdown;
7516 }
7517diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
7518index ae3d59f..f65f075 100644
7519--- a/arch/score/include/asm/cache.h
7520+++ b/arch/score/include/asm/cache.h
7521@@ -1,7 +1,9 @@
7522 #ifndef _ASM_SCORE_CACHE_H
7523 #define _ASM_SCORE_CACHE_H
7524
7525+#include <linux/const.h>
7526+
7527 #define L1_CACHE_SHIFT 4
7528-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7529+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7530
7531 #endif /* _ASM_SCORE_CACHE_H */
7532diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
7533index f9f3cd5..58ff438 100644
7534--- a/arch/score/include/asm/exec.h
7535+++ b/arch/score/include/asm/exec.h
7536@@ -1,6 +1,6 @@
7537 #ifndef _ASM_SCORE_EXEC_H
7538 #define _ASM_SCORE_EXEC_H
7539
7540-extern unsigned long arch_align_stack(unsigned long sp);
7541+#define arch_align_stack(x) (x)
7542
7543 #endif /* _ASM_SCORE_EXEC_H */
7544diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
7545index 7956846..5f37677 100644
7546--- a/arch/score/kernel/process.c
7547+++ b/arch/score/kernel/process.c
7548@@ -134,8 +134,3 @@ unsigned long get_wchan(struct task_struct *task)
7549
7550 return task_pt_regs(task)->cp0_epc;
7551 }
7552-
7553-unsigned long arch_align_stack(unsigned long sp)
7554-{
7555- return sp;
7556-}
7557diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
7558index ef9e555..331bd29 100644
7559--- a/arch/sh/include/asm/cache.h
7560+++ b/arch/sh/include/asm/cache.h
7561@@ -9,10 +9,11 @@
7562 #define __ASM_SH_CACHE_H
7563 #ifdef __KERNEL__
7564
7565+#include <linux/const.h>
7566 #include <linux/init.h>
7567 #include <cpu/cache.h>
7568
7569-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7570+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7571
7572 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7573
7574diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7575index 03f2b55..b027032 100644
7576--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7577+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7578@@ -143,7 +143,7 @@ shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
7579 return NOTIFY_OK;
7580 }
7581
7582-static struct notifier_block __cpuinitdata shx3_cpu_notifier = {
7583+static struct notifier_block shx3_cpu_notifier = {
7584 .notifier_call = shx3_cpu_callback,
7585 };
7586
7587diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
7588index 6777177..cb5e44f 100644
7589--- a/arch/sh/mm/mmap.c
7590+++ b/arch/sh/mm/mmap.c
7591@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7592 struct mm_struct *mm = current->mm;
7593 struct vm_area_struct *vma;
7594 int do_colour_align;
7595+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7596 struct vm_unmapped_area_info info;
7597
7598 if (flags & MAP_FIXED) {
7599@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7600 if (filp || (flags & MAP_SHARED))
7601 do_colour_align = 1;
7602
7603+#ifdef CONFIG_PAX_RANDMMAP
7604+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7605+#endif
7606+
7607 if (addr) {
7608 if (do_colour_align)
7609 addr = COLOUR_ALIGN(addr, pgoff);
7610@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7611 addr = PAGE_ALIGN(addr);
7612
7613 vma = find_vma(mm, addr);
7614- if (TASK_SIZE - len >= addr &&
7615- (!vma || addr + len <= vma->vm_start))
7616+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7617 return addr;
7618 }
7619
7620 info.flags = 0;
7621 info.length = len;
7622- info.low_limit = TASK_UNMAPPED_BASE;
7623+ info.low_limit = mm->mmap_base;
7624 info.high_limit = TASK_SIZE;
7625 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
7626 info.align_offset = pgoff << PAGE_SHIFT;
7627@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7628 struct mm_struct *mm = current->mm;
7629 unsigned long addr = addr0;
7630 int do_colour_align;
7631+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7632 struct vm_unmapped_area_info info;
7633
7634 if (flags & MAP_FIXED) {
7635@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7636 if (filp || (flags & MAP_SHARED))
7637 do_colour_align = 1;
7638
7639+#ifdef CONFIG_PAX_RANDMMAP
7640+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7641+#endif
7642+
7643 /* requesting a specific address */
7644 if (addr) {
7645 if (do_colour_align)
7646@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7647 addr = PAGE_ALIGN(addr);
7648
7649 vma = find_vma(mm, addr);
7650- if (TASK_SIZE - len >= addr &&
7651- (!vma || addr + len <= vma->vm_start))
7652+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7653 return addr;
7654 }
7655
7656@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7657 VM_BUG_ON(addr != -ENOMEM);
7658 info.flags = 0;
7659 info.low_limit = TASK_UNMAPPED_BASE;
7660+
7661+#ifdef CONFIG_PAX_RANDMMAP
7662+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7663+ info.low_limit += mm->delta_mmap;
7664+#endif
7665+
7666 info.high_limit = TASK_SIZE;
7667 addr = vm_unmapped_area(&info);
7668 }
7669diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
7670index be56a24..443328f 100644
7671--- a/arch/sparc/include/asm/atomic_64.h
7672+++ b/arch/sparc/include/asm/atomic_64.h
7673@@ -14,18 +14,40 @@
7674 #define ATOMIC64_INIT(i) { (i) }
7675
7676 #define atomic_read(v) (*(volatile int *)&(v)->counter)
7677+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7678+{
7679+ return v->counter;
7680+}
7681 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
7682+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7683+{
7684+ return v->counter;
7685+}
7686
7687 #define atomic_set(v, i) (((v)->counter) = i)
7688+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7689+{
7690+ v->counter = i;
7691+}
7692 #define atomic64_set(v, i) (((v)->counter) = i)
7693+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7694+{
7695+ v->counter = i;
7696+}
7697
7698 extern void atomic_add(int, atomic_t *);
7699+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
7700 extern void atomic64_add(long, atomic64_t *);
7701+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
7702 extern void atomic_sub(int, atomic_t *);
7703+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
7704 extern void atomic64_sub(long, atomic64_t *);
7705+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
7706
7707 extern int atomic_add_ret(int, atomic_t *);
7708+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
7709 extern long atomic64_add_ret(long, atomic64_t *);
7710+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
7711 extern int atomic_sub_ret(int, atomic_t *);
7712 extern long atomic64_sub_ret(long, atomic64_t *);
7713
7714@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
7715 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
7716
7717 #define atomic_inc_return(v) atomic_add_ret(1, v)
7718+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7719+{
7720+ return atomic_add_ret_unchecked(1, v);
7721+}
7722 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
7723+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7724+{
7725+ return atomic64_add_ret_unchecked(1, v);
7726+}
7727
7728 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
7729 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
7730
7731 #define atomic_add_return(i, v) atomic_add_ret(i, v)
7732+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7733+{
7734+ return atomic_add_ret_unchecked(i, v);
7735+}
7736 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
7737+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7738+{
7739+ return atomic64_add_ret_unchecked(i, v);
7740+}
7741
7742 /*
7743 * atomic_inc_and_test - increment and test
7744@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
7745 * other cases.
7746 */
7747 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
7748+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7749+{
7750+ return atomic_inc_return_unchecked(v) == 0;
7751+}
7752 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
7753
7754 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
7755@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
7756 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
7757
7758 #define atomic_inc(v) atomic_add(1, v)
7759+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7760+{
7761+ atomic_add_unchecked(1, v);
7762+}
7763 #define atomic64_inc(v) atomic64_add(1, v)
7764+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7765+{
7766+ atomic64_add_unchecked(1, v);
7767+}
7768
7769 #define atomic_dec(v) atomic_sub(1, v)
7770+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7771+{
7772+ atomic_sub_unchecked(1, v);
7773+}
7774 #define atomic64_dec(v) atomic64_sub(1, v)
7775+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7776+{
7777+ atomic64_sub_unchecked(1, v);
7778+}
7779
7780 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
7781 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
7782
7783 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
7784+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7785+{
7786+ return cmpxchg(&v->counter, old, new);
7787+}
7788 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
7789+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7790+{
7791+ return xchg(&v->counter, new);
7792+}
7793
7794 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7795 {
7796- int c, old;
7797+ int c, old, new;
7798 c = atomic_read(v);
7799 for (;;) {
7800- if (unlikely(c == (u)))
7801+ if (unlikely(c == u))
7802 break;
7803- old = atomic_cmpxchg((v), c, c + (a));
7804+
7805+ asm volatile("addcc %2, %0, %0\n"
7806+
7807+#ifdef CONFIG_PAX_REFCOUNT
7808+ "tvs %%icc, 6\n"
7809+#endif
7810+
7811+ : "=r" (new)
7812+ : "0" (c), "ir" (a)
7813+ : "cc");
7814+
7815+ old = atomic_cmpxchg(v, c, new);
7816 if (likely(old == c))
7817 break;
7818 c = old;
7819@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7820 #define atomic64_cmpxchg(v, o, n) \
7821 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
7822 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
7823+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
7824+{
7825+ return xchg(&v->counter, new);
7826+}
7827
7828 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
7829 {
7830- long c, old;
7831+ long c, old, new;
7832 c = atomic64_read(v);
7833 for (;;) {
7834- if (unlikely(c == (u)))
7835+ if (unlikely(c == u))
7836 break;
7837- old = atomic64_cmpxchg((v), c, c + (a));
7838+
7839+ asm volatile("addcc %2, %0, %0\n"
7840+
7841+#ifdef CONFIG_PAX_REFCOUNT
7842+ "tvs %%xcc, 6\n"
7843+#endif
7844+
7845+ : "=r" (new)
7846+ : "0" (c), "ir" (a)
7847+ : "cc");
7848+
7849+ old = atomic64_cmpxchg(v, c, new);
7850 if (likely(old == c))
7851 break;
7852 c = old;
7853 }
7854- return c != (u);
7855+ return c != u;
7856 }
7857
7858 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
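
The addcc/tvs pairs introduced above are the sparc64 PAX_REFCOUNT pattern: addcc updates the condition codes and "tvs %icc, 6" (trap on overflow set) kills the task before a wrapped value lands, so a reference count can never silently overflow into a use-after-free. A portable C model of the same check, with abort() standing in for the trap:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int refcount_add(int a, int c)
{
	if ((a > 0 && c > INT_MAX - a) || (a < 0 && c < INT_MIN - a)) {
		fprintf(stderr, "refcount overflow: killing task\n");
		abort();	/* stand-in for the overflow trap */
	}
	return c + a;
}

int main(void)
{
	int c = INT_MAX - 1;
	c = refcount_add(1, c);		/* ok: reaches INT_MAX */
	printf("counter = %d\n", c);
	c = refcount_add(1, c);		/* would wrap: traps   */
	return 0;
}
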
7859diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
7860index 5bb6991..5c2132e 100644
7861--- a/arch/sparc/include/asm/cache.h
7862+++ b/arch/sparc/include/asm/cache.h
7863@@ -7,10 +7,12 @@
7864 #ifndef _SPARC_CACHE_H
7865 #define _SPARC_CACHE_H
7866
7867+#include <linux/const.h>
7868+
7869 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
7870
7871 #define L1_CACHE_SHIFT 5
7872-#define L1_CACHE_BYTES 32
7873+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7874
7875 #ifdef CONFIG_SPARC32
7876 #define SMP_CACHE_BYTES_SHIFT 5
7877diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
7878index ac74a2c..a9e58af 100644
7879--- a/arch/sparc/include/asm/elf_32.h
7880+++ b/arch/sparc/include/asm/elf_32.h
7881@@ -114,6 +114,13 @@ typedef struct {
7882
7883 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
7884
7885+#ifdef CONFIG_PAX_ASLR
7886+#define PAX_ELF_ET_DYN_BASE 0x10000UL
7887+
7888+#define PAX_DELTA_MMAP_LEN 16
7889+#define PAX_DELTA_STACK_LEN 16
7890+#endif
7891+
7892 /* This yields a mask that user programs can use to figure out what
7893 instruction set this cpu supports. This can NOT be done in userspace
7894 on Sparc. */
7895diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
7896index 370ca1e..d4f4a98 100644
7897--- a/arch/sparc/include/asm/elf_64.h
7898+++ b/arch/sparc/include/asm/elf_64.h
7899@@ -189,6 +189,13 @@ typedef struct {
7900 #define ELF_ET_DYN_BASE 0x0000010000000000UL
7901 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
7902
7903+#ifdef CONFIG_PAX_ASLR
7904+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
7905+
7906+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
7907+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
7908+#endif
7909+
7910 extern unsigned long sparc64_elf_hwcap;
7911 #define ELF_HWCAP sparc64_elf_hwcap
7912
7913diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
7914index 9b1c36d..209298b 100644
7915--- a/arch/sparc/include/asm/pgalloc_32.h
7916+++ b/arch/sparc/include/asm/pgalloc_32.h
7917@@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
7918 }
7919
7920 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
7921+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
7922
7923 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
7924 unsigned long address)
7925diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
7926index bcfe063..b333142 100644
7927--- a/arch/sparc/include/asm/pgalloc_64.h
7928+++ b/arch/sparc/include/asm/pgalloc_64.h
7929@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
7930 }
7931
7932 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
7933+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
7934
7935 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
7936 {
7937diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
7938index 6fc1348..390c50a 100644
7939--- a/arch/sparc/include/asm/pgtable_32.h
7940+++ b/arch/sparc/include/asm/pgtable_32.h
7941@@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
7942 #define PAGE_SHARED SRMMU_PAGE_SHARED
7943 #define PAGE_COPY SRMMU_PAGE_COPY
7944 #define PAGE_READONLY SRMMU_PAGE_RDONLY
7945+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
7946+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
7947+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
7948 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
7949
7950 /* Top-level page directory - dummy used by init-mm.
7951@@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd;
7952
7953 /* xwr */
7954 #define __P000 PAGE_NONE
7955-#define __P001 PAGE_READONLY
7956-#define __P010 PAGE_COPY
7957-#define __P011 PAGE_COPY
7958+#define __P001 PAGE_READONLY_NOEXEC
7959+#define __P010 PAGE_COPY_NOEXEC
7960+#define __P011 PAGE_COPY_NOEXEC
7961 #define __P100 PAGE_READONLY
7962 #define __P101 PAGE_READONLY
7963 #define __P110 PAGE_COPY
7964 #define __P111 PAGE_COPY
7965
7966 #define __S000 PAGE_NONE
7967-#define __S001 PAGE_READONLY
7968-#define __S010 PAGE_SHARED
7969-#define __S011 PAGE_SHARED
7970+#define __S001 PAGE_READONLY_NOEXEC
7971+#define __S010 PAGE_SHARED_NOEXEC
7972+#define __S011 PAGE_SHARED_NOEXEC
7973 #define __S100 PAGE_READONLY
7974 #define __S101 PAGE_READONLY
7975 #define __S110 PAGE_SHARED
7976diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
7977index 79da178..c2eede8 100644
7978--- a/arch/sparc/include/asm/pgtsrmmu.h
7979+++ b/arch/sparc/include/asm/pgtsrmmu.h
7980@@ -115,6 +115,11 @@
7981 SRMMU_EXEC | SRMMU_REF)
7982 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
7983 SRMMU_EXEC | SRMMU_REF)
7984+
7985+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
7986+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
7987+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
7988+
7989 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
7990 SRMMU_DIRTY | SRMMU_REF)
7991
7992diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
7993index 9689176..63c18ea 100644
7994--- a/arch/sparc/include/asm/spinlock_64.h
7995+++ b/arch/sparc/include/asm/spinlock_64.h
7996@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
7997
7998 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
7999
8000-static void inline arch_read_lock(arch_rwlock_t *lock)
8001+static inline void arch_read_lock(arch_rwlock_t *lock)
8002 {
8003 unsigned long tmp1, tmp2;
8004
8005 __asm__ __volatile__ (
8006 "1: ldsw [%2], %0\n"
8007 " brlz,pn %0, 2f\n"
8008-"4: add %0, 1, %1\n"
8009+"4: addcc %0, 1, %1\n"
8010+
8011+#ifdef CONFIG_PAX_REFCOUNT
8012+" tvs %%icc, 6\n"
8013+#endif
8014+
8015 " cas [%2], %0, %1\n"
8016 " cmp %0, %1\n"
8017 " bne,pn %%icc, 1b\n"
8018@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
8019 " .previous"
8020 : "=&r" (tmp1), "=&r" (tmp2)
8021 : "r" (lock)
8022- : "memory");
8023+ : "memory", "cc");
8024 }
8025
8026-static int inline arch_read_trylock(arch_rwlock_t *lock)
8027+static inline int arch_read_trylock(arch_rwlock_t *lock)
8028 {
8029 int tmp1, tmp2;
8030
8031@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
8032 "1: ldsw [%2], %0\n"
8033 " brlz,a,pn %0, 2f\n"
8034 " mov 0, %0\n"
8035-" add %0, 1, %1\n"
8036+" addcc %0, 1, %1\n"
8037+
8038+#ifdef CONFIG_PAX_REFCOUNT
8039+" tvs %%icc, 6\n"
8040+#endif
8041+
8042 " cas [%2], %0, %1\n"
8043 " cmp %0, %1\n"
8044 " bne,pn %%icc, 1b\n"
8045@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
8046 return tmp1;
8047 }
8048
8049-static void inline arch_read_unlock(arch_rwlock_t *lock)
8050+static inline void arch_read_unlock(arch_rwlock_t *lock)
8051 {
8052 unsigned long tmp1, tmp2;
8053
8054 __asm__ __volatile__(
8055 "1: lduw [%2], %0\n"
8056-" sub %0, 1, %1\n"
8057+" subcc %0, 1, %1\n"
8058+
8059+#ifdef CONFIG_PAX_REFCOUNT
8060+" tvs %%icc, 6\n"
8061+#endif
8062+
8063 " cas [%2], %0, %1\n"
8064 " cmp %0, %1\n"
8065 " bne,pn %%xcc, 1b\n"
8066@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
8067 : "memory");
8068 }
8069
8070-static void inline arch_write_lock(arch_rwlock_t *lock)
8071+static inline void arch_write_lock(arch_rwlock_t *lock)
8072 {
8073 unsigned long mask, tmp1, tmp2;
8074
8075@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
8076 : "memory");
8077 }
8078
8079-static void inline arch_write_unlock(arch_rwlock_t *lock)
8080+static inline void arch_write_unlock(arch_rwlock_t *lock)
8081 {
8082 __asm__ __volatile__(
8083 " stw %%g0, [%0]"
8084@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
8085 : "memory");
8086 }
8087
8088-static int inline arch_write_trylock(arch_rwlock_t *lock)
8089+static inline int arch_write_trylock(arch_rwlock_t *lock)
8090 {
8091 unsigned long mask, tmp1, tmp2, result;
8092
8093diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
8094index 25849ae..924c54b 100644
8095--- a/arch/sparc/include/asm/thread_info_32.h
8096+++ b/arch/sparc/include/asm/thread_info_32.h
8097@@ -49,6 +49,8 @@ struct thread_info {
8098 unsigned long w_saved;
8099
8100 struct restart_block restart_block;
8101+
8102+ unsigned long lowest_stack;
8103 };
8104
8105 /*
8106diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
8107index 269bd92..e46a9b8 100644
8108--- a/arch/sparc/include/asm/thread_info_64.h
8109+++ b/arch/sparc/include/asm/thread_info_64.h
8110@@ -63,6 +63,8 @@ struct thread_info {
8111 struct pt_regs *kern_una_regs;
8112 unsigned int kern_una_insn;
8113
8114+ unsigned long lowest_stack;
8115+
8116 unsigned long fpregs[0] __attribute__ ((aligned(64)));
8117 };
8118
8119@@ -192,10 +194,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
8120 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
8121 /* flag bit 6 is available */
8122 #define TIF_32BIT 7 /* 32-bit binary */
8123-/* flag bit 8 is available */
8124+#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
8125 #define TIF_SECCOMP 9 /* secure computing */
8126 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
8127 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
8128+
8129 /* NOTE: Thread flags >= 12 should be ones we have no interest
8130 * in using in assembly, else we can't use the mask as
8131 * an immediate value in instructions such as andcc.
8132@@ -214,12 +217,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
8133 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
8134 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8135 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
8136+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8137
8138 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
8139 _TIF_DO_NOTIFY_RESUME_MASK | \
8140 _TIF_NEED_RESCHED)
8141 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
8142
8143+#define _TIF_WORK_SYSCALL \
8144+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
8145+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
8146+
8147+
8148 /*
8149 * Thread-synchronous status.
8150 *
8151diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
8152index 0167d26..9acd8ed 100644
8153--- a/arch/sparc/include/asm/uaccess.h
8154+++ b/arch/sparc/include/asm/uaccess.h
8155@@ -1,5 +1,13 @@
8156 #ifndef ___ASM_SPARC_UACCESS_H
8157 #define ___ASM_SPARC_UACCESS_H
8158+
8159+#ifdef __KERNEL__
8160+#ifndef __ASSEMBLY__
8161+#include <linux/types.h>
8162+extern void check_object_size(const void *ptr, unsigned long n, bool to);
8163+#endif
8164+#endif
8165+
8166 #if defined(__sparc__) && defined(__arch64__)
8167 #include <asm/uaccess_64.h>
8168 #else
8169diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
8170index 53a28dd..50c38c3 100644
8171--- a/arch/sparc/include/asm/uaccess_32.h
8172+++ b/arch/sparc/include/asm/uaccess_32.h
8173@@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
8174
8175 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
8176 {
8177- if (n && __access_ok((unsigned long) to, n))
8178+ if ((long)n < 0)
8179+ return n;
8180+
8181+ if (n && __access_ok((unsigned long) to, n)) {
8182+ if (!__builtin_constant_p(n))
8183+ check_object_size(from, n, true);
8184 return __copy_user(to, (__force void __user *) from, n);
8185- else
8186+ } else
8187 return n;
8188 }
8189
8190 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
8191 {
8192+ if ((long)n < 0)
8193+ return n;
8194+
8195+ if (!__builtin_constant_p(n))
8196+ check_object_size(from, n, true);
8197+
8198 return __copy_user(to, (__force void __user *) from, n);
8199 }
8200
8201 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
8202 {
8203- if (n && __access_ok((unsigned long) from, n))
8204+ if ((long)n < 0)
8205+ return n;
8206+
8207+ if (n && __access_ok((unsigned long) from, n)) {
8208+ if (!__builtin_constant_p(n))
8209+ check_object_size(to, n, false);
8210 return __copy_user((__force void __user *) to, from, n);
8211- else
8212+ } else
8213 return n;
8214 }
8215
8216 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
8217 {
8218+ if ((long)n < 0)
8219+ return n;
8220+
8221 return __copy_user((__force void __user *) to, from, n);
8222 }
8223
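
Each copy routine gains the same two guards. First, any `n` whose top bit is set is treated as an already-failed copy: a "negative" length is almost always the result of an underflowed size computation, and returning `n` makes the caller see a copy that transferred nothing. Second, for sizes that are not compile-time constants, the kernel-side buffer is handed to `check_object_size()`, the PaX USERCOPY checker declared in the uaccess.h hunk just above and defined elsewhere in the patch, which verifies the pointer really spans `n` bytes of a single valid object. A userspace-compilable sketch of the guard shape only; the `check_object_size()` stub is a stand-in, not the patch's implementation:

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for the PaX checker: the real one validates that
     * [ptr, ptr + n) lies inside one heap or stack object. */
    static void check_object_size(const void *ptr, unsigned long n, int to)
    {
        (void)ptr; (void)n; (void)to;
    }

    static unsigned long copy_to_user_sketch(void *to, const void *from,
                                             unsigned long n)
    {
        if ((long)n < 0)        /* underflowed size: report total failure */
            return n;

        if (!__builtin_constant_p(n))   /* only runtime sizes are checked */
            check_object_size(from, n, 1);

        memcpy(to, from, n);    /* stands in for __copy_user() */
        return 0;               /* bytes left uncopied */
    }

    int main(void)
    {
        char src[8] = "abcdefg", dst[8];

        printf("left: %lu\n", copy_to_user_sketch(dst, src, sizeof(src)));
        printf("left: %lu\n", copy_to_user_sketch(dst, src, -1UL));
        return 0;
    }
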
8224diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
8225index e562d3c..191f176 100644
8226--- a/arch/sparc/include/asm/uaccess_64.h
8227+++ b/arch/sparc/include/asm/uaccess_64.h
8228@@ -10,6 +10,7 @@
8229 #include <linux/compiler.h>
8230 #include <linux/string.h>
8231 #include <linux/thread_info.h>
8232+#include <linux/kernel.h>
8233 #include <asm/asi.h>
8234 #include <asm/spitfire.h>
8235 #include <asm-generic/uaccess-unaligned.h>
8236@@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
8237 static inline unsigned long __must_check
8238 copy_from_user(void *to, const void __user *from, unsigned long size)
8239 {
8240- unsigned long ret = ___copy_from_user(to, from, size);
8241+ unsigned long ret;
8242
8243+ if ((long)size < 0 || size > INT_MAX)
8244+ return size;
8245+
8246+ if (!__builtin_constant_p(size))
8247+ check_object_size(to, size, false);
8248+
8249+ ret = ___copy_from_user(to, from, size);
8250 if (unlikely(ret))
8251 ret = copy_from_user_fixup(to, from, size);
8252
8253@@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
8254 static inline unsigned long __must_check
8255 copy_to_user(void __user *to, const void *from, unsigned long size)
8256 {
8257- unsigned long ret = ___copy_to_user(to, from, size);
8258+ unsigned long ret;
8259
8260+ if ((long)size < 0 || size > INT_MAX)
8261+ return size;
8262+
8263+ if (!__builtin_constant_p(size))
8264+ check_object_size(from, size, true);
8265+
8266+ ret = ___copy_to_user(to, from, size);
8267 if (unlikely(ret))
8268 ret = copy_to_user_fixup(to, from, size);
8269 return ret;
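
The 64-bit versions restructure the same way but widen the range test: besides "negative" sizes they reject anything above INT_MAX before `___copy_from_user()`/`___copy_to_user()` or their fixup paths ever run, presumably to keep the count well inside what every helper in the chain can represent. The shape of that test, as a sketch:

    #include <limits.h>

    /* Sketch of the widened bound from the copy_{from,to}_user hunks. */
    static inline int usercopy_size_ok(unsigned long size)
    {
        return !((long)size < 0 || size > INT_MAX);
    }
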
8270diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
8271index 6cf591b..b49e65a 100644
8272--- a/arch/sparc/kernel/Makefile
8273+++ b/arch/sparc/kernel/Makefile
8274@@ -3,7 +3,7 @@
8275 #
8276
8277 asflags-y := -ansi
8278-ccflags-y := -Werror
8279+#ccflags-y := -Werror
8280
8281 extra-y := head_$(BITS).o
8282
8283diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
8284index be8e862..5b50b12 100644
8285--- a/arch/sparc/kernel/process_32.c
8286+++ b/arch/sparc/kernel/process_32.c
8287@@ -126,14 +126,14 @@ void show_regs(struct pt_regs *r)
8288
8289 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
8290 r->psr, r->pc, r->npc, r->y, print_tainted());
8291- printk("PC: <%pS>\n", (void *) r->pc);
8292+ printk("PC: <%pA>\n", (void *) r->pc);
8293 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8294 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
8295 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
8296 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8297 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
8298 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
8299- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
8300+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
8301
8302 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8303 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
8304@@ -168,7 +168,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
8305 rw = (struct reg_window32 *) fp;
8306 pc = rw->ins[7];
8307 printk("[%08lx : ", pc);
8308- printk("%pS ] ", (void *) pc);
8309+ printk("%pA ] ", (void *) pc);
8310 fp = rw->ins[6];
8311 } while (++count < 16);
8312 printk("\n");
8313diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
8314index cdb80b2..5ca141d 100644
8315--- a/arch/sparc/kernel/process_64.c
8316+++ b/arch/sparc/kernel/process_64.c
8317@@ -181,14 +181,14 @@ static void show_regwindow(struct pt_regs *regs)
8318 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
8319 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
8320 if (regs->tstate & TSTATE_PRIV)
8321- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
8322+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
8323 }
8324
8325 void show_regs(struct pt_regs *regs)
8326 {
8327 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
8328 regs->tpc, regs->tnpc, regs->y, print_tainted());
8329- printk("TPC: <%pS>\n", (void *) regs->tpc);
8330+ printk("TPC: <%pA>\n", (void *) regs->tpc);
8331 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
8332 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
8333 regs->u_regs[3]);
8334@@ -201,7 +201,7 @@ void show_regs(struct pt_regs *regs)
8335 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
8336 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
8337 regs->u_regs[15]);
8338- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
8339+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
8340 show_regwindow(regs);
8341 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
8342 }
8343@@ -290,7 +290,7 @@ void arch_trigger_all_cpu_backtrace(void)
8344 ((tp && tp->task) ? tp->task->pid : -1));
8345
8346 if (gp->tstate & TSTATE_PRIV) {
8347- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
8348+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
8349 (void *) gp->tpc,
8350 (void *) gp->o7,
8351 (void *) gp->i7,
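
All the register and stack dumpers in this patch switch from `%pS` to `%pA`, a printk pointer extension that the grsecurity patch adds to lib/vsprintf.c elsewhere. Its exact semantics aren't restated here; what these hunks rely on is only the dispatch mechanism: the character after `%p` selects a formatter inside vsprintf's `pointer()` helper, the same way `S`/`s` select symbol printing. A deliberately simplified, runnable model of that dispatch (stub names and output are illustrative, not the kernel's):

    #include <stdio.h>

    /* Stub standing in for the kernel's symbol formatter. */
    static const char *symbol_string(const void *ptr, char kind)
    {
        static char buf[64];

        snprintf(buf, sizeof(buf), "<symbol for %p via %%p%c>", ptr, kind);
        return buf;
    }

    /* Model of vsprintf's pointer(): dispatch on the byte after %p. */
    static const char *pointer(char fmt, const void *ptr)
    {
        switch (fmt) {
        case 'S':   /* symbol+offset: what these dumps used before */
        case 's':   /* bare symbol name */
        case 'A':   /* the variant grsecurity adds and uses here */
            return symbol_string(ptr, fmt);
        default:    /* plain hex pointer */
            return "<hex pointer>";
        }
    }

    int main(void)
    {
        int x;

        puts(pointer('A', &x));
        return 0;
    }
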
8352diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
8353index 7ff45e4..a58f271 100644
8354--- a/arch/sparc/kernel/ptrace_64.c
8355+++ b/arch/sparc/kernel/ptrace_64.c
8356@@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
8357 return ret;
8358 }
8359
8360+#ifdef CONFIG_GRKERNSEC_SETXID
8361+extern void gr_delayed_cred_worker(void);
8362+#endif
8363+
8364 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8365 {
8366 int ret = 0;
8367@@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8368 /* do the secure computing check first */
8369 secure_computing_strict(regs->u_regs[UREG_G1]);
8370
8371+#ifdef CONFIG_GRKERNSEC_SETXID
8372+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8373+ gr_delayed_cred_worker();
8374+#endif
8375+
8376 if (test_thread_flag(TIF_SYSCALL_TRACE))
8377 ret = tracehook_report_syscall_entry(regs);
8378
8379@@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8380
8381 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
8382 {
8383+#ifdef CONFIG_GRKERNSEC_SETXID
8384+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8385+ gr_delayed_cred_worker();
8386+#endif
8387+
8388 audit_syscall_exit(regs);
8389
8390 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
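
These ptrace hooks are the consumer side of TIF_GRSEC_SETXID from the thread_info hunk earlier: when one thread of a process changes credentials, grsecurity flags its siblings, and each sibling applies the change at its next syscall boundary (entry and exit both, as above) via `gr_delayed_cred_worker()`, defined in the grsecurity core. The essential pattern is an atomic test-and-clear followed by the deferred work, sketched here in userspace C11 (the names mirror the patch; the mechanics are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    #define TIF_GRSEC_SETXID (1u << 8)

    static atomic_uint thread_flags;

    static void gr_delayed_cred_worker(void)
    {
        puts("applying deferred credential change");
    }

    /* Run at every syscall boundary: clear the flag atomically so the
     * worker fires exactly once per request, even if boundaries race. */
    static void syscall_boundary(void)
    {
        if (atomic_fetch_and(&thread_flags, ~TIF_GRSEC_SETXID) &
            TIF_GRSEC_SETXID)
            gr_delayed_cred_worker();
    }

    int main(void)
    {
        atomic_fetch_or(&thread_flags, TIF_GRSEC_SETXID);
        syscall_boundary();     /* runs the worker */
        syscall_boundary();     /* flag already clear: no-op */
        return 0;
    }
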
8391diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
8392index 2da0bdc..79128d2 100644
8393--- a/arch/sparc/kernel/sys_sparc_32.c
8394+++ b/arch/sparc/kernel/sys_sparc_32.c
8395@@ -52,7 +52,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8396 if (len > TASK_SIZE - PAGE_SIZE)
8397 return -ENOMEM;
8398 if (!addr)
8399- addr = TASK_UNMAPPED_BASE;
8400+ addr = current->mm->mmap_base;
8401
8402 info.flags = 0;
8403 info.length = len;
8404diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
8405index 708bc29..f0129cb 100644
8406--- a/arch/sparc/kernel/sys_sparc_64.c
8407+++ b/arch/sparc/kernel/sys_sparc_64.c
8408@@ -90,13 +90,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8409 struct vm_area_struct * vma;
8410 unsigned long task_size = TASK_SIZE;
8411 int do_color_align;
8412+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8413 struct vm_unmapped_area_info info;
8414
8415 if (flags & MAP_FIXED) {
8416 /* We do not accept a shared mapping if it would violate
8417 * cache aliasing constraints.
8418 */
8419- if ((flags & MAP_SHARED) &&
8420+ if ((filp || (flags & MAP_SHARED)) &&
8421 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
8422 return -EINVAL;
8423 return addr;
8424@@ -111,6 +112,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8425 if (filp || (flags & MAP_SHARED))
8426 do_color_align = 1;
8427
8428+#ifdef CONFIG_PAX_RANDMMAP
8429+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8430+#endif
8431+
8432 if (addr) {
8433 if (do_color_align)
8434 addr = COLOR_ALIGN(addr, pgoff);
8435@@ -118,14 +123,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8436 addr = PAGE_ALIGN(addr);
8437
8438 vma = find_vma(mm, addr);
8439- if (task_size - len >= addr &&
8440- (!vma || addr + len <= vma->vm_start))
8441+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8442 return addr;
8443 }
8444
8445 info.flags = 0;
8446 info.length = len;
8447- info.low_limit = TASK_UNMAPPED_BASE;
8448+ info.low_limit = mm->mmap_base;
8449 info.high_limit = min(task_size, VA_EXCLUDE_START);
8450 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
8451 info.align_offset = pgoff << PAGE_SHIFT;
8452@@ -134,6 +138,12 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8453 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
8454 VM_BUG_ON(addr != -ENOMEM);
8455 info.low_limit = VA_EXCLUDE_END;
8456+
8457+#ifdef CONFIG_PAX_RANDMMAP
8458+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8459+ info.low_limit += mm->delta_mmap;
8460+#endif
8461+
8462 info.high_limit = task_size;
8463 addr = vm_unmapped_area(&info);
8464 }
8465@@ -151,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8466 unsigned long task_size = STACK_TOP32;
8467 unsigned long addr = addr0;
8468 int do_color_align;
8469+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8470 struct vm_unmapped_area_info info;
8471
8472 /* This should only ever run for 32-bit processes. */
8473@@ -160,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8474 /* We do not accept a shared mapping if it would violate
8475 * cache aliasing constraints.
8476 */
8477- if ((flags & MAP_SHARED) &&
8478+ if ((filp || (flags & MAP_SHARED)) &&
8479 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
8480 return -EINVAL;
8481 return addr;
8482@@ -173,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8483 if (filp || (flags & MAP_SHARED))
8484 do_color_align = 1;
8485
8486+#ifdef CONFIG_PAX_RANDMMAP
8487+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8488+#endif
8489+
8490 /* requesting a specific address */
8491 if (addr) {
8492 if (do_color_align)
8493@@ -181,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8494 addr = PAGE_ALIGN(addr);
8495
8496 vma = find_vma(mm, addr);
8497- if (task_size - len >= addr &&
8498- (!vma || addr + len <= vma->vm_start))
8499+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8500 return addr;
8501 }
8502
8503@@ -204,6 +218,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8504 VM_BUG_ON(addr != -ENOMEM);
8505 info.flags = 0;
8506 info.low_limit = TASK_UNMAPPED_BASE;
8507+
8508+#ifdef CONFIG_PAX_RANDMMAP
8509+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8510+ info.low_limit += mm->delta_mmap;
8511+#endif
8512+
8513 info.high_limit = STACK_TOP32;
8514 addr = vm_unmapped_area(&info);
8515 }
8516@@ -264,6 +284,10 @@ static unsigned long mmap_rnd(void)
8517 {
8518 unsigned long rnd = 0UL;
8519
8520+#ifdef CONFIG_PAX_RANDMMAP
8521+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8522+#endif
8523+
8524 if (current->flags & PF_RANDOMIZE) {
8525 unsigned long val = get_random_int();
8526 if (test_thread_flag(TIF_32BIT))
8527@@ -289,6 +313,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8528 gap == RLIM_INFINITY ||
8529 sysctl_legacy_va_layout) {
8530 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
8531+
8532+#ifdef CONFIG_PAX_RANDMMAP
8533+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8534+ mm->mmap_base += mm->delta_mmap;
8535+#endif
8536+
8537 mm->get_unmapped_area = arch_get_unmapped_area;
8538 mm->unmap_area = arch_unmap_area;
8539 } else {
8540@@ -301,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8541 gap = (task_size / 6 * 5);
8542
8543 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
8544+
8545+#ifdef CONFIG_PAX_RANDMMAP
8546+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8547+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8548+#endif
8549+
8550 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
8551 mm->unmap_area = arch_unmap_area_topdown;
8552 }
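
Three related hardening changes run through sys_sparc_64.c. MAP_FIXED requests must now honour SHMLBA colouring for any file-backed mapping, not just MAP_SHARED ones (the `filp ||` additions). Under PAX_RANDMMAP the caller's address hint is skipped and the search bases shift by the per-process `delta_mmap`, so layouts stay randomized even when userspace passes hints. And the open-coded "hint collides with a vma" test becomes `check_heap_stack_gap()`, which additionally enforces a gap against the stack using the `offset` from `gr_rand_threadstack_offset()`; both helpers are defined elsewhere in the patch. A reduced sketch of the collision half of that test (the gap and offset handling of the real helper are omitted):

    /* Accept a hint only if [addr, addr + len) ends at or before the
     * next vma; the patch's check_heap_stack_gap() layers a randomized
     * guard gap on top of this basic test. */
    struct vma_sketch { unsigned long vm_start, vm_end; };

    static int hint_fits(const struct vma_sketch *next_vma,
                         unsigned long addr, unsigned long len)
    {
        if (!next_vma)          /* nothing mapped above the hint */
            return 1;
        return addr + len <= next_vma->vm_start;
    }

    int main(void)
    {
        struct vma_sketch v = { 0x40000000UL, 0x40001000UL };

        return !hint_fits(&v, 0x30000000UL, 0x1000UL);  /* fits: exit 0 */
    }
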
8553diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
8554index e0fed77..604a7e5 100644
8555--- a/arch/sparc/kernel/syscalls.S
8556+++ b/arch/sparc/kernel/syscalls.S
8557@@ -58,7 +58,7 @@ sys32_rt_sigreturn:
8558 #endif
8559 .align 32
8560 1: ldx [%g6 + TI_FLAGS], %l5
8561- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8562+ andcc %l5, _TIF_WORK_SYSCALL, %g0
8563 be,pt %icc, rtrap
8564 nop
8565 call syscall_trace_leave
8566@@ -190,7 +190,7 @@ linux_sparc_syscall32:
8567
8568 srl %i5, 0, %o5 ! IEU1
8569 srl %i2, 0, %o2 ! IEU0 Group
8570- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8571+ andcc %l0, _TIF_WORK_SYSCALL, %g0
8572 bne,pn %icc, linux_syscall_trace32 ! CTI
8573 mov %i0, %l5 ! IEU1
8574 call %l7 ! CTI Group brk forced
8575@@ -213,7 +213,7 @@ linux_sparc_syscall:
8576
8577 mov %i3, %o3 ! IEU1
8578 mov %i4, %o4 ! IEU0 Group
8579- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8580+ andcc %l0, _TIF_WORK_SYSCALL, %g0
8581 bne,pn %icc, linux_syscall_trace ! CTI Group
8582 mov %i0, %l5 ! IEU0
8583 2: call %l7 ! CTI Group brk forced
8584@@ -229,7 +229,7 @@ ret_sys_call:
8585
8586 cmp %o0, -ERESTART_RESTARTBLOCK
8587 bgeu,pn %xcc, 1f
8588- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8589+ andcc %l0, _TIF_WORK_SYSCALL, %g0
8590 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
8591
8592 2:
8593diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
8594index 654e8aa..45f431b 100644
8595--- a/arch/sparc/kernel/sysfs.c
8596+++ b/arch/sparc/kernel/sysfs.c
8597@@ -266,7 +266,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
8598 return NOTIFY_OK;
8599 }
8600
8601-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
8602+static struct notifier_block sysfs_cpu_nb = {
8603 .notifier_call = sysfs_cpu_notify,
8604 };
8605
8606diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
8607index a5785ea..405c5f7 100644
8608--- a/arch/sparc/kernel/traps_32.c
8609+++ b/arch/sparc/kernel/traps_32.c
8610@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
8611 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
8612 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
8613
8614+extern void gr_handle_kernel_exploit(void);
8615+
8616 void die_if_kernel(char *str, struct pt_regs *regs)
8617 {
8618 static int die_counter;
8619@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
8620 count++ < 30 &&
8621 (((unsigned long) rw) >= PAGE_OFFSET) &&
8622 !(((unsigned long) rw) & 0x7)) {
8623- printk("Caller[%08lx]: %pS\n", rw->ins[7],
8624+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
8625 (void *) rw->ins[7]);
8626 rw = (struct reg_window32 *)rw->ins[6];
8627 }
8628 }
8629 printk("Instruction DUMP:");
8630 instruction_dump ((unsigned long *) regs->pc);
8631- if(regs->psr & PSR_PS)
8632+ if(regs->psr & PSR_PS) {
8633+ gr_handle_kernel_exploit();
8634 do_exit(SIGKILL);
8635+ }
8636 do_exit(SIGSEGV);
8637 }
8638
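
The functional change in `die_if_kernel()` is the added brace block: when the oops happened in privileged mode (PSR_PS set), `gr_handle_kernel_exploit()` runs before the SIGKILL exit. The handler itself lives in the grsecurity core; its intent is to treat a kernel-mode crash as a potential exploit attempt and respond, with the specific response (such as locking out the offending user) being configuration-dependent and not shown in this excerpt. The control-flow shape, as a sketch:

    #include <stdio.h>

    /* Illustrative stand-in for grsecurity's handler. */
    static void gr_handle_kernel_exploit(void)
    {
        fputs("kernel-mode crash: exploit response\n", stderr);
    }

    static void die_if_kernel_sketch(int kernel_mode)
    {
        if (kernel_mode) {
            gr_handle_kernel_exploit();
            return;             /* do_exit(SIGKILL) in the real code */
        }
        /* do_exit(SIGSEGV) in the real code */
    }

    int main(void)
    {
        die_if_kernel_sketch(1);
        return 0;
    }
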
8639diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
8640index e7ecf15..6520e65 100644
8641--- a/arch/sparc/kernel/traps_64.c
8642+++ b/arch/sparc/kernel/traps_64.c
8643@@ -76,7 +76,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
8644 i + 1,
8645 p->trapstack[i].tstate, p->trapstack[i].tpc,
8646 p->trapstack[i].tnpc, p->trapstack[i].tt);
8647- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
8648+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
8649 }
8650 }
8651
8652@@ -96,6 +96,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
8653
8654 lvl -= 0x100;
8655 if (regs->tstate & TSTATE_PRIV) {
8656+
8657+#ifdef CONFIG_PAX_REFCOUNT
8658+ if (lvl == 6)
8659+ pax_report_refcount_overflow(regs);
8660+#endif
8661+
8662 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
8663 die_if_kernel(buffer, regs);
8664 }
8665@@ -114,11 +120,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
8666 void bad_trap_tl1(struct pt_regs *regs, long lvl)
8667 {
8668 char buffer[32];
8669-
8670+
8671 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
8672 0, lvl, SIGTRAP) == NOTIFY_STOP)
8673 return;
8674
8675+#ifdef CONFIG_PAX_REFCOUNT
8676+ if (lvl == 6)
8677+ pax_report_refcount_overflow(regs);
8678+#endif
8679+
8680 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
8681
8682 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
8683@@ -1142,7 +1153,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
8684 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
8685 printk("%s" "ERROR(%d): ",
8686 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
8687- printk("TPC<%pS>\n", (void *) regs->tpc);
8688+ printk("TPC<%pA>\n", (void *) regs->tpc);
8689 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
8690 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
8691 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
8692@@ -1749,7 +1760,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
8693 smp_processor_id(),
8694 (type & 0x1) ? 'I' : 'D',
8695 regs->tpc);
8696- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
8697+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
8698 panic("Irrecoverable Cheetah+ parity error.");
8699 }
8700
8701@@ -1757,7 +1768,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
8702 smp_processor_id(),
8703 (type & 0x1) ? 'I' : 'D',
8704 regs->tpc);
8705- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
8706+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
8707 }
8708
8709 struct sun4v_error_entry {
8710@@ -2104,9 +2115,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
8711
8712 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
8713 regs->tpc, tl);
8714- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
8715+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
8716 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
8717- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
8718+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
8719 (void *) regs->u_regs[UREG_I7]);
8720 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
8721 "pte[%lx] error[%lx]\n",
8722@@ -2128,9 +2139,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
8723
8724 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
8725 regs->tpc, tl);
8726- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
8727+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
8728 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
8729- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
8730+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
8731 (void *) regs->u_regs[UREG_I7]);
8732 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
8733 "pte[%lx] error[%lx]\n",
8734@@ -2336,13 +2347,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
8735 fp = (unsigned long)sf->fp + STACK_BIAS;
8736 }
8737
8738- printk(" [%016lx] %pS\n", pc, (void *) pc);
8739+ printk(" [%016lx] %pA\n", pc, (void *) pc);
8740 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
8741 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
8742 int index = tsk->curr_ret_stack;
8743 if (tsk->ret_stack && index >= graph) {
8744 pc = tsk->ret_stack[index - graph].ret;
8745- printk(" [%016lx] %pS\n", pc, (void *) pc);
8746+ printk(" [%016lx] %pA\n", pc, (void *) pc);
8747 graph++;
8748 }
8749 }
8750@@ -2367,6 +2378,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
8751 return (struct reg_window *) (fp + STACK_BIAS);
8752 }
8753
8754+extern void gr_handle_kernel_exploit(void);
8755+
8756 void die_if_kernel(char *str, struct pt_regs *regs)
8757 {
8758 static int die_counter;
8759@@ -2395,7 +2408,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
8760 while (rw &&
8761 count++ < 30 &&
8762 kstack_valid(tp, (unsigned long) rw)) {
8763- printk("Caller[%016lx]: %pS\n", rw->ins[7],
8764+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
8765 (void *) rw->ins[7]);
8766
8767 rw = kernel_stack_up(rw);
8768@@ -2408,8 +2421,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
8769 }
8770 user_instruction_dump ((unsigned int __user *) regs->tpc);
8771 }
8772- if (regs->tstate & TSTATE_PRIV)
8773+ if (regs->tstate & TSTATE_PRIV) {
8774+ gr_handle_kernel_exploit();
8775 do_exit(SIGKILL);
8776+ }
8777 do_exit(SIGSEGV);
8778 }
8779 EXPORT_SYMBOL(die_if_kernel);
8780diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
8781index 8201c25e..072a2a7 100644
8782--- a/arch/sparc/kernel/unaligned_64.c
8783+++ b/arch/sparc/kernel/unaligned_64.c
8784@@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
8785 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
8786
8787 if (__ratelimit(&ratelimit)) {
8788- printk("Kernel unaligned access at TPC[%lx] %pS\n",
8789+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
8790 regs->tpc, (void *) regs->tpc);
8791 }
8792 }
8793diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
8794index 8410065f2..4fd4ca22 100644
8795--- a/arch/sparc/lib/Makefile
8796+++ b/arch/sparc/lib/Makefile
8797@@ -2,7 +2,7 @@
8798 #
8799
8800 asflags-y := -ansi -DST_DIV0=0x02
8801-ccflags-y := -Werror
8802+#ccflags-y := -Werror
8803
8804 lib-$(CONFIG_SPARC32) += ashrdi3.o
8805 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
8806diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
8807index 85c233d..68500e0 100644
8808--- a/arch/sparc/lib/atomic_64.S
8809+++ b/arch/sparc/lib/atomic_64.S
8810@@ -17,7 +17,12 @@
8811 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
8812 BACKOFF_SETUP(%o2)
8813 1: lduw [%o1], %g1
8814- add %g1, %o0, %g7
8815+ addcc %g1, %o0, %g7
8816+
8817+#ifdef CONFIG_PAX_REFCOUNT
8818+ tvs %icc, 6
8819+#endif
8820+
8821 cas [%o1], %g1, %g7
8822 cmp %g1, %g7
8823 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
8824@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
8825 2: BACKOFF_SPIN(%o2, %o3, 1b)
8826 ENDPROC(atomic_add)
8827
8828+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
8829+ BACKOFF_SETUP(%o2)
8830+1: lduw [%o1], %g1
8831+ add %g1, %o0, %g7
8832+ cas [%o1], %g1, %g7
8833+ cmp %g1, %g7
8834+ bne,pn %icc, 2f
8835+ nop
8836+ retl
8837+ nop
8838+2: BACKOFF_SPIN(%o2, %o3, 1b)
8839+ENDPROC(atomic_add_unchecked)
8840+
8841 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
8842 BACKOFF_SETUP(%o2)
8843 1: lduw [%o1], %g1
8844- sub %g1, %o0, %g7
8845+ subcc %g1, %o0, %g7
8846+
8847+#ifdef CONFIG_PAX_REFCOUNT
8848+ tvs %icc, 6
8849+#endif
8850+
8851 cas [%o1], %g1, %g7
8852 cmp %g1, %g7
8853 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
8854@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
8855 2: BACKOFF_SPIN(%o2, %o3, 1b)
8856 ENDPROC(atomic_sub)
8857
8858+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
8859+ BACKOFF_SETUP(%o2)
8860+1: lduw [%o1], %g1
8861+ sub %g1, %o0, %g7
8862+ cas [%o1], %g1, %g7
8863+ cmp %g1, %g7
8864+ bne,pn %icc, 2f
8865+ nop
8866+ retl
8867+ nop
8868+2: BACKOFF_SPIN(%o2, %o3, 1b)
8869+ENDPROC(atomic_sub_unchecked)
8870+
8871 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
8872 BACKOFF_SETUP(%o2)
8873 1: lduw [%o1], %g1
8874- add %g1, %o0, %g7
8875+ addcc %g1, %o0, %g7
8876+
8877+#ifdef CONFIG_PAX_REFCOUNT
8878+ tvs %icc, 6
8879+#endif
8880+
8881 cas [%o1], %g1, %g7
8882 cmp %g1, %g7
8883 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
8884@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
8885 2: BACKOFF_SPIN(%o2, %o3, 1b)
8886 ENDPROC(atomic_add_ret)
8887
8888+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
8889+ BACKOFF_SETUP(%o2)
8890+1: lduw [%o1], %g1
8891+ addcc %g1, %o0, %g7
8892+ cas [%o1], %g1, %g7
8893+ cmp %g1, %g7
8894+ bne,pn %icc, 2f
8895+ add %g7, %o0, %g7
8896+ sra %g7, 0, %o0
8897+ retl
8898+ nop
8899+2: BACKOFF_SPIN(%o2, %o3, 1b)
8900+ENDPROC(atomic_add_ret_unchecked)
8901+
8902 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
8903 BACKOFF_SETUP(%o2)
8904 1: lduw [%o1], %g1
8905- sub %g1, %o0, %g7
8906+ subcc %g1, %o0, %g7
8907+
8908+#ifdef CONFIG_PAX_REFCOUNT
8909+ tvs %icc, 6
8910+#endif
8911+
8912 cas [%o1], %g1, %g7
8913 cmp %g1, %g7
8914 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
8915@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
8916 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
8917 BACKOFF_SETUP(%o2)
8918 1: ldx [%o1], %g1
8919- add %g1, %o0, %g7
8920+ addcc %g1, %o0, %g7
8921+
8922+#ifdef CONFIG_PAX_REFCOUNT
8923+ tvs %xcc, 6
8924+#endif
8925+
8926 casx [%o1], %g1, %g7
8927 cmp %g1, %g7
8928 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
8929@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
8930 2: BACKOFF_SPIN(%o2, %o3, 1b)
8931 ENDPROC(atomic64_add)
8932
8933+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
8934+ BACKOFF_SETUP(%o2)
8935+1: ldx [%o1], %g1
8936+ addcc %g1, %o0, %g7
8937+ casx [%o1], %g1, %g7
8938+ cmp %g1, %g7
8939+ bne,pn %xcc, 2f
8940+ nop
8941+ retl
8942+ nop
8943+2: BACKOFF_SPIN(%o2, %o3, 1b)
8944+ENDPROC(atomic64_add_unchecked)
8945+
8946 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
8947 BACKOFF_SETUP(%o2)
8948 1: ldx [%o1], %g1
8949- sub %g1, %o0, %g7
8950+ subcc %g1, %o0, %g7
8951+
8952+#ifdef CONFIG_PAX_REFCOUNT
8953+ tvs %xcc, 6
8954+#endif
8955+
8956 casx [%o1], %g1, %g7
8957 cmp %g1, %g7
8958 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
8959@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
8960 2: BACKOFF_SPIN(%o2, %o3, 1b)
8961 ENDPROC(atomic64_sub)
8962
8963+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
8964+ BACKOFF_SETUP(%o2)
8965+1: ldx [%o1], %g1
8966+ subcc %g1, %o0, %g7
8967+ casx [%o1], %g1, %g7
8968+ cmp %g1, %g7
8969+ bne,pn %xcc, 2f
8970+ nop
8971+ retl
8972+ nop
8973+2: BACKOFF_SPIN(%o2, %o3, 1b)
8974+ENDPROC(atomic64_sub_unchecked)
8975+
8976 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
8977 BACKOFF_SETUP(%o2)
8978 1: ldx [%o1], %g1
8979- add %g1, %o0, %g7
8980+ addcc %g1, %o0, %g7
8981+
8982+#ifdef CONFIG_PAX_REFCOUNT
8983+ tvs %xcc, 6
8984+#endif
8985+
8986 casx [%o1], %g1, %g7
8987 cmp %g1, %g7
8988 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
8989@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
8990 2: BACKOFF_SPIN(%o2, %o3, 1b)
8991 ENDPROC(atomic64_add_ret)
8992
8993+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
8994+ BACKOFF_SETUP(%o2)
8995+1: ldx [%o1], %g1
8996+ addcc %g1, %o0, %g7
8997+ casx [%o1], %g1, %g7
8998+ cmp %g1, %g7
8999+ bne,pn %xcc, 2f
9000+ add %g7, %o0, %g7
9001+ mov %g7, %o0
9002+ retl
9003+ nop
9004+2: BACKOFF_SPIN(%o2, %o3, 1b)
9005+ENDPROC(atomic64_add_ret_unchecked)
9006+
9007 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
9008 BACKOFF_SETUP(%o2)
9009 1: ldx [%o1], %g1
9010- sub %g1, %o0, %g7
9011+ subcc %g1, %o0, %g7
9012+
9013+#ifdef CONFIG_PAX_REFCOUNT
9014+ tvs %xcc, 6
9015+#endif
9016+
9017 casx [%o1], %g1, %g7
9018 cmp %g1, %g7
9019 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
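
This file is the SPARC64 arithmetic half of PAX_REFCOUNT. Every checked atomic swaps plain `add`/`sub` for `addcc`/`subcc` so the condition codes reflect the result, then issues `tvs %icc, 6` (`%xcc` for the 64-bit ops): trap if the overflow bit is set, into software trap 6. That trap arrives in `bad_trap()` from the traps_64.c hunk earlier as lvl 0x106, and the `lvl -= 0x100; if (lvl == 6)` path added there routes it to `pax_report_refcount_overflow()`. Counters that are allowed to wrap use the new `_unchecked` entry points, which keep the original non-trapping arithmetic. The same detect-before-publish idea in portable C, using the GCC/Clang overflow builtins rather than anything from the patch:

    #include <stdio.h>

    /* Portable analogue of addcc + tvs: detect signed overflow on an
     * increment and refuse to publish the wrapped value. */
    static int checked_inc(int *counter)
    {
        int next;

        if (__builtin_add_overflow(*counter, 1, &next))
            return -1;      /* the kernel would trap and report here */
        *counter = next;
        return 0;
    }

    int main(void)
    {
        int c = 0x7fffffff; /* INT_MAX: the next increment overflows */

        if (checked_inc(&c))
            puts("refcount overflow caught");
        return 0;
    }
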
9020diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
9021index 0c4e35e..745d3e4 100644
9022--- a/arch/sparc/lib/ksyms.c
9023+++ b/arch/sparc/lib/ksyms.c
9024@@ -109,12 +109,18 @@ EXPORT_SYMBOL(__downgrade_write);
9025
9026 /* Atomic counter implementation. */
9027 EXPORT_SYMBOL(atomic_add);
9028+EXPORT_SYMBOL(atomic_add_unchecked);
9029 EXPORT_SYMBOL(atomic_add_ret);
9030+EXPORT_SYMBOL(atomic_add_ret_unchecked);
9031 EXPORT_SYMBOL(atomic_sub);
9032+EXPORT_SYMBOL(atomic_sub_unchecked);
9033 EXPORT_SYMBOL(atomic_sub_ret);
9034 EXPORT_SYMBOL(atomic64_add);
9035+EXPORT_SYMBOL(atomic64_add_unchecked);
9036 EXPORT_SYMBOL(atomic64_add_ret);
9037+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
9038 EXPORT_SYMBOL(atomic64_sub);
9039+EXPORT_SYMBOL(atomic64_sub_unchecked);
9040 EXPORT_SYMBOL(atomic64_sub_ret);
9041 EXPORT_SYMBOL(atomic64_dec_if_positive);
9042
9043diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
9044index 30c3ecc..736f015 100644
9045--- a/arch/sparc/mm/Makefile
9046+++ b/arch/sparc/mm/Makefile
9047@@ -2,7 +2,7 @@
9048 #
9049
9050 asflags-y := -ansi
9051-ccflags-y := -Werror
9052+#ccflags-y := -Werror
9053
9054 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
9055 obj-y += fault_$(BITS).o
9056diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
9057index e98bfda..ea8d221 100644
9058--- a/arch/sparc/mm/fault_32.c
9059+++ b/arch/sparc/mm/fault_32.c
9060@@ -21,6 +21,9 @@
9061 #include <linux/perf_event.h>
9062 #include <linux/interrupt.h>
9063 #include <linux/kdebug.h>
9064+#include <linux/slab.h>
9065+#include <linux/pagemap.h>
9066+#include <linux/compiler.h>
9067
9068 #include <asm/page.h>
9069 #include <asm/pgtable.h>
9070@@ -159,6 +162,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
9071 return safe_compute_effective_address(regs, insn);
9072 }
9073
9074+#ifdef CONFIG_PAX_PAGEEXEC
9075+#ifdef CONFIG_PAX_DLRESOLVE
9076+static void pax_emuplt_close(struct vm_area_struct *vma)
9077+{
9078+ vma->vm_mm->call_dl_resolve = 0UL;
9079+}
9080+
9081+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9082+{
9083+ unsigned int *kaddr;
9084+
9085+ vmf->page = alloc_page(GFP_HIGHUSER);
9086+ if (!vmf->page)
9087+ return VM_FAULT_OOM;
9088+
9089+ kaddr = kmap(vmf->page);
9090+ memset(kaddr, 0, PAGE_SIZE);
9091+ kaddr[0] = 0x9DE3BFA8U; /* save */
9092+ flush_dcache_page(vmf->page);
9093+ kunmap(vmf->page);
9094+ return VM_FAULT_MAJOR;
9095+}
9096+
9097+static const struct vm_operations_struct pax_vm_ops = {
9098+ .close = pax_emuplt_close,
9099+ .fault = pax_emuplt_fault
9100+};
9101+
9102+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
9103+{
9104+ int ret;
9105+
9106+ INIT_LIST_HEAD(&vma->anon_vma_chain);
9107+ vma->vm_mm = current->mm;
9108+ vma->vm_start = addr;
9109+ vma->vm_end = addr + PAGE_SIZE;
9110+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
9111+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
9112+ vma->vm_ops = &pax_vm_ops;
9113+
9114+ ret = insert_vm_struct(current->mm, vma);
9115+ if (ret)
9116+ return ret;
9117+
9118+ ++current->mm->total_vm;
9119+ return 0;
9120+}
9121+#endif
9122+
9123+/*
9124+ * PaX: decide what to do with offenders (regs->pc = fault address)
9125+ *
9126+ * returns 1 when task should be killed
9127+ * 2 when patched PLT trampoline was detected
9128+ * 3 when unpatched PLT trampoline was detected
9129+ */
9130+static int pax_handle_fetch_fault(struct pt_regs *regs)
9131+{
9132+
9133+#ifdef CONFIG_PAX_EMUPLT
9134+ int err;
9135+
9136+ do { /* PaX: patched PLT emulation #1 */
9137+ unsigned int sethi1, sethi2, jmpl;
9138+
9139+ err = get_user(sethi1, (unsigned int *)regs->pc);
9140+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
9141+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
9142+
9143+ if (err)
9144+ break;
9145+
9146+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
9147+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
9148+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
9149+ {
9150+ unsigned int addr;
9151+
9152+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
9153+ addr = regs->u_regs[UREG_G1];
9154+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9155+ regs->pc = addr;
9156+ regs->npc = addr+4;
9157+ return 2;
9158+ }
9159+ } while (0);
9160+
9161+ do { /* PaX: patched PLT emulation #2 */
9162+ unsigned int ba;
9163+
9164+ err = get_user(ba, (unsigned int *)regs->pc);
9165+
9166+ if (err)
9167+ break;
9168+
9169+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
9170+ unsigned int addr;
9171+
9172+ if ((ba & 0xFFC00000U) == 0x30800000U)
9173+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
9174+ else
9175+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9176+ regs->pc = addr;
9177+ regs->npc = addr+4;
9178+ return 2;
9179+ }
9180+ } while (0);
9181+
9182+ do { /* PaX: patched PLT emulation #3 */
9183+ unsigned int sethi, bajmpl, nop;
9184+
9185+ err = get_user(sethi, (unsigned int *)regs->pc);
9186+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
9187+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
9188+
9189+ if (err)
9190+ break;
9191+
9192+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9193+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
9194+ nop == 0x01000000U)
9195+ {
9196+ unsigned int addr;
9197+
9198+ addr = (sethi & 0x003FFFFFU) << 10;
9199+ regs->u_regs[UREG_G1] = addr;
9200+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
9201+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9202+ else
9203+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9204+ regs->pc = addr;
9205+ regs->npc = addr+4;
9206+ return 2;
9207+ }
9208+ } while (0);
9209+
9210+ do { /* PaX: unpatched PLT emulation step 1 */
9211+ unsigned int sethi, ba, nop;
9212+
9213+ err = get_user(sethi, (unsigned int *)regs->pc);
9214+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
9215+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
9216+
9217+ if (err)
9218+ break;
9219+
9220+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9221+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
9222+ nop == 0x01000000U)
9223+ {
9224+ unsigned int addr, save, call;
9225+
9226+ if ((ba & 0xFFC00000U) == 0x30800000U)
9227+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
9228+ else
9229+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9230+
9231+ err = get_user(save, (unsigned int *)addr);
9232+ err |= get_user(call, (unsigned int *)(addr+4));
9233+ err |= get_user(nop, (unsigned int *)(addr+8));
9234+ if (err)
9235+ break;
9236+
9237+#ifdef CONFIG_PAX_DLRESOLVE
9238+ if (save == 0x9DE3BFA8U &&
9239+ (call & 0xC0000000U) == 0x40000000U &&
9240+ nop == 0x01000000U)
9241+ {
9242+ struct vm_area_struct *vma;
9243+ unsigned long call_dl_resolve;
9244+
9245+ down_read(&current->mm->mmap_sem);
9246+ call_dl_resolve = current->mm->call_dl_resolve;
9247+ up_read(&current->mm->mmap_sem);
9248+ if (likely(call_dl_resolve))
9249+ goto emulate;
9250+
9251+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
9252+
9253+ down_write(&current->mm->mmap_sem);
9254+ if (current->mm->call_dl_resolve) {
9255+ call_dl_resolve = current->mm->call_dl_resolve;
9256+ up_write(&current->mm->mmap_sem);
9257+ if (vma)
9258+ kmem_cache_free(vm_area_cachep, vma);
9259+ goto emulate;
9260+ }
9261+
9262+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
9263+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
9264+ up_write(&current->mm->mmap_sem);
9265+ if (vma)
9266+ kmem_cache_free(vm_area_cachep, vma);
9267+ return 1;
9268+ }
9269+
9270+ if (pax_insert_vma(vma, call_dl_resolve)) {
9271+ up_write(&current->mm->mmap_sem);
9272+ kmem_cache_free(vm_area_cachep, vma);
9273+ return 1;
9274+ }
9275+
9276+ current->mm->call_dl_resolve = call_dl_resolve;
9277+ up_write(&current->mm->mmap_sem);
9278+
9279+emulate:
9280+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9281+ regs->pc = call_dl_resolve;
9282+ regs->npc = addr+4;
9283+ return 3;
9284+ }
9285+#endif
9286+
9287+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
9288+ if ((save & 0xFFC00000U) == 0x05000000U &&
9289+ (call & 0xFFFFE000U) == 0x85C0A000U &&
9290+ nop == 0x01000000U)
9291+ {
9292+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9293+ regs->u_regs[UREG_G2] = addr + 4;
9294+ addr = (save & 0x003FFFFFU) << 10;
9295+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9296+ regs->pc = addr;
9297+ regs->npc = addr+4;
9298+ return 3;
9299+ }
9300+ }
9301+ } while (0);
9302+
9303+ do { /* PaX: unpatched PLT emulation step 2 */
9304+ unsigned int save, call, nop;
9305+
9306+ err = get_user(save, (unsigned int *)(regs->pc-4));
9307+ err |= get_user(call, (unsigned int *)regs->pc);
9308+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
9309+ if (err)
9310+ break;
9311+
9312+ if (save == 0x9DE3BFA8U &&
9313+ (call & 0xC0000000U) == 0x40000000U &&
9314+ nop == 0x01000000U)
9315+ {
9316+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
9317+
9318+ regs->u_regs[UREG_RETPC] = regs->pc;
9319+ regs->pc = dl_resolve;
9320+ regs->npc = dl_resolve+4;
9321+ return 3;
9322+ }
9323+ } while (0);
9324+#endif
9325+
9326+ return 1;
9327+}
9328+
9329+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9330+{
9331+ unsigned long i;
9332+
9333+ printk(KERN_ERR "PAX: bytes at PC: ");
9334+ for (i = 0; i < 8; i++) {
9335+ unsigned int c;
9336+ if (get_user(c, (unsigned int *)pc+i))
9337+ printk(KERN_CONT "???????? ");
9338+ else
9339+ printk(KERN_CONT "%08x ", c);
9340+ }
9341+ printk("\n");
9342+}
9343+#endif
9344+
9345 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
9346 int text_fault)
9347 {
9348@@ -230,6 +504,24 @@ good_area:
9349 if (!(vma->vm_flags & VM_WRITE))
9350 goto bad_area;
9351 } else {
9352+
9353+#ifdef CONFIG_PAX_PAGEEXEC
9354+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
9355+ up_read(&mm->mmap_sem);
9356+ switch (pax_handle_fetch_fault(regs)) {
9357+
9358+#ifdef CONFIG_PAX_EMUPLT
9359+ case 2:
9360+ case 3:
9361+ return;
9362+#endif
9363+
9364+ }
9365+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
9366+ do_group_exit(SIGKILL);
9367+ }
9368+#endif
9369+
9370 /* Allow reads even for write-only mappings */
9371 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
9372 goto bad_area;
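
With PAX_PAGEEXEC enforcing non-executable pages, legitimate runtime-generated code, most importantly the dynamic linker's PLT thunks, would fault on instruction fetch. The `pax_handle_fetch_fault()` added above therefore fingerprints each known PLT shape by masking opcode fields of the faulting instructions and, on a match, emulates the thunk by computing its target and rewriting `pc`/`npc` rather than ever executing the page. The recurring decode is sethi's 22-bit immediate: `(insn & 0x003FFFFFU) << 10` recovers the value that `sethi %hi(value), %g1` loads. A standalone check of that decode (the example constant is arbitrary and has its low 10 bits clear, so the round trip is exact):

    #include <assert.h>
    #include <stdio.h>

    /* sethi %hi(value), %g1 carries the top 22 bits of 'value' in the
     * low 22 bits of the instruction; decoding shifts them back up. */
    static unsigned int sethi_imm(unsigned int insn)
    {
        return (insn & 0x003FFFFFu) << 10;
    }

    int main(void)
    {
        /* Encode sethi %hi(0x12345400), %g1 the way ld.so would emit it. */
        unsigned int insn = 0x03000000u | (0x12345400u >> 10);

        assert((insn & 0xFFC00000u) == 0x03000000u); /* the patch's match */
        printf("%#x\n", sethi_imm(insn));            /* 0x12345400 */
        return 0;
    }
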
9373diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
9374index 5062ff3..e0b75f3 100644
9375--- a/arch/sparc/mm/fault_64.c
9376+++ b/arch/sparc/mm/fault_64.c
9377@@ -21,6 +21,9 @@
9378 #include <linux/kprobes.h>
9379 #include <linux/kdebug.h>
9380 #include <linux/percpu.h>
9381+#include <linux/slab.h>
9382+#include <linux/pagemap.h>
9383+#include <linux/compiler.h>
9384
9385 #include <asm/page.h>
9386 #include <asm/pgtable.h>
9387@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
9388 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
9389 regs->tpc);
9390 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
9391- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
9392+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
9393 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
9394 dump_stack();
9395 unhandled_fault(regs->tpc, current, regs);
9396@@ -270,6 +273,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
9397 show_regs(regs);
9398 }
9399
9400+#ifdef CONFIG_PAX_PAGEEXEC
9401+#ifdef CONFIG_PAX_DLRESOLVE
9402+static void pax_emuplt_close(struct vm_area_struct *vma)
9403+{
9404+ vma->vm_mm->call_dl_resolve = 0UL;
9405+}
9406+
9407+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9408+{
9409+ unsigned int *kaddr;
9410+
9411+ vmf->page = alloc_page(GFP_HIGHUSER);
9412+ if (!vmf->page)
9413+ return VM_FAULT_OOM;
9414+
9415+ kaddr = kmap(vmf->page);
9416+ memset(kaddr, 0, PAGE_SIZE);
9417+ kaddr[0] = 0x9DE3BFA8U; /* save */
9418+ flush_dcache_page(vmf->page);
9419+ kunmap(vmf->page);
9420+ return VM_FAULT_MAJOR;
9421+}
9422+
9423+static const struct vm_operations_struct pax_vm_ops = {
9424+ .close = pax_emuplt_close,
9425+ .fault = pax_emuplt_fault
9426+};
9427+
9428+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
9429+{
9430+ int ret;
9431+
9432+ INIT_LIST_HEAD(&vma->anon_vma_chain);
9433+ vma->vm_mm = current->mm;
9434+ vma->vm_start = addr;
9435+ vma->vm_end = addr + PAGE_SIZE;
9436+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
9437+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
9438+ vma->vm_ops = &pax_vm_ops;
9439+
9440+ ret = insert_vm_struct(current->mm, vma);
9441+ if (ret)
9442+ return ret;
9443+
9444+ ++current->mm->total_vm;
9445+ return 0;
9446+}
9447+#endif
9448+
9449+/*
9450+ * PaX: decide what to do with offenders (regs->tpc = fault address)
9451+ *
9452+ * returns 1 when task should be killed
9453+ * 2 when patched PLT trampoline was detected
9454+ * 3 when unpatched PLT trampoline was detected
9455+ */
9456+static int pax_handle_fetch_fault(struct pt_regs *regs)
9457+{
9458+
9459+#ifdef CONFIG_PAX_EMUPLT
9460+ int err;
9461+
9462+ do { /* PaX: patched PLT emulation #1 */
9463+ unsigned int sethi1, sethi2, jmpl;
9464+
9465+ err = get_user(sethi1, (unsigned int *)regs->tpc);
9466+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
9467+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
9468+
9469+ if (err)
9470+ break;
9471+
9472+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
9473+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
9474+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
9475+ {
9476+ unsigned long addr;
9477+
9478+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
9479+ addr = regs->u_regs[UREG_G1];
9480+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9481+
9482+ if (test_thread_flag(TIF_32BIT))
9483+ addr &= 0xFFFFFFFFUL;
9484+
9485+ regs->tpc = addr;
9486+ regs->tnpc = addr+4;
9487+ return 2;
9488+ }
9489+ } while (0);
9490+
9491+ do { /* PaX: patched PLT emulation #2 */
9492+ unsigned int ba;
9493+
9494+ err = get_user(ba, (unsigned int *)regs->tpc);
9495+
9496+ if (err)
9497+ break;
9498+
9499+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
9500+ unsigned long addr;
9501+
9502+ if ((ba & 0xFFC00000U) == 0x30800000U)
9503+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
9504+ else
9505+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9506+
9507+ if (test_thread_flag(TIF_32BIT))
9508+ addr &= 0xFFFFFFFFUL;
9509+
9510+ regs->tpc = addr;
9511+ regs->tnpc = addr+4;
9512+ return 2;
9513+ }
9514+ } while (0);
9515+
9516+ do { /* PaX: patched PLT emulation #3 */
9517+ unsigned int sethi, bajmpl, nop;
9518+
9519+ err = get_user(sethi, (unsigned int *)regs->tpc);
9520+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
9521+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
9522+
9523+ if (err)
9524+ break;
9525+
9526+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9527+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
9528+ nop == 0x01000000U)
9529+ {
9530+ unsigned long addr;
9531+
9532+ addr = (sethi & 0x003FFFFFU) << 10;
9533+ regs->u_regs[UREG_G1] = addr;
9534+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
9535+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9536+ else
9537+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9538+
9539+ if (test_thread_flag(TIF_32BIT))
9540+ addr &= 0xFFFFFFFFUL;
9541+
9542+ regs->tpc = addr;
9543+ regs->tnpc = addr+4;
9544+ return 2;
9545+ }
9546+ } while (0);
9547+
9548+ do { /* PaX: patched PLT emulation #4 */
9549+ unsigned int sethi, mov1, call, mov2;
9550+
9551+ err = get_user(sethi, (unsigned int *)regs->tpc);
9552+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
9553+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
9554+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
9555+
9556+ if (err)
9557+ break;
9558+
9559+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9560+ mov1 == 0x8210000FU &&
9561+ (call & 0xC0000000U) == 0x40000000U &&
9562+ mov2 == 0x9E100001U)
9563+ {
9564+ unsigned long addr;
9565+
9566+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
9567+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
9568+
9569+ if (test_thread_flag(TIF_32BIT))
9570+ addr &= 0xFFFFFFFFUL;
9571+
9572+ regs->tpc = addr;
9573+ regs->tnpc = addr+4;
9574+ return 2;
9575+ }
9576+ } while (0);
9577+
9578+ do { /* PaX: patched PLT emulation #5 */
9579+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
9580+
9581+ err = get_user(sethi, (unsigned int *)regs->tpc);
9582+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
9583+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
9584+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
9585+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
9586+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
9587+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
9588+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
9589+
9590+ if (err)
9591+ break;
9592+
9593+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9594+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
9595+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
9596+ (or1 & 0xFFFFE000U) == 0x82106000U &&
9597+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
9598+ sllx == 0x83287020U &&
9599+ jmpl == 0x81C04005U &&
9600+ nop == 0x01000000U)
9601+ {
9602+ unsigned long addr;
9603+
9604+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
9605+ regs->u_regs[UREG_G1] <<= 32;
9606+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
9607+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
9608+ regs->tpc = addr;
9609+ regs->tnpc = addr+4;
9610+ return 2;
9611+ }
9612+ } while (0);
9613+
9614+ do { /* PaX: patched PLT emulation #6 */
9615+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
9616+
9617+ err = get_user(sethi, (unsigned int *)regs->tpc);
9618+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
9619+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
9620+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
9621+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
9622+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
9623+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
9624+
9625+ if (err)
9626+ break;
9627+
9628+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9629+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
9630+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
9631+ sllx == 0x83287020U &&
9632+ (or & 0xFFFFE000U) == 0x8A116000U &&
9633+ jmpl == 0x81C04005U &&
9634+ nop == 0x01000000U)
9635+ {
9636+ unsigned long addr;
9637+
9638+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
9639+ regs->u_regs[UREG_G1] <<= 32;
9640+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
9641+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
9642+ regs->tpc = addr;
9643+ regs->tnpc = addr+4;
9644+ return 2;
9645+ }
9646+ } while (0);
9647+
9648+ do { /* PaX: unpatched PLT emulation step 1 */
9649+ unsigned int sethi, ba, nop;
9650+
9651+ err = get_user(sethi, (unsigned int *)regs->tpc);
9652+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
9653+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
9654+
9655+ if (err)
9656+ break;
9657+
9658+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9659+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
9660+ nop == 0x01000000U)
9661+ {
9662+ unsigned long addr;
9663+ unsigned int save, call;
9664+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
9665+
9666+ if ((ba & 0xFFC00000U) == 0x30800000U)
9667+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
9668+ else
9669+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9670+
9671+ if (test_thread_flag(TIF_32BIT))
9672+ addr &= 0xFFFFFFFFUL;
9673+
9674+ err = get_user(save, (unsigned int *)addr);
9675+ err |= get_user(call, (unsigned int *)(addr+4));
9676+ err |= get_user(nop, (unsigned int *)(addr+8));
9677+ if (err)
9678+ break;
9679+
9680+#ifdef CONFIG_PAX_DLRESOLVE
9681+ if (save == 0x9DE3BFA8U &&
9682+ (call & 0xC0000000U) == 0x40000000U &&
9683+ nop == 0x01000000U)
9684+ {
9685+ struct vm_area_struct *vma;
9686+ unsigned long call_dl_resolve;
9687+
9688+ down_read(&current->mm->mmap_sem);
9689+ call_dl_resolve = current->mm->call_dl_resolve;
9690+ up_read(&current->mm->mmap_sem);
9691+ if (likely(call_dl_resolve))
9692+ goto emulate;
9693+
9694+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
9695+
9696+ down_write(&current->mm->mmap_sem);
9697+ if (current->mm->call_dl_resolve) {
9698+ call_dl_resolve = current->mm->call_dl_resolve;
9699+ up_write(&current->mm->mmap_sem);
9700+ if (vma)
9701+ kmem_cache_free(vm_area_cachep, vma);
9702+ goto emulate;
9703+ }
9704+
9705+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
9706+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
9707+ up_write(&current->mm->mmap_sem);
9708+ if (vma)
9709+ kmem_cache_free(vm_area_cachep, vma);
9710+ return 1;
9711+ }
9712+
9713+ if (pax_insert_vma(vma, call_dl_resolve)) {
9714+ up_write(&current->mm->mmap_sem);
9715+ kmem_cache_free(vm_area_cachep, vma);
9716+ return 1;
9717+ }
9718+
9719+ current->mm->call_dl_resolve = call_dl_resolve;
9720+ up_write(&current->mm->mmap_sem);
9721+
9722+emulate:
9723+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9724+ regs->tpc = call_dl_resolve;
9725+ regs->tnpc = addr+4;
9726+ return 3;
9727+ }
9728+#endif
9729+
9730+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
9731+ if ((save & 0xFFC00000U) == 0x05000000U &&
9732+ (call & 0xFFFFE000U) == 0x85C0A000U &&
9733+ nop == 0x01000000U)
9734+ {
9735+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9736+ regs->u_regs[UREG_G2] = addr + 4;
9737+ addr = (save & 0x003FFFFFU) << 10;
9738+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9739+
9740+ if (test_thread_flag(TIF_32BIT))
9741+ addr &= 0xFFFFFFFFUL;
9742+
9743+ regs->tpc = addr;
9744+ regs->tnpc = addr+4;
9745+ return 3;
9746+ }
9747+
9748+ /* PaX: 64-bit PLT stub */
9749+ err = get_user(sethi1, (unsigned int *)addr);
9750+ err |= get_user(sethi2, (unsigned int *)(addr+4));
9751+ err |= get_user(or1, (unsigned int *)(addr+8));
9752+ err |= get_user(or2, (unsigned int *)(addr+12));
9753+ err |= get_user(sllx, (unsigned int *)(addr+16));
9754+ err |= get_user(add, (unsigned int *)(addr+20));
9755+ err |= get_user(jmpl, (unsigned int *)(addr+24));
9756+ err |= get_user(nop, (unsigned int *)(addr+28));
9757+ if (err)
9758+ break;
9759+
9760+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
9761+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
9762+ (or1 & 0xFFFFE000U) == 0x88112000U &&
9763+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
9764+ sllx == 0x89293020U &&
9765+ add == 0x8A010005U &&
9766+ jmpl == 0x89C14000U &&
9767+ nop == 0x01000000U)
9768+ {
9769+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9770+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
9771+ regs->u_regs[UREG_G4] <<= 32;
9772+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
9773+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
9774+ regs->u_regs[UREG_G4] = addr + 24;
9775+ addr = regs->u_regs[UREG_G5];
9776+ regs->tpc = addr;
9777+ regs->tnpc = addr+4;
9778+ return 3;
9779+ }
9780+ }
9781+ } while (0);
9782+
9783+#ifdef CONFIG_PAX_DLRESOLVE
9784+ do { /* PaX: unpatched PLT emulation step 2 */
9785+ unsigned int save, call, nop;
9786+
9787+ err = get_user(save, (unsigned int *)(regs->tpc-4));
9788+ err |= get_user(call, (unsigned int *)regs->tpc);
9789+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
9790+ if (err)
9791+ break;
9792+
9793+ if (save == 0x9DE3BFA8U &&
9794+ (call & 0xC0000000U) == 0x40000000U &&
9795+ nop == 0x01000000U)
9796+ {
9797+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
9798+
9799+ if (test_thread_flag(TIF_32BIT))
9800+ dl_resolve &= 0xFFFFFFFFUL;
9801+
9802+ regs->u_regs[UREG_RETPC] = regs->tpc;
9803+ regs->tpc = dl_resolve;
9804+ regs->tnpc = dl_resolve+4;
9805+ return 3;
9806+ }
9807+ } while (0);
9808+#endif
9809+
9810+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
9811+ unsigned int sethi, ba, nop;
9812+
9813+ err = get_user(sethi, (unsigned int *)regs->tpc);
9814+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
9815+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
9816+
9817+ if (err)
9818+ break;
9819+
9820+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9821+ (ba & 0xFFF00000U) == 0x30600000U &&
9822+ nop == 0x01000000U)
9823+ {
9824+ unsigned long addr;
9825+
9826+ addr = (sethi & 0x003FFFFFU) << 10;
9827+ regs->u_regs[UREG_G1] = addr;
9828+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9829+
9830+ if (test_thread_flag(TIF_32BIT))
9831+ addr &= 0xFFFFFFFFUL;
9832+
9833+ regs->tpc = addr;
9834+ regs->tnpc = addr+4;
9835+ return 2;
9836+ }
9837+ } while (0);
9838+
9839+#endif
9840+
9841+ return 1;
9842+}
9843+
9844+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9845+{
9846+ unsigned long i;
9847+
9848+ printk(KERN_ERR "PAX: bytes at PC: ");
9849+ for (i = 0; i < 8; i++) {
9850+ unsigned int c;
9851+ if (get_user(c, (unsigned int *)pc+i))
9852+ printk(KERN_CONT "???????? ");
9853+ else
9854+ printk(KERN_CONT "%08x ", c);
9855+ }
9856+ printk("\n");
9857+}
9858+#endif
9859+
9860 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
9861 {
9862 struct mm_struct *mm = current->mm;
9863@@ -341,6 +804,29 @@ retry:
9864 if (!vma)
9865 goto bad_area;
9866
9867+#ifdef CONFIG_PAX_PAGEEXEC
9868+ /* PaX: detect ITLB misses on non-exec pages */
9869+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
9870+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
9871+ {
9872+ if (address != regs->tpc)
9873+ goto good_area;
9874+
9875+ up_read(&mm->mmap_sem);
9876+ switch (pax_handle_fetch_fault(regs)) {
9877+
9878+#ifdef CONFIG_PAX_EMUPLT
9879+ case 2:
9880+ case 3:
9881+ return;
9882+#endif
9883+
9884+ }
9885+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
9886+ do_group_exit(SIGKILL);
9887+ }
9888+#endif
9889+
9890 /* Pure DTLB misses do not tell us whether the fault causing
9891 * load/store/atomic was a write or not, it only says that there
9892 * was no match. So in such a case we (carefully) read the
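
The 64-bit emulator repeats one idiom that deserves unpacking: `(((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2` turns the 22-bit branch displacement in `ba` into a signed byte offset. OR-ing in every bit above the field pre-sets the sign extension; XOR-then-ADD of the field's sign bit (0x00200000) then either leaves those upper bits intact (negative displacement) or carries out of the 64-bit register and clears them (positive displacement); the final shift scales instructions to bytes. Checking both cases:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* The emulator's idiom for sign-extending a 22-bit branch field and
     * scaling it to bytes, copied from the hunks above. */
    static uint64_t disp22_bytes(uint32_t ba)
    {
        return (((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL)
                + 0x00200000UL) << 2;
    }

    int main(void)
    {
        /* displacement -1 (all 22 bits set): -1 insn == -4 bytes */
        assert(disp22_bytes(0x30800000u | 0x3FFFFFu) == (uint64_t)-4);
        /* displacement +3: 3 insns == 12 bytes */
        assert(disp22_bytes(0x30800000u | 3) == 12);
        puts("disp22 decode ok");
        return 0;
    }
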
9893diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
9894index d2b5944..bd813f2 100644
9895--- a/arch/sparc/mm/hugetlbpage.c
9896+++ b/arch/sparc/mm/hugetlbpage.c
9897@@ -38,7 +38,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
9898
9899 info.flags = 0;
9900 info.length = len;
9901- info.low_limit = TASK_UNMAPPED_BASE;
9902+ info.low_limit = mm->mmap_base;
9903 info.high_limit = min(task_size, VA_EXCLUDE_START);
9904 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
9905 info.align_offset = 0;
9906@@ -47,6 +47,12 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
9907 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
9908 VM_BUG_ON(addr != -ENOMEM);
9909 info.low_limit = VA_EXCLUDE_END;
9910+
9911+#ifdef CONFIG_PAX_RANDMMAP
9912+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9913+ info.low_limit += mm->delta_mmap;
9914+#endif
9915+
9916 info.high_limit = task_size;
9917 addr = vm_unmapped_area(&info);
9918 }
9919@@ -85,6 +91,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9920 VM_BUG_ON(addr != -ENOMEM);
9921 info.flags = 0;
9922 info.low_limit = TASK_UNMAPPED_BASE;
9923+
9924+#ifdef CONFIG_PAX_RANDMMAP
9925+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9926+ info.low_limit += mm->delta_mmap;
9927+#endif
9928+
9929 info.high_limit = STACK_TOP32;
9930 addr = vm_unmapped_area(&info);
9931 }
9932@@ -99,6 +111,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
9933 struct mm_struct *mm = current->mm;
9934 struct vm_area_struct *vma;
9935 unsigned long task_size = TASK_SIZE;
9936+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
9937
9938 if (test_thread_flag(TIF_32BIT))
9939 task_size = STACK_TOP32;
9940@@ -114,11 +127,14 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
9941 return addr;
9942 }
9943
9944+#ifdef CONFIG_PAX_RANDMMAP
9945+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9946+#endif
9947+
9948 if (addr) {
9949 addr = ALIGN(addr, HPAGE_SIZE);
9950 vma = find_vma(mm, addr);
9951- if (task_size - len >= addr &&
9952- (!vma || addr + len <= vma->vm_start))
9953+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9954 return addr;
9955 }
9956 if (mm->get_unmapped_area == arch_get_unmapped_area)
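The replaced test `(!vma || addr + len <= vma->vm_start)` only checks for overlap with the next VMA; check_heap_stack_gap() (defined elsewhere in this patch, alongside gr_rand_threadstack_offset()) additionally enforces slack around stack mappings. A hypothetical sketch of the shape of such a check -- the VM_GROWSDOWN test and the use of `offset` here are assumptions, not the patch's actual definition:

/* Hypothetical sketch only; see the patch's own helper for the real rules. */
static int check_heap_stack_gap_sketch(const struct vm_area_struct *vma,
				       unsigned long addr, unsigned long len,
				       unsigned long offset)
{
	if (!vma)
		return 1;			/* nothing above us */
	if (addr + len > vma->vm_start)
		return 0;			/* plain overlap */
	if (vma->vm_flags & VM_GROWSDOWN)	/* assumed: keep a gap below stacks */
		return addr + len + offset <= vma->vm_start;
	return 1;
}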
9957diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
9958index f4500c6..889656c 100644
9959--- a/arch/tile/include/asm/atomic_64.h
9960+++ b/arch/tile/include/asm/atomic_64.h
9961@@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
9962
9963 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9964
9965+#define atomic64_read_unchecked(v) atomic64_read(v)
9966+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
9967+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
9968+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
9969+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
9970+#define atomic64_inc_unchecked(v) atomic64_inc(v)
9971+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
9972+#define atomic64_dec_unchecked(v) atomic64_dec(v)
9973+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
9974+
9975 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
9976 #define smp_mb__before_atomic_dec() smp_mb()
9977 #define smp_mb__after_atomic_dec() smp_mb()
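tile has no PaX reference-counter overflow instrumentation, so the `_unchecked` names added above are straight aliases for the plain atomics; code shared with instrumented architectures can then use one spelling everywhere. A usage sketch (the counter is illustrative):

/* Illustrative: a statistics counter that is allowed to wrap, written with
 * the _unchecked spelling; on tile this compiles to plain atomic64_inc(). */
static atomic64_t rx_frames = ATOMIC64_INIT(0);

static void count_rx_frame(void)
{
	atomic64_inc_unchecked(&rx_frames);
}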
9978diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
9979index a9a5299..0fce79e 100644
9980--- a/arch/tile/include/asm/cache.h
9981+++ b/arch/tile/include/asm/cache.h
9982@@ -15,11 +15,12 @@
9983 #ifndef _ASM_TILE_CACHE_H
9984 #define _ASM_TILE_CACHE_H
9985
9986+#include <linux/const.h>
9987 #include <arch/chip.h>
9988
9989 /* bytes per L1 data cache line */
9990 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
9991-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9992+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9993
9994 /* bytes per L2 cache line */
9995 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
9996diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
9997index 9ab078a..d6635c2 100644
9998--- a/arch/tile/include/asm/uaccess.h
9999+++ b/arch/tile/include/asm/uaccess.h
10000@@ -403,9 +403,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
10001 const void __user *from,
10002 unsigned long n)
10003 {
10004- int sz = __compiletime_object_size(to);
10005+ size_t sz = __compiletime_object_size(to);
10006
10007- if (likely(sz == -1 || sz >= n))
10008+ if (likely(sz == (size_t)-1 || sz >= n))
10009 n = _copy_from_user(to, from, n);
10010 else
10011 copy_from_user_overflow();
10012diff --git a/arch/um/Makefile b/arch/um/Makefile
10013index 133f7de..1d6f2f1 100644
10014--- a/arch/um/Makefile
10015+++ b/arch/um/Makefile
10016@@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
10017 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
10018 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
10019
10020+ifdef CONSTIFY_PLUGIN
10021+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10022+endif
10023+
10024 #This will adjust *FLAGS accordingly to the platform.
10025 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
10026
10027diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
10028index 19e1bdd..3665b77 100644
10029--- a/arch/um/include/asm/cache.h
10030+++ b/arch/um/include/asm/cache.h
10031@@ -1,6 +1,7 @@
10032 #ifndef __UM_CACHE_H
10033 #define __UM_CACHE_H
10034
10035+#include <linux/const.h>
10036
10037 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
10038 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10039@@ -12,6 +13,6 @@
10040 # define L1_CACHE_SHIFT 5
10041 #endif
10042
10043-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10044+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10045
10046 #endif
10047diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
10048index 2e0a6b1..a64d0f5 100644
10049--- a/arch/um/include/asm/kmap_types.h
10050+++ b/arch/um/include/asm/kmap_types.h
10051@@ -8,6 +8,6 @@
10052
10053 /* No more #include "asm/arch/kmap_types.h" ! */
10054
10055-#define KM_TYPE_NR 14
10056+#define KM_TYPE_NR 15
10057
10058 #endif
10059diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
10060index 5ff53d9..5850cdf 100644
10061--- a/arch/um/include/asm/page.h
10062+++ b/arch/um/include/asm/page.h
10063@@ -14,6 +14,9 @@
10064 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
10065 #define PAGE_MASK (~(PAGE_SIZE-1))
10066
10067+#define ktla_ktva(addr) (addr)
10068+#define ktva_ktla(addr) (addr)
10069+
10070 #ifndef __ASSEMBLY__
10071
10072 struct page;
10073diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
10074index 0032f92..cd151e0 100644
10075--- a/arch/um/include/asm/pgtable-3level.h
10076+++ b/arch/um/include/asm/pgtable-3level.h
10077@@ -58,6 +58,7 @@
10078 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
10079 #define pud_populate(mm, pud, pmd) \
10080 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
10081+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
10082
10083 #ifdef CONFIG_64BIT
10084 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
10085diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
10086index b462b13..e7a19aa 100644
10087--- a/arch/um/kernel/process.c
10088+++ b/arch/um/kernel/process.c
10089@@ -386,22 +386,6 @@ int singlestepping(void * t)
10090 return 2;
10091 }
10092
10093-/*
10094- * Only x86 and x86_64 have an arch_align_stack().
10095- * All other arches have "#define arch_align_stack(x) (x)"
10096- * in their asm/system.h
10097- * As this is included in UML from asm-um/system-generic.h,
10098- * we can use it to behave as the subarch does.
10099- */
10100-#ifndef arch_align_stack
10101-unsigned long arch_align_stack(unsigned long sp)
10102-{
10103- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
10104- sp -= get_random_int() % 8192;
10105- return sp & ~0xf;
10106-}
10107-#endif
10108-
10109 unsigned long get_wchan(struct task_struct *p)
10110 {
10111 unsigned long stack_page, sp, ip;
10112diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
10113index ad8f795..2c7eec6 100644
10114--- a/arch/unicore32/include/asm/cache.h
10115+++ b/arch/unicore32/include/asm/cache.h
10116@@ -12,8 +12,10 @@
10117 #ifndef __UNICORE_CACHE_H__
10118 #define __UNICORE_CACHE_H__
10119
10120-#define L1_CACHE_SHIFT (5)
10121-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10122+#include <linux/const.h>
10123+
10124+#define L1_CACHE_SHIFT 5
10125+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10126
10127 /*
10128 * Memory returned by kmalloc() may be used for DMA, so we must make
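The cache.h hunks for tile, um and unicore32 above all make the same change: L1_CACHE_BYTES becomes `_AC(1,UL) << L1_CACHE_SHIFT`, i.e. an unsigned long in C (avoiding signed-int arithmetic in size calculations) while remaining a bare constant in assembly. For reference, _AC() in include/linux/const.h is essentially:

#ifdef __ASSEMBLY__
#define _AC(X,Y)	X		/* assembler: no suffix */
#else
#define __AC(X,Y)	(X##Y)
#define _AC(X,Y)	__AC(X,Y)	/* C: paste the UL suffix on */
#endif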
10129diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
10130index 0694d09..b58b3aa 100644
10131--- a/arch/x86/Kconfig
10132+++ b/arch/x86/Kconfig
10133@@ -238,7 +238,7 @@ config X86_HT
10134
10135 config X86_32_LAZY_GS
10136 def_bool y
10137- depends on X86_32 && !CC_STACKPROTECTOR
10138+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10139
10140 config ARCH_HWEIGHT_CFLAGS
10141 string
10142@@ -1031,6 +1031,7 @@ config MICROCODE_OLD_INTERFACE
10143
10144 config X86_MSR
10145 tristate "/dev/cpu/*/msr - Model-specific register support"
10146+ depends on !GRKERNSEC_KMEM
10147 ---help---
10148 This device gives privileged processes access to the x86
10149 Model-Specific Registers (MSRs). It is a character device with
10150@@ -1054,7 +1055,7 @@ choice
10151
10152 config NOHIGHMEM
10153 bool "off"
10154- depends on !X86_NUMAQ
10155+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10156 ---help---
10157 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10158 However, the address space of 32-bit x86 processors is only 4
10159@@ -1091,7 +1092,7 @@ config NOHIGHMEM
10160
10161 config HIGHMEM4G
10162 bool "4GB"
10163- depends on !X86_NUMAQ
10164+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10165 ---help---
10166 Select this if you have a 32-bit processor and between 1 and 4
10167 gigabytes of physical RAM.
10168@@ -1145,7 +1146,7 @@ config PAGE_OFFSET
10169 hex
10170 default 0xB0000000 if VMSPLIT_3G_OPT
10171 default 0x80000000 if VMSPLIT_2G
10172- default 0x78000000 if VMSPLIT_2G_OPT
10173+ default 0x70000000 if VMSPLIT_2G_OPT
10174 default 0x40000000 if VMSPLIT_1G
10175 default 0xC0000000
10176 depends on X86_32
10177@@ -1542,6 +1543,7 @@ config SECCOMP
10178
10179 config CC_STACKPROTECTOR
10180 bool "Enable -fstack-protector buffer overflow detection"
10181+ depends on X86_64 || !PAX_MEMORY_UDEREF
10182 ---help---
10183 This option turns on the -fstack-protector GCC feature. This
10184 feature puts, at the beginning of functions, a canary value on
10185@@ -1599,6 +1601,7 @@ config KEXEC_JUMP
10186 config PHYSICAL_START
10187 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10188 default "0x1000000"
10189+ range 0x400000 0x40000000
10190 ---help---
10191 This gives the physical address where the kernel is loaded.
10192
10193@@ -1662,6 +1665,7 @@ config X86_NEED_RELOCS
10194 config PHYSICAL_ALIGN
10195 hex "Alignment value to which kernel should be aligned" if X86_32
10196 default "0x1000000"
10197+ range 0x400000 0x1000000 if PAX_KERNEXEC
10198 range 0x2000 0x1000000
10199 ---help---
10200 This value puts the alignment restrictions on physical address
10201@@ -1737,9 +1741,10 @@ config DEBUG_HOTPLUG_CPU0
10202 If unsure, say N.
10203
10204 config COMPAT_VDSO
10205- def_bool y
10206+ def_bool n
10207 prompt "Compat VDSO support"
10208 depends on X86_32 || IA32_EMULATION
10209+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
10210 ---help---
10211 Map the 32-bit VDSO to the predictable old-style address too.
10212
10213diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
10214index c026cca..14657ae 100644
10215--- a/arch/x86/Kconfig.cpu
10216+++ b/arch/x86/Kconfig.cpu
10217@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
10218
10219 config X86_F00F_BUG
10220 def_bool y
10221- depends on M586MMX || M586TSC || M586 || M486
10222+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
10223
10224 config X86_INVD_BUG
10225 def_bool y
10226@@ -327,7 +327,7 @@ config X86_INVD_BUG
10227
10228 config X86_ALIGNMENT_16
10229 def_bool y
10230- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10231+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10232
10233 config X86_INTEL_USERCOPY
10234 def_bool y
10235@@ -373,7 +373,7 @@ config X86_CMPXCHG64
10236 # generates cmov.
10237 config X86_CMOV
10238 def_bool y
10239- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10240+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10241
10242 config X86_MINIMUM_CPU_FAMILY
10243 int
10244diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
10245index b322f12..652d0d9 100644
10246--- a/arch/x86/Kconfig.debug
10247+++ b/arch/x86/Kconfig.debug
10248@@ -84,7 +84,7 @@ config X86_PTDUMP
10249 config DEBUG_RODATA
10250 bool "Write protect kernel read-only data structures"
10251 default y
10252- depends on DEBUG_KERNEL
10253+ depends on DEBUG_KERNEL && BROKEN
10254 ---help---
10255 Mark the kernel read-only data as write-protected in the pagetables,
10256 in order to catch accidental (and incorrect) writes to such const
10257@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
10258
10259 config DEBUG_SET_MODULE_RONX
10260 bool "Set loadable kernel module data as NX and text as RO"
10261- depends on MODULES
10262+ depends on MODULES && BROKEN
10263 ---help---
10264 This option helps catch unintended modifications to loadable
10265 kernel module's text and read-only data. It also prevents execution
10266@@ -294,7 +294,7 @@ config OPTIMIZE_INLINING
10267
10268 config DEBUG_STRICT_USER_COPY_CHECKS
10269 bool "Strict copy size checks"
10270- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
10271+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
10272 ---help---
10273 Enabling this option turns a certain set of sanity checks for user
10274 copy operations into compile time failures.
10275diff --git a/arch/x86/Makefile b/arch/x86/Makefile
10276index e71fc42..7829607 100644
10277--- a/arch/x86/Makefile
10278+++ b/arch/x86/Makefile
10279@@ -50,6 +50,7 @@ else
10280 UTS_MACHINE := x86_64
10281 CHECKFLAGS += -D__x86_64__ -m64
10282
10283+ biarch := $(call cc-option,-m64)
10284 KBUILD_AFLAGS += -m64
10285 KBUILD_CFLAGS += -m64
10286
10287@@ -230,3 +231,12 @@ define archhelp
10288 echo ' FDARGS="..." arguments for the booted kernel'
10289 echo ' FDINITRD=file initrd for the booted kernel'
10290 endef
10291+
10292+define OLD_LD
10293+
10294+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
10295+*** Please upgrade your binutils to 2.18 or newer
10296+endef
10297+
10298+archprepare:
10299+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
10300diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
10301index 379814b..add62ce 100644
10302--- a/arch/x86/boot/Makefile
10303+++ b/arch/x86/boot/Makefile
10304@@ -65,6 +65,9 @@ KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
10305 $(call cc-option, -fno-stack-protector) \
10306 $(call cc-option, -mpreferred-stack-boundary=2)
10307 KBUILD_CFLAGS += $(call cc-option, -m32)
10308+ifdef CONSTIFY_PLUGIN
10309+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10310+endif
10311 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10312 GCOV_PROFILE := n
10313
10314diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
10315index 878e4b9..20537ab 100644
10316--- a/arch/x86/boot/bitops.h
10317+++ b/arch/x86/boot/bitops.h
10318@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
10319 u8 v;
10320 const u32 *p = (const u32 *)addr;
10321
10322- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
10323+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
10324 return v;
10325 }
10326
10327@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
10328
10329 static inline void set_bit(int nr, void *addr)
10330 {
10331- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
10332+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
10333 }
10334
10335 #endif /* BOOT_BITOPS_H */
10336diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
10337index 18997e5..83d9c67 100644
10338--- a/arch/x86/boot/boot.h
10339+++ b/arch/x86/boot/boot.h
10340@@ -85,7 +85,7 @@ static inline void io_delay(void)
10341 static inline u16 ds(void)
10342 {
10343 u16 seg;
10344- asm("movw %%ds,%0" : "=rm" (seg));
10345+ asm volatile("movw %%ds,%0" : "=rm" (seg));
10346 return seg;
10347 }
10348
10349@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
10350 static inline int memcmp(const void *s1, const void *s2, size_t len)
10351 {
10352 u8 diff;
10353- asm("repe; cmpsb; setnz %0"
10354+ asm volatile("repe; cmpsb; setnz %0"
10355 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
10356 return diff;
10357 }
10358diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
10359index 8a84501..b2d165f 100644
10360--- a/arch/x86/boot/compressed/Makefile
10361+++ b/arch/x86/boot/compressed/Makefile
10362@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
10363 KBUILD_CFLAGS += $(cflags-y)
10364 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
10365 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
10366+ifdef CONSTIFY_PLUGIN
10367+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10368+endif
10369
10370 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10371 GCOV_PROFILE := n
10372diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
10373index c205035..5853587 100644
10374--- a/arch/x86/boot/compressed/eboot.c
10375+++ b/arch/x86/boot/compressed/eboot.c
10376@@ -150,7 +150,6 @@ again:
10377 *addr = max_addr;
10378 }
10379
10380-free_pool:
10381 efi_call_phys1(sys_table->boottime->free_pool, map);
10382
10383 fail:
10384@@ -214,7 +213,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
10385 if (i == map_size / desc_size)
10386 status = EFI_NOT_FOUND;
10387
10388-free_pool:
10389 efi_call_phys1(sys_table->boottime->free_pool, map);
10390 fail:
10391 return status;
10392diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
10393index 1e3184f..0d11e2e 100644
10394--- a/arch/x86/boot/compressed/head_32.S
10395+++ b/arch/x86/boot/compressed/head_32.S
10396@@ -118,7 +118,7 @@ preferred_addr:
10397 notl %eax
10398 andl %eax, %ebx
10399 #else
10400- movl $LOAD_PHYSICAL_ADDR, %ebx
10401+ movl $____LOAD_PHYSICAL_ADDR, %ebx
10402 #endif
10403
10404 /* Target address to relocate to for decompression */
10405@@ -204,7 +204,7 @@ relocated:
10406 * and where it was actually loaded.
10407 */
10408 movl %ebp, %ebx
10409- subl $LOAD_PHYSICAL_ADDR, %ebx
10410+ subl $____LOAD_PHYSICAL_ADDR, %ebx
10411 jz 2f /* Nothing to be done if loaded at compiled addr. */
10412 /*
10413 * Process relocations.
10414@@ -212,8 +212,7 @@ relocated:
10415
10416 1: subl $4, %edi
10417 movl (%edi), %ecx
10418- testl %ecx, %ecx
10419- jz 2f
10420+ jecxz 2f
10421 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
10422 jmp 1b
10423 2:
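The `testl %ecx,%ecx; jz 2f` to `jecxz 2f` change in the relocation loop above is behavior-preserving: both branch exactly when %ecx is zero, the short form merely skips the flag-setting instruction. A simplified C rendering of the loop (the real code applies fixups via %ebx-relative virtual-to-physical addressing rather than a flat image pointer):

#include <stdint.h>

/* Simplified sketch: walk the relocation table backwards and stop at the
 * zero terminator -- the condition 'jecxz 2f' tests. */
static void process_relocs(uint32_t *table_end, uint8_t *image, uint32_t delta)
{
	uint32_t entry;

	while ((entry = *--table_end) != 0)
		*(uint32_t *)(image + entry) += delta;	/* apply fixup */
}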
10424diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
10425index f5d1aaa..cce11dc 100644
10426--- a/arch/x86/boot/compressed/head_64.S
10427+++ b/arch/x86/boot/compressed/head_64.S
10428@@ -91,7 +91,7 @@ ENTRY(startup_32)
10429 notl %eax
10430 andl %eax, %ebx
10431 #else
10432- movl $LOAD_PHYSICAL_ADDR, %ebx
10433+ movl $____LOAD_PHYSICAL_ADDR, %ebx
10434 #endif
10435
10436 /* Target address to relocate to for decompression */
10437@@ -273,7 +273,7 @@ preferred_addr:
10438 notq %rax
10439 andq %rax, %rbp
10440 #else
10441- movq $LOAD_PHYSICAL_ADDR, %rbp
10442+ movq $____LOAD_PHYSICAL_ADDR, %rbp
10443 #endif
10444
10445 /* Target address to relocate to for decompression */
10446diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
10447index 88f7ff6..ed695dd 100644
10448--- a/arch/x86/boot/compressed/misc.c
10449+++ b/arch/x86/boot/compressed/misc.c
10450@@ -303,7 +303,7 @@ static void parse_elf(void *output)
10451 case PT_LOAD:
10452 #ifdef CONFIG_RELOCATABLE
10453 dest = output;
10454- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
10455+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
10456 #else
10457 dest = (void *)(phdr->p_paddr);
10458 #endif
10459@@ -352,7 +352,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
10460 error("Destination address too large");
10461 #endif
10462 #ifndef CONFIG_RELOCATABLE
10463- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
10464+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
10465 error("Wrong destination address");
10466 #endif
10467
10468diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
10469index 4d3ff03..e4972ff 100644
10470--- a/arch/x86/boot/cpucheck.c
10471+++ b/arch/x86/boot/cpucheck.c
10472@@ -74,7 +74,7 @@ static int has_fpu(void)
10473 u16 fcw = -1, fsw = -1;
10474 u32 cr0;
10475
10476- asm("movl %%cr0,%0" : "=r" (cr0));
10477+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
10478 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
10479 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
10480 asm volatile("movl %0,%%cr0" : : "r" (cr0));
10481@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
10482 {
10483 u32 f0, f1;
10484
10485- asm("pushfl ; "
10486+ asm volatile("pushfl ; "
10487 "pushfl ; "
10488 "popl %0 ; "
10489 "movl %0,%1 ; "
10490@@ -115,7 +115,7 @@ static void get_flags(void)
10491 set_bit(X86_FEATURE_FPU, cpu.flags);
10492
10493 if (has_eflag(X86_EFLAGS_ID)) {
10494- asm("cpuid"
10495+ asm volatile("cpuid"
10496 : "=a" (max_intel_level),
10497 "=b" (cpu_vendor[0]),
10498 "=d" (cpu_vendor[1]),
10499@@ -124,7 +124,7 @@ static void get_flags(void)
10500
10501 if (max_intel_level >= 0x00000001 &&
10502 max_intel_level <= 0x0000ffff) {
10503- asm("cpuid"
10504+ asm volatile("cpuid"
10505 : "=a" (tfms),
10506 "=c" (cpu.flags[4]),
10507 "=d" (cpu.flags[0])
10508@@ -136,7 +136,7 @@ static void get_flags(void)
10509 cpu.model += ((tfms >> 16) & 0xf) << 4;
10510 }
10511
10512- asm("cpuid"
10513+ asm volatile("cpuid"
10514 : "=a" (max_amd_level)
10515 : "a" (0x80000000)
10516 : "ebx", "ecx", "edx");
10517@@ -144,7 +144,7 @@ static void get_flags(void)
10518 if (max_amd_level >= 0x80000001 &&
10519 max_amd_level <= 0x8000ffff) {
10520 u32 eax = 0x80000001;
10521- asm("cpuid"
10522+ asm volatile("cpuid"
10523 : "+a" (eax),
10524 "=c" (cpu.flags[6]),
10525 "=d" (cpu.flags[1])
10526@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10527 u32 ecx = MSR_K7_HWCR;
10528 u32 eax, edx;
10529
10530- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10531+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10532 eax &= ~(1 << 15);
10533- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10534+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10535
10536 get_flags(); /* Make sure it really did something */
10537 err = check_flags();
10538@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10539 u32 ecx = MSR_VIA_FCR;
10540 u32 eax, edx;
10541
10542- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10543+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10544 eax |= (1<<1)|(1<<7);
10545- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10546+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10547
10548 set_bit(X86_FEATURE_CX8, cpu.flags);
10549 err = check_flags();
10550@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10551 u32 eax, edx;
10552 u32 level = 1;
10553
10554- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10555- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
10556- asm("cpuid"
10557+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10558+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
10559+ asm volatile("cpuid"
10560 : "+a" (level), "=d" (cpu.flags[0])
10561 : : "ecx", "ebx");
10562- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10563+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10564
10565 err = check_flags();
10566 }
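The many `asm` to `asm volatile` conversions in this file (and in boot/bitops.h and boot/boot.h above) share one rationale: an asm statement with outputs but no `volatile` may be CSE'd, hoisted or dropped when GCC sees no data dependency, which is unsafe for cpuid/rdmsr/control-register reads whose results depend on hidden machine state. A minimal sketch:

#include <stdint.h>

/* Without 'volatile', two calls with the same msr argument could be folded
 * into one even if a wrmsr changed the register in between; 'volatile'
 * forces the instruction to run at every textual occurrence. */
static inline uint32_t rdmsr_lo(uint32_t msr)
{
	uint32_t lo, hi;

	asm volatile("rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr));
	return lo;
}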
10567diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
10568index 944ce59..87ee37a 100644
10569--- a/arch/x86/boot/header.S
10570+++ b/arch/x86/boot/header.S
10571@@ -401,10 +401,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
10572 # single linked list of
10573 # struct setup_data
10574
10575-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
10576+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
10577
10578 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
10579+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
10580+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
10581+#else
10582 #define VO_INIT_SIZE (VO__end - VO__text)
10583+#endif
10584 #if ZO_INIT_SIZE > VO_INIT_SIZE
10585 #define INIT_SIZE ZO_INIT_SIZE
10586 #else
10587diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
10588index db75d07..8e6d0af 100644
10589--- a/arch/x86/boot/memory.c
10590+++ b/arch/x86/boot/memory.c
10591@@ -19,7 +19,7 @@
10592
10593 static int detect_memory_e820(void)
10594 {
10595- int count = 0;
10596+ unsigned int count = 0;
10597 struct biosregs ireg, oreg;
10598 struct e820entry *desc = boot_params.e820_map;
10599 static struct e820entry buf; /* static so it is zeroed */
10600diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
10601index 11e8c6e..fdbb1ed 100644
10602--- a/arch/x86/boot/video-vesa.c
10603+++ b/arch/x86/boot/video-vesa.c
10604@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
10605
10606 boot_params.screen_info.vesapm_seg = oreg.es;
10607 boot_params.screen_info.vesapm_off = oreg.di;
10608+ boot_params.screen_info.vesapm_size = oreg.cx;
10609 }
10610
10611 /*
10612diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
10613index 43eda28..5ab5fdb 100644
10614--- a/arch/x86/boot/video.c
10615+++ b/arch/x86/boot/video.c
10616@@ -96,7 +96,7 @@ static void store_mode_params(void)
10617 static unsigned int get_entry(void)
10618 {
10619 char entry_buf[4];
10620- int i, len = 0;
10621+ unsigned int i, len = 0;
10622 int key;
10623 unsigned int v;
10624
10625diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
10626index 5b577d5..3c1fed4 100644
10627--- a/arch/x86/crypto/aes-x86_64-asm_64.S
10628+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
10629@@ -8,6 +8,8 @@
10630 * including this sentence is retained in full.
10631 */
10632
10633+#include <asm/alternative-asm.h>
10634+
10635 .extern crypto_ft_tab
10636 .extern crypto_it_tab
10637 .extern crypto_fl_tab
10638@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
10639 je B192; \
10640 leaq 32(r9),r9;
10641
10642+#define ret pax_force_retaddr 0, 1; ret
10643+
10644 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
10645 movq r1,r2; \
10646 movq r3,r4; \
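The `#define ret pax_force_retaddr 0, 1; ret` line above makes the assembler rewrite every `ret` in this file so the saved return address is tagged first; with the BTS method (see the `btsq $63,\rip(%rsp)` macro in asm/alternative-asm.h near the end of this section) that means setting bit 63. A sketch of why that is a useful return-address check:

#include <stdint.h>

/* Setting bit 63 leaves a canonical kernel address (top bits already all
 * ones) unchanged, but turns an attacker-planted userland address into a
 * non-canonical one, so a hijacked 'ret' faults instead of jumping there. */
static uint64_t force_retaddr(uint64_t rip)
{
	return rip | (1UL << 63);
}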
10647diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
10648index 3470624..201259d 100644
10649--- a/arch/x86/crypto/aesni-intel_asm.S
10650+++ b/arch/x86/crypto/aesni-intel_asm.S
10651@@ -31,6 +31,7 @@
10652
10653 #include <linux/linkage.h>
10654 #include <asm/inst.h>
10655+#include <asm/alternative-asm.h>
10656
10657 #ifdef __x86_64__
10658 .data
10659@@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
10660 pop %r14
10661 pop %r13
10662 pop %r12
10663+ pax_force_retaddr 0, 1
10664 ret
10665+ENDPROC(aesni_gcm_dec)
10666
10667
10668 /*****************************************************************************
10669@@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
10670 pop %r14
10671 pop %r13
10672 pop %r12
10673+ pax_force_retaddr 0, 1
10674 ret
10675+ENDPROC(aesni_gcm_enc)
10676
10677 #endif
10678
10679@@ -1714,6 +1719,7 @@ _key_expansion_256a:
10680 pxor %xmm1, %xmm0
10681 movaps %xmm0, (TKEYP)
10682 add $0x10, TKEYP
10683+ pax_force_retaddr_bts
10684 ret
10685
10686 .align 4
10687@@ -1738,6 +1744,7 @@ _key_expansion_192a:
10688 shufps $0b01001110, %xmm2, %xmm1
10689 movaps %xmm1, 0x10(TKEYP)
10690 add $0x20, TKEYP
10691+ pax_force_retaddr_bts
10692 ret
10693
10694 .align 4
10695@@ -1757,6 +1764,7 @@ _key_expansion_192b:
10696
10697 movaps %xmm0, (TKEYP)
10698 add $0x10, TKEYP
10699+ pax_force_retaddr_bts
10700 ret
10701
10702 .align 4
10703@@ -1769,6 +1777,7 @@ _key_expansion_256b:
10704 pxor %xmm1, %xmm2
10705 movaps %xmm2, (TKEYP)
10706 add $0x10, TKEYP
10707+ pax_force_retaddr_bts
10708 ret
10709
10710 /*
10711@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
10712 #ifndef __x86_64__
10713 popl KEYP
10714 #endif
10715+ pax_force_retaddr 0, 1
10716 ret
10717+ENDPROC(aesni_set_key)
10718
10719 /*
10720 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
10721@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
10722 popl KLEN
10723 popl KEYP
10724 #endif
10725+ pax_force_retaddr 0, 1
10726 ret
10727+ENDPROC(aesni_enc)
10728
10729 /*
10730 * _aesni_enc1: internal ABI
10731@@ -1959,6 +1972,7 @@ _aesni_enc1:
10732 AESENC KEY STATE
10733 movaps 0x70(TKEYP), KEY
10734 AESENCLAST KEY STATE
10735+ pax_force_retaddr_bts
10736 ret
10737
10738 /*
10739@@ -2067,6 +2081,7 @@ _aesni_enc4:
10740 AESENCLAST KEY STATE2
10741 AESENCLAST KEY STATE3
10742 AESENCLAST KEY STATE4
10743+ pax_force_retaddr_bts
10744 ret
10745
10746 /*
10747@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
10748 popl KLEN
10749 popl KEYP
10750 #endif
10751+ pax_force_retaddr 0, 1
10752 ret
10753+ENDPROC(aesni_dec)
10754
10755 /*
10756 * _aesni_dec1: internal ABI
10757@@ -2146,6 +2163,7 @@ _aesni_dec1:
10758 AESDEC KEY STATE
10759 movaps 0x70(TKEYP), KEY
10760 AESDECLAST KEY STATE
10761+ pax_force_retaddr_bts
10762 ret
10763
10764 /*
10765@@ -2254,6 +2272,7 @@ _aesni_dec4:
10766 AESDECLAST KEY STATE2
10767 AESDECLAST KEY STATE3
10768 AESDECLAST KEY STATE4
10769+ pax_force_retaddr_bts
10770 ret
10771
10772 /*
10773@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
10774 popl KEYP
10775 popl LEN
10776 #endif
10777+ pax_force_retaddr 0, 1
10778 ret
10779+ENDPROC(aesni_ecb_enc)
10780
10781 /*
10782 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
10783@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
10784 popl KEYP
10785 popl LEN
10786 #endif
10787+ pax_force_retaddr 0, 1
10788 ret
10789+ENDPROC(aesni_ecb_dec)
10790
10791 /*
10792 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
10793@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
10794 popl LEN
10795 popl IVP
10796 #endif
10797+ pax_force_retaddr 0, 1
10798 ret
10799+ENDPROC(aesni_cbc_enc)
10800
10801 /*
10802 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
10803@@ -2500,7 +2525,9 @@ ENTRY(aesni_cbc_dec)
10804 popl LEN
10805 popl IVP
10806 #endif
10807+ pax_force_retaddr 0, 1
10808 ret
10809+ENDPROC(aesni_cbc_dec)
10810
10811 #ifdef __x86_64__
10812 .align 16
10813@@ -2526,6 +2553,7 @@ _aesni_inc_init:
10814 mov $1, TCTR_LOW
10815 MOVQ_R64_XMM TCTR_LOW INC
10816 MOVQ_R64_XMM CTR TCTR_LOW
10817+ pax_force_retaddr_bts
10818 ret
10819
10820 /*
10821@@ -2554,6 +2582,7 @@ _aesni_inc:
10822 .Linc_low:
10823 movaps CTR, IV
10824 PSHUFB_XMM BSWAP_MASK IV
10825+ pax_force_retaddr_bts
10826 ret
10827
10828 /*
10829@@ -2614,5 +2643,7 @@ ENTRY(aesni_ctr_enc)
10830 .Lctr_enc_ret:
10831 movups IV, (IVP)
10832 .Lctr_enc_just_ret:
10833+ pax_force_retaddr 0, 1
10834 ret
10835+ENDPROC(aesni_ctr_enc)
10836 #endif
10837diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
10838index 391d245..67f35c2 100644
10839--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
10840+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
10841@@ -20,6 +20,8 @@
10842 *
10843 */
10844
10845+#include <asm/alternative-asm.h>
10846+
10847 .file "blowfish-x86_64-asm.S"
10848 .text
10849
10850@@ -151,9 +153,11 @@ __blowfish_enc_blk:
10851 jnz __enc_xor;
10852
10853 write_block();
10854+ pax_force_retaddr 0, 1
10855 ret;
10856 __enc_xor:
10857 xor_block();
10858+ pax_force_retaddr 0, 1
10859 ret;
10860
10861 .align 8
10862@@ -188,6 +192,7 @@ blowfish_dec_blk:
10863
10864 movq %r11, %rbp;
10865
10866+ pax_force_retaddr 0, 1
10867 ret;
10868
10869 /**********************************************************************
10870@@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
10871
10872 popq %rbx;
10873 popq %rbp;
10874+ pax_force_retaddr 0, 1
10875 ret;
10876
10877 __enc_xor4:
10878@@ -349,6 +355,7 @@ __enc_xor4:
10879
10880 popq %rbx;
10881 popq %rbp;
10882+ pax_force_retaddr 0, 1
10883 ret;
10884
10885 .align 8
10886@@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
10887 popq %rbx;
10888 popq %rbp;
10889
10890+ pax_force_retaddr 0, 1
10891 ret;
10892
10893diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
10894index 0b33743..7a56206 100644
10895--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
10896+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
10897@@ -20,6 +20,8 @@
10898 *
10899 */
10900
10901+#include <asm/alternative-asm.h>
10902+
10903 .file "camellia-x86_64-asm_64.S"
10904 .text
10905
10906@@ -229,12 +231,14 @@ __enc_done:
10907 enc_outunpack(mov, RT1);
10908
10909 movq RRBP, %rbp;
10910+ pax_force_retaddr 0, 1
10911 ret;
10912
10913 __enc_xor:
10914 enc_outunpack(xor, RT1);
10915
10916 movq RRBP, %rbp;
10917+ pax_force_retaddr 0, 1
10918 ret;
10919
10920 .global camellia_dec_blk;
10921@@ -275,6 +279,7 @@ __dec_rounds16:
10922 dec_outunpack();
10923
10924 movq RRBP, %rbp;
10925+ pax_force_retaddr 0, 1
10926 ret;
10927
10928 /**********************************************************************
10929@@ -468,6 +473,7 @@ __enc2_done:
10930
10931 movq RRBP, %rbp;
10932 popq %rbx;
10933+ pax_force_retaddr 0, 1
10934 ret;
10935
10936 __enc2_xor:
10937@@ -475,6 +481,7 @@ __enc2_xor:
10938
10939 movq RRBP, %rbp;
10940 popq %rbx;
10941+ pax_force_retaddr 0, 1
10942 ret;
10943
10944 .global camellia_dec_blk_2way;
10945@@ -517,4 +524,5 @@ __dec2_rounds16:
10946
10947 movq RRBP, %rbp;
10948 movq RXOR, %rbx;
10949+ pax_force_retaddr 0, 1
10950 ret;
10951diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
10952index 15b00ac..2071784 100644
10953--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
10954+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
10955@@ -23,6 +23,8 @@
10956 *
10957 */
10958
10959+#include <asm/alternative-asm.h>
10960+
10961 .file "cast5-avx-x86_64-asm_64.S"
10962
10963 .extern cast_s1
10964@@ -281,6 +283,7 @@ __skip_enc:
10965 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
10966 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
10967
10968+ pax_force_retaddr 0, 1
10969 ret;
10970
10971 .align 16
10972@@ -353,6 +356,7 @@ __dec_tail:
10973 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
10974 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
10975
10976+ pax_force_retaddr 0, 1
10977 ret;
10978
10979 __skip_dec:
10980@@ -392,6 +396,7 @@ cast5_ecb_enc_16way:
10981 vmovdqu RR4, (6*4*4)(%r11);
10982 vmovdqu RL4, (7*4*4)(%r11);
10983
10984+ pax_force_retaddr
10985 ret;
10986
10987 .align 16
10988@@ -427,6 +432,7 @@ cast5_ecb_dec_16way:
10989 vmovdqu RR4, (6*4*4)(%r11);
10990 vmovdqu RL4, (7*4*4)(%r11);
10991
10992+ pax_force_retaddr
10993 ret;
10994
10995 .align 16
10996@@ -479,6 +485,7 @@ cast5_cbc_dec_16way:
10997
10998 popq %r12;
10999
11000+ pax_force_retaddr
11001 ret;
11002
11003 .align 16
11004@@ -555,4 +562,5 @@ cast5_ctr_16way:
11005
11006 popq %r12;
11007
11008+ pax_force_retaddr
11009 ret;
11010diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11011index 2569d0d..637c289 100644
11012--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11013+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11014@@ -23,6 +23,8 @@
11015 *
11016 */
11017
11018+#include <asm/alternative-asm.h>
11019+
11020 #include "glue_helper-asm-avx.S"
11021
11022 .file "cast6-avx-x86_64-asm_64.S"
11023@@ -294,6 +296,7 @@ __cast6_enc_blk8:
11024 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
11025 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
11026
11027+ pax_force_retaddr 0, 1
11028 ret;
11029
11030 .align 8
11031@@ -340,6 +343,7 @@ __cast6_dec_blk8:
11032 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
11033 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
11034
11035+ pax_force_retaddr 0, 1
11036 ret;
11037
11038 .align 8
11039@@ -361,6 +365,7 @@ cast6_ecb_enc_8way:
11040
11041 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11042
11043+ pax_force_retaddr
11044 ret;
11045
11046 .align 8
11047@@ -382,6 +387,7 @@ cast6_ecb_dec_8way:
11048
11049 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11050
11051+ pax_force_retaddr
11052 ret;
11053
11054 .align 8
11055@@ -408,6 +414,7 @@ cast6_cbc_dec_8way:
11056
11057 popq %r12;
11058
11059+ pax_force_retaddr
11060 ret;
11061
11062 .align 8
11063@@ -436,4 +443,5 @@ cast6_ctr_8way:
11064
11065 popq %r12;
11066
11067+ pax_force_retaddr
11068 ret;
11069diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
11070index 6214a9b..1f4fc9a 100644
11071--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
11072+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
11073@@ -1,3 +1,5 @@
11074+#include <asm/alternative-asm.h>
11075+
11076 # enter ECRYPT_encrypt_bytes
11077 .text
11078 .p2align 5
11079@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
11080 add %r11,%rsp
11081 mov %rdi,%rax
11082 mov %rsi,%rdx
11083+ pax_force_retaddr 0, 1
11084 ret
11085 # bytesatleast65:
11086 ._bytesatleast65:
11087@@ -891,6 +894,7 @@ ECRYPT_keysetup:
11088 add %r11,%rsp
11089 mov %rdi,%rax
11090 mov %rsi,%rdx
11091+ pax_force_retaddr
11092 ret
11093 # enter ECRYPT_ivsetup
11094 .text
11095@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
11096 add %r11,%rsp
11097 mov %rdi,%rax
11098 mov %rsi,%rdx
11099+ pax_force_retaddr
11100 ret
11101diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
11102index 02b0e9f..cf4cf5c 100644
11103--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
11104+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
11105@@ -24,6 +24,8 @@
11106 *
11107 */
11108
11109+#include <asm/alternative-asm.h>
11110+
11111 #include "glue_helper-asm-avx.S"
11112
11113 .file "serpent-avx-x86_64-asm_64.S"
11114@@ -618,6 +620,7 @@ __serpent_enc_blk8_avx:
11115 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
11116 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
11117
11118+ pax_force_retaddr
11119 ret;
11120
11121 .align 8
11122@@ -673,6 +676,7 @@ __serpent_dec_blk8_avx:
11123 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
11124 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
11125
11126+ pax_force_retaddr
11127 ret;
11128
11129 .align 8
11130@@ -692,6 +696,7 @@ serpent_ecb_enc_8way_avx:
11131
11132 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11133
11134+ pax_force_retaddr
11135 ret;
11136
11137 .align 8
11138@@ -711,6 +716,7 @@ serpent_ecb_dec_8way_avx:
11139
11140 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
11141
11142+ pax_force_retaddr
11143 ret;
11144
11145 .align 8
11146@@ -730,6 +736,7 @@ serpent_cbc_dec_8way_avx:
11147
11148 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
11149
11150+ pax_force_retaddr
11151 ret;
11152
11153 .align 8
11154@@ -751,4 +758,5 @@ serpent_ctr_8way_avx:
11155
11156 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11157
11158+ pax_force_retaddr
11159 ret;
11160diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
11161index 3ee1ff0..cbc568b 100644
11162--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
11163+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
11164@@ -24,6 +24,8 @@
11165 *
11166 */
11167
11168+#include <asm/alternative-asm.h>
11169+
11170 .file "serpent-sse2-x86_64-asm_64.S"
11171 .text
11172
11173@@ -692,12 +694,14 @@ __serpent_enc_blk_8way:
11174 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
11175 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
11176
11177+ pax_force_retaddr
11178 ret;
11179
11180 __enc_xor8:
11181 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
11182 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
11183
11184+ pax_force_retaddr
11185 ret;
11186
11187 .align 8
11188@@ -755,4 +759,5 @@ serpent_dec_blk_8way:
11189 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
11190 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
11191
11192+ pax_force_retaddr
11193 ret;
11194diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
11195index 49d6987..df66bd4 100644
11196--- a/arch/x86/crypto/sha1_ssse3_asm.S
11197+++ b/arch/x86/crypto/sha1_ssse3_asm.S
11198@@ -28,6 +28,8 @@
11199 * (at your option) any later version.
11200 */
11201
11202+#include <asm/alternative-asm.h>
11203+
11204 #define CTX %rdi // arg1
11205 #define BUF %rsi // arg2
11206 #define CNT %rdx // arg3
11207@@ -104,6 +106,7 @@
11208 pop %r12
11209 pop %rbp
11210 pop %rbx
11211+ pax_force_retaddr 0, 1
11212 ret
11213
11214 .size \name, .-\name
11215diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
11216index ebac16b..8092eb9 100644
11217--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
11218+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
11219@@ -23,6 +23,8 @@
11220 *
11221 */
11222
11223+#include <asm/alternative-asm.h>
11224+
11225 #include "glue_helper-asm-avx.S"
11226
11227 .file "twofish-avx-x86_64-asm_64.S"
11228@@ -283,6 +285,7 @@ __twofish_enc_blk8:
11229 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
11230 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
11231
11232+ pax_force_retaddr 0, 1
11233 ret;
11234
11235 .align 8
11236@@ -324,6 +327,7 @@ __twofish_dec_blk8:
11237 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
11238 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
11239
11240+ pax_force_retaddr 0, 1
11241 ret;
11242
11243 .align 8
11244@@ -345,6 +349,7 @@ twofish_ecb_enc_8way:
11245
11246 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
11247
11248+ pax_force_retaddr 0, 1
11249 ret;
11250
11251 .align 8
11252@@ -366,6 +371,7 @@ twofish_ecb_dec_8way:
11253
11254 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11255
11256+ pax_force_retaddr 0, 1
11257 ret;
11258
11259 .align 8
11260@@ -392,6 +398,7 @@ twofish_cbc_dec_8way:
11261
11262 popq %r12;
11263
11264+ pax_force_retaddr 0, 1
11265 ret;
11266
11267 .align 8
11268@@ -420,4 +427,5 @@ twofish_ctr_8way:
11269
11270 popq %r12;
11271
11272+ pax_force_retaddr 0, 1
11273 ret;
11274diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11275index 5b012a2..36d5364 100644
11276--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11277+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11278@@ -20,6 +20,8 @@
11279 *
11280 */
11281
11282+#include <asm/alternative-asm.h>
11283+
11284 .file "twofish-x86_64-asm-3way.S"
11285 .text
11286
11287@@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
11288 popq %r13;
11289 popq %r14;
11290 popq %r15;
11291+ pax_force_retaddr 0, 1
11292 ret;
11293
11294 __enc_xor3:
11295@@ -271,6 +274,7 @@ __enc_xor3:
11296 popq %r13;
11297 popq %r14;
11298 popq %r15;
11299+ pax_force_retaddr 0, 1
11300 ret;
11301
11302 .global twofish_dec_blk_3way
11303@@ -312,5 +316,6 @@ twofish_dec_blk_3way:
11304 popq %r13;
11305 popq %r14;
11306 popq %r15;
11307+ pax_force_retaddr 0, 1
11308 ret;
11309
11310diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
11311index 7bcf3fc..f53832f 100644
11312--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
11313+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
11314@@ -21,6 +21,7 @@
11315 .text
11316
11317 #include <asm/asm-offsets.h>
11318+#include <asm/alternative-asm.h>
11319
11320 #define a_offset 0
11321 #define b_offset 4
11322@@ -268,6 +269,7 @@ twofish_enc_blk:
11323
11324 popq R1
11325 movq $1,%rax
11326+ pax_force_retaddr 0, 1
11327 ret
11328
11329 twofish_dec_blk:
11330@@ -319,4 +321,5 @@ twofish_dec_blk:
11331
11332 popq R1
11333 movq $1,%rax
11334+ pax_force_retaddr 0, 1
11335 ret
11336diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
11337index a703af1..f5b9c36 100644
11338--- a/arch/x86/ia32/ia32_aout.c
11339+++ b/arch/x86/ia32/ia32_aout.c
11340@@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
11341 unsigned long dump_start, dump_size;
11342 struct user32 dump;
11343
11344+ memset(&dump, 0, sizeof(dump));
11345+
11346 fs = get_fs();
11347 set_fs(KERNEL_DS);
11348 has_dumped = 1;
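The single added memset() closes a kernel stack infoleak: `struct user32 dump` was only partially filled before being written into the a.out core file, so padding and unset fields carried stale stack bytes to disk. The general pattern, sketched:

/* Zero any on-stack struct that is partially filled and then copied out
 * wholesale; unset fields and padding otherwise leak kernel stack data. */
static void prepare_dump(struct user32 *dump)
{
	memset(dump, 0, sizeof(*dump));
	/* ... now fill only the fields the dumper actually computes ... */
}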
11349diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
11350index a1daf4a..f8c4537 100644
11351--- a/arch/x86/ia32/ia32_signal.c
11352+++ b/arch/x86/ia32/ia32_signal.c
11353@@ -348,7 +348,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
11354 sp -= frame_size;
11355 /* Align the stack pointer according to the i386 ABI,
11356 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
11357- sp = ((sp + 4) & -16ul) - 4;
11358+ sp = ((sp - 12) & -16ul) - 4;
11359 return (void __user *) sp;
11360 }
11361
11362@@ -406,7 +406,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
11363 * These are actually not used anymore, but left because some
11364 * gdb versions depend on them as a marker.
11365 */
11366- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
11367+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
11368 } put_user_catch(err);
11369
11370 if (err)
11371@@ -448,7 +448,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
11372 0xb8,
11373 __NR_ia32_rt_sigreturn,
11374 0x80cd,
11375- 0,
11376+ 0
11377 };
11378
11379 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
11380@@ -471,16 +471,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
11381
11382 if (ka->sa.sa_flags & SA_RESTORER)
11383 restorer = ka->sa.sa_restorer;
11384+ else if (current->mm->context.vdso)
11385+ /* Return stub is in 32bit vsyscall page */
11386+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
11387 else
11388- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
11389- rt_sigreturn);
11390+ restorer = &frame->retcode;
11391 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
11392
11393 /*
11394 * Not actually used anymore, but left because some gdb
11395 * versions need it.
11396 */
11397- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
11398+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
11399 } put_user_catch(err);
11400
11401 err |= copy_siginfo_to_user32(&frame->info, info);
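Both sigframe alignment expressions above satisfy the i386 ABI comment (`((sp + 4) & 15) == 0` on function entry, i.e. the returned frame base is congruent to 12 mod 16); they differ in how far down they move. A worked sketch of the two:

#include <stdint.h>

/* old: decrements sp by 0..15 bytes -- for sp % 16 == 12 it returns sp
 *      itself, placing the frame at the undecremented stack pointer.
 * new: decrements sp by 16..31 bytes, so the frame always lands strictly
 *      below sp. Both results satisfy result % 16 == 12. */
static uint32_t sigframe_old(uint32_t sp) { return ((sp + 4)  & -16u) - 4; }
static uint32_t sigframe_new(uint32_t sp) { return ((sp - 12) & -16u) - 4; }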
11402diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
11403index 142c4ce..19b683f 100644
11404--- a/arch/x86/ia32/ia32entry.S
11405+++ b/arch/x86/ia32/ia32entry.S
11406@@ -15,8 +15,10 @@
11407 #include <asm/irqflags.h>
11408 #include <asm/asm.h>
11409 #include <asm/smap.h>
11410+#include <asm/pgtable.h>
11411 #include <linux/linkage.h>
11412 #include <linux/err.h>
11413+#include <asm/alternative-asm.h>
11414
11415 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11416 #include <linux/elf-em.h>
11417@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
11418 ENDPROC(native_irq_enable_sysexit)
11419 #endif
11420
11421+ .macro pax_enter_kernel_user
11422+ pax_set_fptr_mask
11423+#ifdef CONFIG_PAX_MEMORY_UDEREF
11424+ call pax_enter_kernel_user
11425+#endif
11426+ .endm
11427+
11428+ .macro pax_exit_kernel_user
11429+#ifdef CONFIG_PAX_MEMORY_UDEREF
11430+ call pax_exit_kernel_user
11431+#endif
11432+#ifdef CONFIG_PAX_RANDKSTACK
11433+ pushq %rax
11434+ pushq %r11
11435+ call pax_randomize_kstack
11436+ popq %r11
11437+ popq %rax
11438+#endif
11439+ .endm
11440+
11441+.macro pax_erase_kstack
11442+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11443+ call pax_erase_kstack
11444+#endif
11445+.endm
11446+
11447 /*
11448 * 32bit SYSENTER instruction entry.
11449 *
11450@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
11451 CFI_REGISTER rsp,rbp
11452 SWAPGS_UNSAFE_STACK
11453 movq PER_CPU_VAR(kernel_stack), %rsp
11454- addq $(KERNEL_STACK_OFFSET),%rsp
11455- /*
11456- * No need to follow this irqs on/off section: the syscall
11457- * disabled irqs, here we enable it straight after entry:
11458- */
11459- ENABLE_INTERRUPTS(CLBR_NONE)
11460 movl %ebp,%ebp /* zero extension */
11461 pushq_cfi $__USER32_DS
11462 /*CFI_REL_OFFSET ss,0*/
11463@@ -135,24 +157,44 @@ ENTRY(ia32_sysenter_target)
11464 CFI_REL_OFFSET rsp,0
11465 pushfq_cfi
11466 /*CFI_REL_OFFSET rflags,0*/
11467- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
11468- CFI_REGISTER rip,r10
11469+ orl $X86_EFLAGS_IF,(%rsp)
11470+ GET_THREAD_INFO(%r11)
11471+ movl TI_sysenter_return(%r11), %r11d
11472+ CFI_REGISTER rip,r11
11473 pushq_cfi $__USER32_CS
11474 /*CFI_REL_OFFSET cs,0*/
11475 movl %eax, %eax
11476- pushq_cfi %r10
11477+ pushq_cfi %r11
11478 CFI_REL_OFFSET rip,0
11479 pushq_cfi %rax
11480 cld
11481 SAVE_ARGS 0,1,0
11482+ pax_enter_kernel_user
11483+
11484+#ifdef CONFIG_PAX_RANDKSTACK
11485+ pax_erase_kstack
11486+#endif
11487+
11488+ /*
11489+ * No need to follow this irqs on/off section: the syscall
11490+ * disabled irqs, here we enable it straight after entry:
11491+ */
11492+ ENABLE_INTERRUPTS(CLBR_NONE)
11493 /* no need to do an access_ok check here because rbp has been
11494 32bit zero extended */
11495+
11496+#ifdef CONFIG_PAX_MEMORY_UDEREF
11497+ mov $PAX_USER_SHADOW_BASE,%r11
11498+ add %r11,%rbp
11499+#endif
11500+
11501 ASM_STAC
11502 1: movl (%rbp),%ebp
11503 _ASM_EXTABLE(1b,ia32_badarg)
11504 ASM_CLAC
11505- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11506- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11507+ GET_THREAD_INFO(%r11)
11508+ orl $TS_COMPAT,TI_status(%r11)
11509+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
11510 CFI_REMEMBER_STATE
11511 jnz sysenter_tracesys
11512 cmpq $(IA32_NR_syscalls-1),%rax
11513@@ -162,12 +204,15 @@ sysenter_do_call:
11514 sysenter_dispatch:
11515 call *ia32_sys_call_table(,%rax,8)
11516 movq %rax,RAX-ARGOFFSET(%rsp)
11517+ GET_THREAD_INFO(%r11)
11518 DISABLE_INTERRUPTS(CLBR_NONE)
11519 TRACE_IRQS_OFF
11520- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11521+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
11522 jnz sysexit_audit
11523 sysexit_from_sys_call:
11524- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11525+ pax_exit_kernel_user
11526+ pax_erase_kstack
11527+ andl $~TS_COMPAT,TI_status(%r11)
11528 /* clear IF, that popfq doesn't enable interrupts early */
11529 andl $~0x200,EFLAGS-R11(%rsp)
11530 movl RIP-R11(%rsp),%edx /* User %eip */
11531@@ -193,6 +238,9 @@ sysexit_from_sys_call:
11532 movl %eax,%esi /* 2nd arg: syscall number */
11533 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
11534 call __audit_syscall_entry
11535+
11536+ pax_erase_kstack
11537+
11538 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
11539 cmpq $(IA32_NR_syscalls-1),%rax
11540 ja ia32_badsys
11541@@ -204,7 +252,7 @@ sysexit_from_sys_call:
11542 .endm
11543
11544 .macro auditsys_exit exit
11545- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11546+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
11547 jnz ia32_ret_from_sys_call
11548 TRACE_IRQS_ON
11549 ENABLE_INTERRUPTS(CLBR_NONE)
11550@@ -215,11 +263,12 @@ sysexit_from_sys_call:
11551 1: setbe %al /* 1 if error, 0 if not */
11552 movzbl %al,%edi /* zero-extend that into %edi */
11553 call __audit_syscall_exit
11554+ GET_THREAD_INFO(%r11)
11555 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
11556 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
11557 DISABLE_INTERRUPTS(CLBR_NONE)
11558 TRACE_IRQS_OFF
11559- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11560+ testl %edi,TI_flags(%r11)
11561 jz \exit
11562 CLEAR_RREGS -ARGOFFSET
11563 jmp int_with_check
11564@@ -237,7 +286,7 @@ sysexit_audit:
11565
11566 sysenter_tracesys:
11567 #ifdef CONFIG_AUDITSYSCALL
11568- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11569+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
11570 jz sysenter_auditsys
11571 #endif
11572 SAVE_REST
11573@@ -249,6 +298,9 @@ sysenter_tracesys:
11574 RESTORE_REST
11575 cmpq $(IA32_NR_syscalls-1),%rax
11576 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
11577+
11578+ pax_erase_kstack
11579+
11580 jmp sysenter_do_call
11581 CFI_ENDPROC
11582 ENDPROC(ia32_sysenter_target)
11583@@ -276,19 +328,25 @@ ENDPROC(ia32_sysenter_target)
11584 ENTRY(ia32_cstar_target)
11585 CFI_STARTPROC32 simple
11586 CFI_SIGNAL_FRAME
11587- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
11588+ CFI_DEF_CFA rsp,0
11589 CFI_REGISTER rip,rcx
11590 /*CFI_REGISTER rflags,r11*/
11591 SWAPGS_UNSAFE_STACK
11592 movl %esp,%r8d
11593 CFI_REGISTER rsp,r8
11594 movq PER_CPU_VAR(kernel_stack),%rsp
11595+ SAVE_ARGS 8*6,0,0
11596+ pax_enter_kernel_user
11597+
11598+#ifdef CONFIG_PAX_RANDKSTACK
11599+ pax_erase_kstack
11600+#endif
11601+
11602 /*
11603 * No need to follow this irqs on/off section: the syscall
11604 * disabled irqs and here we enable it straight after entry:
11605 */
11606 ENABLE_INTERRUPTS(CLBR_NONE)
11607- SAVE_ARGS 8,0,0
11608 movl %eax,%eax /* zero extension */
11609 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
11610 movq %rcx,RIP-ARGOFFSET(%rsp)
11611@@ -304,12 +362,19 @@ ENTRY(ia32_cstar_target)
11612 /* no need to do an access_ok check here because r8 has been
11613 32bit zero extended */
11614 /* hardware stack frame is complete now */
11615+
11616+#ifdef CONFIG_PAX_MEMORY_UDEREF
11617+ mov $PAX_USER_SHADOW_BASE,%r11
11618+ add %r11,%r8
11619+#endif
11620+
11621 ASM_STAC
11622 1: movl (%r8),%r9d
11623 _ASM_EXTABLE(1b,ia32_badarg)
11624 ASM_CLAC
11625- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11626- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11627+ GET_THREAD_INFO(%r11)
11628+ orl $TS_COMPAT,TI_status(%r11)
11629+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
11630 CFI_REMEMBER_STATE
11631 jnz cstar_tracesys
11632 cmpq $IA32_NR_syscalls-1,%rax
11633@@ -319,12 +384,15 @@ cstar_do_call:
11634 cstar_dispatch:
11635 call *ia32_sys_call_table(,%rax,8)
11636 movq %rax,RAX-ARGOFFSET(%rsp)
11637+ GET_THREAD_INFO(%r11)
11638 DISABLE_INTERRUPTS(CLBR_NONE)
11639 TRACE_IRQS_OFF
11640- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11641+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
11642 jnz sysretl_audit
11643 sysretl_from_sys_call:
11644- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11645+ pax_exit_kernel_user
11646+ pax_erase_kstack
11647+ andl $~TS_COMPAT,TI_status(%r11)
11648 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
11649 movl RIP-ARGOFFSET(%rsp),%ecx
11650 CFI_REGISTER rip,rcx
11651@@ -352,7 +420,7 @@ sysretl_audit:
11652
11653 cstar_tracesys:
11654 #ifdef CONFIG_AUDITSYSCALL
11655- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11656+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
11657 jz cstar_auditsys
11658 #endif
11659 xchgl %r9d,%ebp
11660@@ -366,6 +434,9 @@ cstar_tracesys:
11661 xchgl %ebp,%r9d
11662 cmpq $(IA32_NR_syscalls-1),%rax
11663 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
11664+
11665+ pax_erase_kstack
11666+
11667 jmp cstar_do_call
11668 END(ia32_cstar_target)
11669
11670@@ -407,19 +478,26 @@ ENTRY(ia32_syscall)
11671 CFI_REL_OFFSET rip,RIP-RIP
11672 PARAVIRT_ADJUST_EXCEPTION_FRAME
11673 SWAPGS
11674- /*
11675- * No need to follow this irqs on/off section: the syscall
11676- * disabled irqs and here we enable it straight after entry:
11677- */
11678- ENABLE_INTERRUPTS(CLBR_NONE)
11679 movl %eax,%eax
11680 pushq_cfi %rax
11681 cld
11682 /* note the registers are not zero extended to the sf.
11683 this could be a problem. */
11684 SAVE_ARGS 0,1,0
11685- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11686- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11687+ pax_enter_kernel_user
11688+
11689+#ifdef CONFIG_PAX_RANDKSTACK
11690+ pax_erase_kstack
11691+#endif
11692+
11693+ /*
11694+ * No need to follow this irqs on/off section: the syscall
11695+ * disabled irqs and here we enable it straight after entry:
11696+ */
11697+ ENABLE_INTERRUPTS(CLBR_NONE)
11698+ GET_THREAD_INFO(%r11)
11699+ orl $TS_COMPAT,TI_status(%r11)
11700+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
11701 jnz ia32_tracesys
11702 cmpq $(IA32_NR_syscalls-1),%rax
11703 ja ia32_badsys
11704@@ -442,6 +520,9 @@ ia32_tracesys:
11705 RESTORE_REST
11706 cmpq $(IA32_NR_syscalls-1),%rax
11707 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
11708+
11709+ pax_erase_kstack
11710+
11711 jmp ia32_do_call
11712 END(ia32_syscall)
11713
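
[annotation] The ia32entry.S hunks above lean on two PaX mechanisms. Under CONFIG_PAX_MEMORY_UDEREF, kernel code reaches userland only through a shifted alias, which is why the cstar path adds PAX_USER_SHADOW_BASE to %r8 before dereferencing it; pax_erase_kstack scrubs stale data from the kernel stack, and SAVE_ARGS is hoisted so pax_enter_kernel_user runs on a complete frame. A minimal C sketch of the shadow translation, using an illustrative base value rather than the real constant:

#define PAX_USER_SHADOW_BASE_SKETCH (1UL << 47)  /* illustrative, not the real constant */

static inline const void *uderef_shadow(const void *uptr)
{
        /* mirrors "mov $PAX_USER_SHADOW_BASE,%r11; add %r11,%r8" */
        return (const void *)((unsigned long)uptr + PAX_USER_SHADOW_BASE_SKETCH);
}

A stray dereference of an unshifted user pointer from kernel context then faults instead of silently reading user memory.
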
11714diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
11715index d0b689b..34be51d 100644
11716--- a/arch/x86/ia32/sys_ia32.c
11717+++ b/arch/x86/ia32/sys_ia32.c
11718@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
11719 */
11720 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
11721 {
11722- typeof(ubuf->st_uid) uid = 0;
11723- typeof(ubuf->st_gid) gid = 0;
11724+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
11725+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
11726 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
11727 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
11728 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
11729@@ -303,7 +303,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
11730 mm_segment_t old_fs = get_fs();
11731
11732 set_fs(KERNEL_DS);
11733- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
11734+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
11735 set_fs(old_fs);
11736 if (put_compat_timespec(&t, interval))
11737 return -EFAULT;
11738@@ -319,7 +319,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
11739 mm_segment_t old_fs = get_fs();
11740
11741 set_fs(KERNEL_DS);
11742- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
11743+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
11744 set_fs(old_fs);
11745 if (!ret) {
11746 switch (_NSIG_WORDS) {
11747@@ -344,7 +344,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
11748 if (copy_siginfo_from_user32(&info, uinfo))
11749 return -EFAULT;
11750 set_fs(KERNEL_DS);
11751- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
11752+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
11753 set_fs(old_fs);
11754 return ret;
11755 }
11756@@ -376,7 +376,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
11757 return -EFAULT;
11758
11759 set_fs(KERNEL_DS);
11760- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
11761+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
11762 count);
11763 set_fs(old_fs);
11764
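
[annotation] The sys_ia32.c changes are pure annotation: each site already widens the address limit with set_fs(KERNEL_DS) and passes a kernel buffer where a user pointer is expected, and __force_user (grsecurity's shorthand for a forced cast into the user address space) records that the cast is deliberate so the static checker stays quiet here without losing coverage elsewhere. The pattern, condensed from the first hunk:

static long sched_rr_interval_sketch(compat_pid_t pid)
{
        struct timespec t;
        mm_segment_t old_fs = get_fs();
        long ret;

        set_fs(KERNEL_DS);      /* kernel addresses now pass access_ok() */
        ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
        set_fs(old_fs);         /* always restore the original limit */
        return ret;
}
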
11765diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
11766index 372231c..a5aa1a1 100644
11767--- a/arch/x86/include/asm/alternative-asm.h
11768+++ b/arch/x86/include/asm/alternative-asm.h
11769@@ -18,6 +18,45 @@
11770 .endm
11771 #endif
11772
11773+#ifdef KERNEXEC_PLUGIN
11774+ .macro pax_force_retaddr_bts rip=0
11775+ btsq $63,\rip(%rsp)
11776+ .endm
11777+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
11778+ .macro pax_force_retaddr rip=0, reload=0
11779+ btsq $63,\rip(%rsp)
11780+ .endm
11781+ .macro pax_force_fptr ptr
11782+ btsq $63,\ptr
11783+ .endm
11784+ .macro pax_set_fptr_mask
11785+ .endm
11786+#endif
11787+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
11788+ .macro pax_force_retaddr rip=0, reload=0
11789+ .if \reload
11790+ pax_set_fptr_mask
11791+ .endif
11792+ orq %r10,\rip(%rsp)
11793+ .endm
11794+ .macro pax_force_fptr ptr
11795+ orq %r10,\ptr
11796+ .endm
11797+ .macro pax_set_fptr_mask
11798+ movabs $0x8000000000000000,%r10
11799+ .endm
11800+#endif
11801+#else
11802+ .macro pax_force_retaddr rip=0, reload=0
11803+ .endm
11804+ .macro pax_force_fptr ptr
11805+ .endm
11806+ .macro pax_force_retaddr_bts rip=0
11807+ .endm
11808+ .macro pax_set_fptr_mask
11809+ .endm
11810+#endif
11811+
11812 .macro altinstruction_entry orig alt feature orig_len alt_len
11813 .long \orig - .
11814 .long \alt - .
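
[annotation] The pax_force_retaddr family implements the KERNEXEC plugin's return-address hardening: kernel text lives in the upper canonical half of the address space, so forcing bit 63 on a saved return address (btsq $63 with the BTS method, or orq %r10 with the OR method, where pax_set_fptr_mask caches the 0x8000000000000000 mask in %r10) guarantees an overwritten return address can never point back into userland. In C terms:

static inline unsigned long kernexec_force_kernel(unsigned long retaddr)
{
        /* equivalent of "btsq $63,\rip(%rsp)" or "orq %r10,\rip(%rsp)" */
        return retaddr | (1UL << 63);
}

The OR method exists because a cached mask register is cheaper than repeated bts on some microarchitectures; the choice is a CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_* build option.
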
11815diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
11816index 58ed6d9..f1cbe58 100644
11817--- a/arch/x86/include/asm/alternative.h
11818+++ b/arch/x86/include/asm/alternative.h
11819@@ -105,7 +105,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
11820 ".pushsection .discard,\"aw\",@progbits\n" \
11821 DISCARD_ENTRY(1) \
11822 ".popsection\n" \
11823- ".pushsection .altinstr_replacement, \"ax\"\n" \
11824+ ".pushsection .altinstr_replacement, \"a\"\n" \
11825 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
11826 ".popsection"
11827
11828@@ -119,7 +119,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
11829 DISCARD_ENTRY(1) \
11830 DISCARD_ENTRY(2) \
11831 ".popsection\n" \
11832- ".pushsection .altinstr_replacement, \"ax\"\n" \
11833+ ".pushsection .altinstr_replacement, \"a\"\n" \
11834 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
11835 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
11836 ".popsection"
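
[annotation] Dropping the executable flag from .altinstr_replacement works because the replacement bytes are never executed in place: apply_alternatives() copies them over the original instructions at boot, so the section only needs to be allocatable, and under KERNEXEC an extra executable mapping is exactly what the patch is trying to eliminate. A minimal top-level-asm illustration of the same flag change:

/* the bytes below are data to the patcher, not live text */
asm(".pushsection .altinstr_replacement,\"a\"\n\t"
    "nop\n\t"
    ".popsection");
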
11837diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
11838index 3388034..050f0b9 100644
11839--- a/arch/x86/include/asm/apic.h
11840+++ b/arch/x86/include/asm/apic.h
11841@@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
11842
11843 #ifdef CONFIG_X86_LOCAL_APIC
11844
11845-extern unsigned int apic_verbosity;
11846+extern int apic_verbosity;
11847 extern int local_apic_timer_c2_ok;
11848
11849 extern int disable_apic;
11850diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
11851index 20370c6..a2eb9b0 100644
11852--- a/arch/x86/include/asm/apm.h
11853+++ b/arch/x86/include/asm/apm.h
11854@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
11855 __asm__ __volatile__(APM_DO_ZERO_SEGS
11856 "pushl %%edi\n\t"
11857 "pushl %%ebp\n\t"
11858- "lcall *%%cs:apm_bios_entry\n\t"
11859+ "lcall *%%ss:apm_bios_entry\n\t"
11860 "setc %%al\n\t"
11861 "popl %%ebp\n\t"
11862 "popl %%edi\n\t"
11863@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
11864 __asm__ __volatile__(APM_DO_ZERO_SEGS
11865 "pushl %%edi\n\t"
11866 "pushl %%ebp\n\t"
11867- "lcall *%%cs:apm_bios_entry\n\t"
11868+ "lcall *%%ss:apm_bios_entry\n\t"
11869 "setc %%bl\n\t"
11870 "popl %%ebp\n\t"
11871 "popl %%edi\n\t"
11872diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
11873index 722aa3b..3a0bb27 100644
11874--- a/arch/x86/include/asm/atomic.h
11875+++ b/arch/x86/include/asm/atomic.h
11876@@ -22,7 +22,18 @@
11877 */
11878 static inline int atomic_read(const atomic_t *v)
11879 {
11880- return (*(volatile int *)&(v)->counter);
11881+ return (*(volatile const int *)&(v)->counter);
11882+}
11883+
11884+/**
11885+ * atomic_read_unchecked - read atomic variable
11886+ * @v: pointer of type atomic_unchecked_t
11887+ *
11888+ * Atomically reads the value of @v.
11889+ */
11890+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
11891+{
11892+ return (*(volatile const int *)&(v)->counter);
11893 }
11894
11895 /**
11896@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
11897 }
11898
11899 /**
11900+ * atomic_set_unchecked - set atomic variable
11901+ * @v: pointer of type atomic_unchecked_t
11902+ * @i: required value
11903+ *
11904+ * Atomically sets the value of @v to @i.
11905+ */
11906+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
11907+{
11908+ v->counter = i;
11909+}
11910+
11911+/**
11912 * atomic_add - add integer to atomic variable
11913 * @i: integer value to add
11914 * @v: pointer of type atomic_t
11915@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
11916 */
11917 static inline void atomic_add(int i, atomic_t *v)
11918 {
11919- asm volatile(LOCK_PREFIX "addl %1,%0"
11920+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
11921+
11922+#ifdef CONFIG_PAX_REFCOUNT
11923+ "jno 0f\n"
11924+ LOCK_PREFIX "subl %1,%0\n"
11925+ "int $4\n0:\n"
11926+ _ASM_EXTABLE(0b, 0b)
11927+#endif
11928+
11929+ : "+m" (v->counter)
11930+ : "ir" (i));
11931+}
11932+
11933+/**
11934+ * atomic_add_unchecked - add integer to atomic variable
11935+ * @i: integer value to add
11936+ * @v: pointer of type atomic_unchecked_t
11937+ *
11938+ * Atomically adds @i to @v.
11939+ */
11940+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
11941+{
11942+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
11943 : "+m" (v->counter)
11944 : "ir" (i));
11945 }
11946@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
11947 */
11948 static inline void atomic_sub(int i, atomic_t *v)
11949 {
11950- asm volatile(LOCK_PREFIX "subl %1,%0"
11951+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
11952+
11953+#ifdef CONFIG_PAX_REFCOUNT
11954+ "jno 0f\n"
11955+ LOCK_PREFIX "addl %1,%0\n"
11956+ "int $4\n0:\n"
11957+ _ASM_EXTABLE(0b, 0b)
11958+#endif
11959+
11960+ : "+m" (v->counter)
11961+ : "ir" (i));
11962+}
11963+
11964+/**
11965+ * atomic_sub_unchecked - subtract integer from atomic variable
11966+ * @i: integer value to subtract
11967+ * @v: pointer of type atomic_unchecked_t
11968+ *
11969+ * Atomically subtracts @i from @v.
11970+ */
11971+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
11972+{
11973+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
11974 : "+m" (v->counter)
11975 : "ir" (i));
11976 }
11977@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
11978 {
11979 unsigned char c;
11980
11981- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
11982+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
11983+
11984+#ifdef CONFIG_PAX_REFCOUNT
11985+ "jno 0f\n"
11986+ LOCK_PREFIX "addl %2,%0\n"
11987+ "int $4\n0:\n"
11988+ _ASM_EXTABLE(0b, 0b)
11989+#endif
11990+
11991+ "sete %1\n"
11992 : "+m" (v->counter), "=qm" (c)
11993 : "ir" (i) : "memory");
11994 return c;
11995@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
11996 */
11997 static inline void atomic_inc(atomic_t *v)
11998 {
11999- asm volatile(LOCK_PREFIX "incl %0"
12000+ asm volatile(LOCK_PREFIX "incl %0\n"
12001+
12002+#ifdef CONFIG_PAX_REFCOUNT
12003+ "jno 0f\n"
12004+ LOCK_PREFIX "decl %0\n"
12005+ "int $4\n0:\n"
12006+ _ASM_EXTABLE(0b, 0b)
12007+#endif
12008+
12009+ : "+m" (v->counter));
12010+}
12011+
12012+/**
12013+ * atomic_inc_unchecked - increment atomic variable
12014+ * @v: pointer of type atomic_unchecked_t
12015+ *
12016+ * Atomically increments @v by 1.
12017+ */
12018+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
12019+{
12020+ asm volatile(LOCK_PREFIX "incl %0\n"
12021 : "+m" (v->counter));
12022 }
12023
12024@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
12025 */
12026 static inline void atomic_dec(atomic_t *v)
12027 {
12028- asm volatile(LOCK_PREFIX "decl %0"
12029+ asm volatile(LOCK_PREFIX "decl %0\n"
12030+
12031+#ifdef CONFIG_PAX_REFCOUNT
12032+ "jno 0f\n"
12033+ LOCK_PREFIX "incl %0\n"
12034+ "int $4\n0:\n"
12035+ _ASM_EXTABLE(0b, 0b)
12036+#endif
12037+
12038+ : "+m" (v->counter));
12039+}
12040+
12041+/**
12042+ * atomic_dec_unchecked - decrement atomic variable
12043+ * @v: pointer of type atomic_unchecked_t
12044+ *
12045+ * Atomically decrements @v by 1.
12046+ */
12047+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
12048+{
12049+ asm volatile(LOCK_PREFIX "decl %0\n"
12050 : "+m" (v->counter));
12051 }
12052
12053@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
12054 {
12055 unsigned char c;
12056
12057- asm volatile(LOCK_PREFIX "decl %0; sete %1"
12058+ asm volatile(LOCK_PREFIX "decl %0\n"
12059+
12060+#ifdef CONFIG_PAX_REFCOUNT
12061+ "jno 0f\n"
12062+ LOCK_PREFIX "incl %0\n"
12063+ "int $4\n0:\n"
12064+ _ASM_EXTABLE(0b, 0b)
12065+#endif
12066+
12067+ "sete %1\n"
12068 : "+m" (v->counter), "=qm" (c)
12069 : : "memory");
12070 return c != 0;
12071@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
12072 {
12073 unsigned char c;
12074
12075- asm volatile(LOCK_PREFIX "incl %0; sete %1"
12076+ asm volatile(LOCK_PREFIX "incl %0\n"
12077+
12078+#ifdef CONFIG_PAX_REFCOUNT
12079+ "jno 0f\n"
12080+ LOCK_PREFIX "decl %0\n"
12081+ "int $4\n0:\n"
12082+ _ASM_EXTABLE(0b, 0b)
12083+#endif
12084+
12085+ "sete %1\n"
12086+ : "+m" (v->counter), "=qm" (c)
12087+ : : "memory");
12088+ return c != 0;
12089+}
12090+
12091+/**
12092+ * atomic_inc_and_test_unchecked - increment and test
12093+ * @v: pointer of type atomic_unchecked_t
12094+ *
12095+ * Atomically increments @v by 1
12096+ * and returns true if the result is zero, or false for all
12097+ * other cases.
12098+ */
12099+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
12100+{
12101+ unsigned char c;
12102+
12103+ asm volatile(LOCK_PREFIX "incl %0\n"
12104+ "sete %1\n"
12105 : "+m" (v->counter), "=qm" (c)
12106 : : "memory");
12107 return c != 0;
12108@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
12109 {
12110 unsigned char c;
12111
12112- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
12113+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
12114+
12115+#ifdef CONFIG_PAX_REFCOUNT
12116+ "jno 0f\n"
12117+ LOCK_PREFIX "subl %2,%0\n"
12118+ "int $4\n0:\n"
12119+ _ASM_EXTABLE(0b, 0b)
12120+#endif
12121+
12122+ "sets %1\n"
12123 : "+m" (v->counter), "=qm" (c)
12124 : "ir" (i) : "memory");
12125 return c;
12126@@ -172,6 +334,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
12127 */
12128 static inline int atomic_add_return(int i, atomic_t *v)
12129 {
12130+ return i + xadd_check_overflow(&v->counter, i);
12131+}
12132+
12133+/**
12134+ * atomic_add_return_unchecked - add integer and return
12135+ * @i: integer value to add
12136+ * @v: pointer of type atomic_unchecked_t
12137+ *
12138+ * Atomically adds @i to @v and returns @i + @v
12139+ */
12140+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
12141+{
12142 return i + xadd(&v->counter, i);
12143 }
12144
12145@@ -188,6 +362,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
12146 }
12147
12148 #define atomic_inc_return(v) (atomic_add_return(1, v))
12149+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
12150+{
12151+ return atomic_add_return_unchecked(1, v);
12152+}
12153 #define atomic_dec_return(v) (atomic_sub_return(1, v))
12154
12155 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
12156@@ -195,11 +373,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
12157 return cmpxchg(&v->counter, old, new);
12158 }
12159
12160+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
12161+{
12162+ return cmpxchg(&v->counter, old, new);
12163+}
12164+
12165 static inline int atomic_xchg(atomic_t *v, int new)
12166 {
12167 return xchg(&v->counter, new);
12168 }
12169
12170+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
12171+{
12172+ return xchg(&v->counter, new);
12173+}
12174+
12175 /**
12176 * __atomic_add_unless - add unless the number is already a given value
12177 * @v: pointer of type atomic_t
12178@@ -211,12 +399,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
12179 */
12180 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
12181 {
12182- int c, old;
12183+ int c, old, new;
12184 c = atomic_read(v);
12185 for (;;) {
12186- if (unlikely(c == (u)))
12187+ if (unlikely(c == u))
12188 break;
12189- old = atomic_cmpxchg((v), c, c + (a));
12190+
12191+ asm volatile("addl %2,%0\n"
12192+
12193+#ifdef CONFIG_PAX_REFCOUNT
12194+ "jno 0f\n"
12195+ "subl %2,%0\n"
12196+ "int $4\n0:\n"
12197+ _ASM_EXTABLE(0b, 0b)
12198+#endif
12199+
12200+ : "=r" (new)
12201+ : "0" (c), "ir" (a));
12202+
12203+ old = atomic_cmpxchg(v, c, new);
12204 if (likely(old == c))
12205 break;
12206 c = old;
12207@@ -225,6 +426,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
12208 }
12209
12210 /**
12211+ * atomic_inc_not_zero_hint - increment if not null
12212+ * @v: pointer of type atomic_t
12213+ * @hint: probable value of the atomic before the increment
12214+ *
12215+ * This version of atomic_inc_not_zero() gives a hint of probable
12216+ * value of the atomic. This helps processor to not read the memory
12217+ * before doing the atomic read/modify/write cycle, lowering
12218+ * number of bus transactions on some arches.
12219+ *
12220+ * Returns: 0 if increment was not done, 1 otherwise.
12221+ */
12222+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
12223+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
12224+{
12225+ int val, c = hint, new;
12226+
12227+ /* sanity test, should be removed by compiler if hint is a constant */
12228+ if (!hint)
12229+ return __atomic_add_unless(v, 1, 0);
12230+
12231+ do {
12232+ asm volatile("incl %0\n"
12233+
12234+#ifdef CONFIG_PAX_REFCOUNT
12235+ "jno 0f\n"
12236+ "decl %0\n"
12237+ "int $4\n0:\n"
12238+ _ASM_EXTABLE(0b, 0b)
12239+#endif
12240+
12241+ : "=r" (new)
12242+ : "0" (c));
12243+
12244+ val = atomic_cmpxchg(v, c, new);
12245+ if (val == c)
12246+ return 1;
12247+ c = val;
12248+ } while (c);
12249+
12250+ return 0;
12251+}
12252+
12253+/**
12254 * atomic_inc_short - increment of a short integer
12255 * @v: pointer to type int
12256 *
12257@@ -253,14 +497,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
12258 #endif
12259
12260 /* These are x86-specific, used by some header files */
12261-#define atomic_clear_mask(mask, addr) \
12262- asm volatile(LOCK_PREFIX "andl %0,%1" \
12263- : : "r" (~(mask)), "m" (*(addr)) : "memory")
12264+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
12265+{
12266+ asm volatile(LOCK_PREFIX "andl %1,%0"
12267+ : "+m" (v->counter)
12268+ : "r" (~(mask))
12269+ : "memory");
12270+}
12271
12272-#define atomic_set_mask(mask, addr) \
12273- asm volatile(LOCK_PREFIX "orl %0,%1" \
12274- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
12275- : "memory")
12276+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
12277+{
12278+ asm volatile(LOCK_PREFIX "andl %1,%0"
12279+ : "+m" (v->counter)
12280+ : "r" (~(mask))
12281+ : "memory");
12282+}
12283+
12284+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
12285+{
12286+ asm volatile(LOCK_PREFIX "orl %1,%0"
12287+ : "+m" (v->counter)
12288+ : "r" (mask)
12289+ : "memory");
12290+}
12291+
12292+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
12293+{
12294+ asm volatile(LOCK_PREFIX "orl %1,%0"
12295+ : "+m" (v->counter)
12296+ : "r" (mask)
12297+ : "memory");
12298+}
12299
12300 /* Atomic operations are already serializing on x86 */
12301 #define smp_mb__before_atomic_dec() barrier()
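
[annotation] All of the atomic.h conversions follow one scheme: after the locked arithmetic, jno falls through on the common no-overflow path, while a signed overflow undoes the operation (saturating the counter at the boundary) and raises int $4, PaX's REFCOUNT trap; the _ASM_EXTABLE entry lets the overflow handler resume right after reporting. Counters whose wraparound is harmless, such as irq_err_count in the hw_irq.h hunk below, become atomic_unchecked_t to opt out. A hedged user-space analog of the checked add, where abort() stands in for "int $4":

#include <stdlib.h>

static int checked_atomic_add(int *counter, int i)
{
        int old = __atomic_fetch_add(counter, i, __ATOMIC_SEQ_CST);
        int new;

        if (__builtin_add_overflow(old, i, &new)) {
                __atomic_fetch_sub(counter, i, __ATOMIC_SEQ_CST); /* undo, like "subl %1,%0" */
                abort();
        }
        return new;
}
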
12302diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
12303index b154de7..aadebd8 100644
12304--- a/arch/x86/include/asm/atomic64_32.h
12305+++ b/arch/x86/include/asm/atomic64_32.h
12306@@ -12,6 +12,14 @@ typedef struct {
12307 u64 __aligned(8) counter;
12308 } atomic64_t;
12309
12310+#ifdef CONFIG_PAX_REFCOUNT
12311+typedef struct {
12312+ u64 __aligned(8) counter;
12313+} atomic64_unchecked_t;
12314+#else
12315+typedef atomic64_t atomic64_unchecked_t;
12316+#endif
12317+
12318 #define ATOMIC64_INIT(val) { (val) }
12319
12320 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
12321@@ -37,21 +45,31 @@ typedef struct {
12322 ATOMIC64_DECL_ONE(sym##_386)
12323
12324 ATOMIC64_DECL_ONE(add_386);
12325+ATOMIC64_DECL_ONE(add_unchecked_386);
12326 ATOMIC64_DECL_ONE(sub_386);
12327+ATOMIC64_DECL_ONE(sub_unchecked_386);
12328 ATOMIC64_DECL_ONE(inc_386);
12329+ATOMIC64_DECL_ONE(inc_unchecked_386);
12330 ATOMIC64_DECL_ONE(dec_386);
12331+ATOMIC64_DECL_ONE(dec_unchecked_386);
12332 #endif
12333
12334 #define alternative_atomic64(f, out, in...) \
12335 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
12336
12337 ATOMIC64_DECL(read);
12338+ATOMIC64_DECL(read_unchecked);
12339 ATOMIC64_DECL(set);
12340+ATOMIC64_DECL(set_unchecked);
12341 ATOMIC64_DECL(xchg);
12342 ATOMIC64_DECL(add_return);
12343+ATOMIC64_DECL(add_return_unchecked);
12344 ATOMIC64_DECL(sub_return);
12345+ATOMIC64_DECL(sub_return_unchecked);
12346 ATOMIC64_DECL(inc_return);
12347+ATOMIC64_DECL(inc_return_unchecked);
12348 ATOMIC64_DECL(dec_return);
12349+ATOMIC64_DECL(dec_return_unchecked);
12350 ATOMIC64_DECL(dec_if_positive);
12351 ATOMIC64_DECL(inc_not_zero);
12352 ATOMIC64_DECL(add_unless);
12353@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
12354 }
12355
12356 /**
12357+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
12358+ * @p: pointer to type atomic64_unchecked_t
12359+ * @o: expected value
12360+ * @n: new value
12361+ *
12362+ * Atomically sets @v to @n if it was equal to @o and returns
12363+ * the old value.
12364+ */
12365+
12366+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
12367+{
12368+ return cmpxchg64(&v->counter, o, n);
12369+}
12370+
12371+/**
12372 * atomic64_xchg - xchg atomic64 variable
12373 * @v: pointer to type atomic64_t
12374 * @n: value to assign
12375@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
12376 }
12377
12378 /**
12379+ * atomic64_set_unchecked - set atomic64 variable
12380+ * @v: pointer to type atomic64_unchecked_t
12381+ * @n: value to assign
12382+ *
12383+ * Atomically sets the value of @v to @n.
12384+ */
12385+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
12386+{
12387+ unsigned high = (unsigned)(i >> 32);
12388+ unsigned low = (unsigned)i;
12389+ alternative_atomic64(set, /* no output */,
12390+ "S" (v), "b" (low), "c" (high)
12391+ : "eax", "edx", "memory");
12392+}
12393+
12394+/**
12395 * atomic64_read - read atomic64 variable
12396 * @v: pointer to type atomic64_t
12397 *
12398@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
12399 }
12400
12401 /**
12402+ * atomic64_read_unchecked - read atomic64 variable
12403+ * @v: pointer to type atomic64_unchecked_t
12404+ *
12405+ * Atomically reads the value of @v and returns it.
12406+ */
12407+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
12408+{
12409+ long long r;
12410+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
12411+ return r;
12412+ }
12413+
12414+/**
12415 * atomic64_add_return - add and return
12416 * @i: integer value to add
12417 * @v: pointer to type atomic64_t
12418@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
12419 return i;
12420 }
12421
12422+/**
12423+ * atomic64_add_return_unchecked - add and return
12424+ * @i: integer value to add
12425+ * @v: pointer to type atomic64_unchecked_t
12426+ *
12427+ * Atomically adds @i to @v and returns @i + *@v
12428+ */
12429+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
12430+{
12431+ alternative_atomic64(add_return_unchecked,
12432+ ASM_OUTPUT2("+A" (i), "+c" (v)),
12433+ ASM_NO_INPUT_CLOBBER("memory"));
12434+ return i;
12435+}
12436+
12437 /*
12438 * Other variants with different arithmetic operators:
12439 */
12440@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
12441 return a;
12442 }
12443
12444+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
12445+{
12446+ long long a;
12447+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
12448+ "S" (v) : "memory", "ecx");
12449+ return a;
12450+}
12451+
12452 static inline long long atomic64_dec_return(atomic64_t *v)
12453 {
12454 long long a;
12455@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
12456 }
12457
12458 /**
12459+ * atomic64_add_unchecked - add integer to atomic64 variable
12460+ * @i: integer value to add
12461+ * @v: pointer to type atomic64_unchecked_t
12462+ *
12463+ * Atomically adds @i to @v.
12464+ */
12465+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
12466+{
12467+ __alternative_atomic64(add_unchecked, add_return_unchecked,
12468+ ASM_OUTPUT2("+A" (i), "+c" (v)),
12469+ ASM_NO_INPUT_CLOBBER("memory"));
12470+ return i;
12471+}
12472+
12473+/**
12474 * atomic64_sub - subtract the atomic64 variable
12475 * @i: integer value to subtract
12476 * @v: pointer to type atomic64_t
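
[annotation] On 32-bit x86 the only 8-byte locked primitive is cmpxchg8b, so every atomic64 operation is an out-of-line helper selected at boot via alternative_atomic64() (with the ATOMIC64_DECL_ONE(sym##_386) fallbacks covering CPUs without cmpxchg8b). The hunks above declare a parallel _unchecked helper for each operation, identical except for the REFCOUNT overflow trap. A sketch of what the cmpxchg8b-backed read amounts to:

/* sketch: __atomic_load_n on an 8-byte object compiles to a
 * lock cmpxchg8b loop on 32-bit x86, like the out-of-line helper */
static inline long long atomic64_read_sketch(const long long *p)
{
        return __atomic_load_n(p, __ATOMIC_SEQ_CST);
}
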
12477diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
12478index 0e1cbfc..5623683 100644
12479--- a/arch/x86/include/asm/atomic64_64.h
12480+++ b/arch/x86/include/asm/atomic64_64.h
12481@@ -18,7 +18,19 @@
12482 */
12483 static inline long atomic64_read(const atomic64_t *v)
12484 {
12485- return (*(volatile long *)&(v)->counter);
12486+ return (*(volatile const long *)&(v)->counter);
12487+}
12488+
12489+/**
12490+ * atomic64_read_unchecked - read atomic64 variable
12491+ * @v: pointer of type atomic64_unchecked_t
12492+ *
12493+ * Atomically reads the value of @v.
12494+ * Doesn't imply a read memory barrier.
12495+ */
12496+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
12497+{
12498+ return (*(volatile const long *)&(v)->counter);
12499 }
12500
12501 /**
12502@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
12503 }
12504
12505 /**
12506+ * atomic64_set_unchecked - set atomic64 variable
12507+ * @v: pointer to type atomic64_unchecked_t
12508+ * @i: required value
12509+ *
12510+ * Atomically sets the value of @v to @i.
12511+ */
12512+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
12513+{
12514+ v->counter = i;
12515+}
12516+
12517+/**
12518 * atomic64_add - add integer to atomic64 variable
12519 * @i: integer value to add
12520 * @v: pointer to type atomic64_t
12521@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
12522 */
12523 static inline void atomic64_add(long i, atomic64_t *v)
12524 {
12525+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
12526+
12527+#ifdef CONFIG_PAX_REFCOUNT
12528+ "jno 0f\n"
12529+ LOCK_PREFIX "subq %1,%0\n"
12530+ "int $4\n0:\n"
12531+ _ASM_EXTABLE(0b, 0b)
12532+#endif
12533+
12534+ : "=m" (v->counter)
12535+ : "er" (i), "m" (v->counter));
12536+}
12537+
12538+/**
12539+ * atomic64_add_unchecked - add integer to atomic64 variable
12540+ * @i: integer value to add
12541+ * @v: pointer to type atomic64_unchecked_t
12542+ *
12543+ * Atomically adds @i to @v.
12544+ */
12545+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
12546+{
12547 asm volatile(LOCK_PREFIX "addq %1,%0"
12548 : "=m" (v->counter)
12549 : "er" (i), "m" (v->counter));
12550@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
12551 */
12552 static inline void atomic64_sub(long i, atomic64_t *v)
12553 {
12554- asm volatile(LOCK_PREFIX "subq %1,%0"
12555+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
12556+
12557+#ifdef CONFIG_PAX_REFCOUNT
12558+ "jno 0f\n"
12559+ LOCK_PREFIX "addq %1,%0\n"
12560+ "int $4\n0:\n"
12561+ _ASM_EXTABLE(0b, 0b)
12562+#endif
12563+
12564+ : "=m" (v->counter)
12565+ : "er" (i), "m" (v->counter));
12566+}
12567+
12568+/**
12569+ * atomic64_sub_unchecked - subtract the atomic64 variable
12570+ * @i: integer value to subtract
12571+ * @v: pointer to type atomic64_unchecked_t
12572+ *
12573+ * Atomically subtracts @i from @v.
12574+ */
12575+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
12576+{
12577+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
12578 : "=m" (v->counter)
12579 : "er" (i), "m" (v->counter));
12580 }
12581@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
12582 {
12583 unsigned char c;
12584
12585- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
12586+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
12587+
12588+#ifdef CONFIG_PAX_REFCOUNT
12589+ "jno 0f\n"
12590+ LOCK_PREFIX "addq %2,%0\n"
12591+ "int $4\n0:\n"
12592+ _ASM_EXTABLE(0b, 0b)
12593+#endif
12594+
12595+ "sete %1\n"
12596 : "=m" (v->counter), "=qm" (c)
12597 : "er" (i), "m" (v->counter) : "memory");
12598 return c;
12599@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
12600 */
12601 static inline void atomic64_inc(atomic64_t *v)
12602 {
12603+ asm volatile(LOCK_PREFIX "incq %0\n"
12604+
12605+#ifdef CONFIG_PAX_REFCOUNT
12606+ "jno 0f\n"
12607+ LOCK_PREFIX "decq %0\n"
12608+ "int $4\n0:\n"
12609+ _ASM_EXTABLE(0b, 0b)
12610+#endif
12611+
12612+ : "=m" (v->counter)
12613+ : "m" (v->counter));
12614+}
12615+
12616+/**
12617+ * atomic64_inc_unchecked - increment atomic64 variable
12618+ * @v: pointer to type atomic64_unchecked_t
12619+ *
12620+ * Atomically increments @v by 1.
12621+ */
12622+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
12623+{
12624 asm volatile(LOCK_PREFIX "incq %0"
12625 : "=m" (v->counter)
12626 : "m" (v->counter));
12627@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
12628 */
12629 static inline void atomic64_dec(atomic64_t *v)
12630 {
12631- asm volatile(LOCK_PREFIX "decq %0"
12632+ asm volatile(LOCK_PREFIX "decq %0\n"
12633+
12634+#ifdef CONFIG_PAX_REFCOUNT
12635+ "jno 0f\n"
12636+ LOCK_PREFIX "incq %0\n"
12637+ "int $4\n0:\n"
12638+ _ASM_EXTABLE(0b, 0b)
12639+#endif
12640+
12641+ : "=m" (v->counter)
12642+ : "m" (v->counter));
12643+}
12644+
12645+/**
12646+ * atomic64_dec_unchecked - decrement atomic64 variable
12647+ * @v: pointer to type atomic64_t
12648+ *
12649+ * Atomically decrements @v by 1.
12650+ */
12651+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
12652+{
12653+ asm volatile(LOCK_PREFIX "decq %0\n"
12654 : "=m" (v->counter)
12655 : "m" (v->counter));
12656 }
12657@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
12658 {
12659 unsigned char c;
12660
12661- asm volatile(LOCK_PREFIX "decq %0; sete %1"
12662+ asm volatile(LOCK_PREFIX "decq %0\n"
12663+
12664+#ifdef CONFIG_PAX_REFCOUNT
12665+ "jno 0f\n"
12666+ LOCK_PREFIX "incq %0\n"
12667+ "int $4\n0:\n"
12668+ _ASM_EXTABLE(0b, 0b)
12669+#endif
12670+
12671+ "sete %1\n"
12672 : "=m" (v->counter), "=qm" (c)
12673 : "m" (v->counter) : "memory");
12674 return c != 0;
12675@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
12676 {
12677 unsigned char c;
12678
12679- asm volatile(LOCK_PREFIX "incq %0; sete %1"
12680+ asm volatile(LOCK_PREFIX "incq %0\n"
12681+
12682+#ifdef CONFIG_PAX_REFCOUNT
12683+ "jno 0f\n"
12684+ LOCK_PREFIX "decq %0\n"
12685+ "int $4\n0:\n"
12686+ _ASM_EXTABLE(0b, 0b)
12687+#endif
12688+
12689+ "sete %1\n"
12690 : "=m" (v->counter), "=qm" (c)
12691 : "m" (v->counter) : "memory");
12692 return c != 0;
12693@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
12694 {
12695 unsigned char c;
12696
12697- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
12698+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
12699+
12700+#ifdef CONFIG_PAX_REFCOUNT
12701+ "jno 0f\n"
12702+ LOCK_PREFIX "subq %2,%0\n"
12703+ "int $4\n0:\n"
12704+ _ASM_EXTABLE(0b, 0b)
12705+#endif
12706+
12707+ "sets %1\n"
12708 : "=m" (v->counter), "=qm" (c)
12709 : "er" (i), "m" (v->counter) : "memory");
12710 return c;
12711@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
12712 */
12713 static inline long atomic64_add_return(long i, atomic64_t *v)
12714 {
12715+ return i + xadd_check_overflow(&v->counter, i);
12716+}
12717+
12718+/**
12719+ * atomic64_add_return_unchecked - add and return
12720+ * @i: integer value to add
12721+ * @v: pointer to type atomic64_unchecked_t
12722+ *
12723+ * Atomically adds @i to @v and returns @i + @v
12724+ */
12725+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
12726+{
12727 return i + xadd(&v->counter, i);
12728 }
12729
12730@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
12731 }
12732
12733 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
12734+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
12735+{
12736+ return atomic64_add_return_unchecked(1, v);
12737+}
12738 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
12739
12740 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
12741@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
12742 return cmpxchg(&v->counter, old, new);
12743 }
12744
12745+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
12746+{
12747+ return cmpxchg(&v->counter, old, new);
12748+}
12749+
12750 static inline long atomic64_xchg(atomic64_t *v, long new)
12751 {
12752 return xchg(&v->counter, new);
12753@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
12754 */
12755 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
12756 {
12757- long c, old;
12758+ long c, old, new;
12759 c = atomic64_read(v);
12760 for (;;) {
12761- if (unlikely(c == (u)))
12762+ if (unlikely(c == u))
12763 break;
12764- old = atomic64_cmpxchg((v), c, c + (a));
12765+
12766+ asm volatile("add %2,%0\n"
12767+
12768+#ifdef CONFIG_PAX_REFCOUNT
12769+ "jno 0f\n"
12770+ "sub %2,%0\n"
12771+ "int $4\n0:\n"
12772+ _ASM_EXTABLE(0b, 0b)
12773+#endif
12774+
12775+ : "=r" (new)
12776+ : "0" (c), "ir" (a));
12777+
12778+ old = atomic64_cmpxchg(v, c, new);
12779 if (likely(old == c))
12780 break;
12781 c = old;
12782 }
12783- return c != (u);
12784+ return c != u;
12785 }
12786
12787 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
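
[annotation] __atomic_add_unless and atomic64_add_unless get the overflow check in open-coded form: the candidate value is computed by a checked add into a separate register before the cmpxchg, so a wrapping increment is caught even though the store itself is unchanged. The reworked loop as a portable analog, mirroring atomic64_add_unless:

#include <stdlib.h>

static int add_unless_sketch(int *v, int a, int u)
{
        int c = __atomic_load_n(v, __ATOMIC_RELAXED);

        for (;;) {
                int new;

                if (c == u)
                        break;
                if (__builtin_add_overflow(c, a, &new))
                        abort();        /* stands in for "int $4" */
                /* on failure, c is reloaded with the current value */
                if (__atomic_compare_exchange_n(v, &c, new, 0,
                                                __ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
                        break;
        }
        return c != u;
}
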
12788diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
12789index 6dfd019..28e188d 100644
12790--- a/arch/x86/include/asm/bitops.h
12791+++ b/arch/x86/include/asm/bitops.h
12792@@ -40,7 +40,7 @@
12793 * a mask operation on a byte.
12794 */
12795 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
12796-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
12797+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
12798 #define CONST_MASK(nr) (1 << ((nr) & 7))
12799
12800 /**
12801@@ -486,7 +486,7 @@ static inline int fls(int x)
12802 * at position 64.
12803 */
12804 #ifdef CONFIG_X86_64
12805-static __always_inline int fls64(__u64 x)
12806+static __always_inline long fls64(__u64 x)
12807 {
12808 int bitpos = -1;
12809 /*
12810diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
12811index 4fa687a..60f2d39 100644
12812--- a/arch/x86/include/asm/boot.h
12813+++ b/arch/x86/include/asm/boot.h
12814@@ -6,10 +6,15 @@
12815 #include <uapi/asm/boot.h>
12816
12817 /* Physical address where kernel should be loaded. */
12818-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
12819+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
12820 + (CONFIG_PHYSICAL_ALIGN - 1)) \
12821 & ~(CONFIG_PHYSICAL_ALIGN - 1))
12822
12823+#ifndef __ASSEMBLY__
12824+extern unsigned char __LOAD_PHYSICAL_ADDR[];
12825+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
12826+#endif
12827+
12828 /* Minimum kernel alignment, as a power of two */
12829 #ifdef CONFIG_X86_64
12830 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
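
[annotation] Rewriting LOAD_PHYSICAL_ADDR as the address of a linker-provided symbol means C code no longer bakes the load address in as a preprocessor constant; the value is resolved at link time (the symbol definition is assumed to live in the kernel linker script), while assembly keeps the raw ____LOAD_PHYSICAL_ADDR arithmetic behind the __ASSEMBLY__ guard. Usage in C is unchanged:

extern unsigned char __LOAD_PHYSICAL_ADDR[];    /* defined by the linker script (assumed) */
#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)

static unsigned long kernel_load_addr_sketch(void)
{
        return LOAD_PHYSICAL_ADDR;      /* the symbol's address *is* the value */
}
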
12831diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
12832index 48f99f1..d78ebf9 100644
12833--- a/arch/x86/include/asm/cache.h
12834+++ b/arch/x86/include/asm/cache.h
12835@@ -5,12 +5,13 @@
12836
12837 /* L1 cache line size */
12838 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
12839-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12840+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12841
12842 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
12843+#define __read_only __attribute__((__section__(".data..read_only")))
12844
12845 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
12846-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
12847+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
12848
12849 #ifdef CONFIG_X86_VSMP
12850 #ifdef CONFIG_SMP
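
[annotation] __read_only is the data-side counterpart of the text hardening: objects placed in .data..read_only are mapped read-only after init (the section handling is assumed to be in this patch's linker-script changes), so any later write must go through the pax_open_kernel()/pax_close_kernel() window seen in the desc.h hunks below. Declaration is a one-liner:

/* hypothetical variable, real attribute: set once at boot, immutable after */
static int some_setup_only_knob __read_only = 1;
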
12851diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
12852index 9863ee3..4a1f8e1 100644
12853--- a/arch/x86/include/asm/cacheflush.h
12854+++ b/arch/x86/include/asm/cacheflush.h
12855@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
12856 unsigned long pg_flags = pg->flags & _PGMT_MASK;
12857
12858 if (pg_flags == _PGMT_DEFAULT)
12859- return -1;
12860+ return ~0UL;
12861 else if (pg_flags == _PGMT_WC)
12862 return _PAGE_CACHE_WC;
12863 else if (pg_flags == _PGMT_UC_MINUS)
12864diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
12865index 46fc474..b02b0f9 100644
12866--- a/arch/x86/include/asm/checksum_32.h
12867+++ b/arch/x86/include/asm/checksum_32.h
12868@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
12869 int len, __wsum sum,
12870 int *src_err_ptr, int *dst_err_ptr);
12871
12872+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
12873+ int len, __wsum sum,
12874+ int *src_err_ptr, int *dst_err_ptr);
12875+
12876+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
12877+ int len, __wsum sum,
12878+ int *src_err_ptr, int *dst_err_ptr);
12879+
12880 /*
12881 * Note: when you get a NULL pointer exception here this means someone
12882 * passed in an incorrect kernel address to one of these functions.
12883@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
12884 int *err_ptr)
12885 {
12886 might_sleep();
12887- return csum_partial_copy_generic((__force void *)src, dst,
12888+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
12889 len, sum, err_ptr, NULL);
12890 }
12891
12892@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
12893 {
12894 might_sleep();
12895 if (access_ok(VERIFY_WRITE, dst, len))
12896- return csum_partial_copy_generic(src, (__force void *)dst,
12897+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
12898 len, sum, NULL, err_ptr);
12899
12900 if (len)
12901diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
12902index 8d871ea..c1a0dc9 100644
12903--- a/arch/x86/include/asm/cmpxchg.h
12904+++ b/arch/x86/include/asm/cmpxchg.h
12905@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
12906 __compiletime_error("Bad argument size for cmpxchg");
12907 extern void __xadd_wrong_size(void)
12908 __compiletime_error("Bad argument size for xadd");
12909+extern void __xadd_check_overflow_wrong_size(void)
12910+ __compiletime_error("Bad argument size for xadd_check_overflow");
12911 extern void __add_wrong_size(void)
12912 __compiletime_error("Bad argument size for add");
12913+extern void __add_check_overflow_wrong_size(void)
12914+ __compiletime_error("Bad argument size for add_check_overflow");
12915
12916 /*
12917 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
12918@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
12919 __ret; \
12920 })
12921
12922+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
12923+ ({ \
12924+ __typeof__ (*(ptr)) __ret = (arg); \
12925+ switch (sizeof(*(ptr))) { \
12926+ case __X86_CASE_L: \
12927+ asm volatile (lock #op "l %0, %1\n" \
12928+ "jno 0f\n" \
12929+ "mov %0,%1\n" \
12930+ "int $4\n0:\n" \
12931+ _ASM_EXTABLE(0b, 0b) \
12932+ : "+r" (__ret), "+m" (*(ptr)) \
12933+ : : "memory", "cc"); \
12934+ break; \
12935+ case __X86_CASE_Q: \
12936+ asm volatile (lock #op "q %q0, %1\n" \
12937+ "jno 0f\n" \
12938+ "mov %0,%1\n" \
12939+ "int $4\n0:\n" \
12940+ _ASM_EXTABLE(0b, 0b) \
12941+ : "+r" (__ret), "+m" (*(ptr)) \
12942+ : : "memory", "cc"); \
12943+ break; \
12944+ default: \
12945+ __ ## op ## _check_overflow_wrong_size(); \
12946+ } \
12947+ __ret; \
12948+ })
12949+
12950 /*
12951 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
12952 * Since this is generally used to protect other memory information, we
12953@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
12954 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
12955 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
12956
12957+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
12958+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
12959+
12960 #define __add(ptr, inc, lock) \
12961 ({ \
12962 __typeof__ (*(ptr)) __ret = (inc); \
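
[annotation] xadd_check_overflow mirrors xadd but folds in the REFCOUNT check: on signed overflow the "mov %0,%1" writes the pre-exchange value back, undoing the xadd before trapping, so the counter saturates exactly like the add/sub variants. It is the primitive the rewritten atomic_add_return and atomic64_add_return sit on:

/* as used in the atomic.h hunk above */
static inline int atomic_add_return_sketch(int i, atomic_t *v)
{
        /* xadd returns the old value, hence the "i +" */
        return i + xadd_check_overflow(&v->counter, i);
}
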
12963diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
12964index 2d9075e..b75a844 100644
12965--- a/arch/x86/include/asm/cpufeature.h
12966+++ b/arch/x86/include/asm/cpufeature.h
12967@@ -206,7 +206,7 @@
12968 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
12969 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
12970 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
12971-#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
12972+#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
12973 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
12974 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
12975 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
12976@@ -375,7 +375,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
12977 ".section .discard,\"aw\",@progbits\n"
12978 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
12979 ".previous\n"
12980- ".section .altinstr_replacement,\"ax\"\n"
12981+ ".section .altinstr_replacement,\"a\"\n"
12982 "3: movb $1,%0\n"
12983 "4:\n"
12984 ".previous\n"
12985diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
12986index 8bf1c06..b6ae785 100644
12987--- a/arch/x86/include/asm/desc.h
12988+++ b/arch/x86/include/asm/desc.h
12989@@ -4,6 +4,7 @@
12990 #include <asm/desc_defs.h>
12991 #include <asm/ldt.h>
12992 #include <asm/mmu.h>
12993+#include <asm/pgtable.h>
12994
12995 #include <linux/smp.h>
12996 #include <linux/percpu.h>
12997@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
12998
12999 desc->type = (info->read_exec_only ^ 1) << 1;
13000 desc->type |= info->contents << 2;
13001+ desc->type |= info->seg_not_present ^ 1;
13002
13003 desc->s = 1;
13004 desc->dpl = 0x3;
13005@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
13006 }
13007
13008 extern struct desc_ptr idt_descr;
13009-extern gate_desc idt_table[];
13010 extern struct desc_ptr nmi_idt_descr;
13011-extern gate_desc nmi_idt_table[];
13012-
13013-struct gdt_page {
13014- struct desc_struct gdt[GDT_ENTRIES];
13015-} __attribute__((aligned(PAGE_SIZE)));
13016-
13017-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
13018+extern gate_desc idt_table[256];
13019+extern gate_desc nmi_idt_table[256];
13020
13021+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
13022 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
13023 {
13024- return per_cpu(gdt_page, cpu).gdt;
13025+ return cpu_gdt_table[cpu];
13026 }
13027
13028 #ifdef CONFIG_X86_64
13029@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
13030 unsigned long base, unsigned dpl, unsigned flags,
13031 unsigned short seg)
13032 {
13033- gate->a = (seg << 16) | (base & 0xffff);
13034- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
13035+ gate->gate.offset_low = base;
13036+ gate->gate.seg = seg;
13037+ gate->gate.reserved = 0;
13038+ gate->gate.type = type;
13039+ gate->gate.s = 0;
13040+ gate->gate.dpl = dpl;
13041+ gate->gate.p = 1;
13042+ gate->gate.offset_high = base >> 16;
13043 }
13044
13045 #endif
13046@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
13047
13048 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
13049 {
13050+ pax_open_kernel();
13051 memcpy(&idt[entry], gate, sizeof(*gate));
13052+ pax_close_kernel();
13053 }
13054
13055 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
13056 {
13057+ pax_open_kernel();
13058 memcpy(&ldt[entry], desc, 8);
13059+ pax_close_kernel();
13060 }
13061
13062 static inline void
13063@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
13064 default: size = sizeof(*gdt); break;
13065 }
13066
13067+ pax_open_kernel();
13068 memcpy(&gdt[entry], desc, size);
13069+ pax_close_kernel();
13070 }
13071
13072 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
13073@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
13074
13075 static inline void native_load_tr_desc(void)
13076 {
13077+ pax_open_kernel();
13078 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
13079+ pax_close_kernel();
13080 }
13081
13082 static inline void native_load_gdt(const struct desc_ptr *dtr)
13083@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
13084 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
13085 unsigned int i;
13086
13087+ pax_open_kernel();
13088 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
13089 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
13090+ pax_close_kernel();
13091 }
13092
13093 #define _LDT_empty(info) \
13094@@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t *pc)
13095 preempt_enable();
13096 }
13097
13098-static inline unsigned long get_desc_base(const struct desc_struct *desc)
13099+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
13100 {
13101 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
13102 }
13103@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
13104 }
13105
13106 #ifdef CONFIG_X86_64
13107-static inline void set_nmi_gate(int gate, void *addr)
13108+static inline void set_nmi_gate(int gate, const void *addr)
13109 {
13110 gate_desc s;
13111
13112@@ -320,7 +333,7 @@ static inline void set_nmi_gate(int gate, void *addr)
13113 }
13114 #endif
13115
13116-static inline void _set_gate(int gate, unsigned type, void *addr,
13117+static inline void _set_gate(int gate, unsigned type, const void *addr,
13118 unsigned dpl, unsigned ist, unsigned seg)
13119 {
13120 gate_desc s;
13121@@ -339,7 +352,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
13122 * Pentium F0 0F bugfix can have resulted in the mapped
13123 * IDT being write-protected.
13124 */
13125-static inline void set_intr_gate(unsigned int n, void *addr)
13126+static inline void set_intr_gate(unsigned int n, const void *addr)
13127 {
13128 BUG_ON((unsigned)n > 0xFF);
13129 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
13130@@ -369,19 +382,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
13131 /*
13132 * This routine sets up an interrupt gate at directory privilege level 3.
13133 */
13134-static inline void set_system_intr_gate(unsigned int n, void *addr)
13135+static inline void set_system_intr_gate(unsigned int n, const void *addr)
13136 {
13137 BUG_ON((unsigned)n > 0xFF);
13138 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
13139 }
13140
13141-static inline void set_system_trap_gate(unsigned int n, void *addr)
13142+static inline void set_system_trap_gate(unsigned int n, const void *addr)
13143 {
13144 BUG_ON((unsigned)n > 0xFF);
13145 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
13146 }
13147
13148-static inline void set_trap_gate(unsigned int n, void *addr)
13149+static inline void set_trap_gate(unsigned int n, const void *addr)
13150 {
13151 BUG_ON((unsigned)n > 0xFF);
13152 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
13153@@ -390,19 +403,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
13154 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
13155 {
13156 BUG_ON((unsigned)n > 0xFF);
13157- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
13158+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
13159 }
13160
13161-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
13162+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
13163 {
13164 BUG_ON((unsigned)n > 0xFF);
13165 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
13166 }
13167
13168-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
13169+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
13170 {
13171 BUG_ON((unsigned)n > 0xFF);
13172 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
13173 }
13174
13175+#ifdef CONFIG_X86_32
13176+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
13177+{
13178+ struct desc_struct d;
13179+
13180+ if (likely(limit))
13181+ limit = (limit - 1UL) >> PAGE_SHIFT;
13182+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
13183+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
13184+}
13185+#endif
13186+
13187 #endif /* _ASM_X86_DESC_H */
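
[annotation] The desc.h hunks wrap every IDT/GDT/LDT write in pax_open_kernel()/pax_close_kernel(), the temporary write window needed once those tables live in read-only memory (cpu_gdt_table replacing the writable per-cpu gdt_page). A hedged sketch of one plausible implementation, briefly clearing CR0.WP so ring-0 stores can reach read-only pages; illustrative only, not the literal PaX code:

static inline unsigned long pax_open_kernel_sketch(void)
{
        unsigned long cr0;

        asm volatile("mov %%cr0,%0" : "=r" (cr0));
        asm volatile("mov %0,%%cr0" : : "r" (cr0 & ~(1UL << 16)) : "memory"); /* WP off */
        return cr0;
}

static inline void pax_close_kernel_sketch(unsigned long cr0)
{
        asm volatile("mov %0,%%cr0" : : "r" (cr0) : "memory");                /* WP restored */
}
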
13188diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
13189index 278441f..b95a174 100644
13190--- a/arch/x86/include/asm/desc_defs.h
13191+++ b/arch/x86/include/asm/desc_defs.h
13192@@ -31,6 +31,12 @@ struct desc_struct {
13193 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
13194 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
13195 };
13196+ struct {
13197+ u16 offset_low;
13198+ u16 seg;
13199+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
13200+ unsigned offset_high: 16;
13201+ } gate;
13202 };
13203 } __attribute__((packed));
13204
13205diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
13206index ced283a..ffe04cc 100644
13207--- a/arch/x86/include/asm/div64.h
13208+++ b/arch/x86/include/asm/div64.h
13209@@ -39,7 +39,7 @@
13210 __mod; \
13211 })
13212
13213-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
13214+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
13215 {
13216 union {
13217 u64 v64;
13218diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
13219index 9c999c1..3860cb8 100644
13220--- a/arch/x86/include/asm/elf.h
13221+++ b/arch/x86/include/asm/elf.h
13222@@ -243,7 +243,25 @@ extern int force_personality32;
13223 the loader. We need to make sure that it is out of the way of the program
13224 that it will "exec", and that there is sufficient room for the brk. */
13225
13226+#ifdef CONFIG_PAX_SEGMEXEC
13227+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
13228+#else
13229 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
13230+#endif
13231+
13232+#ifdef CONFIG_PAX_ASLR
13233+#ifdef CONFIG_X86_32
13234+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
13235+
13236+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
13237+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
13238+#else
13239+#define PAX_ELF_ET_DYN_BASE 0x400000UL
13240+
13241+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
13242+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
13243+#endif
13244+#endif
13245
13246 /* This yields a mask that user programs can use to figure out what
13247 instruction set this CPU supports. This could be done in user space,
13248@@ -296,16 +314,12 @@ do { \
13249
13250 #define ARCH_DLINFO \
13251 do { \
13252- if (vdso_enabled) \
13253- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
13254- (unsigned long)current->mm->context.vdso); \
13255+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
13256 } while (0)
13257
13258 #define ARCH_DLINFO_X32 \
13259 do { \
13260- if (vdso_enabled) \
13261- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
13262- (unsigned long)current->mm->context.vdso); \
13263+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
13264 } while (0)
13265
13266 #define AT_SYSINFO 32
13267@@ -320,7 +334,7 @@ else \
13268
13269 #endif /* !CONFIG_X86_32 */
13270
13271-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
13272+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
13273
13274 #define VDSO_ENTRY \
13275 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
13276@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
13277 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
13278 #define compat_arch_setup_additional_pages syscall32_setup_pages
13279
13280-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
13281-#define arch_randomize_brk arch_randomize_brk
13282-
13283 /*
13284 * True on X86_32 or when emulating IA32 on X86_64
13285 */
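
[annotation] PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN give the number of random bits mixed into the mmap and stack bases — fewer under SEGMEXEC or for 32-bit tasks, since those address spaces are smaller. How the values are consumed, sketched with a hypothetical random_bits() helper standing in for the kernel RNG:

/* random_bits() is hypothetical; PAGE_SHIFT is 12 on x86 */
static unsigned long pax_randomize_base_sketch(unsigned long base,
                                               unsigned int delta_len)
{
        unsigned long rnd = random_bits() & ((1UL << delta_len) - 1);

        return base + (rnd << PAGE_SHIFT);
}
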
13286diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
13287index 75ce3f4..882e801 100644
13288--- a/arch/x86/include/asm/emergency-restart.h
13289+++ b/arch/x86/include/asm/emergency-restart.h
13290@@ -13,6 +13,6 @@ enum reboot_type {
13291
13292 extern enum reboot_type reboot_type;
13293
13294-extern void machine_emergency_restart(void);
13295+extern void machine_emergency_restart(void) __noreturn;
13296
13297 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
13298diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
13299index 41ab26e..a88c9e6 100644
13300--- a/arch/x86/include/asm/fpu-internal.h
13301+++ b/arch/x86/include/asm/fpu-internal.h
13302@@ -126,7 +126,9 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
13303 ({ \
13304 int err; \
13305 asm volatile(ASM_STAC "\n" \
13306- "1:" #insn "\n\t" \
13307+ "1:" \
13308+ __copyuser_seg \
13309+ #insn "\n\t" \
13310 "2: " ASM_CLAC "\n" \
13311 ".section .fixup,\"ax\"\n" \
13312 "3: movl $-1,%[err]\n" \
13313@@ -299,7 +301,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
13314 "emms\n\t" /* clear stack tags */
13315 "fildl %P[addr]", /* set F?P to defined value */
13316 X86_FEATURE_FXSAVE_LEAK,
13317- [addr] "m" (tsk->thread.fpu.has_fpu));
13318+ [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
13319
13320 return fpu_restore_checking(&tsk->thread.fpu);
13321 }
13322diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
13323index be27ba1..8f13ff9 100644
13324--- a/arch/x86/include/asm/futex.h
13325+++ b/arch/x86/include/asm/futex.h
13326@@ -12,6 +12,7 @@
13327 #include <asm/smap.h>
13328
13329 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
13330+ typecheck(u32 __user *, uaddr); \
13331 asm volatile("\t" ASM_STAC "\n" \
13332 "1:\t" insn "\n" \
13333 "2:\t" ASM_CLAC "\n" \
13334@@ -20,15 +21,16 @@
13335 "\tjmp\t2b\n" \
13336 "\t.previous\n" \
13337 _ASM_EXTABLE(1b, 3b) \
13338- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
13339+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
13340 : "i" (-EFAULT), "0" (oparg), "1" (0))
13341
13342 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
13343+ typecheck(u32 __user *, uaddr); \
13344 asm volatile("\t" ASM_STAC "\n" \
13345 "1:\tmovl %2, %0\n" \
13346 "\tmovl\t%0, %3\n" \
13347 "\t" insn "\n" \
13348- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
13349+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
13350 "\tjnz\t1b\n" \
13351 "3:\t" ASM_CLAC "\n" \
13352 "\t.section .fixup,\"ax\"\n" \
13353@@ -38,7 +40,7 @@
13354 _ASM_EXTABLE(1b, 4b) \
13355 _ASM_EXTABLE(2b, 4b) \
13356 : "=&a" (oldval), "=&r" (ret), \
13357- "+m" (*uaddr), "=&r" (tem) \
13358+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
13359 : "r" (oparg), "i" (-EFAULT), "1" (0))
13360
13361 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
13362@@ -59,10 +61,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
13363
13364 switch (op) {
13365 case FUTEX_OP_SET:
13366- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
13367+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
13368 break;
13369 case FUTEX_OP_ADD:
13370- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
13371+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
13372 uaddr, oparg);
13373 break;
13374 case FUTEX_OP_OR:
13375@@ -116,14 +118,14 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
13376 return -EFAULT;
13377
13378 asm volatile("\t" ASM_STAC "\n"
13379- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
13380+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
13381 "2:\t" ASM_CLAC "\n"
13382 "\t.section .fixup, \"ax\"\n"
13383 "3:\tmov %3, %0\n"
13384 "\tjmp 2b\n"
13385 "\t.previous\n"
13386 _ASM_EXTABLE(1b, 3b)
13387- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
13388+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
13389 : "i" (-EFAULT), "r" (newval), "1" (oldval)
13390 : "memory"
13391 );
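
The typecheck() lines inserted into both futex helpers make the compiler reject any uaddr that is not exactly a u32 __user * before the ____m()-wrapped asm ever runs. A userspace reduction of the same idiom, mirroring the shape of the kernel's <linux/typecheck.h> macro:

#include <stdio.h>

/* same shape as the kernel's typecheck(): comparing the addresses of
 * two differently-typed dummies makes GCC warn on a type mismatch */
#define typecheck(type, x) \
({	type __dummy; \
	typeof(x) __dummy2; \
	(void)(&__dummy == &__dummy2); \
	1; \
})

int main(void)
{
	unsigned int *uaddr = NULL;

	typecheck(unsigned int *, uaddr);	/* compiles silently */
	/* typecheck(unsigned long *, uaddr);   would warn:
	 *   comparison of distinct pointer types */
	printf("type ok\n");
	return 0;
}
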
13392diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
13393index eb92a6e..b98b2f4 100644
13394--- a/arch/x86/include/asm/hw_irq.h
13395+++ b/arch/x86/include/asm/hw_irq.h
13396@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
13397 extern void enable_IO_APIC(void);
13398
13399 /* Statistics */
13400-extern atomic_t irq_err_count;
13401-extern atomic_t irq_mis_count;
13402+extern atomic_unchecked_t irq_err_count;
13403+extern atomic_unchecked_t irq_mis_count;
13404
13405 /* EISA */
13406 extern void eisa_set_level_irq(unsigned int irq);
13407diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
13408index a203659..9889f1c 100644
13409--- a/arch/x86/include/asm/i8259.h
13410+++ b/arch/x86/include/asm/i8259.h
13411@@ -62,7 +62,7 @@ struct legacy_pic {
13412 void (*init)(int auto_eoi);
13413 int (*irq_pending)(unsigned int irq);
13414 void (*make_irq)(unsigned int irq);
13415-};
13416+} __do_const;
13417
13418 extern struct legacy_pic *legacy_pic;
13419 extern struct legacy_pic null_legacy_pic;
13420diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
13421index d8e8eef..1765f78 100644
13422--- a/arch/x86/include/asm/io.h
13423+++ b/arch/x86/include/asm/io.h
13424@@ -51,12 +51,12 @@ static inline void name(type val, volatile void __iomem *addr) \
13425 "m" (*(volatile type __force *)addr) barrier); }
13426
13427 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
13428-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
13429-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
13430+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
13431+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
13432
13433 build_mmio_read(__readb, "b", unsigned char, "=q", )
13434-build_mmio_read(__readw, "w", unsigned short, "=r", )
13435-build_mmio_read(__readl, "l", unsigned int, "=r", )
13436+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
13437+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
13438
13439 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
13440 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
13441@@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
13442 return ioremap_nocache(offset, size);
13443 }
13444
13445-extern void iounmap(volatile void __iomem *addr);
13446+extern void iounmap(const volatile void __iomem *addr);
13447
13448 extern void set_iounmap_nonlazy(void);
13449
13450@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
13451
13452 #include <linux/vmalloc.h>
13453
13454+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
13455+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
13456+{
13457+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
13458+}
13459+
13460+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
13461+{
13462+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
13463+}
13464+
13465 /*
13466 * Convert a virtual cached pointer to an uncached pointer
13467 */
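
The new valid_phys_addr_range()/valid_mmap_phys_addr_range() helpers bound /dev/mem-style accesses by the CPU's physical address width. A userspace rendering of the first check, with x86_phys_bits passed in explicitly (36 is a common value; boot_cpu_data is not reachable from here):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int valid_phys_addr_range(unsigned long addr, unsigned long count,
				 unsigned int x86_phys_bits)
{
	return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) <
	       (1ULL << (x86_phys_bits - PAGE_SHIFT));
}

int main(void)
{
	printf("%d\n", valid_phys_addr_range(0x100000, 4096, 36));	/* 1 */
	printf("%d\n", valid_phys_addr_range(~0UL - 8192, 4096, 36));	/* 0 */
	return 0;
}
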
13468diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
13469index bba3cf8..06bc8da 100644
13470--- a/arch/x86/include/asm/irqflags.h
13471+++ b/arch/x86/include/asm/irqflags.h
13472@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
13473 sti; \
13474 sysexit
13475
13476+#define GET_CR0_INTO_RDI mov %cr0, %rdi
13477+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
13478+#define GET_CR3_INTO_RDI mov %cr3, %rdi
13479+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
13480+
13481 #else
13482 #define INTERRUPT_RETURN iret
13483 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
13484diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
13485index d3ddd17..c9fb0cc 100644
13486--- a/arch/x86/include/asm/kprobes.h
13487+++ b/arch/x86/include/asm/kprobes.h
13488@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
13489 #define RELATIVEJUMP_SIZE 5
13490 #define RELATIVECALL_OPCODE 0xe8
13491 #define RELATIVE_ADDR_SIZE 4
13492-#define MAX_STACK_SIZE 64
13493-#define MIN_STACK_SIZE(ADDR) \
13494- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
13495- THREAD_SIZE - (unsigned long)(ADDR))) \
13496- ? (MAX_STACK_SIZE) \
13497- : (((unsigned long)current_thread_info()) + \
13498- THREAD_SIZE - (unsigned long)(ADDR)))
13499+#define MAX_STACK_SIZE 64UL
13500+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
13501
13502 #define flush_insn_slot(p) do { } while (0)
13503
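
The rewritten MIN_STACK_SIZE() clamps the kprobe stack snapshot to the distance between ADDR and the ring-0 stack top, current->thread.sp0, replacing the thread_info-based bound. The same clamp as a sketch, with sp0 passed explicitly:

#include <stdio.h>

#define MAX_STACK_SIZE 64UL
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* sp0 stands in for current->thread.sp0 */
static unsigned long min_stack_size(unsigned long sp0, unsigned long addr)
{
	return MIN(MAX_STACK_SIZE, sp0 - addr);
}

int main(void)
{
	unsigned long sp0 = 0x1000;

	printf("%lu\n", min_stack_size(sp0, 0x0f00));	/* 256 left -> clamped to 64 */
	printf("%lu\n", min_stack_size(sp0, 0x0fe0));	/* 32 left  -> 32 */
	return 0;
}
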
13504diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
13505index dc87b65..85039f9 100644
13506--- a/arch/x86/include/asm/kvm_host.h
13507+++ b/arch/x86/include/asm/kvm_host.h
13508@@ -419,8 +419,8 @@ struct kvm_vcpu_arch {
13509 gpa_t time;
13510 struct pvclock_vcpu_time_info hv_clock;
13511 unsigned int hw_tsc_khz;
13512- unsigned int time_offset;
13513- struct page *time_page;
13514+ struct gfn_to_hva_cache pv_time;
13515+ bool pv_time_enabled;
13516 /* set guest stopped flag in pvclock flags field */
13517 bool pvclock_set_guest_stopped_request;
13518
13519diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
13520index 2d89e39..baee879 100644
13521--- a/arch/x86/include/asm/local.h
13522+++ b/arch/x86/include/asm/local.h
13523@@ -10,33 +10,97 @@ typedef struct {
13524 atomic_long_t a;
13525 } local_t;
13526
13527+typedef struct {
13528+ atomic_long_unchecked_t a;
13529+} local_unchecked_t;
13530+
13531 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
13532
13533 #define local_read(l) atomic_long_read(&(l)->a)
13534+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
13535 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
13536+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
13537
13538 static inline void local_inc(local_t *l)
13539 {
13540- asm volatile(_ASM_INC "%0"
13541+ asm volatile(_ASM_INC "%0\n"
13542+
13543+#ifdef CONFIG_PAX_REFCOUNT
13544+ "jno 0f\n"
13545+ _ASM_DEC "%0\n"
13546+ "int $4\n0:\n"
13547+ _ASM_EXTABLE(0b, 0b)
13548+#endif
13549+
13550+ : "+m" (l->a.counter));
13551+}
13552+
13553+static inline void local_inc_unchecked(local_unchecked_t *l)
13554+{
13555+ asm volatile(_ASM_INC "%0\n"
13556 : "+m" (l->a.counter));
13557 }
13558
13559 static inline void local_dec(local_t *l)
13560 {
13561- asm volatile(_ASM_DEC "%0"
13562+ asm volatile(_ASM_DEC "%0\n"
13563+
13564+#ifdef CONFIG_PAX_REFCOUNT
13565+ "jno 0f\n"
13566+ _ASM_INC "%0\n"
13567+ "int $4\n0:\n"
13568+ _ASM_EXTABLE(0b, 0b)
13569+#endif
13570+
13571+ : "+m" (l->a.counter));
13572+}
13573+
13574+static inline void local_dec_unchecked(local_unchecked_t *l)
13575+{
13576+ asm volatile(_ASM_DEC "%0\n"
13577 : "+m" (l->a.counter));
13578 }
13579
13580 static inline void local_add(long i, local_t *l)
13581 {
13582- asm volatile(_ASM_ADD "%1,%0"
13583+ asm volatile(_ASM_ADD "%1,%0\n"
13584+
13585+#ifdef CONFIG_PAX_REFCOUNT
13586+ "jno 0f\n"
13587+ _ASM_SUB "%1,%0\n"
13588+ "int $4\n0:\n"
13589+ _ASM_EXTABLE(0b, 0b)
13590+#endif
13591+
13592+ : "+m" (l->a.counter)
13593+ : "ir" (i));
13594+}
13595+
13596+static inline void local_add_unchecked(long i, local_unchecked_t *l)
13597+{
13598+ asm volatile(_ASM_ADD "%1,%0\n"
13599 : "+m" (l->a.counter)
13600 : "ir" (i));
13601 }
13602
13603 static inline void local_sub(long i, local_t *l)
13604 {
13605- asm volatile(_ASM_SUB "%1,%0"
13606+ asm volatile(_ASM_SUB "%1,%0\n"
13607+
13608+#ifdef CONFIG_PAX_REFCOUNT
13609+ "jno 0f\n"
13610+ _ASM_ADD "%1,%0\n"
13611+ "int $4\n0:\n"
13612+ _ASM_EXTABLE(0b, 0b)
13613+#endif
13614+
13615+ : "+m" (l->a.counter)
13616+ : "ir" (i));
13617+}
13618+
13619+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
13620+{
13621+ asm volatile(_ASM_SUB "%1,%0\n"
13622 : "+m" (l->a.counter)
13623 : "ir" (i));
13624 }
13625@@ -54,7 +118,16 @@ static inline int local_sub_and_test(long i, local_t *l)
13626 {
13627 unsigned char c;
13628
13629- asm volatile(_ASM_SUB "%2,%0; sete %1"
13630+ asm volatile(_ASM_SUB "%2,%0\n"
13631+
13632+#ifdef CONFIG_PAX_REFCOUNT
13633+ "jno 0f\n"
13634+ _ASM_ADD "%2,%0\n"
13635+ "int $4\n0:\n"
13636+ _ASM_EXTABLE(0b, 0b)
13637+#endif
13638+
13639+ "sete %1\n"
13640 : "+m" (l->a.counter), "=qm" (c)
13641 : "ir" (i) : "memory");
13642 return c;
13643@@ -72,7 +145,16 @@ static inline int local_dec_and_test(local_t *l)
13644 {
13645 unsigned char c;
13646
13647- asm volatile(_ASM_DEC "%0; sete %1"
13648+ asm volatile(_ASM_DEC "%0\n"
13649+
13650+#ifdef CONFIG_PAX_REFCOUNT
13651+ "jno 0f\n"
13652+ _ASM_INC "%0\n"
13653+ "int $4\n0:\n"
13654+ _ASM_EXTABLE(0b, 0b)
13655+#endif
13656+
13657+ "sete %1\n"
13658 : "+m" (l->a.counter), "=qm" (c)
13659 : : "memory");
13660 return c != 0;
13661@@ -90,7 +172,16 @@ static inline int local_inc_and_test(local_t *l)
13662 {
13663 unsigned char c;
13664
13665- asm volatile(_ASM_INC "%0; sete %1"
13666+ asm volatile(_ASM_INC "%0\n"
13667+
13668+#ifdef CONFIG_PAX_REFCOUNT
13669+ "jno 0f\n"
13670+ _ASM_DEC "%0\n"
13671+ "int $4\n0:\n"
13672+ _ASM_EXTABLE(0b, 0b)
13673+#endif
13674+
13675+ "sete %1\n"
13676 : "+m" (l->a.counter), "=qm" (c)
13677 : : "memory");
13678 return c != 0;
13679@@ -109,7 +200,16 @@ static inline int local_add_negative(long i, local_t *l)
13680 {
13681 unsigned char c;
13682
13683- asm volatile(_ASM_ADD "%2,%0; sets %1"
13684+ asm volatile(_ASM_ADD "%2,%0\n"
13685+
13686+#ifdef CONFIG_PAX_REFCOUNT
13687+ "jno 0f\n"
13688+ _ASM_SUB "%2,%0\n"
13689+ "int $4\n0:\n"
13690+ _ASM_EXTABLE(0b, 0b)
13691+#endif
13692+
13693+ "sets %1\n"
13694 : "+m" (l->a.counter), "=qm" (c)
13695 : "ir" (i) : "memory");
13696 return c;
13697@@ -125,6 +225,30 @@ static inline int local_add_negative(long i, local_t *l)
13698 static inline long local_add_return(long i, local_t *l)
13699 {
13700 long __i = i;
13701+ asm volatile(_ASM_XADD "%0, %1\n"
13702+
13703+#ifdef CONFIG_PAX_REFCOUNT
13704+ "jno 0f\n"
13705+ _ASM_MOV "%0,%1\n"
13706+ "int $4\n0:\n"
13707+ _ASM_EXTABLE(0b, 0b)
13708+#endif
13709+
13710+ : "+r" (i), "+m" (l->a.counter)
13711+ : : "memory");
13712+ return i + __i;
13713+}
13714+
13715+/**
13716+ * local_add_return_unchecked - add and return
13717+ * @i: integer value to add
13718+ * @l: pointer to type local_unchecked_t
13719+ *
13720+ * Atomically adds @i to @l and returns @i + @l
13721+ */
13722+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
13723+{
13724+ long __i = i;
13725 asm volatile(_ASM_XADD "%0, %1;"
13726 : "+r" (i), "+m" (l->a.counter)
13727 : : "memory");
13728@@ -141,6 +265,8 @@ static inline long local_sub_return(long i, local_t *l)
13729
13730 #define local_cmpxchg(l, o, n) \
13731 (cmpxchg_local(&((l)->a.counter), (o), (n)))
13732+#define local_cmpxchg_unchecked(l, o, n) \
13733+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
13734 /* Always has a lock prefix */
13735 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
13736
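
Every CONFIG_PAX_REFCOUNT block in this file follows one pattern: do the arithmetic, test the overflow flag with jno, and on overflow undo the operation and raise int $4 so the kernel's overflow handler runs. A userspace x86-64 sketch of the same detect-and-undo sequence, recording a flag instead of trapping:

#include <stdio.h>
#include <limits.h>

static int checked_inc(long *counter)
{
	int overflowed = 0;

	asm volatile("incq %0\n\t"
		     "jno 1f\n\t"
		     "decq %0\n\t"	/* undo, exactly as the patch does */
		     "movl $1, %1\n"	/* record overflow instead of int $4 */
		     "1:"
		     : "+m" (*counter), "+r" (overflowed)
		     : : "cc");
	return overflowed;
}

int main(void)
{
	long c = LONG_MAX;
	int ovf = checked_inc(&c);

	printf("ovf=%d c=%ld\n", ovf, c);	/* ovf=1, value restored */
	c = 0;
	ovf = checked_inc(&c);
	printf("ovf=%d c=%ld\n", ovf, c);	/* ovf=0, c=1 */
	return 0;
}
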
13737diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
13738new file mode 100644
13739index 0000000..2bfd3ba
13740--- /dev/null
13741+++ b/arch/x86/include/asm/mman.h
13742@@ -0,0 +1,15 @@
13743+#ifndef _X86_MMAN_H
13744+#define _X86_MMAN_H
13745+
13746+#include <uapi/asm/mman.h>
13747+
13748+#ifdef __KERNEL__
13749+#ifndef __ASSEMBLY__
13750+#ifdef CONFIG_X86_32
13751+#define arch_mmap_check i386_mmap_check
13752+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
13753+#endif
13754+#endif
13755+#endif
13756+
13757+#endif /* X86_MMAN_H */
13758diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
13759index 5f55e69..e20bfb1 100644
13760--- a/arch/x86/include/asm/mmu.h
13761+++ b/arch/x86/include/asm/mmu.h
13762@@ -9,7 +9,7 @@
13763 * we put the segment information here.
13764 */
13765 typedef struct {
13766- void *ldt;
13767+ struct desc_struct *ldt;
13768 int size;
13769
13770 #ifdef CONFIG_X86_64
13771@@ -18,7 +18,19 @@ typedef struct {
13772 #endif
13773
13774 struct mutex lock;
13775- void *vdso;
13776+ unsigned long vdso;
13777+
13778+#ifdef CONFIG_X86_32
13779+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
13780+ unsigned long user_cs_base;
13781+ unsigned long user_cs_limit;
13782+
13783+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13784+ cpumask_t cpu_user_cs_mask;
13785+#endif
13786+
13787+#endif
13788+#endif
13789 } mm_context_t;
13790
13791 #ifdef CONFIG_SMP
13792diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
13793index cdbf367..adb37ac 100644
13794--- a/arch/x86/include/asm/mmu_context.h
13795+++ b/arch/x86/include/asm/mmu_context.h
13796@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
13797
13798 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
13799 {
13800+
13801+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13802+ unsigned int i;
13803+ pgd_t *pgd;
13804+
13805+ pax_open_kernel();
13806+ pgd = get_cpu_pgd(smp_processor_id());
13807+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
13808+ set_pgd_batched(pgd+i, native_make_pgd(0));
13809+ pax_close_kernel();
13810+#endif
13811+
13812 #ifdef CONFIG_SMP
13813 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
13814 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
13815@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
13816 struct task_struct *tsk)
13817 {
13818 unsigned cpu = smp_processor_id();
13819+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
13820+ int tlbstate = TLBSTATE_OK;
13821+#endif
13822
13823 if (likely(prev != next)) {
13824 #ifdef CONFIG_SMP
13825+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
13826+ tlbstate = this_cpu_read(cpu_tlbstate.state);
13827+#endif
13828 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
13829 this_cpu_write(cpu_tlbstate.active_mm, next);
13830 #endif
13831 cpumask_set_cpu(cpu, mm_cpumask(next));
13832
13833 /* Re-load page tables */
13834+#ifdef CONFIG_PAX_PER_CPU_PGD
13835+ pax_open_kernel();
13836+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
13837+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
13838+ pax_close_kernel();
13839+ load_cr3(get_cpu_pgd(cpu));
13840+#else
13841 load_cr3(next->pgd);
13842+#endif
13843
13844 /* stop flush ipis for the previous mm */
13845 cpumask_clear_cpu(cpu, mm_cpumask(prev));
13846@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
13847 */
13848 if (unlikely(prev->context.ldt != next->context.ldt))
13849 load_LDT_nolock(&next->context);
13850- }
13851+
13852+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13853+ if (!(__supported_pte_mask & _PAGE_NX)) {
13854+ smp_mb__before_clear_bit();
13855+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
13856+ smp_mb__after_clear_bit();
13857+ cpu_set(cpu, next->context.cpu_user_cs_mask);
13858+ }
13859+#endif
13860+
13861+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
13862+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
13863+ prev->context.user_cs_limit != next->context.user_cs_limit))
13864+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
13865 #ifdef CONFIG_SMP
13866+ else if (unlikely(tlbstate != TLBSTATE_OK))
13867+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
13868+#endif
13869+#endif
13870+
13871+ }
13872 else {
13873+
13874+#ifdef CONFIG_PAX_PER_CPU_PGD
13875+ pax_open_kernel();
13876+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
13877+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
13878+ pax_close_kernel();
13879+ load_cr3(get_cpu_pgd(cpu));
13880+#endif
13881+
13882+#ifdef CONFIG_SMP
13883 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
13884 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
13885
13886@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
13887 * tlb flush IPI delivery. We must reload CR3
13888 * to make sure to use no freed page tables.
13889 */
13890+
13891+#ifndef CONFIG_PAX_PER_CPU_PGD
13892 load_cr3(next->pgd);
13893+#endif
13894+
13895 load_LDT_nolock(&next->context);
13896+
13897+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
13898+ if (!(__supported_pte_mask & _PAGE_NX))
13899+ cpu_set(cpu, next->context.cpu_user_cs_mask);
13900+#endif
13901+
13902+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
13903+#ifdef CONFIG_PAX_PAGEEXEC
13904+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
13905+#endif
13906+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
13907+#endif
13908+
13909 }
13910+#endif
13911 }
13912-#endif
13913 }
13914
13915 #define activate_mm(prev, next) \
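
Under CONFIG_PAX_PER_CPU_PGD each CPU owns its own top-level page table: switch_mm() copies the user half out of next->pgd and points CR3 at the per-CPU copy, so kernel mappings never live in a task-owned PGD. A deliberately toy sketch of that copy step (NR_CPUS, PTRS and USER_PTRS are illustrative, not the kernel's values):

#include <string.h>
#include <stdio.h>

#define NR_CPUS   4
#define PTRS      512		/* entries per top-level table */
#define USER_PTRS 256		/* user half of the table */

typedef unsigned long pgd_t;
static pgd_t cpu_pgd[NR_CPUS][PTRS];	/* per-CPU tables, as in the patch */

static void clone_user_pgds(pgd_t *dst, const pgd_t *src)
{
	memcpy(dst, src, USER_PTRS * sizeof(pgd_t));	/* user slots only */
}

static void switch_mm_sketch(int cpu, const pgd_t *next_pgd)
{
	clone_user_pgds(cpu_pgd[cpu], next_pgd);
	/* real code then does: load_cr3(get_cpu_pgd(cpu)); */
}

int main(void)
{
	pgd_t task_pgd[PTRS] = { [0] = 0x1063 };

	switch_mm_sketch(0, task_pgd);
	printf("cpu0 user slot0 = %#lx\n", cpu_pgd[0][0]);
	return 0;
}
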
13916diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
13917index e3b7819..b257c64 100644
13918--- a/arch/x86/include/asm/module.h
13919+++ b/arch/x86/include/asm/module.h
13920@@ -5,6 +5,7 @@
13921
13922 #ifdef CONFIG_X86_64
13923 /* X86_64 does not define MODULE_PROC_FAMILY */
13924+#define MODULE_PROC_FAMILY ""
13925 #elif defined CONFIG_M486
13926 #define MODULE_PROC_FAMILY "486 "
13927 #elif defined CONFIG_M586
13928@@ -57,8 +58,20 @@
13929 #error unknown processor family
13930 #endif
13931
13932-#ifdef CONFIG_X86_32
13933-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
13934+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
13935+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
13936+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
13937+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
13938+#else
13939+#define MODULE_PAX_KERNEXEC ""
13940 #endif
13941
13942+#ifdef CONFIG_PAX_MEMORY_UDEREF
13943+#define MODULE_PAX_UDEREF "UDEREF "
13944+#else
13945+#define MODULE_PAX_UDEREF ""
13946+#endif
13947+
13948+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
13949+
13950 #endif /* _ASM_X86_MODULE_H */
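
With this hunk the vermagic arch component is built from three adjacent string literals, so modules record which PaX instrumentation they were compiled against. A quick check of how the concatenation composes, using one possible configuration (KERNEXEC_BTS plus UDEREF on x86_64):

#include <stdio.h>

#define MODULE_PROC_FAMILY ""			/* x86_64 defines none */
#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
#define MODULE_PAX_UDEREF   "UDEREF "
#define MODULE_ARCH_VERMAGIC \
	MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF

int main(void)
{
	/* a module built with these options would carry this tag */
	printf("vermagic arch part: \"%s\"\n", MODULE_ARCH_VERMAGIC);
	return 0;
}
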
13951diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
13952index c0fa356..07a498a 100644
13953--- a/arch/x86/include/asm/nmi.h
13954+++ b/arch/x86/include/asm/nmi.h
13955@@ -42,11 +42,11 @@ struct nmiaction {
13956 nmi_handler_t handler;
13957 unsigned long flags;
13958 const char *name;
13959-};
13960+} __do_const;
13961
13962 #define register_nmi_handler(t, fn, fg, n, init...) \
13963 ({ \
13964- static struct nmiaction init fn##_na = { \
13965+ static const struct nmiaction init fn##_na = { \
13966 .handler = (fn), \
13967 .name = (n), \
13968 .flags = (fg), \
13969@@ -54,7 +54,7 @@ struct nmiaction {
13970 __register_nmi_handler((t), &fn##_na); \
13971 })
13972
13973-int __register_nmi_handler(unsigned int, struct nmiaction *);
13974+int __register_nmi_handler(unsigned int, const struct nmiaction *);
13975
13976 void unregister_nmi_handler(unsigned int, const char *);
13977
13978diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
13979index 320f7bb..e89f8f8 100644
13980--- a/arch/x86/include/asm/page_64_types.h
13981+++ b/arch/x86/include/asm/page_64_types.h
13982@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
13983
13984 /* duplicated to the one in bootmem.h */
13985 extern unsigned long max_pfn;
13986-extern unsigned long phys_base;
13987+extern const unsigned long phys_base;
13988
13989 extern unsigned long __phys_addr(unsigned long);
13990 #define __phys_reloc_hide(x) (x)
13991diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
13992index 5edd174..c395822 100644
13993--- a/arch/x86/include/asm/paravirt.h
13994+++ b/arch/x86/include/asm/paravirt.h
13995@@ -564,7 +564,7 @@ static inline pmd_t __pmd(pmdval_t val)
13996 return (pmd_t) { ret };
13997 }
13998
13999-static inline pmdval_t pmd_val(pmd_t pmd)
14000+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
14001 {
14002 pmdval_t ret;
14003
14004@@ -630,6 +630,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
14005 val);
14006 }
14007
14008+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
14009+{
14010+ pgdval_t val = native_pgd_val(pgd);
14011+
14012+ if (sizeof(pgdval_t) > sizeof(long))
14013+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
14014+ val, (u64)val >> 32);
14015+ else
14016+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
14017+ val);
14018+}
14019+
14020 static inline void pgd_clear(pgd_t *pgdp)
14021 {
14022 set_pgd(pgdp, __pgd(0));
14023@@ -711,6 +723,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
14024 pv_mmu_ops.set_fixmap(idx, phys, flags);
14025 }
14026
14027+#ifdef CONFIG_PAX_KERNEXEC
14028+static inline unsigned long pax_open_kernel(void)
14029+{
14030+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
14031+}
14032+
14033+static inline unsigned long pax_close_kernel(void)
14034+{
14035+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
14036+}
14037+#else
14038+static inline unsigned long pax_open_kernel(void) { return 0; }
14039+static inline unsigned long pax_close_kernel(void) { return 0; }
14040+#endif
14041+
14042 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
14043
14044 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
14045@@ -927,7 +954,7 @@ extern void default_banner(void);
14046
14047 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
14048 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
14049-#define PARA_INDIRECT(addr) *%cs:addr
14050+#define PARA_INDIRECT(addr) *%ss:addr
14051 #endif
14052
14053 #define INTERRUPT_RETURN \
14054@@ -1002,6 +1029,21 @@ extern void default_banner(void);
14055 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
14056 CLBR_NONE, \
14057 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
14058+
14059+#define GET_CR0_INTO_RDI \
14060+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
14061+ mov %rax,%rdi
14062+
14063+#define SET_RDI_INTO_CR0 \
14064+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14065+
14066+#define GET_CR3_INTO_RDI \
14067+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
14068+ mov %rax,%rdi
14069+
14070+#define SET_RDI_INTO_CR3 \
14071+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
14072+
14073 #endif /* CONFIG_X86_32 */
14074
14075 #endif /* __ASSEMBLY__ */
14076diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
14077index 142236e..5446ffbc 100644
14078--- a/arch/x86/include/asm/paravirt_types.h
14079+++ b/arch/x86/include/asm/paravirt_types.h
14080@@ -84,7 +84,7 @@ struct pv_init_ops {
14081 */
14082 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
14083 unsigned long addr, unsigned len);
14084-};
14085+} __no_const;
14086
14087
14088 struct pv_lazy_ops {
14089@@ -97,7 +97,7 @@ struct pv_time_ops {
14090 unsigned long long (*sched_clock)(void);
14091 unsigned long long (*steal_clock)(int cpu);
14092 unsigned long (*get_tsc_khz)(void);
14093-};
14094+} __no_const;
14095
14096 struct pv_cpu_ops {
14097 /* hooks for various privileged instructions */
14098@@ -191,7 +191,7 @@ struct pv_cpu_ops {
14099
14100 void (*start_context_switch)(struct task_struct *prev);
14101 void (*end_context_switch)(struct task_struct *next);
14102-};
14103+} __no_const;
14104
14105 struct pv_irq_ops {
14106 /*
14107@@ -222,7 +222,7 @@ struct pv_apic_ops {
14108 unsigned long start_eip,
14109 unsigned long start_esp);
14110 #endif
14111-};
14112+} __no_const;
14113
14114 struct pv_mmu_ops {
14115 unsigned long (*read_cr2)(void);
14116@@ -312,6 +312,7 @@ struct pv_mmu_ops {
14117 struct paravirt_callee_save make_pud;
14118
14119 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
14120+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
14121 #endif /* PAGETABLE_LEVELS == 4 */
14122 #endif /* PAGETABLE_LEVELS >= 3 */
14123
14124@@ -323,6 +324,12 @@ struct pv_mmu_ops {
14125 an mfn. We can tell which is which from the index. */
14126 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
14127 phys_addr_t phys, pgprot_t flags);
14128+
14129+#ifdef CONFIG_PAX_KERNEXEC
14130+ unsigned long (*pax_open_kernel)(void);
14131+ unsigned long (*pax_close_kernel)(void);
14132+#endif
14133+
14134 };
14135
14136 struct arch_spinlock;
14137@@ -333,7 +340,7 @@ struct pv_lock_ops {
14138 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
14139 int (*spin_trylock)(struct arch_spinlock *lock);
14140 void (*spin_unlock)(struct arch_spinlock *lock);
14141-};
14142+} __no_const;
14143
14144 /* This contains all the paravirt structures: we get a convenient
14145 * number for each function using the offset which we use to indicate
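
__do_const and __no_const are annotations consumed by PaX's constify GCC plugin; constifying an ops structure pushes its function-pointer table into read-only memory. The effect is the same one plain const has in ordinary C, sketched here:

#include <stdio.h>

struct ops {
	int (*handler)(int);
};

static int real_handler(int x) { return x + 1; }

static const struct ops my_ops = {	/* placed in .rodata */
	.handler = real_handler,
};

int main(void)
{
	printf("%d\n", my_ops.handler(41));	/* 42 */
	/* my_ops.handler = other;  would not compile:
	 *   assignment of member in read-only object */
	return 0;
}
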
14146diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
14147index b4389a4..7024269 100644
14148--- a/arch/x86/include/asm/pgalloc.h
14149+++ b/arch/x86/include/asm/pgalloc.h
14150@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
14151 pmd_t *pmd, pte_t *pte)
14152 {
14153 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
14154+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
14155+}
14156+
14157+static inline void pmd_populate_user(struct mm_struct *mm,
14158+ pmd_t *pmd, pte_t *pte)
14159+{
14160+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
14161 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
14162 }
14163
14164@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
14165
14166 #ifdef CONFIG_X86_PAE
14167 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
14168+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
14169+{
14170+ pud_populate(mm, pudp, pmd);
14171+}
14172 #else /* !CONFIG_X86_PAE */
14173 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
14174 {
14175 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
14176 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
14177 }
14178+
14179+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
14180+{
14181+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
14182+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
14183+}
14184 #endif /* CONFIG_X86_PAE */
14185
14186 #if PAGETABLE_LEVELS > 3
14187@@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
14188 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
14189 }
14190
14191+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
14192+{
14193+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
14194+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
14195+}
14196+
14197 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
14198 {
14199 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
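
The pmd_populate_kernel()/pmd_populate_user() split above comes down to one bit: kernel page-table links drop _PAGE_USER (_KERNPG_TABLE) while user ones keep it (_PAGE_TABLE). The masks, spelled out with the standard x86 bit values:

#include <stdio.h>

#define _PAGE_PRESENT  0x001
#define _PAGE_RW       0x002
#define _PAGE_USER     0x004
#define _PAGE_ACCESSED 0x020
#define _PAGE_DIRTY    0x040

#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_TABLE   (_KERNPG_TABLE | _PAGE_USER)

int main(void)
{
	printf("_KERNPG_TABLE = %#05x\n", _KERNPG_TABLE);	/* 0x063 */
	printf("_PAGE_TABLE   = %#05x\n", _PAGE_TABLE);		/* 0x067 */
	return 0;
}
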
14200diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
14201index f2b489c..4f7e2e5 100644
14202--- a/arch/x86/include/asm/pgtable-2level.h
14203+++ b/arch/x86/include/asm/pgtable-2level.h
14204@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
14205
14206 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
14207 {
14208+ pax_open_kernel();
14209 *pmdp = pmd;
14210+ pax_close_kernel();
14211 }
14212
14213 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
14214diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
14215index 4cc9f2b..5fd9226 100644
14216--- a/arch/x86/include/asm/pgtable-3level.h
14217+++ b/arch/x86/include/asm/pgtable-3level.h
14218@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
14219
14220 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
14221 {
14222+ pax_open_kernel();
14223 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
14224+ pax_close_kernel();
14225 }
14226
14227 static inline void native_set_pud(pud_t *pudp, pud_t pud)
14228 {
14229+ pax_open_kernel();
14230 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
14231+ pax_close_kernel();
14232 }
14233
14234 /*
14235diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
14236index 1c1a955..50f828c 100644
14237--- a/arch/x86/include/asm/pgtable.h
14238+++ b/arch/x86/include/asm/pgtable.h
14239@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
14240
14241 #ifndef __PAGETABLE_PUD_FOLDED
14242 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
14243+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
14244 #define pgd_clear(pgd) native_pgd_clear(pgd)
14245 #endif
14246
14247@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
14248
14249 #define arch_end_context_switch(prev) do {} while(0)
14250
14251+#define pax_open_kernel() native_pax_open_kernel()
14252+#define pax_close_kernel() native_pax_close_kernel()
14253 #endif /* CONFIG_PARAVIRT */
14254
14255+#define __HAVE_ARCH_PAX_OPEN_KERNEL
14256+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
14257+
14258+#ifdef CONFIG_PAX_KERNEXEC
14259+static inline unsigned long native_pax_open_kernel(void)
14260+{
14261+ unsigned long cr0;
14262+
14263+ preempt_disable();
14264+ barrier();
14265+ cr0 = read_cr0() ^ X86_CR0_WP;
14266+ BUG_ON(cr0 & X86_CR0_WP);
14267+ write_cr0(cr0);
14268+ return cr0 ^ X86_CR0_WP;
14269+}
14270+
14271+static inline unsigned long native_pax_close_kernel(void)
14272+{
14273+ unsigned long cr0;
14274+
14275+ cr0 = read_cr0() ^ X86_CR0_WP;
14276+ BUG_ON(!(cr0 & X86_CR0_WP));
14277+ write_cr0(cr0);
14278+ barrier();
14279+ preempt_enable_no_resched();
14280+ return cr0 ^ X86_CR0_WP;
14281+}
14282+#else
14283+static inline unsigned long native_pax_open_kernel(void) { return 0; }
14284+static inline unsigned long native_pax_close_kernel(void) { return 0; }
14285+#endif
14286+
14287 /*
14288 * The following only work if pte_present() is true.
14289 * Undefined behaviour if not..
14290 */
14291+static inline int pte_user(pte_t pte)
14292+{
14293+ return pte_val(pte) & _PAGE_USER;
14294+}
14295+
14296 static inline int pte_dirty(pte_t pte)
14297 {
14298 return pte_flags(pte) & _PAGE_DIRTY;
14299@@ -200,9 +240,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
14300 return pte_clear_flags(pte, _PAGE_RW);
14301 }
14302
14303+static inline pte_t pte_mkread(pte_t pte)
14304+{
14305+ return __pte(pte_val(pte) | _PAGE_USER);
14306+}
14307+
14308 static inline pte_t pte_mkexec(pte_t pte)
14309 {
14310- return pte_clear_flags(pte, _PAGE_NX);
14311+#ifdef CONFIG_X86_PAE
14312+ if (__supported_pte_mask & _PAGE_NX)
14313+ return pte_clear_flags(pte, _PAGE_NX);
14314+ else
14315+#endif
14316+ return pte_set_flags(pte, _PAGE_USER);
14317+}
14318+
14319+static inline pte_t pte_exprotect(pte_t pte)
14320+{
14321+#ifdef CONFIG_X86_PAE
14322+ if (__supported_pte_mask & _PAGE_NX)
14323+ return pte_set_flags(pte, _PAGE_NX);
14324+ else
14325+#endif
14326+ return pte_clear_flags(pte, _PAGE_USER);
14327 }
14328
14329 static inline pte_t pte_mkdirty(pte_t pte)
14330@@ -394,6 +454,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
14331 #endif
14332
14333 #ifndef __ASSEMBLY__
14334+
14335+#ifdef CONFIG_PAX_PER_CPU_PGD
14336+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
14337+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
14338+{
14339+ return cpu_pgd[cpu];
14340+}
14341+#endif
14342+
14343 #include <linux/mm_types.h>
14344
14345 static inline int pte_none(pte_t pte)
14346@@ -583,7 +652,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
14347
14348 static inline int pgd_bad(pgd_t pgd)
14349 {
14350- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
14351+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
14352 }
14353
14354 static inline int pgd_none(pgd_t pgd)
14355@@ -606,7 +675,12 @@ static inline int pgd_none(pgd_t pgd)
14356 * pgd_offset() returns a (pgd_t *)
14357 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
14358 */
14359-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
14360+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
14361+
14362+#ifdef CONFIG_PAX_PER_CPU_PGD
14363+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
14364+#endif
14365+
14366 /*
14367 * a shortcut which implies the use of the kernel's pgd, instead
14368 * of a process's
14369@@ -617,6 +691,20 @@ static inline int pgd_none(pgd_t pgd)
14370 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
14371 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
14372
14373+#ifdef CONFIG_X86_32
14374+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
14375+#else
14376+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
14377+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
14378+
14379+#ifdef CONFIG_PAX_MEMORY_UDEREF
14380+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
14381+#else
14382+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
14383+#endif
14384+
14385+#endif
14386+
14387 #ifndef __ASSEMBLY__
14388
14389 extern int direct_gbpages;
14390@@ -781,11 +869,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
14391 * dst and src can be on the same page, but the range must not overlap,
14392 * and must not cross a page boundary.
14393 */
14394-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
14395+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
14396 {
14397- memcpy(dst, src, count * sizeof(pgd_t));
14398+ pax_open_kernel();
14399+ while (count--)
14400+ *dst++ = *src++;
14401+ pax_close_kernel();
14402 }
14403
14404+#ifdef CONFIG_PAX_PER_CPU_PGD
14405+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
14406+#endif
14407+
14408+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
14409+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
14410+#else
14411+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
14412+#endif
14413
14414 #include <asm-generic/pgtable.h>
14415 #endif /* __ASSEMBLY__ */
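
native_pax_open_kernel()/native_pax_close_kernel() bracket kernel writes to read-only pages by toggling CR0.WP, which ring 0 honours only while the bit is set. A sketch of the pairing with read_cr0()/write_cr0() stubbed out (CR0 is not reachable from userspace, and the preempt_disable()/barrier() discipline of the original is omitted):

#include <stdio.h>
#include <assert.h>

#define X86_CR0_WP (1UL << 16)

static unsigned long fake_cr0 = 0x80050033UL;	/* typical boot value, WP set */
static unsigned long read_cr0(void) { return fake_cr0; }
static void write_cr0(unsigned long v) { fake_cr0 = v; }

static unsigned long pax_open_kernel(void)
{
	unsigned long cr0 = read_cr0() ^ X86_CR0_WP;

	assert(!(cr0 & X86_CR0_WP));	/* WP must have been set */
	write_cr0(cr0);			/* clear WP: ring 0 may write RO pages */
	return cr0 ^ X86_CR0_WP;
}

static unsigned long pax_close_kernel(void)
{
	unsigned long cr0 = read_cr0() ^ X86_CR0_WP;

	assert(cr0 & X86_CR0_WP);	/* WP must have been clear */
	write_cr0(cr0);			/* set WP again */
	return cr0 ^ X86_CR0_WP;
}

int main(void)
{
	pax_open_kernel();
	printf("WP after open:  %lu\n", !!(fake_cr0 & X86_CR0_WP));	/* 0 */
	pax_close_kernel();
	printf("WP after close: %lu\n", !!(fake_cr0 & X86_CR0_WP));	/* 1 */
	return 0;
}
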
14416diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
14417index 8faa215..a8a17ea 100644
14418--- a/arch/x86/include/asm/pgtable_32.h
14419+++ b/arch/x86/include/asm/pgtable_32.h
14420@@ -25,9 +25,6 @@
14421 struct mm_struct;
14422 struct vm_area_struct;
14423
14424-extern pgd_t swapper_pg_dir[1024];
14425-extern pgd_t initial_page_table[1024];
14426-
14427 static inline void pgtable_cache_init(void) { }
14428 static inline void check_pgt_cache(void) { }
14429 void paging_init(void);
14430@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
14431 # include <asm/pgtable-2level.h>
14432 #endif
14433
14434+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
14435+extern pgd_t initial_page_table[PTRS_PER_PGD];
14436+#ifdef CONFIG_X86_PAE
14437+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
14438+#endif
14439+
14440 #if defined(CONFIG_HIGHPTE)
14441 #define pte_offset_map(dir, address) \
14442 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
14443@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
14444 /* Clear a kernel PTE and flush it from the TLB */
14445 #define kpte_clear_flush(ptep, vaddr) \
14446 do { \
14447+ pax_open_kernel(); \
14448 pte_clear(&init_mm, (vaddr), (ptep)); \
14449+ pax_close_kernel(); \
14450 __flush_tlb_one((vaddr)); \
14451 } while (0)
14452
14453@@ -75,6 +80,9 @@ do { \
14454
14455 #endif /* !__ASSEMBLY__ */
14456
14457+#define HAVE_ARCH_UNMAPPED_AREA
14458+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
14459+
14460 /*
14461 * kern_addr_valid() is (1) for FLATMEM and (0) for
14462 * SPARSEMEM and DISCONTIGMEM
14463diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
14464index ed5903b..c7fe163 100644
14465--- a/arch/x86/include/asm/pgtable_32_types.h
14466+++ b/arch/x86/include/asm/pgtable_32_types.h
14467@@ -8,7 +8,7 @@
14468 */
14469 #ifdef CONFIG_X86_PAE
14470 # include <asm/pgtable-3level_types.h>
14471-# define PMD_SIZE (1UL << PMD_SHIFT)
14472+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
14473 # define PMD_MASK (~(PMD_SIZE - 1))
14474 #else
14475 # include <asm/pgtable-2level_types.h>
14476@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
14477 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
14478 #endif
14479
14480+#ifdef CONFIG_PAX_KERNEXEC
14481+#ifndef __ASSEMBLY__
14482+extern unsigned char MODULES_EXEC_VADDR[];
14483+extern unsigned char MODULES_EXEC_END[];
14484+#endif
14485+#include <asm/boot.h>
14486+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
14487+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
14488+#else
14489+#define ktla_ktva(addr) (addr)
14490+#define ktva_ktla(addr) (addr)
14491+#endif
14492+
14493 #define MODULES_VADDR VMALLOC_START
14494 #define MODULES_END VMALLOC_END
14495 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
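
ktla_ktva()/ktva_ktla() translate between the kernel text's linear alias and its KERNEXEC-relocated virtual address by a constant offset. A round-trip with illustrative constants (the usual 32-bit PAGE_OFFSET and the default LOAD_PHYSICAL_ADDR):

#include <stdio.h>

#define PAGE_OFFSET        0xc0000000UL
#define LOAD_PHYSICAL_ADDR 0x01000000UL

#define ktla_ktva(addr) ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
#define ktva_ktla(addr) ((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)

int main(void)
{
	unsigned long ktla = 0x00100000UL;	/* hypothetical text address */
	unsigned long ktva = ktla_ktva(ktla);

	printf("ktva = %#lx, back = %#lx\n", ktva, ktva_ktla(ktva));
	return 0;
}
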
14496diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
14497index 47356f9..deb94a2 100644
14498--- a/arch/x86/include/asm/pgtable_64.h
14499+++ b/arch/x86/include/asm/pgtable_64.h
14500@@ -16,10 +16,14 @@
14501
14502 extern pud_t level3_kernel_pgt[512];
14503 extern pud_t level3_ident_pgt[512];
14504+extern pud_t level3_vmalloc_start_pgt[512];
14505+extern pud_t level3_vmalloc_end_pgt[512];
14506+extern pud_t level3_vmemmap_pgt[512];
14507+extern pud_t level2_vmemmap_pgt[512];
14508 extern pmd_t level2_kernel_pgt[512];
14509 extern pmd_t level2_fixmap_pgt[512];
14510-extern pmd_t level2_ident_pgt[512];
14511-extern pgd_t init_level4_pgt[];
14512+extern pmd_t level2_ident_pgt[512*2];
14513+extern pgd_t init_level4_pgt[512];
14514
14515 #define swapper_pg_dir init_level4_pgt
14516
14517@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
14518
14519 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
14520 {
14521+ pax_open_kernel();
14522 *pmdp = pmd;
14523+ pax_close_kernel();
14524 }
14525
14526 static inline void native_pmd_clear(pmd_t *pmd)
14527@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
14528
14529 static inline void native_set_pud(pud_t *pudp, pud_t pud)
14530 {
14531+ pax_open_kernel();
14532 *pudp = pud;
14533+ pax_close_kernel();
14534 }
14535
14536 static inline void native_pud_clear(pud_t *pud)
14537@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
14538
14539 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
14540 {
14541+ pax_open_kernel();
14542+ *pgdp = pgd;
14543+ pax_close_kernel();
14544+}
14545+
14546+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
14547+{
14548 *pgdp = pgd;
14549 }
14550
14551diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
14552index 766ea16..5b96cb3 100644
14553--- a/arch/x86/include/asm/pgtable_64_types.h
14554+++ b/arch/x86/include/asm/pgtable_64_types.h
14555@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
14556 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
14557 #define MODULES_END _AC(0xffffffffff000000, UL)
14558 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
14559+#define MODULES_EXEC_VADDR MODULES_VADDR
14560+#define MODULES_EXEC_END MODULES_END
14561+
14562+#define ktla_ktva(addr) (addr)
14563+#define ktva_ktla(addr) (addr)
14564
14565 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
14566diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
14567index 3c32db8..1ddccf5 100644
14568--- a/arch/x86/include/asm/pgtable_types.h
14569+++ b/arch/x86/include/asm/pgtable_types.h
14570@@ -16,13 +16,12 @@
14571 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
14572 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
14573 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
14574-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
14575+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
14576 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
14577 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
14578 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
14579-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
14580-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
14581-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
14582+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
14583+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
14584 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
14585
14586 /* If _PAGE_BIT_PRESENT is clear, we use these: */
14587@@ -40,7 +39,6 @@
14588 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
14589 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
14590 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
14591-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
14592 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
14593 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
14594 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
14595@@ -57,8 +55,10 @@
14596
14597 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
14598 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
14599-#else
14600+#elif defined(CONFIG_KMEMCHECK)
14601 #define _PAGE_NX (_AT(pteval_t, 0))
14602+#else
14603+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
14604 #endif
14605
14606 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
14607@@ -116,6 +116,9 @@
14608 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
14609 _PAGE_ACCESSED)
14610
14611+#define PAGE_READONLY_NOEXEC PAGE_READONLY
14612+#define PAGE_SHARED_NOEXEC PAGE_SHARED
14613+
14614 #define __PAGE_KERNEL_EXEC \
14615 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
14616 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
14617@@ -126,7 +129,7 @@
14618 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
14619 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
14620 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
14621-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
14622+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
14623 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
14624 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
14625 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
14626@@ -188,8 +191,8 @@
14627 * bits are combined, this will allow the user to access the high address mapped
14628 * VDSO in the presence of CONFIG_COMPAT_VDSO
14629 */
14630-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
14631-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
14632+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
14633+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
14634 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
14635 #endif
14636
14637@@ -227,7 +230,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
14638 {
14639 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
14640 }
14641+#endif
14642
14643+#if PAGETABLE_LEVELS == 3
14644+#include <asm-generic/pgtable-nopud.h>
14645+#endif
14646+
14647+#if PAGETABLE_LEVELS == 2
14648+#include <asm-generic/pgtable-nopmd.h>
14649+#endif
14650+
14651+#ifndef __ASSEMBLY__
14652 #if PAGETABLE_LEVELS > 3
14653 typedef struct { pudval_t pud; } pud_t;
14654
14655@@ -241,8 +254,6 @@ static inline pudval_t native_pud_val(pud_t pud)
14656 return pud.pud;
14657 }
14658 #else
14659-#include <asm-generic/pgtable-nopud.h>
14660-
14661 static inline pudval_t native_pud_val(pud_t pud)
14662 {
14663 return native_pgd_val(pud.pgd);
14664@@ -262,8 +273,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
14665 return pmd.pmd;
14666 }
14667 #else
14668-#include <asm-generic/pgtable-nopmd.h>
14669-
14670 static inline pmdval_t native_pmd_val(pmd_t pmd)
14671 {
14672 return native_pgd_val(pmd.pud.pgd);
14673@@ -303,7 +312,6 @@ typedef struct page *pgtable_t;
14674
14675 extern pteval_t __supported_pte_mask;
14676 extern void set_nx(void);
14677-extern int nx_enabled;
14678
14679 #define pgprot_writecombine pgprot_writecombine
14680 extern pgprot_t pgprot_writecombine(pgprot_t prot);
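
The _PAGE_NX rework above gives non-PAE 32-bit kernels a software NX: when there is no hardware bit 63, _PAGE_NX falls back to PTE bit 11, the kmemcheck "hidden" bit, so PaX can still record executability. The two masks side by side:

#include <stdio.h>
#include <stdint.h>

#define _PAGE_BIT_HIDDEN 11
#define _PAGE_BIT_NX     63

int main(void)
{
	uint64_t hw_nx = 1ULL << _PAGE_BIT_NX;		/* PAE / 64-bit */
	uint64_t sw_nx = 1ULL << _PAGE_BIT_HIDDEN;	/* 2-level fallback */

	printf("hardware NX mask: %#018llx\n", (unsigned long long)hw_nx);
	printf("software NX mask: %#018llx\n", (unsigned long long)sw_nx);
	return 0;
}
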
14681diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
14682index 888184b..a07ac89 100644
14683--- a/arch/x86/include/asm/processor.h
14684+++ b/arch/x86/include/asm/processor.h
14685@@ -287,7 +287,7 @@ struct tss_struct {
14686
14687 } ____cacheline_aligned;
14688
14689-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
14690+extern struct tss_struct init_tss[NR_CPUS];
14691
14692 /*
14693 * Save the original ist values for checking stack pointers during debugging
14694@@ -827,11 +827,18 @@ static inline void spin_lock_prefetch(const void *x)
14695 */
14696 #define TASK_SIZE PAGE_OFFSET
14697 #define TASK_SIZE_MAX TASK_SIZE
14698+
14699+#ifdef CONFIG_PAX_SEGMEXEC
14700+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
14701+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
14702+#else
14703 #define STACK_TOP TASK_SIZE
14704-#define STACK_TOP_MAX STACK_TOP
14705+#endif
14706+
14707+#define STACK_TOP_MAX TASK_SIZE
14708
14709 #define INIT_THREAD { \
14710- .sp0 = sizeof(init_stack) + (long)&init_stack, \
14711+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
14712 .vm86_info = NULL, \
14713 .sysenter_cs = __KERNEL_CS, \
14714 .io_bitmap_ptr = NULL, \
14715@@ -845,7 +852,7 @@ static inline void spin_lock_prefetch(const void *x)
14716 */
14717 #define INIT_TSS { \
14718 .x86_tss = { \
14719- .sp0 = sizeof(init_stack) + (long)&init_stack, \
14720+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
14721 .ss0 = __KERNEL_DS, \
14722 .ss1 = __KERNEL_CS, \
14723 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
14724@@ -856,11 +863,7 @@ static inline void spin_lock_prefetch(const void *x)
14725 extern unsigned long thread_saved_pc(struct task_struct *tsk);
14726
14727 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
14728-#define KSTK_TOP(info) \
14729-({ \
14730- unsigned long *__ptr = (unsigned long *)(info); \
14731- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
14732-})
14733+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
14734
14735 /*
14736 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
14737@@ -875,7 +878,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
14738 #define task_pt_regs(task) \
14739 ({ \
14740 struct pt_regs *__regs__; \
14741- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
14742+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
14743 __regs__ - 1; \
14744 })
14745
14746@@ -885,13 +888,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
14747 /*
14748 * User space process size. 47bits minus one guard page.
14749 */
14750-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
14751+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
14752
14753 /* This decides where the kernel will search for a free chunk of vm
14754 * space during mmap's.
14755 */
14756 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
14757- 0xc0000000 : 0xFFFFe000)
14758+ 0xc0000000 : 0xFFFFf000)
14759
14760 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
14761 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
14762@@ -902,11 +905,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
14763 #define STACK_TOP_MAX TASK_SIZE_MAX
14764
14765 #define INIT_THREAD { \
14766- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
14767+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
14768 }
14769
14770 #define INIT_TSS { \
14771- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
14772+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
14773 }
14774
14775 /*
14776@@ -934,6 +937,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
14777 */
14778 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
14779
14780+#ifdef CONFIG_PAX_SEGMEXEC
14781+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
14782+#endif
14783+
14784 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
14785
14786 /* Get/set a process' ability to use the timestamp counter instruction */
14787@@ -994,12 +1001,12 @@ extern bool cpu_has_amd_erratum(const int *);
14788 #define cpu_has_amd_erratum(x) (false)
14789 #endif /* CONFIG_CPU_SUP_AMD */
14790
14791-extern unsigned long arch_align_stack(unsigned long sp);
14792+#define arch_align_stack(x) ((x) & ~0xfUL)
14793 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
14794
14795 void default_idle(void);
14796 bool set_pm_idle_to_default(void);
14797
14798-void stop_this_cpu(void *dummy);
14799+void stop_this_cpu(void *dummy) __noreturn;
14800
14801 #endif /* _ASM_X86_PROCESSOR_H */
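
Replacing the arch_align_stack() function with ((x) & ~0xfUL) removes the per-exec random stack offset and leaves only 16-byte alignment. The macro in isolation:

#include <stdio.h>

#define arch_align_stack(x) ((x) & ~0xfUL)

int main(void)
{
	unsigned long sp = 0x7fffffffe3d7UL;

	printf("%#lx -> %#lx\n", sp, arch_align_stack(sp));	/* ...e3d0 */
	return 0;
}
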
14802diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
14803index 942a086..6c26446 100644
14804--- a/arch/x86/include/asm/ptrace.h
14805+++ b/arch/x86/include/asm/ptrace.h
14806@@ -85,28 +85,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
14807 }
14808
14809 /*
14810- * user_mode_vm(regs) determines whether a register set came from user mode.
14811+ * user_mode(regs) determines whether a register set came from user mode.
14812 * This is true if V8086 mode was enabled OR if the register set was from
14813 * protected mode with RPL-3 CS value. This tricky test checks that with
14814 * one comparison. Many places in the kernel can bypass this full check
14815- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
14816+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
14817+ * be used.
14818 */
14819-static inline int user_mode(struct pt_regs *regs)
14820+static inline int user_mode_novm(struct pt_regs *regs)
14821 {
14822 #ifdef CONFIG_X86_32
14823 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
14824 #else
14825- return !!(regs->cs & 3);
14826+ return !!(regs->cs & SEGMENT_RPL_MASK);
14827 #endif
14828 }
14829
14830-static inline int user_mode_vm(struct pt_regs *regs)
14831+static inline int user_mode(struct pt_regs *regs)
14832 {
14833 #ifdef CONFIG_X86_32
14834 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
14835 USER_RPL;
14836 #else
14837- return user_mode(regs);
14838+ return user_mode_novm(regs);
14839 #endif
14840 }
14841
14842@@ -122,15 +123,16 @@ static inline int v8086_mode(struct pt_regs *regs)
14843 #ifdef CONFIG_X86_64
14844 static inline bool user_64bit_mode(struct pt_regs *regs)
14845 {
14846+ unsigned long cs = regs->cs & 0xffff;
14847 #ifndef CONFIG_PARAVIRT
14848 /*
14849 * On non-paravirt systems, this is the only long mode CPL 3
14850 * selector. We do not allow long mode selectors in the LDT.
14851 */
14852- return regs->cs == __USER_CS;
14853+ return cs == __USER_CS;
14854 #else
14855 /* Headers are too twisted for this to go in paravirt.h. */
14856- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
14857+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
14858 #endif
14859 }
14860
14861@@ -181,9 +183,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
14862 * Traps from the kernel do not save sp and ss.
14863 * Use the helper function to retrieve sp.
14864 */
14865- if (offset == offsetof(struct pt_regs, sp) &&
14866- regs->cs == __KERNEL_CS)
14867- return kernel_stack_pointer(regs);
14868+ if (offset == offsetof(struct pt_regs, sp)) {
14869+ unsigned long cs = regs->cs & 0xffff;
14870+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
14871+ return kernel_stack_pointer(regs);
14872+ }
14873 #endif
14874 return *(unsigned long *)((unsigned long)regs + offset);
14875 }
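
After the rename, user_mode() performs the full check: a register set came from user space if CS has RPL 3 or EFLAGS.VM marks virtual-8086 mode, folded into a single comparison. A sketch of that predicate:

#include <stdio.h>

#define SEGMENT_RPL_MASK 0x3
#define USER_RPL         0x3
#define X86_VM_MASK      (1UL << 17)	/* EFLAGS.VM */

static int user_mode_sketch(unsigned long cs, unsigned long flags)
{
	/* true for RPL-3 CS or virtual-8086 mode, as in the patch */
	return ((cs & SEGMENT_RPL_MASK) | (flags & X86_VM_MASK)) >= USER_RPL;
}

int main(void)
{
	printf("%d\n", user_mode_sketch(0x73, 0));		/* user CS: 1 */
	printf("%d\n", user_mode_sketch(0x10, 0));		/* kernel CS: 0 */
	printf("%d\n", user_mode_sketch(0x10, X86_VM_MASK));	/* vm86: 1 */
	return 0;
}
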
14876diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
14877index fe1ec5b..dc5c3fe 100644
14878--- a/arch/x86/include/asm/realmode.h
14879+++ b/arch/x86/include/asm/realmode.h
14880@@ -22,16 +22,14 @@ struct real_mode_header {
14881 #endif
14882 /* APM/BIOS reboot */
14883 u32 machine_real_restart_asm;
14884-#ifdef CONFIG_X86_64
14885 u32 machine_real_restart_seg;
14886-#endif
14887 };
14888
14889 /* This must match data at trampoline_32/64.S */
14890 struct trampoline_header {
14891 #ifdef CONFIG_X86_32
14892 u32 start;
14893- u16 gdt_pad;
14894+ u16 boot_cs;
14895 u16 gdt_limit;
14896 u32 gdt_base;
14897 #else
14898diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
14899index a82c4f1..ac45053 100644
14900--- a/arch/x86/include/asm/reboot.h
14901+++ b/arch/x86/include/asm/reboot.h
14902@@ -6,13 +6,13 @@
14903 struct pt_regs;
14904
14905 struct machine_ops {
14906- void (*restart)(char *cmd);
14907- void (*halt)(void);
14908- void (*power_off)(void);
14909+ void (* __noreturn restart)(char *cmd);
14910+ void (* __noreturn halt)(void);
14911+ void (* __noreturn power_off)(void);
14912 void (*shutdown)(void);
14913 void (*crash_shutdown)(struct pt_regs *);
14914- void (*emergency_restart)(void);
14915-};
14916+ void (* __noreturn emergency_restart)(void);
14917+} __no_const;
14918
14919 extern struct machine_ops machine_ops;
14920
14921diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
14922index 2dbe4a7..ce1db00 100644
14923--- a/arch/x86/include/asm/rwsem.h
14924+++ b/arch/x86/include/asm/rwsem.h
14925@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
14926 {
14927 asm volatile("# beginning down_read\n\t"
14928 LOCK_PREFIX _ASM_INC "(%1)\n\t"
14929+
14930+#ifdef CONFIG_PAX_REFCOUNT
14931+ "jno 0f\n"
14932+ LOCK_PREFIX _ASM_DEC "(%1)\n"
14933+ "int $4\n0:\n"
14934+ _ASM_EXTABLE(0b, 0b)
14935+#endif
14936+
14937 /* adds 0x00000001 */
14938 " jns 1f\n"
14939 " call call_rwsem_down_read_failed\n"
14940@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
14941 "1:\n\t"
14942 " mov %1,%2\n\t"
14943 " add %3,%2\n\t"
14944+
14945+#ifdef CONFIG_PAX_REFCOUNT
14946+ "jno 0f\n"
14947+ "sub %3,%2\n"
14948+ "int $4\n0:\n"
14949+ _ASM_EXTABLE(0b, 0b)
14950+#endif
14951+
14952 " jle 2f\n\t"
14953 LOCK_PREFIX " cmpxchg %2,%0\n\t"
14954 " jnz 1b\n\t"
14955@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
14956 long tmp;
14957 asm volatile("# beginning down_write\n\t"
14958 LOCK_PREFIX " xadd %1,(%2)\n\t"
14959+
14960+#ifdef CONFIG_PAX_REFCOUNT
14961+ "jno 0f\n"
14962+ "mov %1,(%2)\n"
14963+ "int $4\n0:\n"
14964+ _ASM_EXTABLE(0b, 0b)
14965+#endif
14966+
14967 /* adds 0xffff0001, returns the old value */
14968 " test %1,%1\n\t"
14969 /* was the count 0 before? */
14970@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
14971 long tmp;
14972 asm volatile("# beginning __up_read\n\t"
14973 LOCK_PREFIX " xadd %1,(%2)\n\t"
14974+
14975+#ifdef CONFIG_PAX_REFCOUNT
14976+ "jno 0f\n"
14977+ "mov %1,(%2)\n"
14978+ "int $4\n0:\n"
14979+ _ASM_EXTABLE(0b, 0b)
14980+#endif
14981+
14982 /* subtracts 1, returns the old value */
14983 " jns 1f\n\t"
14984 " call call_rwsem_wake\n" /* expects old value in %edx */
14985@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
14986 long tmp;
14987 asm volatile("# beginning __up_write\n\t"
14988 LOCK_PREFIX " xadd %1,(%2)\n\t"
14989+
14990+#ifdef CONFIG_PAX_REFCOUNT
14991+ "jno 0f\n"
14992+ "mov %1,(%2)\n"
14993+ "int $4\n0:\n"
14994+ _ASM_EXTABLE(0b, 0b)
14995+#endif
14996+
14997 /* subtracts 0xffff0001, returns the old value */
14998 " jns 1f\n\t"
14999 " call call_rwsem_wake\n" /* expects old value in %edx */
15000@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
15001 {
15002 asm volatile("# beginning __downgrade_write\n\t"
15003 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
15004+
15005+#ifdef CONFIG_PAX_REFCOUNT
15006+ "jno 0f\n"
15007+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
15008+ "int $4\n0:\n"
15009+ _ASM_EXTABLE(0b, 0b)
15010+#endif
15011+
15012 /*
15013 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
15014 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
15015@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
15016 */
15017 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
15018 {
15019- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
15020+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
15021+
15022+#ifdef CONFIG_PAX_REFCOUNT
15023+ "jno 0f\n"
15024+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
15025+ "int $4\n0:\n"
15026+ _ASM_EXTABLE(0b, 0b)
15027+#endif
15028+
15029 : "+m" (sem->count)
15030 : "er" (delta));
15031 }
15032@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
15033 */
15034 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
15035 {
15036- return delta + xadd(&sem->count, delta);
15037+ return delta + xadd_check_overflow(&sem->count, delta);
15038 }
15039
15040 #endif /* __KERNEL__ */
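
/*
 * Illustrative sketch (not from the patch): every PAX_REFCOUNT block above
 * follows one pattern -- perform the atomic update, "jno" past the handler
 * when the signed result did not overflow, otherwise undo the update and
 * raise "int $4" (the x86 overflow exception), with an exception-table
 * entry so execution resumes at label 0 once the event is reported.  A
 * portable user-space analogue of the detect-undo-report idea, using a
 * plain int in place of the kernel's lock-prefixed asm:
 */
#include <stdio.h>
#include <limits.h>

static int refcount = INT_MAX;	/* about to wrap */

static int refcount_inc_checked(int *rc)
{
	int newval;

	if (__builtin_add_overflow(*rc, 1, &newval)) {
		/* overflow: the update never commits, report the event */
		fprintf(stderr, "refcount overflow detected, update undone\n");
		return -1;
	}
	*rc = newval;
	return 0;
}

int main(void)
{
	return refcount_inc_checked(&refcount) ? 1 : 0;
}
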
15041diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
15042index c48a950..c6d7468 100644
15043--- a/arch/x86/include/asm/segment.h
15044+++ b/arch/x86/include/asm/segment.h
15045@@ -64,10 +64,15 @@
15046 * 26 - ESPFIX small SS
15047 * 27 - per-cpu [ offset to per-cpu data area ]
15048 * 28 - stack_canary-20 [ for stack protector ]
15049- * 29 - unused
15050- * 30 - unused
15051+ * 29 - PCI BIOS CS
15052+ * 30 - PCI BIOS DS
15053 * 31 - TSS for double fault handler
15054 */
15055+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
15056+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
15057+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
15058+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
15059+
15060 #define GDT_ENTRY_TLS_MIN 6
15061 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
15062
15063@@ -79,6 +84,8 @@
15064
15065 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
15066
15067+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
15068+
15069 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
15070
15071 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
15072@@ -104,6 +111,12 @@
15073 #define __KERNEL_STACK_CANARY 0
15074 #endif
15075
15076+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
15077+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
15078+
15079+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
15080+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
15081+
15082 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
15083
15084 /*
15085@@ -141,7 +154,7 @@
15086 */
15087
15088 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
15089-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
15090+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
15091
15092
15093 #else
15094@@ -165,6 +178,8 @@
15095 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
15096 #define __USER32_DS __USER_DS
15097
15098+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
15099+
15100 #define GDT_ENTRY_TSS 8 /* needs two entries */
15101 #define GDT_ENTRY_LDT 10 /* needs two entries */
15102 #define GDT_ENTRY_TLS_MIN 12
15103@@ -185,6 +200,7 @@
15104 #endif
15105
15106 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
15107+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
15108 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
15109 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
15110 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
15111@@ -265,7 +281,7 @@ static inline unsigned long get_limit(unsigned long segment)
15112 {
15113 unsigned long __limit;
15114 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
15115- return __limit + 1;
15116+ return __limit;
15117 }
15118
15119 #endif /* !__ASSEMBLY__ */
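
/*
 * Illustrative sketch (not from the patch): an x86 segment selector packs
 * the descriptor-table index into bits 3..15, the table indicator into
 * bit 2 and the requested privilege level (RPL) into bits 0..1, which is
 * why the __FOO_CS/__FOO_DS macros above multiply a GDT_ENTRY_* index by 8
 * and why user selectors add 3.  Masking with 0xFFFC, as the reworked
 * SEGMENT_IS_PNP_CODE does, strips the RPL before comparing selectors.
 */
#include <assert.h>

#define SELECTOR(index, rpl)	(((index) << 3) | (rpl))

int main(void)
{
	unsigned int kernel_cs = SELECTOR(2, 0);	/* e.g. GDT entry 2, ring 0 */
	unsigned int user_cs   = SELECTOR(14, 3);	/* e.g. GDT entry 14, ring 3 */

	assert(kernel_cs == 2 * 8);
	assert(user_cs == 14 * 8 + 3);
	/* RPL-insensitive comparison, as in SEGMENT_IS_PNP_CODE */
	assert((user_cs & 0xFFFCU) == (SELECTOR(14, 0) & 0xFFFCU));
	return 0;
}
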
15120diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
15121index b073aae..39f9bdd 100644
15122--- a/arch/x86/include/asm/smp.h
15123+++ b/arch/x86/include/asm/smp.h
15124@@ -36,7 +36,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
15125 /* cpus sharing the last level cache: */
15126 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
15127 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
15128-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
15129+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
15130
15131 static inline struct cpumask *cpu_sibling_mask(int cpu)
15132 {
15133@@ -79,7 +79,7 @@ struct smp_ops {
15134
15135 void (*send_call_func_ipi)(const struct cpumask *mask);
15136 void (*send_call_func_single_ipi)(int cpu);
15137-};
15138+} __no_const;
15139
15140 /* Globals due to paravirt */
15141 extern void set_cpu_sibling_map(int cpu);
15142@@ -191,14 +191,8 @@ extern unsigned disabled_cpus __cpuinitdata;
15143 extern int safe_smp_processor_id(void);
15144
15145 #elif defined(CONFIG_X86_64_SMP)
15146-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
15147-
15148-#define stack_smp_processor_id() \
15149-({ \
15150- struct thread_info *ti; \
15151- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
15152- ti->cpu; \
15153-})
15154+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
15155+#define stack_smp_processor_id() raw_smp_processor_id()
15156 #define safe_smp_processor_id() smp_processor_id()
15157
15158 #endif
15159diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
15160index 33692ea..350a534 100644
15161--- a/arch/x86/include/asm/spinlock.h
15162+++ b/arch/x86/include/asm/spinlock.h
15163@@ -172,6 +172,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
15164 static inline void arch_read_lock(arch_rwlock_t *rw)
15165 {
15166 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
15167+
15168+#ifdef CONFIG_PAX_REFCOUNT
15169+ "jno 0f\n"
15170+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
15171+ "int $4\n0:\n"
15172+ _ASM_EXTABLE(0b, 0b)
15173+#endif
15174+
15175 "jns 1f\n"
15176 "call __read_lock_failed\n\t"
15177 "1:\n"
15178@@ -181,6 +189,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
15179 static inline void arch_write_lock(arch_rwlock_t *rw)
15180 {
15181 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
15182+
15183+#ifdef CONFIG_PAX_REFCOUNT
15184+ "jno 0f\n"
15185+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
15186+ "int $4\n0:\n"
15187+ _ASM_EXTABLE(0b, 0b)
15188+#endif
15189+
15190 "jz 1f\n"
15191 "call __write_lock_failed\n\t"
15192 "1:\n"
15193@@ -210,13 +226,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
15194
15195 static inline void arch_read_unlock(arch_rwlock_t *rw)
15196 {
15197- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
15198+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
15199+
15200+#ifdef CONFIG_PAX_REFCOUNT
15201+ "jno 0f\n"
15202+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
15203+ "int $4\n0:\n"
15204+ _ASM_EXTABLE(0b, 0b)
15205+#endif
15206+
15207 :"+m" (rw->lock) : : "memory");
15208 }
15209
15210 static inline void arch_write_unlock(arch_rwlock_t *rw)
15211 {
15212- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
15213+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
15214+
15215+#ifdef CONFIG_PAX_REFCOUNT
15216+ "jno 0f\n"
15217+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
15218+ "int $4\n0:\n"
15219+ _ASM_EXTABLE(0b, 0b)
15220+#endif
15221+
15222 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
15223 }
15224
15225diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
15226index 6a99859..03cb807 100644
15227--- a/arch/x86/include/asm/stackprotector.h
15228+++ b/arch/x86/include/asm/stackprotector.h
15229@@ -47,7 +47,7 @@
15230 * head_32 for boot CPU and setup_per_cpu_areas() for others.
15231 */
15232 #define GDT_STACK_CANARY_INIT \
15233- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
15234+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
15235
15236 /*
15237 * Initialize the stackprotector canary value.
15238@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
15239
15240 static inline void load_stack_canary_segment(void)
15241 {
15242-#ifdef CONFIG_X86_32
15243+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
15244 asm volatile ("mov %0, %%gs" : : "r" (0));
15245 #endif
15246 }
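
/*
 * Illustrative sketch (not from the patch): the third argument of
 * GDT_ENTRY_INIT is the segment limit, and an x86 limit is inclusive --
 * the highest valid byte offset, not the byte count.  A segment meant to
 * cover the 0x18-byte stack-canary window therefore gets limit 0x17,
 * which is all the hunk above changes.  The arithmetic:
 */
#include <assert.h>

int main(void)
{
	unsigned int size  = 0x18;	/* bytes the segment must cover */
	unsigned int limit = size - 1;	/* inclusive limit field */

	assert(limit == 0x17);
	assert(limit + 1 == size);	/* offsets 0..limit valid; offset == size faults */
	return 0;
}
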
15247diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
15248index 70bbe39..4ae2bd4 100644
15249--- a/arch/x86/include/asm/stacktrace.h
15250+++ b/arch/x86/include/asm/stacktrace.h
15251@@ -11,28 +11,20 @@
15252
15253 extern int kstack_depth_to_print;
15254
15255-struct thread_info;
15256+struct task_struct;
15257 struct stacktrace_ops;
15258
15259-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
15260- unsigned long *stack,
15261- unsigned long bp,
15262- const struct stacktrace_ops *ops,
15263- void *data,
15264- unsigned long *end,
15265- int *graph);
15266+typedef unsigned long walk_stack_t(struct task_struct *task,
15267+ void *stack_start,
15268+ unsigned long *stack,
15269+ unsigned long bp,
15270+ const struct stacktrace_ops *ops,
15271+ void *data,
15272+ unsigned long *end,
15273+ int *graph);
15274
15275-extern unsigned long
15276-print_context_stack(struct thread_info *tinfo,
15277- unsigned long *stack, unsigned long bp,
15278- const struct stacktrace_ops *ops, void *data,
15279- unsigned long *end, int *graph);
15280-
15281-extern unsigned long
15282-print_context_stack_bp(struct thread_info *tinfo,
15283- unsigned long *stack, unsigned long bp,
15284- const struct stacktrace_ops *ops, void *data,
15285- unsigned long *end, int *graph);
15286+extern walk_stack_t print_context_stack;
15287+extern walk_stack_t print_context_stack_bp;
15288
15289 /* Generic stack tracer with callbacks */
15290
15291@@ -40,7 +32,7 @@ struct stacktrace_ops {
15292 void (*address)(void *data, unsigned long address, int reliable);
15293 /* On negative return stop dumping */
15294 int (*stack)(void *data, char *name);
15295- walk_stack_t walk_stack;
15296+ walk_stack_t *walk_stack;
15297 };
15298
15299 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
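
/*
 * Illustrative sketch (not from the patch): the hunk above switches
 * walk_stack_t from a pointer-to-function typedef to a function typedef.
 * A function typedef can declare the functions themselves ("extern
 * walk_stack_t print_context_stack;") and the struct member then takes an
 * explicit '*', keeping prototypes and pointer declarations in sync from
 * a single definition.  Names below are made up for the demo.
 */
#include <stdio.h>

typedef int handler_t(int value);	/* a function type, not a pointer */

extern handler_t double_it;		/* declares a real function */

int double_it(int value) { return value * 2; }

struct ops {
	handler_t *handle;		/* the pointer needs an explicit '*' */
};

int main(void)
{
	struct ops ops = { .handle = double_it };

	printf("%d\n", ops.handle(21));	/* prints 42 */
	return 0;
}
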
15300diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
15301index 4ec45b3..a4f0a8a 100644
15302--- a/arch/x86/include/asm/switch_to.h
15303+++ b/arch/x86/include/asm/switch_to.h
15304@@ -108,7 +108,7 @@ do { \
15305 "call __switch_to\n\t" \
15306 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
15307 __switch_canary \
15308- "movq %P[thread_info](%%rsi),%%r8\n\t" \
15309+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
15310 "movq %%rax,%%rdi\n\t" \
15311 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
15312 "jnz ret_from_fork\n\t" \
15313@@ -119,7 +119,7 @@ do { \
15314 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
15315 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
15316 [_tif_fork] "i" (_TIF_FORK), \
15317- [thread_info] "i" (offsetof(struct task_struct, stack)), \
15318+ [thread_info] "m" (current_tinfo), \
15319 [current_task] "m" (current_task) \
15320 __switch_canary_iparam \
15321 : "memory", "cc" __EXTRA_CLOBBER)
15322diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
15323index 2d946e6..e453ec4 100644
15324--- a/arch/x86/include/asm/thread_info.h
15325+++ b/arch/x86/include/asm/thread_info.h
15326@@ -10,6 +10,7 @@
15327 #include <linux/compiler.h>
15328 #include <asm/page.h>
15329 #include <asm/types.h>
15330+#include <asm/percpu.h>
15331
15332 /*
15333 * low level task data that entry.S needs immediate access to
15334@@ -24,7 +25,6 @@ struct exec_domain;
15335 #include <linux/atomic.h>
15336
15337 struct thread_info {
15338- struct task_struct *task; /* main task structure */
15339 struct exec_domain *exec_domain; /* execution domain */
15340 __u32 flags; /* low level flags */
15341 __u32 status; /* thread synchronous flags */
15342@@ -34,19 +34,13 @@ struct thread_info {
15343 mm_segment_t addr_limit;
15344 struct restart_block restart_block;
15345 void __user *sysenter_return;
15346-#ifdef CONFIG_X86_32
15347- unsigned long previous_esp; /* ESP of the previous stack in
15348- case of nested (IRQ) stacks
15349- */
15350- __u8 supervisor_stack[0];
15351-#endif
15352+ unsigned long lowest_stack;
15353 unsigned int sig_on_uaccess_error:1;
15354 unsigned int uaccess_err:1; /* uaccess failed */
15355 };
15356
15357-#define INIT_THREAD_INFO(tsk) \
15358+#define INIT_THREAD_INFO \
15359 { \
15360- .task = &tsk, \
15361 .exec_domain = &default_exec_domain, \
15362 .flags = 0, \
15363 .cpu = 0, \
15364@@ -57,7 +51,7 @@ struct thread_info {
15365 }, \
15366 }
15367
15368-#define init_thread_info (init_thread_union.thread_info)
15369+#define init_thread_info (init_thread_union.stack)
15370 #define init_stack (init_thread_union.stack)
15371
15372 #else /* !__ASSEMBLY__ */
15373@@ -98,6 +92,7 @@ struct thread_info {
15374 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
15375 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
15376 #define TIF_X32 30 /* 32-bit native x86-64 binary */
15377+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
15378
15379 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
15380 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
15381@@ -122,17 +117,18 @@ struct thread_info {
15382 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
15383 #define _TIF_ADDR32 (1 << TIF_ADDR32)
15384 #define _TIF_X32 (1 << TIF_X32)
15385+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
15386
15387 /* work to do in syscall_trace_enter() */
15388 #define _TIF_WORK_SYSCALL_ENTRY \
15389 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
15390 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
15391- _TIF_NOHZ)
15392+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
15393
15394 /* work to do in syscall_trace_leave() */
15395 #define _TIF_WORK_SYSCALL_EXIT \
15396 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
15397- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
15398+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
15399
15400 /* work to do on interrupt/exception return */
15401 #define _TIF_WORK_MASK \
15402@@ -143,7 +139,7 @@ struct thread_info {
15403 /* work to do on any return to user space */
15404 #define _TIF_ALLWORK_MASK \
15405 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
15406- _TIF_NOHZ)
15407+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
15408
15409 /* Only used for 64 bit */
15410 #define _TIF_DO_NOTIFY_MASK \
15411@@ -159,45 +155,40 @@ struct thread_info {
15412
15413 #define PREEMPT_ACTIVE 0x10000000
15414
15415-#ifdef CONFIG_X86_32
15416-
15417-#define STACK_WARN (THREAD_SIZE/8)
15418-/*
15419- * macros/functions for gaining access to the thread information structure
15420- *
15421- * preempt_count needs to be 1 initially, until the scheduler is functional.
15422- */
15423-#ifndef __ASSEMBLY__
15424-
15425-
15426-/* how to get the current stack pointer from C */
15427-register unsigned long current_stack_pointer asm("esp") __used;
15428-
15429-/* how to get the thread information struct from C */
15430-static inline struct thread_info *current_thread_info(void)
15431-{
15432- return (struct thread_info *)
15433- (current_stack_pointer & ~(THREAD_SIZE - 1));
15434-}
15435-
15436-#else /* !__ASSEMBLY__ */
15437-
15438+#ifdef __ASSEMBLY__
15439 /* how to get the thread information struct from ASM */
15440 #define GET_THREAD_INFO(reg) \
15441- movl $-THREAD_SIZE, reg; \
15442- andl %esp, reg
15443+ mov PER_CPU_VAR(current_tinfo), reg
15444
15445 /* use this one if reg already contains %esp */
15446-#define GET_THREAD_INFO_WITH_ESP(reg) \
15447- andl $-THREAD_SIZE, reg
15448+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
15449+#else
15450+/* how to get the thread information struct from C */
15451+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
15452+
15453+static __always_inline struct thread_info *current_thread_info(void)
15454+{
15455+ return this_cpu_read_stable(current_tinfo);
15456+}
15457+#endif
15458+
15459+#ifdef CONFIG_X86_32
15460+
15461+#define STACK_WARN (THREAD_SIZE/8)
15462+/*
15463+ * macros/functions for gaining access to the thread information structure
15464+ *
15465+ * preempt_count needs to be 1 initially, until the scheduler is functional.
15466+ */
15467+#ifndef __ASSEMBLY__
15468+
15469+/* how to get the current stack pointer from C */
15470+register unsigned long current_stack_pointer asm("esp") __used;
15471
15472 #endif
15473
15474 #else /* X86_32 */
15475
15476-#include <asm/percpu.h>
15477-#define KERNEL_STACK_OFFSET (5*8)
15478-
15479 /*
15480 * macros/functions for gaining access to the thread information structure
15481 * preempt_count needs to be 1 initially, until the scheduler is functional.
15482@@ -205,27 +196,8 @@ static inline struct thread_info *current_thread_info(void)
15483 #ifndef __ASSEMBLY__
15484 DECLARE_PER_CPU(unsigned long, kernel_stack);
15485
15486-static inline struct thread_info *current_thread_info(void)
15487-{
15488- struct thread_info *ti;
15489- ti = (void *)(this_cpu_read_stable(kernel_stack) +
15490- KERNEL_STACK_OFFSET - THREAD_SIZE);
15491- return ti;
15492-}
15493-
15494-#else /* !__ASSEMBLY__ */
15495-
15496-/* how to get the thread information struct from ASM */
15497-#define GET_THREAD_INFO(reg) \
15498- movq PER_CPU_VAR(kernel_stack),reg ; \
15499- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
15500-
15501-/*
15502- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
15503- * a certain register (to be used in assembler memory operands).
15504- */
15505-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
15506-
15507+/* how to get the current stack pointer from C */
15508+register unsigned long current_stack_pointer asm("rsp") __used;
15509 #endif
15510
15511 #endif /* !X86_32 */
15512@@ -286,5 +258,12 @@ static inline bool is_ia32_task(void)
15513 extern void arch_task_cache_init(void);
15514 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
15515 extern void arch_release_task_struct(struct task_struct *tsk);
15516+
15517+#define __HAVE_THREAD_FUNCTIONS
15518+#define task_thread_info(task) (&(task)->tinfo)
15519+#define task_stack_page(task) ((task)->stack)
15520+#define setup_thread_stack(p, org) do {} while (0)
15521+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
15522+
15523 #endif
15524 #endif /* _ASM_X86_THREAD_INFO_H */
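
/*
 * Illustrative sketch (not from the patch): stock kernels of this era
 * found thread_info by masking the stack pointer with ~(THREAD_SIZE - 1);
 * the patch instead keeps a per-cpu pointer (current_tinfo), so
 * thread_info no longer sits at the base of the attacker-reachable kernel
 * stack.  A user-space analogue using thread-local storage in place of
 * the kernel's PER_CPU machinery:
 */
#include <stdio.h>

struct thread_info_demo {
	unsigned int flags;
};

/* analogue of DECLARE_PER_CPU(struct thread_info *, current_tinfo) */
static __thread struct thread_info_demo *current_tinfo;

static struct thread_info_demo *current_thread_info_demo(void)
{
	return current_tinfo;	/* one load, no stack-pointer arithmetic */
}

int main(void)
{
	struct thread_info_demo ti = { .flags = 1 };

	current_tinfo = &ti;
	printf("flags=%u\n", current_thread_info_demo()->flags);
	return 0;
}
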
15525diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
15526index 1709801..0a60f2f 100644
15527--- a/arch/x86/include/asm/uaccess.h
15528+++ b/arch/x86/include/asm/uaccess.h
15529@@ -7,6 +7,7 @@
15530 #include <linux/compiler.h>
15531 #include <linux/thread_info.h>
15532 #include <linux/string.h>
15533+#include <linux/sched.h>
15534 #include <asm/asm.h>
15535 #include <asm/page.h>
15536 #include <asm/smap.h>
15537@@ -29,7 +30,12 @@
15538
15539 #define get_ds() (KERNEL_DS)
15540 #define get_fs() (current_thread_info()->addr_limit)
15541+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15542+void __set_fs(mm_segment_t x);
15543+void set_fs(mm_segment_t x);
15544+#else
15545 #define set_fs(x) (current_thread_info()->addr_limit = (x))
15546+#endif
15547
15548 #define segment_eq(a, b) ((a).seg == (b).seg)
15549
15550@@ -77,8 +83,33 @@
15551 * checks that the pointer is in the user space range - after calling
15552 * this function, memory access functions may still return -EFAULT.
15553 */
15554-#define access_ok(type, addr, size) \
15555- (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
15556+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
15557+#define access_ok(type, addr, size) \
15558+({ \
15559+ long __size = size; \
15560+ unsigned long __addr = (unsigned long)addr; \
15561+ unsigned long __addr_ao = __addr & PAGE_MASK; \
15562+ unsigned long __end_ao = __addr + __size - 1; \
15563+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
15564+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
15565+ while (__addr_ao <= __end_ao) { \
15566+ char __c_ao; \
15567+ __addr_ao += PAGE_SIZE; \
15568+ if (__size > PAGE_SIZE) \
15569+ cond_resched(); \
15570+ if (__get_user(__c_ao, (char __user *)__addr)) \
15571+ break; \
15572+ if (type != VERIFY_WRITE) { \
15573+ __addr = __addr_ao; \
15574+ continue; \
15575+ } \
15576+ if (__put_user(__c_ao, (char __user *)__addr)) \
15577+ break; \
15578+ __addr = __addr_ao; \
15579+ } \
15580+ } \
15581+ __ret_ao; \
15582+})
15583
15584 /*
15585 * The exception table consists of pairs of addresses relative to the
15586@@ -189,13 +220,21 @@ extern int __get_user_bad(void);
15587 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
15588 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
15589
15590-
15591+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15592+#define __copyuser_seg "gs;"
15593+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
15594+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
15595+#else
15596+#define __copyuser_seg
15597+#define __COPYUSER_SET_ES
15598+#define __COPYUSER_RESTORE_ES
15599+#endif
15600
15601 #ifdef CONFIG_X86_32
15602 #define __put_user_asm_u64(x, addr, err, errret) \
15603 asm volatile(ASM_STAC "\n" \
15604- "1: movl %%eax,0(%2)\n" \
15605- "2: movl %%edx,4(%2)\n" \
15606+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
15607+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
15608 "3: " ASM_CLAC "\n" \
15609 ".section .fixup,\"ax\"\n" \
15610 "4: movl %3,%0\n" \
15611@@ -208,8 +247,8 @@ extern int __get_user_bad(void);
15612
15613 #define __put_user_asm_ex_u64(x, addr) \
15614 asm volatile(ASM_STAC "\n" \
15615- "1: movl %%eax,0(%1)\n" \
15616- "2: movl %%edx,4(%1)\n" \
15617+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
15618+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
15619 "3: " ASM_CLAC "\n" \
15620 _ASM_EXTABLE_EX(1b, 2b) \
15621 _ASM_EXTABLE_EX(2b, 3b) \
15622@@ -259,7 +298,7 @@ extern void __put_user_8(void);
15623 __typeof__(*(ptr)) __pu_val; \
15624 __chk_user_ptr(ptr); \
15625 might_fault(); \
15626- __pu_val = x; \
15627+ __pu_val = (x); \
15628 switch (sizeof(*(ptr))) { \
15629 case 1: \
15630 __put_user_x(1, __pu_val, ptr, __ret_pu); \
15631@@ -358,7 +397,7 @@ do { \
15632
15633 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
15634 asm volatile(ASM_STAC "\n" \
15635- "1: mov"itype" %2,%"rtype"1\n" \
15636+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
15637 "2: " ASM_CLAC "\n" \
15638 ".section .fixup,\"ax\"\n" \
15639 "3: mov %3,%0\n" \
15640@@ -366,7 +405,7 @@ do { \
15641 " jmp 2b\n" \
15642 ".previous\n" \
15643 _ASM_EXTABLE(1b, 3b) \
15644- : "=r" (err), ltype(x) \
15645+ : "=r" (err), ltype (x) \
15646 : "m" (__m(addr)), "i" (errret), "0" (err))
15647
15648 #define __get_user_size_ex(x, ptr, size) \
15649@@ -391,7 +430,7 @@ do { \
15650 } while (0)
15651
15652 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
15653- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
15654+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
15655 "2:\n" \
15656 _ASM_EXTABLE_EX(1b, 2b) \
15657 : ltype(x) : "m" (__m(addr)))
15658@@ -408,13 +447,24 @@ do { \
15659 int __gu_err; \
15660 unsigned long __gu_val; \
15661 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
15662- (x) = (__force __typeof__(*(ptr)))__gu_val; \
15663+ (x) = (__typeof__(*(ptr)))__gu_val; \
15664 __gu_err; \
15665 })
15666
15667 /* FIXME: this hack is definitely wrong -AK */
15668 struct __large_struct { unsigned long buf[100]; };
15669-#define __m(x) (*(struct __large_struct __user *)(x))
15670+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15671+#define ____m(x) \
15672+({ \
15673+ unsigned long ____x = (unsigned long)(x); \
15674+ if (____x < PAX_USER_SHADOW_BASE) \
15675+ ____x += PAX_USER_SHADOW_BASE; \
15676+ (void __user *)____x; \
15677+})
15678+#else
15679+#define ____m(x) (x)
15680+#endif
15681+#define __m(x) (*(struct __large_struct __user *)____m(x))
15682
15683 /*
15684 * Tell gcc we read from memory instead of writing: this is because
15685@@ -423,7 +473,7 @@ struct __large_struct { unsigned long buf[100]; };
15686 */
15687 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
15688 asm volatile(ASM_STAC "\n" \
15689- "1: mov"itype" %"rtype"1,%2\n" \
15690+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
15691 "2: " ASM_CLAC "\n" \
15692 ".section .fixup,\"ax\"\n" \
15693 "3: mov %3,%0\n" \
15694@@ -431,10 +481,10 @@ struct __large_struct { unsigned long buf[100]; };
15695 ".previous\n" \
15696 _ASM_EXTABLE(1b, 3b) \
15697 : "=r"(err) \
15698- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
15699+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
15700
15701 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
15702- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
15703+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
15704 "2:\n" \
15705 _ASM_EXTABLE_EX(1b, 2b) \
15706 : : ltype(x), "m" (__m(addr)))
15707@@ -473,8 +523,12 @@ struct __large_struct { unsigned long buf[100]; };
15708 * On error, the variable @x is set to zero.
15709 */
15710
15711+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15712+#define __get_user(x, ptr) get_user((x), (ptr))
15713+#else
15714 #define __get_user(x, ptr) \
15715 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
15716+#endif
15717
15718 /**
15719 * __put_user: - Write a simple value into user space, with less checking.
15720@@ -496,8 +550,12 @@ struct __large_struct { unsigned long buf[100]; };
15721 * Returns zero on success, or -EFAULT on error.
15722 */
15723
15724+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15725+#define __put_user(x, ptr) put_user((x), (ptr))
15726+#else
15727 #define __put_user(x, ptr) \
15728 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
15729+#endif
15730
15731 #define __get_user_unaligned __get_user
15732 #define __put_user_unaligned __put_user
15733@@ -515,7 +573,7 @@ struct __large_struct { unsigned long buf[100]; };
15734 #define get_user_ex(x, ptr) do { \
15735 unsigned long __gue_val; \
15736 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
15737- (x) = (__force __typeof__(*(ptr)))__gue_val; \
15738+ (x) = (__typeof__(*(ptr)))__gue_val; \
15739 } while (0)
15740
15741 #define put_user_try uaccess_try
15742@@ -532,8 +590,8 @@ strncpy_from_user(char *dst, const char __user *src, long count);
15743 extern __must_check long strlen_user(const char __user *str);
15744 extern __must_check long strnlen_user(const char __user *str, long n);
15745
15746-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
15747-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
15748+unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
15749+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
15750
15751 /*
15752 * movsl can be slow when source and dest are not both 8-byte aligned
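
/*
 * Illustrative sketch (not from the patch): under UDEREF on amd64 the
 * user half of the address space is reachable by the kernel only through
 * a shifted "shadow" mapping, and ____m() above rebases user pointers
 * into it before any accessor dereferences them.  The rebasing arithmetic
 * with a made-up base value (the real PAX_USER_SHADOW_BASE differs):
 */
#include <assert.h>

#define DEMO_USER_SHADOW_BASE	0x100000UL	/* illustrative only */

static unsigned long rebase_user_ptr(unsigned long addr)
{
	if (addr < DEMO_USER_SHADOW_BASE)
		addr += DEMO_USER_SHADOW_BASE;	/* move into the shadow window */
	return addr;
}

int main(void)
{
	/* a "user" pointer below the base gets shifted ... */
	assert(rebase_user_ptr(0x1000UL) == DEMO_USER_SHADOW_BASE + 0x1000UL);
	/* ... one already inside the shadow window is left alone */
	assert(rebase_user_ptr(DEMO_USER_SHADOW_BASE + 8) == DEMO_USER_SHADOW_BASE + 8);
	return 0;
}
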
15753diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
15754index 7f760a9..04b1c65 100644
15755--- a/arch/x86/include/asm/uaccess_32.h
15756+++ b/arch/x86/include/asm/uaccess_32.h
15757@@ -11,15 +11,15 @@
15758 #include <asm/page.h>
15759
15760 unsigned long __must_check __copy_to_user_ll
15761- (void __user *to, const void *from, unsigned long n);
15762+ (void __user *to, const void *from, unsigned long n) __size_overflow(3);
15763 unsigned long __must_check __copy_from_user_ll
15764- (void *to, const void __user *from, unsigned long n);
15765+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
15766 unsigned long __must_check __copy_from_user_ll_nozero
15767- (void *to, const void __user *from, unsigned long n);
15768+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
15769 unsigned long __must_check __copy_from_user_ll_nocache
15770- (void *to, const void __user *from, unsigned long n);
15771+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
15772 unsigned long __must_check __copy_from_user_ll_nocache_nozero
15773- (void *to, const void __user *from, unsigned long n);
15774+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
15775
15776 /**
15777 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
15778@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
15779 static __always_inline unsigned long __must_check
15780 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
15781 {
15782+ if ((long)n < 0)
15783+ return n;
15784+
15785+ check_object_size(from, n, true);
15786+
15787 if (__builtin_constant_p(n)) {
15788 unsigned long ret;
15789
15790@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
15791 __copy_to_user(void __user *to, const void *from, unsigned long n)
15792 {
15793 might_fault();
15794+
15795 return __copy_to_user_inatomic(to, from, n);
15796 }
15797
15798 static __always_inline unsigned long
15799 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
15800 {
15801+ if ((long)n < 0)
15802+ return n;
15803+
15804 /* Avoid zeroing the tail if the copy fails..
15805 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
15806 * but as the zeroing behaviour is only significant when n is not
15807@@ -137,6 +146,12 @@ static __always_inline unsigned long
15808 __copy_from_user(void *to, const void __user *from, unsigned long n)
15809 {
15810 might_fault();
15811+
15812+ if ((long)n < 0)
15813+ return n;
15814+
15815+ check_object_size(to, n, false);
15816+
15817 if (__builtin_constant_p(n)) {
15818 unsigned long ret;
15819
15820@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
15821 const void __user *from, unsigned long n)
15822 {
15823 might_fault();
15824+
15825+ if ((long)n < 0)
15826+ return n;
15827+
15828 if (__builtin_constant_p(n)) {
15829 unsigned long ret;
15830
15831@@ -181,15 +200,19 @@ static __always_inline unsigned long
15832 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
15833 unsigned long n)
15834 {
15835- return __copy_from_user_ll_nocache_nozero(to, from, n);
15836+ if ((long)n < 0)
15837+ return n;
15838+
15839+ return __copy_from_user_ll_nocache_nozero(to, from, n);
15840 }
15841
15842-unsigned long __must_check copy_to_user(void __user *to,
15843- const void *from, unsigned long n);
15844-unsigned long __must_check _copy_from_user(void *to,
15845- const void __user *from,
15846- unsigned long n);
15847-
15848+extern void copy_to_user_overflow(void)
15849+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
15850+ __compiletime_error("copy_to_user() buffer size is not provably correct")
15851+#else
15852+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
15853+#endif
15854+;
15855
15856 extern void copy_from_user_overflow(void)
15857 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
15858@@ -199,17 +222,60 @@ extern void copy_from_user_overflow(void)
15859 #endif
15860 ;
15861
15862-static inline unsigned long __must_check copy_from_user(void *to,
15863- const void __user *from,
15864- unsigned long n)
15865+/**
15866+ * copy_to_user: - Copy a block of data into user space.
15867+ * @to: Destination address, in user space.
15868+ * @from: Source address, in kernel space.
15869+ * @n: Number of bytes to copy.
15870+ *
15871+ * Context: User context only. This function may sleep.
15872+ *
15873+ * Copy data from kernel space to user space.
15874+ *
15875+ * Returns number of bytes that could not be copied.
15876+ * On success, this will be zero.
15877+ */
15878+static inline unsigned long __must_check
15879+copy_to_user(void __user *to, const void *from, unsigned long n)
15880 {
15881- int sz = __compiletime_object_size(to);
15882+ size_t sz = __compiletime_object_size(from);
15883
15884- if (likely(sz == -1 || sz >= n))
15885- n = _copy_from_user(to, from, n);
15886- else
15887+ if (unlikely(sz != (size_t)-1 && sz < n))
15888+ copy_to_user_overflow();
15889+ else if (access_ok(VERIFY_WRITE, to, n))
15890+ n = __copy_to_user(to, from, n);
15891+ return n;
15892+}
15893+
15894+/**
15895+ * copy_from_user: - Copy a block of data from user space.
15896+ * @to: Destination address, in kernel space.
15897+ * @from: Source address, in user space.
15898+ * @n: Number of bytes to copy.
15899+ *
15900+ * Context: User context only. This function may sleep.
15901+ *
15902+ * Copy data from user space to kernel space.
15903+ *
15904+ * Returns number of bytes that could not be copied.
15905+ * On success, this will be zero.
15906+ *
15907+ * If some data could not be copied, this function will pad the copied
15908+ * data to the requested size using zero bytes.
15909+ */
15910+static inline unsigned long __must_check
15911+copy_from_user(void *to, const void __user *from, unsigned long n)
15912+{
15913+ size_t sz = __compiletime_object_size(to);
15914+
15915+ check_object_size(to, n, false);
15916+
15917+ if (unlikely(sz != (size_t)-1 && sz < n))
15918 copy_from_user_overflow();
15919-
15920+ else if (access_ok(VERIFY_READ, from, n))
15921+ n = __copy_from_user(to, from, n);
15922+ else if ((long)n > 0)
15923+ memset(to, 0, n);
15924 return n;
15925 }
15926
15927diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
15928index 142810c..747941a 100644
15929--- a/arch/x86/include/asm/uaccess_64.h
15930+++ b/arch/x86/include/asm/uaccess_64.h
15931@@ -10,6 +10,9 @@
15932 #include <asm/alternative.h>
15933 #include <asm/cpufeature.h>
15934 #include <asm/page.h>
15935+#include <asm/pgtable.h>
15936+
15937+#define set_fs(x) (current_thread_info()->addr_limit = (x))
15938
15939 /*
15940 * Copy To/From Userspace
15941@@ -17,13 +20,13 @@
15942
15943 /* Handles exceptions in both to and from, but doesn't do access_ok */
15944 __must_check unsigned long
15945-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
15946+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
15947 __must_check unsigned long
15948-copy_user_generic_string(void *to, const void *from, unsigned len);
15949+copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
15950 __must_check unsigned long
15951-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
15952+copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
15953
15954-static __always_inline __must_check unsigned long
15955+static __always_inline __must_check __size_overflow(3) unsigned long
15956 copy_user_generic(void *to, const void *from, unsigned len)
15957 {
15958 unsigned ret;
15959@@ -41,142 +44,204 @@ copy_user_generic(void *to, const void *from, unsigned len)
15960 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
15961 "=d" (len)),
15962 "1" (to), "2" (from), "3" (len)
15963- : "memory", "rcx", "r8", "r9", "r10", "r11");
15964+ : "memory", "rcx", "r8", "r9", "r11");
15965 return ret;
15966 }
15967
15968+static __always_inline __must_check unsigned long
15969+__copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);
15970+static __always_inline __must_check unsigned long
15971+__copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3);
15972 __must_check unsigned long
15973-_copy_to_user(void __user *to, const void *from, unsigned len);
15974-__must_check unsigned long
15975-_copy_from_user(void *to, const void __user *from, unsigned len);
15976-__must_check unsigned long
15977-copy_in_user(void __user *to, const void __user *from, unsigned len);
15978+copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
15979+
15980+extern void copy_to_user_overflow(void)
15981+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
15982+ __compiletime_error("copy_to_user() buffer size is not provably correct")
15983+#else
15984+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
15985+#endif
15986+;
15987+
15988+extern void copy_from_user_overflow(void)
15989+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
15990+ __compiletime_error("copy_from_user() buffer size is not provably correct")
15991+#else
15992+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
15993+#endif
15994+;
15995
15996 static inline unsigned long __must_check copy_from_user(void *to,
15997 const void __user *from,
15998 unsigned long n)
15999 {
16000- int sz = __compiletime_object_size(to);
16001-
16002 might_fault();
16003- if (likely(sz == -1 || sz >= n))
16004- n = _copy_from_user(to, from, n);
16005-#ifdef CONFIG_DEBUG_VM
16006- else
16007- WARN(1, "Buffer overflow detected!\n");
16008-#endif
16009+
16010+ check_object_size(to, n, false);
16011+
16012+ if (access_ok(VERIFY_READ, from, n))
16013+ n = __copy_from_user(to, from, n);
16014+ else if (n < INT_MAX)
16015+ memset(to, 0, n);
16016 return n;
16017 }
16018
16019 static __always_inline __must_check
16020-int copy_to_user(void __user *dst, const void *src, unsigned size)
16021+int copy_to_user(void __user *dst, const void *src, unsigned long size)
16022 {
16023 might_fault();
16024
16025- return _copy_to_user(dst, src, size);
16026+ if (access_ok(VERIFY_WRITE, dst, size))
16027+ size = __copy_to_user(dst, src, size);
16028+ return size;
16029 }
16030
16031 static __always_inline __must_check
16032-int __copy_from_user(void *dst, const void __user *src, unsigned size)
16033+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
16034 {
16035- int ret = 0;
16036+ size_t sz = __compiletime_object_size(dst);
16037+ unsigned ret = 0;
16038
16039 might_fault();
16040+
16041+ if (size > INT_MAX)
16042+ return size;
16043+
16044+ check_object_size(dst, size, false);
16045+
16046+#ifdef CONFIG_PAX_MEMORY_UDEREF
16047+ if (!__access_ok(VERIFY_READ, src, size))
16048+ return size;
16049+#endif
16050+
16051+ if (unlikely(sz != (size_t)-1 && sz < size)) {
16052+ copy_from_user_overflow();
16053+ return size;
16054+ }
16055+
16056 if (!__builtin_constant_p(size))
16057- return copy_user_generic(dst, (__force void *)src, size);
16058+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
16059 switch (size) {
16060- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
16061+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
16062 ret, "b", "b", "=q", 1);
16063 return ret;
16064- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
16065+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
16066 ret, "w", "w", "=r", 2);
16067 return ret;
16068- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
16069+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
16070 ret, "l", "k", "=r", 4);
16071 return ret;
16072- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
16073+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
16074 ret, "q", "", "=r", 8);
16075 return ret;
16076 case 10:
16077- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
16078+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
16079 ret, "q", "", "=r", 10);
16080 if (unlikely(ret))
16081 return ret;
16082 __get_user_asm(*(u16 *)(8 + (char *)dst),
16083- (u16 __user *)(8 + (char __user *)src),
16084+ (const u16 __user *)(8 + (const char __user *)src),
16085 ret, "w", "w", "=r", 2);
16086 return ret;
16087 case 16:
16088- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
16089+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
16090 ret, "q", "", "=r", 16);
16091 if (unlikely(ret))
16092 return ret;
16093 __get_user_asm(*(u64 *)(8 + (char *)dst),
16094- (u64 __user *)(8 + (char __user *)src),
16095+ (const u64 __user *)(8 + (const char __user *)src),
16096 ret, "q", "", "=r", 8);
16097 return ret;
16098 default:
16099- return copy_user_generic(dst, (__force void *)src, size);
16100+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
16101 }
16102 }
16103
16104 static __always_inline __must_check
16105-int __copy_to_user(void __user *dst, const void *src, unsigned size)
16106+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
16107 {
16108- int ret = 0;
16109+ size_t sz = __compiletime_object_size(src);
16110+ unsigned ret = 0;
16111
16112 might_fault();
16113+
16114+ if (size > INT_MAX)
16115+ return size;
16116+
16117+ check_object_size(src, size, true);
16118+
16119+#ifdef CONFIG_PAX_MEMORY_UDEREF
16120+ if (!__access_ok(VERIFY_WRITE, dst, size))
16121+ return size;
16122+#endif
16123+
16124+ if (unlikely(sz != (size_t)-1 && sz < size)) {
16125+ copy_to_user_overflow();
16126+ return size;
16127+ }
16128+
16129 if (!__builtin_constant_p(size))
16130- return copy_user_generic((__force void *)dst, src, size);
16131+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
16132 switch (size) {
16133- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
16134+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
16135 ret, "b", "b", "iq", 1);
16136 return ret;
16137- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
16138+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
16139 ret, "w", "w", "ir", 2);
16140 return ret;
16141- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
16142+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
16143 ret, "l", "k", "ir", 4);
16144 return ret;
16145- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
16146+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
16147 ret, "q", "", "er", 8);
16148 return ret;
16149 case 10:
16150- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
16151+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
16152 ret, "q", "", "er", 10);
16153 if (unlikely(ret))
16154 return ret;
16155 asm("":::"memory");
16156- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
16157+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
16158 ret, "w", "w", "ir", 2);
16159 return ret;
16160 case 16:
16161- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
16162+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
16163 ret, "q", "", "er", 16);
16164 if (unlikely(ret))
16165 return ret;
16166 asm("":::"memory");
16167- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
16168+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
16169 ret, "q", "", "er", 8);
16170 return ret;
16171 default:
16172- return copy_user_generic((__force void *)dst, src, size);
16173+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
16174 }
16175 }
16176
16177 static __always_inline __must_check
16178-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16179+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
16180 {
16181- int ret = 0;
16182+ unsigned ret = 0;
16183
16184 might_fault();
16185+
16186+ if (size > INT_MAX)
16187+ return size;
16188+
16189+#ifdef CONFIG_PAX_MEMORY_UDEREF
16190+ if (!__access_ok(VERIFY_READ, src, size))
16191+ return size;
16192+ if (!__access_ok(VERIFY_WRITE, dst, size))
16193+ return size;
16194+#endif
16195+
16196 if (!__builtin_constant_p(size))
16197- return copy_user_generic((__force void *)dst,
16198- (__force void *)src, size);
16199+ return copy_user_generic((__force_kernel void *)____m(dst),
16200+ (__force_kernel const void *)____m(src), size);
16201 switch (size) {
16202 case 1: {
16203 u8 tmp;
16204- __get_user_asm(tmp, (u8 __user *)src,
16205+ __get_user_asm(tmp, (const u8 __user *)src,
16206 ret, "b", "b", "=q", 1);
16207 if (likely(!ret))
16208 __put_user_asm(tmp, (u8 __user *)dst,
16209@@ -185,7 +250,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16210 }
16211 case 2: {
16212 u16 tmp;
16213- __get_user_asm(tmp, (u16 __user *)src,
16214+ __get_user_asm(tmp, (const u16 __user *)src,
16215 ret, "w", "w", "=r", 2);
16216 if (likely(!ret))
16217 __put_user_asm(tmp, (u16 __user *)dst,
16218@@ -195,7 +260,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16219
16220 case 4: {
16221 u32 tmp;
16222- __get_user_asm(tmp, (u32 __user *)src,
16223+ __get_user_asm(tmp, (const u32 __user *)src,
16224 ret, "l", "k", "=r", 4);
16225 if (likely(!ret))
16226 __put_user_asm(tmp, (u32 __user *)dst,
16227@@ -204,7 +269,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16228 }
16229 case 8: {
16230 u64 tmp;
16231- __get_user_asm(tmp, (u64 __user *)src,
16232+ __get_user_asm(tmp, (const u64 __user *)src,
16233 ret, "q", "", "=r", 8);
16234 if (likely(!ret))
16235 __put_user_asm(tmp, (u64 __user *)dst,
16236@@ -212,41 +277,72 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16237 return ret;
16238 }
16239 default:
16240- return copy_user_generic((__force void *)dst,
16241- (__force void *)src, size);
16242+ return copy_user_generic((__force_kernel void *)____m(dst),
16243+ (__force_kernel const void *)____m(src), size);
16244 }
16245 }
16246
16247 static __must_check __always_inline int
16248-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
16249+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
16250 {
16251- return copy_user_generic(dst, (__force const void *)src, size);
16252+ if (size > INT_MAX)
16253+ return size;
16254+
16255+#ifdef CONFIG_PAX_MEMORY_UDEREF
16256+ if (!__access_ok(VERIFY_READ, src, size))
16257+ return size;
16258+#endif
16259+
16260+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
16261 }
16262
16263-static __must_check __always_inline int
16264-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
16265+static __must_check __always_inline unsigned long
16266+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
16267 {
16268- return copy_user_generic((__force void *)dst, src, size);
16269+ if (size > INT_MAX)
16270+ return size;
16271+
16272+#ifdef CONFIG_PAX_MEMORY_UDEREF
16273+ if (!__access_ok(VERIFY_WRITE, dst, size))
16274+ return size;
16275+#endif
16276+
16277+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
16278 }
16279
16280-extern long __copy_user_nocache(void *dst, const void __user *src,
16281- unsigned size, int zerorest);
16282+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
16283+ unsigned long size, int zerorest) __size_overflow(3);
16284
16285-static inline int
16286-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
16287+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
16288 {
16289 might_sleep();
16290+
16291+ if (size > INT_MAX)
16292+ return size;
16293+
16294+#ifdef CONFIG_PAX_MEMORY_UDEREF
16295+ if (!__access_ok(VERIFY_READ, src, size))
16296+ return size;
16297+#endif
16298+
16299 return __copy_user_nocache(dst, src, size, 1);
16300 }
16301
16302-static inline int
16303-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
16304- unsigned size)
16305+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
16306+ unsigned long size)
16307 {
16308+ if (size > INT_MAX)
16309+ return size;
16310+
16311+#ifdef CONFIG_PAX_MEMORY_UDEREF
16312+ if (!__access_ok(VERIFY_READ, src, size))
16313+ return size;
16314+#endif
16315+
16316 return __copy_user_nocache(dst, src, size, 0);
16317 }
16318
16319-unsigned long
16320-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
16321+extern unsigned long
16322+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
16323
16324 #endif /* _ASM_X86_UACCESS_64_H */
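
/*
 * Illustrative sketch (not from the patch): the "size > INT_MAX" guards
 * above catch underflowed lengths.  A length computed as a negative
 * signed value and passed through an unsigned long parameter becomes a
 * huge positive number, so one comparison rejects the whole class.  The
 * return convention mirrors the kernel's: bytes *not* copied, so
 * returning size means total failure.
 */
#include <assert.h>
#include <limits.h>

static unsigned long demo_copy(unsigned long size)
{
	if (size > INT_MAX)	/* e.g. (unsigned long)-1 from a bad subtraction */
		return size;	/* refuse: everything left uncopied */
	/* ... the actual copy would happen here ... */
	return 0;		/* nothing left uncopied: success */
}

int main(void)
{
	long bad = -1;		/* a typical underflowed length */

	assert(demo_copy((unsigned long)bad) == (unsigned long)bad);
	assert(demo_copy(4096) == 0);
	return 0;
}
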
16325diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
16326index 5b238981..77fdd78 100644
16327--- a/arch/x86/include/asm/word-at-a-time.h
16328+++ b/arch/x86/include/asm/word-at-a-time.h
16329@@ -11,7 +11,7 @@
16330 * and shift, for example.
16331 */
16332 struct word_at_a_time {
16333- const unsigned long one_bits, high_bits;
16334+ unsigned long one_bits, high_bits;
16335 };
16336
16337 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
16338diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
16339index 5769349..a3d3e2a 100644
16340--- a/arch/x86/include/asm/x86_init.h
16341+++ b/arch/x86/include/asm/x86_init.h
16342@@ -141,7 +141,7 @@ struct x86_init_ops {
16343 struct x86_init_timers timers;
16344 struct x86_init_iommu iommu;
16345 struct x86_init_pci pci;
16346-};
16347+} __no_const;
16348
16349 /**
16350 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
16351@@ -152,7 +152,7 @@ struct x86_cpuinit_ops {
16352 void (*setup_percpu_clockev)(void);
16353 void (*early_percpu_clock_init)(void);
16354 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
16355-};
16356+} __no_const;
16357
16358 /**
16359 * struct x86_platform_ops - platform specific runtime functions
16360@@ -178,7 +178,7 @@ struct x86_platform_ops {
16361 void (*save_sched_clock_state)(void);
16362 void (*restore_sched_clock_state)(void);
16363 void (*apic_post_init)(void);
16364-};
16365+} __no_const;
16366
16367 struct pci_dev;
16368
16369@@ -187,14 +187,14 @@ struct x86_msi_ops {
16370 void (*teardown_msi_irq)(unsigned int irq);
16371 void (*teardown_msi_irqs)(struct pci_dev *dev);
16372 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
16373-};
16374+} __no_const;
16375
16376 struct x86_io_apic_ops {
16377 void (*init) (void);
16378 unsigned int (*read) (unsigned int apic, unsigned int reg);
16379 void (*write) (unsigned int apic, unsigned int reg, unsigned int value);
16380 void (*modify)(unsigned int apic, unsigned int reg, unsigned int value);
16381-};
16382+} __no_const;
16383
16384 extern struct x86_init_ops x86_init;
16385 extern struct x86_cpuinit_ops x86_cpuinit;
16386diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
16387index 0415cda..b43d877 100644
16388--- a/arch/x86/include/asm/xsave.h
16389+++ b/arch/x86/include/asm/xsave.h
16390@@ -71,7 +71,9 @@ static inline int xsave_user(struct xsave_struct __user *buf)
16391 return -EFAULT;
16392
16393 __asm__ __volatile__(ASM_STAC "\n"
16394- "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
16395+ "1:"
16396+ __copyuser_seg
16397+ ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
16398 "2: " ASM_CLAC "\n"
16399 ".section .fixup,\"ax\"\n"
16400 "3: movl $-1,%[err]\n"
16401@@ -87,12 +89,14 @@ static inline int xsave_user(struct xsave_struct __user *buf)
16402 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
16403 {
16404 int err;
16405- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
16406+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
16407 u32 lmask = mask;
16408 u32 hmask = mask >> 32;
16409
16410 __asm__ __volatile__(ASM_STAC "\n"
16411- "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
16412+ "1:"
16413+ __copyuser_seg
16414+ ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
16415 "2: " ASM_CLAC "\n"
16416 ".section .fixup,\"ax\"\n"
16417 "3: movl $-1,%[err]\n"
16418diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
16419index bbae024..e1528f9 100644
16420--- a/arch/x86/include/uapi/asm/e820.h
16421+++ b/arch/x86/include/uapi/asm/e820.h
16422@@ -63,7 +63,7 @@ struct e820map {
16423 #define ISA_START_ADDRESS 0xa0000
16424 #define ISA_END_ADDRESS 0x100000
16425
16426-#define BIOS_BEGIN 0x000a0000
16427+#define BIOS_BEGIN 0x000c0000
16428 #define BIOS_END 0x00100000
16429
16430 #define BIOS_ROM_BASE 0xffe00000
16431diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
16432index 34e923a..0c6bb6e 100644
16433--- a/arch/x86/kernel/Makefile
16434+++ b/arch/x86/kernel/Makefile
16435@@ -22,7 +22,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
16436 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
16437 obj-$(CONFIG_IRQ_WORK) += irq_work.o
16438 obj-y += probe_roms.o
16439-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
16440+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
16441 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
16442 obj-y += syscall_$(BITS).o
16443 obj-$(CONFIG_X86_64) += vsyscall_64.o
16444diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
16445index bacf4b0..4ede72e 100644
16446--- a/arch/x86/kernel/acpi/boot.c
16447+++ b/arch/x86/kernel/acpi/boot.c
16448@@ -1358,7 +1358,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
16449 * If your system is blacklisted here, but you find that acpi=force
16450 * works for you, please contact linux-acpi@vger.kernel.org
16451 */
16452-static struct dmi_system_id __initdata acpi_dmi_table[] = {
16453+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
16454 /*
16455 * Boxes that need ACPI disabled
16456 */
16457@@ -1433,7 +1433,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
16458 };
16459
16460 /* second table for DMI checks that should run after early-quirks */
16461-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
16462+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
16463 /*
16464 * HP laptops which use a DSDT reporting as HP/SB400/10000,
16465 * which includes some code which overrides all temperature
16466diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
16467index d5e0d71..6533e08 100644
16468--- a/arch/x86/kernel/acpi/sleep.c
16469+++ b/arch/x86/kernel/acpi/sleep.c
16470@@ -74,8 +74,12 @@ int acpi_suspend_lowlevel(void)
16471 #else /* CONFIG_64BIT */
16472 #ifdef CONFIG_SMP
16473 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
16474+
16475+ pax_open_kernel();
16476 early_gdt_descr.address =
16477 (unsigned long)get_cpu_gdt_table(smp_processor_id());
16478+ pax_close_kernel();
16479+
16480 initial_gs = per_cpu_offset(smp_processor_id());
16481 #endif
16482 initial_code = (unsigned long)wakeup_long64;
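
/*
 * Illustrative sketch (not from the patch): pax_open_kernel() /
 * pax_close_kernel() bracket writes to data that KERNEXEC keeps read-only
 * (here the early GDT descriptor).  Treat the bodies below as an
 * assumption, not the patch's exact code: the idea on x86 is to toggle
 * the CR0 write-protect bit with preemption disabled.  Kernel context
 * only; headers as found in kernels of this era.
 */
#include <linux/preempt.h>
#include <asm/processor.h>	/* read_cr0()/write_cr0() */

static inline void demo_pax_open_kernel(void)		/* sketch only */
{
	preempt_disable();
	write_cr0(read_cr0() & ~X86_CR0_WP);	/* ring 0 may write RO pages */
}

static inline void demo_pax_close_kernel(void)		/* sketch only */
{
	write_cr0(read_cr0() | X86_CR0_WP);	/* re-arm write protection */
	preempt_enable();
}
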
16483diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
16484index 13ab720..95d5442 100644
16485--- a/arch/x86/kernel/acpi/wakeup_32.S
16486+++ b/arch/x86/kernel/acpi/wakeup_32.S
16487@@ -30,13 +30,11 @@ wakeup_pmode_return:
16488 # and restore the stack ... but you need gdt for this to work
16489 movl saved_context_esp, %esp
16490
16491- movl %cs:saved_magic, %eax
16492- cmpl $0x12345678, %eax
16493+ cmpl $0x12345678, saved_magic
16494 jne bogus_magic
16495
16496 # jump to place where we left off
16497- movl saved_eip, %eax
16498- jmp *%eax
16499+ jmp *(saved_eip)
16500
16501 bogus_magic:
16502 jmp bogus_magic
16503diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
16504index ef5ccca..bd83949 100644
16505--- a/arch/x86/kernel/alternative.c
16506+++ b/arch/x86/kernel/alternative.c
16507@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
16508 */
16509 for (a = start; a < end; a++) {
16510 instr = (u8 *)&a->instr_offset + a->instr_offset;
16511+
16512+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16513+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16514+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
16515+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16516+#endif
16517+
16518 replacement = (u8 *)&a->repl_offset + a->repl_offset;
16519 BUG_ON(a->replacementlen > a->instrlen);
16520 BUG_ON(a->instrlen > sizeof(insnbuf));
16521@@ -299,10 +306,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
16522 for (poff = start; poff < end; poff++) {
16523 u8 *ptr = (u8 *)poff + *poff;
16524
16525+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16526+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16527+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
16528+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16529+#endif
16530+
16531 if (!*poff || ptr < text || ptr >= text_end)
16532 continue;
16533 /* turn DS segment override prefix into lock prefix */
16534- if (*ptr == 0x3e)
16535+ if (*ktla_ktva(ptr) == 0x3e)
16536 text_poke(ptr, ((unsigned char []){0xf0}), 1);
16537 }
16538 mutex_unlock(&text_mutex);
16539@@ -317,10 +330,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
16540 for (poff = start; poff < end; poff++) {
16541 u8 *ptr = (u8 *)poff + *poff;
16542
16543+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16544+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16545+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
16546+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16547+#endif
16548+
16549 if (!*poff || ptr < text || ptr >= text_end)
16550 continue;
16551 /* turn lock prefix into DS segment override prefix */
16552- if (*ptr == 0xf0)
16553+ if (*ktla_ktva(ptr) == 0xf0)
16554 text_poke(ptr, ((unsigned char []){0x3E}), 1);
16555 }
16556 mutex_unlock(&text_mutex);
16557@@ -468,7 +487,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
16558
16559 BUG_ON(p->len > MAX_PATCH_LEN);
16560 /* prep the buffer with the original instructions */
16561- memcpy(insnbuf, p->instr, p->len);
16562+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
16563 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
16564 (unsigned long)p->instr, p->len);
16565
16566@@ -515,7 +534,7 @@ void __init alternative_instructions(void)
16567 if (!uniproc_patched || num_possible_cpus() == 1)
16568 free_init_pages("SMP alternatives",
16569 (unsigned long)__smp_locks,
16570- (unsigned long)__smp_locks_end);
16571+ PAGE_ALIGN((unsigned long)__smp_locks_end));
16572 #endif
16573
16574 apply_paravirt(__parainstructions, __parainstructions_end);
16575@@ -535,13 +554,17 @@ void __init alternative_instructions(void)
16576 * instructions. And on the local CPU you need to be protected against NMI or MCE
16577 * handlers seeing an inconsistent instruction while you patch.
16578 */
16579-void *__init_or_module text_poke_early(void *addr, const void *opcode,
16580+void *__kprobes text_poke_early(void *addr, const void *opcode,
16581 size_t len)
16582 {
16583 unsigned long flags;
16584 local_irq_save(flags);
16585- memcpy(addr, opcode, len);
16586+
16587+ pax_open_kernel();
16588+ memcpy(ktla_ktva(addr), opcode, len);
16589 sync_core();
16590+ pax_close_kernel();
16591+
16592 local_irq_restore(flags);
16593 /* Could also do a CLFLUSH here to speed up CPU recovery; but
16594 that causes hangs on some VIA CPUs. */
16595@@ -563,36 +586,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
16596 */
16597 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
16598 {
16599- unsigned long flags;
16600- char *vaddr;
16601+ unsigned char *vaddr = ktla_ktva(addr);
16602 struct page *pages[2];
16603- int i;
16604+ size_t i;
16605
16606 if (!core_kernel_text((unsigned long)addr)) {
16607- pages[0] = vmalloc_to_page(addr);
16608- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
16609+ pages[0] = vmalloc_to_page(vaddr);
16610+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
16611 } else {
16612- pages[0] = virt_to_page(addr);
16613+ pages[0] = virt_to_page(vaddr);
16614 WARN_ON(!PageReserved(pages[0]));
16615- pages[1] = virt_to_page(addr + PAGE_SIZE);
16616+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
16617 }
16618 BUG_ON(!pages[0]);
16619- local_irq_save(flags);
16620- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
16621- if (pages[1])
16622- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
16623- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
16624- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
16625- clear_fixmap(FIX_TEXT_POKE0);
16626- if (pages[1])
16627- clear_fixmap(FIX_TEXT_POKE1);
16628- local_flush_tlb();
16629- sync_core();
16630- /* Could also do a CLFLUSH here to speed up CPU recovery; but
16631- that causes hangs on some VIA CPUs. */
16632+ text_poke_early(addr, opcode, len);
16633 for (i = 0; i < len; i++)
16634- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
16635- local_irq_restore(flags);
16636+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
16637 return addr;
16638 }
16639
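All three alternative.c hunks above funnel instruction patching through ktla_ktva(): under 32-bit PAX_KERNEXEC the kernel image is mapped read-only at its linked address and writable at a fixed alias, so a patch site's address must be translated before the bytes can be written. A minimal userspace sketch of that translation (the helper name and offset are illustrative assumptions, not the patch's real values):

#include <stdint.h>
#include <stdio.h>

/* hypothetical distance between the read-only text and its writable alias */
#define KERNEXEC_TEXT_OFFSET 0x10000000UL

/* kernel text linear address -> writable virtual alias, cf. ktla_ktva() */
static void *ktla_ktva_demo(const void *addr)
{
        return (void *)((uintptr_t)addr + KERNEXEC_TEXT_OFFSET);
}

int main(void)
{
        const void *patch_site = (const void *)0xc1000000UL;
        printf("read at %p, patch via %p\n", patch_site,
               ktla_ktva_demo(patch_site));
        return 0;
}
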
16640diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
16641index cbf5121..812b537 100644
16642--- a/arch/x86/kernel/apic/apic.c
16643+++ b/arch/x86/kernel/apic/apic.c
16644@@ -189,7 +189,7 @@ int first_system_vector = 0xfe;
16645 /*
16646 * Debug level, exported for io_apic.c
16647 */
16648-unsigned int apic_verbosity;
16649+int apic_verbosity;
16650
16651 int pic_mode;
16652
16653@@ -1956,7 +1956,7 @@ void smp_error_interrupt(struct pt_regs *regs)
16654 apic_write(APIC_ESR, 0);
16655 v1 = apic_read(APIC_ESR);
16656 ack_APIC_irq();
16657- atomic_inc(&irq_err_count);
16658+ atomic_inc_unchecked(&irq_err_count);
16659
16660 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
16661 smp_processor_id(), v0 , v1);
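irq_err_count is a pure statistic, so the hunk above moves it to atomic_inc_unchecked(): with PAX_REFCOUNT enabled the regular atomic ops trap on signed overflow to stop reference-count wraps, and counters that may legitimately wrap have to opt out. A userspace analogue of the two behaviours (helper names are ours):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* refcount-style increment: an overflow is a bug, so trap on it */
static int checked_inc(int *v)
{
        int out;
        if (__builtin_add_overflow(*v, 1, &out))
                abort();                /* PAX_REFCOUNT raises a trap here */
        return *v = out;
}

/* statistics-style increment: wrapping is harmless, let it happen */
static unsigned int unchecked_inc(unsigned int *v)
{
        return ++*v;
}

int main(void)
{
        unsigned int errs = UINT_MAX;
        printf("error counter wraps to %u\n", unchecked_inc(&errs));

        int refs = INT_MAX;
        checked_inc(&refs);             /* aborts: a real refcount overflow */
        return 0;
}
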
16662diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
16663index 00c77cf..2dc6a2d 100644
16664--- a/arch/x86/kernel/apic/apic_flat_64.c
16665+++ b/arch/x86/kernel/apic/apic_flat_64.c
16666@@ -157,7 +157,7 @@ static int flat_probe(void)
16667 return 1;
16668 }
16669
16670-static struct apic apic_flat = {
16671+static struct apic apic_flat __read_only = {
16672 .name = "flat",
16673 .probe = flat_probe,
16674 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
16675@@ -271,7 +271,7 @@ static int physflat_probe(void)
16676 return 0;
16677 }
16678
16679-static struct apic apic_physflat = {
16680+static struct apic apic_physflat __read_only = {
16681
16682 .name = "physical flat",
16683 .probe = physflat_probe,
16684diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
16685index e145f28..2752888 100644
16686--- a/arch/x86/kernel/apic/apic_noop.c
16687+++ b/arch/x86/kernel/apic/apic_noop.c
16688@@ -119,7 +119,7 @@ static void noop_apic_write(u32 reg, u32 v)
16689 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
16690 }
16691
16692-struct apic apic_noop = {
16693+struct apic apic_noop __read_only = {
16694 .name = "noop",
16695 .probe = noop_probe,
16696 .acpi_madt_oem_check = NULL,
16697diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
16698index d50e364..543bee3 100644
16699--- a/arch/x86/kernel/apic/bigsmp_32.c
16700+++ b/arch/x86/kernel/apic/bigsmp_32.c
16701@@ -152,7 +152,7 @@ static int probe_bigsmp(void)
16702 return dmi_bigsmp;
16703 }
16704
16705-static struct apic apic_bigsmp = {
16706+static struct apic apic_bigsmp __read_only = {
16707
16708 .name = "bigsmp",
16709 .probe = probe_bigsmp,
16710diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
16711index 0874799..a7a7892 100644
16712--- a/arch/x86/kernel/apic/es7000_32.c
16713+++ b/arch/x86/kernel/apic/es7000_32.c
16714@@ -608,8 +608,7 @@ static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
16715 return ret && es7000_apic_is_cluster();
16716 }
16717
16718-/* We've been warned by a false positive warning. Use __refdata to keep calm. */
16719-static struct apic __refdata apic_es7000_cluster = {
16720+static struct apic apic_es7000_cluster __read_only = {
16721
16722 .name = "es7000",
16723 .probe = probe_es7000,
16724@@ -675,7 +674,7 @@ static struct apic __refdata apic_es7000_cluster = {
16725 .x86_32_early_logical_apicid = es7000_early_logical_apicid,
16726 };
16727
16728-static struct apic __refdata apic_es7000 = {
16729+static struct apic apic_es7000 __read_only = {
16730
16731 .name = "es7000",
16732 .probe = probe_es7000,
16733diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
16734index b739d39..aebc14c 100644
16735--- a/arch/x86/kernel/apic/io_apic.c
16736+++ b/arch/x86/kernel/apic/io_apic.c
16737@@ -1084,7 +1084,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
16738 }
16739 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
16740
16741-void lock_vector_lock(void)
16742+void lock_vector_lock(void) __acquires(vector_lock)
16743 {
16744 /* Used so that the online set of cpus does not change
16745 * during assign_irq_vector.
16746@@ -1092,7 +1092,7 @@ void lock_vector_lock(void)
16747 raw_spin_lock(&vector_lock);
16748 }
16749
16750-void unlock_vector_lock(void)
16751+void unlock_vector_lock(void) __releases(vector_lock)
16752 {
16753 raw_spin_unlock(&vector_lock);
16754 }
16755@@ -2399,7 +2399,7 @@ static void ack_apic_edge(struct irq_data *data)
16756 ack_APIC_irq();
16757 }
16758
16759-atomic_t irq_mis_count;
16760+atomic_unchecked_t irq_mis_count;
16761
16762 #ifdef CONFIG_GENERIC_PENDING_IRQ
16763 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
16764@@ -2540,7 +2540,7 @@ static void ack_apic_level(struct irq_data *data)
16765 * at the cpu.
16766 */
16767 if (!(v & (1 << (i & 0x1f)))) {
16768- atomic_inc(&irq_mis_count);
16769+ atomic_inc_unchecked(&irq_mis_count);
16770
16771 eoi_ioapic_irq(irq, cfg);
16772 }
16773@@ -2567,11 +2567,13 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
16774
16775 static void irq_remap_modify_chip_defaults(struct irq_chip *chip)
16776 {
16777- chip->irq_print_chip = ir_print_prefix;
16778- chip->irq_ack = ir_ack_apic_edge;
16779- chip->irq_eoi = ir_ack_apic_level;
16780+ pax_open_kernel();
16781+ *(void **)&chip->irq_print_chip = ir_print_prefix;
16782+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
16783+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
16784
16785- chip->irq_set_affinity = set_remapped_irq_affinity;
16786+ *(void **)&chip->irq_set_affinity = set_remapped_irq_affinity;
16787+ pax_close_kernel();
16788 }
16789 #endif /* CONFIG_IRQ_REMAP */
16790
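The irq_remap_modify_chip_defaults() hunk rewrites the function pointers through *(void **) casts inside a pax_open_kernel()/pax_close_kernel() pair, because the constified irq_chip now lives in read-only memory. The nearest userspace analogue is briefly re-enabling writes with mprotect() (a sketch of the pattern only; on native hardware PaX flips the CPU's write-protect bit rather than page permissions):

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        /* stand-in for a constified ops structure */
        void (**ops)(void) = mmap(NULL, page, PROT_READ | PROT_WRITE,
                                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ops == MAP_FAILED)
                return 1;
        mprotect(ops, page, PROT_READ);               /* boot done: lock it */

        mprotect(ops, page, PROT_READ | PROT_WRITE);  /* pax_open_kernel()  */
        ops[0] = (void (*)(void))abort;               /* patch one slot     */
        mprotect(ops, page, PROT_READ);               /* pax_close_kernel() */

        printf("slot 0 is now %p\n", (void *)ops[0]);
        return 0;
}
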
16791diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
16792index d661ee9..791fd33 100644
16793--- a/arch/x86/kernel/apic/numaq_32.c
16794+++ b/arch/x86/kernel/apic/numaq_32.c
16795@@ -455,8 +455,7 @@ static void numaq_setup_portio_remap(void)
16796 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
16797 }
16798
16799-/* Use __refdata to keep the false-positive warning calm. */
16800-static struct apic __refdata apic_numaq = {
16801+static struct apic apic_numaq __read_only = {
16802
16803 .name = "NUMAQ",
16804 .probe = probe_numaq,
16805diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
16806index eb35ef9..f184a21 100644
16807--- a/arch/x86/kernel/apic/probe_32.c
16808+++ b/arch/x86/kernel/apic/probe_32.c
16809@@ -72,7 +72,7 @@ static int probe_default(void)
16810 return 1;
16811 }
16812
16813-static struct apic apic_default = {
16814+static struct apic apic_default __read_only = {
16815
16816 .name = "default",
16817 .probe = probe_default,
16818diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
16819index 77c95c0..434f8a4 100644
16820--- a/arch/x86/kernel/apic/summit_32.c
16821+++ b/arch/x86/kernel/apic/summit_32.c
16822@@ -486,7 +486,7 @@ void setup_summit(void)
16823 }
16824 #endif
16825
16826-static struct apic apic_summit = {
16827+static struct apic apic_summit __read_only = {
16828
16829 .name = "summit",
16830 .probe = probe_summit,
16831diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
16832index c88baa4..757aee1 100644
16833--- a/arch/x86/kernel/apic/x2apic_cluster.c
16834+++ b/arch/x86/kernel/apic/x2apic_cluster.c
16835@@ -183,7 +183,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
16836 return notifier_from_errno(err);
16837 }
16838
16839-static struct notifier_block __refdata x2apic_cpu_notifier = {
16840+static struct notifier_block x2apic_cpu_notifier = {
16841 .notifier_call = update_clusterinfo,
16842 };
16843
16844@@ -235,7 +235,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
16845 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
16846 }
16847
16848-static struct apic apic_x2apic_cluster = {
16849+static struct apic apic_x2apic_cluster __read_only = {
16850
16851 .name = "cluster x2apic",
16852 .probe = x2apic_cluster_probe,
16853diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
16854index 562a76d..a003c0f 100644
16855--- a/arch/x86/kernel/apic/x2apic_phys.c
16856+++ b/arch/x86/kernel/apic/x2apic_phys.c
16857@@ -89,7 +89,7 @@ static int x2apic_phys_probe(void)
16858 return apic == &apic_x2apic_phys;
16859 }
16860
16861-static struct apic apic_x2apic_phys = {
16862+static struct apic apic_x2apic_phys __read_only = {
16863
16864 .name = "physical x2apic",
16865 .probe = x2apic_phys_probe,
16866diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
16867index 8cfade9..b9d04fc 100644
16868--- a/arch/x86/kernel/apic/x2apic_uv_x.c
16869+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
16870@@ -333,7 +333,7 @@ static int uv_probe(void)
16871 return apic == &apic_x2apic_uv_x;
16872 }
16873
16874-static struct apic __refdata apic_x2apic_uv_x = {
16875+static struct apic apic_x2apic_uv_x __read_only = {
16876
16877 .name = "UV large system",
16878 .probe = uv_probe,
16879diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
16880index d65464e..1035d31 100644
16881--- a/arch/x86/kernel/apm_32.c
16882+++ b/arch/x86/kernel/apm_32.c
16883@@ -412,7 +412,7 @@ static DEFINE_MUTEX(apm_mutex);
16884 * This is for buggy BIOSes that refer to (real mode) segment 0x40
16885 * even though they are called in protected mode.
16886 */
16887-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
16888+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
16889 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
16890
16891 static const char driver_version[] = "1.16ac"; /* no spaces */
16892@@ -590,7 +590,10 @@ static long __apm_bios_call(void *_call)
16893 BUG_ON(cpu != 0);
16894 gdt = get_cpu_gdt_table(cpu);
16895 save_desc_40 = gdt[0x40 / 8];
16896+
16897+ pax_open_kernel();
16898 gdt[0x40 / 8] = bad_bios_desc;
16899+ pax_close_kernel();
16900
16901 apm_irq_save(flags);
16902 APM_DO_SAVE_SEGS;
16903@@ -599,7 +602,11 @@ static long __apm_bios_call(void *_call)
16904 &call->esi);
16905 APM_DO_RESTORE_SEGS;
16906 apm_irq_restore(flags);
16907+
16908+ pax_open_kernel();
16909 gdt[0x40 / 8] = save_desc_40;
16910+ pax_close_kernel();
16911+
16912 put_cpu();
16913
16914 return call->eax & 0xff;
16915@@ -666,7 +673,10 @@ static long __apm_bios_call_simple(void *_call)
16916 BUG_ON(cpu != 0);
16917 gdt = get_cpu_gdt_table(cpu);
16918 save_desc_40 = gdt[0x40 / 8];
16919+
16920+ pax_open_kernel();
16921 gdt[0x40 / 8] = bad_bios_desc;
16922+ pax_close_kernel();
16923
16924 apm_irq_save(flags);
16925 APM_DO_SAVE_SEGS;
16926@@ -674,7 +684,11 @@ static long __apm_bios_call_simple(void *_call)
16927 &call->eax);
16928 APM_DO_RESTORE_SEGS;
16929 apm_irq_restore(flags);
16930+
16931+ pax_open_kernel();
16932 gdt[0x40 / 8] = save_desc_40;
16933+ pax_close_kernel();
16934+
16935 put_cpu();
16936 return error;
16937 }
16938@@ -2345,12 +2359,15 @@ static int __init apm_init(void)
16939 * code to that CPU.
16940 */
16941 gdt = get_cpu_gdt_table(0);
16942+
16943+ pax_open_kernel();
16944 set_desc_base(&gdt[APM_CS >> 3],
16945 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
16946 set_desc_base(&gdt[APM_CS_16 >> 3],
16947 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
16948 set_desc_base(&gdt[APM_DS >> 3],
16949 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
16950+ pax_close_kernel();
16951
16952 proc_create("apm", 0, NULL, &apm_file_ops);
16953
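Note the bad_bios_desc flags change from 0x4092 to 0x4093 at the top of this file's diff: bit 0 of the access byte is the descriptor's accessed flag, which the CPU otherwise sets on first use, and pre-setting it matters once the descriptor is const and the GDT slot is only writable inside pax_open_kernel(). A small decoder for these GDT_ENTRY_INIT() flag words (assuming the standard x86 layout: access byte in bits 7:0, granularity nibble in bits 15:12):

#include <stdio.h>

static void decode_desc_flags(unsigned int f)
{
        unsigned int access = f & 0xff;
        unsigned int gran   = (f >> 12) & 0xf;

        printf("0x%04x: present=%u dpl=%u code=%u accessed=%u "
               "4k-gran=%u 32bit=%u\n", f,
               (access >> 7) & 1, (access >> 5) & 3,
               (access >> 3) & 1, access & 1,
               (gran >> 3) & 1, (gran >> 2) & 1);
}

int main(void)
{
        decode_desc_flags(0x4092);      /* old: writable data, not accessed */
        decode_desc_flags(0x4093);      /* new: accessed bit pre-set        */
        return 0;
}
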
16954diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
16955index 2861082..6d4718e 100644
16956--- a/arch/x86/kernel/asm-offsets.c
16957+++ b/arch/x86/kernel/asm-offsets.c
16958@@ -33,6 +33,8 @@ void common(void) {
16959 OFFSET(TI_status, thread_info, status);
16960 OFFSET(TI_addr_limit, thread_info, addr_limit);
16961 OFFSET(TI_preempt_count, thread_info, preempt_count);
16962+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
16963+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
16964
16965 BLANK();
16966 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
16967@@ -53,8 +55,26 @@ void common(void) {
16968 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
16969 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
16970 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
16971+
16972+#ifdef CONFIG_PAX_KERNEXEC
16973+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
16974 #endif
16975
16976+#ifdef CONFIG_PAX_MEMORY_UDEREF
16977+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
16978+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
16979+#ifdef CONFIG_X86_64
16980+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
16981+#endif
16982+#endif
16983+
16984+#endif
16985+
16986+ BLANK();
16987+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
16988+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
16989+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
16990+
16991 #ifdef CONFIG_XEN
16992 BLANK();
16993 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
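The OFFSET()/DEFINE() entries added above feed the asm-offsets mechanism: this file is compiled and the constants are scraped out of the assembler output into a generated header, which is how assembly such as the pax_erase_kstack routine later in the patch can refer to TI_lowest_stack or THREAD_SIZE_asm. The mechanism reduces to emitting offsetof() values; a standalone sketch (the structure layout is a stand-in, not the kernel's):

#include <stddef.h>
#include <stdio.h>

struct thread_info_demo {               /* illustrative layout only */
        unsigned long flags;
        unsigned long lowest_stack;
};

#define DEFINE(sym, val) \
        printf("#define %-20s %lu\n", #sym, (unsigned long)(val))
#define OFFSET(sym, str, mem) DEFINE(sym, offsetof(struct str, mem))

int main(void)
{
        OFFSET(TI_lowest_stack, thread_info_demo, lowest_stack);
        DEFINE(PAGE_SIZE_asm, 4096UL);
        DEFINE(THREAD_SIZE_asm, 2 * 4096UL);
        return 0;
}
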
16994diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
16995index 1b4754f..fbb4227 100644
16996--- a/arch/x86/kernel/asm-offsets_64.c
16997+++ b/arch/x86/kernel/asm-offsets_64.c
16998@@ -76,6 +76,7 @@ int main(void)
16999 BLANK();
17000 #undef ENTRY
17001
17002+ DEFINE(TSS_size, sizeof(struct tss_struct));
17003 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
17004 BLANK();
17005
17006diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
17007index a0e067d..9c7db16 100644
17008--- a/arch/x86/kernel/cpu/Makefile
17009+++ b/arch/x86/kernel/cpu/Makefile
17010@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
17011 CFLAGS_REMOVE_perf_event.o = -pg
17012 endif
17013
17014-# Make sure load_percpu_segment has no stackprotector
17015-nostackp := $(call cc-option, -fno-stack-protector)
17016-CFLAGS_common.o := $(nostackp)
17017-
17018 obj-y := intel_cacheinfo.o scattered.o topology.o
17019 obj-y += proc.o capflags.o powerflags.o common.o
17020 obj-y += vmware.o hypervisor.o mshyperv.o
17021diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
17022index 15239ff..e23e04e 100644
17023--- a/arch/x86/kernel/cpu/amd.c
17024+++ b/arch/x86/kernel/cpu/amd.c
17025@@ -733,7 +733,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
17026 unsigned int size)
17027 {
17028 /* AMD errata T13 (order #21922) */
17029- if ((c->x86 == 6)) {
17030+ if (c->x86 == 6) {
17031 /* Duron Rev A0 */
17032 if (c->x86_model == 3 && c->x86_mask == 0)
17033 size = 64;
17034diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
17035index 9c3ab43..51e6366 100644
17036--- a/arch/x86/kernel/cpu/common.c
17037+++ b/arch/x86/kernel/cpu/common.c
17038@@ -86,60 +86,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
17039
17040 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
17041
17042-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
17043-#ifdef CONFIG_X86_64
17044- /*
17045- * We need valid kernel segments for data and code in long mode too
17046- * IRET will check the segment types kkeil 2000/10/28
17047- * Also sysret mandates a special GDT layout
17048- *
17049- * TLS descriptors are currently at a different place compared to i386.
17050- * Hopefully nobody expects them at a fixed place (Wine?)
17051- */
17052- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
17053- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
17054- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
17055- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
17056- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
17057- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
17058-#else
17059- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
17060- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
17061- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
17062- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
17063- /*
17064- * Segments used for calling PnP BIOS have byte granularity.
17065- * The code segments and data segments have fixed 64k limits,
17066- * the transfer segment sizes are set at run time.
17067- */
17068- /* 32-bit code */
17069- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
17070- /* 16-bit code */
17071- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
17072- /* 16-bit data */
17073- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
17074- /* 16-bit data */
17075- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
17076- /* 16-bit data */
17077- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
17078- /*
17079- * The APM segments have byte granularity and their bases
17080- * are set at run time. All have 64k limits.
17081- */
17082- /* 32-bit code */
17083- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
17084- /* 16-bit code */
17085- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
17086- /* data */
17087- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
17088-
17089- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
17090- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
17091- GDT_STACK_CANARY_INIT
17092-#endif
17093-} };
17094-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
17095-
17096 static int __init x86_xsave_setup(char *s)
17097 {
17098 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
17099@@ -389,7 +335,7 @@ void switch_to_new_gdt(int cpu)
17100 {
17101 struct desc_ptr gdt_descr;
17102
17103- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
17104+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
17105 gdt_descr.size = GDT_SIZE - 1;
17106 load_gdt(&gdt_descr);
17107 /* Reload the per-cpu base */
17108@@ -885,6 +831,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
17109 /* Filter out anything that depends on CPUID levels we don't have */
17110 filter_cpuid_features(c, true);
17111
17112+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
17113+ setup_clear_cpu_cap(X86_FEATURE_SEP);
17114+#endif
17115+
17116 /* If the model name is still unset, do table lookup. */
17117 if (!c->x86_model_id[0]) {
17118 const char *p;
17119@@ -1068,10 +1018,12 @@ static __init int setup_disablecpuid(char *arg)
17120 }
17121 __setup("clearcpuid=", setup_disablecpuid);
17122
17123+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
17124+EXPORT_PER_CPU_SYMBOL(current_tinfo);
17125+
17126 #ifdef CONFIG_X86_64
17127 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
17128-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
17129- (unsigned long) nmi_idt_table };
17130+struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
17131
17132 DEFINE_PER_CPU_FIRST(union irq_stack_union,
17133 irq_stack_union) __aligned(PAGE_SIZE);
17134@@ -1085,7 +1037,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
17135 EXPORT_PER_CPU_SYMBOL(current_task);
17136
17137 DEFINE_PER_CPU(unsigned long, kernel_stack) =
17138- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
17139+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
17140 EXPORT_PER_CPU_SYMBOL(kernel_stack);
17141
17142 DEFINE_PER_CPU(char *, irq_stack_ptr) =
17143@@ -1224,7 +1176,7 @@ void __cpuinit cpu_init(void)
17144 int i;
17145
17146 cpu = stack_smp_processor_id();
17147- t = &per_cpu(init_tss, cpu);
17148+ t = init_tss + cpu;
17149 oist = &per_cpu(orig_ist, cpu);
17150
17151 #ifdef CONFIG_NUMA
17152@@ -1250,7 +1202,7 @@ void __cpuinit cpu_init(void)
17153 switch_to_new_gdt(cpu);
17154 loadsegment(fs, 0);
17155
17156- load_idt((const struct desc_ptr *)&idt_descr);
17157+ load_idt(&idt_descr);
17158
17159 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
17160 syscall_init();
17161@@ -1259,7 +1211,6 @@ void __cpuinit cpu_init(void)
17162 wrmsrl(MSR_KERNEL_GS_BASE, 0);
17163 barrier();
17164
17165- x86_configure_nx();
17166 enable_x2apic();
17167
17168 /*
17169@@ -1311,7 +1262,7 @@ void __cpuinit cpu_init(void)
17170 {
17171 int cpu = smp_processor_id();
17172 struct task_struct *curr = current;
17173- struct tss_struct *t = &per_cpu(init_tss, cpu);
17174+ struct tss_struct *t = init_tss + cpu;
17175 struct thread_struct *thread = &curr->thread;
17176
17177 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
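Wherever the GDT now lives (its writable per-cpu initializer is deleted above), switch_to_new_gdt() still hands it to the CPU via gdt_descr, the packed pseudo-descriptor that LGDT consumes: a 16-bit limit (table size in bytes minus one) followed by the table's linear address. A stand-in for that layout:

#include <stdint.h>
#include <stdio.h>

/* stand-in for the kernel's 32-bit struct desc_ptr */
struct desc_ptr_demo {
        uint16_t size;           /* limit: table size in bytes - 1 */
        uint32_t address;        /* linear address of the table    */
} __attribute__((packed));

int main(void)
{
        struct desc_ptr_demo gdt_descr = {
                .size    = 32 * 8 - 1,   /* 32 eight-byte descriptors */
                .address = 0xc1000000u,
        };
        printf("lgdt operand: %zu bytes, limit=%u, base=%#x\n",
               sizeof(gdt_descr), gdt_descr.size, gdt_descr.address);
        return 0;
}
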
17178diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
17179index fcaabd0..7b55a26 100644
17180--- a/arch/x86/kernel/cpu/intel.c
17181+++ b/arch/x86/kernel/cpu/intel.c
17182@@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
17183 * Update the IDT descriptor and reload the IDT so that
17184 * it uses the read-only mapped virtual address.
17185 */
17186- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
17187+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
17188 load_idt(&idt_descr);
17189 }
17190 #endif
17191diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
17192index 84c1309..39b7224 100644
17193--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
17194+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
17195@@ -1017,6 +1017,22 @@ static struct attribute *default_attrs[] = {
17196 };
17197
17198 #ifdef CONFIG_AMD_NB
17199+static struct attribute *default_attrs_amd_nb[] = {
17200+ &type.attr,
17201+ &level.attr,
17202+ &coherency_line_size.attr,
17203+ &physical_line_partition.attr,
17204+ &ways_of_associativity.attr,
17205+ &number_of_sets.attr,
17206+ &size.attr,
17207+ &shared_cpu_map.attr,
17208+ &shared_cpu_list.attr,
17209+ NULL,
17210+ NULL,
17211+ NULL,
17212+ NULL
17213+};
17214+
17215 static struct attribute ** __cpuinit amd_l3_attrs(void)
17216 {
17217 static struct attribute **attrs;
17218@@ -1027,18 +1043,7 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
17219
17220 n = ARRAY_SIZE(default_attrs);
17221
17222- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
17223- n += 2;
17224-
17225- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
17226- n += 1;
17227-
17228- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
17229- if (attrs == NULL)
17230- return attrs = default_attrs;
17231-
17232- for (n = 0; default_attrs[n]; n++)
17233- attrs[n] = default_attrs[n];
17234+ attrs = default_attrs_amd_nb;
17235
17236 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
17237 attrs[n++] = &cache_disable_0.attr;
17238@@ -1089,6 +1094,13 @@ static struct kobj_type ktype_cache = {
17239 .default_attrs = default_attrs,
17240 };
17241
17242+#ifdef CONFIG_AMD_NB
17243+static struct kobj_type ktype_cache_amd_nb = {
17244+ .sysfs_ops = &sysfs_ops,
17245+ .default_attrs = default_attrs_amd_nb,
17246+};
17247+#endif
17248+
17249 static struct kobj_type ktype_percpu_entry = {
17250 .sysfs_ops = &sysfs_ops,
17251 };
17252@@ -1154,20 +1166,26 @@ static int __cpuinit cache_add_dev(struct device *dev)
17253 return retval;
17254 }
17255
17256+#ifdef CONFIG_AMD_NB
17257+ amd_l3_attrs();
17258+#endif
17259+
17260 for (i = 0; i < num_cache_leaves; i++) {
17261+ struct kobj_type *ktype;
17262+
17263 this_object = INDEX_KOBJECT_PTR(cpu, i);
17264 this_object->cpu = cpu;
17265 this_object->index = i;
17266
17267 this_leaf = CPUID4_INFO_IDX(cpu, i);
17268
17269- ktype_cache.default_attrs = default_attrs;
17270+ ktype = &ktype_cache;
17271 #ifdef CONFIG_AMD_NB
17272 if (this_leaf->base.nb)
17273- ktype_cache.default_attrs = amd_l3_attrs();
17274+ ktype = &ktype_cache_amd_nb;
17275 #endif
17276 retval = kobject_init_and_add(&(this_object->kobj),
17277- &ktype_cache,
17278+ ktype,
17279 per_cpu(ici_cache_kobject, cpu),
17280 "index%1lu", i);
17281 if (unlikely(retval)) {
17282@@ -1222,7 +1240,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
17283 return NOTIFY_OK;
17284 }
17285
17286-static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
17287+static struct notifier_block cacheinfo_cpu_notifier = {
17288 .notifier_call = cacheinfo_cpu_callback,
17289 };
17290
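The cacheinfo rework above trades the runtime kzalloc() of the attribute array for default_attrs_amd_nb, a static table carrying spare NULL slots that amd_l3_attrs() fills in once; the pointer array then no longer has to sit in writable heap memory. The pattern in miniature (names are ours):

#include <stddef.h>
#include <stdio.h>

/* fixed entries, then spare NULL slots doubling as the terminator */
static const char *attrs_demo[] = {
        "type", "level", "size",
        NULL, NULL, NULL,
};

static void add_optional_attrs(int have_l3_feature)
{
        size_t n = 3;                   /* index of the first spare slot */

        if (have_l3_feature)
                attrs_demo[n++] = "cache_disable_0";
        /* unused slots stay NULL and terminate the list */
}

int main(void)
{
        add_optional_attrs(1);
        for (size_t i = 0; attrs_demo[i]; i++)
                puts(attrs_demo[i]);
        return 0;
}
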
17291diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
17292index 80dbda8..be16652 100644
17293--- a/arch/x86/kernel/cpu/mcheck/mce.c
17294+++ b/arch/x86/kernel/cpu/mcheck/mce.c
17295@@ -45,6 +45,7 @@
17296 #include <asm/processor.h>
17297 #include <asm/mce.h>
17298 #include <asm/msr.h>
17299+#include <asm/local.h>
17300
17301 #include "mce-internal.h"
17302
17303@@ -246,7 +247,7 @@ static void print_mce(struct mce *m)
17304 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
17305 m->cs, m->ip);
17306
17307- if (m->cs == __KERNEL_CS)
17308+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
17309 print_symbol("{%s}", m->ip);
17310 pr_cont("\n");
17311 }
17312@@ -279,10 +280,10 @@ static void print_mce(struct mce *m)
17313
17314 #define PANIC_TIMEOUT 5 /* 5 seconds */
17315
17316-static atomic_t mce_paniced;
17317+static atomic_unchecked_t mce_paniced;
17318
17319 static int fake_panic;
17320-static atomic_t mce_fake_paniced;
17321+static atomic_unchecked_t mce_fake_paniced;
17322
17323 /* Panic in progress. Enable interrupts and wait for final IPI */
17324 static void wait_for_panic(void)
17325@@ -306,7 +307,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
17326 /*
17327 * Make sure only one CPU runs in machine check panic
17328 */
17329- if (atomic_inc_return(&mce_paniced) > 1)
17330+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
17331 wait_for_panic();
17332 barrier();
17333
17334@@ -314,7 +315,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
17335 console_verbose();
17336 } else {
17337 /* Don't log too much for fake panic */
17338- if (atomic_inc_return(&mce_fake_paniced) > 1)
17339+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
17340 return;
17341 }
17342 /* First print corrected ones that are still unlogged */
17343@@ -686,7 +687,7 @@ static int mce_timed_out(u64 *t)
17344 * might have been modified by someone else.
17345 */
17346 rmb();
17347- if (atomic_read(&mce_paniced))
17348+ if (atomic_read_unchecked(&mce_paniced))
17349 wait_for_panic();
17350 if (!mca_cfg.monarch_timeout)
17351 goto out;
17352@@ -1662,7 +1663,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
17353 }
17354
17355 /* Call the installed machine check handler for this CPU setup. */
17356-void (*machine_check_vector)(struct pt_regs *, long error_code) =
17357+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
17358 unexpected_machine_check;
17359
17360 /*
17361@@ -1685,7 +1686,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
17362 return;
17363 }
17364
17365+ pax_open_kernel();
17366 machine_check_vector = do_machine_check;
17367+ pax_close_kernel();
17368
17369 __mcheck_cpu_init_generic();
17370 __mcheck_cpu_init_vendor(c);
17371@@ -1699,7 +1702,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
17372 */
17373
17374 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
17375-static int mce_chrdev_open_count; /* #times opened */
17376+static local_t mce_chrdev_open_count; /* #times opened */
17377 static int mce_chrdev_open_exclu; /* already open exclusive? */
17378
17379 static int mce_chrdev_open(struct inode *inode, struct file *file)
17380@@ -1707,7 +1710,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
17381 spin_lock(&mce_chrdev_state_lock);
17382
17383 if (mce_chrdev_open_exclu ||
17384- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
17385+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
17386 spin_unlock(&mce_chrdev_state_lock);
17387
17388 return -EBUSY;
17389@@ -1715,7 +1718,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
17390
17391 if (file->f_flags & O_EXCL)
17392 mce_chrdev_open_exclu = 1;
17393- mce_chrdev_open_count++;
17394+ local_inc(&mce_chrdev_open_count);
17395
17396 spin_unlock(&mce_chrdev_state_lock);
17397
17398@@ -1726,7 +1729,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
17399 {
17400 spin_lock(&mce_chrdev_state_lock);
17401
17402- mce_chrdev_open_count--;
17403+ local_dec(&mce_chrdev_open_count);
17404 mce_chrdev_open_exclu = 0;
17405
17406 spin_unlock(&mce_chrdev_state_lock);
17407@@ -2372,7 +2375,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
17408 return NOTIFY_OK;
17409 }
17410
17411-static struct notifier_block mce_cpu_notifier __cpuinitdata = {
17412+static struct notifier_block mce_cpu_notifier = {
17413 .notifier_call = mce_cpu_callback,
17414 };
17415
17416@@ -2382,7 +2385,7 @@ static __init void mce_init_banks(void)
17417
17418 for (i = 0; i < mca_cfg.banks; i++) {
17419 struct mce_bank *b = &mce_banks[i];
17420- struct device_attribute *a = &b->attr;
17421+ device_attribute_no_const *a = &b->attr;
17422
17423 sysfs_attr_init(&a->attr);
17424 a->attr.name = b->attrname;
17425@@ -2450,7 +2453,7 @@ struct dentry *mce_get_debugfs_dir(void)
17426 static void mce_reset(void)
17427 {
17428 cpu_missing = 0;
17429- atomic_set(&mce_fake_paniced, 0);
17430+ atomic_set_unchecked(&mce_fake_paniced, 0);
17431 atomic_set(&mce_executing, 0);
17432 atomic_set(&mce_callin, 0);
17433 atomic_set(&global_nwo, 0);
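The chardev hunks swap the plain int bookkeeping of mce_chrdev_open_count for local_t operations while the spinlock keeps serializing the policy. The exclusive-open protocol itself is unchanged and reduces to the following (a sketch with pthreads standing in for the spinlock):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static long open_count;                 /* local_t in the patch */
static bool open_exclu;

static int demo_open(bool excl)
{
        int ret = 0;

        pthread_mutex_lock(&state_lock);
        if (open_exclu || (open_count && excl)) {
                ret = -1;               /* -EBUSY in the kernel */
        } else {
                open_exclu = excl;
                open_count++;
        }
        pthread_mutex_unlock(&state_lock);
        return ret;
}

static void demo_release(void)
{
        pthread_mutex_lock(&state_lock);
        open_count--;
        open_exclu = false;
        pthread_mutex_unlock(&state_lock);
}

int main(void)
{
        demo_open(false);
        printf("O_EXCL open while busy: %d\n", demo_open(true));
        demo_release();
        return 0;
}
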
17434diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
17435index 2d5454c..51987eb 100644
17436--- a/arch/x86/kernel/cpu/mcheck/p5.c
17437+++ b/arch/x86/kernel/cpu/mcheck/p5.c
17438@@ -11,6 +11,7 @@
17439 #include <asm/processor.h>
17440 #include <asm/mce.h>
17441 #include <asm/msr.h>
17442+#include <asm/pgtable.h>
17443
17444 /* By default disabled */
17445 int mce_p5_enabled __read_mostly;
17446@@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
17447 if (!cpu_has(c, X86_FEATURE_MCE))
17448 return;
17449
17450+ pax_open_kernel();
17451 machine_check_vector = pentium_machine_check;
17452+ pax_close_kernel();
17453 /* Make sure the vector pointer is visible before we enable MCEs: */
17454 wmb();
17455
17456diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
17457index 47a1870..8c019a7 100644
17458--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
17459+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
17460@@ -288,7 +288,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
17461 return notifier_from_errno(err);
17462 }
17463
17464-static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
17465+static struct notifier_block thermal_throttle_cpu_notifier =
17466 {
17467 .notifier_call = thermal_throttle_cpu_callback,
17468 };
17469diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
17470index 2d7998f..17c9de1 100644
17471--- a/arch/x86/kernel/cpu/mcheck/winchip.c
17472+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
17473@@ -10,6 +10,7 @@
17474 #include <asm/processor.h>
17475 #include <asm/mce.h>
17476 #include <asm/msr.h>
17477+#include <asm/pgtable.h>
17478
17479 /* Machine check handler for WinChip C6: */
17480 static void winchip_machine_check(struct pt_regs *regs, long error_code)
17481@@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
17482 {
17483 u32 lo, hi;
17484
17485+ pax_open_kernel();
17486 machine_check_vector = winchip_machine_check;
17487+ pax_close_kernel();
17488 /* Make sure the vector pointer is visible before we enable MCEs: */
17489 wmb();
17490
17491diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
17492index 726bf96..81f0526 100644
17493--- a/arch/x86/kernel/cpu/mtrr/main.c
17494+++ b/arch/x86/kernel/cpu/mtrr/main.c
17495@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
17496 u64 size_or_mask, size_and_mask;
17497 static bool mtrr_aps_delayed_init;
17498
17499-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
17500+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
17501
17502 const struct mtrr_ops *mtrr_if;
17503
17504diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
17505index df5e41f..816c719 100644
17506--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
17507+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
17508@@ -25,7 +25,7 @@ struct mtrr_ops {
17509 int (*validate_add_page)(unsigned long base, unsigned long size,
17510 unsigned int type);
17511 int (*have_wrcomb)(void);
17512-};
17513+} __do_const;
17514
17515 extern int generic_get_free_region(unsigned long base, unsigned long size,
17516 int replace_reg);
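mtrr_ops is a table of function pointers, so the header gains __do_const, the grsecurity constify-plugin attribute that forces every instance of the structure into read-only memory and so rules out runtime repointing of the ops. Plain C expresses the same intent by const-qualifying the instances (a sketch; the plugin does this automatically and enforces it):

#include <stdio.h>

struct mtrr_ops_demo {
        int (*validate)(unsigned long base, unsigned long size);
        int (*have_wrcomb)(void);
};

static int generic_validate(unsigned long base, unsigned long size)
{
        return base + size > base;      /* toy overflow check */
}

static int generic_have_wrcomb(void)
{
        return 1;
}

/* const instance: placed in .rodata, its pointers cannot be repointed */
static const struct mtrr_ops_demo generic_ops = {
        .validate    = generic_validate,
        .have_wrcomb = generic_have_wrcomb,
};

int main(void)
{
        printf("validate=%d have_wrcomb=%d\n",
               generic_ops.validate(0x1000, 0x1000),
               generic_ops.have_wrcomb());
        return 0;
}
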
17517diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
17518index 6774c17..72c1b22 100644
17519--- a/arch/x86/kernel/cpu/perf_event.c
17520+++ b/arch/x86/kernel/cpu/perf_event.c
17521@@ -1305,7 +1305,7 @@ static void __init pmu_check_apic(void)
17522 pr_info("no hardware sampling interrupt available.\n");
17523 }
17524
17525-static struct attribute_group x86_pmu_format_group = {
17526+static attribute_group_no_const x86_pmu_format_group = {
17527 .name = "format",
17528 .attrs = NULL,
17529 };
17530@@ -1313,7 +1313,7 @@ static struct attribute_group x86_pmu_format_group = {
17531 struct perf_pmu_events_attr {
17532 struct device_attribute attr;
17533 u64 id;
17534-};
17535+} __do_const;
17536
17537 /*
17538 * Remove all undefined events (x86_pmu.event_map(id) == 0)
17539@@ -1381,7 +1381,7 @@ static struct attribute *events_attr[] = {
17540 NULL,
17541 };
17542
17543-static struct attribute_group x86_pmu_events_group = {
17544+static attribute_group_no_const x86_pmu_events_group = {
17545 .name = "events",
17546 .attrs = events_attr,
17547 };
17548@@ -1880,7 +1880,7 @@ static unsigned long get_segment_base(unsigned int segment)
17549 if (idx > GDT_ENTRIES)
17550 return 0;
17551
17552- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
17553+ desc = get_cpu_gdt_table(smp_processor_id());
17554 }
17555
17556 return get_desc_base(desc + idx);
17557@@ -1970,7 +1970,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
17558 break;
17559
17560 perf_callchain_store(entry, frame.return_address);
17561- fp = frame.next_frame;
17562+ fp = (const void __force_user *)frame.next_frame;
17563 }
17564 }
17565
17566diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
17567index 4914e94..60b06e3 100644
17568--- a/arch/x86/kernel/cpu/perf_event_intel.c
17569+++ b/arch/x86/kernel/cpu/perf_event_intel.c
17570@@ -1958,10 +1958,10 @@ __init int intel_pmu_init(void)
17571 * v2 and above have a perf capabilities MSR
17572 */
17573 if (version > 1) {
17574- u64 capabilities;
17575+ u64 capabilities = x86_pmu.intel_cap.capabilities;
17576
17577- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
17578- x86_pmu.intel_cap.capabilities = capabilities;
17579+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
17580+ x86_pmu.intel_cap.capabilities = capabilities;
17581 }
17582
17583 intel_ds_init();
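The intel_pmu_init() hunk replaces rdmsrl() with rdmsrl_safe(), which returns nonzero instead of oopsing when the MSR read faults (hypervisors commonly hide MSR_IA32_PERF_CAPABILITIES), and falls back to the previously initialized value. The pattern in isolation (msr_read_demo is our stand-in for the faulting-safe accessor):

#include <stdint.h>
#include <stdio.h>

/* stand-in for rdmsrl_safe(): 0 on success, nonzero if the read faults */
static int msr_read_demo(uint32_t msr, uint64_t *val)
{
        (void)msr;
        (void)val;              /* pretend the hypervisor #GPs the access */
        return -1;
}

int main(void)
{
        uint64_t capabilities = 0;              /* the prior default...   */
        uint64_t saved = capabilities;

        if (msr_read_demo(0x345, &capabilities))
                capabilities = saved;           /* ...kept on a fault     */

        printf("capabilities = %#llx\n", (unsigned long long)capabilities);
        return 0;
}
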
17584diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17585index b43200d..d235b3e 100644
17586--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17587+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17588@@ -2428,7 +2428,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
17589 static int __init uncore_type_init(struct intel_uncore_type *type)
17590 {
17591 struct intel_uncore_pmu *pmus;
17592- struct attribute_group *events_group;
17593+ attribute_group_no_const *attr_group;
17594 struct attribute **attrs;
17595 int i, j;
17596
17597@@ -2455,19 +2455,19 @@ static int __init uncore_type_init(struct intel_uncore_type *type)
17598 while (type->event_descs[i].attr.attr.name)
17599 i++;
17600
17601- events_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
17602- sizeof(*events_group), GFP_KERNEL);
17603- if (!events_group)
17604+ attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
17605+ sizeof(*attr_group), GFP_KERNEL);
17606+ if (!attr_group)
17607 goto fail;
17608
17609- attrs = (struct attribute **)(events_group + 1);
17610- events_group->name = "events";
17611- events_group->attrs = attrs;
17612+ attrs = (struct attribute **)(attr_group + 1);
17613+ attr_group->name = "events";
17614+ attr_group->attrs = attrs;
17615
17616 for (j = 0; j < i; j++)
17617 attrs[j] = &type->event_descs[j].attr.attr;
17618
17619- type->events_group = events_group;
17620+ type->events_group = attr_group;
17621 }
17622
17623 type->pmu_group = &uncore_pmu_attr_group;
17624@@ -2826,7 +2826,7 @@ static int
17625 return NOTIFY_OK;
17626 }
17627
17628-static struct notifier_block uncore_cpu_nb __cpuinitdata = {
17629+static struct notifier_block uncore_cpu_nb = {
17630 .notifier_call = uncore_cpu_notifier,
17631 /*
17632 * to migrate uncore events, our notifier should be executed
17633diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
17634index e68a455..975a932 100644
17635--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
17636+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
17637@@ -428,7 +428,7 @@ struct intel_uncore_box {
17638 struct uncore_event_desc {
17639 struct kobj_attribute attr;
17640 const char *config;
17641-};
17642+} __do_const;
17643
17644 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
17645 { \
17646diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
17647index 60c7891..9e911d3 100644
17648--- a/arch/x86/kernel/cpuid.c
17649+++ b/arch/x86/kernel/cpuid.c
17650@@ -171,7 +171,7 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
17651 return notifier_from_errno(err);
17652 }
17653
17654-static struct notifier_block __refdata cpuid_class_cpu_notifier =
17655+static struct notifier_block cpuid_class_cpu_notifier =
17656 {
17657 .notifier_call = cpuid_class_cpu_callback,
17658 };
17659diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
17660index 74467fe..18793d5 100644
17661--- a/arch/x86/kernel/crash.c
17662+++ b/arch/x86/kernel/crash.c
17663@@ -58,10 +58,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
17664 {
17665 #ifdef CONFIG_X86_32
17666 struct pt_regs fixed_regs;
17667-#endif
17668
17669-#ifdef CONFIG_X86_32
17670- if (!user_mode_vm(regs)) {
17671+ if (!user_mode(regs)) {
17672 crash_fixup_ss_esp(&fixed_regs, regs);
17673 regs = &fixed_regs;
17674 }
17675diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
17676index 37250fe..bf2ec74 100644
17677--- a/arch/x86/kernel/doublefault_32.c
17678+++ b/arch/x86/kernel/doublefault_32.c
17679@@ -11,7 +11,7 @@
17680
17681 #define DOUBLEFAULT_STACKSIZE (1024)
17682 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
17683-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
17684+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
17685
17686 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
17687
17688@@ -21,7 +21,7 @@ static void doublefault_fn(void)
17689 unsigned long gdt, tss;
17690
17691 store_gdt(&gdt_desc);
17692- gdt = gdt_desc.address;
17693+ gdt = (unsigned long)gdt_desc.address;
17694
17695 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
17696
17697@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
17698 /* 0x2 bit is always set */
17699 .flags = X86_EFLAGS_SF | 0x2,
17700 .sp = STACK_START,
17701- .es = __USER_DS,
17702+ .es = __KERNEL_DS,
17703 .cs = __KERNEL_CS,
17704 .ss = __KERNEL_DS,
17705- .ds = __USER_DS,
17706+ .ds = __KERNEL_DS,
17707 .fs = __KERNEL_PERCPU,
17708
17709 .__cr3 = __pa_nodebug(swapper_pg_dir),
17710diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
17711index ae42418b..787c16b 100644
17712--- a/arch/x86/kernel/dumpstack.c
17713+++ b/arch/x86/kernel/dumpstack.c
17714@@ -2,6 +2,9 @@
17715 * Copyright (C) 1991, 1992 Linus Torvalds
17716 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
17717 */
17718+#ifdef CONFIG_GRKERNSEC_HIDESYM
17719+#define __INCLUDED_BY_HIDESYM 1
17720+#endif
17721 #include <linux/kallsyms.h>
17722 #include <linux/kprobes.h>
17723 #include <linux/uaccess.h>
17724@@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
17725 static void
17726 print_ftrace_graph_addr(unsigned long addr, void *data,
17727 const struct stacktrace_ops *ops,
17728- struct thread_info *tinfo, int *graph)
17729+ struct task_struct *task, int *graph)
17730 {
17731- struct task_struct *task;
17732 unsigned long ret_addr;
17733 int index;
17734
17735 if (addr != (unsigned long)return_to_handler)
17736 return;
17737
17738- task = tinfo->task;
17739 index = task->curr_ret_stack;
17740
17741 if (!task->ret_stack || index < *graph)
17742@@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
17743 static inline void
17744 print_ftrace_graph_addr(unsigned long addr, void *data,
17745 const struct stacktrace_ops *ops,
17746- struct thread_info *tinfo, int *graph)
17747+ struct task_struct *task, int *graph)
17748 { }
17749 #endif
17750
17751@@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
17752 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
17753 */
17754
17755-static inline int valid_stack_ptr(struct thread_info *tinfo,
17756- void *p, unsigned int size, void *end)
17757+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
17758 {
17759- void *t = tinfo;
17760 if (end) {
17761 if (p < end && p >= (end-THREAD_SIZE))
17762 return 1;
17763@@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
17764 }
17765
17766 unsigned long
17767-print_context_stack(struct thread_info *tinfo,
17768+print_context_stack(struct task_struct *task, void *stack_start,
17769 unsigned long *stack, unsigned long bp,
17770 const struct stacktrace_ops *ops, void *data,
17771 unsigned long *end, int *graph)
17772 {
17773 struct stack_frame *frame = (struct stack_frame *)bp;
17774
17775- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
17776+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
17777 unsigned long addr;
17778
17779 addr = *stack;
17780@@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
17781 } else {
17782 ops->address(data, addr, 0);
17783 }
17784- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
17785+ print_ftrace_graph_addr(addr, data, ops, task, graph);
17786 }
17787 stack++;
17788 }
17789@@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
17790 EXPORT_SYMBOL_GPL(print_context_stack);
17791
17792 unsigned long
17793-print_context_stack_bp(struct thread_info *tinfo,
17794+print_context_stack_bp(struct task_struct *task, void *stack_start,
17795 unsigned long *stack, unsigned long bp,
17796 const struct stacktrace_ops *ops, void *data,
17797 unsigned long *end, int *graph)
17798@@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
17799 struct stack_frame *frame = (struct stack_frame *)bp;
17800 unsigned long *ret_addr = &frame->return_address;
17801
17802- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
17803+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
17804 unsigned long addr = *ret_addr;
17805
17806 if (!__kernel_text_address(addr))
17807@@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
17808 ops->address(data, addr, 1);
17809 frame = frame->next_frame;
17810 ret_addr = &frame->return_address;
17811- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
17812+ print_ftrace_graph_addr(addr, data, ops, task, graph);
17813 }
17814
17815 return (unsigned long)frame;
17816@@ -189,7 +188,7 @@ void dump_stack(void)
17817
17818 bp = stack_frame(current, NULL);
17819 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
17820- current->pid, current->comm, print_tainted(),
17821+ task_pid_nr(current), current->comm, print_tainted(),
17822 init_utsname()->release,
17823 (int)strcspn(init_utsname()->version, " "),
17824 init_utsname()->version);
17825@@ -225,6 +224,8 @@ unsigned __kprobes long oops_begin(void)
17826 }
17827 EXPORT_SYMBOL_GPL(oops_begin);
17828
17829+extern void gr_handle_kernel_exploit(void);
17830+
17831 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
17832 {
17833 if (regs && kexec_should_crash(current))
17834@@ -246,7 +247,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
17835 panic("Fatal exception in interrupt");
17836 if (panic_on_oops)
17837 panic("Fatal exception");
17838- do_exit(signr);
17839+
17840+ gr_handle_kernel_exploit();
17841+
17842+ do_group_exit(signr);
17843 }
17844
17845 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
17846@@ -274,7 +278,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
17847 print_modules();
17848 show_regs(regs);
17849 #ifdef CONFIG_X86_32
17850- if (user_mode_vm(regs)) {
17851+ if (user_mode(regs)) {
17852 sp = regs->sp;
17853 ss = regs->ss & 0xffff;
17854 } else {
17855@@ -302,7 +306,7 @@ void die(const char *str, struct pt_regs *regs, long err)
17856 unsigned long flags = oops_begin();
17857 int sig = SIGSEGV;
17858
17859- if (!user_mode_vm(regs))
17860+ if (!user_mode(regs))
17861 report_bug(regs->ip, regs);
17862
17863 if (__die(str, regs, err))
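The unwinder rework above follows from this patch moving thread_info off the stack and into task_struct (see the task_struct.tinfo offsets computed in asm-offsets.c earlier): print_context_stack() now receives the owning task plus an explicit stack_start, and valid_stack_ptr() becomes a pure range check over one THREAD_SIZE window. That check as a standalone helper (THREAD_SIZE value assumed):

#include <stdio.h>

#define THREAD_SIZE 8192UL      /* assumed; matches 8K i386 stacks */

/* is [p, p + size) inside the THREAD_SIZE window at stack_start? */
static int valid_stack_ptr_demo(const void *stack_start,
                                const void *p, unsigned int size)
{
        const char *lo = stack_start;
        const char *q  = p;

        return q >= lo && q + size <= lo + THREAD_SIZE;
}

int main(void)
{
        static char stack[THREAD_SIZE];

        printf("%d %d\n",
               valid_stack_ptr_demo(stack, stack + 16, 4),           /* 1 */
               valid_stack_ptr_demo(stack, stack + THREAD_SIZE, 4)); /* 0 */
        return 0;
}
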
17864diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
17865index 1038a41..db2c12b 100644
17866--- a/arch/x86/kernel/dumpstack_32.c
17867+++ b/arch/x86/kernel/dumpstack_32.c
17868@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
17869 bp = stack_frame(task, regs);
17870
17871 for (;;) {
17872- struct thread_info *context;
17873+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
17874
17875- context = (struct thread_info *)
17876- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
17877- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
17878+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
17879
17880- stack = (unsigned long *)context->previous_esp;
17881- if (!stack)
17882+ if (stack_start == task_stack_page(task))
17883 break;
17884+ stack = *(unsigned long **)stack_start;
17885 if (ops->stack(data, "IRQ") < 0)
17886 break;
17887 touch_nmi_watchdog();
17888@@ -86,7 +84,7 @@ void show_regs(struct pt_regs *regs)
17889 {
17890 int i;
17891
17892- __show_regs(regs, !user_mode_vm(regs));
17893+ __show_regs(regs, !user_mode(regs));
17894
17895 pr_emerg("Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
17896 TASK_COMM_LEN, current->comm, task_pid_nr(current),
17897@@ -95,21 +93,22 @@ void show_regs(struct pt_regs *regs)
17898 * When in-kernel, we also print out the stack and code at the
17899 * time of the fault.
17900 */
17901- if (!user_mode_vm(regs)) {
17902+ if (!user_mode(regs)) {
17903 unsigned int code_prologue = code_bytes * 43 / 64;
17904 unsigned int code_len = code_bytes;
17905 unsigned char c;
17906 u8 *ip;
17907+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
17908
17909 pr_emerg("Stack:\n");
17910 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
17911
17912 pr_emerg("Code:");
17913
17914- ip = (u8 *)regs->ip - code_prologue;
17915+ ip = (u8 *)regs->ip - code_prologue + cs_base;
17916 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
17917 /* try starting at IP */
17918- ip = (u8 *)regs->ip;
17919+ ip = (u8 *)regs->ip + cs_base;
17920 code_len = code_len - code_prologue + 1;
17921 }
17922 for (i = 0; i < code_len; i++, ip++) {
17923@@ -118,7 +117,7 @@ void show_regs(struct pt_regs *regs)
17924 pr_cont(" Bad EIP value.");
17925 break;
17926 }
17927- if (ip == (u8 *)regs->ip)
17928+ if (ip == (u8 *)regs->ip + cs_base)
17929 pr_cont(" <%02x>", c);
17930 else
17931 pr_cont(" %02x", c);
17932@@ -131,6 +130,7 @@ int is_valid_bugaddr(unsigned long ip)
17933 {
17934 unsigned short ud2;
17935
17936+ ip = ktla_ktva(ip);
17937 if (ip < PAGE_OFFSET)
17938 return 0;
17939 if (probe_kernel_address((unsigned short *)ip, ud2))
17940@@ -138,3 +138,15 @@ int is_valid_bugaddr(unsigned long ip)
17941
17942 return ud2 == 0x0b0f;
17943 }
17944+
17945+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
17946+void pax_check_alloca(unsigned long size)
17947+{
17948+ unsigned long sp = (unsigned long)&sp, stack_left;
17949+
17950+ /* all kernel stacks are of the same size */
17951+ stack_left = sp & (THREAD_SIZE - 1);
17952+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
17953+}
17954+EXPORT_SYMBOL(pax_check_alloca);
17955+#endif
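pax_check_alloca() above relies on 32-bit kernel stacks being both THREAD_SIZE-sized and THREAD_SIZE-aligned, so sp & (THREAD_SIZE - 1) is exactly the space left below the stack pointer, and the BUG_ON() keeps a 256-byte guard. The arithmetic demonstrated on an aligned buffer (a userspace stand-in only, since ordinary process stacks carry no such alignment):

#include <stdalign.h>
#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 8192UL      /* assumed 8K, power of two */

int main(void)
{
        static alignas(8192) unsigned char stack[THREAD_SIZE];
        /* pretend the stack pointer sits 300 bytes above the bottom */
        uintptr_t sp = (uintptr_t)stack + 300;
        unsigned long stack_left = sp & (THREAD_SIZE - 1);

        printf("stack_left = %lu; an alloca of %lu or more would BUG()\n",
               stack_left, stack_left - 256);
        return 0;
}
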
17956diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
17957index b653675..51cc8c0 100644
17958--- a/arch/x86/kernel/dumpstack_64.c
17959+++ b/arch/x86/kernel/dumpstack_64.c
17960@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
17961 unsigned long *irq_stack_end =
17962 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
17963 unsigned used = 0;
17964- struct thread_info *tinfo;
17965 int graph = 0;
17966 unsigned long dummy;
17967+ void *stack_start;
17968
17969 if (!task)
17970 task = current;
17971@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
17972 * current stack address. If the stacks consist of nested
17973 * exceptions
17974 */
17975- tinfo = task_thread_info(task);
17976 for (;;) {
17977 char *id;
17978 unsigned long *estack_end;
17979+
17980 estack_end = in_exception_stack(cpu, (unsigned long)stack,
17981 &used, &id);
17982
17983@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
17984 if (ops->stack(data, id) < 0)
17985 break;
17986
17987- bp = ops->walk_stack(tinfo, stack, bp, ops,
17988+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
17989 data, estack_end, &graph);
17990 ops->stack(data, "<EOE>");
17991 /*
17992@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
17993 * second-to-last pointer (index -2 to end) in the
17994 * exception stack:
17995 */
17996+ if ((u16)estack_end[-1] != __KERNEL_DS)
17997+ goto out;
17998 stack = (unsigned long *) estack_end[-2];
17999 continue;
18000 }
18001@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18002 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
18003 if (ops->stack(data, "IRQ") < 0)
18004 break;
18005- bp = ops->walk_stack(tinfo, stack, bp,
18006+ bp = ops->walk_stack(task, irq_stack, stack, bp,
18007 ops, data, irq_stack_end, &graph);
18008 /*
18009 * We link to the next stack (which would be
18010@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18011 /*
18012 * This handles the process stack:
18013 */
18014- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
18015+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
18016+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
18017+out:
18018 put_cpu();
18019 }
18020 EXPORT_SYMBOL(dump_trace);
18021@@ -249,7 +253,7 @@ void show_regs(struct pt_regs *regs)
18022 {
18023 int i;
18024 unsigned long sp;
18025- const int cpu = smp_processor_id();
18026+ const int cpu = raw_smp_processor_id();
18027 struct task_struct *cur = current;
18028
18029 sp = regs->sp;
18030@@ -304,3 +308,50 @@ int is_valid_bugaddr(unsigned long ip)
18031
18032 return ud2 == 0x0b0f;
18033 }
18034+
18035+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18036+void pax_check_alloca(unsigned long size)
18037+{
18038+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
18039+ unsigned cpu, used;
18040+ char *id;
18041+
18042+ /* check the process stack first */
18043+ stack_start = (unsigned long)task_stack_page(current);
18044+ stack_end = stack_start + THREAD_SIZE;
18045+ if (likely(stack_start <= sp && sp < stack_end)) {
18046+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
18047+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
18048+ return;
18049+ }
18050+
18051+ cpu = get_cpu();
18052+
18053+ /* check the irq stacks */
18054+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
18055+ stack_start = stack_end - IRQ_STACK_SIZE;
18056+ if (stack_start <= sp && sp < stack_end) {
18057+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
18058+ put_cpu();
18059+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
18060+ return;
18061+ }
18062+
18063+ /* check the exception stacks */
18064+ used = 0;
18065+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
18066+ stack_start = stack_end - EXCEPTION_STKSZ;
18067+ if (stack_end && stack_start <= sp && sp < stack_end) {
18068+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
18069+ put_cpu();
18070+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
18071+ return;
18072+ }
18073+
18074+ put_cpu();
18075+
18076+ /* unknown stack */
18077+ BUG();
18078+}
18079+EXPORT_SYMBOL(pax_check_alloca);
18080+#endif
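The 64-bit variant has to try three windows in turn because x86_64 runs on separate process, IRQ, and exception stacks, each with its own size; every test is the same range check parameterized by that size. Its control flow, stripped of the per-cpu lookups (region addresses invented for the demo):

#include <stdio.h>

struct stack_region_demo {
        const char *name;
        unsigned long start, size;      /* window is [start, start + size) */
};

static const char *classify(unsigned long sp,
                            const struct stack_region_demo *r, int n)
{
        for (int i = 0; i < n; i++)
                if (sp - r[i].start < r[i].size)   /* unsigned range test */
                        return r[i].name;
        return "unknown";       /* where pax_check_alloca() calls BUG() */
}

int main(void)
{
        const struct stack_region_demo regions[] = {
                { "process",   0x10000, 16384 },        /* THREAD_SIZE     */
                { "irq",       0x30000, 16384 },        /* IRQ_STACK_SIZE  */
                { "exception", 0x50000,  4096 },        /* EXCEPTION_STKSZ */
        };

        printf("%s\n", classify(0x30100, regions, 3));  /* -> irq */
        return 0;
}
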
18081diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
18082index 9b9f18b..9fcaa04 100644
18083--- a/arch/x86/kernel/early_printk.c
18084+++ b/arch/x86/kernel/early_printk.c
18085@@ -7,6 +7,7 @@
18086 #include <linux/pci_regs.h>
18087 #include <linux/pci_ids.h>
18088 #include <linux/errno.h>
18089+#include <linux/sched.h>
18090 #include <asm/io.h>
18091 #include <asm/processor.h>
18092 #include <asm/fcntl.h>
18093diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
18094index 6ed91d9..6cc365b 100644
18095--- a/arch/x86/kernel/entry_32.S
18096+++ b/arch/x86/kernel/entry_32.S
18097@@ -177,13 +177,153 @@
18098 /*CFI_REL_OFFSET gs, PT_GS*/
18099 .endm
18100 .macro SET_KERNEL_GS reg
18101+
18102+#ifdef CONFIG_CC_STACKPROTECTOR
18103 movl $(__KERNEL_STACK_CANARY), \reg
18104+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
18105+ movl $(__USER_DS), \reg
18106+#else
18107+ xorl \reg, \reg
18108+#endif
18109+
18110 movl \reg, %gs
18111 .endm
18112
18113 #endif /* CONFIG_X86_32_LAZY_GS */
18114
18115-.macro SAVE_ALL
18116+.macro pax_enter_kernel
18117+#ifdef CONFIG_PAX_KERNEXEC
18118+ call pax_enter_kernel
18119+#endif
18120+.endm
18121+
18122+.macro pax_exit_kernel
18123+#ifdef CONFIG_PAX_KERNEXEC
18124+ call pax_exit_kernel
18125+#endif
18126+.endm
18127+
18128+#ifdef CONFIG_PAX_KERNEXEC
18129+ENTRY(pax_enter_kernel)
18130+#ifdef CONFIG_PARAVIRT
18131+ pushl %eax
18132+ pushl %ecx
18133+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
18134+ mov %eax, %esi
18135+#else
18136+ mov %cr0, %esi
18137+#endif
18138+ bts $16, %esi
18139+ jnc 1f
18140+ mov %cs, %esi
18141+ cmp $__KERNEL_CS, %esi
18142+ jz 3f
18143+ ljmp $__KERNEL_CS, $3f
18144+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
18145+2:
18146+#ifdef CONFIG_PARAVIRT
18147+ mov %esi, %eax
18148+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
18149+#else
18150+ mov %esi, %cr0
18151+#endif
18152+3:
18153+#ifdef CONFIG_PARAVIRT
18154+ popl %ecx
18155+ popl %eax
18156+#endif
18157+ ret
18158+ENDPROC(pax_enter_kernel)
18159+
18160+ENTRY(pax_exit_kernel)
18161+#ifdef CONFIG_PARAVIRT
18162+ pushl %eax
18163+ pushl %ecx
18164+#endif
18165+ mov %cs, %esi
18166+ cmp $__KERNEXEC_KERNEL_CS, %esi
18167+ jnz 2f
18168+#ifdef CONFIG_PARAVIRT
18169+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
18170+ mov %eax, %esi
18171+#else
18172+ mov %cr0, %esi
18173+#endif
18174+ btr $16, %esi
18175+ ljmp $__KERNEL_CS, $1f
18176+1:
18177+#ifdef CONFIG_PARAVIRT
18178+ mov %esi, %eax
18179+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
18180+#else
18181+ mov %esi, %cr0
18182+#endif
18183+2:
18184+#ifdef CONFIG_PARAVIRT
18185+ popl %ecx
18186+ popl %eax
18187+#endif
18188+ ret
18189+ENDPROC(pax_exit_kernel)
18190+#endif
18191+
18192+.macro pax_erase_kstack
18193+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18194+ call pax_erase_kstack
18195+#endif
18196+.endm
18197+
18198+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18199+/*
18200+ * ebp: thread_info
18201+ */
18202+ENTRY(pax_erase_kstack)
18203+ pushl %edi
18204+ pushl %ecx
18205+ pushl %eax
18206+
18207+ mov TI_lowest_stack(%ebp), %edi
18208+ mov $-0xBEEF, %eax
18209+ std
18210+
18211+1: mov %edi, %ecx
18212+ and $THREAD_SIZE_asm - 1, %ecx
18213+ shr $2, %ecx
18214+ repne scasl
18215+ jecxz 2f
18216+
18217+ cmp $2*16, %ecx
18218+ jc 2f
18219+
18220+ mov $2*16, %ecx
18221+ repe scasl
18222+ jecxz 2f
18223+ jne 1b
18224+
18225+2: cld
18226+ mov %esp, %ecx
18227+ sub %edi, %ecx
18228+
18229+ cmp $THREAD_SIZE_asm, %ecx
18230+ jb 3f
18231+ ud2
18232+3:
18233+
18234+ shr $2, %ecx
18235+ rep stosl
18236+
18237+ mov TI_task_thread_sp0(%ebp), %edi
18238+ sub $128, %edi
18239+ mov %edi, TI_lowest_stack(%ebp)
18240+
18241+ popl %eax
18242+ popl %ecx
18243+ popl %edi
18244+ ret
18245+ENDPROC(pax_erase_kstack)
18246+#endif
18247+
18248+.macro __SAVE_ALL _DS
18249 cld
18250 PUSH_GS
18251 pushl_cfi %fs
18252@@ -206,7 +346,7 @@
18253 CFI_REL_OFFSET ecx, 0
18254 pushl_cfi %ebx
18255 CFI_REL_OFFSET ebx, 0
18256- movl $(__USER_DS), %edx
18257+ movl $\_DS, %edx
18258 movl %edx, %ds
18259 movl %edx, %es
18260 movl $(__KERNEL_PERCPU), %edx
18261@@ -214,6 +354,15 @@
18262 SET_KERNEL_GS %edx
18263 .endm
18264
18265+.macro SAVE_ALL
18266+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
18267+ __SAVE_ALL __KERNEL_DS
18268+ pax_enter_kernel
18269+#else
18270+ __SAVE_ALL __USER_DS
18271+#endif
18272+.endm
18273+
18274 .macro RESTORE_INT_REGS
18275 popl_cfi %ebx
18276 CFI_RESTORE ebx
18277@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
18278 popfl_cfi
18279 jmp syscall_exit
18280 CFI_ENDPROC
18281-END(ret_from_fork)
18282+ENDPROC(ret_from_fork)
18283
18284 ENTRY(ret_from_kernel_thread)
18285 CFI_STARTPROC
18286@@ -344,7 +493,15 @@ ret_from_intr:
18287 andl $SEGMENT_RPL_MASK, %eax
18288 #endif
18289 cmpl $USER_RPL, %eax
18290+
18291+#ifdef CONFIG_PAX_KERNEXEC
18292+ jae resume_userspace
18293+
18294+ pax_exit_kernel
18295+ jmp resume_kernel
18296+#else
18297 jb resume_kernel # not returning to v8086 or userspace
18298+#endif
18299
18300 ENTRY(resume_userspace)
18301 LOCKDEP_SYS_EXIT
18302@@ -356,8 +513,8 @@ ENTRY(resume_userspace)
18303 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
18304 # int/exception return?
18305 jne work_pending
18306- jmp restore_all
18307-END(ret_from_exception)
18308+ jmp restore_all_pax
18309+ENDPROC(ret_from_exception)
18310
18311 #ifdef CONFIG_PREEMPT
18312 ENTRY(resume_kernel)
18313@@ -372,7 +529,7 @@ need_resched:
18314 jz restore_all
18315 call preempt_schedule_irq
18316 jmp need_resched
18317-END(resume_kernel)
18318+ENDPROC(resume_kernel)
18319 #endif
18320 CFI_ENDPROC
18321 /*
18322@@ -406,30 +563,45 @@ sysenter_past_esp:
18323 /*CFI_REL_OFFSET cs, 0*/
18324 /*
18325 * Push current_thread_info()->sysenter_return to the stack.
18326- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
18327- * pushed above; +8 corresponds to copy_thread's esp0 setting.
18328 */
18329- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
18330+ pushl_cfi $0
18331 CFI_REL_OFFSET eip, 0
18332
18333 pushl_cfi %eax
18334 SAVE_ALL
18335+ GET_THREAD_INFO(%ebp)
18336+ movl TI_sysenter_return(%ebp),%ebp
18337+ movl %ebp,PT_EIP(%esp)
18338 ENABLE_INTERRUPTS(CLBR_NONE)
18339
18340 /*
18341 * Load the potential sixth argument from user stack.
18342 * Careful about security.
18343 */
18344+ movl PT_OLDESP(%esp),%ebp
18345+
18346+#ifdef CONFIG_PAX_MEMORY_UDEREF
18347+ mov PT_OLDSS(%esp),%ds
18348+1: movl %ds:(%ebp),%ebp
18349+ push %ss
18350+ pop %ds
18351+#else
18352 cmpl $__PAGE_OFFSET-3,%ebp
18353 jae syscall_fault
18354 ASM_STAC
18355 1: movl (%ebp),%ebp
18356 ASM_CLAC
18357+#endif
18358+
18359 movl %ebp,PT_EBP(%esp)
18360 _ASM_EXTABLE(1b,syscall_fault)
18361
18362 GET_THREAD_INFO(%ebp)
18363
18364+#ifdef CONFIG_PAX_RANDKSTACK
18365+ pax_erase_kstack
18366+#endif
18367+
18368 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
18369 jnz sysenter_audit
18370 sysenter_do_call:
18371@@ -444,12 +616,24 @@ sysenter_do_call:
18372 testl $_TIF_ALLWORK_MASK, %ecx
18373 jne sysexit_audit
18374 sysenter_exit:
18375+
18376+#ifdef CONFIG_PAX_RANDKSTACK
18377+ pushl_cfi %eax
18378+ movl %esp, %eax
18379+ call pax_randomize_kstack
18380+ popl_cfi %eax
18381+#endif
18382+
18383+ pax_erase_kstack
18384+
18385 /* if something modifies registers it must also disable sysexit */
18386 movl PT_EIP(%esp), %edx
18387 movl PT_OLDESP(%esp), %ecx
18388 xorl %ebp,%ebp
18389 TRACE_IRQS_ON
18390 1: mov PT_FS(%esp), %fs
18391+2: mov PT_DS(%esp), %ds
18392+3: mov PT_ES(%esp), %es
18393 PTGS_TO_GS
18394 ENABLE_INTERRUPTS_SYSEXIT
18395
18396@@ -466,6 +650,9 @@ sysenter_audit:
18397 movl %eax,%edx /* 2nd arg: syscall number */
18398 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
18399 call __audit_syscall_entry
18400+
18401+ pax_erase_kstack
18402+
18403 pushl_cfi %ebx
18404 movl PT_EAX(%esp),%eax /* reload syscall number */
18405 jmp sysenter_do_call
18406@@ -491,10 +678,16 @@ sysexit_audit:
18407
18408 CFI_ENDPROC
18409 .pushsection .fixup,"ax"
18410-2: movl $0,PT_FS(%esp)
18411+4: movl $0,PT_FS(%esp)
18412+ jmp 1b
18413+5: movl $0,PT_DS(%esp)
18414+ jmp 1b
18415+6: movl $0,PT_ES(%esp)
18416 jmp 1b
18417 .popsection
18418- _ASM_EXTABLE(1b,2b)
18419+ _ASM_EXTABLE(1b,4b)
18420+ _ASM_EXTABLE(2b,5b)
18421+ _ASM_EXTABLE(3b,6b)
18422 PTGS_TO_GS_EX
18423 ENDPROC(ia32_sysenter_target)
18424
18425@@ -509,6 +702,11 @@ ENTRY(system_call)
18426 pushl_cfi %eax # save orig_eax
18427 SAVE_ALL
18428 GET_THREAD_INFO(%ebp)
18429+
18430+#ifdef CONFIG_PAX_RANDKSTACK
18431+ pax_erase_kstack
18432+#endif
18433+
18434 # system call tracing in operation / emulation
18435 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
18436 jnz syscall_trace_entry
18437@@ -527,6 +725,15 @@ syscall_exit:
18438 testl $_TIF_ALLWORK_MASK, %ecx # current->work
18439 jne syscall_exit_work
18440
18441+restore_all_pax:
18442+
18443+#ifdef CONFIG_PAX_RANDKSTACK
18444+ movl %esp, %eax
18445+ call pax_randomize_kstack
18446+#endif
18447+
18448+ pax_erase_kstack
18449+
18450 restore_all:
18451 TRACE_IRQS_IRET
18452 restore_all_notrace:
18453@@ -583,14 +790,34 @@ ldt_ss:
18454 * compensating for the offset by changing to the ESPFIX segment with
18455 * a base address that matches for the difference.
18456 */
18457-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
18458+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
18459 mov %esp, %edx /* load kernel esp */
18460 mov PT_OLDESP(%esp), %eax /* load userspace esp */
18461 mov %dx, %ax /* eax: new kernel esp */
18462 sub %eax, %edx /* offset (low word is 0) */
18463+#ifdef CONFIG_SMP
18464+ movl PER_CPU_VAR(cpu_number), %ebx
18465+ shll $PAGE_SHIFT_asm, %ebx
18466+ addl $cpu_gdt_table, %ebx
18467+#else
18468+ movl $cpu_gdt_table, %ebx
18469+#endif
18470 shr $16, %edx
18471- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
18472- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
18473+
18474+#ifdef CONFIG_PAX_KERNEXEC
18475+ mov %cr0, %esi
18476+ btr $16, %esi
18477+ mov %esi, %cr0
18478+#endif
18479+
18480+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
18481+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
18482+
18483+#ifdef CONFIG_PAX_KERNEXEC
18484+ bts $16, %esi
18485+ mov %esi, %cr0
18486+#endif
18487+
18488 pushl_cfi $__ESPFIX_SS
18489 pushl_cfi %eax /* new kernel esp */
18490 /* Disable interrupts, but do not irqtrace this section: we
18491@@ -619,20 +846,18 @@ work_resched:
18492 movl TI_flags(%ebp), %ecx
18493 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
18494 # than syscall tracing?
18495- jz restore_all
18496+ jz restore_all_pax
18497 testb $_TIF_NEED_RESCHED, %cl
18498 jnz work_resched
18499
18500 work_notifysig: # deal with pending signals and
18501 # notify-resume requests
18502+ movl %esp, %eax
18503 #ifdef CONFIG_VM86
18504 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
18505- movl %esp, %eax
18506 jne work_notifysig_v86 # returning to kernel-space or
18507 # vm86-space
18508 1:
18509-#else
18510- movl %esp, %eax
18511 #endif
18512 TRACE_IRQS_ON
18513 ENABLE_INTERRUPTS(CLBR_NONE)
18514@@ -653,7 +878,7 @@ work_notifysig_v86:
18515 movl %eax, %esp
18516 jmp 1b
18517 #endif
18518-END(work_pending)
18519+ENDPROC(work_pending)
18520
18521 # perform syscall exit tracing
18522 ALIGN
18523@@ -661,11 +886,14 @@ syscall_trace_entry:
18524 movl $-ENOSYS,PT_EAX(%esp)
18525 movl %esp, %eax
18526 call syscall_trace_enter
18527+
18528+ pax_erase_kstack
18529+
18530 /* What it returned is what we'll actually use. */
18531 cmpl $(NR_syscalls), %eax
18532 jnae syscall_call
18533 jmp syscall_exit
18534-END(syscall_trace_entry)
18535+ENDPROC(syscall_trace_entry)
18536
18537 # perform syscall exit tracing
18538 ALIGN
18539@@ -678,21 +906,25 @@ syscall_exit_work:
18540 movl %esp, %eax
18541 call syscall_trace_leave
18542 jmp resume_userspace
18543-END(syscall_exit_work)
18544+ENDPROC(syscall_exit_work)
18545 CFI_ENDPROC
18546
18547 RING0_INT_FRAME # can't unwind into user space anyway
18548 syscall_fault:
18549+#ifdef CONFIG_PAX_MEMORY_UDEREF
18550+ push %ss
18551+ pop %ds
18552+#endif
18553 ASM_CLAC
18554 GET_THREAD_INFO(%ebp)
18555 movl $-EFAULT,PT_EAX(%esp)
18556 jmp resume_userspace
18557-END(syscall_fault)
18558+ENDPROC(syscall_fault)
18559
18560 syscall_badsys:
18561 movl $-ENOSYS,PT_EAX(%esp)
18562 jmp resume_userspace
18563-END(syscall_badsys)
18564+ENDPROC(syscall_badsys)
18565 CFI_ENDPROC
18566 /*
18567 * End of kprobes section
18568@@ -753,8 +985,15 @@ PTREGSCALL1(vm86old)
18569 * normal stack and adjusts ESP with the matching offset.
18570 */
18571 /* fixup the stack */
18572- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
18573- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
18574+#ifdef CONFIG_SMP
18575+ movl PER_CPU_VAR(cpu_number), %ebx
18576+ shll $PAGE_SHIFT_asm, %ebx
18577+ addl $cpu_gdt_table, %ebx
18578+#else
18579+ movl $cpu_gdt_table, %ebx
18580+#endif
18581+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
18582+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
18583 shl $16, %eax
18584 addl %esp, %eax /* the adjusted stack pointer */
18585 pushl_cfi $__KERNEL_DS
18586@@ -807,7 +1046,7 @@ vector=vector+1
18587 .endr
18588 2: jmp common_interrupt
18589 .endr
18590-END(irq_entries_start)
18591+ENDPROC(irq_entries_start)
18592
18593 .previous
18594 END(interrupt)
18595@@ -858,7 +1097,7 @@ ENTRY(coprocessor_error)
18596 pushl_cfi $do_coprocessor_error
18597 jmp error_code
18598 CFI_ENDPROC
18599-END(coprocessor_error)
18600+ENDPROC(coprocessor_error)
18601
18602 ENTRY(simd_coprocessor_error)
18603 RING0_INT_FRAME
18604@@ -880,7 +1119,7 @@ ENTRY(simd_coprocessor_error)
18605 #endif
18606 jmp error_code
18607 CFI_ENDPROC
18608-END(simd_coprocessor_error)
18609+ENDPROC(simd_coprocessor_error)
18610
18611 ENTRY(device_not_available)
18612 RING0_INT_FRAME
18613@@ -889,18 +1128,18 @@ ENTRY(device_not_available)
18614 pushl_cfi $do_device_not_available
18615 jmp error_code
18616 CFI_ENDPROC
18617-END(device_not_available)
18618+ENDPROC(device_not_available)
18619
18620 #ifdef CONFIG_PARAVIRT
18621 ENTRY(native_iret)
18622 iret
18623 _ASM_EXTABLE(native_iret, iret_exc)
18624-END(native_iret)
18625+ENDPROC(native_iret)
18626
18627 ENTRY(native_irq_enable_sysexit)
18628 sti
18629 sysexit
18630-END(native_irq_enable_sysexit)
18631+ENDPROC(native_irq_enable_sysexit)
18632 #endif
18633
18634 ENTRY(overflow)
18635@@ -910,7 +1149,7 @@ ENTRY(overflow)
18636 pushl_cfi $do_overflow
18637 jmp error_code
18638 CFI_ENDPROC
18639-END(overflow)
18640+ENDPROC(overflow)
18641
18642 ENTRY(bounds)
18643 RING0_INT_FRAME
18644@@ -919,7 +1158,7 @@ ENTRY(bounds)
18645 pushl_cfi $do_bounds
18646 jmp error_code
18647 CFI_ENDPROC
18648-END(bounds)
18649+ENDPROC(bounds)
18650
18651 ENTRY(invalid_op)
18652 RING0_INT_FRAME
18653@@ -928,7 +1167,7 @@ ENTRY(invalid_op)
18654 pushl_cfi $do_invalid_op
18655 jmp error_code
18656 CFI_ENDPROC
18657-END(invalid_op)
18658+ENDPROC(invalid_op)
18659
18660 ENTRY(coprocessor_segment_overrun)
18661 RING0_INT_FRAME
18662@@ -937,7 +1176,7 @@ ENTRY(coprocessor_segment_overrun)
18663 pushl_cfi $do_coprocessor_segment_overrun
18664 jmp error_code
18665 CFI_ENDPROC
18666-END(coprocessor_segment_overrun)
18667+ENDPROC(coprocessor_segment_overrun)
18668
18669 ENTRY(invalid_TSS)
18670 RING0_EC_FRAME
18671@@ -945,7 +1184,7 @@ ENTRY(invalid_TSS)
18672 pushl_cfi $do_invalid_TSS
18673 jmp error_code
18674 CFI_ENDPROC
18675-END(invalid_TSS)
18676+ENDPROC(invalid_TSS)
18677
18678 ENTRY(segment_not_present)
18679 RING0_EC_FRAME
18680@@ -953,7 +1192,7 @@ ENTRY(segment_not_present)
18681 pushl_cfi $do_segment_not_present
18682 jmp error_code
18683 CFI_ENDPROC
18684-END(segment_not_present)
18685+ENDPROC(segment_not_present)
18686
18687 ENTRY(stack_segment)
18688 RING0_EC_FRAME
18689@@ -961,7 +1200,7 @@ ENTRY(stack_segment)
18690 pushl_cfi $do_stack_segment
18691 jmp error_code
18692 CFI_ENDPROC
18693-END(stack_segment)
18694+ENDPROC(stack_segment)
18695
18696 ENTRY(alignment_check)
18697 RING0_EC_FRAME
18698@@ -969,7 +1208,7 @@ ENTRY(alignment_check)
18699 pushl_cfi $do_alignment_check
18700 jmp error_code
18701 CFI_ENDPROC
18702-END(alignment_check)
18703+ENDPROC(alignment_check)
18704
18705 ENTRY(divide_error)
18706 RING0_INT_FRAME
18707@@ -978,7 +1217,7 @@ ENTRY(divide_error)
18708 pushl_cfi $do_divide_error
18709 jmp error_code
18710 CFI_ENDPROC
18711-END(divide_error)
18712+ENDPROC(divide_error)
18713
18714 #ifdef CONFIG_X86_MCE
18715 ENTRY(machine_check)
18716@@ -988,7 +1227,7 @@ ENTRY(machine_check)
18717 pushl_cfi machine_check_vector
18718 jmp error_code
18719 CFI_ENDPROC
18720-END(machine_check)
18721+ENDPROC(machine_check)
18722 #endif
18723
18724 ENTRY(spurious_interrupt_bug)
18725@@ -998,7 +1237,7 @@ ENTRY(spurious_interrupt_bug)
18726 pushl_cfi $do_spurious_interrupt_bug
18727 jmp error_code
18728 CFI_ENDPROC
18729-END(spurious_interrupt_bug)
18730+ENDPROC(spurious_interrupt_bug)
18731 /*
18732 * End of kprobes section
18733 */
18734@@ -1101,7 +1340,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
18735
18736 ENTRY(mcount)
18737 ret
18738-END(mcount)
18739+ENDPROC(mcount)
18740
18741 ENTRY(ftrace_caller)
18742 cmpl $0, function_trace_stop
18743@@ -1134,7 +1373,7 @@ ftrace_graph_call:
18744 .globl ftrace_stub
18745 ftrace_stub:
18746 ret
18747-END(ftrace_caller)
18748+ENDPROC(ftrace_caller)
18749
18750 ENTRY(ftrace_regs_caller)
18751 pushf /* push flags before compare (in cs location) */
18752@@ -1235,7 +1474,7 @@ trace:
18753 popl %ecx
18754 popl %eax
18755 jmp ftrace_stub
18756-END(mcount)
18757+ENDPROC(mcount)
18758 #endif /* CONFIG_DYNAMIC_FTRACE */
18759 #endif /* CONFIG_FUNCTION_TRACER */
18760
18761@@ -1253,7 +1492,7 @@ ENTRY(ftrace_graph_caller)
18762 popl %ecx
18763 popl %eax
18764 ret
18765-END(ftrace_graph_caller)
18766+ENDPROC(ftrace_graph_caller)
18767
18768 .globl return_to_handler
18769 return_to_handler:
18770@@ -1309,15 +1548,18 @@ error_code:
18771 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
18772 REG_TO_PTGS %ecx
18773 SET_KERNEL_GS %ecx
18774- movl $(__USER_DS), %ecx
18775+ movl $(__KERNEL_DS), %ecx
18776 movl %ecx, %ds
18777 movl %ecx, %es
18778+
18779+ pax_enter_kernel
18780+
18781 TRACE_IRQS_OFF
18782 movl %esp,%eax # pt_regs pointer
18783 call *%edi
18784 jmp ret_from_exception
18785 CFI_ENDPROC
18786-END(page_fault)
18787+ENDPROC(page_fault)
18788
18789 /*
18790 * Debug traps and NMI can happen at the one SYSENTER instruction
18791@@ -1360,7 +1602,7 @@ debug_stack_correct:
18792 call do_debug
18793 jmp ret_from_exception
18794 CFI_ENDPROC
18795-END(debug)
18796+ENDPROC(debug)
18797
18798 /*
18799 * NMI is doubly nasty. It can happen _while_ we're handling
18800@@ -1398,6 +1640,9 @@ nmi_stack_correct:
18801 xorl %edx,%edx # zero error code
18802 movl %esp,%eax # pt_regs pointer
18803 call do_nmi
18804+
18805+ pax_exit_kernel
18806+
18807 jmp restore_all_notrace
18808 CFI_ENDPROC
18809
18810@@ -1434,12 +1679,15 @@ nmi_espfix_stack:
18811 FIXUP_ESPFIX_STACK # %eax == %esp
18812 xorl %edx,%edx # zero error code
18813 call do_nmi
18814+
18815+ pax_exit_kernel
18816+
18817 RESTORE_REGS
18818 lss 12+4(%esp), %esp # back to espfix stack
18819 CFI_ADJUST_CFA_OFFSET -24
18820 jmp irq_return
18821 CFI_ENDPROC
18822-END(nmi)
18823+ENDPROC(nmi)
18824
18825 ENTRY(int3)
18826 RING0_INT_FRAME
18827@@ -1452,14 +1700,14 @@ ENTRY(int3)
18828 call do_int3
18829 jmp ret_from_exception
18830 CFI_ENDPROC
18831-END(int3)
18832+ENDPROC(int3)
18833
18834 ENTRY(general_protection)
18835 RING0_EC_FRAME
18836 pushl_cfi $do_general_protection
18837 jmp error_code
18838 CFI_ENDPROC
18839-END(general_protection)
18840+ENDPROC(general_protection)
18841
18842 #ifdef CONFIG_KVM_GUEST
18843 ENTRY(async_page_fault)
18844@@ -1468,7 +1716,7 @@ ENTRY(async_page_fault)
18845 pushl_cfi $do_async_page_fault
18846 jmp error_code
18847 CFI_ENDPROC
18848-END(async_page_fault)
18849+ENDPROC(async_page_fault)
18850 #endif
18851
18852 /*
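
The pax_erase_kstack routine added to entry_32.S above is the STACKLEAK primitive: starting from the per-thread TI_lowest_stack watermark it scans downward (std / repne scasl) for a run of the -0xBEEF poison marking stack that was already erased, then fills everything from that point up to the live %esp with the poison (cld / rep stosl) and finally resets the watermark to just below the thread's top of stack. A rough C model of the fill step, assuming a 32-bit poison constant and leaving out the run-detection heuristic:

    #include <stdint.h>

    #define KSTACK_POISON 0xFFFF4111u  /* 32-bit value of the $-0xBEEF fill */

    /* Overwrite the dead stack region between the deepest point the thread
     * reached ('lowest') and the currently live frame ('sp'). The stack
     * grows down toward 'lowest'; the fill walks up toward 'sp'. */
    static void erase_kstack(uint32_t *lowest, uint32_t *sp)
    {
        for (uint32_t *p = lowest; p < sp; p++)
            *p = KSTACK_POISON;        /* corresponds to the rep stosl fill */
    }
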
18853diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
18854index cb3c591..bc63707 100644
18855--- a/arch/x86/kernel/entry_64.S
18856+++ b/arch/x86/kernel/entry_64.S
18857@@ -59,6 +59,8 @@
18858 #include <asm/context_tracking.h>
18859 #include <asm/smap.h>
18860 #include <linux/err.h>
18861+#include <asm/pgtable.h>
18862+#include <asm/alternative-asm.h>
18863
18864 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
18865 #include <linux/elf-em.h>
18866@@ -80,8 +82,9 @@
18867 #ifdef CONFIG_DYNAMIC_FTRACE
18868
18869 ENTRY(function_hook)
18870+ pax_force_retaddr
18871 retq
18872-END(function_hook)
18873+ENDPROC(function_hook)
18874
18875 /* skip is set if stack has been adjusted */
18876 .macro ftrace_caller_setup skip=0
18877@@ -122,8 +125,9 @@ GLOBAL(ftrace_graph_call)
18878 #endif
18879
18880 GLOBAL(ftrace_stub)
18881+ pax_force_retaddr
18882 retq
18883-END(ftrace_caller)
18884+ENDPROC(ftrace_caller)
18885
18886 ENTRY(ftrace_regs_caller)
18887 /* Save the current flags before compare (in SS location)*/
18888@@ -191,7 +195,7 @@ ftrace_restore_flags:
18889 popfq
18890 jmp ftrace_stub
18891
18892-END(ftrace_regs_caller)
18893+ENDPROC(ftrace_regs_caller)
18894
18895
18896 #else /* ! CONFIG_DYNAMIC_FTRACE */
18897@@ -212,6 +216,7 @@ ENTRY(function_hook)
18898 #endif
18899
18900 GLOBAL(ftrace_stub)
18901+ pax_force_retaddr
18902 retq
18903
18904 trace:
18905@@ -225,12 +230,13 @@ trace:
18906 #endif
18907 subq $MCOUNT_INSN_SIZE, %rdi
18908
18909+ pax_force_fptr ftrace_trace_function
18910 call *ftrace_trace_function
18911
18912 MCOUNT_RESTORE_FRAME
18913
18914 jmp ftrace_stub
18915-END(function_hook)
18916+ENDPROC(function_hook)
18917 #endif /* CONFIG_DYNAMIC_FTRACE */
18918 #endif /* CONFIG_FUNCTION_TRACER */
18919
18920@@ -252,8 +258,9 @@ ENTRY(ftrace_graph_caller)
18921
18922 MCOUNT_RESTORE_FRAME
18923
18924+ pax_force_retaddr
18925 retq
18926-END(ftrace_graph_caller)
18927+ENDPROC(ftrace_graph_caller)
18928
18929 GLOBAL(return_to_handler)
18930 subq $24, %rsp
18931@@ -269,7 +276,9 @@ GLOBAL(return_to_handler)
18932 movq 8(%rsp), %rdx
18933 movq (%rsp), %rax
18934 addq $24, %rsp
18935+ pax_force_fptr %rdi
18936 jmp *%rdi
18937+ENDPROC(return_to_handler)
18938 #endif
18939
18940
18941@@ -284,6 +293,273 @@ ENTRY(native_usergs_sysret64)
18942 ENDPROC(native_usergs_sysret64)
18943 #endif /* CONFIG_PARAVIRT */
18944
18945+ .macro ljmpq sel, off
18946+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
18947+ .byte 0x48; ljmp *1234f(%rip)
18948+ .pushsection .rodata
18949+ .align 16
18950+ 1234: .quad \off; .word \sel
18951+ .popsection
18952+#else
18953+ pushq $\sel
18954+ pushq $\off
18955+ lretq
18956+#endif
18957+ .endm
18958+
18959+ .macro pax_enter_kernel
18960+ pax_set_fptr_mask
18961+#ifdef CONFIG_PAX_KERNEXEC
18962+ call pax_enter_kernel
18963+#endif
18964+ .endm
18965+
18966+ .macro pax_exit_kernel
18967+#ifdef CONFIG_PAX_KERNEXEC
18968+ call pax_exit_kernel
18969+#endif
18970+ .endm
18971+
18972+#ifdef CONFIG_PAX_KERNEXEC
18973+ENTRY(pax_enter_kernel)
18974+ pushq %rdi
18975+
18976+#ifdef CONFIG_PARAVIRT
18977+ PV_SAVE_REGS(CLBR_RDI)
18978+#endif
18979+
18980+ GET_CR0_INTO_RDI
18981+ bts $16,%rdi
18982+ jnc 3f
18983+ mov %cs,%edi
18984+ cmp $__KERNEL_CS,%edi
18985+ jnz 2f
18986+1:
18987+
18988+#ifdef CONFIG_PARAVIRT
18989+ PV_RESTORE_REGS(CLBR_RDI)
18990+#endif
18991+
18992+ popq %rdi
18993+ pax_force_retaddr
18994+ retq
18995+
18996+2: ljmpq __KERNEL_CS,1f
18997+3: ljmpq __KERNEXEC_KERNEL_CS,4f
18998+4: SET_RDI_INTO_CR0
18999+ jmp 1b
19000+ENDPROC(pax_enter_kernel)
19001+
19002+ENTRY(pax_exit_kernel)
19003+ pushq %rdi
19004+
19005+#ifdef CONFIG_PARAVIRT
19006+ PV_SAVE_REGS(CLBR_RDI)
19007+#endif
19008+
19009+ mov %cs,%rdi
19010+ cmp $__KERNEXEC_KERNEL_CS,%edi
19011+ jz 2f
19012+1:
19013+
19014+#ifdef CONFIG_PARAVIRT
19015+ PV_RESTORE_REGS(CLBR_RDI);
19016+#endif
19017+
19018+ popq %rdi
19019+ pax_force_retaddr
19020+ retq
19021+
19022+2: GET_CR0_INTO_RDI
19023+ btr $16,%rdi
19024+ ljmpq __KERNEL_CS,3f
19025+3: SET_RDI_INTO_CR0
19026+ jmp 1b
19027+ENDPROC(pax_exit_kernel)
19028+#endif
19029+
19030+ .macro pax_enter_kernel_user
19031+ pax_set_fptr_mask
19032+#ifdef CONFIG_PAX_MEMORY_UDEREF
19033+ call pax_enter_kernel_user
19034+#endif
19035+ .endm
19036+
19037+ .macro pax_exit_kernel_user
19038+#ifdef CONFIG_PAX_MEMORY_UDEREF
19039+ call pax_exit_kernel_user
19040+#endif
19041+#ifdef CONFIG_PAX_RANDKSTACK
19042+ pushq %rax
19043+ call pax_randomize_kstack
19044+ popq %rax
19045+#endif
19046+ .endm
19047+
19048+#ifdef CONFIG_PAX_MEMORY_UDEREF
19049+ENTRY(pax_enter_kernel_user)
19050+ pushq %rdi
19051+ pushq %rbx
19052+
19053+#ifdef CONFIG_PARAVIRT
19054+ PV_SAVE_REGS(CLBR_RDI)
19055+#endif
19056+
19057+ GET_CR3_INTO_RDI
19058+ mov %rdi,%rbx
19059+ add $__START_KERNEL_map,%rbx
19060+ sub phys_base(%rip),%rbx
19061+
19062+#ifdef CONFIG_PARAVIRT
19063+ pushq %rdi
19064+ cmpl $0, pv_info+PARAVIRT_enabled
19065+ jz 1f
19066+ i = 0
19067+ .rept USER_PGD_PTRS
19068+ mov i*8(%rbx),%rsi
19069+ mov $0,%sil
19070+ lea i*8(%rbx),%rdi
19071+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
19072+ i = i + 1
19073+ .endr
19074+ jmp 2f
19075+1:
19076+#endif
19077+
19078+ i = 0
19079+ .rept USER_PGD_PTRS
19080+ movb $0,i*8(%rbx)
19081+ i = i + 1
19082+ .endr
19083+
19084+#ifdef CONFIG_PARAVIRT
19085+2: popq %rdi
19086+#endif
19087+ SET_RDI_INTO_CR3
19088+
19089+#ifdef CONFIG_PAX_KERNEXEC
19090+ GET_CR0_INTO_RDI
19091+ bts $16,%rdi
19092+ SET_RDI_INTO_CR0
19093+#endif
19094+
19095+#ifdef CONFIG_PARAVIRT
19096+ PV_RESTORE_REGS(CLBR_RDI)
19097+#endif
19098+
19099+ popq %rbx
19100+ popq %rdi
19101+ pax_force_retaddr
19102+ retq
19103+ENDPROC(pax_enter_kernel_user)
19104+
19105+ENTRY(pax_exit_kernel_user)
19106+ push %rdi
19107+
19108+#ifdef CONFIG_PARAVIRT
19109+ pushq %rbx
19110+ PV_SAVE_REGS(CLBR_RDI)
19111+#endif
19112+
19113+#ifdef CONFIG_PAX_KERNEXEC
19114+ GET_CR0_INTO_RDI
19115+ btr $16,%rdi
19116+ SET_RDI_INTO_CR0
19117+#endif
19118+
19119+ GET_CR3_INTO_RDI
19120+ add $__START_KERNEL_map,%rdi
19121+ sub phys_base(%rip),%rdi
19122+
19123+#ifdef CONFIG_PARAVIRT
19124+ cmpl $0, pv_info+PARAVIRT_enabled
19125+ jz 1f
19126+ mov %rdi,%rbx
19127+ i = 0
19128+ .rept USER_PGD_PTRS
19129+ mov i*8(%rbx),%rsi
19130+ mov $0x67,%sil
19131+ lea i*8(%rbx),%rdi
19132+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
19133+ i = i + 1
19134+ .endr
19135+ jmp 2f
19136+1:
19137+#endif
19138+
19139+ i = 0
19140+ .rept USER_PGD_PTRS
19141+ movb $0x67,i*8(%rdi)
19142+ i = i + 1
19143+ .endr
19144+
19145+#ifdef CONFIG_PARAVIRT
19146+2: PV_RESTORE_REGS(CLBR_RDI)
19147+ popq %rbx
19148+#endif
19149+
19150+ popq %rdi
19151+ pax_force_retaddr
19152+ retq
19153+ENDPROC(pax_exit_kernel_user)
19154+#endif
19155+
19156+.macro pax_erase_kstack
19157+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
19158+ call pax_erase_kstack
19159+#endif
19160+.endm
19161+
19162+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
19163+ENTRY(pax_erase_kstack)
19164+ pushq %rdi
19165+ pushq %rcx
19166+ pushq %rax
19167+ pushq %r11
19168+
19169+ GET_THREAD_INFO(%r11)
19170+ mov TI_lowest_stack(%r11), %rdi
19171+ mov $-0xBEEF, %rax
19172+ std
19173+
19174+1: mov %edi, %ecx
19175+ and $THREAD_SIZE_asm - 1, %ecx
19176+ shr $3, %ecx
19177+ repne scasq
19178+ jecxz 2f
19179+
19180+ cmp $2*8, %ecx
19181+ jc 2f
19182+
19183+ mov $2*8, %ecx
19184+ repe scasq
19185+ jecxz 2f
19186+ jne 1b
19187+
19188+2: cld
19189+ mov %esp, %ecx
19190+ sub %edi, %ecx
19191+
19192+ cmp $THREAD_SIZE_asm, %rcx
19193+ jb 3f
19194+ ud2
19195+3:
19196+
19197+ shr $3, %ecx
19198+ rep stosq
19199+
19200+ mov TI_task_thread_sp0(%r11), %rdi
19201+ sub $256, %rdi
19202+ mov %rdi, TI_lowest_stack(%r11)
19203+
19204+ popq %r11
19205+ popq %rax
19206+ popq %rcx
19207+ popq %rdi
19208+ pax_force_retaddr
19209+ ret
19210+ENDPROC(pax_erase_kstack)
19211+#endif
19212
19213 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
19214 #ifdef CONFIG_TRACE_IRQFLAGS
19215@@ -375,8 +651,8 @@ ENDPROC(native_usergs_sysret64)
19216 .endm
19217
19218 .macro UNFAKE_STACK_FRAME
19219- addq $8*6, %rsp
19220- CFI_ADJUST_CFA_OFFSET -(6*8)
19221+ addq $8*6 + ARG_SKIP, %rsp
19222+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
19223 .endm
19224
19225 /*
19226@@ -463,7 +739,7 @@ ENDPROC(native_usergs_sysret64)
19227 movq %rsp, %rsi
19228
19229 leaq -RBP(%rsp),%rdi /* arg1 for handler */
19230- testl $3, CS-RBP(%rsi)
19231+ testb $3, CS-RBP(%rsi)
19232 je 1f
19233 SWAPGS
19234 /*
19235@@ -498,9 +774,10 @@ ENTRY(save_rest)
19236 movq_cfi r15, R15+16
19237 movq %r11, 8(%rsp) /* return address */
19238 FIXUP_TOP_OF_STACK %r11, 16
19239+ pax_force_retaddr
19240 ret
19241 CFI_ENDPROC
19242-END(save_rest)
19243+ENDPROC(save_rest)
19244
19245 /* save complete stack frame */
19246 .pushsection .kprobes.text, "ax"
19247@@ -529,9 +806,10 @@ ENTRY(save_paranoid)
19248 js 1f /* negative -> in kernel */
19249 SWAPGS
19250 xorl %ebx,%ebx
19251-1: ret
19252+1: pax_force_retaddr_bts
19253+ ret
19254 CFI_ENDPROC
19255-END(save_paranoid)
19256+ENDPROC(save_paranoid)
19257 .popsection
19258
19259 /*
19260@@ -553,7 +831,7 @@ ENTRY(ret_from_fork)
19261
19262 RESTORE_REST
19263
19264- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
19265+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
19266 jz 1f
19267
19268 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
19269@@ -571,7 +849,7 @@ ENTRY(ret_from_fork)
19270 RESTORE_REST
19271 jmp int_ret_from_sys_call
19272 CFI_ENDPROC
19273-END(ret_from_fork)
19274+ENDPROC(ret_from_fork)
19275
19276 /*
19277 * System call entry. Up to 6 arguments in registers are supported.
19278@@ -608,7 +886,7 @@ END(ret_from_fork)
19279 ENTRY(system_call)
19280 CFI_STARTPROC simple
19281 CFI_SIGNAL_FRAME
19282- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
19283+ CFI_DEF_CFA rsp,0
19284 CFI_REGISTER rip,rcx
19285 /*CFI_REGISTER rflags,r11*/
19286 SWAPGS_UNSAFE_STACK
19287@@ -621,16 +899,23 @@ GLOBAL(system_call_after_swapgs)
19288
19289 movq %rsp,PER_CPU_VAR(old_rsp)
19290 movq PER_CPU_VAR(kernel_stack),%rsp
19291+ SAVE_ARGS 8*6,0
19292+ pax_enter_kernel_user
19293+
19294+#ifdef CONFIG_PAX_RANDKSTACK
19295+ pax_erase_kstack
19296+#endif
19297+
19298 /*
19299 * No need to follow this irqs off/on section - it's straight
19300 * and short:
19301 */
19302 ENABLE_INTERRUPTS(CLBR_NONE)
19303- SAVE_ARGS 8,0
19304 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
19305 movq %rcx,RIP-ARGOFFSET(%rsp)
19306 CFI_REL_OFFSET rip,RIP-ARGOFFSET
19307- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
19308+ GET_THREAD_INFO(%rcx)
19309+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
19310 jnz tracesys
19311 system_call_fastpath:
19312 #if __SYSCALL_MASK == ~0
19313@@ -640,7 +925,7 @@ system_call_fastpath:
19314 cmpl $__NR_syscall_max,%eax
19315 #endif
19316 ja badsys
19317- movq %r10,%rcx
19318+ movq R10-ARGOFFSET(%rsp),%rcx
19319 call *sys_call_table(,%rax,8) # XXX: rip relative
19320 movq %rax,RAX-ARGOFFSET(%rsp)
19321 /*
19322@@ -654,10 +939,13 @@ sysret_check:
19323 LOCKDEP_SYS_EXIT
19324 DISABLE_INTERRUPTS(CLBR_NONE)
19325 TRACE_IRQS_OFF
19326- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
19327+ GET_THREAD_INFO(%rcx)
19328+ movl TI_flags(%rcx),%edx
19329 andl %edi,%edx
19330 jnz sysret_careful
19331 CFI_REMEMBER_STATE
19332+ pax_exit_kernel_user
19333+ pax_erase_kstack
19334 /*
19335 * sysretq will re-enable interrupts:
19336 */
19337@@ -709,14 +997,18 @@ badsys:
19338 * jump back to the normal fast path.
19339 */
19340 auditsys:
19341- movq %r10,%r9 /* 6th arg: 4th syscall arg */
19342+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
19343 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
19344 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
19345 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
19346 movq %rax,%rsi /* 2nd arg: syscall number */
19347 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
19348 call __audit_syscall_entry
19349+
19350+ pax_erase_kstack
19351+
19352 LOAD_ARGS 0 /* reload call-clobbered registers */
19353+ pax_set_fptr_mask
19354 jmp system_call_fastpath
19355
19356 /*
19357@@ -737,7 +1029,7 @@ sysret_audit:
19358 /* Do syscall tracing */
19359 tracesys:
19360 #ifdef CONFIG_AUDITSYSCALL
19361- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
19362+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
19363 jz auditsys
19364 #endif
19365 SAVE_REST
19366@@ -745,12 +1037,16 @@ tracesys:
19367 FIXUP_TOP_OF_STACK %rdi
19368 movq %rsp,%rdi
19369 call syscall_trace_enter
19370+
19371+ pax_erase_kstack
19372+
19373 /*
19374 * Reload arg registers from stack in case ptrace changed them.
19375 * We don't reload %rax because syscall_trace_enter() returned
19376 * the value it wants us to use in the table lookup.
19377 */
19378 LOAD_ARGS ARGOFFSET, 1
19379+ pax_set_fptr_mask
19380 RESTORE_REST
19381 #if __SYSCALL_MASK == ~0
19382 cmpq $__NR_syscall_max,%rax
19383@@ -759,7 +1055,7 @@ tracesys:
19384 cmpl $__NR_syscall_max,%eax
19385 #endif
19386 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
19387- movq %r10,%rcx /* fixup for C */
19388+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
19389 call *sys_call_table(,%rax,8)
19390 movq %rax,RAX-ARGOFFSET(%rsp)
19391 /* Use IRET because user could have changed frame */
19392@@ -780,7 +1076,9 @@ GLOBAL(int_with_check)
19393 andl %edi,%edx
19394 jnz int_careful
19395 andl $~TS_COMPAT,TI_status(%rcx)
19396- jmp retint_swapgs
19397+ pax_exit_kernel_user
19398+ pax_erase_kstack
19399+ jmp retint_swapgs_pax
19400
19401 /* Either reschedule or signal or syscall exit tracking needed. */
19402 /* First do a reschedule test. */
19403@@ -826,7 +1124,7 @@ int_restore_rest:
19404 TRACE_IRQS_OFF
19405 jmp int_with_check
19406 CFI_ENDPROC
19407-END(system_call)
19408+ENDPROC(system_call)
19409
19410 /*
19411 * Certain special system calls that need to save a complete full stack frame.
19412@@ -842,7 +1140,7 @@ ENTRY(\label)
19413 call \func
19414 jmp ptregscall_common
19415 CFI_ENDPROC
19416-END(\label)
19417+ENDPROC(\label)
19418 .endm
19419
19420 .macro FORK_LIKE func
19421@@ -856,9 +1154,10 @@ ENTRY(stub_\func)
19422 DEFAULT_FRAME 0 8 /* offset 8: return address */
19423 call sys_\func
19424 RESTORE_TOP_OF_STACK %r11, 8
19425+ pax_force_retaddr
19426 ret $REST_SKIP /* pop extended registers */
19427 CFI_ENDPROC
19428-END(stub_\func)
19429+ENDPROC(stub_\func)
19430 .endm
19431
19432 FORK_LIKE clone
19433@@ -875,9 +1174,10 @@ ENTRY(ptregscall_common)
19434 movq_cfi_restore R12+8, r12
19435 movq_cfi_restore RBP+8, rbp
19436 movq_cfi_restore RBX+8, rbx
19437+ pax_force_retaddr
19438 ret $REST_SKIP /* pop extended registers */
19439 CFI_ENDPROC
19440-END(ptregscall_common)
19441+ENDPROC(ptregscall_common)
19442
19443 ENTRY(stub_execve)
19444 CFI_STARTPROC
19445@@ -891,7 +1191,7 @@ ENTRY(stub_execve)
19446 RESTORE_REST
19447 jmp int_ret_from_sys_call
19448 CFI_ENDPROC
19449-END(stub_execve)
19450+ENDPROC(stub_execve)
19451
19452 /*
19453 * sigreturn is special because it needs to restore all registers on return.
19454@@ -909,7 +1209,7 @@ ENTRY(stub_rt_sigreturn)
19455 RESTORE_REST
19456 jmp int_ret_from_sys_call
19457 CFI_ENDPROC
19458-END(stub_rt_sigreturn)
19459+ENDPROC(stub_rt_sigreturn)
19460
19461 #ifdef CONFIG_X86_X32_ABI
19462 ENTRY(stub_x32_rt_sigreturn)
19463@@ -975,7 +1275,7 @@ vector=vector+1
19464 2: jmp common_interrupt
19465 .endr
19466 CFI_ENDPROC
19467-END(irq_entries_start)
19468+ENDPROC(irq_entries_start)
19469
19470 .previous
19471 END(interrupt)
19472@@ -995,6 +1295,16 @@ END(interrupt)
19473 subq $ORIG_RAX-RBP, %rsp
19474 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
19475 SAVE_ARGS_IRQ
19476+#ifdef CONFIG_PAX_MEMORY_UDEREF
19477+ testb $3, CS(%rdi)
19478+ jnz 1f
19479+ pax_enter_kernel
19480+ jmp 2f
19481+1: pax_enter_kernel_user
19482+2:
19483+#else
19484+ pax_enter_kernel
19485+#endif
19486 call \func
19487 .endm
19488
19489@@ -1027,7 +1337,7 @@ ret_from_intr:
19490
19491 exit_intr:
19492 GET_THREAD_INFO(%rcx)
19493- testl $3,CS-ARGOFFSET(%rsp)
19494+ testb $3,CS-ARGOFFSET(%rsp)
19495 je retint_kernel
19496
19497 /* Interrupt came from user space */
19498@@ -1049,12 +1359,16 @@ retint_swapgs: /* return to user-space */
19499 * The iretq could re-enable interrupts:
19500 */
19501 DISABLE_INTERRUPTS(CLBR_ANY)
19502+ pax_exit_kernel_user
19503+retint_swapgs_pax:
19504 TRACE_IRQS_IRETQ
19505 SWAPGS
19506 jmp restore_args
19507
19508 retint_restore_args: /* return to kernel space */
19509 DISABLE_INTERRUPTS(CLBR_ANY)
19510+ pax_exit_kernel
19511+ pax_force_retaddr (RIP-ARGOFFSET)
19512 /*
19513 * The iretq could re-enable interrupts:
19514 */
19515@@ -1137,7 +1451,7 @@ ENTRY(retint_kernel)
19516 #endif
19517
19518 CFI_ENDPROC
19519-END(common_interrupt)
19520+ENDPROC(common_interrupt)
19521 /*
19522 * End of kprobes section
19523 */
19524@@ -1155,7 +1469,7 @@ ENTRY(\sym)
19525 interrupt \do_sym
19526 jmp ret_from_intr
19527 CFI_ENDPROC
19528-END(\sym)
19529+ENDPROC(\sym)
19530 .endm
19531
19532 #ifdef CONFIG_SMP
19533@@ -1211,12 +1525,22 @@ ENTRY(\sym)
19534 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19535 call error_entry
19536 DEFAULT_FRAME 0
19537+#ifdef CONFIG_PAX_MEMORY_UDEREF
19538+ testb $3, CS(%rsp)
19539+ jnz 1f
19540+ pax_enter_kernel
19541+ jmp 2f
19542+1: pax_enter_kernel_user
19543+2:
19544+#else
19545+ pax_enter_kernel
19546+#endif
19547 movq %rsp,%rdi /* pt_regs pointer */
19548 xorl %esi,%esi /* no error code */
19549 call \do_sym
19550 jmp error_exit /* %ebx: no swapgs flag */
19551 CFI_ENDPROC
19552-END(\sym)
19553+ENDPROC(\sym)
19554 .endm
19555
19556 .macro paranoidzeroentry sym do_sym
19557@@ -1229,15 +1553,25 @@ ENTRY(\sym)
19558 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19559 call save_paranoid
19560 TRACE_IRQS_OFF
19561+#ifdef CONFIG_PAX_MEMORY_UDEREF
19562+ testb $3, CS(%rsp)
19563+ jnz 1f
19564+ pax_enter_kernel
19565+ jmp 2f
19566+1: pax_enter_kernel_user
19567+2:
19568+#else
19569+ pax_enter_kernel
19570+#endif
19571 movq %rsp,%rdi /* pt_regs pointer */
19572 xorl %esi,%esi /* no error code */
19573 call \do_sym
19574 jmp paranoid_exit /* %ebx: no swapgs flag */
19575 CFI_ENDPROC
19576-END(\sym)
19577+ENDPROC(\sym)
19578 .endm
19579
19580-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
19581+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
19582 .macro paranoidzeroentry_ist sym do_sym ist
19583 ENTRY(\sym)
19584 INTR_FRAME
19585@@ -1248,14 +1582,30 @@ ENTRY(\sym)
19586 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19587 call save_paranoid
19588 TRACE_IRQS_OFF_DEBUG
19589+#ifdef CONFIG_PAX_MEMORY_UDEREF
19590+ testb $3, CS(%rsp)
19591+ jnz 1f
19592+ pax_enter_kernel
19593+ jmp 2f
19594+1: pax_enter_kernel_user
19595+2:
19596+#else
19597+ pax_enter_kernel
19598+#endif
19599 movq %rsp,%rdi /* pt_regs pointer */
19600 xorl %esi,%esi /* no error code */
19601+#ifdef CONFIG_SMP
19602+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
19603+ lea init_tss(%r12), %r12
19604+#else
19605+ lea init_tss(%rip), %r12
19606+#endif
19607 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
19608 call \do_sym
19609 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
19610 jmp paranoid_exit /* %ebx: no swapgs flag */
19611 CFI_ENDPROC
19612-END(\sym)
19613+ENDPROC(\sym)
19614 .endm
19615
19616 .macro errorentry sym do_sym
19617@@ -1267,13 +1617,23 @@ ENTRY(\sym)
19618 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19619 call error_entry
19620 DEFAULT_FRAME 0
19621+#ifdef CONFIG_PAX_MEMORY_UDEREF
19622+ testb $3, CS(%rsp)
19623+ jnz 1f
19624+ pax_enter_kernel
19625+ jmp 2f
19626+1: pax_enter_kernel_user
19627+2:
19628+#else
19629+ pax_enter_kernel
19630+#endif
19631 movq %rsp,%rdi /* pt_regs pointer */
19632 movq ORIG_RAX(%rsp),%rsi /* get error code */
19633 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
19634 call \do_sym
19635 jmp error_exit /* %ebx: no swapgs flag */
19636 CFI_ENDPROC
19637-END(\sym)
19638+ENDPROC(\sym)
19639 .endm
19640
19641 /* error code is on the stack already */
19642@@ -1287,13 +1647,23 @@ ENTRY(\sym)
19643 call save_paranoid
19644 DEFAULT_FRAME 0
19645 TRACE_IRQS_OFF
19646+#ifdef CONFIG_PAX_MEMORY_UDEREF
19647+ testb $3, CS(%rsp)
19648+ jnz 1f
19649+ pax_enter_kernel
19650+ jmp 2f
19651+1: pax_enter_kernel_user
19652+2:
19653+#else
19654+ pax_enter_kernel
19655+#endif
19656 movq %rsp,%rdi /* pt_regs pointer */
19657 movq ORIG_RAX(%rsp),%rsi /* get error code */
19658 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
19659 call \do_sym
19660 jmp paranoid_exit /* %ebx: no swapgs flag */
19661 CFI_ENDPROC
19662-END(\sym)
19663+ENDPROC(\sym)
19664 .endm
19665
19666 zeroentry divide_error do_divide_error
19667@@ -1323,9 +1693,10 @@ gs_change:
19668 2: mfence /* workaround */
19669 SWAPGS
19670 popfq_cfi
19671+ pax_force_retaddr
19672 ret
19673 CFI_ENDPROC
19674-END(native_load_gs_index)
19675+ENDPROC(native_load_gs_index)
19676
19677 _ASM_EXTABLE(gs_change,bad_gs)
19678 .section .fixup,"ax"
19679@@ -1353,9 +1724,10 @@ ENTRY(call_softirq)
19680 CFI_DEF_CFA_REGISTER rsp
19681 CFI_ADJUST_CFA_OFFSET -8
19682 decl PER_CPU_VAR(irq_count)
19683+ pax_force_retaddr
19684 ret
19685 CFI_ENDPROC
19686-END(call_softirq)
19687+ENDPROC(call_softirq)
19688
19689 #ifdef CONFIG_XEN
19690 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
19691@@ -1393,7 +1765,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
19692 decl PER_CPU_VAR(irq_count)
19693 jmp error_exit
19694 CFI_ENDPROC
19695-END(xen_do_hypervisor_callback)
19696+ENDPROC(xen_do_hypervisor_callback)
19697
19698 /*
19699 * Hypervisor uses this for application faults while it executes.
19700@@ -1452,7 +1824,7 @@ ENTRY(xen_failsafe_callback)
19701 SAVE_ALL
19702 jmp error_exit
19703 CFI_ENDPROC
19704-END(xen_failsafe_callback)
19705+ENDPROC(xen_failsafe_callback)
19706
19707 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
19708 xen_hvm_callback_vector xen_evtchn_do_upcall
19709@@ -1501,16 +1873,31 @@ ENTRY(paranoid_exit)
19710 TRACE_IRQS_OFF_DEBUG
19711 testl %ebx,%ebx /* swapgs needed? */
19712 jnz paranoid_restore
19713- testl $3,CS(%rsp)
19714+ testb $3,CS(%rsp)
19715 jnz paranoid_userspace
19716+#ifdef CONFIG_PAX_MEMORY_UDEREF
19717+ pax_exit_kernel
19718+ TRACE_IRQS_IRETQ 0
19719+ SWAPGS_UNSAFE_STACK
19720+ RESTORE_ALL 8
19721+ pax_force_retaddr_bts
19722+ jmp irq_return
19723+#endif
19724 paranoid_swapgs:
19725+#ifdef CONFIG_PAX_MEMORY_UDEREF
19726+ pax_exit_kernel_user
19727+#else
19728+ pax_exit_kernel
19729+#endif
19730 TRACE_IRQS_IRETQ 0
19731 SWAPGS_UNSAFE_STACK
19732 RESTORE_ALL 8
19733 jmp irq_return
19734 paranoid_restore:
19735+ pax_exit_kernel
19736 TRACE_IRQS_IRETQ_DEBUG 0
19737 RESTORE_ALL 8
19738+ pax_force_retaddr_bts
19739 jmp irq_return
19740 paranoid_userspace:
19741 GET_THREAD_INFO(%rcx)
19742@@ -1539,7 +1926,7 @@ paranoid_schedule:
19743 TRACE_IRQS_OFF
19744 jmp paranoid_userspace
19745 CFI_ENDPROC
19746-END(paranoid_exit)
19747+ENDPROC(paranoid_exit)
19748
19749 /*
19750 * Exception entry point. This expects an error code/orig_rax on the stack.
19751@@ -1566,12 +1953,13 @@ ENTRY(error_entry)
19752 movq_cfi r14, R14+8
19753 movq_cfi r15, R15+8
19754 xorl %ebx,%ebx
19755- testl $3,CS+8(%rsp)
19756+ testb $3,CS+8(%rsp)
19757 je error_kernelspace
19758 error_swapgs:
19759 SWAPGS
19760 error_sti:
19761 TRACE_IRQS_OFF
19762+ pax_force_retaddr_bts
19763 ret
19764
19765 /*
19766@@ -1598,7 +1986,7 @@ bstep_iret:
19767 movq %rcx,RIP+8(%rsp)
19768 jmp error_swapgs
19769 CFI_ENDPROC
19770-END(error_entry)
19771+ENDPROC(error_entry)
19772
19773
19774 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
19775@@ -1618,7 +2006,7 @@ ENTRY(error_exit)
19776 jnz retint_careful
19777 jmp retint_swapgs
19778 CFI_ENDPROC
19779-END(error_exit)
19780+ENDPROC(error_exit)
19781
19782 /*
19783 * Test if a given stack is an NMI stack or not.
19784@@ -1676,9 +2064,11 @@ ENTRY(nmi)
19785 * If %cs was not the kernel segment, then the NMI triggered in user
19786 * space, which means it is definitely not nested.
19787 */
19788+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
19789+ je 1f
19790 cmpl $__KERNEL_CS, 16(%rsp)
19791 jne first_nmi
19792-
19793+1:
19794 /*
19795 * Check the special variable on the stack to see if NMIs are
19796 * executing.
19797@@ -1847,6 +2237,17 @@ end_repeat_nmi:
19798 */
19799 movq %cr2, %r12
19800
19801+#ifdef CONFIG_PAX_MEMORY_UDEREF
19802+ testb $3, CS(%rsp)
19803+ jnz 1f
19804+ pax_enter_kernel
19805+ jmp 2f
19806+1: pax_enter_kernel_user
19807+2:
19808+#else
19809+ pax_enter_kernel
19810+#endif
19811+
19812 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
19813 movq %rsp,%rdi
19814 movq $-1,%rsi
19815@@ -1862,23 +2263,34 @@ end_repeat_nmi:
19816 testl %ebx,%ebx /* swapgs needed? */
19817 jnz nmi_restore
19818 nmi_swapgs:
19819+#ifdef CONFIG_PAX_MEMORY_UDEREF
19820+ pax_exit_kernel_user
19821+#else
19822+ pax_exit_kernel
19823+#endif
19824 SWAPGS_UNSAFE_STACK
19825+ RESTORE_ALL 6*8
19826+ /* Clear the NMI executing stack variable */
19827+ movq $0, 5*8(%rsp)
19828+ jmp irq_return
19829 nmi_restore:
19830+ pax_exit_kernel
19831 /* Pop the extra iret frame at once */
19832 RESTORE_ALL 6*8
19833+ pax_force_retaddr_bts
19834
19835 /* Clear the NMI executing stack variable */
19836 movq $0, 5*8(%rsp)
19837 jmp irq_return
19838 CFI_ENDPROC
19839-END(nmi)
19840+ENDPROC(nmi)
19841
19842 ENTRY(ignore_sysret)
19843 CFI_STARTPROC
19844 mov $-ENOSYS,%eax
19845 sysret
19846 CFI_ENDPROC
19847-END(ignore_sysret)
19848+ENDPROC(ignore_sysret)
19849
19850 /*
19851 * End of kprobes section
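
Among the entry_64.S changes above, the UDEREF pair pax_enter_kernel_user / pax_exit_kernel_user carries the userland-unmapping scheme: on kernel entry the low byte of every user-space PGD entry is zeroed (movb $0), clearing _PAGE_PRESENT and the other permission bits so user pages cannot be dereferenced from kernel mode, and on exit 0x67 (Present | RW | User | Accessed | Dirty) is written back. A schematic C rendering, with USER_PGD_PTRS assumed to be the 256-entry user half of an amd64 PGD and the CR0/CR3/paravirt bookkeeping omitted:

    #include <stdint.h>

    #define USER_PGD_PTRS 256  /* assumed: user half of a 512-entry PGD */

    /* Entry side: knock out the permission/present bits kept in the low
     * byte of each user PGD entry ('movb $0, i*8(%rbx)' in the assembly). */
    static void enter_kernel_user(uint64_t *pgd)
    {
        for (int i = 0; i < USER_PGD_PTRS; i++)
            pgd[i] &= ~0xffULL;
    }

    /* Exit side: restore Present|RW|User|Accessed|Dirty
     * ('movb $0x67, i*8(%rdi)' in the assembly). */
    static void exit_kernel_user(uint64_t *pgd)
    {
        for (int i = 0; i < USER_PGD_PTRS; i++)
            pgd[i] |= 0x67ULL;
    }
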
19852diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
19853index 1d41402..af9a46a 100644
19854--- a/arch/x86/kernel/ftrace.c
19855+++ b/arch/x86/kernel/ftrace.c
19856@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
19857 {
19858 unsigned char replaced[MCOUNT_INSN_SIZE];
19859
19860+ ip = ktla_ktva(ip);
19861+
19862 /*
19863 * Note: Due to modules and __init, code can
19864 * disappear and change, we need to protect against faulting
19865@@ -227,7 +229,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
19866 unsigned char old[MCOUNT_INSN_SIZE], *new;
19867 int ret;
19868
19869- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
19870+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
19871 new = ftrace_call_replace(ip, (unsigned long)func);
19872
19873 /* See comment above by declaration of modifying_ftrace_code */
19874@@ -238,7 +240,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
19875 /* Also update the regs callback function */
19876 if (!ret) {
19877 ip = (unsigned long)(&ftrace_regs_call);
19878- memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
19879+ memcpy(old, ktla_ktva((void *)&ftrace_regs_call), MCOUNT_INSN_SIZE);
19880 new = ftrace_call_replace(ip, (unsigned long)func);
19881 ret = ftrace_modify_code(ip, old, new);
19882 }
19883@@ -279,7 +281,7 @@ static int ftrace_write(unsigned long ip, const char *val, int size)
19884 * kernel identity mapping to modify code.
19885 */
19886 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
19887- ip = (unsigned long)__va(__pa(ip));
19888+ ip = (unsigned long)__va(__pa(ktla_ktva(ip)));
19889
19890 return probe_kernel_write((void *)ip, val, size);
19891 }
19892@@ -289,7 +291,7 @@ static int add_break(unsigned long ip, const char *old)
19893 unsigned char replaced[MCOUNT_INSN_SIZE];
19894 unsigned char brk = BREAKPOINT_INSTRUCTION;
19895
19896- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
19897+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
19898 return -EFAULT;
19899
19900 /* Make sure it is what we expect it to be */
19901@@ -637,7 +639,7 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
19902 return ret;
19903
19904 fail_update:
19905- probe_kernel_write((void *)ip, &old_code[0], 1);
19906+ probe_kernel_write((void *)ktla_ktva(ip), &old_code[0], 1);
19907 goto out;
19908 }
19909
19910@@ -670,6 +672,8 @@ static int ftrace_mod_jmp(unsigned long ip,
19911 {
19912 unsigned char code[MCOUNT_INSN_SIZE];
19913
19914+ ip = ktla_ktva(ip);
19915+
19916 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
19917 return -EFAULT;
19918
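
Every ftrace.c change above funnels instruction addresses through ktla_ktva() before kernel text is read or patched. Under KERNEXEC the executable mapping of the kernel text and the mapping used for ordinary data access no longer coincide, and ktla_ktva() appears to translate a kernel-text linear address into the one the data path can dereference; without KERNEXEC it degenerates to the identity. A minimal assumed model (the placeholder offset is hypothetical; the real definition lives in the patched pgtable headers):

    #include <stdint.h>

    /* Assumed: the constant shift between the kernel's executable text
     * mapping and its linear/data mapping; zero when KERNEXEC is off. */
    static const uintptr_t kernel_text_offset = 0; /* hypothetical */

    /* kernel-text linear address -> address the data mapping can touch */
    static inline uintptr_t ktla_ktva(uintptr_t addr)
    {
        return addr + kernel_text_offset;
    }
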
19919diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
19920index c18f59d..9c0c9f6 100644
19921--- a/arch/x86/kernel/head32.c
19922+++ b/arch/x86/kernel/head32.c
19923@@ -18,6 +18,7 @@
19924 #include <asm/io_apic.h>
19925 #include <asm/bios_ebda.h>
19926 #include <asm/tlbflush.h>
19927+#include <asm/boot.h>
19928
19929 static void __init i386_default_early_setup(void)
19930 {
19931@@ -30,8 +31,7 @@ static void __init i386_default_early_setup(void)
19932
19933 void __init i386_start_kernel(void)
19934 {
19935- memblock_reserve(__pa_symbol(&_text),
19936- __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
19937+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
19938
19939 #ifdef CONFIG_BLK_DEV_INITRD
19940 /* Reserve INITRD */
19941diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
19942index c8932c7..d56b622 100644
19943--- a/arch/x86/kernel/head_32.S
19944+++ b/arch/x86/kernel/head_32.S
19945@@ -26,6 +26,12 @@
19946 /* Physical address */
19947 #define pa(X) ((X) - __PAGE_OFFSET)
19948
19949+#ifdef CONFIG_PAX_KERNEXEC
19950+#define ta(X) (X)
19951+#else
19952+#define ta(X) ((X) - __PAGE_OFFSET)
19953+#endif
19954+
19955 /*
19956 * References to members of the new_cpu_data structure.
19957 */
19958@@ -55,11 +61,7 @@
19959 * and small than max_low_pfn, otherwise will waste some page table entries
19960 */
19961
19962-#if PTRS_PER_PMD > 1
19963-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
19964-#else
19965-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
19966-#endif
19967+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
19968
19969 /* Number of possible pages in the lowmem region */
19970 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
19971@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
19972 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
19973
19974 /*
19975+ * Real beginning of normal "text" segment
19976+ */
19977+ENTRY(stext)
19978+ENTRY(_stext)
19979+
19980+/*
19981 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
19982 * %esi points to the real-mode code as a 32-bit pointer.
19983 * CS and DS must be 4 GB flat segments, but we don't depend on
19984@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
19985 * can.
19986 */
19987 __HEAD
19988+
19989+#ifdef CONFIG_PAX_KERNEXEC
19990+ jmp startup_32
19991+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
19992+.fill PAGE_SIZE-5,1,0xcc
19993+#endif
19994+
19995 ENTRY(startup_32)
19996 movl pa(stack_start),%ecx
19997
19998@@ -106,6 +121,59 @@ ENTRY(startup_32)
19999 2:
20000 leal -__PAGE_OFFSET(%ecx),%esp
20001
20002+#ifdef CONFIG_SMP
20003+ movl $pa(cpu_gdt_table),%edi
20004+ movl $__per_cpu_load,%eax
20005+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
20006+ rorl $16,%eax
20007+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
20008+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
20009+ movl $__per_cpu_end - 1,%eax
20010+ subl $__per_cpu_start,%eax
20011+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
20012+#endif
20013+
20014+#ifdef CONFIG_PAX_MEMORY_UDEREF
20015+ movl $NR_CPUS,%ecx
20016+ movl $pa(cpu_gdt_table),%edi
20017+1:
20018+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
20019+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
20020+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
20021+ addl $PAGE_SIZE_asm,%edi
20022+ loop 1b
20023+#endif
20024+
20025+#ifdef CONFIG_PAX_KERNEXEC
20026+ movl $pa(boot_gdt),%edi
20027+ movl $__LOAD_PHYSICAL_ADDR,%eax
20028+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
20029+ rorl $16,%eax
20030+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
20031+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
20032+ rorl $16,%eax
20033+
20034+ ljmp $(__BOOT_CS),$1f
20035+1:
20036+
20037+ movl $NR_CPUS,%ecx
20038+ movl $pa(cpu_gdt_table),%edi
20039+ addl $__PAGE_OFFSET,%eax
20040+1:
20041+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
20042+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
20043+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
20044+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
20045+ rorl $16,%eax
20046+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
20047+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
20048+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
20049+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
20050+ rorl $16,%eax
20051+ addl $PAGE_SIZE_asm,%edi
20052+ loop 1b
20053+#endif
20054+
20055 /*
20056 * Clear BSS first so that there are no surprises...
20057 */
20058@@ -196,8 +264,11 @@ ENTRY(startup_32)
20059 movl %eax, pa(max_pfn_mapped)
20060
20061 /* Do early initialization of the fixmap area */
20062- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
20063- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
20064+#ifdef CONFIG_COMPAT_VDSO
20065+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
20066+#else
20067+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
20068+#endif
20069 #else /* Not PAE */
20070
20071 page_pde_offset = (__PAGE_OFFSET >> 20);
20072@@ -227,8 +298,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
20073 movl %eax, pa(max_pfn_mapped)
20074
20075 /* Do early initialization of the fixmap area */
20076- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
20077- movl %eax,pa(initial_page_table+0xffc)
20078+#ifdef CONFIG_COMPAT_VDSO
20079+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
20080+#else
20081+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
20082+#endif
20083 #endif
20084
20085 #ifdef CONFIG_PARAVIRT
20086@@ -242,9 +316,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
20087 cmpl $num_subarch_entries, %eax
20088 jae bad_subarch
20089
20090- movl pa(subarch_entries)(,%eax,4), %eax
20091- subl $__PAGE_OFFSET, %eax
20092- jmp *%eax
20093+ jmp *pa(subarch_entries)(,%eax,4)
20094
20095 bad_subarch:
20096 WEAK(lguest_entry)
20097@@ -256,10 +328,10 @@ WEAK(xen_entry)
20098 __INITDATA
20099
20100 subarch_entries:
20101- .long default_entry /* normal x86/PC */
20102- .long lguest_entry /* lguest hypervisor */
20103- .long xen_entry /* Xen hypervisor */
20104- .long default_entry /* Moorestown MID */
20105+ .long ta(default_entry) /* normal x86/PC */
20106+ .long ta(lguest_entry) /* lguest hypervisor */
20107+ .long ta(xen_entry) /* Xen hypervisor */
20108+ .long ta(default_entry) /* Moorestown MID */
20109 num_subarch_entries = (. - subarch_entries) / 4
20110 .previous
20111 #else
20112@@ -335,6 +407,7 @@ default_entry:
20113 movl pa(mmu_cr4_features),%eax
20114 movl %eax,%cr4
20115
20116+#ifdef CONFIG_X86_PAE
20117 testb $X86_CR4_PAE, %al # check if PAE is enabled
20118 jz 6f
20119
20120@@ -363,6 +436,9 @@ default_entry:
20121 /* Make changes effective */
20122 wrmsr
20123
20124+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
20125+#endif
20126+
20127 6:
20128
20129 /*
20130@@ -460,14 +536,20 @@ is386: movl $2,%ecx # set MP
20131 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
20132 movl %eax,%ss # after changing gdt.
20133
20134- movl $(__USER_DS),%eax # DS/ES contains default USER segment
20135+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
20136 movl %eax,%ds
20137 movl %eax,%es
20138
20139 movl $(__KERNEL_PERCPU), %eax
20140 movl %eax,%fs # set this cpu's percpu
20141
20142+#ifdef CONFIG_CC_STACKPROTECTOR
20143 movl $(__KERNEL_STACK_CANARY),%eax
20144+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
20145+ movl $(__USER_DS),%eax
20146+#else
20147+ xorl %eax,%eax
20148+#endif
20149 movl %eax,%gs
20150
20151 xorl %eax,%eax # Clear LDT
20152@@ -544,8 +626,11 @@ setup_once:
20153 * relocation. Manually set base address in stack canary
20154 * segment descriptor.
20155 */
20156- movl $gdt_page,%eax
20157+ movl $cpu_gdt_table,%eax
20158 movl $stack_canary,%ecx
20159+#ifdef CONFIG_SMP
20160+ addl $__per_cpu_load,%ecx
20161+#endif
20162 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
20163 shrl $16, %ecx
20164 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
20165@@ -576,7 +661,7 @@ ENDPROC(early_idt_handlers)
20166 /* This is global to keep gas from relaxing the jumps */
20167 ENTRY(early_idt_handler)
20168 cld
20169- cmpl $2,%ss:early_recursion_flag
20170+ cmpl $1,%ss:early_recursion_flag
20171 je hlt_loop
20172 incl %ss:early_recursion_flag
20173
20174@@ -614,8 +699,8 @@ ENTRY(early_idt_handler)
20175 pushl (20+6*4)(%esp) /* trapno */
20176 pushl $fault_msg
20177 call printk
20178-#endif
20179 call dump_stack
20180+#endif
20181 hlt_loop:
20182 hlt
20183 jmp hlt_loop
20184@@ -634,8 +719,11 @@ ENDPROC(early_idt_handler)
20185 /* This is the default interrupt "handler" :-) */
20186 ALIGN
20187 ignore_int:
20188- cld
20189 #ifdef CONFIG_PRINTK
20190+ cmpl $2,%ss:early_recursion_flag
20191+ je hlt_loop
20192+ incl %ss:early_recursion_flag
20193+ cld
20194 pushl %eax
20195 pushl %ecx
20196 pushl %edx
20197@@ -644,9 +732,6 @@ ignore_int:
20198 movl $(__KERNEL_DS),%eax
20199 movl %eax,%ds
20200 movl %eax,%es
20201- cmpl $2,early_recursion_flag
20202- je hlt_loop
20203- incl early_recursion_flag
20204 pushl 16(%esp)
20205 pushl 24(%esp)
20206 pushl 32(%esp)
20207@@ -680,29 +765,43 @@ ENTRY(setup_once_ref)
20208 /*
20209 * BSS section
20210 */
20211-__PAGE_ALIGNED_BSS
20212- .align PAGE_SIZE
20213 #ifdef CONFIG_X86_PAE
20214+.section .initial_pg_pmd,"a",@progbits
20215 initial_pg_pmd:
20216 .fill 1024*KPMDS,4,0
20217 #else
20218+.section .initial_page_table,"a",@progbits
20219 ENTRY(initial_page_table)
20220 .fill 1024,4,0
20221 #endif
20222+.section .initial_pg_fixmap,"a",@progbits
20223 initial_pg_fixmap:
20224 .fill 1024,4,0
20225+.section .empty_zero_page,"a",@progbits
20226 ENTRY(empty_zero_page)
20227 .fill 4096,1,0
20228+.section .swapper_pg_dir,"a",@progbits
20229 ENTRY(swapper_pg_dir)
20230+#ifdef CONFIG_X86_PAE
20231+ .fill 4,8,0
20232+#else
20233 .fill 1024,4,0
20234+#endif
20235+
20236+/*
20237+ * The IDT has to be page-aligned to simplify the Pentium
20238+ * F0 0F bug workaround.. We have a special link segment
20239+ * for this.
20240+ */
20241+.section .idt,"a",@progbits
20242+ENTRY(idt_table)
20243+ .fill 256,8,0
20244
20245 /*
20246 * This starts the data section.
20247 */
20248 #ifdef CONFIG_X86_PAE
20249-__PAGE_ALIGNED_DATA
20250- /* Page-aligned for the benefit of paravirt? */
20251- .align PAGE_SIZE
20252+.section .initial_page_table,"a",@progbits
20253 ENTRY(initial_page_table)
20254 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
20255 # if KPMDS == 3
20256@@ -721,12 +820,20 @@ ENTRY(initial_page_table)
20257 # error "Kernel PMDs should be 1, 2 or 3"
20258 # endif
20259 .align PAGE_SIZE /* needs to be page-sized too */
20260+
20261+#ifdef CONFIG_PAX_PER_CPU_PGD
20262+ENTRY(cpu_pgd)
20263+ .rept NR_CPUS
20264+ .fill 4,8,0
20265+ .endr
20266+#endif
20267+
20268 #endif
20269
20270 .data
20271 .balign 4
20272 ENTRY(stack_start)
20273- .long init_thread_union+THREAD_SIZE
20274+ .long init_thread_union+THREAD_SIZE-8
20275
20276 __INITRODATA
20277 int_msg:
20278@@ -754,7 +861,7 @@ fault_msg:
20279 * segment size, and 32-bit linear address value:
20280 */
20281
20282- .data
20283+.section .rodata,"a",@progbits
20284 .globl boot_gdt_descr
20285 .globl idt_descr
20286
20287@@ -763,7 +870,7 @@ fault_msg:
20288 .word 0 # 32 bit align gdt_desc.address
20289 boot_gdt_descr:
20290 .word __BOOT_DS+7
20291- .long boot_gdt - __PAGE_OFFSET
20292+ .long pa(boot_gdt)
20293
20294 .word 0 # 32-bit align idt_desc.address
20295 idt_descr:
20296@@ -774,7 +881,7 @@ idt_descr:
20297 .word 0 # 32 bit align gdt_desc.address
20298 ENTRY(early_gdt_descr)
20299 .word GDT_ENTRIES*8-1
20300- .long gdt_page /* Overwritten for secondary CPUs */
20301+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
20302
20303 /*
20304 * The boot_gdt must mirror the equivalent in setup.S and is
20305@@ -783,5 +890,65 @@ ENTRY(early_gdt_descr)
20306 .align L1_CACHE_BYTES
20307 ENTRY(boot_gdt)
20308 .fill GDT_ENTRY_BOOT_CS,8,0
20309- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
20310- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
20311+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
20312+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
20313+
20314+ .align PAGE_SIZE_asm
20315+ENTRY(cpu_gdt_table)
20316+ .rept NR_CPUS
20317+ .quad 0x0000000000000000 /* NULL descriptor */
20318+ .quad 0x0000000000000000 /* 0x0b reserved */
20319+ .quad 0x0000000000000000 /* 0x13 reserved */
20320+ .quad 0x0000000000000000 /* 0x1b reserved */
20321+
20322+#ifdef CONFIG_PAX_KERNEXEC
20323+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
20324+#else
20325+ .quad 0x0000000000000000 /* 0x20 unused */
20326+#endif
20327+
20328+ .quad 0x0000000000000000 /* 0x28 unused */
20329+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
20330+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
20331+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
20332+ .quad 0x0000000000000000 /* 0x4b reserved */
20333+ .quad 0x0000000000000000 /* 0x53 reserved */
20334+ .quad 0x0000000000000000 /* 0x5b reserved */
20335+
20336+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
20337+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
20338+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
20339+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
20340+
20341+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
20342+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
20343+
20344+ /*
20345+ * Segments used for calling PnP BIOS have byte granularity.
20346+ * The code segments and data segments have fixed 64k limits,
20347+ * the transfer segment sizes are set at run time.
20348+ */
20349+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
20350+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
20351+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
20352+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
20353+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
20354+
20355+ /*
20356+ * The APM segments have byte granularity and their bases
20357+ * are set at run time. All have 64k limits.
20358+ */
20359+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
20360+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
20361+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
20362+
20363+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
20364+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
20365+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
20366+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
20367+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
20368+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
20369+
20370+ /* Be sure this is zeroed to avoid false validations in Xen */
20371+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
20372+ .endr
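[annotation] The head_32.S hunk above replaces the per-cpu gdt_page with a statically allocated, page-padded cpu_gdt_table whose descriptors are spelled out as raw 64-bit constants; note the access bytes change from 0x9a/0x92 to 0x9b/0x93, pre-setting the "accessed" bit so the CPU never needs write access to a GDT that KERNEXEC keeps read-only. A minimal sketch of how those quads decode (standalone user-space C for illustration, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Unpack one x86 segment descriptor quad, SDM layout:
 * limit[15:0] | base[15:0] | base[23:16] | access | flags,limit[19:16] | base[31:24] */
static void decode(uint64_t d)
{
	uint32_t limit  = (d & 0xffff) | ((d >> 32) & 0xf0000);
	uint32_t base   = ((d >> 16) & 0xffffff) | ((d >> 32) & 0xff000000);
	uint8_t  access = d >> 40;
	uint8_t  flags  = (d >> 52) & 0xf;

	if (flags & 0x8)	/* G bit: limit counted in 4 KiB pages */
		limit = (limit << 12) | 0xfff;

	printf("%016llx: base=%08x limit=%08x access=%02x %s DPL%u%s\n",
	       (unsigned long long)d, base, limit, access,
	       (access & 0x08) ? "code" : "data", (access >> 5) & 3,
	       (access & 0x01) ? " accessed" : "");
}

int main(void)
{
	decode(0x00cf9b000000ffffULL);	/* kernel 4GB code, ring 0 */
	decode(0x00cff3000000ffffULL);	/* user 4GB data, ring 3 */
	decode(0x0040910000000017ULL);	/* stack canary: 24-byte data segment */
	return 0;
}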
20373diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
20374index 980053c..74d3b44 100644
20375--- a/arch/x86/kernel/head_64.S
20376+++ b/arch/x86/kernel/head_64.S
20377@@ -20,6 +20,8 @@
20378 #include <asm/processor-flags.h>
20379 #include <asm/percpu.h>
20380 #include <asm/nops.h>
20381+#include <asm/cpufeature.h>
20382+#include <asm/alternative-asm.h>
20383
20384 #ifdef CONFIG_PARAVIRT
20385 #include <asm/asm-offsets.h>
20386@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
20387 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
20388 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
20389 L3_START_KERNEL = pud_index(__START_KERNEL_map)
20390+L4_VMALLOC_START = pgd_index(VMALLOC_START)
20391+L3_VMALLOC_START = pud_index(VMALLOC_START)
20392+L4_VMALLOC_END = pgd_index(VMALLOC_END)
20393+L3_VMALLOC_END = pud_index(VMALLOC_END)
20394+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
20395+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
20396
20397 .text
20398 __HEAD
20399@@ -88,35 +96,23 @@ startup_64:
20400 */
20401 addq %rbp, init_level4_pgt + 0(%rip)
20402 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
20403+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
20404+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
20405+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
20406 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
20407
20408 addq %rbp, level3_ident_pgt + 0(%rip)
20409+#ifndef CONFIG_XEN
20410+ addq %rbp, level3_ident_pgt + 8(%rip)
20411+#endif
20412
20413- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
20414- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
20415+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
20416+
20417+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
20418+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
20419
20420 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
20421-
20422- /* Add an Identity mapping if I am above 1G */
20423- leaq _text(%rip), %rdi
20424- andq $PMD_PAGE_MASK, %rdi
20425-
20426- movq %rdi, %rax
20427- shrq $PUD_SHIFT, %rax
20428- andq $(PTRS_PER_PUD - 1), %rax
20429- jz ident_complete
20430-
20431- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
20432- leaq level3_ident_pgt(%rip), %rbx
20433- movq %rdx, 0(%rbx, %rax, 8)
20434-
20435- movq %rdi, %rax
20436- shrq $PMD_SHIFT, %rax
20437- andq $(PTRS_PER_PMD - 1), %rax
20438- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
20439- leaq level2_spare_pgt(%rip), %rbx
20440- movq %rdx, 0(%rbx, %rax, 8)
20441-ident_complete:
20442+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
20443
20444 /*
20445 * Fixup the kernel text+data virtual addresses. Note that
20446@@ -159,8 +155,8 @@ ENTRY(secondary_startup_64)
20447 * after the boot processor executes this code.
20448 */
20449
20450- /* Enable PAE mode and PGE */
20451- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
20452+ /* Enable PAE mode and PSE/PGE */
20453+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
20454 movq %rax, %cr4
20455
20456 /* Setup early boot stage 4 level pagetables. */
20457@@ -182,9 +178,17 @@ ENTRY(secondary_startup_64)
20458 movl $MSR_EFER, %ecx
20459 rdmsr
20460 btsl $_EFER_SCE, %eax /* Enable System Call */
20461- btl $20,%edi /* No Execute supported? */
20462+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
20463 jnc 1f
20464 btsl $_EFER_NX, %eax
20465+ leaq init_level4_pgt(%rip), %rdi
20466+#ifndef CONFIG_EFI
20467+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
20468+#endif
20469+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
20470+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
20471+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
20472+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
20473 1: wrmsr /* Make changes effective */
20474
20475 /* Setup cr0 */
20476@@ -246,6 +250,7 @@ ENTRY(secondary_startup_64)
20477 * jump. In addition we need to ensure %cs is set so we make this
20478 * a far return.
20479 */
20480+ pax_set_fptr_mask
20481 movq initial_code(%rip),%rax
20482 pushq $0 # fake return address to stop unwinder
20483 pushq $__KERNEL_CS # set correct cs
20484@@ -284,7 +289,7 @@ ENDPROC(start_cpu0)
20485 bad_address:
20486 jmp bad_address
20487
20488- .section ".init.text","ax"
20489+ __INIT
20490 .globl early_idt_handlers
20491 early_idt_handlers:
20492 # 104(%rsp) %rflags
20493@@ -343,7 +348,7 @@ ENTRY(early_idt_handler)
20494 call dump_stack
20495 #ifdef CONFIG_KALLSYMS
20496 leaq early_idt_ripmsg(%rip),%rdi
20497- movq 40(%rsp),%rsi # %rip again
20498+ movq 88(%rsp),%rsi # %rip again
20499 call __print_symbol
20500 #endif
20501 #endif /* EARLY_PRINTK */
20502@@ -363,11 +368,15 @@ ENTRY(early_idt_handler)
20503 addq $16,%rsp # drop vector number and error code
20504 decl early_recursion_flag(%rip)
20505 INTERRUPT_RETURN
20506+ .previous
20507
20508+ __INITDATA
20509 .balign 4
20510 early_recursion_flag:
20511 .long 0
20512+ .previous
20513
20514+ .section .rodata,"a",@progbits
20515 #ifdef CONFIG_EARLY_PRINTK
20516 early_idt_msg:
20517 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
20518@@ -376,6 +385,7 @@ early_idt_ripmsg:
20519 #endif /* CONFIG_EARLY_PRINTK */
20520 .previous
20521
20522+ .section .rodata,"a",@progbits
20523 #define NEXT_PAGE(name) \
20524 .balign PAGE_SIZE; \
20525 ENTRY(name)
20526@@ -388,7 +398,6 @@ ENTRY(name)
20527 i = i + 1 ; \
20528 .endr
20529
20530- .data
20531 /*
20532 * This default setting generates an ident mapping at address 0x100000
20533 * and a mapping for the kernel that precisely maps virtual address
20534@@ -399,13 +408,41 @@ NEXT_PAGE(init_level4_pgt)
20535 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20536 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
20537 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20538+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
20539+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
20540+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
20541+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
20542+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
20543+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
20544 .org init_level4_pgt + L4_START_KERNEL*8, 0
20545 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
20546 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
20547
20548+#ifdef CONFIG_PAX_PER_CPU_PGD
20549+NEXT_PAGE(cpu_pgd)
20550+ .rept NR_CPUS
20551+ .fill 512,8,0
20552+ .endr
20553+#endif
20554+
20555 NEXT_PAGE(level3_ident_pgt)
20556 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20557+#ifdef CONFIG_XEN
20558 .fill 511,8,0
20559+#else
20560+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
20561+ .fill 510,8,0
20562+#endif
20563+
20564+NEXT_PAGE(level3_vmalloc_start_pgt)
20565+ .fill 512,8,0
20566+
20567+NEXT_PAGE(level3_vmalloc_end_pgt)
20568+ .fill 512,8,0
20569+
20570+NEXT_PAGE(level3_vmemmap_pgt)
20571+ .fill L3_VMEMMAP_START,8,0
20572+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
20573
20574 NEXT_PAGE(level3_kernel_pgt)
20575 .fill L3_START_KERNEL,8,0
20576@@ -413,20 +450,23 @@ NEXT_PAGE(level3_kernel_pgt)
20577 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
20578 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
20579
20580+NEXT_PAGE(level2_vmemmap_pgt)
20581+ .fill 512,8,0
20582+
20583 NEXT_PAGE(level2_fixmap_pgt)
20584- .fill 506,8,0
20585- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
20586- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
20587- .fill 5,8,0
20588+ .fill 507,8,0
20589+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
20590+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
20591+ .fill 4,8,0
20592
20593-NEXT_PAGE(level1_fixmap_pgt)
20594+NEXT_PAGE(level1_vsyscall_pgt)
20595 .fill 512,8,0
20596
20597-NEXT_PAGE(level2_ident_pgt)
20598- /* Since I easily can, map the first 1G.
20599+ /* Since I easily can, map the first 2G.
20600 * Don't set NX because code runs from these pages.
20601 */
20602- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
20603+NEXT_PAGE(level2_ident_pgt)
20604+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
20605
20606 NEXT_PAGE(level2_kernel_pgt)
20607 /*
20608@@ -439,37 +479,59 @@ NEXT_PAGE(level2_kernel_pgt)
20609 * If you want to increase this then increase MODULES_VADDR
20610 * too.)
20611 */
20612- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
20613- KERNEL_IMAGE_SIZE/PMD_SIZE)
20614-
20615-NEXT_PAGE(level2_spare_pgt)
20616- .fill 512, 8, 0
20617+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
20618
20619 #undef PMDS
20620 #undef NEXT_PAGE
20621
20622- .data
20623+ .align PAGE_SIZE
20624+ENTRY(cpu_gdt_table)
20625+ .rept NR_CPUS
20626+ .quad 0x0000000000000000 /* NULL descriptor */
20627+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
20628+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
20629+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
20630+ .quad 0x00cffb000000ffff /* __USER32_CS */
20631+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
20632+ .quad 0x00affb000000ffff /* __USER_CS */
20633+
20634+#ifdef CONFIG_PAX_KERNEXEC
20635+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
20636+#else
20637+ .quad 0x0 /* unused */
20638+#endif
20639+
20640+ .quad 0,0 /* TSS */
20641+ .quad 0,0 /* LDT */
20642+ .quad 0,0,0 /* three TLS descriptors */
20643+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
20644+ /* asm/segment.h:GDT_ENTRIES must match this */
20645+
20646+ /* zero the remaining page */
20647+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
20648+ .endr
20649+
20650 .align 16
20651 .globl early_gdt_descr
20652 early_gdt_descr:
20653 .word GDT_ENTRIES*8-1
20654 early_gdt_descr_base:
20655- .quad INIT_PER_CPU_VAR(gdt_page)
20656+ .quad cpu_gdt_table
20657
20658 ENTRY(phys_base)
20659 /* This must match the first entry in level2_kernel_pgt */
20660 .quad 0x0000000000000000
20661
20662 #include "../../x86/xen/xen-head.S"
20663-
20664- .section .bss, "aw", @nobits
20665+
20666+ .section .rodata,"a",@progbits
20667 .align L1_CACHE_BYTES
20668 ENTRY(idt_table)
20669- .skip IDT_ENTRIES * 16
20670+ .fill 512,8,0
20671
20672 .align L1_CACHE_BYTES
20673 ENTRY(nmi_idt_table)
20674- .skip IDT_ENTRIES * 16
20675+ .fill 512,8,0
20676
20677 __PAGE_ALIGNED_BSS
20678 .align PAGE_SIZE
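[annotation] On the 64-bit side the startup code now pre-wires separate level-3 tables for the vmalloc and vmemmap regions and, once NX is confirmed in CPUID, sets the NX bit on every top-level entry that must never be executed (and in __supported_pte_mask). The L4_*/L3_* symbols are pure bit slicing of the virtual address; a sketch using the layout constants of this kernel generation (the exact addresses are an assumption here, taken from the classic pre-KASLR x86_64 map):

#include <stdint.h>
#include <stdio.h>

/* 4-level paging: bits 47..39 select the PGD (L4) slot,
 * bits 38..30 the PUD (L3) slot, 512 entries each. */
#define PGD_INDEX(a) (((a) >> 39) & 0x1ff)
#define PUD_INDEX(a) (((a) >> 30) & 0x1ff)

int main(void)
{
	uint64_t page_offset      = 0xffff880000000000ULL; /* direct map, assumed */
	uint64_t vmalloc_start    = 0xffffc90000000000ULL; /* assumed */
	uint64_t vmemmap_start    = 0xffffea0000000000ULL; /* assumed */
	uint64_t start_kernel_map = 0xffffffff80000000ULL; /* kernel text, assumed */

	printf("L4_PAGE_OFFSET   = %llu\n", (unsigned long long)PGD_INDEX(page_offset));
	printf("L4_VMALLOC_START = %llu\n", (unsigned long long)PGD_INDEX(vmalloc_start));
	printf("L4_VMEMMAP_START = %llu\n", (unsigned long long)PGD_INDEX(vmemmap_start));
	printf("L4_START_KERNEL  = %llu\n", (unsigned long long)PGD_INDEX(start_kernel_map));
	printf("L3_START_KERNEL  = %llu\n", (unsigned long long)PUD_INDEX(start_kernel_map));
	return 0;
}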
20679diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
20680index 9c3bd4a..e1d9b35 100644
20681--- a/arch/x86/kernel/i386_ksyms_32.c
20682+++ b/arch/x86/kernel/i386_ksyms_32.c
20683@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
20684 EXPORT_SYMBOL(cmpxchg8b_emu);
20685 #endif
20686
20687+EXPORT_SYMBOL_GPL(cpu_gdt_table);
20688+
20689 /* Networking helper routines. */
20690 EXPORT_SYMBOL(csum_partial_copy_generic);
20691+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
20692+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
20693
20694 EXPORT_SYMBOL(__get_user_1);
20695 EXPORT_SYMBOL(__get_user_2);
20696@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
20697
20698 EXPORT_SYMBOL(csum_partial);
20699 EXPORT_SYMBOL(empty_zero_page);
20700+
20701+#ifdef CONFIG_PAX_KERNEXEC
20702+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
20703+#endif
20704diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
20705index 245a71d..89d9ce4 100644
20706--- a/arch/x86/kernel/i387.c
20707+++ b/arch/x86/kernel/i387.c
20708@@ -55,7 +55,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
20709 static inline bool interrupted_user_mode(void)
20710 {
20711 struct pt_regs *regs = get_irq_regs();
20712- return regs && user_mode_vm(regs);
20713+ return regs && user_mode(regs);
20714 }
20715
20716 /*
20717diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
20718index 9a5c460..b332a4b 100644
20719--- a/arch/x86/kernel/i8259.c
20720+++ b/arch/x86/kernel/i8259.c
20721@@ -209,7 +209,7 @@ spurious_8259A_irq:
20722 "spurious 8259A interrupt: IRQ%d.\n", irq);
20723 spurious_irq_mask |= irqmask;
20724 }
20725- atomic_inc(&irq_err_count);
20726+ atomic_inc_unchecked(&irq_err_count);
20727 /*
20728 * Theoretically we do not have to handle this IRQ,
20729 * but in Linux this does not cause problems and is
20730@@ -333,14 +333,16 @@ static void init_8259A(int auto_eoi)
20731 /* (slave's support for AEOI in flat mode is to be investigated) */
20732 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
20733
20734+ pax_open_kernel();
20735 if (auto_eoi)
20736 /*
20737 * In AEOI mode we just have to mask the interrupt
20738 * when acking.
20739 */
20740- i8259A_chip.irq_mask_ack = disable_8259A_irq;
20741+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
20742 else
20743- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
20744+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
20745+ pax_close_kernel();
20746
20747 udelay(100); /* wait for 8259A to initialize */
20748
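[annotation] The i8259 change is the canonical pax_open_kernel()/pax_close_kernel() pattern: under KERNEXEC, ops structures full of function pointers (here i8259A_chip) live in read-only memory, the fields become const-qualified (hence the *(void **)& casts), and the one legitimate late write briefly lifts the protection. A user-space analogy with mprotect(2), a sketch only; the kernel mechanism is different (CR0.WP games or an alternate mapping, not mprotect):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

typedef void (*handler_t)(void);
static void old_handler(void) { puts("old"); }
static void new_handler(void) { puts("new"); }

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);

	/* "ops structure" on its own page, then made read-only */
	handler_t *ops = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	ops[0] = old_handler;
	mprotect(ops, pagesz, PROT_READ);	/* "boot finished": lock it */

	/* pax_open_kernel() analogue: briefly writable ... */
	mprotect(ops, pagesz, PROT_READ | PROT_WRITE);
	ops[0] = new_handler;			/* the one legitimate update */
	mprotect(ops, pagesz, PROT_READ);	/* ... pax_close_kernel() */

	ops[0]();				/* prints "new" */
	return 0;
}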
20749diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
20750index a979b5b..1d6db75 100644
20751--- a/arch/x86/kernel/io_delay.c
20752+++ b/arch/x86/kernel/io_delay.c
20753@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
20754 * Quirk table for systems that misbehave (lock up, etc.) if port
20755 * 0x80 is used:
20756 */
20757-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
20758+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
20759 {
20760 .callback = dmi_io_delay_0xed_port,
20761 .ident = "Compaq Presario V6000",
20762diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
20763index 8c96897..be66bfa 100644
20764--- a/arch/x86/kernel/ioport.c
20765+++ b/arch/x86/kernel/ioport.c
20766@@ -6,6 +6,7 @@
20767 #include <linux/sched.h>
20768 #include <linux/kernel.h>
20769 #include <linux/capability.h>
20770+#include <linux/security.h>
20771 #include <linux/errno.h>
20772 #include <linux/types.h>
20773 #include <linux/ioport.h>
20774@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
20775
20776 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
20777 return -EINVAL;
20778+#ifdef CONFIG_GRKERNSEC_IO
20779+ if (turn_on && grsec_disable_privio) {
20780+ gr_handle_ioperm();
20781+ return -EPERM;
20782+ }
20783+#endif
20784 if (turn_on && !capable(CAP_SYS_RAWIO))
20785 return -EPERM;
20786
20787@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
20788 * because the ->io_bitmap_max value must match the bitmap
20789 * contents:
20790 */
20791- tss = &per_cpu(init_tss, get_cpu());
20792+ tss = init_tss + get_cpu();
20793
20794 if (turn_on)
20795 bitmap_clear(t->io_bitmap_ptr, from, num);
20796@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
20797 return -EINVAL;
20798 /* Trying to gain more privileges? */
20799 if (level > old) {
20800+#ifdef CONFIG_GRKERNSEC_IO
20801+ if (grsec_disable_privio) {
20802+ gr_handle_iopl();
20803+ return -EPERM;
20804+ }
20805+#endif
20806 if (!capable(CAP_SYS_RAWIO))
20807 return -EPERM;
20808 }
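[annotation] Both ioperm() and iopl() grow a GRKERNSEC_IO gate that fires before the CAP_SYS_RAWIO check and logs the attempt; the same hunk also switches init_tss from a per-cpu variable to plain array indexing (init_tss + get_cpu()), matching the static cpu_gdt_table above. A quick user-space probe of the gate, assuming x86 glibc's <sys/io.h> wrapper and root privileges on a stock kernel:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/io.h>

int main(void)
{
	/* Ask for access to port 0x80 (the classic POST/delay port). On a
	 * grsec kernel with GRKERNSEC_IO this returns EPERM even for root. */
	if (ioperm(0x80, 1, 1) != 0)
		printf("ioperm: %s\n", strerror(errno));
	else
		puts("ioperm granted (no GRKERNSEC_IO, and we hold CAP_SYS_RAWIO)");
	return 0;
}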
20809diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
20810index e4595f1..ee3bfb8 100644
20811--- a/arch/x86/kernel/irq.c
20812+++ b/arch/x86/kernel/irq.c
20813@@ -18,7 +18,7 @@
20814 #include <asm/mce.h>
20815 #include <asm/hw_irq.h>
20816
20817-atomic_t irq_err_count;
20818+atomic_unchecked_t irq_err_count;
20819
20820 /* Function pointer for generic interrupt vector handling */
20821 void (*x86_platform_ipi_callback)(void) = NULL;
20822@@ -122,9 +122,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
20823 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
20824 seq_printf(p, " Machine check polls\n");
20825 #endif
20826- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
20827+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
20828 #if defined(CONFIG_X86_IO_APIC)
20829- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
20830+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
20831 #endif
20832 return 0;
20833 }
20834@@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
20835
20836 u64 arch_irq_stat(void)
20837 {
20838- u64 sum = atomic_read(&irq_err_count);
20839+ u64 sum = atomic_read_unchecked(&irq_err_count);
20840
20841 #ifdef CONFIG_X86_IO_APIC
20842- sum += atomic_read(&irq_mis_count);
20843+ sum += atomic_read_unchecked(&irq_mis_count);
20844 #endif
20845 return sum;
20846 }
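[annotation] irq_err_count and irq_mis_count are pure statistics that may legitimately wrap, so they become atomic_unchecked_t: the opt-out from PaX's REFCOUNT hardening, under which ordinary atomic_t increments trap on signed overflow. A rough sketch of the distinction in x86 GCC inline asm (the precise trap sequence the real patch emits is an assumption here):

#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

/* checked flavour: undo the increment and trap on signed overflow */
static inline void atomic_inc(atomic_t *v)
{
	asm volatile("lock incl %0\n\t"
		     "jno 1f\n\t"	/* OF set: signed wrap detected */
		     "lock decl %0\n\t"	/* roll back ... */
		     "int $4\n"		/* ... and raise #OF */
		     "1:"
		     : "+m" (v->counter));
}

/* unchecked flavour: a plain locked increment, wrapping is fine */
static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	asm volatile("lock incl %0" : "+m" (v->counter));
}

int main(void)
{
	atomic_unchecked_t err_count = { 0x7fffffff };
	atomic_inc_unchecked(&err_count);	/* wraps silently, by design */
	printf("err_count = %d\n", err_count.counter);
	return 0;
}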
20847diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
20848index 344faf8..355f60d 100644
20849--- a/arch/x86/kernel/irq_32.c
20850+++ b/arch/x86/kernel/irq_32.c
20851@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
20852 __asm__ __volatile__("andl %%esp,%0" :
20853 "=r" (sp) : "0" (THREAD_SIZE - 1));
20854
20855- return sp < (sizeof(struct thread_info) + STACK_WARN);
20856+ return sp < STACK_WARN;
20857 }
20858
20859 static void print_stack_overflow(void)
20860@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
20861 * per-CPU IRQ handling contexts (thread information and stack)
20862 */
20863 union irq_ctx {
20864- struct thread_info tinfo;
20865- u32 stack[THREAD_SIZE/sizeof(u32)];
20866+ unsigned long previous_esp;
20867+ u32 stack[THREAD_SIZE/sizeof(u32)];
20868 } __attribute__((aligned(THREAD_SIZE)));
20869
20870 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
20871@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
20872 static inline int
20873 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
20874 {
20875- union irq_ctx *curctx, *irqctx;
20876+ union irq_ctx *irqctx;
20877 u32 *isp, arg1, arg2;
20878
20879- curctx = (union irq_ctx *) current_thread_info();
20880 irqctx = __this_cpu_read(hardirq_ctx);
20881
20882 /*
20883@@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
20884 * handler) we can't do that and just have to keep using the
20885 * current stack (which is the irq stack already after all)
20886 */
20887- if (unlikely(curctx == irqctx))
20888+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
20889 return 0;
20890
20891 /* build the stack frame on the IRQ stack */
20892- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
20893- irqctx->tinfo.task = curctx->tinfo.task;
20894- irqctx->tinfo.previous_esp = current_stack_pointer;
20895+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
20896+ irqctx->previous_esp = current_stack_pointer;
20897
20898- /* Copy the preempt_count so that the [soft]irq checks work. */
20899- irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
20900+#ifdef CONFIG_PAX_MEMORY_UDEREF
20901+ __set_fs(MAKE_MM_SEG(0));
20902+#endif
20903
20904 if (unlikely(overflow))
20905 call_on_stack(print_stack_overflow, isp);
20906@@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
20907 : "0" (irq), "1" (desc), "2" (isp),
20908 "D" (desc->handle_irq)
20909 : "memory", "cc", "ecx");
20910+
20911+#ifdef CONFIG_PAX_MEMORY_UDEREF
20912+ __set_fs(current_thread_info()->addr_limit);
20913+#endif
20914+
20915 return 1;
20916 }
20917
20918@@ -121,29 +125,14 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
20919 */
20920 void __cpuinit irq_ctx_init(int cpu)
20921 {
20922- union irq_ctx *irqctx;
20923-
20924 if (per_cpu(hardirq_ctx, cpu))
20925 return;
20926
20927- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
20928- THREADINFO_GFP,
20929- THREAD_SIZE_ORDER));
20930- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
20931- irqctx->tinfo.cpu = cpu;
20932- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
20933- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
20934-
20935- per_cpu(hardirq_ctx, cpu) = irqctx;
20936-
20937- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
20938- THREADINFO_GFP,
20939- THREAD_SIZE_ORDER));
20940- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
20941- irqctx->tinfo.cpu = cpu;
20942- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
20943-
20944- per_cpu(softirq_ctx, cpu) = irqctx;
20945+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
20946+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
20947+
20948+ printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
20949+ cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
20950
20951 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
20952 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
20953@@ -152,7 +141,6 @@ void __cpuinit irq_ctx_init(int cpu)
20954 asmlinkage void do_softirq(void)
20955 {
20956 unsigned long flags;
20957- struct thread_info *curctx;
20958 union irq_ctx *irqctx;
20959 u32 *isp;
20960
20961@@ -162,15 +150,22 @@ asmlinkage void do_softirq(void)
20962 local_irq_save(flags);
20963
20964 if (local_softirq_pending()) {
20965- curctx = current_thread_info();
20966 irqctx = __this_cpu_read(softirq_ctx);
20967- irqctx->tinfo.task = curctx->task;
20968- irqctx->tinfo.previous_esp = current_stack_pointer;
20969+ irqctx->previous_esp = current_stack_pointer;
20970
20971 /* build the stack frame on the softirq stack */
20972- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
20973+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
20974+
20975+#ifdef CONFIG_PAX_MEMORY_UDEREF
20976+ __set_fs(MAKE_MM_SEG(0));
20977+#endif
20978
20979 call_on_stack(__do_softirq, isp);
20980+
20981+#ifdef CONFIG_PAX_MEMORY_UDEREF
20982+ __set_fs(current_thread_info()->addr_limit);
20983+#endif
20984+
20985 /*
20986 * Shouldn't happen, we returned above if in_interrupt():
20987 */
20988@@ -191,7 +186,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
20989 if (unlikely(!desc))
20990 return false;
20991
20992- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
20993+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
20994 if (unlikely(overflow))
20995 print_stack_overflow();
20996 desc->handle_irq(irq, desc);
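[annotation] The irq_32.c rework removes the thread_info copy that used to live at the bottom of each IRQ stack (the hardened kernel keeps thread_info off the stack), so "already on the IRQ stack?" becomes a pointer-distance test and the overflow check compares against the bare stack bottom. Both hinge on THREAD_SIZE being a power of two; the masking arithmetic, with invented addresses:

#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 8192u	/* two pages on i386 */
#define STACK_WARN  (THREAD_SIZE / 8)

int main(void)
{
	/* hypothetical %esp values on a THREAD_SIZE-aligned stack */
	uint32_t sp_healthy  = 0xc1802f40;	/* plenty of room left */
	uint32_t sp_nearfull = 0xc1802040;	/* close to the bottom */

	/* "andl %%esp,%0" with THREAD_SIZE-1 gives the offset from the base */
	printf("healthy : offset %u, overflow=%d\n",
	       sp_healthy & (THREAD_SIZE - 1),
	       (sp_healthy & (THREAD_SIZE - 1)) < STACK_WARN);
	printf("nearfull: offset %u, overflow=%d\n",
	       sp_nearfull & (THREAD_SIZE - 1),
	       (sp_nearfull & (THREAD_SIZE - 1)) < STACK_WARN);

	/* the patched "already on IRQ stack?" test: pointer distance */
	uint32_t irqctx = 0xc1802f40 & ~(THREAD_SIZE - 1);	/* stack base */
	printf("on irq stack: %d\n", sp_healthy - irqctx < THREAD_SIZE);
	return 0;
}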
20997diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
20998index d04d3ec..ea4b374 100644
20999--- a/arch/x86/kernel/irq_64.c
21000+++ b/arch/x86/kernel/irq_64.c
21001@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
21002 u64 estack_top, estack_bottom;
21003 u64 curbase = (u64)task_stack_page(current);
21004
21005- if (user_mode_vm(regs))
21006+ if (user_mode(regs))
21007 return;
21008
21009 if (regs->sp >= curbase + sizeof(struct thread_info) +
21010diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
21011index dc1404b..bbc43e7 100644
21012--- a/arch/x86/kernel/kdebugfs.c
21013+++ b/arch/x86/kernel/kdebugfs.c
21014@@ -27,7 +27,7 @@ struct setup_data_node {
21015 u32 len;
21016 };
21017
21018-static ssize_t setup_data_read(struct file *file, char __user *user_buf,
21019+static ssize_t __size_overflow(3) setup_data_read(struct file *file, char __user *user_buf,
21020 size_t count, loff_t *ppos)
21021 {
21022 struct setup_data_node *node = file->private_data;
21023diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
21024index 836f832..a8bda67 100644
21025--- a/arch/x86/kernel/kgdb.c
21026+++ b/arch/x86/kernel/kgdb.c
21027@@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
21028 #ifdef CONFIG_X86_32
21029 switch (regno) {
21030 case GDB_SS:
21031- if (!user_mode_vm(regs))
21032+ if (!user_mode(regs))
21033 *(unsigned long *)mem = __KERNEL_DS;
21034 break;
21035 case GDB_SP:
21036- if (!user_mode_vm(regs))
21037+ if (!user_mode(regs))
21038 *(unsigned long *)mem = kernel_stack_pointer(regs);
21039 break;
21040 case GDB_GS:
21041@@ -229,7 +229,10 @@ static void kgdb_correct_hw_break(void)
21042 bp->attr.bp_addr = breakinfo[breakno].addr;
21043 bp->attr.bp_len = breakinfo[breakno].len;
21044 bp->attr.bp_type = breakinfo[breakno].type;
21045- info->address = breakinfo[breakno].addr;
21046+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
21047+ info->address = ktla_ktva(breakinfo[breakno].addr);
21048+ else
21049+ info->address = breakinfo[breakno].addr;
21050 info->len = breakinfo[breakno].len;
21051 info->type = breakinfo[breakno].type;
21052 val = arch_install_hw_breakpoint(bp);
21053@@ -476,12 +479,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
21054 case 'k':
21055 /* clear the trace bit */
21056 linux_regs->flags &= ~X86_EFLAGS_TF;
21057- atomic_set(&kgdb_cpu_doing_single_step, -1);
21058+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
21059
21060 /* set the trace bit if we're stepping */
21061 if (remcomInBuffer[0] == 's') {
21062 linux_regs->flags |= X86_EFLAGS_TF;
21063- atomic_set(&kgdb_cpu_doing_single_step,
21064+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
21065 raw_smp_processor_id());
21066 }
21067
21068@@ -546,7 +549,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
21069
21070 switch (cmd) {
21071 case DIE_DEBUG:
21072- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
21073+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
21074 if (user_mode(regs))
21075 return single_step_cont(regs, args);
21076 break;
21077@@ -751,11 +754,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
21078 #endif /* CONFIG_DEBUG_RODATA */
21079
21080 bpt->type = BP_BREAKPOINT;
21081- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
21082+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
21083 BREAK_INSTR_SIZE);
21084 if (err)
21085 return err;
21086- err = probe_kernel_write((char *)bpt->bpt_addr,
21087+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
21088 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
21089 #ifdef CONFIG_DEBUG_RODATA
21090 if (!err)
21091@@ -768,7 +771,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
21092 return -EBUSY;
21093 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
21094 BREAK_INSTR_SIZE);
21095- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
21096+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
21097 if (err)
21098 return err;
21099 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
21100@@ -793,13 +796,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
21101 if (mutex_is_locked(&text_mutex))
21102 goto knl_write;
21103 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
21104- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
21105+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
21106 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
21107 goto knl_write;
21108 return err;
21109 knl_write:
21110 #endif /* CONFIG_DEBUG_RODATA */
21111- return probe_kernel_write((char *)bpt->bpt_addr,
21112+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
21113 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
21114 }
21115
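[annotation] kgdb patches breakpoint bytes straight into kernel text, which i386 KERNEXEC maps at two virtual addresses (one the CPU executes from, one the code reads and writes through); ktla_ktva()/ktva_ktla() convert between the two views and are nothing more than adding or subtracting a constant. A toy model, with the offset invented for the demo:

#include <stdint.h>
#include <stdio.h>

/* Toy model of the KERNEXEC dual text mapping: the same bytes appear at
 * two virtual addresses a fixed distance apart. 0x10000000 is invented;
 * the real offset depends on the kernel's link-time layout. */
#define KERNEXEC_OFFSET 0x10000000u

#define ktla_ktva(a) ((a) + KERNEXEC_OFFSET)	/* one view to the other */
#define ktva_ktla(a) ((a) - KERNEXEC_OFFSET)	/* and back */

int main(void)
{
	uint32_t bpt_addr = 0xc1050000;	/* invented breakpoint address */

	printf("breakpoint at %#x, byte patched via %#x\n",
	       bpt_addr, ktla_ktva(bpt_addr));
	printf("round trip ok: %d\n", ktva_ktla(ktla_ktva(bpt_addr)) == bpt_addr);
	return 0;
}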
21116diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes-opt.c
21117index c5e410e..ed5a7f0 100644
21118--- a/arch/x86/kernel/kprobes-opt.c
21119+++ b/arch/x86/kernel/kprobes-opt.c
21120@@ -338,7 +338,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
21121 * Verify if the address gap is in 2GB range, because this uses
21122 * a relative jump.
21123 */
21124- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
21125+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
21126 if (abs(rel) > 0x7fffffff)
21127 return -ERANGE;
21128
21129@@ -353,16 +353,16 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
21130 op->optinsn.size = ret;
21131
21132 /* Copy arch-dep-instance from template */
21133- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
21134+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
21135
21136 /* Set probe information */
21137 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
21138
21139 /* Set probe function call */
21140- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
21141+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
21142
21143 /* Set returning jmp instruction at the tail of out-of-line buffer */
21144- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
21145+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
21146 (u8 *)op->kp.addr + op->optinsn.size);
21147
21148 flush_icache_range((unsigned long) buf,
21149@@ -385,7 +385,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
21150 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
21151
21152 /* Backup instructions which will be replaced by jump address */
21153- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
21154+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
21155 RELATIVE_ADDR_SIZE);
21156
21157 insn_buf[0] = RELATIVEJUMP_OPCODE;
21158@@ -483,7 +483,7 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
21159 /* This kprobe is really able to run optimized path. */
21160 op = container_of(p, struct optimized_kprobe, kp);
21161 /* Detour through copied instructions */
21162- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
21163+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
21164 if (!reenter)
21165 reset_current_kprobe();
21166 preempt_enable_no_resched();
21167diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
21168index 57916c0..9e0b9d0 100644
21169--- a/arch/x86/kernel/kprobes.c
21170+++ b/arch/x86/kernel/kprobes.c
21171@@ -119,9 +119,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
21172 s32 raddr;
21173 } __attribute__((packed)) *insn;
21174
21175- insn = (struct __arch_relative_insn *)from;
21176+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
21177+
21178+ pax_open_kernel();
21179 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
21180 insn->op = op;
21181+ pax_close_kernel();
21182 }
21183
21184 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
21185@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
21186 kprobe_opcode_t opcode;
21187 kprobe_opcode_t *orig_opcodes = opcodes;
21188
21189- if (search_exception_tables((unsigned long)opcodes))
21190+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
21191 return 0; /* Page fault may occur on this address. */
21192
21193 retry:
21194@@ -238,9 +241,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
21195 * for the first byte, we can recover the original instruction
21196 * from it and kp->opcode.
21197 */
21198- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
21199+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
21200 buf[0] = kp->opcode;
21201- return (unsigned long)buf;
21202+ return ktva_ktla((unsigned long)buf);
21203 }
21204
21205 /*
21206@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
21207 /* Another subsystem puts a breakpoint, failed to recover */
21208 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
21209 return 0;
21210+ pax_open_kernel();
21211 memcpy(dest, insn.kaddr, insn.length);
21212+ pax_close_kernel();
21213
21214 #ifdef CONFIG_X86_64
21215 if (insn_rip_relative(&insn)) {
21216@@ -355,7 +360,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
21217 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
21218 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
21219 disp = (u8 *) dest + insn_offset_displacement(&insn);
21220+ pax_open_kernel();
21221 *(s32 *) disp = (s32) newdisp;
21222+ pax_close_kernel();
21223 }
21224 #endif
21225 return insn.length;
21226@@ -485,7 +492,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
21227 * nor set current_kprobe, because it doesn't use single
21228 * stepping.
21229 */
21230- regs->ip = (unsigned long)p->ainsn.insn;
21231+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
21232 preempt_enable_no_resched();
21233 return;
21234 }
21235@@ -502,9 +509,9 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
21236 regs->flags &= ~X86_EFLAGS_IF;
21237 /* single step inline if the instruction is an int3 */
21238 if (p->opcode == BREAKPOINT_INSTRUCTION)
21239- regs->ip = (unsigned long)p->addr;
21240+ regs->ip = ktla_ktva((unsigned long)p->addr);
21241 else
21242- regs->ip = (unsigned long)p->ainsn.insn;
21243+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
21244 }
21245
21246 /*
21247@@ -600,7 +607,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
21248 setup_singlestep(p, regs, kcb, 0);
21249 return 1;
21250 }
21251- } else if (*addr != BREAKPOINT_INSTRUCTION) {
21252+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
21253 /*
21254 * The breakpoint instruction was removed right
21255 * after we hit it. Another cpu has removed
21256@@ -651,6 +658,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
21257 " movq %rax, 152(%rsp)\n"
21258 RESTORE_REGS_STRING
21259 " popfq\n"
21260+#ifdef KERNEXEC_PLUGIN
21261+ " btsq $63,(%rsp)\n"
21262+#endif
21263 #else
21264 " pushf\n"
21265 SAVE_REGS_STRING
21266@@ -788,7 +798,7 @@ static void __kprobes
21267 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
21268 {
21269 unsigned long *tos = stack_addr(regs);
21270- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
21271+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
21272 unsigned long orig_ip = (unsigned long)p->addr;
21273 kprobe_opcode_t *insn = p->ainsn.insn;
21274
21275@@ -970,7 +980,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
21276 struct die_args *args = data;
21277 int ret = NOTIFY_DONE;
21278
21279- if (args->regs && user_mode_vm(args->regs))
21280+ if (args->regs && user_mode(args->regs))
21281 return ret;
21282
21283 switch (val) {
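[annotation] kprobes gets the same two treatments: every write into an instruction slot is bracketed by pax_open_kernel()/pax_close_kernel() (the slots are executable, hence read-only under KERNEXEC), and addresses move between the two text views via ktla_ktva()/ktva_ktla() as in kgdb. The RIP-relative rebase that __copy_instruction() performs around one of those writes is worth spelling out: a displacement relative to the original instruction must be recomputed when the instruction is copied. The arithmetic, with invented addresses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* invented: an instruction at src referencing a target RIP-relatively
	 * is copied to dest; the RIP base is the end of the instruction */
	uint64_t src = 0xffffffff81000000ULL, dest = 0xffffffffa0002000ULL;
	uint64_t insn_len = 7;
	int32_t  disp = 0x123456;		/* original displacement */

	uint64_t target = src + insn_len + disp;	/* what it refers to */

	/* newdisp = (u8 *)src + disp - (u8 *)dest, as in __copy_instruction():
	 * the end-of-instruction offsets cancel because insn_len is unchanged */
	int64_t newdisp = (int64_t)(src + disp - dest);

	printf("old target %#llx, new target %#llx, newdisp %lld\n",
	       (unsigned long long)target,
	       (unsigned long long)(dest + insn_len + (int32_t)newdisp),
	       (long long)newdisp);
	return 0;
}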
21284diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
21285index 9c2bd8b..bb1131c 100644
21286--- a/arch/x86/kernel/kvm.c
21287+++ b/arch/x86/kernel/kvm.c
21288@@ -452,7 +452,7 @@ static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
21289 return NOTIFY_OK;
21290 }
21291
21292-static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
21293+static struct notifier_block kvm_cpu_notifier = {
21294 .notifier_call = kvm_cpu_notify,
21295 };
21296 #endif
21297diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
21298index ebc9873..1b9724b 100644
21299--- a/arch/x86/kernel/ldt.c
21300+++ b/arch/x86/kernel/ldt.c
21301@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
21302 if (reload) {
21303 #ifdef CONFIG_SMP
21304 preempt_disable();
21305- load_LDT(pc);
21306+ load_LDT_nolock(pc);
21307 if (!cpumask_equal(mm_cpumask(current->mm),
21308 cpumask_of(smp_processor_id())))
21309 smp_call_function(flush_ldt, current->mm, 1);
21310 preempt_enable();
21311 #else
21312- load_LDT(pc);
21313+ load_LDT_nolock(pc);
21314 #endif
21315 }
21316 if (oldsize) {
21317@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
21318 return err;
21319
21320 for (i = 0; i < old->size; i++)
21321- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
21322+ write_ldt_entry(new->ldt, i, old->ldt + i);
21323 return 0;
21324 }
21325
21326@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
21327 retval = copy_ldt(&mm->context, &old_mm->context);
21328 mutex_unlock(&old_mm->context.lock);
21329 }
21330+
21331+ if (tsk == current) {
21332+ mm->context.vdso = 0;
21333+
21334+#ifdef CONFIG_X86_32
21335+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21336+ mm->context.user_cs_base = 0UL;
21337+ mm->context.user_cs_limit = ~0UL;
21338+
21339+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
21340+ cpus_clear(mm->context.cpu_user_cs_mask);
21341+#endif
21342+
21343+#endif
21344+#endif
21345+
21346+ }
21347+
21348 return retval;
21349 }
21350
21351@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
21352 }
21353 }
21354
21355+#ifdef CONFIG_PAX_SEGMEXEC
21356+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
21357+ error = -EINVAL;
21358+ goto out_unlock;
21359+ }
21360+#endif
21361+
21362 fill_ldt(&ldt, &ldt_info);
21363 if (oldmode)
21364 ldt.avl = 0;
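[annotation] Under PAX_SEGMEXEC the user address space is split in half and code runs through a mirrored segment, so a task flagged MF_PAX_SEGMEXEC must not be able to install executable LDT descriptors of its own: write_ldt() now rejects any entry whose contents field says code. What such a request looks like from user space (x86; on a SEGMEXEC task this now returns -EINVAL, elsewhere it normally succeeds):

#include <asm/ldt.h>	/* struct user_desc, MODIFY_LDT_CONTENTS_CODE */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct user_desc ud;

	memset(&ud, 0, sizeof(ud));
	ud.entry_number   = 0;
	ud.base_addr      = 0;
	ud.limit          = 0xfffff;
	ud.seg_32bit      = 1;
	ud.limit_in_pages = 1;
	ud.contents       = MODIFY_LDT_CONTENTS_CODE;	/* executable segment */

	/* modify_ldt(func=1 -> write_ldt) has no libc wrapper */
	if (syscall(SYS_modify_ldt, 1, &ud, sizeof(ud)) < 0)
		printf("modify_ldt: %s\n", strerror(errno));
	else
		puts("code LDT entry installed");
	return 0;
}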
21365diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
21366index 5b19e4d..6476a76 100644
21367--- a/arch/x86/kernel/machine_kexec_32.c
21368+++ b/arch/x86/kernel/machine_kexec_32.c
21369@@ -26,7 +26,7 @@
21370 #include <asm/cacheflush.h>
21371 #include <asm/debugreg.h>
21372
21373-static void set_idt(void *newidt, __u16 limit)
21374+static void set_idt(struct desc_struct *newidt, __u16 limit)
21375 {
21376 struct desc_ptr curidt;
21377
21378@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
21379 }
21380
21381
21382-static void set_gdt(void *newgdt, __u16 limit)
21383+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
21384 {
21385 struct desc_ptr curgdt;
21386
21387@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
21388 }
21389
21390 control_page = page_address(image->control_code_page);
21391- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
21392+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
21393
21394 relocate_kernel_ptr = control_page;
21395 page_list[PA_CONTROL_PAGE] = __pa(control_page);
21396diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
21397index 3a04b22..1d2eb09 100644
21398--- a/arch/x86/kernel/microcode_core.c
21399+++ b/arch/x86/kernel/microcode_core.c
21400@@ -512,7 +512,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
21401 return NOTIFY_OK;
21402 }
21403
21404-static struct notifier_block __refdata mc_cpu_notifier = {
21405+static struct notifier_block mc_cpu_notifier = {
21406 .notifier_call = mc_cpu_callback,
21407 };
21408
21409diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
21410index 3544aed..01ddc1c 100644
21411--- a/arch/x86/kernel/microcode_intel.c
21412+++ b/arch/x86/kernel/microcode_intel.c
21413@@ -431,13 +431,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
21414
21415 static int get_ucode_user(void *to, const void *from, size_t n)
21416 {
21417- return copy_from_user(to, from, n);
21418+ return copy_from_user(to, (const void __force_user *)from, n);
21419 }
21420
21421 static enum ucode_state
21422 request_microcode_user(int cpu, const void __user *buf, size_t size)
21423 {
21424- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
21425+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
21426 }
21427
21428 static void microcode_fini_cpu(int cpu)
21429diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
21430index 216a4d7..228255a 100644
21431--- a/arch/x86/kernel/module.c
21432+++ b/arch/x86/kernel/module.c
21433@@ -43,15 +43,60 @@ do { \
21434 } while (0)
21435 #endif
21436
21437-void *module_alloc(unsigned long size)
21438+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
21439 {
21440- if (PAGE_ALIGN(size) > MODULES_LEN)
21441+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
21442 return NULL;
21443 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
21444- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
21445+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
21446 -1, __builtin_return_address(0));
21447 }
21448
21449+void *module_alloc(unsigned long size)
21450+{
21451+
21452+#ifdef CONFIG_PAX_KERNEXEC
21453+ return __module_alloc(size, PAGE_KERNEL);
21454+#else
21455+ return __module_alloc(size, PAGE_KERNEL_EXEC);
21456+#endif
21457+
21458+}
21459+
21460+#ifdef CONFIG_PAX_KERNEXEC
21461+#ifdef CONFIG_X86_32
21462+void *module_alloc_exec(unsigned long size)
21463+{
21464+ struct vm_struct *area;
21465+
21466+ if (size == 0)
21467+ return NULL;
21468+
21469+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
21470+ return area ? area->addr : NULL;
21471+}
21472+EXPORT_SYMBOL(module_alloc_exec);
21473+
21474+void module_free_exec(struct module *mod, void *module_region)
21475+{
21476+ vunmap(module_region);
21477+}
21478+EXPORT_SYMBOL(module_free_exec);
21479+#else
21480+void module_free_exec(struct module *mod, void *module_region)
21481+{
21482+ module_free(mod, module_region);
21483+}
21484+EXPORT_SYMBOL(module_free_exec);
21485+
21486+void *module_alloc_exec(unsigned long size)
21487+{
21488+ return __module_alloc(size, PAGE_KERNEL_RX);
21489+}
21490+EXPORT_SYMBOL(module_alloc_exec);
21491+#endif
21492+#endif
21493+
21494 #ifdef CONFIG_X86_32
21495 int apply_relocate(Elf32_Shdr *sechdrs,
21496 const char *strtab,
21497@@ -62,14 +107,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
21498 unsigned int i;
21499 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
21500 Elf32_Sym *sym;
21501- uint32_t *location;
21502+ uint32_t *plocation, location;
21503
21504 DEBUGP("Applying relocate section %u to %u\n",
21505 relsec, sechdrs[relsec].sh_info);
21506 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
21507 /* This is where to make the change */
21508- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
21509- + rel[i].r_offset;
21510+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
21511+ location = (uint32_t)plocation;
21512+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
21513+ plocation = ktla_ktva((void *)plocation);
21514 /* This is the symbol it is referring to. Note that all
21515 undefined symbols have been resolved. */
21516 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
21517@@ -78,11 +125,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
21518 switch (ELF32_R_TYPE(rel[i].r_info)) {
21519 case R_386_32:
21520 /* We add the value into the location given */
21521- *location += sym->st_value;
21522+ pax_open_kernel();
21523+ *plocation += sym->st_value;
21524+ pax_close_kernel();
21525 break;
21526 case R_386_PC32:
21527 /* Add the value, subtract its position */
21528- *location += sym->st_value - (uint32_t)location;
21529+ pax_open_kernel();
21530+ *plocation += sym->st_value - location;
21531+ pax_close_kernel();
21532 break;
21533 default:
21534 pr_err("%s: Unknown relocation: %u\n",
21535@@ -127,21 +178,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
21536 case R_X86_64_NONE:
21537 break;
21538 case R_X86_64_64:
21539+ pax_open_kernel();
21540 *(u64 *)loc = val;
21541+ pax_close_kernel();
21542 break;
21543 case R_X86_64_32:
21544+ pax_open_kernel();
21545 *(u32 *)loc = val;
21546+ pax_close_kernel();
21547 if (val != *(u32 *)loc)
21548 goto overflow;
21549 break;
21550 case R_X86_64_32S:
21551+ pax_open_kernel();
21552 *(s32 *)loc = val;
21553+ pax_close_kernel();
21554 if ((s64)val != *(s32 *)loc)
21555 goto overflow;
21556 break;
21557 case R_X86_64_PC32:
21558 val -= (u64)loc;
21559+ pax_open_kernel();
21560 *(u32 *)loc = val;
21561+ pax_close_kernel();
21562+
21563 #if 0
21564 if ((s64)val != *(s32 *)loc)
21565 goto overflow;
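[annotation] module.c is where W^X lands for modules: module_alloc() now returns non-executable memory for module data, the new module_alloc_exec() returns read-only executable memory for code (PAGE_KERNEL_RX on amd64, a dedicated VA window on i386), and the relocation loops write through pax_open_kernel() or the ktla_ktva alias. The same never-writable-and-executable discipline in user space, a minimal x86-64 sketch with hand-assembled bytes:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);

	/* module_alloc_exec() analogue: start writable, never W+X at once */
	unsigned char *code = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/* x86-64: mov eax, 42 ; ret */
	static const unsigned char insns[] = { 0xb8, 0x2a, 0, 0, 0, 0xc3 };
	memcpy(code, insns, sizeof(insns));	/* "apply relocations" here */

	mprotect(code, pagesz, PROT_READ | PROT_EXEC);	/* seal: RX from now on */

	int (*fn)(void) = (int (*)(void))code;
	printf("fn() = %d\n", fn());		/* 42 */
	return 0;
}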
21566diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
21567index 4929502..686c291 100644
21568--- a/arch/x86/kernel/msr.c
21569+++ b/arch/x86/kernel/msr.c
21570@@ -234,7 +234,7 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
21571 return notifier_from_errno(err);
21572 }
21573
21574-static struct notifier_block __refdata msr_class_cpu_notifier = {
21575+static struct notifier_block msr_class_cpu_notifier = {
21576 .notifier_call = msr_class_cpu_callback,
21577 };
21578
21579diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
21580index f84f5c5..f404e81 100644
21581--- a/arch/x86/kernel/nmi.c
21582+++ b/arch/x86/kernel/nmi.c
21583@@ -105,7 +105,7 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
21584 return handled;
21585 }
21586
21587-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
21588+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
21589 {
21590 struct nmi_desc *desc = nmi_to_desc(type);
21591 unsigned long flags;
21592@@ -129,9 +129,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
21593 * event confuses some handlers (kdump uses this flag)
21594 */
21595 if (action->flags & NMI_FLAG_FIRST)
21596- list_add_rcu(&action->list, &desc->head);
21597+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
21598 else
21599- list_add_tail_rcu(&action->list, &desc->head);
21600+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
21601
21602 spin_unlock_irqrestore(&desc->lock, flags);
21603 return 0;
21604@@ -154,7 +154,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
21605 if (!strcmp(n->name, name)) {
21606 WARN(in_nmi(),
21607 "Trying to free NMI (%s) from NMI context!\n", n->name);
21608- list_del_rcu(&n->list);
21609+ pax_list_del_rcu((struct list_head *)&n->list);
21610 break;
21611 }
21612 }
21613@@ -479,6 +479,17 @@ static inline void nmi_nesting_postprocess(void)
21614 dotraplinkage notrace __kprobes void
21615 do_nmi(struct pt_regs *regs, long error_code)
21616 {
21617+
21618+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21619+ if (!user_mode(regs)) {
21620+ unsigned long cs = regs->cs & 0xFFFF;
21621+ unsigned long ip = ktva_ktla(regs->ip);
21622+
21623+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
21624+ regs->ip = ip;
21625+ }
21626+#endif
21627+
21628 nmi_nesting_preprocess(regs);
21629
21630 nmi_enter();
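[annotation] The NMI handler descriptors become const-capable: __register_nmi_handler() takes a const struct nmiaction, and the list linkage goes through pax_list_add_rcu()/pax_list_del_rcu(), which cast away the const and perform the pointer writes with the kernel opened, so the nmiaction itself can live in read-only memory (the nmi_selftest hunk below moves its test actions from __initdata to __initconst for the same reason). A sketch of the shape of such a helper, with the open/close stubs standing in for the real mechanism:

#include <stdio.h>

/* Minimal intrusive list mirroring the kernel's struct list_head: the
 * nodes may sit in read-only memory, so all pointer writes funnel
 * through one routine that lifts write protection around them. */
struct list_head { struct list_head *next, *prev; };

static void pax_open_kernel(void)  { /* lift write protection (stub) */ }
static void pax_close_kernel(void) { /* restore it (stub) */ }

static void pax_list_add(struct list_head *entry, struct list_head *head)
{
	pax_open_kernel();
	entry->next = head->next;
	entry->prev = head;
	head->next->prev = entry;
	head->next = entry;
	pax_close_kernel();
}

struct nmiaction {
	struct list_head list;	/* first member, so the cast below is valid */
	const char *name;
};

int main(void)
{
	struct list_head head = { &head, &head };
	static struct nmiaction act = { .name = "demo" };

	pax_list_add(&act.list, &head);
	printf("registered: %s\n", ((struct nmiaction *)head.next)->name);
	return 0;
}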
21631diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
21632index 6d9582e..f746287 100644
21633--- a/arch/x86/kernel/nmi_selftest.c
21634+++ b/arch/x86/kernel/nmi_selftest.c
21635@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
21636 {
21637 /* trap all the unknown NMIs we may generate */
21638 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
21639- __initdata);
21640+ __initconst);
21641 }
21642
21643 static void __init cleanup_nmi_testsuite(void)
21644@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
21645 unsigned long timeout;
21646
21647 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
21648- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
21649+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
21650 nmi_fail = FAILURE;
21651 return;
21652 }
21653diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
21654index 676b8c7..870ba04 100644
21655--- a/arch/x86/kernel/paravirt-spinlocks.c
21656+++ b/arch/x86/kernel/paravirt-spinlocks.c
21657@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
21658 arch_spin_lock(lock);
21659 }
21660
21661-struct pv_lock_ops pv_lock_ops = {
21662+struct pv_lock_ops pv_lock_ops __read_only = {
21663 #ifdef CONFIG_SMP
21664 .spin_is_locked = __ticket_spin_is_locked,
21665 .spin_is_contended = __ticket_spin_is_contended,
21666diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
21667index 17fff18..5cfa0f4 100644
21668--- a/arch/x86/kernel/paravirt.c
21669+++ b/arch/x86/kernel/paravirt.c
21670@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
21671 {
21672 return x;
21673 }
21674+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21675+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
21676+#endif
21677
21678 void __init default_banner(void)
21679 {
21680@@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
21681 if (opfunc == NULL)
21682 /* If there's no function, patch it with a ud2a (BUG) */
21683 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
21684- else if (opfunc == _paravirt_nop)
21685+ else if (opfunc == (void *)_paravirt_nop)
21686 /* If the operation is a nop, then nop the callsite */
21687 ret = paravirt_patch_nop();
21688
21689 /* identity functions just return their single argument */
21690- else if (opfunc == _paravirt_ident_32)
21691+ else if (opfunc == (void *)_paravirt_ident_32)
21692 ret = paravirt_patch_ident_32(insnbuf, len);
21693- else if (opfunc == _paravirt_ident_64)
21694+ else if (opfunc == (void *)_paravirt_ident_64)
21695 ret = paravirt_patch_ident_64(insnbuf, len);
21696+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21697+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
21698+ ret = paravirt_patch_ident_64(insnbuf, len);
21699+#endif
21700
21701 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
21702 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
21703@@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
21704 if (insn_len > len || start == NULL)
21705 insn_len = len;
21706 else
21707- memcpy(insnbuf, start, insn_len);
21708+ memcpy(insnbuf, ktla_ktva(start), insn_len);
21709
21710 return insn_len;
21711 }
21712@@ -304,7 +311,7 @@ void arch_flush_lazy_mmu_mode(void)
21713 preempt_enable();
21714 }
21715
21716-struct pv_info pv_info = {
21717+struct pv_info pv_info __read_only = {
21718 .name = "bare hardware",
21719 .paravirt_enabled = 0,
21720 .kernel_rpl = 0,
21721@@ -315,16 +322,16 @@ struct pv_info pv_info = {
21722 #endif
21723 };
21724
21725-struct pv_init_ops pv_init_ops = {
21726+struct pv_init_ops pv_init_ops __read_only = {
21727 .patch = native_patch,
21728 };
21729
21730-struct pv_time_ops pv_time_ops = {
21731+struct pv_time_ops pv_time_ops __read_only = {
21732 .sched_clock = native_sched_clock,
21733 .steal_clock = native_steal_clock,
21734 };
21735
21736-struct pv_irq_ops pv_irq_ops = {
21737+struct pv_irq_ops pv_irq_ops __read_only = {
21738 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
21739 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
21740 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
21741@@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
21742 #endif
21743 };
21744
21745-struct pv_cpu_ops pv_cpu_ops = {
21746+struct pv_cpu_ops pv_cpu_ops __read_only = {
21747 .cpuid = native_cpuid,
21748 .get_debugreg = native_get_debugreg,
21749 .set_debugreg = native_set_debugreg,
21750@@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
21751 .end_context_switch = paravirt_nop,
21752 };
21753
21754-struct pv_apic_ops pv_apic_ops = {
21755+struct pv_apic_ops pv_apic_ops __read_only= {
21756 #ifdef CONFIG_X86_LOCAL_APIC
21757 .startup_ipi_hook = paravirt_nop,
21758 #endif
21759 };
21760
21761-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
21762+#ifdef CONFIG_X86_32
21763+#ifdef CONFIG_X86_PAE
21764+/* 64-bit pagetable entries */
21765+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
21766+#else
21767 /* 32-bit pagetable entries */
21768 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
21769+#endif
21770 #else
21771 /* 64-bit pagetable entries */
21772 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
21773 #endif
21774
21775-struct pv_mmu_ops pv_mmu_ops = {
21776+struct pv_mmu_ops pv_mmu_ops __read_only = {
21777
21778 .read_cr2 = native_read_cr2,
21779 .write_cr2 = native_write_cr2,
21780@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
21781 .make_pud = PTE_IDENT,
21782
21783 .set_pgd = native_set_pgd,
21784+ .set_pgd_batched = native_set_pgd_batched,
21785 #endif
21786 #endif /* PAGETABLE_LEVELS >= 3 */
21787
21788@@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
21789 },
21790
21791 .set_fixmap = native_set_fixmap,
21792+
21793+#ifdef CONFIG_PAX_KERNEXEC
21794+ .pax_open_kernel = native_pax_open_kernel,
21795+ .pax_close_kernel = native_pax_close_kernel,
21796+#endif
21797+
21798 };
21799
21800 EXPORT_SYMBOL_GPL(pv_time_ops);
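
An aside on the pattern the paravirt hunks above rely on: paravirt_patch_default() classifies an opaque op pointer by comparing it against known helper functions, and the added (void *) casts allow pointers of differing function types to be compared cleanly once _paravirt_ident_64 gains a callee-save thunk. A minimal userspace sketch of that classification, with all names illustrative rather than the kernel's:

	#include <stdio.h>

	static long ident_64(long x) { return x; }	/* stand-in for _paravirt_ident_64 */
	static void nop_fn(void)     { }		/* stand-in for _paravirt_nop */

	/* Compare an opaque op pointer against known helpers; the (void *)
	 * casts mirror the patch and sidestep differing-function-type
	 * comparison warnings (a GCC-supported idiom the kernel leans on). */
	static const char *classify(void *opfunc)
	{
		if (opfunc == NULL)
			return "patch in ud2a (BUG)";
		if (opfunc == (void *)nop_fn)
			return "nop out the call site";
		if (opfunc == (void *)ident_64)
			return "patch in the identity sequence";
		return "leave an indirect call";
	}

	int main(void)
	{
		printf("%s\n", classify((void *)ident_64));
		printf("%s\n", classify((void *)nop_fn));
		return 0;
	}
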
21801diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
21802index 35ccf75..7a15747 100644
21803--- a/arch/x86/kernel/pci-iommu_table.c
21804+++ b/arch/x86/kernel/pci-iommu_table.c
21805@@ -2,7 +2,7 @@
21806 #include <asm/iommu_table.h>
21807 #include <linux/string.h>
21808 #include <linux/kallsyms.h>
21809-
21810+#include <linux/sched.h>
21811
21812 #define DEBUG 1
21813
21814diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
21815index 6c483ba..d10ce2f 100644
21816--- a/arch/x86/kernel/pci-swiotlb.c
21817+++ b/arch/x86/kernel/pci-swiotlb.c
21818@@ -32,7 +32,7 @@ static void x86_swiotlb_free_coherent(struct device *dev, size_t size,
21819 void *vaddr, dma_addr_t dma_addr,
21820 struct dma_attrs *attrs)
21821 {
21822- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
21823+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
21824 }
21825
21826 static struct dma_map_ops swiotlb_dma_ops = {
21827diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
21828index 2ed787f..f70c9f6 100644
21829--- a/arch/x86/kernel/process.c
21830+++ b/arch/x86/kernel/process.c
21831@@ -36,7 +36,8 @@
21832 * section. Since TSS's are completely CPU-local, we want them
21833 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
21834 */
21835-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
21836+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
21837+EXPORT_SYMBOL(init_tss);
21838
21839 #ifdef CONFIG_X86_64
21840 static DEFINE_PER_CPU(unsigned char, is_idle);
21841@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
21842 task_xstate_cachep =
21843 kmem_cache_create("task_xstate", xstate_size,
21844 __alignof__(union thread_xstate),
21845- SLAB_PANIC | SLAB_NOTRACK, NULL);
21846+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
21847 }
21848
21849 /*
21850@@ -105,7 +106,7 @@ void exit_thread(void)
21851 unsigned long *bp = t->io_bitmap_ptr;
21852
21853 if (bp) {
21854- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
21855+ struct tss_struct *tss = init_tss + get_cpu();
21856
21857 t->io_bitmap_ptr = NULL;
21858 clear_thread_flag(TIF_IO_BITMAP);
21859@@ -136,7 +137,7 @@ void show_regs_common(void)
21860 board = dmi_get_system_info(DMI_BOARD_NAME);
21861
21862 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s %s%s%s\n",
21863- current->pid, current->comm, print_tainted(),
21864+ task_pid_nr(current), current->comm, print_tainted(),
21865 init_utsname()->release,
21866 (int)strcspn(init_utsname()->version, " "),
21867 init_utsname()->version,
21868@@ -149,6 +150,9 @@ void flush_thread(void)
21869 {
21870 struct task_struct *tsk = current;
21871
21872+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
21873+ loadsegment(gs, 0);
21874+#endif
21875 flush_ptrace_hw_breakpoint(tsk);
21876 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
21877 drop_init_fpu(tsk);
21878@@ -301,7 +305,7 @@ static void __exit_idle(void)
21879 void exit_idle(void)
21880 {
21881 /* idle loop has pid 0 */
21882- if (current->pid)
21883+ if (task_pid_nr(current))
21884 return;
21885 __exit_idle();
21886 }
21887@@ -404,7 +408,7 @@ bool set_pm_idle_to_default(void)
21888
21889 return ret;
21890 }
21891-void stop_this_cpu(void *dummy)
21892+__noreturn void stop_this_cpu(void *dummy)
21893 {
21894 local_irq_disable();
21895 /*
21896@@ -632,16 +636,37 @@ static int __init idle_setup(char *str)
21897 }
21898 early_param("idle", idle_setup);
21899
21900-unsigned long arch_align_stack(unsigned long sp)
21901+#ifdef CONFIG_PAX_RANDKSTACK
21902+void pax_randomize_kstack(struct pt_regs *regs)
21903 {
21904- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
21905- sp -= get_random_int() % 8192;
21906- return sp & ~0xf;
21907-}
21908+ struct thread_struct *thread = &current->thread;
21909+ unsigned long time;
21910
21911-unsigned long arch_randomize_brk(struct mm_struct *mm)
21912-{
21913- unsigned long range_end = mm->brk + 0x02000000;
21914- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
21915-}
21916+ if (!randomize_va_space)
21917+ return;
21918+
21919+ if (v8086_mode(regs))
21920+ return;
21921
21922+ rdtscl(time);
21923+
21924+ /* P4 seems to return a 0 LSB, ignore it */
21925+#ifdef CONFIG_MPENTIUM4
21926+ time &= 0x3EUL;
21927+ time <<= 2;
21928+#elif defined(CONFIG_X86_64)
21929+ time &= 0xFUL;
21930+ time <<= 4;
21931+#else
21932+ time &= 0x1FUL;
21933+ time <<= 3;
21934+#endif
21935+
21936+ thread->sp0 ^= time;
21937+ load_sp0(init_tss + smp_processor_id(), thread);
21938+
21939+#ifdef CONFIG_X86_64
21940+ this_cpu_write(kernel_stack, thread->sp0);
21941+#endif
21942+}
21943+#endif
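
The RANDKSTACK hunk above perturbs the kernel stack base with low TSC bits. A small sketch of just the masking arithmetic from the 64-bit branch, assuming an x86 host with GCC or Clang (__rdtsc from x86intrin.h); the point is that the resulting offset stays 16-byte aligned and below 256 bytes:

	#include <stdio.h>
	#include <stdint.h>
	#include <x86intrin.h>

	int main(void)
	{
		uint64_t time = __rdtsc();

		time &= 0xFUL;	/* keep 4 low bits: 16 candidate offsets */
		time <<= 4;	/* scale by 16: aligned, always below 256 */

		printf("sp0 would be XORed with %llu\n",
		       (unsigned long long)time);
		return 0;
	}
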
21944diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
21945index b5a8905..d9cacac 100644
21946--- a/arch/x86/kernel/process_32.c
21947+++ b/arch/x86/kernel/process_32.c
21948@@ -65,6 +65,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
21949 unsigned long thread_saved_pc(struct task_struct *tsk)
21950 {
21951 return ((unsigned long *)tsk->thread.sp)[3];
21952+//XXX return tsk->thread.eip;
21953 }
21954
21955 void __show_regs(struct pt_regs *regs, int all)
21956@@ -74,21 +75,20 @@ void __show_regs(struct pt_regs *regs, int all)
21957 unsigned long sp;
21958 unsigned short ss, gs;
21959
21960- if (user_mode_vm(regs)) {
21961+ if (user_mode(regs)) {
21962 sp = regs->sp;
21963 ss = regs->ss & 0xffff;
21964- gs = get_user_gs(regs);
21965 } else {
21966 sp = kernel_stack_pointer(regs);
21967 savesegment(ss, ss);
21968- savesegment(gs, gs);
21969 }
21970+ gs = get_user_gs(regs);
21971
21972 show_regs_common();
21973
21974 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
21975 (u16)regs->cs, regs->ip, regs->flags,
21976- smp_processor_id());
21977+ raw_smp_processor_id());
21978 print_symbol("EIP is at %s\n", regs->ip);
21979
21980 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
21981@@ -130,20 +130,21 @@ void release_thread(struct task_struct *dead_task)
21982 int copy_thread(unsigned long clone_flags, unsigned long sp,
21983 unsigned long arg, struct task_struct *p)
21984 {
21985- struct pt_regs *childregs = task_pt_regs(p);
21986+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
21987 struct task_struct *tsk;
21988 int err;
21989
21990 p->thread.sp = (unsigned long) childregs;
21991 p->thread.sp0 = (unsigned long) (childregs+1);
21992+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
21993
21994 if (unlikely(p->flags & PF_KTHREAD)) {
21995 /* kernel thread */
21996 memset(childregs, 0, sizeof(struct pt_regs));
21997 p->thread.ip = (unsigned long) ret_from_kernel_thread;
21998- task_user_gs(p) = __KERNEL_STACK_CANARY;
21999- childregs->ds = __USER_DS;
22000- childregs->es = __USER_DS;
22001+ savesegment(gs, childregs->gs);
22002+ childregs->ds = __KERNEL_DS;
22003+ childregs->es = __KERNEL_DS;
22004 childregs->fs = __KERNEL_PERCPU;
22005 childregs->bx = sp; /* function */
22006 childregs->bp = arg;
22007@@ -250,7 +251,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22008 struct thread_struct *prev = &prev_p->thread,
22009 *next = &next_p->thread;
22010 int cpu = smp_processor_id();
22011- struct tss_struct *tss = &per_cpu(init_tss, cpu);
22012+ struct tss_struct *tss = init_tss + cpu;
22013 fpu_switch_t fpu;
22014
22015 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
22016@@ -274,6 +275,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22017 */
22018 lazy_save_gs(prev->gs);
22019
22020+#ifdef CONFIG_PAX_MEMORY_UDEREF
22021+ __set_fs(task_thread_info(next_p)->addr_limit);
22022+#endif
22023+
22024 /*
22025 * Load the per-thread Thread-Local Storage descriptor.
22026 */
22027@@ -304,6 +309,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22028 */
22029 arch_end_context_switch(next_p);
22030
22031+ this_cpu_write(current_task, next_p);
22032+ this_cpu_write(current_tinfo, &next_p->tinfo);
22033+
22034 /*
22035 * Restore %gs if needed (which is common)
22036 */
22037@@ -312,8 +320,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22038
22039 switch_fpu_finish(next_p, fpu);
22040
22041- this_cpu_write(current_task, next_p);
22042-
22043 return prev_p;
22044 }
22045
22046@@ -343,4 +349,3 @@ unsigned long get_wchan(struct task_struct *p)
22047 } while (count++ < 16);
22048 return 0;
22049 }
22050-
22051diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
22052index 6e68a61..955a9a5 100644
22053--- a/arch/x86/kernel/process_64.c
22054+++ b/arch/x86/kernel/process_64.c
22055@@ -152,10 +152,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
22056 struct pt_regs *childregs;
22057 struct task_struct *me = current;
22058
22059- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
22060+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
22061 childregs = task_pt_regs(p);
22062 p->thread.sp = (unsigned long) childregs;
22063 p->thread.usersp = me->thread.usersp;
22064+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
22065 set_tsk_thread_flag(p, TIF_FORK);
22066 p->fpu_counter = 0;
22067 p->thread.io_bitmap_ptr = NULL;
22068@@ -274,7 +275,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22069 struct thread_struct *prev = &prev_p->thread;
22070 struct thread_struct *next = &next_p->thread;
22071 int cpu = smp_processor_id();
22072- struct tss_struct *tss = &per_cpu(init_tss, cpu);
22073+ struct tss_struct *tss = init_tss + cpu;
22074 unsigned fsindex, gsindex;
22075 fpu_switch_t fpu;
22076
22077@@ -356,10 +357,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22078 prev->usersp = this_cpu_read(old_rsp);
22079 this_cpu_write(old_rsp, next->usersp);
22080 this_cpu_write(current_task, next_p);
22081+ this_cpu_write(current_tinfo, &next_p->tinfo);
22082
22083- this_cpu_write(kernel_stack,
22084- (unsigned long)task_stack_page(next_p) +
22085- THREAD_SIZE - KERNEL_STACK_OFFSET);
22086+ this_cpu_write(kernel_stack, next->sp0);
22087
22088 /*
22089 * Now maybe reload the debug registers and handle I/O bitmaps
22090@@ -428,12 +428,11 @@ unsigned long get_wchan(struct task_struct *p)
22091 if (!p || p == current || p->state == TASK_RUNNING)
22092 return 0;
22093 stack = (unsigned long)task_stack_page(p);
22094- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
22095+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
22096 return 0;
22097 fp = *(u64 *)(p->thread.sp);
22098 do {
22099- if (fp < (unsigned long)stack ||
22100- fp >= (unsigned long)stack+THREAD_SIZE)
22101+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
22102 return 0;
22103 ip = *(u64 *)(fp+8);
22104 if (!in_sched_functions(ip))
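
The get_wchan() change above tightens the frame-pointer bounds: a saved %rbp is trusted only if the 16-byte pad now reserved at the stack top and the return-address slot at fp + 8 still fit inside the stack region. A hedged sketch of that interval test, with THREAD_SIZE picked arbitrarily:

	#include <stdio.h>
	#include <stdint.h>

	#define THREAD_SIZE 16384UL	/* illustrative; real value is per-arch */

	/* fp is acceptable only while fp + 8 (the return address read) stays
	 * below the 16-byte pad reserved at the top of the stack. */
	static int fp_in_bounds(unsigned long fp, unsigned long stack)
	{
		return fp >= stack &&
		       fp <= stack + THREAD_SIZE - 16 - sizeof(uint64_t);
	}

	int main(void)
	{
		unsigned long stack = 0x100000UL;	/* hypothetical base */

		printf("%d\n", fp_in_bounds(stack + 0x100, stack));		/* 1 */
		printf("%d\n", fp_in_bounds(stack + THREAD_SIZE - 8, stack));	/* 0 */
		return 0;
	}
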
22105diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
22106index b629bbe..0fa615a 100644
22107--- a/arch/x86/kernel/ptrace.c
22108+++ b/arch/x86/kernel/ptrace.c
22109@@ -184,14 +184,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
22110 {
22111 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
22112 unsigned long sp = (unsigned long)&regs->sp;
22113- struct thread_info *tinfo;
22114
22115- if (context == (sp & ~(THREAD_SIZE - 1)))
22116+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
22117 return sp;
22118
22119- tinfo = (struct thread_info *)context;
22120- if (tinfo->previous_esp)
22121- return tinfo->previous_esp;
22122+ sp = *(unsigned long *)context;
22123+ if (sp)
22124+ return sp;
22125
22126 return (unsigned long)regs;
22127 }
22128@@ -588,7 +587,7 @@ static void ptrace_triggered(struct perf_event *bp,
22129 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
22130 {
22131 int i;
22132- int dr7 = 0;
22133+ unsigned long dr7 = 0;
22134 struct arch_hw_breakpoint *info;
22135
22136 for (i = 0; i < HBP_NUM; i++) {
22137@@ -856,7 +855,7 @@ long arch_ptrace(struct task_struct *child, long request,
22138 unsigned long addr, unsigned long data)
22139 {
22140 int ret;
22141- unsigned long __user *datap = (unsigned long __user *)data;
22142+ unsigned long __user *datap = (__force unsigned long __user *)data;
22143
22144 switch (request) {
22145 /* read the word at location addr in the USER area. */
22146@@ -941,14 +940,14 @@ long arch_ptrace(struct task_struct *child, long request,
22147 if ((int) addr < 0)
22148 return -EIO;
22149 ret = do_get_thread_area(child, addr,
22150- (struct user_desc __user *)data);
22151+ (__force struct user_desc __user *) data);
22152 break;
22153
22154 case PTRACE_SET_THREAD_AREA:
22155 if ((int) addr < 0)
22156 return -EIO;
22157 ret = do_set_thread_area(child, addr,
22158- (struct user_desc __user *)data, 0);
22159+ (__force struct user_desc __user *) data, 0);
22160 break;
22161 #endif
22162
22163@@ -1326,7 +1325,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
22164
22165 #ifdef CONFIG_X86_64
22166
22167-static struct user_regset x86_64_regsets[] __read_mostly = {
22168+static user_regset_no_const x86_64_regsets[] __read_only = {
22169 [REGSET_GENERAL] = {
22170 .core_note_type = NT_PRSTATUS,
22171 .n = sizeof(struct user_regs_struct) / sizeof(long),
22172@@ -1367,7 +1366,7 @@ static const struct user_regset_view user_x86_64_view = {
22173 #endif /* CONFIG_X86_64 */
22174
22175 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
22176-static struct user_regset x86_32_regsets[] __read_mostly = {
22177+static user_regset_no_const x86_32_regsets[] __read_only = {
22178 [REGSET_GENERAL] = {
22179 .core_note_type = NT_PRSTATUS,
22180 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
22181@@ -1420,7 +1419,7 @@ static const struct user_regset_view user_x86_32_view = {
22182 */
22183 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
22184
22185-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
22186+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
22187 {
22188 #ifdef CONFIG_X86_64
22189 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
22190@@ -1455,7 +1454,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
22191 memset(info, 0, sizeof(*info));
22192 info->si_signo = SIGTRAP;
22193 info->si_code = si_code;
22194- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
22195+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
22196 }
22197
22198 void user_single_step_siginfo(struct task_struct *tsk,
22199@@ -1484,6 +1483,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
22200 # define IS_IA32 0
22201 #endif
22202
22203+#ifdef CONFIG_GRKERNSEC_SETXID
22204+extern void gr_delayed_cred_worker(void);
22205+#endif
22206+
22207 /*
22208 * We must return the syscall number to actually look up in the table.
22209 * This can be -1L to skip running any syscall at all.
22210@@ -1494,6 +1497,11 @@ long syscall_trace_enter(struct pt_regs *regs)
22211
22212 user_exit();
22213
22214+#ifdef CONFIG_GRKERNSEC_SETXID
22215+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
22216+ gr_delayed_cred_worker();
22217+#endif
22218+
22219 /*
22220 * If we stepped into a sysenter/syscall insn, it trapped in
22221 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
22222@@ -1549,6 +1557,11 @@ void syscall_trace_leave(struct pt_regs *regs)
22223 */
22224 user_exit();
22225
22226+#ifdef CONFIG_GRKERNSEC_SETXID
22227+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
22228+ gr_delayed_cred_worker();
22229+#endif
22230+
22231 audit_syscall_exit(regs);
22232
22233 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
22234diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
22235index 2cb9470..ff1fd80 100644
22236--- a/arch/x86/kernel/pvclock.c
22237+++ b/arch/x86/kernel/pvclock.c
22238@@ -43,11 +43,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
22239 return pv_tsc_khz;
22240 }
22241
22242-static atomic64_t last_value = ATOMIC64_INIT(0);
22243+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
22244
22245 void pvclock_resume(void)
22246 {
22247- atomic64_set(&last_value, 0);
22248+ atomic64_set_unchecked(&last_value, 0);
22249 }
22250
22251 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
22252@@ -92,11 +92,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
22253 * updating at the same time, and one of them could be slightly behind,
22254 * making the assumption that last_value always go forward fail to hold.
22255 */
22256- last = atomic64_read(&last_value);
22257+ last = atomic64_read_unchecked(&last_value);
22258 do {
22259 if (ret < last)
22260 return last;
22261- last = atomic64_cmpxchg(&last_value, last, ret);
22262+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
22263 } while (unlikely(last != ret));
22264
22265 return ret;
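
The pvclock hunk keeps the algorithm intact and only swaps in the *_unchecked atomics: readers publish the largest timestamp seen so far through a cmpxchg loop, so a CPU whose clock is slightly behind still returns a monotonic value. A self-contained C11 rendering of that loop (a userspace stand-in, not the kernel API):

	#include <stdio.h>
	#include <stdint.h>
	#include <stdatomic.h>

	static _Atomic uint64_t last_value;

	static uint64_t monotonic_read(uint64_t ret)
	{
		uint64_t last = atomic_load(&last_value);

		do {
			if (ret < last)		/* another CPU saw a later time */
				return last;
			/* on CAS failure, last is reloaded with the current value */
		} while (!atomic_compare_exchange_weak(&last_value, &last, ret));

		return ret;
	}

	int main(void)
	{
		printf("%llu\n", (unsigned long long)monotonic_read(100)); /* 100 */
		printf("%llu\n", (unsigned long long)monotonic_read(90));  /* 100 */
		return 0;
	}
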
22266diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
22267index 76fa1e9..abf09ea 100644
22268--- a/arch/x86/kernel/reboot.c
22269+++ b/arch/x86/kernel/reboot.c
22270@@ -36,7 +36,7 @@ void (*pm_power_off)(void);
22271 EXPORT_SYMBOL(pm_power_off);
22272
22273 static const struct desc_ptr no_idt = {};
22274-static int reboot_mode;
22275+static unsigned short reboot_mode;
22276 enum reboot_type reboot_type = BOOT_ACPI;
22277 int reboot_force;
22278
22279@@ -157,6 +157,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
22280
22281 void __noreturn machine_real_restart(unsigned int type)
22282 {
22283+
22284+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
22285+ struct desc_struct *gdt;
22286+#endif
22287+
22288 local_irq_disable();
22289
22290 /*
22291@@ -184,7 +189,29 @@ void __noreturn machine_real_restart(unsigned int type)
22292
22293 /* Jump to the identity-mapped low memory code */
22294 #ifdef CONFIG_X86_32
22295- asm volatile("jmpl *%0" : :
22296+
22297+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22298+ gdt = get_cpu_gdt_table(smp_processor_id());
22299+ pax_open_kernel();
22300+#ifdef CONFIG_PAX_MEMORY_UDEREF
22301+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
22302+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
22303+ loadsegment(ds, __KERNEL_DS);
22304+ loadsegment(es, __KERNEL_DS);
22305+ loadsegment(ss, __KERNEL_DS);
22306+#endif
22307+#ifdef CONFIG_PAX_KERNEXEC
22308+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
22309+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
22310+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
22311+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
22312+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
22313+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
22314+#endif
22315+ pax_close_kernel();
22316+#endif
22317+
22318+ asm volatile("ljmpl *%0" : :
22319 "rm" (real_mode_header->machine_real_restart_asm),
22320 "a" (type));
22321 #else
22322@@ -531,7 +558,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
22323 * try to force a triple fault and then cycle between hitting the keyboard
22324 * controller and doing that
22325 */
22326-static void native_machine_emergency_restart(void)
22327+static void __noreturn native_machine_emergency_restart(void)
22328 {
22329 int i;
22330 int attempt = 0;
22331@@ -654,13 +681,13 @@ void native_machine_shutdown(void)
22332 #endif
22333 }
22334
22335-static void __machine_emergency_restart(int emergency)
22336+static void __noreturn __machine_emergency_restart(int emergency)
22337 {
22338 reboot_emergency = emergency;
22339 machine_ops.emergency_restart();
22340 }
22341
22342-static void native_machine_restart(char *__unused)
22343+static void __noreturn native_machine_restart(char *__unused)
22344 {
22345 pr_notice("machine restart\n");
22346
22347@@ -669,7 +696,7 @@ static void native_machine_restart(char *__unused)
22348 __machine_emergency_restart(0);
22349 }
22350
22351-static void native_machine_halt(void)
22352+static void __noreturn native_machine_halt(void)
22353 {
22354 /* Stop other cpus and apics */
22355 machine_shutdown();
22356@@ -679,7 +706,7 @@ static void native_machine_halt(void)
22357 stop_this_cpu(NULL);
22358 }
22359
22360-static void native_machine_power_off(void)
22361+static void __noreturn native_machine_power_off(void)
22362 {
22363 if (pm_power_off) {
22364 if (!reboot_force)
22365@@ -688,9 +715,10 @@ static void native_machine_power_off(void)
22366 }
22367 /* A fallback in case there is no PM info available */
22368 tboot_shutdown(TB_SHUTDOWN_HALT);
22369+ unreachable();
22370 }
22371
22372-struct machine_ops machine_ops = {
22373+struct machine_ops machine_ops __read_only = {
22374 .power_off = native_machine_power_off,
22375 .shutdown = native_machine_shutdown,
22376 .emergency_restart = native_machine_emergency_restart,
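
On the reboot.c annotations above: once a halt or power-off path is marked __noreturn, every exit from it must provably not return, which is why unreachable() follows the tboot fallback. A tiny sketch using the GCC/Clang spellings; the fallback here is deliberately a plain function the compiler cannot see through:

	#include <stdio.h>

	static void fallback_shutdown(void)	/* deliberately not noreturn */
	{
		puts("fallback shutdown");
		for (;;)
			;			/* spin, like a halted CPU */
	}

	__attribute__((noreturn)) static void power_off(void)
	{
		fallback_shutdown();
		__builtin_unreachable();	/* mirrors the kernel's unreachable() */
	}

	int main(void)
	{
		power_off();
	}
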
22377diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
22378index 7a6f3b3..bed145d7 100644
22379--- a/arch/x86/kernel/relocate_kernel_64.S
22380+++ b/arch/x86/kernel/relocate_kernel_64.S
22381@@ -11,6 +11,7 @@
22382 #include <asm/kexec.h>
22383 #include <asm/processor-flags.h>
22384 #include <asm/pgtable_types.h>
22385+#include <asm/alternative-asm.h>
22386
22387 /*
22388 * Must be relocatable PIC code callable as a C function
22389@@ -160,13 +161,14 @@ identity_mapped:
22390 xorq %rbp, %rbp
22391 xorq %r8, %r8
22392 xorq %r9, %r9
22393- xorq %r10, %r9
22394+ xorq %r10, %r10
22395 xorq %r11, %r11
22396 xorq %r12, %r12
22397 xorq %r13, %r13
22398 xorq %r14, %r14
22399 xorq %r15, %r15
22400
22401+ pax_force_retaddr 0, 1
22402 ret
22403
22404 1:
22405diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
22406index 8b24289..d37b58b 100644
22407--- a/arch/x86/kernel/setup.c
22408+++ b/arch/x86/kernel/setup.c
22409@@ -437,7 +437,7 @@ static void __init parse_setup_data(void)
22410
22411 switch (data->type) {
22412 case SETUP_E820_EXT:
22413- parse_e820_ext(data);
22414+ parse_e820_ext((struct setup_data __force_kernel *)data);
22415 break;
22416 case SETUP_DTB:
22417 add_dtb(pa_data);
22418@@ -706,7 +706,7 @@ static void __init trim_bios_range(void)
22419 * area (640->1Mb) as ram even though it is not.
22420 * take them out.
22421 */
22422- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
22423+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
22424
22425 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
22426 }
22427@@ -830,14 +830,14 @@ void __init setup_arch(char **cmdline_p)
22428
22429 if (!boot_params.hdr.root_flags)
22430 root_mountflags &= ~MS_RDONLY;
22431- init_mm.start_code = (unsigned long) _text;
22432- init_mm.end_code = (unsigned long) _etext;
22433+ init_mm.start_code = ktla_ktva((unsigned long) _text);
22434+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
22435 init_mm.end_data = (unsigned long) _edata;
22436 init_mm.brk = _brk_end;
22437
22438- code_resource.start = virt_to_phys(_text);
22439- code_resource.end = virt_to_phys(_etext)-1;
22440- data_resource.start = virt_to_phys(_etext);
22441+ code_resource.start = virt_to_phys(ktla_ktva(_text));
22442+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
22443+ data_resource.start = virt_to_phys(_sdata);
22444 data_resource.end = virt_to_phys(_edata)-1;
22445 bss_resource.start = virt_to_phys(&__bss_start);
22446 bss_resource.end = virt_to_phys(&__bss_stop)-1;
22447diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
22448index 5cdff03..80fa283 100644
22449--- a/arch/x86/kernel/setup_percpu.c
22450+++ b/arch/x86/kernel/setup_percpu.c
22451@@ -21,19 +21,17 @@
22452 #include <asm/cpu.h>
22453 #include <asm/stackprotector.h>
22454
22455-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
22456+#ifdef CONFIG_SMP
22457+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
22458 EXPORT_PER_CPU_SYMBOL(cpu_number);
22459+#endif
22460
22461-#ifdef CONFIG_X86_64
22462 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
22463-#else
22464-#define BOOT_PERCPU_OFFSET 0
22465-#endif
22466
22467 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
22468 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
22469
22470-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
22471+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
22472 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
22473 };
22474 EXPORT_SYMBOL(__per_cpu_offset);
22475@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
22476 {
22477 #ifdef CONFIG_NEED_MULTIPLE_NODES
22478 pg_data_t *last = NULL;
22479- unsigned int cpu;
22480+ int cpu;
22481
22482 for_each_possible_cpu(cpu) {
22483 int node = early_cpu_to_node(cpu);
22484@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
22485 {
22486 #ifdef CONFIG_X86_32
22487 struct desc_struct gdt;
22488+ unsigned long base = per_cpu_offset(cpu);
22489
22490- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
22491- 0x2 | DESCTYPE_S, 0x8);
22492- gdt.s = 1;
22493+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
22494+ 0x83 | DESCTYPE_S, 0xC);
22495 write_gdt_entry(get_cpu_gdt_table(cpu),
22496 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
22497 #endif
22498@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
22499 /* alrighty, percpu areas up and running */
22500 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
22501 for_each_possible_cpu(cpu) {
22502+#ifdef CONFIG_CC_STACKPROTECTOR
22503+#ifdef CONFIG_X86_32
22504+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
22505+#endif
22506+#endif
22507 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
22508 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
22509 per_cpu(cpu_number, cpu) = cpu;
22510@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
22511 */
22512 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
22513 #endif
22514+#ifdef CONFIG_CC_STACKPROTECTOR
22515+#ifdef CONFIG_X86_32
22516+ if (!cpu)
22517+ per_cpu(stack_canary.canary, cpu) = canary;
22518+#endif
22519+#endif
22520 /*
22521 * Up to this point, the boot CPU has been using .init.data
22522 * area. Reload any changed state for the boot CPU.
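
The setup_percpu.c hunk sizes the per-cpu segment up to VMALLOC_END instead of the full 4 GB, expressing the limit in 4 KiB pages since the granularity bit is set in the 0xC flags. A sketch of just that limit arithmetic, with both addresses assumed for illustration:

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define VMALLOC_END	0xfffff000UL	/* assumed, config-dependent */

	int main(void)
	{
		unsigned long base  = 0xc2000000UL;	/* hypothetical per-cpu base */
		unsigned long limit = (VMALLOC_END - base - 1) >> PAGE_SHIFT;

		printf("limit = 0x%lx pages: segment covers base..VMALLOC_END\n",
		       limit);
		return 0;
	}
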
22523diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
22524index d6bf1f3..3ffce5a 100644
22525--- a/arch/x86/kernel/signal.c
22526+++ b/arch/x86/kernel/signal.c
22527@@ -196,7 +196,7 @@ static unsigned long align_sigframe(unsigned long sp)
22528 * Align the stack pointer according to the i386 ABI,
22529 * i.e. so that on function entry ((sp + 4) & 15) == 0.
22530 */
22531- sp = ((sp + 4) & -16ul) - 4;
22532+ sp = ((sp - 12) & -16ul) - 4;
22533 #else /* !CONFIG_X86_32 */
22534 sp = round_down(sp, 16) - 8;
22535 #endif
22536@@ -304,9 +304,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
22537 }
22538
22539 if (current->mm->context.vdso)
22540- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
22541+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
22542 else
22543- restorer = &frame->retcode;
22544+ restorer = (void __user *)&frame->retcode;
22545 if (ka->sa.sa_flags & SA_RESTORER)
22546 restorer = ka->sa.sa_restorer;
22547
22548@@ -320,7 +320,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
22549 * reasons and because gdb uses it as a signature to notice
22550 * signal handler stack frames.
22551 */
22552- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
22553+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
22554
22555 if (err)
22556 return -EFAULT;
22557@@ -367,7 +367,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
22558 err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
22559
22560 /* Set up to return from userspace. */
22561- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
22562+ if (current->mm->context.vdso)
22563+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
22564+ else
22565+ restorer = (void __user *)&frame->retcode;
22566 if (ka->sa.sa_flags & SA_RESTORER)
22567 restorer = ka->sa.sa_restorer;
22568 put_user_ex(restorer, &frame->pretcode);
22569@@ -379,7 +382,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
22570 * reasons and because gdb uses it as a signature to notice
22571 * signal handler stack frames.
22572 */
22573- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
22574+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
22575 } put_user_catch(err);
22576
22577 err |= copy_siginfo_to_user(&frame->info, info);
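
For the align_sigframe() change: both formulas satisfy the i386 ABI requirement ((sp + 4) & 15) == 0 at handler entry, but the new one provably lands at least 16 bytes below the incoming sp rather than possibly reusing it when sp is already aligned. A quick check of both invariants:

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned long sp;

		for (sp = 0x2000; sp < 0x2020; sp++) {
			unsigned long sp_old = ((sp + 4) & -16ul) - 4;
			unsigned long sp_new = ((sp - 12) & -16ul) - 4;

			assert(((sp_old + 4) & 15) == 0);	/* ABI alignment */
			assert(((sp_new + 4) & 15) == 0);	/* still holds */
			assert(sp_old <= sp);		/* may equal sp exactly */
			assert(sp_new <= sp - 16);	/* always strictly below */
		}
		puts("both invariants hold");
		return 0;
	}
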
22578diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
22579index 48d2b7d..90d328a 100644
22580--- a/arch/x86/kernel/smp.c
22581+++ b/arch/x86/kernel/smp.c
22582@@ -285,7 +285,7 @@ static int __init nonmi_ipi_setup(char *str)
22583
22584 __setup("nonmi_ipi", nonmi_ipi_setup);
22585
22586-struct smp_ops smp_ops = {
22587+struct smp_ops smp_ops __read_only = {
22588 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
22589 .smp_prepare_cpus = native_smp_prepare_cpus,
22590 .smp_cpus_done = native_smp_cpus_done,
22591diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
22592index ed0fe38..87fc692 100644
22593--- a/arch/x86/kernel/smpboot.c
22594+++ b/arch/x86/kernel/smpboot.c
22595@@ -748,6 +748,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
22596 idle->thread.sp = (unsigned long) (((struct pt_regs *)
22597 (THREAD_SIZE + task_stack_page(idle))) - 1);
22598 per_cpu(current_task, cpu) = idle;
22599+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
22600
22601 #ifdef CONFIG_X86_32
22602 /* Stack for startup_32 can be just as for start_secondary onwards */
22603@@ -755,11 +756,13 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
22604 #else
22605 clear_tsk_thread_flag(idle, TIF_FORK);
22606 initial_gs = per_cpu_offset(cpu);
22607- per_cpu(kernel_stack, cpu) =
22608- (unsigned long)task_stack_page(idle) -
22609- KERNEL_STACK_OFFSET + THREAD_SIZE;
22610+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
22611 #endif
22612+
22613+ pax_open_kernel();
22614 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
22615+ pax_close_kernel();
22616+
22617 initial_code = (unsigned long)start_secondary;
22618 stack_start = idle->thread.sp;
22619
22620@@ -908,6 +911,12 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
22621 /* the FPU context is blank, nobody can own it */
22622 __cpu_disable_lazy_restore(cpu);
22623
22624+#ifdef CONFIG_PAX_PER_CPU_PGD
22625+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
22626+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22627+ KERNEL_PGD_PTRS);
22628+#endif
22629+
22633 err = do_boot_cpu(apicid, cpu, tidle);
22634 if (err) {
22635 pr_debug("do_boot_cpu failed %d\n", err);
22636diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
22637index 9b4d51d..5d28b58 100644
22638--- a/arch/x86/kernel/step.c
22639+++ b/arch/x86/kernel/step.c
22640@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
22641 struct desc_struct *desc;
22642 unsigned long base;
22643
22644- seg &= ~7UL;
22645+ seg >>= 3;
22646
22647 mutex_lock(&child->mm->context.lock);
22648- if (unlikely((seg >> 3) >= child->mm->context.size))
22649+ if (unlikely(seg >= child->mm->context.size))
22650 addr = -1L; /* bogus selector, access would fault */
22651 else {
22652 desc = child->mm->context.ldt + seg;
22653@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
22654 addr += base;
22655 }
22656 mutex_unlock(&child->mm->context.lock);
22657- }
22658+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
22659+ addr = ktla_ktva(addr);
22660
22661 return addr;
22662 }
22663@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
22664 unsigned char opcode[15];
22665 unsigned long addr = convert_ip_to_linear(child, regs);
22666
22667+ if (addr == -EINVAL)
22668+ return 0;
22669+
22670 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
22671 for (i = 0; i < copied; i++) {
22672 switch (opcode[i]) {
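
The step.c hunk replaces "seg &= ~7" plus a later shift with a single "seg >>= 3", which works because an x86 selector packs the RPL in bits 0-1, the table indicator in bit 2, and the descriptor index above that. A sketch of the decoding, selector value hypothetical:

	#include <stdio.h>

	int main(void)
	{
		unsigned short seg = 0x3f;	/* hypothetical LDT selector */

		unsigned rpl   = seg & 3;	 /* requested privilege level */
		unsigned ti    = (seg >> 2) & 1; /* 1 = LDT, 0 = GDT */
		unsigned index = seg >> 3;	 /* descriptor slot, as patched */

		printf("rpl=%u ti=%u index=%u\n", rpl, ti, index);
		return 0;
	}
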
22673diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
22674new file mode 100644
22675index 0000000..207bec6
22676--- /dev/null
22677+++ b/arch/x86/kernel/sys_i386_32.c
22678@@ -0,0 +1,250 @@
22679+/*
22680+ * This file contains various random system calls that
22681+ * have a non-standard calling sequence on the Linux/i386
22682+ * platform.
22683+ */
22684+
22685+#include <linux/errno.h>
22686+#include <linux/sched.h>
22687+#include <linux/mm.h>
22688+#include <linux/fs.h>
22689+#include <linux/smp.h>
22690+#include <linux/sem.h>
22691+#include <linux/msg.h>
22692+#include <linux/shm.h>
22693+#include <linux/stat.h>
22694+#include <linux/syscalls.h>
22695+#include <linux/mman.h>
22696+#include <linux/file.h>
22697+#include <linux/utsname.h>
22698+#include <linux/ipc.h>
22699+
22700+#include <linux/uaccess.h>
22701+#include <linux/unistd.h>
22702+
22703+#include <asm/syscalls.h>
22704+
22705+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
22706+{
22707+ unsigned long pax_task_size = TASK_SIZE;
22708+
22709+#ifdef CONFIG_PAX_SEGMEXEC
22710+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
22711+ pax_task_size = SEGMEXEC_TASK_SIZE;
22712+#endif
22713+
22714+ if (flags & MAP_FIXED)
22715+ if (len > pax_task_size || addr > pax_task_size - len)
22716+ return -EINVAL;
22717+
22718+ return 0;
22719+}
22720+
22721+unsigned long
22722+arch_get_unmapped_area(struct file *filp, unsigned long addr,
22723+ unsigned long len, unsigned long pgoff, unsigned long flags)
22724+{
22725+ struct mm_struct *mm = current->mm;
22726+ struct vm_area_struct *vma;
22727+ unsigned long start_addr, pax_task_size = TASK_SIZE;
22728+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
22729+
22730+#ifdef CONFIG_PAX_SEGMEXEC
22731+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22732+ pax_task_size = SEGMEXEC_TASK_SIZE;
22733+#endif
22734+
22735+ pax_task_size -= PAGE_SIZE;
22736+
22737+ if (len > pax_task_size)
22738+ return -ENOMEM;
22739+
22740+ if (flags & MAP_FIXED)
22741+ return addr;
22742+
22743+#ifdef CONFIG_PAX_RANDMMAP
22744+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
22745+#endif
22746+
22747+ if (addr) {
22748+ addr = PAGE_ALIGN(addr);
22749+ if (pax_task_size - len >= addr) {
22750+ vma = find_vma(mm, addr);
22751+ if (check_heap_stack_gap(vma, addr, len, offset))
22752+ return addr;
22753+ }
22754+ }
22755+ if (len > mm->cached_hole_size) {
22756+ start_addr = addr = mm->free_area_cache;
22757+ } else {
22758+ start_addr = addr = mm->mmap_base;
22759+ mm->cached_hole_size = 0;
22760+ }
22761+
22762+#ifdef CONFIG_PAX_PAGEEXEC
22763+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
22764+ start_addr = 0x00110000UL;
22765+
22766+#ifdef CONFIG_PAX_RANDMMAP
22767+ if (mm->pax_flags & MF_PAX_RANDMMAP)
22768+ start_addr += mm->delta_mmap & 0x03FFF000UL;
22769+#endif
22770+
22771+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
22772+ start_addr = addr = mm->mmap_base;
22773+ else
22774+ addr = start_addr;
22775+ }
22776+#endif
22777+
22778+full_search:
22779+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
22780+ /* At this point: (!vma || addr < vma->vm_end). */
22781+ if (pax_task_size - len < addr) {
22782+ /*
22783+ * Start a new search - just in case we missed
22784+ * some holes.
22785+ */
22786+ if (start_addr != mm->mmap_base) {
22787+ start_addr = addr = mm->mmap_base;
22788+ mm->cached_hole_size = 0;
22789+ goto full_search;
22790+ }
22791+ return -ENOMEM;
22792+ }
22793+ if (check_heap_stack_gap(vma, addr, len, offset))
22794+ break;
22795+ if (addr + mm->cached_hole_size < vma->vm_start)
22796+ mm->cached_hole_size = vma->vm_start - addr;
22797+ addr = vma->vm_end;
22798+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
22799+ start_addr = addr = mm->mmap_base;
22800+ mm->cached_hole_size = 0;
22801+ goto full_search;
22802+ }
22803+ }
22804+
22805+ /*
22806+ * Remember the place where we stopped the search:
22807+ */
22808+ mm->free_area_cache = addr + len;
22809+ return addr;
22810+}
22811+
22812+unsigned long
22813+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
22814+ const unsigned long len, const unsigned long pgoff,
22815+ const unsigned long flags)
22816+{
22817+ struct vm_area_struct *vma;
22818+ struct mm_struct *mm = current->mm;
22819+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
22820+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
22821+
22822+#ifdef CONFIG_PAX_SEGMEXEC
22823+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22824+ pax_task_size = SEGMEXEC_TASK_SIZE;
22825+#endif
22826+
22827+ pax_task_size -= PAGE_SIZE;
22828+
22829+ /* requested length too big for entire address space */
22830+ if (len > pax_task_size)
22831+ return -ENOMEM;
22832+
22833+ if (flags & MAP_FIXED)
22834+ return addr;
22835+
22836+#ifdef CONFIG_PAX_PAGEEXEC
22837+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
22838+ goto bottomup;
22839+#endif
22840+
22841+#ifdef CONFIG_PAX_RANDMMAP
22842+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
22843+#endif
22844+
22845+ /* requesting a specific address */
22846+ if (addr) {
22847+ addr = PAGE_ALIGN(addr);
22848+ if (pax_task_size - len >= addr) {
22849+ vma = find_vma(mm, addr);
22850+ if (check_heap_stack_gap(vma, addr, len, offset))
22851+ return addr;
22852+ }
22853+ }
22854+
22855+ /* check if free_area_cache is useful for us */
22856+ if (len <= mm->cached_hole_size) {
22857+ mm->cached_hole_size = 0;
22858+ mm->free_area_cache = mm->mmap_base;
22859+ }
22860+
22861+ /* either no address requested or can't fit in requested address hole */
22862+ addr = mm->free_area_cache;
22863+
22864+ /* make sure it can fit in the remaining address space */
22865+ if (addr > len) {
22866+ vma = find_vma(mm, addr-len);
22867+ if (check_heap_stack_gap(vma, addr - len, len, offset))
22868+ /* remember the address as a hint for next time */
22869+ return (mm->free_area_cache = addr-len);
22870+ }
22871+
22872+ if (mm->mmap_base < len)
22873+ goto bottomup;
22874+
22875+ addr = mm->mmap_base-len;
22876+
22877+ do {
22878+ /*
22879+ * Lookup failure means no vma is above this address,
22880+ * else if new region fits below vma->vm_start,
22881+ * return with success:
22882+ */
22883+ vma = find_vma(mm, addr);
22884+ if (check_heap_stack_gap(vma, addr, len, offset))
22885+ /* remember the address as a hint for next time */
22886+ return (mm->free_area_cache = addr);
22887+
22888+ /* remember the largest hole we saw so far */
22889+ if (addr + mm->cached_hole_size < vma->vm_start)
22890+ mm->cached_hole_size = vma->vm_start - addr;
22891+
22892+ /* try just below the current vma->vm_start */
22893+ addr = skip_heap_stack_gap(vma, len, offset);
22894+ } while (!IS_ERR_VALUE(addr));
22895+
22896+bottomup:
22897+ /*
22898+ * A failed mmap() very likely causes application failure,
22899+ * so fall back to the bottom-up function here. This scenario
22900+ * can happen with large stack limits and large mmap()
22901+ * allocations.
22902+ */
22903+
22904+#ifdef CONFIG_PAX_SEGMEXEC
22905+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22906+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
22907+ else
22908+#endif
22909+
22910+ mm->mmap_base = TASK_UNMAPPED_BASE;
22911+
22912+#ifdef CONFIG_PAX_RANDMMAP
22913+ if (mm->pax_flags & MF_PAX_RANDMMAP)
22914+ mm->mmap_base += mm->delta_mmap;
22915+#endif
22916+
22917+ mm->free_area_cache = mm->mmap_base;
22918+ mm->cached_hole_size = ~0UL;
22919+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
22920+ /*
22921+ * Restore the topdown base:
22922+ */
22923+ mm->mmap_base = base;
22924+ mm->free_area_cache = base;
22925+ mm->cached_hole_size = ~0UL;
22926+
22927+ return addr;
22928+}
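
The new sys_i386_32.c above repeatedly defers to check_heap_stack_gap(), which roughly accepts a candidate range only if it keeps a guard gap before the next mapping; the real helper also accounts for stack growth and rlimits. A toy version of the core interval test, all types and numbers illustrative:

	#include <stdio.h>
	#include <stdbool.h>

	struct vma { unsigned long vm_start, vm_end; };

	/* Accept [addr, addr + len) only if it ends at least `gap` bytes
	 * before the next mapping; NULL means nothing lies above it. */
	static bool fits_below(const struct vma *next, unsigned long addr,
			       unsigned long len, unsigned long gap)
	{
		if (!next)
			return true;
		return addr + len + gap <= next->vm_start;
	}

	int main(void)
	{
		struct vma next = { 0x8000, 0x9000 };

		printf("%d\n", fits_below(&next, 0x6000, 0x1000, 0x1000)); /* 1 */
		printf("%d\n", fits_below(&next, 0x7000, 0x1000, 0x1000)); /* 0 */
		return 0;
	}
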
22929diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
22930index 97ef74b..57a1882 100644
22931--- a/arch/x86/kernel/sys_x86_64.c
22932+++ b/arch/x86/kernel/sys_x86_64.c
22933@@ -81,8 +81,8 @@ out:
22934 return error;
22935 }
22936
22937-static void find_start_end(unsigned long flags, unsigned long *begin,
22938- unsigned long *end)
22939+static void find_start_end(struct mm_struct *mm, unsigned long flags,
22940+ unsigned long *begin, unsigned long *end)
22941 {
22942 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
22943 unsigned long new_begin;
22944@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
22945 *begin = new_begin;
22946 }
22947 } else {
22948- *begin = TASK_UNMAPPED_BASE;
22949+ *begin = mm->mmap_base;
22950 *end = TASK_SIZE;
22951 }
22952 }
22953@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
22954 struct vm_area_struct *vma;
22955 struct vm_unmapped_area_info info;
22956 unsigned long begin, end;
22957+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
22958
22959 if (flags & MAP_FIXED)
22960 return addr;
22961
22962- find_start_end(flags, &begin, &end);
22963+ find_start_end(mm, flags, &begin, &end);
22964
22965 if (len > end)
22966 return -ENOMEM;
22967
22968+#ifdef CONFIG_PAX_RANDMMAP
22969+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
22970+#endif
22971+
22972 if (addr) {
22973 addr = PAGE_ALIGN(addr);
22974 vma = find_vma(mm, addr);
22975- if (end - len >= addr &&
22976- (!vma || addr + len <= vma->vm_start))
22977+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
22978 return addr;
22979 }
22980
22981@@ -161,6 +165,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
22982 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
22983 goto bottomup;
22984
22985+#ifdef CONFIG_PAX_RANDMMAP
22986+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
22987+#endif
22988+
22989 /* requesting a specific address */
22990 if (addr) {
22991 addr = PAGE_ALIGN(addr);
22992diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
22993index f84fe00..f41d9f1 100644
22994--- a/arch/x86/kernel/tboot.c
22995+++ b/arch/x86/kernel/tboot.c
22996@@ -220,7 +220,7 @@ static int tboot_setup_sleep(void)
22997
22998 void tboot_shutdown(u32 shutdown_type)
22999 {
23000- void (*shutdown)(void);
23001+ void (* __noreturn shutdown)(void);
23002
23003 if (!tboot_enabled())
23004 return;
23005@@ -242,7 +242,7 @@ void tboot_shutdown(u32 shutdown_type)
23006
23007 switch_to_tboot_pt();
23008
23009- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
23010+ shutdown = (void *)tboot->shutdown_entry;
23011 shutdown();
23012
23013 /* should not reach here */
23014@@ -300,7 +300,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
23015 return 0;
23016 }
23017
23018-static atomic_t ap_wfs_count;
23019+static atomic_unchecked_t ap_wfs_count;
23020
23021 static int tboot_wait_for_aps(int num_aps)
23022 {
23023@@ -324,16 +324,16 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
23024 {
23025 switch (action) {
23026 case CPU_DYING:
23027- atomic_inc(&ap_wfs_count);
23028+ atomic_inc_unchecked(&ap_wfs_count);
23029 if (num_online_cpus() == 1)
23030- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
23031+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
23032 return NOTIFY_BAD;
23033 break;
23034 }
23035 return NOTIFY_OK;
23036 }
23037
23038-static struct notifier_block tboot_cpu_notifier __cpuinitdata =
23039+static struct notifier_block tboot_cpu_notifier =
23040 {
23041 .notifier_call = tboot_cpu_callback,
23042 };
23043@@ -345,7 +345,7 @@ static __init int tboot_late_init(void)
23044
23045 tboot_create_trampoline();
23046
23047- atomic_set(&ap_wfs_count, 0);
23048+ atomic_set_unchecked(&ap_wfs_count, 0);
23049 register_hotcpu_notifier(&tboot_cpu_notifier);
23050
23051 acpi_os_set_prepare_sleep(&tboot_sleep);
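
Context for the atomic_unchecked_t conversions in tboot.c: under PAX_REFCOUNT, the plain atomic ops trap on signed overflow to stop reference-count exploits, so counters that may legitimately grow without bound (here a CPU bookkeeping statistic) are moved to the unchecked variants. A userspace emulation of the two flavours via the GCC overflow builtins, assumed available:

	#include <stdio.h>
	#include <stdlib.h>

	static void inc_checked(int *v)		/* models the checked atomic_inc */
	{
		if (__builtin_add_overflow(*v, 1, v)) {
			fprintf(stderr, "refcount overflow caught\n");
			abort();		/* the kernel kills the offender */
		}
	}

	static void inc_unchecked(unsigned *v)	/* models atomic_inc_unchecked */
	{
		(*v)++;				/* statistics may wrap freely */
	}

	int main(void)
	{
		unsigned stat = 0xffffffffu;
		inc_unchecked(&stat);		/* wraps to 0, harmless here */
		printf("stat=%u\n", stat);

		int ref = 0x7fffffff;
		inc_checked(&ref);		/* overflow detected, aborts */
		return 0;
	}
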
23052diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
23053index 24d3c91..d06b473 100644
23054--- a/arch/x86/kernel/time.c
23055+++ b/arch/x86/kernel/time.c
23056@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
23057 {
23058 unsigned long pc = instruction_pointer(regs);
23059
23060- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
23061+ if (!user_mode(regs) && in_lock_functions(pc)) {
23062 #ifdef CONFIG_FRAME_POINTER
23063- return *(unsigned long *)(regs->bp + sizeof(long));
23064+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
23065 #else
23066 unsigned long *sp =
23067 (unsigned long *)kernel_stack_pointer(regs);
23068@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
23069 * or above a saved flags. Eflags has bits 22-31 zero,
23070 * kernel addresses don't.
23071 */
23072+
23073+#ifdef CONFIG_PAX_KERNEXEC
23074+ return ktla_ktva(sp[0]);
23075+#else
23076 if (sp[0] >> 22)
23077 return sp[0];
23078 if (sp[1] >> 22)
23079 return sp[1];
23080 #endif
23081+
23082+#endif
23083 }
23084 return pc;
23085 }
23086diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
23087index 9d9d2f9..cad418a 100644
23088--- a/arch/x86/kernel/tls.c
23089+++ b/arch/x86/kernel/tls.c
23090@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
23091 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
23092 return -EINVAL;
23093
23094+#ifdef CONFIG_PAX_SEGMEXEC
23095+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
23096+ return -EINVAL;
23097+#endif
23098+
23099 set_tls_desc(p, idx, &info, 1);
23100
23101 return 0;
23102@@ -204,7 +209,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
23103
23104 if (kbuf)
23105 info = kbuf;
23106- else if (__copy_from_user(infobuf, ubuf, count))
23107+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
23108 return -EFAULT;
23109 else
23110 info = infobuf;
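
The tls.c change guards a fixed on-stack buffer by rejecting oversized counts before __copy_from_user() runs. A stand-in sketch of that pattern, with memcpy in place of the user-copy primitive, which cannot check the destination size itself:

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>

	static int copy_in(void *dst, size_t dstsz, const void *src, size_t count)
	{
		if (count > dstsz)	/* the patch's "count > sizeof infobuf" */
			return -EFAULT;
		memcpy(dst, src, count);
		return 0;
	}

	int main(void)
	{
		char infobuf[16];
		char payload[64] = "tls data";

		printf("%d\n", copy_in(infobuf, sizeof infobuf,
				       payload, sizeof payload));	/* -14 */
		printf("%d\n", copy_in(infobuf, sizeof infobuf,
				       payload, 8));			/* 0 */
		return 0;
	}
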
23111diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
23112index ecffca1..95c4d13 100644
23113--- a/arch/x86/kernel/traps.c
23114+++ b/arch/x86/kernel/traps.c
23115@@ -68,12 +68,6 @@
23116 #include <asm/setup.h>
23117
23118 asmlinkage int system_call(void);
23119-
23120-/*
23121- * The IDT has to be page-aligned to simplify the Pentium
23122- * F0 0F bug workaround.
23123- */
23124-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
23125 #endif
23126
23127 DECLARE_BITMAP(used_vectors, NR_VECTORS);
23128@@ -106,11 +100,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
23129 }
23130
23131 static int __kprobes
23132-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
23133+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
23134 struct pt_regs *regs, long error_code)
23135 {
23136 #ifdef CONFIG_X86_32
23137- if (regs->flags & X86_VM_MASK) {
23138+ if (v8086_mode(regs)) {
23139 /*
23140 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
23141 * On nmi (interrupt 2), do_trap should not be called.
23142@@ -123,12 +117,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
23143 return -1;
23144 }
23145 #endif
23146- if (!user_mode(regs)) {
23147+ if (!user_mode_novm(regs)) {
23148 if (!fixup_exception(regs)) {
23149 tsk->thread.error_code = error_code;
23150 tsk->thread.trap_nr = trapnr;
23151+
23152+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23153+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
23154+ str = "PAX: suspicious stack segment fault";
23155+#endif
23156+
23157 die(str, regs, error_code);
23158 }
23159+
23160+#ifdef CONFIG_PAX_REFCOUNT
23161+ if (trapnr == 4)
23162+ pax_report_refcount_overflow(regs);
23163+#endif
23164+
23165 return 0;
23166 }
23167
23168@@ -136,7 +142,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
23169 }
23170
23171 static void __kprobes
23172-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
23173+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
23174 long error_code, siginfo_t *info)
23175 {
23176 struct task_struct *tsk = current;
23177@@ -160,7 +166,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
23178 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
23179 printk_ratelimit()) {
23180 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
23181- tsk->comm, tsk->pid, str,
23182+ tsk->comm, task_pid_nr(tsk), str,
23183 regs->ip, regs->sp, error_code);
23184 print_vma_addr(" in ", regs->ip);
23185 pr_cont("\n");
23186@@ -266,7 +272,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
23187 conditional_sti(regs);
23188
23189 #ifdef CONFIG_X86_32
23190- if (regs->flags & X86_VM_MASK) {
23191+ if (v8086_mode(regs)) {
23192 local_irq_enable();
23193 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
23194 goto exit;
23195@@ -274,18 +280,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
23196 #endif
23197
23198 tsk = current;
23199- if (!user_mode(regs)) {
23200+ if (!user_mode_novm(regs)) {
23201 if (fixup_exception(regs))
23202 goto exit;
23203
23204 tsk->thread.error_code = error_code;
23205 tsk->thread.trap_nr = X86_TRAP_GP;
23206 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
23207- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
23208+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
23209+
23210+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23211+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
23212+ die("PAX: suspicious general protection fault", regs, error_code);
23213+ else
23214+#endif
23215+
23216 die("general protection fault", regs, error_code);
23217+ }
23218 goto exit;
23219 }
23220
23221+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23222+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
23223+ struct mm_struct *mm = tsk->mm;
23224+ unsigned long limit;
23225+
23226+ down_write(&mm->mmap_sem);
23227+ limit = mm->context.user_cs_limit;
23228+ if (limit < TASK_SIZE) {
23229+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
23230+ up_write(&mm->mmap_sem);
23231+ return;
23232+ }
23233+ up_write(&mm->mmap_sem);
23234+ }
23235+#endif
23236+
23237 tsk->thread.error_code = error_code;
23238 tsk->thread.trap_nr = X86_TRAP_GP;
23239
23240@@ -440,7 +470,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
23241 /* It's safe to allow irq's after DR6 has been saved */
23242 preempt_conditional_sti(regs);
23243
23244- if (regs->flags & X86_VM_MASK) {
23245+ if (v8086_mode(regs)) {
23246 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
23247 X86_TRAP_DB);
23248 preempt_conditional_cli(regs);
23249@@ -455,7 +485,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
23250 * We already checked v86 mode above, so we can check for kernel mode
23251 * by just checking the CPL of CS.
23252 */
23253- if ((dr6 & DR_STEP) && !user_mode(regs)) {
23254+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
23255 tsk->thread.debugreg6 &= ~DR_STEP;
23256 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
23257 regs->flags &= ~X86_EFLAGS_TF;
23258@@ -487,7 +517,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
23259 return;
23260 conditional_sti(regs);
23261
23262- if (!user_mode_vm(regs))
23263+ if (!user_mode(regs))
23264 {
23265 if (!fixup_exception(regs)) {
23266 task->thread.error_code = error_code;
23267diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
23268index c71025b..b117501 100644
23269--- a/arch/x86/kernel/uprobes.c
23270+++ b/arch/x86/kernel/uprobes.c
23271@@ -629,7 +629,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
23272 int ret = NOTIFY_DONE;
23273
23274 /* We are only interested in userspace traps */
23275- if (regs && !user_mode_vm(regs))
23276+ if (regs && !user_mode(regs))
23277 return NOTIFY_DONE;
23278
23279 switch (val) {
23280diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
23281index b9242ba..50c5edd 100644
23282--- a/arch/x86/kernel/verify_cpu.S
23283+++ b/arch/x86/kernel/verify_cpu.S
23284@@ -20,6 +20,7 @@
23285 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
23286 * arch/x86/kernel/trampoline_64.S: secondary processor verification
23287 * arch/x86/kernel/head_32.S: processor startup
23288+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
23289 *
23290 * verify_cpu, returns the status of longmode and SSE in register %eax.
23291 * 0: Success 1: Failure
23292diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
23293index 1dfe69c..a3df6f6 100644
23294--- a/arch/x86/kernel/vm86_32.c
23295+++ b/arch/x86/kernel/vm86_32.c
23296@@ -43,6 +43,7 @@
23297 #include <linux/ptrace.h>
23298 #include <linux/audit.h>
23299 #include <linux/stddef.h>
23300+#include <linux/grsecurity.h>
23301
23302 #include <asm/uaccess.h>
23303 #include <asm/io.h>
23304@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
23305 do_exit(SIGSEGV);
23306 }
23307
23308- tss = &per_cpu(init_tss, get_cpu());
23309+ tss = init_tss + get_cpu();
23310 current->thread.sp0 = current->thread.saved_sp0;
23311 current->thread.sysenter_cs = __KERNEL_CS;
23312 load_sp0(tss, &current->thread);
23313@@ -212,6 +213,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
23314 struct task_struct *tsk;
23315 int tmp, ret = -EPERM;
23316
23317+#ifdef CONFIG_GRKERNSEC_VM86
23318+ if (!capable(CAP_SYS_RAWIO)) {
23319+ gr_handle_vm86();
23320+ goto out;
23321+ }
23322+#endif
23323+
23324 tsk = current;
23325 if (tsk->thread.saved_sp0)
23326 goto out;
23327@@ -242,6 +250,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
23328 int tmp, ret;
23329 struct vm86plus_struct __user *v86;
23330
23331+#ifdef CONFIG_GRKERNSEC_VM86
23332+ if (!capable(CAP_SYS_RAWIO)) {
23333+ gr_handle_vm86();
23334+ ret = -EPERM;
23335+ goto out;
23336+ }
23337+#endif
23338+
23339 tsk = current;
23340 switch (cmd) {
23341 case VM86_REQUEST_IRQ:
23342@@ -328,7 +344,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
23343 tsk->thread.saved_fs = info->regs32->fs;
23344 tsk->thread.saved_gs = get_user_gs(info->regs32);
23345
23346- tss = &per_cpu(init_tss, get_cpu());
23347+ tss = init_tss + get_cpu();
23348 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
23349 if (cpu_has_sep)
23350 tsk->thread.sysenter_cs = 0;
23351@@ -535,7 +551,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
23352 goto cannot_handle;
23353 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
23354 goto cannot_handle;
23355- intr_ptr = (unsigned long __user *) (i << 2);
23356+ intr_ptr = (__force unsigned long __user *) (i << 2);
23357 if (get_user(segoffs, intr_ptr))
23358 goto cannot_handle;
23359 if ((segoffs >> 16) == BIOSSEG)
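
The vm86_32.c hunks gate both vm86 entry points on CAP_SYS_RAWIO and log refusals through the grsecurity hook. A sketch of that control flow with capable() and the logging hook stubbed out; both stubs are stand-ins, not the kernel functions:

	#include <errno.h>
	#include <stdio.h>

	static int caller_has_rawio = 0;	/* pretend an unprivileged task */

	static void gr_log_vm86_attempt(void)	/* stand-in for gr_handle_vm86() */
	{
		fprintf(stderr, "vm86 denied for unprivileged caller\n");
	}

	static int sys_vm86old_sketch(void)
	{
		if (!caller_has_rawio) {
			gr_log_vm86_attempt();
			return -EPERM;	/* bail before touching vm86 state */
		}
		return 0;		/* would set up the vm86 context */
	}

	int main(void)
	{
		printf("ret=%d\n", sys_vm86old_sketch());
		return 0;
	}
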
23360diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
23361index 22a1530..8fbaaad 100644
23362--- a/arch/x86/kernel/vmlinux.lds.S
23363+++ b/arch/x86/kernel/vmlinux.lds.S
23364@@ -26,6 +26,13 @@
23365 #include <asm/page_types.h>
23366 #include <asm/cache.h>
23367 #include <asm/boot.h>
23368+#include <asm/segment.h>
23369+
23370+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23371+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
23372+#else
23373+#define __KERNEL_TEXT_OFFSET 0
23374+#endif
23375
23376 #undef i386 /* in case the preprocessor is a 32bit one */
23377
23378@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
23379
23380 PHDRS {
23381 text PT_LOAD FLAGS(5); /* R_E */
23382+#ifdef CONFIG_X86_32
23383+ module PT_LOAD FLAGS(5); /* R_E */
23384+#endif
23385+#ifdef CONFIG_XEN
23386+ rodata PT_LOAD FLAGS(5); /* R_E */
23387+#else
23388+ rodata PT_LOAD FLAGS(4); /* R__ */
23389+#endif
23390 data PT_LOAD FLAGS(6); /* RW_ */
23391-#ifdef CONFIG_X86_64
23392+ init.begin PT_LOAD FLAGS(6); /* RW_ */
23393 #ifdef CONFIG_SMP
23394 percpu PT_LOAD FLAGS(6); /* RW_ */
23395 #endif
23396+ text.init PT_LOAD FLAGS(5); /* R_E */
23397+ text.exit PT_LOAD FLAGS(5); /* R_E */
23398 init PT_LOAD FLAGS(7); /* RWE */
23399-#endif
23400 note PT_NOTE FLAGS(0); /* ___ */
23401 }
23402
23403 SECTIONS
23404 {
23405 #ifdef CONFIG_X86_32
23406- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
23407- phys_startup_32 = startup_32 - LOAD_OFFSET;
23408+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
23409 #else
23410- . = __START_KERNEL;
23411- phys_startup_64 = startup_64 - LOAD_OFFSET;
23412+ . = __START_KERNEL;
23413 #endif
23414
23415 /* Text and read-only data */
23416- .text : AT(ADDR(.text) - LOAD_OFFSET) {
23417- _text = .;
23418+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
23419 /* bootstrapping code */
23420+#ifdef CONFIG_X86_32
23421+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
23422+#else
23423+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
23424+#endif
23425+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
23426+ _text = .;
23427 HEAD_TEXT
23428 #ifdef CONFIG_X86_32
23429 . = ALIGN(PAGE_SIZE);
23430@@ -108,13 +128,48 @@ SECTIONS
23431 IRQENTRY_TEXT
23432 *(.fixup)
23433 *(.gnu.warning)
23434- /* End of text section */
23435- _etext = .;
23436 } :text = 0x9090
23437
23438- NOTES :text :note
23439+ . += __KERNEL_TEXT_OFFSET;
23440
23441- EXCEPTION_TABLE(16) :text = 0x9090
23442+#ifdef CONFIG_X86_32
23443+ . = ALIGN(PAGE_SIZE);
23444+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
23445+
23446+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
23447+ MODULES_EXEC_VADDR = .;
23448+ BYTE(0)
23449+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
23450+ . = ALIGN(HPAGE_SIZE) - 1;
23451+ MODULES_EXEC_END = .;
23452+#endif
23453+
23454+ } :module
23455+#endif
23456+
23457+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
23458+ /* End of text section */
23459+ BYTE(0)
23460+ _etext = . - __KERNEL_TEXT_OFFSET;
23461+ }
23462+
23463+#ifdef CONFIG_X86_32
23464+ . = ALIGN(PAGE_SIZE);
23465+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
23466+ *(.idt)
23467+ . = ALIGN(PAGE_SIZE);
23468+ *(.empty_zero_page)
23469+ *(.initial_pg_fixmap)
23470+ *(.initial_pg_pmd)
23471+ *(.initial_page_table)
23472+ *(.swapper_pg_dir)
23473+ } :rodata
23474+#endif
23475+
23476+ . = ALIGN(PAGE_SIZE);
23477+ NOTES :rodata :note
23478+
23479+ EXCEPTION_TABLE(16) :rodata
23480
23481 #if defined(CONFIG_DEBUG_RODATA)
23482 /* .text should occupy whole number of pages */
23483@@ -126,16 +181,20 @@ SECTIONS
23484
23485 /* Data */
23486 .data : AT(ADDR(.data) - LOAD_OFFSET) {
23487+
23488+#ifdef CONFIG_PAX_KERNEXEC
23489+ . = ALIGN(HPAGE_SIZE);
23490+#else
23491+ . = ALIGN(PAGE_SIZE);
23492+#endif
23493+
23494 /* Start of data section */
23495 _sdata = .;
23496
23497 /* init_task */
23498 INIT_TASK_DATA(THREAD_SIZE)
23499
23500-#ifdef CONFIG_X86_32
23501- /* 32 bit has nosave before _edata */
23502 NOSAVE_DATA
23503-#endif
23504
23505 PAGE_ALIGNED_DATA(PAGE_SIZE)
23506
23507@@ -176,12 +235,19 @@ SECTIONS
23508 #endif /* CONFIG_X86_64 */
23509
23510 /* Init code and data - will be freed after init */
23511- . = ALIGN(PAGE_SIZE);
23512 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
23513+ BYTE(0)
23514+
23515+#ifdef CONFIG_PAX_KERNEXEC
23516+ . = ALIGN(HPAGE_SIZE);
23517+#else
23518+ . = ALIGN(PAGE_SIZE);
23519+#endif
23520+
23521 __init_begin = .; /* paired with __init_end */
23522- }
23523+ } :init.begin
23524
23525-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
23526+#ifdef CONFIG_SMP
23527 /*
23528 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
23529 * output PHDR, so the next output section - .init.text - should
23530@@ -190,12 +256,27 @@ SECTIONS
23531 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
23532 #endif
23533
23534- INIT_TEXT_SECTION(PAGE_SIZE)
23535-#ifdef CONFIG_X86_64
23536- :init
23537-#endif
23538+ . = ALIGN(PAGE_SIZE);
23539+ init_begin = .;
23540+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
23541+ VMLINUX_SYMBOL(_sinittext) = .;
23542+ INIT_TEXT
23543+ VMLINUX_SYMBOL(_einittext) = .;
23544+ . = ALIGN(PAGE_SIZE);
23545+ } :text.init
23546
23547- INIT_DATA_SECTION(16)
23548+ /*
23549+	 * .exit.text is discarded at runtime, not link time, to deal with
23550+ * references from .altinstructions and .eh_frame
23551+ */
23552+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
23553+ EXIT_TEXT
23554+ . = ALIGN(16);
23555+ } :text.exit
23556+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
23557+
23558+ . = ALIGN(PAGE_SIZE);
23559+ INIT_DATA_SECTION(16) :init
23560
23561 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
23562 __x86_cpu_dev_start = .;
23563@@ -257,19 +338,12 @@ SECTIONS
23564 }
23565
23566 . = ALIGN(8);
23567- /*
23568- * .exit.text is discard at runtime, not link time, to deal with
23569- * references from .altinstructions and .eh_frame
23570- */
23571- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
23572- EXIT_TEXT
23573- }
23574
23575 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
23576 EXIT_DATA
23577 }
23578
23579-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
23580+#ifndef CONFIG_SMP
23581 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
23582 #endif
23583
23584@@ -288,16 +362,10 @@ SECTIONS
23585 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
23586 __smp_locks = .;
23587 *(.smp_locks)
23588- . = ALIGN(PAGE_SIZE);
23589 __smp_locks_end = .;
23590+ . = ALIGN(PAGE_SIZE);
23591 }
23592
23593-#ifdef CONFIG_X86_64
23594- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
23595- NOSAVE_DATA
23596- }
23597-#endif
23598-
23599 /* BSS */
23600 . = ALIGN(PAGE_SIZE);
23601 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
23602@@ -313,6 +381,7 @@ SECTIONS
23603 __brk_base = .;
23604 . += 64 * 1024; /* 64k alignment slop space */
23605 *(.brk_reservation) /* areas brk users have reserved */
23606+ . = ALIGN(HPAGE_SIZE);
23607 __brk_limit = .;
23608 }
23609
23610@@ -339,13 +408,12 @@ SECTIONS
23611 * for the boot processor.
23612 */
23613 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
23614-INIT_PER_CPU(gdt_page);
23615 INIT_PER_CPU(irq_stack_union);
23616
23617 /*
23618 * Build-time check on the image size:
23619 */
23620-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
23621+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
23622 "kernel image bigger than KERNEL_IMAGE_SIZE");
23623
23624 #ifdef CONFIG_SMP
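
Note on the reworked PHDRS block above: segment permissions are numeric ELF p_flags masks (PF_X=1, PF_W=2, PF_R=4), so FLAGS(5) is an R_E segment, FLAGS(4) the non-Xen read-only rodata segment, FLAGS(6) RW_ data, and FLAGS(7) the RWE init segment this rework isolates. A small standalone decoder, for reference:

    #include <stdio.h>

    /* ELF p_flags bits, as in <elf.h>. */
    #define PF_X 0x1
    #define PF_W 0x2
    #define PF_R 0x4

    static void decode(unsigned int f)
    {
        printf("FLAGS(%u) = %c%c%c\n", f,
               f & PF_R ? 'R' : '_',
               f & PF_W ? 'W' : '_',
               f & PF_X ? 'E' : '_');
    }

    int main(void)
    {
        decode(5);  /* text:   R_E */
        decode(4);  /* rodata: R__ */
        decode(6);  /* data:   RW_ */
        decode(7);  /* init:   RWE */
        return 0;
    }
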
23625diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
23626index 9a907a6..f83f921 100644
23627--- a/arch/x86/kernel/vsyscall_64.c
23628+++ b/arch/x86/kernel/vsyscall_64.c
23629@@ -56,15 +56,13 @@
23630 DEFINE_VVAR(int, vgetcpu_mode);
23631 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
23632
23633-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
23634+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
23635
23636 static int __init vsyscall_setup(char *str)
23637 {
23638 if (str) {
23639 if (!strcmp("emulate", str))
23640 vsyscall_mode = EMULATE;
23641- else if (!strcmp("native", str))
23642- vsyscall_mode = NATIVE;
23643 else if (!strcmp("none", str))
23644 vsyscall_mode = NONE;
23645 else
23646@@ -323,8 +321,7 @@ do_ret:
23647 return true;
23648
23649 sigsegv:
23650- force_sig(SIGSEGV, current);
23651- return true;
23652+ do_group_exit(SIGKILL);
23653 }
23654
23655 /*
23656@@ -377,10 +374,7 @@ void __init map_vsyscall(void)
23657 extern char __vvar_page;
23658 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
23659
23660- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
23661- vsyscall_mode == NATIVE
23662- ? PAGE_KERNEL_VSYSCALL
23663- : PAGE_KERNEL_VVAR);
23664+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
23665 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
23666 (unsigned long)VSYSCALL_START);
23667
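
Note on the vsyscall hunks above: with the NATIVE mode removed, the fixed-address vsyscall page is always mapped non-executable (PAGE_KERNEL_VVAR), every call into it is trapped and emulated, and a malformed call now kills the whole thread group instead of delivering SIGSEGV. The legacy ABI being emulated is just a call to a fixed address; a user-space probe (x86-64 only, and fatal on kernels booted with vsyscall=none) looks like:

    #include <stdio.h>
    #include <sys/time.h>

    /* The legacy vsyscall gettimeofday entry lives at a fixed address. */
    typedef long (*vgtod_t)(struct timeval *, void *);
    #define VSYSCALL_GTOD ((vgtod_t)0xffffffffff600000UL)

    int main(void)
    {
        struct timeval tv;
        if (VSYSCALL_GTOD(&tv, NULL) == 0)  /* faults, is emulated */
            printf("%ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);
        return 0;
    }
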
23668diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
23669index 1330dd1..d220b99 100644
23670--- a/arch/x86/kernel/x8664_ksyms_64.c
23671+++ b/arch/x86/kernel/x8664_ksyms_64.c
23672@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
23673 EXPORT_SYMBOL(copy_user_generic_unrolled);
23674 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
23675 EXPORT_SYMBOL(__copy_user_nocache);
23676-EXPORT_SYMBOL(_copy_from_user);
23677-EXPORT_SYMBOL(_copy_to_user);
23678
23679 EXPORT_SYMBOL(copy_page);
23680 EXPORT_SYMBOL(clear_page);
23681diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
23682index 7a3d075..6cb373d 100644
23683--- a/arch/x86/kernel/x86_init.c
23684+++ b/arch/x86/kernel/x86_init.c
23685@@ -88,7 +88,7 @@ struct x86_init_ops x86_init __initdata = {
23686 },
23687 };
23688
23689-struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
23690+struct x86_cpuinit_ops x86_cpuinit __cpuinitconst = {
23691 .early_percpu_clock_init = x86_init_noop,
23692 .setup_percpu_clockev = setup_secondary_APIC_clock,
23693 };
23694@@ -96,7 +96,7 @@ struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
23695 static void default_nmi_init(void) { };
23696 static int default_i8042_detect(void) { return 1; };
23697
23698-struct x86_platform_ops x86_platform = {
23699+struct x86_platform_ops x86_platform __read_only = {
23700 .calibrate_tsc = native_calibrate_tsc,
23701 .get_wallclock = mach_get_cmos_time,
23702 .set_wallclock = mach_set_rtc_mmss,
23703@@ -110,14 +110,14 @@ struct x86_platform_ops x86_platform = {
23704 };
23705
23706 EXPORT_SYMBOL_GPL(x86_platform);
23707-struct x86_msi_ops x86_msi = {
23708+struct x86_msi_ops x86_msi __read_only = {
23709 .setup_msi_irqs = native_setup_msi_irqs,
23710 .teardown_msi_irq = native_teardown_msi_irq,
23711 .teardown_msi_irqs = default_teardown_msi_irqs,
23712 .restore_msi_irqs = default_restore_msi_irqs,
23713 };
23714
23715-struct x86_io_apic_ops x86_io_apic_ops = {
23716+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
23717 .init = native_io_apic_init_mappings,
23718 .read = native_io_apic_read,
23719 .write = native_io_apic_write,
23720diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
23721index ada87a3..afea76d 100644
23722--- a/arch/x86/kernel/xsave.c
23723+++ b/arch/x86/kernel/xsave.c
23724@@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
23725 {
23726 int err;
23727
23728+ buf = (struct xsave_struct __user *)____m(buf);
23729 if (use_xsave())
23730 err = xsave_user(buf);
23731 else if (use_fxsr())
23732@@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
23733 */
23734 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
23735 {
23736+ buf = (void __user *)____m(buf);
23737 if (use_xsave()) {
23738 if ((unsigned long)buf % 64 || fx_only) {
23739 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
23740diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
23741index a20ecb5..d0e2194 100644
23742--- a/arch/x86/kvm/cpuid.c
23743+++ b/arch/x86/kvm/cpuid.c
23744@@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
23745 struct kvm_cpuid2 *cpuid,
23746 struct kvm_cpuid_entry2 __user *entries)
23747 {
23748- int r;
23749+ int r, i;
23750
23751 r = -E2BIG;
23752 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
23753 goto out;
23754 r = -EFAULT;
23755- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
23756- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
23757+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
23758 goto out;
23759+ for (i = 0; i < cpuid->nent; ++i) {
23760+ struct kvm_cpuid_entry2 cpuid_entry;
23761+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
23762+ goto out;
23763+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
23764+ }
23765 vcpu->arch.cpuid_nent = cpuid->nent;
23766 kvm_apic_set_version(vcpu);
23767 kvm_x86_ops->cpuid_update(vcpu);
23768@@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
23769 struct kvm_cpuid2 *cpuid,
23770 struct kvm_cpuid_entry2 __user *entries)
23771 {
23772- int r;
23773+ int r, i;
23774
23775 r = -E2BIG;
23776 if (cpuid->nent < vcpu->arch.cpuid_nent)
23777 goto out;
23778 r = -EFAULT;
23779- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
23780- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
23781+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
23782 goto out;
23783+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
23784+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
23785+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
23786+ goto out;
23787+ }
23788 return 0;
23789
23790 out:
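
Note on the cpuid ioctl hunks above: one unbounded bulk copy_{from,to}_user is replaced by a single access_ok() over the whole user range followed by per-entry __copy_{from,to}_user through a stack temporary, so each individual copy is bounded by sizeof(struct kvm_cpuid_entry2). The shape of the copy-in side, restated in kernel-style C (copy_in_entries() is a stand-in name):

    /* Validate the whole user range once, then move one bounded element
     * at a time via a stack temporary. */
    static int copy_in_entries(struct kvm_cpuid_entry2 __user *src,
                               struct kvm_cpuid_entry2 *dst,
                               unsigned int nent)
    {
        unsigned int i;

        if (!access_ok(VERIFY_READ, src, nent * sizeof(*src)))
            return -EFAULT;
        for (i = 0; i < nent; i++) {
            struct kvm_cpuid_entry2 tmp;

            if (__copy_from_user(&tmp, src + i, sizeof(tmp)))
                return -EFAULT;
            dst[i] = tmp;
        }
        return 0;
    }
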
23791diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
23792index a27e763..54bfe43 100644
23793--- a/arch/x86/kvm/emulate.c
23794+++ b/arch/x86/kvm/emulate.c
23795@@ -292,6 +292,7 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
23796
23797 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
23798 do { \
23799+ unsigned long _tmp; \
23800 __asm__ __volatile__ ( \
23801 _PRE_EFLAGS("0", "4", "2") \
23802 _op _suffix " %"_x"3,%1; " \
23803@@ -306,8 +307,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
23804 /* Raw emulation: instruction has two explicit operands. */
23805 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
23806 do { \
23807- unsigned long _tmp; \
23808- \
23809 switch ((ctxt)->dst.bytes) { \
23810 case 2: \
23811 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
23812@@ -323,7 +322,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
23813
23814 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
23815 do { \
23816- unsigned long _tmp; \
23817 switch ((ctxt)->dst.bytes) { \
23818 case 1: \
23819 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
23820diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
23821index 9392f52..0e56d77 100644
23822--- a/arch/x86/kvm/lapic.c
23823+++ b/arch/x86/kvm/lapic.c
23824@@ -55,7 +55,7 @@
23825 #define APIC_BUS_CYCLE_NS 1
23826
23827 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
23828-#define apic_debug(fmt, arg...)
23829+#define apic_debug(fmt, arg...) do {} while (0)
23830
23831 #define APIC_LVT_NUM 6
23832 /* 14 is the version for Xeon and Pentium 8.4.8*/
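
Note on the apic_debug hunk above: an empty function-like macro is replaced with do {} while (0). The difference matters because an empty expansion lets a forgotten semicolon silently swallow the next statement into an if-body, while the do/while form turns the same mistake into a compile error. A standalone demonstration:

    #include <stdio.h>

    #define DBG_EMPTY(fmt, arg...)                  /* old style */
    #define DBG_STMT(fmt, arg...) do {} while (0)   /* patched style */

    static void commit(void) { puts("commit"); }

    int main(void)
    {
        int debug = 0;

        if (debug)
            DBG_EMPTY("trace\n")  /* forgotten ';': commit() is silently */
        commit();                 /* pulled into the if-body and skipped */

        if (debug)
            DBG_STMT("trace\n");  /* with do{}while(0), dropping the ';' */
        commit();                 /* above would fail to compile         */
        return 0;
    }
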
23833diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
23834index 891eb6d..e027900 100644
23835--- a/arch/x86/kvm/paging_tmpl.h
23836+++ b/arch/x86/kvm/paging_tmpl.h
23837@@ -208,7 +208,7 @@ retry_walk:
23838 if (unlikely(kvm_is_error_hva(host_addr)))
23839 goto error;
23840
23841- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
23842+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
23843 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
23844 goto error;
23845 walker->ptep_user[walker->level - 1] = ptep_user;
23846diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
23847index d29d3cd..ec9d522 100644
23848--- a/arch/x86/kvm/svm.c
23849+++ b/arch/x86/kvm/svm.c
23850@@ -3507,7 +3507,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
23851 int cpu = raw_smp_processor_id();
23852
23853 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
23854+
23855+ pax_open_kernel();
23856 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
23857+ pax_close_kernel();
23858+
23859 load_TR_desc();
23860 }
23861
23862@@ -3881,6 +3885,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
23863 #endif
23864 #endif
23865
23866+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
23867+ __set_fs(current_thread_info()->addr_limit);
23868+#endif
23869+
23870 reload_tss(vcpu);
23871
23872 local_irq_disable();
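
Note on the reload_tss() hunk above: under PAX_KERNEXEC the TSS descriptor lives in read-only memory, so the retype is bracketed with pax_open_kernel()/pax_close_kernel(). A rough sketch of what such a bracket can do, assuming the protection is enforced via CR0.WP (one plausible mechanism, not necessarily PaX's exact implementation) and that the caller keeps preemption and interrupts off across the window:

    #include <asm/special_insns.h>   /* native_read_cr0/native_write_cr0 */
    #include <asm/processor-flags.h> /* X86_CR0_WP */

    /* Drop CR0.WP so a supervisor-mode write to read-only kernel data
     * succeeds, then restore the saved value. */
    static inline unsigned long kernel_write_open(void)
    {
        unsigned long cr0 = native_read_cr0();

        native_write_cr0(cr0 & ~X86_CR0_WP);
        return cr0;
    }

    static inline void kernel_write_close(unsigned long cr0)
    {
        native_write_cr0(cr0);
    }
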
23873diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
23874index 9120ae1..238abc0 100644
23875--- a/arch/x86/kvm/vmx.c
23876+++ b/arch/x86/kvm/vmx.c
23877@@ -1370,7 +1370,11 @@ static void reload_tss(void)
23878 struct desc_struct *descs;
23879
23880 descs = (void *)gdt->address;
23881+
23882+ pax_open_kernel();
23883 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
23884+ pax_close_kernel();
23885+
23886 load_TR_desc();
23887 }
23888
23889@@ -1594,6 +1598,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
23890 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
23891 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
23892
23893+#ifdef CONFIG_PAX_PER_CPU_PGD
23894+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
23895+#endif
23896+
23897 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
23898 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
23899 vmx->loaded_vmcs->cpu = cpu;
23900@@ -2738,8 +2746,11 @@ static __init int hardware_setup(void)
23901 if (!cpu_has_vmx_flexpriority())
23902 flexpriority_enabled = 0;
23903
23904- if (!cpu_has_vmx_tpr_shadow())
23905- kvm_x86_ops->update_cr8_intercept = NULL;
23906+ if (!cpu_has_vmx_tpr_shadow()) {
23907+ pax_open_kernel();
23908+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
23909+ pax_close_kernel();
23910+ }
23911
23912 if (enable_ept && !cpu_has_vmx_ept_2m_page())
23913 kvm_disable_largepages();
23914@@ -3782,7 +3793,10 @@ static void vmx_set_constant_host_state(void)
23915
23916 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
23917 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
23918+
23919+#ifndef CONFIG_PAX_PER_CPU_PGD
23920 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
23921+#endif
23922
23923 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
23924 #ifdef CONFIG_X86_64
23925@@ -3803,7 +3817,7 @@ static void vmx_set_constant_host_state(void)
23926 native_store_idt(&dt);
23927 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
23928
23929- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
23930+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
23931
23932 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
23933 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
23934@@ -6355,6 +6369,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
23935 "jmp 2f \n\t"
23936 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
23937 "2: "
23938+
23939+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23940+ "ljmp %[cs],$3f\n\t"
23941+ "3: "
23942+#endif
23943+
23944 /* Save guest registers, load host registers, keep flags */
23945 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
23946 "pop %0 \n\t"
23947@@ -6407,6 +6427,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
23948 #endif
23949 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
23950 [wordsize]"i"(sizeof(ulong))
23951+
23952+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23953+ ,[cs]"i"(__KERNEL_CS)
23954+#endif
23955+
23956 : "cc", "memory"
23957 #ifdef CONFIG_X86_64
23958 , "rax", "rbx", "rdi", "rsi"
23959@@ -6420,7 +6445,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
23960 if (debugctlmsr)
23961 update_debugctlmsr(debugctlmsr);
23962
23963-#ifndef CONFIG_X86_64
23964+#ifdef CONFIG_X86_32
23965 /*
23966 * The sysexit path does not restore ds/es, so we must set them to
23967 * a reasonable value ourselves.
23968@@ -6429,8 +6454,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
23969 	 * may be executed in interrupt context, which saves and restores segments
23970 * around it, nullifying its effect.
23971 */
23972- loadsegment(ds, __USER_DS);
23973- loadsegment(es, __USER_DS);
23974+ loadsegment(ds, __KERNEL_DS);
23975+ loadsegment(es, __KERNEL_DS);
23976+ loadsegment(ss, __KERNEL_DS);
23977+
23978+#ifdef CONFIG_PAX_KERNEXEC
23979+ loadsegment(fs, __KERNEL_PERCPU);
23980+#endif
23981+
23982+#ifdef CONFIG_PAX_MEMORY_UDEREF
23983+ __set_fs(current_thread_info()->addr_limit);
23984+#endif
23985+
23986 #endif
23987
23988 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
23989diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
23990index c243b81..b692af3 100644
23991--- a/arch/x86/kvm/x86.c
23992+++ b/arch/x86/kvm/x86.c
23993@@ -1408,10 +1408,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
23994 unsigned long flags, this_tsc_khz;
23995 struct kvm_vcpu_arch *vcpu = &v->arch;
23996 struct kvm_arch *ka = &v->kvm->arch;
23997- void *shared_kaddr;
23998 s64 kernel_ns, max_kernel_ns;
23999 u64 tsc_timestamp, host_tsc;
24000- struct pvclock_vcpu_time_info *guest_hv_clock;
24001+ struct pvclock_vcpu_time_info guest_hv_clock;
24002 u8 pvclock_flags;
24003 bool use_master_clock;
24004
24005@@ -1465,7 +1464,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
24006
24007 local_irq_restore(flags);
24008
24009- if (!vcpu->time_page)
24010+ if (!vcpu->pv_time_enabled)
24011 return 0;
24012
24013 /*
24014@@ -1527,12 +1526,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
24015 */
24016 vcpu->hv_clock.version += 2;
24017
24018- shared_kaddr = kmap_atomic(vcpu->time_page);
24019-
24020- guest_hv_clock = shared_kaddr + vcpu->time_offset;
24021+ if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
24022+ &guest_hv_clock, sizeof(guest_hv_clock))))
24023+ return 0;
24024
24025 /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
24026- pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
24027+ pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
24028
24029 if (vcpu->pvclock_set_guest_stopped_request) {
24030 pvclock_flags |= PVCLOCK_GUEST_STOPPED;
24031@@ -1545,12 +1544,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
24032
24033 vcpu->hv_clock.flags = pvclock_flags;
24034
24035- memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
24036- sizeof(vcpu->hv_clock));
24037-
24038- kunmap_atomic(shared_kaddr);
24039-
24040- mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
24041+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
24042+ &vcpu->hv_clock,
24043+ sizeof(vcpu->hv_clock));
24044 return 0;
24045 }
24046
24047@@ -1692,8 +1688,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
24048 {
24049 struct kvm *kvm = vcpu->kvm;
24050 int lm = is_long_mode(vcpu);
24051- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
24052- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
24053+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
24054+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
24055 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
24056 : kvm->arch.xen_hvm_config.blob_size_32;
24057 u32 page_num = data & ~PAGE_MASK;
24058@@ -1839,10 +1835,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
24059
24060 static void kvmclock_reset(struct kvm_vcpu *vcpu)
24061 {
24062- if (vcpu->arch.time_page) {
24063- kvm_release_page_dirty(vcpu->arch.time_page);
24064- vcpu->arch.time_page = NULL;
24065- }
24066+ vcpu->arch.pv_time_enabled = false;
24067 }
24068
24069 static void accumulate_steal_time(struct kvm_vcpu *vcpu)
24070@@ -1948,6 +1941,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
24071 break;
24072 case MSR_KVM_SYSTEM_TIME_NEW:
24073 case MSR_KVM_SYSTEM_TIME: {
24074+ u64 gpa_offset;
24075 kvmclock_reset(vcpu);
24076
24077 vcpu->arch.time = data;
24078@@ -1957,14 +1951,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
24079 if (!(data & 1))
24080 break;
24081
24082- /* ...but clean it before doing the actual write */
24083- vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
24084+ gpa_offset = data & ~(PAGE_MASK | 1);
24085
24086- vcpu->arch.time_page =
24087- gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
24088+ /* Check that the address is 32-byte aligned. */
24089+ if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
24090+ break;
24091
24092- if (is_error_page(vcpu->arch.time_page))
24093- vcpu->arch.time_page = NULL;
24094+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
24095+ &vcpu->arch.pv_time, data & ~1ULL))
24096+ vcpu->arch.pv_time_enabled = false;
24097+ else
24098+ vcpu->arch.pv_time_enabled = true;
24099
24100 break;
24101 }
24102@@ -2571,6 +2568,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
24103 if (n < msr_list.nmsrs)
24104 goto out;
24105 r = -EFAULT;
24106+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
24107+ goto out;
24108 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
24109 num_msrs_to_save * sizeof(u32)))
24110 goto out;
24111@@ -2700,7 +2699,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
24112 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
24113 struct kvm_interrupt *irq)
24114 {
24115- if (irq->irq < 0 || irq->irq >= KVM_NR_INTERRUPTS)
24116+ if (irq->irq >= KVM_NR_INTERRUPTS)
24117 return -EINVAL;
24118 if (irqchip_in_kernel(vcpu->kvm))
24119 return -ENXIO;
24120@@ -2967,7 +2966,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
24121 */
24122 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
24123 {
24124- if (!vcpu->arch.time_page)
24125+ if (!vcpu->arch.pv_time_enabled)
24126 return -EINVAL;
24127 vcpu->arch.pvclock_set_guest_stopped_request = true;
24128 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
24129@@ -5213,7 +5212,7 @@ static struct notifier_block pvclock_gtod_notifier = {
24130 };
24131 #endif
24132
24133-int kvm_arch_init(void *opaque)
24134+int kvm_arch_init(const void *opaque)
24135 {
24136 int r;
24137 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
24138@@ -6661,6 +6660,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
24139 goto fail_free_wbinvd_dirty_mask;
24140
24141 vcpu->arch.ia32_tsc_adjust_msr = 0x0;
24142+ vcpu->arch.pv_time_enabled = false;
24143 kvm_async_pf_hash_reset(vcpu);
24144 kvm_pmu_init(vcpu);
24145
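
Note on the kvmclock hunks above: the pinned time_page/kmap_atomic scheme gives way to KVM's cached gfn-to-hva accessors; the MSR write validates alignment and primes a gfn_to_hva_cache, and every clock update becomes a bounds-checked copy into guest memory. The resulting usage pattern, restated with the same APIs the hunks call (publish_clock() itself is a stand-in name):

    static struct gfn_to_hva_cache clock_cache;

    static int publish_clock(struct kvm *kvm, gpa_t gpa,
                             struct pvclock_vcpu_time_info *hv)
    {
        /* One-time translation; fails if gpa has no backing memslot. */
        if (kvm_gfn_to_hva_cache_init(kvm, &clock_cache, gpa))
            return -EFAULT;

        /* Bounds-checked copy into guest memory on every update. */
        return kvm_write_guest_cached(kvm, &clock_cache, hv, sizeof(*hv));
    }
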
24146diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
24147index df4176c..23ce092 100644
24148--- a/arch/x86/lguest/boot.c
24149+++ b/arch/x86/lguest/boot.c
24150@@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
24151 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
24152 * Launcher to reboot us.
24153 */
24154-static void lguest_restart(char *reason)
24155+static __noreturn void lguest_restart(char *reason)
24156 {
24157 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
24158+ BUG();
24159 }
24160
24161 /*G:050
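
Note on the lguest_restart() hunk above: the shutdown hypercall never returns, so the function is annotated __noreturn and backed with BUG() in case the impossible happens; the annotation also lets the compiler prune dead paths in callers. The general pattern, sketched (do_shutdown_hypercall() is hypothetical):

    #include <linux/bug.h>
    #include <linux/compiler.h>

    /* Promise the compiler this never returns, and trap if it does. */
    static __noreturn void die_restart(void)
    {
        do_shutdown_hypercall();  /* hypothetical; should not come back */
        BUG();                    /* enforce the promise */
    }
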
24162diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
24163index 00933d5..3a64af9 100644
24164--- a/arch/x86/lib/atomic64_386_32.S
24165+++ b/arch/x86/lib/atomic64_386_32.S
24166@@ -48,6 +48,10 @@ BEGIN(read)
24167 movl (v), %eax
24168 movl 4(v), %edx
24169 RET_ENDP
24170+BEGIN(read_unchecked)
24171+ movl (v), %eax
24172+ movl 4(v), %edx
24173+RET_ENDP
24174 #undef v
24175
24176 #define v %esi
24177@@ -55,6 +59,10 @@ BEGIN(set)
24178 movl %ebx, (v)
24179 movl %ecx, 4(v)
24180 RET_ENDP
24181+BEGIN(set_unchecked)
24182+ movl %ebx, (v)
24183+ movl %ecx, 4(v)
24184+RET_ENDP
24185 #undef v
24186
24187 #define v %esi
24188@@ -70,6 +78,20 @@ RET_ENDP
24189 BEGIN(add)
24190 addl %eax, (v)
24191 adcl %edx, 4(v)
24192+
24193+#ifdef CONFIG_PAX_REFCOUNT
24194+ jno 0f
24195+ subl %eax, (v)
24196+ sbbl %edx, 4(v)
24197+ int $4
24198+0:
24199+ _ASM_EXTABLE(0b, 0b)
24200+#endif
24201+
24202+RET_ENDP
24203+BEGIN(add_unchecked)
24204+ addl %eax, (v)
24205+ adcl %edx, 4(v)
24206 RET_ENDP
24207 #undef v
24208
24209@@ -77,6 +99,24 @@ RET_ENDP
24210 BEGIN(add_return)
24211 addl (v), %eax
24212 adcl 4(v), %edx
24213+
24214+#ifdef CONFIG_PAX_REFCOUNT
24215+ into
24216+1234:
24217+ _ASM_EXTABLE(1234b, 2f)
24218+#endif
24219+
24220+ movl %eax, (v)
24221+ movl %edx, 4(v)
24222+
24223+#ifdef CONFIG_PAX_REFCOUNT
24224+2:
24225+#endif
24226+
24227+RET_ENDP
24228+BEGIN(add_return_unchecked)
24229+ addl (v), %eax
24230+ adcl 4(v), %edx
24231 movl %eax, (v)
24232 movl %edx, 4(v)
24233 RET_ENDP
24234@@ -86,6 +126,20 @@ RET_ENDP
24235 BEGIN(sub)
24236 subl %eax, (v)
24237 sbbl %edx, 4(v)
24238+
24239+#ifdef CONFIG_PAX_REFCOUNT
24240+ jno 0f
24241+ addl %eax, (v)
24242+ adcl %edx, 4(v)
24243+ int $4
24244+0:
24245+ _ASM_EXTABLE(0b, 0b)
24246+#endif
24247+
24248+RET_ENDP
24249+BEGIN(sub_unchecked)
24250+ subl %eax, (v)
24251+ sbbl %edx, 4(v)
24252 RET_ENDP
24253 #undef v
24254
24255@@ -96,6 +150,27 @@ BEGIN(sub_return)
24256 sbbl $0, %edx
24257 addl (v), %eax
24258 adcl 4(v), %edx
24259+
24260+#ifdef CONFIG_PAX_REFCOUNT
24261+ into
24262+1234:
24263+ _ASM_EXTABLE(1234b, 2f)
24264+#endif
24265+
24266+ movl %eax, (v)
24267+ movl %edx, 4(v)
24268+
24269+#ifdef CONFIG_PAX_REFCOUNT
24270+2:
24271+#endif
24272+
24273+RET_ENDP
24274+BEGIN(sub_return_unchecked)
24275+ negl %edx
24276+ negl %eax
24277+ sbbl $0, %edx
24278+ addl (v), %eax
24279+ adcl 4(v), %edx
24280 movl %eax, (v)
24281 movl %edx, 4(v)
24282 RET_ENDP
24283@@ -105,6 +180,20 @@ RET_ENDP
24284 BEGIN(inc)
24285 addl $1, (v)
24286 adcl $0, 4(v)
24287+
24288+#ifdef CONFIG_PAX_REFCOUNT
24289+ jno 0f
24290+ subl $1, (v)
24291+ sbbl $0, 4(v)
24292+ int $4
24293+0:
24294+ _ASM_EXTABLE(0b, 0b)
24295+#endif
24296+
24297+RET_ENDP
24298+BEGIN(inc_unchecked)
24299+ addl $1, (v)
24300+ adcl $0, 4(v)
24301 RET_ENDP
24302 #undef v
24303
24304@@ -114,6 +203,26 @@ BEGIN(inc_return)
24305 movl 4(v), %edx
24306 addl $1, %eax
24307 adcl $0, %edx
24308+
24309+#ifdef CONFIG_PAX_REFCOUNT
24310+ into
24311+1234:
24312+ _ASM_EXTABLE(1234b, 2f)
24313+#endif
24314+
24315+ movl %eax, (v)
24316+ movl %edx, 4(v)
24317+
24318+#ifdef CONFIG_PAX_REFCOUNT
24319+2:
24320+#endif
24321+
24322+RET_ENDP
24323+BEGIN(inc_return_unchecked)
24324+ movl (v), %eax
24325+ movl 4(v), %edx
24326+ addl $1, %eax
24327+ adcl $0, %edx
24328 movl %eax, (v)
24329 movl %edx, 4(v)
24330 RET_ENDP
24331@@ -123,6 +232,20 @@ RET_ENDP
24332 BEGIN(dec)
24333 subl $1, (v)
24334 sbbl $0, 4(v)
24335+
24336+#ifdef CONFIG_PAX_REFCOUNT
24337+ jno 0f
24338+ addl $1, (v)
24339+ adcl $0, 4(v)
24340+ int $4
24341+0:
24342+ _ASM_EXTABLE(0b, 0b)
24343+#endif
24344+
24345+RET_ENDP
24346+BEGIN(dec_unchecked)
24347+ subl $1, (v)
24348+ sbbl $0, 4(v)
24349 RET_ENDP
24350 #undef v
24351
24352@@ -132,6 +255,26 @@ BEGIN(dec_return)
24353 movl 4(v), %edx
24354 subl $1, %eax
24355 sbbl $0, %edx
24356+
24357+#ifdef CONFIG_PAX_REFCOUNT
24358+ into
24359+1234:
24360+ _ASM_EXTABLE(1234b, 2f)
24361+#endif
24362+
24363+ movl %eax, (v)
24364+ movl %edx, 4(v)
24365+
24366+#ifdef CONFIG_PAX_REFCOUNT
24367+2:
24368+#endif
24369+
24370+RET_ENDP
24371+BEGIN(dec_return_unchecked)
24372+ movl (v), %eax
24373+ movl 4(v), %edx
24374+ subl $1, %eax
24375+ sbbl $0, %edx
24376 movl %eax, (v)
24377 movl %edx, 4(v)
24378 RET_ENDP
24379@@ -143,6 +286,13 @@ BEGIN(add_unless)
24380 adcl %edx, %edi
24381 addl (v), %eax
24382 adcl 4(v), %edx
24383+
24384+#ifdef CONFIG_PAX_REFCOUNT
24385+ into
24386+1234:
24387+ _ASM_EXTABLE(1234b, 2f)
24388+#endif
24389+
24390 cmpl %eax, %ecx
24391 je 3f
24392 1:
24393@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
24394 1:
24395 addl $1, %eax
24396 adcl $0, %edx
24397+
24398+#ifdef CONFIG_PAX_REFCOUNT
24399+ into
24400+1234:
24401+ _ASM_EXTABLE(1234b, 2f)
24402+#endif
24403+
24404 movl %eax, (v)
24405 movl %edx, 4(v)
24406 movl $1, %eax
24407@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
24408 movl 4(v), %edx
24409 subl $1, %eax
24410 sbbl $0, %edx
24411+
24412+#ifdef CONFIG_PAX_REFCOUNT
24413+ into
24414+1234:
24415+ _ASM_EXTABLE(1234b, 1f)
24416+#endif
24417+
24418 js 1f
24419 movl %eax, (v)
24420 movl %edx, 4(v)
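
Note on the atomic64 hunks above: each PAX_REFCOUNT-checked variant performs the 64-bit add/sub, then tests the overflow flag (jno falls through on success; into raises #OF), undoing the operation before trapping so the counter never actually wraps; the _unchecked twins keep raw semantics for values that may legitimately overflow. A C analogue of the checked path, using GCC's checked-arithmetic builtin (GCC 5 or later; the patch itself does this in assembly):

    #include <stdint.h>
    #include <stdio.h>

    /* Detect signed overflow on a 64-bit counter and refuse to wrap. */
    static int refcount_add_checked(int64_t *v, int64_t delta)
    {
        int64_t sum;

        if (__builtin_add_overflow(*v, delta, &sum))
            return -1;   /* would overflow: leave *v untouched */
        *v = sum;
        return 0;
    }

    int main(void)
    {
        int64_t v = INT64_MAX - 1;

        printf("%d\n", refcount_add_checked(&v, 1));  /* 0: ok       */
        printf("%d\n", refcount_add_checked(&v, 1));  /* -1: refused */
        return 0;
    }
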
24421diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
24422index f5cc9eb..51fa319 100644
24423--- a/arch/x86/lib/atomic64_cx8_32.S
24424+++ b/arch/x86/lib/atomic64_cx8_32.S
24425@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
24426 CFI_STARTPROC
24427
24428 read64 %ecx
24429+ pax_force_retaddr
24430 ret
24431 CFI_ENDPROC
24432 ENDPROC(atomic64_read_cx8)
24433
24434+ENTRY(atomic64_read_unchecked_cx8)
24435+ CFI_STARTPROC
24436+
24437+ read64 %ecx
24438+ pax_force_retaddr
24439+ ret
24440+ CFI_ENDPROC
24441+ENDPROC(atomic64_read_unchecked_cx8)
24442+
24443 ENTRY(atomic64_set_cx8)
24444 CFI_STARTPROC
24445
24446@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
24447 cmpxchg8b (%esi)
24448 jne 1b
24449
24450+ pax_force_retaddr
24451 ret
24452 CFI_ENDPROC
24453 ENDPROC(atomic64_set_cx8)
24454
24455+ENTRY(atomic64_set_unchecked_cx8)
24456+ CFI_STARTPROC
24457+
24458+1:
24459+/* we don't need LOCK_PREFIX since aligned 64-bit writes
24460+ * are atomic on 586 and newer */
24461+ cmpxchg8b (%esi)
24462+ jne 1b
24463+
24464+ pax_force_retaddr
24465+ ret
24466+ CFI_ENDPROC
24467+ENDPROC(atomic64_set_unchecked_cx8)
24468+
24469 ENTRY(atomic64_xchg_cx8)
24470 CFI_STARTPROC
24471
24472@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
24473 cmpxchg8b (%esi)
24474 jne 1b
24475
24476+ pax_force_retaddr
24477 ret
24478 CFI_ENDPROC
24479 ENDPROC(atomic64_xchg_cx8)
24480
24481-.macro addsub_return func ins insc
24482-ENTRY(atomic64_\func\()_return_cx8)
24483+.macro addsub_return func ins insc unchecked=""
24484+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
24485 CFI_STARTPROC
24486 SAVE ebp
24487 SAVE ebx
24488@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
24489 movl %edx, %ecx
24490 \ins\()l %esi, %ebx
24491 \insc\()l %edi, %ecx
24492+
24493+.ifb \unchecked
24494+#ifdef CONFIG_PAX_REFCOUNT
24495+ into
24496+2:
24497+ _ASM_EXTABLE(2b, 3f)
24498+#endif
24499+.endif
24500+
24501 LOCK_PREFIX
24502 cmpxchg8b (%ebp)
24503 jne 1b
24504-
24505-10:
24506 movl %ebx, %eax
24507 movl %ecx, %edx
24508+
24509+.ifb \unchecked
24510+#ifdef CONFIG_PAX_REFCOUNT
24511+3:
24512+#endif
24513+.endif
24514+
24515 RESTORE edi
24516 RESTORE esi
24517 RESTORE ebx
24518 RESTORE ebp
24519+ pax_force_retaddr
24520 ret
24521 CFI_ENDPROC
24522-ENDPROC(atomic64_\func\()_return_cx8)
24523+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
24524 .endm
24525
24526 addsub_return add add adc
24527 addsub_return sub sub sbb
24528+addsub_return add add adc _unchecked
24529+addsub_return sub sub sbb _unchecked
24530
24531-.macro incdec_return func ins insc
24532-ENTRY(atomic64_\func\()_return_cx8)
24533+.macro incdec_return func ins insc unchecked=""
24534+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
24535 CFI_STARTPROC
24536 SAVE ebx
24537
24538@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
24539 movl %edx, %ecx
24540 \ins\()l $1, %ebx
24541 \insc\()l $0, %ecx
24542+
24543+.ifb \unchecked
24544+#ifdef CONFIG_PAX_REFCOUNT
24545+ into
24546+2:
24547+ _ASM_EXTABLE(2b, 3f)
24548+#endif
24549+.endif
24550+
24551 LOCK_PREFIX
24552 cmpxchg8b (%esi)
24553 jne 1b
24554
24555-10:
24556 movl %ebx, %eax
24557 movl %ecx, %edx
24558+
24559+.ifb \unchecked
24560+#ifdef CONFIG_PAX_REFCOUNT
24561+3:
24562+#endif
24563+.endif
24564+
24565 RESTORE ebx
24566+ pax_force_retaddr
24567 ret
24568 CFI_ENDPROC
24569-ENDPROC(atomic64_\func\()_return_cx8)
24570+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
24571 .endm
24572
24573 incdec_return inc add adc
24574 incdec_return dec sub sbb
24575+incdec_return inc add adc _unchecked
24576+incdec_return dec sub sbb _unchecked
24577
24578 ENTRY(atomic64_dec_if_positive_cx8)
24579 CFI_STARTPROC
24580@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
24581 movl %edx, %ecx
24582 subl $1, %ebx
24583 sbb $0, %ecx
24584+
24585+#ifdef CONFIG_PAX_REFCOUNT
24586+ into
24587+1234:
24588+ _ASM_EXTABLE(1234b, 2f)
24589+#endif
24590+
24591 js 2f
24592 LOCK_PREFIX
24593 cmpxchg8b (%esi)
24594@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
24595 movl %ebx, %eax
24596 movl %ecx, %edx
24597 RESTORE ebx
24598+ pax_force_retaddr
24599 ret
24600 CFI_ENDPROC
24601 ENDPROC(atomic64_dec_if_positive_cx8)
24602@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
24603 movl %edx, %ecx
24604 addl %ebp, %ebx
24605 adcl %edi, %ecx
24606+
24607+#ifdef CONFIG_PAX_REFCOUNT
24608+ into
24609+1234:
24610+ _ASM_EXTABLE(1234b, 3f)
24611+#endif
24612+
24613 LOCK_PREFIX
24614 cmpxchg8b (%esi)
24615 jne 1b
24616@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
24617 CFI_ADJUST_CFA_OFFSET -8
24618 RESTORE ebx
24619 RESTORE ebp
24620+ pax_force_retaddr
24621 ret
24622 4:
24623 cmpl %edx, 4(%esp)
24624@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
24625 xorl %ecx, %ecx
24626 addl $1, %ebx
24627 adcl %edx, %ecx
24628+
24629+#ifdef CONFIG_PAX_REFCOUNT
24630+ into
24631+1234:
24632+ _ASM_EXTABLE(1234b, 3f)
24633+#endif
24634+
24635 LOCK_PREFIX
24636 cmpxchg8b (%esi)
24637 jne 1b
24638@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
24639 movl $1, %eax
24640 3:
24641 RESTORE ebx
24642+ pax_force_retaddr
24643 ret
24644 CFI_ENDPROC
24645 ENDPROC(atomic64_inc_not_zero_cx8)
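
Note on the cx8 hunks above: on cmpxchg8b-capable CPUs the same checked/unchecked split is built from a compare-exchange retry loop: read the old 64-bit value, compute the new one (with the overflow test spliced in for the checked variants), and retry until cmpxchg8b wins. The loop skeleton in C, using compiler atomics for illustration:

    #include <stdint.h>

    /* Optimistic read, compute, compare-and-swap, retry on contention.
     * __atomic_compare_exchange_n refreshes 'old' when it fails. */
    static int64_t atomic64_add_return_c(int64_t *v, int64_t delta)
    {
        int64_t old = __atomic_load_n(v, __ATOMIC_RELAXED);
        int64_t val;

        do {
            val = old + delta;  /* checked variants test overflow here */
        } while (!__atomic_compare_exchange_n(v, &old, val, 0,
                                              __ATOMIC_SEQ_CST,
                                              __ATOMIC_RELAXED));
        return val;
    }
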
24646diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
24647index 2af5df3..62b1a5a 100644
24648--- a/arch/x86/lib/checksum_32.S
24649+++ b/arch/x86/lib/checksum_32.S
24650@@ -29,7 +29,8 @@
24651 #include <asm/dwarf2.h>
24652 #include <asm/errno.h>
24653 #include <asm/asm.h>
24654-
24655+#include <asm/segment.h>
24656+
24657 /*
24658 * computes a partial checksum, e.g. for TCP/UDP fragments
24659 */
24660@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
24661
24662 #define ARGBASE 16
24663 #define FP 12
24664-
24665-ENTRY(csum_partial_copy_generic)
24666+
24667+ENTRY(csum_partial_copy_generic_to_user)
24668 CFI_STARTPROC
24669+
24670+#ifdef CONFIG_PAX_MEMORY_UDEREF
24671+ pushl_cfi %gs
24672+ popl_cfi %es
24673+ jmp csum_partial_copy_generic
24674+#endif
24675+
24676+ENTRY(csum_partial_copy_generic_from_user)
24677+
24678+#ifdef CONFIG_PAX_MEMORY_UDEREF
24679+ pushl_cfi %gs
24680+ popl_cfi %ds
24681+#endif
24682+
24683+ENTRY(csum_partial_copy_generic)
24684 subl $4,%esp
24685 CFI_ADJUST_CFA_OFFSET 4
24686 pushl_cfi %edi
24687@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
24688 jmp 4f
24689 SRC(1: movw (%esi), %bx )
24690 addl $2, %esi
24691-DST( movw %bx, (%edi) )
24692+DST( movw %bx, %es:(%edi) )
24693 addl $2, %edi
24694 addw %bx, %ax
24695 adcl $0, %eax
24696@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
24697 SRC(1: movl (%esi), %ebx )
24698 SRC( movl 4(%esi), %edx )
24699 adcl %ebx, %eax
24700-DST( movl %ebx, (%edi) )
24701+DST( movl %ebx, %es:(%edi) )
24702 adcl %edx, %eax
24703-DST( movl %edx, 4(%edi) )
24704+DST( movl %edx, %es:4(%edi) )
24705
24706 SRC( movl 8(%esi), %ebx )
24707 SRC( movl 12(%esi), %edx )
24708 adcl %ebx, %eax
24709-DST( movl %ebx, 8(%edi) )
24710+DST( movl %ebx, %es:8(%edi) )
24711 adcl %edx, %eax
24712-DST( movl %edx, 12(%edi) )
24713+DST( movl %edx, %es:12(%edi) )
24714
24715 SRC( movl 16(%esi), %ebx )
24716 SRC( movl 20(%esi), %edx )
24717 adcl %ebx, %eax
24718-DST( movl %ebx, 16(%edi) )
24719+DST( movl %ebx, %es:16(%edi) )
24720 adcl %edx, %eax
24721-DST( movl %edx, 20(%edi) )
24722+DST( movl %edx, %es:20(%edi) )
24723
24724 SRC( movl 24(%esi), %ebx )
24725 SRC( movl 28(%esi), %edx )
24726 adcl %ebx, %eax
24727-DST( movl %ebx, 24(%edi) )
24728+DST( movl %ebx, %es:24(%edi) )
24729 adcl %edx, %eax
24730-DST( movl %edx, 28(%edi) )
24731+DST( movl %edx, %es:28(%edi) )
24732
24733 lea 32(%esi), %esi
24734 lea 32(%edi), %edi
24735@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
24736 shrl $2, %edx # This clears CF
24737 SRC(3: movl (%esi), %ebx )
24738 adcl %ebx, %eax
24739-DST( movl %ebx, (%edi) )
24740+DST( movl %ebx, %es:(%edi) )
24741 lea 4(%esi), %esi
24742 lea 4(%edi), %edi
24743 dec %edx
24744@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
24745 jb 5f
24746 SRC( movw (%esi), %cx )
24747 leal 2(%esi), %esi
24748-DST( movw %cx, (%edi) )
24749+DST( movw %cx, %es:(%edi) )
24750 leal 2(%edi), %edi
24751 je 6f
24752 shll $16,%ecx
24753 SRC(5: movb (%esi), %cl )
24754-DST( movb %cl, (%edi) )
24755+DST( movb %cl, %es:(%edi) )
24756 6: addl %ecx, %eax
24757 adcl $0, %eax
24758 7:
24759@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
24760
24761 6001:
24762 movl ARGBASE+20(%esp), %ebx # src_err_ptr
24763- movl $-EFAULT, (%ebx)
24764+ movl $-EFAULT, %ss:(%ebx)
24765
24766 # zero the complete destination - computing the rest
24767 # is too much work
24768@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
24769
24770 6002:
24771 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
24772- movl $-EFAULT,(%ebx)
24773+ movl $-EFAULT,%ss:(%ebx)
24774 jmp 5000b
24775
24776 .previous
24777
24778+ pushl_cfi %ss
24779+ popl_cfi %ds
24780+ pushl_cfi %ss
24781+ popl_cfi %es
24782 popl_cfi %ebx
24783 CFI_RESTORE ebx
24784 popl_cfi %esi
24785@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
24786 popl_cfi %ecx # equivalent to addl $4,%esp
24787 ret
24788 CFI_ENDPROC
24789-ENDPROC(csum_partial_copy_generic)
24790+ENDPROC(csum_partial_copy_generic_to_user)
24791
24792 #else
24793
24794 /* Version for PentiumII/PPro */
24795
24796 #define ROUND1(x) \
24797+ nop; nop; nop; \
24798 SRC(movl x(%esi), %ebx ) ; \
24799 addl %ebx, %eax ; \
24800- DST(movl %ebx, x(%edi) ) ;
24801+ DST(movl %ebx, %es:x(%edi)) ;
24802
24803 #define ROUND(x) \
24804+ nop; nop; nop; \
24805 SRC(movl x(%esi), %ebx ) ; \
24806 adcl %ebx, %eax ; \
24807- DST(movl %ebx, x(%edi) ) ;
24808+ DST(movl %ebx, %es:x(%edi)) ;
24809
24810 #define ARGBASE 12
24811-
24812-ENTRY(csum_partial_copy_generic)
24813+
24814+ENTRY(csum_partial_copy_generic_to_user)
24815 CFI_STARTPROC
24816+
24817+#ifdef CONFIG_PAX_MEMORY_UDEREF
24818+ pushl_cfi %gs
24819+ popl_cfi %es
24820+ jmp csum_partial_copy_generic
24821+#endif
24822+
24823+ENTRY(csum_partial_copy_generic_from_user)
24824+
24825+#ifdef CONFIG_PAX_MEMORY_UDEREF
24826+ pushl_cfi %gs
24827+ popl_cfi %ds
24828+#endif
24829+
24830+ENTRY(csum_partial_copy_generic)
24831 pushl_cfi %ebx
24832 CFI_REL_OFFSET ebx, 0
24833 pushl_cfi %edi
24834@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
24835 subl %ebx, %edi
24836 lea -1(%esi),%edx
24837 andl $-32,%edx
24838- lea 3f(%ebx,%ebx), %ebx
24839+ lea 3f(%ebx,%ebx,2), %ebx
24840 testl %esi, %esi
24841 jmp *%ebx
24842 1: addl $64,%esi
24843@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
24844 jb 5f
24845 SRC( movw (%esi), %dx )
24846 leal 2(%esi), %esi
24847-DST( movw %dx, (%edi) )
24848+DST( movw %dx, %es:(%edi) )
24849 leal 2(%edi), %edi
24850 je 6f
24851 shll $16,%edx
24852 5:
24853 SRC( movb (%esi), %dl )
24854-DST( movb %dl, (%edi) )
24855+DST( movb %dl, %es:(%edi) )
24856 6: addl %edx, %eax
24857 adcl $0, %eax
24858 7:
24859 .section .fixup, "ax"
24860 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
24861- movl $-EFAULT, (%ebx)
24862+ movl $-EFAULT, %ss:(%ebx)
24863 # zero the complete destination (computing the rest is too much work)
24864 movl ARGBASE+8(%esp),%edi # dst
24865 movl ARGBASE+12(%esp),%ecx # len
24866@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
24867 rep; stosb
24868 jmp 7b
24869 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
24870- movl $-EFAULT, (%ebx)
24871+ movl $-EFAULT, %ss:(%ebx)
24872 jmp 7b
24873 .previous
24874
24875+#ifdef CONFIG_PAX_MEMORY_UDEREF
24876+ pushl_cfi %ss
24877+ popl_cfi %ds
24878+ pushl_cfi %ss
24879+ popl_cfi %es
24880+#endif
24881+
24882 popl_cfi %esi
24883 CFI_RESTORE esi
24884 popl_cfi %edi
24885@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
24886 CFI_RESTORE ebx
24887 ret
24888 CFI_ENDPROC
24889-ENDPROC(csum_partial_copy_generic)
24890+ENDPROC(csum_partial_copy_generic_to_user)
24891
24892 #undef ROUND
24893 #undef ROUND1
24894diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
24895index f2145cf..cea889d 100644
24896--- a/arch/x86/lib/clear_page_64.S
24897+++ b/arch/x86/lib/clear_page_64.S
24898@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
24899 movl $4096/8,%ecx
24900 xorl %eax,%eax
24901 rep stosq
24902+ pax_force_retaddr
24903 ret
24904 CFI_ENDPROC
24905 ENDPROC(clear_page_c)
24906@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
24907 movl $4096,%ecx
24908 xorl %eax,%eax
24909 rep stosb
24910+ pax_force_retaddr
24911 ret
24912 CFI_ENDPROC
24913 ENDPROC(clear_page_c_e)
24914@@ -43,6 +45,7 @@ ENTRY(clear_page)
24915 leaq 64(%rdi),%rdi
24916 jnz .Lloop
24917 nop
24918+ pax_force_retaddr
24919 ret
24920 CFI_ENDPROC
24921 .Lclear_page_end:
24922@@ -58,7 +61,7 @@ ENDPROC(clear_page)
24923
24924 #include <asm/cpufeature.h>
24925
24926- .section .altinstr_replacement,"ax"
24927+ .section .altinstr_replacement,"a"
24928 1: .byte 0xeb /* jmp <disp8> */
24929 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
24930 2: .byte 0xeb /* jmp <disp8> */
24931diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
24932index 1e572c5..2a162cd 100644
24933--- a/arch/x86/lib/cmpxchg16b_emu.S
24934+++ b/arch/x86/lib/cmpxchg16b_emu.S
24935@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
24936
24937 popf
24938 mov $1, %al
24939+ pax_force_retaddr
24940 ret
24941
24942 not_same:
24943 popf
24944 xor %al,%al
24945+ pax_force_retaddr
24946 ret
24947
24948 CFI_ENDPROC
24949diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
24950index 176cca6..1166c50 100644
24951--- a/arch/x86/lib/copy_page_64.S
24952+++ b/arch/x86/lib/copy_page_64.S
24953@@ -9,6 +9,7 @@ copy_page_rep:
24954 CFI_STARTPROC
24955 movl $4096/8, %ecx
24956 rep movsq
24957+ pax_force_retaddr
24958 ret
24959 CFI_ENDPROC
24960 ENDPROC(copy_page_rep)
24961@@ -20,12 +21,14 @@ ENDPROC(copy_page_rep)
24962
24963 ENTRY(copy_page)
24964 CFI_STARTPROC
24965- subq $2*8, %rsp
24966- CFI_ADJUST_CFA_OFFSET 2*8
24967+ subq $3*8, %rsp
24968+ CFI_ADJUST_CFA_OFFSET 3*8
24969 movq %rbx, (%rsp)
24970 CFI_REL_OFFSET rbx, 0
24971 movq %r12, 1*8(%rsp)
24972 CFI_REL_OFFSET r12, 1*8
24973+ movq %r13, 2*8(%rsp)
24974+ CFI_REL_OFFSET r13, 2*8
24975
24976 movl $(4096/64)-5, %ecx
24977 .p2align 4
24978@@ -36,7 +39,7 @@ ENTRY(copy_page)
24979 movq 0x8*2(%rsi), %rdx
24980 movq 0x8*3(%rsi), %r8
24981 movq 0x8*4(%rsi), %r9
24982- movq 0x8*5(%rsi), %r10
24983+ movq 0x8*5(%rsi), %r13
24984 movq 0x8*6(%rsi), %r11
24985 movq 0x8*7(%rsi), %r12
24986
24987@@ -47,7 +50,7 @@ ENTRY(copy_page)
24988 movq %rdx, 0x8*2(%rdi)
24989 movq %r8, 0x8*3(%rdi)
24990 movq %r9, 0x8*4(%rdi)
24991- movq %r10, 0x8*5(%rdi)
24992+ movq %r13, 0x8*5(%rdi)
24993 movq %r11, 0x8*6(%rdi)
24994 movq %r12, 0x8*7(%rdi)
24995
24996@@ -66,7 +69,7 @@ ENTRY(copy_page)
24997 movq 0x8*2(%rsi), %rdx
24998 movq 0x8*3(%rsi), %r8
24999 movq 0x8*4(%rsi), %r9
25000- movq 0x8*5(%rsi), %r10
25001+ movq 0x8*5(%rsi), %r13
25002 movq 0x8*6(%rsi), %r11
25003 movq 0x8*7(%rsi), %r12
25004
25005@@ -75,7 +78,7 @@ ENTRY(copy_page)
25006 movq %rdx, 0x8*2(%rdi)
25007 movq %r8, 0x8*3(%rdi)
25008 movq %r9, 0x8*4(%rdi)
25009- movq %r10, 0x8*5(%rdi)
25010+ movq %r13, 0x8*5(%rdi)
25011 movq %r11, 0x8*6(%rdi)
25012 movq %r12, 0x8*7(%rdi)
25013
25014@@ -87,8 +90,11 @@ ENTRY(copy_page)
25015 CFI_RESTORE rbx
25016 movq 1*8(%rsp), %r12
25017 CFI_RESTORE r12
25018- addq $2*8, %rsp
25019- CFI_ADJUST_CFA_OFFSET -2*8
25020+ movq 2*8(%rsp), %r13
25021+ CFI_RESTORE r13
25022+ addq $3*8, %rsp
25023+ CFI_ADJUST_CFA_OFFSET -3*8
25024+ pax_force_retaddr
25025 ret
25026 .Lcopy_page_end:
25027 CFI_ENDPROC
25028@@ -99,7 +105,7 @@ ENDPROC(copy_page)
25029
25030 #include <asm/cpufeature.h>
25031
25032- .section .altinstr_replacement,"ax"
25033+ .section .altinstr_replacement,"a"
25034 1: .byte 0xeb /* jmp <disp8> */
25035 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
25036 2:
25037diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
25038index a30ca15..d25fab6 100644
25039--- a/arch/x86/lib/copy_user_64.S
25040+++ b/arch/x86/lib/copy_user_64.S
25041@@ -18,6 +18,7 @@
25042 #include <asm/alternative-asm.h>
25043 #include <asm/asm.h>
25044 #include <asm/smap.h>
25045+#include <asm/pgtable.h>
25046
25047 /*
25048 * By placing feature2 after feature1 in altinstructions section, we logically
25049@@ -31,7 +32,7 @@
25050 .byte 0xe9 /* 32bit jump */
25051 .long \orig-1f /* by default jump to orig */
25052 1:
25053- .section .altinstr_replacement,"ax"
25054+ .section .altinstr_replacement,"a"
25055 2: .byte 0xe9 /* near jump with 32bit immediate */
25056 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
25057 3: .byte 0xe9 /* near jump with 32bit immediate */
25058@@ -70,47 +71,20 @@
25059 #endif
25060 .endm
25061
25062-/* Standard copy_to_user with segment limit checking */
25063-ENTRY(_copy_to_user)
25064- CFI_STARTPROC
25065- GET_THREAD_INFO(%rax)
25066- movq %rdi,%rcx
25067- addq %rdx,%rcx
25068- jc bad_to_user
25069- cmpq TI_addr_limit(%rax),%rcx
25070- ja bad_to_user
25071- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
25072- copy_user_generic_unrolled,copy_user_generic_string, \
25073- copy_user_enhanced_fast_string
25074- CFI_ENDPROC
25075-ENDPROC(_copy_to_user)
25076-
25077-/* Standard copy_from_user with segment limit checking */
25078-ENTRY(_copy_from_user)
25079- CFI_STARTPROC
25080- GET_THREAD_INFO(%rax)
25081- movq %rsi,%rcx
25082- addq %rdx,%rcx
25083- jc bad_from_user
25084- cmpq TI_addr_limit(%rax),%rcx
25085- ja bad_from_user
25086- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
25087- copy_user_generic_unrolled,copy_user_generic_string, \
25088- copy_user_enhanced_fast_string
25089- CFI_ENDPROC
25090-ENDPROC(_copy_from_user)
25091-
25092 .section .fixup,"ax"
25093 /* must zero dest */
25094 ENTRY(bad_from_user)
25095 bad_from_user:
25096 CFI_STARTPROC
25097+ testl %edx,%edx
25098+ js bad_to_user
25099 movl %edx,%ecx
25100 xorl %eax,%eax
25101 rep
25102 stosb
25103 bad_to_user:
25104 movl %edx,%eax
25105+ pax_force_retaddr
25106 ret
25107 CFI_ENDPROC
25108 ENDPROC(bad_from_user)
25109@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
25110 jz 17f
25111 1: movq (%rsi),%r8
25112 2: movq 1*8(%rsi),%r9
25113-3: movq 2*8(%rsi),%r10
25114+3: movq 2*8(%rsi),%rax
25115 4: movq 3*8(%rsi),%r11
25116 5: movq %r8,(%rdi)
25117 6: movq %r9,1*8(%rdi)
25118-7: movq %r10,2*8(%rdi)
25119+7: movq %rax,2*8(%rdi)
25120 8: movq %r11,3*8(%rdi)
25121 9: movq 4*8(%rsi),%r8
25122 10: movq 5*8(%rsi),%r9
25123-11: movq 6*8(%rsi),%r10
25124+11: movq 6*8(%rsi),%rax
25125 12: movq 7*8(%rsi),%r11
25126 13: movq %r8,4*8(%rdi)
25127 14: movq %r9,5*8(%rdi)
25128-15: movq %r10,6*8(%rdi)
25129+15: movq %rax,6*8(%rdi)
25130 16: movq %r11,7*8(%rdi)
25131 leaq 64(%rsi),%rsi
25132 leaq 64(%rdi),%rdi
25133@@ -180,6 +154,7 @@ ENTRY(copy_user_generic_unrolled)
25134 jnz 21b
25135 23: xor %eax,%eax
25136 ASM_CLAC
25137+ pax_force_retaddr
25138 ret
25139
25140 .section .fixup,"ax"
25141@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
25142 movsb
25143 4: xorl %eax,%eax
25144 ASM_CLAC
25145+ pax_force_retaddr
25146 ret
25147
25148 .section .fixup,"ax"
25149@@ -286,6 +262,7 @@ ENTRY(copy_user_enhanced_fast_string)
25150 movsb
25151 2: xorl %eax,%eax
25152 ASM_CLAC
25153+ pax_force_retaddr
25154 ret
25155
25156 .section .fixup,"ax"
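
Note on the copy_user hunks above: the deleted _copy_to_user/_copy_from_user stubs were the assembly home of the classic user-range check (add the length, fail on carry, fail beyond the thread's address limit), which this series relocates; the matching EXPORT_SYMBOL removals appear in x8664_ksyms_64.c earlier. The check itself, restated in C:

    /* Reject if addr + len wraps (the 'jc bad_*' test) or crosses the
     * task's address limit (the 'cmp TI_addr_limit' test). */
    static int user_range_ok(unsigned long addr, unsigned long len,
                             unsigned long limit)
    {
        unsigned long end = addr + len;

        if (end < addr)
            return 0;        /* wrapped around */
        return end <= limit; /* entirely below the limit */
    }
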
25157diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
25158index 6a4f43c..f5f9e26 100644
25159--- a/arch/x86/lib/copy_user_nocache_64.S
25160+++ b/arch/x86/lib/copy_user_nocache_64.S
25161@@ -8,6 +8,7 @@
25162
25163 #include <linux/linkage.h>
25164 #include <asm/dwarf2.h>
25165+#include <asm/alternative-asm.h>
25166
25167 #define FIX_ALIGNMENT 1
25168
25169@@ -16,6 +17,7 @@
25170 #include <asm/thread_info.h>
25171 #include <asm/asm.h>
25172 #include <asm/smap.h>
25173+#include <asm/pgtable.h>
25174
25175 .macro ALIGN_DESTINATION
25176 #ifdef FIX_ALIGNMENT
25177@@ -49,6 +51,15 @@
25178 */
25179 ENTRY(__copy_user_nocache)
25180 CFI_STARTPROC
25181+
25182+#ifdef CONFIG_PAX_MEMORY_UDEREF
25183+ mov $PAX_USER_SHADOW_BASE,%rcx
25184+ cmp %rcx,%rsi
25185+ jae 1f
25186+ add %rcx,%rsi
25187+1:
25188+#endif
25189+
25190 ASM_STAC
25191 cmpl $8,%edx
25192 	jb 20f		/* less than 8 bytes, go to byte copy loop */
25193@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
25194 jz 17f
25195 1: movq (%rsi),%r8
25196 2: movq 1*8(%rsi),%r9
25197-3: movq 2*8(%rsi),%r10
25198+3: movq 2*8(%rsi),%rax
25199 4: movq 3*8(%rsi),%r11
25200 5: movnti %r8,(%rdi)
25201 6: movnti %r9,1*8(%rdi)
25202-7: movnti %r10,2*8(%rdi)
25203+7: movnti %rax,2*8(%rdi)
25204 8: movnti %r11,3*8(%rdi)
25205 9: movq 4*8(%rsi),%r8
25206 10: movq 5*8(%rsi),%r9
25207-11: movq 6*8(%rsi),%r10
25208+11: movq 6*8(%rsi),%rax
25209 12: movq 7*8(%rsi),%r11
25210 13: movnti %r8,4*8(%rdi)
25211 14: movnti %r9,5*8(%rdi)
25212-15: movnti %r10,6*8(%rdi)
25213+15: movnti %rax,6*8(%rdi)
25214 16: movnti %r11,7*8(%rdi)
25215 leaq 64(%rsi),%rsi
25216 leaq 64(%rdi),%rdi
25217@@ -99,6 +110,7 @@ ENTRY(__copy_user_nocache)
25218 23: xorl %eax,%eax
25219 ASM_CLAC
25220 sfence
25221+ pax_force_retaddr
25222 ret
25223
25224 .section .fixup,"ax"
25225diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
25226index 2419d5f..953ee51 100644
25227--- a/arch/x86/lib/csum-copy_64.S
25228+++ b/arch/x86/lib/csum-copy_64.S
25229@@ -9,6 +9,7 @@
25230 #include <asm/dwarf2.h>
25231 #include <asm/errno.h>
25232 #include <asm/asm.h>
25233+#include <asm/alternative-asm.h>
25234
25235 /*
25236 * Checksum copy with exception handling.
25237@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
25238 CFI_RESTORE rbp
25239 addq $7*8, %rsp
25240 CFI_ADJUST_CFA_OFFSET -7*8
25241+ pax_force_retaddr 0, 1
25242 ret
25243 CFI_RESTORE_STATE
25244
25245diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
25246index 25b7ae8..169fafc 100644
25247--- a/arch/x86/lib/csum-wrappers_64.c
25248+++ b/arch/x86/lib/csum-wrappers_64.c
25249@@ -52,7 +52,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
25250 len -= 2;
25251 }
25252 }
25253- isum = csum_partial_copy_generic((__force const void *)src,
25254+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
25255 dst, len, isum, errp, NULL);
25256 if (unlikely(*errp))
25257 goto out_err;
25258@@ -105,7 +105,7 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
25259 }
25260
25261 *errp = 0;
25262- return csum_partial_copy_generic(src, (void __force *)dst,
25263+ return csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
25264 len, isum, NULL, errp);
25265 }
25266 EXPORT_SYMBOL(csum_partial_copy_to_user);
25267diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
25268index 156b9c8..b144132 100644
25269--- a/arch/x86/lib/getuser.S
25270+++ b/arch/x86/lib/getuser.S
25271@@ -34,17 +34,40 @@
25272 #include <asm/thread_info.h>
25273 #include <asm/asm.h>
25274 #include <asm/smap.h>
25275+#include <asm/segment.h>
25276+#include <asm/pgtable.h>
25277+#include <asm/alternative-asm.h>
25278+
25279+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
25280+#define __copyuser_seg gs;
25281+#else
25282+#define __copyuser_seg
25283+#endif
25284
25285 .text
25286 ENTRY(__get_user_1)
25287 CFI_STARTPROC
25288+
25289+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25290 GET_THREAD_INFO(%_ASM_DX)
25291 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
25292 jae bad_get_user
25293 ASM_STAC
25294-1: movzb (%_ASM_AX),%edx
25295+
25296+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25297+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
25298+ cmp %_ASM_DX,%_ASM_AX
25299+ jae 1234f
25300+ add %_ASM_DX,%_ASM_AX
25301+1234:
25302+#endif
25303+
25304+#endif
25305+
25306+1: __copyuser_seg movzb (%_ASM_AX),%edx
25307 xor %eax,%eax
25308 ASM_CLAC
25309+ pax_force_retaddr
25310 ret
25311 CFI_ENDPROC
25312 ENDPROC(__get_user_1)
25313@@ -52,14 +75,28 @@ ENDPROC(__get_user_1)
25314 ENTRY(__get_user_2)
25315 CFI_STARTPROC
25316 add $1,%_ASM_AX
25317+
25318+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25319 jc bad_get_user
25320 GET_THREAD_INFO(%_ASM_DX)
25321 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
25322 jae bad_get_user
25323 ASM_STAC
25324-2: movzwl -1(%_ASM_AX),%edx
25325+
25326+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25327+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
25328+ cmp %_ASM_DX,%_ASM_AX
25329+ jae 1234f
25330+ add %_ASM_DX,%_ASM_AX
25331+1234:
25332+#endif
25333+
25334+#endif
25335+
25336+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
25337 xor %eax,%eax
25338 ASM_CLAC
25339+ pax_force_retaddr
25340 ret
25341 CFI_ENDPROC
25342 ENDPROC(__get_user_2)
25343@@ -67,14 +104,28 @@ ENDPROC(__get_user_2)
25344 ENTRY(__get_user_4)
25345 CFI_STARTPROC
25346 add $3,%_ASM_AX
25347+
25348+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25349 jc bad_get_user
25350 GET_THREAD_INFO(%_ASM_DX)
25351 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
25352 jae bad_get_user
25353 ASM_STAC
25354-3: mov -3(%_ASM_AX),%edx
25355+
25356+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25357+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
25358+ cmp %_ASM_DX,%_ASM_AX
25359+ jae 1234f
25360+ add %_ASM_DX,%_ASM_AX
25361+1234:
25362+#endif
25363+
25364+#endif
25365+
25366+3: __copyuser_seg mov -3(%_ASM_AX),%edx
25367 xor %eax,%eax
25368 ASM_CLAC
25369+ pax_force_retaddr
25370 ret
25371 CFI_ENDPROC
25372 ENDPROC(__get_user_4)
25373@@ -87,10 +138,20 @@ ENTRY(__get_user_8)
25374 GET_THREAD_INFO(%_ASM_DX)
25375 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
25376 jae bad_get_user
25377+
25378+#ifdef CONFIG_PAX_MEMORY_UDEREF
25379+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
25380+ cmp %_ASM_DX,%_ASM_AX
25381+ jae 1234f
25382+ add %_ASM_DX,%_ASM_AX
25383+1234:
25384+#endif
25385+
25386 ASM_STAC
25387 4: movq -7(%_ASM_AX),%_ASM_DX
25388 xor %eax,%eax
25389 ASM_CLAC
25390+ pax_force_retaddr
25391 ret
25392 CFI_ENDPROC
25393 ENDPROC(__get_user_8)
25394@@ -101,6 +162,7 @@ bad_get_user:
25395 xor %edx,%edx
25396 mov $(-EFAULT),%_ASM_AX
25397 ASM_CLAC
25398+ pax_force_retaddr
25399 ret
25400 CFI_ENDPROC
25401 END(bad_get_user)
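
The getuser.S rework uses two strategies. On i386 with PAX_MEMORY_UDEREF the software addr_limit compare is compiled out entirely and the load itself is prefixed with __copyuser_seg (a %gs override), so the user segment's hardware limit does the checking; on amd64 the limit check stays and the pointer is conditionally rebased by PAX_USER_SHADOW_BASE (the "1234:" pattern), while pax_force_retaddr hardens every return. The amd64 branch, rendered in C:

    #include <stdint.h>

    #define PAX_USER_SHADOW_BASE (1UL << 46)   /* as before, an assumed value */

    static unsigned char get_user_byte(uintptr_t uaddr)
    {
            if (uaddr < PAX_USER_SHADOW_BASE)       /* raw userland pointer? */
                    uaddr += PAX_USER_SHADOW_BASE;  /* then use the shadow alias */
            return *(volatile unsigned char *)uaddr; /* the "1:" faulting load */
    }
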
25402diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
25403index 54fcffe..7be149e 100644
25404--- a/arch/x86/lib/insn.c
25405+++ b/arch/x86/lib/insn.c
25406@@ -20,8 +20,10 @@
25407
25408 #ifdef __KERNEL__
25409 #include <linux/string.h>
25410+#include <asm/pgtable_types.h>
25411 #else
25412 #include <string.h>
25413+#define ktla_ktva(addr) addr
25414 #endif
25415 #include <asm/inat.h>
25416 #include <asm/insn.h>
25417@@ -53,8 +55,8 @@
25418 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
25419 {
25420 memset(insn, 0, sizeof(*insn));
25421- insn->kaddr = kaddr;
25422- insn->next_byte = kaddr;
25423+ insn->kaddr = ktla_ktva(kaddr);
25424+ insn->next_byte = ktla_ktva(kaddr);
25425 insn->x86_64 = x86_64 ? 1 : 0;
25426 insn->opnd_bytes = 4;
25427 if (x86_64)
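
ktla_ktva() translates a kernel text address into the alias PaX's KERNEXEC uses on i386 for reading relocated, read-only text; making insn_init() apply it means the decoder always reads instruction bytes through that alias, which is why the kernel build now pulls in pgtable_types.h. The #else branch keeps the userspace build of the decoder compiling by defining the macro away. A sketch of the two shapes, where the kernel-side delta name is an assumption, not taken from this hunk:

    #ifdef CONFIG_PAX_KERNEXEC
    #define ktla_ktva(addr) ((addr) + __KERNEL_TEXT_OFFSET) /* assumed delta name */
    #else
    #define ktla_ktva(addr) (addr)   /* identity, as the userspace hunk defines */
    #endif
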
25428diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
25429index 05a95e7..326f2fa 100644
25430--- a/arch/x86/lib/iomap_copy_64.S
25431+++ b/arch/x86/lib/iomap_copy_64.S
25432@@ -17,6 +17,7 @@
25433
25434 #include <linux/linkage.h>
25435 #include <asm/dwarf2.h>
25436+#include <asm/alternative-asm.h>
25437
25438 /*
25439 * override generic version in lib/iomap_copy.c
25440@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
25441 CFI_STARTPROC
25442 movl %edx,%ecx
25443 rep movsd
25444+ pax_force_retaddr
25445 ret
25446 CFI_ENDPROC
25447 ENDPROC(__iowrite32_copy)
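
This is the smallest recurring .S hunk in the patch: include alternative-asm.h and put pax_force_retaddr in front of ret. Under KERNEXEC the macro forces a high bit into the saved return address, so a return address overwritten with a userland pointer can no longer be followed there. A userspace illustration of the effect (the bit position is an assumption; the macro itself is defined in the patched alternative-asm.h):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t ret = 0x00007f1234561000ULL;  /* forged userland target */
            ret |= 1ULL << 63;                     /* what pax_force_retaddr enforces */
            /* the "return" now points at non-canonical/kernel space and faults */
            printf("%#llx\n", (unsigned long long)ret);
            return 0;
    }
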
25448diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
25449index 1c273be..da9cc0e 100644
25450--- a/arch/x86/lib/memcpy_64.S
25451+++ b/arch/x86/lib/memcpy_64.S
25452@@ -33,6 +33,7 @@
25453 rep movsq
25454 movl %edx, %ecx
25455 rep movsb
25456+ pax_force_retaddr
25457 ret
25458 .Lmemcpy_e:
25459 .previous
25460@@ -49,6 +50,7 @@
25461 movq %rdi, %rax
25462 movq %rdx, %rcx
25463 rep movsb
25464+ pax_force_retaddr
25465 ret
25466 .Lmemcpy_e_e:
25467 .previous
25468@@ -76,13 +78,13 @@ ENTRY(memcpy)
25469 */
25470 movq 0*8(%rsi), %r8
25471 movq 1*8(%rsi), %r9
25472- movq 2*8(%rsi), %r10
25473+ movq 2*8(%rsi), %rcx
25474 movq 3*8(%rsi), %r11
25475 leaq 4*8(%rsi), %rsi
25476
25477 movq %r8, 0*8(%rdi)
25478 movq %r9, 1*8(%rdi)
25479- movq %r10, 2*8(%rdi)
25480+ movq %rcx, 2*8(%rdi)
25481 movq %r11, 3*8(%rdi)
25482 leaq 4*8(%rdi), %rdi
25483 jae .Lcopy_forward_loop
25484@@ -105,12 +107,12 @@ ENTRY(memcpy)
25485 subq $0x20, %rdx
25486 movq -1*8(%rsi), %r8
25487 movq -2*8(%rsi), %r9
25488- movq -3*8(%rsi), %r10
25489+ movq -3*8(%rsi), %rcx
25490 movq -4*8(%rsi), %r11
25491 leaq -4*8(%rsi), %rsi
25492 movq %r8, -1*8(%rdi)
25493 movq %r9, -2*8(%rdi)
25494- movq %r10, -3*8(%rdi)
25495+ movq %rcx, -3*8(%rdi)
25496 movq %r11, -4*8(%rdi)
25497 leaq -4*8(%rdi), %rdi
25498 jae .Lcopy_backward_loop
25499@@ -130,12 +132,13 @@ ENTRY(memcpy)
25500 */
25501 movq 0*8(%rsi), %r8
25502 movq 1*8(%rsi), %r9
25503- movq -2*8(%rsi, %rdx), %r10
25504+ movq -2*8(%rsi, %rdx), %rcx
25505 movq -1*8(%rsi, %rdx), %r11
25506 movq %r8, 0*8(%rdi)
25507 movq %r9, 1*8(%rdi)
25508- movq %r10, -2*8(%rdi, %rdx)
25509+ movq %rcx, -2*8(%rdi, %rdx)
25510 movq %r11, -1*8(%rdi, %rdx)
25511+ pax_force_retaddr
25512 retq
25513 .p2align 4
25514 .Lless_16bytes:
25515@@ -148,6 +151,7 @@ ENTRY(memcpy)
25516 movq -1*8(%rsi, %rdx), %r9
25517 movq %r8, 0*8(%rdi)
25518 movq %r9, -1*8(%rdi, %rdx)
25519+ pax_force_retaddr
25520 retq
25521 .p2align 4
25522 .Lless_8bytes:
25523@@ -161,6 +165,7 @@ ENTRY(memcpy)
25524 movl -4(%rsi, %rdx), %r8d
25525 movl %ecx, (%rdi)
25526 movl %r8d, -4(%rdi, %rdx)
25527+ pax_force_retaddr
25528 retq
25529 .p2align 4
25530 .Lless_3bytes:
25531@@ -179,6 +184,7 @@ ENTRY(memcpy)
25532 movb %cl, (%rdi)
25533
25534 .Lend:
25535+ pax_force_retaddr
25536 retq
25537 CFI_ENDPROC
25538 ENDPROC(memcpy)
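
Besides gaining pax_force_retaddr at every exit, the memcpy hunks swap %r10 for %rcx as the third scratch register; the same substitution recurs in memmove_64.S, memset_64.S and msr-reg.S below, presumably to keep %r10 free for PaX's other instrumentation. The copy algorithm itself, a 4x8-byte unrolled loop, is untouched:

    #include <stddef.h>
    #include <stdint.h>

    /* C rendering of .Lcopy_forward_loop above; which register carries
     * the third quadword (%r10 before, %rcx after) is immaterial here */
    static void copy_forward(uint64_t *dst, const uint64_t *src, size_t qwords)
    {
            while (qwords >= 4) {
                    uint64_t a = src[0], b = src[1];
                    uint64_t c = src[2], d = src[3];
                    dst[0] = a; dst[1] = b;
                    dst[2] = c; dst[3] = d;
                    src += 4; dst += 4; qwords -= 4;
            }
    }
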
25539diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
25540index ee16461..c39c199 100644
25541--- a/arch/x86/lib/memmove_64.S
25542+++ b/arch/x86/lib/memmove_64.S
25543@@ -61,13 +61,13 @@ ENTRY(memmove)
25544 5:
25545 sub $0x20, %rdx
25546 movq 0*8(%rsi), %r11
25547- movq 1*8(%rsi), %r10
25548+ movq 1*8(%rsi), %rcx
25549 movq 2*8(%rsi), %r9
25550 movq 3*8(%rsi), %r8
25551 leaq 4*8(%rsi), %rsi
25552
25553 movq %r11, 0*8(%rdi)
25554- movq %r10, 1*8(%rdi)
25555+ movq %rcx, 1*8(%rdi)
25556 movq %r9, 2*8(%rdi)
25557 movq %r8, 3*8(%rdi)
25558 leaq 4*8(%rdi), %rdi
25559@@ -81,10 +81,10 @@ ENTRY(memmove)
25560 4:
25561 movq %rdx, %rcx
25562 movq -8(%rsi, %rdx), %r11
25563- lea -8(%rdi, %rdx), %r10
25564+ lea -8(%rdi, %rdx), %r9
25565 shrq $3, %rcx
25566 rep movsq
25567- movq %r11, (%r10)
25568+ movq %r11, (%r9)
25569 jmp 13f
25570 .Lmemmove_end_forward:
25571
25572@@ -95,14 +95,14 @@ ENTRY(memmove)
25573 7:
25574 movq %rdx, %rcx
25575 movq (%rsi), %r11
25576- movq %rdi, %r10
25577+ movq %rdi, %r9
25578 leaq -8(%rsi, %rdx), %rsi
25579 leaq -8(%rdi, %rdx), %rdi
25580 shrq $3, %rcx
25581 std
25582 rep movsq
25583 cld
25584- movq %r11, (%r10)
25585+ movq %r11, (%r9)
25586 jmp 13f
25587
25588 /*
25589@@ -127,13 +127,13 @@ ENTRY(memmove)
25590 8:
25591 subq $0x20, %rdx
25592 movq -1*8(%rsi), %r11
25593- movq -2*8(%rsi), %r10
25594+ movq -2*8(%rsi), %rcx
25595 movq -3*8(%rsi), %r9
25596 movq -4*8(%rsi), %r8
25597 leaq -4*8(%rsi), %rsi
25598
25599 movq %r11, -1*8(%rdi)
25600- movq %r10, -2*8(%rdi)
25601+ movq %rcx, -2*8(%rdi)
25602 movq %r9, -3*8(%rdi)
25603 movq %r8, -4*8(%rdi)
25604 leaq -4*8(%rdi), %rdi
25605@@ -151,11 +151,11 @@ ENTRY(memmove)
25606 * Move data from 16 bytes to 31 bytes.
25607 */
25608 movq 0*8(%rsi), %r11
25609- movq 1*8(%rsi), %r10
25610+ movq 1*8(%rsi), %rcx
25611 movq -2*8(%rsi, %rdx), %r9
25612 movq -1*8(%rsi, %rdx), %r8
25613 movq %r11, 0*8(%rdi)
25614- movq %r10, 1*8(%rdi)
25615+ movq %rcx, 1*8(%rdi)
25616 movq %r9, -2*8(%rdi, %rdx)
25617 movq %r8, -1*8(%rdi, %rdx)
25618 jmp 13f
25619@@ -167,9 +167,9 @@ ENTRY(memmove)
25620 * Move data from 8 bytes to 15 bytes.
25621 */
25622 movq 0*8(%rsi), %r11
25623- movq -1*8(%rsi, %rdx), %r10
25624+ movq -1*8(%rsi, %rdx), %r9
25625 movq %r11, 0*8(%rdi)
25626- movq %r10, -1*8(%rdi, %rdx)
25627+ movq %r9, -1*8(%rdi, %rdx)
25628 jmp 13f
25629 10:
25630 cmpq $4, %rdx
25631@@ -178,9 +178,9 @@ ENTRY(memmove)
25632 * Move data from 4 bytes to 7 bytes.
25633 */
25634 movl (%rsi), %r11d
25635- movl -4(%rsi, %rdx), %r10d
25636+ movl -4(%rsi, %rdx), %r9d
25637 movl %r11d, (%rdi)
25638- movl %r10d, -4(%rdi, %rdx)
25639+ movl %r9d, -4(%rdi, %rdx)
25640 jmp 13f
25641 11:
25642 cmp $2, %rdx
25643@@ -189,9 +189,9 @@ ENTRY(memmove)
25644 * Move data from 2 bytes to 3 bytes.
25645 */
25646 movw (%rsi), %r11w
25647- movw -2(%rsi, %rdx), %r10w
25648+ movw -2(%rsi, %rdx), %r9w
25649 movw %r11w, (%rdi)
25650- movw %r10w, -2(%rdi, %rdx)
25651+ movw %r9w, -2(%rdi, %rdx)
25652 jmp 13f
25653 12:
25654 cmp $1, %rdx
25655@@ -202,6 +202,7 @@ ENTRY(memmove)
25656 movb (%rsi), %r11b
25657 movb %r11b, (%rdi)
25658 13:
25659+ pax_force_retaddr
25660 retq
25661 CFI_ENDPROC
25662
25663@@ -210,6 +211,7 @@ ENTRY(memmove)
25664 /* Forward moving data. */
25665 movq %rdx, %rcx
25666 rep movsb
25667+ pax_force_retaddr
25668 retq
25669 .Lmemmove_end_forward_efs:
25670 .previous
25671diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
25672index 2dcb380..963660a 100644
25673--- a/arch/x86/lib/memset_64.S
25674+++ b/arch/x86/lib/memset_64.S
25675@@ -30,6 +30,7 @@
25676 movl %edx,%ecx
25677 rep stosb
25678 movq %r9,%rax
25679+ pax_force_retaddr
25680 ret
25681 .Lmemset_e:
25682 .previous
25683@@ -52,6 +53,7 @@
25684 movq %rdx,%rcx
25685 rep stosb
25686 movq %r9,%rax
25687+ pax_force_retaddr
25688 ret
25689 .Lmemset_e_e:
25690 .previous
25691@@ -59,7 +61,7 @@
25692 ENTRY(memset)
25693 ENTRY(__memset)
25694 CFI_STARTPROC
25695- movq %rdi,%r10
25696+ movq %rdi,%r11
25697
25698 /* expand byte value */
25699 movzbl %sil,%ecx
25700@@ -117,7 +119,8 @@ ENTRY(__memset)
25701 jnz .Lloop_1
25702
25703 .Lende:
25704- movq %r10,%rax
25705+ movq %r11,%rax
25706+ pax_force_retaddr
25707 ret
25708
25709 CFI_RESTORE_STATE
25710diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
25711index c9f2d9b..e7fd2c0 100644
25712--- a/arch/x86/lib/mmx_32.c
25713+++ b/arch/x86/lib/mmx_32.c
25714@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
25715 {
25716 void *p;
25717 int i;
25718+ unsigned long cr0;
25719
25720 if (unlikely(in_interrupt()))
25721 return __memcpy(to, from, len);
25722@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
25723 kernel_fpu_begin();
25724
25725 __asm__ __volatile__ (
25726- "1: prefetch (%0)\n" /* This set is 28 bytes */
25727- " prefetch 64(%0)\n"
25728- " prefetch 128(%0)\n"
25729- " prefetch 192(%0)\n"
25730- " prefetch 256(%0)\n"
25731+ "1: prefetch (%1)\n" /* This set is 28 bytes */
25732+ " prefetch 64(%1)\n"
25733+ " prefetch 128(%1)\n"
25734+ " prefetch 192(%1)\n"
25735+ " prefetch 256(%1)\n"
25736 "2: \n"
25737 ".section .fixup, \"ax\"\n"
25738- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25739+ "3: \n"
25740+
25741+#ifdef CONFIG_PAX_KERNEXEC
25742+ " movl %%cr0, %0\n"
25743+ " movl %0, %%eax\n"
25744+ " andl $0xFFFEFFFF, %%eax\n"
25745+ " movl %%eax, %%cr0\n"
25746+#endif
25747+
25748+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25749+
25750+#ifdef CONFIG_PAX_KERNEXEC
25751+ " movl %0, %%cr0\n"
25752+#endif
25753+
25754 " jmp 2b\n"
25755 ".previous\n"
25756 _ASM_EXTABLE(1b, 3b)
25757- : : "r" (from));
25758+ : "=&r" (cr0) : "r" (from) : "ax");
25759
25760 for ( ; i > 5; i--) {
25761 __asm__ __volatile__ (
25762- "1: prefetch 320(%0)\n"
25763- "2: movq (%0), %%mm0\n"
25764- " movq 8(%0), %%mm1\n"
25765- " movq 16(%0), %%mm2\n"
25766- " movq 24(%0), %%mm3\n"
25767- " movq %%mm0, (%1)\n"
25768- " movq %%mm1, 8(%1)\n"
25769- " movq %%mm2, 16(%1)\n"
25770- " movq %%mm3, 24(%1)\n"
25771- " movq 32(%0), %%mm0\n"
25772- " movq 40(%0), %%mm1\n"
25773- " movq 48(%0), %%mm2\n"
25774- " movq 56(%0), %%mm3\n"
25775- " movq %%mm0, 32(%1)\n"
25776- " movq %%mm1, 40(%1)\n"
25777- " movq %%mm2, 48(%1)\n"
25778- " movq %%mm3, 56(%1)\n"
25779+ "1: prefetch 320(%1)\n"
25780+ "2: movq (%1), %%mm0\n"
25781+ " movq 8(%1), %%mm1\n"
25782+ " movq 16(%1), %%mm2\n"
25783+ " movq 24(%1), %%mm3\n"
25784+ " movq %%mm0, (%2)\n"
25785+ " movq %%mm1, 8(%2)\n"
25786+ " movq %%mm2, 16(%2)\n"
25787+ " movq %%mm3, 24(%2)\n"
25788+ " movq 32(%1), %%mm0\n"
25789+ " movq 40(%1), %%mm1\n"
25790+ " movq 48(%1), %%mm2\n"
25791+ " movq 56(%1), %%mm3\n"
25792+ " movq %%mm0, 32(%2)\n"
25793+ " movq %%mm1, 40(%2)\n"
25794+ " movq %%mm2, 48(%2)\n"
25795+ " movq %%mm3, 56(%2)\n"
25796 ".section .fixup, \"ax\"\n"
25797- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
25798+ "3:\n"
25799+
25800+#ifdef CONFIG_PAX_KERNEXEC
25801+ " movl %%cr0, %0\n"
25802+ " movl %0, %%eax\n"
25803+ " andl $0xFFFEFFFF, %%eax\n"
25804+ " movl %%eax, %%cr0\n"
25805+#endif
25806+
25807+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
25808+
25809+#ifdef CONFIG_PAX_KERNEXEC
25810+ " movl %0, %%cr0\n"
25811+#endif
25812+
25813 " jmp 2b\n"
25814 ".previous\n"
25815 _ASM_EXTABLE(1b, 3b)
25816- : : "r" (from), "r" (to) : "memory");
25817+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
25818
25819 from += 64;
25820 to += 64;
25821@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
25822 static void fast_copy_page(void *to, void *from)
25823 {
25824 int i;
25825+ unsigned long cr0;
25826
25827 kernel_fpu_begin();
25828
25829@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
25830 * but that is for later. -AV
25831 */
25832 __asm__ __volatile__(
25833- "1: prefetch (%0)\n"
25834- " prefetch 64(%0)\n"
25835- " prefetch 128(%0)\n"
25836- " prefetch 192(%0)\n"
25837- " prefetch 256(%0)\n"
25838+ "1: prefetch (%1)\n"
25839+ " prefetch 64(%1)\n"
25840+ " prefetch 128(%1)\n"
25841+ " prefetch 192(%1)\n"
25842+ " prefetch 256(%1)\n"
25843 "2: \n"
25844 ".section .fixup, \"ax\"\n"
25845- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25846+ "3: \n"
25847+
25848+#ifdef CONFIG_PAX_KERNEXEC
25849+ " movl %%cr0, %0\n"
25850+ " movl %0, %%eax\n"
25851+ " andl $0xFFFEFFFF, %%eax\n"
25852+ " movl %%eax, %%cr0\n"
25853+#endif
25854+
25855+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25856+
25857+#ifdef CONFIG_PAX_KERNEXEC
25858+ " movl %0, %%cr0\n"
25859+#endif
25860+
25861 " jmp 2b\n"
25862 ".previous\n"
25863- _ASM_EXTABLE(1b, 3b) : : "r" (from));
25864+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
25865
25866 for (i = 0; i < (4096-320)/64; i++) {
25867 __asm__ __volatile__ (
25868- "1: prefetch 320(%0)\n"
25869- "2: movq (%0), %%mm0\n"
25870- " movntq %%mm0, (%1)\n"
25871- " movq 8(%0), %%mm1\n"
25872- " movntq %%mm1, 8(%1)\n"
25873- " movq 16(%0), %%mm2\n"
25874- " movntq %%mm2, 16(%1)\n"
25875- " movq 24(%0), %%mm3\n"
25876- " movntq %%mm3, 24(%1)\n"
25877- " movq 32(%0), %%mm4\n"
25878- " movntq %%mm4, 32(%1)\n"
25879- " movq 40(%0), %%mm5\n"
25880- " movntq %%mm5, 40(%1)\n"
25881- " movq 48(%0), %%mm6\n"
25882- " movntq %%mm6, 48(%1)\n"
25883- " movq 56(%0), %%mm7\n"
25884- " movntq %%mm7, 56(%1)\n"
25885+ "1: prefetch 320(%1)\n"
25886+ "2: movq (%1), %%mm0\n"
25887+ " movntq %%mm0, (%2)\n"
25888+ " movq 8(%1), %%mm1\n"
25889+ " movntq %%mm1, 8(%2)\n"
25890+ " movq 16(%1), %%mm2\n"
25891+ " movntq %%mm2, 16(%2)\n"
25892+ " movq 24(%1), %%mm3\n"
25893+ " movntq %%mm3, 24(%2)\n"
25894+ " movq 32(%1), %%mm4\n"
25895+ " movntq %%mm4, 32(%2)\n"
25896+ " movq 40(%1), %%mm5\n"
25897+ " movntq %%mm5, 40(%2)\n"
25898+ " movq 48(%1), %%mm6\n"
25899+ " movntq %%mm6, 48(%2)\n"
25900+ " movq 56(%1), %%mm7\n"
25901+ " movntq %%mm7, 56(%2)\n"
25902 ".section .fixup, \"ax\"\n"
25903- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
25904+ "3:\n"
25905+
25906+#ifdef CONFIG_PAX_KERNEXEC
25907+ " movl %%cr0, %0\n"
25908+ " movl %0, %%eax\n"
25909+ " andl $0xFFFEFFFF, %%eax\n"
25910+ " movl %%eax, %%cr0\n"
25911+#endif
25912+
25913+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
25914+
25915+#ifdef CONFIG_PAX_KERNEXEC
25916+ " movl %0, %%cr0\n"
25917+#endif
25918+
25919 " jmp 2b\n"
25920 ".previous\n"
25921- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
25922+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
25923
25924 from += 64;
25925 to += 64;
25926@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
25927 static void fast_copy_page(void *to, void *from)
25928 {
25929 int i;
25930+ unsigned long cr0;
25931
25932 kernel_fpu_begin();
25933
25934 __asm__ __volatile__ (
25935- "1: prefetch (%0)\n"
25936- " prefetch 64(%0)\n"
25937- " prefetch 128(%0)\n"
25938- " prefetch 192(%0)\n"
25939- " prefetch 256(%0)\n"
25940+ "1: prefetch (%1)\n"
25941+ " prefetch 64(%1)\n"
25942+ " prefetch 128(%1)\n"
25943+ " prefetch 192(%1)\n"
25944+ " prefetch 256(%1)\n"
25945 "2: \n"
25946 ".section .fixup, \"ax\"\n"
25947- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25948+ "3: \n"
25949+
25950+#ifdef CONFIG_PAX_KERNEXEC
25951+ " movl %%cr0, %0\n"
25952+ " movl %0, %%eax\n"
25953+ " andl $0xFFFEFFFF, %%eax\n"
25954+ " movl %%eax, %%cr0\n"
25955+#endif
25956+
25957+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25958+
25959+#ifdef CONFIG_PAX_KERNEXEC
25960+ " movl %0, %%cr0\n"
25961+#endif
25962+
25963 " jmp 2b\n"
25964 ".previous\n"
25965- _ASM_EXTABLE(1b, 3b) : : "r" (from));
25966+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
25967
25968 for (i = 0; i < 4096/64; i++) {
25969 __asm__ __volatile__ (
25970- "1: prefetch 320(%0)\n"
25971- "2: movq (%0), %%mm0\n"
25972- " movq 8(%0), %%mm1\n"
25973- " movq 16(%0), %%mm2\n"
25974- " movq 24(%0), %%mm3\n"
25975- " movq %%mm0, (%1)\n"
25976- " movq %%mm1, 8(%1)\n"
25977- " movq %%mm2, 16(%1)\n"
25978- " movq %%mm3, 24(%1)\n"
25979- " movq 32(%0), %%mm0\n"
25980- " movq 40(%0), %%mm1\n"
25981- " movq 48(%0), %%mm2\n"
25982- " movq 56(%0), %%mm3\n"
25983- " movq %%mm0, 32(%1)\n"
25984- " movq %%mm1, 40(%1)\n"
25985- " movq %%mm2, 48(%1)\n"
25986- " movq %%mm3, 56(%1)\n"
25987+ "1: prefetch 320(%1)\n"
25988+ "2: movq (%1), %%mm0\n"
25989+ " movq 8(%1), %%mm1\n"
25990+ " movq 16(%1), %%mm2\n"
25991+ " movq 24(%1), %%mm3\n"
25992+ " movq %%mm0, (%2)\n"
25993+ " movq %%mm1, 8(%2)\n"
25994+ " movq %%mm2, 16(%2)\n"
25995+ " movq %%mm3, 24(%2)\n"
25996+ " movq 32(%1), %%mm0\n"
25997+ " movq 40(%1), %%mm1\n"
25998+ " movq 48(%1), %%mm2\n"
25999+ " movq 56(%1), %%mm3\n"
26000+ " movq %%mm0, 32(%2)\n"
26001+ " movq %%mm1, 40(%2)\n"
26002+ " movq %%mm2, 48(%2)\n"
26003+ " movq %%mm3, 56(%2)\n"
26004 ".section .fixup, \"ax\"\n"
26005- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26006+ "3:\n"
26007+
26008+#ifdef CONFIG_PAX_KERNEXEC
26009+ " movl %%cr0, %0\n"
26010+ " movl %0, %%eax\n"
26011+ " andl $0xFFFEFFFF, %%eax\n"
26012+ " movl %%eax, %%cr0\n"
26013+#endif
26014+
26015+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26016+
26017+#ifdef CONFIG_PAX_KERNEXEC
26018+ " movl %0, %%cr0\n"
26019+#endif
26020+
26021 " jmp 2b\n"
26022 ".previous\n"
26023 _ASM_EXTABLE(1b, 3b)
26024- : : "r" (from), "r" (to) : "memory");
26025+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
26026
26027 from += 64;
26028 to += 64;
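
Every mmx_32.c hunk applies one pattern. The fixup path patches a short jmp over the faulting prefetch at runtime, and under KERNEXEC kernel text is read-only, so the write is bracketed by a CR0.WP toggle: the 0xFFFEFFFF mask clears bit 16 (WP), the saved value restores it, and the asm gains an output operand for the saved cr0 plus an "ax" clobber for the scratch register. The pointer operands shift from %0/%1 to %1/%2 to make room for that output. The same bracket as a standalone helper (i386; assumes the caller cannot be preempted between the two CR0 writes, as holds in the fixup context):

    static inline void patch_ro_text_byte(unsigned char *ip, unsigned char val)
    {
            unsigned long cr0;

            asm volatile("movl %%cr0, %0" : "=r" (cr0));
            asm volatile("movl %0, %%cr0" :: "r" (cr0 & ~(1UL << 16))); /* WP off */
            *ip = val;  /* the real fixup stores a 2-byte jmp (movw $0x1AEB/$0x05EB) */
            asm volatile("movl %0, %%cr0" :: "r" (cr0));                /* WP on  */
    }
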
26029diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
26030index f6d13ee..aca5f0b 100644
26031--- a/arch/x86/lib/msr-reg.S
26032+++ b/arch/x86/lib/msr-reg.S
26033@@ -3,6 +3,7 @@
26034 #include <asm/dwarf2.h>
26035 #include <asm/asm.h>
26036 #include <asm/msr.h>
26037+#include <asm/alternative-asm.h>
26038
26039 #ifdef CONFIG_X86_64
26040 /*
26041@@ -16,7 +17,7 @@ ENTRY(\op\()_safe_regs)
26042 CFI_STARTPROC
26043 pushq_cfi %rbx
26044 pushq_cfi %rbp
26045- movq %rdi, %r10 /* Save pointer */
26046+ movq %rdi, %r9 /* Save pointer */
26047 xorl %r11d, %r11d /* Return value */
26048 movl (%rdi), %eax
26049 movl 4(%rdi), %ecx
26050@@ -27,16 +28,17 @@ ENTRY(\op\()_safe_regs)
26051 movl 28(%rdi), %edi
26052 CFI_REMEMBER_STATE
26053 1: \op
26054-2: movl %eax, (%r10)
26055+2: movl %eax, (%r9)
26056 movl %r11d, %eax /* Return value */
26057- movl %ecx, 4(%r10)
26058- movl %edx, 8(%r10)
26059- movl %ebx, 12(%r10)
26060- movl %ebp, 20(%r10)
26061- movl %esi, 24(%r10)
26062- movl %edi, 28(%r10)
26063+ movl %ecx, 4(%r9)
26064+ movl %edx, 8(%r9)
26065+ movl %ebx, 12(%r9)
26066+ movl %ebp, 20(%r9)
26067+ movl %esi, 24(%r9)
26068+ movl %edi, 28(%r9)
26069 popq_cfi %rbp
26070 popq_cfi %rbx
26071+ pax_force_retaddr
26072 ret
26073 3:
26074 CFI_RESTORE_STATE
26075diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
26076index fc6ba17..04471c5 100644
26077--- a/arch/x86/lib/putuser.S
26078+++ b/arch/x86/lib/putuser.S
26079@@ -16,7 +16,9 @@
26080 #include <asm/errno.h>
26081 #include <asm/asm.h>
26082 #include <asm/smap.h>
26083-
26084+#include <asm/segment.h>
26085+#include <asm/pgtable.h>
26086+#include <asm/alternative-asm.h>
26087
26088 /*
26089 * __put_user_X
26090@@ -30,57 +32,125 @@
26091 * as they get called from within inline assembly.
26092 */
26093
26094-#define ENTER CFI_STARTPROC ; \
26095- GET_THREAD_INFO(%_ASM_BX)
26096-#define EXIT ASM_CLAC ; \
26097- ret ; \
26098+#define ENTER CFI_STARTPROC
26099+#define EXIT ASM_CLAC ; \
26100+ pax_force_retaddr ; \
26101+ ret ; \
26102 CFI_ENDPROC
26103
26104+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26105+#define _DEST %_ASM_CX,%_ASM_BX
26106+#else
26107+#define _DEST %_ASM_CX
26108+#endif
26109+
26110+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
26111+#define __copyuser_seg gs;
26112+#else
26113+#define __copyuser_seg
26114+#endif
26115+
26116 .text
26117 ENTRY(__put_user_1)
26118 ENTER
26119+
26120+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26121+ GET_THREAD_INFO(%_ASM_BX)
26122 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
26123 jae bad_put_user
26124 ASM_STAC
26125-1: movb %al,(%_ASM_CX)
26126+
26127+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26128+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
26129+ cmp %_ASM_BX,%_ASM_CX
26130+ jb 1234f
26131+ xor %ebx,%ebx
26132+1234:
26133+#endif
26134+
26135+#endif
26136+
26137+1: __copyuser_seg movb %al,(_DEST)
26138 xor %eax,%eax
26139 EXIT
26140 ENDPROC(__put_user_1)
26141
26142 ENTRY(__put_user_2)
26143 ENTER
26144+
26145+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26146+ GET_THREAD_INFO(%_ASM_BX)
26147 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
26148 sub $1,%_ASM_BX
26149 cmp %_ASM_BX,%_ASM_CX
26150 jae bad_put_user
26151 ASM_STAC
26152-2: movw %ax,(%_ASM_CX)
26153+
26154+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26155+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
26156+ cmp %_ASM_BX,%_ASM_CX
26157+ jb 1234f
26158+ xor %ebx,%ebx
26159+1234:
26160+#endif
26161+
26162+#endif
26163+
26164+2: __copyuser_seg movw %ax,(_DEST)
26165 xor %eax,%eax
26166 EXIT
26167 ENDPROC(__put_user_2)
26168
26169 ENTRY(__put_user_4)
26170 ENTER
26171+
26172+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26173+ GET_THREAD_INFO(%_ASM_BX)
26174 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
26175 sub $3,%_ASM_BX
26176 cmp %_ASM_BX,%_ASM_CX
26177 jae bad_put_user
26178 ASM_STAC
26179-3: movl %eax,(%_ASM_CX)
26180+
26181+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26182+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
26183+ cmp %_ASM_BX,%_ASM_CX
26184+ jb 1234f
26185+ xor %ebx,%ebx
26186+1234:
26187+#endif
26188+
26189+#endif
26190+
26191+3: __copyuser_seg movl %eax,(_DEST)
26192 xor %eax,%eax
26193 EXIT
26194 ENDPROC(__put_user_4)
26195
26196 ENTRY(__put_user_8)
26197 ENTER
26198+
26199+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26200+ GET_THREAD_INFO(%_ASM_BX)
26201 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
26202 sub $7,%_ASM_BX
26203 cmp %_ASM_BX,%_ASM_CX
26204 jae bad_put_user
26205 ASM_STAC
26206-4: mov %_ASM_AX,(%_ASM_CX)
26207+
26208+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26209+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
26210+ cmp %_ASM_BX,%_ASM_CX
26211+ jb 1234f
26212+ xor %ebx,%ebx
26213+1234:
26214+#endif
26215+
26216+#endif
26217+
26218+4: __copyuser_seg mov %_ASM_AX,(_DEST)
26219 #ifdef CONFIG_X86_32
26220-5: movl %edx,4(%_ASM_CX)
26221+5: __copyuser_seg movl %edx,4(_DEST)
26222 #endif
26223 xor %eax,%eax
26224 EXIT
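
putuser.S mirrors getuser.S with one twist: rather than rewriting the pointer in %_ASM_CX, the amd64 UDEREF path keeps the shadow offset in %_ASM_BX, zeroes it when the address is already at or above the shadow base, and folds it in through the two-register _DEST addressing mode. In C terms:

    #include <stdint.h>

    #define PAX_USER_SHADOW_BASE (1UL << 46)   /* assumed example value */

    static void put_user_byte(uintptr_t uaddr, unsigned char val)
    {
            uintptr_t off = PAX_USER_SHADOW_BASE;

            if (uaddr >= PAX_USER_SHADOW_BASE)  /* already shadow/kernel-side */
                    off = 0;                    /* the "xor %ebx,%ebx" case */
            *(volatile unsigned char *)(uaddr + off) = val; /* movb %al,(_DEST) */
    }
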
26225diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
26226index 1cad221..de671ee 100644
26227--- a/arch/x86/lib/rwlock.S
26228+++ b/arch/x86/lib/rwlock.S
26229@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
26230 FRAME
26231 0: LOCK_PREFIX
26232 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
26233+
26234+#ifdef CONFIG_PAX_REFCOUNT
26235+ jno 1234f
26236+ LOCK_PREFIX
26237+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
26238+ int $4
26239+1234:
26240+ _ASM_EXTABLE(1234b, 1234b)
26241+#endif
26242+
26243 1: rep; nop
26244 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
26245 jne 1b
26246 LOCK_PREFIX
26247 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
26248+
26249+#ifdef CONFIG_PAX_REFCOUNT
26250+ jno 1234f
26251+ LOCK_PREFIX
26252+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
26253+ int $4
26254+1234:
26255+ _ASM_EXTABLE(1234b, 1234b)
26256+#endif
26257+
26258 jnz 0b
26259 ENDFRAME
26260+ pax_force_retaddr
26261 ret
26262 CFI_ENDPROC
26263 END(__write_lock_failed)
26264@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
26265 FRAME
26266 0: LOCK_PREFIX
26267 READ_LOCK_SIZE(inc) (%__lock_ptr)
26268+
26269+#ifdef CONFIG_PAX_REFCOUNT
26270+ jno 1234f
26271+ LOCK_PREFIX
26272+ READ_LOCK_SIZE(dec) (%__lock_ptr)
26273+ int $4
26274+1234:
26275+ _ASM_EXTABLE(1234b, 1234b)
26276+#endif
26277+
26278 1: rep; nop
26279 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
26280 js 1b
26281 LOCK_PREFIX
26282 READ_LOCK_SIZE(dec) (%__lock_ptr)
26283+
26284+#ifdef CONFIG_PAX_REFCOUNT
26285+ jno 1234f
26286+ LOCK_PREFIX
26287+ READ_LOCK_SIZE(inc) (%__lock_ptr)
26288+ int $4
26289+1234:
26290+ _ASM_EXTABLE(1234b, 1234b)
26291+#endif
26292+
26293 js 0b
26294 ENDFRAME
26295+ pax_force_retaddr
26296 ret
26297 CFI_ENDPROC
26298 END(__read_lock_failed)
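
The PAX_REFCOUNT hunks bracket each locked add/sub/inc/dec of the lock word: jno falls through when the signed result did not overflow; otherwise the operation is undone and int $4 raises the x86 overflow exception, whose self-referencing extable entry lets execution continue once PaX's #OF handler has reported the wrap. The same detect-undo-trap shape, as a non-atomic C analogue with abort() standing in for the trap:

    #include <stdlib.h>

    static void write_lock_add_checked(int *count, int bias)
    {
            int next;

            if (__builtin_add_overflow(*count, bias, &next)) {
                    /* the asm must first undo the add it already made;
                     * here *count was simply never updated */
                    abort();    /* stands in for "int $4" */
            }
            *count = next;
    }
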
26299diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
26300index 5dff5f0..cadebf4 100644
26301--- a/arch/x86/lib/rwsem.S
26302+++ b/arch/x86/lib/rwsem.S
26303@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
26304 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
26305 CFI_RESTORE __ASM_REG(dx)
26306 restore_common_regs
26307+ pax_force_retaddr
26308 ret
26309 CFI_ENDPROC
26310 ENDPROC(call_rwsem_down_read_failed)
26311@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
26312 movq %rax,%rdi
26313 call rwsem_down_write_failed
26314 restore_common_regs
26315+ pax_force_retaddr
26316 ret
26317 CFI_ENDPROC
26318 ENDPROC(call_rwsem_down_write_failed)
26319@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
26320 movq %rax,%rdi
26321 call rwsem_wake
26322 restore_common_regs
26323-1: ret
26324+1: pax_force_retaddr
26325+ ret
26326 CFI_ENDPROC
26327 ENDPROC(call_rwsem_wake)
26328
26329@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
26330 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
26331 CFI_RESTORE __ASM_REG(dx)
26332 restore_common_regs
26333+ pax_force_retaddr
26334 ret
26335 CFI_ENDPROC
26336 ENDPROC(call_rwsem_downgrade_wake)
26337diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
26338index a63efd6..ccecad8 100644
26339--- a/arch/x86/lib/thunk_64.S
26340+++ b/arch/x86/lib/thunk_64.S
26341@@ -8,6 +8,7 @@
26342 #include <linux/linkage.h>
26343 #include <asm/dwarf2.h>
26344 #include <asm/calling.h>
26345+#include <asm/alternative-asm.h>
26346
26347 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
26348 .macro THUNK name, func, put_ret_addr_in_rdi=0
26349@@ -41,5 +42,6 @@
26350 SAVE_ARGS
26351 restore:
26352 RESTORE_ARGS
26353+ pax_force_retaddr
26354 ret
26355 CFI_ENDPROC
26356diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
26357index f0312d7..9c39d63 100644
26358--- a/arch/x86/lib/usercopy_32.c
26359+++ b/arch/x86/lib/usercopy_32.c
26360@@ -42,11 +42,13 @@ do { \
26361 int __d0; \
26362 might_fault(); \
26363 __asm__ __volatile__( \
26364+ __COPYUSER_SET_ES \
26365 ASM_STAC "\n" \
26366 "0: rep; stosl\n" \
26367 " movl %2,%0\n" \
26368 "1: rep; stosb\n" \
26369 "2: " ASM_CLAC "\n" \
26370+ __COPYUSER_RESTORE_ES \
26371 ".section .fixup,\"ax\"\n" \
26372 "3: lea 0(%2,%0,4),%0\n" \
26373 " jmp 2b\n" \
26374@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
26375
26376 #ifdef CONFIG_X86_INTEL_USERCOPY
26377 static unsigned long
26378-__copy_user_intel(void __user *to, const void *from, unsigned long size)
26379+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
26380 {
26381 int d0, d1;
26382 __asm__ __volatile__(
26383@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
26384 " .align 2,0x90\n"
26385 "3: movl 0(%4), %%eax\n"
26386 "4: movl 4(%4), %%edx\n"
26387- "5: movl %%eax, 0(%3)\n"
26388- "6: movl %%edx, 4(%3)\n"
26389+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
26390+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
26391 "7: movl 8(%4), %%eax\n"
26392 "8: movl 12(%4),%%edx\n"
26393- "9: movl %%eax, 8(%3)\n"
26394- "10: movl %%edx, 12(%3)\n"
26395+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
26396+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
26397 "11: movl 16(%4), %%eax\n"
26398 "12: movl 20(%4), %%edx\n"
26399- "13: movl %%eax, 16(%3)\n"
26400- "14: movl %%edx, 20(%3)\n"
26401+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
26402+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
26403 "15: movl 24(%4), %%eax\n"
26404 "16: movl 28(%4), %%edx\n"
26405- "17: movl %%eax, 24(%3)\n"
26406- "18: movl %%edx, 28(%3)\n"
26407+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
26408+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
26409 "19: movl 32(%4), %%eax\n"
26410 "20: movl 36(%4), %%edx\n"
26411- "21: movl %%eax, 32(%3)\n"
26412- "22: movl %%edx, 36(%3)\n"
26413+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
26414+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
26415 "23: movl 40(%4), %%eax\n"
26416 "24: movl 44(%4), %%edx\n"
26417- "25: movl %%eax, 40(%3)\n"
26418- "26: movl %%edx, 44(%3)\n"
26419+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
26420+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
26421 "27: movl 48(%4), %%eax\n"
26422 "28: movl 52(%4), %%edx\n"
26423- "29: movl %%eax, 48(%3)\n"
26424- "30: movl %%edx, 52(%3)\n"
26425+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
26426+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
26427 "31: movl 56(%4), %%eax\n"
26428 "32: movl 60(%4), %%edx\n"
26429- "33: movl %%eax, 56(%3)\n"
26430- "34: movl %%edx, 60(%3)\n"
26431+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
26432+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
26433 " addl $-64, %0\n"
26434 " addl $64, %4\n"
26435 " addl $64, %3\n"
26436@@ -149,10 +151,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
26437 " shrl $2, %0\n"
26438 " andl $3, %%eax\n"
26439 " cld\n"
26440+ __COPYUSER_SET_ES
26441 "99: rep; movsl\n"
26442 "36: movl %%eax, %0\n"
26443 "37: rep; movsb\n"
26444 "100:\n"
26445+ __COPYUSER_RESTORE_ES
26446 ".section .fixup,\"ax\"\n"
26447 "101: lea 0(%%eax,%0,4),%0\n"
26448 " jmp 100b\n"
26449@@ -202,46 +206,150 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
26450 }
26451
26452 static unsigned long
26453+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
26454+{
26455+ int d0, d1;
26456+ __asm__ __volatile__(
26457+ " .align 2,0x90\n"
26458+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
26459+ " cmpl $67, %0\n"
26460+ " jbe 3f\n"
26461+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
26462+ " .align 2,0x90\n"
26463+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
26464+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
26465+ "5: movl %%eax, 0(%3)\n"
26466+ "6: movl %%edx, 4(%3)\n"
26467+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
26468+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
26469+ "9: movl %%eax, 8(%3)\n"
26470+ "10: movl %%edx, 12(%3)\n"
26471+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
26472+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
26473+ "13: movl %%eax, 16(%3)\n"
26474+ "14: movl %%edx, 20(%3)\n"
26475+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
26476+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
26477+ "17: movl %%eax, 24(%3)\n"
26478+ "18: movl %%edx, 28(%3)\n"
26479+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
26480+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
26481+ "21: movl %%eax, 32(%3)\n"
26482+ "22: movl %%edx, 36(%3)\n"
26483+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
26484+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
26485+ "25: movl %%eax, 40(%3)\n"
26486+ "26: movl %%edx, 44(%3)\n"
26487+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
26488+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
26489+ "29: movl %%eax, 48(%3)\n"
26490+ "30: movl %%edx, 52(%3)\n"
26491+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
26492+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
26493+ "33: movl %%eax, 56(%3)\n"
26494+ "34: movl %%edx, 60(%3)\n"
26495+ " addl $-64, %0\n"
26496+ " addl $64, %4\n"
26497+ " addl $64, %3\n"
26498+ " cmpl $63, %0\n"
26499+ " ja 1b\n"
26500+ "35: movl %0, %%eax\n"
26501+ " shrl $2, %0\n"
26502+ " andl $3, %%eax\n"
26503+ " cld\n"
26504+ "99: rep; "__copyuser_seg" movsl\n"
26505+ "36: movl %%eax, %0\n"
26506+ "37: rep; "__copyuser_seg" movsb\n"
26507+ "100:\n"
26508+ ".section .fixup,\"ax\"\n"
26509+ "101: lea 0(%%eax,%0,4),%0\n"
26510+ " jmp 100b\n"
26511+ ".previous\n"
26512+ _ASM_EXTABLE(1b,100b)
26513+ _ASM_EXTABLE(2b,100b)
26514+ _ASM_EXTABLE(3b,100b)
26515+ _ASM_EXTABLE(4b,100b)
26516+ _ASM_EXTABLE(5b,100b)
26517+ _ASM_EXTABLE(6b,100b)
26518+ _ASM_EXTABLE(7b,100b)
26519+ _ASM_EXTABLE(8b,100b)
26520+ _ASM_EXTABLE(9b,100b)
26521+ _ASM_EXTABLE(10b,100b)
26522+ _ASM_EXTABLE(11b,100b)
26523+ _ASM_EXTABLE(12b,100b)
26524+ _ASM_EXTABLE(13b,100b)
26525+ _ASM_EXTABLE(14b,100b)
26526+ _ASM_EXTABLE(15b,100b)
26527+ _ASM_EXTABLE(16b,100b)
26528+ _ASM_EXTABLE(17b,100b)
26529+ _ASM_EXTABLE(18b,100b)
26530+ _ASM_EXTABLE(19b,100b)
26531+ _ASM_EXTABLE(20b,100b)
26532+ _ASM_EXTABLE(21b,100b)
26533+ _ASM_EXTABLE(22b,100b)
26534+ _ASM_EXTABLE(23b,100b)
26535+ _ASM_EXTABLE(24b,100b)
26536+ _ASM_EXTABLE(25b,100b)
26537+ _ASM_EXTABLE(26b,100b)
26538+ _ASM_EXTABLE(27b,100b)
26539+ _ASM_EXTABLE(28b,100b)
26540+ _ASM_EXTABLE(29b,100b)
26541+ _ASM_EXTABLE(30b,100b)
26542+ _ASM_EXTABLE(31b,100b)
26543+ _ASM_EXTABLE(32b,100b)
26544+ _ASM_EXTABLE(33b,100b)
26545+ _ASM_EXTABLE(34b,100b)
26546+ _ASM_EXTABLE(35b,100b)
26547+ _ASM_EXTABLE(36b,100b)
26548+ _ASM_EXTABLE(37b,100b)
26549+ _ASM_EXTABLE(99b,101b)
26550+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
26551+ : "1"(to), "2"(from), "0"(size)
26552+ : "eax", "edx", "memory");
26553+ return size;
26554+}
26555+
26556+static unsigned long __size_overflow(3)
26557 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
26558 {
26559 int d0, d1;
26560 __asm__ __volatile__(
26561 " .align 2,0x90\n"
26562- "0: movl 32(%4), %%eax\n"
26563+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
26564 " cmpl $67, %0\n"
26565 " jbe 2f\n"
26566- "1: movl 64(%4), %%eax\n"
26567+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
26568 " .align 2,0x90\n"
26569- "2: movl 0(%4), %%eax\n"
26570- "21: movl 4(%4), %%edx\n"
26571+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
26572+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
26573 " movl %%eax, 0(%3)\n"
26574 " movl %%edx, 4(%3)\n"
26575- "3: movl 8(%4), %%eax\n"
26576- "31: movl 12(%4),%%edx\n"
26577+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
26578+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
26579 " movl %%eax, 8(%3)\n"
26580 " movl %%edx, 12(%3)\n"
26581- "4: movl 16(%4), %%eax\n"
26582- "41: movl 20(%4), %%edx\n"
26583+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
26584+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
26585 " movl %%eax, 16(%3)\n"
26586 " movl %%edx, 20(%3)\n"
26587- "10: movl 24(%4), %%eax\n"
26588- "51: movl 28(%4), %%edx\n"
26589+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
26590+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
26591 " movl %%eax, 24(%3)\n"
26592 " movl %%edx, 28(%3)\n"
26593- "11: movl 32(%4), %%eax\n"
26594- "61: movl 36(%4), %%edx\n"
26595+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
26596+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
26597 " movl %%eax, 32(%3)\n"
26598 " movl %%edx, 36(%3)\n"
26599- "12: movl 40(%4), %%eax\n"
26600- "71: movl 44(%4), %%edx\n"
26601+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
26602+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
26603 " movl %%eax, 40(%3)\n"
26604 " movl %%edx, 44(%3)\n"
26605- "13: movl 48(%4), %%eax\n"
26606- "81: movl 52(%4), %%edx\n"
26607+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
26608+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
26609 " movl %%eax, 48(%3)\n"
26610 " movl %%edx, 52(%3)\n"
26611- "14: movl 56(%4), %%eax\n"
26612- "91: movl 60(%4), %%edx\n"
26613+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
26614+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
26615 " movl %%eax, 56(%3)\n"
26616 " movl %%edx, 60(%3)\n"
26617 " addl $-64, %0\n"
26618@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
26619 " shrl $2, %0\n"
26620 " andl $3, %%eax\n"
26621 " cld\n"
26622- "6: rep; movsl\n"
26623+ "6: rep; "__copyuser_seg" movsl\n"
26624 " movl %%eax,%0\n"
26625- "7: rep; movsb\n"
26626+ "7: rep; "__copyuser_seg" movsb\n"
26627 "8:\n"
26628 ".section .fixup,\"ax\"\n"
26629 "9: lea 0(%%eax,%0,4),%0\n"
26630@@ -298,48 +406,48 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
26631 * hyoshiok@miraclelinux.com
26632 */
26633
26634-static unsigned long __copy_user_zeroing_intel_nocache(void *to,
26635+static unsigned long __size_overflow(3) __copy_user_zeroing_intel_nocache(void *to,
26636 const void __user *from, unsigned long size)
26637 {
26638 int d0, d1;
26639
26640 __asm__ __volatile__(
26641 " .align 2,0x90\n"
26642- "0: movl 32(%4), %%eax\n"
26643+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
26644 " cmpl $67, %0\n"
26645 " jbe 2f\n"
26646- "1: movl 64(%4), %%eax\n"
26647+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
26648 " .align 2,0x90\n"
26649- "2: movl 0(%4), %%eax\n"
26650- "21: movl 4(%4), %%edx\n"
26651+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
26652+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
26653 " movnti %%eax, 0(%3)\n"
26654 " movnti %%edx, 4(%3)\n"
26655- "3: movl 8(%4), %%eax\n"
26656- "31: movl 12(%4),%%edx\n"
26657+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
26658+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
26659 " movnti %%eax, 8(%3)\n"
26660 " movnti %%edx, 12(%3)\n"
26661- "4: movl 16(%4), %%eax\n"
26662- "41: movl 20(%4), %%edx\n"
26663+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
26664+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
26665 " movnti %%eax, 16(%3)\n"
26666 " movnti %%edx, 20(%3)\n"
26667- "10: movl 24(%4), %%eax\n"
26668- "51: movl 28(%4), %%edx\n"
26669+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
26670+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
26671 " movnti %%eax, 24(%3)\n"
26672 " movnti %%edx, 28(%3)\n"
26673- "11: movl 32(%4), %%eax\n"
26674- "61: movl 36(%4), %%edx\n"
26675+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
26676+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
26677 " movnti %%eax, 32(%3)\n"
26678 " movnti %%edx, 36(%3)\n"
26679- "12: movl 40(%4), %%eax\n"
26680- "71: movl 44(%4), %%edx\n"
26681+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
26682+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
26683 " movnti %%eax, 40(%3)\n"
26684 " movnti %%edx, 44(%3)\n"
26685- "13: movl 48(%4), %%eax\n"
26686- "81: movl 52(%4), %%edx\n"
26687+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
26688+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
26689 " movnti %%eax, 48(%3)\n"
26690 " movnti %%edx, 52(%3)\n"
26691- "14: movl 56(%4), %%eax\n"
26692- "91: movl 60(%4), %%edx\n"
26693+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
26694+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
26695 " movnti %%eax, 56(%3)\n"
26696 " movnti %%edx, 60(%3)\n"
26697 " addl $-64, %0\n"
26698@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
26699 " shrl $2, %0\n"
26700 " andl $3, %%eax\n"
26701 " cld\n"
26702- "6: rep; movsl\n"
26703+ "6: rep; "__copyuser_seg" movsl\n"
26704 " movl %%eax,%0\n"
26705- "7: rep; movsb\n"
26706+ "7: rep; "__copyuser_seg" movsb\n"
26707 "8:\n"
26708 ".section .fixup,\"ax\"\n"
26709 "9: lea 0(%%eax,%0,4),%0\n"
26710@@ -392,48 +500,48 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
26711 return size;
26712 }
26713
26714-static unsigned long __copy_user_intel_nocache(void *to,
26715+static unsigned long __size_overflow(3) __copy_user_intel_nocache(void *to,
26716 const void __user *from, unsigned long size)
26717 {
26718 int d0, d1;
26719
26720 __asm__ __volatile__(
26721 " .align 2,0x90\n"
26722- "0: movl 32(%4), %%eax\n"
26723+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
26724 " cmpl $67, %0\n"
26725 " jbe 2f\n"
26726- "1: movl 64(%4), %%eax\n"
26727+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
26728 " .align 2,0x90\n"
26729- "2: movl 0(%4), %%eax\n"
26730- "21: movl 4(%4), %%edx\n"
26731+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
26732+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
26733 " movnti %%eax, 0(%3)\n"
26734 " movnti %%edx, 4(%3)\n"
26735- "3: movl 8(%4), %%eax\n"
26736- "31: movl 12(%4),%%edx\n"
26737+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
26738+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
26739 " movnti %%eax, 8(%3)\n"
26740 " movnti %%edx, 12(%3)\n"
26741- "4: movl 16(%4), %%eax\n"
26742- "41: movl 20(%4), %%edx\n"
26743+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
26744+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
26745 " movnti %%eax, 16(%3)\n"
26746 " movnti %%edx, 20(%3)\n"
26747- "10: movl 24(%4), %%eax\n"
26748- "51: movl 28(%4), %%edx\n"
26749+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
26750+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
26751 " movnti %%eax, 24(%3)\n"
26752 " movnti %%edx, 28(%3)\n"
26753- "11: movl 32(%4), %%eax\n"
26754- "61: movl 36(%4), %%edx\n"
26755+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
26756+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
26757 " movnti %%eax, 32(%3)\n"
26758 " movnti %%edx, 36(%3)\n"
26759- "12: movl 40(%4), %%eax\n"
26760- "71: movl 44(%4), %%edx\n"
26761+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
26762+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
26763 " movnti %%eax, 40(%3)\n"
26764 " movnti %%edx, 44(%3)\n"
26765- "13: movl 48(%4), %%eax\n"
26766- "81: movl 52(%4), %%edx\n"
26767+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
26768+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
26769 " movnti %%eax, 48(%3)\n"
26770 " movnti %%edx, 52(%3)\n"
26771- "14: movl 56(%4), %%eax\n"
26772- "91: movl 60(%4), %%edx\n"
26773+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
26774+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
26775 " movnti %%eax, 56(%3)\n"
26776 " movnti %%edx, 60(%3)\n"
26777 " addl $-64, %0\n"
26778@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
26779 " shrl $2, %0\n"
26780 " andl $3, %%eax\n"
26781 " cld\n"
26782- "6: rep; movsl\n"
26783+ "6: rep; "__copyuser_seg" movsl\n"
26784 " movl %%eax,%0\n"
26785- "7: rep; movsb\n"
26786+ "7: rep; "__copyuser_seg" movsb\n"
26787 "8:\n"
26788 ".section .fixup,\"ax\"\n"
26789 "9: lea 0(%%eax,%0,4),%0\n"
26790@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
26791 */
26792 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
26793 unsigned long size);
26794-unsigned long __copy_user_intel(void __user *to, const void *from,
26795+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
26796+ unsigned long size);
26797+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
26798 unsigned long size);
26799 unsigned long __copy_user_zeroing_intel_nocache(void *to,
26800 const void __user *from, unsigned long size);
26801 #endif /* CONFIG_X86_INTEL_USERCOPY */
26802
26803 /* Generic arbitrary sized copy. */
26804-#define __copy_user(to, from, size) \
26805+#define __copy_user(to, from, size, prefix, set, restore) \
26806 do { \
26807 int __d0, __d1, __d2; \
26808 __asm__ __volatile__( \
26809+ set \
26810 " cmp $7,%0\n" \
26811 " jbe 1f\n" \
26812 " movl %1,%0\n" \
26813 " negl %0\n" \
26814 " andl $7,%0\n" \
26815 " subl %0,%3\n" \
26816- "4: rep; movsb\n" \
26817+ "4: rep; "prefix"movsb\n" \
26818 " movl %3,%0\n" \
26819 " shrl $2,%0\n" \
26820 " andl $3,%3\n" \
26821 " .align 2,0x90\n" \
26822- "0: rep; movsl\n" \
26823+ "0: rep; "prefix"movsl\n" \
26824 " movl %3,%0\n" \
26825- "1: rep; movsb\n" \
26826+ "1: rep; "prefix"movsb\n" \
26827 "2:\n" \
26828+ restore \
26829 ".section .fixup,\"ax\"\n" \
26830 "5: addl %3,%0\n" \
26831 " jmp 2b\n" \
26832@@ -538,14 +650,14 @@ do { \
26833 " negl %0\n" \
26834 " andl $7,%0\n" \
26835 " subl %0,%3\n" \
26836- "4: rep; movsb\n" \
26837+ "4: rep; "__copyuser_seg"movsb\n" \
26838 " movl %3,%0\n" \
26839 " shrl $2,%0\n" \
26840 " andl $3,%3\n" \
26841 " .align 2,0x90\n" \
26842- "0: rep; movsl\n" \
26843+ "0: rep; "__copyuser_seg"movsl\n" \
26844 " movl %3,%0\n" \
26845- "1: rep; movsb\n" \
26846+ "1: rep; "__copyuser_seg"movsb\n" \
26847 "2:\n" \
26848 ".section .fixup,\"ax\"\n" \
26849 "5: addl %3,%0\n" \
26850@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
26851 {
26852 stac();
26853 if (movsl_is_ok(to, from, n))
26854- __copy_user(to, from, n);
26855+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
26856 else
26857- n = __copy_user_intel(to, from, n);
26858+ n = __generic_copy_to_user_intel(to, from, n);
26859 clac();
26860 return n;
26861 }
26862@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
26863 {
26864 stac();
26865 if (movsl_is_ok(to, from, n))
26866- __copy_user(to, from, n);
26867+ __copy_user(to, from, n, __copyuser_seg, "", "");
26868 else
26869- n = __copy_user_intel((void __user *)to,
26870- (const void *)from, n);
26871+ n = __generic_copy_from_user_intel(to, from, n);
26872 clac();
26873 return n;
26874 }
26875@@ -632,66 +743,51 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
26876 if (n > 64 && cpu_has_xmm2)
26877 n = __copy_user_intel_nocache(to, from, n);
26878 else
26879- __copy_user(to, from, n);
26880+ __copy_user(to, from, n, __copyuser_seg, "", "");
26881 #else
26882- __copy_user(to, from, n);
26883+ __copy_user(to, from, n, __copyuser_seg, "", "");
26884 #endif
26885 clac();
26886 return n;
26887 }
26888 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
26889
26890-/**
26891- * copy_to_user: - Copy a block of data into user space.
26892- * @to: Destination address, in user space.
26893- * @from: Source address, in kernel space.
26894- * @n: Number of bytes to copy.
26895- *
26896- * Context: User context only. This function may sleep.
26897- *
26898- * Copy data from kernel space to user space.
26899- *
26900- * Returns number of bytes that could not be copied.
26901- * On success, this will be zero.
26902- */
26903-unsigned long
26904-copy_to_user(void __user *to, const void *from, unsigned long n)
26905-{
26906- if (access_ok(VERIFY_WRITE, to, n))
26907- n = __copy_to_user(to, from, n);
26908- return n;
26909-}
26910-EXPORT_SYMBOL(copy_to_user);
26911-
26912-/**
26913- * copy_from_user: - Copy a block of data from user space.
26914- * @to: Destination address, in kernel space.
26915- * @from: Source address, in user space.
26916- * @n: Number of bytes to copy.
26917- *
26918- * Context: User context only. This function may sleep.
26919- *
26920- * Copy data from user space to kernel space.
26921- *
26922- * Returns number of bytes that could not be copied.
26923- * On success, this will be zero.
26924- *
26925- * If some data could not be copied, this function will pad the copied
26926- * data to the requested size using zero bytes.
26927- */
26928-unsigned long
26929-_copy_from_user(void *to, const void __user *from, unsigned long n)
26930-{
26931- if (access_ok(VERIFY_READ, from, n))
26932- n = __copy_from_user(to, from, n);
26933- else
26934- memset(to, 0, n);
26935- return n;
26936-}
26937-EXPORT_SYMBOL(_copy_from_user);
26938-
26939 void copy_from_user_overflow(void)
26940 {
26941 WARN(1, "Buffer overflow detected!\n");
26942 }
26943 EXPORT_SYMBOL(copy_from_user_overflow);
26944+
26945+void copy_to_user_overflow(void)
26946+{
26947+ WARN(1, "Buffer overflow detected!\n");
26948+}
26949+EXPORT_SYMBOL(copy_to_user_overflow);
26950+
26951+#ifdef CONFIG_PAX_MEMORY_UDEREF
26952+void __set_fs(mm_segment_t x)
26953+{
26954+ switch (x.seg) {
26955+ case 0:
26956+ loadsegment(gs, 0);
26957+ break;
26958+ case TASK_SIZE_MAX:
26959+ loadsegment(gs, __USER_DS);
26960+ break;
26961+ case -1UL:
26962+ loadsegment(gs, __KERNEL_DS);
26963+ break;
26964+ default:
26965+ BUG();
26966+ }
26967+ return;
26968+}
26969+EXPORT_SYMBOL(__set_fs);
26970+
26971+void set_fs(mm_segment_t x)
26972+{
26973+ current_thread_info()->addr_limit = x;
26974+ __set_fs(x);
26975+}
26976+EXPORT_SYMBOL(set_fs);
26977+#endif
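
usercopy_32.c carries the i386 UDEREF core. Every instruction touching the user side of a copy gains the "__copyuser_seg" (%gs:) override; __copy_user_intel splits into __generic_copy_to_user_intel and a new __generic_copy_from_user_intel so only the correct operand is overridden; the zeroing/nocache helpers pick up the __size_overflow(3) plugin annotation; copy_to_user/_copy_from_user move out of this file, leaving the *_overflow() WARN stubs behind; and set_fs() now reloads %gs, so the user segment's limit, not a software addr_limit compare, decides what a "user" pointer may reach. The segment-mediated access, reduced to one byte:

    /* Sketch of what a __copyuser_seg-prefixed load amounts to: the
     * access goes through %gs, which set_fs(USER_DS) points at a
     * limit-checked userland segment, so a kernel address smuggled
     * in as a "user" pointer faults in hardware. i386 only. */
    static inline unsigned char read_user_byte(const unsigned char *uptr)
    {
            unsigned char v;

            asm volatile("movb %%gs:(%1), %0" : "=q" (v) : "r" (uptr));
            return v;
    }
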
26978diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
26979index 05928aa..b33dea1 100644
26980--- a/arch/x86/lib/usercopy_64.c
26981+++ b/arch/x86/lib/usercopy_64.c
26982@@ -39,7 +39,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
26983 _ASM_EXTABLE(0b,3b)
26984 _ASM_EXTABLE(1b,2b)
26985 : [size8] "=&c"(size), [dst] "=&D" (__d0)
26986- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
26987+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
26988 [zero] "r" (0UL), [eight] "r" (8UL));
26989 clac();
26990 return size;
26991@@ -54,12 +54,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
26992 }
26993 EXPORT_SYMBOL(clear_user);
26994
26995-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
26996+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
26997 {
26998- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
26999- return copy_user_generic((__force void *)to, (__force void *)from, len);
27000- }
27001- return len;
27002+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
27003+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
27004+ return len;
27005 }
27006 EXPORT_SYMBOL(copy_in_user);
27007
27008@@ -69,7 +68,7 @@ EXPORT_SYMBOL(copy_in_user);
27009 * it is not necessary to optimize tail handling.
27010 */
27011 unsigned long
27012-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
27013+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
27014 {
27015 char c;
27016 unsigned zero_len;
27017@@ -87,3 +86,15 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
27018 clac();
27019 return len;
27020 }
27021+
27022+void copy_from_user_overflow(void)
27023+{
27024+ WARN(1, "Buffer overflow detected!\n");
27025+}
27026+EXPORT_SYMBOL(copy_from_user_overflow);
27027+
27028+void copy_to_user_overflow(void)
27029+{
27030+ WARN(1, "Buffer overflow detected!\n");
27031+}
27032+EXPORT_SYMBOL(copy_to_user_overflow);
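
The amd64 side is lighter: copy_in_user rebases both pointers through ____m(), len widens from unsigned to unsigned long, copy_user_handle_tail's parameters become __user, and the two *_overflow() stubs arrive for callers that the size_overflow instrumentation diverts when a copy length provably exceeds its destination. An assumed shape for such a diverting caller (the actual call sites are in the uaccess headers, not this hunk; this mirrors the general object-size hardening pattern rather than quoting the patch):

    static inline unsigned long
    checked_copy_from_user(void *to, const void __user *from, unsigned long n)
    {
            if (__builtin_constant_p(n) && __builtin_object_size(to, 0) < n) {
                    copy_from_user_overflow(); /* WARN("Buffer overflow detected!") */
                    return n;                  /* report: nothing copied */
            }
            return _copy_from_user(to, from, n);
    }
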
27033diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
27034index 903ec1e..c4166b2 100644
27035--- a/arch/x86/mm/extable.c
27036+++ b/arch/x86/mm/extable.c
27037@@ -6,12 +6,24 @@
27038 static inline unsigned long
27039 ex_insn_addr(const struct exception_table_entry *x)
27040 {
27041- return (unsigned long)&x->insn + x->insn;
27042+ unsigned long reloc = 0;
27043+
27044+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27045+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27046+#endif
27047+
27048+ return (unsigned long)&x->insn + x->insn + reloc;
27049 }
27050 static inline unsigned long
27051 ex_fixup_addr(const struct exception_table_entry *x)
27052 {
27053- return (unsigned long)&x->fixup + x->fixup;
27054+ unsigned long reloc = 0;
27055+
27056+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27057+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27058+#endif
27059+
27060+ return (unsigned long)&x->fixup + x->fixup + reloc;
27061 }
27062
27063 int fixup_exception(struct pt_regs *regs)
27064@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
27065 unsigned long new_ip;
27066
27067 #ifdef CONFIG_PNPBIOS
27068- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
27069+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
27070 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
27071 extern u32 pnp_bios_is_utter_crap;
27072 pnp_bios_is_utter_crap = 1;
27073@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
27074 i += 4;
27075 p->fixup -= i;
27076 i += 4;
27077+
27078+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27079+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
27080+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27081+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27082+#endif
27083+
27084 }
27085 }
27086
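
Exception-table entries store 32-bit self-relative offsets, so physically relocating the i386 kernel under KERNEXEC (by ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR) moves &x->insn without moving the stored offset; ex_insn_addr()/ex_fixup_addr() therefore add the delta back, the runtime sort path subtracts it from both offsets to match, and the BUILD_BUG_ON pins the configuration to CONFIG_BUILDTIME_EXTABLE_SORT. The decode arithmetic in isolation:

    #include <stdint.h>

    /* toy relative-extable decode mirroring ex_insn_addr() above;
     * reloc models ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR */
    struct exentry { int32_t insn; int32_t fixup; };

    static uintptr_t ex_insn_addr_demo(const struct exentry *x, intptr_t reloc)
    {
            return (uintptr_t)&x->insn + x->insn + reloc;
    }
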
27087diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
27088index fb674fd..1be28b9 100644
27089--- a/arch/x86/mm/fault.c
27090+++ b/arch/x86/mm/fault.c
27091@@ -13,12 +13,19 @@
27092 #include <linux/perf_event.h> /* perf_sw_event */
27093 #include <linux/hugetlb.h> /* hstate_index_to_shift */
27094 #include <linux/prefetch.h> /* prefetchw */
27095+#include <linux/unistd.h>
27096+#include <linux/compiler.h>
27097
27098 #include <asm/traps.h> /* dotraplinkage, ... */
27099 #include <asm/pgalloc.h> /* pgd_*(), ... */
27100 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
27101 #include <asm/fixmap.h> /* VSYSCALL_START */
27102 #include <asm/context_tracking.h> /* exception_enter(), ... */
27103+#include <asm/tlbflush.h>
27104+
27105+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27106+#include <asm/stacktrace.h>
27107+#endif
27108
27109 /*
27110 * Page fault error code bits:
27111@@ -56,7 +63,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
27112 int ret = 0;
27113
27114 /* kprobe_running() needs smp_processor_id() */
27115- if (kprobes_built_in() && !user_mode_vm(regs)) {
27116+ if (kprobes_built_in() && !user_mode(regs)) {
27117 preempt_disable();
27118 if (kprobe_running() && kprobe_fault_handler(regs, 14))
27119 ret = 1;
27120@@ -117,7 +124,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
27121 return !instr_lo || (instr_lo>>1) == 1;
27122 case 0x00:
27123 /* Prefetch instruction is 0x0F0D or 0x0F18 */
27124- if (probe_kernel_address(instr, opcode))
27125+ if (user_mode(regs)) {
27126+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
27127+ return 0;
27128+ } else if (probe_kernel_address(instr, opcode))
27129 return 0;
27130
27131 *prefetch = (instr_lo == 0xF) &&
27132@@ -151,7 +161,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
27133 while (instr < max_instr) {
27134 unsigned char opcode;
27135
27136- if (probe_kernel_address(instr, opcode))
27137+ if (user_mode(regs)) {
27138+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
27139+ break;
27140+ } else if (probe_kernel_address(instr, opcode))
27141 break;
27142
27143 instr++;
27144@@ -182,6 +195,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
27145 force_sig_info(si_signo, &info, tsk);
27146 }
27147
27148+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27149+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
27150+#endif
27151+
27152+#ifdef CONFIG_PAX_EMUTRAMP
27153+static int pax_handle_fetch_fault(struct pt_regs *regs);
27154+#endif
27155+
27156+#ifdef CONFIG_PAX_PAGEEXEC
27157+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
27158+{
27159+ pgd_t *pgd;
27160+ pud_t *pud;
27161+ pmd_t *pmd;
27162+
27163+ pgd = pgd_offset(mm, address);
27164+ if (!pgd_present(*pgd))
27165+ return NULL;
27166+ pud = pud_offset(pgd, address);
27167+ if (!pud_present(*pud))
27168+ return NULL;
27169+ pmd = pmd_offset(pud, address);
27170+ if (!pmd_present(*pmd))
27171+ return NULL;
27172+ return pmd;
27173+}
27174+#endif
27175+
27176 DEFINE_SPINLOCK(pgd_lock);
27177 LIST_HEAD(pgd_list);
27178
27179@@ -232,10 +273,22 @@ void vmalloc_sync_all(void)
27180 for (address = VMALLOC_START & PMD_MASK;
27181 address >= TASK_SIZE && address < FIXADDR_TOP;
27182 address += PMD_SIZE) {
27183+
27184+#ifdef CONFIG_PAX_PER_CPU_PGD
27185+ unsigned long cpu;
27186+#else
27187 struct page *page;
27188+#endif
27189
27190 spin_lock(&pgd_lock);
27191+
27192+#ifdef CONFIG_PAX_PER_CPU_PGD
27193+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
27194+ pgd_t *pgd = get_cpu_pgd(cpu);
27195+ pmd_t *ret;
27196+#else
27197 list_for_each_entry(page, &pgd_list, lru) {
27198+ pgd_t *pgd = page_address(page);
27199 spinlock_t *pgt_lock;
27200 pmd_t *ret;
27201
27202@@ -243,8 +296,13 @@ void vmalloc_sync_all(void)
27203 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
27204
27205 spin_lock(pgt_lock);
27206- ret = vmalloc_sync_one(page_address(page), address);
27207+#endif
27208+
27209+ ret = vmalloc_sync_one(pgd, address);
27210+
27211+#ifndef CONFIG_PAX_PER_CPU_PGD
27212 spin_unlock(pgt_lock);
27213+#endif
27214
27215 if (!ret)
27216 break;
27217@@ -278,6 +336,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
27218 * an interrupt in the middle of a task switch..
27219 */
27220 pgd_paddr = read_cr3();
27221+
27222+#ifdef CONFIG_PAX_PER_CPU_PGD
27223+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
27224+#endif
27225+
27226 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
27227 if (!pmd_k)
27228 return -1;
27229@@ -373,7 +436,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
27230 * happen within a race in page table update. In the later
27231 * case just flush:
27232 */
27233+
27234+#ifdef CONFIG_PAX_PER_CPU_PGD
27235+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
27236+ pgd = pgd_offset_cpu(smp_processor_id(), address);
27237+#else
27238 pgd = pgd_offset(current->active_mm, address);
27239+#endif
27240+
27241 pgd_ref = pgd_offset_k(address);
27242 if (pgd_none(*pgd_ref))
27243 return -1;
27244@@ -541,7 +611,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
27245 static int is_errata100(struct pt_regs *regs, unsigned long address)
27246 {
27247 #ifdef CONFIG_X86_64
27248- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
27249+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
27250 return 1;
27251 #endif
27252 return 0;
27253@@ -568,7 +638,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
27254 }
27255
27256 static const char nx_warning[] = KERN_CRIT
27257-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
27258+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
27259
27260 static void
27261 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
27262@@ -577,15 +647,27 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
27263 if (!oops_may_print())
27264 return;
27265
27266- if (error_code & PF_INSTR) {
27267+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
27268 unsigned int level;
27269
27270 pte_t *pte = lookup_address(address, &level);
27271
27272 if (pte && pte_present(*pte) && !pte_exec(*pte))
27273- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
27274+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
27275 }
27276
27277+#ifdef CONFIG_PAX_KERNEXEC
27278+ if (init_mm.start_code <= address && address < init_mm.end_code) {
27279+ if (current->signal->curr_ip)
27280+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
27281+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
27282+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
27283+ else
27284+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
27285+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
27286+ }
27287+#endif
27288+
27289 printk(KERN_ALERT "BUG: unable to handle kernel ");
27290 if (address < PAGE_SIZE)
27291 printk(KERN_CONT "NULL pointer dereference");
27292@@ -748,6 +830,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
27293 return;
27294 }
27295 #endif
27296+
27297+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27298+ if (pax_is_fetch_fault(regs, error_code, address)) {
27299+
27300+#ifdef CONFIG_PAX_EMUTRAMP
27301+ switch (pax_handle_fetch_fault(regs)) {
27302+ case 2:
27303+ return;
27304+ }
27305+#endif
27306+
27307+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
27308+ do_group_exit(SIGKILL);
27309+ }
27310+#endif
27311+
27312 /* Kernel addresses are always protection faults: */
27313 if (address >= TASK_SIZE)
27314 error_code |= PF_PROT;
27315@@ -833,7 +931,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
27316 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
27317 printk(KERN_ERR
27318 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
27319- tsk->comm, tsk->pid, address);
27320+ tsk->comm, task_pid_nr(tsk), address);
27321 code = BUS_MCEERR_AR;
27322 }
27323 #endif
27324@@ -896,6 +994,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
27325 return 1;
27326 }
27327
27328+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
27329+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
27330+{
27331+ pte_t *pte;
27332+ pmd_t *pmd;
27333+ spinlock_t *ptl;
27334+ unsigned char pte_mask;
27335+
27336+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
27337+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
27338+ return 0;
27339+
27340+ /* PaX: it's our fault, let's handle it if we can */
27341+
27342+ /* PaX: take a look at read faults before acquiring any locks */
27343+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
27344+ /* instruction fetch attempt from a protected page in user mode */
27345+ up_read(&mm->mmap_sem);
27346+
27347+#ifdef CONFIG_PAX_EMUTRAMP
27348+ switch (pax_handle_fetch_fault(regs)) {
27349+ case 2:
27350+ return 1;
27351+ }
27352+#endif
27353+
27354+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
27355+ do_group_exit(SIGKILL);
27356+ }
27357+
27358+ pmd = pax_get_pmd(mm, address);
27359+ if (unlikely(!pmd))
27360+ return 0;
27361+
27362+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
27363+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
27364+ pte_unmap_unlock(pte, ptl);
27365+ return 0;
27366+ }
27367+
27368+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
27369+ /* write attempt to a protected page in user mode */
27370+ pte_unmap_unlock(pte, ptl);
27371+ return 0;
27372+ }
27373+
27374+#ifdef CONFIG_SMP
27375+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
27376+#else
27377+ if (likely(address > get_limit(regs->cs)))
27378+#endif
27379+ {
27380+ set_pte(pte, pte_mkread(*pte));
27381+ __flush_tlb_one(address);
27382+ pte_unmap_unlock(pte, ptl);
27383+ up_read(&mm->mmap_sem);
27384+ return 1;
27385+ }
27386+
27387+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
27388+
27389+ /*
27390+ * PaX: fill DTLB with user rights and retry
27391+ */
27392+ __asm__ __volatile__ (
27393+ "orb %2,(%1)\n"
27394+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
27395+/*
27396+ * PaX: let this uncommented 'invlpg' remind us of the behaviour of Intel's
27397+ * (and AMD's) TLBs: namely, they do not cache PTEs that would raise *any*
27398+ * page fault when examined during a TLB load attempt. This is true not only
27399+ * for PTEs holding a non-present entry but also for present entries that will
27400+ * raise a page fault (such as those set up by PaX, or by the copy-on-write
27401+ * mechanism). In effect it means that we do *not* need to flush the TLBs
27402+ * for our target pages since their PTEs are simply not in the TLBs at all.
27403+ *
27404+ * The best thing about omitting it is that we gain around 15-20% speed in the
27405+ * fast path of the page fault handler and can get rid of tracing since we
27406+ * can no longer flush unintended entries.
27407+ */
27408+ "invlpg (%0)\n"
27409+#endif
27410+ __copyuser_seg"testb $0,(%0)\n"
27411+ "xorb %3,(%1)\n"
27412+ :
27413+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
27414+ : "memory", "cc");
27415+ pte_unmap_unlock(pte, ptl);
27416+ up_read(&mm->mmap_sem);
27417+ return 1;
27418+}
27419+#endif
27420+
27421 /*
27422 * Handle a spurious fault caused by a stale TLB entry.
27423 *
27424@@ -968,6 +1159,9 @@ int show_unhandled_signals = 1;
27425 static inline int
27426 access_error(unsigned long error_code, struct vm_area_struct *vma)
27427 {
27428+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
27429+ return 1;
27430+
27431 if (error_code & PF_WRITE) {
27432 /* write, present and write, not present: */
27433 if (unlikely(!(vma->vm_flags & VM_WRITE)))
27434@@ -996,7 +1190,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
27435 if (error_code & PF_USER)
27436 return false;
27437
27438- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
27439+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
27440 return false;
27441
27442 return true;
27443@@ -1012,18 +1206,33 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
27444 {
27445 struct vm_area_struct *vma;
27446 struct task_struct *tsk;
27447- unsigned long address;
27448 struct mm_struct *mm;
27449 int fault;
27450 int write = error_code & PF_WRITE;
27451 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
27452 (write ? FAULT_FLAG_WRITE : 0);
27453
27454- tsk = current;
27455- mm = tsk->mm;
27456-
27457 /* Get the faulting address: */
27458- address = read_cr2();
27459+ unsigned long address = read_cr2();
27460+
27461+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27462+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
27463+ if (!search_exception_tables(regs->ip)) {
27464+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
27465+ bad_area_nosemaphore(regs, error_code, address);
27466+ return;
27467+ }
27468+ if (address < PAX_USER_SHADOW_BASE) {
27469+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
27470+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
27471+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
27472+ } else
27473+ address -= PAX_USER_SHADOW_BASE;
27474+ }
27475+#endif
27476+
27477+ tsk = current;
27478+ mm = tsk->mm;
27479
27480 /*
27481 * Detect and handle instructions that would cause a page fault for
27482@@ -1084,7 +1293,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
27483 * User-mode registers count as a user access even for any
27484 * potential system fault or CPU buglet:
27485 */
27486- if (user_mode_vm(regs)) {
27487+ if (user_mode(regs)) {
27488 local_irq_enable();
27489 error_code |= PF_USER;
27490 } else {
27491@@ -1146,6 +1355,11 @@ retry:
27492 might_sleep();
27493 }
27494
27495+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
27496+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
27497+ return;
27498+#endif
27499+
27500 vma = find_vma(mm, address);
27501 if (unlikely(!vma)) {
27502 bad_area(regs, error_code, address);
27503@@ -1157,18 +1371,24 @@ retry:
27504 bad_area(regs, error_code, address);
27505 return;
27506 }
27507- if (error_code & PF_USER) {
27508- /*
27509- * Accessing the stack below %sp is always a bug.
27510- * The large cushion allows instructions like enter
27511- * and pusha to work. ("enter $65535, $31" pushes
27512- * 32 pointers and then decrements %sp by 65535.)
27513- */
27514- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
27515- bad_area(regs, error_code, address);
27516- return;
27517- }
27518+ /*
27519+ * Accessing the stack below %sp is always a bug.
27520+ * The large cushion allows instructions like enter
27521+ * and pusha to work. ("enter $65535, $31" pushes
27522+ * 32 pointers and then decrements %sp by 65535.)
27523+ */
27524+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
27525+ bad_area(regs, error_code, address);
27526+ return;
27527 }
27528+
27529+#ifdef CONFIG_PAX_SEGMEXEC
27530+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
27531+ bad_area(regs, error_code, address);
27532+ return;
27533+ }
27534+#endif
27535+
27536 if (unlikely(expand_stack(vma, address))) {
27537 bad_area(regs, error_code, address);
27538 return;
27539@@ -1232,3 +1452,292 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
27540 __do_page_fault(regs, error_code);
27541 exception_exit(regs);
27542 }
27543+
27544+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27545+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
27546+{
27547+ struct mm_struct *mm = current->mm;
27548+ unsigned long ip = regs->ip;
27549+
27550+ if (v8086_mode(regs))
27551+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
27552+
27553+#ifdef CONFIG_PAX_PAGEEXEC
27554+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
27555+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
27556+ return true;
27557+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
27558+ return true;
27559+ return false;
27560+ }
27561+#endif
27562+
27563+#ifdef CONFIG_PAX_SEGMEXEC
27564+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
27565+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
27566+ return true;
27567+ return false;
27568+ }
27569+#endif
27570+
27571+ return false;
27572+}
27573+#endif
27574+
27575+#ifdef CONFIG_PAX_EMUTRAMP
27576+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
27577+{
27578+ int err;
27579+
27580+ do { /* PaX: libffi trampoline emulation */
27581+ unsigned char mov, jmp;
27582+ unsigned int addr1, addr2;
27583+
27584+#ifdef CONFIG_X86_64
27585+ if ((regs->ip + 9) >> 32)
27586+ break;
27587+#endif
27588+
27589+ err = get_user(mov, (unsigned char __user *)regs->ip);
27590+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
27591+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
27592+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
27593+
27594+ if (err)
27595+ break;
27596+
27597+ if (mov == 0xB8 && jmp == 0xE9) {
27598+ regs->ax = addr1;
27599+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
27600+ return 2;
27601+ }
27602+ } while (0);
27603+
27604+ do { /* PaX: gcc trampoline emulation #1 */
27605+ unsigned char mov1, mov2;
27606+ unsigned short jmp;
27607+ unsigned int addr1, addr2;
27608+
27609+#ifdef CONFIG_X86_64
27610+ if ((regs->ip + 11) >> 32)
27611+ break;
27612+#endif
27613+
27614+ err = get_user(mov1, (unsigned char __user *)regs->ip);
27615+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
27616+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
27617+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
27618+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
27619+
27620+ if (err)
27621+ break;
27622+
27623+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
27624+ regs->cx = addr1;
27625+ regs->ax = addr2;
27626+ regs->ip = addr2;
27627+ return 2;
27628+ }
27629+ } while (0);
27630+
27631+ do { /* PaX: gcc trampoline emulation #2 */
27632+ unsigned char mov, jmp;
27633+ unsigned int addr1, addr2;
27634+
27635+#ifdef CONFIG_X86_64
27636+ if ((regs->ip + 9) >> 32)
27637+ break;
27638+#endif
27639+
27640+ err = get_user(mov, (unsigned char __user *)regs->ip);
27641+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
27642+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
27643+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
27644+
27645+ if (err)
27646+ break;
27647+
27648+ if (mov == 0xB9 && jmp == 0xE9) {
27649+ regs->cx = addr1;
27650+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
27651+ return 2;
27652+ }
27653+ } while (0);
27654+
27655+ return 1; /* PaX in action */
27656+}
27657+
27658+#ifdef CONFIG_X86_64
27659+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
27660+{
27661+ int err;
27662+
27663+ do { /* PaX: libffi trampoline emulation */
27664+ unsigned short mov1, mov2, jmp1;
27665+ unsigned char stcclc, jmp2;
27666+ unsigned long addr1, addr2;
27667+
27668+ err = get_user(mov1, (unsigned short __user *)regs->ip);
27669+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
27670+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
27671+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
27672+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
27673+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
27674+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
27675+
27676+ if (err)
27677+ break;
27678+
27679+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
27680+ regs->r11 = addr1;
27681+ regs->r10 = addr2;
27682+ if (stcclc == 0xF8)
27683+ regs->flags &= ~X86_EFLAGS_CF;
27684+ else
27685+ regs->flags |= X86_EFLAGS_CF;
27686+ regs->ip = addr1;
27687+ return 2;
27688+ }
27689+ } while (0);
27690+
27691+ do { /* PaX: gcc trampoline emulation #1 */
27692+ unsigned short mov1, mov2, jmp1;
27693+ unsigned char jmp2;
27694+ unsigned int addr1;
27695+ unsigned long addr2;
27696+
27697+ err = get_user(mov1, (unsigned short __user *)regs->ip);
27698+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
27699+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
27700+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
27701+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
27702+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
27703+
27704+ if (err)
27705+ break;
27706+
27707+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
27708+ regs->r11 = addr1;
27709+ regs->r10 = addr2;
27710+ regs->ip = addr1;
27711+ return 2;
27712+ }
27713+ } while (0);
27714+
27715+ do { /* PaX: gcc trampoline emulation #2 */
27716+ unsigned short mov1, mov2, jmp1;
27717+ unsigned char jmp2;
27718+ unsigned long addr1, addr2;
27719+
27720+ err = get_user(mov1, (unsigned short __user *)regs->ip);
27721+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
27722+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
27723+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
27724+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
27725+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
27726+
27727+ if (err)
27728+ break;
27729+
27730+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
27731+ regs->r11 = addr1;
27732+ regs->r10 = addr2;
27733+ regs->ip = addr1;
27734+ return 2;
27735+ }
27736+ } while (0);
27737+
27738+ return 1; /* PaX in action */
27739+}
27740+#endif
27741+
27742+/*
27743+ * PaX: decide what to do with offenders (regs->ip = fault address)
27744+ *
27745+ * returns 1 when the task should be killed
27746+ * 2 when a gcc trampoline was detected
27747+ */
27748+static int pax_handle_fetch_fault(struct pt_regs *regs)
27749+{
27750+ if (v8086_mode(regs))
27751+ return 1;
27752+
27753+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
27754+ return 1;
27755+
27756+#ifdef CONFIG_X86_32
27757+ return pax_handle_fetch_fault_32(regs);
27758+#else
27759+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
27760+ return pax_handle_fetch_fault_32(regs);
27761+ else
27762+ return pax_handle_fetch_fault_64(regs);
27763+#endif
27764+}
27765+#endif
27766+
27767+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27768+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
27769+{
27770+ long i;
27771+
27772+ printk(KERN_ERR "PAX: bytes at PC: ");
27773+ for (i = 0; i < 20; i++) {
27774+ unsigned char c;
27775+ if (get_user(c, (unsigned char __force_user *)pc+i))
27776+ printk(KERN_CONT "?? ");
27777+ else
27778+ printk(KERN_CONT "%02x ", c);
27779+ }
27780+ printk("\n");
27781+
27782+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
27783+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
27784+ unsigned long c;
27785+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
27786+#ifdef CONFIG_X86_32
27787+ printk(KERN_CONT "???????? ");
27788+#else
27789+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
27790+ printk(KERN_CONT "???????? ???????? ");
27791+ else
27792+ printk(KERN_CONT "???????????????? ");
27793+#endif
27794+ } else {
27795+#ifdef CONFIG_X86_64
27796+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
27797+ printk(KERN_CONT "%08x ", (unsigned int)c);
27798+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
27799+ } else
27800+#endif
27801+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
27802+ }
27803+ }
27804+ printk("\n");
27805+}
27806+#endif
27807+
27808+/**
27809+ * probe_kernel_write(): safely attempt to write to a location
27810+ * @dst: address to write to
27811+ * @src: pointer to the data that shall be written
27812+ * @size: size of the data chunk
27813+ *
27814+ * Safely write to address @dst from the buffer at @src. If a kernel fault
27815+ * happens, handle that and return -EFAULT.
27816+ */
27817+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
27818+{
27819+ long ret;
27820+ mm_segment_t old_fs = get_fs();
27821+
27822+ set_fs(KERNEL_DS);
27823+ pagefault_disable();
27824+ pax_open_kernel();
27825+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
27826+ pax_close_kernel();
27827+ pagefault_enable();
27828+ set_fs(old_fs);
27829+
27830+ return ret ? -EFAULT : 0;
27831+}
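
The probe_kernel_write() added above combines set_fs(KERNEL_DS), pagefault_disable() and the pax_open_kernel()/pax_close_kernel() bracket so that a faulting destination yields -EFAULT instead of an oops. A minimal userspace sketch of that caller contract follows; probe_write() is an illustrative stand-in, and its dst_valid flag emulates the verdict the kernel's exception tables deliver at fault time:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for probe_kernel_write(): returns 0 or -EFAULT, never faults */
static long probe_write(void *dst, const void *src, size_t size, int dst_valid)
{
    if (!dst_valid)             /* kernel: fault caught and fixed up */
        return -EFAULT;
    memcpy(dst, src, size);     /* kernel: __copy_to_user_inatomic() */
    return 0;
}

int main(void)
{
    char buf[8];

    printf("good dst -> %ld\n", probe_write(buf, "ok", 3, 1));   /* 0 */
    printf("bad dst  -> %ld\n", probe_write(NULL, "ok", 3, 0));  /* -14 */
    return 0;
}

The point of the contract is that callers only ever see 0 or -EFAULT; all fault handling stays inside the helper.
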
27832diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
27833index dd74e46..7d26398 100644
27834--- a/arch/x86/mm/gup.c
27835+++ b/arch/x86/mm/gup.c
27836@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
27837 addr = start;
27838 len = (unsigned long) nr_pages << PAGE_SHIFT;
27839 end = start + len;
27840- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
27841+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
27842 (void __user *)start, len)))
27843 return 0;
27844
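
The gup.c hunk swaps access_ok() for __access_ok() in the fast path, but either way the underlying test is a wrap-safe range check against the user address limit. A runnable sketch of that check, with TASK_LIMIT as an assumed stand-in for the real per-task limit:

#include <stdint.h>
#include <stdio.h>

#define TASK_LIMIT 0x7ffffffff000ULL   /* illustrative user-space ceiling */

static int range_ok(uint64_t start, uint64_t len)
{
    uint64_t end = start + len;

    if (end < start)            /* addition wrapped around */
        return 0;
    return end <= TASK_LIMIT;
}

int main(void)
{
    printf("%d\n", range_ok(0x1000, 0x1000));        /* 1 */
    printf("%d\n", range_ok(UINT64_MAX - 4, 16));    /* 0: wraps */
    printf("%d\n", range_ok(TASK_LIMIT - 8, 16));    /* 0: crosses limit */
    return 0;
}
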
27845diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
27846index 6f31ee5..8ee4164 100644
27847--- a/arch/x86/mm/highmem_32.c
27848+++ b/arch/x86/mm/highmem_32.c
27849@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
27850 idx = type + KM_TYPE_NR*smp_processor_id();
27851 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
27852 BUG_ON(!pte_none(*(kmap_pte-idx)));
27853+
27854+ pax_open_kernel();
27855 set_pte(kmap_pte-idx, mk_pte(page, prot));
27856+ pax_close_kernel();
27857+
27858 arch_flush_lazy_mmu_mode();
27859
27860 return (void *)vaddr;
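
The highmem_32.c hunk brackets set_pte() with pax_open_kernel()/pax_close_kernel(), which temporarily lift kernel write protection around a single privileged store. A rough userspace analogue of that bracketing, using mprotect() in place of the CR0.WP toggling the kernel performs (error checks omitted for brevity):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long ps = sysconf(_SC_PAGESIZE);
    char *p = mmap(NULL, ps, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    strcpy(p, "old");
    mprotect(p, ps, PROT_READ);               /* normally read-only */

    mprotect(p, ps, PROT_READ | PROT_WRITE);  /* "pax_open_kernel()" */
    strcpy(p, "new");                         /* the guarded set_pte() */
    mprotect(p, ps, PROT_READ);               /* "pax_close_kernel()" */

    puts(p);
    return 0;
}

Keeping the writable window this narrow is the design point: the mapping is writable only for the duration of the one store that needs it.
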
27861diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
27862index ae1aa71..56316db 100644
27863--- a/arch/x86/mm/hugetlbpage.c
27864+++ b/arch/x86/mm/hugetlbpage.c
27865@@ -279,6 +279,12 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
27866 info.flags = 0;
27867 info.length = len;
27868 info.low_limit = TASK_UNMAPPED_BASE;
27869+
27870+#ifdef CONFIG_PAX_RANDMMAP
27871+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
27872+ info.low_limit += current->mm->delta_mmap;
27873+#endif
27874+
27875 info.high_limit = TASK_SIZE;
27876 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
27877 info.align_offset = 0;
27878@@ -311,6 +317,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
27879 VM_BUG_ON(addr != -ENOMEM);
27880 info.flags = 0;
27881 info.low_limit = TASK_UNMAPPED_BASE;
27882+
27883+#ifdef CONFIG_PAX_RANDMMAP
27884+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
27885+ info.low_limit += current->mm->delta_mmap;
27886+#endif
27887+
27888 info.high_limit = TASK_SIZE;
27889 addr = vm_unmapped_area(&info);
27890 }
27891@@ -325,10 +337,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
27892 struct hstate *h = hstate_file(file);
27893 struct mm_struct *mm = current->mm;
27894 struct vm_area_struct *vma;
27895+ unsigned long pax_task_size = TASK_SIZE;
27896+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
27897
27898 if (len & ~huge_page_mask(h))
27899 return -EINVAL;
27900- if (len > TASK_SIZE)
27901+
27902+#ifdef CONFIG_PAX_SEGMEXEC
27903+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27904+ pax_task_size = SEGMEXEC_TASK_SIZE;
27905+#endif
27906+
27907+ pax_task_size -= PAGE_SIZE;
27908+
27909+ if (len > pax_task_size)
27910 return -ENOMEM;
27911
27912 if (flags & MAP_FIXED) {
27913@@ -337,11 +359,14 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
27914 return addr;
27915 }
27916
27917+#ifdef CONFIG_PAX_RANDMMAP
27918+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27919+#endif
27920+
27921 if (addr) {
27922 addr = ALIGN(addr, huge_page_size(h));
27923 vma = find_vma(mm, addr);
27924- if (TASK_SIZE - len >= addr &&
27925- (!vma || addr + len <= vma->vm_start))
27926+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27927 return addr;
27928 }
27929 if (mm->get_unmapped_area == arch_get_unmapped_area)
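
The hugetlbpage hunks bias the unmapped-area search in two ways: SEGMEXEC halves the usable task size (minus a guard page), and RANDMMAP raises the bottom of the search window by a per-mm random delta. A small sketch of the resulting window arithmetic, with illustrative constants rather than the kernel's:

#include <stdio.h>

#define TASK_SIZE          0xC0000000UL  /* i386 3GB split, illustrative */
#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
#define PAGE_SZ            4096UL

int main(void)
{
    unsigned long low  = 0x10000000UL;             /* TASK_UNMAPPED_BASE, assumed */
    unsigned long high = SEGMEXEC_TASK_SIZE - PAGE_SZ;  /* SEGMEXEC ceiling */
    unsigned long delta_mmap = 0x00A51000UL;       /* per-mm random delta, assumed */

    low += delta_mmap;                             /* the RANDMMAP shift */
    printf("search window: [%#lx, %#lx)\n", low, high);
    return 0;
}
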
27930diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
27931index d7aea41..0fc945b 100644
27932--- a/arch/x86/mm/init.c
27933+++ b/arch/x86/mm/init.c
27934@@ -4,6 +4,7 @@
27935 #include <linux/swap.h>
27936 #include <linux/memblock.h>
27937 #include <linux/bootmem.h> /* for max_low_pfn */
27938+#include <linux/tboot.h>
27939
27940 #include <asm/cacheflush.h>
27941 #include <asm/e820.h>
27942@@ -16,6 +17,8 @@
27943 #include <asm/tlb.h>
27944 #include <asm/proto.h>
27945 #include <asm/dma.h> /* for MAX_DMA_PFN */
27946+#include <asm/desc.h>
27947+#include <asm/bios_ebda.h>
27948
27949 unsigned long __initdata pgt_buf_start;
27950 unsigned long __meminitdata pgt_buf_end;
27951@@ -44,7 +47,7 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
27952 {
27953 int i;
27954 unsigned long puds = 0, pmds = 0, ptes = 0, tables;
27955- unsigned long start = 0, good_end;
27956+ unsigned long start = 0x100000, good_end;
27957 phys_addr_t base;
27958
27959 for (i = 0; i < nr_range; i++) {
27960@@ -321,10 +324,40 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
27961 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
27962 * mmio resources as well as potential bios/acpi data regions.
27963 */
27964+
27965+#ifdef CONFIG_GRKERNSEC_KMEM
27966+static unsigned int ebda_start __read_only;
27967+static unsigned int ebda_end __read_only;
27968+#endif
27969+
27970 int devmem_is_allowed(unsigned long pagenr)
27971 {
27972- if (pagenr < 256)
27973+#ifdef CONFIG_GRKERNSEC_KMEM
27974+ /* allow BDA */
27975+ if (!pagenr)
27976 return 1;
27977+ /* allow EBDA */
27978+ if (pagenr >= ebda_start && pagenr < ebda_end)
27979+ return 1;
27980+ /* if tboot is in use, allow access to its hardcoded serial log range */
27981+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
27982+ return 1;
27983+#else
27984+ if (!pagenr)
27985+ return 1;
27986+#ifdef CONFIG_VM86
27987+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
27988+ return 1;
27989+#endif
27990+#endif
27991+
27992+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
27993+ return 1;
27994+#ifdef CONFIG_GRKERNSEC_KMEM
27995+ /* throw out everything else below 1MB */
27996+ if (pagenr <= 256)
27997+ return 0;
27998+#endif
27999 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
28000 return 0;
28001 if (!page_is_ram(pagenr))
28002@@ -381,8 +414,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
28003 #endif
28004 }
28005
28006+#ifdef CONFIG_GRKERNSEC_KMEM
28007+static inline void gr_init_ebda(void)
28008+{
28009+ unsigned int ebda_addr;
28010+ unsigned int ebda_size = 0;
28011+
28012+ ebda_addr = get_bios_ebda();
28013+ if (ebda_addr) {
28014+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
28015+ ebda_size <<= 10;
28016+ }
28017+ if (ebda_addr && ebda_size) {
28018+ ebda_start = ebda_addr >> PAGE_SHIFT;
28019+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
28020+ } else {
28021+ ebda_start = 0x9f000 >> PAGE_SHIFT;
28022+ ebda_end = 0xa0000 >> PAGE_SHIFT;
28023+ }
28024+}
28025+#else
28026+static inline void gr_init_ebda(void) { }
28027+#endif
28028+
28029 void free_initmem(void)
28030 {
28031+#ifdef CONFIG_PAX_KERNEXEC
28032+#ifdef CONFIG_X86_32
28033+ /* PaX: limit KERNEL_CS to actual size */
28034+ unsigned long addr, limit;
28035+ struct desc_struct d;
28036+ int cpu;
28037+#else
28038+ pgd_t *pgd;
28039+ pud_t *pud;
28040+ pmd_t *pmd;
28041+ unsigned long addr, end;
28042+#endif
28043+#endif
28044+
28045+ gr_init_ebda();
28046+
28047+#ifdef CONFIG_PAX_KERNEXEC
28048+#ifdef CONFIG_X86_32
28049+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
28050+ limit = (limit - 1UL) >> PAGE_SHIFT;
28051+
28052+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
28053+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
28054+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
28055+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
28056+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
28057+ }
28058+
28059+ /* PaX: make KERNEL_CS read-only */
28060+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
28061+ if (!paravirt_enabled())
28062+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
28063+/*
28064+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
28065+ pgd = pgd_offset_k(addr);
28066+ pud = pud_offset(pgd, addr);
28067+ pmd = pmd_offset(pud, addr);
28068+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
28069+ }
28070+*/
28071+#ifdef CONFIG_X86_PAE
28072+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
28073+/*
28074+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
28075+ pgd = pgd_offset_k(addr);
28076+ pud = pud_offset(pgd, addr);
28077+ pmd = pmd_offset(pud, addr);
28078+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
28079+ }
28080+*/
28081+#endif
28082+
28083+#ifdef CONFIG_MODULES
28084+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
28085+#endif
28086+
28087+#else
28088+ /* PaX: make kernel code/rodata read-only, rest non-executable */
28089+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
28090+ pgd = pgd_offset_k(addr);
28091+ pud = pud_offset(pgd, addr);
28092+ pmd = pmd_offset(pud, addr);
28093+ if (!pmd_present(*pmd))
28094+ continue;
28095+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
28096+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
28097+ else
28098+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
28099+ }
28100+
28101+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
28102+ end = addr + KERNEL_IMAGE_SIZE;
28103+ for (; addr < end; addr += PMD_SIZE) {
28104+ pgd = pgd_offset_k(addr);
28105+ pud = pud_offset(pgd, addr);
28106+ pmd = pmd_offset(pud, addr);
28107+ if (!pmd_present(*pmd))
28108+ continue;
28109+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
28110+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
28111+ }
28112+#endif
28113+
28114+ flush_tlb_all();
28115+#endif
28116+
28117 free_init_pages("unused kernel memory",
28118 (unsigned long)(&__init_begin),
28119 (unsigned long)(&__init_end));
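
Under CONFIG_GRKERNSEC_KMEM the devmem_is_allowed() above turns /dev/mem access below 1MB into a whitelist: the BDA, the detected EBDA range, and the ISA hole pass, and everything else is refused. A simplified, runnable sketch of that page-number policy (the kernel additionally checks iomem exclusivity and RAM status above 1MB, which is omitted here):

#include <stdio.h>

#define PAGE_SHIFT 12
#define ISA_START  (0xA0000UL  >> PAGE_SHIFT)
#define ISA_END    (0x100000UL >> PAGE_SHIFT)

static unsigned long ebda_start = 0x9F000UL >> PAGE_SHIFT;  /* fallback EBDA */
static unsigned long ebda_end   = 0xA0000UL >> PAGE_SHIFT;

static int devmem_allowed(unsigned long pagenr)
{
    if (!pagenr)                                    /* BDA */
        return 1;
    if (pagenr >= ebda_start && pagenr < ebda_end)  /* EBDA */
        return 1;
    if (pagenr >= ISA_START && pagenr < ISA_END)    /* ISA hole */
        return 1;
    /* everything else below 1MB is refused; above it the kernel
     * still applies further checks, simplified away here */
    return pagenr > 256;
}

int main(void)
{
    printf("BDA  (page 0x00): %d\n", devmem_allowed(0x00));  /* 1 */
    printf("EBDA (page 0x9F): %d\n", devmem_allowed(0x9F));  /* 1 */
    printf("RAM  (page 0x50): %d\n", devmem_allowed(0x50));  /* 0 */
    return 0;
}
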
28120diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
28121index 745d66b..56bf568 100644
28122--- a/arch/x86/mm/init_32.c
28123+++ b/arch/x86/mm/init_32.c
28124@@ -73,36 +73,6 @@ static __init void *alloc_low_page(void)
28125 }
28126
28127 /*
28128- * Creates a middle page table and puts a pointer to it in the
28129- * given global directory entry. This only returns the gd entry
28130- * in non-PAE compilation mode, since the middle layer is folded.
28131- */
28132-static pmd_t * __init one_md_table_init(pgd_t *pgd)
28133-{
28134- pud_t *pud;
28135- pmd_t *pmd_table;
28136-
28137-#ifdef CONFIG_X86_PAE
28138- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
28139- if (after_bootmem)
28140- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
28141- else
28142- pmd_table = (pmd_t *)alloc_low_page();
28143- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
28144- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
28145- pud = pud_offset(pgd, 0);
28146- BUG_ON(pmd_table != pmd_offset(pud, 0));
28147-
28148- return pmd_table;
28149- }
28150-#endif
28151- pud = pud_offset(pgd, 0);
28152- pmd_table = pmd_offset(pud, 0);
28153-
28154- return pmd_table;
28155-}
28156-
28157-/*
28158 * Create a page table and place a pointer to it in a middle page
28159 * directory entry:
28160 */
28161@@ -122,13 +92,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
28162 page_table = (pte_t *)alloc_low_page();
28163
28164 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
28165+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
28166+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
28167+#else
28168 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
28169+#endif
28170 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
28171 }
28172
28173 return pte_offset_kernel(pmd, 0);
28174 }
28175
28176+static pmd_t * __init one_md_table_init(pgd_t *pgd)
28177+{
28178+ pud_t *pud;
28179+ pmd_t *pmd_table;
28180+
28181+ pud = pud_offset(pgd, 0);
28182+ pmd_table = pmd_offset(pud, 0);
28183+
28184+ return pmd_table;
28185+}
28186+
28187 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
28188 {
28189 int pgd_idx = pgd_index(vaddr);
28190@@ -202,6 +187,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
28191 int pgd_idx, pmd_idx;
28192 unsigned long vaddr;
28193 pgd_t *pgd;
28194+ pud_t *pud;
28195 pmd_t *pmd;
28196 pte_t *pte = NULL;
28197
28198@@ -211,8 +197,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
28199 pgd = pgd_base + pgd_idx;
28200
28201 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
28202- pmd = one_md_table_init(pgd);
28203- pmd = pmd + pmd_index(vaddr);
28204+ pud = pud_offset(pgd, vaddr);
28205+ pmd = pmd_offset(pud, vaddr);
28206+
28207+#ifdef CONFIG_X86_PAE
28208+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
28209+#endif
28210+
28211 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
28212 pmd++, pmd_idx++) {
28213 pte = page_table_kmap_check(one_page_table_init(pmd),
28214@@ -224,11 +215,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
28215 }
28216 }
28217
28218-static inline int is_kernel_text(unsigned long addr)
28219+static inline int is_kernel_text(unsigned long start, unsigned long end)
28220 {
28221- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
28222- return 1;
28223- return 0;
28224+ if ((start > ktla_ktva((unsigned long)_etext) ||
28225+ end <= ktla_ktva((unsigned long)_stext)) &&
28226+ (start > ktla_ktva((unsigned long)_einittext) ||
28227+ end <= ktla_ktva((unsigned long)_sinittext)) &&
28228+
28229+#ifdef CONFIG_ACPI_SLEEP
28230+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
28231+#endif
28232+
28233+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
28234+ return 0;
28235+ return 1;
28236 }
28237
28238 /*
28239@@ -245,9 +245,10 @@ kernel_physical_mapping_init(unsigned long start,
28240 unsigned long last_map_addr = end;
28241 unsigned long start_pfn, end_pfn;
28242 pgd_t *pgd_base = swapper_pg_dir;
28243- int pgd_idx, pmd_idx, pte_ofs;
28244+ unsigned int pgd_idx, pmd_idx, pte_ofs;
28245 unsigned long pfn;
28246 pgd_t *pgd;
28247+ pud_t *pud;
28248 pmd_t *pmd;
28249 pte_t *pte;
28250 unsigned pages_2m, pages_4k;
28251@@ -280,8 +281,13 @@ repeat:
28252 pfn = start_pfn;
28253 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
28254 pgd = pgd_base + pgd_idx;
28255- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
28256- pmd = one_md_table_init(pgd);
28257+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
28258+ pud = pud_offset(pgd, 0);
28259+ pmd = pmd_offset(pud, 0);
28260+
28261+#ifdef CONFIG_X86_PAE
28262+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
28263+#endif
28264
28265 if (pfn >= end_pfn)
28266 continue;
28267@@ -293,14 +299,13 @@ repeat:
28268 #endif
28269 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
28270 pmd++, pmd_idx++) {
28271- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
28272+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
28273
28274 /*
28275 * Map with big pages if possible, otherwise
28276 * create normal page tables:
28277 */
28278 if (use_pse) {
28279- unsigned int addr2;
28280 pgprot_t prot = PAGE_KERNEL_LARGE;
28281 /*
28282 * first pass will use the same initial
28283@@ -310,11 +315,7 @@ repeat:
28284 __pgprot(PTE_IDENT_ATTR |
28285 _PAGE_PSE);
28286
28287- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
28288- PAGE_OFFSET + PAGE_SIZE-1;
28289-
28290- if (is_kernel_text(addr) ||
28291- is_kernel_text(addr2))
28292+ if (is_kernel_text(address, address + PMD_SIZE))
28293 prot = PAGE_KERNEL_LARGE_EXEC;
28294
28295 pages_2m++;
28296@@ -331,7 +332,7 @@ repeat:
28297 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
28298 pte += pte_ofs;
28299 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
28300- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
28301+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
28302 pgprot_t prot = PAGE_KERNEL;
28303 /*
28304 * first pass will use the same initial
28305@@ -339,7 +340,7 @@ repeat:
28306 */
28307 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
28308
28309- if (is_kernel_text(addr))
28310+ if (is_kernel_text(address, address + PAGE_SIZE))
28311 prot = PAGE_KERNEL_EXEC;
28312
28313 pages_4k++;
28314@@ -465,7 +466,7 @@ void __init native_pagetable_init(void)
28315
28316 pud = pud_offset(pgd, va);
28317 pmd = pmd_offset(pud, va);
28318- if (!pmd_present(*pmd))
28319+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
28320 break;
28321
28322 pte = pte_offset_kernel(pmd, va);
28323@@ -514,12 +515,10 @@ void __init early_ioremap_page_table_range_init(void)
28324
28325 static void __init pagetable_init(void)
28326 {
28327- pgd_t *pgd_base = swapper_pg_dir;
28328-
28329- permanent_kmaps_init(pgd_base);
28330+ permanent_kmaps_init(swapper_pg_dir);
28331 }
28332
28333-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
28334+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
28335 EXPORT_SYMBOL_GPL(__supported_pte_mask);
28336
28337 /* user-defined highmem size */
28338@@ -728,6 +727,12 @@ void __init mem_init(void)
28339
28340 pci_iommu_alloc();
28341
28342+#ifdef CONFIG_PAX_PER_CPU_PGD
28343+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
28344+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
28345+ KERNEL_PGD_PTRS);
28346+#endif
28347+
28348 #ifdef CONFIG_FLATMEM
28349 BUG_ON(!mem_map);
28350 #endif
28351@@ -754,7 +759,7 @@ void __init mem_init(void)
28352 reservedpages++;
28353
28354 codesize = (unsigned long) &_etext - (unsigned long) &_text;
28355- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
28356+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
28357 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
28358
28359 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
28360@@ -795,10 +800,10 @@ void __init mem_init(void)
28361 ((unsigned long)&__init_end -
28362 (unsigned long)&__init_begin) >> 10,
28363
28364- (unsigned long)&_etext, (unsigned long)&_edata,
28365- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
28366+ (unsigned long)&_sdata, (unsigned long)&_edata,
28367+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
28368
28369- (unsigned long)&_text, (unsigned long)&_etext,
28370+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
28371 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
28372
28373 /*
28374@@ -876,6 +881,7 @@ void set_kernel_text_rw(void)
28375 if (!kernel_set_to_readonly)
28376 return;
28377
28378+ start = ktla_ktva(start);
28379 pr_debug("Set kernel text: %lx - %lx for read write\n",
28380 start, start+size);
28381
28382@@ -890,6 +896,7 @@ void set_kernel_text_ro(void)
28383 if (!kernel_set_to_readonly)
28384 return;
28385
28386+ start = ktla_ktva(start);
28387 pr_debug("Set kernel text: %lx - %lx for read only\n",
28388 start, start+size);
28389
28390@@ -918,6 +925,7 @@ void mark_rodata_ro(void)
28391 unsigned long start = PFN_ALIGN(_text);
28392 unsigned long size = PFN_ALIGN(_etext) - start;
28393
28394+ start = ktla_ktva(start);
28395 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
28396 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
28397 size >> 10);
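
The rewritten is_kernel_text() above is at heart an interval-overlap test: a candidate mapping [start, end) counts as kernel text if it intersects any protected range (text, inittext, the ACPI wakeup trampoline, the BIOS area). A minimal sketch of that predicate, with a made-up range:

#include <stdio.h>

struct range { unsigned long lo, hi; };     /* half-open [lo, hi) */

static int overlaps(struct range r, unsigned long start, unsigned long end)
{
    return start < r.hi && end > r.lo;      /* negation of "disjoint" */
}

int main(void)
{
    struct range text = { 0xC1000000UL, 0xC1400000UL };  /* assumed bounds */

    printf("%d\n", overlaps(text, 0xC1200000UL, 0xC1201000UL));  /* 1 */
    printf("%d\n", overlaps(text, 0xC2000000UL, 0xC2001000UL));  /* 0 */
    return 0;
}
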
28398diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
28399index 75c9a6a..498d677 100644
28400--- a/arch/x86/mm/init_64.c
28401+++ b/arch/x86/mm/init_64.c
28402@@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpages_on);
28403 * around without checking the pgd every time.
28404 */
28405
28406-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
28407+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
28408 EXPORT_SYMBOL_GPL(__supported_pte_mask);
28409
28410 int force_personality32;
28411@@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
28412
28413 for (address = start; address <= end; address += PGDIR_SIZE) {
28414 const pgd_t *pgd_ref = pgd_offset_k(address);
28415+
28416+#ifdef CONFIG_PAX_PER_CPU_PGD
28417+ unsigned long cpu;
28418+#else
28419 struct page *page;
28420+#endif
28421
28422 if (pgd_none(*pgd_ref))
28423 continue;
28424
28425 spin_lock(&pgd_lock);
28426+
28427+#ifdef CONFIG_PAX_PER_CPU_PGD
28428+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
28429+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
28430+#else
28431 list_for_each_entry(page, &pgd_list, lru) {
28432 pgd_t *pgd;
28433 spinlock_t *pgt_lock;
28434@@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
28435 /* the pgt_lock only for Xen */
28436 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
28437 spin_lock(pgt_lock);
28438+#endif
28439
28440 if (pgd_none(*pgd))
28441 set_pgd(pgd, *pgd_ref);
28442@@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
28443 BUG_ON(pgd_page_vaddr(*pgd)
28444 != pgd_page_vaddr(*pgd_ref));
28445
28446+#ifndef CONFIG_PAX_PER_CPU_PGD
28447 spin_unlock(pgt_lock);
28448+#endif
28449+
28450 }
28451 spin_unlock(&pgd_lock);
28452 }
28453@@ -161,7 +175,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
28454 {
28455 if (pgd_none(*pgd)) {
28456 pud_t *pud = (pud_t *)spp_getpage();
28457- pgd_populate(&init_mm, pgd, pud);
28458+ pgd_populate_kernel(&init_mm, pgd, pud);
28459 if (pud != pud_offset(pgd, 0))
28460 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
28461 pud, pud_offset(pgd, 0));
28462@@ -173,7 +187,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
28463 {
28464 if (pud_none(*pud)) {
28465 pmd_t *pmd = (pmd_t *) spp_getpage();
28466- pud_populate(&init_mm, pud, pmd);
28467+ pud_populate_kernel(&init_mm, pud, pmd);
28468 if (pmd != pmd_offset(pud, 0))
28469 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
28470 pmd, pmd_offset(pud, 0));
28471@@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
28472 pmd = fill_pmd(pud, vaddr);
28473 pte = fill_pte(pmd, vaddr);
28474
28475+ pax_open_kernel();
28476 set_pte(pte, new_pte);
28477+ pax_close_kernel();
28478
28479 /*
28480 * It's enough to flush this one mapping.
28481@@ -261,14 +277,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
28482 pgd = pgd_offset_k((unsigned long)__va(phys));
28483 if (pgd_none(*pgd)) {
28484 pud = (pud_t *) spp_getpage();
28485- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
28486- _PAGE_USER));
28487+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
28488 }
28489 pud = pud_offset(pgd, (unsigned long)__va(phys));
28490 if (pud_none(*pud)) {
28491 pmd = (pmd_t *) spp_getpage();
28492- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
28493- _PAGE_USER));
28494+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
28495 }
28496 pmd = pmd_offset(pud, phys);
28497 BUG_ON(!pmd_none(*pmd));
28498@@ -329,7 +343,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
28499 if (pfn >= pgt_buf_top)
28500 panic("alloc_low_page: ran out of memory");
28501
28502- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
28503+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
28504 clear_page(adr);
28505 *phys = pfn * PAGE_SIZE;
28506 return adr;
28507@@ -345,7 +359,7 @@ static __ref void *map_low_page(void *virt)
28508
28509 phys = __pa(virt);
28510 left = phys & (PAGE_SIZE - 1);
28511- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
28512+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
28513 adr = (void *)(((unsigned long)adr) | left);
28514
28515 return adr;
28516@@ -553,7 +567,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
28517 unmap_low_page(pmd);
28518
28519 spin_lock(&init_mm.page_table_lock);
28520- pud_populate(&init_mm, pud, __va(pmd_phys));
28521+ pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
28522 spin_unlock(&init_mm.page_table_lock);
28523 }
28524 __flush_tlb_all();
28525@@ -599,7 +613,7 @@ kernel_physical_mapping_init(unsigned long start,
28526 unmap_low_page(pud);
28527
28528 spin_lock(&init_mm.page_table_lock);
28529- pgd_populate(&init_mm, pgd, __va(pud_phys));
28530+ pgd_populate_kernel(&init_mm, pgd, __va(pud_phys));
28531 spin_unlock(&init_mm.page_table_lock);
28532 pgd_changed = true;
28533 }
28534@@ -693,6 +707,12 @@ void __init mem_init(void)
28535
28536 pci_iommu_alloc();
28537
28538+#ifdef CONFIG_PAX_PER_CPU_PGD
28539+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
28540+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
28541+ KERNEL_PGD_PTRS);
28542+#endif
28543+
28544 /* clear_bss() already clear the empty_zero_page */
28545
28546 reservedpages = 0;
28547@@ -856,8 +876,8 @@ int kern_addr_valid(unsigned long addr)
28548 static struct vm_area_struct gate_vma = {
28549 .vm_start = VSYSCALL_START,
28550 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
28551- .vm_page_prot = PAGE_READONLY_EXEC,
28552- .vm_flags = VM_READ | VM_EXEC
28553+ .vm_page_prot = PAGE_READONLY,
28554+ .vm_flags = VM_READ
28555 };
28556
28557 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
28558@@ -891,7 +911,7 @@ int in_gate_area_no_mm(unsigned long addr)
28559
28560 const char *arch_vma_name(struct vm_area_struct *vma)
28561 {
28562- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
28563+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
28564 return "[vdso]";
28565 if (vma == &gate_vma)
28566 return "[vsyscall]";
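
With CONFIG_PAX_PER_CPU_PGD, sync_global_pgds() above walks per-CPU PGD copies instead of the pgd_list and fills in any kernel-half entry that is still empty from the reference table. A toy model of that invariant, with plain arrays standing in for page tables:

#include <stdio.h>

#define NCPU 4

static unsigned long ref_pgd = 0xDEAD000UL;  /* the pgd_ref entry */
static unsigned long cpu_pgd[NCPU];          /* per-CPU PGD copies */

static void sync_global(void)
{
    for (int cpu = 0; cpu < NCPU; cpu++)
        if (!cpu_pgd[cpu])                   /* pgd_none() */
            cpu_pgd[cpu] = ref_pgd;          /* set_pgd(pgd, *pgd_ref) */
}

int main(void)
{
    cpu_pgd[2] = ref_pgd;                    /* one copy already synced */
    sync_global();
    for (int cpu = 0; cpu < NCPU; cpu++)
        printf("cpu%d: %#lx\n", cpu, cpu_pgd[cpu]);
    return 0;
}
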
28567diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
28568index 7b179b4..6bd1777 100644
28569--- a/arch/x86/mm/iomap_32.c
28570+++ b/arch/x86/mm/iomap_32.c
28571@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
28572 type = kmap_atomic_idx_push();
28573 idx = type + KM_TYPE_NR * smp_processor_id();
28574 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
28575+
28576+ pax_open_kernel();
28577 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
28578+ pax_close_kernel();
28579+
28580 arch_flush_lazy_mmu_mode();
28581
28582 return (void *)vaddr;
28583diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
28584index 78fe3f1..73b95e2 100644
28585--- a/arch/x86/mm/ioremap.c
28586+++ b/arch/x86/mm/ioremap.c
28587@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
28588 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
28589 int is_ram = page_is_ram(pfn);
28590
28591- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
28592+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
28593 return NULL;
28594 WARN_ON_ONCE(is_ram);
28595 }
28596@@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioremap_prot);
28597 *
28598 * Caller must ensure there is only one unmapping for the same pointer.
28599 */
28600-void iounmap(volatile void __iomem *addr)
28601+void iounmap(const volatile void __iomem *addr)
28602 {
28603 struct vm_struct *p, *o;
28604
28605@@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
28606
28607 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
28608 if (page_is_ram(start >> PAGE_SHIFT))
28609+#ifdef CONFIG_HIGHMEM
28610+ if ((start >> PAGE_SHIFT) < max_low_pfn)
28611+#endif
28612 return __va(phys);
28613
28614 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
28615@@ -327,6 +330,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
28616 void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
28617 {
28618 if (page_is_ram(phys >> PAGE_SHIFT))
28619+#ifdef CONFIG_HIGHMEM
28620+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
28621+#endif
28622 return;
28623
28624 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
28625@@ -344,7 +350,7 @@ static int __init early_ioremap_debug_setup(char *str)
28626 early_param("early_ioremap_debug", early_ioremap_debug_setup);
28627
28628 static __initdata int after_paging_init;
28629-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
28630+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
28631
28632 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
28633 {
28634@@ -381,8 +387,7 @@ void __init early_ioremap_init(void)
28635 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
28636
28637 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
28638- memset(bm_pte, 0, sizeof(bm_pte));
28639- pmd_populate_kernel(&init_mm, pmd, bm_pte);
28640+ pmd_populate_user(&init_mm, pmd, bm_pte);
28641
28642 /*
28643 * The boot-ioremap range spans multiple pmds, for which
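
The CONFIG_HIGHMEM guards added to xlate_dev_mem_ptr() restrict the __va() shortcut to RAM below the lowmem ceiling; highmem pages have no permanent direct mapping and must go through ioremap_cache() instead. A sketch of that decision, with an illustrative ceiling:

#include <stdio.h>

#define PAGE_SHIFT  12
#define MAX_LOW_PFN 0x38000UL   /* ~896MB lowmem ceiling, illustrative */

static const char *map_strategy(unsigned long phys, int is_ram)
{
    unsigned long pfn = phys >> PAGE_SHIFT;

    if (is_ram && pfn < MAX_LOW_PFN)
        return "__va() direct map";
    return "ioremap_cache()";
}

int main(void)
{
    printf("16MB RAM: %s\n", map_strategy(0x01000000UL, 1));
    printf("1GB  RAM: %s\n", map_strategy(0x40000000UL, 1));
    return 0;
}
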
28644diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
28645index d87dd6d..bf3fa66 100644
28646--- a/arch/x86/mm/kmemcheck/kmemcheck.c
28647+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
28648@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
28649 * memory (e.g. tracked pages)? For now, we need this to avoid
28650 * invoking kmemcheck for PnP BIOS calls.
28651 */
28652- if (regs->flags & X86_VM_MASK)
28653+ if (v8086_mode(regs))
28654 return false;
28655- if (regs->cs != __KERNEL_CS)
28656+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
28657 return false;
28658
28659 pte = kmemcheck_pte_lookup(address);
28660diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
28661index 845df68..1d8d29f 100644
28662--- a/arch/x86/mm/mmap.c
28663+++ b/arch/x86/mm/mmap.c
28664@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
28665 * Leave an at least ~128 MB hole with possible stack randomization.
28666 */
28667 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
28668-#define MAX_GAP (TASK_SIZE/6*5)
28669+#define MAX_GAP (pax_task_size/6*5)
28670
28671 static int mmap_is_legacy(void)
28672 {
28673@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
28674 return rnd << PAGE_SHIFT;
28675 }
28676
28677-static unsigned long mmap_base(void)
28678+static unsigned long mmap_base(struct mm_struct *mm)
28679 {
28680 unsigned long gap = rlimit(RLIMIT_STACK);
28681+ unsigned long pax_task_size = TASK_SIZE;
28682+
28683+#ifdef CONFIG_PAX_SEGMEXEC
28684+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
28685+ pax_task_size = SEGMEXEC_TASK_SIZE;
28686+#endif
28687
28688 if (gap < MIN_GAP)
28689 gap = MIN_GAP;
28690 else if (gap > MAX_GAP)
28691 gap = MAX_GAP;
28692
28693- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
28694+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
28695 }
28696
28697 /*
28698 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
28699 * does, but not when emulating X86_32
28700 */
28701-static unsigned long mmap_legacy_base(void)
28702+static unsigned long mmap_legacy_base(struct mm_struct *mm)
28703 {
28704- if (mmap_is_ia32())
28705+ if (mmap_is_ia32()) {
28706+
28707+#ifdef CONFIG_PAX_SEGMEXEC
28708+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
28709+ return SEGMEXEC_TASK_UNMAPPED_BASE;
28710+ else
28711+#endif
28712+
28713 return TASK_UNMAPPED_BASE;
28714- else
28715+ } else
28716 return TASK_UNMAPPED_BASE + mmap_rnd();
28717 }
28718
28719@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
28720 void arch_pick_mmap_layout(struct mm_struct *mm)
28721 {
28722 if (mmap_is_legacy()) {
28723- mm->mmap_base = mmap_legacy_base();
28724+ mm->mmap_base = mmap_legacy_base(mm);
28725+
28726+#ifdef CONFIG_PAX_RANDMMAP
28727+ if (mm->pax_flags & MF_PAX_RANDMMAP)
28728+ mm->mmap_base += mm->delta_mmap;
28729+#endif
28730+
28731 mm->get_unmapped_area = arch_get_unmapped_area;
28732 mm->unmap_area = arch_unmap_area;
28733 } else {
28734- mm->mmap_base = mmap_base();
28735+ mm->mmap_base = mmap_base(mm);
28736+
28737+#ifdef CONFIG_PAX_RANDMMAP
28738+ if (mm->pax_flags & MF_PAX_RANDMMAP)
28739+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
28740+#endif
28741+
28742 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
28743 mm->unmap_area = arch_unmap_area_topdown;
28744 }
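
The mmap_base() above clamps the stack gap between MIN_GAP and MAX_GAP before subtracting it, plus the ASLR offset, from the task size. A runnable sketch of that computation; TASK_SIZE and the inputs are illustrative:

#include <stdio.h>

#define TASK_SIZE     0xC0000000UL
#define MIN_GAP       (128UL * 1024 * 1024)
#define MAX_GAP       (TASK_SIZE / 6 * 5)
#define PAGE_ALIGN(x) (((x) + 0xFFFUL) & ~0xFFFUL)

static unsigned long mmap_base(unsigned long stack_rlimit, unsigned long rnd)
{
    unsigned long gap = stack_rlimit;

    if (gap < MIN_GAP)
        gap = MIN_GAP;
    else if (gap > MAX_GAP)
        gap = MAX_GAP;
    return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}

int main(void)
{
    printf("%#lx\n", mmap_base(8UL << 20, 0));        /* small rlimit -> MIN_GAP */
    printf("%#lx\n", mmap_base(~0UL, 0x00400000UL));  /* huge rlimit  -> MAX_GAP */
    return 0;
}

The clamp guarantees the mmap region never collides with the stack (MIN_GAP) while still leaving most of the address space usable (MAX_GAP).
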
28745diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
28746index dc0b727..f612039 100644
28747--- a/arch/x86/mm/mmio-mod.c
28748+++ b/arch/x86/mm/mmio-mod.c
28749@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
28750 break;
28751 default:
28752 {
28753- unsigned char *ip = (unsigned char *)instptr;
28754+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
28755 my_trace->opcode = MMIO_UNKNOWN_OP;
28756 my_trace->width = 0;
28757 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
28758@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
28759 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
28760 void __iomem *addr)
28761 {
28762- static atomic_t next_id;
28763+ static atomic_unchecked_t next_id;
28764 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
28765 /* These are page-unaligned. */
28766 struct mmiotrace_map map = {
28767@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
28768 .private = trace
28769 },
28770 .phys = offset,
28771- .id = atomic_inc_return(&next_id)
28772+ .id = atomic_inc_return_unchecked(&next_id)
28773 };
28774 map.map_id = trace->id;
28775
28776@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
28777 ioremap_trace_core(offset, size, addr);
28778 }
28779
28780-static void iounmap_trace_core(volatile void __iomem *addr)
28781+static void iounmap_trace_core(const volatile void __iomem *addr)
28782 {
28783 struct mmiotrace_map map = {
28784 .phys = 0,
28785@@ -328,7 +328,7 @@ not_enabled:
28786 }
28787 }
28788
28789-void mmiotrace_iounmap(volatile void __iomem *addr)
28790+void mmiotrace_iounmap(const volatile void __iomem *addr)
28791 {
28792 might_sleep();
28793 if (is_enabled()) /* recheck and proper locking in *_core() */
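
The next_id change above is only a switch to PaX's *_unchecked atomics, which keep normal semantics but are exempt from the size-overflow instrumentation, since a wrapping trace id is harmless. The underlying pattern is a monotonically increasing id per traced mapping; a sketch with C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint next_id;

static unsigned int new_trace_id(void)
{
    /* atomic_inc_return() semantics: increment, then use the result */
    return atomic_fetch_add(&next_id, 1) + 1;
}

int main(void)
{
    for (int i = 0; i < 3; i++)
        printf("mapping id %u\n", new_trace_id());  /* 1, 2, 3 */
    return 0;
}
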
28794diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
28795index 8504f36..5fc68f2 100644
28796--- a/arch/x86/mm/numa.c
28797+++ b/arch/x86/mm/numa.c
28798@@ -478,7 +478,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
28799 return true;
28800 }
28801
28802-static int __init numa_register_memblks(struct numa_meminfo *mi)
28803+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
28804 {
28805 unsigned long uninitialized_var(pfn_align);
28806 int i, nid;
28807diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
28808index b008656..773eac2 100644
28809--- a/arch/x86/mm/pageattr-test.c
28810+++ b/arch/x86/mm/pageattr-test.c
28811@@ -36,7 +36,7 @@ enum {
28812
28813 static int pte_testbit(pte_t pte)
28814 {
28815- return pte_flags(pte) & _PAGE_UNUSED1;
28816+ return pte_flags(pte) & _PAGE_CPA_TEST;
28817 }
28818
28819 struct split_state {
28820diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
28821index a718e0d..77419bc 100644
28822--- a/arch/x86/mm/pageattr.c
28823+++ b/arch/x86/mm/pageattr.c
28824@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
28825 */
28826 #ifdef CONFIG_PCI_BIOS
28827 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
28828- pgprot_val(forbidden) |= _PAGE_NX;
28829+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
28830 #endif
28831
28832 /*
28833@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
28834 * Does not cover __inittext since that is gone later on. On
28835 * 64bit we do not enforce !NX on the low mapping
28836 */
28837- if (within(address, (unsigned long)_text, (unsigned long)_etext))
28838- pgprot_val(forbidden) |= _PAGE_NX;
28839+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
28840+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
28841
28842+#ifdef CONFIG_DEBUG_RODATA
28843 /*
28844 * The .rodata section needs to be read-only. Using the pfn
28845 * catches all aliases.
28846@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
28847 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
28848 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
28849 pgprot_val(forbidden) |= _PAGE_RW;
28850+#endif
28851
28852 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
28853 /*
28854@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
28855 }
28856 #endif
28857
28858+#ifdef CONFIG_PAX_KERNEXEC
28859+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
28860+ pgprot_val(forbidden) |= _PAGE_RW;
28861+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
28862+ }
28863+#endif
28864+
28865 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
28866
28867 return prot;
28868@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
28869 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
28870 {
28871 /* change init_mm */
28872+ pax_open_kernel();
28873 set_pte_atomic(kpte, pte);
28874+
28875 #ifdef CONFIG_X86_32
28876 if (!SHARED_KERNEL_PMD) {
28877+
28878+#ifdef CONFIG_PAX_PER_CPU_PGD
28879+ unsigned long cpu;
28880+#else
28881 struct page *page;
28882+#endif
28883
28884+#ifdef CONFIG_PAX_PER_CPU_PGD
28885+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
28886+ pgd_t *pgd = get_cpu_pgd(cpu);
28887+#else
28888 list_for_each_entry(page, &pgd_list, lru) {
28889- pgd_t *pgd;
28890+ pgd_t *pgd = (pgd_t *)page_address(page);
28891+#endif
28892+
28893 pud_t *pud;
28894 pmd_t *pmd;
28895
28896- pgd = (pgd_t *)page_address(page) + pgd_index(address);
28897+ pgd += pgd_index(address);
28898 pud = pud_offset(pgd, address);
28899 pmd = pmd_offset(pud, address);
28900 set_pte_atomic((pte_t *)pmd, pte);
28901 }
28902 }
28903 #endif
28904+ pax_close_kernel();
28905 }
28906
28907 static int
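The pageattr.c hunks above tighten static_protections() in three ways: NX bits are always masked with __supported_pte_mask so the result stays valid on CPUs without NX, the .text range is translated through ktla_ktva() to follow KERNEXEC's relocated kernel image, and under PAX_KERNEXEC the whole _text.._sdata range is forced read-only and non-executable. All of it leans on the file-local within() helper, whose semantics are the usual half-open range test:

    /* Semantics of the range check used above (the real helper is
     * local to arch/x86/mm/pageattr.c). */
    static inline int within(unsigned long addr, unsigned long start,
                             unsigned long end)
    {
        return addr >= start && addr < end;
    }
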
28908diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
28909index 0eb572e..92f5c1e 100644
28910--- a/arch/x86/mm/pat.c
28911+++ b/arch/x86/mm/pat.c
28912@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
28913
28914 if (!entry) {
28915 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
28916- current->comm, current->pid, start, end - 1);
28917+ current->comm, task_pid_nr(current), start, end - 1);
28918 return -EINVAL;
28919 }
28920
28921@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28922
28923 while (cursor < to) {
28924 if (!devmem_is_allowed(pfn)) {
28925- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
28926- current->comm, from, to - 1);
28927+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
28928+ current->comm, from, to - 1, cursor);
28929 return 0;
28930 }
28931 cursor += PAGE_SIZE;
28932@@ -570,7 +570,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
28933 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
28934 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
28935 "for [mem %#010Lx-%#010Lx]\n",
28936- current->comm, current->pid,
28937+ current->comm, task_pid_nr(current),
28938 cattr_name(flags),
28939 base, (unsigned long long)(base + size-1));
28940 return -EINVAL;
28941@@ -605,7 +605,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
28942 flags = lookup_memtype(paddr);
28943 if (want_flags != flags) {
28944 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
28945- current->comm, current->pid,
28946+ current->comm, task_pid_nr(current),
28947 cattr_name(want_flags),
28948 (unsigned long long)paddr,
28949 (unsigned long long)(paddr + size - 1),
28950@@ -627,7 +627,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
28951 free_memtype(paddr, paddr + size);
28952 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
28953 " for [mem %#010Lx-%#010Lx], got %s\n",
28954- current->comm, current->pid,
28955+ current->comm, task_pid_nr(current),
28956 cattr_name(want_flags),
28957 (unsigned long long)paddr,
28958 (unsigned long long)(paddr + size - 1),
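All four pat.c hunks make the same substitution in their diagnostics: task_pid_nr(current) instead of raw current->pid. The two agree in the initial pid namespace, but the accessor goes through the task's struct pid, which keeps reporting consistent with the rest of the hardened kernel. Usage pattern, as in the hunks:

    /* Preferred form for the log lines touched above. */
    printk(KERN_INFO "%s:%d freeing invalid memtype\n",
           current->comm, task_pid_nr(current));
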
28959diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
28960index 9f0614d..92ae64a 100644
28961--- a/arch/x86/mm/pf_in.c
28962+++ b/arch/x86/mm/pf_in.c
28963@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
28964 int i;
28965 enum reason_type rv = OTHERS;
28966
28967- p = (unsigned char *)ins_addr;
28968+ p = (unsigned char *)ktla_ktva(ins_addr);
28969 p += skip_prefix(p, &prf);
28970 p += get_opcode(p, &opcode);
28971
28972@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
28973 struct prefix_bits prf;
28974 int i;
28975
28976- p = (unsigned char *)ins_addr;
28977+ p = (unsigned char *)ktla_ktva(ins_addr);
28978 p += skip_prefix(p, &prf);
28979 p += get_opcode(p, &opcode);
28980
28981@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
28982 struct prefix_bits prf;
28983 int i;
28984
28985- p = (unsigned char *)ins_addr;
28986+ p = (unsigned char *)ktla_ktva(ins_addr);
28987 p += skip_prefix(p, &prf);
28988 p += get_opcode(p, &opcode);
28989
28990@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
28991 struct prefix_bits prf;
28992 int i;
28993
28994- p = (unsigned char *)ins_addr;
28995+ p = (unsigned char *)ktla_ktva(ins_addr);
28996 p += skip_prefix(p, &prf);
28997 p += get_opcode(p, &opcode);
28998 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
28999@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
29000 struct prefix_bits prf;
29001 int i;
29002
29003- p = (unsigned char *)ins_addr;
29004+ p = (unsigned char *)ktla_ktva(ins_addr);
29005 p += skip_prefix(p, &prf);
29006 p += get_opcode(p, &opcode);
29007 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
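Every instruction-decode helper in pf_in.c now fetches code bytes through ktla_ktva() ("kernel text logical to virtual address"). Under PAX_KERNEXEC on i386 the executable kernel image lives at an alias of its ordinary mapping, so reading instruction bytes requires the translated address; with KERNEXEC off the macro is an identity. A hedged sketch of that fallback (the KERNEXEC variant, defined in PaX headers, adds the text-alias delta):

    /* Identity mapping when KERNEXEC is disabled -- an assumption
     * mirroring how the patch behaves in that configuration. */
    #ifndef ktla_ktva
    # define ktla_ktva(addr) (addr)
    #endif
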
29008diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
29009index e27fbf8..8b56dc9 100644
29010--- a/arch/x86/mm/pgtable.c
29011+++ b/arch/x86/mm/pgtable.c
29012@@ -84,10 +84,64 @@ static inline void pgd_list_del(pgd_t *pgd)
29013 list_del(&page->lru);
29014 }
29015
29016-#define UNSHARED_PTRS_PER_PGD \
29017- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
29018+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29019+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
29020
29021+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
29022+{
29023+ unsigned int count = USER_PGD_PTRS;
29024
29025+ while (count--)
29026+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
29027+}
29028+#endif
29029+
29030+#ifdef CONFIG_PAX_PER_CPU_PGD
29031+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
29032+{
29033+ unsigned int count = USER_PGD_PTRS;
29034+
29035+ while (count--) {
29036+ pgd_t pgd;
29037+
29038+#ifdef CONFIG_X86_64
29039+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
29040+#else
29041+ pgd = *src++;
29042+#endif
29043+
29044+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29045+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
29046+#endif
29047+
29048+ *dst++ = pgd;
29049+ }
29050+
29051+}
29052+#endif
29053+
29054+#ifdef CONFIG_X86_64
29055+#define pxd_t pud_t
29056+#define pyd_t pgd_t
29057+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
29058+#define pxd_free(mm, pud) pud_free((mm), (pud))
29059+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
29060+#define pyd_offset(mm, address) pgd_offset((mm), (address))
29061+#define PYD_SIZE PGDIR_SIZE
29062+#else
29063+#define pxd_t pmd_t
29064+#define pyd_t pud_t
29065+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
29066+#define pxd_free(mm, pud) pmd_free((mm), (pud))
29067+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
29068+#define pyd_offset(mm, address) pud_offset((mm), (address))
29069+#define PYD_SIZE PUD_SIZE
29070+#endif
29071+
29072+#ifdef CONFIG_PAX_PER_CPU_PGD
29073+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
29074+static inline void pgd_dtor(pgd_t *pgd) {}
29075+#else
29076 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
29077 {
29078 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
29079@@ -128,6 +182,7 @@ static void pgd_dtor(pgd_t *pgd)
29080 pgd_list_del(pgd);
29081 spin_unlock(&pgd_lock);
29082 }
29083+#endif
29084
29085 /*
29086 * List of all pgd's needed for non-PAE so it can invalidate entries
29087@@ -140,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
29088 * -- nyc
29089 */
29090
29091-#ifdef CONFIG_X86_PAE
29092+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
29093 /*
29094 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
29095 * updating the top-level pagetable entries to guarantee the
29096@@ -152,7 +207,7 @@ static void pgd_dtor(pgd_t *pgd)
29097 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
29098 * and initialize the kernel pmds here.
29099 */
29100-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
29101+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
29102
29103 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
29104 {
29105@@ -170,36 +225,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
29106 */
29107 flush_tlb_mm(mm);
29108 }
29109+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
29110+#define PREALLOCATED_PXDS USER_PGD_PTRS
29111 #else /* !CONFIG_X86_PAE */
29112
29113 /* No need to prepopulate any pagetable entries in non-PAE modes. */
29114-#define PREALLOCATED_PMDS 0
29115+#define PREALLOCATED_PXDS 0
29116
29117 #endif /* CONFIG_X86_PAE */
29118
29119-static void free_pmds(pmd_t *pmds[])
29120+static void free_pxds(pxd_t *pxds[])
29121 {
29122 int i;
29123
29124- for(i = 0; i < PREALLOCATED_PMDS; i++)
29125- if (pmds[i])
29126- free_page((unsigned long)pmds[i]);
29127+ for(i = 0; i < PREALLOCATED_PXDS; i++)
29128+ if (pxds[i])
29129+ free_page((unsigned long)pxds[i]);
29130 }
29131
29132-static int preallocate_pmds(pmd_t *pmds[])
29133+static int preallocate_pxds(pxd_t *pxds[])
29134 {
29135 int i;
29136 bool failed = false;
29137
29138- for(i = 0; i < PREALLOCATED_PMDS; i++) {
29139- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
29140- if (pmd == NULL)
29141+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
29142+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
29143+ if (pxd == NULL)
29144 failed = true;
29145- pmds[i] = pmd;
29146+ pxds[i] = pxd;
29147 }
29148
29149 if (failed) {
29150- free_pmds(pmds);
29151+ free_pxds(pxds);
29152 return -ENOMEM;
29153 }
29154
29155@@ -212,51 +269,55 @@ static int preallocate_pmds(pmd_t *pmds[])
29156 * preallocate which never got a corresponding vma will need to be
29157 * freed manually.
29158 */
29159-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
29160+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
29161 {
29162 int i;
29163
29164- for(i = 0; i < PREALLOCATED_PMDS; i++) {
29165+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
29166 pgd_t pgd = pgdp[i];
29167
29168 if (pgd_val(pgd) != 0) {
29169- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
29170+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
29171
29172- pgdp[i] = native_make_pgd(0);
29173+ set_pgd(pgdp + i, native_make_pgd(0));
29174
29175- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
29176- pmd_free(mm, pmd);
29177+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
29178+ pxd_free(mm, pxd);
29179 }
29180 }
29181 }
29182
29183-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
29184+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
29185 {
29186- pud_t *pud;
29187+ pyd_t *pyd;
29188 unsigned long addr;
29189 int i;
29190
29191- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
29192+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
29193 return;
29194
29195- pud = pud_offset(pgd, 0);
29196+#ifdef CONFIG_X86_64
29197+ pyd = pyd_offset(mm, 0L);
29198+#else
29199+ pyd = pyd_offset(pgd, 0L);
29200+#endif
29201
29202- for (addr = i = 0; i < PREALLOCATED_PMDS;
29203- i++, pud++, addr += PUD_SIZE) {
29204- pmd_t *pmd = pmds[i];
29205+ for (addr = i = 0; i < PREALLOCATED_PXDS;
29206+ i++, pyd++, addr += PYD_SIZE) {
29207+ pxd_t *pxd = pxds[i];
29208
29209 if (i >= KERNEL_PGD_BOUNDARY)
29210- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
29211- sizeof(pmd_t) * PTRS_PER_PMD);
29212+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
29213+ sizeof(pxd_t) * PTRS_PER_PMD);
29214
29215- pud_populate(mm, pud, pmd);
29216+ pyd_populate(mm, pyd, pxd);
29217 }
29218 }
29219
29220 pgd_t *pgd_alloc(struct mm_struct *mm)
29221 {
29222 pgd_t *pgd;
29223- pmd_t *pmds[PREALLOCATED_PMDS];
29224+ pxd_t *pxds[PREALLOCATED_PXDS];
29225
29226 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
29227
29228@@ -265,11 +326,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
29229
29230 mm->pgd = pgd;
29231
29232- if (preallocate_pmds(pmds) != 0)
29233+ if (preallocate_pxds(pxds) != 0)
29234 goto out_free_pgd;
29235
29236 if (paravirt_pgd_alloc(mm) != 0)
29237- goto out_free_pmds;
29238+ goto out_free_pxds;
29239
29240 /*
29241 * Make sure that pre-populating the pmds is atomic with
29242@@ -279,14 +340,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
29243 spin_lock(&pgd_lock);
29244
29245 pgd_ctor(mm, pgd);
29246- pgd_prepopulate_pmd(mm, pgd, pmds);
29247+ pgd_prepopulate_pxd(mm, pgd, pxds);
29248
29249 spin_unlock(&pgd_lock);
29250
29251 return pgd;
29252
29253-out_free_pmds:
29254- free_pmds(pmds);
29255+out_free_pxds:
29256+ free_pxds(pxds);
29257 out_free_pgd:
29258 free_page((unsigned long)pgd);
29259 out:
29260@@ -295,7 +356,7 @@ out:
29261
29262 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
29263 {
29264- pgd_mop_up_pmds(mm, pgd);
29265+ pgd_mop_up_pxds(mm, pgd);
29266 pgd_dtor(pgd);
29267 paravirt_pgd_free(mm, pgd);
29268 free_page((unsigned long)pgd);
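The pgtable.c rework introduces a pxd/pyd naming layer so one body of preallocation code covers both layouts: on 32-bit PAE the preallocated level is the pmd under a pud, while on 64-bit (needed for PAX_PER_CPU_PGD) it is the pud under the pgd. The new __clone_user_pgds()/__shadow_user_pgds() helpers copy the user half of the page tables into each per-CPU pgd; the UDEREF shadow strips _PAGE_USER and sets NX so kernel execution never runs on user-accessible mappings. Simplified shape of the clone step (the real code above also applies _PAGE_USER and clone_pgd_mask fixups):

    /* Minimal sketch of copying the user half of a pgd. */
    static void clone_user_half(pgd_t *dst, const pgd_t *src, unsigned int n)
    {
        while (n--)
            *dst++ = *src++;
    }
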
29269diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
29270index a69bcb8..19068ab 100644
29271--- a/arch/x86/mm/pgtable_32.c
29272+++ b/arch/x86/mm/pgtable_32.c
29273@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
29274 return;
29275 }
29276 pte = pte_offset_kernel(pmd, vaddr);
29277+
29278+ pax_open_kernel();
29279 if (pte_val(pteval))
29280 set_pte_at(&init_mm, vaddr, pte, pteval);
29281 else
29282 pte_clear(&init_mm, vaddr, pte);
29283+ pax_close_kernel();
29284
29285 /*
29286 * It's enough to flush this one mapping.
29287diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
29288index d2e2735..5c6586f 100644
29289--- a/arch/x86/mm/physaddr.c
29290+++ b/arch/x86/mm/physaddr.c
29291@@ -8,7 +8,7 @@
29292
29293 #ifdef CONFIG_X86_64
29294
29295-unsigned long __phys_addr(unsigned long x)
29296+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
29297 {
29298 if (x >= __START_KERNEL_map) {
29299 x -= __START_KERNEL_map;
29300@@ -45,7 +45,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
29301 #else
29302
29303 #ifdef CONFIG_DEBUG_VIRTUAL
29304-unsigned long __phys_addr(unsigned long x)
29305+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
29306 {
29307 /* VMALLOC_* aren't constants */
29308 VIRTUAL_BUG_ON(x < PAGE_OFFSET);
29309diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
29310index 410531d..0f16030 100644
29311--- a/arch/x86/mm/setup_nx.c
29312+++ b/arch/x86/mm/setup_nx.c
29313@@ -5,8 +5,10 @@
29314 #include <asm/pgtable.h>
29315 #include <asm/proto.h>
29316
29317+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
29318 static int disable_nx __cpuinitdata;
29319
29320+#ifndef CONFIG_PAX_PAGEEXEC
29321 /*
29322 * noexec = on|off
29323 *
29324@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
29325 return 0;
29326 }
29327 early_param("noexec", noexec_setup);
29328+#endif
29329+
29330+#endif
29331
29332 void __cpuinit x86_configure_nx(void)
29333 {
29334+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
29335 if (cpu_has_nx && !disable_nx)
29336 __supported_pte_mask |= _PAGE_NX;
29337 else
29338+#endif
29339 __supported_pte_mask &= ~_PAGE_NX;
29340 }
29341
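setup_nx.c is restructured so the NX plumbing only exists where hardware NX is representable at all (64-bit or 32-bit PAE), and the "noexec=" command-line switch disappears when PAX_PAGEEXEC is enabled, since that feature must not be switchable off underneath PaX. The net effect of the #ifdef rework, written out as straight C:

    void x86_configure_nx(void)
    {
    #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
        if (cpu_has_nx && !disable_nx)
            __supported_pte_mask |= _PAGE_NX;
        else
    #endif
            __supported_pte_mask &= ~_PAGE_NX;
    }
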
29342diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
29343index 13a6b29..c2fff23 100644
29344--- a/arch/x86/mm/tlb.c
29345+++ b/arch/x86/mm/tlb.c
29346@@ -48,7 +48,11 @@ void leave_mm(int cpu)
29347 BUG();
29348 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
29349 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
29350+
29351+#ifndef CONFIG_PAX_PER_CPU_PGD
29352 load_cr3(swapper_pg_dir);
29353+#endif
29354+
29355 }
29356 }
29357 EXPORT_SYMBOL_GPL(leave_mm);
29358diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
29359index 877b9a1..a8ecf42 100644
29360--- a/arch/x86/net/bpf_jit.S
29361+++ b/arch/x86/net/bpf_jit.S
29362@@ -9,6 +9,7 @@
29363 */
29364 #include <linux/linkage.h>
29365 #include <asm/dwarf2.h>
29366+#include <asm/alternative-asm.h>
29367
29368 /*
29369 * Calling convention :
29370@@ -35,6 +36,7 @@ sk_load_word_positive_offset:
29371 jle bpf_slow_path_word
29372 mov (SKBDATA,%rsi),%eax
29373 bswap %eax /* ntohl() */
29374+ pax_force_retaddr
29375 ret
29376
29377 sk_load_half:
29378@@ -52,6 +54,7 @@ sk_load_half_positive_offset:
29379 jle bpf_slow_path_half
29380 movzwl (SKBDATA,%rsi),%eax
29381 rol $8,%ax # ntohs()
29382+ pax_force_retaddr
29383 ret
29384
29385 sk_load_byte:
29386@@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
29387 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
29388 jle bpf_slow_path_byte
29389 movzbl (SKBDATA,%rsi),%eax
29390+ pax_force_retaddr
29391 ret
29392
29393 /**
29394@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
29395 movzbl (SKBDATA,%rsi),%ebx
29396 and $15,%bl
29397 shl $2,%bl
29398+ pax_force_retaddr
29399 ret
29400
29401 /* rsi contains offset and can be scratched */
29402@@ -109,6 +114,7 @@ bpf_slow_path_word:
29403 js bpf_error
29404 mov -12(%rbp),%eax
29405 bswap %eax
29406+ pax_force_retaddr
29407 ret
29408
29409 bpf_slow_path_half:
29410@@ -117,12 +123,14 @@ bpf_slow_path_half:
29411 mov -12(%rbp),%ax
29412 rol $8,%ax
29413 movzwl %ax,%eax
29414+ pax_force_retaddr
29415 ret
29416
29417 bpf_slow_path_byte:
29418 bpf_slow_path_common(1)
29419 js bpf_error
29420 movzbl -12(%rbp),%eax
29421+ pax_force_retaddr
29422 ret
29423
29424 bpf_slow_path_byte_msh:
29425@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
29426 and $15,%al
29427 shl $2,%al
29428 xchg %eax,%ebx
29429+ pax_force_retaddr
29430 ret
29431
29432 #define sk_negative_common(SIZE) \
29433@@ -157,6 +166,7 @@ sk_load_word_negative_offset:
29434 sk_negative_common(4)
29435 mov (%rax), %eax
29436 bswap %eax
29437+ pax_force_retaddr
29438 ret
29439
29440 bpf_slow_path_half_neg:
29441@@ -168,6 +178,7 @@ sk_load_half_negative_offset:
29442 mov (%rax),%ax
29443 rol $8,%ax
29444 movzwl %ax,%eax
29445+ pax_force_retaddr
29446 ret
29447
29448 bpf_slow_path_byte_neg:
29449@@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
29450 .globl sk_load_byte_negative_offset
29451 sk_negative_common(1)
29452 movzbl (%rax), %eax
29453+ pax_force_retaddr
29454 ret
29455
29456 bpf_slow_path_byte_msh_neg:
29457@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
29458 and $15,%al
29459 shl $2,%al
29460 xchg %eax,%ebx
29461+ pax_force_retaddr
29462 ret
29463
29464 bpf_error:
29465@@ -197,4 +210,5 @@ bpf_error:
29466 xor %eax,%eax
29467 mov -8(%rbp),%rbx
29468 leaveq
29469+ pax_force_retaddr
29470 ret
29471diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
29472index d11a470..3f9adff3 100644
29473--- a/arch/x86/net/bpf_jit_comp.c
29474+++ b/arch/x86/net/bpf_jit_comp.c
29475@@ -12,6 +12,7 @@
29476 #include <linux/netdevice.h>
29477 #include <linux/filter.h>
29478 #include <linux/if_vlan.h>
29479+#include <linux/random.h>
29480
29481 /*
29482 * Conventions :
29483@@ -49,13 +50,87 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
29484 return ptr + len;
29485 }
29486
29487+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29488+#define MAX_INSTR_CODE_SIZE 96
29489+#else
29490+#define MAX_INSTR_CODE_SIZE 64
29491+#endif
29492+
29493 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
29494
29495 #define EMIT1(b1) EMIT(b1, 1)
29496 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
29497 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
29498 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
29499+
29500+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29501+/* original constant will appear in ecx */
29502+#define DILUTE_CONST_SEQUENCE(_off, _key) \
29503+do { \
29504+ /* mov ecx, randkey */ \
29505+ EMIT1(0xb9); \
29506+ EMIT(_key, 4); \
29507+ /* xor ecx, randkey ^ off */ \
29508+ EMIT2(0x81, 0xf1); \
29509+ EMIT((_key) ^ (_off), 4); \
29510+} while (0)
29511+
29512+#define EMIT1_off32(b1, _off) \
29513+do { \
29514+ switch (b1) { \
29515+ case 0x05: /* add eax, imm32 */ \
29516+ case 0x2d: /* sub eax, imm32 */ \
29517+ case 0x25: /* and eax, imm32 */ \
29518+ case 0x0d: /* or eax, imm32 */ \
29519+ case 0xb8: /* mov eax, imm32 */ \
29520+ case 0x3d: /* cmp eax, imm32 */ \
29521+ case 0xa9: /* test eax, imm32 */ \
29522+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29523+ EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
29524+ break; \
29525+ case 0xbb: /* mov ebx, imm32 */ \
29526+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29527+ /* mov ebx, ecx */ \
29528+ EMIT2(0x89, 0xcb); \
29529+ break; \
29530+ case 0xbe: /* mov esi, imm32 */ \
29531+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29532+ /* mov esi, ecx */ \
29533+ EMIT2(0x89, 0xce); \
29534+ break; \
29535+ case 0xe9: /* jmp rel imm32 */ \
29536+ EMIT1(b1); \
29537+ EMIT(_off, 4); \
29538+ /* prevent fall-through, we're not called if off = 0 */ \
29539+ EMIT(0xcccccccc, 4); \
29540+ EMIT(0xcccccccc, 4); \
29541+ break; \
29542+ default: \
29543+ EMIT1(b1); \
29544+ EMIT(_off, 4); \
29545+ } \
29546+} while (0)
29547+
29548+#define EMIT2_off32(b1, b2, _off) \
29549+do { \
29550+ if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \
29551+ EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \
29552+ EMIT(randkey, 4); \
29553+ EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \
29554+ EMIT((_off) - randkey, 4); \
29555+ } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
29556+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29557+ /* imul eax, ecx */ \
29558+ EMIT3(0x0f, 0xaf, 0xc1); \
29559+ } else { \
29560+ EMIT2(b1, b2); \
29561+ EMIT(_off, 4); \
29562+ } \
29563+} while (0)
29564+#else
29565 #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
29566+#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
29567+#endif
29568
29569 #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
29570 #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
29571@@ -90,6 +165,24 @@ do { \
29572 #define X86_JBE 0x76
29573 #define X86_JA 0x77
29574
29575+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29576+#define APPEND_FLOW_VERIFY() \
29577+do { \
29578+ /* mov ecx, randkey */ \
29579+ EMIT1(0xb9); \
29580+ EMIT(randkey, 4); \
29581+ /* cmp ecx, randkey */ \
29582+ EMIT2(0x81, 0xf9); \
29583+ EMIT(randkey, 4); \
29584+ /* jz after 8 int 3s */ \
29585+ EMIT2(0x74, 0x08); \
29586+ EMIT(0xcccccccc, 4); \
29587+ EMIT(0xcccccccc, 4); \
29588+} while (0)
29589+#else
29590+#define APPEND_FLOW_VERIFY() do { } while (0)
29591+#endif
29592+
29593 #define EMIT_COND_JMP(op, offset) \
29594 do { \
29595 if (is_near(offset)) \
29596@@ -97,6 +190,7 @@ do { \
29597 else { \
29598 EMIT2(0x0f, op + 0x10); \
29599 EMIT(offset, 4); /* jxx .+off32 */ \
29600+ APPEND_FLOW_VERIFY(); \
29601 } \
29602 } while (0)
29603
29604@@ -121,12 +215,17 @@ static inline void bpf_flush_icache(void *start, void *end)
29605 set_fs(old_fs);
29606 }
29607
29608+struct bpf_jit_work {
29609+ struct work_struct work;
29610+ void *image;
29611+};
29612+
29613 #define CHOOSE_LOAD_FUNC(K, func) \
29614 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
29615
29616 void bpf_jit_compile(struct sk_filter *fp)
29617 {
29618- u8 temp[64];
29619+ u8 temp[MAX_INSTR_CODE_SIZE];
29620 u8 *prog;
29621 unsigned int proglen, oldproglen = 0;
29622 int ilen, i;
29623@@ -139,6 +238,9 @@ void bpf_jit_compile(struct sk_filter *fp)
29624 unsigned int *addrs;
29625 const struct sock_filter *filter = fp->insns;
29626 int flen = fp->len;
29627+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29628+ unsigned int randkey;
29629+#endif
29630
29631 if (!bpf_jit_enable)
29632 return;
29633@@ -147,11 +249,19 @@ void bpf_jit_compile(struct sk_filter *fp)
29634 if (addrs == NULL)
29635 return;
29636
29637+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
29638+ if (!fp->work)
29639+ goto out;
29640+
29641+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29642+ randkey = get_random_int();
29643+#endif
29644+
29645 /* Before first pass, make a rough estimation of addrs[]
29646- * each bpf instruction is translated to less than 64 bytes
29647+ * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
29648 */
29649 for (proglen = 0, i = 0; i < flen; i++) {
29650- proglen += 64;
29651+ proglen += MAX_INSTR_CODE_SIZE;
29652 addrs[i] = proglen;
29653 }
29654 cleanup_addr = proglen; /* epilogue address */
29655@@ -261,10 +371,8 @@ void bpf_jit_compile(struct sk_filter *fp)
29656 case BPF_S_ALU_MUL_K: /* A *= K */
29657 if (is_imm8(K))
29658 EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
29659- else {
29660- EMIT2(0x69, 0xc0); /* imul imm32,%eax */
29661- EMIT(K, 4);
29662- }
29663+ else
29664+ EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
29665 break;
29666 case BPF_S_ALU_DIV_X: /* A /= X; */
29667 seen |= SEEN_XREG;
29668@@ -304,13 +412,23 @@ void bpf_jit_compile(struct sk_filter *fp)
29669 break;
29670 case BPF_S_ALU_MOD_K: /* A %= K; */
29671 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
29672+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29673+ DILUTE_CONST_SEQUENCE(K, randkey);
29674+#else
29675 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
29676+#endif
29677 EMIT2(0xf7, 0xf1); /* div %ecx */
29678 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
29679 break;
29680 case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
29681+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29682+ DILUTE_CONST_SEQUENCE(K, randkey);
29683+ // imul rax, rcx
29684+ EMIT4(0x48, 0x0f, 0xaf, 0xc1);
29685+#else
29686 EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
29687 EMIT(K, 4);
29688+#endif
29689 EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
29690 break;
29691 case BPF_S_ALU_AND_X:
29692@@ -564,8 +682,7 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
29693 if (is_imm8(K)) {
29694 EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
29695 } else {
29696- EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
29697- EMIT(K, 4);
29698+ EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
29699 }
29700 } else {
29701 EMIT2(0x89,0xde); /* mov %ebx,%esi */
29702@@ -648,17 +765,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
29703 break;
29704 default:
29705 /* hmm, too complex filter, give up with jit compiler */
29706- goto out;
29707+ goto error;
29708 }
29709 ilen = prog - temp;
29710 if (image) {
29711 if (unlikely(proglen + ilen > oldproglen)) {
29712 pr_err("bpb_jit_compile fatal error\n");
29713- kfree(addrs);
29714- module_free(NULL, image);
29715- return;
29716+ module_free_exec(NULL, image);
29717+ goto error;
29718 }
29719+ pax_open_kernel();
29720 memcpy(image + proglen, temp, ilen);
29721+ pax_close_kernel();
29722 }
29723 proglen += ilen;
29724 addrs[i] = proglen;
29725@@ -679,11 +797,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
29726 break;
29727 }
29728 if (proglen == oldproglen) {
29729- image = module_alloc(max_t(unsigned int,
29730- proglen,
29731- sizeof(struct work_struct)));
29732+ image = module_alloc_exec(proglen);
29733 if (!image)
29734- goto out;
29735+ goto error;
29736 }
29737 oldproglen = proglen;
29738 }
29739@@ -699,7 +815,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
29740 bpf_flush_icache(image, image + proglen);
29741
29742 fp->bpf_func = (void *)image;
29743- }
29744+ } else
29745+error:
29746+ kfree(fp->work);
29747+
29748 out:
29749 kfree(addrs);
29750 return;
29751@@ -707,18 +826,20 @@ out:
29752
29753 static void jit_free_defer(struct work_struct *arg)
29754 {
29755- module_free(NULL, arg);
29756+ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
29757+ kfree(arg);
29758 }
29759
29760 /* run from softirq, we must use a work_struct to call
29761- * module_free() from process context
29762+ * module_free_exec() from process context
29763 */
29764 void bpf_jit_free(struct sk_filter *fp)
29765 {
29766 if (fp->bpf_func != sk_run_filter) {
29767- struct work_struct *work = (struct work_struct *)fp->bpf_func;
29768+ struct work_struct *work = &fp->work->work;
29769
29770 INIT_WORK(work, jit_free_defer);
29771+ fp->work->image = fp->bpf_func;
29772 schedule_work(work);
29773 }
29774 }
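The bpf_jit_comp.c changes do two things: constant blinding (GRKERNSEC_JIT_HARDEN) and safer teardown. Blinding ensures attacker-chosen 32-bit immediates never appear verbatim in JIT output; DILUTE_CONST_SEQUENCE materializes them as key ^ (key ^ imm) with a per-filter random key, defeating JIT spraying, and the per-instruction size bound grows from 64 to 96 bytes to make room. Teardown now frees the executable image via a dedicated struct bpf_jit_work instead of punning the image itself into a work_struct. A conceptual sketch of the dilution (hypothetical helper, not the emitted x86):

    #include <stdint.h>

    /* What the emitted "mov ecx, key; xor ecx, key ^ imm" computes:
     * the immediate is reconstructed in a register without its bytes
     * ever appearing in the instruction stream. */
    static uint32_t diluted_value(uint32_t imm, uint32_t key)
    {
        uint32_t ecx = key;
        ecx ^= key ^ imm;
        return ecx;          /* == imm for any key */
    }
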
29775diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
29776index d6aa6e8..266395a 100644
29777--- a/arch/x86/oprofile/backtrace.c
29778+++ b/arch/x86/oprofile/backtrace.c
29779@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
29780 struct stack_frame_ia32 *fp;
29781 unsigned long bytes;
29782
29783- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
29784+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
29785 if (bytes != sizeof(bufhead))
29786 return NULL;
29787
29788- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
29789+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
29790
29791 oprofile_add_trace(bufhead[0].return_address);
29792
29793@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
29794 struct stack_frame bufhead[2];
29795 unsigned long bytes;
29796
29797- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
29798+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
29799 if (bytes != sizeof(bufhead))
29800 return NULL;
29801
29802@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
29803 {
29804 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
29805
29806- if (!user_mode_vm(regs)) {
29807+ if (!user_mode(regs)) {
29808 unsigned long stack = kernel_stack_pointer(regs);
29809 if (depth)
29810 dump_trace(NULL, regs, (unsigned long *)stack, 0,
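The backtrace.c hunks add __force_user/__force_kernel casts on the NMI-time user reads and switch the mode test to user_mode(). The casts are sparse address-space annotations that the patch's UDEREF-aware copy_from_user_nmi() prototype requires; at compile time they vanish. Assumed composition of the markers, following PaX's compiler headers (an assumption, not quoted from this excerpt):

    /* Assumption: these compose the standard sparse annotations. */
    #define __force_user    __force __user
    #define __force_kernel  __force __kernel
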
29811diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
29812index 48768df..ba9143c 100644
29813--- a/arch/x86/oprofile/nmi_int.c
29814+++ b/arch/x86/oprofile/nmi_int.c
29815@@ -23,6 +23,7 @@
29816 #include <asm/nmi.h>
29817 #include <asm/msr.h>
29818 #include <asm/apic.h>
29819+#include <asm/pgtable.h>
29820
29821 #include "op_counter.h"
29822 #include "op_x86_model.h"
29823@@ -774,8 +775,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
29824 if (ret)
29825 return ret;
29826
29827- if (!model->num_virt_counters)
29828- model->num_virt_counters = model->num_counters;
29829+ if (!model->num_virt_counters) {
29830+ pax_open_kernel();
29831+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
29832+ pax_close_kernel();
29833+ }
29834
29835 mux_init(ops);
29836
29837diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
29838index b2b9443..be58856 100644
29839--- a/arch/x86/oprofile/op_model_amd.c
29840+++ b/arch/x86/oprofile/op_model_amd.c
29841@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
29842 num_counters = AMD64_NUM_COUNTERS;
29843 }
29844
29845- op_amd_spec.num_counters = num_counters;
29846- op_amd_spec.num_controls = num_counters;
29847- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
29848+ pax_open_kernel();
29849+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
29850+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
29851+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
29852+ pax_close_kernel();
29853
29854 return 0;
29855 }
29856diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
29857index d90528e..0127e2b 100644
29858--- a/arch/x86/oprofile/op_model_ppro.c
29859+++ b/arch/x86/oprofile/op_model_ppro.c
29860@@ -19,6 +19,7 @@
29861 #include <asm/msr.h>
29862 #include <asm/apic.h>
29863 #include <asm/nmi.h>
29864+#include <asm/pgtable.h>
29865
29866 #include "op_x86_model.h"
29867 #include "op_counter.h"
29868@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
29869
29870 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
29871
29872- op_arch_perfmon_spec.num_counters = num_counters;
29873- op_arch_perfmon_spec.num_controls = num_counters;
29874+ pax_open_kernel();
29875+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
29876+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
29877+ pax_close_kernel();
29878 }
29879
29880 static int arch_perfmon_init(struct oprofile_operations *ignore)
29881diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
29882index 71e8a67..6a313bb 100644
29883--- a/arch/x86/oprofile/op_x86_model.h
29884+++ b/arch/x86/oprofile/op_x86_model.h
29885@@ -52,7 +52,7 @@ struct op_x86_model_spec {
29886 void (*switch_ctrl)(struct op_x86_model_spec const *model,
29887 struct op_msrs const * const msrs);
29888 #endif
29889-};
29890+} __do_const;
29891
29892 struct op_counter_config;
29893
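__do_const on struct op_x86_model_spec is an annotation for grsecurity's constify gcc plugin, which makes function-pointer-heavy structures read-only at compile time. That is why the earlier oprofile hunks (nmi_int.c, op_model_amd.c, op_model_ppro.c) cast away const for their one-time setup writes and bracket them with pax_open_kernel()/pax_close_kernel(), which briefly lifts kernel write protection. The write pattern, as used above:

    /* One-time store into a constified ops structure. */
    pax_open_kernel();
    *(unsigned int *)&op_amd_spec.num_counters = num_counters;
    pax_close_kernel();
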
29894diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
29895index e9e6ed5..e47ae67 100644
29896--- a/arch/x86/pci/amd_bus.c
29897+++ b/arch/x86/pci/amd_bus.c
29898@@ -337,7 +337,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self,
29899 return NOTIFY_OK;
29900 }
29901
29902-static struct notifier_block __cpuinitdata amd_cpu_notifier = {
29903+static struct notifier_block amd_cpu_notifier = {
29904 .notifier_call = amd_cpu_notify,
29905 };
29906
29907diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
29908index 372e9b8..e775a6c 100644
29909--- a/arch/x86/pci/irq.c
29910+++ b/arch/x86/pci/irq.c
29911@@ -50,7 +50,7 @@ struct irq_router {
29912 struct irq_router_handler {
29913 u16 vendor;
29914 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
29915-};
29916+} __do_const;
29917
29918 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
29919 void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
29920@@ -794,7 +794,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
29921 return 0;
29922 }
29923
29924-static __initdata struct irq_router_handler pirq_routers[] = {
29925+static __initconst const struct irq_router_handler pirq_routers[] = {
29926 { PCI_VENDOR_ID_INTEL, intel_router_probe },
29927 { PCI_VENDOR_ID_AL, ali_router_probe },
29928 { PCI_VENDOR_ID_ITE, ite_router_probe },
29929@@ -821,7 +821,7 @@ static struct pci_dev *pirq_router_dev;
29930 static void __init pirq_find_router(struct irq_router *r)
29931 {
29932 struct irq_routing_table *rt = pirq_table;
29933- struct irq_router_handler *h;
29934+ const struct irq_router_handler *h;
29935
29936 #ifdef CONFIG_PCI_BIOS
29937 if (!rt->signature) {
29938@@ -1094,7 +1094,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
29939 return 0;
29940 }
29941
29942-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
29943+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
29944 {
29945 .callback = fix_broken_hp_bios_irq9,
29946 .ident = "HP Pavilion N5400 Series Laptop",
29947diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
29948index 6eb18c4..20d83de 100644
29949--- a/arch/x86/pci/mrst.c
29950+++ b/arch/x86/pci/mrst.c
29951@@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
29952 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
29953 pci_mmcfg_late_init();
29954 pcibios_enable_irq = mrst_pci_irq_enable;
29955- pci_root_ops = pci_mrst_ops;
29956+ pax_open_kernel();
29957+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
29958+ pax_close_kernel();
29959 pci_soc_mode = 1;
29960 /* Continue with standard init */
29961 return 1;
29962diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
29963index c77b24a..c979855 100644
29964--- a/arch/x86/pci/pcbios.c
29965+++ b/arch/x86/pci/pcbios.c
29966@@ -79,7 +79,7 @@ union bios32 {
29967 static struct {
29968 unsigned long address;
29969 unsigned short segment;
29970-} bios32_indirect = { 0, __KERNEL_CS };
29971+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
29972
29973 /*
29974 * Returns the entry point for the given service, NULL on error
29975@@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
29976 unsigned long length; /* %ecx */
29977 unsigned long entry; /* %edx */
29978 unsigned long flags;
29979+ struct desc_struct d, *gdt;
29980
29981 local_irq_save(flags);
29982- __asm__("lcall *(%%edi); cld"
29983+
29984+ gdt = get_cpu_gdt_table(smp_processor_id());
29985+
29986+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
29987+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
29988+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
29989+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
29990+
29991+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
29992 : "=a" (return_code),
29993 "=b" (address),
29994 "=c" (length),
29995 "=d" (entry)
29996 : "0" (service),
29997 "1" (0),
29998- "D" (&bios32_indirect));
29999+ "D" (&bios32_indirect),
30000+ "r"(__PCIBIOS_DS)
30001+ : "memory");
30002+
30003+ pax_open_kernel();
30004+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
30005+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
30006+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
30007+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
30008+ pax_close_kernel();
30009+
30010 local_irq_restore(flags);
30011
30012 switch (return_code) {
30013- case 0:
30014- return address + entry;
30015- case 0x80: /* Not present */
30016- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
30017- return 0;
30018- default: /* Shouldn't happen */
30019- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
30020- service, return_code);
30021+ case 0: {
30022+ int cpu;
30023+ unsigned char flags;
30024+
30025+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
30026+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
30027+ printk(KERN_WARNING "bios32_service: not valid\n");
30028 return 0;
30029+ }
30030+ address = address + PAGE_OFFSET;
30031+ length += 16UL; /* some BIOSs underreport this... */
30032+ flags = 4;
30033+ if (length >= 64*1024*1024) {
30034+ length >>= PAGE_SHIFT;
30035+ flags |= 8;
30036+ }
30037+
30038+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
30039+ gdt = get_cpu_gdt_table(cpu);
30040+ pack_descriptor(&d, address, length, 0x9b, flags);
30041+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
30042+ pack_descriptor(&d, address, length, 0x93, flags);
30043+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
30044+ }
30045+ return entry;
30046+ }
30047+ case 0x80: /* Not present */
30048+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
30049+ return 0;
30050+ default: /* Shouldn't happen */
30051+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
30052+ service, return_code);
30053+ return 0;
30054 }
30055 }
30056
30057 static struct {
30058 unsigned long address;
30059 unsigned short segment;
30060-} pci_indirect = { 0, __KERNEL_CS };
30061+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
30062
30063-static int pci_bios_present;
30064+static int pci_bios_present __read_only;
30065
30066 static int check_pcibios(void)
30067 {
30068@@ -131,11 +174,13 @@ static int check_pcibios(void)
30069 unsigned long flags, pcibios_entry;
30070
30071 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
30072- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
30073+ pci_indirect.address = pcibios_entry;
30074
30075 local_irq_save(flags);
30076- __asm__(
30077- "lcall *(%%edi); cld\n\t"
30078+ __asm__("movw %w6, %%ds\n\t"
30079+ "lcall *%%ss:(%%edi); cld\n\t"
30080+ "push %%ss\n\t"
30081+ "pop %%ds\n\t"
30082 "jc 1f\n\t"
30083 "xor %%ah, %%ah\n"
30084 "1:"
30085@@ -144,7 +189,8 @@ static int check_pcibios(void)
30086 "=b" (ebx),
30087 "=c" (ecx)
30088 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
30089- "D" (&pci_indirect)
30090+ "D" (&pci_indirect),
30091+ "r" (__PCIBIOS_DS)
30092 : "memory");
30093 local_irq_restore(flags);
30094
30095@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30096
30097 switch (len) {
30098 case 1:
30099- __asm__("lcall *(%%esi); cld\n\t"
30100+ __asm__("movw %w6, %%ds\n\t"
30101+ "lcall *%%ss:(%%esi); cld\n\t"
30102+ "push %%ss\n\t"
30103+ "pop %%ds\n\t"
30104 "jc 1f\n\t"
30105 "xor %%ah, %%ah\n"
30106 "1:"
30107@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30108 : "1" (PCIBIOS_READ_CONFIG_BYTE),
30109 "b" (bx),
30110 "D" ((long)reg),
30111- "S" (&pci_indirect));
30112+ "S" (&pci_indirect),
30113+ "r" (__PCIBIOS_DS));
30114 /*
30115 * Zero-extend the result beyond 8 bits, do not trust the
30116 * BIOS having done it:
30117@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30118 *value &= 0xff;
30119 break;
30120 case 2:
30121- __asm__("lcall *(%%esi); cld\n\t"
30122+ __asm__("movw %w6, %%ds\n\t"
30123+ "lcall *%%ss:(%%esi); cld\n\t"
30124+ "push %%ss\n\t"
30125+ "pop %%ds\n\t"
30126 "jc 1f\n\t"
30127 "xor %%ah, %%ah\n"
30128 "1:"
30129@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30130 : "1" (PCIBIOS_READ_CONFIG_WORD),
30131 "b" (bx),
30132 "D" ((long)reg),
30133- "S" (&pci_indirect));
30134+ "S" (&pci_indirect),
30135+ "r" (__PCIBIOS_DS));
30136 /*
30137 * Zero-extend the result beyond 16 bits, do not trust the
30138 * BIOS having done it:
30139@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30140 *value &= 0xffff;
30141 break;
30142 case 4:
30143- __asm__("lcall *(%%esi); cld\n\t"
30144+ __asm__("movw %w6, %%ds\n\t"
30145+ "lcall *%%ss:(%%esi); cld\n\t"
30146+ "push %%ss\n\t"
30147+ "pop %%ds\n\t"
30148 "jc 1f\n\t"
30149 "xor %%ah, %%ah\n"
30150 "1:"
30151@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30152 : "1" (PCIBIOS_READ_CONFIG_DWORD),
30153 "b" (bx),
30154 "D" ((long)reg),
30155- "S" (&pci_indirect));
30156+ "S" (&pci_indirect),
30157+ "r" (__PCIBIOS_DS));
30158 break;
30159 }
30160
30161@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
30162
30163 switch (len) {
30164 case 1:
30165- __asm__("lcall *(%%esi); cld\n\t"
30166+ __asm__("movw %w6, %%ds\n\t"
30167+ "lcall *%%ss:(%%esi); cld\n\t"
30168+ "push %%ss\n\t"
30169+ "pop %%ds\n\t"
30170 "jc 1f\n\t"
30171 "xor %%ah, %%ah\n"
30172 "1:"
30173@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
30174 "c" (value),
30175 "b" (bx),
30176 "D" ((long)reg),
30177- "S" (&pci_indirect));
30178+ "S" (&pci_indirect),
30179+ "r" (__PCIBIOS_DS));
30180 break;
30181 case 2:
30182- __asm__("lcall *(%%esi); cld\n\t"
30183+ __asm__("movw %w6, %%ds\n\t"
30184+ "lcall *%%ss:(%%esi); cld\n\t"
30185+ "push %%ss\n\t"
30186+ "pop %%ds\n\t"
30187 "jc 1f\n\t"
30188 "xor %%ah, %%ah\n"
30189 "1:"
30190@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
30191 "c" (value),
30192 "b" (bx),
30193 "D" ((long)reg),
30194- "S" (&pci_indirect));
30195+ "S" (&pci_indirect),
30196+ "r" (__PCIBIOS_DS));
30197 break;
30198 case 4:
30199- __asm__("lcall *(%%esi); cld\n\t"
30200+ __asm__("movw %w6, %%ds\n\t"
30201+ "lcall *%%ss:(%%esi); cld\n\t"
30202+ "push %%ss\n\t"
30203+ "pop %%ds\n\t"
30204 "jc 1f\n\t"
30205 "xor %%ah, %%ah\n"
30206 "1:"
30207@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
30208 "c" (value),
30209 "b" (bx),
30210 "D" ((long)reg),
30211- "S" (&pci_indirect));
30212+ "S" (&pci_indirect),
30213+ "r" (__PCIBIOS_DS));
30214 break;
30215 }
30216
30217@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
30218
30219 DBG("PCI: Fetching IRQ routing table... ");
30220 __asm__("push %%es\n\t"
30221+ "movw %w8, %%ds\n\t"
30222 "push %%ds\n\t"
30223 "pop %%es\n\t"
30224- "lcall *(%%esi); cld\n\t"
30225+ "lcall *%%ss:(%%esi); cld\n\t"
30226 "pop %%es\n\t"
30227+ "push %%ss\n\t"
30228+ "pop %%ds\n"
30229 "jc 1f\n\t"
30230 "xor %%ah, %%ah\n"
30231 "1:"
30232@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
30233 "1" (0),
30234 "D" ((long) &opt),
30235 "S" (&pci_indirect),
30236- "m" (opt)
30237+ "m" (opt),
30238+ "r" (__PCIBIOS_DS)
30239 : "memory");
30240 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
30241 if (ret & 0xff00)
30242@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
30243 {
30244 int ret;
30245
30246- __asm__("lcall *(%%esi); cld\n\t"
30247+ __asm__("movw %w5, %%ds\n\t"
30248+ "lcall *%%ss:(%%esi); cld\n\t"
30249+ "push %%ss\n\t"
30250+ "pop %%ds\n"
30251 "jc 1f\n\t"
30252 "xor %%ah, %%ah\n"
30253 "1:"
30254@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
30255 : "0" (PCIBIOS_SET_PCI_HW_INT),
30256 "b" ((dev->bus->number << 8) | dev->devfn),
30257 "c" ((irq << 8) | (pin + 10)),
30258- "S" (&pci_indirect));
30259+ "S" (&pci_indirect),
30260+ "r" (__PCIBIOS_DS));
30261 return !(ret & 0xff00);
30262 }
30263 EXPORT_SYMBOL(pcibios_set_irq_routing);
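The pcbios.c rework stops issuing BIOS32 far calls through the flat kernel segments: bios32_indirect/pci_indirect now point at dedicated __PCIBIOS_CS/__PCIBIOS_DS GDT slots whose base and limit are clamped to the BIOS image reported by the directory service, and each lcall swaps %ds in and out around the call. A buggy or hostile BIOS therefore runs inside a confined segment instead of with flat ring-0 access to the whole address space. Descriptor installation shape, following the bios32_service() hunk:

    /* Per-CPU install of the confined BIOS32 descriptors. */
    struct desc_struct d;

    pack_descriptor(&d, address, length, 0x9b, flags);   /* code, ring 0 */
    write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
    pack_descriptor(&d, address, length, 0x93, flags);   /* data, ring 0 */
    write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
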
30264diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
30265index 40e4469..1ab536e 100644
30266--- a/arch/x86/platform/efi/efi_32.c
30267+++ b/arch/x86/platform/efi/efi_32.c
30268@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
30269 {
30270 struct desc_ptr gdt_descr;
30271
30272+#ifdef CONFIG_PAX_KERNEXEC
30273+ struct desc_struct d;
30274+#endif
30275+
30276 local_irq_save(efi_rt_eflags);
30277
30278 load_cr3(initial_page_table);
30279 __flush_tlb_all();
30280
30281+#ifdef CONFIG_PAX_KERNEXEC
30282+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
30283+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
30284+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
30285+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
30286+#endif
30287+
30288 gdt_descr.address = __pa(get_cpu_gdt_table(0));
30289 gdt_descr.size = GDT_SIZE - 1;
30290 load_gdt(&gdt_descr);
30291@@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
30292 {
30293 struct desc_ptr gdt_descr;
30294
30295+#ifdef CONFIG_PAX_KERNEXEC
30296+ struct desc_struct d;
30297+
30298+ memset(&d, 0, sizeof d);
30299+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
30300+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
30301+#endif
30302+
30303 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
30304 gdt_descr.size = GDT_SIZE - 1;
30305 load_gdt(&gdt_descr);
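efi_32.c applies the same idea to 32-bit EFI under PAX_KERNEXEC: flat temporary descriptors (GDT_ENTRY_KERNEXEC_EFI_CS/_DS) are installed before the physical-mode call, because the regular kernel segments no longer permit what the firmware thunk needs, and the epilog zeroes both entries so the window does not outlive the call. Condensed install step from the prolog hunk (the wrapper name is hypothetical):

    /* Flat 4 GiB code/data descriptors for the EFI call window. */
    static void kernexec_efi_gdt_install(void)
    {
        struct desc_struct d;

        pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);   /* code */
        write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
        pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);   /* data */
        write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
    }
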
30306diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
30307index fbe66e6..eae5e38 100644
30308--- a/arch/x86/platform/efi/efi_stub_32.S
30309+++ b/arch/x86/platform/efi/efi_stub_32.S
30310@@ -6,7 +6,9 @@
30311 */
30312
30313 #include <linux/linkage.h>
30314+#include <linux/init.h>
30315 #include <asm/page_types.h>
30316+#include <asm/segment.h>
30317
30318 /*
30319 * efi_call_phys(void *, ...) is a function with variable parameters.
30320@@ -20,7 +22,7 @@
30321 * service functions will comply with gcc calling convention, too.
30322 */
30323
30324-.text
30325+__INIT
30326 ENTRY(efi_call_phys)
30327 /*
30328 * 0. The function can only be called in Linux kernel. So CS has been
30329@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
30330 * The mapping of lower virtual memory has been created in prelog and
30331 * epilog.
30332 */
30333- movl $1f, %edx
30334- subl $__PAGE_OFFSET, %edx
30335- jmp *%edx
30336+#ifdef CONFIG_PAX_KERNEXEC
30337+ movl $(__KERNEXEC_EFI_DS), %edx
30338+ mov %edx, %ds
30339+ mov %edx, %es
30340+ mov %edx, %ss
30341+ addl $2f,(1f)
30342+ ljmp *(1f)
30343+
30344+__INITDATA
30345+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
30346+.previous
30347+
30348+2:
30349+ subl $2b,(1b)
30350+#else
30351+ jmp 1f-__PAGE_OFFSET
30352 1:
30353+#endif
30354
30355 /*
30356 * 2. Now on the top of stack is the return
30357@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
30358 * parameter 2, ..., param n. To make things easy, we save the return
30359 * address of efi_call_phys in a global variable.
30360 */
30361- popl %edx
30362- movl %edx, saved_return_addr
30363- /* get the function pointer into ECX*/
30364- popl %ecx
30365- movl %ecx, efi_rt_function_ptr
30366- movl $2f, %edx
30367- subl $__PAGE_OFFSET, %edx
30368- pushl %edx
30369+ popl (saved_return_addr)
30370+ popl (efi_rt_function_ptr)
30371
30372 /*
30373 * 3. Clear PG bit in %CR0.
30374@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
30375 /*
30376 * 5. Call the physical function.
30377 */
30378- jmp *%ecx
30379+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
30380
30381-2:
30382 /*
30383 * 6. After EFI runtime service returns, control will return to
30384 * following instruction. We'd better readjust stack pointer first.
30385@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
30386 movl %cr0, %edx
30387 orl $0x80000000, %edx
30388 movl %edx, %cr0
30389- jmp 1f
30390-1:
30391+
30392 /*
30393 * 8. Now restore the virtual mode from flat mode by
30394 * adding EIP with PAGE_OFFSET.
30395 */
30396- movl $1f, %edx
30397- jmp *%edx
30398+#ifdef CONFIG_PAX_KERNEXEC
30399+ movl $(__KERNEL_DS), %edx
30400+ mov %edx, %ds
30401+ mov %edx, %es
30402+ mov %edx, %ss
30403+ ljmp $(__KERNEL_CS),$1f
30404+#else
30405+ jmp 1f+__PAGE_OFFSET
30406+#endif
30407 1:
30408
30409 /*
30410 * 9. Balance the stack. And because EAX contain the return value,
30411 * we'd better not clobber it.
30412 */
30413- leal efi_rt_function_ptr, %edx
30414- movl (%edx), %ecx
30415- pushl %ecx
30416+ pushl (efi_rt_function_ptr)
30417
30418 /*
30419- * 10. Push the saved return address onto the stack and return.
30420+ * 10. Return to the saved return address.
30421 */
30422- leal saved_return_addr, %edx
30423- movl (%edx), %ecx
30424- pushl %ecx
30425- ret
30426+ jmpl *(saved_return_addr)
30427 ENDPROC(efi_call_phys)
30428 .previous
30429
30430-.data
30431+__INITDATA
30432 saved_return_addr:
30433 .long 0
30434 efi_rt_function_ptr:
30435diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
30436index 4c07cca..2c8427d 100644
30437--- a/arch/x86/platform/efi/efi_stub_64.S
30438+++ b/arch/x86/platform/efi/efi_stub_64.S
30439@@ -7,6 +7,7 @@
30440 */
30441
30442 #include <linux/linkage.h>
30443+#include <asm/alternative-asm.h>
30444
30445 #define SAVE_XMM \
30446 mov %rsp, %rax; \
30447@@ -40,6 +41,7 @@ ENTRY(efi_call0)
30448 call *%rdi
30449 addq $32, %rsp
30450 RESTORE_XMM
30451+ pax_force_retaddr 0, 1
30452 ret
30453 ENDPROC(efi_call0)
30454
30455@@ -50,6 +52,7 @@ ENTRY(efi_call1)
30456 call *%rdi
30457 addq $32, %rsp
30458 RESTORE_XMM
30459+ pax_force_retaddr 0, 1
30460 ret
30461 ENDPROC(efi_call1)
30462
30463@@ -60,6 +63,7 @@ ENTRY(efi_call2)
30464 call *%rdi
30465 addq $32, %rsp
30466 RESTORE_XMM
30467+ pax_force_retaddr 0, 1
30468 ret
30469 ENDPROC(efi_call2)
30470
30471@@ -71,6 +75,7 @@ ENTRY(efi_call3)
30472 call *%rdi
30473 addq $32, %rsp
30474 RESTORE_XMM
30475+ pax_force_retaddr 0, 1
30476 ret
30477 ENDPROC(efi_call3)
30478
30479@@ -83,6 +88,7 @@ ENTRY(efi_call4)
30480 call *%rdi
30481 addq $32, %rsp
30482 RESTORE_XMM
30483+ pax_force_retaddr 0, 1
30484 ret
30485 ENDPROC(efi_call4)
30486
30487@@ -96,6 +102,7 @@ ENTRY(efi_call5)
30488 call *%rdi
30489 addq $48, %rsp
30490 RESTORE_XMM
30491+ pax_force_retaddr 0, 1
30492 ret
30493 ENDPROC(efi_call5)
30494
30495@@ -112,5 +119,6 @@ ENTRY(efi_call6)
30496 call *%rdi
30497 addq $48, %rsp
30498 RESTORE_XMM
30499+ pax_force_retaddr 0, 1
30500 ret
30501 ENDPROC(efi_call6)
30502diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
30503index e31bcd8..f12dc46 100644
30504--- a/arch/x86/platform/mrst/mrst.c
30505+++ b/arch/x86/platform/mrst/mrst.c
30506@@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
30507 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
30508 int sfi_mrtc_num;
30509
30510-static void mrst_power_off(void)
30511+static __noreturn void mrst_power_off(void)
30512 {
30513+ BUG();
30514 }
30515
30516-static void mrst_reboot(void)
30517+static __noreturn void mrst_reboot(void)
30518 {
30519 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
30520+ BUG();
30521 }
30522
30523 /* parse all the mtimer info to a static mtimer array */
30524diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
30525index d6ee929..3637cb5 100644
30526--- a/arch/x86/platform/olpc/olpc_dt.c
30527+++ b/arch/x86/platform/olpc/olpc_dt.c
30528@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
30529 return res;
30530 }
30531
30532-static struct of_pdt_ops prom_olpc_ops __initdata = {
30533+static struct of_pdt_ops prom_olpc_ops __initconst = {
30534 .nextprop = olpc_dt_nextprop,
30535 .getproplen = olpc_dt_getproplen,
30536 .getproperty = olpc_dt_getproperty,
30537diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
30538index 3c68768..07e82b8 100644
30539--- a/arch/x86/power/cpu.c
30540+++ b/arch/x86/power/cpu.c
30541@@ -134,7 +134,7 @@ static void do_fpu_end(void)
30542 static void fix_processor_context(void)
30543 {
30544 int cpu = smp_processor_id();
30545- struct tss_struct *t = &per_cpu(init_tss, cpu);
30546+ struct tss_struct *t = init_tss + cpu;
30547
30548 set_tss_desc(cpu, t); /*
30549 * This just modifies memory; should not be
30550@@ -144,8 +144,6 @@ static void fix_processor_context(void)
30551 */
30552
30553 #ifdef CONFIG_X86_64
30554- get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
30555-
30556 syscall_init(); /* This sets MSR_*STAR and related */
30557 #endif
30558 load_TR_desc(); /* This does ltr */
30559diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
30560index cbca565..bae7133 100644
30561--- a/arch/x86/realmode/init.c
30562+++ b/arch/x86/realmode/init.c
30563@@ -62,7 +62,13 @@ void __init setup_real_mode(void)
30564 __va(real_mode_header->trampoline_header);
30565
30566 #ifdef CONFIG_X86_32
30567- trampoline_header->start = __pa(startup_32_smp);
30568+ trampoline_header->start = __pa(ktla_ktva(startup_32_smp));
30569+
30570+#ifdef CONFIG_PAX_KERNEXEC
30571+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
30572+#endif
30573+
30574+ trampoline_header->boot_cs = __BOOT_CS;
30575 trampoline_header->gdt_limit = __BOOT_DS + 7;
30576 trampoline_header->gdt_base = __pa(boot_gdt);
30577 #else
30578diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
30579index 8869287..d577672 100644
30580--- a/arch/x86/realmode/rm/Makefile
30581+++ b/arch/x86/realmode/rm/Makefile
30582@@ -78,5 +78,8 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
30583 $(call cc-option, -fno-unit-at-a-time)) \
30584 $(call cc-option, -fno-stack-protector) \
30585 $(call cc-option, -mpreferred-stack-boundary=2)
30586+ifdef CONSTIFY_PLUGIN
30587+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
30588+endif
30589 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
30590 GCOV_PROFILE := n
30591diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
30592index a28221d..93c40f1 100644
30593--- a/arch/x86/realmode/rm/header.S
30594+++ b/arch/x86/realmode/rm/header.S
30595@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
30596 #endif
30597 /* APM/BIOS reboot */
30598 .long pa_machine_real_restart_asm
30599-#ifdef CONFIG_X86_64
30600+#ifdef CONFIG_X86_32
30601+ .long __KERNEL_CS
30602+#else
30603 .long __KERNEL32_CS
30604 #endif
30605 END(real_mode_header)
30606diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
30607index c1b2791..f9e31c7 100644
30608--- a/arch/x86/realmode/rm/trampoline_32.S
30609+++ b/arch/x86/realmode/rm/trampoline_32.S
30610@@ -25,6 +25,12 @@
30611 #include <asm/page_types.h>
30612 #include "realmode.h"
30613
30614+#ifdef CONFIG_PAX_KERNEXEC
30615+#define ta(X) (X)
30616+#else
30617+#define ta(X) (pa_ ## X)
30618+#endif
30619+
30620 .text
30621 .code16
30622
30623@@ -39,8 +45,6 @@ ENTRY(trampoline_start)
30624
30625 cli # We should be safe anyway
30626
30627- movl tr_start, %eax # where we need to go
30628-
30629 movl $0xA5A5A5A5, trampoline_status
30630 # write marker for master knows we're running
30631
30632@@ -56,7 +60,7 @@ ENTRY(trampoline_start)
30633 movw $1, %dx # protected mode (PE) bit
30634 lmsw %dx # into protected mode
30635
30636- ljmpl $__BOOT_CS, $pa_startup_32
30637+ ljmpl *(trampoline_header)
30638
30639 .section ".text32","ax"
30640 .code32
30641@@ -67,7 +71,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
30642 .balign 8
30643 GLOBAL(trampoline_header)
30644 tr_start: .space 4
30645- tr_gdt_pad: .space 2
30646+ tr_boot_cs: .space 2
30647 tr_gdt: .space 6
30648 END(trampoline_header)
30649
30650diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
30651index bb360dc..3e5945f 100644
30652--- a/arch/x86/realmode/rm/trampoline_64.S
30653+++ b/arch/x86/realmode/rm/trampoline_64.S
30654@@ -107,7 +107,7 @@ ENTRY(startup_32)
30655 wrmsr
30656
30657 # Enable paging and in turn activate Long Mode
30658- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
30659+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
30660 movl %eax, %cr0
30661
30662 /*
30663diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
30664index 79d67bd..c7e1b90 100644
30665--- a/arch/x86/tools/relocs.c
30666+++ b/arch/x86/tools/relocs.c
30667@@ -12,10 +12,13 @@
30668 #include <regex.h>
30669 #include <tools/le_byteshift.h>
30670
30671+#include "../../../include/generated/autoconf.h"
30672+
30673 static void die(char *fmt, ...);
30674
30675 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
30676 static Elf32_Ehdr ehdr;
30677+static Elf32_Phdr *phdr;
30678 static unsigned long reloc_count, reloc_idx;
30679 static unsigned long *relocs;
30680 static unsigned long reloc16_count, reloc16_idx;
30681@@ -330,9 +333,39 @@ static void read_ehdr(FILE *fp)
30682 }
30683 }
30684
30685+static void read_phdrs(FILE *fp)
30686+{
30687+ unsigned int i;
30688+
30689+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
30690+ if (!phdr) {
30691+ die("Unable to allocate %d program headers\n",
30692+ ehdr.e_phnum);
30693+ }
30694+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
30695+ die("Seek to %d failed: %s\n",
30696+ ehdr.e_phoff, strerror(errno));
30697+ }
30698+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
30699+ die("Cannot read ELF program headers: %s\n",
30700+ strerror(errno));
30701+ }
30702+ for(i = 0; i < ehdr.e_phnum; i++) {
30703+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
30704+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
30705+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
30706+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
30707+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
30708+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
30709+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
30710+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
30711+ }
30712+
30713+}
30714+
30715 static void read_shdrs(FILE *fp)
30716 {
30717- int i;
30718+ unsigned int i;
30719 Elf32_Shdr shdr;
30720
30721 secs = calloc(ehdr.e_shnum, sizeof(struct section));
30722@@ -367,7 +400,7 @@ static void read_shdrs(FILE *fp)
30723
30724 static void read_strtabs(FILE *fp)
30725 {
30726- int i;
30727+ unsigned int i;
30728 for (i = 0; i < ehdr.e_shnum; i++) {
30729 struct section *sec = &secs[i];
30730 if (sec->shdr.sh_type != SHT_STRTAB) {
30731@@ -392,7 +425,7 @@ static void read_strtabs(FILE *fp)
30732
30733 static void read_symtabs(FILE *fp)
30734 {
30735- int i,j;
30736+ unsigned int i,j;
30737 for (i = 0; i < ehdr.e_shnum; i++) {
30738 struct section *sec = &secs[i];
30739 if (sec->shdr.sh_type != SHT_SYMTAB) {
30740@@ -423,9 +456,11 @@ static void read_symtabs(FILE *fp)
30741 }
30742
30743
30744-static void read_relocs(FILE *fp)
30745+static void read_relocs(FILE *fp, int use_real_mode)
30746 {
30747- int i,j;
30748+ unsigned int i,j;
30749+ uint32_t base;
30750+
30751 for (i = 0; i < ehdr.e_shnum; i++) {
30752 struct section *sec = &secs[i];
30753 if (sec->shdr.sh_type != SHT_REL) {
30754@@ -445,9 +480,22 @@ static void read_relocs(FILE *fp)
30755 die("Cannot read symbol table: %s\n",
30756 strerror(errno));
30757 }
30758+ base = 0;
30759+
30760+#ifdef CONFIG_X86_32
30761+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
30762+ if (phdr[j].p_type != PT_LOAD )
30763+ continue;
30764+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
30765+ continue;
30766+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
30767+ break;
30768+ }
30769+#endif
30770+
30771 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
30772 Elf32_Rel *rel = &sec->reltab[j];
30773- rel->r_offset = elf32_to_cpu(rel->r_offset);
30774+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
30775 rel->r_info = elf32_to_cpu(rel->r_info);
30776 }
30777 }
30778@@ -456,13 +504,13 @@ static void read_relocs(FILE *fp)
30779
30780 static void print_absolute_symbols(void)
30781 {
30782- int i;
30783+ unsigned int i;
30784 printf("Absolute symbols\n");
30785 printf(" Num: Value Size Type Bind Visibility Name\n");
30786 for (i = 0; i < ehdr.e_shnum; i++) {
30787 struct section *sec = &secs[i];
30788 char *sym_strtab;
30789- int j;
30790+ unsigned int j;
30791
30792 if (sec->shdr.sh_type != SHT_SYMTAB) {
30793 continue;
30794@@ -489,14 +537,14 @@ static void print_absolute_symbols(void)
30795
30796 static void print_absolute_relocs(void)
30797 {
30798- int i, printed = 0;
30799+ unsigned int i, printed = 0;
30800
30801 for (i = 0; i < ehdr.e_shnum; i++) {
30802 struct section *sec = &secs[i];
30803 struct section *sec_applies, *sec_symtab;
30804 char *sym_strtab;
30805 Elf32_Sym *sh_symtab;
30806- int j;
30807+ unsigned int j;
30808 if (sec->shdr.sh_type != SHT_REL) {
30809 continue;
30810 }
30811@@ -558,13 +606,13 @@ static void print_absolute_relocs(void)
30812 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
30813 int use_real_mode)
30814 {
30815- int i;
30816+ unsigned int i;
30817 /* Walk through the relocations */
30818 for (i = 0; i < ehdr.e_shnum; i++) {
30819 char *sym_strtab;
30820 Elf32_Sym *sh_symtab;
30821 struct section *sec_applies, *sec_symtab;
30822- int j;
30823+ unsigned int j;
30824 struct section *sec = &secs[i];
30825
30826 if (sec->shdr.sh_type != SHT_REL) {
30827@@ -588,6 +636,24 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
30828 sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
30829 r_type = ELF32_R_TYPE(rel->r_info);
30830
30831+ if (!use_real_mode) {
30832+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
30833+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
30834+ continue;
30835+
30836+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
30837+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
30838+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
30839+ continue;
30840+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
30841+ continue;
30842+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
30843+ continue;
30844+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
30845+ continue;
30846+#endif
30847+ }
30848+
30849 shn_abs = sym->st_shndx == SHN_ABS;
30850
30851 switch (r_type) {
30852@@ -681,7 +747,7 @@ static int write32(unsigned int v, FILE *f)
30853
30854 static void emit_relocs(int as_text, int use_real_mode)
30855 {
30856- int i;
30857+ unsigned int i;
30858 /* Count how many relocations I have and allocate space for them. */
30859 reloc_count = 0;
30860 walk_relocs(count_reloc, use_real_mode);
30861@@ -808,10 +874,11 @@ int main(int argc, char **argv)
30862 fname, strerror(errno));
30863 }
30864 read_ehdr(fp);
30865+ read_phdrs(fp);
30866 read_shdrs(fp);
30867 read_strtabs(fp);
30868 read_symtabs(fp);
30869- read_relocs(fp);
30870+ read_relocs(fp, use_real_mode);
30871 if (show_absolute_syms) {
30872 print_absolute_symbols();
30873 goto out;
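
[The relocs.c hunks above do three things: widen every section/symbol loop counter to unsigned int, matching the unsigned ELF count fields; load the program header table; and, on x86_32, rebase relocation offsets by CONFIG_PAGE_OFFSET so sections moved by KERNEXEC still relocate correctly. A minimal user-space sketch of the header-loading pattern, assuming a little-endian ELF32 input whose ELF header has already been byte-swapped the way relocs.c's read_ehdr() does; le32() and load_phdrs() are local helpers for the sketch, not part of the tool:]

#include <elf.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Convert a little-endian on-disk word to host order. */
static uint32_t le32(uint32_t v)
{
    const uint8_t *p = (const uint8_t *)&v;
    return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static Elf32_Phdr *load_phdrs(FILE *fp, const Elf32_Ehdr *ehdr)
{
    Elf32_Phdr *phdr = calloc(ehdr->e_phnum, sizeof(*phdr));
    unsigned int i;    /* unsigned, as in the patched tool */

    if (!phdr)
        return NULL;
    if (fseek(fp, ehdr->e_phoff, SEEK_SET) < 0 ||
        fread(phdr, sizeof(*phdr), ehdr->e_phnum, fp) != ehdr->e_phnum) {
        free(phdr);
        return NULL;
    }
    for (i = 0; i < ehdr->e_phnum; i++) {    /* fix endianness in place */
        phdr[i].p_type   = le32(phdr[i].p_type);
        phdr[i].p_offset = le32(phdr[i].p_offset);
        phdr[i].p_vaddr  = le32(phdr[i].p_vaddr);
        phdr[i].p_paddr  = le32(phdr[i].p_paddr);
        phdr[i].p_filesz = le32(phdr[i].p_filesz);
        phdr[i].p_memsz  = le32(phdr[i].p_memsz);
        phdr[i].p_flags  = le32(phdr[i].p_flags);
        phdr[i].p_align  = le32(phdr[i].p_align);
    }
    return phdr;
}

int main(int argc, char **argv)
{
    FILE *fp = argc > 1 ? fopen(argv[1], "rb") : NULL;
    Elf32_Ehdr ehdr;

    if (!fp || fread(&ehdr, sizeof(ehdr), 1, fp) != 1)
        return 1;
    /* assume a little-endian host for brevity: header used as read */
    return load_phdrs(fp, &ehdr) ? 0 : 1;
}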
30874diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
30875index fd14be1..e3c79c0 100644
30876--- a/arch/x86/vdso/Makefile
30877+++ b/arch/x86/vdso/Makefile
30878@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
30879 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
30880 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
30881
30882-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
30883+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
30884 GCOV_PROFILE := n
30885
30886 #
30887diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
30888index 0faad64..39ef157 100644
30889--- a/arch/x86/vdso/vdso32-setup.c
30890+++ b/arch/x86/vdso/vdso32-setup.c
30891@@ -25,6 +25,7 @@
30892 #include <asm/tlbflush.h>
30893 #include <asm/vdso.h>
30894 #include <asm/proto.h>
30895+#include <asm/mman.h>
30896
30897 enum {
30898 VDSO_DISABLED = 0,
30899@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
30900 void enable_sep_cpu(void)
30901 {
30902 int cpu = get_cpu();
30903- struct tss_struct *tss = &per_cpu(init_tss, cpu);
30904+ struct tss_struct *tss = init_tss + cpu;
30905
30906 if (!boot_cpu_has(X86_FEATURE_SEP)) {
30907 put_cpu();
30908@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
30909 gate_vma.vm_start = FIXADDR_USER_START;
30910 gate_vma.vm_end = FIXADDR_USER_END;
30911 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
30912- gate_vma.vm_page_prot = __P101;
30913+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
30914
30915 return 0;
30916 }
30917@@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
30918 if (compat)
30919 addr = VDSO_HIGH_BASE;
30920 else {
30921- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
30922+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
30923 if (IS_ERR_VALUE(addr)) {
30924 ret = addr;
30925 goto up_fail;
30926 }
30927 }
30928
30929- current->mm->context.vdso = (void *)addr;
30930+ current->mm->context.vdso = addr;
30931
30932 if (compat_uses_vma || !compat) {
30933 /*
30934@@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
30935 }
30936
30937 current_thread_info()->sysenter_return =
30938- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
30939+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
30940
30941 up_fail:
30942 if (ret)
30943- current->mm->context.vdso = NULL;
30944+ current->mm->context.vdso = 0;
30945
30946 up_write(&mm->mmap_sem);
30947
30948@@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
30949
30950 const char *arch_vma_name(struct vm_area_struct *vma)
30951 {
30952- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
30953+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
30954 return "[vdso]";
30955+
30956+#ifdef CONFIG_PAX_SEGMEXEC
30957+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
30958+ return "[vdso]";
30959+#endif
30960+
30961 return NULL;
30962 }
30963
30964@@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
30965 * Check to see if the corresponding task was created in compat vdso
30966 * mode.
30967 */
30968- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
30969+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
30970 return &gate_vma;
30971 return NULL;
30972 }
30973diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
30974index 431e875..cbb23f3 100644
30975--- a/arch/x86/vdso/vma.c
30976+++ b/arch/x86/vdso/vma.c
30977@@ -16,8 +16,6 @@
30978 #include <asm/vdso.h>
30979 #include <asm/page.h>
30980
30981-unsigned int __read_mostly vdso_enabled = 1;
30982-
30983 extern char vdso_start[], vdso_end[];
30984 extern unsigned short vdso_sync_cpuid;
30985
30986@@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
30987 * unaligned here as a result of stack start randomization.
30988 */
30989 addr = PAGE_ALIGN(addr);
30990- addr = align_vdso_addr(addr);
30991
30992 return addr;
30993 }
30994@@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
30995 unsigned size)
30996 {
30997 struct mm_struct *mm = current->mm;
30998- unsigned long addr;
30999+ unsigned long addr = 0;
31000 int ret;
31001
31002- if (!vdso_enabled)
31003- return 0;
31004-
31005 down_write(&mm->mmap_sem);
31006+
31007+#ifdef CONFIG_PAX_RANDMMAP
31008+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
31009+#endif
31010+
31011 addr = vdso_addr(mm->start_stack, size);
31012+ addr = align_vdso_addr(addr);
31013 addr = get_unmapped_area(NULL, addr, size, 0, 0);
31014 if (IS_ERR_VALUE(addr)) {
31015 ret = addr;
31016 goto up_fail;
31017 }
31018
31019- current->mm->context.vdso = (void *)addr;
31020+ mm->context.vdso = addr;
31021
31022 ret = install_special_mapping(mm, addr, size,
31023 VM_READ|VM_EXEC|
31024 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
31025 pages);
31026- if (ret) {
31027- current->mm->context.vdso = NULL;
31028- goto up_fail;
31029- }
31030+ if (ret)
31031+ mm->context.vdso = 0;
31032
31033 up_fail:
31034 up_write(&mm->mmap_sem);
31035@@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
31036 vdsox32_size);
31037 }
31038 #endif
31039-
31040-static __init int vdso_setup(char *s)
31041-{
31042- vdso_enabled = simple_strtoul(s, NULL, 0);
31043- return 0;
31044-}
31045-__setup("vdso=", vdso_setup);
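
[In the vma.c hunk above, the vdso= boot knob is dropped and the mapping logic changes shape: addr now starts at 0, and under CONFIG_PAX_RANDMMAP the #ifdef deliberately guards a brace-less if whose body is the following assignment, so a task flagged MF_PAX_RANDMMAP never gets a placement hint and get_unmapped_area() is free to return a randomized slot. A stand-alone demo of that #ifdef-guarded-if idiom; RANDMMAP and the constants are stand-ins for the real config test:]

#include <stdio.h>

int main(void)
{
    unsigned long addr = 0;        /* 0 means "no placement hint" */

#ifdef RANDMMAP
    if (0)                         /* stands in for the pax_flags test */
#endif
        addr = 0x400000;           /* stands in for vdso_addr(...) */

    printf("hint = %#lx\n", addr); /* prints 0 when built with -DRANDMMAP */
    return 0;
}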
31046diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
31047index 2262003..f229ced 100644
31048--- a/arch/x86/xen/enlighten.c
31049+++ b/arch/x86/xen/enlighten.c
31050@@ -100,8 +100,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
31051
31052 struct shared_info xen_dummy_shared_info;
31053
31054-void *xen_initial_gdt;
31055-
31056 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
31057 __read_mostly int xen_have_vector_callback;
31058 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
31059@@ -496,8 +494,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
31060 {
31061 unsigned long va = dtr->address;
31062 unsigned int size = dtr->size + 1;
31063- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
31064- unsigned long frames[pages];
31065+ unsigned long frames[65536 / PAGE_SIZE];
31066 int f;
31067
31068 /*
31069@@ -545,8 +542,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
31070 {
31071 unsigned long va = dtr->address;
31072 unsigned int size = dtr->size + 1;
31073- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
31074- unsigned long frames[pages];
31075+ unsigned long frames[65536 / PAGE_SIZE];
31076 int f;
31077
31078 /*
31079@@ -939,7 +935,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
31080 return 0;
31081 }
31082
31083-static void set_xen_basic_apic_ops(void)
31084+static void __init set_xen_basic_apic_ops(void)
31085 {
31086 apic->read = xen_apic_read;
31087 apic->write = xen_apic_write;
31088@@ -1245,30 +1241,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
31089 #endif
31090 };
31091
31092-static void xen_reboot(int reason)
31093+static __noreturn void xen_reboot(int reason)
31094 {
31095 struct sched_shutdown r = { .reason = reason };
31096
31097- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
31098- BUG();
31099+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
31100+ BUG();
31101 }
31102
31103-static void xen_restart(char *msg)
31104+static __noreturn void xen_restart(char *msg)
31105 {
31106 xen_reboot(SHUTDOWN_reboot);
31107 }
31108
31109-static void xen_emergency_restart(void)
31110+static __noreturn void xen_emergency_restart(void)
31111 {
31112 xen_reboot(SHUTDOWN_reboot);
31113 }
31114
31115-static void xen_machine_halt(void)
31116+static __noreturn void xen_machine_halt(void)
31117 {
31118 xen_reboot(SHUTDOWN_poweroff);
31119 }
31120
31121-static void xen_machine_power_off(void)
31122+static __noreturn void xen_machine_power_off(void)
31123 {
31124 if (pm_power_off)
31125 pm_power_off();
31126@@ -1370,7 +1366,17 @@ asmlinkage void __init xen_start_kernel(void)
31127 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
31128
31129 /* Work out if we support NX */
31130- x86_configure_nx();
31131+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
31132+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
31133+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
31134+ unsigned l, h;
31135+
31136+ __supported_pte_mask |= _PAGE_NX;
31137+ rdmsr(MSR_EFER, l, h);
31138+ l |= EFER_NX;
31139+ wrmsr(MSR_EFER, l, h);
31140+ }
31141+#endif
31142
31143 xen_setup_features();
31144
31145@@ -1399,14 +1405,7 @@ asmlinkage void __init xen_start_kernel(void)
31146 pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
31147 }
31148
31149- machine_ops = xen_machine_ops;
31150-
31151- /*
31152- * The only reliable way to retain the initial address of the
31153- * percpu gdt_page is to remember it here, so we can go and
31154- * mark it RW later, when the initial percpu area is freed.
31155- */
31156- xen_initial_gdt = &per_cpu(gdt_page, 0);
31157+ memcpy((void *)&machine_ops, &xen_machine_ops, sizeof machine_ops);
31158
31159 xen_smp_init();
31160
31161@@ -1598,7 +1597,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
31162 return NOTIFY_OK;
31163 }
31164
31165-static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = {
31166+static struct notifier_block xen_hvm_cpu_notifier = {
31167 .notifier_call = xen_hvm_cpu_notify,
31168 };
31169
31170diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
31171index 01de35c..0bda07b 100644
31172--- a/arch/x86/xen/mmu.c
31173+++ b/arch/x86/xen/mmu.c
31174@@ -1881,6 +1881,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
31175 /* L3_k[510] -> level2_kernel_pgt
31176 * L3_i[511] -> level2_fixmap_pgt */
31177 convert_pfn_mfn(level3_kernel_pgt);
31178+ convert_pfn_mfn(level3_vmalloc_start_pgt);
31179+ convert_pfn_mfn(level3_vmalloc_end_pgt);
31180+ convert_pfn_mfn(level3_vmemmap_pgt);
31181
31182 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
31183 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
31184@@ -1910,8 +1913,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
31185 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
31186 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
31187 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
31188+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
31189+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
31190+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
31191 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
31192 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
31193+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
31194 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
31195 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
31196
31197@@ -2097,6 +2104,7 @@ static void __init xen_post_allocator_init(void)
31198 pv_mmu_ops.set_pud = xen_set_pud;
31199 #if PAGETABLE_LEVELS == 4
31200 pv_mmu_ops.set_pgd = xen_set_pgd;
31201+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
31202 #endif
31203
31204 /* This will work as long as patching hasn't happened yet
31205@@ -2178,6 +2186,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
31206 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
31207 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
31208 .set_pgd = xen_set_pgd_hyper,
31209+ .set_pgd_batched = xen_set_pgd_hyper,
31210
31211 .alloc_pud = xen_alloc_pmd_init,
31212 .release_pud = xen_release_pmd_init,
31213diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
31214index 34bc4ce..c34aa24 100644
31215--- a/arch/x86/xen/smp.c
31216+++ b/arch/x86/xen/smp.c
31217@@ -229,11 +229,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
31218 {
31219 BUG_ON(smp_processor_id() != 0);
31220 native_smp_prepare_boot_cpu();
31221-
31222- /* We've switched to the "real" per-cpu gdt, so make sure the
31223- old memory can be recycled */
31224- make_lowmem_page_readwrite(xen_initial_gdt);
31225-
31226 xen_filter_cpu_maps();
31227 xen_setup_vcpu_info_placement();
31228 }
31229@@ -300,12 +295,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
31230 gdt = get_cpu_gdt_table(cpu);
31231
31232 ctxt->flags = VGCF_IN_KERNEL;
31233- ctxt->user_regs.ds = __USER_DS;
31234- ctxt->user_regs.es = __USER_DS;
31235+ ctxt->user_regs.ds = __KERNEL_DS;
31236+ ctxt->user_regs.es = __KERNEL_DS;
31237 ctxt->user_regs.ss = __KERNEL_DS;
31238 #ifdef CONFIG_X86_32
31239 ctxt->user_regs.fs = __KERNEL_PERCPU;
31240- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
31241+ savesegment(gs, ctxt->user_regs.gs);
31242 #else
31243 ctxt->gs_base_kernel = per_cpu_offset(cpu);
31244 #endif
31245@@ -355,13 +350,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
31246 int rc;
31247
31248 per_cpu(current_task, cpu) = idle;
31249+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
31250 #ifdef CONFIG_X86_32
31251 irq_ctx_init(cpu);
31252 #else
31253 clear_tsk_thread_flag(idle, TIF_FORK);
31254- per_cpu(kernel_stack, cpu) =
31255- (unsigned long)task_stack_page(idle) -
31256- KERNEL_STACK_OFFSET + THREAD_SIZE;
31257+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
31258 #endif
31259 xen_setup_runstate_info(cpu);
31260 xen_setup_timer(cpu);
31261@@ -630,7 +624,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
31262
31263 void __init xen_smp_init(void)
31264 {
31265- smp_ops = xen_smp_ops;
31266+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
31267 xen_fill_possible_map();
31268 xen_init_spinlocks();
31269 }
31270diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
31271index 33ca6e4..0ded929 100644
31272--- a/arch/x86/xen/xen-asm_32.S
31273+++ b/arch/x86/xen/xen-asm_32.S
31274@@ -84,14 +84,14 @@ ENTRY(xen_iret)
31275 ESP_OFFSET=4 # bytes pushed onto stack
31276
31277 /*
31278- * Store vcpu_info pointer for easy access. Do it this way to
31279- * avoid having to reload %fs
31280+ * Store vcpu_info pointer for easy access.
31281 */
31282 #ifdef CONFIG_SMP
31283- GET_THREAD_INFO(%eax)
31284- movl %ss:TI_cpu(%eax), %eax
31285- movl %ss:__per_cpu_offset(,%eax,4), %eax
31286- mov %ss:xen_vcpu(%eax), %eax
31287+ push %fs
31288+ mov $(__KERNEL_PERCPU), %eax
31289+ mov %eax, %fs
31290+ mov PER_CPU_VAR(xen_vcpu), %eax
31291+ pop %fs
31292 #else
31293 movl %ss:xen_vcpu, %eax
31294 #endif
31295diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
31296index 7faed58..ba4427c 100644
31297--- a/arch/x86/xen/xen-head.S
31298+++ b/arch/x86/xen/xen-head.S
31299@@ -19,6 +19,17 @@ ENTRY(startup_xen)
31300 #ifdef CONFIG_X86_32
31301 mov %esi,xen_start_info
31302 mov $init_thread_union+THREAD_SIZE,%esp
31303+#ifdef CONFIG_SMP
31304+ movl $cpu_gdt_table,%edi
31305+ movl $__per_cpu_load,%eax
31306+ movw %ax,__KERNEL_PERCPU + 2(%edi)
31307+ rorl $16,%eax
31308+ movb %al,__KERNEL_PERCPU + 4(%edi)
31309+ movb %ah,__KERNEL_PERCPU + 7(%edi)
31310+ movl $__per_cpu_end - 1,%eax
31311+ subl $__per_cpu_start,%eax
31312+ movw %ax,__KERNEL_PERCPU + 0(%edi)
31313+#endif
31314 #else
31315 mov %rsi,xen_start_info
31316 mov $init_thread_union+THREAD_SIZE,%rsp
31317diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
31318index a95b417..b6dbd0b 100644
31319--- a/arch/x86/xen/xen-ops.h
31320+++ b/arch/x86/xen/xen-ops.h
31321@@ -10,8 +10,6 @@
31322 extern const char xen_hypervisor_callback[];
31323 extern const char xen_failsafe_callback[];
31324
31325-extern void *xen_initial_gdt;
31326-
31327 struct trap_info;
31328 void xen_copy_trap_info(struct trap_info *traps);
31329
31330diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
31331index 525bd3d..ef888b1 100644
31332--- a/arch/xtensa/variants/dc232b/include/variant/core.h
31333+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
31334@@ -119,9 +119,9 @@
31335 ----------------------------------------------------------------------*/
31336
31337 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
31338-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
31339 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
31340 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
31341+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
31342
31343 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
31344 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
31345diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
31346index 2f33760..835e50a 100644
31347--- a/arch/xtensa/variants/fsf/include/variant/core.h
31348+++ b/arch/xtensa/variants/fsf/include/variant/core.h
31349@@ -11,6 +11,7 @@
31350 #ifndef _XTENSA_CORE_H
31351 #define _XTENSA_CORE_H
31352
31353+#include <linux/const.h>
31354
31355 /****************************************************************************
31356 Parameters Useful for Any Code, USER or PRIVILEGED
31357@@ -112,9 +113,9 @@
31358 ----------------------------------------------------------------------*/
31359
31360 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
31361-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
31362 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
31363 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
31364+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
31365
31366 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
31367 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
31368diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
31369index af00795..2bb8105 100644
31370--- a/arch/xtensa/variants/s6000/include/variant/core.h
31371+++ b/arch/xtensa/variants/s6000/include/variant/core.h
31372@@ -11,6 +11,7 @@
31373 #ifndef _XTENSA_CORE_CONFIGURATION_H
31374 #define _XTENSA_CORE_CONFIGURATION_H
31375
31376+#include <linux/const.h>
31377
31378 /****************************************************************************
31379 Parameters Useful for Any Code, USER or PRIVILEGED
31380@@ -118,9 +119,9 @@
31381 ----------------------------------------------------------------------*/
31382
31383 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
31384-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
31385 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
31386 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
31387+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
31388
31389 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
31390 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
31391diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
31392index 58916af..eb9dbcf6 100644
31393--- a/block/blk-iopoll.c
31394+++ b/block/blk-iopoll.c
31395@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
31396 }
31397 EXPORT_SYMBOL(blk_iopoll_complete);
31398
31399-static void blk_iopoll_softirq(struct softirq_action *h)
31400+static void blk_iopoll_softirq(void)
31401 {
31402 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
31403 int rearm = 0, budget = blk_iopoll_budget;
31404@@ -209,7 +209,7 @@ static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
31405 return NOTIFY_OK;
31406 }
31407
31408-static struct notifier_block __cpuinitdata blk_iopoll_cpu_notifier = {
31409+static struct notifier_block blk_iopoll_cpu_notifier = {
31410 .notifier_call = blk_iopoll_cpu_notify,
31411 };
31412
31413diff --git a/block/blk-map.c b/block/blk-map.c
31414index 623e1cd..ca1e109 100644
31415--- a/block/blk-map.c
31416+++ b/block/blk-map.c
31417@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
31418 if (!len || !kbuf)
31419 return -EINVAL;
31420
31421- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
31422+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
31423 if (do_copy)
31424 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
31425 else
31426diff --git a/block/blk-softirq.c b/block/blk-softirq.c
31427index 467c8de..f3628c5 100644
31428--- a/block/blk-softirq.c
31429+++ b/block/blk-softirq.c
31430@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
31431 * Softirq action handler - move entries to local list and loop over them
31432 * while passing them to the queue registered handler.
31433 */
31434-static void blk_done_softirq(struct softirq_action *h)
31435+static void blk_done_softirq(void)
31436 {
31437 struct list_head *cpu_list, local_list;
31438
31439@@ -98,7 +98,7 @@ static int __cpuinit blk_cpu_notify(struct notifier_block *self,
31440 return NOTIFY_OK;
31441 }
31442
31443-static struct notifier_block __cpuinitdata blk_cpu_notifier = {
31444+static struct notifier_block blk_cpu_notifier = {
31445 .notifier_call = blk_cpu_notify,
31446 };
31447
31448diff --git a/block/bsg.c b/block/bsg.c
31449index ff64ae3..593560c 100644
31450--- a/block/bsg.c
31451+++ b/block/bsg.c
31452@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
31453 struct sg_io_v4 *hdr, struct bsg_device *bd,
31454 fmode_t has_write_perm)
31455 {
31456+ unsigned char tmpcmd[sizeof(rq->__cmd)];
31457+ unsigned char *cmdptr;
31458+
31459 if (hdr->request_len > BLK_MAX_CDB) {
31460 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
31461 if (!rq->cmd)
31462 return -ENOMEM;
31463- }
31464+ cmdptr = rq->cmd;
31465+ } else
31466+ cmdptr = tmpcmd;
31467
31468- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
31469+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
31470 hdr->request_len))
31471 return -EFAULT;
31472
31473+ if (cmdptr != rq->cmd)
31474+ memcpy(rq->cmd, cmdptr, hdr->request_len);
31475+
31476 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
31477 if (blk_verify_command(rq->cmd, has_write_perm))
31478 return -EPERM;
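
[The bsg.c change above is a double-fetch fix: the SCSI CDB is copied from user space into a private buffer first (a stack temporary for short commands, the kmalloc'ed rq->cmd for long ones) and only then published into the request, so user space cannot rewrite the command between blk_verify_command()'s check and its later use. A user-space sketch of the copy-then-commit pattern, with a volatile shared buffer standing in for the user mapping and plain byte loads standing in for copy_from_user():]

#include <stdio.h>
#include <string.h>

#define CMD_MAX 16

static int accept_cmd(unsigned char *dst,
                      const volatile unsigned char *shared, size_t len)
{
    unsigned char tmp[CMD_MAX];
    size_t i;

    if (len > sizeof(tmp))
        return -1;
    for (i = 0; i < len; i++)      /* exactly one fetch per byte */
        tmp[i] = shared[i];
    if (tmp[0] == 0xff)            /* validate the private copy */
        return -1;
    memcpy(dst, tmp, len);         /* commit exactly what was checked */
    return 0;
}

int main(void)
{
    unsigned char shared[CMD_MAX] = { 0x12 }, cmd[CMD_MAX];

    printf("%d\n", accept_cmd(cmd, shared, sizeof(shared)));
    return 0;
}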
31479diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
31480index 7c668c8..db3521c 100644
31481--- a/block/compat_ioctl.c
31482+++ b/block/compat_ioctl.c
31483@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
31484 err |= __get_user(f->spec1, &uf->spec1);
31485 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
31486 err |= __get_user(name, &uf->name);
31487- f->name = compat_ptr(name);
31488+ f->name = (void __force_kernel *)compat_ptr(name);
31489 if (err) {
31490 err = -EFAULT;
31491 goto out;
31492diff --git a/block/partitions/efi.c b/block/partitions/efi.c
31493index b62fb88..bdab4c4 100644
31494--- a/block/partitions/efi.c
31495+++ b/block/partitions/efi.c
31496@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
31497 if (!gpt)
31498 return NULL;
31499
31500+ if (!le32_to_cpu(gpt->num_partition_entries))
31501+ return NULL;
31502+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
31503+ if (!pte)
31504+ return NULL;
31505+
31506 count = le32_to_cpu(gpt->num_partition_entries) *
31507 le32_to_cpu(gpt->sizeof_partition_entry);
31508- if (!count)
31509- return NULL;
31510- pte = kzalloc(count, GFP_KERNEL);
31511- if (!pte)
31512- return NULL;
31513-
31514 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
31515 (u8 *) pte,
31516 count) < count) {
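
[The efi.c hunk above replaces kzalloc(n * size) with kcalloc(n, size): num_partition_entries and sizeof_partition_entry both come from an on-disk GPT header, so their product can wrap a 32-bit size and yield an undersized buffer that the following read_lba() would overrun. kcalloc(), like user-space calloc() on mainstream allocators, fails the allocation instead. A short demonstration of the wrap:]

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    size_t n = SIZE_MAX / 16 + 2, size = 16;
    void *p;

    /* the broken pattern: the multiplication wraps to a tiny value */
    printf("n * size wraps to %zu\n", n * size);

    /* the fixed pattern: calloc() detects the overflow and fails */
    p = calloc(n, size);
    printf("calloc -> %p\n", p);
    free(p);
    return 0;
}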
31517diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
31518index 9a87daa..fb17486 100644
31519--- a/block/scsi_ioctl.c
31520+++ b/block/scsi_ioctl.c
31521@@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
31522 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
31523 struct sg_io_hdr *hdr, fmode_t mode)
31524 {
31525- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
31526+ unsigned char tmpcmd[sizeof(rq->__cmd)];
31527+ unsigned char *cmdptr;
31528+
31529+ if (rq->cmd != rq->__cmd)
31530+ cmdptr = rq->cmd;
31531+ else
31532+ cmdptr = tmpcmd;
31533+
31534+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
31535 return -EFAULT;
31536+
31537+ if (cmdptr != rq->cmd)
31538+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
31539+
31540 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
31541 return -EPERM;
31542
31543@@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
31544 int err;
31545 unsigned int in_len, out_len, bytes, opcode, cmdlen;
31546 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
31547+ unsigned char tmpcmd[sizeof(rq->__cmd)];
31548+ unsigned char *cmdptr;
31549
31550 if (!sic)
31551 return -EINVAL;
31552@@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
31553 */
31554 err = -EFAULT;
31555 rq->cmd_len = cmdlen;
31556- if (copy_from_user(rq->cmd, sic->data, cmdlen))
31557+
31558+ if (rq->cmd != rq->__cmd)
31559+ cmdptr = rq->cmd;
31560+ else
31561+ cmdptr = tmpcmd;
31562+
31563+ if (copy_from_user(cmdptr, sic->data, cmdlen))
31564 goto error;
31565
31566+ if (rq->cmd != cmdptr)
31567+ memcpy(rq->cmd, cmdptr, cmdlen);
31568+
31569 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
31570 goto error;
31571
31572diff --git a/crypto/cryptd.c b/crypto/cryptd.c
31573index 7bdd61b..afec999 100644
31574--- a/crypto/cryptd.c
31575+++ b/crypto/cryptd.c
31576@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
31577
31578 struct cryptd_blkcipher_request_ctx {
31579 crypto_completion_t complete;
31580-};
31581+} __no_const;
31582
31583 struct cryptd_hash_ctx {
31584 struct crypto_shash *child;
31585@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
31586
31587 struct cryptd_aead_request_ctx {
31588 crypto_completion_t complete;
31589-};
31590+} __no_const;
31591
31592 static void cryptd_queue_worker(struct work_struct *work);
31593
31594diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
31595index f6d9baf..dfd511f 100644
31596--- a/crypto/crypto_user.c
31597+++ b/crypto/crypto_user.c
31598@@ -30,6 +30,8 @@
31599
31600 #include "internal.h"
31601
31602+#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))
31603+
31604 static DEFINE_MUTEX(crypto_cfg_mutex);
31605
31606 /* The crypto netlink socket */
31607@@ -196,7 +198,10 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
31608 struct crypto_dump_info info;
31609 int err;
31610
31611- if (!p->cru_driver_name)
31612+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
31613+ return -EINVAL;
31614+
31615+ if (!p->cru_driver_name[0])
31616 return -EINVAL;
31617
31618 alg = crypto_alg_match(p, 1);
31619@@ -260,6 +265,9 @@ static int crypto_update_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
31620 struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
31621 LIST_HEAD(list);
31622
31623+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
31624+ return -EINVAL;
31625+
31626 if (priority && !strlen(p->cru_driver_name))
31627 return -EINVAL;
31628
31629@@ -287,6 +295,9 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
31630 struct crypto_alg *alg;
31631 struct crypto_user_alg *p = nlmsg_data(nlh);
31632
31633+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
31634+ return -EINVAL;
31635+
31636 alg = crypto_alg_match(p, 1);
31637 if (!alg)
31638 return -ENOENT;
31639@@ -368,6 +379,9 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
31640 struct crypto_user_alg *p = nlmsg_data(nlh);
31641 struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
31642
31643+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
31644+ return -EINVAL;
31645+
31646 if (strlen(p->cru_driver_name))
31647 exact = 1;
31648
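
[crypto_user.c receives fixed-width name fields over netlink; before this change, cru_name and cru_driver_name could arrive without a terminating NUL, and the later strlen()/strcmp() calls would read off the end of the structure. The added null_terminated() macro bounds the scan with strnlen(). It works stand-alone:]

#include <stdio.h>
#include <string.h>

#define NAME_LEN 8
#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))

struct user_alg { char cru_name[NAME_LEN]; };

int main(void)
{
    struct user_alg ok = { "aes" };
    struct user_alg bad;

    memset(bad.cru_name, 'A', sizeof(bad.cru_name));   /* no NUL anywhere */
    printf("ok: %d bad: %d\n",
           null_terminated(ok.cru_name), null_terminated(bad.cru_name));
    return 0;
}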
31649diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
31650index f220d64..d359ad6 100644
31651--- a/drivers/acpi/apei/apei-internal.h
31652+++ b/drivers/acpi/apei/apei-internal.h
31653@@ -20,7 +20,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
31654 struct apei_exec_ins_type {
31655 u32 flags;
31656 apei_exec_ins_func_t run;
31657-};
31658+} __do_const;
31659
31660 struct apei_exec_context {
31661 u32 ip;
31662diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
31663index e6defd8..c26a225 100644
31664--- a/drivers/acpi/apei/cper.c
31665+++ b/drivers/acpi/apei/cper.c
31666@@ -38,12 +38,12 @@
31667 */
31668 u64 cper_next_record_id(void)
31669 {
31670- static atomic64_t seq;
31671+ static atomic64_unchecked_t seq;
31672
31673- if (!atomic64_read(&seq))
31674- atomic64_set(&seq, ((u64)get_seconds()) << 32);
31675+ if (!atomic64_read_unchecked(&seq))
31676+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
31677
31678- return atomic64_inc_return(&seq);
31679+ return atomic64_inc_return_unchecked(&seq);
31680 }
31681 EXPORT_SYMBOL_GPL(cper_next_record_id);
31682
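
[cper_next_record_id() switches to atomic64_unchecked_t, PaX REFCOUNT's opt-out type: overflow trapping is meant for reference counts, but a record-id generator may wrap harmlessly, so it uses the unchecked operations. Roughly the same generator in portable C11; the unsynchronized first-use seeding has the same benign race the kernel version tolerates:]

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t next_record_id(void)
{
    static _Atomic uint64_t seq;

    if (!atomic_load(&seq))        /* seed high word from wall-clock time */
        atomic_store(&seq, (uint64_t)time(NULL) << 32);
    return atomic_fetch_add(&seq, 1) + 1;    /* inc-and-return */
}

int main(void)
{
    printf("%llx\n%llx\n",
           (unsigned long long)next_record_id(),
           (unsigned long long)next_record_id());
    return 0;
}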
31683diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
31684index be60399..778b33e8 100644
31685--- a/drivers/acpi/bgrt.c
31686+++ b/drivers/acpi/bgrt.c
31687@@ -87,8 +87,10 @@ static int __init bgrt_init(void)
31688 return -ENODEV;
31689
31690 sysfs_bin_attr_init(&image_attr);
31691- image_attr.private = bgrt_image;
31692- image_attr.size = bgrt_image_size;
31693+ pax_open_kernel();
31694+ *(void **)&image_attr.private = bgrt_image;
31695+ *(size_t *)&image_attr.size = bgrt_image_size;
31696+ pax_close_kernel();
31697
31698 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
31699 if (!bgrt_kobj)
31700diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
31701index cb96296..b81293b 100644
31702--- a/drivers/acpi/blacklist.c
31703+++ b/drivers/acpi/blacklist.c
31704@@ -52,7 +52,7 @@ struct acpi_blacklist_item {
31705 u32 is_critical_error;
31706 };
31707
31708-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
31709+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
31710
31711 /*
31712 * POLICY: If *anything* doesn't work, put it on the blacklist.
31713@@ -193,7 +193,7 @@ static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
31714 return 0;
31715 }
31716
31717-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
31718+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
31719 {
31720 .callback = dmi_disable_osi_vista,
31721 .ident = "Fujitsu Siemens",
31722diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
31723index 7586544..636a2f0 100644
31724--- a/drivers/acpi/ec_sys.c
31725+++ b/drivers/acpi/ec_sys.c
31726@@ -12,6 +12,7 @@
31727 #include <linux/acpi.h>
31728 #include <linux/debugfs.h>
31729 #include <linux/module.h>
31730+#include <linux/uaccess.h>
31731 #include "internal.h"
31732
31733 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
31734@@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
31735 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
31736 */
31737 unsigned int size = EC_SPACE_SIZE;
31738- u8 *data = (u8 *) buf;
31739+ u8 data;
31740 loff_t init_off = *off;
31741 int err = 0;
31742
31743@@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
31744 size = count;
31745
31746 while (size) {
31747- err = ec_read(*off, &data[*off - init_off]);
31748+ err = ec_read(*off, &data);
31749 if (err)
31750 return err;
31751+ if (put_user(data, &buf[*off - init_off]))
31752+ return -EFAULT;
31753 *off += 1;
31754 size--;
31755 }
31756@@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
31757
31758 unsigned int size = count;
31759 loff_t init_off = *off;
31760- u8 *data = (u8 *) buf;
31761 int err = 0;
31762
31763 if (*off >= EC_SPACE_SIZE)
31764@@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
31765 }
31766
31767 while (size) {
31768- u8 byte_write = data[*off - init_off];
31769+ u8 byte_write;
31770+ if (get_user(byte_write, &buf[*off - init_off]))
31771+ return -EFAULT;
31772 err = ec_write(*off, byte_write);
31773 if (err)
31774 return err;
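
[The ec_sys.c handlers had cast the __user buffer to u8 * and dereferenced it directly; the fix moves each byte through a kernel-side temporary and get_user()/put_user(), which fault-check the user access. The shape of the fixed read path, with put_user() reduced to a hypothetical stand-in macro so the sketch compiles in user space, and ec_read_byte() a stub for ec_read():]

#include <stdio.h>

#define put_user(val, ptr) (*(ptr) = (val), 0)    /* stand-in only */

static int ec_read_byte(unsigned off, unsigned char *out)
{
    *out = (unsigned char)off;     /* stands in for ec_read() */
    return 0;
}

static long read_io(unsigned char *buf, unsigned long off, unsigned long count)
{
    unsigned long i;

    for (i = 0; i < count; i++) {
        unsigned char data;        /* kernel-side temporary */
        int err = ec_read_byte(off + i, &data);

        if (err)
            return err;
        if (put_user(data, &buf[i]))    /* checked store to user space */
            return -14;                 /* -EFAULT */
    }
    return (long)count;
}

int main(void)
{
    unsigned char buf[4];
    long n = read_io(buf, 0x10, sizeof(buf));

    printf("%ld: %02x %02x %02x %02x\n", n, buf[0], buf[1], buf[2], buf[3]);
    return 0;
}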
31775diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
31776index e83311b..142b5cc 100644
31777--- a/drivers/acpi/processor_driver.c
31778+++ b/drivers/acpi/processor_driver.c
31779@@ -558,7 +558,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
31780 return 0;
31781 #endif
31782
31783- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
31784+ BUG_ON(pr->id >= nr_cpu_ids);
31785
31786 /*
31787 * Buggy BIOS check
31788diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
31789index ed9a1cc..f4a354c 100644
31790--- a/drivers/acpi/processor_idle.c
31791+++ b/drivers/acpi/processor_idle.c
31792@@ -1005,7 +1005,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
31793 {
31794 int i, count = CPUIDLE_DRIVER_STATE_START;
31795 struct acpi_processor_cx *cx;
31796- struct cpuidle_state *state;
31797+ cpuidle_state_no_const *state;
31798 struct cpuidle_driver *drv = &acpi_idle_driver;
31799
31800 if (!pr->flags.power_setup_done)
31801diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
31802index ea61ca9..3fdd70d 100644
31803--- a/drivers/acpi/sysfs.c
31804+++ b/drivers/acpi/sysfs.c
31805@@ -420,11 +420,11 @@ static u32 num_counters;
31806 static struct attribute **all_attrs;
31807 static u32 acpi_gpe_count;
31808
31809-static struct attribute_group interrupt_stats_attr_group = {
31810+static attribute_group_no_const interrupt_stats_attr_group = {
31811 .name = "interrupts",
31812 };
31813
31814-static struct kobj_attribute *counter_attrs;
31815+static kobj_attribute_no_const *counter_attrs;
31816
31817 static void delete_gpe_attr_array(void)
31818 {
31819diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
31820index 6cd7805..07facb3 100644
31821--- a/drivers/ata/libahci.c
31822+++ b/drivers/ata/libahci.c
31823@@ -1230,7 +1230,7 @@ int ahci_kick_engine(struct ata_port *ap)
31824 }
31825 EXPORT_SYMBOL_GPL(ahci_kick_engine);
31826
31827-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
31828+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
31829 struct ata_taskfile *tf, int is_cmd, u16 flags,
31830 unsigned long timeout_msec)
31831 {
31832diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
31833index 46cd3f4..0871ad0 100644
31834--- a/drivers/ata/libata-core.c
31835+++ b/drivers/ata/libata-core.c
31836@@ -4780,7 +4780,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
31837 struct ata_port *ap;
31838 unsigned int tag;
31839
31840- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
31841+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
31842 ap = qc->ap;
31843
31844 qc->flags = 0;
31845@@ -4796,7 +4796,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
31846 struct ata_port *ap;
31847 struct ata_link *link;
31848
31849- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
31850+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
31851 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
31852 ap = qc->ap;
31853 link = qc->dev->link;
31854@@ -5892,6 +5892,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
31855 return;
31856
31857 spin_lock(&lock);
31858+ pax_open_kernel();
31859
31860 for (cur = ops->inherits; cur; cur = cur->inherits) {
31861 void **inherit = (void **)cur;
31862@@ -5905,8 +5906,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
31863 if (IS_ERR(*pp))
31864 *pp = NULL;
31865
31866- ops->inherits = NULL;
31867+ *(struct ata_port_operations **)&ops->inherits = NULL;
31868
31869+ pax_close_kernel();
31870 spin_unlock(&lock);
31871 }
31872
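
[ata_finalize_port_ops() patches ops tables that grsecurity's constify plugin has placed in read-only memory, so the stores are bracketed with pax_open_kernel()/pax_close_kernel() and go through explicit casts to defeat the const qualifier. A user-space analogue of that bracket using mprotect(); the page and the pointer slot are demo stand-ins:]

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    void **ops = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (ops == MAP_FAILED)
        return 1;
    ops[0] = (void *)main;                   /* initialize while writable */
    mprotect(ops, pagesz, PROT_READ);        /* "constify": now read-only */

    mprotect(ops, pagesz, PROT_READ | PROT_WRITE);  /* pax_open_kernel()  */
    ops[0] = NULL;                           /* the one sanctioned write  */
    mprotect(ops, pagesz, PROT_READ);               /* pax_close_kernel() */

    printf("%p\n", ops[0]);
    munmap(ops, pagesz);
    return 0;
}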
31873diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
31874index 405022d..fb70e53 100644
31875--- a/drivers/ata/pata_arasan_cf.c
31876+++ b/drivers/ata/pata_arasan_cf.c
31877@@ -864,7 +864,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
31878 /* Handle platform specific quirks */
31879 if (pdata->quirk) {
31880 if (pdata->quirk & CF_BROKEN_PIO) {
31881- ap->ops->set_piomode = NULL;
31882+ pax_open_kernel();
31883+ *(void **)&ap->ops->set_piomode = NULL;
31884+ pax_close_kernel();
31885 ap->pio_mask = 0;
31886 }
31887 if (pdata->quirk & CF_BROKEN_MWDMA)
31888diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
31889index f9b983a..887b9d8 100644
31890--- a/drivers/atm/adummy.c
31891+++ b/drivers/atm/adummy.c
31892@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
31893 vcc->pop(vcc, skb);
31894 else
31895 dev_kfree_skb_any(skb);
31896- atomic_inc(&vcc->stats->tx);
31897+ atomic_inc_unchecked(&vcc->stats->tx);
31898
31899 return 0;
31900 }
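
[From here through the rest of the ATM drivers the hunks are one mechanical substitution: per-VCC statistics counters (tx, rx, tx_err, rx_err, rx_drop) move from atomic_inc()/atomic_add() to the _unchecked variants. Under PAX_REFCOUNT the checked operations trap on signed overflow to catch reference-count bugs; pure packet counters may wrap harmlessly, hence the opt-out. The distinction, with C11 stand-ins rather than the kernel primitives:]

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static int checked_inc(_Atomic int *v)      /* refcount-style: detect wrap */
{
    int old = atomic_load(v);

    do {
        if (old == INT_MAX)
            return -1;                      /* would overflow: refuse */
    } while (!atomic_compare_exchange_weak(v, &old, old + 1));
    return 0;
}

static void unchecked_inc(_Atomic unsigned *v)   /* stats-style: wrap freely */
{
    atomic_fetch_add(v, 1);
}

int main(void)
{
    _Atomic int ref = INT_MAX;
    _Atomic unsigned rx = UINT_MAX;

    printf("checked: %d\n", checked_inc(&ref)); /* -1: increment refused */
    unchecked_inc(&rx);                         /* wraps around to 0 */
    printf("stats: %u\n", atomic_load(&rx));
    return 0;
}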
31901diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
31902index 77a7480..05cde58 100644
31903--- a/drivers/atm/ambassador.c
31904+++ b/drivers/atm/ambassador.c
31905@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
31906 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
31907
31908 // VC layer stats
31909- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
31910+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
31911
31912 // free the descriptor
31913 kfree (tx_descr);
31914@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
31915 dump_skb ("<<<", vc, skb);
31916
31917 // VC layer stats
31918- atomic_inc(&atm_vcc->stats->rx);
31919+ atomic_inc_unchecked(&atm_vcc->stats->rx);
31920 __net_timestamp(skb);
31921 // end of our responsibility
31922 atm_vcc->push (atm_vcc, skb);
31923@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
31924 } else {
31925 PRINTK (KERN_INFO, "dropped over-size frame");
31926 // should we count this?
31927- atomic_inc(&atm_vcc->stats->rx_drop);
31928+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
31929 }
31930
31931 } else {
31932@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
31933 }
31934
31935 if (check_area (skb->data, skb->len)) {
31936- atomic_inc(&atm_vcc->stats->tx_err);
31937+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
31938 return -ENOMEM; // ?
31939 }
31940
31941diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
31942index b22d71c..d6e1049 100644
31943--- a/drivers/atm/atmtcp.c
31944+++ b/drivers/atm/atmtcp.c
31945@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
31946 if (vcc->pop) vcc->pop(vcc,skb);
31947 else dev_kfree_skb(skb);
31948 if (dev_data) return 0;
31949- atomic_inc(&vcc->stats->tx_err);
31950+ atomic_inc_unchecked(&vcc->stats->tx_err);
31951 return -ENOLINK;
31952 }
31953 size = skb->len+sizeof(struct atmtcp_hdr);
31954@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
31955 if (!new_skb) {
31956 if (vcc->pop) vcc->pop(vcc,skb);
31957 else dev_kfree_skb(skb);
31958- atomic_inc(&vcc->stats->tx_err);
31959+ atomic_inc_unchecked(&vcc->stats->tx_err);
31960 return -ENOBUFS;
31961 }
31962 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
31963@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
31964 if (vcc->pop) vcc->pop(vcc,skb);
31965 else dev_kfree_skb(skb);
31966 out_vcc->push(out_vcc,new_skb);
31967- atomic_inc(&vcc->stats->tx);
31968- atomic_inc(&out_vcc->stats->rx);
31969+ atomic_inc_unchecked(&vcc->stats->tx);
31970+ atomic_inc_unchecked(&out_vcc->stats->rx);
31971 return 0;
31972 }
31973
31974@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
31975 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
31976 read_unlock(&vcc_sklist_lock);
31977 if (!out_vcc) {
31978- atomic_inc(&vcc->stats->tx_err);
31979+ atomic_inc_unchecked(&vcc->stats->tx_err);
31980 goto done;
31981 }
31982 skb_pull(skb,sizeof(struct atmtcp_hdr));
31983@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
31984 __net_timestamp(new_skb);
31985 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
31986 out_vcc->push(out_vcc,new_skb);
31987- atomic_inc(&vcc->stats->tx);
31988- atomic_inc(&out_vcc->stats->rx);
31989+ atomic_inc_unchecked(&vcc->stats->tx);
31990+ atomic_inc_unchecked(&out_vcc->stats->rx);
31991 done:
31992 if (vcc->pop) vcc->pop(vcc,skb);
31993 else dev_kfree_skb(skb);
31994diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
31995index c1eb6fa..4c71be9 100644
31996--- a/drivers/atm/eni.c
31997+++ b/drivers/atm/eni.c
31998@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
31999 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
32000 vcc->dev->number);
32001 length = 0;
32002- atomic_inc(&vcc->stats->rx_err);
32003+ atomic_inc_unchecked(&vcc->stats->rx_err);
32004 }
32005 else {
32006 length = ATM_CELL_SIZE-1; /* no HEC */
32007@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
32008 size);
32009 }
32010 eff = length = 0;
32011- atomic_inc(&vcc->stats->rx_err);
32012+ atomic_inc_unchecked(&vcc->stats->rx_err);
32013 }
32014 else {
32015 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
32016@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
32017 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
32018 vcc->dev->number,vcc->vci,length,size << 2,descr);
32019 length = eff = 0;
32020- atomic_inc(&vcc->stats->rx_err);
32021+ atomic_inc_unchecked(&vcc->stats->rx_err);
32022 }
32023 }
32024 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
32025@@ -767,7 +767,7 @@ rx_dequeued++;
32026 vcc->push(vcc,skb);
32027 pushed++;
32028 }
32029- atomic_inc(&vcc->stats->rx);
32030+ atomic_inc_unchecked(&vcc->stats->rx);
32031 }
32032 wake_up(&eni_dev->rx_wait);
32033 }
32034@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
32035 PCI_DMA_TODEVICE);
32036 if (vcc->pop) vcc->pop(vcc,skb);
32037 else dev_kfree_skb_irq(skb);
32038- atomic_inc(&vcc->stats->tx);
32039+ atomic_inc_unchecked(&vcc->stats->tx);
32040 wake_up(&eni_dev->tx_wait);
32041 dma_complete++;
32042 }
32043diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
32044index b41c948..a002b17 100644
32045--- a/drivers/atm/firestream.c
32046+++ b/drivers/atm/firestream.c
32047@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
32048 }
32049 }
32050
32051- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
32052+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
32053
32054 fs_dprintk (FS_DEBUG_TXMEM, "i");
32055 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
32056@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
32057 #endif
32058 skb_put (skb, qe->p1 & 0xffff);
32059 ATM_SKB(skb)->vcc = atm_vcc;
32060- atomic_inc(&atm_vcc->stats->rx);
32061+ atomic_inc_unchecked(&atm_vcc->stats->rx);
32062 __net_timestamp(skb);
32063 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
32064 atm_vcc->push (atm_vcc, skb);
32065@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
32066 kfree (pe);
32067 }
32068 if (atm_vcc)
32069- atomic_inc(&atm_vcc->stats->rx_drop);
32070+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
32071 break;
32072 case 0x1f: /* Reassembly abort: no buffers. */
32073 /* Silently increment error counter. */
32074 if (atm_vcc)
32075- atomic_inc(&atm_vcc->stats->rx_drop);
32076+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
32077 break;
32078 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
32079 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
32080diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
32081index 204814e..cede831 100644
32082--- a/drivers/atm/fore200e.c
32083+++ b/drivers/atm/fore200e.c
32084@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
32085 #endif
32086 /* check error condition */
32087 if (*entry->status & STATUS_ERROR)
32088- atomic_inc(&vcc->stats->tx_err);
32089+ atomic_inc_unchecked(&vcc->stats->tx_err);
32090 else
32091- atomic_inc(&vcc->stats->tx);
32092+ atomic_inc_unchecked(&vcc->stats->tx);
32093 }
32094 }
32095
32096@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
32097 if (skb == NULL) {
32098 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
32099
32100- atomic_inc(&vcc->stats->rx_drop);
32101+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32102 return -ENOMEM;
32103 }
32104
32105@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
32106
32107 dev_kfree_skb_any(skb);
32108
32109- atomic_inc(&vcc->stats->rx_drop);
32110+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32111 return -ENOMEM;
32112 }
32113
32114 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
32115
32116 vcc->push(vcc, skb);
32117- atomic_inc(&vcc->stats->rx);
32118+ atomic_inc_unchecked(&vcc->stats->rx);
32119
32120 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
32121
32122@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
32123 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
32124 fore200e->atm_dev->number,
32125 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
32126- atomic_inc(&vcc->stats->rx_err);
32127+ atomic_inc_unchecked(&vcc->stats->rx_err);
32128 }
32129 }
32130
32131@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
32132 goto retry_here;
32133 }
32134
32135- atomic_inc(&vcc->stats->tx_err);
32136+ atomic_inc_unchecked(&vcc->stats->tx_err);
32137
32138 fore200e->tx_sat++;
32139 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
32140diff --git a/drivers/atm/he.c b/drivers/atm/he.c
32141index 72b6960..cf9167a 100644
32142--- a/drivers/atm/he.c
32143+++ b/drivers/atm/he.c
32144@@ -1699,7 +1699,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
32145
32146 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
32147 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
32148- atomic_inc(&vcc->stats->rx_drop);
32149+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32150 goto return_host_buffers;
32151 }
32152
32153@@ -1726,7 +1726,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
32154 RBRQ_LEN_ERR(he_dev->rbrq_head)
32155 ? "LEN_ERR" : "",
32156 vcc->vpi, vcc->vci);
32157- atomic_inc(&vcc->stats->rx_err);
32158+ atomic_inc_unchecked(&vcc->stats->rx_err);
32159 goto return_host_buffers;
32160 }
32161
32162@@ -1778,7 +1778,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
32163 vcc->push(vcc, skb);
32164 spin_lock(&he_dev->global_lock);
32165
32166- atomic_inc(&vcc->stats->rx);
32167+ atomic_inc_unchecked(&vcc->stats->rx);
32168
32169 return_host_buffers:
32170 ++pdus_assembled;
32171@@ -2104,7 +2104,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
32172 tpd->vcc->pop(tpd->vcc, tpd->skb);
32173 else
32174 dev_kfree_skb_any(tpd->skb);
32175- atomic_inc(&tpd->vcc->stats->tx_err);
32176+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
32177 }
32178 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
32179 return;
32180@@ -2516,7 +2516,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32181 vcc->pop(vcc, skb);
32182 else
32183 dev_kfree_skb_any(skb);
32184- atomic_inc(&vcc->stats->tx_err);
32185+ atomic_inc_unchecked(&vcc->stats->tx_err);
32186 return -EINVAL;
32187 }
32188
32189@@ -2527,7 +2527,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32190 vcc->pop(vcc, skb);
32191 else
32192 dev_kfree_skb_any(skb);
32193- atomic_inc(&vcc->stats->tx_err);
32194+ atomic_inc_unchecked(&vcc->stats->tx_err);
32195 return -EINVAL;
32196 }
32197 #endif
32198@@ -2539,7 +2539,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32199 vcc->pop(vcc, skb);
32200 else
32201 dev_kfree_skb_any(skb);
32202- atomic_inc(&vcc->stats->tx_err);
32203+ atomic_inc_unchecked(&vcc->stats->tx_err);
32204 spin_unlock_irqrestore(&he_dev->global_lock, flags);
32205 return -ENOMEM;
32206 }
32207@@ -2581,7 +2581,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32208 vcc->pop(vcc, skb);
32209 else
32210 dev_kfree_skb_any(skb);
32211- atomic_inc(&vcc->stats->tx_err);
32212+ atomic_inc_unchecked(&vcc->stats->tx_err);
32213 spin_unlock_irqrestore(&he_dev->global_lock, flags);
32214 return -ENOMEM;
32215 }
32216@@ -2612,7 +2612,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32217 __enqueue_tpd(he_dev, tpd, cid);
32218 spin_unlock_irqrestore(&he_dev->global_lock, flags);
32219
32220- atomic_inc(&vcc->stats->tx);
32221+ atomic_inc_unchecked(&vcc->stats->tx);
32222
32223 return 0;
32224 }
32225diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
32226index 1dc0519..1aadaf7 100644
32227--- a/drivers/atm/horizon.c
32228+++ b/drivers/atm/horizon.c
32229@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
32230 {
32231 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
32232 // VC layer stats
32233- atomic_inc(&vcc->stats->rx);
32234+ atomic_inc_unchecked(&vcc->stats->rx);
32235 __net_timestamp(skb);
32236 // end of our responsibility
32237 vcc->push (vcc, skb);
32238@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
32239 dev->tx_iovec = NULL;
32240
32241 // VC layer stats
32242- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
32243+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
32244
32245 // free the skb
32246 hrz_kfree_skb (skb);
32247diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
32248index 272f009..a18ba55 100644
32249--- a/drivers/atm/idt77252.c
32250+++ b/drivers/atm/idt77252.c
32251@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
32252 else
32253 dev_kfree_skb(skb);
32254
32255- atomic_inc(&vcc->stats->tx);
32256+ atomic_inc_unchecked(&vcc->stats->tx);
32257 }
32258
32259 atomic_dec(&scq->used);
32260@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32261 if ((sb = dev_alloc_skb(64)) == NULL) {
32262 printk("%s: Can't allocate buffers for aal0.\n",
32263 card->name);
32264- atomic_add(i, &vcc->stats->rx_drop);
32265+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
32266 break;
32267 }
32268 if (!atm_charge(vcc, sb->truesize)) {
32269 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
32270 card->name);
32271- atomic_add(i - 1, &vcc->stats->rx_drop);
32272+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
32273 dev_kfree_skb(sb);
32274 break;
32275 }
32276@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32277 ATM_SKB(sb)->vcc = vcc;
32278 __net_timestamp(sb);
32279 vcc->push(vcc, sb);
32280- atomic_inc(&vcc->stats->rx);
32281+ atomic_inc_unchecked(&vcc->stats->rx);
32282
32283 cell += ATM_CELL_PAYLOAD;
32284 }
32285@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32286 "(CDC: %08x)\n",
32287 card->name, len, rpp->len, readl(SAR_REG_CDC));
32288 recycle_rx_pool_skb(card, rpp);
32289- atomic_inc(&vcc->stats->rx_err);
32290+ atomic_inc_unchecked(&vcc->stats->rx_err);
32291 return;
32292 }
32293 if (stat & SAR_RSQE_CRC) {
32294 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
32295 recycle_rx_pool_skb(card, rpp);
32296- atomic_inc(&vcc->stats->rx_err);
32297+ atomic_inc_unchecked(&vcc->stats->rx_err);
32298 return;
32299 }
32300 if (skb_queue_len(&rpp->queue) > 1) {
32301@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32302 RXPRINTK("%s: Can't alloc RX skb.\n",
32303 card->name);
32304 recycle_rx_pool_skb(card, rpp);
32305- atomic_inc(&vcc->stats->rx_err);
32306+ atomic_inc_unchecked(&vcc->stats->rx_err);
32307 return;
32308 }
32309 if (!atm_charge(vcc, skb->truesize)) {
32310@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32311 __net_timestamp(skb);
32312
32313 vcc->push(vcc, skb);
32314- atomic_inc(&vcc->stats->rx);
32315+ atomic_inc_unchecked(&vcc->stats->rx);
32316
32317 return;
32318 }
32319@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32320 __net_timestamp(skb);
32321
32322 vcc->push(vcc, skb);
32323- atomic_inc(&vcc->stats->rx);
32324+ atomic_inc_unchecked(&vcc->stats->rx);
32325
32326 if (skb->truesize > SAR_FB_SIZE_3)
32327 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
32328@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
32329 if (vcc->qos.aal != ATM_AAL0) {
32330 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
32331 card->name, vpi, vci);
32332- atomic_inc(&vcc->stats->rx_drop);
32333+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32334 goto drop;
32335 }
32336
32337 if ((sb = dev_alloc_skb(64)) == NULL) {
32338 printk("%s: Can't allocate buffers for AAL0.\n",
32339 card->name);
32340- atomic_inc(&vcc->stats->rx_err);
32341+ atomic_inc_unchecked(&vcc->stats->rx_err);
32342 goto drop;
32343 }
32344
32345@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
32346 ATM_SKB(sb)->vcc = vcc;
32347 __net_timestamp(sb);
32348 vcc->push(vcc, sb);
32349- atomic_inc(&vcc->stats->rx);
32350+ atomic_inc_unchecked(&vcc->stats->rx);
32351
32352 drop:
32353 skb_pull(queue, 64);
32354@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
32355
32356 if (vc == NULL) {
32357 printk("%s: NULL connection in send().\n", card->name);
32358- atomic_inc(&vcc->stats->tx_err);
32359+ atomic_inc_unchecked(&vcc->stats->tx_err);
32360 dev_kfree_skb(skb);
32361 return -EINVAL;
32362 }
32363 if (!test_bit(VCF_TX, &vc->flags)) {
32364 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
32365- atomic_inc(&vcc->stats->tx_err);
32366+ atomic_inc_unchecked(&vcc->stats->tx_err);
32367 dev_kfree_skb(skb);
32368 return -EINVAL;
32369 }
32370@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
32371 break;
32372 default:
32373 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
32374- atomic_inc(&vcc->stats->tx_err);
32375+ atomic_inc_unchecked(&vcc->stats->tx_err);
32376 dev_kfree_skb(skb);
32377 return -EINVAL;
32378 }
32379
32380 if (skb_shinfo(skb)->nr_frags != 0) {
32381 printk("%s: No scatter-gather yet.\n", card->name);
32382- atomic_inc(&vcc->stats->tx_err);
32383+ atomic_inc_unchecked(&vcc->stats->tx_err);
32384 dev_kfree_skb(skb);
32385 return -EINVAL;
32386 }
32387@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
32388
32389 err = queue_skb(card, vc, skb, oam);
32390 if (err) {
32391- atomic_inc(&vcc->stats->tx_err);
32392+ atomic_inc_unchecked(&vcc->stats->tx_err);
32393 dev_kfree_skb(skb);
32394 return err;
32395 }
32396@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
32397 skb = dev_alloc_skb(64);
32398 if (!skb) {
32399 printk("%s: Out of memory in send_oam().\n", card->name);
32400- atomic_inc(&vcc->stats->tx_err);
32401+ atomic_inc_unchecked(&vcc->stats->tx_err);
32402 return -ENOMEM;
32403 }
32404 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
32405diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
32406index 4217f29..88f547a 100644
32407--- a/drivers/atm/iphase.c
32408+++ b/drivers/atm/iphase.c
32409@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
32410 status = (u_short) (buf_desc_ptr->desc_mode);
32411 if (status & (RX_CER | RX_PTE | RX_OFL))
32412 {
32413- atomic_inc(&vcc->stats->rx_err);
32414+ atomic_inc_unchecked(&vcc->stats->rx_err);
32415 IF_ERR(printk("IA: bad packet, dropping it");)
32416 if (status & RX_CER) {
32417 IF_ERR(printk(" cause: packet CRC error\n");)
32418@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
32419 len = dma_addr - buf_addr;
32420 if (len > iadev->rx_buf_sz) {
32421 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
32422- atomic_inc(&vcc->stats->rx_err);
32423+ atomic_inc_unchecked(&vcc->stats->rx_err);
32424 goto out_free_desc;
32425 }
32426
32427@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
32428 ia_vcc = INPH_IA_VCC(vcc);
32429 if (ia_vcc == NULL)
32430 {
32431- atomic_inc(&vcc->stats->rx_err);
32432+ atomic_inc_unchecked(&vcc->stats->rx_err);
32433 atm_return(vcc, skb->truesize);
32434 dev_kfree_skb_any(skb);
32435 goto INCR_DLE;
32436@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
32437 if ((length > iadev->rx_buf_sz) || (length >
32438 (skb->len - sizeof(struct cpcs_trailer))))
32439 {
32440- atomic_inc(&vcc->stats->rx_err);
32441+ atomic_inc_unchecked(&vcc->stats->rx_err);
32442 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
32443 length, skb->len);)
32444 atm_return(vcc, skb->truesize);
32445@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
32446
32447 IF_RX(printk("rx_dle_intr: skb push");)
32448 vcc->push(vcc,skb);
32449- atomic_inc(&vcc->stats->rx);
32450+ atomic_inc_unchecked(&vcc->stats->rx);
32451 iadev->rx_pkt_cnt++;
32452 }
32453 INCR_DLE:
32454@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
32455 {
32456 struct k_sonet_stats *stats;
32457 stats = &PRIV(_ia_dev[board])->sonet_stats;
32458- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
32459- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
32460- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
32461- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
32462- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
32463- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
32464- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
32465- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
32466- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
32467+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
32468+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
32469+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
32470+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
32471+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
32472+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
32473+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
32474+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
32475+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
32476 }
32477 ia_cmds.status = 0;
32478 break;
32479@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
32480 if ((desc == 0) || (desc > iadev->num_tx_desc))
32481 {
32482 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
32483- atomic_inc(&vcc->stats->tx);
32484+ atomic_inc_unchecked(&vcc->stats->tx);
32485 if (vcc->pop)
32486 vcc->pop(vcc, skb);
32487 else
32488@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
32489 ATM_DESC(skb) = vcc->vci;
32490 skb_queue_tail(&iadev->tx_dma_q, skb);
32491
32492- atomic_inc(&vcc->stats->tx);
32493+ atomic_inc_unchecked(&vcc->stats->tx);
32494 iadev->tx_pkt_cnt++;
32495 /* Increment transaction counter */
32496 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
32497
32498 #if 0
32499 /* add flow control logic */
32500- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
32501+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
32502 if (iavcc->vc_desc_cnt > 10) {
32503 vcc->tx_quota = vcc->tx_quota * 3 / 4;
32504 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
32505diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
32506index fa7d701..1e404c7 100644
32507--- a/drivers/atm/lanai.c
32508+++ b/drivers/atm/lanai.c
32509@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
32510 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
32511 lanai_endtx(lanai, lvcc);
32512 lanai_free_skb(lvcc->tx.atmvcc, skb);
32513- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
32514+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
32515 }
32516
32517 /* Try to fill the buffer - don't call unless there is backlog */
32518@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
32519 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
32520 __net_timestamp(skb);
32521 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
32522- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
32523+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
32524 out:
32525 lvcc->rx.buf.ptr = end;
32526 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
32527@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32528 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
32529 "vcc %d\n", lanai->number, (unsigned int) s, vci);
32530 lanai->stats.service_rxnotaal5++;
32531- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32532+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32533 return 0;
32534 }
32535 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
32536@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32537 int bytes;
32538 read_unlock(&vcc_sklist_lock);
32539 DPRINTK("got trashed rx pdu on vci %d\n", vci);
32540- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32541+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32542 lvcc->stats.x.aal5.service_trash++;
32543 bytes = (SERVICE_GET_END(s) * 16) -
32544 (((unsigned long) lvcc->rx.buf.ptr) -
32545@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32546 }
32547 if (s & SERVICE_STREAM) {
32548 read_unlock(&vcc_sklist_lock);
32549- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32550+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32551 lvcc->stats.x.aal5.service_stream++;
32552 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
32553 "PDU on VCI %d!\n", lanai->number, vci);
32554@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32555 return 0;
32556 }
32557 DPRINTK("got rx crc error on vci %d\n", vci);
32558- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32559+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32560 lvcc->stats.x.aal5.service_rxcrc++;
32561 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
32562 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
32563diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
32564index ed1d2b7..8cffc1f 100644
32565--- a/drivers/atm/nicstar.c
32566+++ b/drivers/atm/nicstar.c
32567@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32568 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
32569 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
32570 card->index);
32571- atomic_inc(&vcc->stats->tx_err);
32572+ atomic_inc_unchecked(&vcc->stats->tx_err);
32573 dev_kfree_skb_any(skb);
32574 return -EINVAL;
32575 }
32576@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32577 if (!vc->tx) {
32578 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
32579 card->index);
32580- atomic_inc(&vcc->stats->tx_err);
32581+ atomic_inc_unchecked(&vcc->stats->tx_err);
32582 dev_kfree_skb_any(skb);
32583 return -EINVAL;
32584 }
32585@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32586 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
32587 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
32588 card->index);
32589- atomic_inc(&vcc->stats->tx_err);
32590+ atomic_inc_unchecked(&vcc->stats->tx_err);
32591 dev_kfree_skb_any(skb);
32592 return -EINVAL;
32593 }
32594
32595 if (skb_shinfo(skb)->nr_frags != 0) {
32596 printk("nicstar%d: No scatter-gather yet.\n", card->index);
32597- atomic_inc(&vcc->stats->tx_err);
32598+ atomic_inc_unchecked(&vcc->stats->tx_err);
32599 dev_kfree_skb_any(skb);
32600 return -EINVAL;
32601 }
32602@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32603 }
32604
32605 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
32606- atomic_inc(&vcc->stats->tx_err);
32607+ atomic_inc_unchecked(&vcc->stats->tx_err);
32608 dev_kfree_skb_any(skb);
32609 return -EIO;
32610 }
32611- atomic_inc(&vcc->stats->tx);
32612+ atomic_inc_unchecked(&vcc->stats->tx);
32613
32614 return 0;
32615 }
32616@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32617 printk
32618 ("nicstar%d: Can't allocate buffers for aal0.\n",
32619 card->index);
32620- atomic_add(i, &vcc->stats->rx_drop);
32621+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
32622 break;
32623 }
32624 if (!atm_charge(vcc, sb->truesize)) {
32625 RXPRINTK
32626 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
32627 card->index);
32628- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
32629+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
32630 dev_kfree_skb_any(sb);
32631 break;
32632 }
32633@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32634 ATM_SKB(sb)->vcc = vcc;
32635 __net_timestamp(sb);
32636 vcc->push(vcc, sb);
32637- atomic_inc(&vcc->stats->rx);
32638+ atomic_inc_unchecked(&vcc->stats->rx);
32639 cell += ATM_CELL_PAYLOAD;
32640 }
32641
32642@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32643 if (iovb == NULL) {
32644 printk("nicstar%d: Out of iovec buffers.\n",
32645 card->index);
32646- atomic_inc(&vcc->stats->rx_drop);
32647+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32648 recycle_rx_buf(card, skb);
32649 return;
32650 }
32651@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32652 small or large buffer itself. */
32653 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
32654 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
32655- atomic_inc(&vcc->stats->rx_err);
32656+ atomic_inc_unchecked(&vcc->stats->rx_err);
32657 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
32658 NS_MAX_IOVECS);
32659 NS_PRV_IOVCNT(iovb) = 0;
32660@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32661 ("nicstar%d: Expected a small buffer, and this is not one.\n",
32662 card->index);
32663 which_list(card, skb);
32664- atomic_inc(&vcc->stats->rx_err);
32665+ atomic_inc_unchecked(&vcc->stats->rx_err);
32666 recycle_rx_buf(card, skb);
32667 vc->rx_iov = NULL;
32668 recycle_iov_buf(card, iovb);
32669@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32670 ("nicstar%d: Expected a large buffer, and this is not one.\n",
32671 card->index);
32672 which_list(card, skb);
32673- atomic_inc(&vcc->stats->rx_err);
32674+ atomic_inc_unchecked(&vcc->stats->rx_err);
32675 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
32676 NS_PRV_IOVCNT(iovb));
32677 vc->rx_iov = NULL;
32678@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32679 printk(" - PDU size mismatch.\n");
32680 else
32681 printk(".\n");
32682- atomic_inc(&vcc->stats->rx_err);
32683+ atomic_inc_unchecked(&vcc->stats->rx_err);
32684 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
32685 NS_PRV_IOVCNT(iovb));
32686 vc->rx_iov = NULL;
32687@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32688 /* skb points to a small buffer */
32689 if (!atm_charge(vcc, skb->truesize)) {
32690 push_rxbufs(card, skb);
32691- atomic_inc(&vcc->stats->rx_drop);
32692+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32693 } else {
32694 skb_put(skb, len);
32695 dequeue_sm_buf(card, skb);
32696@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32697 ATM_SKB(skb)->vcc = vcc;
32698 __net_timestamp(skb);
32699 vcc->push(vcc, skb);
32700- atomic_inc(&vcc->stats->rx);
32701+ atomic_inc_unchecked(&vcc->stats->rx);
32702 }
32703 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
32704 struct sk_buff *sb;
32705@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32706 if (len <= NS_SMBUFSIZE) {
32707 if (!atm_charge(vcc, sb->truesize)) {
32708 push_rxbufs(card, sb);
32709- atomic_inc(&vcc->stats->rx_drop);
32710+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32711 } else {
32712 skb_put(sb, len);
32713 dequeue_sm_buf(card, sb);
32714@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32715 ATM_SKB(sb)->vcc = vcc;
32716 __net_timestamp(sb);
32717 vcc->push(vcc, sb);
32718- atomic_inc(&vcc->stats->rx);
32719+ atomic_inc_unchecked(&vcc->stats->rx);
32720 }
32721
32722 push_rxbufs(card, skb);
32723@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32724
32725 if (!atm_charge(vcc, skb->truesize)) {
32726 push_rxbufs(card, skb);
32727- atomic_inc(&vcc->stats->rx_drop);
32728+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32729 } else {
32730 dequeue_lg_buf(card, skb);
32731 #ifdef NS_USE_DESTRUCTORS
32732@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32733 ATM_SKB(skb)->vcc = vcc;
32734 __net_timestamp(skb);
32735 vcc->push(vcc, skb);
32736- atomic_inc(&vcc->stats->rx);
32737+ atomic_inc_unchecked(&vcc->stats->rx);
32738 }
32739
32740 push_rxbufs(card, sb);
32741@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32742 printk
32743 ("nicstar%d: Out of huge buffers.\n",
32744 card->index);
32745- atomic_inc(&vcc->stats->rx_drop);
32746+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32747 recycle_iovec_rx_bufs(card,
32748 (struct iovec *)
32749 iovb->data,
32750@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32751 card->hbpool.count++;
32752 } else
32753 dev_kfree_skb_any(hb);
32754- atomic_inc(&vcc->stats->rx_drop);
32755+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32756 } else {
32757 /* Copy the small buffer to the huge buffer */
32758 sb = (struct sk_buff *)iov->iov_base;
32759@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32760 #endif /* NS_USE_DESTRUCTORS */
32761 __net_timestamp(hb);
32762 vcc->push(vcc, hb);
32763- atomic_inc(&vcc->stats->rx);
32764+ atomic_inc_unchecked(&vcc->stats->rx);
32765 }
32766 }
32767
32768diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
32769index 0474a89..06ea4a1 100644
32770--- a/drivers/atm/solos-pci.c
32771+++ b/drivers/atm/solos-pci.c
32772@@ -838,7 +838,7 @@ void solos_bh(unsigned long card_arg)
32773 }
32774 atm_charge(vcc, skb->truesize);
32775 vcc->push(vcc, skb);
32776- atomic_inc(&vcc->stats->rx);
32777+ atomic_inc_unchecked(&vcc->stats->rx);
32778 break;
32779
32780 case PKT_STATUS:
32781@@ -1117,7 +1117,7 @@ static uint32_t fpga_tx(struct solos_card *card)
32782 vcc = SKB_CB(oldskb)->vcc;
32783
32784 if (vcc) {
32785- atomic_inc(&vcc->stats->tx);
32786+ atomic_inc_unchecked(&vcc->stats->tx);
32787 solos_pop(vcc, oldskb);
32788 } else {
32789 dev_kfree_skb_irq(oldskb);
32790diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
32791index 0215934..ce9f5b1 100644
32792--- a/drivers/atm/suni.c
32793+++ b/drivers/atm/suni.c
32794@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
32795
32796
32797 #define ADD_LIMITED(s,v) \
32798- atomic_add((v),&stats->s); \
32799- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
32800+ atomic_add_unchecked((v),&stats->s); \
32801+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
32802
32803
32804 static void suni_hz(unsigned long from_timer)
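
ADD_LIMITED in suni.c is a saturating accumulator: it adds v and, if the signed counter has gone negative (i.e. wrapped), pins it at INT_MAX. Since that wrap is intentional, the macro is rewritten with the unchecked primitives so PAX_REFCOUNT does not trap the very overflow the macro is designed to absorb. A standalone sketch of the same logic, reusing the atomic_unchecked_t type sketched after the he.c hunks:

#include <limits.h>

static inline void add_limited(atomic_unchecked_t *s, int v)
{
	__atomic_fetch_add(&s->counter, v, __ATOMIC_RELAXED);
	/* a negative value means the signed counter wrapped: saturate */
	if (__atomic_load_n(&s->counter, __ATOMIC_RELAXED) < 0)
		__atomic_store_n(&s->counter, INT_MAX, __ATOMIC_RELAXED);
}
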
32805diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
32806index 5120a96..e2572bd 100644
32807--- a/drivers/atm/uPD98402.c
32808+++ b/drivers/atm/uPD98402.c
32809@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
32810 struct sonet_stats tmp;
32811 int error = 0;
32812
32813- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
32814+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
32815 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
32816 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
32817 if (zero && !error) {
32818@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
32819
32820
32821 #define ADD_LIMITED(s,v) \
32822- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
32823- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
32824- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
32825+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
32826+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
32827+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
32828
32829
32830 static void stat_event(struct atm_dev *dev)
32831@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
32832 if (reason & uPD98402_INT_PFM) stat_event(dev);
32833 if (reason & uPD98402_INT_PCO) {
32834 (void) GET(PCOCR); /* clear interrupt cause */
32835- atomic_add(GET(HECCT),
32836+ atomic_add_unchecked(GET(HECCT),
32837 &PRIV(dev)->sonet_stats.uncorr_hcs);
32838 }
32839 if ((reason & uPD98402_INT_RFO) &&
32840@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
32841 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
32842 uPD98402_INT_LOS),PIMR); /* enable them */
32843 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
32844- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
32845- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
32846- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
32847+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
32848+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
32849+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
32850 return 0;
32851 }
32852
32853diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
32854index 969c3c2..9b72956 100644
32855--- a/drivers/atm/zatm.c
32856+++ b/drivers/atm/zatm.c
32857@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
32858 }
32859 if (!size) {
32860 dev_kfree_skb_irq(skb);
32861- if (vcc) atomic_inc(&vcc->stats->rx_err);
32862+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
32863 continue;
32864 }
32865 if (!atm_charge(vcc,skb->truesize)) {
32866@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
32867 skb->len = size;
32868 ATM_SKB(skb)->vcc = vcc;
32869 vcc->push(vcc,skb);
32870- atomic_inc(&vcc->stats->rx);
32871+ atomic_inc_unchecked(&vcc->stats->rx);
32872 }
32873 zout(pos & 0xffff,MTA(mbx));
32874 #if 0 /* probably a stupid idea */
32875@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
32876 skb_queue_head(&zatm_vcc->backlog,skb);
32877 break;
32878 }
32879- atomic_inc(&vcc->stats->tx);
32880+ atomic_inc_unchecked(&vcc->stats->tx);
32881 wake_up(&zatm_vcc->tx_wait);
32882 }
32883
32884diff --git a/drivers/base/bus.c b/drivers/base/bus.c
32885index 6856303..0602d70 100644
32886--- a/drivers/base/bus.c
32887+++ b/drivers/base/bus.c
32888@@ -1163,7 +1163,7 @@ int subsys_interface_register(struct subsys_interface *sif)
32889 return -EINVAL;
32890
32891 mutex_lock(&subsys->p->mutex);
32892- list_add_tail(&sif->node, &subsys->p->interfaces);
32893+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
32894 if (sif->add_dev) {
32895 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
32896 while ((dev = subsys_dev_iter_next(&iter)))
32897@@ -1188,7 +1188,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
32898 subsys = sif->subsys;
32899
32900 mutex_lock(&subsys->p->mutex);
32901- list_del_init(&sif->node);
32902+ pax_list_del_init((struct list_head *)&sif->node);
32903 if (sif->remove_dev) {
32904 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
32905 while ((dev = subsys_dev_iter_next(&iter)))
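
subsys_interface nodes are embedded in structures that grsecurity's constify plugin places in read-only memory, so a plain list_add_tail()/list_del_init() would fault when it writes the node's next/prev pointers. The pax_list_* helpers wrap the pointer updates in a briefly writable window. A sketch of the pattern, assuming pax_open_kernel()/pax_close_kernel() toggle write access to otherwise read-only kernel data (prototype only, not a standalone build):

void pax_list_add_tail(struct list_head *new, struct list_head *head)
{
	pax_open_kernel();		/* allow writes to constified memory */
	__list_add(new, head->prev, head);
	pax_close_kernel();		/* restore the read-only mapping */
}
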
32906diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
32907index 17cf7ca..7e553e1 100644
32908--- a/drivers/base/devtmpfs.c
32909+++ b/drivers/base/devtmpfs.c
32910@@ -347,7 +347,7 @@ int devtmpfs_mount(const char *mntdir)
32911 if (!thread)
32912 return 0;
32913
32914- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
32915+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
32916 if (err)
32917 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
32918 else
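
devtmpfs_mount() calls sys_mount() from kernel context with kernel strings, while the syscall prototype declares its arguments __user. grsecurity tightens the sparse address-space checking, so each deliberate kernel-to-user pointer crossing is spelled out with __force_user rather than a bare cast. A sketch of how such an annotation can be defined -- the exact spelling is an assumption, shown only to make the casts above legible:

#ifdef __CHECKER__
# define __force_user	__force __user	/* sparse: deliberate conversion */
# define __force_kernel	__force __kernel
#else
# define __force_user
# define __force_kernel
#endif
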
32919diff --git a/drivers/base/node.c b/drivers/base/node.c
32920index fac124a..66bd4ab 100644
32921--- a/drivers/base/node.c
32922+++ b/drivers/base/node.c
32923@@ -625,7 +625,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
32924 struct node_attr {
32925 struct device_attribute attr;
32926 enum node_states state;
32927-};
32928+} __do_const;
32929
32930 static ssize_t show_node_state(struct device *dev,
32931 struct device_attribute *attr, char *buf)
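
struct node_attr embeds a device_attribute, i.e. show/store function pointers, alongside plain data. The __do_const annotation asks grsecurity's constify gcc plugin to force every instance of the struct const, so those pointers land in .rodata and cannot be redirected by a kernel-write primitive. A sketch, assuming the plugin's attribute spelling:

#define __do_const __attribute__((do_const))

struct node_attr {
	struct device_attribute attr;	/* contains show/store fn pointers */
	enum node_states state;
} __do_const;			/* plugin: all instances become const */
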
32932diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
32933index acc3a8d..981c236 100644
32934--- a/drivers/base/power/domain.c
32935+++ b/drivers/base/power/domain.c
32936@@ -1851,7 +1851,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
32937 {
32938 struct cpuidle_driver *cpuidle_drv;
32939 struct gpd_cpu_data *cpu_data;
32940- struct cpuidle_state *idle_state;
32941+ cpuidle_state_no_const *idle_state;
32942 int ret = 0;
32943
32944 if (IS_ERR_OR_NULL(genpd) || state < 0)
32945@@ -1919,7 +1919,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
32946 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
32947 {
32948 struct gpd_cpu_data *cpu_data;
32949- struct cpuidle_state *idle_state;
32950+ cpuidle_state_no_const *idle_state;
32951 int ret = 0;
32952
32953 if (IS_ERR_OR_NULL(genpd))
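
The flip side of constification: generic_pm_domain fills in a cpuidle_state at runtime, which a constified struct would forbid. The cpuidle_state_no_const typedef opts just these local uses back out while every other cpuidle_state stays read-only. A sketch of the convention, again assuming the plugin's attribute spelling:

#define __no_const __attribute__((no_const))

/* same layout as struct cpuidle_state, but instances stay writable */
typedef struct cpuidle_state __no_const cpuidle_state_no_const;
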
32954diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
32955index e6ee5e8..98ad7fc 100644
32956--- a/drivers/base/power/wakeup.c
32957+++ b/drivers/base/power/wakeup.c
32958@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
32959 * They need to be modified together atomically, so it's better to use one
32960 * atomic variable to hold them both.
32961 */
32962-static atomic_t combined_event_count = ATOMIC_INIT(0);
32963+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
32964
32965 #define IN_PROGRESS_BITS (sizeof(int) * 4)
32966 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
32967
32968 static void split_counters(unsigned int *cnt, unsigned int *inpr)
32969 {
32970- unsigned int comb = atomic_read(&combined_event_count);
32971+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
32972
32973 *cnt = (comb >> IN_PROGRESS_BITS);
32974 *inpr = comb & MAX_IN_PROGRESS;
32975@@ -389,7 +389,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
32976 ws->start_prevent_time = ws->last_time;
32977
32978 /* Increment the counter of events in progress. */
32979- cec = atomic_inc_return(&combined_event_count);
32980+ cec = atomic_inc_return_unchecked(&combined_event_count);
32981
32982 trace_wakeup_source_activate(ws->name, cec);
32983 }
32984@@ -515,7 +515,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
32985 * Increment the counter of registered wakeup events and decrement the
32986 * couter of wakeup events in progress simultaneously.
32987 */
32988- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
32989+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
32990 trace_wakeup_source_deactivate(ws->name, cec);
32991
32992 split_counters(&cnt, &inpr);
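
combined_event_count packs two values into one word: the high bits count registered wakeup events and the low IN_PROGRESS_BITS count events still in flight, so both can be updated in a single atomic operation. The patch changes only the atomic flavor (the counter is statistics-like, so the wraparound check is waived); the packing itself is untouched. A standalone worked example of the arithmetic, using a plain unsigned int where the kernel uses the atomic type:

#include <assert.h>

#define IN_PROGRESS_BITS  (sizeof(int) * 4)		/* 16 for 32-bit int */
#define MAX_IN_PROGRESS   ((1 << IN_PROGRESS_BITS) - 1)

static void split_counters(unsigned int comb, unsigned int *cnt, unsigned int *inpr)
{
	*cnt  = comb >> IN_PROGRESS_BITS;	/* registered events */
	*inpr = comb & MAX_IN_PROGRESS;		/* events in progress */
}

int main(void)
{
	unsigned int comb = 0, cnt, inpr;

	comb += 1;			/* activate: one event in progress */
	comb += 1;			/* activate: two events in progress */
	comb += MAX_IN_PROGRESS;	/* deactivate: the low half loses 1,
					   the carry adds 1 to the high half */
	split_counters(comb, &cnt, &inpr);
	assert(cnt == 1 && inpr == 1);
	return 0;
}
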
32993diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
32994index e8d11b6..7b1b36f 100644
32995--- a/drivers/base/syscore.c
32996+++ b/drivers/base/syscore.c
32997@@ -21,7 +21,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
32998 void register_syscore_ops(struct syscore_ops *ops)
32999 {
33000 mutex_lock(&syscore_ops_lock);
33001- list_add_tail(&ops->node, &syscore_ops_list);
33002+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
33003 mutex_unlock(&syscore_ops_lock);
33004 }
33005 EXPORT_SYMBOL_GPL(register_syscore_ops);
33006@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
33007 void unregister_syscore_ops(struct syscore_ops *ops)
33008 {
33009 mutex_lock(&syscore_ops_lock);
33010- list_del(&ops->node);
33011+ pax_list_del((struct list_head *)&ops->node);
33012 mutex_unlock(&syscore_ops_lock);
33013 }
33014 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
33015diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
33016index ade58bc..867143d 100644
33017--- a/drivers/block/cciss.c
33018+++ b/drivers/block/cciss.c
33019@@ -1196,6 +1196,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
33020 int err;
33021 u32 cp;
33022
33023+ memset(&arg64, 0, sizeof(arg64));
33024+
33025 err = 0;
33026 err |=
33027 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
33028@@ -3005,7 +3007,7 @@ static void start_io(ctlr_info_t *h)
33029 while (!list_empty(&h->reqQ)) {
33030 c = list_entry(h->reqQ.next, CommandList_struct, list);
33031 /* can't do anything if fifo is full */
33032- if ((h->access.fifo_full(h))) {
33033+ if ((h->access->fifo_full(h))) {
33034 dev_warn(&h->pdev->dev, "fifo full\n");
33035 break;
33036 }
33037@@ -3015,7 +3017,7 @@ static void start_io(ctlr_info_t *h)
33038 h->Qdepth--;
33039
33040 /* Tell the controller execute command */
33041- h->access.submit_command(h, c);
33042+ h->access->submit_command(h, c);
33043
33044 /* Put job onto the completed Q */
33045 addQ(&h->cmpQ, c);
33046@@ -3441,17 +3443,17 @@ startio:
33047
33048 static inline unsigned long get_next_completion(ctlr_info_t *h)
33049 {
33050- return h->access.command_completed(h);
33051+ return h->access->command_completed(h);
33052 }
33053
33054 static inline int interrupt_pending(ctlr_info_t *h)
33055 {
33056- return h->access.intr_pending(h);
33057+ return h->access->intr_pending(h);
33058 }
33059
33060 static inline long interrupt_not_for_us(ctlr_info_t *h)
33061 {
33062- return ((h->access.intr_pending(h) == 0) ||
33063+ return ((h->access->intr_pending(h) == 0) ||
33064 (h->interrupts_enabled == 0));
33065 }
33066
33067@@ -3484,7 +3486,7 @@ static inline u32 next_command(ctlr_info_t *h)
33068 u32 a;
33069
33070 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
33071- return h->access.command_completed(h);
33072+ return h->access->command_completed(h);
33073
33074 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
33075 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
33076@@ -4041,7 +4043,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
33077 trans_support & CFGTBL_Trans_use_short_tags);
33078
33079 /* Change the access methods to the performant access methods */
33080- h->access = SA5_performant_access;
33081+ h->access = &SA5_performant_access;
33082 h->transMethod = CFGTBL_Trans_Performant;
33083
33084 return;
33085@@ -4310,7 +4312,7 @@ static int cciss_pci_init(ctlr_info_t *h)
33086 if (prod_index < 0)
33087 return -ENODEV;
33088 h->product_name = products[prod_index].product_name;
33089- h->access = *(products[prod_index].access);
33090+ h->access = products[prod_index].access;
33091
33092 if (cciss_board_disabled(h)) {
33093 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
33094@@ -5032,7 +5034,7 @@ reinit_after_soft_reset:
33095 }
33096
33097 /* make sure the board interrupts are off */
33098- h->access.set_intr_mask(h, CCISS_INTR_OFF);
33099+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
33100 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
33101 if (rc)
33102 goto clean2;
33103@@ -5082,7 +5084,7 @@ reinit_after_soft_reset:
33104 * fake ones to scoop up any residual completions.
33105 */
33106 spin_lock_irqsave(&h->lock, flags);
33107- h->access.set_intr_mask(h, CCISS_INTR_OFF);
33108+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
33109 spin_unlock_irqrestore(&h->lock, flags);
33110 free_irq(h->intr[h->intr_mode], h);
33111 rc = cciss_request_irq(h, cciss_msix_discard_completions,
33112@@ -5102,9 +5104,9 @@ reinit_after_soft_reset:
33113 dev_info(&h->pdev->dev, "Board READY.\n");
33114 dev_info(&h->pdev->dev,
33115 "Waiting for stale completions to drain.\n");
33116- h->access.set_intr_mask(h, CCISS_INTR_ON);
33117+ h->access->set_intr_mask(h, CCISS_INTR_ON);
33118 msleep(10000);
33119- h->access.set_intr_mask(h, CCISS_INTR_OFF);
33120+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
33121
33122 rc = controller_reset_failed(h->cfgtable);
33123 if (rc)
33124@@ -5127,7 +5129,7 @@ reinit_after_soft_reset:
33125 cciss_scsi_setup(h);
33126
33127 /* Turn the interrupts on so we can service requests */
33128- h->access.set_intr_mask(h, CCISS_INTR_ON);
33129+ h->access->set_intr_mask(h, CCISS_INTR_ON);
33130
33131 /* Get the firmware version */
33132 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
33133@@ -5199,7 +5201,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
33134 kfree(flush_buf);
33135 if (return_code != IO_OK)
33136 dev_warn(&h->pdev->dev, "Error flushing cache\n");
33137- h->access.set_intr_mask(h, CCISS_INTR_OFF);
33138+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
33139 free_irq(h->intr[h->intr_mode], h);
33140 }
33141
33142diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
33143index 7fda30e..eb5dfe0 100644
33144--- a/drivers/block/cciss.h
33145+++ b/drivers/block/cciss.h
33146@@ -101,7 +101,7 @@ struct ctlr_info
33147 /* information about each logical volume */
33148 drive_info_struct *drv[CISS_MAX_LUN];
33149
33150- struct access_method access;
33151+ struct access_method *access;
33152
33153 /* queue and queue Info */
33154 struct list_head reqQ;
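
cciss kept a writable per-controller copy of its access_method -- a table of function pointers -- embedded by value in ctlr_info and copied at probe time. The patch turns the field into a pointer, so the tables can live in read-only memory and every call site becomes h->access->fn(h). A sketch of the before/after shape, with the struct bodies abbreviated:

struct ctlr_info;	/* forward declaration for the ops table below */

struct access_method {
	void (*submit_command)(struct ctlr_info *h, void *c);
	void (*set_intr_mask)(struct ctlr_info *h, unsigned long mask);
	unsigned long (*fifo_full)(struct ctlr_info *h);
	/* ... */
};

struct ctlr_info {
	/* before: struct access_method access;  (writable copy per ctlr) */
	struct access_method *access;	/* after: pointer to a shared table */
	/* ... */
};
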
33155diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
33156index 3f08713..56a586a 100644
33157--- a/drivers/block/cpqarray.c
33158+++ b/drivers/block/cpqarray.c
33159@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
33160 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
33161 goto Enomem4;
33162 }
33163- hba[i]->access.set_intr_mask(hba[i], 0);
33164+ hba[i]->access->set_intr_mask(hba[i], 0);
33165 if (request_irq(hba[i]->intr, do_ida_intr,
33166 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
33167 {
33168@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
33169 add_timer(&hba[i]->timer);
33170
33171 /* Enable IRQ now that spinlock and rate limit timer are set up */
33172- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
33173+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
33174
33175 for(j=0; j<NWD; j++) {
33176 struct gendisk *disk = ida_gendisk[i][j];
33177@@ -694,7 +694,7 @@ DBGINFO(
33178 for(i=0; i<NR_PRODUCTS; i++) {
33179 if (board_id == products[i].board_id) {
33180 c->product_name = products[i].product_name;
33181- c->access = *(products[i].access);
33182+ c->access = products[i].access;
33183 break;
33184 }
33185 }
33186@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
33187 hba[ctlr]->intr = intr;
33188 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
33189 hba[ctlr]->product_name = products[j].product_name;
33190- hba[ctlr]->access = *(products[j].access);
33191+ hba[ctlr]->access = products[j].access;
33192 hba[ctlr]->ctlr = ctlr;
33193 hba[ctlr]->board_id = board_id;
33194 hba[ctlr]->pci_dev = NULL; /* not PCI */
33195@@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
33196
33197 while((c = h->reqQ) != NULL) {
33198 /* Can't do anything if we're busy */
33199- if (h->access.fifo_full(h) == 0)
33200+ if (h->access->fifo_full(h) == 0)
33201 return;
33202
33203 /* Get the first entry from the request Q */
33204@@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
33205 h->Qdepth--;
33206
33207 /* Tell the controller to do our bidding */
33208- h->access.submit_command(h, c);
33209+ h->access->submit_command(h, c);
33210
33211 /* Get onto the completion Q */
33212 addQ(&h->cmpQ, c);
33213@@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
33214 unsigned long flags;
33215 __u32 a,a1;
33216
33217- istat = h->access.intr_pending(h);
33218+ istat = h->access->intr_pending(h);
33219 /* Is this interrupt for us? */
33220 if (istat == 0)
33221 return IRQ_NONE;
33222@@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
33223 */
33224 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
33225 if (istat & FIFO_NOT_EMPTY) {
33226- while((a = h->access.command_completed(h))) {
33227+ while((a = h->access->command_completed(h))) {
33228 a1 = a; a &= ~3;
33229 if ((c = h->cmpQ) == NULL)
33230 {
33231@@ -1449,11 +1449,11 @@ static int sendcmd(
33232 /*
33233 * Disable interrupt
33234 */
33235- info_p->access.set_intr_mask(info_p, 0);
33236+ info_p->access->set_intr_mask(info_p, 0);
33237 /* Make sure there is room in the command FIFO */
33238 /* Actually it should be completely empty at this time. */
33239 for (i = 200000; i > 0; i--) {
33240- temp = info_p->access.fifo_full(info_p);
33241+ temp = info_p->access->fifo_full(info_p);
33242 if (temp != 0) {
33243 break;
33244 }
33245@@ -1466,7 +1466,7 @@ DBG(
33246 /*
33247 * Send the cmd
33248 */
33249- info_p->access.submit_command(info_p, c);
33250+ info_p->access->submit_command(info_p, c);
33251 complete = pollcomplete(ctlr);
33252
33253 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
33254@@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
33255 * we check the new geometry. Then turn interrupts back on when
33256 * we're done.
33257 */
33258- host->access.set_intr_mask(host, 0);
33259+ host->access->set_intr_mask(host, 0);
33260 getgeometry(ctlr);
33261- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
33262+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
33263
33264 for(i=0; i<NWD; i++) {
33265 struct gendisk *disk = ida_gendisk[ctlr][i];
33266@@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
33267 /* Wait (up to 2 seconds) for a command to complete */
33268
33269 for (i = 200000; i > 0; i--) {
33270- done = hba[ctlr]->access.command_completed(hba[ctlr]);
33271+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
33272 if (done == 0) {
33273 udelay(10); /* a short fixed delay */
33274 } else
33275diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
33276index be73e9d..7fbf140 100644
33277--- a/drivers/block/cpqarray.h
33278+++ b/drivers/block/cpqarray.h
33279@@ -99,7 +99,7 @@ struct ctlr_info {
33280 drv_info_t drv[NWD];
33281 struct proc_dir_entry *proc;
33282
33283- struct access_method access;
33284+ struct access_method *access;
33285
33286 cmdlist_t *reqQ;
33287 cmdlist_t *cmpQ;
33288diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
33289index 6b51afa..17e1191 100644
33290--- a/drivers/block/drbd/drbd_int.h
33291+++ b/drivers/block/drbd/drbd_int.h
33292@@ -582,7 +582,7 @@ struct drbd_epoch {
33293 struct drbd_tconn *tconn;
33294 struct list_head list;
33295 unsigned int barrier_nr;
33296- atomic_t epoch_size; /* increased on every request added. */
33297+ atomic_unchecked_t epoch_size; /* increased on every request added. */
33298 atomic_t active; /* increased on every req. added, and dec on every finished. */
33299 unsigned long flags;
33300 };
33301@@ -1011,7 +1011,7 @@ struct drbd_conf {
33302 int al_tr_cycle;
33303 int al_tr_pos; /* position of the next transaction in the journal */
33304 wait_queue_head_t seq_wait;
33305- atomic_t packet_seq;
33306+ atomic_unchecked_t packet_seq;
33307 unsigned int peer_seq;
33308 spinlock_t peer_seq_lock;
33309 unsigned int minor;
33310@@ -1527,7 +1527,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
33311 char __user *uoptval;
33312 int err;
33313
33314- uoptval = (char __user __force *)optval;
33315+ uoptval = (char __force_user *)optval;
33316
33317 set_fs(KERNEL_DS);
33318 if (level == SOL_SOCKET)
33319diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
33320index 8c13eeb..217adee 100644
33321--- a/drivers/block/drbd/drbd_main.c
33322+++ b/drivers/block/drbd/drbd_main.c
33323@@ -1317,7 +1317,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
33324 p->sector = sector;
33325 p->block_id = block_id;
33326 p->blksize = blksize;
33327- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
33328+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
33329 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
33330 }
33331
33332@@ -1619,7 +1619,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
33333 return -EIO;
33334 p->sector = cpu_to_be64(req->i.sector);
33335 p->block_id = (unsigned long)req;
33336- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
33337+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
33338 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
33339 if (mdev->state.conn >= C_SYNC_SOURCE &&
33340 mdev->state.conn <= C_PAUSED_SYNC_T)
33341@@ -2574,8 +2574,8 @@ void conn_destroy(struct kref *kref)
33342 {
33343 struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
33344
33345- if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
33346- conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
33347+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size) != 0)
33348+ conn_err(tconn, "epoch_size:%d\n", atomic_read_unchecked(&tconn->current_epoch->epoch_size));
33349 kfree(tconn->current_epoch);
33350
33351 idr_destroy(&tconn->volumes);
33352diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
33353index a9eccfc..f5efe87 100644
33354--- a/drivers/block/drbd/drbd_receiver.c
33355+++ b/drivers/block/drbd/drbd_receiver.c
33356@@ -833,7 +833,7 @@ int drbd_connected(struct drbd_conf *mdev)
33357 {
33358 int err;
33359
33360- atomic_set(&mdev->packet_seq, 0);
33361+ atomic_set_unchecked(&mdev->packet_seq, 0);
33362 mdev->peer_seq = 0;
33363
33364 mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
33365@@ -1191,7 +1191,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
33366 do {
33367 next_epoch = NULL;
33368
33369- epoch_size = atomic_read(&epoch->epoch_size);
33370+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
33371
33372 switch (ev & ~EV_CLEANUP) {
33373 case EV_PUT:
33374@@ -1231,7 +1231,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
33375 rv = FE_DESTROYED;
33376 } else {
33377 epoch->flags = 0;
33378- atomic_set(&epoch->epoch_size, 0);
33379+ atomic_set_unchecked(&epoch->epoch_size, 0);
33380 /* atomic_set(&epoch->active, 0); is already zero */
33381 if (rv == FE_STILL_LIVE)
33382 rv = FE_RECYCLED;
33383@@ -1449,7 +1449,7 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
33384 conn_wait_active_ee_empty(tconn);
33385 drbd_flush(tconn);
33386
33387- if (atomic_read(&tconn->current_epoch->epoch_size)) {
33388+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
33389 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
33390 if (epoch)
33391 break;
33392@@ -1462,11 +1462,11 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
33393 }
33394
33395 epoch->flags = 0;
33396- atomic_set(&epoch->epoch_size, 0);
33397+ atomic_set_unchecked(&epoch->epoch_size, 0);
33398 atomic_set(&epoch->active, 0);
33399
33400 spin_lock(&tconn->epoch_lock);
33401- if (atomic_read(&tconn->current_epoch->epoch_size)) {
33402+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
33403 list_add(&epoch->list, &tconn->current_epoch->list);
33404 tconn->current_epoch = epoch;
33405 tconn->epochs++;
33406@@ -2170,7 +2170,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
33407
33408 err = wait_for_and_update_peer_seq(mdev, peer_seq);
33409 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
33410- atomic_inc(&tconn->current_epoch->epoch_size);
33411+ atomic_inc_unchecked(&tconn->current_epoch->epoch_size);
33412 err2 = drbd_drain_block(mdev, pi->size);
33413 if (!err)
33414 err = err2;
33415@@ -2204,7 +2204,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
33416
33417 spin_lock(&tconn->epoch_lock);
33418 peer_req->epoch = tconn->current_epoch;
33419- atomic_inc(&peer_req->epoch->epoch_size);
33420+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
33421 atomic_inc(&peer_req->epoch->active);
33422 spin_unlock(&tconn->epoch_lock);
33423
33424@@ -4346,7 +4346,7 @@ struct data_cmd {
33425 int expect_payload;
33426 size_t pkt_size;
33427 int (*fn)(struct drbd_tconn *, struct packet_info *);
33428-};
33429+} __do_const;
33430
33431 static struct data_cmd drbd_cmd_handler[] = {
33432 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
33433@@ -4466,7 +4466,7 @@ static void conn_disconnect(struct drbd_tconn *tconn)
33434 if (!list_empty(&tconn->current_epoch->list))
33435 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
33436 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
33437- atomic_set(&tconn->current_epoch->epoch_size, 0);
33438+ atomic_set_unchecked(&tconn->current_epoch->epoch_size, 0);
33439 tconn->send.seen_any_write_yet = false;
33440
33441 conn_info(tconn, "Connection closed\n");
33442@@ -5222,7 +5222,7 @@ static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
33443 struct asender_cmd {
33444 size_t pkt_size;
33445 int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
33446-};
33447+} __do_const;
33448
33449 static struct asender_cmd asender_tbl[] = {
33450 [P_PING] = { 0, got_Ping },
33451diff --git a/drivers/block/loop.c b/drivers/block/loop.c
33452index 8bc6d39..f492563 100644
33453--- a/drivers/block/loop.c
33454+++ b/drivers/block/loop.c
33455@@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
33456 mm_segment_t old_fs = get_fs();
33457
33458 set_fs(get_ds());
33459- bw = file->f_op->write(file, buf, len, &pos);
33460+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
33461 set_fs(old_fs);
33462 if (likely(bw == len))
33463 return 0;
33464diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
33465index d620b44..587561e 100644
33466--- a/drivers/cdrom/cdrom.c
33467+++ b/drivers/cdrom/cdrom.c
33468@@ -416,7 +416,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
33469 ENSURE(reset, CDC_RESET);
33470 ENSURE(generic_packet, CDC_GENERIC_PACKET);
33471 cdi->mc_flags = 0;
33472- cdo->n_minors = 0;
33473 cdi->options = CDO_USE_FFLAGS;
33474
33475 if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
33476@@ -436,8 +435,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
33477 else
33478 cdi->cdda_method = CDDA_OLD;
33479
33480- if (!cdo->generic_packet)
33481- cdo->generic_packet = cdrom_dummy_generic_packet;
33482+ if (!cdo->generic_packet) {
33483+ pax_open_kernel();
33484+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
33485+ pax_close_kernel();
33486+ }
33487
33488 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
33489 mutex_lock(&cdrom_mutex);
33490@@ -458,7 +460,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
33491 if (cdi->exit)
33492 cdi->exit(cdi);
33493
33494- cdi->ops->n_minors--;
33495 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
33496 }
33497
33498diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
33499index d59cdcb..11afddf 100644
33500--- a/drivers/cdrom/gdrom.c
33501+++ b/drivers/cdrom/gdrom.c
33502@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
33503 .audio_ioctl = gdrom_audio_ioctl,
33504 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
33505 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
33506- .n_minors = 1,
33507 };
33508
33509 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
33510diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
33511index 72bedad..8181ce1 100644
33512--- a/drivers/char/Kconfig
33513+++ b/drivers/char/Kconfig
33514@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
33515
33516 config DEVKMEM
33517 bool "/dev/kmem virtual device support"
33518- default y
33519+ default n
33520+ depends on !GRKERNSEC_KMEM
33521 help
33522 Say Y here if you want to support the /dev/kmem device. The
33523 /dev/kmem device is rarely used, but can be used for certain
33524@@ -581,6 +582,7 @@ config DEVPORT
33525 bool
33526 depends on !M68K
33527 depends on ISA || PCI
33528+ depends on !GRKERNSEC_KMEM
33529 default y
33530
33531 source "drivers/s390/char/Kconfig"
33532diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
33533index 2e04433..22afc64 100644
33534--- a/drivers/char/agp/frontend.c
33535+++ b/drivers/char/agp/frontend.c
33536@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
33537 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
33538 return -EFAULT;
33539
33540- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
33541+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
33542 return -EFAULT;
33543
33544 client = agp_find_client_by_pid(reserve.pid);
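
agpioc_reserve_wrap() goes on to allocate seg_count elements; the old bound checked the multiply against sizeof(struct agp_segment), but the allocation actually uses struct agp_segment_priv, so a seg_count that passed the check could still overflow the size computation. The fix bounds against the element type really allocated. The general form of the guard, as a standalone sketch:

#include <stddef.h>

/* reject n before n * size can wrap around SIZE_MAX */
static int mul_would_overflow(size_t n, size_t size)
{
	return size != 0 && n > (size_t)-1 / size;
}
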
33545diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
33546index 21cb980..f15107c 100644
33547--- a/drivers/char/genrtc.c
33548+++ b/drivers/char/genrtc.c
33549@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct file *file,
33550 switch (cmd) {
33551
33552 case RTC_PLL_GET:
33553+ memset(&pll, 0, sizeof(pll));
33554 if (get_rtc_pll(&pll))
33555 return -EINVAL;
33556 else
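
RTC_PLL_GET copies a struct rtc_pll_info back to userspace after get_rtc_pll() fills it in; if the callee skips a field, or the struct contains padding, uninitialized kernel stack would leak. Zeroing the struct first closes the hole -- the same infoleak idiom as the arg64 memset added to cciss_ioctl32_passthru() above. The pattern in isolation, mirroring the hunk:

struct rtc_pll_info pll;

memset(&pll, 0, sizeof(pll));		/* no stale stack bytes survive */
if (get_rtc_pll(&pll))
	return -EINVAL;
if (copy_to_user(argp, &pll, sizeof(pll)))
	return -EFAULT;
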
33557diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
33558index fe6d4be..89f32100 100644
33559--- a/drivers/char/hpet.c
33560+++ b/drivers/char/hpet.c
33561@@ -571,7 +571,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
33562 }
33563
33564 static int
33565-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
33566+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
33567 struct hpet_info *info)
33568 {
33569 struct hpet_timer __iomem *timer;
33570diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
33571index 053201b0..8335cce 100644
33572--- a/drivers/char/ipmi/ipmi_msghandler.c
33573+++ b/drivers/char/ipmi/ipmi_msghandler.c
33574@@ -420,7 +420,7 @@ struct ipmi_smi {
33575 struct proc_dir_entry *proc_dir;
33576 char proc_dir_name[10];
33577
33578- atomic_t stats[IPMI_NUM_STATS];
33579+ atomic_unchecked_t stats[IPMI_NUM_STATS];
33580
33581 /*
33582 * run_to_completion duplicate of smb_info, smi_info
33583@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
33584
33585
33586 #define ipmi_inc_stat(intf, stat) \
33587- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
33588+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
33589 #define ipmi_get_stat(intf, stat) \
33590- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
33591+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
33592
33593 static int is_lan_addr(struct ipmi_addr *addr)
33594 {
33595@@ -2884,7 +2884,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
33596 INIT_LIST_HEAD(&intf->cmd_rcvrs);
33597 init_waitqueue_head(&intf->waitq);
33598 for (i = 0; i < IPMI_NUM_STATS; i++)
33599- atomic_set(&intf->stats[i], 0);
33600+ atomic_set_unchecked(&intf->stats[i], 0);
33601
33602 intf->proc_dir = NULL;
33603
33604diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
33605index 1c7fdcd..4899100 100644
33606--- a/drivers/char/ipmi/ipmi_si_intf.c
33607+++ b/drivers/char/ipmi/ipmi_si_intf.c
33608@@ -275,7 +275,7 @@ struct smi_info {
33609 unsigned char slave_addr;
33610
33611 /* Counters and things for the proc filesystem. */
33612- atomic_t stats[SI_NUM_STATS];
33613+ atomic_unchecked_t stats[SI_NUM_STATS];
33614
33615 struct task_struct *thread;
33616
33617@@ -284,9 +284,9 @@ struct smi_info {
33618 };
33619
33620 #define smi_inc_stat(smi, stat) \
33621- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
33622+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
33623 #define smi_get_stat(smi, stat) \
33624- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
33625+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
33626
33627 #define SI_MAX_PARMS 4
33628
33629@@ -3225,7 +3225,7 @@ static int try_smi_init(struct smi_info *new_smi)
33630 atomic_set(&new_smi->req_events, 0);
33631 new_smi->run_to_completion = 0;
33632 for (i = 0; i < SI_NUM_STATS; i++)
33633- atomic_set(&new_smi->stats[i], 0);
33634+ atomic_set_unchecked(&new_smi->stats[i], 0);
33635
33636 new_smi->interrupt_disabled = 1;
33637 atomic_set(&new_smi->stop_operation, 0);
33638diff --git a/drivers/char/mem.c b/drivers/char/mem.c
33639index c6fa3bc..4ca3e42 100644
33640--- a/drivers/char/mem.c
33641+++ b/drivers/char/mem.c
33642@@ -18,6 +18,7 @@
33643 #include <linux/raw.h>
33644 #include <linux/tty.h>
33645 #include <linux/capability.h>
33646+#include <linux/security.h>
33647 #include <linux/ptrace.h>
33648 #include <linux/device.h>
33649 #include <linux/highmem.h>
33650@@ -37,6 +38,10 @@
33651
33652 #define DEVPORT_MINOR 4
33653
33654+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
33655+extern const struct file_operations grsec_fops;
33656+#endif
33657+
33658 static inline unsigned long size_inside_page(unsigned long start,
33659 unsigned long size)
33660 {
33661@@ -68,9 +73,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33662
33663 while (cursor < to) {
33664 if (!devmem_is_allowed(pfn)) {
33665+#ifdef CONFIG_GRKERNSEC_KMEM
33666+ gr_handle_mem_readwrite(from, to);
33667+#else
33668 printk(KERN_INFO
33669 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
33670 current->comm, from, to);
33671+#endif
33672 return 0;
33673 }
33674 cursor += PAGE_SIZE;
33675@@ -78,6 +87,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33676 }
33677 return 1;
33678 }
33679+#elif defined(CONFIG_GRKERNSEC_KMEM)
33680+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33681+{
33682+ return 0;
33683+}
33684 #else
33685 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33686 {
33687@@ -120,6 +134,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
33688
33689 while (count > 0) {
33690 unsigned long remaining;
33691+ char *temp;
33692
33693 sz = size_inside_page(p, count);
33694
33695@@ -135,7 +150,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
33696 if (!ptr)
33697 return -EFAULT;
33698
33699- remaining = copy_to_user(buf, ptr, sz);
33700+#ifdef CONFIG_PAX_USERCOPY
33701+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
33702+ if (!temp) {
33703+ unxlate_dev_mem_ptr(p, ptr);
33704+ return -ENOMEM;
33705+ }
33706+ memcpy(temp, ptr, sz);
33707+#else
33708+ temp = ptr;
33709+#endif
33710+
33711+ remaining = copy_to_user(buf, temp, sz);
33712+
33713+#ifdef CONFIG_PAX_USERCOPY
33714+ kfree(temp);
33715+#endif
33716+
33717 unxlate_dev_mem_ptr(p, ptr);
33718 if (remaining)
33719 return -EFAULT;
33720@@ -398,9 +429,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
33721 size_t count, loff_t *ppos)
33722 {
33723 unsigned long p = *ppos;
33724- ssize_t low_count, read, sz;
33725+ ssize_t low_count, read, sz, err = 0;
33726 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
33727- int err = 0;
33728
33729 read = 0;
33730 if (p < (unsigned long) high_memory) {
33731@@ -422,6 +452,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
33732 }
33733 #endif
33734 while (low_count > 0) {
33735+ char *temp;
33736+
33737 sz = size_inside_page(p, low_count);
33738
33739 /*
33740@@ -431,7 +463,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
33741 */
33742 kbuf = xlate_dev_kmem_ptr((char *)p);
33743
33744- if (copy_to_user(buf, kbuf, sz))
33745+#ifdef CONFIG_PAX_USERCOPY
33746+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
33747+ if (!temp)
33748+ return -ENOMEM;
33749+ memcpy(temp, kbuf, sz);
33750+#else
33751+ temp = kbuf;
33752+#endif
33753+
33754+ err = copy_to_user(buf, temp, sz);
33755+
33756+#ifdef CONFIG_PAX_USERCOPY
33757+ kfree(temp);
33758+#endif
33759+
33760+ if (err)
33761 return -EFAULT;
33762 buf += sz;
33763 p += sz;
33764@@ -833,6 +880,9 @@ static const struct memdev {
33765 #ifdef CONFIG_CRASH_DUMP
33766 [12] = { "oldmem", 0, &oldmem_fops, NULL },
33767 #endif
33768+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
33769+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
33770+#endif
33771 };
33772
33773 static int memory_open(struct inode *inode, struct file *filp)
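Under CONFIG_PAX_USERCOPY the read_mem/read_kmem paths above copy to userspace through a freshly allocated heap buffer rather than straight from the translated pointer, so the usercopy checker always sees a slab object with a known size. A simplified sketch of the bounce-buffer pattern, with userspace stand-ins for kmalloc and copy_to_user (names hypothetical):

	#include <stdlib.h>
	#include <string.h>

	/* Stand-in for copy_to_user(): returns bytes NOT copied (0 on success). */
	static unsigned long copy_to_user_sim(void *dst, const void *src, size_t n)
	{
		memcpy(dst, src, n);
		return 0;
	}

	/* Copy sz bytes from an arbitrary "kernel" pointer via a bounce buffer. */
	static int bounce_copy(void *ubuf, const void *ptr, size_t sz)
	{
		void *temp = malloc(sz);	/* kmalloc(sz, GFP_KERNEL|GFP_USERCOPY) */
		unsigned long remaining;

		if (!temp)
			return -1;		/* -ENOMEM in the kernel */
		memcpy(temp, ptr, sz);		/* bounded kernel-to-kernel copy */
		remaining = copy_to_user_sim(ubuf, temp, sz);
		free(temp);			/* kfree(temp) */
		return remaining ? -2 : 0;	/* -EFAULT on short copy */
	}

	int main(void)
	{
		char kernel_data[16] = "devmem contents";
		char user_buf[16];
		return bounce_copy(user_buf, kernel_data, sizeof(kernel_data));
	}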
33774diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
33775index 9df78e2..01ba9ae 100644
33776--- a/drivers/char/nvram.c
33777+++ b/drivers/char/nvram.c
33778@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
33779
33780 spin_unlock_irq(&rtc_lock);
33781
33782- if (copy_to_user(buf, contents, tmp - contents))
33783+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
33784 return -EFAULT;
33785
33786 *ppos = i;
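The nvram_read change adds a sanity clamp so that a miscomputed cursor difference `tmp - contents` can never copy past the on-stack buffer. The same pattern in isolation (hypothetical buffer and producer):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char contents[64] = "nvram bytes";
		char user_buf[64];
		char *tmp = contents + 16;	/* producer-advanced cursor */
		size_t len = (size_t)(tmp - contents);

		/* Refuse the copy outright if the cursor ran past the buffer. */
		if (len > sizeof(contents)) {
			fprintf(stderr, "bogus length, refusing copy\n");
			return 1;		/* -EFAULT in the kernel */
		}
		memcpy(user_buf, contents, len);	/* copy_to_user() stand-in */
		printf("copied %zu bytes\n", len);
		return 0;
	}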
33787diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
33788index b66eaa0..2619d1b 100644
33789--- a/drivers/char/pcmcia/synclink_cs.c
33790+++ b/drivers/char/pcmcia/synclink_cs.c
33791@@ -2348,9 +2348,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
33792
33793 if (debug_level >= DEBUG_LEVEL_INFO)
33794 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
33795- __FILE__,__LINE__, info->device_name, port->count);
33796+ __FILE__,__LINE__, info->device_name, atomic_read(&port->count));
33797
33798- WARN_ON(!port->count);
33799+ WARN_ON(!atomic_read(&port->count));
33800
33801 if (tty_port_close_start(port, tty, filp) == 0)
33802 goto cleanup;
33803@@ -2368,7 +2368,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
33804 cleanup:
33805 if (debug_level >= DEBUG_LEVEL_INFO)
33806 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__,__LINE__,
33807- tty->driver->name, port->count);
33808+ tty->driver->name, atomic_read(&port->count));
33809 }
33810
33811 /* Wait until the transmitter is empty.
33812@@ -2510,7 +2510,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
33813
33814 if (debug_level >= DEBUG_LEVEL_INFO)
33815 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
33816- __FILE__,__LINE__,tty->driver->name, port->count);
33817+ __FILE__,__LINE__,tty->driver->name, atomic_read(&port->count));
33818
33819 /* If port is closing, signal caller to try again */
33820 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
33821@@ -2530,11 +2530,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
33822 goto cleanup;
33823 }
33824 spin_lock(&port->lock);
33825- port->count++;
33826+ atomic_inc(&port->count);
33827 spin_unlock(&port->lock);
33828 spin_unlock_irqrestore(&info->netlock, flags);
33829
33830- if (port->count == 1) {
33831+ if (atomic_read(&port->count) == 1) {
33832 /* 1st open on this device, init hardware */
33833 retval = startup(info, tty);
33834 if (retval < 0)
33835@@ -3889,7 +3889,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
33836 unsigned short new_crctype;
33837
33838 /* return error if TTY interface open */
33839- if (info->port.count)
33840+ if (atomic_read(&info->port.count))
33841 return -EBUSY;
33842
33843 switch (encoding)
33844@@ -3992,7 +3992,7 @@ static int hdlcdev_open(struct net_device *dev)
33845
33846 /* arbitrate between network and tty opens */
33847 spin_lock_irqsave(&info->netlock, flags);
33848- if (info->port.count != 0 || info->netcount != 0) {
33849+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
33850 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
33851 spin_unlock_irqrestore(&info->netlock, flags);
33852 return -EBUSY;
33853@@ -4081,7 +4081,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
33854 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
33855
33856 /* return error if TTY interface open */
33857- if (info->port.count)
33858+ if (atomic_read(&info->port.count))
33859 return -EBUSY;
33860
33861 if (cmd != SIOCWANDEV)
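Converting `port->count` to atomic_t apparently lets the hdlcdev paths read the open count without taking `port->lock`; the writers keep the lock only for ordering with the rest of the open/close state. A compressed model, with a pthread mutex standing in for the spinlock (names hypothetical):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	struct tty_port_sim {
		pthread_mutex_t lock;
		atomic_int count;	/* was a plain int guarded by lock */
	};

	static void port_open(struct tty_port_sim *p)
	{
		pthread_mutex_lock(&p->lock);
		atomic_fetch_add(&p->count, 1);	/* atomic_inc(&port->count) */
		pthread_mutex_unlock(&p->lock);
	}

	/* Lock-free reader, safe because the counter itself is atomic. */
	static int port_busy(struct tty_port_sim *p)
	{
		return atomic_load(&p->count) != 0;	/* atomic_read() */
	}

	int main(void)
	{
		struct tty_port_sim p = { PTHREAD_MUTEX_INITIALIZER, 0 };
		port_open(&p);
		printf("busy = %d\n", port_busy(&p));
		return 0;
	}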
33862diff --git a/drivers/char/random.c b/drivers/char/random.c
33863index 57d4b15..253207b 100644
33864--- a/drivers/char/random.c
33865+++ b/drivers/char/random.c
33866@@ -272,8 +272,13 @@
33867 /*
33868 * Configuration information
33869 */
33870+#ifdef CONFIG_GRKERNSEC_RANDNET
33871+#define INPUT_POOL_WORDS 512
33872+#define OUTPUT_POOL_WORDS 128
33873+#else
33874 #define INPUT_POOL_WORDS 128
33875 #define OUTPUT_POOL_WORDS 32
33876+#endif
33877 #define SEC_XFER_SIZE 512
33878 #define EXTRACT_SIZE 10
33879
33880@@ -313,10 +318,17 @@ static struct poolinfo {
33881 int poolwords;
33882 int tap1, tap2, tap3, tap4, tap5;
33883 } poolinfo_table[] = {
33884+#ifdef CONFIG_GRKERNSEC_RANDNET
33885+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
33886+ { 512, 411, 308, 208, 104, 1 },
33887+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
33888+ { 128, 103, 76, 51, 25, 1 },
33889+#else
33890 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
33891 { 128, 103, 76, 51, 25, 1 },
33892 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
33893 { 32, 26, 20, 14, 7, 1 },
33894+#endif
33895 #if 0
33896 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
33897 { 2048, 1638, 1231, 819, 411, 1 },
33898@@ -524,8 +536,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
33899 input_rotate += i ? 7 : 14;
33900 }
33901
33902- ACCESS_ONCE(r->input_rotate) = input_rotate;
33903- ACCESS_ONCE(r->add_ptr) = i;
33904+ ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
33905+ ACCESS_ONCE_RW(r->add_ptr) = i;
33906 smp_wmb();
33907
33908 if (out)
33909@@ -1024,7 +1036,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
33910
33911 extract_buf(r, tmp);
33912 i = min_t(int, nbytes, EXTRACT_SIZE);
33913- if (copy_to_user(buf, tmp, i)) {
33914+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
33915 ret = -EFAULT;
33916 break;
33917 }
33918@@ -1360,7 +1372,7 @@ EXPORT_SYMBOL(generate_random_uuid);
33919 #include <linux/sysctl.h>
33920
33921 static int min_read_thresh = 8, min_write_thresh;
33922-static int max_read_thresh = INPUT_POOL_WORDS * 32;
33923+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
33924 static int max_write_thresh = INPUT_POOL_WORDS * 32;
33925 static char sysctl_bootid[16];
33926
33927@@ -1376,7 +1388,7 @@ static char sysctl_bootid[16];
33928 static int proc_do_uuid(ctl_table *table, int write,
33929 void __user *buffer, size_t *lenp, loff_t *ppos)
33930 {
33931- ctl_table fake_table;
33932+ ctl_table_no_const fake_table;
33933 unsigned char buf[64], tmp_uuid[16], *uuid;
33934
33935 uuid = table->data;
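Besides enlarging the entropy pools under GRKERNSEC_RANDNET, the random.c hunk hardens extract_entropy_user: the copy length is already `min_t`-clamped to EXTRACT_SIZE, but the patch adds an explicit `i > sizeof(tmp)` guard as defense in depth before copy_to_user. The pattern, sketched standalone (hypothetical sizes):

	#include <stdio.h>
	#include <string.h>

	#define EXTRACT_SIZE 10

	int main(void)
	{
		unsigned char tmp[EXTRACT_SIZE] = "0123456789";
		unsigned char out[32];
		size_t nbytes = 32;
		size_t i = nbytes < EXTRACT_SIZE ? nbytes : EXTRACT_SIZE; /* min_t */

		/* Defense in depth: even if i were miscomputed elsewhere,
		 * never read past the stack buffer. */
		if (i > sizeof(tmp)) {
			fprintf(stderr, "clamp failed\n");
			return 1;	/* -EFAULT in the kernel */
		}
		memcpy(out, tmp, i);	/* copy_to_user() stand-in */
		printf("extracted %zu bytes\n", i);
		return 0;
	}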
33936diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
33937index d780295..b29f3a8 100644
33938--- a/drivers/char/sonypi.c
33939+++ b/drivers/char/sonypi.c
33940@@ -54,6 +54,7 @@
33941
33942 #include <asm/uaccess.h>
33943 #include <asm/io.h>
33944+#include <asm/local.h>
33945
33946 #include <linux/sonypi.h>
33947
33948@@ -490,7 +491,7 @@ static struct sonypi_device {
33949 spinlock_t fifo_lock;
33950 wait_queue_head_t fifo_proc_list;
33951 struct fasync_struct *fifo_async;
33952- int open_count;
33953+ local_t open_count;
33954 int model;
33955 struct input_dev *input_jog_dev;
33956 struct input_dev *input_key_dev;
33957@@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
33958 static int sonypi_misc_release(struct inode *inode, struct file *file)
33959 {
33960 mutex_lock(&sonypi_device.lock);
33961- sonypi_device.open_count--;
33962+ local_dec(&sonypi_device.open_count);
33963 mutex_unlock(&sonypi_device.lock);
33964 return 0;
33965 }
33966@@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
33967 {
33968 mutex_lock(&sonypi_device.lock);
33969 /* Flush input queue on first open */
33970- if (!sonypi_device.open_count)
33971+ if (!local_read(&sonypi_device.open_count))
33972 kfifo_reset(&sonypi_device.fifo);
33973- sonypi_device.open_count++;
33974+ local_inc(&sonypi_device.open_count);
33975 mutex_unlock(&sonypi_device.lock);
33976
33977 return 0;
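sonypi's open count only changes under `sonypi_device.lock`, so downgrading it from a plain int to local_t seems aimed at making the updates atomic in their own right, so correctness no longer rests solely on the mutex. Sketch with C11 atomics standing in for asm/local.h (names hypothetical):

	#include <stdatomic.h>
	#include <stdio.h>

	typedef atomic_long local_t;	/* stand-in for asm/local.h */

	static void local_inc(local_t *l) { atomic_fetch_add(l, 1); }
	static void local_dec(local_t *l) { atomic_fetch_sub(l, 1); }
	static long local_read(local_t *l) { return atomic_load(l); }

	int main(void)
	{
		local_t open_count = 0;
		if (!local_read(&open_count))
			puts("first open: flush fifo");	/* kfifo_reset() */
		local_inc(&open_count);
		local_dec(&open_count);
		printf("open_count = %ld\n", local_read(&open_count));
		return 0;
	}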
33978diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
33979index 93211df..c7805f7 100644
33980--- a/drivers/char/tpm/tpm.c
33981+++ b/drivers/char/tpm/tpm.c
33982@@ -410,7 +410,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
33983 chip->vendor.req_complete_val)
33984 goto out_recv;
33985
33986- if ((status == chip->vendor.req_canceled)) {
33987+ if (status == chip->vendor.req_canceled) {
33988 dev_err(chip->dev, "Operation Canceled\n");
33989 rc = -ECANCELED;
33990 goto out;
33991diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
33992index 56051d0..11cf3b7 100644
33993--- a/drivers/char/tpm/tpm_acpi.c
33994+++ b/drivers/char/tpm/tpm_acpi.c
33995@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
33996 virt = acpi_os_map_memory(start, len);
33997 if (!virt) {
33998 kfree(log->bios_event_log);
33999+ log->bios_event_log = NULL;
34000 printk("%s: ERROR - Unable to map memory\n", __func__);
34001 return -EIO;
34002 }
34003
34004- memcpy_fromio(log->bios_event_log, virt, len);
34005+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
34006
34007 acpi_os_unmap_memory(virt, len);
34008 return 0;
34009diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
34010index 84ddc55..1d32f1e 100644
34011--- a/drivers/char/tpm/tpm_eventlog.c
34012+++ b/drivers/char/tpm/tpm_eventlog.c
34013@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
34014 event = addr;
34015
34016 if ((event->event_type == 0 && event->event_size == 0) ||
34017- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
34018+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
34019 return NULL;
34020
34021 return addr;
34022@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
34023 return NULL;
34024
34025 if ((event->event_type == 0 && event->event_size == 0) ||
34026- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
34027+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
34028 return NULL;
34029
34030 (*pos)++;
34031@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
34032 int i;
34033
34034 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
34035- seq_putc(m, data[i]);
34036+ if (!seq_putc(m, data[i]))
34037+ return -EFAULT;
34038
34039 return 0;
34040 }
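The two tpm_eventlog checks are rewritten so the arithmetic cannot wrap: `addr + sizeof(struct tcpa_event) + event_size >= limit` overflows for a huge attacker-controlled `event_size`, while comparing `event_size` against `limit - addr - sizeof(...)` keeps every operand in range. A standalone sketch (hypothetical sizes):

	#include <stddef.h>
	#include <stdio.h>

	struct tcpa_event { unsigned int event_type; unsigned int event_size; };

	/* Return 1 if the event header plus its payload fits below limit. */
	static int event_fits(const char *addr, const char *limit,
			      const struct tcpa_event *event)
	{
		size_t room = (size_t)(limit - addr);

		if (room < sizeof(struct tcpa_event))
			return 0;
		/* Compare sizes, not pointers: addr + size could wrap around,
		 * room - size cannot go below zero here. */
		return event->event_size < room - sizeof(struct tcpa_event);
	}

	int main(void)
	{
		char log[64];
		struct tcpa_event ev = { 1, 0xffffffffu };	/* hostile size */
		printf("fits = %d\n", event_fits(log, log + sizeof(log), &ev));
		return 0;
	}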
34041diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
34042index ee4dbea..69c817b 100644
34043--- a/drivers/char/virtio_console.c
34044+++ b/drivers/char/virtio_console.c
34045@@ -681,7 +681,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
34046 if (to_user) {
34047 ssize_t ret;
34048
34049- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
34050+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
34051 if (ret)
34052 return -EFAULT;
34053 } else {
34054@@ -780,7 +780,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
34055 if (!port_has_data(port) && !port->host_connected)
34056 return 0;
34057
34058- return fill_readbuf(port, ubuf, count, true);
34059+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
34060 }
34061
34062 static int wait_port_writable(struct port *port, bool nonblock)
34063diff --git a/drivers/clocksource/arm_generic.c b/drivers/clocksource/arm_generic.c
34064index 8ae1a61..9c00613 100644
34065--- a/drivers/clocksource/arm_generic.c
34066+++ b/drivers/clocksource/arm_generic.c
34067@@ -181,7 +181,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
34068 return NOTIFY_OK;
34069 }
34070
34071-static struct notifier_block __cpuinitdata arch_timer_cpu_nb = {
34072+static struct notifier_block arch_timer_cpu_nb = {
34073 .notifier_call = arch_timer_cpu_notify,
34074 };
34075
34076diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
34077index 7b0d49d..134fac9 100644
34078--- a/drivers/cpufreq/acpi-cpufreq.c
34079+++ b/drivers/cpufreq/acpi-cpufreq.c
34080@@ -172,7 +172,7 @@ static ssize_t show_global_boost(struct kobject *kobj,
34081 return sprintf(buf, "%u\n", boost_enabled);
34082 }
34083
34084-static struct global_attr global_boost = __ATTR(boost, 0644,
34085+static global_attr_no_const global_boost = __ATTR(boost, 0644,
34086 show_global_boost,
34087 store_global_boost);
34088
34089@@ -712,8 +712,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
34090 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
34091 per_cpu(acfreq_data, cpu) = data;
34092
34093- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
34094- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
34095+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
34096+ pax_open_kernel();
34097+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
34098+ pax_close_kernel();
34099+ }
34100
34101 result = acpi_processor_register_performance(data->acpi_data, cpu);
34102 if (result)
34103@@ -835,7 +838,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
34104 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
34105 break;
34106 case ACPI_ADR_SPACE_FIXED_HARDWARE:
34107- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
34108+ pax_open_kernel();
34109+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
34110+ pax_close_kernel();
34111 policy->cur = get_cur_freq_on_cpu(cpu);
34112 break;
34113 default:
34114@@ -846,8 +851,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
34115 acpi_processor_notify_smm(THIS_MODULE);
34116
34117 /* Check for APERF/MPERF support in hardware */
34118- if (boot_cpu_has(X86_FEATURE_APERFMPERF))
34119- acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
34120+ if (boot_cpu_has(X86_FEATURE_APERFMPERF)) {
34121+ pax_open_kernel();
34122+ *(void **)&acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
34123+ pax_close_kernel();
34124+ }
34125
34126 pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
34127 for (i = 0; i < perf->state_count; i++)
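acpi_cpufreq_driver is constified elsewhere in the patch, so the few boot-time writes to it are bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection (for instance by toggling CR0.WP on x86). A userspace analogue using mprotect on a read-only page; names hypothetical, mechanism simplified:

	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		long pagesz = sysconf(_SC_PAGESIZE);
		/* "Driver struct" in a page we make read-only, like .rodata. */
		char *page = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (page == MAP_FAILED)
			return 1;
		strcpy(page, "flags=0");
		mprotect(page, pagesz, PROT_READ);		/* constify */

		mprotect(page, pagesz, PROT_READ | PROT_WRITE);	/* pax_open_kernel()  */
		page[6] = '1';					/* one sanctioned write */
		mprotect(page, pagesz, PROT_READ);		/* pax_close_kernel() */

		printf("%s\n", page);				/* flags=1 */
		return 0;
	}

The same mechanism backs the pax_list_add/pax_list_del helpers seen in the cpuidle and devfreq hunks below, which write to list heads embedded in otherwise read-only structures.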
34128diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
34129index 1f93dbd..305cef1 100644
34130--- a/drivers/cpufreq/cpufreq.c
34131+++ b/drivers/cpufreq/cpufreq.c
34132@@ -1843,7 +1843,7 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
34133 return NOTIFY_OK;
34134 }
34135
34136-static struct notifier_block __refdata cpufreq_cpu_notifier = {
34137+static struct notifier_block cpufreq_cpu_notifier = {
34138 .notifier_call = cpufreq_cpu_callback,
34139 };
34140
34141@@ -1875,8 +1875,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
34142
34143 pr_debug("trying to register driver %s\n", driver_data->name);
34144
34145- if (driver_data->setpolicy)
34146- driver_data->flags |= CPUFREQ_CONST_LOOPS;
34147+ if (driver_data->setpolicy) {
34148+ pax_open_kernel();
34149+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
34150+ pax_close_kernel();
34151+ }
34152
34153 spin_lock_irqsave(&cpufreq_driver_lock, flags);
34154 if (cpufreq_driver) {
34155diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
34156index 6c5f1d3..c7e2f35e 100644
34157--- a/drivers/cpufreq/cpufreq_governor.c
34158+++ b/drivers/cpufreq/cpufreq_governor.c
34159@@ -243,7 +243,7 @@ int cpufreq_governor_dbs(struct dbs_data *dbs_data,
34160 * governor, thus we are bound to jiffes/HZ
34161 */
34162 if (dbs_data->governor == GOV_CONSERVATIVE) {
34163- struct cs_ops *ops = dbs_data->gov_ops;
34164+ const struct cs_ops *ops = dbs_data->gov_ops;
34165
34166 cpufreq_register_notifier(ops->notifier_block,
34167 CPUFREQ_TRANSITION_NOTIFIER);
34168@@ -251,7 +251,7 @@ int cpufreq_governor_dbs(struct dbs_data *dbs_data,
34169 dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
34170 jiffies_to_usecs(10);
34171 } else {
34172- struct od_ops *ops = dbs_data->gov_ops;
34173+ const struct od_ops *ops = dbs_data->gov_ops;
34174
34175 od_tuners->io_is_busy = ops->io_busy();
34176 }
34177@@ -268,7 +268,7 @@ second_time:
34178 cs_dbs_info->enable = 1;
34179 cs_dbs_info->requested_freq = policy->cur;
34180 } else {
34181- struct od_ops *ops = dbs_data->gov_ops;
34182+ const struct od_ops *ops = dbs_data->gov_ops;
34183 od_dbs_info->rate_mult = 1;
34184 od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
34185 ops->powersave_bias_init_cpu(cpu);
34186@@ -289,7 +289,7 @@ second_time:
34187 mutex_destroy(&cpu_cdbs->timer_mutex);
34188 dbs_data->enable--;
34189 if (!dbs_data->enable) {
34190- struct cs_ops *ops = dbs_data->gov_ops;
34191+ const struct cs_ops *ops = dbs_data->gov_ops;
34192
34193 sysfs_remove_group(cpufreq_global_kobject,
34194 dbs_data->attr_group);
34195diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
34196index f661654..6c8e638 100644
34197--- a/drivers/cpufreq/cpufreq_governor.h
34198+++ b/drivers/cpufreq/cpufreq_governor.h
34199@@ -142,7 +142,7 @@ struct dbs_data {
34200 void (*gov_check_cpu)(int cpu, unsigned int load);
34201
34202 /* Governor specific ops, see below */
34203- void *gov_ops;
34204+ const void *gov_ops;
34205 };
34206
34207 /* Governor specific ops, will be passed to dbs_data->gov_ops */
34208diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
34209index 9d7732b..0b1a793 100644
34210--- a/drivers/cpufreq/cpufreq_stats.c
34211+++ b/drivers/cpufreq/cpufreq_stats.c
34212@@ -340,7 +340,7 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
34213 }
34214
34215 /* priority=1 so this will get called before cpufreq_remove_dev */
34216-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
34217+static struct notifier_block cpufreq_stat_cpu_notifier = {
34218 .notifier_call = cpufreq_stat_cpu_callback,
34219 .priority = 1,
34220 };
34221diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
34222index 827629c9..0bc6a03 100644
34223--- a/drivers/cpufreq/p4-clockmod.c
34224+++ b/drivers/cpufreq/p4-clockmod.c
34225@@ -167,10 +167,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
34226 case 0x0F: /* Core Duo */
34227 case 0x16: /* Celeron Core */
34228 case 0x1C: /* Atom */
34229- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34230+ pax_open_kernel();
34231+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34232+ pax_close_kernel();
34233 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
34234 case 0x0D: /* Pentium M (Dothan) */
34235- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34236+ pax_open_kernel();
34237+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34238+ pax_close_kernel();
34239 /* fall through */
34240 case 0x09: /* Pentium M (Banias) */
34241 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
34242@@ -182,7 +186,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
34243
34244 /* on P-4s, the TSC runs with constant frequency independent whether
34245 * throttling is active or not. */
34246- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34247+ pax_open_kernel();
34248+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34249+ pax_close_kernel();
34250
34251 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
34252 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
34253diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
34254index 3a953d5..f5993f6 100644
34255--- a/drivers/cpufreq/speedstep-centrino.c
34256+++ b/drivers/cpufreq/speedstep-centrino.c
34257@@ -353,8 +353,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
34258 !cpu_has(cpu, X86_FEATURE_EST))
34259 return -ENODEV;
34260
34261- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
34262- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
34263+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
34264+ pax_open_kernel();
34265+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
34266+ pax_close_kernel();
34267+ }
34268
34269 if (policy->cpu != 0)
34270 return -ENODEV;
34271diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
34272index e1f6860..f8de20b 100644
34273--- a/drivers/cpuidle/cpuidle.c
34274+++ b/drivers/cpuidle/cpuidle.c
34275@@ -279,7 +279,7 @@ static int poll_idle(struct cpuidle_device *dev,
34276
34277 static void poll_idle_init(struct cpuidle_driver *drv)
34278 {
34279- struct cpuidle_state *state = &drv->states[0];
34280+ cpuidle_state_no_const *state = &drv->states[0];
34281
34282 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
34283 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
34284diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
34285index ea2f8e7..70ac501 100644
34286--- a/drivers/cpuidle/governor.c
34287+++ b/drivers/cpuidle/governor.c
34288@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
34289 mutex_lock(&cpuidle_lock);
34290 if (__cpuidle_find_governor(gov->name) == NULL) {
34291 ret = 0;
34292- list_add_tail(&gov->governor_list, &cpuidle_governors);
34293+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
34294 if (!cpuidle_curr_governor ||
34295 cpuidle_curr_governor->rating < gov->rating)
34296 cpuidle_switch_governor(gov);
34297@@ -135,7 +135,7 @@ void cpuidle_unregister_governor(struct cpuidle_governor *gov)
34298 new_gov = cpuidle_replace_governor(gov->rating);
34299 cpuidle_switch_governor(new_gov);
34300 }
34301- list_del(&gov->governor_list);
34302+ pax_list_del((struct list_head *)&gov->governor_list);
34303 mutex_unlock(&cpuidle_lock);
34304 }
34305
34306diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
34307index 428754a..8bdf9cc 100644
34308--- a/drivers/cpuidle/sysfs.c
34309+++ b/drivers/cpuidle/sysfs.c
34310@@ -131,7 +131,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
34311 NULL
34312 };
34313
34314-static struct attribute_group cpuidle_attr_group = {
34315+static attribute_group_no_const cpuidle_attr_group = {
34316 .attrs = cpuidle_default_attrs,
34317 .name = "cpuidle",
34318 };
34319diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
34320index 3b36797..289c16a 100644
34321--- a/drivers/devfreq/devfreq.c
34322+++ b/drivers/devfreq/devfreq.c
34323@@ -588,7 +588,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
34324 goto err_out;
34325 }
34326
34327- list_add(&governor->node, &devfreq_governor_list);
34328+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
34329
34330 list_for_each_entry(devfreq, &devfreq_list, node) {
34331 int ret = 0;
34332@@ -676,7 +676,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
34333 }
34334 }
34335
34336- list_del(&governor->node);
34337+ pax_list_del((struct list_head *)&governor->node);
34338 err_out:
34339 mutex_unlock(&devfreq_list_lock);
34340
34341diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
34342index b70709b..1d8d02a 100644
34343--- a/drivers/dma/sh/shdma.c
34344+++ b/drivers/dma/sh/shdma.c
34345@@ -476,7 +476,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
34346 return ret;
34347 }
34348
34349-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
34350+static struct notifier_block sh_dmae_nmi_notifier = {
34351 .notifier_call = sh_dmae_nmi_handler,
34352
34353 /* Run before NMI debug handler and KGDB */
34354diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
34355index 0ca1ca7..6e6f454 100644
34356--- a/drivers/edac/edac_mc_sysfs.c
34357+++ b/drivers/edac/edac_mc_sysfs.c
34358@@ -148,7 +148,7 @@ static const char *edac_caps[] = {
34359 struct dev_ch_attribute {
34360 struct device_attribute attr;
34361 int channel;
34362-};
34363+} __do_const;
34364
34365 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
34366 struct dev_ch_attribute dev_attr_legacy_##_name = \
34367diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
34368index 0056c4d..23b54d9 100644
34369--- a/drivers/edac/edac_pci_sysfs.c
34370+++ b/drivers/edac/edac_pci_sysfs.c
34371@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
34372 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
34373 static int edac_pci_poll_msec = 1000; /* one second workq period */
34374
34375-static atomic_t pci_parity_count = ATOMIC_INIT(0);
34376-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
34377+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
34378+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
34379
34380 static struct kobject *edac_pci_top_main_kobj;
34381 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
34382@@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
34383 void *value;
34384 ssize_t(*show) (void *, char *);
34385 ssize_t(*store) (void *, const char *, size_t);
34386-};
34387+} __do_const;
34388
34389 /* Set of show/store abstract level functions for PCI Parity object */
34390 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
34391@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34392 edac_printk(KERN_CRIT, EDAC_PCI,
34393 "Signaled System Error on %s\n",
34394 pci_name(dev));
34395- atomic_inc(&pci_nonparity_count);
34396+ atomic_inc_unchecked(&pci_nonparity_count);
34397 }
34398
34399 if (status & (PCI_STATUS_PARITY)) {
34400@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34401 "Master Data Parity Error on %s\n",
34402 pci_name(dev));
34403
34404- atomic_inc(&pci_parity_count);
34405+ atomic_inc_unchecked(&pci_parity_count);
34406 }
34407
34408 if (status & (PCI_STATUS_DETECTED_PARITY)) {
34409@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34410 "Detected Parity Error on %s\n",
34411 pci_name(dev));
34412
34413- atomic_inc(&pci_parity_count);
34414+ atomic_inc_unchecked(&pci_parity_count);
34415 }
34416 }
34417
34418@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34419 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
34420 "Signaled System Error on %s\n",
34421 pci_name(dev));
34422- atomic_inc(&pci_nonparity_count);
34423+ atomic_inc_unchecked(&pci_nonparity_count);
34424 }
34425
34426 if (status & (PCI_STATUS_PARITY)) {
34427@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34428 "Master Data Parity Error on "
34429 "%s\n", pci_name(dev));
34430
34431- atomic_inc(&pci_parity_count);
34432+ atomic_inc_unchecked(&pci_parity_count);
34433 }
34434
34435 if (status & (PCI_STATUS_DETECTED_PARITY)) {
34436@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34437 "Detected Parity Error on %s\n",
34438 pci_name(dev));
34439
34440- atomic_inc(&pci_parity_count);
34441+ atomic_inc_unchecked(&pci_parity_count);
34442 }
34443 }
34444 }
34445@@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
34446 if (!check_pci_errors)
34447 return;
34448
34449- before_count = atomic_read(&pci_parity_count);
34450+ before_count = atomic_read_unchecked(&pci_parity_count);
34451
34452 /* scan all PCI devices looking for a Parity Error on devices and
34453 * bridges.
34454@@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
34455 /* Only if operator has selected panic on PCI Error */
34456 if (edac_pci_get_panic_on_pe()) {
34457 /* If the count is different 'after' from 'before' */
34458- if (before_count != atomic_read(&pci_parity_count))
34459+ if (before_count != atomic_read_unchecked(&pci_parity_count))
34460 panic("EDAC: PCI Parity Error");
34461 }
34462 }
34463diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
34464index 6796799..99e8377 100644
34465--- a/drivers/edac/mce_amd.h
34466+++ b/drivers/edac/mce_amd.h
34467@@ -78,7 +78,7 @@ extern const char * const ii_msgs[];
34468 struct amd_decoder_ops {
34469 bool (*mc0_mce)(u16, u8);
34470 bool (*mc1_mce)(u16, u8);
34471-};
34472+} __no_const;
34473
34474 void amd_report_gart_errors(bool);
34475 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
34476diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
34477index 57ea7f4..789e3c3 100644
34478--- a/drivers/firewire/core-card.c
34479+++ b/drivers/firewire/core-card.c
34480@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
34481
34482 void fw_core_remove_card(struct fw_card *card)
34483 {
34484- struct fw_card_driver dummy_driver = dummy_driver_template;
34485+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
34486
34487 card->driver->update_phy_reg(card, 4,
34488 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
34489diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
34490index f8d2287..5aaf4db 100644
34491--- a/drivers/firewire/core-cdev.c
34492+++ b/drivers/firewire/core-cdev.c
34493@@ -1365,8 +1365,7 @@ static int init_iso_resource(struct client *client,
34494 int ret;
34495
34496 if ((request->channels == 0 && request->bandwidth == 0) ||
34497- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
34498- request->bandwidth < 0)
34499+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
34500 return -EINVAL;
34501
34502 r = kmalloc(sizeof(*r), GFP_KERNEL);
34503diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
34504index af3e8aa..eb2f227 100644
34505--- a/drivers/firewire/core-device.c
34506+++ b/drivers/firewire/core-device.c
34507@@ -232,7 +232,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
34508 struct config_rom_attribute {
34509 struct device_attribute attr;
34510 u32 key;
34511-};
34512+} __do_const;
34513
34514 static ssize_t show_immediate(struct device *dev,
34515 struct device_attribute *dattr, char *buf)
34516diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
34517index 28a94c7..58da63a 100644
34518--- a/drivers/firewire/core-transaction.c
34519+++ b/drivers/firewire/core-transaction.c
34520@@ -38,6 +38,7 @@
34521 #include <linux/timer.h>
34522 #include <linux/types.h>
34523 #include <linux/workqueue.h>
34524+#include <linux/sched.h>
34525
34526 #include <asm/byteorder.h>
34527
34528diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
34529index 515a42c..5ecf3ba 100644
34530--- a/drivers/firewire/core.h
34531+++ b/drivers/firewire/core.h
34532@@ -111,6 +111,7 @@ struct fw_card_driver {
34533
34534 int (*stop_iso)(struct fw_iso_context *ctx);
34535 };
34536+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
34537
34538 void fw_card_initialize(struct fw_card *card,
34539 const struct fw_card_driver *driver, struct device *device);
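__do_const and __no_const are annotations for the PaX constify GCC plugin: structures made mostly of function pointers are moved to read-only memory by default, __do_const forces that for wrapper structs like the attribute types above, and the fw_card_driver typedef uses __no_const for the one on-stack copy that genuinely must stay writable. Without the plugin, the effect is roughly plain C const:

	#include <stdio.h>

	struct ops {
		int (*get)(void);
	};

	static int real_get(void) { return 42; }

	/* Constified table: the function pointer cannot be retargeted. */
	static const struct ops frozen_ops = { .get = real_get };

	int main(void)
	{
		/* Writable "dummy driver" copy, the __no_const case. */
		struct ops dummy = frozen_ops;
		dummy.get = real_get;		/* legal: this copy is mutable */
		/* frozen_ops.get = real_get;	   would not compile: read-only */
		printf("%d\n", frozen_ops.get() + dummy.get());
		return 0;
	}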
34540diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
34541index 94a58a0..f5eba42 100644
34542--- a/drivers/firmware/dmi-id.c
34543+++ b/drivers/firmware/dmi-id.c
34544@@ -16,7 +16,7 @@
34545 struct dmi_device_attribute{
34546 struct device_attribute dev_attr;
34547 int field;
34548-};
34549+} __do_const;
34550 #define to_dmi_dev_attr(_dev_attr) \
34551 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
34552
34553diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
34554index 4cd392d..4b629e1 100644
34555--- a/drivers/firmware/dmi_scan.c
34556+++ b/drivers/firmware/dmi_scan.c
34557@@ -490,11 +490,6 @@ void __init dmi_scan_machine(void)
34558 }
34559 }
34560 else {
34561- /*
34562- * no iounmap() for that ioremap(); it would be a no-op, but
34563- * it's so early in setup that sucker gets confused into doing
34564- * what it shouldn't if we actually call it.
34565- */
34566 p = dmi_ioremap(0xF0000, 0x10000);
34567 if (p == NULL)
34568 goto error;
34569@@ -769,7 +764,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
34570 if (buf == NULL)
34571 return -1;
34572
34573- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
34574+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
34575
34576 iounmap(buf);
34577 return 0;
34578diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
34579index 2a2e145..73745e79 100644
34580--- a/drivers/firmware/efivars.c
34581+++ b/drivers/firmware/efivars.c
34582@@ -133,7 +133,7 @@ struct efivar_attribute {
34583 };
34584
34585 static struct efivars __efivars;
34586-static struct efivar_operations ops;
34587+static efivar_operations_no_const ops __read_only;
34588
34589 #define PSTORE_EFI_ATTRIBUTES \
34590 (EFI_VARIABLE_NON_VOLATILE | \
34591@@ -1798,7 +1798,7 @@ efivar_create_sysfs_entry(struct efivars *efivars,
34592 static int
34593 create_efivars_bin_attributes(struct efivars *efivars)
34594 {
34595- struct bin_attribute *attr;
34596+ bin_attribute_no_const *attr;
34597 int error;
34598
34599 /* new_var */
34600diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
34601index 2a90ba6..07f3733 100644
34602--- a/drivers/firmware/google/memconsole.c
34603+++ b/drivers/firmware/google/memconsole.c
34604@@ -147,7 +147,9 @@ static int __init memconsole_init(void)
34605 if (!found_memconsole())
34606 return -ENODEV;
34607
34608- memconsole_bin_attr.size = memconsole_length;
34609+ pax_open_kernel();
34610+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
34611+ pax_close_kernel();
34612
34613 ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
34614
34615diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
34616index 6f2306d..af9476a 100644
34617--- a/drivers/gpio/gpio-ich.c
34618+++ b/drivers/gpio/gpio-ich.c
34619@@ -69,7 +69,7 @@ struct ichx_desc {
34620 /* Some chipsets have quirks, let these use their own request/get */
34621 int (*request)(struct gpio_chip *chip, unsigned offset);
34622 int (*get)(struct gpio_chip *chip, unsigned offset);
34623-};
34624+} __do_const;
34625
34626 static struct {
34627 spinlock_t lock;
34628diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
34629index 9902732..64b62dd 100644
34630--- a/drivers/gpio/gpio-vr41xx.c
34631+++ b/drivers/gpio/gpio-vr41xx.c
34632@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
34633 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
34634 maskl, pendl, maskh, pendh);
34635
34636- atomic_inc(&irq_err_count);
34637+ atomic_inc_unchecked(&irq_err_count);
34638
34639 return -EINVAL;
34640 }
34641diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
34642index 7b2d378..cc947ea 100644
34643--- a/drivers/gpu/drm/drm_crtc_helper.c
34644+++ b/drivers/gpu/drm/drm_crtc_helper.c
34645@@ -319,7 +319,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
34646 struct drm_crtc *tmp;
34647 int crtc_mask = 1;
34648
34649- WARN(!crtc, "checking null crtc?\n");
34650+ BUG_ON(!crtc);
34651
34652 dev = crtc->dev;
34653
34654diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
34655index be174ca..7f38143 100644
34656--- a/drivers/gpu/drm/drm_drv.c
34657+++ b/drivers/gpu/drm/drm_drv.c
34658@@ -307,7 +307,7 @@ module_exit(drm_core_exit);
34659 /**
34660 * Copy and IOCTL return string to user space
34661 */
34662-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
34663+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
34664 {
34665 int len;
34666
34667@@ -377,7 +377,7 @@ long drm_ioctl(struct file *filp,
34668 struct drm_file *file_priv = filp->private_data;
34669 struct drm_device *dev;
34670 struct drm_ioctl_desc *ioctl;
34671- drm_ioctl_t *func;
34672+ drm_ioctl_no_const_t func;
34673 unsigned int nr = DRM_IOCTL_NR(cmd);
34674 int retcode = -EINVAL;
34675 char stack_kdata[128];
34676@@ -390,7 +390,7 @@ long drm_ioctl(struct file *filp,
34677 return -ENODEV;
34678
34679 atomic_inc(&dev->ioctl_count);
34680- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
34681+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
34682 ++file_priv->ioctl_count;
34683
34684 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
34685diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
34686index 133b413..fd68225 100644
34687--- a/drivers/gpu/drm/drm_fops.c
34688+++ b/drivers/gpu/drm/drm_fops.c
34689@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
34690 }
34691
34692 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
34693- atomic_set(&dev->counts[i], 0);
34694+ atomic_set_unchecked(&dev->counts[i], 0);
34695
34696 dev->sigdata.lock = NULL;
34697
34698@@ -134,7 +134,7 @@ int drm_open(struct inode *inode, struct file *filp)
34699 if (drm_device_is_unplugged(dev))
34700 return -ENODEV;
34701
34702- if (!dev->open_count++)
34703+ if (local_inc_return(&dev->open_count) == 1)
34704 need_setup = 1;
34705 mutex_lock(&dev->struct_mutex);
34706 old_mapping = dev->dev_mapping;
34707@@ -149,7 +149,7 @@ int drm_open(struct inode *inode, struct file *filp)
34708 retcode = drm_open_helper(inode, filp, dev);
34709 if (retcode)
34710 goto err_undo;
34711- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
34712+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
34713 if (need_setup) {
34714 retcode = drm_setup(dev);
34715 if (retcode)
34716@@ -164,7 +164,7 @@ err_undo:
34717 iput(container_of(dev->dev_mapping, struct inode, i_data));
34718 dev->dev_mapping = old_mapping;
34719 mutex_unlock(&dev->struct_mutex);
34720- dev->open_count--;
34721+ local_dec(&dev->open_count);
34722 return retcode;
34723 }
34724 EXPORT_SYMBOL(drm_open);
34725@@ -438,7 +438,7 @@ int drm_release(struct inode *inode, struct file *filp)
34726
34727 mutex_lock(&drm_global_mutex);
34728
34729- DRM_DEBUG("open_count = %d\n", dev->open_count);
34730+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
34731
34732 if (dev->driver->preclose)
34733 dev->driver->preclose(dev, file_priv);
34734@@ -447,10 +447,10 @@ int drm_release(struct inode *inode, struct file *filp)
34735 * Begin inline drm_release
34736 */
34737
34738- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
34739+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
34740 task_pid_nr(current),
34741 (long)old_encode_dev(file_priv->minor->device),
34742- dev->open_count);
34743+ local_read(&dev->open_count));
34744
34745 /* Release any auth tokens that might point to this file_priv,
34746 (do that under the drm_global_mutex) */
34747@@ -547,8 +547,8 @@ int drm_release(struct inode *inode, struct file *filp)
34748 * End inline drm_release
34749 */
34750
34751- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
34752- if (!--dev->open_count) {
34753+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
34754+ if (local_dec_and_test(&dev->open_count)) {
34755 if (atomic_read(&dev->ioctl_count)) {
34756 DRM_ERROR("Device busy: %d\n",
34757 atomic_read(&dev->ioctl_count));
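dev->open_count becomes a local_t here, and the open/close paths switch to the return-value forms so "first open" and "last close" are detected in the same atomic operation rather than by re-reading the counter afterwards. A sketch of that idiom with C11 atomics as stand-ins (names hypothetical):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_long open_count;

	static void drm_open_sim(void)
	{
		/* local_inc_return() == 1 means we are the first opener. */
		if (atomic_fetch_add(&open_count, 1) + 1 == 1)
			puts("first open: run drm_setup()");
	}

	static void drm_release_sim(void)
	{
		/* local_dec_and_test(): true when the count just hit zero. */
		if (atomic_fetch_sub(&open_count, 1) - 1 == 0)
			puts("last close: tear down device state");
	}

	int main(void)
	{
		drm_open_sim();
		drm_open_sim();
		drm_release_sim();
		drm_release_sim();
		return 0;
	}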
34758diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
34759index f731116..629842c 100644
34760--- a/drivers/gpu/drm/drm_global.c
34761+++ b/drivers/gpu/drm/drm_global.c
34762@@ -36,7 +36,7 @@
34763 struct drm_global_item {
34764 struct mutex mutex;
34765 void *object;
34766- int refcount;
34767+ atomic_t refcount;
34768 };
34769
34770 static struct drm_global_item glob[DRM_GLOBAL_NUM];
34771@@ -49,7 +49,7 @@ void drm_global_init(void)
34772 struct drm_global_item *item = &glob[i];
34773 mutex_init(&item->mutex);
34774 item->object = NULL;
34775- item->refcount = 0;
34776+ atomic_set(&item->refcount, 0);
34777 }
34778 }
34779
34780@@ -59,7 +59,7 @@ void drm_global_release(void)
34781 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
34782 struct drm_global_item *item = &glob[i];
34783 BUG_ON(item->object != NULL);
34784- BUG_ON(item->refcount != 0);
34785+ BUG_ON(atomic_read(&item->refcount) != 0);
34786 }
34787 }
34788
34789@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
34790 void *object;
34791
34792 mutex_lock(&item->mutex);
34793- if (item->refcount == 0) {
34794+ if (atomic_read(&item->refcount) == 0) {
34795 item->object = kzalloc(ref->size, GFP_KERNEL);
34796 if (unlikely(item->object == NULL)) {
34797 ret = -ENOMEM;
34798@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
34799 goto out_err;
34800
34801 }
34802- ++item->refcount;
34803+ atomic_inc(&item->refcount);
34804 ref->object = item->object;
34805 object = item->object;
34806 mutex_unlock(&item->mutex);
34807@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
34808 struct drm_global_item *item = &glob[ref->global_type];
34809
34810 mutex_lock(&item->mutex);
34811- BUG_ON(item->refcount == 0);
34812+ BUG_ON(atomic_read(&item->refcount) == 0);
34813 BUG_ON(ref->object != item->object);
34814- if (--item->refcount == 0) {
34815+ if (atomic_dec_and_test(&item->refcount)) {
34816 ref->release(ref);
34817 item->object = NULL;
34818 }
34819diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
34820index d4b20ce..77a8d41 100644
34821--- a/drivers/gpu/drm/drm_info.c
34822+++ b/drivers/gpu/drm/drm_info.c
34823@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
34824 struct drm_local_map *map;
34825 struct drm_map_list *r_list;
34826
34827- /* Hardcoded from _DRM_FRAME_BUFFER,
34828- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
34829- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
34830- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
34831+ static const char * const types[] = {
34832+ [_DRM_FRAME_BUFFER] = "FB",
34833+ [_DRM_REGISTERS] = "REG",
34834+ [_DRM_SHM] = "SHM",
34835+ [_DRM_AGP] = "AGP",
34836+ [_DRM_SCATTER_GATHER] = "SG",
34837+ [_DRM_CONSISTENT] = "PCI",
34838+ [_DRM_GEM] = "GEM" };
34839 const char *type;
34840 int i;
34841
34842@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
34843 map = r_list->map;
34844 if (!map)
34845 continue;
34846- if (map->type < 0 || map->type > 5)
34847+ if (map->type >= ARRAY_SIZE(types))
34848 type = "??";
34849 else
34850 type = types[map->type];
34851@@ -253,7 +257,11 @@ int drm_vma_info(struct seq_file *m, void *data)
34852 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
34853 vma->vm_flags & VM_LOCKED ? 'l' : '-',
34854 vma->vm_flags & VM_IO ? 'i' : '-',
34855+#ifdef CONFIG_GRKERNSEC_HIDESYM
34856+ 0);
34857+#else
34858 vma->vm_pgoff);
34859+#endif
34860
34861 #if defined(__i386__)
34862 pgprot = pgprot_val(vma->vm_page_prot);
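The drm_vm_info rewrite replaces a positional string table and a hard-coded `type > 5` check with designated initializers indexed by the map-type values and an ARRAY_SIZE bound, so adding _DRM_GEM (or any future type) cannot silently index past the table. The same pattern in miniature (hypothetical enum):

	#include <stdio.h>

	enum map_type { MT_FB, MT_REG, MT_SHM, MT_AGP, MT_SG, MT_PCI, MT_GEM };

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	static const char * const types[] = {
		[MT_FB]  = "FB",  [MT_REG] = "REG", [MT_SHM] = "SHM",
		[MT_AGP] = "AGP", [MT_SG]  = "SG",  [MT_PCI] = "PCI",
		[MT_GEM] = "GEM",
	};

	static const char *type_name(unsigned int t)
	{
		/* Bound by the table itself; tolerate holes in the enum. */
		if (t >= ARRAY_SIZE(types) || !types[t])
			return "??";
		return types[t];
	}

	int main(void)
	{
		printf("%s %s\n", type_name(MT_GEM), type_name(99));
		return 0;
	}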
34863diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
34864index 2f4c434..dd12cd2 100644
34865--- a/drivers/gpu/drm/drm_ioc32.c
34866+++ b/drivers/gpu/drm/drm_ioc32.c
34867@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
34868 request = compat_alloc_user_space(nbytes);
34869 if (!access_ok(VERIFY_WRITE, request, nbytes))
34870 return -EFAULT;
34871- list = (struct drm_buf_desc *) (request + 1);
34872+ list = (struct drm_buf_desc __user *) (request + 1);
34873
34874 if (__put_user(count, &request->count)
34875 || __put_user(list, &request->list))
34876@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
34877 request = compat_alloc_user_space(nbytes);
34878 if (!access_ok(VERIFY_WRITE, request, nbytes))
34879 return -EFAULT;
34880- list = (struct drm_buf_pub *) (request + 1);
34881+ list = (struct drm_buf_pub __user *) (request + 1);
34882
34883 if (__put_user(count, &request->count)
34884 || __put_user(list, &request->list))
34885@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
34886 return 0;
34887 }
34888
34889-drm_ioctl_compat_t *drm_compat_ioctls[] = {
34890+drm_ioctl_compat_t drm_compat_ioctls[] = {
34891 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
34892 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
34893 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
34894@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
34895 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
34896 {
34897 unsigned int nr = DRM_IOCTL_NR(cmd);
34898- drm_ioctl_compat_t *fn;
34899 int ret;
34900
34901 /* Assume that ioctls without an explicit compat routine will just
34902@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
34903 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
34904 return drm_ioctl(filp, cmd, arg);
34905
34906- fn = drm_compat_ioctls[nr];
34907-
34908- if (fn != NULL)
34909- ret = (*fn) (filp, cmd, arg);
34910+ if (drm_compat_ioctls[nr] != NULL)
34911+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
34912 else
34913 ret = drm_ioctl(filp, cmd, arg);
34914
34915diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
34916index e77bd8b..1571b85 100644
34917--- a/drivers/gpu/drm/drm_ioctl.c
34918+++ b/drivers/gpu/drm/drm_ioctl.c
34919@@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
34920 stats->data[i].value =
34921 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
34922 else
34923- stats->data[i].value = atomic_read(&dev->counts[i]);
34924+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
34925 stats->data[i].type = dev->types[i];
34926 }
34927
34928diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
34929index d752c96..fe08455 100644
34930--- a/drivers/gpu/drm/drm_lock.c
34931+++ b/drivers/gpu/drm/drm_lock.c
34932@@ -86,7 +86,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
34933 if (drm_lock_take(&master->lock, lock->context)) {
34934 master->lock.file_priv = file_priv;
34935 master->lock.lock_time = jiffies;
34936- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
34937+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
34938 break; /* Got lock */
34939 }
34940
34941@@ -157,7 +157,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
34942 return -EINVAL;
34943 }
34944
34945- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
34946+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
34947
34948 if (drm_lock_free(&master->lock, lock->context)) {
34949 /* FIXME: Should really bail out here. */
34950diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
34951index 200e104..59facda 100644
34952--- a/drivers/gpu/drm/drm_stub.c
34953+++ b/drivers/gpu/drm/drm_stub.c
34954@@ -516,7 +516,7 @@ void drm_unplug_dev(struct drm_device *dev)
34955
34956 drm_device_set_unplugged(dev);
34957
34958- if (dev->open_count == 0) {
34959+ if (local_read(&dev->open_count) == 0) {
34960 drm_put_dev(dev);
34961 }
34962 mutex_unlock(&drm_global_mutex);
34963diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
34964index 004ecdf..db1f6e0 100644
34965--- a/drivers/gpu/drm/i810/i810_dma.c
34966+++ b/drivers/gpu/drm/i810/i810_dma.c
34967@@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
34968 dma->buflist[vertex->idx],
34969 vertex->discard, vertex->used);
34970
34971- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
34972- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
34973+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
34974+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
34975 sarea_priv->last_enqueue = dev_priv->counter - 1;
34976 sarea_priv->last_dispatch = (int)hw_status[5];
34977
34978@@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
34979 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
34980 mc->last_render);
34981
34982- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
34983- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
34984+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
34985+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
34986 sarea_priv->last_enqueue = dev_priv->counter - 1;
34987 sarea_priv->last_dispatch = (int)hw_status[5];
34988
34989diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
34990index 6e0acad..93c8289 100644
34991--- a/drivers/gpu/drm/i810/i810_drv.h
34992+++ b/drivers/gpu/drm/i810/i810_drv.h
34993@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
34994 int page_flipping;
34995
34996 wait_queue_head_t irq_queue;
34997- atomic_t irq_received;
34998- atomic_t irq_emitted;
34999+ atomic_unchecked_t irq_received;
35000+ atomic_unchecked_t irq_emitted;
35001
35002 int front_offset;
35003 } drm_i810_private_t;
35004diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
35005index 8a7c48b..27af8a5 100644
35006--- a/drivers/gpu/drm/i915/i915_debugfs.c
35007+++ b/drivers/gpu/drm/i915/i915_debugfs.c
35008@@ -103,7 +103,7 @@ static const char *cache_level_str(int type)
35009 static void
35010 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
35011 {
35012- seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s",
35013+ seq_printf(m, "%pK: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s",
35014 &obj->base,
35015 get_pin_flag(obj),
35016 get_tiling_flag(obj),
35017@@ -496,7 +496,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
35018 I915_READ(GTIMR));
35019 }
35020 seq_printf(m, "Interrupts received: %d\n",
35021- atomic_read(&dev_priv->irq_received));
35022+ atomic_read_unchecked(&dev_priv->irq_received));
35023 for_each_ring(ring, dev_priv, i) {
35024 if (IS_GEN6(dev) || IS_GEN7(dev)) {
35025 seq_printf(m,
35026diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
35027index 99daa89..84ebd44 100644
35028--- a/drivers/gpu/drm/i915/i915_dma.c
35029+++ b/drivers/gpu/drm/i915/i915_dma.c
35030@@ -1253,7 +1253,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
35031 bool can_switch;
35032
35033 spin_lock(&dev->count_lock);
35034- can_switch = (dev->open_count == 0);
35035+ can_switch = (local_read(&dev->open_count) == 0);
35036 spin_unlock(&dev->count_lock);
35037 return can_switch;
35038 }
35039diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
35040index 7339a4b..445aaba 100644
35041--- a/drivers/gpu/drm/i915/i915_drv.h
35042+++ b/drivers/gpu/drm/i915/i915_drv.h
35043@@ -656,7 +656,7 @@ typedef struct drm_i915_private {
35044 drm_dma_handle_t *status_page_dmah;
35045 struct resource mch_res;
35046
35047- atomic_t irq_received;
35048+ atomic_unchecked_t irq_received;
35049
35050 /* protects the irq masks */
35051 spinlock_t irq_lock;
35052@@ -1102,7 +1102,7 @@ struct drm_i915_gem_object {
35053 * will be page flipped away on the next vblank. When it
35054 * reaches 0, dev_priv->pending_flip_queue will be woken up.
35055 */
35056- atomic_t pending_flip;
35057+ atomic_unchecked_t pending_flip;
35058 };
35059 #define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
35060
35061@@ -1633,7 +1633,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
35062 struct drm_i915_private *dev_priv, unsigned port);
35063 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
35064 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
35065-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
35066+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
35067 {
35068 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
35069 }
35070diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
35071index 26d08bb..e24fb51 100644
35072--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
35073+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
35074@@ -672,7 +672,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
35075 i915_gem_clflush_object(obj);
35076
35077 if (obj->base.pending_write_domain)
35078- flips |= atomic_read(&obj->pending_flip);
35079+ flips |= atomic_read_unchecked(&obj->pending_flip);
35080
35081 flush_domains |= obj->base.write_domain;
35082 }
35083@@ -703,18 +703,23 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
35084
35085 static int
35086 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
35087- int count)
35088+ unsigned int count)
35089 {
35090- int i;
35091+ unsigned int i;
35092+ int relocs_total = 0;
35093+ int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
35094
35095 for (i = 0; i < count; i++) {
35096 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
35097 int length; /* limited by fault_in_pages_readable() */
35098
35099- /* First check for malicious input causing overflow */
35100- if (exec[i].relocation_count >
35101- INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
35102+ /* First check for malicious input causing overflow in
35103+ * the worst case where we need to allocate the entire
35104+ * relocation tree as a single array.
35105+ */
35106+ if (exec[i].relocation_count > relocs_max - relocs_total)
35107 return -EINVAL;
35108+ relocs_total += exec[i].relocation_count;
35109
35110 length = exec[i].relocation_count *
35111 sizeof(struct drm_i915_gem_relocation_entry);
35112@@ -1197,7 +1202,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
35113 return -ENOMEM;
35114 }
35115 ret = copy_from_user(exec2_list,
35116- (struct drm_i915_relocation_entry __user *)
35117+ (struct drm_i915_gem_exec_object2 __user *)
35118 (uintptr_t) args->buffers_ptr,
35119 sizeof(*exec2_list) * args->buffer_count);
35120 if (ret != 0) {
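The validate_exec_list fix above is worth spelling out: checking each user-supplied `relocation_count` against INT_MAX/sizeof(entry) individually still lets the sum across all buffers overflow, so the patch keeps a running relocs_total and bounds each count by the remaining budget `relocs_max - relocs_total`. A standalone model of that accumulation check (entry size hypothetical):

	#include <limits.h>
	#include <stdio.h>

	#define ENTRY_SIZE 32	/* stand-in for sizeof(relocation entry) */

	static int validate_counts(const unsigned int *counts, unsigned int n)
	{
		int relocs_total = 0;
		int relocs_max = INT_MAX / ENTRY_SIZE;
		unsigned int i;

		for (i = 0; i < n; i++) {
			/* Check each item against the *remaining* budget, so
			 * the running total can never overflow an int. */
			if (counts[i] > (unsigned int)(relocs_max - relocs_total))
				return -1;	/* -EINVAL */
			relocs_total += counts[i];
		}
		return 0;
	}

	int main(void)
	{
		unsigned int evil[2] = { INT_MAX / 32, INT_MAX / 32 };
		printf("%d\n", validate_counts(evil, 2));	/* -1: sum too big */
		return 0;
	}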
35121diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
35122index 3c59584..500f2e9 100644
35123--- a/drivers/gpu/drm/i915/i915_ioc32.c
35124+++ b/drivers/gpu/drm/i915/i915_ioc32.c
35125@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
35126 (unsigned long)request);
35127 }
35128
35129-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
35130+static drm_ioctl_compat_t i915_compat_ioctls[] = {
35131 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
35132 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
35133 [DRM_I915_GETPARAM] = compat_i915_getparam,
35134@@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
35135 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35136 {
35137 unsigned int nr = DRM_IOCTL_NR(cmd);
35138- drm_ioctl_compat_t *fn = NULL;
35139 int ret;
35140
35141 if (nr < DRM_COMMAND_BASE)
35142 return drm_compat_ioctl(filp, cmd, arg);
35143
35144- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
35145- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
35146-
35147- if (fn != NULL)
35148+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) {
35149+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
35150 ret = (*fn) (filp, cmd, arg);
35151- else
35152+ } else
35153 ret = drm_ioctl(filp, cmd, arg);
35154
35155 return ret;
35156diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
35157index fe84338..a863190 100644
35158--- a/drivers/gpu/drm/i915/i915_irq.c
35159+++ b/drivers/gpu/drm/i915/i915_irq.c
35160@@ -535,7 +535,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
35161 u32 pipe_stats[I915_MAX_PIPES];
35162 bool blc_event;
35163
35164- atomic_inc(&dev_priv->irq_received);
35165+ atomic_inc_unchecked(&dev_priv->irq_received);
35166
35167 while (true) {
35168 iir = I915_READ(VLV_IIR);
35169@@ -688,7 +688,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
35170 irqreturn_t ret = IRQ_NONE;
35171 int i;
35172
35173- atomic_inc(&dev_priv->irq_received);
35174+ atomic_inc_unchecked(&dev_priv->irq_received);
35175
35176 /* disable master interrupt before clearing iir */
35177 de_ier = I915_READ(DEIER);
35178@@ -760,7 +760,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
35179 int ret = IRQ_NONE;
35180 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
35181
35182- atomic_inc(&dev_priv->irq_received);
35183+ atomic_inc_unchecked(&dev_priv->irq_received);
35184
35185 /* disable master interrupt before clearing iir */
35186 de_ier = I915_READ(DEIER);
35187@@ -1787,7 +1787,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
35188 {
35189 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35190
35191- atomic_set(&dev_priv->irq_received, 0);
35192+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35193
35194 I915_WRITE(HWSTAM, 0xeffe);
35195
35196@@ -1813,7 +1813,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
35197 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35198 int pipe;
35199
35200- atomic_set(&dev_priv->irq_received, 0);
35201+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35202
35203 /* VLV magic */
35204 I915_WRITE(VLV_IMR, 0);
35205@@ -2108,7 +2108,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
35206 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35207 int pipe;
35208
35209- atomic_set(&dev_priv->irq_received, 0);
35210+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35211
35212 for_each_pipe(pipe)
35213 I915_WRITE(PIPESTAT(pipe), 0);
35214@@ -2159,7 +2159,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
35215 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
35216 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
35217
35218- atomic_inc(&dev_priv->irq_received);
35219+ atomic_inc_unchecked(&dev_priv->irq_received);
35220
35221 iir = I915_READ16(IIR);
35222 if (iir == 0)
35223@@ -2244,7 +2244,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
35224 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35225 int pipe;
35226
35227- atomic_set(&dev_priv->irq_received, 0);
35228+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35229
35230 if (I915_HAS_HOTPLUG(dev)) {
35231 I915_WRITE(PORT_HOTPLUG_EN, 0);
35232@@ -2339,7 +2339,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
35233 };
35234 int pipe, ret = IRQ_NONE;
35235
35236- atomic_inc(&dev_priv->irq_received);
35237+ atomic_inc_unchecked(&dev_priv->irq_received);
35238
35239 iir = I915_READ(IIR);
35240 do {
35241@@ -2465,7 +2465,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
35242 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35243 int pipe;
35244
35245- atomic_set(&dev_priv->irq_received, 0);
35246+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35247
35248 I915_WRITE(PORT_HOTPLUG_EN, 0);
35249 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
35250@@ -2572,7 +2572,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
35251 int irq_received;
35252 int ret = IRQ_NONE, pipe;
35253
35254- atomic_inc(&dev_priv->irq_received);
35255+ atomic_inc_unchecked(&dev_priv->irq_received);
35256
35257 iir = I915_READ(IIR);
35258
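
The irq_received conversions above, and the many atomic_*_unchecked and atomic_long_*_unchecked calls throughout the rest of this section (vblank tallies, fence serials, id generators, CM counters), are the flip side of PaX REFCOUNT: ordinary atomic_t operations gain overflow detection, so counters that are pure statistics and may wrap harmlessly are moved to an unchecked twin type that deliberately skips the check. A portable approximation of that twin, assuming the real definitions that ship elsewhere in this patch:

#include <stdatomic.h>

typedef struct { atomic_int counter; } atomic_unchecked_t;

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* no overflow trap on purpose; wrapping is acceptable here */
	atomic_fetch_add_explicit(&v->counter, 1, memory_order_relaxed);
}

static inline int atomic_read_unchecked(atomic_unchecked_t *v)
{
	return atomic_load_explicit(&v->counter, memory_order_relaxed);
}
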
35259diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
35260index 80aa1fc..85cfce3 100644
35261--- a/drivers/gpu/drm/i915/intel_display.c
35262+++ b/drivers/gpu/drm/i915/intel_display.c
35263@@ -2255,7 +2255,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
35264
35265 wait_event(dev_priv->pending_flip_queue,
35266 atomic_read(&dev_priv->mm.wedged) ||
35267- atomic_read(&obj->pending_flip) == 0);
35268+ atomic_read_unchecked(&obj->pending_flip) == 0);
35269
35270 /* Big Hammer, we also need to ensure that any pending
35271 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
35272@@ -7122,8 +7122,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
35273
35274 obj = work->old_fb_obj;
35275
35276- atomic_clear_mask(1 << intel_crtc->plane,
35277- &obj->pending_flip.counter);
35278+ atomic_clear_mask_unchecked(1 << intel_crtc->plane, &obj->pending_flip);
35279 wake_up(&dev_priv->pending_flip_queue);
35280
35281 queue_work(dev_priv->wq, &work->work);
35282@@ -7490,7 +7489,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
35283 /* Block clients from rendering to the new back buffer until
35284 * the flip occurs and the object is no longer visible.
35285 */
35286- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
35287+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
35288 atomic_inc(&intel_crtc->unpin_work_count);
35289
35290 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
35291@@ -7507,7 +7506,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
35292
35293 cleanup_pending:
35294 atomic_dec(&intel_crtc->unpin_work_count);
35295- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
35296+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
35297 drm_gem_object_unreference(&work->old_fb_obj->base);
35298 drm_gem_object_unreference(&obj->base);
35299 mutex_unlock(&dev->struct_mutex);
35300@@ -8849,13 +8848,13 @@ struct intel_quirk {
35301 int subsystem_vendor;
35302 int subsystem_device;
35303 void (*hook)(struct drm_device *dev);
35304-};
35305+} __do_const;
35306
35307 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
35308 struct intel_dmi_quirk {
35309 void (*hook)(struct drm_device *dev);
35310 const struct dmi_system_id (*dmi_id_list)[];
35311-};
35312+} __do_const;
35313
35314 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
35315 {
35316@@ -8863,18 +8862,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
35317 return 1;
35318 }
35319
35320+static const struct dmi_system_id intel_dmi_quirks_table[] = {
35321+ {
35322+ .callback = intel_dmi_reverse_brightness,
35323+ .ident = "NCR Corporation",
35324+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
35325+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
35326+ },
35327+ },
35328+ { } /* terminating entry */
35329+};
35330+
35331 static const struct intel_dmi_quirk intel_dmi_quirks[] = {
35332 {
35333- .dmi_id_list = &(const struct dmi_system_id[]) {
35334- {
35335- .callback = intel_dmi_reverse_brightness,
35336- .ident = "NCR Corporation",
35337- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
35338- DMI_MATCH(DMI_PRODUCT_NAME, ""),
35339- },
35340- },
35341- { } /* terminating entry */
35342- },
35343+ .dmi_id_list = &intel_dmi_quirks_table,
35344 .hook = quirk_invert_brightness,
35345 },
35346 };
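
__do_const marks intel_quirk and intel_dmi_quirk for the constify plugin: structures mixing plain data with function pointers are not constified automatically, and the attribute forces it so the quirk tables land in read-only memory. The DMI match list is hoisted out of an anonymous compound literal into the named intel_dmi_quirks_table, presumably so the initializer references a normal read-only object the plugin can reason about. Without the plugin the marker would compile away; a plausible sketch of its definition (an assumption, the real one ships with the plugin support in this patch):

#ifdef CONSTIFY_PLUGIN
#define __do_const __attribute__((do_const))
#else
#define __do_const
#endif
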
35347diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
35348index 54558a0..2d97005 100644
35349--- a/drivers/gpu/drm/mga/mga_drv.h
35350+++ b/drivers/gpu/drm/mga/mga_drv.h
35351@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
35352 u32 clear_cmd;
35353 u32 maccess;
35354
35355- atomic_t vbl_received; /**< Number of vblanks received. */
35356+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
35357 wait_queue_head_t fence_queue;
35358- atomic_t last_fence_retired;
35359+ atomic_unchecked_t last_fence_retired;
35360 u32 next_fence_to_post;
35361
35362 unsigned int fb_cpp;
35363diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
35364index 709e90d..89a1c0d 100644
35365--- a/drivers/gpu/drm/mga/mga_ioc32.c
35366+++ b/drivers/gpu/drm/mga/mga_ioc32.c
35367@@ -189,7 +189,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
35368 return 0;
35369 }
35370
35371-drm_ioctl_compat_t *mga_compat_ioctls[] = {
35372+drm_ioctl_compat_t mga_compat_ioctls[] = {
35373 [DRM_MGA_INIT] = compat_mga_init,
35374 [DRM_MGA_GETPARAM] = compat_mga_getparam,
35375 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
35376@@ -207,18 +207,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
35377 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35378 {
35379 unsigned int nr = DRM_IOCTL_NR(cmd);
35380- drm_ioctl_compat_t *fn = NULL;
35381 int ret;
35382
35383 if (nr < DRM_COMMAND_BASE)
35384 return drm_compat_ioctl(filp, cmd, arg);
35385
35386- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
35387- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
35388-
35389- if (fn != NULL)
35390+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) {
35391+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
35392 ret = (*fn) (filp, cmd, arg);
35393- else
35394+ } else
35395 ret = drm_ioctl(filp, cmd, arg);
35396
35397 return ret;
35398diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
35399index 598c281..60d590e 100644
35400--- a/drivers/gpu/drm/mga/mga_irq.c
35401+++ b/drivers/gpu/drm/mga/mga_irq.c
35402@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
35403 if (crtc != 0)
35404 return 0;
35405
35406- return atomic_read(&dev_priv->vbl_received);
35407+ return atomic_read_unchecked(&dev_priv->vbl_received);
35408 }
35409
35410
35411@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
35412 /* VBLANK interrupt */
35413 if (status & MGA_VLINEPEN) {
35414 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
35415- atomic_inc(&dev_priv->vbl_received);
35416+ atomic_inc_unchecked(&dev_priv->vbl_received);
35417 drm_handle_vblank(dev, 0);
35418 handled = 1;
35419 }
35420@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
35421 if ((prim_start & ~0x03) != (prim_end & ~0x03))
35422 MGA_WRITE(MGA_PRIMEND, prim_end);
35423
35424- atomic_inc(&dev_priv->last_fence_retired);
35425+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
35426 DRM_WAKEUP(&dev_priv->fence_queue);
35427 handled = 1;
35428 }
35429@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
35430 * using fences.
35431 */
35432 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
35433- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
35434+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
35435 - *sequence) <= (1 << 23)));
35436
35437 *sequence = cur_fence;
35438diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
35439index 865eddf..62c4cc3 100644
35440--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
35441+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
35442@@ -1015,7 +1015,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
35443 struct bit_table {
35444 const char id;
35445 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
35446-};
35447+} __no_const;
35448
35449 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
35450
35451diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
35452index aa89eb9..d45d38b 100644
35453--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
35454+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
35455@@ -80,7 +80,7 @@ struct nouveau_drm {
35456 struct drm_global_reference mem_global_ref;
35457 struct ttm_bo_global_ref bo_global_ref;
35458 struct ttm_bo_device bdev;
35459- atomic_t validate_sequence;
35460+ atomic_unchecked_t validate_sequence;
35461 int (*move)(struct nouveau_channel *,
35462 struct ttm_buffer_object *,
35463 struct ttm_mem_reg *, struct ttm_mem_reg *);
35464diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
35465index cdb83ac..27f0a16 100644
35466--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
35467+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
35468@@ -43,7 +43,7 @@ struct nouveau_fence_priv {
35469 int (*sync)(struct nouveau_fence *, struct nouveau_channel *,
35470 struct nouveau_channel *);
35471 u32 (*read)(struct nouveau_channel *);
35472-};
35473+} __no_const;
35474
35475 #define nouveau_fence(drm) ((struct nouveau_fence_priv *)(drm)->fence)
35476
35477diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
35478index 8bf695c..9fbc90a 100644
35479--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
35480+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
35481@@ -321,7 +321,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
35482 int trycnt = 0;
35483 int ret, i;
35484
35485- sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
35486+ sequence = atomic_add_return_unchecked(1, &drm->ttm.validate_sequence);
35487 retry:
35488 if (++trycnt > 100000) {
35489 NV_ERROR(drm, "%s failed and gave up.\n", __func__);
35490diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
35491index 08214bc..9208577 100644
35492--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
35493+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
35494@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
35495 unsigned long arg)
35496 {
35497 unsigned int nr = DRM_IOCTL_NR(cmd);
35498- drm_ioctl_compat_t *fn = NULL;
35499+ drm_ioctl_compat_t fn = NULL;
35500 int ret;
35501
35502 if (nr < DRM_COMMAND_BASE)
35503diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
35504index 25d3495..d81aaf6 100644
35505--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
35506+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
35507@@ -62,7 +62,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
35508 bool can_switch;
35509
35510 spin_lock(&dev->count_lock);
35511- can_switch = (dev->open_count == 0);
35512+ can_switch = (local_read(&dev->open_count) == 0);
35513 spin_unlock(&dev->count_lock);
35514 return can_switch;
35515 }
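
can_switch now reads open_count through local_read() because the counter itself is retyped as a local_t by the DRM core portion of this patch; this switcheroo hook and the identical radeon_device.c change below are just callers catching up with that type change.
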
35516diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
35517index d4660cf..70dbe65 100644
35518--- a/drivers/gpu/drm/r128/r128_cce.c
35519+++ b/drivers/gpu/drm/r128/r128_cce.c
35520@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
35521
35522 /* GH: Simple idle check.
35523 */
35524- atomic_set(&dev_priv->idle_count, 0);
35525+ atomic_set_unchecked(&dev_priv->idle_count, 0);
35526
35527 /* We don't support anything other than bus-mastering ring mode,
35528 * but the ring can be in either AGP or PCI space for the ring
35529diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
35530index 930c71b..499aded 100644
35531--- a/drivers/gpu/drm/r128/r128_drv.h
35532+++ b/drivers/gpu/drm/r128/r128_drv.h
35533@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
35534 int is_pci;
35535 unsigned long cce_buffers_offset;
35536
35537- atomic_t idle_count;
35538+ atomic_unchecked_t idle_count;
35539
35540 int page_flipping;
35541 int current_page;
35542 u32 crtc_offset;
35543 u32 crtc_offset_cntl;
35544
35545- atomic_t vbl_received;
35546+ atomic_unchecked_t vbl_received;
35547
35548 u32 color_fmt;
35549 unsigned int front_offset;
35550diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
35551index a954c54..9cc595c 100644
35552--- a/drivers/gpu/drm/r128/r128_ioc32.c
35553+++ b/drivers/gpu/drm/r128/r128_ioc32.c
35554@@ -177,7 +177,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
35555 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
35556 }
35557
35558-drm_ioctl_compat_t *r128_compat_ioctls[] = {
35559+drm_ioctl_compat_t r128_compat_ioctls[] = {
35560 [DRM_R128_INIT] = compat_r128_init,
35561 [DRM_R128_DEPTH] = compat_r128_depth,
35562 [DRM_R128_STIPPLE] = compat_r128_stipple,
35563@@ -196,18 +196,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
35564 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35565 {
35566 unsigned int nr = DRM_IOCTL_NR(cmd);
35567- drm_ioctl_compat_t *fn = NULL;
35568 int ret;
35569
35570 if (nr < DRM_COMMAND_BASE)
35571 return drm_compat_ioctl(filp, cmd, arg);
35572
35573- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
35574- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
35575-
35576- if (fn != NULL)
35577+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) {
35578+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
35579 ret = (*fn) (filp, cmd, arg);
35580- else
35581+ } else
35582 ret = drm_ioctl(filp, cmd, arg);
35583
35584 return ret;
35585diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
35586index 2ea4f09..d391371 100644
35587--- a/drivers/gpu/drm/r128/r128_irq.c
35588+++ b/drivers/gpu/drm/r128/r128_irq.c
35589@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
35590 if (crtc != 0)
35591 return 0;
35592
35593- return atomic_read(&dev_priv->vbl_received);
35594+ return atomic_read_unchecked(&dev_priv->vbl_received);
35595 }
35596
35597 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
35598@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
35599 /* VBLANK interrupt */
35600 if (status & R128_CRTC_VBLANK_INT) {
35601 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
35602- atomic_inc(&dev_priv->vbl_received);
35603+ atomic_inc_unchecked(&dev_priv->vbl_received);
35604 drm_handle_vblank(dev, 0);
35605 return IRQ_HANDLED;
35606 }
35607diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
35608index 19bb7e6..de7e2a2 100644
35609--- a/drivers/gpu/drm/r128/r128_state.c
35610+++ b/drivers/gpu/drm/r128/r128_state.c
35611@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
35612
35613 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
35614 {
35615- if (atomic_read(&dev_priv->idle_count) == 0)
35616+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
35617 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
35618 else
35619- atomic_set(&dev_priv->idle_count, 0);
35620+ atomic_set_unchecked(&dev_priv->idle_count, 0);
35621 }
35622
35623 #endif
35624diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
35625index 5a82b6b..9e69c73 100644
35626--- a/drivers/gpu/drm/radeon/mkregtable.c
35627+++ b/drivers/gpu/drm/radeon/mkregtable.c
35628@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
35629 regex_t mask_rex;
35630 regmatch_t match[4];
35631 char buf[1024];
35632- size_t end;
35633+ long end;
35634 int len;
35635 int done = 0;
35636 int r;
35637 unsigned o;
35638 struct offset *offset;
35639 char last_reg_s[10];
35640- int last_reg;
35641+ unsigned long last_reg;
35642
35643 if (regcomp
35644 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
35645diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
35646index 0d6562b..a154330 100644
35647--- a/drivers/gpu/drm/radeon/radeon_device.c
35648+++ b/drivers/gpu/drm/radeon/radeon_device.c
35649@@ -969,7 +969,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
35650 bool can_switch;
35651
35652 spin_lock(&dev->count_lock);
35653- can_switch = (dev->open_count == 0);
35654+ can_switch = (local_read(&dev->open_count) == 0);
35655 spin_unlock(&dev->count_lock);
35656 return can_switch;
35657 }
35658diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
35659index e7fdf16..f4f6490 100644
35660--- a/drivers/gpu/drm/radeon/radeon_drv.h
35661+++ b/drivers/gpu/drm/radeon/radeon_drv.h
35662@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
35663
35664 /* SW interrupt */
35665 wait_queue_head_t swi_queue;
35666- atomic_t swi_emitted;
35667+ atomic_unchecked_t swi_emitted;
35668 int vblank_crtc;
35669 uint32_t irq_enable_reg;
35670 uint32_t r500_disp_irq_reg;
35671diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
35672index c180df8..5fd8186 100644
35673--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
35674+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
35675@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
35676 request = compat_alloc_user_space(sizeof(*request));
35677 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
35678 || __put_user(req32.param, &request->param)
35679- || __put_user((void __user *)(unsigned long)req32.value,
35680+ || __put_user((unsigned long)req32.value,
35681 &request->value))
35682 return -EFAULT;
35683
35684@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
35685 #define compat_radeon_cp_setparam NULL
35686 #endif /* X86_64 || IA64 */
35687
35688-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
35689+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
35690 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
35691 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
35692 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
35693@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
35694 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35695 {
35696 unsigned int nr = DRM_IOCTL_NR(cmd);
35697- drm_ioctl_compat_t *fn = NULL;
35698 int ret;
35699
35700 if (nr < DRM_COMMAND_BASE)
35701 return drm_compat_ioctl(filp, cmd, arg);
35702
35703- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
35704- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
35705-
35706- if (fn != NULL)
35707+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) {
35708+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
35709 ret = (*fn) (filp, cmd, arg);
35710- else
35711+ } else
35712 ret = drm_ioctl(filp, cmd, arg);
35713
35714 return ret;
35715diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
35716index e771033..a0bc6b3 100644
35717--- a/drivers/gpu/drm/radeon/radeon_irq.c
35718+++ b/drivers/gpu/drm/radeon/radeon_irq.c
35719@@ -224,8 +224,8 @@ static int radeon_emit_irq(struct drm_device * dev)
35720 unsigned int ret;
35721 RING_LOCALS;
35722
35723- atomic_inc(&dev_priv->swi_emitted);
35724- ret = atomic_read(&dev_priv->swi_emitted);
35725+ atomic_inc_unchecked(&dev_priv->swi_emitted);
35726+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
35727
35728 BEGIN_RING(4);
35729 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
35730@@ -351,7 +351,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
35731 drm_radeon_private_t *dev_priv =
35732 (drm_radeon_private_t *) dev->dev_private;
35733
35734- atomic_set(&dev_priv->swi_emitted, 0);
35735+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
35736 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
35737
35738 dev->max_vblank_count = 0x001fffff;
35739diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
35740index 8e9057b..af6dacb 100644
35741--- a/drivers/gpu/drm/radeon/radeon_state.c
35742+++ b/drivers/gpu/drm/radeon/radeon_state.c
35743@@ -2166,7 +2166,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
35744 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
35745 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
35746
35747- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
35748+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
35749 sarea_priv->nbox * sizeof(depth_boxes[0])))
35750 return -EFAULT;
35751
35752@@ -3029,7 +3029,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
35753 {
35754 drm_radeon_private_t *dev_priv = dev->dev_private;
35755 drm_radeon_getparam_t *param = data;
35756- int value;
35757+ int value = 0;
35758
35759 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
35760
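
Two distinct fixes hide in the radeon_state.c hunks: the clear ioctl re-validates sarea_priv->nbox at the point of the copy, since the SAREA is a mapping shared with userspace and can change after the earlier clamp (the re-read is still not atomic with the copy, so this narrows the race window rather than closing it), and getparam's value is zero-initialized so an unhandled parameter can no longer copy an uninitialized stack word back to userspace.
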
35761diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
35762index 93f760e..8088227 100644
35763--- a/drivers/gpu/drm/radeon/radeon_ttm.c
35764+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
35765@@ -782,7 +782,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
35766 man->size = size >> PAGE_SHIFT;
35767 }
35768
35769-static struct vm_operations_struct radeon_ttm_vm_ops;
35770+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
35771 static const struct vm_operations_struct *ttm_vm_ops = NULL;
35772
35773 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
35774@@ -823,8 +823,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
35775 }
35776 if (unlikely(ttm_vm_ops == NULL)) {
35777 ttm_vm_ops = vma->vm_ops;
35778+ pax_open_kernel();
35779 radeon_ttm_vm_ops = *ttm_vm_ops;
35780 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
35781+ pax_close_kernel();
35782 }
35783 vma->vm_ops = &radeon_ttm_vm_ops;
35784 return 0;
35785@@ -862,28 +864,33 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
35786 sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
35787 else
35788 sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
35789- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
35790- radeon_mem_types_list[i].show = &radeon_mm_dump_table;
35791- radeon_mem_types_list[i].driver_features = 0;
35792+ pax_open_kernel();
35793+ *(const char **)&radeon_mem_types_list[i].name = radeon_mem_types_names[i];
35794+ *(void **)&radeon_mem_types_list[i].show = &radeon_mm_dump_table;
35795+ *(u32 *)&radeon_mem_types_list[i].driver_features = 0;
35796 if (i == 0)
35797- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
35798+ *(void **)&radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
35799 else
35800- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
35801-
35802+ *(void **)&radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
35803+ pax_close_kernel();
35804 }
35805 /* Add ttm page pool to debugfs */
35806 sprintf(radeon_mem_types_names[i], "ttm_page_pool");
35807- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
35808- radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
35809- radeon_mem_types_list[i].driver_features = 0;
35810- radeon_mem_types_list[i++].data = NULL;
35811+ pax_open_kernel();
35812+ *(const char **)&radeon_mem_types_list[i].name = radeon_mem_types_names[i];
35813+ *(void **)&radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
35814+ *(u32 *)&radeon_mem_types_list[i].driver_features = 0;
35815+ *(void **)&radeon_mem_types_list[i++].data = NULL;
35816+ pax_close_kernel();
35817 #ifdef CONFIG_SWIOTLB
35818 if (swiotlb_nr_tbl()) {
35819 sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
35820- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
35821- radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
35822- radeon_mem_types_list[i].driver_features = 0;
35823- radeon_mem_types_list[i++].data = NULL;
35824+ pax_open_kernel();
35825+ *(const char **)&radeon_mem_types_list[i].name = radeon_mem_types_names[i];
35826+ *(void **)&radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
35827+ *(u32 *)&radeon_mem_types_list[i].driver_features = 0;
35828+ *(void **)&radeon_mem_types_list[i++].data = NULL;
35829+ pax_close_kernel();
35830 }
35831 #endif
35832 return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
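
radeon_ttm.c shows the standard recipe for one-time initialization of data the plugin has made read-only: bracket the writes with pax_open_kernel()/pax_close_kernel(), which temporarily lift kernel write protection (on x86 by toggling CR0.WP), and cast each field through a plain pointer type to strip the injected const. A runnable userspace analogy of the shape, with stubs standing in for the PaX helpers:

#include <stdio.h>

static void pax_open_kernel(void)  { /* really: clear CR0.WP or equivalent */ }
static void pax_close_kernel(void) { /* really: restore write protection */ }

struct vm_ops { void (*fault)(void); };
static struct vm_ops ttm_like_ops;	/* constified in the real kernel */

static void my_fault(void) { puts("fault handler called"); }

int main(void)
{
	pax_open_kernel();
	*(void (**)(void))&ttm_like_ops.fault = my_fault;	/* strip the const */
	pax_close_kernel();
	ttm_like_ops.fault();
	return 0;
}
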
35833diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
35834index 5706d2a..17aedaa 100644
35835--- a/drivers/gpu/drm/radeon/rs690.c
35836+++ b/drivers/gpu/drm/radeon/rs690.c
35837@@ -304,9 +304,11 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
35838 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
35839 rdev->pm.sideport_bandwidth.full)
35840 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
35841- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
35842+ read_delay_latency.full = dfixed_const(800 * 1000);
35843 read_delay_latency.full = dfixed_div(read_delay_latency,
35844 rdev->pm.igp_sideport_mclk);
35845+ a.full = dfixed_const(370);
35846+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
35847 } else {
35848 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
35849 rdev->pm.k8_bandwidth.full)
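
The rs690 change is a plain 32-bit fixed-point overflow fix: dfixed_const(x) stores x << 12, and 370 * 800 * 1000 = 296,000,000, which shifted left by 12 is roughly 1.21e12, far past UINT32_MAX. Scaling only 800 * 1000 (3,276,800,000 after the shift, which still fits), dividing by the sideport clock first, and multiplying by 370 afterwards keeps every intermediate in range. A quick standalone check of the arithmetic:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t old_way = (370ULL * 800 * 1000) << 12;	/* ~1.21e12, overflows u32 */
	uint64_t new_way = (800ULL * 1000) << 12;	/* 3,276,800,000, still fits */

	printf("old=%llu new=%llu u32max=%llu\n",
	       (unsigned long long)old_way,
	       (unsigned long long)new_way,
	       (unsigned long long)UINT32_MAX);
	return 0;
}
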
35850diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
35851index bd2a3b4..122d9ad 100644
35852--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
35853+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
35854@@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
35855 static int ttm_pool_mm_shrink(struct shrinker *shrink,
35856 struct shrink_control *sc)
35857 {
35858- static atomic_t start_pool = ATOMIC_INIT(0);
35859+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
35860 unsigned i;
35861- unsigned pool_offset = atomic_add_return(1, &start_pool);
35862+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
35863 struct ttm_page_pool *pool;
35864 int shrink_pages = sc->nr_to_scan;
35865
35866diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
35867index 1eb060c..188b1fc 100644
35868--- a/drivers/gpu/drm/udl/udl_fb.c
35869+++ b/drivers/gpu/drm/udl/udl_fb.c
35870@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
35871 fb_deferred_io_cleanup(info);
35872 kfree(info->fbdefio);
35873 info->fbdefio = NULL;
35874- info->fbops->fb_mmap = udl_fb_mmap;
35875 }
35876
35877 pr_warn("released /dev/fb%d user=%d count=%d\n",
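
The udl hunk deletes a write that re-installed udl_fb_mmap into info->fbops on release; once fbops structures are constified that store would fault. The fb_deferred_io machinery, which used to swap fb_mmap in and out at runtime, is presumably reworked elsewhere in this patch to cope with a const fbops.
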
35878diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
35879index 893a650..6190d3b 100644
35880--- a/drivers/gpu/drm/via/via_drv.h
35881+++ b/drivers/gpu/drm/via/via_drv.h
35882@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
35883 typedef uint32_t maskarray_t[5];
35884
35885 typedef struct drm_via_irq {
35886- atomic_t irq_received;
35887+ atomic_unchecked_t irq_received;
35888 uint32_t pending_mask;
35889 uint32_t enable_mask;
35890 wait_queue_head_t irq_queue;
35891@@ -75,7 +75,7 @@ typedef struct drm_via_private {
35892 struct timeval last_vblank;
35893 int last_vblank_valid;
35894 unsigned usec_per_vblank;
35895- atomic_t vbl_received;
35896+ atomic_unchecked_t vbl_received;
35897 drm_via_state_t hc_state;
35898 char pci_buf[VIA_PCI_BUF_SIZE];
35899 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
35900diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
35901index ac98964..5dbf512 100644
35902--- a/drivers/gpu/drm/via/via_irq.c
35903+++ b/drivers/gpu/drm/via/via_irq.c
35904@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
35905 if (crtc != 0)
35906 return 0;
35907
35908- return atomic_read(&dev_priv->vbl_received);
35909+ return atomic_read_unchecked(&dev_priv->vbl_received);
35910 }
35911
35912 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35913@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35914
35915 status = VIA_READ(VIA_REG_INTERRUPT);
35916 if (status & VIA_IRQ_VBLANK_PENDING) {
35917- atomic_inc(&dev_priv->vbl_received);
35918- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
35919+ atomic_inc_unchecked(&dev_priv->vbl_received);
35920+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
35921 do_gettimeofday(&cur_vblank);
35922 if (dev_priv->last_vblank_valid) {
35923 dev_priv->usec_per_vblank =
35924@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35925 dev_priv->last_vblank = cur_vblank;
35926 dev_priv->last_vblank_valid = 1;
35927 }
35928- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
35929+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
35930 DRM_DEBUG("US per vblank is: %u\n",
35931 dev_priv->usec_per_vblank);
35932 }
35933@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35934
35935 for (i = 0; i < dev_priv->num_irqs; ++i) {
35936 if (status & cur_irq->pending_mask) {
35937- atomic_inc(&cur_irq->irq_received);
35938+ atomic_inc_unchecked(&cur_irq->irq_received);
35939 DRM_WAKEUP(&cur_irq->irq_queue);
35940 handled = 1;
35941 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
35942@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
35943 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
35944 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
35945 masks[irq][4]));
35946- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
35947+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
35948 } else {
35949 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
35950 (((cur_irq_sequence =
35951- atomic_read(&cur_irq->irq_received)) -
35952+ atomic_read_unchecked(&cur_irq->irq_received)) -
35953 *sequence) <= (1 << 23)));
35954 }
35955 *sequence = cur_irq_sequence;
35956@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
35957 }
35958
35959 for (i = 0; i < dev_priv->num_irqs; ++i) {
35960- atomic_set(&cur_irq->irq_received, 0);
35961+ atomic_set_unchecked(&cur_irq->irq_received, 0);
35962 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
35963 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
35964 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
35965@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
35966 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
35967 case VIA_IRQ_RELATIVE:
35968 irqwait->request.sequence +=
35969- atomic_read(&cur_irq->irq_received);
35970+ atomic_read_unchecked(&cur_irq->irq_received);
35971 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
35972 case VIA_IRQ_ABSOLUTE:
35973 break;
35974diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
35975index 13aeda7..4a952d1 100644
35976--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
35977+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
35978@@ -290,7 +290,7 @@ struct vmw_private {
35979 * Fencing and IRQs.
35980 */
35981
35982- atomic_t marker_seq;
35983+ atomic_unchecked_t marker_seq;
35984 wait_queue_head_t fence_queue;
35985 wait_queue_head_t fifo_queue;
35986 int fence_queue_waiters; /* Protected by hw_mutex */
35987diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
35988index 3eb1486..0a47ee9 100644
35989--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
35990+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
35991@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
35992 (unsigned int) min,
35993 (unsigned int) fifo->capabilities);
35994
35995- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
35996+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
35997 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
35998 vmw_marker_queue_init(&fifo->marker_queue);
35999 return vmw_fifo_send_fence(dev_priv, &dummy);
36000@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
36001 if (reserveable)
36002 iowrite32(bytes, fifo_mem +
36003 SVGA_FIFO_RESERVED);
36004- return fifo_mem + (next_cmd >> 2);
36005+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
36006 } else {
36007 need_bounce = true;
36008 }
36009@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
36010
36011 fm = vmw_fifo_reserve(dev_priv, bytes);
36012 if (unlikely(fm == NULL)) {
36013- *seqno = atomic_read(&dev_priv->marker_seq);
36014+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
36015 ret = -ENOMEM;
36016 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
36017 false, 3*HZ);
36018@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
36019 }
36020
36021 do {
36022- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
36023+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
36024 } while (*seqno == 0);
36025
36026 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
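
The __force_kernel cast in vmw_fifo_reserve appears to feed the pointer address-space checking this patch introduces: the fifo mapping carries a non-kernel address-space tag, and the cast asserts that the returned command-buffer pointer may be treated as ordinary kernel memory.
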
36027diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
36028index 4640adb..e1384ed 100644
36029--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
36030+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
36031@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
36032 * emitted. Then the fence is stale and signaled.
36033 */
36034
36035- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
36036+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
36037 > VMW_FENCE_WRAP);
36038
36039 return ret;
36040@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
36041
36042 if (fifo_idle)
36043 down_read(&fifo_state->rwsem);
36044- signal_seq = atomic_read(&dev_priv->marker_seq);
36045+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
36046 ret = 0;
36047
36048 for (;;) {
36049diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
36050index 8a8725c..afed796 100644
36051--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
36052+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
36053@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
36054 while (!vmw_lag_lt(queue, us)) {
36055 spin_lock(&queue->lock);
36056 if (list_empty(&queue->head))
36057- seqno = atomic_read(&dev_priv->marker_seq);
36058+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
36059 else {
36060 marker = list_first_entry(&queue->head,
36061 struct vmw_marker, head);
36062diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
36063index ceb3040..6160c5c 100644
36064--- a/drivers/hid/hid-core.c
36065+++ b/drivers/hid/hid-core.c
36066@@ -2242,7 +2242,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
36067
36068 int hid_add_device(struct hid_device *hdev)
36069 {
36070- static atomic_t id = ATOMIC_INIT(0);
36071+ static atomic_unchecked_t id = ATOMIC_INIT(0);
36072 int ret;
36073
36074 if (WARN_ON(hdev->status & HID_STAT_ADDED))
36075@@ -2276,7 +2276,7 @@ int hid_add_device(struct hid_device *hdev)
36076 /* XXX hack, any other cleaner solution after the driver core
36077 * is converted to allow more than 20 bytes as the device name? */
36078 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
36079- hdev->vendor, hdev->product, atomic_inc_return(&id));
36080+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
36081
36082 hid_debug_register(hdev, dev_name(&hdev->dev));
36083 ret = device_add(&hdev->dev);
36084diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
36085index eec3291..8ed706b 100644
36086--- a/drivers/hid/hid-wiimote-debug.c
36087+++ b/drivers/hid/hid-wiimote-debug.c
36088@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
36089 else if (size == 0)
36090 return -EIO;
36091
36092- if (copy_to_user(u, buf, size))
36093+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
36094 return -EFAULT;
36095
36096 *off += size;
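
The wiimote debugfs read gains a bounds check so a size larger than the local buffer can never reach copy_to_user and walk off the end of the stack array. The idiom in isolation, with memcpy standing in for copy_to_user:

#include <string.h>
#include <stddef.h>

/* Copy at most bufsz bytes out of buf; reject oversized requests up front. */
static long bounded_read(char *dst, size_t req, const char *buf, size_t bufsz)
{
	if (req > bufsz)
		return -1;	/* -EFAULT in the driver */
	memcpy(dst, buf, req);
	return (long)req;
}
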
36097diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
36098index 773a2f2..7ce08bc 100644
36099--- a/drivers/hv/channel.c
36100+++ b/drivers/hv/channel.c
36101@@ -394,8 +394,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
36102 int ret = 0;
36103 int t;
36104
36105- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
36106- atomic_inc(&vmbus_connection.next_gpadl_handle);
36107+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
36108+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
36109
36110 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
36111 if (ret)
36112diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
36113index 3648f8f..30ef30d 100644
36114--- a/drivers/hv/hv.c
36115+++ b/drivers/hv/hv.c
36116@@ -111,7 +111,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
36117 u64 output_address = (output) ? virt_to_phys(output) : 0;
36118 u32 output_address_hi = output_address >> 32;
36119 u32 output_address_lo = output_address & 0xFFFFFFFF;
36120- void *hypercall_page = hv_context.hypercall_page;
36121+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
36122
36123 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
36124 "=a"(hv_status_lo) : "d" (control_hi),
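
do_hypercall now makes its indirect call through ktva_ktla(), a helper this patch introduces to translate the hypercall page's address between the kernel text mapping and its linear alias; KERNEXEC splits the two, and the call has to go through the one that is actually mapped executable.
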
36125diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
36126index d8d1fad..b91caf7 100644
36127--- a/drivers/hv/hyperv_vmbus.h
36128+++ b/drivers/hv/hyperv_vmbus.h
36129@@ -594,7 +594,7 @@ enum vmbus_connect_state {
36130 struct vmbus_connection {
36131 enum vmbus_connect_state conn_state;
36132
36133- atomic_t next_gpadl_handle;
36134+ atomic_unchecked_t next_gpadl_handle;
36135
36136 /*
36137 * Represents channel interrupts. Each bit position represents a
36138diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
36139index 8e1a9ec..4687821 100644
36140--- a/drivers/hv/vmbus_drv.c
36141+++ b/drivers/hv/vmbus_drv.c
36142@@ -629,10 +629,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
36143 {
36144 int ret = 0;
36145
36146- static atomic_t device_num = ATOMIC_INIT(0);
36147+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
36148
36149 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
36150- atomic_inc_return(&device_num));
36151+ atomic_inc_return_unchecked(&device_num));
36152
36153 child_device_obj->device.bus = &hv_bus;
36154 child_device_obj->device.parent = &hv_acpi_dev->dev;
36155diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
36156index 1672e2a..4a6297c 100644
36157--- a/drivers/hwmon/acpi_power_meter.c
36158+++ b/drivers/hwmon/acpi_power_meter.c
36159@@ -117,7 +117,7 @@ struct sensor_template {
36160 struct device_attribute *devattr,
36161 const char *buf, size_t count);
36162 int index;
36163-};
36164+} __do_const;
36165
36166 /* Averaging interval */
36167 static int update_avg_interval(struct acpi_power_meter_resource *resource)
36168@@ -629,7 +629,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
36169 struct sensor_template *attrs)
36170 {
36171 struct device *dev = &resource->acpi_dev->dev;
36172- struct sensor_device_attribute *sensors =
36173+ sensor_device_attribute_no_const *sensors =
36174 &resource->sensors[resource->num_sensors];
36175 int res = 0;
36176
36177diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
36178index b41baff..4953e4d 100644
36179--- a/drivers/hwmon/applesmc.c
36180+++ b/drivers/hwmon/applesmc.c
36181@@ -1084,7 +1084,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
36182 {
36183 struct applesmc_node_group *grp;
36184 struct applesmc_dev_attr *node;
36185- struct attribute *attr;
36186+ attribute_no_const *attr;
36187 int ret, i;
36188
36189 for (grp = groups; grp->format; grp++) {
36190diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
36191index 56dbcfb..9874bf1 100644
36192--- a/drivers/hwmon/asus_atk0110.c
36193+++ b/drivers/hwmon/asus_atk0110.c
36194@@ -152,10 +152,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
36195 struct atk_sensor_data {
36196 struct list_head list;
36197 struct atk_data *data;
36198- struct device_attribute label_attr;
36199- struct device_attribute input_attr;
36200- struct device_attribute limit1_attr;
36201- struct device_attribute limit2_attr;
36202+ device_attribute_no_const label_attr;
36203+ device_attribute_no_const input_attr;
36204+ device_attribute_no_const limit1_attr;
36205+ device_attribute_no_const limit2_attr;
36206 char label_attr_name[ATTR_NAME_SIZE];
36207 char input_attr_name[ATTR_NAME_SIZE];
36208 char limit1_attr_name[ATTR_NAME_SIZE];
36209@@ -275,7 +275,7 @@ static ssize_t atk_name_show(struct device *dev,
36210 static struct device_attribute atk_name_attr =
36211 __ATTR(name, 0444, atk_name_show, NULL);
36212
36213-static void atk_init_attribute(struct device_attribute *attr, char *name,
36214+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
36215 sysfs_show_func show)
36216 {
36217 sysfs_attr_init(&attr->attr);
36218diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
36219index d64923d..72591e8 100644
36220--- a/drivers/hwmon/coretemp.c
36221+++ b/drivers/hwmon/coretemp.c
36222@@ -790,7 +790,7 @@ static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb,
36223 return NOTIFY_OK;
36224 }
36225
36226-static struct notifier_block coretemp_cpu_notifier __refdata = {
36227+static struct notifier_block coretemp_cpu_notifier = {
36228 .notifier_call = coretemp_cpu_callback,
36229 };
36230
36231diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
36232index a14f634..2916ee2 100644
36233--- a/drivers/hwmon/ibmaem.c
36234+++ b/drivers/hwmon/ibmaem.c
36235@@ -925,7 +925,7 @@ static int aem_register_sensors(struct aem_data *data,
36236 struct aem_rw_sensor_template *rw)
36237 {
36238 struct device *dev = &data->pdev->dev;
36239- struct sensor_device_attribute *sensors = data->sensors;
36240+ sensor_device_attribute_no_const *sensors = data->sensors;
36241 int err;
36242
36243 /* Set up read-only sensors */
36244diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
36245index 7d19b1b..8fdaaac 100644
36246--- a/drivers/hwmon/pmbus/pmbus_core.c
36247+++ b/drivers/hwmon/pmbus/pmbus_core.c
36248@@ -811,7 +811,7 @@ static ssize_t pmbus_show_label(struct device *dev,
36249
36250 #define PMBUS_ADD_ATTR(data, _name, _idx, _mode, _type, _show, _set) \
36251 do { \
36252- struct sensor_device_attribute *a \
36253+ sensor_device_attribute_no_const *a \
36254 = &data->_type##s[data->num_##_type##s].attribute; \
36255 BUG_ON(data->num_attributes >= data->max_attributes); \
36256 sysfs_attr_init(&a->dev_attr.attr); \
36257diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
36258index 8047fed..1e956f0 100644
36259--- a/drivers/hwmon/sht15.c
36260+++ b/drivers/hwmon/sht15.c
36261@@ -169,7 +169,7 @@ struct sht15_data {
36262 int supply_uV;
36263 bool supply_uV_valid;
36264 struct work_struct update_supply_work;
36265- atomic_t interrupt_handled;
36266+ atomic_unchecked_t interrupt_handled;
36267 };
36268
36269 /**
36270@@ -512,13 +512,13 @@ static int sht15_measurement(struct sht15_data *data,
36271 return ret;
36272
36273 gpio_direction_input(data->pdata->gpio_data);
36274- atomic_set(&data->interrupt_handled, 0);
36275+ atomic_set_unchecked(&data->interrupt_handled, 0);
36276
36277 enable_irq(gpio_to_irq(data->pdata->gpio_data));
36278 if (gpio_get_value(data->pdata->gpio_data) == 0) {
36279 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
36280 /* Only relevant if the interrupt hasn't occurred. */
36281- if (!atomic_read(&data->interrupt_handled))
36282+ if (!atomic_read_unchecked(&data->interrupt_handled))
36283 schedule_work(&data->read_work);
36284 }
36285 ret = wait_event_timeout(data->wait_queue,
36286@@ -785,7 +785,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
36287
36288 /* First disable the interrupt */
36289 disable_irq_nosync(irq);
36290- atomic_inc(&data->interrupt_handled);
36291+ atomic_inc_unchecked(&data->interrupt_handled);
36292 /* Then schedule a reading work struct */
36293 if (data->state != SHT15_READING_NOTHING)
36294 schedule_work(&data->read_work);
36295@@ -807,11 +807,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
36296 * If not, then start the interrupt again - care here as could
36297 * have gone low in meantime so verify it hasn't!
36298 */
36299- atomic_set(&data->interrupt_handled, 0);
36300+ atomic_set_unchecked(&data->interrupt_handled, 0);
36301 enable_irq(gpio_to_irq(data->pdata->gpio_data));
36302 /* If still not occurred or another handler was scheduled */
36303 if (gpio_get_value(data->pdata->gpio_data)
36304- || atomic_read(&data->interrupt_handled))
36305+ || atomic_read_unchecked(&data->interrupt_handled))
36306 return;
36307 }
36308
36309diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
36310index 76f157b..9c0db1b 100644
36311--- a/drivers/hwmon/via-cputemp.c
36312+++ b/drivers/hwmon/via-cputemp.c
36313@@ -296,7 +296,7 @@ static int __cpuinit via_cputemp_cpu_callback(struct notifier_block *nfb,
36314 return NOTIFY_OK;
36315 }
36316
36317-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
36318+static struct notifier_block via_cputemp_cpu_notifier = {
36319 .notifier_call = via_cputemp_cpu_callback,
36320 };
36321
36322diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
36323index 378fcb5..5e91fa8 100644
36324--- a/drivers/i2c/busses/i2c-amd756-s4882.c
36325+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
36326@@ -43,7 +43,7 @@
36327 extern struct i2c_adapter amd756_smbus;
36328
36329 static struct i2c_adapter *s4882_adapter;
36330-static struct i2c_algorithm *s4882_algo;
36331+static i2c_algorithm_no_const *s4882_algo;
36332
36333 /* Wrapper access functions for multiplexed SMBus */
36334 static DEFINE_MUTEX(amd756_lock);
36335diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
36336index 29015eb..af2d8e9 100644
36337--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
36338+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
36339@@ -41,7 +41,7 @@
36340 extern struct i2c_adapter *nforce2_smbus;
36341
36342 static struct i2c_adapter *s4985_adapter;
36343-static struct i2c_algorithm *s4985_algo;
36344+static i2c_algorithm_no_const *s4985_algo;
36345
36346 /* Wrapper access functions for multiplexed SMBus */
36347 static DEFINE_MUTEX(nforce2_lock);
36348diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
36349index 8126824..55a2798 100644
36350--- a/drivers/ide/ide-cd.c
36351+++ b/drivers/ide/ide-cd.c
36352@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
36353 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
36354 if ((unsigned long)buf & alignment
36355 || blk_rq_bytes(rq) & q->dma_pad_mask
36356- || object_is_on_stack(buf))
36357+ || object_starts_on_stack(buf))
36358 drive->dma = 0;
36359 }
36360 }
36361diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
36362index 8848f16..f8e6dd8 100644
36363--- a/drivers/iio/industrialio-core.c
36364+++ b/drivers/iio/industrialio-core.c
36365@@ -506,7 +506,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
36366 }
36367
36368 static
36369-int __iio_device_attr_init(struct device_attribute *dev_attr,
36370+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
36371 const char *postfix,
36372 struct iio_chan_spec const *chan,
36373 ssize_t (*readfunc)(struct device *dev,
36374diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
36375index 394fea2..c833880 100644
36376--- a/drivers/infiniband/core/cm.c
36377+++ b/drivers/infiniband/core/cm.c
36378@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
36379
36380 struct cm_counter_group {
36381 struct kobject obj;
36382- atomic_long_t counter[CM_ATTR_COUNT];
36383+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
36384 };
36385
36386 struct cm_counter_attribute {
36387@@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
36388 struct ib_mad_send_buf *msg = NULL;
36389 int ret;
36390
36391- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36392+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36393 counter[CM_REQ_COUNTER]);
36394
36395 /* Quick state check to discard duplicate REQs. */
36396@@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
36397 if (!cm_id_priv)
36398 return;
36399
36400- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36401+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36402 counter[CM_REP_COUNTER]);
36403 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
36404 if (ret)
36405@@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
36406 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
36407 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
36408 spin_unlock_irq(&cm_id_priv->lock);
36409- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36410+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36411 counter[CM_RTU_COUNTER]);
36412 goto out;
36413 }
36414@@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
36415 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
36416 dreq_msg->local_comm_id);
36417 if (!cm_id_priv) {
36418- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36419+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36420 counter[CM_DREQ_COUNTER]);
36421 cm_issue_drep(work->port, work->mad_recv_wc);
36422 return -EINVAL;
36423@@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
36424 case IB_CM_MRA_REP_RCVD:
36425 break;
36426 case IB_CM_TIMEWAIT:
36427- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36428+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36429 counter[CM_DREQ_COUNTER]);
36430 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
36431 goto unlock;
36432@@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
36433 cm_free_msg(msg);
36434 goto deref;
36435 case IB_CM_DREQ_RCVD:
36436- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36437+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36438 counter[CM_DREQ_COUNTER]);
36439 goto unlock;
36440 default:
36441@@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
36442 ib_modify_mad(cm_id_priv->av.port->mad_agent,
36443 cm_id_priv->msg, timeout)) {
36444 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
36445- atomic_long_inc(&work->port->
36446+ atomic_long_inc_unchecked(&work->port->
36447 counter_group[CM_RECV_DUPLICATES].
36448 counter[CM_MRA_COUNTER]);
36449 goto out;
36450@@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
36451 break;
36452 case IB_CM_MRA_REQ_RCVD:
36453 case IB_CM_MRA_REP_RCVD:
36454- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36455+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36456 counter[CM_MRA_COUNTER]);
36457 /* fall through */
36458 default:
36459@@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
36460 case IB_CM_LAP_IDLE:
36461 break;
36462 case IB_CM_MRA_LAP_SENT:
36463- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36464+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36465 counter[CM_LAP_COUNTER]);
36466 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
36467 goto unlock;
36468@@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
36469 cm_free_msg(msg);
36470 goto deref;
36471 case IB_CM_LAP_RCVD:
36472- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36473+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36474 counter[CM_LAP_COUNTER]);
36475 goto unlock;
36476 default:
36477@@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
36478 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
36479 if (cur_cm_id_priv) {
36480 spin_unlock_irq(&cm.lock);
36481- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36482+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36483 counter[CM_SIDR_REQ_COUNTER]);
36484 goto out; /* Duplicate message. */
36485 }
36486@@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
36487 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
36488 msg->retries = 1;
36489
36490- atomic_long_add(1 + msg->retries,
36491+ atomic_long_add_unchecked(1 + msg->retries,
36492 &port->counter_group[CM_XMIT].counter[attr_index]);
36493 if (msg->retries)
36494- atomic_long_add(msg->retries,
36495+ atomic_long_add_unchecked(msg->retries,
36496 &port->counter_group[CM_XMIT_RETRIES].
36497 counter[attr_index]);
36498
36499@@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
36500 }
36501
36502 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
36503- atomic_long_inc(&port->counter_group[CM_RECV].
36504+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
36505 counter[attr_id - CM_ATTR_ID_OFFSET]);
36506
36507 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
36508@@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
36509 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
36510
36511 return sprintf(buf, "%ld\n",
36512- atomic_long_read(&group->counter[cm_attr->index]));
36513+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
36514 }
36515
36516 static const struct sysfs_ops cm_counter_ops = {
36517diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
36518index 176c8f9..2627b62 100644
36519--- a/drivers/infiniband/core/fmr_pool.c
36520+++ b/drivers/infiniband/core/fmr_pool.c
36521@@ -98,8 +98,8 @@ struct ib_fmr_pool {
36522
36523 struct task_struct *thread;
36524
36525- atomic_t req_ser;
36526- atomic_t flush_ser;
36527+ atomic_unchecked_t req_ser;
36528+ atomic_unchecked_t flush_ser;
36529
36530 wait_queue_head_t force_wait;
36531 };
36532@@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
36533 struct ib_fmr_pool *pool = pool_ptr;
36534
36535 do {
36536- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
36537+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
36538 ib_fmr_batch_release(pool);
36539
36540- atomic_inc(&pool->flush_ser);
36541+ atomic_inc_unchecked(&pool->flush_ser);
36542 wake_up_interruptible(&pool->force_wait);
36543
36544 if (pool->flush_function)
36545@@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
36546 }
36547
36548 set_current_state(TASK_INTERRUPTIBLE);
36549- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
36550+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
36551 !kthread_should_stop())
36552 schedule();
36553 __set_current_state(TASK_RUNNING);
36554@@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
36555 pool->dirty_watermark = params->dirty_watermark;
36556 pool->dirty_len = 0;
36557 spin_lock_init(&pool->pool_lock);
36558- atomic_set(&pool->req_ser, 0);
36559- atomic_set(&pool->flush_ser, 0);
36560+ atomic_set_unchecked(&pool->req_ser, 0);
36561+ atomic_set_unchecked(&pool->flush_ser, 0);
36562 init_waitqueue_head(&pool->force_wait);
36563
36564 pool->thread = kthread_run(ib_fmr_cleanup_thread,
36565@@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
36566 }
36567 spin_unlock_irq(&pool->pool_lock);
36568
36569- serial = atomic_inc_return(&pool->req_ser);
36570+ serial = atomic_inc_return_unchecked(&pool->req_ser);
36571 wake_up_process(pool->thread);
36572
36573 if (wait_event_interruptible(pool->force_wait,
36574- atomic_read(&pool->flush_ser) - serial >= 0))
36575+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
36576 return -EINTR;
36577
36578 return 0;
36579@@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
36580 } else {
36581 list_add_tail(&fmr->list, &pool->dirty_list);
36582 if (++pool->dirty_len >= pool->dirty_watermark) {
36583- atomic_inc(&pool->req_ser);
36584+ atomic_inc_unchecked(&pool->req_ser);
36585 wake_up_process(pool->thread);
36586 }
36587 }
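The two serial numbers converted above, req_ser and flush_ser, are free-running and are only ever compared by signed difference, which stays correct across wraparound; that is exactly why they can become atomic_unchecked_t without losing anything. A user-space sketch of the comparison idiom:

    /* Wraparound-safe serial comparison, the pattern behind
     * flush_ser/req_ser above (illustrative). */
    #include <stdint.h>
    #include <stdio.h>

    static int serial_before(uint32_t a, uint32_t b)
    {
        return (int32_t)(a - b) < 0;  /* correct even when counters wrap */
    }

    int main(void)
    {
        uint32_t req = UINT32_MAX;    /* about to wrap */
        uint32_t flush = req;

        req++;                        /* wraps to 0 */
        printf("%d\n", serial_before(flush, req));  /* 1: flush is behind */
        return 0;
    }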
36588diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
36589index afd8179..598063f 100644
36590--- a/drivers/infiniband/hw/cxgb4/mem.c
36591+++ b/drivers/infiniband/hw/cxgb4/mem.c
36592@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
36593 int err;
36594 struct fw_ri_tpte tpt;
36595 u32 stag_idx;
36596- static atomic_t key;
36597+ static atomic_unchecked_t key;
36598
36599 if (c4iw_fatal_error(rdev))
36600 return -EIO;
36601@@ -139,7 +139,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
36602 if (rdev->stats.stag.cur > rdev->stats.stag.max)
36603 rdev->stats.stag.max = rdev->stats.stag.cur;
36604 mutex_unlock(&rdev->stats.lock);
36605- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
36606+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
36607 }
36608 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
36609 __func__, stag_state, type, pdid, stag_idx);
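In write_tpt_entry() above, only the low 8 bits of the static key counter ever reach the stag, so the counter is expected to wrap and becomes atomic_unchecked_t. A user-space sketch of the tag layout, index in the high bits and a free-running key in the low byte:

    #include <inttypes.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static atomic_uint key;

    static uint32_t make_stag(uint32_t stag_idx)
    {
        /* kernel form: (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff) */
        return (stag_idx << 8) | (atomic_fetch_add(&key, 1) & 0xffu);
    }

    int main(void)
    {
        printf("0x%08" PRIx32 "\n", make_stag(0x1234));  /* 0x00123400 first call */
        return 0;
    }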
36610diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
36611index 79b3dbc..96e5fcc 100644
36612--- a/drivers/infiniband/hw/ipath/ipath_rc.c
36613+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
36614@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
36615 struct ib_atomic_eth *ateth;
36616 struct ipath_ack_entry *e;
36617 u64 vaddr;
36618- atomic64_t *maddr;
36619+ atomic64_unchecked_t *maddr;
36620 u64 sdata;
36621 u32 rkey;
36622 u8 next;
36623@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
36624 IB_ACCESS_REMOTE_ATOMIC)))
36625 goto nack_acc_unlck;
36626 /* Perform atomic OP and save result. */
36627- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
36628+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
36629 sdata = be64_to_cpu(ateth->swap_data);
36630 e = &qp->s_ack_queue[qp->r_head_ack_queue];
36631 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
36632- (u64) atomic64_add_return(sdata, maddr) - sdata :
36633+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
36634 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
36635 be64_to_cpu(ateth->compare_data),
36636 sdata);
36637diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
36638index 1f95bba..9530f87 100644
36639--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
36640+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
36641@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
36642 unsigned long flags;
36643 struct ib_wc wc;
36644 u64 sdata;
36645- atomic64_t *maddr;
36646+ atomic64_unchecked_t *maddr;
36647 enum ib_wc_status send_status;
36648
36649 /*
36650@@ -382,11 +382,11 @@ again:
36651 IB_ACCESS_REMOTE_ATOMIC)))
36652 goto acc_err;
36653 /* Perform atomic OP and save result. */
36654- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
36655+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
36656 sdata = wqe->wr.wr.atomic.compare_add;
36657 *(u64 *) sqp->s_sge.sge.vaddr =
36658 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
36659- (u64) atomic64_add_return(sdata, maddr) - sdata :
36660+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
36661 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
36662 sdata, wqe->wr.wr.atomic.swap);
36663 goto send_comp;
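Both ipath hunks implement the same pair of InfiniBand atomics on a 64-bit word of host memory: FETCH_ADD via atomic64_add_return() minus the addend (to recover the old value), and COMPARE_SWAP via cmpxchg(). A user-space sketch of the two operations with C11 atomics:

    #include <inttypes.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t ib_fetch_add(_Atomic uint64_t *maddr, uint64_t sdata)
    {
        /* kernel form: atomic64_add_return(sdata, maddr) - sdata */
        return atomic_fetch_add(maddr, sdata);        /* returns the old value */
    }

    static uint64_t ib_compare_swap(_Atomic uint64_t *maddr, uint64_t cmp, uint64_t swap)
    {
        uint64_t old = cmp;

        atomic_compare_exchange_strong(maddr, &old, swap);
        return old;                                   /* old value, like cmpxchg() */
    }

    int main(void)
    {
        _Atomic uint64_t word = 5;

        printf("%" PRIu64 "\n", ib_fetch_add(&word, 3));        /* 5, word now 8 */
        printf("%" PRIu64 "\n", ib_compare_swap(&word, 8, 42)); /* 8, word now 42 */
        return 0;
    }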
36664diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
36665index 9d3e5c1..d9afe4a 100644
36666--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
36667+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
36668@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
36669 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
36670 }
36671
36672-int mthca_QUERY_FW(struct mthca_dev *dev)
36673+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
36674 {
36675 struct mthca_mailbox *mailbox;
36676 u32 *outbox;
36677diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
36678index ed9a989..e0c5871 100644
36679--- a/drivers/infiniband/hw/mthca/mthca_mr.c
36680+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
36681@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
36682 return key;
36683 }
36684
36685-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
36686+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
36687 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
36688 {
36689 struct mthca_mailbox *mailbox;
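The two mthca functions above are tagged __intentional_overflow(-1), an annotation consumed by this series' size_overflow GCC plugin to exempt their arithmetic from instrumentation. Presumably the marker expands to nothing when the plugin is not active; a sketch of that convention (the attribute name under the plugin, and the SIZE_OVERFLOW_PLUGIN guard, are assumptions for illustration):

    #ifdef SIZE_OVERFLOW_PLUGIN
    #define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    #define __intentional_overflow(...)   /* no plugin: the marker vanishes */
    #endif

    static int __intentional_overflow(-1) wrap_u8(int x)
    {
        return (x + 1) & 0xff;   /* arithmetic the plugin would otherwise flag */
    }

    int main(void)
    {
        return wrap_u8(255);     /* 0 */
    }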
36690diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
36691index 5b152a3..c1f3e83 100644
36692--- a/drivers/infiniband/hw/nes/nes.c
36693+++ b/drivers/infiniband/hw/nes/nes.c
36694@@ -98,7 +98,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
36695 LIST_HEAD(nes_adapter_list);
36696 static LIST_HEAD(nes_dev_list);
36697
36698-atomic_t qps_destroyed;
36699+atomic_unchecked_t qps_destroyed;
36700
36701 static unsigned int ee_flsh_adapter;
36702 static unsigned int sysfs_nonidx_addr;
36703@@ -267,7 +267,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
36704 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
36705 struct nes_adapter *nesadapter = nesdev->nesadapter;
36706
36707- atomic_inc(&qps_destroyed);
36708+ atomic_inc_unchecked(&qps_destroyed);
36709
36710 /* Free the control structures */
36711
36712diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
36713index 33cc589..3bd6538 100644
36714--- a/drivers/infiniband/hw/nes/nes.h
36715+++ b/drivers/infiniband/hw/nes/nes.h
36716@@ -177,17 +177,17 @@ extern unsigned int nes_debug_level;
36717 extern unsigned int wqm_quanta;
36718 extern struct list_head nes_adapter_list;
36719
36720-extern atomic_t cm_connects;
36721-extern atomic_t cm_accepts;
36722-extern atomic_t cm_disconnects;
36723-extern atomic_t cm_closes;
36724-extern atomic_t cm_connecteds;
36725-extern atomic_t cm_connect_reqs;
36726-extern atomic_t cm_rejects;
36727-extern atomic_t mod_qp_timouts;
36728-extern atomic_t qps_created;
36729-extern atomic_t qps_destroyed;
36730-extern atomic_t sw_qps_destroyed;
36731+extern atomic_unchecked_t cm_connects;
36732+extern atomic_unchecked_t cm_accepts;
36733+extern atomic_unchecked_t cm_disconnects;
36734+extern atomic_unchecked_t cm_closes;
36735+extern atomic_unchecked_t cm_connecteds;
36736+extern atomic_unchecked_t cm_connect_reqs;
36737+extern atomic_unchecked_t cm_rejects;
36738+extern atomic_unchecked_t mod_qp_timouts;
36739+extern atomic_unchecked_t qps_created;
36740+extern atomic_unchecked_t qps_destroyed;
36741+extern atomic_unchecked_t sw_qps_destroyed;
36742 extern u32 mh_detected;
36743 extern u32 mh_pauses_sent;
36744 extern u32 cm_packets_sent;
36745@@ -196,16 +196,16 @@ extern u32 cm_packets_created;
36746 extern u32 cm_packets_received;
36747 extern u32 cm_packets_dropped;
36748 extern u32 cm_packets_retrans;
36749-extern atomic_t cm_listens_created;
36750-extern atomic_t cm_listens_destroyed;
36751+extern atomic_unchecked_t cm_listens_created;
36752+extern atomic_unchecked_t cm_listens_destroyed;
36753 extern u32 cm_backlog_drops;
36754-extern atomic_t cm_loopbacks;
36755-extern atomic_t cm_nodes_created;
36756-extern atomic_t cm_nodes_destroyed;
36757-extern atomic_t cm_accel_dropped_pkts;
36758-extern atomic_t cm_resets_recvd;
36759-extern atomic_t pau_qps_created;
36760-extern atomic_t pau_qps_destroyed;
36761+extern atomic_unchecked_t cm_loopbacks;
36762+extern atomic_unchecked_t cm_nodes_created;
36763+extern atomic_unchecked_t cm_nodes_destroyed;
36764+extern atomic_unchecked_t cm_accel_dropped_pkts;
36765+extern atomic_unchecked_t cm_resets_recvd;
36766+extern atomic_unchecked_t pau_qps_created;
36767+extern atomic_unchecked_t pau_qps_destroyed;
36768
36769 extern u32 int_mod_timer_init;
36770 extern u32 int_mod_cq_depth_256;
36771diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
36772index 22ea67e..dcbe3bc 100644
36773--- a/drivers/infiniband/hw/nes/nes_cm.c
36774+++ b/drivers/infiniband/hw/nes/nes_cm.c
36775@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
36776 u32 cm_packets_retrans;
36777 u32 cm_packets_created;
36778 u32 cm_packets_received;
36779-atomic_t cm_listens_created;
36780-atomic_t cm_listens_destroyed;
36781+atomic_unchecked_t cm_listens_created;
36782+atomic_unchecked_t cm_listens_destroyed;
36783 u32 cm_backlog_drops;
36784-atomic_t cm_loopbacks;
36785-atomic_t cm_nodes_created;
36786-atomic_t cm_nodes_destroyed;
36787-atomic_t cm_accel_dropped_pkts;
36788-atomic_t cm_resets_recvd;
36789+atomic_unchecked_t cm_loopbacks;
36790+atomic_unchecked_t cm_nodes_created;
36791+atomic_unchecked_t cm_nodes_destroyed;
36792+atomic_unchecked_t cm_accel_dropped_pkts;
36793+atomic_unchecked_t cm_resets_recvd;
36794
36795 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
36796 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
36797@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
36798
36799 static struct nes_cm_core *g_cm_core;
36800
36801-atomic_t cm_connects;
36802-atomic_t cm_accepts;
36803-atomic_t cm_disconnects;
36804-atomic_t cm_closes;
36805-atomic_t cm_connecteds;
36806-atomic_t cm_connect_reqs;
36807-atomic_t cm_rejects;
36808+atomic_unchecked_t cm_connects;
36809+atomic_unchecked_t cm_accepts;
36810+atomic_unchecked_t cm_disconnects;
36811+atomic_unchecked_t cm_closes;
36812+atomic_unchecked_t cm_connecteds;
36813+atomic_unchecked_t cm_connect_reqs;
36814+atomic_unchecked_t cm_rejects;
36815
36816 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
36817 {
36818@@ -1272,7 +1272,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
36819 kfree(listener);
36820 listener = NULL;
36821 ret = 0;
36822- atomic_inc(&cm_listens_destroyed);
36823+ atomic_inc_unchecked(&cm_listens_destroyed);
36824 } else {
36825 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
36826 }
36827@@ -1466,7 +1466,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
36828 cm_node->rem_mac);
36829
36830 add_hte_node(cm_core, cm_node);
36831- atomic_inc(&cm_nodes_created);
36832+ atomic_inc_unchecked(&cm_nodes_created);
36833
36834 return cm_node;
36835 }
36836@@ -1524,7 +1524,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
36837 }
36838
36839 atomic_dec(&cm_core->node_cnt);
36840- atomic_inc(&cm_nodes_destroyed);
36841+ atomic_inc_unchecked(&cm_nodes_destroyed);
36842 nesqp = cm_node->nesqp;
36843 if (nesqp) {
36844 nesqp->cm_node = NULL;
36845@@ -1588,7 +1588,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
36846
36847 static void drop_packet(struct sk_buff *skb)
36848 {
36849- atomic_inc(&cm_accel_dropped_pkts);
36850+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
36851 dev_kfree_skb_any(skb);
36852 }
36853
36854@@ -1651,7 +1651,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
36855 {
36856
36857 int reset = 0; /* whether to send reset in case of err.. */
36858- atomic_inc(&cm_resets_recvd);
36859+ atomic_inc_unchecked(&cm_resets_recvd);
36860 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
36861 " refcnt=%d\n", cm_node, cm_node->state,
36862 atomic_read(&cm_node->ref_count));
36863@@ -2292,7 +2292,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
36864 rem_ref_cm_node(cm_node->cm_core, cm_node);
36865 return NULL;
36866 }
36867- atomic_inc(&cm_loopbacks);
36868+ atomic_inc_unchecked(&cm_loopbacks);
36869 loopbackremotenode->loopbackpartner = cm_node;
36870 loopbackremotenode->tcp_cntxt.rcv_wscale =
36871 NES_CM_DEFAULT_RCV_WND_SCALE;
36872@@ -2567,7 +2567,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
36873 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
36874 else {
36875 rem_ref_cm_node(cm_core, cm_node);
36876- atomic_inc(&cm_accel_dropped_pkts);
36877+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
36878 dev_kfree_skb_any(skb);
36879 }
36880 break;
36881@@ -2875,7 +2875,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
36882
36883 if ((cm_id) && (cm_id->event_handler)) {
36884 if (issue_disconn) {
36885- atomic_inc(&cm_disconnects);
36886+ atomic_inc_unchecked(&cm_disconnects);
36887 cm_event.event = IW_CM_EVENT_DISCONNECT;
36888 cm_event.status = disconn_status;
36889 cm_event.local_addr = cm_id->local_addr;
36890@@ -2897,7 +2897,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
36891 }
36892
36893 if (issue_close) {
36894- atomic_inc(&cm_closes);
36895+ atomic_inc_unchecked(&cm_closes);
36896 nes_disconnect(nesqp, 1);
36897
36898 cm_id->provider_data = nesqp;
36899@@ -3033,7 +3033,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
36900
36901 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
36902 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
36903- atomic_inc(&cm_accepts);
36904+ atomic_inc_unchecked(&cm_accepts);
36905
36906 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
36907 netdev_refcnt_read(nesvnic->netdev));
36908@@ -3228,7 +3228,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
36909 struct nes_cm_core *cm_core;
36910 u8 *start_buff;
36911
36912- atomic_inc(&cm_rejects);
36913+ atomic_inc_unchecked(&cm_rejects);
36914 cm_node = (struct nes_cm_node *)cm_id->provider_data;
36915 loopback = cm_node->loopbackpartner;
36916 cm_core = cm_node->cm_core;
36917@@ -3288,7 +3288,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
36918 ntohl(cm_id->local_addr.sin_addr.s_addr),
36919 ntohs(cm_id->local_addr.sin_port));
36920
36921- atomic_inc(&cm_connects);
36922+ atomic_inc_unchecked(&cm_connects);
36923 nesqp->active_conn = 1;
36924
36925 /* cache the cm_id in the qp */
36926@@ -3398,7 +3398,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
36927 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
36928 return err;
36929 }
36930- atomic_inc(&cm_listens_created);
36931+ atomic_inc_unchecked(&cm_listens_created);
36932 }
36933
36934 cm_id->add_ref(cm_id);
36935@@ -3499,7 +3499,7 @@ static void cm_event_connected(struct nes_cm_event *event)
36936
36937 if (nesqp->destroyed)
36938 return;
36939- atomic_inc(&cm_connecteds);
36940+ atomic_inc_unchecked(&cm_connecteds);
36941 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
36942 " local port 0x%04X. jiffies = %lu.\n",
36943 nesqp->hwqp.qp_id,
36944@@ -3679,7 +3679,7 @@ static void cm_event_reset(struct nes_cm_event *event)
36945
36946 cm_id->add_ref(cm_id);
36947 ret = cm_id->event_handler(cm_id, &cm_event);
36948- atomic_inc(&cm_closes);
36949+ atomic_inc_unchecked(&cm_closes);
36950 cm_event.event = IW_CM_EVENT_CLOSE;
36951 cm_event.status = 0;
36952 cm_event.provider_data = cm_id->provider_data;
36953@@ -3715,7 +3715,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
36954 return;
36955 cm_id = cm_node->cm_id;
36956
36957- atomic_inc(&cm_connect_reqs);
36958+ atomic_inc_unchecked(&cm_connect_reqs);
36959 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
36960 cm_node, cm_id, jiffies);
36961
36962@@ -3755,7 +3755,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
36963 return;
36964 cm_id = cm_node->cm_id;
36965
36966- atomic_inc(&cm_connect_reqs);
36967+ atomic_inc_unchecked(&cm_connect_reqs);
36968 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
36969 cm_node, cm_id, jiffies);
36970
36971diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
36972index 4166452..fc952c3 100644
36973--- a/drivers/infiniband/hw/nes/nes_mgt.c
36974+++ b/drivers/infiniband/hw/nes/nes_mgt.c
36975@@ -40,8 +40,8 @@
36976 #include "nes.h"
36977 #include "nes_mgt.h"
36978
36979-atomic_t pau_qps_created;
36980-atomic_t pau_qps_destroyed;
36981+atomic_unchecked_t pau_qps_created;
36982+atomic_unchecked_t pau_qps_destroyed;
36983
36984 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
36985 {
36986@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
36987 {
36988 struct sk_buff *skb;
36989 unsigned long flags;
36990- atomic_inc(&pau_qps_destroyed);
36991+ atomic_inc_unchecked(&pau_qps_destroyed);
36992
36993 /* Free packets that have not yet been forwarded */
36994 /* Lock is acquired by skb_dequeue when removing the skb */
36995@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
36996 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
36997 skb_queue_head_init(&nesqp->pau_list);
36998 spin_lock_init(&nesqp->pau_lock);
36999- atomic_inc(&pau_qps_created);
37000+ atomic_inc_unchecked(&pau_qps_created);
37001 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
37002 }
37003
37004diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
37005index 9542e16..a008c40 100644
37006--- a/drivers/infiniband/hw/nes/nes_nic.c
37007+++ b/drivers/infiniband/hw/nes/nes_nic.c
37008@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
37009 target_stat_values[++index] = mh_detected;
37010 target_stat_values[++index] = mh_pauses_sent;
37011 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
37012- target_stat_values[++index] = atomic_read(&cm_connects);
37013- target_stat_values[++index] = atomic_read(&cm_accepts);
37014- target_stat_values[++index] = atomic_read(&cm_disconnects);
37015- target_stat_values[++index] = atomic_read(&cm_connecteds);
37016- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
37017- target_stat_values[++index] = atomic_read(&cm_rejects);
37018- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
37019- target_stat_values[++index] = atomic_read(&qps_created);
37020- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
37021- target_stat_values[++index] = atomic_read(&qps_destroyed);
37022- target_stat_values[++index] = atomic_read(&cm_closes);
37023+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
37024+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
37025+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
37026+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
37027+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
37028+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
37029+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
37030+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
37031+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
37032+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
37033+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
37034 target_stat_values[++index] = cm_packets_sent;
37035 target_stat_values[++index] = cm_packets_bounced;
37036 target_stat_values[++index] = cm_packets_created;
37037 target_stat_values[++index] = cm_packets_received;
37038 target_stat_values[++index] = cm_packets_dropped;
37039 target_stat_values[++index] = cm_packets_retrans;
37040- target_stat_values[++index] = atomic_read(&cm_listens_created);
37041- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
37042+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
37043+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
37044 target_stat_values[++index] = cm_backlog_drops;
37045- target_stat_values[++index] = atomic_read(&cm_loopbacks);
37046- target_stat_values[++index] = atomic_read(&cm_nodes_created);
37047- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
37048- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
37049- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
37050+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
37051+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
37052+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
37053+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
37054+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
37055 target_stat_values[++index] = nesadapter->free_4kpbl;
37056 target_stat_values[++index] = nesadapter->free_256pbl;
37057 target_stat_values[++index] = int_mod_timer_init;
37058 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
37059 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
37060 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
37061- target_stat_values[++index] = atomic_read(&pau_qps_created);
37062- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
37063+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
37064+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
37065 }
37066
37067 /**
37068diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
37069index 07e4fba..685f041 100644
37070--- a/drivers/infiniband/hw/nes/nes_verbs.c
37071+++ b/drivers/infiniband/hw/nes/nes_verbs.c
37072@@ -46,9 +46,9 @@
37073
37074 #include <rdma/ib_umem.h>
37075
37076-atomic_t mod_qp_timouts;
37077-atomic_t qps_created;
37078-atomic_t sw_qps_destroyed;
37079+atomic_unchecked_t mod_qp_timouts;
37080+atomic_unchecked_t qps_created;
37081+atomic_unchecked_t sw_qps_destroyed;
37082
37083 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
37084
37085@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
37086 if (init_attr->create_flags)
37087 return ERR_PTR(-EINVAL);
37088
37089- atomic_inc(&qps_created);
37090+ atomic_inc_unchecked(&qps_created);
37091 switch (init_attr->qp_type) {
37092 case IB_QPT_RC:
37093 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
37094@@ -1462,7 +1462,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
37095 struct iw_cm_event cm_event;
37096 int ret = 0;
37097
37098- atomic_inc(&sw_qps_destroyed);
37099+ atomic_inc_unchecked(&sw_qps_destroyed);
37100 nesqp->destroyed = 1;
37101
37102 /* Blow away the connection if it exists. */
37103diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
37104index 4d11575..3e890e5 100644
37105--- a/drivers/infiniband/hw/qib/qib.h
37106+++ b/drivers/infiniband/hw/qib/qib.h
37107@@ -51,6 +51,7 @@
37108 #include <linux/completion.h>
37109 #include <linux/kref.h>
37110 #include <linux/sched.h>
37111+#include <linux/slab.h>
37112
37113 #include "qib_common.h"
37114 #include "qib_verbs.h"
37115diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
37116index da739d9..da1c7f4 100644
37117--- a/drivers/input/gameport/gameport.c
37118+++ b/drivers/input/gameport/gameport.c
37119@@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
37120 */
37121 static void gameport_init_port(struct gameport *gameport)
37122 {
37123- static atomic_t gameport_no = ATOMIC_INIT(0);
37124+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
37125
37126 __module_get(THIS_MODULE);
37127
37128 mutex_init(&gameport->drv_mutex);
37129 device_initialize(&gameport->dev);
37130 dev_set_name(&gameport->dev, "gameport%lu",
37131- (unsigned long)atomic_inc_return(&gameport_no) - 1);
37132+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
37133 gameport->dev.bus = &gameport_bus;
37134 gameport->dev.release = gameport_release_port;
37135 if (gameport->parent)
37136diff --git a/drivers/input/input.c b/drivers/input/input.c
37137index c044699..174d71a 100644
37138--- a/drivers/input/input.c
37139+++ b/drivers/input/input.c
37140@@ -2019,7 +2019,7 @@ static void devm_input_device_unregister(struct device *dev, void *res)
37141 */
37142 int input_register_device(struct input_dev *dev)
37143 {
37144- static atomic_t input_no = ATOMIC_INIT(0);
37145+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
37146 struct input_devres *devres = NULL;
37147 struct input_handler *handler;
37148 unsigned int packet_size;
37149@@ -2074,7 +2074,7 @@ int input_register_device(struct input_dev *dev)
37150 dev->setkeycode = input_default_setkeycode;
37151
37152 dev_set_name(&dev->dev, "input%ld",
37153- (unsigned long) atomic_inc_return(&input_no) - 1);
37154+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
37155
37156 error = device_add(&dev->dev);
37157 if (error)
37158diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
37159index 04c69af..5f92d00 100644
37160--- a/drivers/input/joystick/sidewinder.c
37161+++ b/drivers/input/joystick/sidewinder.c
37162@@ -30,6 +30,7 @@
37163 #include <linux/kernel.h>
37164 #include <linux/module.h>
37165 #include <linux/slab.h>
37166+#include <linux/sched.h>
37167 #include <linux/init.h>
37168 #include <linux/input.h>
37169 #include <linux/gameport.h>
37170diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
37171index d6cbfe9..6225402 100644
37172--- a/drivers/input/joystick/xpad.c
37173+++ b/drivers/input/joystick/xpad.c
37174@@ -735,7 +735,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
37175
37176 static int xpad_led_probe(struct usb_xpad *xpad)
37177 {
37178- static atomic_t led_seq = ATOMIC_INIT(0);
37179+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
37180 long led_no;
37181 struct xpad_led *led;
37182 struct led_classdev *led_cdev;
37183@@ -748,7 +748,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
37184 if (!led)
37185 return -ENOMEM;
37186
37187- led_no = (long)atomic_inc_return(&led_seq) - 1;
37188+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
37189
37190 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
37191 led->xpad = xpad;
37192diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
37193index fe1df23..5b710f3 100644
37194--- a/drivers/input/mouse/psmouse.h
37195+++ b/drivers/input/mouse/psmouse.h
37196@@ -115,7 +115,7 @@ struct psmouse_attribute {
37197 ssize_t (*set)(struct psmouse *psmouse, void *data,
37198 const char *buf, size_t count);
37199 bool protect;
37200-};
37201+} __do_const;
37202 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
37203
37204 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
37205diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
37206index 4c842c3..590b0bf 100644
37207--- a/drivers/input/mousedev.c
37208+++ b/drivers/input/mousedev.c
37209@@ -738,7 +738,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
37210
37211 spin_unlock_irq(&client->packet_lock);
37212
37213- if (copy_to_user(buffer, data, count))
37214+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
37215 return -EFAULT;
37216
37217 return count;
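The mousedev hunk hardens the read path: the caller-controlled count is bounded by sizeof(data) before copy_to_user() runs, so an oversized request fails with -EFAULT instead of over-reading the kernel buffer. (The b1.c and icn.c hunks further down apply the same clamp to copy_from_user().) A user-space sketch of the clamp-before-copy pattern:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    static int read_packet(char *dst, size_t count)
    {
        char data[8] = "example";     /* stand-in for the packet buffer */

        if (count > sizeof(data))     /* the added bound check */
            return -EFAULT;
        memcpy(dst, data, count);     /* stands in for copy_to_user() */
        return (int)count;
    }

    int main(void)
    {
        char buf[16];

        printf("%d\n", read_packet(buf, 4));   /* 4 */
        printf("%d\n", read_packet(buf, 64));  /* -14 (EFAULT) */
        return 0;
    }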
37218diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
37219index 25fc597..558bf3b 100644
37220--- a/drivers/input/serio/serio.c
37221+++ b/drivers/input/serio/serio.c
37222@@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
37223 */
37224 static void serio_init_port(struct serio *serio)
37225 {
37226- static atomic_t serio_no = ATOMIC_INIT(0);
37227+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
37228
37229 __module_get(THIS_MODULE);
37230
37231@@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
37232 mutex_init(&serio->drv_mutex);
37233 device_initialize(&serio->dev);
37234 dev_set_name(&serio->dev, "serio%ld",
37235- (long)atomic_inc_return(&serio_no) - 1);
37236+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
37237 serio->dev.bus = &serio_bus;
37238 serio->dev.release = serio_release_port;
37239 serio->dev.groups = serio_device_attr_groups;
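gameport, input, xpad and serio above all derive device names from a static instance counter whose value is used only for uniqueness, so wraparound is harmless and the counters become atomic_unchecked_t. A user-space sketch of the naming idiom:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_ulong serio_no;

    static void name_port(char *buf, size_t len)
    {
        /* kernel form: atomic_inc_return_unchecked(&serio_no) - 1 */
        snprintf(buf, len, "serio%lu", atomic_fetch_add(&serio_no, 1));
    }

    int main(void)
    {
        char name[32];

        name_port(name, sizeof(name)); puts(name);  /* serio0 */
        name_port(name, sizeof(name)); puts(name);  /* serio1 */
        return 0;
    }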
37240diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
37241index ddbdaca..be18a78 100644
37242--- a/drivers/iommu/iommu.c
37243+++ b/drivers/iommu/iommu.c
37244@@ -554,7 +554,7 @@ static struct notifier_block iommu_bus_nb = {
37245 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
37246 {
37247 bus_register_notifier(bus, &iommu_bus_nb);
37248- bus_for_each_dev(bus, NULL, ops, add_iommu_group);
37249+ bus_for_each_dev(bus, NULL, (void *)ops, add_iommu_group);
37250 }
37251
37252 /**
37253diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
37254index 89562a8..218999b 100644
37255--- a/drivers/isdn/capi/capi.c
37256+++ b/drivers/isdn/capi/capi.c
37257@@ -81,8 +81,8 @@ struct capiminor {
37258
37259 struct capi20_appl *ap;
37260 u32 ncci;
37261- atomic_t datahandle;
37262- atomic_t msgid;
37263+ atomic_unchecked_t datahandle;
37264+ atomic_unchecked_t msgid;
37265
37266 struct tty_port port;
37267 int ttyinstop;
37268@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
37269 capimsg_setu16(s, 2, mp->ap->applid);
37270 capimsg_setu8 (s, 4, CAPI_DATA_B3);
37271 capimsg_setu8 (s, 5, CAPI_RESP);
37272- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
37273+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
37274 capimsg_setu32(s, 8, mp->ncci);
37275 capimsg_setu16(s, 12, datahandle);
37276 }
37277@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
37278 mp->outbytes -= len;
37279 spin_unlock_bh(&mp->outlock);
37280
37281- datahandle = atomic_inc_return(&mp->datahandle);
37282+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
37283 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
37284 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
37285 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
37286 capimsg_setu16(skb->data, 2, mp->ap->applid);
37287 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
37288 capimsg_setu8 (skb->data, 5, CAPI_REQ);
37289- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
37290+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
37291 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
37292 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
37293 capimsg_setu16(skb->data, 16, len); /* Data length */
37294diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
37295index 67abf3f..076b3a6 100644
37296--- a/drivers/isdn/gigaset/interface.c
37297+++ b/drivers/isdn/gigaset/interface.c
37298@@ -160,9 +160,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
37299 }
37300 tty->driver_data = cs;
37301
37302- ++cs->port.count;
37303+ atomic_inc(&cs->port.count);
37304
37305- if (cs->port.count == 1) {
37306+ if (atomic_read(&cs->port.count) == 1) {
37307 tty_port_tty_set(&cs->port, tty);
37308 tty->low_latency = 1;
37309 }
37310@@ -186,9 +186,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
37311
37312 if (!cs->connected)
37313 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
37314- else if (!cs->port.count)
37315+ else if (!atomic_read(&cs->port.count))
37316 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37317- else if (!--cs->port.count)
37318+ else if (!atomic_dec_return(&cs->port.count))
37319 tty_port_tty_set(&cs->port, NULL);
37320
37321 mutex_unlock(&cs->mutex);
37322diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
37323index 821f7ac..28d4030 100644
37324--- a/drivers/isdn/hardware/avm/b1.c
37325+++ b/drivers/isdn/hardware/avm/b1.c
37326@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
37327 }
37328 if (left) {
37329 if (t4file->user) {
37330- if (copy_from_user(buf, dp, left))
37331+ if (left > sizeof buf || copy_from_user(buf, dp, left))
37332 return -EFAULT;
37333 } else {
37334 memcpy(buf, dp, left);
37335@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
37336 }
37337 if (left) {
37338 if (config->user) {
37339- if (copy_from_user(buf, dp, left))
37340+ if (left > sizeof buf || copy_from_user(buf, dp, left))
37341 return -EFAULT;
37342 } else {
37343 memcpy(buf, dp, left);
37344diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
37345index e09dc8a..15e2efb 100644
37346--- a/drivers/isdn/i4l/isdn_tty.c
37347+++ b/drivers/isdn/i4l/isdn_tty.c
37348@@ -1513,9 +1513,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
37349
37350 #ifdef ISDN_DEBUG_MODEM_OPEN
37351 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
37352- port->count);
37353+ atomic_read(&port->count));
37354 #endif
37355- port->count++;
37356+ atomic_inc(&port->count);
37357 port->tty = tty;
37358 /*
37359 * Start up serial port
37360@@ -1559,7 +1559,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
37361 #endif
37362 return;
37363 }
37364- if ((tty->count == 1) && (port->count != 1)) {
37365+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
37366 /*
37367 * Uh, oh. tty->count is 1, which means that the tty
37368 * structure will be freed. Info->count should always
37369@@ -1568,15 +1568,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
37370 * serial port won't be shutdown.
37371 */
37372 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
37373- "info->count is %d\n", port->count);
37374- port->count = 1;
37375+ "info->count is %d\n", atomic_read(&port->count));
37376+ atomic_set(&port->count, 1);
37377 }
37378- if (--port->count < 0) {
37379+ if (atomic_dec_return(&port->count) < 0) {
37380 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
37381- info->line, port->count);
37382- port->count = 0;
37383+ info->line, atomic_read(&port->count));
37384+ atomic_set(&port->count, 0);
37385 }
37386- if (port->count) {
37387+ if (atomic_read(&port->count)) {
37388 #ifdef ISDN_DEBUG_MODEM_OPEN
37389 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
37390 #endif
37391@@ -1630,7 +1630,7 @@ isdn_tty_hangup(struct tty_struct *tty)
37392 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
37393 return;
37394 isdn_tty_shutdown(info);
37395- port->count = 0;
37396+ atomic_set(&port->count, 0);
37397 port->flags &= ~ASYNC_NORMAL_ACTIVE;
37398 port->tty = NULL;
37399 wake_up_interruptible(&port->open_wait);
37400@@ -1975,7 +1975,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
37401 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
37402 modem_info *info = &dev->mdm.info[i];
37403
37404- if (info->port.count == 0)
37405+ if (atomic_read(&info->port.count) == 0)
37406 continue;
37407 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
37408 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
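The gigaset and isdn_tty hunks convert the tty_port open count to atomic operations (the port->count field itself is changed to atomic_t elsewhere in this patch): ++ and -- become atomic_inc()/atomic_dec_return() and the readers use atomic_read(), preserving the first-open/last-close logic. A user-space sketch of that logic:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int port_count;

    static void port_open(void)
    {
        if (atomic_fetch_add(&port_count, 1) == 0)   /* first opener */
            puts("attach tty");
    }

    static void port_close(void)
    {
        if (atomic_fetch_sub(&port_count, 1) == 1)   /* last closer */
            puts("detach tty");
    }

    int main(void)
    {
        port_open(); port_open();
        port_close(); port_close();   /* prints attach, then detach */
        return 0;
    }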
37409diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
37410index e74df7c..03a03ba 100644
37411--- a/drivers/isdn/icn/icn.c
37412+++ b/drivers/isdn/icn/icn.c
37413@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
37414 if (count > len)
37415 count = len;
37416 if (user) {
37417- if (copy_from_user(msg, buf, count))
37418+ if (count > sizeof msg || copy_from_user(msg, buf, count))
37419 return -EFAULT;
37420 } else
37421 memcpy(msg, buf, count);
37422diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
37423index 6a8405d..0bd1c7e 100644
37424--- a/drivers/leds/leds-clevo-mail.c
37425+++ b/drivers/leds/leds-clevo-mail.c
37426@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
37427 * detected as working, but in reality it is not) as low as
37428 * possible.
37429 */
37430-static struct dmi_system_id __initdata clevo_mail_led_dmi_table[] = {
37431+static const struct dmi_system_id __initconst clevo_mail_led_dmi_table[] = {
37432 {
37433 .callback = clevo_mail_led_dmi_callback,
37434 .ident = "Clevo D410J",
37435diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
37436index ec9b287..65c9bf4 100644
37437--- a/drivers/leds/leds-ss4200.c
37438+++ b/drivers/leds/leds-ss4200.c
37439@@ -92,7 +92,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
37440 * detected as working, but in reality it is not) as low as
37441 * possible.
37442 */
37443-static struct dmi_system_id __initdata nas_led_whitelist[] = {
37444+static const struct dmi_system_id __initconst nas_led_whitelist[] = {
37445 {
37446 .callback = ss4200_led_dmi_callback,
37447 .ident = "Intel SS4200-E",
37448diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
37449index a5ebc00..982886f 100644
37450--- a/drivers/lguest/core.c
37451+++ b/drivers/lguest/core.c
37452@@ -92,9 +92,17 @@ static __init int map_switcher(void)
37453 * it's worked so far. The end address needs +1 because __get_vm_area
37454 * allocates an extra guard page, so we need space for that.
37455 */
37456+
37457+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
37458+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
37459+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
37460+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
37461+#else
37462 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
37463 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
37464 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
37465+#endif
37466+
37467 if (!switcher_vma) {
37468 err = -ENOMEM;
37469 printk("lguest: could not map switcher pages high\n");
37470@@ -119,7 +127,7 @@ static __init int map_switcher(void)
37471 * Now the Switcher is mapped at the right address, we can't fail!
37472 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
37473 */
37474- memcpy(switcher_vma->addr, start_switcher_text,
37475+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
37476 end_switcher_text - start_switcher_text);
37477
37478 printk(KERN_INFO "lguest: mapped switcher at %p\n",
37479diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
37480index 3b62be16..e33134a 100644
37481--- a/drivers/lguest/page_tables.c
37482+++ b/drivers/lguest/page_tables.c
37483@@ -532,7 +532,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
37484 /*:*/
37485
37486 #ifdef CONFIG_X86_PAE
37487-static void release_pmd(pmd_t *spmd)
37488+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
37489 {
37490 /* If the entry's not present, there's nothing to release. */
37491 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
37492diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
37493index 4af12e1..0e89afe 100644
37494--- a/drivers/lguest/x86/core.c
37495+++ b/drivers/lguest/x86/core.c
37496@@ -59,7 +59,7 @@ static struct {
37497 /* Offset from where switcher.S was compiled to where we've copied it */
37498 static unsigned long switcher_offset(void)
37499 {
37500- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
37501+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
37502 }
37503
37504 /* This cpu's struct lguest_pages. */
37505@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
37506 * These copies are pretty cheap, so we do them unconditionally: */
37507 /* Save the current Host top-level page directory.
37508 */
37509+
37510+#ifdef CONFIG_PAX_PER_CPU_PGD
37511+ pages->state.host_cr3 = read_cr3();
37512+#else
37513 pages->state.host_cr3 = __pa(current->mm->pgd);
37514+#endif
37515+
37516 /*
37517 * Set up the Guest's page tables to see this CPU's pages (and no
37518 * other CPU's pages).
37519@@ -476,7 +482,7 @@ void __init lguest_arch_host_init(void)
37520 * compiled-in switcher code and the high-mapped copy we just made.
37521 */
37522 for (i = 0; i < IDT_ENTRIES; i++)
37523- default_idt_entries[i] += switcher_offset();
37524+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
37525
37526 /*
37527 * Set up the Switcher's per-cpu areas.
37528@@ -559,7 +565,7 @@ void __init lguest_arch_host_init(void)
37529 * it will be undisturbed when we switch. To change %cs and jump we
37530 * need this structure to feed to Intel's "lcall" instruction.
37531 */
37532- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
37533+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
37534 lguest_entry.segment = LGUEST_CS;
37535
37536 /*
37537diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
37538index 40634b0..4f5855e 100644
37539--- a/drivers/lguest/x86/switcher_32.S
37540+++ b/drivers/lguest/x86/switcher_32.S
37541@@ -87,6 +87,7 @@
37542 #include <asm/page.h>
37543 #include <asm/segment.h>
37544 #include <asm/lguest.h>
37545+#include <asm/processor-flags.h>
37546
37547 // We mark the start of the code to copy
37548 // It's placed in .text tho it's never run here
37549@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
37550 // Changes type when we load it: damn Intel!
37551 // For after we switch over our page tables
37552 // That entry will be read-only: we'd crash.
37553+
37554+#ifdef CONFIG_PAX_KERNEXEC
37555+ mov %cr0, %edx
37556+ xor $X86_CR0_WP, %edx
37557+ mov %edx, %cr0
37558+#endif
37559+
37560 movl $(GDT_ENTRY_TSS*8), %edx
37561 ltr %dx
37562
37563@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
37564 // Let's clear it again for our return.
37565 // The GDT descriptor of the Host
37566 // Points to the table after two "size" bytes
37567- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
37568+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
37569 // Clear "used" from type field (byte 5, bit 2)
37570- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
37571+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
37572+
37573+#ifdef CONFIG_PAX_KERNEXEC
37574+ mov %cr0, %eax
37575+ xor $X86_CR0_WP, %eax
37576+ mov %eax, %cr0
37577+#endif
37578
37579 // Once our page table's switched, the Guest is live!
37580 // The Host fades as we run this final step.
37581@@ -295,13 +309,12 @@ deliver_to_host:
37582 // I consulted gcc, and it gave
37583 // These instructions, which I gladly credit:
37584 leal (%edx,%ebx,8), %eax
37585- movzwl (%eax),%edx
37586- movl 4(%eax), %eax
37587- xorw %ax, %ax
37588- orl %eax, %edx
37589+ movl 4(%eax), %edx
37590+ movw (%eax), %dx
37591 // Now the address of the handler's in %edx
37592 // We call it now: its "iret" drops us home.
37593- jmp *%edx
37594+ ljmp $__KERNEL_CS, $1f
37595+1: jmp *%edx
37596
37597 // Every interrupt can come to us here
37598 // But we must truly tell each apart.
37599diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
37600index 7155945..4bcc562 100644
37601--- a/drivers/md/bitmap.c
37602+++ b/drivers/md/bitmap.c
37603@@ -1779,7 +1779,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
37604 chunk_kb ? "KB" : "B");
37605 if (bitmap->storage.file) {
37606 seq_printf(seq, ", file: ");
37607- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
37608+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
37609 }
37610
37611 seq_printf(seq, "\n");
37612diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
37613index eee353d..74504c4 100644
37614--- a/drivers/md/dm-ioctl.c
37615+++ b/drivers/md/dm-ioctl.c
37616@@ -1632,7 +1632,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
37617 cmd == DM_LIST_VERSIONS_CMD)
37618 return 0;
37619
37620- if ((cmd == DM_DEV_CREATE_CMD)) {
37621+ if (cmd == DM_DEV_CREATE_CMD) {
37622 if (!*param->name) {
37623 DMWARN("name not supplied when creating device");
37624 return -EINVAL;
37625diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
37626index 7f24190..0e18099 100644
37627--- a/drivers/md/dm-raid1.c
37628+++ b/drivers/md/dm-raid1.c
37629@@ -40,7 +40,7 @@ enum dm_raid1_error {
37630
37631 struct mirror {
37632 struct mirror_set *ms;
37633- atomic_t error_count;
37634+ atomic_unchecked_t error_count;
37635 unsigned long error_type;
37636 struct dm_dev *dev;
37637 sector_t offset;
37638@@ -183,7 +183,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
37639 struct mirror *m;
37640
37641 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
37642- if (!atomic_read(&m->error_count))
37643+ if (!atomic_read_unchecked(&m->error_count))
37644 return m;
37645
37646 return NULL;
37647@@ -215,7 +215,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
37648 * simple way to tell if a device has encountered
37649 * errors.
37650 */
37651- atomic_inc(&m->error_count);
37652+ atomic_inc_unchecked(&m->error_count);
37653
37654 if (test_and_set_bit(error_type, &m->error_type))
37655 return;
37656@@ -406,7 +406,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
37657 struct mirror *m = get_default_mirror(ms);
37658
37659 do {
37660- if (likely(!atomic_read(&m->error_count)))
37661+ if (likely(!atomic_read_unchecked(&m->error_count)))
37662 return m;
37663
37664 if (m-- == ms->mirror)
37665@@ -420,7 +420,7 @@ static int default_ok(struct mirror *m)
37666 {
37667 struct mirror *default_mirror = get_default_mirror(m->ms);
37668
37669- return !atomic_read(&default_mirror->error_count);
37670+ return !atomic_read_unchecked(&default_mirror->error_count);
37671 }
37672
37673 static int mirror_available(struct mirror_set *ms, struct bio *bio)
37674@@ -557,7 +557,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
37675 */
37676 if (likely(region_in_sync(ms, region, 1)))
37677 m = choose_mirror(ms, bio->bi_sector);
37678- else if (m && atomic_read(&m->error_count))
37679+ else if (m && atomic_read_unchecked(&m->error_count))
37680 m = NULL;
37681
37682 if (likely(m))
37683@@ -924,7 +924,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
37684 }
37685
37686 ms->mirror[mirror].ms = ms;
37687- atomic_set(&(ms->mirror[mirror].error_count), 0);
37688+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
37689 ms->mirror[mirror].error_type = 0;
37690 ms->mirror[mirror].offset = offset;
37691
37692@@ -1337,7 +1337,7 @@ static void mirror_resume(struct dm_target *ti)
37693 */
37694 static char device_status_char(struct mirror *m)
37695 {
37696- if (!atomic_read(&(m->error_count)))
37697+ if (!atomic_read_unchecked(&(m->error_count)))
37698 return 'A';
37699
37700 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
37701diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
37702index aaecefa..23b3026 100644
37703--- a/drivers/md/dm-stripe.c
37704+++ b/drivers/md/dm-stripe.c
37705@@ -20,7 +20,7 @@ struct stripe {
37706 struct dm_dev *dev;
37707 sector_t physical_start;
37708
37709- atomic_t error_count;
37710+ atomic_unchecked_t error_count;
37711 };
37712
37713 struct stripe_c {
37714@@ -184,7 +184,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
37715 kfree(sc);
37716 return r;
37717 }
37718- atomic_set(&(sc->stripe[i].error_count), 0);
37719+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
37720 }
37721
37722 ti->private = sc;
37723@@ -325,7 +325,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
37724 DMEMIT("%d ", sc->stripes);
37725 for (i = 0; i < sc->stripes; i++) {
37726 DMEMIT("%s ", sc->stripe[i].dev->name);
37727- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
37728+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
37729 'D' : 'A';
37730 }
37731 buffer[i] = '\0';
37732@@ -370,8 +370,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
37733 */
37734 for (i = 0; i < sc->stripes; i++)
37735 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
37736- atomic_inc(&(sc->stripe[i].error_count));
37737- if (atomic_read(&(sc->stripe[i].error_count)) <
37738+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
37739+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
37740 DM_IO_ERROR_THRESHOLD)
37741 schedule_work(&sc->trigger_event);
37742 }
37743diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
37744index daf25d0..d74f49f 100644
37745--- a/drivers/md/dm-table.c
37746+++ b/drivers/md/dm-table.c
37747@@ -390,7 +390,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
37748 if (!dev_size)
37749 return 0;
37750
37751- if ((start >= dev_size) || (start + len > dev_size)) {
37752+ if ((start >= dev_size) || (len > dev_size - start)) {
37753 DMWARN("%s: %s too small for target: "
37754 "start=%llu, len=%llu, dev_size=%llu",
37755 dm_device_name(ti->table->md), bdevname(bdev, b),
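The dm-table fix above is a classic overflow-safe bounds check: start + len can wrap in 64 bits and slip past the naive comparison, whereas len > dev_size - start cannot overflow once start >= dev_size has been rejected. A small demonstration (both helpers return nonzero when the range is invalid, matching device_area_is_invalid()):

    #include <stdint.h>
    #include <stdio.h>

    static int bad_check(uint64_t start, uint64_t len, uint64_t dev_size)
    {
        return (start >= dev_size) || (start + len > dev_size);  /* can wrap */
    }

    static int good_check(uint64_t start, uint64_t len, uint64_t dev_size)
    {
        return (start >= dev_size) || (len > dev_size - start);  /* cannot wrap */
    }

    int main(void)
    {
        uint64_t start = 1, len = UINT64_MAX, dev = 1000;

        printf("bad rejects: %d\n", bad_check(start, len, dev));   /* 0: wrapped, accepted! */
        printf("good rejects: %d\n", good_check(start, len, dev)); /* 1: rejected */
        return 0;
    }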
37756diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
37757index 4d6e853..a234157 100644
37758--- a/drivers/md/dm-thin-metadata.c
37759+++ b/drivers/md/dm-thin-metadata.c
37760@@ -397,7 +397,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
37761 {
37762 pmd->info.tm = pmd->tm;
37763 pmd->info.levels = 2;
37764- pmd->info.value_type.context = pmd->data_sm;
37765+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
37766 pmd->info.value_type.size = sizeof(__le64);
37767 pmd->info.value_type.inc = data_block_inc;
37768 pmd->info.value_type.dec = data_block_dec;
37769@@ -416,7 +416,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
37770
37771 pmd->bl_info.tm = pmd->tm;
37772 pmd->bl_info.levels = 1;
37773- pmd->bl_info.value_type.context = pmd->data_sm;
37774+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
37775 pmd->bl_info.value_type.size = sizeof(__le64);
37776 pmd->bl_info.value_type.inc = data_block_inc;
37777 pmd->bl_info.value_type.dec = data_block_dec;
37778diff --git a/drivers/md/dm.c b/drivers/md/dm.c
37779index 0d8f086..f5a91d5 100644
37780--- a/drivers/md/dm.c
37781+++ b/drivers/md/dm.c
37782@@ -170,9 +170,9 @@ struct mapped_device {
37783 /*
37784 * Event handling.
37785 */
37786- atomic_t event_nr;
37787+ atomic_unchecked_t event_nr;
37788 wait_queue_head_t eventq;
37789- atomic_t uevent_seq;
37790+ atomic_unchecked_t uevent_seq;
37791 struct list_head uevent_list;
37792 spinlock_t uevent_lock; /* Protect access to uevent_list */
37793
37794@@ -1872,8 +1872,8 @@ static struct mapped_device *alloc_dev(int minor)
37795 rwlock_init(&md->map_lock);
37796 atomic_set(&md->holders, 1);
37797 atomic_set(&md->open_count, 0);
37798- atomic_set(&md->event_nr, 0);
37799- atomic_set(&md->uevent_seq, 0);
37800+ atomic_set_unchecked(&md->event_nr, 0);
37801+ atomic_set_unchecked(&md->uevent_seq, 0);
37802 INIT_LIST_HEAD(&md->uevent_list);
37803 spin_lock_init(&md->uevent_lock);
37804
37805@@ -2026,7 +2026,7 @@ static void event_callback(void *context)
37806
37807 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
37808
37809- atomic_inc(&md->event_nr);
37810+ atomic_inc_unchecked(&md->event_nr);
37811 wake_up(&md->eventq);
37812 }
37813
37814@@ -2683,18 +2683,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
37815
37816 uint32_t dm_next_uevent_seq(struct mapped_device *md)
37817 {
37818- return atomic_add_return(1, &md->uevent_seq);
37819+ return atomic_add_return_unchecked(1, &md->uevent_seq);
37820 }
37821
37822 uint32_t dm_get_event_nr(struct mapped_device *md)
37823 {
37824- return atomic_read(&md->event_nr);
37825+ return atomic_read_unchecked(&md->event_nr);
37826 }
37827
37828 int dm_wait_event(struct mapped_device *md, int event_nr)
37829 {
37830 return wait_event_interruptible(md->eventq,
37831- (event_nr != atomic_read(&md->event_nr)));
37832+ (event_nr != atomic_read_unchecked(&md->event_nr)));
37833 }
37834
37835 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
37836diff --git a/drivers/md/md.c b/drivers/md/md.c
37837index f363135..9b38815 100644
37838--- a/drivers/md/md.c
37839+++ b/drivers/md/md.c
37840@@ -240,10 +240,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
37841 * start build, activate spare
37842 */
37843 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
37844-static atomic_t md_event_count;
37845+static atomic_unchecked_t md_event_count;
37846 void md_new_event(struct mddev *mddev)
37847 {
37848- atomic_inc(&md_event_count);
37849+ atomic_inc_unchecked(&md_event_count);
37850 wake_up(&md_event_waiters);
37851 }
37852 EXPORT_SYMBOL_GPL(md_new_event);
37853@@ -253,7 +253,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
37854 */
37855 static void md_new_event_inintr(struct mddev *mddev)
37856 {
37857- atomic_inc(&md_event_count);
37858+ atomic_inc_unchecked(&md_event_count);
37859 wake_up(&md_event_waiters);
37860 }
37861
37862@@ -1507,7 +1507,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
37863 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
37864 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
37865 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
37866- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
37867+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
37868
37869 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
37870 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
37871@@ -1751,7 +1751,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
37872 else
37873 sb->resync_offset = cpu_to_le64(0);
37874
37875- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
37876+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
37877
37878 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
37879 sb->size = cpu_to_le64(mddev->dev_sectors);
37880@@ -2751,7 +2751,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
37881 static ssize_t
37882 errors_show(struct md_rdev *rdev, char *page)
37883 {
37884- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
37885+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
37886 }
37887
37888 static ssize_t
37889@@ -2760,7 +2760,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
37890 char *e;
37891 unsigned long n = simple_strtoul(buf, &e, 10);
37892 if (*buf && (*e == 0 || *e == '\n')) {
37893- atomic_set(&rdev->corrected_errors, n);
37894+ atomic_set_unchecked(&rdev->corrected_errors, n);
37895 return len;
37896 }
37897 return -EINVAL;
37898@@ -3210,8 +3210,8 @@ int md_rdev_init(struct md_rdev *rdev)
37899 rdev->sb_loaded = 0;
37900 rdev->bb_page = NULL;
37901 atomic_set(&rdev->nr_pending, 0);
37902- atomic_set(&rdev->read_errors, 0);
37903- atomic_set(&rdev->corrected_errors, 0);
37904+ atomic_set_unchecked(&rdev->read_errors, 0);
37905+ atomic_set_unchecked(&rdev->corrected_errors, 0);
37906
37907 INIT_LIST_HEAD(&rdev->same_set);
37908 init_waitqueue_head(&rdev->blocked_wait);
37909@@ -6987,7 +6987,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
37910
37911 spin_unlock(&pers_lock);
37912 seq_printf(seq, "\n");
37913- seq->poll_event = atomic_read(&md_event_count);
37914+ seq->poll_event = atomic_read_unchecked(&md_event_count);
37915 return 0;
37916 }
37917 if (v == (void*)2) {
37918@@ -7090,7 +7090,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
37919 return error;
37920
37921 seq = file->private_data;
37922- seq->poll_event = atomic_read(&md_event_count);
37923+ seq->poll_event = atomic_read_unchecked(&md_event_count);
37924 return error;
37925 }
37926
37927@@ -7104,7 +7104,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
37928 /* always allow read */
37929 mask = POLLIN | POLLRDNORM;
37930
37931- if (seq->poll_event != atomic_read(&md_event_count))
37932+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
37933 mask |= POLLERR | POLLPRI;
37934 return mask;
37935 }
37936@@ -7148,7 +7148,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
37937 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
37938 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
37939 (int)part_stat_read(&disk->part0, sectors[1]) -
37940- atomic_read(&disk->sync_io);
37941+ atomic_read_unchecked(&disk->sync_io);
37942 /* sync IO will cause sync_io to increase before the disk_stats
37943 * as sync_io is counted when a request starts, and
37944 * disk_stats is counted when it completes.
37945diff --git a/drivers/md/md.h b/drivers/md/md.h
37946index eca59c3..7c42285 100644
37947--- a/drivers/md/md.h
37948+++ b/drivers/md/md.h
37949@@ -94,13 +94,13 @@ struct md_rdev {
37950 * only maintained for arrays that
37951 * support hot removal
37952 */
37953- atomic_t read_errors; /* number of consecutive read errors that
37954+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
37955 * we have tried to ignore.
37956 */
37957 struct timespec last_read_error; /* monotonic time since our
37958 * last read error
37959 */
37960- atomic_t corrected_errors; /* number of corrected read errors,
37961+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
37962 * for reporting to userspace and storing
37963 * in superblock.
37964 */
37965@@ -434,7 +434,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
37966
37967 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
37968 {
37969- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
37970+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
37971 }
37972
37973 struct md_personality
37974diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
37975index 1cbfc6b..56e1dbb 100644
37976--- a/drivers/md/persistent-data/dm-space-map.h
37977+++ b/drivers/md/persistent-data/dm-space-map.h
37978@@ -60,6 +60,7 @@ struct dm_space_map {
37979 int (*root_size)(struct dm_space_map *sm, size_t *result);
37980 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
37981 };
37982+typedef struct dm_space_map __no_const dm_space_map_no_const;
37983
37984 /*----------------------------------------------------------------*/
37985
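
dm_space_map is a pure table of function pointers, and grsecurity's constify GCC plugin forces such ops-only structures into read-only memory. The __no_const typedef added here creates a mutable alias of the struct for the few places that must fill methods in at runtime, while plain declarations stay constified. A plain-C sketch of the idea, using an ordinary const in place of the plugin and hypothetical type and field names:

    #include <stddef.h>
    #include <stdio.h>

    struct space_map_ops {
        int (*root_size)(size_t *result);
        int (*copy_root)(void *to, size_t len);
    };

    static int my_root_size(size_t *r) { *r = 64; return 0; }

    /* Ordinary users see a read-only ops table... */
    static const struct space_map_ops sm_ro = { .root_size = my_root_size };

    /* ...while the _no_const alias stays writable for late binding. */
    typedef struct space_map_ops space_map_no_const;

    int main(void)
    {
        space_map_no_const sm_rw = sm_ro;  /* copy, then patch at runtime */
        size_t n = 0;

        sm_rw.copy_root = NULL;
        sm_rw.root_size(&n);
        printf("root size %zu\n", n);
        return 0;
    }
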
37986diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
37987index 75b1f89..00ba344 100644
37988--- a/drivers/md/raid1.c
37989+++ b/drivers/md/raid1.c
37990@@ -1819,7 +1819,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
37991 if (r1_sync_page_io(rdev, sect, s,
37992 bio->bi_io_vec[idx].bv_page,
37993 READ) != 0)
37994- atomic_add(s, &rdev->corrected_errors);
37995+ atomic_add_unchecked(s, &rdev->corrected_errors);
37996 }
37997 sectors -= s;
37998 sect += s;
37999@@ -2041,7 +2041,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
38000 test_bit(In_sync, &rdev->flags)) {
38001 if (r1_sync_page_io(rdev, sect, s,
38002 conf->tmppage, READ)) {
38003- atomic_add(s, &rdev->corrected_errors);
38004+ atomic_add_unchecked(s, &rdev->corrected_errors);
38005 printk(KERN_INFO
38006 "md/raid1:%s: read error corrected "
38007 "(%d sectors at %llu on %s)\n",
38008diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
38009index 8d925dc..11d674f 100644
38010--- a/drivers/md/raid10.c
38011+++ b/drivers/md/raid10.c
38012@@ -1878,7 +1878,7 @@ static void end_sync_read(struct bio *bio, int error)
38013 /* The write handler will notice the lack of
38014 * R10BIO_Uptodate and record any errors etc
38015 */
38016- atomic_add(r10_bio->sectors,
38017+ atomic_add_unchecked(r10_bio->sectors,
38018 &conf->mirrors[d].rdev->corrected_errors);
38019
38020 /* for reconstruct, we always reschedule after a read.
38021@@ -2227,7 +2227,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
38022 {
38023 struct timespec cur_time_mon;
38024 unsigned long hours_since_last;
38025- unsigned int read_errors = atomic_read(&rdev->read_errors);
38026+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
38027
38028 ktime_get_ts(&cur_time_mon);
38029
38030@@ -2249,9 +2249,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
38031 * overflowing the shift of read_errors by hours_since_last.
38032 */
38033 if (hours_since_last >= 8 * sizeof(read_errors))
38034- atomic_set(&rdev->read_errors, 0);
38035+ atomic_set_unchecked(&rdev->read_errors, 0);
38036 else
38037- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
38038+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
38039 }
38040
38041 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
38042@@ -2305,8 +2305,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
38043 return;
38044
38045 check_decay_read_errors(mddev, rdev);
38046- atomic_inc(&rdev->read_errors);
38047- if (atomic_read(&rdev->read_errors) > max_read_errors) {
38048+ atomic_inc_unchecked(&rdev->read_errors);
38049+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
38050 char b[BDEVNAME_SIZE];
38051 bdevname(rdev->bdev, b);
38052
38053@@ -2314,7 +2314,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
38054 "md/raid10:%s: %s: Raid device exceeded "
38055 "read_error threshold [cur %d:max %d]\n",
38056 mdname(mddev), b,
38057- atomic_read(&rdev->read_errors), max_read_errors);
38058+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
38059 printk(KERN_NOTICE
38060 "md/raid10:%s: %s: Failing raid device\n",
38061 mdname(mddev), b);
38062@@ -2469,7 +2469,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
38063 sect +
38064 choose_data_offset(r10_bio, rdev)),
38065 bdevname(rdev->bdev, b));
38066- atomic_add(s, &rdev->corrected_errors);
38067+ atomic_add_unchecked(s, &rdev->corrected_errors);
38068 }
38069
38070 rdev_dec_pending(rdev, mddev);
38071diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
38072index 19d77a0..56051b92 100644
38073--- a/drivers/md/raid5.c
38074+++ b/drivers/md/raid5.c
38075@@ -1797,21 +1797,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
38076 mdname(conf->mddev), STRIPE_SECTORS,
38077 (unsigned long long)s,
38078 bdevname(rdev->bdev, b));
38079- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
38080+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
38081 clear_bit(R5_ReadError, &sh->dev[i].flags);
38082 clear_bit(R5_ReWrite, &sh->dev[i].flags);
38083 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
38084 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
38085
38086- if (atomic_read(&rdev->read_errors))
38087- atomic_set(&rdev->read_errors, 0);
38088+ if (atomic_read_unchecked(&rdev->read_errors))
38089+ atomic_set_unchecked(&rdev->read_errors, 0);
38090 } else {
38091 const char *bdn = bdevname(rdev->bdev, b);
38092 int retry = 0;
38093 int set_bad = 0;
38094
38095 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
38096- atomic_inc(&rdev->read_errors);
38097+ atomic_inc_unchecked(&rdev->read_errors);
38098 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
38099 printk_ratelimited(
38100 KERN_WARNING
38101@@ -1839,7 +1839,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
38102 mdname(conf->mddev),
38103 (unsigned long long)s,
38104 bdn);
38105- } else if (atomic_read(&rdev->read_errors)
38106+ } else if (atomic_read_unchecked(&rdev->read_errors)
38107 > conf->max_nr_stripes)
38108 printk(KERN_WARNING
38109 "md/raid:%s: Too many read errors, failing device %s.\n",
38110diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
38111index d33101a..6b13069 100644
38112--- a/drivers/media/dvb-core/dvbdev.c
38113+++ b/drivers/media/dvb-core/dvbdev.c
38114@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
38115 const struct dvb_device *template, void *priv, int type)
38116 {
38117 struct dvb_device *dvbdev;
38118- struct file_operations *dvbdevfops;
38119+ file_operations_no_const *dvbdevfops;
38120 struct device *clsdev;
38121 int minor;
38122 int id;
38123diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
38124index 404f63a..4796533 100644
38125--- a/drivers/media/dvb-frontends/dib3000.h
38126+++ b/drivers/media/dvb-frontends/dib3000.h
38127@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
38128 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
38129 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
38130 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
38131-};
38132+} __no_const;
38133
38134 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
38135 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
38136diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
38137index 8e9a668..78d6310 100644
38138--- a/drivers/media/platform/omap/omap_vout.c
38139+++ b/drivers/media/platform/omap/omap_vout.c
38140@@ -63,7 +63,6 @@ enum omap_vout_channels {
38141 OMAP_VIDEO2,
38142 };
38143
38144-static struct videobuf_queue_ops video_vbq_ops;
38145 /* Variables configurable through module params*/
38146 static u32 video1_numbuffers = 3;
38147 static u32 video2_numbuffers = 3;
38148@@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
38149 {
38150 struct videobuf_queue *q;
38151 struct omap_vout_device *vout = NULL;
38152+ static struct videobuf_queue_ops video_vbq_ops = {
38153+ .buf_setup = omap_vout_buffer_setup,
38154+ .buf_prepare = omap_vout_buffer_prepare,
38155+ .buf_release = omap_vout_buffer_release,
38156+ .buf_queue = omap_vout_buffer_queue,
38157+ };
38158
38159 vout = video_drvdata(file);
38160 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
38161@@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
38162 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
38163
38164 q = &vout->vbq;
38165- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
38166- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
38167- video_vbq_ops.buf_release = omap_vout_buffer_release;
38168- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
38169 spin_lock_init(&vout->vbq_lock);
38170
38171 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
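
The omap_vout change removes a writable file-scope videobuf_queue_ops that used to be filled in on every open() and replaces it with a function-local static table initialized at compile time, so the function pointers are fixed once and can live in read-only memory. A sketch of the same refactor on a hypothetical ops table:

    #include <stdio.h>

    struct queue_ops {
        int (*setup)(int nbufs);
        int (*queue)(int buf);
    };

    static int my_setup(int n) { return n; }
    static int my_queue(int b) { return b; }

    static int device_open(void)
    {
        /* One compile-time initialized instance, shared by all opens;
         * previously this was a mutable global assigned field by field. */
        static const struct queue_ops ops = {
            .setup = my_setup,
            .queue = my_queue,
        };
        return ops.setup(3) + ops.queue(0);
    }

    int main(void)
    {
        printf("%d\n", device_open());
        return 0;
    }
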
38172diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
38173index b671e20..34088b7 100644
38174--- a/drivers/media/platform/s5p-tv/mixer.h
38175+++ b/drivers/media/platform/s5p-tv/mixer.h
38176@@ -155,7 +155,7 @@ struct mxr_layer {
38177 /** layer index (unique identifier) */
38178 int idx;
38179 /** callbacks for layer methods */
38180- struct mxr_layer_ops ops;
38181+ struct mxr_layer_ops *ops;
38182 /** format array */
38183 const struct mxr_format **fmt_array;
38184 /** size of format array */
38185diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
38186index b93a21f..2535195 100644
38187--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
38188+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
38189@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
38190 {
38191 struct mxr_layer *layer;
38192 int ret;
38193- struct mxr_layer_ops ops = {
38194+ static struct mxr_layer_ops ops = {
38195 .release = mxr_graph_layer_release,
38196 .buffer_set = mxr_graph_buffer_set,
38197 .stream_set = mxr_graph_stream_set,
38198diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
38199index 3b1670a..595c939 100644
38200--- a/drivers/media/platform/s5p-tv/mixer_reg.c
38201+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
38202@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
38203 layer->update_buf = next;
38204 }
38205
38206- layer->ops.buffer_set(layer, layer->update_buf);
38207+ layer->ops->buffer_set(layer, layer->update_buf);
38208
38209 if (done && done != layer->shadow_buf)
38210 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
38211diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
38212index 1f3b743..e839271 100644
38213--- a/drivers/media/platform/s5p-tv/mixer_video.c
38214+++ b/drivers/media/platform/s5p-tv/mixer_video.c
38215@@ -208,7 +208,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
38216 layer->geo.src.height = layer->geo.src.full_height;
38217
38218 mxr_geometry_dump(mdev, &layer->geo);
38219- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
38220+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
38221 mxr_geometry_dump(mdev, &layer->geo);
38222 }
38223
38224@@ -226,7 +226,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
38225 layer->geo.dst.full_width = mbus_fmt.width;
38226 layer->geo.dst.full_height = mbus_fmt.height;
38227 layer->geo.dst.field = mbus_fmt.field;
38228- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
38229+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
38230
38231 mxr_geometry_dump(mdev, &layer->geo);
38232 }
38233@@ -332,7 +332,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
38234 /* set source size to highest accepted value */
38235 geo->src.full_width = max(geo->dst.full_width, pix->width);
38236 geo->src.full_height = max(geo->dst.full_height, pix->height);
38237- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
38238+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
38239 mxr_geometry_dump(mdev, &layer->geo);
38240 /* set cropping to total visible screen */
38241 geo->src.width = pix->width;
38242@@ -340,12 +340,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
38243 geo->src.x_offset = 0;
38244 geo->src.y_offset = 0;
38245 /* assure consistency of geometry */
38246- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
38247+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
38248 mxr_geometry_dump(mdev, &layer->geo);
38249 /* set full size to lowest possible value */
38250 geo->src.full_width = 0;
38251 geo->src.full_height = 0;
38252- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
38253+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
38254 mxr_geometry_dump(mdev, &layer->geo);
38255
38256 /* returning results */
38257@@ -472,7 +472,7 @@ static int mxr_s_selection(struct file *file, void *fh,
38258 target->width = s->r.width;
38259 target->height = s->r.height;
38260
38261- layer->ops.fix_geometry(layer, stage, s->flags);
38262+ layer->ops->fix_geometry(layer, stage, s->flags);
38263
38264 /* retrieve update selection rectangle */
38265 res.left = target->x_offset;
38266@@ -937,13 +937,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
38267 mxr_output_get(mdev);
38268
38269 mxr_layer_update_output(layer);
38270- layer->ops.format_set(layer);
38271+ layer->ops->format_set(layer);
38272 /* enabling layer in hardware */
38273 spin_lock_irqsave(&layer->enq_slock, flags);
38274 layer->state = MXR_LAYER_STREAMING;
38275 spin_unlock_irqrestore(&layer->enq_slock, flags);
38276
38277- layer->ops.stream_set(layer, MXR_ENABLE);
38278+ layer->ops->stream_set(layer, MXR_ENABLE);
38279 mxr_streamer_get(mdev);
38280
38281 return 0;
38282@@ -1013,7 +1013,7 @@ static int stop_streaming(struct vb2_queue *vq)
38283 spin_unlock_irqrestore(&layer->enq_slock, flags);
38284
38285 /* disabling layer in hardware */
38286- layer->ops.stream_set(layer, MXR_DISABLE);
38287+ layer->ops->stream_set(layer, MXR_DISABLE);
38288 /* remove one streamer */
38289 mxr_streamer_put(mdev);
38290 /* allow changes in output configuration */
38291@@ -1052,8 +1052,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
38292
38293 void mxr_layer_release(struct mxr_layer *layer)
38294 {
38295- if (layer->ops.release)
38296- layer->ops.release(layer);
38297+ if (layer->ops->release)
38298+ layer->ops->release(layer);
38299 }
38300
38301 void mxr_base_layer_release(struct mxr_layer *layer)
38302@@ -1079,7 +1079,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
38303
38304 layer->mdev = mdev;
38305 layer->idx = idx;
38306- layer->ops = *ops;
38307+ layer->ops = ops;
38308
38309 spin_lock_init(&layer->enq_slock);
38310 INIT_LIST_HEAD(&layer->enq_list);
38311diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
38312index 3d13a63..da31bf1 100644
38313--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
38314+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
38315@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
38316 {
38317 struct mxr_layer *layer;
38318 int ret;
38319- struct mxr_layer_ops ops = {
38320+ static struct mxr_layer_ops ops = {
38321 .release = mxr_vp_layer_release,
38322 .buffer_set = mxr_vp_buffer_set,
38323 .stream_set = mxr_vp_stream_set,
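
The five s5p-tv hunks above are one logical change: struct mxr_layer used to embed a writable copy of mxr_layer_ops (layer->ops = *ops), and now holds a pointer to a single static table instead, so every call site changes from layer->ops.x() to layer->ops->x(). A compilable sketch of the conversion, with hypothetical names:

    #include <stdio.h>

    struct layer_ops {
        void (*stream_set)(int enable);
    };

    static void vp_stream_set(int enable) { printf("stream %d\n", enable); }

    /* One shared, compile-time table replaces per-layer writable copies. */
    static const struct layer_ops vp_ops = { .stream_set = vp_stream_set };

    struct layer {
        const struct layer_ops *ops;   /* was: struct layer_ops ops; */
    };

    int main(void)
    {
        struct layer l = { .ops = &vp_ops };  /* was: l.ops = *ops;       */
        l.ops->stream_set(1);                 /* was: l.ops.stream_set(1) */
        return 0;
    }
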
38324diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
38325index 643d80a..56bb96b 100644
38326--- a/drivers/media/radio/radio-cadet.c
38327+++ b/drivers/media/radio/radio-cadet.c
38328@@ -302,6 +302,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
38329 unsigned char readbuf[RDS_BUFFER];
38330 int i = 0;
38331
38332+ if (count > RDS_BUFFER)
38333+ return -EFAULT;
38334 mutex_lock(&dev->lock);
38335 if (dev->rdsstat == 0)
38336 cadet_start_rds(dev);
38337@@ -317,7 +319,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
38338 while (i < count && dev->rdsin != dev->rdsout)
38339 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
38340
38341- if (i && copy_to_user(data, readbuf, i))
38342+ if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
38343 i = -EFAULT;
38344 unlock:
38345 mutex_unlock(&dev->lock);
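
In cadet_read() the copy loop fills a fixed RDS_BUFFER-sized stack array, so a user-supplied count larger than the array must be rejected before the loop runs; the second hunk additionally re-checks i against sizeof(readbuf) so copy_to_user() can never be asked for more bytes than the stack buffer holds. A userspace model of the pattern follows; the buffer size is illustrative and memcpy stands in for copy_to_user():

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>

    #define RDS_BUFFER 256

    static ssize_t bounded_read(char *dst, size_t count,
                                const char *src, size_t avail)
    {
        char readbuf[RDS_BUFFER];
        size_t i;

        if (count > RDS_BUFFER)      /* reject before the copy loop runs */
            return -EFAULT;          /* (mirroring the hunk's choice)    */

        for (i = 0; i < count && i < avail; i++)
            readbuf[i] = src[i];

        if (i > sizeof(readbuf))     /* defensive re-check, as in the hunk */
            return -EFAULT;
        memcpy(dst, readbuf, i);     /* stands in for copy_to_user() */
        return (ssize_t)i;
    }

    int main(void)
    {
        char out[16];
        printf("%zd\n", bounded_read(out, sizeof(out), "hello", 5));
        return 0;
    }
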
38346diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
38347index 3940bb0..fb3952a 100644
38348--- a/drivers/media/usb/dvb-usb/cxusb.c
38349+++ b/drivers/media/usb/dvb-usb/cxusb.c
38350@@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
38351
38352 struct dib0700_adapter_state {
38353 int (*set_param_save) (struct dvb_frontend *);
38354-};
38355+} __no_const;
38356
38357 static int dib7070_set_param_override(struct dvb_frontend *fe)
38358 {
38359diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
38360index 9382895..ac8093c 100644
38361--- a/drivers/media/usb/dvb-usb/dw2102.c
38362+++ b/drivers/media/usb/dvb-usb/dw2102.c
38363@@ -95,7 +95,7 @@ struct su3000_state {
38364
38365 struct s6x0_state {
38366 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
38367-};
38368+} __no_const;
38369
38370 /* debug */
38371 static int dvb_usb_dw2102_debug;
38372diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
38373index aa6e7c7..4cd8061 100644
38374--- a/drivers/media/v4l2-core/v4l2-ioctl.c
38375+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
38376@@ -1923,7 +1923,8 @@ struct v4l2_ioctl_info {
38377 struct file *file, void *fh, void *p);
38378 } u;
38379 void (*debug)(const void *arg, bool write_only);
38380-};
38381+} __do_const;
38382+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
38383
38384 /* This control needs a priority check */
38385 #define INFO_FL_PRIO (1 << 0)
38386@@ -2108,7 +2109,7 @@ static long __video_do_ioctl(struct file *file,
38387 struct video_device *vfd = video_devdata(file);
38388 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
38389 bool write_only = false;
38390- struct v4l2_ioctl_info default_info;
38391+ v4l2_ioctl_info_no_const default_info;
38392 const struct v4l2_ioctl_info *info;
38393 void *fh = file->private_data;
38394 struct v4l2_fh *vfh = NULL;
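
v4l2_ioctl_info is an ioctl dispatch table, so the patch marks it __do_const, asking the constify plugin to force every instance read-only, and pairs that with a *_no_const typedef for the single on-stack default_info that __video_do_ioctl() must populate per call. A plain-C sketch of the pairing, with const standing in for the plugin and hypothetical names throughout:

    #include <stdio.h>
    #include <string.h>

    struct ioctl_info {
        unsigned int ioctl;
        long (*func)(void *fh, void *arg);
    };

    /* With the plugin, plain 'struct ioctl_info' would become const... */
    typedef const struct ioctl_info ioctl_info_const;
    /* ...and the _no_const alias restores mutability where needed. */
    typedef struct ioctl_info ioctl_info_no_const;

    static long noop(void *fh, void *arg) { (void)fh; (void)arg; return 0; }

    static ioctl_info_const table[] = {
        { 0x1234, noop },
    };

    static long dispatch(unsigned int cmd, void *fh, void *arg)
    {
        ioctl_info_no_const default_info;  /* stack copy stays writable */

        memset(&default_info, 0, sizeof(default_info));
        default_info.ioctl = cmd;
        default_info.func  = table[0].func;
        return default_info.func(fh, arg);
    }

    int main(void)
    {
        printf("%ld\n", dispatch(0x1234, NULL, NULL));
        return 0;
    }
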
38395diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
38396index 29b2172..a7c5b31 100644
38397--- a/drivers/memstick/host/r592.c
38398+++ b/drivers/memstick/host/r592.c
38399@@ -454,7 +454,7 @@ static int r592_transfer_fifo_pio(struct r592_device *dev)
38400 /* Executes one TPC (data is read/written from small or large fifo) */
38401 static void r592_execute_tpc(struct r592_device *dev)
38402 {
38403- bool is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
38404+ bool is_write;
38405 int len, error;
38406 u32 status, reg;
38407
38408@@ -463,6 +463,7 @@ static void r592_execute_tpc(struct r592_device *dev)
38409 return;
38410 }
38411
38412+ is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
38413 len = dev->req->long_data ?
38414 dev->req->sg.length : dev->req->data_len;
38415
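
The r592 fix is an ordering bug: is_write was initialized from dev->req in its declaration, before the validation at the top of the function (elided from this hunk) had run, so a missing request meant a NULL dereference. Moving the assignment below the guard fixes it. The generic shape of the fix, with hypothetical types and a hypothetical threshold standing in for MS_TPC_SET_RW_REG_ADRS:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct request { int tpc; };
    struct device  { struct request *req; };

    #define TPC_WRITE_THRESHOLD 8

    static void execute_tpc(struct device *dev)
    {
        bool is_write;              /* was initialized from dev->req here */

        if (dev->req == NULL) {     /* guard runs first... */
            fprintf(stderr, "no request\n");
            return;
        }

        is_write = dev->req->tpc >= TPC_WRITE_THRESHOLD;  /* ...then deref */
        printf("is_write=%d\n", is_write);
    }

    int main(void)
    {
        struct device dev = { .req = NULL };
        execute_tpc(&dev);          /* safe even with a NULL request */
        return 0;
    }
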
38416diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
38417index fb69baa..3aeea2e 100644
38418--- a/drivers/message/fusion/mptbase.c
38419+++ b/drivers/message/fusion/mptbase.c
38420@@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
38421 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
38422 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
38423
38424+#ifdef CONFIG_GRKERNSEC_HIDESYM
38425+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
38426+#else
38427 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
38428 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
38429+#endif
38430+
38431 /*
38432 * Rounding UP to nearest 4-kB boundary here...
38433 */
38434@@ -6769,7 +6774,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
38435 ioc->facts.GlobalCredits);
38436
38437 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
38438+#ifdef CONFIG_GRKERNSEC_HIDESYM
38439+ NULL, NULL);
38440+#else
38441 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
38442+#endif
38443 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
38444 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
38445 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
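
With CONFIG_GRKERNSEC_HIDESYM enabled, the mptbase procfs dump prints NULL in place of real kernel pointers; leaking the addresses of request frames and DMA buffers to userspace would hand an attacker exactly the information that kernel address randomization tries to withhold (mainline later grew %pK and kptr_restrict for the same purpose). A userspace model of the toggle:

    #include <stdio.h>

    #define CONFIG_GRKERNSEC_HIDESYM 1   /* flip to 0 to print real addresses */

    static void dump_frames(void *frames, void *frames_dma)
    {
    #if CONFIG_GRKERNSEC_HIDESYM
        (void)frames; (void)frames_dma;
        printf("RequestFrames @ %p (Dma @ %p)\n", (void *)NULL, (void *)NULL);
    #else
        printf("RequestFrames @ %p (Dma @ %p)\n", frames, frames_dma);
    #endif
    }

    int main(void)
    {
        int buf[4];
        dump_frames(buf, buf + 2);
        return 0;
    }
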
38446diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
38447index fa43c39..daeb158 100644
38448--- a/drivers/message/fusion/mptsas.c
38449+++ b/drivers/message/fusion/mptsas.c
38450@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
38451 return 0;
38452 }
38453
38454+static inline void
38455+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
38456+{
38457+ if (phy_info->port_details) {
38458+ phy_info->port_details->rphy = rphy;
38459+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
38460+ ioc->name, rphy));
38461+ }
38462+
38463+ if (rphy) {
38464+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
38465+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
38466+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
38467+ ioc->name, rphy, rphy->dev.release));
38468+ }
38469+}
38470+
38471 /* no mutex */
38472 static void
38473 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
38474@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
38475 return NULL;
38476 }
38477
38478-static inline void
38479-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
38480-{
38481- if (phy_info->port_details) {
38482- phy_info->port_details->rphy = rphy;
38483- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
38484- ioc->name, rphy));
38485- }
38486-
38487- if (rphy) {
38488- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
38489- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
38490- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
38491- ioc->name, rphy, rphy->dev.release));
38492- }
38493-}
38494-
38495 static inline struct sas_port *
38496 mptsas_get_port(struct mptsas_phyinfo *phy_info)
38497 {
38498diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
38499index 164afa7..b6b2e74 100644
38500--- a/drivers/message/fusion/mptscsih.c
38501+++ b/drivers/message/fusion/mptscsih.c
38502@@ -1271,15 +1271,16 @@ mptscsih_info(struct Scsi_Host *SChost)
38503
38504 h = shost_priv(SChost);
38505
38506- if (h) {
38507- if (h->info_kbuf == NULL)
38508- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
38509- return h->info_kbuf;
38510- h->info_kbuf[0] = '\0';
38511+ if (!h)
38512+ return NULL;
38513
38514- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
38515- h->info_kbuf[size-1] = '\0';
38516- }
38517+ if (h->info_kbuf == NULL)
38518+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
38519+ return h->info_kbuf;
38520+ h->info_kbuf[0] = '\0';
38521+
38522+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
38523+ h->info_kbuf[size-1] = '\0';
38524
38525 return h->info_kbuf;
38526 }
38527diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
38528index 8001aa6..b137580 100644
38529--- a/drivers/message/i2o/i2o_proc.c
38530+++ b/drivers/message/i2o/i2o_proc.c
38531@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
38532 "Array Controller Device"
38533 };
38534
38535-static char *chtostr(char *tmp, u8 *chars, int n)
38536-{
38537- tmp[0] = 0;
38538- return strncat(tmp, (char *)chars, n);
38539-}
38540-
38541 static int i2o_report_query_status(struct seq_file *seq, int block_status,
38542 char *group)
38543 {
38544@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
38545 } *result;
38546
38547 i2o_exec_execute_ddm_table ddm_table;
38548- char tmp[28 + 1];
38549
38550 result = kmalloc(sizeof(*result), GFP_KERNEL);
38551 if (!result)
38552@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
38553
38554 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
38555 seq_printf(seq, "%-#8x", ddm_table.module_id);
38556- seq_printf(seq, "%-29s",
38557- chtostr(tmp, ddm_table.module_name_version, 28));
38558+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
38559 seq_printf(seq, "%9d ", ddm_table.data_size);
38560 seq_printf(seq, "%8d", ddm_table.code_size);
38561
38562@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
38563
38564 i2o_driver_result_table *result;
38565 i2o_driver_store_table *dst;
38566- char tmp[28 + 1];
38567
38568 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
38569 if (result == NULL)
38570@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
38571
38572 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
38573 seq_printf(seq, "%-#8x", dst->module_id);
38574- seq_printf(seq, "%-29s",
38575- chtostr(tmp, dst->module_name_version, 28));
38576- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
38577+ seq_printf(seq, "%-.28s", dst->module_name_version);
38578+ seq_printf(seq, "%-.8s", dst->date);
38579 seq_printf(seq, "%8d ", dst->module_size);
38580 seq_printf(seq, "%8d ", dst->mpb_size);
38581 seq_printf(seq, "0x%04x", dst->module_flags);
38582@@ -1250,7 +1240,6 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
38583 // == (allow) 512d bytes (max)
38584 static u16 *work16 = (u16 *) work32;
38585 int token;
38586- char tmp[16 + 1];
38587
38588 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
38589
38590@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
38591 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
38592 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
38593 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
38594- seq_printf(seq, "Vendor info : %s\n",
38595- chtostr(tmp, (u8 *) (work32 + 2), 16));
38596- seq_printf(seq, "Product info : %s\n",
38597- chtostr(tmp, (u8 *) (work32 + 6), 16));
38598- seq_printf(seq, "Description : %s\n",
38599- chtostr(tmp, (u8 *) (work32 + 10), 16));
38600- seq_printf(seq, "Product rev. : %s\n",
38601- chtostr(tmp, (u8 *) (work32 + 14), 8));
38602+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
38603+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
38604+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
38605+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
38606
38607 seq_printf(seq, "Serial number : ");
38608 print_serial_number(seq, (u8 *) (work32 + 16),
38609@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
38610 u8 pad[256]; // allow up to 256 byte (max) serial number
38611 } result;
38612
38613- char tmp[24 + 1];
38614-
38615 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
38616
38617 if (token < 0) {
38618@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
38619 }
38620
38621 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
38622- seq_printf(seq, "Module name : %s\n",
38623- chtostr(tmp, result.module_name, 24));
38624- seq_printf(seq, "Module revision : %s\n",
38625- chtostr(tmp, result.module_rev, 8));
38626+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
38627+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
38628
38629 seq_printf(seq, "Serial number : ");
38630 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
38631@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
38632 u8 instance_number[4];
38633 } result;
38634
38635- char tmp[64 + 1];
38636-
38637 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
38638
38639 if (token < 0) {
38640@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
38641 return 0;
38642 }
38643
38644- seq_printf(seq, "Device name : %s\n",
38645- chtostr(tmp, result.device_name, 64));
38646- seq_printf(seq, "Service name : %s\n",
38647- chtostr(tmp, result.service_name, 64));
38648- seq_printf(seq, "Physical name : %s\n",
38649- chtostr(tmp, result.physical_location, 64));
38650- seq_printf(seq, "Instance number : %s\n",
38651- chtostr(tmp, result.instance_number, 4));
38652+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
38653+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
38654+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
38655+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
38656
38657 return 0;
38658 }
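
chtostr() existed only to bolt a terminator onto fixed-width, possibly unterminated firmware strings by strncat'ing them into a stack temporary. Since printf-style precision already bounds how many bytes %s may read, "%.28s" prints at most 28 bytes straight from the source with no terminator needed, so the helper and every tmp[] buffer can be deleted. The guarantee, demonstrated:

    #include <stdio.h>

    int main(void)
    {
        /* 8-byte field with no NUL terminator, as in the I2O tables. */
        char module_rev[8] = { 'v', '1', '.', '2', '.', '3', '4', '5' };

        /* Precision caps how many bytes printf may read: no overrun,
         * and no terminator required in the source field. */
        printf("Module revision : %.8s\n", module_rev);
        return 0;
    }
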
38659diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
38660index a8c08f3..155fe3d 100644
38661--- a/drivers/message/i2o/iop.c
38662+++ b/drivers/message/i2o/iop.c
38663@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
38664
38665 spin_lock_irqsave(&c->context_list_lock, flags);
38666
38667- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
38668- atomic_inc(&c->context_list_counter);
38669+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
38670+ atomic_inc_unchecked(&c->context_list_counter);
38671
38672- entry->context = atomic_read(&c->context_list_counter);
38673+ entry->context = atomic_read_unchecked(&c->context_list_counter);
38674
38675 list_add(&entry->list, &c->context_list);
38676
38677@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
38678
38679 #if BITS_PER_LONG == 64
38680 spin_lock_init(&c->context_list_lock);
38681- atomic_set(&c->context_list_counter, 0);
38682+ atomic_set_unchecked(&c->context_list_counter, 0);
38683 INIT_LIST_HEAD(&c->context_list);
38684 #endif
38685
38686diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
38687index 45ece11..8efa218 100644
38688--- a/drivers/mfd/janz-cmodio.c
38689+++ b/drivers/mfd/janz-cmodio.c
38690@@ -13,6 +13,7 @@
38691
38692 #include <linux/kernel.h>
38693 #include <linux/module.h>
38694+#include <linux/slab.h>
38695 #include <linux/init.h>
38696 #include <linux/pci.h>
38697 #include <linux/interrupt.h>
38698diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
38699index a5f9888..1c0ed56 100644
38700--- a/drivers/mfd/twl4030-irq.c
38701+++ b/drivers/mfd/twl4030-irq.c
38702@@ -35,6 +35,7 @@
38703 #include <linux/of.h>
38704 #include <linux/irqdomain.h>
38705 #include <linux/i2c/twl.h>
38706+#include <asm/pgtable.h>
38707
38708 #include "twl-core.h"
38709
38710@@ -728,10 +729,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
38711 * Install an irq handler for each of the SIH modules;
38712 * clone dummy irq_chip since PIH can't *do* anything
38713 */
38714- twl4030_irq_chip = dummy_irq_chip;
38715- twl4030_irq_chip.name = "twl4030";
38716+ pax_open_kernel();
38717+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
38718+ *(const char **)&twl4030_irq_chip.name = "twl4030";
38719
38720- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
38721+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
38722+ pax_close_kernel();
38723
38724 for (i = irq_base; i < irq_end; i++) {
38725 irq_set_chip_and_handler(i, &twl4030_irq_chip,
38726diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
38727index 277a8db..0e0b754 100644
38728--- a/drivers/mfd/twl6030-irq.c
38729+++ b/drivers/mfd/twl6030-irq.c
38730@@ -387,10 +387,12 @@ int twl6030_init_irq(struct device *dev, int irq_num)
38731 * install an irq handler for each of the modules;
38732 * clone dummy irq_chip since PIH can't *do* anything
38733 */
38734- twl6030_irq_chip = dummy_irq_chip;
38735- twl6030_irq_chip.name = "twl6030";
38736- twl6030_irq_chip.irq_set_type = NULL;
38737- twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
38738+ pax_open_kernel();
38739+ memcpy((void *)&twl6030_irq_chip, &dummy_irq_chip, sizeof twl6030_irq_chip);
38740+ *(const char **)&twl6030_irq_chip.name = "twl6030";
38741+ *(void **)&twl6030_irq_chip.irq_set_type = NULL;
38742+ *(void **)&twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
38743+ pax_close_kernel();
38744
38745 for (i = irq_base; i < irq_end; i++) {
38746 irq_set_chip_and_handler(i, &twl6030_irq_chip,
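
Both TWL hunks clone dummy_irq_chip into chip structures that the constify plugin has made read-only, so the one-time initialization must be bracketed with pax_open_kernel()/pax_close_kernel(), which temporarily lift kernel write protection (on x86 by toggling CR0.WP); the *(const char **)& casts strip the const qualifier from the individual fields being written. The same bracket appears below in the c2port and sdhci-s3c hunks. A userspace analogue built on mprotect(), assuming 4 KiB pages:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    static char *page;

    /* Userspace stand-ins; the kernel versions toggle CR0.WP (x86). */
    static void open_kernel(void)  { mprotect(page, 4096, PROT_READ | PROT_WRITE); }
    static void close_kernel(void) { mprotect(page, 4096, PROT_READ); }

    int main(void)
    {
        page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (page == MAP_FAILED)
            return 1;

        strcpy(page, "twl4030");   /* one-time init while writable */
        close_kernel();            /* table is now read-only       */

        open_kernel();             /* briefly writable again...    */
        page[0] = 'T';
        close_kernel();            /* ...and sealed once more      */

        printf("%s\n", page);
        return 0;
    }
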
38747diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
38748index f428d86..274c368 100644
38749--- a/drivers/misc/c2port/core.c
38750+++ b/drivers/misc/c2port/core.c
38751@@ -924,7 +924,9 @@ struct c2port_device *c2port_device_register(char *name,
38752 mutex_init(&c2dev->mutex);
38753
38754 /* Create binary file */
38755- c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
38756+ pax_open_kernel();
38757+ *(size_t *)&c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
38758+ pax_close_kernel();
38759 ret = device_create_bin_file(c2dev->dev, &c2port_bin_attrs);
38760 if (unlikely(ret))
38761 goto error_device_create_bin_file;
38762diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
38763index 3aa9a96..59cf685 100644
38764--- a/drivers/misc/kgdbts.c
38765+++ b/drivers/misc/kgdbts.c
38766@@ -832,7 +832,7 @@ static void run_plant_and_detach_test(int is_early)
38767 char before[BREAK_INSTR_SIZE];
38768 char after[BREAK_INSTR_SIZE];
38769
38770- probe_kernel_read(before, (char *)kgdbts_break_test,
38771+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
38772 BREAK_INSTR_SIZE);
38773 init_simple_test();
38774 ts.tst = plant_and_detach_test;
38775@@ -840,7 +840,7 @@ static void run_plant_and_detach_test(int is_early)
38776 /* Activate test with initial breakpoint */
38777 if (!is_early)
38778 kgdb_breakpoint();
38779- probe_kernel_read(after, (char *)kgdbts_break_test,
38780+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
38781 BREAK_INSTR_SIZE);
38782 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
38783 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
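
kgdbts reads the instruction bytes at kgdbts_break_test to verify kgdb restored them. Under PaX KERNEXEC the address a kernel text symbol names is not necessarily the one the bytes are readable at, and ktla_ktva() (kernel text linear address to kernel text virtual address, as I read the name) rebases it to the mapping probe_kernel_read() can access. The sketch below shows only the shape of such a translation; the real offset is an arch-specific KERNEXEC detail and the constant here is hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    #define KERNEXEC_DELTA 0x10000000UL   /* hypothetical text/alias offset */

    static inline uintptr_t ktla_ktva(uintptr_t addr)
    {
        return addr + KERNEXEC_DELTA;     /* text-linear -> text-virtual */
    }

    int main(void)
    {
        uintptr_t sym = 0xc0400000UL;     /* hypothetical symbol address */
        printf("read instruction bytes at %#lx\n",
               (unsigned long)ktla_ktva(sym));
        return 0;
    }
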
38784diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
38785index 4a87e5c..76bdf5c 100644
38786--- a/drivers/misc/lis3lv02d/lis3lv02d.c
38787+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
38788@@ -498,7 +498,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
38789 * the lid is closed. This leads to interrupts as soon as a little move
38790 * is done.
38791 */
38792- atomic_inc(&lis3->count);
38793+ atomic_inc_unchecked(&lis3->count);
38794
38795 wake_up_interruptible(&lis3->misc_wait);
38796 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
38797@@ -584,7 +584,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
38798 if (lis3->pm_dev)
38799 pm_runtime_get_sync(lis3->pm_dev);
38800
38801- atomic_set(&lis3->count, 0);
38802+ atomic_set_unchecked(&lis3->count, 0);
38803 return 0;
38804 }
38805
38806@@ -617,7 +617,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
38807 add_wait_queue(&lis3->misc_wait, &wait);
38808 while (true) {
38809 set_current_state(TASK_INTERRUPTIBLE);
38810- data = atomic_xchg(&lis3->count, 0);
38811+ data = atomic_xchg_unchecked(&lis3->count, 0);
38812 if (data)
38813 break;
38814
38815@@ -658,7 +658,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
38816 struct lis3lv02d, miscdev);
38817
38818 poll_wait(file, &lis3->misc_wait, wait);
38819- if (atomic_read(&lis3->count))
38820+ if (atomic_read_unchecked(&lis3->count))
38821 return POLLIN | POLLRDNORM;
38822 return 0;
38823 }
38824diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
38825index c439c82..1f20f57 100644
38826--- a/drivers/misc/lis3lv02d/lis3lv02d.h
38827+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
38828@@ -297,7 +297,7 @@ struct lis3lv02d {
38829 struct input_polled_dev *idev; /* input device */
38830 struct platform_device *pdev; /* platform device */
38831 struct regulator_bulk_data regulators[2];
38832- atomic_t count; /* interrupt count after last read */
38833+ atomic_unchecked_t count; /* interrupt count after last read */
38834 union axis_conversion ac; /* hw -> logical axis */
38835 int mapped_btns[3];
38836
38837diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
38838index 2f30bad..c4c13d0 100644
38839--- a/drivers/misc/sgi-gru/gruhandles.c
38840+++ b/drivers/misc/sgi-gru/gruhandles.c
38841@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
38842 unsigned long nsec;
38843
38844 nsec = CLKS2NSEC(clks);
38845- atomic_long_inc(&mcs_op_statistics[op].count);
38846- atomic_long_add(nsec, &mcs_op_statistics[op].total);
38847+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
38848+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
38849 if (mcs_op_statistics[op].max < nsec)
38850 mcs_op_statistics[op].max = nsec;
38851 }
38852diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
38853index 950dbe9..eeef0f8 100644
38854--- a/drivers/misc/sgi-gru/gruprocfs.c
38855+++ b/drivers/misc/sgi-gru/gruprocfs.c
38856@@ -32,9 +32,9 @@
38857
38858 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
38859
38860-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
38861+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
38862 {
38863- unsigned long val = atomic_long_read(v);
38864+ unsigned long val = atomic_long_read_unchecked(v);
38865
38866 seq_printf(s, "%16lu %s\n", val, id);
38867 }
38868@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
38869
38870 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
38871 for (op = 0; op < mcsop_last; op++) {
38872- count = atomic_long_read(&mcs_op_statistics[op].count);
38873- total = atomic_long_read(&mcs_op_statistics[op].total);
38874+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
38875+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
38876 max = mcs_op_statistics[op].max;
38877 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
38878 count ? total / count : 0, max);
38879diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
38880index 5c3ce24..4915ccb 100644
38881--- a/drivers/misc/sgi-gru/grutables.h
38882+++ b/drivers/misc/sgi-gru/grutables.h
38883@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
38884 * GRU statistics.
38885 */
38886 struct gru_stats_s {
38887- atomic_long_t vdata_alloc;
38888- atomic_long_t vdata_free;
38889- atomic_long_t gts_alloc;
38890- atomic_long_t gts_free;
38891- atomic_long_t gms_alloc;
38892- atomic_long_t gms_free;
38893- atomic_long_t gts_double_allocate;
38894- atomic_long_t assign_context;
38895- atomic_long_t assign_context_failed;
38896- atomic_long_t free_context;
38897- atomic_long_t load_user_context;
38898- atomic_long_t load_kernel_context;
38899- atomic_long_t lock_kernel_context;
38900- atomic_long_t unlock_kernel_context;
38901- atomic_long_t steal_user_context;
38902- atomic_long_t steal_kernel_context;
38903- atomic_long_t steal_context_failed;
38904- atomic_long_t nopfn;
38905- atomic_long_t asid_new;
38906- atomic_long_t asid_next;
38907- atomic_long_t asid_wrap;
38908- atomic_long_t asid_reuse;
38909- atomic_long_t intr;
38910- atomic_long_t intr_cbr;
38911- atomic_long_t intr_tfh;
38912- atomic_long_t intr_spurious;
38913- atomic_long_t intr_mm_lock_failed;
38914- atomic_long_t call_os;
38915- atomic_long_t call_os_wait_queue;
38916- atomic_long_t user_flush_tlb;
38917- atomic_long_t user_unload_context;
38918- atomic_long_t user_exception;
38919- atomic_long_t set_context_option;
38920- atomic_long_t check_context_retarget_intr;
38921- atomic_long_t check_context_unload;
38922- atomic_long_t tlb_dropin;
38923- atomic_long_t tlb_preload_page;
38924- atomic_long_t tlb_dropin_fail_no_asid;
38925- atomic_long_t tlb_dropin_fail_upm;
38926- atomic_long_t tlb_dropin_fail_invalid;
38927- atomic_long_t tlb_dropin_fail_range_active;
38928- atomic_long_t tlb_dropin_fail_idle;
38929- atomic_long_t tlb_dropin_fail_fmm;
38930- atomic_long_t tlb_dropin_fail_no_exception;
38931- atomic_long_t tfh_stale_on_fault;
38932- atomic_long_t mmu_invalidate_range;
38933- atomic_long_t mmu_invalidate_page;
38934- atomic_long_t flush_tlb;
38935- atomic_long_t flush_tlb_gru;
38936- atomic_long_t flush_tlb_gru_tgh;
38937- atomic_long_t flush_tlb_gru_zero_asid;
38938+ atomic_long_unchecked_t vdata_alloc;
38939+ atomic_long_unchecked_t vdata_free;
38940+ atomic_long_unchecked_t gts_alloc;
38941+ atomic_long_unchecked_t gts_free;
38942+ atomic_long_unchecked_t gms_alloc;
38943+ atomic_long_unchecked_t gms_free;
38944+ atomic_long_unchecked_t gts_double_allocate;
38945+ atomic_long_unchecked_t assign_context;
38946+ atomic_long_unchecked_t assign_context_failed;
38947+ atomic_long_unchecked_t free_context;
38948+ atomic_long_unchecked_t load_user_context;
38949+ atomic_long_unchecked_t load_kernel_context;
38950+ atomic_long_unchecked_t lock_kernel_context;
38951+ atomic_long_unchecked_t unlock_kernel_context;
38952+ atomic_long_unchecked_t steal_user_context;
38953+ atomic_long_unchecked_t steal_kernel_context;
38954+ atomic_long_unchecked_t steal_context_failed;
38955+ atomic_long_unchecked_t nopfn;
38956+ atomic_long_unchecked_t asid_new;
38957+ atomic_long_unchecked_t asid_next;
38958+ atomic_long_unchecked_t asid_wrap;
38959+ atomic_long_unchecked_t asid_reuse;
38960+ atomic_long_unchecked_t intr;
38961+ atomic_long_unchecked_t intr_cbr;
38962+ atomic_long_unchecked_t intr_tfh;
38963+ atomic_long_unchecked_t intr_spurious;
38964+ atomic_long_unchecked_t intr_mm_lock_failed;
38965+ atomic_long_unchecked_t call_os;
38966+ atomic_long_unchecked_t call_os_wait_queue;
38967+ atomic_long_unchecked_t user_flush_tlb;
38968+ atomic_long_unchecked_t user_unload_context;
38969+ atomic_long_unchecked_t user_exception;
38970+ atomic_long_unchecked_t set_context_option;
38971+ atomic_long_unchecked_t check_context_retarget_intr;
38972+ atomic_long_unchecked_t check_context_unload;
38973+ atomic_long_unchecked_t tlb_dropin;
38974+ atomic_long_unchecked_t tlb_preload_page;
38975+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
38976+ atomic_long_unchecked_t tlb_dropin_fail_upm;
38977+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
38978+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
38979+ atomic_long_unchecked_t tlb_dropin_fail_idle;
38980+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
38981+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
38982+ atomic_long_unchecked_t tfh_stale_on_fault;
38983+ atomic_long_unchecked_t mmu_invalidate_range;
38984+ atomic_long_unchecked_t mmu_invalidate_page;
38985+ atomic_long_unchecked_t flush_tlb;
38986+ atomic_long_unchecked_t flush_tlb_gru;
38987+ atomic_long_unchecked_t flush_tlb_gru_tgh;
38988+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
38989
38990- atomic_long_t copy_gpa;
38991- atomic_long_t read_gpa;
38992+ atomic_long_unchecked_t copy_gpa;
38993+ atomic_long_unchecked_t read_gpa;
38994
38995- atomic_long_t mesq_receive;
38996- atomic_long_t mesq_receive_none;
38997- atomic_long_t mesq_send;
38998- atomic_long_t mesq_send_failed;
38999- atomic_long_t mesq_noop;
39000- atomic_long_t mesq_send_unexpected_error;
39001- atomic_long_t mesq_send_lb_overflow;
39002- atomic_long_t mesq_send_qlimit_reached;
39003- atomic_long_t mesq_send_amo_nacked;
39004- atomic_long_t mesq_send_put_nacked;
39005- atomic_long_t mesq_page_overflow;
39006- atomic_long_t mesq_qf_locked;
39007- atomic_long_t mesq_qf_noop_not_full;
39008- atomic_long_t mesq_qf_switch_head_failed;
39009- atomic_long_t mesq_qf_unexpected_error;
39010- atomic_long_t mesq_noop_unexpected_error;
39011- atomic_long_t mesq_noop_lb_overflow;
39012- atomic_long_t mesq_noop_qlimit_reached;
39013- atomic_long_t mesq_noop_amo_nacked;
39014- atomic_long_t mesq_noop_put_nacked;
39015- atomic_long_t mesq_noop_page_overflow;
39016+ atomic_long_unchecked_t mesq_receive;
39017+ atomic_long_unchecked_t mesq_receive_none;
39018+ atomic_long_unchecked_t mesq_send;
39019+ atomic_long_unchecked_t mesq_send_failed;
39020+ atomic_long_unchecked_t mesq_noop;
39021+ atomic_long_unchecked_t mesq_send_unexpected_error;
39022+ atomic_long_unchecked_t mesq_send_lb_overflow;
39023+ atomic_long_unchecked_t mesq_send_qlimit_reached;
39024+ atomic_long_unchecked_t mesq_send_amo_nacked;
39025+ atomic_long_unchecked_t mesq_send_put_nacked;
39026+ atomic_long_unchecked_t mesq_page_overflow;
39027+ atomic_long_unchecked_t mesq_qf_locked;
39028+ atomic_long_unchecked_t mesq_qf_noop_not_full;
39029+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
39030+ atomic_long_unchecked_t mesq_qf_unexpected_error;
39031+ atomic_long_unchecked_t mesq_noop_unexpected_error;
39032+ atomic_long_unchecked_t mesq_noop_lb_overflow;
39033+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
39034+ atomic_long_unchecked_t mesq_noop_amo_nacked;
39035+ atomic_long_unchecked_t mesq_noop_put_nacked;
39036+ atomic_long_unchecked_t mesq_noop_page_overflow;
39037
39038 };
39039
39040@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
39041 tghop_invalidate, mcsop_last};
39042
39043 struct mcs_op_statistic {
39044- atomic_long_t count;
39045- atomic_long_t total;
39046+ atomic_long_unchecked_t count;
39047+ atomic_long_unchecked_t total;
39048 unsigned long max;
39049 };
39050
39051@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
39052
39053 #define STAT(id) do { \
39054 if (gru_options & OPT_STATS) \
39055- atomic_long_inc(&gru_stats.id); \
39056+ atomic_long_inc_unchecked(&gru_stats.id); \
39057 } while (0)
39058
39059 #ifdef CONFIG_SGI_GRU_DEBUG
39060diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
39061index c862cd4..0d176fe 100644
39062--- a/drivers/misc/sgi-xp/xp.h
39063+++ b/drivers/misc/sgi-xp/xp.h
39064@@ -288,7 +288,7 @@ struct xpc_interface {
39065 xpc_notify_func, void *);
39066 void (*received) (short, int, void *);
39067 enum xp_retval (*partid_to_nasids) (short, void *);
39068-};
39069+} __no_const;
39070
39071 extern struct xpc_interface xpc_interface;
39072
39073diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
39074index b94d5f7..7f494c5 100644
39075--- a/drivers/misc/sgi-xp/xpc.h
39076+++ b/drivers/misc/sgi-xp/xpc.h
39077@@ -835,6 +835,7 @@ struct xpc_arch_operations {
39078 void (*received_payload) (struct xpc_channel *, void *);
39079 void (*notify_senders_of_disconnect) (struct xpc_channel *);
39080 };
39081+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
39082
39083 /* struct xpc_partition act_state values (for XPC HB) */
39084
39085@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
39086 /* found in xpc_main.c */
39087 extern struct device *xpc_part;
39088 extern struct device *xpc_chan;
39089-extern struct xpc_arch_operations xpc_arch_ops;
39090+extern xpc_arch_operations_no_const xpc_arch_ops;
39091 extern int xpc_disengage_timelimit;
39092 extern int xpc_disengage_timedout;
39093 extern int xpc_activate_IRQ_rcvd;
39094diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
39095index d971817..33bdca5 100644
39096--- a/drivers/misc/sgi-xp/xpc_main.c
39097+++ b/drivers/misc/sgi-xp/xpc_main.c
39098@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
39099 .notifier_call = xpc_system_die,
39100 };
39101
39102-struct xpc_arch_operations xpc_arch_ops;
39103+xpc_arch_operations_no_const xpc_arch_ops;
39104
39105 /*
39106 * Timer function to enforce the timelimit on the partition disengage.
39107@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
39108
39109 if (((die_args->trapnr == X86_TRAP_MF) ||
39110 (die_args->trapnr == X86_TRAP_XF)) &&
39111- !user_mode_vm(die_args->regs))
39112+ !user_mode(die_args->regs))
39113 xpc_die_deactivate();
39114
39115 break;
39116diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
39117index 6d8f701..35b6369 100644
39118--- a/drivers/mmc/core/mmc_ops.c
39119+++ b/drivers/mmc/core/mmc_ops.c
39120@@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
39121 void *data_buf;
39122 int is_on_stack;
39123
39124- is_on_stack = object_is_on_stack(buf);
39125+ is_on_stack = object_starts_on_stack(buf);
39126 if (is_on_stack) {
39127 /*
39128 * dma onto stack is unsafe/nonportable, but callers to this
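
mmc_send_cxd_data() bounces the transfer through a heap buffer when the caller's buffer is on the stack, since DMA onto the stack is unsafe. The rename to object_starts_on_stack() is the hardened kernel making the semantics explicit: only the object's first byte is tested against the stack bounds, which is all this caller needs. A userspace sketch of such a predicate, with a hypothetical stack size and a faked base:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define THREAD_SIZE 8192   /* hypothetical stack size */

    static bool object_starts_on_stack(const void *obj, const void *stack_base)
    {
        uintptr_t o = (uintptr_t)obj, s = (uintptr_t)stack_base;

        /* Only the first byte is tested -- hence the more honest name. */
        return o >= s && o < s + THREAD_SIZE;
    }

    int main(void)
    {
        char local;
        /* Fake a stack base near the local, for demonstration only. */
        const void *base = (const char *)&local - 128;

        printf("%d\n", object_starts_on_stack(&local, base));
        return 0;
    }
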
39129diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
39130index 53b8fd9..615b462 100644
39131--- a/drivers/mmc/host/dw_mmc.h
39132+++ b/drivers/mmc/host/dw_mmc.h
39133@@ -205,5 +205,5 @@ struct dw_mci_drv_data {
39134 int (*parse_dt)(struct dw_mci *host);
39135 int (*setup_bus)(struct dw_mci *host,
39136 struct device_node *slot_np, u8 bus_width);
39137-};
39138+} __do_const;
39139 #endif /* _DW_MMC_H_ */
39140diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
39141index 82a8de1..3c56ccb 100644
39142--- a/drivers/mmc/host/sdhci-s3c.c
39143+++ b/drivers/mmc/host/sdhci-s3c.c
39144@@ -721,9 +721,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
39145 * we can use overriding functions instead of default.
39146 */
39147 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
39148- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
39149- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
39150- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
39151+ pax_open_kernel();
39152+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
39153+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
39154+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
39155+ pax_close_kernel();
39156 }
39157
39158 /* It supports additional host capabilities if needed */
39159diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
39160index a4eb8b5..8c0628f 100644
39161--- a/drivers/mtd/devices/doc2000.c
39162+++ b/drivers/mtd/devices/doc2000.c
39163@@ -753,7 +753,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
39164
39165 /* The ECC will not be calculated correctly if less than 512 is written */
39166 /* DBB-
39167- if (len != 0x200 && eccbuf)
39168+ if (len != 0x200)
39169 printk(KERN_WARNING
39170 "ECC needs a full sector write (adr: %lx size %lx)\n",
39171 (long) to, (long) len);
39172diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
39173index 0c8bb6b..6f35deb 100644
39174--- a/drivers/mtd/nand/denali.c
39175+++ b/drivers/mtd/nand/denali.c
39176@@ -24,6 +24,7 @@
39177 #include <linux/slab.h>
39178 #include <linux/mtd/mtd.h>
39179 #include <linux/module.h>
39180+#include <linux/slab.h>
39181
39182 #include "denali.h"
39183
39184diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
39185index 51b9d6a..52af9a7 100644
39186--- a/drivers/mtd/nftlmount.c
39187+++ b/drivers/mtd/nftlmount.c
39188@@ -24,6 +24,7 @@
39189 #include <asm/errno.h>
39190 #include <linux/delay.h>
39191 #include <linux/slab.h>
39192+#include <linux/sched.h>
39193 #include <linux/mtd/mtd.h>
39194 #include <linux/mtd/nand.h>
39195 #include <linux/mtd/nftl.h>
39196diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
39197index 8dd6ba5..419cc1d 100644
39198--- a/drivers/mtd/sm_ftl.c
39199+++ b/drivers/mtd/sm_ftl.c
39200@@ -56,7 +56,7 @@ ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
39201 #define SM_CIS_VENDOR_OFFSET 0x59
39202 struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
39203 {
39204- struct attribute_group *attr_group;
39205+ attribute_group_no_const *attr_group;
39206 struct attribute **attributes;
39207 struct sm_sysfs_attribute *vendor_attribute;
39208
39209diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
39210index a079da17..f86ffd5 100644
39211--- a/drivers/net/bonding/bond_main.c
39212+++ b/drivers/net/bonding/bond_main.c
39213@@ -4862,7 +4862,7 @@ static unsigned int bond_get_num_tx_queues(void)
39214 return tx_queues;
39215 }
39216
39217-static struct rtnl_link_ops bond_link_ops __read_mostly = {
39218+static struct rtnl_link_ops bond_link_ops = {
39219 .kind = "bond",
39220 .priv_size = sizeof(struct bonding),
39221 .setup = bond_setup,
39222diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
39223index 70dba5d..11a0919 100644
39224--- a/drivers/net/ethernet/8390/ax88796.c
39225+++ b/drivers/net/ethernet/8390/ax88796.c
39226@@ -872,9 +872,11 @@ static int ax_probe(struct platform_device *pdev)
39227 if (ax->plat->reg_offsets)
39228 ei_local->reg_offset = ax->plat->reg_offsets;
39229 else {
39230+ resource_size_t _mem_size = mem_size;
39231+ do_div(_mem_size, 0x18);
39232 ei_local->reg_offset = ax->reg_offsets;
39233 for (ret = 0; ret < 0x18; ret++)
39234- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
39235+ ax->reg_offsets[ret] = _mem_size * ret;
39236 }
39237
39238 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
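
resource_size_t can be 64 bits wide even on 32-bit configurations, where a plain mem_size / 0x18 would emit a call to a 64-bit division helper the kernel does not link against; do_div() divides in place using the kernel's own routine, which is why the patch copies mem_size first and multiplies by the quotient inside the loop. A stand-in for the macro (a GNU C statement expression, since the real one is arch assembly):

    #include <stdint.h>
    #include <stdio.h>

    /* Minimal model of the kernel macro: divides n in place by base
     * and yields the remainder. */
    #define do_div(n, base) ({                         \
        uint32_t __rem = (uint32_t)((n) % (base));     \
        (n) /= (base);                                 \
        __rem;                                         \
    })

    int main(void)
    {
        uint64_t mem_size  = 0x1800;   /* resource_size_t may be 64-bit */
        uint64_t _mem_size = mem_size; /* keep the original intact      */
        int ret;

        do_div(_mem_size, 0x18);
        for (ret = 0; ret < 3; ret++)
            printf("reg_offset[%d] = %llu\n", ret,
                   (unsigned long long)(_mem_size * ret));
        return 0;
    }
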
39239diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
39240index 0991534..8098e92 100644
39241--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
39242+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
39243@@ -1094,7 +1094,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
39244 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
39245 {
39246 /* RX_MODE controlling object */
39247- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
39248+ bnx2x_init_rx_mode_obj(bp);
39249
39250 /* multicast configuration controlling object */
39251 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
39252diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
39253index 09b625e..15b16fe 100644
39254--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
39255+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
39256@@ -2375,15 +2375,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
39257 return rc;
39258 }
39259
39260-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
39261- struct bnx2x_rx_mode_obj *o)
39262+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
39263 {
39264 if (CHIP_IS_E1x(bp)) {
39265- o->wait_comp = bnx2x_empty_rx_mode_wait;
39266- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
39267+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
39268+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
39269 } else {
39270- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
39271- o->config_rx_mode = bnx2x_set_rx_mode_e2;
39272+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
39273+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
39274 }
39275 }
39276
39277diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
39278index adbd91b..58ec94a 100644
39279--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
39280+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
39281@@ -1293,8 +1293,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
39282
39283 /********************* RX MODE ****************/
39284
39285-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
39286- struct bnx2x_rx_mode_obj *o);
39287+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
39288
39289 /**
39290 * bnx2x_config_rx_mode - Send an RX_MODE ramrod according to the provided parameters.
39291diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
39292index f0b38fa..3a312b5 100644
39293--- a/drivers/net/ethernet/broadcom/tg3.c
39294+++ b/drivers/net/ethernet/broadcom/tg3.c
39295@@ -14395,8 +14395,10 @@ static void tg3_read_vpd(struct tg3 *tp)
39296 if (j + len > block_end)
39297 goto partno;
39298
39299- memcpy(tp->fw_ver, &vpd_data[j], len);
39300- strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
39301+ if (len >= sizeof(tp->fw_ver))
39302+ len = sizeof(tp->fw_ver) - 1;
39303+ memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
39304+ snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, &vpd_data[j]);
39305 }
39306
39307 partno:
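
The tg3_read_vpd() rewrite removes a pair of unbounded copies: len comes straight from the VPD contents, so the memcpy() could overrun tp->fw_ver, and the strncat() that followed was sized against vpdlen rather than the destination. Clamping len and emitting the whole string with one snprintf() guarantees both truncation and NUL termination. The idiom, with made-up buffer contents:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char fw_ver[16];	/* stands in for tp->fw_ver */
	const char vpd[] = "5719-v1.24-unreasonably-long-tail";
	size_t len = strlen(vpd);	/* firmware-supplied, untrusted */

	if (len >= sizeof(fw_ver))	/* the added clamp */
		len = sizeof(fw_ver) - 1;
	snprintf(fw_ver, sizeof(fw_ver), "%.*s bc ", (int)len, vpd);
	printf("'%s'\n", fw_ver);	/* always NUL-terminated */
	return 0;
}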
39308diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
39309index d330e81..ce1fb9a 100644
39310--- a/drivers/net/ethernet/broadcom/tg3.h
39311+++ b/drivers/net/ethernet/broadcom/tg3.h
39312@@ -146,6 +146,7 @@
39313 #define CHIPREV_ID_5750_A0 0x4000
39314 #define CHIPREV_ID_5750_A1 0x4001
39315 #define CHIPREV_ID_5750_A3 0x4003
39316+#define CHIPREV_ID_5750_C1 0x4201
39317 #define CHIPREV_ID_5750_C2 0x4202
39318 #define CHIPREV_ID_5752_A0_HW 0x5000
39319 #define CHIPREV_ID_5752_A0 0x6000
39320diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
39321index 8cffcdf..aadf043 100644
39322--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
39323+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
39324@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
39325 */
39326 struct l2t_skb_cb {
39327 arp_failure_handler_func arp_failure_handler;
39328-};
39329+} __no_const;
39330
39331 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
39332
39333diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
39334index 4c83003..2a2a5b9 100644
39335--- a/drivers/net/ethernet/dec/tulip/de4x5.c
39336+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
39337@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39338 for (i=0; i<ETH_ALEN; i++) {
39339 tmp.addr[i] = dev->dev_addr[i];
39340 }
39341- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39342+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39343 break;
39344
39345 case DE4X5_SET_HWADDR: /* Set the hardware address */
39346@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39347 spin_lock_irqsave(&lp->lock, flags);
39348 memcpy(&statbuf, &lp->pktStats, ioc->len);
39349 spin_unlock_irqrestore(&lp->lock, flags);
39350- if (copy_to_user(ioc->data, &statbuf, ioc->len))
39351+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
39352 return -EFAULT;
39353 break;
39354 }
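
Both de4x5 ioctl branches gain the same guard: ioc->len is caller-controlled, so copying that many bytes out of a fixed-size kernel buffer would leak adjacent stack memory to user space. Rejecting oversized lengths before copy_to_user() closes the hole. A user-space model of the pattern, with memcpy() standing in for copy_to_user() and an illustrative request layout:

#include <stdio.h>
#include <string.h>

struct ioc_req { char *data; size_t len; };	/* illustrative only */

static int copy_out(struct ioc_req *ioc, const void *buf, size_t bufsz)
{
	if (ioc->len > bufsz)	/* the added bound check */
		return -1;	/* -EFAULT in the driver */
	memcpy(ioc->data, buf, ioc->len);
	return 0;
}

int main(void)
{
	char kbuf[6] = "stats", ubuf[64];
	struct ioc_req ioc = { ubuf, sizeof(ubuf) };	/* asks for too much */
	printf("%d\n", copy_out(&ioc, kbuf, sizeof(kbuf)));	/* rejected */
	return 0;
}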
39355diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
39356index 4d6f3c5..6169e60 100644
39357--- a/drivers/net/ethernet/emulex/benet/be_main.c
39358+++ b/drivers/net/ethernet/emulex/benet/be_main.c
39359@@ -455,7 +455,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
39360
39361 if (wrapped)
39362 newacc += 65536;
39363- ACCESS_ONCE(*acc) = newacc;
39364+ ACCESS_ONCE_RW(*acc) = newacc;
39365 }
39366
39367 void be_parse_stats(struct be_adapter *adapter)
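
ACCESS_ONCE_RW() is the write-side companion the patch uses wherever ACCESS_ONCE() appears on the left of an assignment: both force exactly one volatile access, but the stock macro casts through const, which the constify changes turn into a compile error for stores. A sketch of the presumed definitions (the real ones live in the patched include/linux/compiler.h) plus one use of each:

#include <stdio.h>

#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

int main(void)
{
	unsigned int acc = 0;
	ACCESS_ONCE_RW(acc) = 65536;	/* single volatile store */
	printf("%u\n", ACCESS_ONCE(acc));	/* single volatile load */
	return 0;
}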
39368diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
39369index 74d749e..eefb1bd 100644
39370--- a/drivers/net/ethernet/faraday/ftgmac100.c
39371+++ b/drivers/net/ethernet/faraday/ftgmac100.c
39372@@ -31,6 +31,8 @@
39373 #include <linux/netdevice.h>
39374 #include <linux/phy.h>
39375 #include <linux/platform_device.h>
39376+#include <linux/interrupt.h>
39377+#include <linux/irqreturn.h>
39378 #include <net/ip.h>
39379
39380 #include "ftgmac100.h"
39381diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
39382index b901a01..1ff32ee 100644
39383--- a/drivers/net/ethernet/faraday/ftmac100.c
39384+++ b/drivers/net/ethernet/faraday/ftmac100.c
39385@@ -31,6 +31,8 @@
39386 #include <linux/module.h>
39387 #include <linux/netdevice.h>
39388 #include <linux/platform_device.h>
39389+#include <linux/interrupt.h>
39390+#include <linux/irqreturn.h>
39391
39392 #include "ftmac100.h"
39393
39394diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
39395index bb9256a..56d8752 100644
39396--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
39397+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
39398@@ -806,7 +806,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
39399 }
39400
39401 /* update the base incval used to calculate frequency adjustment */
39402- ACCESS_ONCE(adapter->base_incval) = incval;
39403+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
39404 smp_mb();
39405
39406 /* need lock to prevent incorrect read while modifying cyclecounter */
39407diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
39408index fbe5363..266b4e3 100644
39409--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
39410+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
39411@@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
39412 struct __vxge_hw_fifo *fifo;
39413 struct vxge_hw_fifo_config *config;
39414 u32 txdl_size, txdl_per_memblock;
39415- struct vxge_hw_mempool_cbs fifo_mp_callback;
39416+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
39417+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
39418+ };
39419+
39420 struct __vxge_hw_virtualpath *vpath;
39421
39422 if ((vp == NULL) || (attr == NULL)) {
39423@@ -3544,8 +3547,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
39424 goto exit;
39425 }
39426
39427- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
39428-
39429 fifo->mempool =
39430 __vxge_hw_mempool_create(vpath->hldev,
39431 fifo->config->memblock_size,
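
__vxge_hw_fifo_create() used to build its callback table on the stack and assign the one member at runtime; making it a function-local static with a designated initializer means the object is emitted once at compile time and becomes eligible for constification. The cpcihp drivers further down get the same treatment. The pattern in isolation:

#include <stdio.h>

struct mempool_cbs { void (*item_alloc)(void); };	/* illustrative */

static void item_alloc_impl(void) { puts("alloc"); }

static void create_fifo(void)
{
	/* one compile-time-initialized instance instead of a stack
	 * copy that is written on every call */
	static const struct mempool_cbs cbs = {
		.item_alloc = item_alloc_impl,
	};
	cbs.item_alloc();
}

int main(void) { create_fifo(); return 0; }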
39432diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
39433index 998974f..ecd26db 100644
39434--- a/drivers/net/ethernet/realtek/r8169.c
39435+++ b/drivers/net/ethernet/realtek/r8169.c
39436@@ -741,22 +741,22 @@ struct rtl8169_private {
39437 struct mdio_ops {
39438 void (*write)(struct rtl8169_private *, int, int);
39439 int (*read)(struct rtl8169_private *, int);
39440- } mdio_ops;
39441+ } __no_const mdio_ops;
39442
39443 struct pll_power_ops {
39444 void (*down)(struct rtl8169_private *);
39445 void (*up)(struct rtl8169_private *);
39446- } pll_power_ops;
39447+ } __no_const pll_power_ops;
39448
39449 struct jumbo_ops {
39450 void (*enable)(struct rtl8169_private *);
39451 void (*disable)(struct rtl8169_private *);
39452- } jumbo_ops;
39453+ } __no_const jumbo_ops;
39454
39455 struct csi_ops {
39456 void (*write)(struct rtl8169_private *, int, int);
39457 u32 (*read)(struct rtl8169_private *, int);
39458- } csi_ops;
39459+ } __no_const csi_ops;
39460
39461 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
39462 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
39463diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
39464index 0767043f..08c2553 100644
39465--- a/drivers/net/ethernet/sfc/ptp.c
39466+++ b/drivers/net/ethernet/sfc/ptp.c
39467@@ -553,7 +553,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
39468 (u32)((u64)ptp->start.dma_addr >> 32));
39469
39470 /* Clear flag that signals MC ready */
39471- ACCESS_ONCE(*start) = 0;
39472+ ACCESS_ONCE_RW(*start) = 0;
39473 efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
39474 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
39475
39476diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
39477index 0c74a70..3bc6f68 100644
39478--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
39479+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
39480@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
39481
39482 writel(value, ioaddr + MMC_CNTRL);
39483
39484- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
39485- MMC_CNTRL, value);
39486+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
39487+// MMC_CNTRL, value);
39488 }
39489
39490 /* To mask all interrupts. */
39491diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
39492index e6fe0d8..2b7d752 100644
39493--- a/drivers/net/hyperv/hyperv_net.h
39494+++ b/drivers/net/hyperv/hyperv_net.h
39495@@ -101,7 +101,7 @@ struct rndis_device {
39496
39497 enum rndis_device_state state;
39498 bool link_state;
39499- atomic_t new_req_id;
39500+ atomic_unchecked_t new_req_id;
39501
39502 spinlock_t request_lock;
39503 struct list_head req_list;
39504diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
39505index 2b657d4..9903bc0 100644
39506--- a/drivers/net/hyperv/rndis_filter.c
39507+++ b/drivers/net/hyperv/rndis_filter.c
39508@@ -107,7 +107,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
39509 * template
39510 */
39511 set = &rndis_msg->msg.set_req;
39512- set->req_id = atomic_inc_return(&dev->new_req_id);
39513+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
39514
39515 /* Add to the request list */
39516 spin_lock_irqsave(&dev->request_lock, flags);
39517@@ -758,7 +758,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
39518
39519 /* Setup the rndis set */
39520 halt = &request->request_msg.msg.halt_req;
39521- halt->req_id = atomic_inc_return(&dev->new_req_id);
39522+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
39523
39524 /* Ignore return since this msg is optional. */
39525 rndis_filter_send_request(dev, request);
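
atomic_unchecked_t and the *_unchecked helpers exist because PaX's REFCOUNT feature traps on atomic_t overflow to stop reference-count wraps; a counter like this request-id generator is expected to wrap harmlessly, so the patch moves it to the unchecked variant rather than let it trip the detector. A C11 model of the distinction (type and helper names are illustrative):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_uint v; } atomic_unchecked_demo;

/* wraparound is intentional and benign for an ID generator */
static unsigned int inc_return_unchecked(atomic_unchecked_demo *a)
{
	return atomic_fetch_add(&a->v, 1) + 1;
}

int main(void)
{
	atomic_unchecked_demo req_id = { UINT_MAX };	/* about to wrap */
	printf("%u\n", inc_return_unchecked(&req_id));	/* 0, no trap */
	return 0;
}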
39526diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
39527index 1e9cb0b..7839125 100644
39528--- a/drivers/net/ieee802154/fakehard.c
39529+++ b/drivers/net/ieee802154/fakehard.c
39530@@ -386,7 +386,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
39531 phy->transmit_power = 0xbf;
39532
39533 dev->netdev_ops = &fake_ops;
39534- dev->ml_priv = &fake_mlme;
39535+ dev->ml_priv = (void *)&fake_mlme;
39536
39537 priv = netdev_priv(dev);
39538 priv->phy = phy;
39539diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
39540index e5cb723..1fc0461 100644
39541--- a/drivers/net/macvlan.c
39542+++ b/drivers/net/macvlan.c
39543@@ -852,13 +852,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
39544 int macvlan_link_register(struct rtnl_link_ops *ops)
39545 {
39546 /* common fields */
39547- ops->priv_size = sizeof(struct macvlan_dev);
39548- ops->validate = macvlan_validate;
39549- ops->maxtype = IFLA_MACVLAN_MAX;
39550- ops->policy = macvlan_policy;
39551- ops->changelink = macvlan_changelink;
39552- ops->get_size = macvlan_get_size;
39553- ops->fill_info = macvlan_fill_info;
39554+ pax_open_kernel();
39555+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
39556+ *(void **)&ops->validate = macvlan_validate;
39557+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
39558+ *(const void **)&ops->policy = macvlan_policy;
39559+ *(void **)&ops->changelink = macvlan_changelink;
39560+ *(void **)&ops->get_size = macvlan_get_size;
39561+ *(void **)&ops->fill_info = macvlan_fill_info;
39562+ pax_close_kernel();
39563
39564 return rtnl_link_register(ops);
39565 };
39566@@ -914,7 +916,7 @@ static int macvlan_device_event(struct notifier_block *unused,
39567 return NOTIFY_DONE;
39568 }
39569
39570-static struct notifier_block macvlan_notifier_block __read_mostly = {
39571+static struct notifier_block macvlan_notifier_block = {
39572 .notifier_call = macvlan_device_event,
39573 };
39574
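
The macvlan hunk shows the pax_open_kernel()/pax_close_kernel() bracket in full: with rtnl_link_ops constified, one-time setup has to lift write protection, poke each field through a cast that strips const, and re-arm protection. In the kernel the pair toggles KERNEXEC/CR0.WP state; a rough user-space analogue of the same discipline uses mprotect() on a read-only mapping:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t pg = (size_t)sysconf(_SC_PAGESIZE);
	/* stand-in for a constified ops structure */
	char *ops = mmap(NULL, pg, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	strcpy(ops, "old handler");
	mprotect(ops, pg, PROT_READ);	/* steady state: read-only */

	mprotect(ops, pg, PROT_READ | PROT_WRITE);	/* pax_open_kernel() */
	strcpy(ops, "new handler");			/* patch the field */
	mprotect(ops, pg, PROT_READ);			/* pax_close_kernel() */

	puts(ops);
	munmap(ops, pg);
	return 0;
}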
39575diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
39576index 0f0f9ce..0ca5819 100644
39577--- a/drivers/net/macvtap.c
39578+++ b/drivers/net/macvtap.c
39579@@ -1100,7 +1100,7 @@ static int macvtap_device_event(struct notifier_block *unused,
39580 return NOTIFY_DONE;
39581 }
39582
39583-static struct notifier_block macvtap_notifier_block __read_mostly = {
39584+static struct notifier_block macvtap_notifier_block = {
39585 .notifier_call = macvtap_device_event,
39586 };
39587
39588diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
39589index daec9b0..6428fcb 100644
39590--- a/drivers/net/phy/mdio-bitbang.c
39591+++ b/drivers/net/phy/mdio-bitbang.c
39592@@ -234,6 +234,7 @@ void free_mdio_bitbang(struct mii_bus *bus)
39593 struct mdiobb_ctrl *ctrl = bus->priv;
39594
39595 module_put(ctrl->ops->owner);
39596+ mdiobus_unregister(bus);
39597 mdiobus_free(bus);
39598 }
39599 EXPORT_SYMBOL(free_mdio_bitbang);
39600diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
39601index 508570e..f706dc7 100644
39602--- a/drivers/net/ppp/ppp_generic.c
39603+++ b/drivers/net/ppp/ppp_generic.c
39604@@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
39605 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
39606 struct ppp_stats stats;
39607 struct ppp_comp_stats cstats;
39608- char *vers;
39609
39610 switch (cmd) {
39611 case SIOCGPPPSTATS:
39612@@ -1021,8 +1020,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
39613 break;
39614
39615 case SIOCGPPPVER:
39616- vers = PPP_VERSION;
39617- if (copy_to_user(addr, vers, strlen(vers) + 1))
39618+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
39619 break;
39620 err = 0;
39621 break;
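
The SIOCGPPPVER change relies on PPP_VERSION being a string literal, so sizeof already yields the length including the terminating NUL at compile time; the runtime strlen() and the writable vers pointer were dead weight. A quick check of the equivalence, assuming the macro's conventional value:

#include <stdio.h>
#include <string.h>

#define PPP_VERSION "2.4.2"	/* assumed value of the driver's macro */

int main(void)
{
	/* both print 6: five characters plus the NUL */
	printf("%zu %zu\n", sizeof(PPP_VERSION), strlen(PPP_VERSION) + 1);
	return 0;
}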
39622diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
39623index 8efe47a..a8075c5 100644
39624--- a/drivers/net/team/team.c
39625+++ b/drivers/net/team/team.c
39626@@ -2603,7 +2603,7 @@ static int team_device_event(struct notifier_block *unused,
39627 return NOTIFY_DONE;
39628 }
39629
39630-static struct notifier_block team_notifier_block __read_mostly = {
39631+static struct notifier_block team_notifier_block = {
39632 .notifier_call = team_device_event,
39633 };
39634
39635diff --git a/drivers/net/tun.c b/drivers/net/tun.c
39636index cb95fe5..a5bdab5 100644
39637--- a/drivers/net/tun.c
39638+++ b/drivers/net/tun.c
39639@@ -1838,7 +1838,7 @@ unlock:
39640 }
39641
39642 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
39643- unsigned long arg, int ifreq_len)
39644+ unsigned long arg, size_t ifreq_len)
39645 {
39646 struct tun_file *tfile = file->private_data;
39647 struct tun_struct *tun;
39648@@ -1850,6 +1850,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
39649 int vnet_hdr_sz;
39650 int ret;
39651
39652+ if (ifreq_len > sizeof ifr)
39653+ return -EFAULT;
39654+
39655 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
39656 if (copy_from_user(&ifr, argp, ifreq_len))
39657 return -EFAULT;
39658diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
39659index cd8ccb2..cff5144 100644
39660--- a/drivers/net/usb/hso.c
39661+++ b/drivers/net/usb/hso.c
39662@@ -71,7 +71,7 @@
39663 #include <asm/byteorder.h>
39664 #include <linux/serial_core.h>
39665 #include <linux/serial.h>
39666-
39667+#include <asm/local.h>
39668
39669 #define MOD_AUTHOR "Option Wireless"
39670 #define MOD_DESCRIPTION "USB High Speed Option driver"
39671@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
39672 struct urb *urb;
39673
39674 urb = serial->rx_urb[0];
39675- if (serial->port.count > 0) {
39676+ if (atomic_read(&serial->port.count) > 0) {
39677 count = put_rxbuf_data(urb, serial);
39678 if (count == -1)
39679 return;
39680@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
39681 DUMP1(urb->transfer_buffer, urb->actual_length);
39682
39683 /* Anyone listening? */
39684- if (serial->port.count == 0)
39685+ if (atomic_read(&serial->port.count) == 0)
39686 return;
39687
39688 if (status == 0) {
39689@@ -1298,8 +1298,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39690 tty_port_tty_set(&serial->port, tty);
39691
39692 /* check for port already opened, if not set the termios */
39693- serial->port.count++;
39694- if (serial->port.count == 1) {
39695+ if (atomic_inc_return(&serial->port.count) == 1) {
39696 serial->rx_state = RX_IDLE;
39697 /* Force default termio settings */
39698 _hso_serial_set_termios(tty, NULL);
39699@@ -1311,7 +1310,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39700 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
39701 if (result) {
39702 hso_stop_serial_device(serial->parent);
39703- serial->port.count--;
39704+ atomic_dec(&serial->port.count);
39705 kref_put(&serial->parent->ref, hso_serial_ref_free);
39706 }
39707 } else {
39708@@ -1348,10 +1347,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
39709
39710 /* reset the rts and dtr */
39711 /* do the actual close */
39712- serial->port.count--;
39713+ atomic_dec(&serial->port.count);
39714
39715- if (serial->port.count <= 0) {
39716- serial->port.count = 0;
39717+ if (atomic_read(&serial->port.count) <= 0) {
39718+ atomic_set(&serial->port.count, 0);
39719 tty_port_tty_set(&serial->port, NULL);
39720 if (!usb_gone)
39721 hso_stop_serial_device(serial->parent);
39722@@ -1427,7 +1426,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
39723
39724 /* the actual setup */
39725 spin_lock_irqsave(&serial->serial_lock, flags);
39726- if (serial->port.count)
39727+ if (atomic_read(&serial->port.count))
39728 _hso_serial_set_termios(tty, old);
39729 else
39730 tty->termios = *old;
39731@@ -1886,7 +1885,7 @@ static void intr_callback(struct urb *urb)
39732 D1("Pending read interrupt on port %d\n", i);
39733 spin_lock(&serial->serial_lock);
39734 if (serial->rx_state == RX_IDLE &&
39735- serial->port.count > 0) {
39736+ atomic_read(&serial->port.count) > 0) {
39737 /* Setup and send a ctrl req read on
39738 * port i */
39739 if (!serial->rx_urb_filled[0]) {
39740@@ -3079,7 +3078,7 @@ static int hso_resume(struct usb_interface *iface)
39741 /* Start all serial ports */
39742 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
39743 if (serial_table[i] && (serial_table[i]->interface == iface)) {
39744- if (dev2ser(serial_table[i])->port.count) {
39745+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
39746 result =
39747 hso_start_serial_device(serial_table[i], GFP_NOIO);
39748 hso_kick_transmit(dev2ser(serial_table[i]));
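
Every hso port.count conversion follows one recipe: count++ followed by a separate == 1 test is two racy steps, whereas atomic_inc_return() == 1 makes increment-and-observe a single atomic event, so exactly one opener wins the first-open initialization. The shape of the change in C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int port_count;

/* returns 1 when this caller is the first opener */
static int serial_open(void)
{
	/* one atomic step replaces "count++; if (count == 1) ..." */
	return atomic_fetch_add(&port_count, 1) + 1 == 1;
}

static void serial_close(void)
{
	if (atomic_fetch_sub(&port_count, 1) - 1 <= 0)
		atomic_store(&port_count, 0);	/* clamp, as the driver does */
}

int main(void)
{
	printf("first open: %d\n", serial_open());	/* 1 */
	printf("second open: %d\n", serial_open());	/* 0 */
	serial_close();
	serial_close();
	return 0;
}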
39749diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
39750index 6993bfa..9053a34 100644
39751--- a/drivers/net/vxlan.c
39752+++ b/drivers/net/vxlan.c
39753@@ -1428,7 +1428,7 @@ nla_put_failure:
39754 return -EMSGSIZE;
39755 }
39756
39757-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
39758+static struct rtnl_link_ops vxlan_link_ops = {
39759 .kind = "vxlan",
39760 .maxtype = IFLA_VXLAN_MAX,
39761 .policy = vxlan_policy,
39762diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
39763index 77fa428..996b355 100644
39764--- a/drivers/net/wireless/at76c50x-usb.c
39765+++ b/drivers/net/wireless/at76c50x-usb.c
39766@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
39767 }
39768
39769 /* Convert timeout from the DFU status to jiffies */
39770-static inline unsigned long at76_get_timeout(struct dfu_status *s)
39771+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
39772 {
39773 return msecs_to_jiffies((s->poll_timeout[2] << 16)
39774 | (s->poll_timeout[1] << 8)
39775diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
39776index 8d78253..bebbb68 100644
39777--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
39778+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
39779@@ -184,8 +184,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
39780 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
39781 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
39782
39783- ACCESS_ONCE(ads->ds_link) = i->link;
39784- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
39785+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
39786+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
39787
39788 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
39789 ctl6 = SM(i->keytype, AR_EncrType);
39790@@ -199,26 +199,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
39791
39792 if ((i->is_first || i->is_last) &&
39793 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
39794- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
39795+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
39796 | set11nTries(i->rates, 1)
39797 | set11nTries(i->rates, 2)
39798 | set11nTries(i->rates, 3)
39799 | (i->dur_update ? AR_DurUpdateEna : 0)
39800 | SM(0, AR_BurstDur);
39801
39802- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
39803+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
39804 | set11nRate(i->rates, 1)
39805 | set11nRate(i->rates, 2)
39806 | set11nRate(i->rates, 3);
39807 } else {
39808- ACCESS_ONCE(ads->ds_ctl2) = 0;
39809- ACCESS_ONCE(ads->ds_ctl3) = 0;
39810+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
39811+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
39812 }
39813
39814 if (!i->is_first) {
39815- ACCESS_ONCE(ads->ds_ctl0) = 0;
39816- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
39817- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
39818+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
39819+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
39820+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
39821 return;
39822 }
39823
39824@@ -243,7 +243,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
39825 break;
39826 }
39827
39828- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
39829+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
39830 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
39831 | SM(i->txpower, AR_XmitPower)
39832 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
39833@@ -253,19 +253,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
39834 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
39835 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
39836
39837- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
39838- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
39839+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
39840+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
39841
39842 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
39843 return;
39844
39845- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
39846+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
39847 | set11nPktDurRTSCTS(i->rates, 1);
39848
39849- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
39850+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
39851 | set11nPktDurRTSCTS(i->rates, 3);
39852
39853- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
39854+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
39855 | set11nRateFlags(i->rates, 1)
39856 | set11nRateFlags(i->rates, 2)
39857 | set11nRateFlags(i->rates, 3)
39858diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
39859index 301bf72..3f5654f 100644
39860--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
39861+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
39862@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
39863 (i->qcu << AR_TxQcuNum_S) | desc_len;
39864
39865 checksum += val;
39866- ACCESS_ONCE(ads->info) = val;
39867+ ACCESS_ONCE_RW(ads->info) = val;
39868
39869 checksum += i->link;
39870- ACCESS_ONCE(ads->link) = i->link;
39871+ ACCESS_ONCE_RW(ads->link) = i->link;
39872
39873 checksum += i->buf_addr[0];
39874- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
39875+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
39876 checksum += i->buf_addr[1];
39877- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
39878+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
39879 checksum += i->buf_addr[2];
39880- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
39881+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
39882 checksum += i->buf_addr[3];
39883- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
39884+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
39885
39886 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
39887- ACCESS_ONCE(ads->ctl3) = val;
39888+ ACCESS_ONCE_RW(ads->ctl3) = val;
39889 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
39890- ACCESS_ONCE(ads->ctl5) = val;
39891+ ACCESS_ONCE_RW(ads->ctl5) = val;
39892 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
39893- ACCESS_ONCE(ads->ctl7) = val;
39894+ ACCESS_ONCE_RW(ads->ctl7) = val;
39895 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
39896- ACCESS_ONCE(ads->ctl9) = val;
39897+ ACCESS_ONCE_RW(ads->ctl9) = val;
39898
39899 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
39900- ACCESS_ONCE(ads->ctl10) = checksum;
39901+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
39902
39903 if (i->is_first || i->is_last) {
39904- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
39905+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
39906 | set11nTries(i->rates, 1)
39907 | set11nTries(i->rates, 2)
39908 | set11nTries(i->rates, 3)
39909 | (i->dur_update ? AR_DurUpdateEna : 0)
39910 | SM(0, AR_BurstDur);
39911
39912- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
39913+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
39914 | set11nRate(i->rates, 1)
39915 | set11nRate(i->rates, 2)
39916 | set11nRate(i->rates, 3);
39917 } else {
39918- ACCESS_ONCE(ads->ctl13) = 0;
39919- ACCESS_ONCE(ads->ctl14) = 0;
39920+ ACCESS_ONCE_RW(ads->ctl13) = 0;
39921+ ACCESS_ONCE_RW(ads->ctl14) = 0;
39922 }
39923
39924 ads->ctl20 = 0;
39925@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
39926
39927 ctl17 = SM(i->keytype, AR_EncrType);
39928 if (!i->is_first) {
39929- ACCESS_ONCE(ads->ctl11) = 0;
39930- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
39931- ACCESS_ONCE(ads->ctl15) = 0;
39932- ACCESS_ONCE(ads->ctl16) = 0;
39933- ACCESS_ONCE(ads->ctl17) = ctl17;
39934- ACCESS_ONCE(ads->ctl18) = 0;
39935- ACCESS_ONCE(ads->ctl19) = 0;
39936+ ACCESS_ONCE_RW(ads->ctl11) = 0;
39937+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
39938+ ACCESS_ONCE_RW(ads->ctl15) = 0;
39939+ ACCESS_ONCE_RW(ads->ctl16) = 0;
39940+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
39941+ ACCESS_ONCE_RW(ads->ctl18) = 0;
39942+ ACCESS_ONCE_RW(ads->ctl19) = 0;
39943 return;
39944 }
39945
39946- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
39947+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
39948 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
39949 | SM(i->txpower, AR_XmitPower)
39950 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
39951@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
39952 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
39953 ctl12 |= SM(val, AR_PAPRDChainMask);
39954
39955- ACCESS_ONCE(ads->ctl12) = ctl12;
39956- ACCESS_ONCE(ads->ctl17) = ctl17;
39957+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
39958+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
39959
39960- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
39961+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
39962 | set11nPktDurRTSCTS(i->rates, 1);
39963
39964- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
39965+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
39966 | set11nPktDurRTSCTS(i->rates, 3);
39967
39968- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
39969+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
39970 | set11nRateFlags(i->rates, 1)
39971 | set11nRateFlags(i->rates, 2)
39972 | set11nRateFlags(i->rates, 3)
39973 | SM(i->rtscts_rate, AR_RTSCTSRate);
39974
39975- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
39976+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
39977 }
39978
39979 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
39980diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
39981index 9d26fc5..60d9f14 100644
39982--- a/drivers/net/wireless/ath/ath9k/hw.h
39983+++ b/drivers/net/wireless/ath/ath9k/hw.h
39984@@ -658,7 +658,7 @@ struct ath_hw_private_ops {
39985
39986 /* ANI */
39987 void (*ani_cache_ini_regs)(struct ath_hw *ah);
39988-};
39989+} __no_const;
39990
39991 /**
39992 * struct ath_hw_ops - callbacks used by hardware code and driver code
39993@@ -688,7 +688,7 @@ struct ath_hw_ops {
39994 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
39995 struct ath_hw_antcomb_conf *antconf);
39996 void (*antctrl_shared_chain_lnadiv)(struct ath_hw *hw, bool enable);
39997-};
39998+} __no_const;
39999
40000 struct ath_nf_limits {
40001 s16 max;
40002diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
40003index 3726cd6..b655808 100644
40004--- a/drivers/net/wireless/iwlegacy/3945-mac.c
40005+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
40006@@ -3615,7 +3615,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
40007 */
40008 if (il3945_mod_params.disable_hw_scan) {
40009 D_INFO("Disabling hw_scan\n");
40010- il3945_mac_ops.hw_scan = NULL;
40011+ pax_open_kernel();
40012+ *(void **)&il3945_mac_ops.hw_scan = NULL;
40013+ pax_close_kernel();
40014 }
40015
40016 D_INFO("*** LOAD DRIVER ***\n");
40017diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
40018index 5b9533e..7733880 100644
40019--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
40020+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
40021@@ -203,7 +203,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
40022 {
40023 struct iwl_priv *priv = file->private_data;
40024 char buf[64];
40025- int buf_size;
40026+ size_t buf_size;
40027 u32 offset, len;
40028
40029 memset(buf, 0, sizeof(buf));
40030@@ -473,7 +473,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
40031 struct iwl_priv *priv = file->private_data;
40032
40033 char buf[8];
40034- int buf_size;
40035+ size_t buf_size;
40036 u32 reset_flag;
40037
40038 memset(buf, 0, sizeof(buf));
40039@@ -554,7 +554,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
40040 {
40041 struct iwl_priv *priv = file->private_data;
40042 char buf[8];
40043- int buf_size;
40044+ size_t buf_size;
40045 int ht40;
40046
40047 memset(buf, 0, sizeof(buf));
40048@@ -606,7 +606,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
40049 {
40050 struct iwl_priv *priv = file->private_data;
40051 char buf[8];
40052- int buf_size;
40053+ size_t buf_size;
40054 int value;
40055
40056 memset(buf, 0, sizeof(buf));
40057@@ -1871,7 +1871,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
40058 {
40059 struct iwl_priv *priv = file->private_data;
40060 char buf[8];
40061- int buf_size;
40062+ size_t buf_size;
40063 int clear;
40064
40065 memset(buf, 0, sizeof(buf));
40066@@ -1916,7 +1916,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
40067 {
40068 struct iwl_priv *priv = file->private_data;
40069 char buf[8];
40070- int buf_size;
40071+ size_t buf_size;
40072 int trace;
40073
40074 memset(buf, 0, sizeof(buf));
40075@@ -1987,7 +1987,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
40076 {
40077 struct iwl_priv *priv = file->private_data;
40078 char buf[8];
40079- int buf_size;
40080+ size_t buf_size;
40081 int missed;
40082
40083 memset(buf, 0, sizeof(buf));
40084@@ -2028,7 +2028,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
40085
40086 struct iwl_priv *priv = file->private_data;
40087 char buf[8];
40088- int buf_size;
40089+ size_t buf_size;
40090 int plcp;
40091
40092 memset(buf, 0, sizeof(buf));
40093@@ -2088,7 +2088,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
40094
40095 struct iwl_priv *priv = file->private_data;
40096 char buf[8];
40097- int buf_size;
40098+ size_t buf_size;
40099 int flush;
40100
40101 memset(buf, 0, sizeof(buf));
40102@@ -2178,7 +2178,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
40103
40104 struct iwl_priv *priv = file->private_data;
40105 char buf[8];
40106- int buf_size;
40107+ size_t buf_size;
40108 int rts;
40109
40110 if (!priv->cfg->ht_params)
40111@@ -2220,7 +2220,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
40112 {
40113 struct iwl_priv *priv = file->private_data;
40114 char buf[8];
40115- int buf_size;
40116+ size_t buf_size;
40117
40118 memset(buf, 0, sizeof(buf));
40119 buf_size = min(count, sizeof(buf) - 1);
40120@@ -2256,7 +2256,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
40121 struct iwl_priv *priv = file->private_data;
40122 u32 event_log_flag;
40123 char buf[8];
40124- int buf_size;
40125+ size_t buf_size;
40126
40127 /* check that the interface is up */
40128 if (!iwl_is_ready(priv))
40129@@ -2310,7 +2310,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
40130 struct iwl_priv *priv = file->private_data;
40131 char buf[8];
40132 u32 calib_disabled;
40133- int buf_size;
40134+ size_t buf_size;
40135
40136 memset(buf, 0, sizeof(buf));
40137 buf_size = min(count, sizeof(buf) - 1);
40138diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
40139index 35708b9..31f7754 100644
40140--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
40141+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
40142@@ -1100,7 +1100,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
40143 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
40144
40145 char buf[8];
40146- int buf_size;
40147+ size_t buf_size;
40148 u32 reset_flag;
40149
40150 memset(buf, 0, sizeof(buf));
40151@@ -1121,7 +1121,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
40152 {
40153 struct iwl_trans *trans = file->private_data;
40154 char buf[8];
40155- int buf_size;
40156+ size_t buf_size;
40157 int csr;
40158
40159 memset(buf, 0, sizeof(buf));
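
These debugfs write handlers, here and in the dvm file above, all receive the same type fix: count arrives as a size_t, so parking min(count, sizeof(buf) - 1) in an int narrows the value and mixes signedness, which the size_overflow plugin is presumably meant to flag; keeping buf_size a size_t leaves the arithmetic in one unsigned type end to end. The narrowing hazard in miniature (assumes an LP64 target):

#include <stdio.h>

int main(void)
{
	size_t count = ((size_t)1 << 32) | 7;	/* 0x100000007 */
	int narrowed = (int)count;	/* implementation-defined; commonly 7 */
	size_t kept = count;

	/* the int version silently drops the high bits before any
	 * comparison against a buffer size can happen */
	printf("narrowed=%d kept=%zu\n", narrowed, kept);
	return 0;
}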
40160diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
40161index ff90855..e46d223 100644
40162--- a/drivers/net/wireless/mac80211_hwsim.c
40163+++ b/drivers/net/wireless/mac80211_hwsim.c
40164@@ -2062,25 +2062,19 @@ static int __init init_mac80211_hwsim(void)
40165
40166 if (channels > 1) {
40167 hwsim_if_comb.num_different_channels = channels;
40168- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
40169- mac80211_hwsim_ops.cancel_hw_scan =
40170- mac80211_hwsim_cancel_hw_scan;
40171- mac80211_hwsim_ops.sw_scan_start = NULL;
40172- mac80211_hwsim_ops.sw_scan_complete = NULL;
40173- mac80211_hwsim_ops.remain_on_channel =
40174- mac80211_hwsim_roc;
40175- mac80211_hwsim_ops.cancel_remain_on_channel =
40176- mac80211_hwsim_croc;
40177- mac80211_hwsim_ops.add_chanctx =
40178- mac80211_hwsim_add_chanctx;
40179- mac80211_hwsim_ops.remove_chanctx =
40180- mac80211_hwsim_remove_chanctx;
40181- mac80211_hwsim_ops.change_chanctx =
40182- mac80211_hwsim_change_chanctx;
40183- mac80211_hwsim_ops.assign_vif_chanctx =
40184- mac80211_hwsim_assign_vif_chanctx;
40185- mac80211_hwsim_ops.unassign_vif_chanctx =
40186- mac80211_hwsim_unassign_vif_chanctx;
40187+ pax_open_kernel();
40188+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
40189+ *(void **)&mac80211_hwsim_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
40190+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
40191+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
40192+ *(void **)&mac80211_hwsim_ops.remain_on_channel = mac80211_hwsim_roc;
40193+ *(void **)&mac80211_hwsim_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
40194+ *(void **)&mac80211_hwsim_ops.add_chanctx = mac80211_hwsim_add_chanctx;
40195+ *(void **)&mac80211_hwsim_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
40196+ *(void **)&mac80211_hwsim_ops.change_chanctx = mac80211_hwsim_change_chanctx;
40197+ *(void **)&mac80211_hwsim_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
40198+ *(void **)&mac80211_hwsim_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
40199+ pax_close_kernel();
40200 }
40201
40202 spin_lock_init(&hwsim_radio_lock);
40203diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
40204index abe1d03..fb02c22 100644
40205--- a/drivers/net/wireless/rndis_wlan.c
40206+++ b/drivers/net/wireless/rndis_wlan.c
40207@@ -1238,7 +1238,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
40208
40209 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
40210
40211- if (rts_threshold < 0 || rts_threshold > 2347)
40212+ if (rts_threshold > 2347)
40213 rts_threshold = 2347;
40214
40215 tmp = cpu_to_le32(rts_threshold);
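
rts_threshold is a u32, so the dropped "rts_threshold < 0" arm could never fire; it only produced tautological-comparison warnings, and the upper clamp is the sole meaningful test. Demonstrably:

#include <stdio.h>

int main(void)
{
	unsigned int rts_threshold = 5000;

	/* "rts_threshold < 0" is always false for an unsigned type;
	 * gcc -Wtype-limits and clang -Wtautological-compare warn on it */
	if (rts_threshold > 2347)
		rts_threshold = 2347;
	printf("%u\n", rts_threshold);
	return 0;
}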
40216diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
40217index 0751b35..246ba3e 100644
40218--- a/drivers/net/wireless/rt2x00/rt2x00.h
40219+++ b/drivers/net/wireless/rt2x00/rt2x00.h
40220@@ -398,7 +398,7 @@ struct rt2x00_intf {
40221 * for hardware which doesn't support hardware
40222 * sequence counting.
40223 */
40224- atomic_t seqno;
40225+ atomic_unchecked_t seqno;
40226 };
40227
40228 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
40229diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
40230index e488b94..14b6a0c 100644
40231--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
40232+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
40233@@ -240,9 +240,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
40234 * sequence counter given by mac80211.
40235 */
40236 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
40237- seqno = atomic_add_return(0x10, &intf->seqno);
40238+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
40239 else
40240- seqno = atomic_read(&intf->seqno);
40241+ seqno = atomic_read_unchecked(&intf->seqno);
40242
40243 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
40244 hdr->seq_ctrl |= cpu_to_le16(seqno);
40245diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
40246index e57ee48..541cf6c 100644
40247--- a/drivers/net/wireless/ti/wl1251/sdio.c
40248+++ b/drivers/net/wireless/ti/wl1251/sdio.c
40249@@ -269,13 +269,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
40250
40251 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
40252
40253- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
40254- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
40255+ pax_open_kernel();
40256+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
40257+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
40258+ pax_close_kernel();
40259
40260 wl1251_info("using dedicated interrupt line");
40261 } else {
40262- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
40263- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
40264+ pax_open_kernel();
40265+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
40266+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
40267+ pax_close_kernel();
40268
40269 wl1251_info("using SDIO interrupt");
40270 }
40271diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
40272index e5f5f8f..fdf15b7 100644
40273--- a/drivers/net/wireless/ti/wl12xx/main.c
40274+++ b/drivers/net/wireless/ti/wl12xx/main.c
40275@@ -644,7 +644,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
40276 sizeof(wl->conf.mem));
40277
40278 /* read data preparation is only needed by wl127x */
40279- wl->ops->prepare_read = wl127x_prepare_read;
40280+ pax_open_kernel();
40281+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
40282+ pax_close_kernel();
40283
40284 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
40285 WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
40286@@ -665,7 +667,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
40287 sizeof(wl->conf.mem));
40288
40289 /* read data preparation is only needed by wl127x */
40290- wl->ops->prepare_read = wl127x_prepare_read;
40291+ pax_open_kernel();
40292+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
40293+ pax_close_kernel();
40294
40295 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
40296 WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
40297diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
40298index 8d8c1f8..e754844 100644
40299--- a/drivers/net/wireless/ti/wl18xx/main.c
40300+++ b/drivers/net/wireless/ti/wl18xx/main.c
40301@@ -1489,8 +1489,10 @@ static int wl18xx_setup(struct wl1271 *wl)
40302 }
40303
40304 if (!checksum_param) {
40305- wl18xx_ops.set_rx_csum = NULL;
40306- wl18xx_ops.init_vif = NULL;
40307+ pax_open_kernel();
40308+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
40309+ *(void **)&wl18xx_ops.init_vif = NULL;
40310+ pax_close_kernel();
40311 }
40312
40313 /* Enable 11a Band only if we have 5G antennas */
40314diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
40315index ef2b171..bb513a6 100644
40316--- a/drivers/net/wireless/zd1211rw/zd_usb.c
40317+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
40318@@ -387,7 +387,7 @@ static inline void handle_regs_int(struct urb *urb)
40319 {
40320 struct zd_usb *usb = urb->context;
40321 struct zd_usb_interrupt *intr = &usb->intr;
40322- int len;
40323+ unsigned int len;
40324 u16 int_num;
40325
40326 ZD_ASSERT(in_interrupt());
40327diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
40328index d93b2b6..ae50401 100644
40329--- a/drivers/oprofile/buffer_sync.c
40330+++ b/drivers/oprofile/buffer_sync.c
40331@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
40332 if (cookie == NO_COOKIE)
40333 offset = pc;
40334 if (cookie == INVALID_COOKIE) {
40335- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
40336+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
40337 offset = pc;
40338 }
40339 if (cookie != last_cookie) {
40340@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
40341 /* add userspace sample */
40342
40343 if (!mm) {
40344- atomic_inc(&oprofile_stats.sample_lost_no_mm);
40345+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
40346 return 0;
40347 }
40348
40349 cookie = lookup_dcookie(mm, s->eip, &offset);
40350
40351 if (cookie == INVALID_COOKIE) {
40352- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
40353+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
40354 return 0;
40355 }
40356
40357@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
40358 /* ignore backtraces if failed to add a sample */
40359 if (state == sb_bt_start) {
40360 state = sb_bt_ignore;
40361- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
40362+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
40363 }
40364 }
40365 release_mm(mm);
40366diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
40367index c0cc4e7..44d4e54 100644
40368--- a/drivers/oprofile/event_buffer.c
40369+++ b/drivers/oprofile/event_buffer.c
40370@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
40371 }
40372
40373 if (buffer_pos == buffer_size) {
40374- atomic_inc(&oprofile_stats.event_lost_overflow);
40375+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
40376 return;
40377 }
40378
40379diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
40380index ed2c3ec..deda85a 100644
40381--- a/drivers/oprofile/oprof.c
40382+++ b/drivers/oprofile/oprof.c
40383@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
40384 if (oprofile_ops.switch_events())
40385 return;
40386
40387- atomic_inc(&oprofile_stats.multiplex_counter);
40388+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
40389 start_switch_worker();
40390 }
40391
40392diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
40393index 84a208d..d61b0a1 100644
40394--- a/drivers/oprofile/oprofile_files.c
40395+++ b/drivers/oprofile/oprofile_files.c
40396@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
40397
40398 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
40399
40400-static ssize_t timeout_read(struct file *file, char __user *buf,
40401+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
40402 size_t count, loff_t *offset)
40403 {
40404 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
40405diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
40406index 917d28e..d62d981 100644
40407--- a/drivers/oprofile/oprofile_stats.c
40408+++ b/drivers/oprofile/oprofile_stats.c
40409@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
40410 cpu_buf->sample_invalid_eip = 0;
40411 }
40412
40413- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
40414- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
40415- atomic_set(&oprofile_stats.event_lost_overflow, 0);
40416- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
40417- atomic_set(&oprofile_stats.multiplex_counter, 0);
40418+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
40419+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
40420+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
40421+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
40422+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
40423 }
40424
40425
40426diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
40427index 38b6fc0..b5cbfce 100644
40428--- a/drivers/oprofile/oprofile_stats.h
40429+++ b/drivers/oprofile/oprofile_stats.h
40430@@ -13,11 +13,11 @@
40431 #include <linux/atomic.h>
40432
40433 struct oprofile_stat_struct {
40434- atomic_t sample_lost_no_mm;
40435- atomic_t sample_lost_no_mapping;
40436- atomic_t bt_lost_no_mapping;
40437- atomic_t event_lost_overflow;
40438- atomic_t multiplex_counter;
40439+ atomic_unchecked_t sample_lost_no_mm;
40440+ atomic_unchecked_t sample_lost_no_mapping;
40441+ atomic_unchecked_t bt_lost_no_mapping;
40442+ atomic_unchecked_t event_lost_overflow;
40443+ atomic_unchecked_t multiplex_counter;
40444 };
40445
40446 extern struct oprofile_stat_struct oprofile_stats;
40447diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
40448index 849357c..b83c1e0 100644
40449--- a/drivers/oprofile/oprofilefs.c
40450+++ b/drivers/oprofile/oprofilefs.c
40451@@ -185,7 +185,7 @@ static const struct file_operations atomic_ro_fops = {
40452
40453
40454 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
40455- char const *name, atomic_t *val)
40456+ char const *name, atomic_unchecked_t *val)
40457 {
40458 return __oprofilefs_create_file(sb, root, name,
40459 &atomic_ro_fops, 0444, val);
40460diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
40461index 93404f7..4a313d8 100644
40462--- a/drivers/oprofile/timer_int.c
40463+++ b/drivers/oprofile/timer_int.c
40464@@ -93,7 +93,7 @@ static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
40465 return NOTIFY_OK;
40466 }
40467
40468-static struct notifier_block __refdata oprofile_cpu_notifier = {
40469+static struct notifier_block oprofile_cpu_notifier = {
40470 .notifier_call = oprofile_cpu_notify,
40471 };
40472
40473diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
40474index 3f56bc0..707d642 100644
40475--- a/drivers/parport/procfs.c
40476+++ b/drivers/parport/procfs.c
40477@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
40478
40479 *ppos += len;
40480
40481- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
40482+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
40483 }
40484
40485 #ifdef CONFIG_PARPORT_1284
40486@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
40487
40488 *ppos += len;
40489
40490- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
40491+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
40492 }
40493 #endif /* IEEE1284.3 support. */
40494
40495diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
40496index c35e8ad..fc33beb 100644
40497--- a/drivers/pci/hotplug/acpiphp_ibm.c
40498+++ b/drivers/pci/hotplug/acpiphp_ibm.c
40499@@ -464,7 +464,9 @@ static int __init ibm_acpiphp_init(void)
40500 goto init_cleanup;
40501 }
40502
40503- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
40504+ pax_open_kernel();
40505+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
40506+ pax_close_kernel();
40507 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
40508
40509 return retval;
40510diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
40511index a6a71c4..c91097b 100644
40512--- a/drivers/pci/hotplug/cpcihp_generic.c
40513+++ b/drivers/pci/hotplug/cpcihp_generic.c
40514@@ -73,7 +73,6 @@ static u16 port;
40515 static unsigned int enum_bit;
40516 static u8 enum_mask;
40517
40518-static struct cpci_hp_controller_ops generic_hpc_ops;
40519 static struct cpci_hp_controller generic_hpc;
40520
40521 static int __init validate_parameters(void)
40522@@ -139,6 +138,10 @@ static int query_enum(void)
40523 return ((value & enum_mask) == enum_mask);
40524 }
40525
40526+static struct cpci_hp_controller_ops generic_hpc_ops = {
40527+ .query_enum = query_enum,
40528+};
40529+
40530 static int __init cpcihp_generic_init(void)
40531 {
40532 int status;
40533@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
40534 pci_dev_put(dev);
40535
40536 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
40537- generic_hpc_ops.query_enum = query_enum;
40538 generic_hpc.ops = &generic_hpc_ops;
40539
40540 status = cpci_hp_register_controller(&generic_hpc);
40541diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
40542index 449b4bb..257e2e8 100644
40543--- a/drivers/pci/hotplug/cpcihp_zt5550.c
40544+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
40545@@ -59,7 +59,6 @@
40546 /* local variables */
40547 static bool debug;
40548 static bool poll;
40549-static struct cpci_hp_controller_ops zt5550_hpc_ops;
40550 static struct cpci_hp_controller zt5550_hpc;
40551
40552 /* Primary cPCI bus bridge device */
40553@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
40554 return 0;
40555 }
40556
40557+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
40558+ .query_enum = zt5550_hc_query_enum,
40559+};
40560+
40561 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
40562 {
40563 int status;
40564@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
40565 dbg("returned from zt5550_hc_config");
40566
40567 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
40568- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
40569 zt5550_hpc.ops = &zt5550_hpc_ops;
40570 if(!poll) {
40571 zt5550_hpc.irq = hc_dev->irq;
40572 zt5550_hpc.irq_flags = IRQF_SHARED;
40573 zt5550_hpc.dev_id = hc_dev;
40574
40575- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
40576- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
40577- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
40578+ pax_open_kernel();
40579+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
40580+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
40581+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
40582+ pax_close_kernel();
40583 } else {
40584 info("using ENUM# polling mode");
40585 }
40586diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
40587index 76ba8a1..20ca857 100644
40588--- a/drivers/pci/hotplug/cpqphp_nvram.c
40589+++ b/drivers/pci/hotplug/cpqphp_nvram.c
40590@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
40591
40592 void compaq_nvram_init (void __iomem *rom_start)
40593 {
40594+
40595+#ifndef CONFIG_PAX_KERNEXEC
40596 if (rom_start) {
40597 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
40598 }
40599+#endif
40600+
40601 dbg("int15 entry = %p\n", compaq_int15_entry_point);
40602
40603 /* initialize our int15 lock */
40604diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
40605index 202f4a9..8ee47d0 100644
40606--- a/drivers/pci/hotplug/pci_hotplug_core.c
40607+++ b/drivers/pci/hotplug/pci_hotplug_core.c
40608@@ -448,8 +448,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
40609 return -EINVAL;
40610 }
40611
40612- slot->ops->owner = owner;
40613- slot->ops->mod_name = mod_name;
40614+ pax_open_kernel();
40615+ *(struct module **)&slot->ops->owner = owner;
40616+ *(const char **)&slot->ops->mod_name = mod_name;
40617+ pax_close_kernel();
40618
40619 mutex_lock(&pci_hp_mutex);
40620 /*
40621diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
40622index 939bd1d..a1459c9 100644
40623--- a/drivers/pci/hotplug/pciehp_core.c
40624+++ b/drivers/pci/hotplug/pciehp_core.c
40625@@ -91,7 +91,7 @@ static int init_slot(struct controller *ctrl)
40626 struct slot *slot = ctrl->slot;
40627 struct hotplug_slot *hotplug = NULL;
40628 struct hotplug_slot_info *info = NULL;
40629- struct hotplug_slot_ops *ops = NULL;
40630+ hotplug_slot_ops_no_const *ops = NULL;
40631 char name[SLOT_NAME_SIZE];
40632 int retval = -ENOMEM;
40633
40634diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
40635index 9c6e9bb..2916736 100644
40636--- a/drivers/pci/pci-sysfs.c
40637+++ b/drivers/pci/pci-sysfs.c
40638@@ -1071,7 +1071,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
40639 {
40640 /* allocate attribute structure, piggyback attribute name */
40641 int name_len = write_combine ? 13 : 10;
40642- struct bin_attribute *res_attr;
40643+ bin_attribute_no_const *res_attr;
40644 int retval;
40645
40646 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
40647@@ -1256,7 +1256,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
40648 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
40649 {
40650 int retval;
40651- struct bin_attribute *attr;
40652+ bin_attribute_no_const *attr;
40653
40654 /* If the device has VPD, try to expose it in sysfs. */
40655 if (dev->vpd) {
40656@@ -1303,7 +1303,7 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
40657 {
40658 int retval;
40659 int rom_size = 0;
40660- struct bin_attribute *attr;
40661+ bin_attribute_no_const *attr;
40662
40663 if (!sysfs_initialized)
40664 return -EACCES;
40665diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
40666index e851829..a1a7196 100644
40667--- a/drivers/pci/pci.h
40668+++ b/drivers/pci/pci.h
40669@@ -98,7 +98,7 @@ struct pci_vpd_ops {
40670 struct pci_vpd {
40671 unsigned int len;
40672 const struct pci_vpd_ops *ops;
40673- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
40674+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
40675 };
40676
40677 extern int pci_vpd_pci22_init(struct pci_dev *dev);
40678diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
40679index 8474b6a..ee81993 100644
40680--- a/drivers/pci/pcie/aspm.c
40681+++ b/drivers/pci/pcie/aspm.c
40682@@ -27,9 +27,9 @@
40683 #define MODULE_PARAM_PREFIX "pcie_aspm."
40684
40685 /* Note: those are not register definitions */
40686-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
40687-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
40688-#define ASPM_STATE_L1 (4) /* L1 state */
40689+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
40690+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
40691+#define ASPM_STATE_L1 (4U) /* L1 state */
40692 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
40693 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
40694
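
The ASPM mask change is a small integer-hygiene fix: the states are combined with |, negated, and stored in unsigned fields, and a plain int literal sign-extends once ~mask meets a wider unsigned type. Suffixing U keeps all the bitwise arithmetic in unsigned math (the probe.c hunk just below does the same job with an explicit (u32) cast, since PCI_ROM_ADDRESS_MASK expands to an unsigned long). A userspace illustration of the hazard, with hypothetical values:

	#include <stdio.h>
	#include <stdint.h>

	#define STATE_L1   (4)		/* plain int literal */
	#define STATE_L1_U (4U)		/* unsigned literal */

	int main(void)
	{
		/* ~4 as int is -5; widening it to 64 bits sign-extends. */
		uint64_t a = (uint64_t)~STATE_L1;	/* 0xfffffffffffffffb */
		uint64_t b = (uint64_t)~STATE_L1_U;	/* 0x00000000fffffffb */
		printf("%llx %llx\n", (unsigned long long)a,
		       (unsigned long long)b);
		return 0;
	}
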
40695diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
40696index 6186f03..1a78714 100644
40697--- a/drivers/pci/probe.c
40698+++ b/drivers/pci/probe.c
40699@@ -173,7 +173,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
40700 struct pci_bus_region region;
40701 bool bar_too_big = false, bar_disabled = false;
40702
40703- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
40704+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
40705
40706 /* No printks while decoding is disabled! */
40707 if (!dev->mmio_always_on) {
40708diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
40709index 9b8505c..f00870a 100644
40710--- a/drivers/pci/proc.c
40711+++ b/drivers/pci/proc.c
40712@@ -465,7 +465,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
40713 static int __init pci_proc_init(void)
40714 {
40715 struct pci_dev *dev = NULL;
40716+
40717+#ifdef CONFIG_GRKERNSEC_PROC_ADD
40718+#ifdef CONFIG_GRKERNSEC_PROC_USER
40719+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
40720+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40721+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
40722+#endif
40723+#else
40724 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
40725+#endif
40726 proc_create("devices", 0, proc_bus_pci_dir,
40727 &proc_bus_pci_dev_operations);
40728 proc_initialized = 1;
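
Under CONFIG_GRKERNSEC_PROC_ADD the hunk above also locks down who may even traverse /proc/bus/pci: mode 0500 (root only) with GRKERNSEC_PROC_USER, or 0550 (root plus a configured group) with GRKERNSEC_PROC_USERGROUP. proc_mkdir_mode() is the stock procfs API for creating a directory with an explicit mode instead of the 0555 default. A sketch of the same idea in an ordinary driver (the directory name is made up):

	#include <linux/proc_fs.h>
	#include <linux/stat.h>
	#include <linux/errno.h>

	static struct proc_dir_entry *demo_dir;

	static int __init demo_proc_init(void)
	{
		/* Root-only: no read/traverse bits for group or other. */
		demo_dir = proc_mkdir_mode("demo_bus", S_IRUSR | S_IXUSR, NULL);
		return demo_dir ? 0 : -ENOMEM;
	}
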
40729diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
40730index 2111dbb..79e434b 100644
40731--- a/drivers/platform/x86/msi-laptop.c
40732+++ b/drivers/platform/x86/msi-laptop.c
40733@@ -820,12 +820,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
40734 int result;
40735
40736 /* allow userland write sysfs file */
40737- dev_attr_bluetooth.store = store_bluetooth;
40738- dev_attr_wlan.store = store_wlan;
40739- dev_attr_threeg.store = store_threeg;
40740- dev_attr_bluetooth.attr.mode |= S_IWUSR;
40741- dev_attr_wlan.attr.mode |= S_IWUSR;
40742- dev_attr_threeg.attr.mode |= S_IWUSR;
40743+ pax_open_kernel();
40744+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
40745+ *(void **)&dev_attr_wlan.store = store_wlan;
40746+ *(void **)&dev_attr_threeg.store = store_threeg;
40747+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
40748+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
40749+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
40750+ pax_close_kernel();
40751
40752 /* disable hardware control by fn key */
40753 result = ec_read(MSI_STANDARD_EC_SCM_LOAD_ADDRESS, &data);
40754diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
40755index 0fe987f..6f3d5c3 100644
40756--- a/drivers/platform/x86/sony-laptop.c
40757+++ b/drivers/platform/x86/sony-laptop.c
40758@@ -2356,7 +2356,7 @@ static void sony_nc_lid_resume_cleanup(struct platform_device *pd)
40759 }
40760
40761 /* High speed charging function */
40762-static struct device_attribute *hsc_handle;
40763+static device_attribute_no_const *hsc_handle;
40764
40765 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
40766 struct device_attribute *attr,
40767diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
40768index f946ca7..f25c833 100644
40769--- a/drivers/platform/x86/thinkpad_acpi.c
40770+++ b/drivers/platform/x86/thinkpad_acpi.c
40771@@ -2097,7 +2097,7 @@ static int hotkey_mask_get(void)
40772 return 0;
40773 }
40774
40775-void static hotkey_mask_warn_incomplete_mask(void)
40776+static void hotkey_mask_warn_incomplete_mask(void)
40777 {
40778 /* log only what the user can fix... */
40779 const u32 wantedmask = hotkey_driver_mask &
40780@@ -2328,11 +2328,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
40781 }
40782 }
40783
40784-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
40785- struct tp_nvram_state *newn,
40786- const u32 event_mask)
40787-{
40788-
40789 #define TPACPI_COMPARE_KEY(__scancode, __member) \
40790 do { \
40791 if ((event_mask & (1 << __scancode)) && \
40792@@ -2346,36 +2341,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
40793 tpacpi_hotkey_send_key(__scancode); \
40794 } while (0)
40795
40796- void issue_volchange(const unsigned int oldvol,
40797- const unsigned int newvol)
40798- {
40799- unsigned int i = oldvol;
40800+static void issue_volchange(const unsigned int oldvol,
40801+ const unsigned int newvol,
40802+ const u32 event_mask)
40803+{
40804+ unsigned int i = oldvol;
40805
40806- while (i > newvol) {
40807- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
40808- i--;
40809- }
40810- while (i < newvol) {
40811- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
40812- i++;
40813- }
40814+ while (i > newvol) {
40815+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
40816+ i--;
40817 }
40818+ while (i < newvol) {
40819+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
40820+ i++;
40821+ }
40822+}
40823
40824- void issue_brightnesschange(const unsigned int oldbrt,
40825- const unsigned int newbrt)
40826- {
40827- unsigned int i = oldbrt;
40828+static void issue_brightnesschange(const unsigned int oldbrt,
40829+ const unsigned int newbrt,
40830+ const u32 event_mask)
40831+{
40832+ unsigned int i = oldbrt;
40833
40834- while (i > newbrt) {
40835- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
40836- i--;
40837- }
40838- while (i < newbrt) {
40839- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
40840- i++;
40841- }
40842+ while (i > newbrt) {
40843+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
40844+ i--;
40845+ }
40846+ while (i < newbrt) {
40847+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
40848+ i++;
40849 }
40850+}
40851
40852+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
40853+ struct tp_nvram_state *newn,
40854+ const u32 event_mask)
40855+{
40856 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
40857 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
40858 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
40859@@ -2409,7 +2410,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
40860 oldn->volume_level != newn->volume_level) {
40861 /* recently muted, or repeated mute keypress, or
40862 * multiple presses ending in mute */
40863- issue_volchange(oldn->volume_level, newn->volume_level);
40864+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
40865 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
40866 }
40867 } else {
40868@@ -2419,7 +2420,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
40869 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
40870 }
40871 if (oldn->volume_level != newn->volume_level) {
40872- issue_volchange(oldn->volume_level, newn->volume_level);
40873+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
40874 } else if (oldn->volume_toggle != newn->volume_toggle) {
40875 /* repeated vol up/down keypress at end of scale ? */
40876 if (newn->volume_level == 0)
40877@@ -2432,7 +2433,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
40878 /* handle brightness */
40879 if (oldn->brightness_level != newn->brightness_level) {
40880 issue_brightnesschange(oldn->brightness_level,
40881- newn->brightness_level);
40882+ newn->brightness_level,
40883+ event_mask);
40884 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
40885 /* repeated key presses that didn't change state */
40886 if (newn->brightness_level == 0)
40887@@ -2441,10 +2443,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
40888 && !tp_features.bright_unkfw)
40889 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
40890 }
40891+}
40892
40893 #undef TPACPI_COMPARE_KEY
40894 #undef TPACPI_MAY_SEND_KEY
40895-}
40896
40897 /*
40898 * Polling driver
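
The thinkpad_acpi.c rework above is bulky but mechanical. issue_volchange() and issue_brightnesschange() were GNU C nested functions defined inside hotkey_compare_and_issue_event(), capturing event_mask from the enclosing scope; taking a nested function's address forces GCC to build a trampoline on the stack, which requires an executable stack, exactly what PaX forbids. Here the helpers were only called directly, so no trampoline was actually emitted, but the patch removes the fragile extension outright: both become file-scope static functions with event_mask passed explicitly. (The same hunk also fixes the declaration-order slip "void static hotkey_mask_warn_incomplete_mask".) The shape of the transformation, reduced to a sketch:

	static void send_key(unsigned int key) { (void)key; /* stub */ }

	/* Before (GNU C extension, removed by the patch):
	 *
	 *	static void handler(unsigned int mask)
	 *	{
	 *		void emit(unsigned int key)	// captures 'mask'
	 *		{
	 *			if (mask & (1U << key))
	 *				send_key(key);
	 *		}
	 *		emit(3);
	 *	}
	 */

	/* After: hoisted to file scope, captured state made explicit. */
	static void emit(unsigned int key, unsigned int mask)
	{
		if (mask & (1U << key))
			send_key(key);
	}

	static void handler(unsigned int mask)
	{
		emit(3, mask);
	}
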
40899diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
40900index 769d265..a3a05ca 100644
40901--- a/drivers/pnp/pnpbios/bioscalls.c
40902+++ b/drivers/pnp/pnpbios/bioscalls.c
40903@@ -58,7 +58,7 @@ do { \
40904 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
40905 } while(0)
40906
40907-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
40908+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
40909 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
40910
40911 /*
40912@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40913
40914 cpu = get_cpu();
40915 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
40916+
40917+ pax_open_kernel();
40918 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
40919+ pax_close_kernel();
40920
40921 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
40922 spin_lock_irqsave(&pnp_bios_lock, flags);
40923@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40924 :"memory");
40925 spin_unlock_irqrestore(&pnp_bios_lock, flags);
40926
40927+ pax_open_kernel();
40928 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
40929+ pax_close_kernel();
40930+
40931 put_cpu();
40932
40933 /* If we get here and this is set then the PnP BIOS faulted on us. */
40934@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
40935 return status;
40936 }
40937
40938-void pnpbios_calls_init(union pnp_bios_install_struct *header)
40939+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
40940 {
40941 int i;
40942
40943@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40944 pnp_bios_callpoint.offset = header->fields.pm16offset;
40945 pnp_bios_callpoint.segment = PNP_CS16;
40946
40947+ pax_open_kernel();
40948+
40949 for_each_possible_cpu(i) {
40950 struct desc_struct *gdt = get_cpu_gdt_table(i);
40951 if (!gdt)
40952@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40953 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
40954 (unsigned long)__va(header->fields.pm16dseg));
40955 }
40956+
40957+ pax_close_kernel();
40958 }
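
Two details in the bioscalls.c hunk are easy to miss besides the now-familiar pax_open_kernel() bracketing of the per-CPU GDT stores. bad_bios_desc becomes const, and its flag word changes from 0x4092 to 0x4093: bit 0 of the access byte is the "accessed" flag, and pre-setting it means the CPU never has to write it back into a descriptor that now lives in read-only memory. pnpbios_calls_init() additionally gains __init, letting the setup code be discarded after boot. Decoding the flag word, for reference:

	/* GDT_ENTRY_INIT(0x4093, base, limit), bits unpacked:
	 *   0x4000  D/B = 1	32-bit segment
	 *   0x0080  P   = 1	present
	 *   0x0010  S   = 1	code/data (non-system) descriptor
	 *   0x0002  type	data segment, writable
	 *   0x0001  type	accessed, pre-set so the CPU never
	 *			writes it into the (read-only) entry
	 */
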
40959diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
40960index 3e6db1c..1fbbdae 100644
40961--- a/drivers/pnp/resource.c
40962+++ b/drivers/pnp/resource.c
40963@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
40964 return 1;
40965
40966 /* check if the resource is valid */
40967- if (*irq < 0 || *irq > 15)
40968+ if (*irq > 15)
40969 return 0;
40970
40971 /* check if the resource is reserved */
40972@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
40973 return 1;
40974
40975 /* check if the resource is valid */
40976- if (*dma < 0 || *dma == 4 || *dma > 7)
40977+ if (*dma == 4 || *dma > 7)
40978 return 0;
40979
40980 /* check if the resource is reserved */
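
The pnp/resource.c change removes dead comparisons: *irq and *dma are unsigned resource values, so *irq < 0 can never be true, and GCC's -Wtype-limits flags exactly this pattern. The same cleanup shows up again in scsi_transport_fc.c further down, where a value from simple_strtoul() (return type unsigned long) was being tested against 0. Both halves of the old check, on an unsigned value:

	#include <errno.h>

	static int check_irq(unsigned long irq)
	{
		if (irq < 0)	/* always false for unsigned; -Wtype-limits */
			return -EINVAL;
		if (irq > 15)	/* the only test that can actually fire */
			return -EINVAL;
		return 0;
	}
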
40981diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
40982index 7df7c5f..bd48c47 100644
40983--- a/drivers/power/pda_power.c
40984+++ b/drivers/power/pda_power.c
40985@@ -37,7 +37,11 @@ static int polling;
40986
40987 #ifdef CONFIG_USB_OTG_UTILS
40988 static struct usb_phy *transceiver;
40989-static struct notifier_block otg_nb;
40990+static int otg_handle_notification(struct notifier_block *nb,
40991+ unsigned long event, void *unused);
40992+static struct notifier_block otg_nb = {
40993+ .notifier_call = otg_handle_notification
40994+};
40995 #endif
40996
40997 static struct regulator *ac_draw;
40998@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
40999
41000 #ifdef CONFIG_USB_OTG_UTILS
41001 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
41002- otg_nb.notifier_call = otg_handle_notification;
41003 ret = usb_register_notifier(transceiver, &otg_nb);
41004 if (ret) {
41005 dev_err(dev, "failure to register otg notifier\n");
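
The pda_power.c change trades a runtime assignment inside probe for a compile-time initializer plus a forward declaration of the callback. Functionally identical, but otg_nb is now fully formed before any code runs and never needs to be written, which is the precondition for treating such structures as read-only. The idiom, with hypothetical names:

	#include <linux/notifier.h>

	/* Forward declaration lets the struct be initialized statically. */
	static int demo_handle_notification(struct notifier_block *nb,
					    unsigned long event, void *data);

	static struct notifier_block demo_nb = {
		.notifier_call = demo_handle_notification,
	};

	static int demo_handle_notification(struct notifier_block *nb,
					    unsigned long event, void *data)
	{
		/* a real handler would act on 'event' here */
		return NOTIFY_OK;
	}
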
41006diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
41007index cc439fd..8fa30df 100644
41008--- a/drivers/power/power_supply.h
41009+++ b/drivers/power/power_supply.h
41010@@ -16,12 +16,12 @@ struct power_supply;
41011
41012 #ifdef CONFIG_SYSFS
41013
41014-extern void power_supply_init_attrs(struct device_type *dev_type);
41015+extern void power_supply_init_attrs(void);
41016 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
41017
41018 #else
41019
41020-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
41021+static inline void power_supply_init_attrs(void) {}
41022 #define power_supply_uevent NULL
41023
41024 #endif /* CONFIG_SYSFS */
41025diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
41026index 8a7cfb3..72e6e9b 100644
41027--- a/drivers/power/power_supply_core.c
41028+++ b/drivers/power/power_supply_core.c
41029@@ -24,7 +24,10 @@
41030 struct class *power_supply_class;
41031 EXPORT_SYMBOL_GPL(power_supply_class);
41032
41033-static struct device_type power_supply_dev_type;
41034+extern const struct attribute_group *power_supply_attr_groups[];
41035+static struct device_type power_supply_dev_type = {
41036+ .groups = power_supply_attr_groups,
41037+};
41038
41039 static int __power_supply_changed_work(struct device *dev, void *data)
41040 {
41041@@ -393,7 +396,7 @@ static int __init power_supply_class_init(void)
41042 return PTR_ERR(power_supply_class);
41043
41044 power_supply_class->dev_uevent = power_supply_uevent;
41045- power_supply_init_attrs(&power_supply_dev_type);
41046+ power_supply_init_attrs();
41047
41048 return 0;
41049 }
41050diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
41051index 40fa3b7..d9c2e0e 100644
41052--- a/drivers/power/power_supply_sysfs.c
41053+++ b/drivers/power/power_supply_sysfs.c
41054@@ -229,17 +229,15 @@ static struct attribute_group power_supply_attr_group = {
41055 .is_visible = power_supply_attr_is_visible,
41056 };
41057
41058-static const struct attribute_group *power_supply_attr_groups[] = {
41059+const struct attribute_group *power_supply_attr_groups[] = {
41060 &power_supply_attr_group,
41061 NULL,
41062 };
41063
41064-void power_supply_init_attrs(struct device_type *dev_type)
41065+void power_supply_init_attrs(void)
41066 {
41067 int i;
41068
41069- dev_type->groups = power_supply_attr_groups;
41070-
41071 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
41072 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
41073 }
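
The power_supply trio applies the same initialize-statically rule to struct device_type: rather than power_supply_init_attrs() poking dev_type->groups during boot, the attribute-group array is exported and wired into the static initializer of power_supply_dev_type, and the init function shrinks to filling in the attribute pointers it still owns. Sketch of the wiring (names hypothetical):

	#include <linux/device.h>

	static struct attribute *demo_attrs[] = { NULL };

	static const struct attribute_group demo_group = {
		.attrs = demo_attrs,
	};

	static const struct attribute_group *demo_groups[] = {
		&demo_group,
		NULL,
	};

	static struct device_type demo_dev_type = {
		.groups = demo_groups,		/* set at compile time */
	};
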
41074diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
41075index 4d7c635..9860196 100644
41076--- a/drivers/regulator/max8660.c
41077+++ b/drivers/regulator/max8660.c
41078@@ -333,8 +333,10 @@ static int max8660_probe(struct i2c_client *client,
41079 max8660->shadow_regs[MAX8660_OVER1] = 5;
41080 } else {
41081 /* Otherwise devices can be toggled via software */
41082- max8660_dcdc_ops.enable = max8660_dcdc_enable;
41083- max8660_dcdc_ops.disable = max8660_dcdc_disable;
41084+ pax_open_kernel();
41085+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
41086+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
41087+ pax_close_kernel();
41088 }
41089
41090 /*
41091diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
41092index 9a8ea91..c483dd9 100644
41093--- a/drivers/regulator/max8973-regulator.c
41094+++ b/drivers/regulator/max8973-regulator.c
41095@@ -401,9 +401,11 @@ static int max8973_probe(struct i2c_client *client,
41096 if (!pdata->enable_ext_control) {
41097 max->desc.enable_reg = MAX8973_VOUT;
41098 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
41099- max8973_dcdc_ops.enable = regulator_enable_regmap;
41100- max8973_dcdc_ops.disable = regulator_disable_regmap;
41101- max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
41102+ pax_open_kernel();
41103+ *(void **)&max8973_dcdc_ops.enable = regulator_enable_regmap;
41104+ *(void **)&max8973_dcdc_ops.disable = regulator_disable_regmap;
41105+ *(void **)&max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
41106+ pax_close_kernel();
41107 }
41108
41109 max->enable_external_control = pdata->enable_ext_control;
41110diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
41111index 0d84b1f..c2da6ac 100644
41112--- a/drivers/regulator/mc13892-regulator.c
41113+++ b/drivers/regulator/mc13892-regulator.c
41114@@ -540,10 +540,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
41115 }
41116 mc13xxx_unlock(mc13892);
41117
41118- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
41119+ pax_open_kernel();
41120+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
41121 = mc13892_vcam_set_mode;
41122- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
41123+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
41124 = mc13892_vcam_get_mode;
41125+ pax_close_kernel();
41126
41127 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
41128 ARRAY_SIZE(mc13892_regulators));
41129diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
41130index 16630aa..6afc992 100644
41131--- a/drivers/rtc/rtc-cmos.c
41132+++ b/drivers/rtc/rtc-cmos.c
41133@@ -724,7 +724,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
41134 hpet_rtc_timer_init();
41135
41136 /* export at least the first block of NVRAM */
41137- nvram.size = address_space - NVRAM_OFFSET;
41138+ pax_open_kernel();
41139+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
41140+ pax_close_kernel();
41141 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
41142 if (retval < 0) {
41143 dev_dbg(dev, "can't create nvram file? %d\n", retval);
41144diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
41145index 9a86b4b..3a383dc 100644
41146--- a/drivers/rtc/rtc-dev.c
41147+++ b/drivers/rtc/rtc-dev.c
41148@@ -14,6 +14,7 @@
41149 #include <linux/module.h>
41150 #include <linux/rtc.h>
41151 #include <linux/sched.h>
41152+#include <linux/grsecurity.h>
41153 #include "rtc-core.h"
41154
41155 static dev_t rtc_devt;
41156@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
41157 if (copy_from_user(&tm, uarg, sizeof(tm)))
41158 return -EFAULT;
41159
41160+ gr_log_timechange();
41161+
41162 return rtc_set_time(rtc, &tm);
41163
41164 case RTC_PIE_ON:
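
rtc-dev.c picks up a grsecurity audit hook: gr_log_timechange(), declared in the grsec-private <linux/grsecurity.h>, records that the calling process set the hardware clock, just before rtc_set_time() runs with the user-supplied time. Only the RTC_SET_TIME ioctl is instrumented; reads are left alone.
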
41165diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
41166index e0d0ba4..3c65868 100644
41167--- a/drivers/rtc/rtc-ds1307.c
41168+++ b/drivers/rtc/rtc-ds1307.c
41169@@ -106,7 +106,7 @@ struct ds1307 {
41170 u8 offset; /* register's offset */
41171 u8 regs[11];
41172 u16 nvram_offset;
41173- struct bin_attribute *nvram;
41174+ bin_attribute_no_const *nvram;
41175 enum ds_type type;
41176 unsigned long flags;
41177 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
41178diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
41179index 130f29a..6179d03 100644
41180--- a/drivers/rtc/rtc-m48t59.c
41181+++ b/drivers/rtc/rtc-m48t59.c
41182@@ -482,7 +482,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
41183 goto out;
41184 }
41185
41186- m48t59_nvram_attr.size = pdata->offset;
41187+ pax_open_kernel();
41188+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
41189+ pax_close_kernel();
41190
41191 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
41192 if (ret) {
41193diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
41194index e693af6..2e525b6 100644
41195--- a/drivers/scsi/bfa/bfa_fcpim.h
41196+++ b/drivers/scsi/bfa/bfa_fcpim.h
41197@@ -36,7 +36,7 @@ struct bfa_iotag_s {
41198
41199 struct bfa_itn_s {
41200 bfa_isr_func_t isr;
41201-};
41202+} __no_const;
41203
41204 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
41205 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
41206diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
41207index 23a90e7..9cf04ee 100644
41208--- a/drivers/scsi/bfa/bfa_ioc.h
41209+++ b/drivers/scsi/bfa/bfa_ioc.h
41210@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
41211 bfa_ioc_disable_cbfn_t disable_cbfn;
41212 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
41213 bfa_ioc_reset_cbfn_t reset_cbfn;
41214-};
41215+} __no_const;
41216
41217 /*
41218 * IOC event notification mechanism.
41219@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
41220 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
41221 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
41222 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
41223-};
41224+} __no_const;
41225
41226 /*
41227 * Queue element to wait for room in request queue. FIFO order is
41228diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
41229index 593085a..47aa999 100644
41230--- a/drivers/scsi/hosts.c
41231+++ b/drivers/scsi/hosts.c
41232@@ -42,7 +42,7 @@
41233 #include "scsi_logging.h"
41234
41235
41236-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
41237+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
41238
41239
41240 static void scsi_host_cls_release(struct device *dev)
41241@@ -361,7 +361,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
41242 * subtract one because we increment first then return, but we need to
41243 * know what the next host number was before increment
41244 */
41245- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
41246+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
41247 shost->dma_channel = 0xff;
41248
41249 /* These three are default values which can be overridden */
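
hosts.c introduces the transformation that dominates the rest of this section. PaX's REFCOUNT hardening makes atomic_t operations trap on overflow, turning a reference-count wrap into a detectable event instead of a use-after-free. Counters that are allowed to wrap, i.e. IDs and statistics (scsi_host_next_hn here, the libfc exchange stats, the lpfc trace counters, the pmcraid failure counts below), therefore move to the grsec-specific atomic_unchecked_t and its *_unchecked() accessors to opt out of the trap. In sketch form (not mainline semantics):

	/* atomic_t           - inc/dec trap on overflow under REFCOUNT;
	 *                      the right choice for refcounts.
	 * atomic_unchecked_t - plain wrapping arithmetic; the right
	 *                      choice for stats and ever-growing IDs.  */
	static atomic_unchecked_t next_id = ATOMIC_INIT(0);

	static int get_next_id(void)
	{
		return atomic_inc_return_unchecked(&next_id);	/* may wrap */
	}
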
41250diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
41251index 4f33806..afd6f60 100644
41252--- a/drivers/scsi/hpsa.c
41253+++ b/drivers/scsi/hpsa.c
41254@@ -554,7 +554,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
41255 unsigned long flags;
41256
41257 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
41258- return h->access.command_completed(h, q);
41259+ return h->access->command_completed(h, q);
41260
41261 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
41262 a = rq->head[rq->current_entry];
41263@@ -3374,7 +3374,7 @@ static void start_io(struct ctlr_info *h)
41264 while (!list_empty(&h->reqQ)) {
41265 c = list_entry(h->reqQ.next, struct CommandList, list);
41266 /* can't do anything if fifo is full */
41267- if ((h->access.fifo_full(h))) {
41268+ if ((h->access->fifo_full(h))) {
41269 dev_warn(&h->pdev->dev, "fifo full\n");
41270 break;
41271 }
41272@@ -3396,7 +3396,7 @@ static void start_io(struct ctlr_info *h)
41273
41274 /* Tell the controller execute command */
41275 spin_unlock_irqrestore(&h->lock, flags);
41276- h->access.submit_command(h, c);
41277+ h->access->submit_command(h, c);
41278 spin_lock_irqsave(&h->lock, flags);
41279 }
41280 spin_unlock_irqrestore(&h->lock, flags);
41281@@ -3404,17 +3404,17 @@ static void start_io(struct ctlr_info *h)
41282
41283 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
41284 {
41285- return h->access.command_completed(h, q);
41286+ return h->access->command_completed(h, q);
41287 }
41288
41289 static inline bool interrupt_pending(struct ctlr_info *h)
41290 {
41291- return h->access.intr_pending(h);
41292+ return h->access->intr_pending(h);
41293 }
41294
41295 static inline long interrupt_not_for_us(struct ctlr_info *h)
41296 {
41297- return (h->access.intr_pending(h) == 0) ||
41298+ return (h->access->intr_pending(h) == 0) ||
41299 (h->interrupts_enabled == 0);
41300 }
41301
41302@@ -4316,7 +4316,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
41303 if (prod_index < 0)
41304 return -ENODEV;
41305 h->product_name = products[prod_index].product_name;
41306- h->access = *(products[prod_index].access);
41307+ h->access = products[prod_index].access;
41308
41309 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
41310 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
41311@@ -4598,7 +4598,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
41312
41313 assert_spin_locked(&lockup_detector_lock);
41314 remove_ctlr_from_lockup_detector_list(h);
41315- h->access.set_intr_mask(h, HPSA_INTR_OFF);
41316+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
41317 spin_lock_irqsave(&h->lock, flags);
41318 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
41319 spin_unlock_irqrestore(&h->lock, flags);
41320@@ -4775,7 +4775,7 @@ reinit_after_soft_reset:
41321 }
41322
41323 /* make sure the board interrupts are off */
41324- h->access.set_intr_mask(h, HPSA_INTR_OFF);
41325+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
41326
41327 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
41328 goto clean2;
41329@@ -4809,7 +4809,7 @@ reinit_after_soft_reset:
41330 * fake ones to scoop up any residual completions.
41331 */
41332 spin_lock_irqsave(&h->lock, flags);
41333- h->access.set_intr_mask(h, HPSA_INTR_OFF);
41334+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
41335 spin_unlock_irqrestore(&h->lock, flags);
41336 free_irqs(h);
41337 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
41338@@ -4828,9 +4828,9 @@ reinit_after_soft_reset:
41339 dev_info(&h->pdev->dev, "Board READY.\n");
41340 dev_info(&h->pdev->dev,
41341 "Waiting for stale completions to drain.\n");
41342- h->access.set_intr_mask(h, HPSA_INTR_ON);
41343+ h->access->set_intr_mask(h, HPSA_INTR_ON);
41344 msleep(10000);
41345- h->access.set_intr_mask(h, HPSA_INTR_OFF);
41346+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
41347
41348 rc = controller_reset_failed(h->cfgtable);
41349 if (rc)
41350@@ -4851,7 +4851,7 @@ reinit_after_soft_reset:
41351 }
41352
41353 /* Turn the interrupts on so we can service requests */
41354- h->access.set_intr_mask(h, HPSA_INTR_ON);
41355+ h->access->set_intr_mask(h, HPSA_INTR_ON);
41356
41357 hpsa_hba_inquiry(h);
41358 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
41359@@ -4903,7 +4903,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
41360 * To write all data in the battery backed cache to disks
41361 */
41362 hpsa_flush_cache(h);
41363- h->access.set_intr_mask(h, HPSA_INTR_OFF);
41364+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
41365 hpsa_free_irqs_and_disable_msix(h);
41366 }
41367
41368@@ -5071,7 +5071,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
41369 return;
41370 }
41371 /* Change the access methods to the performant access methods */
41372- h->access = SA5_performant_access;
41373+ h->access = &SA5_performant_access;
41374 h->transMethod = CFGTBL_Trans_Performant;
41375 }
41376
41377diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
41378index 9816479..c5d4e97 100644
41379--- a/drivers/scsi/hpsa.h
41380+++ b/drivers/scsi/hpsa.h
41381@@ -79,7 +79,7 @@ struct ctlr_info {
41382 unsigned int msix_vector;
41383 unsigned int msi_vector;
41384 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
41385- struct access_method access;
41386+ struct access_method *access;
41387
41388 /* queue and queue Info */
41389 struct list_head reqQ;
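
The hpsa conversion takes yet another route to the same goal: instead of keeping a writable per-controller copy of the method table (struct access_method embedded by value in ctlr_info), the driver stores a pointer to the board-specific table, and every call site changes from h->access.fn(...) to h->access->fn(...). With no mutable copy, the tables themselves can stay read-only. Reduced sketch:

	struct access_method {
		void (*submit)(int cmd);
	};

	static void sa5_submit(int cmd) { (void)cmd; }

	static const struct access_method sa5_access = {
		.submit = sa5_submit,
	};

	struct ctlr_info {
		const struct access_method *access;	/* pointer, not a copy */
	};

	static void select_and_call(struct ctlr_info *h)
	{
		h->access = &sa5_access;	/* pick a const table */
		h->access->submit(0);		/* indirect call through it */
	}
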
41390diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
41391index c772d8d..35c362c 100644
41392--- a/drivers/scsi/libfc/fc_exch.c
41393+++ b/drivers/scsi/libfc/fc_exch.c
41394@@ -100,12 +100,12 @@ struct fc_exch_mgr {
41395 u16 pool_max_index;
41396
41397 struct {
41398- atomic_t no_free_exch;
41399- atomic_t no_free_exch_xid;
41400- atomic_t xid_not_found;
41401- atomic_t xid_busy;
41402- atomic_t seq_not_found;
41403- atomic_t non_bls_resp;
41404+ atomic_unchecked_t no_free_exch;
41405+ atomic_unchecked_t no_free_exch_xid;
41406+ atomic_unchecked_t xid_not_found;
41407+ atomic_unchecked_t xid_busy;
41408+ atomic_unchecked_t seq_not_found;
41409+ atomic_unchecked_t non_bls_resp;
41410 } stats;
41411 };
41412
41413@@ -725,7 +725,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
41414 /* allocate memory for exchange */
41415 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
41416 if (!ep) {
41417- atomic_inc(&mp->stats.no_free_exch);
41418+ atomic_inc_unchecked(&mp->stats.no_free_exch);
41419 goto out;
41420 }
41421 memset(ep, 0, sizeof(*ep));
41422@@ -786,7 +786,7 @@ out:
41423 return ep;
41424 err:
41425 spin_unlock_bh(&pool->lock);
41426- atomic_inc(&mp->stats.no_free_exch_xid);
41427+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
41428 mempool_free(ep, mp->ep_pool);
41429 return NULL;
41430 }
41431@@ -929,7 +929,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41432 xid = ntohs(fh->fh_ox_id); /* we originated exch */
41433 ep = fc_exch_find(mp, xid);
41434 if (!ep) {
41435- atomic_inc(&mp->stats.xid_not_found);
41436+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41437 reject = FC_RJT_OX_ID;
41438 goto out;
41439 }
41440@@ -959,7 +959,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41441 ep = fc_exch_find(mp, xid);
41442 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
41443 if (ep) {
41444- atomic_inc(&mp->stats.xid_busy);
41445+ atomic_inc_unchecked(&mp->stats.xid_busy);
41446 reject = FC_RJT_RX_ID;
41447 goto rel;
41448 }
41449@@ -970,7 +970,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41450 }
41451 xid = ep->xid; /* get our XID */
41452 } else if (!ep) {
41453- atomic_inc(&mp->stats.xid_not_found);
41454+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41455 reject = FC_RJT_RX_ID; /* XID not found */
41456 goto out;
41457 }
41458@@ -987,7 +987,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41459 } else {
41460 sp = &ep->seq;
41461 if (sp->id != fh->fh_seq_id) {
41462- atomic_inc(&mp->stats.seq_not_found);
41463+ atomic_inc_unchecked(&mp->stats.seq_not_found);
41464 if (f_ctl & FC_FC_END_SEQ) {
41465 /*
41466 * Update sequence_id based on incoming last
41467@@ -1437,22 +1437,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41468
41469 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
41470 if (!ep) {
41471- atomic_inc(&mp->stats.xid_not_found);
41472+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41473 goto out;
41474 }
41475 if (ep->esb_stat & ESB_ST_COMPLETE) {
41476- atomic_inc(&mp->stats.xid_not_found);
41477+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41478 goto rel;
41479 }
41480 if (ep->rxid == FC_XID_UNKNOWN)
41481 ep->rxid = ntohs(fh->fh_rx_id);
41482 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
41483- atomic_inc(&mp->stats.xid_not_found);
41484+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41485 goto rel;
41486 }
41487 if (ep->did != ntoh24(fh->fh_s_id) &&
41488 ep->did != FC_FID_FLOGI) {
41489- atomic_inc(&mp->stats.xid_not_found);
41490+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41491 goto rel;
41492 }
41493 sof = fr_sof(fp);
41494@@ -1461,7 +1461,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41495 sp->ssb_stat |= SSB_ST_RESP;
41496 sp->id = fh->fh_seq_id;
41497 } else if (sp->id != fh->fh_seq_id) {
41498- atomic_inc(&mp->stats.seq_not_found);
41499+ atomic_inc_unchecked(&mp->stats.seq_not_found);
41500 goto rel;
41501 }
41502
41503@@ -1525,9 +1525,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41504 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
41505
41506 if (!sp)
41507- atomic_inc(&mp->stats.xid_not_found);
41508+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41509 else
41510- atomic_inc(&mp->stats.non_bls_resp);
41511+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
41512
41513 fc_frame_free(fp);
41514 }
41515@@ -2174,13 +2174,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
41516
41517 list_for_each_entry(ema, &lport->ema_list, ema_list) {
41518 mp = ema->mp;
41519- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
41520+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
41521 st->fc_no_free_exch_xid +=
41522- atomic_read(&mp->stats.no_free_exch_xid);
41523- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
41524- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
41525- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
41526- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
41527+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
41528+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
41529+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
41530+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
41531+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
41532 }
41533 }
41534 EXPORT_SYMBOL(fc_exch_update_stats);
41535diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
41536index bdb81cd..d3c7c2c 100644
41537--- a/drivers/scsi/libsas/sas_ata.c
41538+++ b/drivers/scsi/libsas/sas_ata.c
41539@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
41540 .postreset = ata_std_postreset,
41541 .error_handler = ata_std_error_handler,
41542 .post_internal_cmd = sas_ata_post_internal,
41543- .qc_defer = ata_std_qc_defer,
41544+ .qc_defer = ata_std_qc_defer,
41545 .qc_prep = ata_noop_qc_prep,
41546 .qc_issue = sas_ata_qc_issue,
41547 .qc_fill_rtf = sas_ata_qc_fill_rtf,
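
(The sas_ata.c hunk above only adjusts whitespace in the .qc_defer initializer to match the surrounding table; there is no functional change.)
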
41548diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
41549index df4c13a..a51e90c 100644
41550--- a/drivers/scsi/lpfc/lpfc.h
41551+++ b/drivers/scsi/lpfc/lpfc.h
41552@@ -424,7 +424,7 @@ struct lpfc_vport {
41553 struct dentry *debug_nodelist;
41554 struct dentry *vport_debugfs_root;
41555 struct lpfc_debugfs_trc *disc_trc;
41556- atomic_t disc_trc_cnt;
41557+ atomic_unchecked_t disc_trc_cnt;
41558 #endif
41559 uint8_t stat_data_enabled;
41560 uint8_t stat_data_blocked;
41561@@ -842,8 +842,8 @@ struct lpfc_hba {
41562 struct timer_list fabric_block_timer;
41563 unsigned long bit_flags;
41564 #define FABRIC_COMANDS_BLOCKED 0
41565- atomic_t num_rsrc_err;
41566- atomic_t num_cmd_success;
41567+ atomic_unchecked_t num_rsrc_err;
41568+ atomic_unchecked_t num_cmd_success;
41569 unsigned long last_rsrc_error_time;
41570 unsigned long last_ramp_down_time;
41571 unsigned long last_ramp_up_time;
41572@@ -879,7 +879,7 @@ struct lpfc_hba {
41573
41574 struct dentry *debug_slow_ring_trc;
41575 struct lpfc_debugfs_trc *slow_ring_trc;
41576- atomic_t slow_ring_trc_cnt;
41577+ atomic_unchecked_t slow_ring_trc_cnt;
41578 /* iDiag debugfs sub-directory */
41579 struct dentry *idiag_root;
41580 struct dentry *idiag_pci_cfg;
41581diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
41582index f63f5ff..de29189 100644
41583--- a/drivers/scsi/lpfc/lpfc_debugfs.c
41584+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
41585@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
41586
41587 #include <linux/debugfs.h>
41588
41589-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
41590+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
41591 static unsigned long lpfc_debugfs_start_time = 0L;
41592
41593 /* iDiag */
41594@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
41595 lpfc_debugfs_enable = 0;
41596
41597 len = 0;
41598- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
41599+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
41600 (lpfc_debugfs_max_disc_trc - 1);
41601 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
41602 dtp = vport->disc_trc + i;
41603@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
41604 lpfc_debugfs_enable = 0;
41605
41606 len = 0;
41607- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
41608+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
41609 (lpfc_debugfs_max_slow_ring_trc - 1);
41610 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
41611 dtp = phba->slow_ring_trc + i;
41612@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
41613 !vport || !vport->disc_trc)
41614 return;
41615
41616- index = atomic_inc_return(&vport->disc_trc_cnt) &
41617+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
41618 (lpfc_debugfs_max_disc_trc - 1);
41619 dtp = vport->disc_trc + index;
41620 dtp->fmt = fmt;
41621 dtp->data1 = data1;
41622 dtp->data2 = data2;
41623 dtp->data3 = data3;
41624- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
41625+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
41626 dtp->jif = jiffies;
41627 #endif
41628 return;
41629@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
41630 !phba || !phba->slow_ring_trc)
41631 return;
41632
41633- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
41634+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
41635 (lpfc_debugfs_max_slow_ring_trc - 1);
41636 dtp = phba->slow_ring_trc + index;
41637 dtp->fmt = fmt;
41638 dtp->data1 = data1;
41639 dtp->data2 = data2;
41640 dtp->data3 = data3;
41641- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
41642+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
41643 dtp->jif = jiffies;
41644 #endif
41645 return;
41646@@ -4182,7 +4182,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
41647 "slow_ring buffer\n");
41648 goto debug_failed;
41649 }
41650- atomic_set(&phba->slow_ring_trc_cnt, 0);
41651+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
41652 memset(phba->slow_ring_trc, 0,
41653 (sizeof(struct lpfc_debugfs_trc) *
41654 lpfc_debugfs_max_slow_ring_trc));
41655@@ -4228,7 +4228,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
41656 "buffer\n");
41657 goto debug_failed;
41658 }
41659- atomic_set(&vport->disc_trc_cnt, 0);
41660+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
41661
41662 snprintf(name, sizeof(name), "discovery_trace");
41663 vport->debug_disc_trc =
41664diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
41665index 89ad558..76956c4 100644
41666--- a/drivers/scsi/lpfc/lpfc_init.c
41667+++ b/drivers/scsi/lpfc/lpfc_init.c
41668@@ -10618,8 +10618,10 @@ lpfc_init(void)
41669 "misc_register returned with status %d", error);
41670
41671 if (lpfc_enable_npiv) {
41672- lpfc_transport_functions.vport_create = lpfc_vport_create;
41673- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
41674+ pax_open_kernel();
41675+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
41676+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
41677+ pax_close_kernel();
41678 }
41679 lpfc_transport_template =
41680 fc_attach_transport(&lpfc_transport_functions);
41681diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
41682index 60e5a17..ff7a793 100644
41683--- a/drivers/scsi/lpfc/lpfc_scsi.c
41684+++ b/drivers/scsi/lpfc/lpfc_scsi.c
41685@@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
41686 uint32_t evt_posted;
41687
41688 spin_lock_irqsave(&phba->hbalock, flags);
41689- atomic_inc(&phba->num_rsrc_err);
41690+ atomic_inc_unchecked(&phba->num_rsrc_err);
41691 phba->last_rsrc_error_time = jiffies;
41692
41693 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
41694@@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
41695 unsigned long flags;
41696 struct lpfc_hba *phba = vport->phba;
41697 uint32_t evt_posted;
41698- atomic_inc(&phba->num_cmd_success);
41699+ atomic_inc_unchecked(&phba->num_cmd_success);
41700
41701 if (vport->cfg_lun_queue_depth <= queue_depth)
41702 return;
41703@@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
41704 unsigned long num_rsrc_err, num_cmd_success;
41705 int i;
41706
41707- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
41708- num_cmd_success = atomic_read(&phba->num_cmd_success);
41709+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
41710+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
41711
41712 /*
41713 * The error and success command counters are global per
41714@@ -419,8 +419,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
41715 }
41716 }
41717 lpfc_destroy_vport_work_array(phba, vports);
41718- atomic_set(&phba->num_rsrc_err, 0);
41719- atomic_set(&phba->num_cmd_success, 0);
41720+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
41721+ atomic_set_unchecked(&phba->num_cmd_success, 0);
41722 }
41723
41724 /**
41725@@ -454,8 +454,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
41726 }
41727 }
41728 lpfc_destroy_vport_work_array(phba, vports);
41729- atomic_set(&phba->num_rsrc_err, 0);
41730- atomic_set(&phba->num_cmd_success, 0);
41731+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
41732+ atomic_set_unchecked(&phba->num_cmd_success, 0);
41733 }
41734
41735 /**
41736diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
41737index b46f5e9..c4c4ccb 100644
41738--- a/drivers/scsi/pmcraid.c
41739+++ b/drivers/scsi/pmcraid.c
41740@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
41741 res->scsi_dev = scsi_dev;
41742 scsi_dev->hostdata = res;
41743 res->change_detected = 0;
41744- atomic_set(&res->read_failures, 0);
41745- atomic_set(&res->write_failures, 0);
41746+ atomic_set_unchecked(&res->read_failures, 0);
41747+ atomic_set_unchecked(&res->write_failures, 0);
41748 rc = 0;
41749 }
41750 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
41751@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
41752
41753 /* If this was a SCSI read/write command keep count of errors */
41754 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
41755- atomic_inc(&res->read_failures);
41756+ atomic_inc_unchecked(&res->read_failures);
41757 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
41758- atomic_inc(&res->write_failures);
41759+ atomic_inc_unchecked(&res->write_failures);
41760
41761 if (!RES_IS_GSCSI(res->cfg_entry) &&
41762 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
41763@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
41764 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
41765 * hrrq_id assigned here in queuecommand
41766 */
41767- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
41768+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
41769 pinstance->num_hrrq;
41770 cmd->cmd_done = pmcraid_io_done;
41771
41772@@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
41773 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
41774 * hrrq_id assigned here in queuecommand
41775 */
41776- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
41777+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
41778 pinstance->num_hrrq;
41779
41780 if (request_size) {
41781@@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
41782
41783 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
41784 /* add resources only after host is added into system */
41785- if (!atomic_read(&pinstance->expose_resources))
41786+ if (!atomic_read_unchecked(&pinstance->expose_resources))
41787 return;
41788
41789 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
41790@@ -5324,8 +5324,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
41791 init_waitqueue_head(&pinstance->reset_wait_q);
41792
41793 atomic_set(&pinstance->outstanding_cmds, 0);
41794- atomic_set(&pinstance->last_message_id, 0);
41795- atomic_set(&pinstance->expose_resources, 0);
41796+ atomic_set_unchecked(&pinstance->last_message_id, 0);
41797+ atomic_set_unchecked(&pinstance->expose_resources, 0);
41798
41799 INIT_LIST_HEAD(&pinstance->free_res_q);
41800 INIT_LIST_HEAD(&pinstance->used_res_q);
41801@@ -6038,7 +6038,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
41802 /* Schedule worker thread to handle CCN and take care of adding and
41803 * removing devices to OS
41804 */
41805- atomic_set(&pinstance->expose_resources, 1);
41806+ atomic_set_unchecked(&pinstance->expose_resources, 1);
41807 schedule_work(&pinstance->worker_q);
41808 return rc;
41809
41810diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
41811index e1d150f..6c6df44 100644
41812--- a/drivers/scsi/pmcraid.h
41813+++ b/drivers/scsi/pmcraid.h
41814@@ -748,7 +748,7 @@ struct pmcraid_instance {
41815 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
41816
41817 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
41818- atomic_t last_message_id;
41819+ atomic_unchecked_t last_message_id;
41820
41821 /* configuration table */
41822 struct pmcraid_config_table *cfg_table;
41823@@ -777,7 +777,7 @@ struct pmcraid_instance {
41824 atomic_t outstanding_cmds;
41825
41826 /* should add/delete resources to mid-layer now ?*/
41827- atomic_t expose_resources;
41828+ atomic_unchecked_t expose_resources;
41829
41830
41831
41832@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
41833 struct pmcraid_config_table_entry_ext cfg_entry_ext;
41834 };
41835 struct scsi_device *scsi_dev; /* Link scsi_device structure */
41836- atomic_t read_failures; /* count of failed READ commands */
41837- atomic_t write_failures; /* count of failed WRITE commands */
41838+ atomic_unchecked_t read_failures; /* count of failed READ commands */
41839+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
41840
41841 /* To indicate add/delete/modify during CCN */
41842 u8 change_detected;
41843diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
41844index 83d7984..a27d947 100644
41845--- a/drivers/scsi/qla2xxx/qla_attr.c
41846+++ b/drivers/scsi/qla2xxx/qla_attr.c
41847@@ -1969,7 +1969,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
41848 return 0;
41849 }
41850
41851-struct fc_function_template qla2xxx_transport_functions = {
41852+fc_function_template_no_const qla2xxx_transport_functions = {
41853
41854 .show_host_node_name = 1,
41855 .show_host_port_name = 1,
41856@@ -2016,7 +2016,7 @@ struct fc_function_template qla2xxx_transport_functions = {
41857 .bsg_timeout = qla24xx_bsg_timeout,
41858 };
41859
41860-struct fc_function_template qla2xxx_transport_vport_functions = {
41861+fc_function_template_no_const qla2xxx_transport_vport_functions = {
41862
41863 .show_host_node_name = 1,
41864 .show_host_port_name = 1,
41865diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
41866index 2411d1a..4673766 100644
41867--- a/drivers/scsi/qla2xxx/qla_gbl.h
41868+++ b/drivers/scsi/qla2xxx/qla_gbl.h
41869@@ -515,8 +515,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
41870 struct device_attribute;
41871 extern struct device_attribute *qla2x00_host_attrs[];
41872 struct fc_function_template;
41873-extern struct fc_function_template qla2xxx_transport_functions;
41874-extern struct fc_function_template qla2xxx_transport_vport_functions;
41875+extern fc_function_template_no_const qla2xxx_transport_functions;
41876+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
41877 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
41878 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
41879 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
41880diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
41881index 10d23f8..a7d5d4c 100644
41882--- a/drivers/scsi/qla2xxx/qla_os.c
41883+++ b/drivers/scsi/qla2xxx/qla_os.c
41884@@ -1472,8 +1472,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
41885 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
41886 /* Ok, a 64bit DMA mask is applicable. */
41887 ha->flags.enable_64bit_addressing = 1;
41888- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
41889- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
41890+ pax_open_kernel();
41891+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
41892+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
41893+ pax_close_kernel();
41894 return;
41895 }
41896 }
41897diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
41898index 329d553..f20d31d 100644
41899--- a/drivers/scsi/qla4xxx/ql4_def.h
41900+++ b/drivers/scsi/qla4xxx/ql4_def.h
41901@@ -273,7 +273,7 @@ struct ddb_entry {
41902 * (4000 only) */
41903 atomic_t relogin_timer; /* Max Time to wait for
41904 * relogin to complete */
41905- atomic_t relogin_retry_count; /* Num of times relogin has been
41906+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
41907 * retried */
41908 uint32_t default_time2wait; /* Default Min time between
41909 * relogins (+aens) */
41910diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
41911index 4cec123..7c1329f 100644
41912--- a/drivers/scsi/qla4xxx/ql4_os.c
41913+++ b/drivers/scsi/qla4xxx/ql4_os.c
41914@@ -2621,12 +2621,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
41915 */
41916 if (!iscsi_is_session_online(cls_sess)) {
41917 /* Reset retry relogin timer */
41918- atomic_inc(&ddb_entry->relogin_retry_count);
41919+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
41920 DEBUG2(ql4_printk(KERN_INFO, ha,
41921 "%s: index[%d] relogin timed out-retrying"
41922 " relogin (%d), retry (%d)\n", __func__,
41923 ddb_entry->fw_ddb_index,
41924- atomic_read(&ddb_entry->relogin_retry_count),
41925+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
41926 ddb_entry->default_time2wait + 4));
41927 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
41928 atomic_set(&ddb_entry->retry_relogin_timer,
41929@@ -4738,7 +4738,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
41930
41931 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
41932 atomic_set(&ddb_entry->relogin_timer, 0);
41933- atomic_set(&ddb_entry->relogin_retry_count, 0);
41934+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41935 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
41936 ddb_entry->default_relogin_timeout =
41937 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
41938diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
41939index 2c0d0ec..4e8681a 100644
41940--- a/drivers/scsi/scsi.c
41941+++ b/drivers/scsi/scsi.c
41942@@ -661,7 +661,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
41943 unsigned long timeout;
41944 int rtn = 0;
41945
41946- atomic_inc(&cmd->device->iorequest_cnt);
41947+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41948
41949 /* check if the device is still usable */
41950 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
41951diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
41952index f1bf5af..f67e943 100644
41953--- a/drivers/scsi/scsi_lib.c
41954+++ b/drivers/scsi/scsi_lib.c
41955@@ -1454,7 +1454,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
41956 shost = sdev->host;
41957 scsi_init_cmd_errh(cmd);
41958 cmd->result = DID_NO_CONNECT << 16;
41959- atomic_inc(&cmd->device->iorequest_cnt);
41960+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41961
41962 /*
41963 * SCSI request completion path will do scsi_device_unbusy(),
41964@@ -1480,9 +1480,9 @@ static void scsi_softirq_done(struct request *rq)
41965
41966 INIT_LIST_HEAD(&cmd->eh_entry);
41967
41968- atomic_inc(&cmd->device->iodone_cnt);
41969+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
41970 if (cmd->result)
41971- atomic_inc(&cmd->device->ioerr_cnt);
41972+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
41973
41974 disposition = scsi_decide_disposition(cmd);
41975 if (disposition != SUCCESS &&
41976diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
41977index 931a7d9..0c2a754 100644
41978--- a/drivers/scsi/scsi_sysfs.c
41979+++ b/drivers/scsi/scsi_sysfs.c
41980@@ -658,7 +658,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
41981 char *buf) \
41982 { \
41983 struct scsi_device *sdev = to_scsi_device(dev); \
41984- unsigned long long count = atomic_read(&sdev->field); \
41985+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
41986 return snprintf(buf, 20, "0x%llx\n", count); \
41987 } \
41988 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
41989diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
41990index 84a1fdf..693b0d6 100644
41991--- a/drivers/scsi/scsi_tgt_lib.c
41992+++ b/drivers/scsi/scsi_tgt_lib.c
41993@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
41994 int err;
41995
41996 dprintk("%lx %u\n", uaddr, len);
41997- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
41998+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
41999 if (err) {
42000 /*
42001 * TODO: need to fixup sg_tablesize, max_segment_size,
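
The scsi_tgt_lib.c cast, like the sg.c one further down, is a sparse annotation fix rather than a behavioral change: blk_rq_map_user() takes a void __user *, so casting the user-supplied address to a plain kernel pointer was silencing the address-space check instead of satisfying it. __user is invisible to the compiler proper, but lets sparse verify that userspace pointers only reach memory through copy_from_user()-style accessors. Illustration:

	#include <linux/types.h>
	#include <linux/uaccess.h>
	#include <linux/errno.h>

	static int fetch_word(unsigned long uaddr, u32 *out)
	{
		const u32 __user *p = (const u32 __user *)uaddr;

		/* sparse would complain if we dereferenced p directly */
		if (copy_from_user(out, p, sizeof(*out)))
			return -EFAULT;
		return 0;
	}
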
42002diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
42003index e894ca7..de9d7660 100644
42004--- a/drivers/scsi/scsi_transport_fc.c
42005+++ b/drivers/scsi/scsi_transport_fc.c
42006@@ -498,7 +498,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
42007 * Netlink Infrastructure
42008 */
42009
42010-static atomic_t fc_event_seq;
42011+static atomic_unchecked_t fc_event_seq;
42012
42013 /**
42014 * fc_get_event_number - Obtain the next sequential FC event number
42015@@ -511,7 +511,7 @@ static atomic_t fc_event_seq;
42016 u32
42017 fc_get_event_number(void)
42018 {
42019- return atomic_add_return(1, &fc_event_seq);
42020+ return atomic_add_return_unchecked(1, &fc_event_seq);
42021 }
42022 EXPORT_SYMBOL(fc_get_event_number);
42023
42024@@ -659,7 +659,7 @@ static __init int fc_transport_init(void)
42025 {
42026 int error;
42027
42028- atomic_set(&fc_event_seq, 0);
42029+ atomic_set_unchecked(&fc_event_seq, 0);
42030
42031 error = transport_class_register(&fc_host_class);
42032 if (error)
42033@@ -849,7 +849,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
42034 char *cp;
42035
42036 *val = simple_strtoul(buf, &cp, 0);
42037- if ((*cp && (*cp != '\n')) || (*val < 0))
42038+ if (*cp && (*cp != '\n'))
42039 return -EINVAL;
42040 /*
42041 * Check for overflow; dev_loss_tmo is u32
42042diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
42043index 31969f2..2b348f0 100644
42044--- a/drivers/scsi/scsi_transport_iscsi.c
42045+++ b/drivers/scsi/scsi_transport_iscsi.c
42046@@ -79,7 +79,7 @@ struct iscsi_internal {
42047 struct transport_container session_cont;
42048 };
42049
42050-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
42051+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
42052 static struct workqueue_struct *iscsi_eh_timer_workq;
42053
42054 static DEFINE_IDA(iscsi_sess_ida);
42055@@ -1064,7 +1064,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
42056 int err;
42057
42058 ihost = shost->shost_data;
42059- session->sid = atomic_add_return(1, &iscsi_session_nr);
42060+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
42061
42062 if (target_id == ISCSI_MAX_TARGET) {
42063 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
42064@@ -2943,7 +2943,7 @@ static __init int iscsi_transport_init(void)
42065 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
42066 ISCSI_TRANSPORT_VERSION);
42067
42068- atomic_set(&iscsi_session_nr, 0);
42069+ atomic_set_unchecked(&iscsi_session_nr, 0);
42070
42071 err = class_register(&iscsi_transport_class);
42072 if (err)
42073diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
42074index f379c7f..e8fc69c 100644
42075--- a/drivers/scsi/scsi_transport_srp.c
42076+++ b/drivers/scsi/scsi_transport_srp.c
42077@@ -33,7 +33,7 @@
42078 #include "scsi_transport_srp_internal.h"
42079
42080 struct srp_host_attrs {
42081- atomic_t next_port_id;
42082+ atomic_unchecked_t next_port_id;
42083 };
42084 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
42085
42086@@ -61,7 +61,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
42087 struct Scsi_Host *shost = dev_to_shost(dev);
42088 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
42089
42090- atomic_set(&srp_host->next_port_id, 0);
42091+ atomic_set_unchecked(&srp_host->next_port_id, 0);
42092 return 0;
42093 }
42094
42095@@ -210,7 +210,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
42096 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
42097 rport->roles = ids->roles;
42098
42099- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
42100+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
42101 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
42102
42103 transport_setup_device(&rport->dev);
42104diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
42105index 7992635..609faf8 100644
42106--- a/drivers/scsi/sd.c
42107+++ b/drivers/scsi/sd.c
42108@@ -2909,7 +2909,7 @@ static int sd_probe(struct device *dev)
42109 sdkp->disk = gd;
42110 sdkp->index = index;
42111 atomic_set(&sdkp->openers, 0);
42112- atomic_set(&sdkp->device->ioerr_cnt, 0);
42113+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
42114
42115 if (!sdp->request_queue->rq_timeout) {
42116 if (sdp->type != TYPE_MOD)
42117diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
42118index be2c9a6..275525c 100644
42119--- a/drivers/scsi/sg.c
42120+++ b/drivers/scsi/sg.c
42121@@ -1101,7 +1101,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
42122 sdp->disk->disk_name,
42123 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
42124 NULL,
42125- (char *)arg);
42126+ (char __user *)arg);
42127 case BLKTRACESTART:
42128 return blk_trace_startstop(sdp->device->request_queue, 1);
42129 case BLKTRACESTOP:
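
The (char __user *)arg cast here, like the blk_rq_map_user() cast at the top of this section, adds only a sparse address-space annotation and generates no code. Roughly, assuming a driver-private ioctl handler:

    /* __user tags a pointer as userspace for sparse (make C=1);
     * dereferencing it directly is flagged -- access must go through
     * the copy_*_user() helpers. */
    static long example_ioctl(unsigned long arg)
    {
            char __user *ubuf = (char __user *)arg;  /* annotation only */
            char c;

            if (get_user(c, ubuf))
                    return -EFAULT;
            return c;
    }
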
42130diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
42131index 19ee901..6e8c2ef 100644
42132--- a/drivers/spi/spi.c
42133+++ b/drivers/spi/spi.c
42134@@ -1616,7 +1616,7 @@ int spi_bus_unlock(struct spi_master *master)
42135 EXPORT_SYMBOL_GPL(spi_bus_unlock);
42136
42137 /* portable code must never pass more than 32 bytes */
42138-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
42139+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
42140
42141 static u8 *buf;
42142
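
The 32UL change matters because the kernel's max() refuses mismatched operand types: on architectures where SMP_CACHE_BYTES expands to an unsigned long, max(32, SMP_CACHE_BYTES) fails to build. The type check works by comparing pointer types, as in this reduced form of the 3.x-era macro:

    #define max(x, y) ({                            \
            typeof(x) _max1 = (x);                  \
            typeof(y) _max2 = (y);                  \
            (void) (&_max1 == &_max2); /* warns if types differ */ \
            _max1 > _max2 ? _max1 : _max2; })
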
42143diff --git a/drivers/staging/iio/iio_hwmon.c b/drivers/staging/iio/iio_hwmon.c
42144index c7a5f97..71ecd35 100644
42145--- a/drivers/staging/iio/iio_hwmon.c
42146+++ b/drivers/staging/iio/iio_hwmon.c
42147@@ -72,7 +72,7 @@ static void iio_hwmon_free_attrs(struct iio_hwmon_state *st)
42148 static int iio_hwmon_probe(struct platform_device *pdev)
42149 {
42150 struct iio_hwmon_state *st;
42151- struct sensor_device_attribute *a;
42152+ sensor_device_attribute_no_const *a;
42153 int ret, i;
42154 int in_i = 1, temp_i = 1, curr_i = 1;
42155 enum iio_chan_type type;
42156diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
42157index 34afc16..ffe44dd 100644
42158--- a/drivers/staging/octeon/ethernet-rx.c
42159+++ b/drivers/staging/octeon/ethernet-rx.c
42160@@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
42161 /* Increment RX stats for virtual ports */
42162 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
42163 #ifdef CONFIG_64BIT
42164- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
42165- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
42166+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
42167+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
42168 #else
42169- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
42170- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
42171+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
42172+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
42173 #endif
42174 }
42175 netif_receive_skb(skb);
42176@@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
42177 dev->name);
42178 */
42179 #ifdef CONFIG_64BIT
42180- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
42181+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
42182 #else
42183- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
42184+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
42185 #endif
42186 dev_kfree_skb_irq(skb);
42187 }
42188diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
42189index ef32dc1..a159d68 100644
42190--- a/drivers/staging/octeon/ethernet.c
42191+++ b/drivers/staging/octeon/ethernet.c
42192@@ -252,11 +252,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
42193 * since the RX tasklet also increments it.
42194 */
42195 #ifdef CONFIG_64BIT
42196- atomic64_add(rx_status.dropped_packets,
42197- (atomic64_t *)&priv->stats.rx_dropped);
42198+ atomic64_add_unchecked(rx_status.dropped_packets,
42199+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
42200 #else
42201- atomic_add(rx_status.dropped_packets,
42202- (atomic_t *)&priv->stats.rx_dropped);
42203+ atomic_add_unchecked(rx_status.dropped_packets,
42204+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
42205 #endif
42206 }
42207
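
Both octeon hunks rely on the same trick: struct net_device_stats fields are plain unsigned long, but on 64-bit they have the same width and alignment as atomic64_t, so the cast lets concurrent tasklet/process updates avoid torn read-modify-write cycles; the patch merely mirrors the existing casts with the unchecked types. A sketch of the invariant the cast depends on (the helper name is illustrative):

    static inline void stats_add64(unsigned long *field, u64 n)
    {
            /* only valid where the sizes match, hence the CONFIG_64BIT split */
            BUILD_BUG_ON(sizeof(unsigned long) != sizeof(atomic64_t));
            atomic64_add(n, (atomic64_t *)field);
    }
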
42208diff --git a/drivers/staging/ramster/tmem.c b/drivers/staging/ramster/tmem.c
42209index a2b7e03..aaf3630 100644
42210--- a/drivers/staging/ramster/tmem.c
42211+++ b/drivers/staging/ramster/tmem.c
42212@@ -50,25 +50,25 @@
42213 * A tmem host implementation must use this function to register callbacks
42214 * for memory allocation.
42215 */
42216-static struct tmem_hostops tmem_hostops;
42217+static struct tmem_hostops *tmem_hostops;
42218
42219 static void tmem_objnode_tree_init(void);
42220
42221 void tmem_register_hostops(struct tmem_hostops *m)
42222 {
42223 tmem_objnode_tree_init();
42224- tmem_hostops = *m;
42225+ tmem_hostops = m;
42226 }
42227
42228 /*
42229 * A tmem host implementation must use this function to register
42230 * callbacks for a page-accessible memory (PAM) implementation.
42231 */
42232-static struct tmem_pamops tmem_pamops;
42233+static struct tmem_pamops *tmem_pamops;
42234
42235 void tmem_register_pamops(struct tmem_pamops *m)
42236 {
42237- tmem_pamops = *m;
42238+ tmem_pamops = m;
42239 }
42240
42241 /*
42242@@ -174,7 +174,7 @@ static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
42243 obj->pampd_count = 0;
42244 #ifdef CONFIG_RAMSTER
42245 if (tmem_pamops.new_obj != NULL)
42246- (*tmem_pamops.new_obj)(obj);
42247+ (tmem_pamops->new_obj)(obj);
42248 #endif
42249 SET_SENTINEL(obj, OBJ);
42250
42251@@ -210,7 +210,7 @@ static void tmem_pool_flush(struct tmem_pool *pool, bool destroy)
42252 rbnode = rb_next(rbnode);
42253 tmem_pampd_destroy_all_in_obj(obj, true);
42254 tmem_obj_free(obj, hb);
42255- (*tmem_hostops.obj_free)(obj, pool);
42256+ (tmem_hostops->obj_free)(obj, pool);
42257 }
42258 spin_unlock(&hb->lock);
42259 }
42260@@ -261,7 +261,7 @@ static struct tmem_objnode *tmem_objnode_alloc(struct tmem_obj *obj)
42261 ASSERT_SENTINEL(obj, OBJ);
42262 BUG_ON(obj->pool == NULL);
42263 ASSERT_SENTINEL(obj->pool, POOL);
42264- objnode = (*tmem_hostops.objnode_alloc)(obj->pool);
42265+ objnode = (tmem_hostops->objnode_alloc)(obj->pool);
42266 if (unlikely(objnode == NULL))
42267 goto out;
42268 objnode->obj = obj;
42269@@ -290,7 +290,7 @@ static void tmem_objnode_free(struct tmem_objnode *objnode)
42270 ASSERT_SENTINEL(pool, POOL);
42271 objnode->obj->objnode_count--;
42272 objnode->obj = NULL;
42273- (*tmem_hostops.objnode_free)(objnode, pool);
42274+ (tmem_hostops->objnode_free)(objnode, pool);
42275 }
42276
42277 /*
42278@@ -348,7 +348,7 @@ static void *tmem_pampd_replace_in_obj(struct tmem_obj *obj, uint32_t index,
42279 void *old_pampd = *(void **)slot;
42280 *(void **)slot = new_pampd;
42281 if (!no_free)
42282- (*tmem_pamops.free)(old_pampd, obj->pool,
42283+ (tmem_pamops->free)(old_pampd, obj->pool,
42284 NULL, 0, false);
42285 ret = new_pampd;
42286 }
42287@@ -505,7 +505,7 @@ static void tmem_objnode_node_destroy(struct tmem_obj *obj,
42288 if (objnode->slots[i]) {
42289 if (ht == 1) {
42290 obj->pampd_count--;
42291- (*tmem_pamops.free)(objnode->slots[i],
42292+ (tmem_pamops->free)(objnode->slots[i],
42293 obj->pool, NULL, 0, true);
42294 objnode->slots[i] = NULL;
42295 continue;
42296@@ -524,7 +524,7 @@ static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj,
42297 return;
42298 if (obj->objnode_tree_height == 0) {
42299 obj->pampd_count--;
42300- (*tmem_pamops.free)(obj->objnode_tree_root,
42301+ (tmem_pamops->free)(obj->objnode_tree_root,
42302 obj->pool, NULL, 0, true);
42303 } else {
42304 tmem_objnode_node_destroy(obj, obj->objnode_tree_root,
42305@@ -535,7 +535,7 @@ static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj,
42306 obj->objnode_tree_root = NULL;
42307 #ifdef CONFIG_RAMSTER
42308 if (tmem_pamops.free_obj != NULL)
42309- (*tmem_pamops.free_obj)(obj->pool, obj, pool_destroy);
42310+ (tmem_pamops->free_obj)(obj->pool, obj, pool_destroy);
42311 #endif
42312 }
42313
42314@@ -574,7 +574,7 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
42315 /* if found, is a dup put, flush the old one */
42316 pampd_del = tmem_pampd_delete_from_obj(obj, index);
42317 BUG_ON(pampd_del != pampd);
42318- (*tmem_pamops.free)(pampd, pool, oidp, index, true);
42319+ (tmem_pamops->free)(pampd, pool, oidp, index, true);
42320 if (obj->pampd_count == 0) {
42321 objnew = obj;
42322 objfound = NULL;
42323@@ -582,7 +582,7 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
42324 pampd = NULL;
42325 }
42326 } else {
42327- obj = objnew = (*tmem_hostops.obj_alloc)(pool);
42328+ obj = objnew = (tmem_hostops->obj_alloc)(pool);
42329 if (unlikely(obj == NULL)) {
42330 ret = -ENOMEM;
42331 goto out;
42332@@ -597,16 +597,16 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
42333 if (unlikely(ret == -ENOMEM))
42334 /* may have partially built objnode tree ("stump") */
42335 goto delete_and_free;
42336- (*tmem_pamops.create_finish)(pampd, is_ephemeral(pool));
42337+ (tmem_pamops->create_finish)(pampd, is_ephemeral(pool));
42338 goto out;
42339
42340 delete_and_free:
42341 (void)tmem_pampd_delete_from_obj(obj, index);
42342 if (pampd)
42343- (*tmem_pamops.free)(pampd, pool, NULL, 0, true);
42344+ (tmem_pamops->free)(pampd, pool, NULL, 0, true);
42345 if (objnew) {
42346 tmem_obj_free(objnew, hb);
42347- (*tmem_hostops.obj_free)(objnew, pool);
42348+ (tmem_hostops->obj_free)(objnew, pool);
42349 }
42350 out:
42351 spin_unlock(&hb->lock);
42352@@ -651,7 +651,7 @@ void tmem_localify_finish(struct tmem_obj *obj, uint32_t index,
42353 if (pampd != NULL) {
42354 BUG_ON(obj == NULL);
42355 (void)tmem_pampd_replace_in_obj(obj, index, pampd, 1);
42356- (*tmem_pamops.create_finish)(pampd, is_ephemeral(obj->pool));
42357+ (tmem_pamops->create_finish)(pampd, is_ephemeral(obj->pool));
42358 } else if (delete) {
42359 BUG_ON(obj == NULL);
42360 (void)tmem_pampd_delete_from_obj(obj, index);
42361@@ -671,7 +671,7 @@ static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
42362 int ret = 0;
42363
42364 if (!is_ephemeral(pool))
42365- new_pampd = (*tmem_pamops.repatriate_preload)(
42366+ new_pampd = (tmem_pamops->repatriate_preload)(
42367 old_pampd, pool, oidp, index, &intransit);
42368 if (intransit)
42369 ret = -EAGAIN;
42370@@ -680,7 +680,7 @@ static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
42371 /* must release the hb->lock else repatriate can't sleep */
42372 spin_unlock(&hb->lock);
42373 if (!intransit)
42374- ret = (*tmem_pamops.repatriate)(old_pampd, new_pampd, pool,
42375+ ret = (tmem_pamops->repatriate)(old_pampd, new_pampd, pool,
42376 oidp, index, free, data);
42377 if (ret == -EAGAIN) {
42378 /* rare I think, but should cond_resched()??? */
42379@@ -714,7 +714,7 @@ int tmem_replace(struct tmem_pool *pool, struct tmem_oid *oidp,
42380 new_pampd = tmem_pampd_replace_in_obj(obj, index, new_pampd, 0);
42381 /* if we bug here, pamops wasn't properly set up for ramster */
42382 BUG_ON(tmem_pamops.replace_in_obj == NULL);
42383- ret = (*tmem_pamops.replace_in_obj)(new_pampd, obj);
42384+ ret = (tmem_pamops->replace_in_obj)(new_pampd, obj);
42385 out:
42386 spin_unlock(&hb->lock);
42387 return ret;
42388@@ -776,15 +776,15 @@ int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
42389 if (free) {
42390 if (obj->pampd_count == 0) {
42391 tmem_obj_free(obj, hb);
42392- (*tmem_hostops.obj_free)(obj, pool);
42393+ (tmem_hostops->obj_free)(obj, pool);
42394 obj = NULL;
42395 }
42396 }
42397 if (free)
42398- ret = (*tmem_pamops.get_data_and_free)(
42399+ ret = (tmem_pamops->get_data_and_free)(
42400 data, sizep, raw, pampd, pool, oidp, index);
42401 else
42402- ret = (*tmem_pamops.get_data)(
42403+ ret = (tmem_pamops->get_data)(
42404 data, sizep, raw, pampd, pool, oidp, index);
42405 if (ret < 0)
42406 goto out;
42407@@ -816,10 +816,10 @@ int tmem_flush_page(struct tmem_pool *pool,
42408 pampd = tmem_pampd_delete_from_obj(obj, index);
42409 if (pampd == NULL)
42410 goto out;
42411- (*tmem_pamops.free)(pampd, pool, oidp, index, true);
42412+ (tmem_pamops->free)(pampd, pool, oidp, index, true);
42413 if (obj->pampd_count == 0) {
42414 tmem_obj_free(obj, hb);
42415- (*tmem_hostops.obj_free)(obj, pool);
42416+ (tmem_hostops->obj_free)(obj, pool);
42417 }
42418 ret = 0;
42419
42420@@ -844,7 +844,7 @@ int tmem_flush_object(struct tmem_pool *pool, struct tmem_oid *oidp)
42421 goto out;
42422 tmem_pampd_destroy_all_in_obj(obj, false);
42423 tmem_obj_free(obj, hb);
42424- (*tmem_hostops.obj_free)(obj, pool);
42425+ (tmem_hostops->obj_free)(obj, pool);
42426 ret = 0;
42427
42428 out:
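
The ramster conversion stores a pointer to the registered ops instead of copying them, so a caller can hand in a const (constified, read-only) table; every (*tmem_hostops.fn)(...) call becomes tmem_hostops->fn(...). Note that the CONFIG_RAMSTER-only NULL checks left as context above (tmem_pamops.new_obj, tmem_pamops.free_obj, and the replace_in_obj BUG_ON) still use the struct form and would need the same treatment on RAMSTER builds. The shape of the change, with illustrative names:

    struct host_ops {
            void *(*obj_alloc)(void);
            void (*obj_free)(void *);
    };

    static const struct host_ops *hostops; /* was: static struct host_ops hostops; */

    void register_hostops(const struct host_ops *m)
    {
            hostops = m;                   /* was: hostops = *m; (a copy) */
    }
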
42429diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
42430index dc23395..cf7e9b1 100644
42431--- a/drivers/staging/rtl8712/rtl871x_io.h
42432+++ b/drivers/staging/rtl8712/rtl871x_io.h
42433@@ -108,7 +108,7 @@ struct _io_ops {
42434 u8 *pmem);
42435 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
42436 u8 *pmem);
42437-};
42438+} __no_const;
42439
42440 struct io_req {
42441 struct list_head list;
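
__no_const is the escape hatch for grsecurity's constify gcc plugin, which implicitly makes structs consisting only of function pointers const; rtl8712 fills _io_ops in at runtime per interface, so the type is opted out. Approximately how the attribute is wired up, paraphrased from the patch's compiler.h changes:

    #ifdef CONSTIFY_PLUGIN
    #define __no_const __attribute__((no_const))
    #else
    #define __no_const
    #endif
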
42442diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
42443index 1f5088b..0e59820 100644
42444--- a/drivers/staging/sbe-2t3e3/netdev.c
42445+++ b/drivers/staging/sbe-2t3e3/netdev.c
42446@@ -51,7 +51,7 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
42447 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
42448
42449 if (rlen)
42450- if (copy_to_user(data, &resp, rlen))
42451+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
42452 return -EFAULT;
42453
42454 return 0;
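
The added rlen bound closes a potential stack infoleak: resp is a local buffer, and an rlen reported by t3e3_if_config() larger than the buffer would make copy_to_user() read past it into adjacent stack memory. The generic shape of the guard:

    if (rlen) {
            if (rlen > sizeof(resp))   /* never trust a device-supplied length */
                    return -EFAULT;
            if (copy_to_user(data, &resp, rlen))
                    return -EFAULT;
    }
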
42455diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
42456index 5dddc4d..34fcb2f 100644
42457--- a/drivers/staging/usbip/vhci.h
42458+++ b/drivers/staging/usbip/vhci.h
42459@@ -83,7 +83,7 @@ struct vhci_hcd {
42460 unsigned resuming:1;
42461 unsigned long re_timeout;
42462
42463- atomic_t seqnum;
42464+ atomic_unchecked_t seqnum;
42465
42466 /*
42467 * NOTE:
42468diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
42469index c3aa219..bf8b3de 100644
42470--- a/drivers/staging/usbip/vhci_hcd.c
42471+++ b/drivers/staging/usbip/vhci_hcd.c
42472@@ -451,7 +451,7 @@ static void vhci_tx_urb(struct urb *urb)
42473 return;
42474 }
42475
42476- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
42477+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42478 if (priv->seqnum == 0xffff)
42479 dev_info(&urb->dev->dev, "seqnum max\n");
42480
42481@@ -703,7 +703,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
42482 return -ENOMEM;
42483 }
42484
42485- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
42486+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42487 if (unlink->seqnum == 0xffff)
42488 pr_info("seqnum max\n");
42489
42490@@ -907,7 +907,7 @@ static int vhci_start(struct usb_hcd *hcd)
42491 vdev->rhport = rhport;
42492 }
42493
42494- atomic_set(&vhci->seqnum, 0);
42495+ atomic_set_unchecked(&vhci->seqnum, 0);
42496 spin_lock_init(&vhci->lock);
42497
42498 hcd->power_budget = 0; /* no limit */
42499diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
42500index ba5f1c0..11d8122 100644
42501--- a/drivers/staging/usbip/vhci_rx.c
42502+++ b/drivers/staging/usbip/vhci_rx.c
42503@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
42504 if (!urb) {
42505 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
42506 pr_info("max seqnum %d\n",
42507- atomic_read(&the_controller->seqnum));
42508+ atomic_read_unchecked(&the_controller->seqnum));
42509 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
42510 return;
42511 }
42512diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
42513index 5f13890..36a044b 100644
42514--- a/drivers/staging/vt6655/hostap.c
42515+++ b/drivers/staging/vt6655/hostap.c
42516@@ -73,14 +73,13 @@ static int msglevel =MSG_LEVEL_INFO;
42517 *
42518 */
42519
42520+static net_device_ops_no_const apdev_netdev_ops;
42521+
42522 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42523 {
42524 PSDevice apdev_priv;
42525 struct net_device *dev = pDevice->dev;
42526 int ret;
42527- const struct net_device_ops apdev_netdev_ops = {
42528- .ndo_start_xmit = pDevice->tx_80211,
42529- };
42530
42531 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
42532
42533@@ -92,6 +91,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42534 *apdev_priv = *pDevice;
42535 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
42536
42537+ /* only half broken now */
42538+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
42539 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
42540
42541 pDevice->apdev->type = ARPHRD_IEEE80211;
42542diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
42543index 26a7d0e..897b083 100644
42544--- a/drivers/staging/vt6656/hostap.c
42545+++ b/drivers/staging/vt6656/hostap.c
42546@@ -60,14 +60,13 @@ static int msglevel =MSG_LEVEL_INFO;
42547 *
42548 */
42549
42550+static net_device_ops_no_const apdev_netdev_ops;
42551+
42552 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42553 {
42554 PSDevice apdev_priv;
42555 struct net_device *dev = pDevice->dev;
42556 int ret;
42557- const struct net_device_ops apdev_netdev_ops = {
42558- .ndo_start_xmit = pDevice->tx_80211,
42559- };
42560
42561 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
42562
42563@@ -79,6 +78,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42564 *apdev_priv = *pDevice;
42565 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
42566
42567+ /* only half broken now */
42568+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
42569 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
42570
42571 pDevice->apdev->type = ARPHRD_IEEE80211;
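
Both vt66xx hunks hit the same constification problem: net_device_ops wants to be const, but ndo_start_xmit is only known per device at runtime. The patch settles for one static writable copy per driver, and the "/* only half broken now */" comment is honest about the cost: every apdev created by the driver shares that single ops table, so the last-assigned tx_80211 wins if more than one device exists. In outline:

    typedef struct net_device_ops __no_const net_device_ops_no_const;

    static net_device_ops_no_const apdev_netdev_ops; /* shared, writable */

    /* at enable time: */
    apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
    pDevice->apdev->netdev_ops = &apdev_netdev_ops;
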
42572diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
42573index 56c8e60..1920c63 100644
42574--- a/drivers/staging/zcache/tmem.c
42575+++ b/drivers/staging/zcache/tmem.c
42576@@ -39,7 +39,7 @@
42577 * A tmem host implementation must use this function to register callbacks
42578 * for memory allocation.
42579 */
42580-static struct tmem_hostops tmem_hostops;
42581+static tmem_hostops_no_const tmem_hostops;
42582
42583 static void tmem_objnode_tree_init(void);
42584
42585@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
42586 * A tmem host implementation must use this function to register
42587 * callbacks for a page-accessible memory (PAM) implementation
42588 */
42589-static struct tmem_pamops tmem_pamops;
42590+static tmem_pamops_no_const tmem_pamops;
42591
42592 void tmem_register_pamops(struct tmem_pamops *m)
42593 {
42594diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
42595index 0d4aa82..f7832d4 100644
42596--- a/drivers/staging/zcache/tmem.h
42597+++ b/drivers/staging/zcache/tmem.h
42598@@ -180,6 +180,7 @@ struct tmem_pamops {
42599 void (*new_obj)(struct tmem_obj *);
42600 int (*replace_in_obj)(void *, struct tmem_obj *);
42601 };
42602+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
42603 extern void tmem_register_pamops(struct tmem_pamops *m);
42604
42605 /* memory allocation methods provided by the host implementation */
42606@@ -189,6 +190,7 @@ struct tmem_hostops {
42607 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
42608 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
42609 };
42610+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
42611 extern void tmem_register_hostops(struct tmem_hostops *m);
42612
42613 /* core tmem accessor functions */
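
zcache takes the other route from ramster: it keeps the by-value copy but changes the variable's type. Since the plugin attribute attaches to types rather than objects, the header introduces the *_no_const typedefs so this one instance stays writable while struct tmem_hostops itself, and any const-qualified user, remains constified:

    typedef struct tmem_hostops __no_const tmem_hostops_no_const;

    static tmem_hostops_no_const tmem_hostops; /* writable instance */

    void tmem_register_hostops(struct tmem_hostops *m)
    {
            tmem_hostops = *m;  /* plain struct assignment still works */
    }
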
42614diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
42615index 96f4981..4daaa7e 100644
42616--- a/drivers/target/target_core_device.c
42617+++ b/drivers/target/target_core_device.c
42618@@ -1370,7 +1370,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
42619 spin_lock_init(&dev->se_port_lock);
42620 spin_lock_init(&dev->se_tmr_lock);
42621 spin_lock_init(&dev->qf_cmd_lock);
42622- atomic_set(&dev->dev_ordered_id, 0);
42623+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
42624 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
42625 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
42626 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
42627diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
42628index bd587b7..173daf3 100644
42629--- a/drivers/target/target_core_transport.c
42630+++ b/drivers/target/target_core_transport.c
42631@@ -1077,7 +1077,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
42632 * Used to determine when ORDERED commands should go from
42633 * Dormant to Active status.
42634 */
42635- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
42636+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
42637 smp_mb__after_atomic_inc();
42638 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
42639 cmd->se_ordered_id, cmd->sam_task_attr,
42640diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
42641index b09c8d1f..c4225c0 100644
42642--- a/drivers/tty/cyclades.c
42643+++ b/drivers/tty/cyclades.c
42644@@ -1589,10 +1589,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
42645 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
42646 info->port.count);
42647 #endif
42648- info->port.count++;
42649+ atomic_inc(&info->port.count);
42650 #ifdef CY_DEBUG_COUNT
42651 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
42652- current->pid, info->port.count);
42653+ current->pid, atomic_read(&info->port.count));
42654 #endif
42655
42656 /*
42657@@ -3991,7 +3991,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
42658 for (j = 0; j < cy_card[i].nports; j++) {
42659 info = &cy_card[i].ports[j];
42660
42661- if (info->port.count) {
42662+ if (atomic_read(&info->port.count)) {
42663 /* XXX is the ldisc num worth this? */
42664 struct tty_struct *tty;
42665 struct tty_ldisc *ld;
42666diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
42667index 13ee53b..418d164 100644
42668--- a/drivers/tty/hvc/hvc_console.c
42669+++ b/drivers/tty/hvc/hvc_console.c
42670@@ -338,7 +338,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
42671
42672 spin_lock_irqsave(&hp->port.lock, flags);
42673 /* Check and then increment for fast path open. */
42674- if (hp->port.count++ > 0) {
42675+ if (atomic_inc_return(&hp->port.count) > 1) {
42676 spin_unlock_irqrestore(&hp->port.lock, flags);
42677 hvc_kick();
42678 return 0;
42679@@ -388,7 +388,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
42680
42681 spin_lock_irqsave(&hp->port.lock, flags);
42682
42683- if (--hp->port.count == 0) {
42684+ if (atomic_dec_return(&hp->port.count) == 0) {
42685 spin_unlock_irqrestore(&hp->port.lock, flags);
42686 /* We are done with the tty pointer now. */
42687 tty_port_tty_set(&hp->port, NULL);
42688@@ -406,9 +406,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
42689 */
42690 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
42691 } else {
42692- if (hp->port.count < 0)
42693+ if (atomic_read(&hp->port.count) < 0)
42694 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
42695- hp->vtermno, hp->port.count);
42696+ hp->vtermno, atomic_read(&hp->port.count));
42697 spin_unlock_irqrestore(&hp->port.lock, flags);
42698 }
42699 }
42700@@ -438,12 +438,12 @@ static void hvc_hangup(struct tty_struct *tty)
42701 * open->hangup case this can be called after the final close so prevent
42702 * that from happening for now.
42703 */
42704- if (hp->port.count <= 0) {
42705+ if (atomic_read(&hp->port.count) <= 0) {
42706 spin_unlock_irqrestore(&hp->port.lock, flags);
42707 return;
42708 }
42709
42710- hp->port.count = 0;
42711+ atomic_set(&hp->port.count, 0);
42712 spin_unlock_irqrestore(&hp->port.lock, flags);
42713 tty_port_tty_set(&hp->port, NULL);
42714
42715@@ -491,7 +491,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
42716 return -EPIPE;
42717
42718 /* FIXME what's this (unprotected) check for? */
42719- if (hp->port.count <= 0)
42720+ if (atomic_read(&hp->port.count) <= 0)
42721 return -EIO;
42722
42723 spin_lock_irqsave(&hp->lock, flags);
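
From here through the synclink drivers the patch mechanically converts tty_port.count (and later the ldisc refcount) from a bare int to atomic_t. The idioms are worth spelling out once, since the open/close fast paths must keep test-and-update a single atomic step (a sketch, assuming the patched struct tty_port):

    static int port_open_fast_path(struct tty_port *port)
    {
            /* was: port->count++ > 0 */
            return atomic_inc_return(&port->count) > 1;
    }

    static bool port_last_close(struct tty_port *port)
    {
            /* was: --port->count == 0 */
            return atomic_dec_return(&port->count) == 0;
    }
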
42724diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
42725index 8776357..b2d4afd 100644
42726--- a/drivers/tty/hvc/hvcs.c
42727+++ b/drivers/tty/hvc/hvcs.c
42728@@ -83,6 +83,7 @@
42729 #include <asm/hvcserver.h>
42730 #include <asm/uaccess.h>
42731 #include <asm/vio.h>
42732+#include <asm/local.h>
42733
42734 /*
42735 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
42736@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
42737
42738 spin_lock_irqsave(&hvcsd->lock, flags);
42739
42740- if (hvcsd->port.count > 0) {
42741+ if (atomic_read(&hvcsd->port.count) > 0) {
42742 spin_unlock_irqrestore(&hvcsd->lock, flags);
42743 printk(KERN_INFO "HVCS: vterm state unchanged. "
42744 "The hvcs device node is still in use.\n");
42745@@ -1132,7 +1133,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
42746 }
42747 }
42748
42749- hvcsd->port.count = 0;
42750+ atomic_set(&hvcsd->port.count, 0);
42751 hvcsd->port.tty = tty;
42752 tty->driver_data = hvcsd;
42753
42754@@ -1185,7 +1186,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
42755 unsigned long flags;
42756
42757 spin_lock_irqsave(&hvcsd->lock, flags);
42758- hvcsd->port.count++;
42759+ atomic_inc(&hvcsd->port.count);
42760 hvcsd->todo_mask |= HVCS_SCHED_READ;
42761 spin_unlock_irqrestore(&hvcsd->lock, flags);
42762
42763@@ -1221,7 +1222,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
42764 hvcsd = tty->driver_data;
42765
42766 spin_lock_irqsave(&hvcsd->lock, flags);
42767- if (--hvcsd->port.count == 0) {
42768+ if (atomic_dec_and_test(&hvcsd->port.count)) {
42769
42770 vio_disable_interrupts(hvcsd->vdev);
42771
42772@@ -1246,10 +1247,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
42773
42774 free_irq(irq, hvcsd);
42775 return;
42776- } else if (hvcsd->port.count < 0) {
42777+ } else if (atomic_read(&hvcsd->port.count) < 0) {
42778 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
42779 " is missmanaged.\n",
42780- hvcsd->vdev->unit_address, hvcsd->port.count);
42781+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
42782 }
42783
42784 spin_unlock_irqrestore(&hvcsd->lock, flags);
42785@@ -1271,7 +1272,7 @@ static void hvcs_hangup(struct tty_struct * tty)
42786
42787 spin_lock_irqsave(&hvcsd->lock, flags);
42788 /* Preserve this so that we know how many kref refs to put */
42789- temp_open_count = hvcsd->port.count;
42790+ temp_open_count = atomic_read(&hvcsd->port.count);
42791
42792 /*
42793 * Don't kref put inside the spinlock because the destruction
42794@@ -1286,7 +1287,7 @@ static void hvcs_hangup(struct tty_struct * tty)
42795 tty->driver_data = NULL;
42796 hvcsd->port.tty = NULL;
42797
42798- hvcsd->port.count = 0;
42799+ atomic_set(&hvcsd->port.count, 0);
42800
42801 /* This will drop any buffered data on the floor which is OK in a hangup
42802 * scenario. */
42803@@ -1357,7 +1358,7 @@ static int hvcs_write(struct tty_struct *tty,
42804 * the middle of a write operation? This is a crummy place to do this
42805 * but we want to keep it all in the spinlock.
42806 */
42807- if (hvcsd->port.count <= 0) {
42808+ if (atomic_read(&hvcsd->port.count) <= 0) {
42809 spin_unlock_irqrestore(&hvcsd->lock, flags);
42810 return -ENODEV;
42811 }
42812@@ -1431,7 +1432,7 @@ static int hvcs_write_room(struct tty_struct *tty)
42813 {
42814 struct hvcs_struct *hvcsd = tty->driver_data;
42815
42816- if (!hvcsd || hvcsd->port.count <= 0)
42817+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
42818 return 0;
42819
42820 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
42821diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
42822index 2cde13d..645d78f 100644
42823--- a/drivers/tty/ipwireless/tty.c
42824+++ b/drivers/tty/ipwireless/tty.c
42825@@ -29,6 +29,7 @@
42826 #include <linux/tty_driver.h>
42827 #include <linux/tty_flip.h>
42828 #include <linux/uaccess.h>
42829+#include <asm/local.h>
42830
42831 #include "tty.h"
42832 #include "network.h"
42833@@ -99,10 +100,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
42834 mutex_unlock(&tty->ipw_tty_mutex);
42835 return -ENODEV;
42836 }
42837- if (tty->port.count == 0)
42838+ if (atomic_read(&tty->port.count) == 0)
42839 tty->tx_bytes_queued = 0;
42840
42841- tty->port.count++;
42842+ atomic_inc(&tty->port.count);
42843
42844 tty->port.tty = linux_tty;
42845 linux_tty->driver_data = tty;
42846@@ -118,9 +119,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
42847
42848 static void do_ipw_close(struct ipw_tty *tty)
42849 {
42850- tty->port.count--;
42851-
42852- if (tty->port.count == 0) {
42853+ if (atomic_dec_return(&tty->port.count) == 0) {
42854 struct tty_struct *linux_tty = tty->port.tty;
42855
42856 if (linux_tty != NULL) {
42857@@ -141,7 +140,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
42858 return;
42859
42860 mutex_lock(&tty->ipw_tty_mutex);
42861- if (tty->port.count == 0) {
42862+ if (atomic_read(&tty->port.count) == 0) {
42863 mutex_unlock(&tty->ipw_tty_mutex);
42864 return;
42865 }
42866@@ -170,7 +169,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
42867 return;
42868 }
42869
42870- if (!tty->port.count) {
42871+ if (!atomic_read(&tty->port.count)) {
42872 mutex_unlock(&tty->ipw_tty_mutex);
42873 return;
42874 }
42875@@ -212,7 +211,7 @@ static int ipw_write(struct tty_struct *linux_tty,
42876 return -ENODEV;
42877
42878 mutex_lock(&tty->ipw_tty_mutex);
42879- if (!tty->port.count) {
42880+ if (!atomic_read(&tty->port.count)) {
42881 mutex_unlock(&tty->ipw_tty_mutex);
42882 return -EINVAL;
42883 }
42884@@ -252,7 +251,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
42885 if (!tty)
42886 return -ENODEV;
42887
42888- if (!tty->port.count)
42889+ if (!atomic_read(&tty->port.count))
42890 return -EINVAL;
42891
42892 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
42893@@ -294,7 +293,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
42894 if (!tty)
42895 return 0;
42896
42897- if (!tty->port.count)
42898+ if (!atomic_read(&tty->port.count))
42899 return 0;
42900
42901 return tty->tx_bytes_queued;
42902@@ -375,7 +374,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
42903 if (!tty)
42904 return -ENODEV;
42905
42906- if (!tty->port.count)
42907+ if (!atomic_read(&tty->port.count))
42908 return -EINVAL;
42909
42910 return get_control_lines(tty);
42911@@ -391,7 +390,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
42912 if (!tty)
42913 return -ENODEV;
42914
42915- if (!tty->port.count)
42916+ if (!atomic_read(&tty->port.count))
42917 return -EINVAL;
42918
42919 return set_control_lines(tty, set, clear);
42920@@ -405,7 +404,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
42921 if (!tty)
42922 return -ENODEV;
42923
42924- if (!tty->port.count)
42925+ if (!atomic_read(&tty->port.count))
42926 return -EINVAL;
42927
42928 /* FIXME: Exactly how is the tty object locked here .. */
42929@@ -561,7 +560,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
42930 * are gone */
42931 mutex_lock(&ttyj->ipw_tty_mutex);
42932 }
42933- while (ttyj->port.count)
42934+ while (atomic_read(&ttyj->port.count))
42935 do_ipw_close(ttyj);
42936 ipwireless_disassociate_network_ttys(network,
42937 ttyj->channel_idx);
42938diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
42939index f9d2850..b006f04 100644
42940--- a/drivers/tty/moxa.c
42941+++ b/drivers/tty/moxa.c
42942@@ -1193,7 +1193,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
42943 }
42944
42945 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
42946- ch->port.count++;
42947+ atomic_inc(&ch->port.count);
42948 tty->driver_data = ch;
42949 tty_port_tty_set(&ch->port, tty);
42950 mutex_lock(&ch->port.mutex);
42951diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
42952index bfd6771..e0d93c4 100644
42953--- a/drivers/tty/n_gsm.c
42954+++ b/drivers/tty/n_gsm.c
42955@@ -1636,7 +1636,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
42956 spin_lock_init(&dlci->lock);
42957 mutex_init(&dlci->mutex);
42958 dlci->fifo = &dlci->_fifo;
42959- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
42960+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
42961 kfree(dlci);
42962 return NULL;
42963 }
42964@@ -2936,7 +2936,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
42965 struct gsm_dlci *dlci = tty->driver_data;
42966 struct tty_port *port = &dlci->port;
42967
42968- port->count++;
42969+ atomic_inc(&port->count);
42970 dlci_get(dlci);
42971 dlci_get(dlci->gsm->dlci[0]);
42972 mux_get(dlci->gsm);
42973diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
42974index 19083ef..6e34e97 100644
42975--- a/drivers/tty/n_tty.c
42976+++ b/drivers/tty/n_tty.c
42977@@ -2196,6 +2196,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
42978 {
42979 *ops = tty_ldisc_N_TTY;
42980 ops->owner = NULL;
42981- ops->refcount = ops->flags = 0;
42982+ atomic_set(&ops->refcount, 0);
42983+ ops->flags = 0;
42984 }
42985 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
42986diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
42987index ac35c90..c47deac 100644
42988--- a/drivers/tty/pty.c
42989+++ b/drivers/tty/pty.c
42990@@ -790,8 +790,10 @@ static void __init unix98_pty_init(void)
42991 panic("Couldn't register Unix98 pts driver");
42992
42993 /* Now create the /dev/ptmx special device */
42994+ pax_open_kernel();
42995 tty_default_fops(&ptmx_fops);
42996- ptmx_fops.open = ptmx_open;
42997+ *(void **)&ptmx_fops.open = ptmx_open;
42998+ pax_close_kernel();
42999
43000 cdev_init(&ptmx_cdev, &ptmx_fops);
43001 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
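
ptmx_fops is static and, with constification, ends up in a read-only mapping, so a plain assignment to .open would fault. pax_open_kernel()/pax_close_kernel() are the patch's primitives for briefly lifting kernel write protection (on x86, toggling CR0.WP) around such one-time init stores, and the *(void **)& cast strips the const qualifier so the compiler accepts the write. The companion tty_io.c hunk below swaps the struct assignment in tty_default_fops() for a memcpy() through a void pointer for the same reason. The pattern, as used here:

    pax_open_kernel();                      /* e.g. clear CR0.WP on x86 */
    *(void **)&ptmx_fops.open = ptmx_open;  /* write into ro data */
    pax_close_kernel();
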
43002diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
43003index e42009a..566a036 100644
43004--- a/drivers/tty/rocket.c
43005+++ b/drivers/tty/rocket.c
43006@@ -925,7 +925,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
43007 tty->driver_data = info;
43008 tty_port_tty_set(port, tty);
43009
43010- if (port->count++ == 0) {
43011+ if (atomic_inc_return(&port->count) == 1) {
43012 atomic_inc(&rp_num_ports_open);
43013
43014 #ifdef ROCKET_DEBUG_OPEN
43015@@ -934,7 +934,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
43016 #endif
43017 }
43018 #ifdef ROCKET_DEBUG_OPEN
43019- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
43020+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic-read(&info->port.count));
43021 #endif
43022
43023 /*
43024@@ -1529,7 +1529,7 @@ static void rp_hangup(struct tty_struct *tty)
43025 spin_unlock_irqrestore(&info->port.lock, flags);
43026 return;
43027 }
43028- if (info->port.count)
43029+ if (atomic_read(&info->port.count))
43030 atomic_dec(&rp_num_ports_open);
43031 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
43032 spin_unlock_irqrestore(&info->port.lock, flags);
43033diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
43034index 1002054..dd644a8 100644
43035--- a/drivers/tty/serial/kgdboc.c
43036+++ b/drivers/tty/serial/kgdboc.c
43037@@ -24,8 +24,9 @@
43038 #define MAX_CONFIG_LEN 40
43039
43040 static struct kgdb_io kgdboc_io_ops;
43041+static struct kgdb_io kgdboc_io_ops_console;
43042
43043-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
43044+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
43045 static int configured = -1;
43046
43047 static char config[MAX_CONFIG_LEN];
43048@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
43049 kgdboc_unregister_kbd();
43050 if (configured == 1)
43051 kgdb_unregister_io_module(&kgdboc_io_ops);
43052+ else if (configured == 2)
43053+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
43054 }
43055
43056 static int configure_kgdboc(void)
43057@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
43058 int err;
43059 char *cptr = config;
43060 struct console *cons;
43061+ int is_console = 0;
43062
43063 err = kgdboc_option_setup(config);
43064 if (err || !strlen(config) || isspace(config[0]))
43065 goto noconfig;
43066
43067 err = -ENODEV;
43068- kgdboc_io_ops.is_console = 0;
43069 kgdb_tty_driver = NULL;
43070
43071 kgdboc_use_kms = 0;
43072@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
43073 int idx;
43074 if (cons->device && cons->device(cons, &idx) == p &&
43075 idx == tty_line) {
43076- kgdboc_io_ops.is_console = 1;
43077+ is_console = 1;
43078 break;
43079 }
43080 cons = cons->next;
43081@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
43082 kgdb_tty_line = tty_line;
43083
43084 do_register:
43085- err = kgdb_register_io_module(&kgdboc_io_ops);
43086+ if (is_console) {
43087+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
43088+ configured = 2;
43089+ } else {
43090+ err = kgdb_register_io_module(&kgdboc_io_ops);
43091+ configured = 1;
43092+ }
43093 if (err)
43094 goto noconfig;
43095
43096@@ -205,8 +214,6 @@ do_register:
43097 if (err)
43098 goto nmi_con_failed;
43099
43100- configured = 1;
43101-
43102 return 0;
43103
43104 nmi_con_failed:
43105@@ -223,7 +230,7 @@ noconfig:
43106 static int __init init_kgdboc(void)
43107 {
43108 /* Already configured? */
43109- if (configured == 1)
43110+ if (configured >= 1)
43111 return 0;
43112
43113 return configure_kgdboc();
43114@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
43115 if (config[len - 1] == '\n')
43116 config[len - 1] = '\0';
43117
43118- if (configured == 1)
43119+ if (configured >= 1)
43120 cleanup_kgdboc();
43121
43122 /* Go and configure with the new params. */
43123@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
43124 .post_exception = kgdboc_post_exp_handler,
43125 };
43126
43127+static struct kgdb_io kgdboc_io_ops_console = {
43128+ .name = "kgdboc",
43129+ .read_char = kgdboc_get_char,
43130+ .write_char = kgdboc_put_char,
43131+ .pre_exception = kgdboc_pre_exp_handler,
43132+ .post_exception = kgdboc_post_exp_handler,
43133+ .is_console = 1
43134+};
43135+
43136 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
43137 /* This is only available if kgdboc is a built in for early debugging */
43138 static int __init kgdboc_early_init(char *opt)
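
Rather than flipping is_console inside a single shared kgdb_io struct at runtime (which would force the struct to stay writable), the patch declares two fully initialized static tables and remembers which one was registered by widening the meaning of configured to 1-or-2. Registration then reduces to picking a table:

    if (is_console) {
            err = kgdb_register_io_module(&kgdboc_io_ops_console);
            configured = 2;
    } else {
            err = kgdb_register_io_module(&kgdboc_io_ops);
            configured = 1;
    }
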
43139diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
43140index e514b3a..c73d614 100644
43141--- a/drivers/tty/serial/samsung.c
43142+++ b/drivers/tty/serial/samsung.c
43143@@ -453,11 +453,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
43144 }
43145 }
43146
43147+static int s3c64xx_serial_startup(struct uart_port *port);
43148 static int s3c24xx_serial_startup(struct uart_port *port)
43149 {
43150 struct s3c24xx_uart_port *ourport = to_ourport(port);
43151 int ret;
43152
43153+ /* Startup sequence is different for s3c64xx and higher SoC's */
43154+ if (s3c24xx_serial_has_interrupt_mask(port))
43155+ return s3c64xx_serial_startup(port);
43156+
43157 dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
43158 port->mapbase, port->membase);
43159
43160@@ -1122,10 +1127,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
43161 /* setup info for port */
43162 port->dev = &platdev->dev;
43163
43164- /* Startup sequence is different for s3c64xx and higher SoC's */
43165- if (s3c24xx_serial_has_interrupt_mask(port))
43166- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
43167-
43168 port->uartclk = 1;
43169
43170 if (cfg->uart_flags & UPF_CONS_FLOW) {
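
The samsung hunk removes the last runtime write to s3c24xx_serial_ops (assigning .startup for s3c64xx parts) and replaces it with dispatch inside the startup hook itself, so the uart_ops table can remain const. A sketch of the resulting control flow (the legacy helper name is illustrative):

    static int s3c64xx_serial_startup(struct uart_port *port); /* fwd decl */

    static int s3c24xx_serial_startup(struct uart_port *port)
    {
            /* s3c64xx and later need the masked-interrupt sequence */
            if (s3c24xx_serial_has_interrupt_mask(port))
                    return s3c64xx_serial_startup(port);

            return s3c24xx_serial_startup_legacy(port); /* name illustrative */
    }
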
43171diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
43172index 2c7230a..2104f16 100644
43173--- a/drivers/tty/serial/serial_core.c
43174+++ b/drivers/tty/serial/serial_core.c
43175@@ -1455,7 +1455,7 @@ static void uart_hangup(struct tty_struct *tty)
43176 uart_flush_buffer(tty);
43177 uart_shutdown(tty, state);
43178 spin_lock_irqsave(&port->lock, flags);
43179- port->count = 0;
43180+ atomic_set(&port->count, 0);
43181 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
43182 spin_unlock_irqrestore(&port->lock, flags);
43183 tty_port_tty_set(port, NULL);
43184@@ -1551,7 +1551,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
43185 goto end;
43186 }
43187
43188- port->count++;
43189+ atomic_inc(&port->count);
43190 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
43191 retval = -ENXIO;
43192 goto err_dec_count;
43193@@ -1578,7 +1578,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
43194 /*
43195 * Make sure the device is in D0 state.
43196 */
43197- if (port->count == 1)
43198+ if (atomic_read(&port->count) == 1)
43199 uart_change_pm(state, 0);
43200
43201 /*
43202@@ -1596,7 +1596,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
43203 end:
43204 return retval;
43205 err_dec_count:
43206- port->count--;
43207+ atomic_inc(&port->count);
43208 mutex_unlock(&port->mutex);
43209 goto end;
43210 }
43211diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
43212index 9e071f6..f30ae69 100644
43213--- a/drivers/tty/synclink.c
43214+++ b/drivers/tty/synclink.c
43215@@ -3095,7 +3095,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
43216
43217 if (debug_level >= DEBUG_LEVEL_INFO)
43218 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
43219- __FILE__,__LINE__, info->device_name, info->port.count);
43220+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
43221
43222 if (tty_port_close_start(&info->port, tty, filp) == 0)
43223 goto cleanup;
43224@@ -3113,7 +3113,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
43225 cleanup:
43226 if (debug_level >= DEBUG_LEVEL_INFO)
43227 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
43228- tty->driver->name, info->port.count);
43229+ tty->driver->name, atomic_read(&info->port.count));
43230
43231 } /* end of mgsl_close() */
43232
43233@@ -3212,8 +3212,8 @@ static void mgsl_hangup(struct tty_struct *tty)
43234
43235 mgsl_flush_buffer(tty);
43236 shutdown(info);
43237-
43238- info->port.count = 0;
43239+
43240+ atomic_set(&info->port.count, 0);
43241 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
43242 info->port.tty = NULL;
43243
43244@@ -3302,12 +3302,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
43245
43246 if (debug_level >= DEBUG_LEVEL_INFO)
43247 printk("%s(%d):block_til_ready before block on %s count=%d\n",
43248- __FILE__,__LINE__, tty->driver->name, port->count );
43249+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43250
43251 spin_lock_irqsave(&info->irq_spinlock, flags);
43252 if (!tty_hung_up_p(filp)) {
43253 extra_count = true;
43254- port->count--;
43255+ atomic_dec(&port->count);
43256 }
43257 spin_unlock_irqrestore(&info->irq_spinlock, flags);
43258 port->blocked_open++;
43259@@ -3336,7 +3336,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
43260
43261 if (debug_level >= DEBUG_LEVEL_INFO)
43262 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
43263- __FILE__,__LINE__, tty->driver->name, port->count );
43264+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43265
43266 tty_unlock(tty);
43267 schedule();
43268@@ -3348,12 +3348,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
43269
43270 /* FIXME: Racy on hangup during close wait */
43271 if (extra_count)
43272- port->count++;
43273+ atomic_inc(&port->count);
43274 port->blocked_open--;
43275
43276 if (debug_level >= DEBUG_LEVEL_INFO)
43277 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
43278- __FILE__,__LINE__, tty->driver->name, port->count );
43279+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43280
43281 if (!retval)
43282 port->flags |= ASYNC_NORMAL_ACTIVE;
43283@@ -3405,7 +3405,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
43284
43285 if (debug_level >= DEBUG_LEVEL_INFO)
43286 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
43287- __FILE__,__LINE__,tty->driver->name, info->port.count);
43288+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
43289
43290 /* If port is closing, signal caller to try again */
43291 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
43292@@ -3424,10 +3424,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
43293 spin_unlock_irqrestore(&info->netlock, flags);
43294 goto cleanup;
43295 }
43296- info->port.count++;
43297+ atomic_inc(&info->port.count);
43298 spin_unlock_irqrestore(&info->netlock, flags);
43299
43300- if (info->port.count == 1) {
43301+ if (atomic_read(&info->port.count) == 1) {
43302 /* 1st open on this device, init hardware */
43303 retval = startup(info);
43304 if (retval < 0)
43305@@ -3451,8 +3451,8 @@ cleanup:
43306 if (retval) {
43307 if (tty->count == 1)
43308 info->port.tty = NULL; /* tty layer will release tty struct */
43309- if(info->port.count)
43310- info->port.count--;
43311+ if (atomic_read(&info->port.count))
43312+ atomic_dec(&info->port.count);
43313 }
43314
43315 return retval;
43316@@ -7662,7 +7662,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
43317 unsigned short new_crctype;
43318
43319 /* return error if TTY interface open */
43320- if (info->port.count)
43321+ if (atomic_read(&info->port.count))
43322 return -EBUSY;
43323
43324 switch (encoding)
43325@@ -7757,7 +7757,7 @@ static int hdlcdev_open(struct net_device *dev)
43326
43327 /* arbitrate between network and tty opens */
43328 spin_lock_irqsave(&info->netlock, flags);
43329- if (info->port.count != 0 || info->netcount != 0) {
43330+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
43331 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
43332 spin_unlock_irqrestore(&info->netlock, flags);
43333 return -EBUSY;
43334@@ -7843,7 +7843,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
43335 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
43336
43337 /* return error if TTY interface open */
43338- if (info->port.count)
43339+ if (atomic_read(&info->port.count))
43340 return -EBUSY;
43341
43342 if (cmd != SIOCWANDEV)
43343diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
43344index aba1e59..877ac33 100644
43345--- a/drivers/tty/synclink_gt.c
43346+++ b/drivers/tty/synclink_gt.c
43347@@ -671,7 +671,7 @@ static int open(struct tty_struct *tty, struct file *filp)
43348 tty->driver_data = info;
43349 info->port.tty = tty;
43350
43351- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
43352+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
43353
43354 /* If port is closing, signal caller to try again */
43355 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
43356@@ -692,10 +692,10 @@ static int open(struct tty_struct *tty, struct file *filp)
43357 mutex_unlock(&info->port.mutex);
43358 goto cleanup;
43359 }
43360- info->port.count++;
43361+ atomic_inc(&info->port.count);
43362 spin_unlock_irqrestore(&info->netlock, flags);
43363
43364- if (info->port.count == 1) {
43365+ if (atomic_read(&info->port.count) == 1) {
43366 /* 1st open on this device, init hardware */
43367 retval = startup(info);
43368 if (retval < 0) {
43369@@ -716,8 +716,8 @@ cleanup:
43370 if (retval) {
43371 if (tty->count == 1)
43372 info->port.tty = NULL; /* tty layer will release tty struct */
43373- if(info->port.count)
43374- info->port.count--;
43375+ if(atomic_read(&info->port.count))
43376+ atomic_dec(&info->port.count);
43377 }
43378
43379 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
43380@@ -730,7 +730,7 @@ static void close(struct tty_struct *tty, struct file *filp)
43381
43382 if (sanity_check(info, tty->name, "close"))
43383 return;
43384- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
43385+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
43386
43387 if (tty_port_close_start(&info->port, tty, filp) == 0)
43388 goto cleanup;
43389@@ -747,7 +747,7 @@ static void close(struct tty_struct *tty, struct file *filp)
43390 tty_port_close_end(&info->port, tty);
43391 info->port.tty = NULL;
43392 cleanup:
43393- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
43394+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
43395 }
43396
43397 static void hangup(struct tty_struct *tty)
43398@@ -765,7 +765,7 @@ static void hangup(struct tty_struct *tty)
43399 shutdown(info);
43400
43401 spin_lock_irqsave(&info->port.lock, flags);
43402- info->port.count = 0;
43403+ atomic_set(&info->port.count, 0);
43404 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
43405 info->port.tty = NULL;
43406 spin_unlock_irqrestore(&info->port.lock, flags);
43407@@ -1450,7 +1450,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
43408 unsigned short new_crctype;
43409
43410 /* return error if TTY interface open */
43411- if (info->port.count)
43412+ if (atomic_read(&info->port.count))
43413 return -EBUSY;
43414
43415 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
43416@@ -1545,7 +1545,7 @@ static int hdlcdev_open(struct net_device *dev)
43417
43418 /* arbitrate between network and tty opens */
43419 spin_lock_irqsave(&info->netlock, flags);
43420- if (info->port.count != 0 || info->netcount != 0) {
43421+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
43422 DBGINFO(("%s hdlc_open busy\n", dev->name));
43423 spin_unlock_irqrestore(&info->netlock, flags);
43424 return -EBUSY;
43425@@ -1630,7 +1630,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
43426 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
43427
43428 /* return error if TTY interface open */
43429- if (info->port.count)
43430+ if (atomic_read(&info->port.count))
43431 return -EBUSY;
43432
43433 if (cmd != SIOCWANDEV)
43434@@ -2419,7 +2419,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
43435 if (port == NULL)
43436 continue;
43437 spin_lock(&port->lock);
43438- if ((port->port.count || port->netcount) &&
43439+ if ((atomic_read(&port->port.count) || port->netcount) &&
43440 port->pending_bh && !port->bh_running &&
43441 !port->bh_requested) {
43442 DBGISR(("%s bh queued\n", port->device_name));
43443@@ -3308,7 +3308,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
43444 spin_lock_irqsave(&info->lock, flags);
43445 if (!tty_hung_up_p(filp)) {
43446 extra_count = true;
43447- port->count--;
43448+ atomic_dec(&port->count);
43449 }
43450 spin_unlock_irqrestore(&info->lock, flags);
43451 port->blocked_open++;
43452@@ -3345,7 +3345,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
43453 remove_wait_queue(&port->open_wait, &wait);
43454
43455 if (extra_count)
43456- port->count++;
43457+ atomic_inc(&port->count);
43458 port->blocked_open--;
43459
43460 if (!retval)
43461diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
43462index fd43fb6..34704ad 100644
43463--- a/drivers/tty/synclinkmp.c
43464+++ b/drivers/tty/synclinkmp.c
43465@@ -751,7 +751,7 @@ static int open(struct tty_struct *tty, struct file *filp)
43466
43467 if (debug_level >= DEBUG_LEVEL_INFO)
43468 printk("%s(%d):%s open(), old ref count = %d\n",
43469- __FILE__,__LINE__,tty->driver->name, info->port.count);
43470+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
43471
43472 /* If port is closing, signal caller to try again */
43473 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
43474@@ -770,10 +770,10 @@ static int open(struct tty_struct *tty, struct file *filp)
43475 spin_unlock_irqrestore(&info->netlock, flags);
43476 goto cleanup;
43477 }
43478- info->port.count++;
43479+ atomic_inc(&info->port.count);
43480 spin_unlock_irqrestore(&info->netlock, flags);
43481
43482- if (info->port.count == 1) {
43483+ if (atomic_read(&info->port.count) == 1) {
43484 /* 1st open on this device, init hardware */
43485 retval = startup(info);
43486 if (retval < 0)
43487@@ -797,8 +797,8 @@ cleanup:
43488 if (retval) {
43489 if (tty->count == 1)
43490 info->port.tty = NULL; /* tty layer will release tty struct */
43491- if(info->port.count)
43492- info->port.count--;
43493+ if(atomic_read(&info->port.count))
43494+ atomic_dec(&info->port.count);
43495 }
43496
43497 return retval;
43498@@ -816,7 +816,7 @@ static void close(struct tty_struct *tty, struct file *filp)
43499
43500 if (debug_level >= DEBUG_LEVEL_INFO)
43501 printk("%s(%d):%s close() entry, count=%d\n",
43502- __FILE__,__LINE__, info->device_name, info->port.count);
43503+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
43504
43505 if (tty_port_close_start(&info->port, tty, filp) == 0)
43506 goto cleanup;
43507@@ -835,7 +835,7 @@ static void close(struct tty_struct *tty, struct file *filp)
43508 cleanup:
43509 if (debug_level >= DEBUG_LEVEL_INFO)
43510 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
43511- tty->driver->name, info->port.count);
43512+ tty->driver->name, atomic_read(&info->port.count));
43513 }
43514
43515 /* Called by tty_hangup() when a hangup is signaled.
43516@@ -858,7 +858,7 @@ static void hangup(struct tty_struct *tty)
43517 shutdown(info);
43518
43519 spin_lock_irqsave(&info->port.lock, flags);
43520- info->port.count = 0;
43521+ atomic_set(&info->port.count, 0);
43522 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
43523 info->port.tty = NULL;
43524 spin_unlock_irqrestore(&info->port.lock, flags);
43525@@ -1566,7 +1566,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
43526 unsigned short new_crctype;
43527
43528 /* return error if TTY interface open */
43529- if (info->port.count)
43530+ if (atomic_read(&info->port.count))
43531 return -EBUSY;
43532
43533 switch (encoding)
43534@@ -1661,7 +1661,7 @@ static int hdlcdev_open(struct net_device *dev)
43535
43536 /* arbitrate between network and tty opens */
43537 spin_lock_irqsave(&info->netlock, flags);
43538- if (info->port.count != 0 || info->netcount != 0) {
43539+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
43540 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
43541 spin_unlock_irqrestore(&info->netlock, flags);
43542 return -EBUSY;
43543@@ -1747,7 +1747,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
43544 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
43545
43546 /* return error if TTY interface open */
43547- if (info->port.count)
43548+ if (atomic_read(&info->port.count))
43549 return -EBUSY;
43550
43551 if (cmd != SIOCWANDEV)
43552@@ -2632,7 +2632,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
43553 * do not request bottom half processing if the
43554 * device is not open in a normal mode.
43555 */
43556- if ( port && (port->port.count || port->netcount) &&
43557+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
43558 port->pending_bh && !port->bh_running &&
43559 !port->bh_requested ) {
43560 if ( debug_level >= DEBUG_LEVEL_ISR )
43561@@ -3330,12 +3330,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
43562
43563 if (debug_level >= DEBUG_LEVEL_INFO)
43564 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
43565- __FILE__,__LINE__, tty->driver->name, port->count );
43566+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43567
43568 spin_lock_irqsave(&info->lock, flags);
43569 if (!tty_hung_up_p(filp)) {
43570 extra_count = true;
43571- port->count--;
43572+ atomic_dec(&port->count);
43573 }
43574 spin_unlock_irqrestore(&info->lock, flags);
43575 port->blocked_open++;
43576@@ -3364,7 +3364,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
43577
43578 if (debug_level >= DEBUG_LEVEL_INFO)
43579 printk("%s(%d):%s block_til_ready() count=%d\n",
43580- __FILE__,__LINE__, tty->driver->name, port->count );
43581+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43582
43583 tty_unlock(tty);
43584 schedule();
43585@@ -3375,12 +3375,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
43586 remove_wait_queue(&port->open_wait, &wait);
43587
43588 if (extra_count)
43589- port->count++;
43590+ atomic_inc(&port->count);
43591 port->blocked_open--;
43592
43593 if (debug_level >= DEBUG_LEVEL_INFO)
43594 printk("%s(%d):%s block_til_ready() after, count=%d\n",
43595- __FILE__,__LINE__, tty->driver->name, port->count );
43596+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43597
43598 if (!retval)
43599 port->flags |= ASYNC_NORMAL_ACTIVE;
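
The synclinkmp hunks above are part of PaX/grsecurity's REFCOUNT hardening: the port's open count moves from a plain int, mutated under a spinlock but read from the interrupt handler and debug paths without it, to an atomic_t so every increment/decrement is a single indivisible read-modify-write (and, in the checked atomic flavor, can trap on overflow). A minimal userspace sketch of the before/after, using C11 atomics as a stand-in for the kernel's atomic_t API (names illustrative, not kernel code):

    #include <stdatomic.h>
    #include <stdio.h>

    struct port {
        atomic_int count;                /* was: int count; */
    };

    static void port_open(struct port *p)
    {
        atomic_fetch_add(&p->count, 1);  /* was: p->count++; */
    }

    static int port_close(struct port *p)
    {
        return atomic_fetch_sub(&p->count, 1) - 1;  /* was: --p->count */
    }

    int main(void)
    {
        struct port p = { .count = 0 };
        port_open(&p);
        printf("open count: %d\n", atomic_load(&p.count));
        printf("after close: %d\n", port_close(&p));
        return 0;
    }
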
43600diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
43601index b3c4a25..723916f 100644
43602--- a/drivers/tty/sysrq.c
43603+++ b/drivers/tty/sysrq.c
43604@@ -867,7 +867,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
43605 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
43606 size_t count, loff_t *ppos)
43607 {
43608- if (count) {
43609+ if (count && capable(CAP_SYS_ADMIN)) {
43610 char c;
43611
43612 if (get_user(c, buf))
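
The sysrq hunk gates /proc/sysrq-trigger behind capable(CAP_SYS_ADMIN): an unprivileged write is silently consumed instead of dispatching a SysRq action. A rough userspace sketch of the same shape, with geteuid() standing in for the kernel's capability check (illustrative only):

    #include <stdio.h>
    #include <unistd.h>

    /* geteuid() == 0 stands in for capable(CAP_SYS_ADMIN). */
    static ssize_t trigger_write(const char *buf, size_t count)
    {
        if (count && geteuid() == 0) {
            char c = buf[0];            /* the kernel get_user()s this */
            printf("sysrq '%c' dispatched\n", c);
        }
        return (ssize_t)count;          /* unprivileged writes are no-ops */
    }

    int main(void)
    {
        return trigger_write("h", 1) == 1 ? 0 : 1;
    }
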
43613diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
43614index da9fde8..c07975f 100644
43615--- a/drivers/tty/tty_io.c
43616+++ b/drivers/tty/tty_io.c
43617@@ -3391,7 +3391,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
43618
43619 void tty_default_fops(struct file_operations *fops)
43620 {
43621- *fops = tty_fops;
43622+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
43623 }
43624
43625 /*
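
The tty_io.c change trades a struct assignment for memcpy() through a (void *) cast, presumably because the constify plugin marks file_operations instances const: a direct store into a const-qualified object is a compile error, while the cast-and-copy is not (in the kernel this is paired with page-permission toggling that is omitted here). A small compilable sketch of the pattern:

    #include <stdio.h>
    #include <string.h>

    struct file_operations { int (*open)(void); };

    static int my_open(void) { return 42; }
    static const struct file_operations tty_fops = { .open = my_open };

    /* With a const-qualified destination type, `*fops = tty_fops` is
     * rejected at compile time; copying through void * is the escape
     * hatch the patch uses. */
    static void tty_default_fops(struct file_operations *fops)
    {
        memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
    }

    int main(void)
    {
        struct file_operations fops;
        tty_default_fops(&fops);
        printf("%d\n", fops.open());
        return 0;
    }
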
43626diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
43627index c578229..45aa9ee 100644
43628--- a/drivers/tty/tty_ldisc.c
43629+++ b/drivers/tty/tty_ldisc.c
43630@@ -56,7 +56,7 @@ static void put_ldisc(struct tty_ldisc *ld)
43631 if (atomic_dec_and_test(&ld->users)) {
43632 struct tty_ldisc_ops *ldo = ld->ops;
43633
43634- ldo->refcount--;
43635+ atomic_dec(&ldo->refcount);
43636 module_put(ldo->owner);
43637 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
43638
43639@@ -91,7 +91,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
43640 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
43641 tty_ldiscs[disc] = new_ldisc;
43642 new_ldisc->num = disc;
43643- new_ldisc->refcount = 0;
43644+ atomic_set(&new_ldisc->refcount, 0);
43645 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
43646
43647 return ret;
43648@@ -119,7 +119,7 @@ int tty_unregister_ldisc(int disc)
43649 return -EINVAL;
43650
43651 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
43652- if (tty_ldiscs[disc]->refcount)
43653+ if (atomic_read(&tty_ldiscs[disc]->refcount))
43654 ret = -EBUSY;
43655 else
43656 tty_ldiscs[disc] = NULL;
43657@@ -140,7 +140,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
43658 if (ldops) {
43659 ret = ERR_PTR(-EAGAIN);
43660 if (try_module_get(ldops->owner)) {
43661- ldops->refcount++;
43662+ atomic_inc(&ldops->refcount);
43663 ret = ldops;
43664 }
43665 }
43666@@ -153,7 +153,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
43667 unsigned long flags;
43668
43669 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
43670- ldops->refcount--;
43671+ atomic_dec(&ldops->refcount);
43672 module_put(ldops->owner);
43673 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
43674 }
43675diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
43676index b7ff59d..7c6105e 100644
43677--- a/drivers/tty/tty_port.c
43678+++ b/drivers/tty/tty_port.c
43679@@ -218,7 +218,7 @@ void tty_port_hangup(struct tty_port *port)
43680 unsigned long flags;
43681
43682 spin_lock_irqsave(&port->lock, flags);
43683- port->count = 0;
43684+ atomic_set(&port->count, 0);
43685 port->flags &= ~ASYNC_NORMAL_ACTIVE;
43686 if (port->tty) {
43687 set_bit(TTY_IO_ERROR, &port->tty->flags);
43688@@ -344,7 +344,7 @@ int tty_port_block_til_ready(struct tty_port *port,
43689 /* The port lock protects the port counts */
43690 spin_lock_irqsave(&port->lock, flags);
43691 if (!tty_hung_up_p(filp))
43692- port->count--;
43693+ atomic_dec(&port->count);
43694 port->blocked_open++;
43695 spin_unlock_irqrestore(&port->lock, flags);
43696
43697@@ -386,7 +386,7 @@ int tty_port_block_til_ready(struct tty_port *port,
43698 we must not mess that up further */
43699 spin_lock_irqsave(&port->lock, flags);
43700 if (!tty_hung_up_p(filp))
43701- port->count++;
43702+ atomic_inc(&port->count);
43703 port->blocked_open--;
43704 if (retval == 0)
43705 port->flags |= ASYNC_NORMAL_ACTIVE;
43706@@ -406,19 +406,19 @@ int tty_port_close_start(struct tty_port *port,
43707 return 0;
43708 }
43709
43710- if (tty->count == 1 && port->count != 1) {
43711+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
43712 printk(KERN_WARNING
43713 "tty_port_close_start: tty->count = 1 port count = %d.\n",
43714- port->count);
43715- port->count = 1;
43716+ atomic_read(&port->count));
43717+ atomic_set(&port->count, 1);
43718 }
43719- if (--port->count < 0) {
43720+ if (atomic_dec_return(&port->count) < 0) {
43721 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
43722- port->count);
43723- port->count = 0;
43724+ atomic_read(&port->count));
43725+ atomic_set(&port->count, 0);
43726 }
43727
43728- if (port->count) {
43729+ if (atomic_read(&port->count)) {
43730 spin_unlock_irqrestore(&port->lock, flags);
43731 if (port->ops->drop)
43732 port->ops->drop(port);
43733@@ -516,7 +516,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
43734 {
43735 spin_lock_irq(&port->lock);
43736 if (!tty_hung_up_p(filp))
43737- ++port->count;
43738+ atomic_inc(&port->count);
43739 spin_unlock_irq(&port->lock);
43740 tty_port_tty_set(port, tty);
43741
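
In tty_port_close_start() the defensive clamping survives the conversion because atomic_dec_return() decrements and reports the new value in one step, so the underflow test cannot race against another closer. A compact userspace rendering of that close path (C11 atomics in place of the kernel API, same caveats as above):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int port_count = 1;

    /* Decrement-and-test in one atomic step; clamp if a bug ever
     * drives the count negative; report whether this was the last
     * close, as tty_port_close_start() does. */
    static int port_close_start(void)
    {
        if (atomic_fetch_sub(&port_count, 1) - 1 < 0) {
            fprintf(stderr, "close_start: count went negative\n");
            atomic_store(&port_count, 0);
        }
        return atomic_load(&port_count) == 0;
    }

    int main(void)
    {
        printf("last close: %d\n", port_close_start());
        return 0;
    }
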
43742diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
43743index 681765b..d3ccdf2 100644
43744--- a/drivers/tty/vt/keyboard.c
43745+++ b/drivers/tty/vt/keyboard.c
43746@@ -660,6 +660,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
43747 kbd->kbdmode == VC_OFF) &&
43748 value != KVAL(K_SAK))
43749 return; /* SAK is allowed even in raw mode */
43750+
43751+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
43752+ {
43753+ void *func = fn_handler[value];
43754+ if (func == fn_show_state || func == fn_show_ptregs ||
43755+ func == fn_show_mem)
43756+ return;
43757+ }
43758+#endif
43759+
43760 fn_handler[value](vc);
43761 }
43762
43763@@ -1808,9 +1818,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
43764 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
43765 return -EFAULT;
43766
43767- if (!capable(CAP_SYS_TTY_CONFIG))
43768- perm = 0;
43769-
43770 switch (cmd) {
43771 case KDGKBENT:
43772 /* Ensure another thread doesn't free it under us */
43773@@ -1825,6 +1832,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
43774 spin_unlock_irqrestore(&kbd_event_lock, flags);
43775 return put_user(val, &user_kbe->kb_value);
43776 case KDSKBENT:
43777+ if (!capable(CAP_SYS_TTY_CONFIG))
43778+ perm = 0;
43779+
43780 if (!perm)
43781 return -EPERM;
43782 if (!i && v == K_NOSUCHMAP) {
43783@@ -1915,9 +1925,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
43784 int i, j, k;
43785 int ret;
43786
43787- if (!capable(CAP_SYS_TTY_CONFIG))
43788- perm = 0;
43789-
43790 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
43791 if (!kbs) {
43792 ret = -ENOMEM;
43793@@ -1951,6 +1958,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
43794 kfree(kbs);
43795 return ((p && *p) ? -EOVERFLOW : 0);
43796 case KDSKBSENT:
43797+ if (!capable(CAP_SYS_TTY_CONFIG))
43798+ perm = 0;
43799+
43800 if (!perm) {
43801 ret = -EPERM;
43802 goto reterr;
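
Two distinct changes land in keyboard.c: the special-key dispatcher refuses to run the show-state/show-registers/show-memory handlers when the hardened /proc options are enabled, and the CAP_SYS_TTY_CONFIG downgrade of `perm` moves so it guards only the keymap set ioctls (KDSKBENT/KDSKBSENT), leaving reads unprivileged. The handler filter is just a pointer comparison against the dispatch table; an illustrative sketch (handler names invented):

    #include <stdio.h>

    static void show_state(void) { puts("task list"); }
    static void show_mem(void)   { puts("memory info"); }
    static void do_beep(void)    { puts("beep"); }

    static void (*fn_handler[])(void) = { show_state, show_mem, do_beep };

    /* Mirrors the k_spec() filter: info-leaking handlers are dropped
     * outright when the hardened policy is in force. */
    static void k_spec(unsigned int value, int hardened)
    {
        void (*func)(void) = fn_handler[value];

        if (hardened && (func == show_state || func == show_mem))
            return;                     /* silently ignored */
        func();
    }

    int main(void)
    {
        k_spec(0, 1);   /* filtered */
        k_spec(2, 1);   /* allowed */
        return 0;
    }
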
43803diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
43804index 5110f36..8dc0a74 100644
43805--- a/drivers/uio/uio.c
43806+++ b/drivers/uio/uio.c
43807@@ -25,6 +25,7 @@
43808 #include <linux/kobject.h>
43809 #include <linux/cdev.h>
43810 #include <linux/uio_driver.h>
43811+#include <asm/local.h>
43812
43813 #define UIO_MAX_DEVICES (1U << MINORBITS)
43814
43815@@ -32,10 +33,10 @@ struct uio_device {
43816 struct module *owner;
43817 struct device *dev;
43818 int minor;
43819- atomic_t event;
43820+ atomic_unchecked_t event;
43821 struct fasync_struct *async_queue;
43822 wait_queue_head_t wait;
43823- int vma_count;
43824+ local_t vma_count;
43825 struct uio_info *info;
43826 struct kobject *map_dir;
43827 struct kobject *portio_dir;
43828@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
43829 struct device_attribute *attr, char *buf)
43830 {
43831 struct uio_device *idev = dev_get_drvdata(dev);
43832- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
43833+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
43834 }
43835
43836 static struct device_attribute uio_class_attributes[] = {
43837@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
43838 {
43839 struct uio_device *idev = info->uio_dev;
43840
43841- atomic_inc(&idev->event);
43842+ atomic_inc_unchecked(&idev->event);
43843 wake_up_interruptible(&idev->wait);
43844 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
43845 }
43846@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
43847 }
43848
43849 listener->dev = idev;
43850- listener->event_count = atomic_read(&idev->event);
43851+ listener->event_count = atomic_read_unchecked(&idev->event);
43852 filep->private_data = listener;
43853
43854 if (idev->info->open) {
43855@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
43856 return -EIO;
43857
43858 poll_wait(filep, &idev->wait, wait);
43859- if (listener->event_count != atomic_read(&idev->event))
43860+ if (listener->event_count != atomic_read_unchecked(&idev->event))
43861 return POLLIN | POLLRDNORM;
43862 return 0;
43863 }
43864@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
43865 do {
43866 set_current_state(TASK_INTERRUPTIBLE);
43867
43868- event_count = atomic_read(&idev->event);
43869+ event_count = atomic_read_unchecked(&idev->event);
43870 if (event_count != listener->event_count) {
43871 if (copy_to_user(buf, &event_count, count))
43872 retval = -EFAULT;
43873@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
43874 static void uio_vma_open(struct vm_area_struct *vma)
43875 {
43876 struct uio_device *idev = vma->vm_private_data;
43877- idev->vma_count++;
43878+ local_inc(&idev->vma_count);
43879 }
43880
43881 static void uio_vma_close(struct vm_area_struct *vma)
43882 {
43883 struct uio_device *idev = vma->vm_private_data;
43884- idev->vma_count--;
43885+ local_dec(&idev->vma_count);
43886 }
43887
43888 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
43889@@ -819,7 +820,7 @@ int __uio_register_device(struct module *owner,
43890 idev->owner = owner;
43891 idev->info = info;
43892 init_waitqueue_head(&idev->wait);
43893- atomic_set(&idev->event, 0);
43894+ atomic_set_unchecked(&idev->event, 0);
43895
43896 ret = uio_get_minor(idev);
43897 if (ret)
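
uio.c shows the other half of REFCOUNT hardening: counters that may legitimately wrap, like the event counter here, are retyped atomic_unchecked_t so the overflow trap stays quiet, and the mmap bookkeeping becomes a local_t. A conceptual userspace model of checked vs. unchecked increments (the kernel implements the checked flavor in a few instructions of asm; this is only an approximation):

    #include <stdatomic.h>
    #include <limits.h>
    #include <stdio.h>

    /* Checked flavor: refuse to move past INT_MAX (the kernel logs
     * and saturates instead of wrapping). */
    static void inc_checked(atomic_int *v)
    {
        int old = atomic_load(v);
        for (;;) {
            if (old == INT_MAX) {
                fprintf(stderr, "refcount overflow detected\n");
                return;
            }
            if (atomic_compare_exchange_weak(v, &old, old + 1))
                return;
        }
    }

    /* Unchecked flavor: an ordinary wrapping counter. */
    static void inc_unchecked(atomic_int *v)
    {
        atomic_fetch_add(v, 1);
    }

    int main(void)
    {
        atomic_int ev = INT_MAX;
        inc_checked(&ev);               /* caught */
        inc_unchecked(&ev);             /* silently wraps */
        printf("%d\n", atomic_load(&ev));
        return 0;
    }
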
43898diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
43899index b7eb86a..36d28af 100644
43900--- a/drivers/usb/atm/cxacru.c
43901+++ b/drivers/usb/atm/cxacru.c
43902@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
43903 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
43904 if (ret < 2)
43905 return -EINVAL;
43906- if (index < 0 || index > 0x7f)
43907+ if (index > 0x7f)
43908 return -EINVAL;
43909 pos += tmp;
43910
43911diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
43912index 35f10bf..6a38a0b 100644
43913--- a/drivers/usb/atm/usbatm.c
43914+++ b/drivers/usb/atm/usbatm.c
43915@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
43916 if (printk_ratelimit())
43917 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
43918 __func__, vpi, vci);
43919- atomic_inc(&vcc->stats->rx_err);
43920+ atomic_inc_unchecked(&vcc->stats->rx_err);
43921 return;
43922 }
43923
43924@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
43925 if (length > ATM_MAX_AAL5_PDU) {
43926 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
43927 __func__, length, vcc);
43928- atomic_inc(&vcc->stats->rx_err);
43929+ atomic_inc_unchecked(&vcc->stats->rx_err);
43930 goto out;
43931 }
43932
43933@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
43934 if (sarb->len < pdu_length) {
43935 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
43936 __func__, pdu_length, sarb->len, vcc);
43937- atomic_inc(&vcc->stats->rx_err);
43938+ atomic_inc_unchecked(&vcc->stats->rx_err);
43939 goto out;
43940 }
43941
43942 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
43943 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
43944 __func__, vcc);
43945- atomic_inc(&vcc->stats->rx_err);
43946+ atomic_inc_unchecked(&vcc->stats->rx_err);
43947 goto out;
43948 }
43949
43950@@ -389,7 +389,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
43951 if (printk_ratelimit())
43952 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
43953 __func__, length);
43954- atomic_inc(&vcc->stats->rx_drop);
43955+ atomic_inc_unchecked(&vcc->stats->rx_drop);
43956 goto out;
43957 }
43958
43959@@ -417,7 +417,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
43960
43961 vcc->push(vcc, skb);
43962
43963- atomic_inc(&vcc->stats->rx);
43964+ atomic_inc_unchecked(&vcc->stats->rx);
43965 out:
43966 skb_trim(sarb, 0);
43967 }
43968@@ -623,7 +623,7 @@ static void usbatm_tx_process(unsigned long data)
43969 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
43970
43971 usbatm_pop(vcc, skb);
43972- atomic_inc(&vcc->stats->tx);
43973+ atomic_inc_unchecked(&vcc->stats->tx);
43974
43975 skb = skb_dequeue(&instance->sndqueue);
43976 }
43977@@ -779,11 +779,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
43978 if (!left--)
43979 return sprintf(page,
43980 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
43981- atomic_read(&atm_dev->stats.aal5.tx),
43982- atomic_read(&atm_dev->stats.aal5.tx_err),
43983- atomic_read(&atm_dev->stats.aal5.rx),
43984- atomic_read(&atm_dev->stats.aal5.rx_err),
43985- atomic_read(&atm_dev->stats.aal5.rx_drop));
43986+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
43987+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
43988+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
43989+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
43990+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
43991
43992 if (!left--) {
43993 if (instance->disconnected)
43994diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
43995index cbacea9..246cccd 100644
43996--- a/drivers/usb/core/devices.c
43997+++ b/drivers/usb/core/devices.c
43998@@ -126,7 +126,7 @@ static const char format_endpt[] =
43999 * time it gets called.
44000 */
44001 static struct device_connect_event {
44002- atomic_t count;
44003+ atomic_unchecked_t count;
44004 wait_queue_head_t wait;
44005 } device_event = {
44006 .count = ATOMIC_INIT(1),
44007@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
44008
44009 void usbfs_conn_disc_event(void)
44010 {
44011- atomic_add(2, &device_event.count);
44012+ atomic_add_unchecked(2, &device_event.count);
44013 wake_up(&device_event.wait);
44014 }
44015
44016@@ -645,7 +645,7 @@ static unsigned int usb_device_poll(struct file *file,
44017
44018 poll_wait(file, &device_event.wait, wait);
44019
44020- event_count = atomic_read(&device_event.count);
44021+ event_count = atomic_read_unchecked(&device_event.count);
44022 if (file->f_version != event_count) {
44023 file->f_version = event_count;
44024 return POLLIN | POLLRDNORM;
44025diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
44026index 8e64adf..9a33a3c 100644
44027--- a/drivers/usb/core/hcd.c
44028+++ b/drivers/usb/core/hcd.c
44029@@ -1522,7 +1522,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
44030 */
44031 usb_get_urb(urb);
44032 atomic_inc(&urb->use_count);
44033- atomic_inc(&urb->dev->urbnum);
44034+ atomic_inc_unchecked(&urb->dev->urbnum);
44035 usbmon_urb_submit(&hcd->self, urb);
44036
44037 /* NOTE requirements on root-hub callers (usbfs and the hub
44038@@ -1549,7 +1549,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
44039 urb->hcpriv = NULL;
44040 INIT_LIST_HEAD(&urb->urb_list);
44041 atomic_dec(&urb->use_count);
44042- atomic_dec(&urb->dev->urbnum);
44043+ atomic_dec_unchecked(&urb->dev->urbnum);
44044 if (atomic_read(&urb->reject))
44045 wake_up(&usb_kill_urb_queue);
44046 usb_put_urb(urb);
44047diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
44048index 131f736..99004c3 100644
44049--- a/drivers/usb/core/message.c
44050+++ b/drivers/usb/core/message.c
44051@@ -129,7 +129,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
44052 * method can wait for it to complete. Since you don't have a handle on the
44053 * URB used, you can't cancel the request.
44054 */
44055-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
44056+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
44057 __u8 requesttype, __u16 value, __u16 index, void *data,
44058 __u16 size, int timeout)
44059 {
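
__intentional_overflow(-1) is an annotation for the size_overflow GCC plugin: it exempts this function's arithmetic from the plugin's integer-overflow instrumentation (by convention the -1 covers all parameters and the return value). When the plugin is not active, the macro must vanish. A sketch of plausible wiring; the guard and attribute spelling below are assumed, not taken from the patch:

    #include <stdio.h>

    #ifdef SIZE_OVERFLOW_PLUGIN                      /* assumed guard */
    #define __intentional_overflow(...) \
            __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    #define __intentional_overflow(...)              /* expands to nothing */
    #endif

    static int __intentional_overflow(-1) add_lengths(int a, int b)
    {
        return a + b;   /* arithmetic the plugin would otherwise check */
    }

    int main(void)
    {
        printf("%d\n", add_lengths(3, 4));
        return 0;
    }
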
44060diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
44061index 818e4a0..0fc9589 100644
44062--- a/drivers/usb/core/sysfs.c
44063+++ b/drivers/usb/core/sysfs.c
44064@@ -226,7 +226,7 @@ show_urbnum(struct device *dev, struct device_attribute *attr, char *buf)
44065 struct usb_device *udev;
44066
44067 udev = to_usb_device(dev);
44068- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
44069+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
44070 }
44071 static DEVICE_ATTR(urbnum, S_IRUGO, show_urbnum, NULL);
44072
44073diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
44074index f81b925..78d22ec 100644
44075--- a/drivers/usb/core/usb.c
44076+++ b/drivers/usb/core/usb.c
44077@@ -388,7 +388,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
44078 set_dev_node(&dev->dev, dev_to_node(bus->controller));
44079 dev->state = USB_STATE_ATTACHED;
44080 dev->lpm_disable_count = 1;
44081- atomic_set(&dev->urbnum, 0);
44082+ atomic_set_unchecked(&dev->urbnum, 0);
44083
44084 INIT_LIST_HEAD(&dev->ep0.urb_list);
44085 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
44086diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
44087index 5e29dde..eca992f 100644
44088--- a/drivers/usb/early/ehci-dbgp.c
44089+++ b/drivers/usb/early/ehci-dbgp.c
44090@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
44091
44092 #ifdef CONFIG_KGDB
44093 static struct kgdb_io kgdbdbgp_io_ops;
44094-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
44095+static struct kgdb_io kgdbdbgp_io_ops_console;
44096+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
44097 #else
44098 #define dbgp_kgdb_mode (0)
44099 #endif
44100@@ -1047,6 +1048,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
44101 .write_char = kgdbdbgp_write_char,
44102 };
44103
44104+static struct kgdb_io kgdbdbgp_io_ops_console = {
44105+ .name = "kgdbdbgp",
44106+ .read_char = kgdbdbgp_read_char,
44107+ .write_char = kgdbdbgp_write_char,
44108+ .is_console = 1
44109+};
44110+
44111 static int kgdbdbgp_wait_time;
44112
44113 static int __init kgdbdbgp_parse_config(char *str)
44114@@ -1062,8 +1070,10 @@ static int __init kgdbdbgp_parse_config(char *str)
44115 ptr++;
44116 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
44117 }
44118- kgdb_register_io_module(&kgdbdbgp_io_ops);
44119- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
44120+ if (early_dbgp_console.index != -1)
44121+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
44122+ else
44123+ kgdb_register_io_module(&kgdbdbgp_io_ops);
44124
44125 return 0;
44126 }
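
The ehci-dbgp change replaces post-registration mutation of kgdbdbgp_io_ops.is_console with two statically initialized variants, one per console mode: once ops structures are constified, runtime configuration has to become a choice between read-only instances rather than a field write. The pattern in miniature (fields simplified):

    #include <stdio.h>

    struct kgdb_io_like {
        const char *name;
        int is_console;
    };

    static const struct kgdb_io_like io_plain   = { "kgdbdbgp", 0 };
    static const struct kgdb_io_like io_console = { "kgdbdbgp", 1 };

    static void register_io(const struct kgdb_io_like *io)
    {
        printf("registered %s (console=%d)\n", io->name, io->is_console);
    }

    int main(void)
    {
        /* stand-in for early_dbgp_console.index != -1 */
        int early_console_active = 1;

        register_io(early_console_active ? &io_console : &io_plain);
        return 0;
    }
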
44127diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
44128index 598dcc1..032dd4f 100644
44129--- a/drivers/usb/gadget/u_serial.c
44130+++ b/drivers/usb/gadget/u_serial.c
44131@@ -735,9 +735,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
44132 spin_lock_irq(&port->port_lock);
44133
44134 /* already open? Great. */
44135- if (port->port.count) {
44136+ if (atomic_read(&port->port.count)) {
44137 status = 0;
44138- port->port.count++;
44139+ atomic_inc(&port->port.count);
44140
44141 /* currently opening/closing? wait ... */
44142 } else if (port->openclose) {
44143@@ -796,7 +796,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
44144 tty->driver_data = port;
44145 port->port.tty = tty;
44146
44147- port->port.count = 1;
44148+ atomic_set(&port->port.count, 1);
44149 port->openclose = false;
44150
44151 /* if connected, start the I/O stream */
44152@@ -838,11 +838,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
44153
44154 spin_lock_irq(&port->port_lock);
44155
44156- if (port->port.count != 1) {
44157- if (port->port.count == 0)
44158+ if (atomic_read(&port->port.count) != 1) {
44159+ if (atomic_read(&port->port.count) == 0)
44160 WARN_ON(1);
44161 else
44162- --port->port.count;
44163+ atomic_dec(&port->port.count);
44164 goto exit;
44165 }
44166
44167@@ -852,7 +852,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
44168 * and sleep if necessary
44169 */
44170 port->openclose = true;
44171- port->port.count = 0;
44172+ atomic_set(&port->port.count, 0);
44173
44174 gser = port->port_usb;
44175 if (gser && gser->disconnect)
44176@@ -1159,7 +1159,7 @@ static int gs_closed(struct gs_port *port)
44177 int cond;
44178
44179 spin_lock_irq(&port->port_lock);
44180- cond = (port->port.count == 0) && !port->openclose;
44181+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
44182 spin_unlock_irq(&port->port_lock);
44183 return cond;
44184 }
44185@@ -1273,7 +1273,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
44186 /* if it's already open, start I/O ... and notify the serial
44187 * protocol about open/close status (connect/disconnect).
44188 */
44189- if (port->port.count) {
44190+ if (atomic_read(&port->port.count)) {
44191 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
44192 gs_start_io(port);
44193 if (gser->connect)
44194@@ -1320,7 +1320,7 @@ void gserial_disconnect(struct gserial *gser)
44195
44196 port->port_usb = NULL;
44197 gser->ioport = NULL;
44198- if (port->port.count > 0 || port->openclose) {
44199+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
44200 wake_up_interruptible(&port->drain_wait);
44201 if (port->port.tty)
44202 tty_hangup(port->port.tty);
44203@@ -1336,7 +1336,7 @@ void gserial_disconnect(struct gserial *gser)
44204
44205 /* finally, free any unused/unusable I/O buffers */
44206 spin_lock_irqsave(&port->port_lock, flags);
44207- if (port->port.count == 0 && !port->openclose)
44208+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
44209 gs_buf_free(&port->port_write_buf);
44210 gs_free_requests(gser->out, &port->read_pool, NULL);
44211 gs_free_requests(gser->out, &port->read_queue, NULL);
44212diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
44213index 5f3bcd3..bfca43f 100644
44214--- a/drivers/usb/serial/console.c
44215+++ b/drivers/usb/serial/console.c
44216@@ -124,7 +124,7 @@ static int usb_console_setup(struct console *co, char *options)
44217
44218 info->port = port;
44219
44220- ++port->port.count;
44221+ atomic_inc(&port->port.count);
44222 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
44223 if (serial->type->set_termios) {
44224 /*
44225@@ -174,7 +174,7 @@ static int usb_console_setup(struct console *co, char *options)
44226 }
44227 /* Now that any required fake tty operations are completed restore
44228 * the tty port count */
44229- --port->port.count;
44230+ atomic_dec(&port->port.count);
44231 /* The console is special in terms of closing the device so
44232 * indicate this port is now acting as a system console. */
44233 port->port.console = 1;
44234@@ -187,7 +187,7 @@ static int usb_console_setup(struct console *co, char *options)
44235 free_tty:
44236 kfree(tty);
44237 reset_open_count:
44238- port->port.count = 0;
44239+ atomic_set(&port->port.count, 0);
44240 usb_autopm_put_interface(serial->interface);
44241 error_get_interface:
44242 usb_serial_put(serial);
44243diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
44244index 75f70f0..d467e1a 100644
44245--- a/drivers/usb/storage/usb.h
44246+++ b/drivers/usb/storage/usb.h
44247@@ -63,7 +63,7 @@ struct us_unusual_dev {
44248 __u8 useProtocol;
44249 __u8 useTransport;
44250 int (*initFunction)(struct us_data *);
44251-};
44252+} __do_const;
44253
44254
44255 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
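
__do_const tags the whole struct type for the constify plugin: every instance of us_unusual_dev becomes const and is emitted into read-only data, so the initFunction pointers in these descriptor tables cannot be rewritten at runtime. Without the plugin the macro has to expand to nothing. A compilable sketch; the guard and plugin attribute name are assumed, not taken from the patch:

    #include <stdio.h>

    #ifdef CONSTIFY_PLUGIN                        /* assumed guard */
    #define __do_const __attribute__((do_const))  /* assumed spelling */
    #else
    #define __do_const
    #endif

    struct unusual_dev_like {
        int (*init)(void);
    } __do_const;

    static int init_fn(void) { return 1; }

    /* With the plugin active, this table is const and lands in .rodata. */
    static struct unusual_dev_like dev_table[] = {
        { .init = init_fn },
    };

    int main(void)
    {
        printf("%d\n", dev_table[0].init());
        return 0;
    }
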
44256diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
44257index d6bea3e..60b250e 100644
44258--- a/drivers/usb/wusbcore/wa-hc.h
44259+++ b/drivers/usb/wusbcore/wa-hc.h
44260@@ -192,7 +192,7 @@ struct wahc {
44261 struct list_head xfer_delayed_list;
44262 spinlock_t xfer_list_lock;
44263 struct work_struct xfer_work;
44264- atomic_t xfer_id_count;
44265+ atomic_unchecked_t xfer_id_count;
44266 };
44267
44268
44269@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
44270 INIT_LIST_HEAD(&wa->xfer_delayed_list);
44271 spin_lock_init(&wa->xfer_list_lock);
44272 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
44273- atomic_set(&wa->xfer_id_count, 1);
44274+ atomic_set_unchecked(&wa->xfer_id_count, 1);
44275 }
44276
44277 /**
44278diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
44279index 57c01ab..8a05959 100644
44280--- a/drivers/usb/wusbcore/wa-xfer.c
44281+++ b/drivers/usb/wusbcore/wa-xfer.c
44282@@ -296,7 +296,7 @@ out:
44283 */
44284 static void wa_xfer_id_init(struct wa_xfer *xfer)
44285 {
44286- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
44287+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
44288 }
44289
44290 /*
44291diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
44292index 8c55011..eed4ae1a 100644
44293--- a/drivers/video/aty/aty128fb.c
44294+++ b/drivers/video/aty/aty128fb.c
44295@@ -149,7 +149,7 @@ enum {
44296 };
44297
44298 /* Must match above enum */
44299-static char * const r128_family[] = {
44300+static const char * const r128_family[] = {
44301 "AGP",
44302 "PCI",
44303 "PRO AGP",
44304diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
44305index 4f27fdc..d3537e6 100644
44306--- a/drivers/video/aty/atyfb_base.c
44307+++ b/drivers/video/aty/atyfb_base.c
44308@@ -1325,10 +1325,14 @@ static int atyfb_set_par(struct fb_info *info)
44309 par->accel_flags = var->accel_flags; /* hack */
44310
44311 if (var->accel_flags) {
44312- info->fbops->fb_sync = atyfb_sync;
44313+ pax_open_kernel();
44314+ *(void **)&info->fbops->fb_sync = atyfb_sync;
44315+ pax_close_kernel();
44316 info->flags &= ~FBINFO_HWACCEL_DISABLED;
44317 } else {
44318- info->fbops->fb_sync = NULL;
44319+ pax_open_kernel();
44320+ *(void **)&info->fbops->fb_sync = NULL;
44321+ pax_close_kernel();
44322 info->flags |= FBINFO_HWACCEL_DISABLED;
44323 }
44324
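
pax_open_kernel()/pax_close_kernel() bracket a deliberate write into memory that KERNEXEC/constify otherwise keeps read-only: open the window, patch one function pointer through a (void *) cast, close the window. The same ceremony recurs in mach64_cursor.c and fb_defio.c below. A userspace analogue using mprotect() on an mmap'd page (error handling trimmed; analogy only, the in-kernel mechanism differs):

    #include <stdio.h>
    #include <stdint.h>
    #include <sys/mman.h>
    #include <unistd.h>

    struct fb_ops_like { void (*fb_sync)(void); };

    static void my_sync(void) { puts("sync"); }

    static void set_sync(struct fb_ops_like *ops)
    {
        long pg = sysconf(_SC_PAGESIZE);
        void *page = (void *)((uintptr_t)ops & ~(uintptr_t)(pg - 1));

        mprotect(page, (size_t)pg, PROT_READ | PROT_WRITE); /* "open"  */
        *(void **)&ops->fb_sync = my_sync;
        mprotect(page, (size_t)pg, PROT_READ);              /* "close" */
    }

    int main(void)
    {
        long pg = sysconf(_SC_PAGESIZE);
        struct fb_ops_like *ops = mmap(NULL, (size_t)pg, PROT_READ,
                                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ops == MAP_FAILED)
            return 1;
        set_sync(ops);
        ops->fb_sync();
        return 0;
    }
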
44325diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
44326index 95ec042..e6affdd 100644
44327--- a/drivers/video/aty/mach64_cursor.c
44328+++ b/drivers/video/aty/mach64_cursor.c
44329@@ -7,6 +7,7 @@
44330 #include <linux/string.h>
44331
44332 #include <asm/io.h>
44333+#include <asm/pgtable.h>
44334
44335 #ifdef __sparc__
44336 #include <asm/fbio.h>
44337@@ -208,7 +209,9 @@ int aty_init_cursor(struct fb_info *info)
44338 info->sprite.buf_align = 16; /* and 64 lines tall. */
44339 info->sprite.flags = FB_PIXMAP_IO;
44340
44341- info->fbops->fb_cursor = atyfb_cursor;
44342+ pax_open_kernel();
44343+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
44344+ pax_close_kernel();
44345
44346 return 0;
44347 }
44348diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
44349index 6c5ed6b..b727c88 100644
44350--- a/drivers/video/backlight/kb3886_bl.c
44351+++ b/drivers/video/backlight/kb3886_bl.c
44352@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
44353 static unsigned long kb3886bl_flags;
44354 #define KB3886BL_SUSPENDED 0x01
44355
44356-static struct dmi_system_id __initdata kb3886bl_device_table[] = {
44357+static const struct dmi_system_id __initconst kb3886bl_device_table[] = {
44358 {
44359 .ident = "Sahara Touch-iT",
44360 .matches = {
44361diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
44362index 88cad6b..dd746c7 100644
44363--- a/drivers/video/fb_defio.c
44364+++ b/drivers/video/fb_defio.c
44365@@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info)
44366
44367 BUG_ON(!fbdefio);
44368 mutex_init(&fbdefio->lock);
44369- info->fbops->fb_mmap = fb_deferred_io_mmap;
44370+ pax_open_kernel();
44371+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
44372+ pax_close_kernel();
44373 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
44374 INIT_LIST_HEAD(&fbdefio->pagelist);
44375 if (fbdefio->delay == 0) /* set a default of 1 s */
44376@@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
44377 page->mapping = NULL;
44378 }
44379
44380- info->fbops->fb_mmap = NULL;
44381+ *(void **)&info->fbops->fb_mmap = NULL;
44382 mutex_destroy(&fbdefio->lock);
44383 }
44384 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
44385diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
44386index 5c3960d..15cf8fc 100644
44387--- a/drivers/video/fbcmap.c
44388+++ b/drivers/video/fbcmap.c
44389@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
44390 rc = -ENODEV;
44391 goto out;
44392 }
44393- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
44394- !info->fbops->fb_setcmap)) {
44395+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
44396 rc = -EINVAL;
44397 goto out1;
44398 }
44399diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
44400index dc61c12..e29796e 100644
44401--- a/drivers/video/fbmem.c
44402+++ b/drivers/video/fbmem.c
44403@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
44404 image->dx += image->width + 8;
44405 }
44406 } else if (rotate == FB_ROTATE_UD) {
44407- for (x = 0; x < num && image->dx >= 0; x++) {
44408+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
44409 info->fbops->fb_imageblit(info, image);
44410 image->dx -= image->width + 8;
44411 }
44412@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
44413 image->dy += image->height + 8;
44414 }
44415 } else if (rotate == FB_ROTATE_CCW) {
44416- for (x = 0; x < num && image->dy >= 0; x++) {
44417+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
44418 info->fbops->fb_imageblit(info, image);
44419 image->dy -= image->height + 8;
44420 }
44421@@ -1166,7 +1166,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
44422 return -EFAULT;
44423 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
44424 return -EINVAL;
44425- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
44426+ if (con2fb.framebuffer >= FB_MAX)
44427 return -EINVAL;
44428 if (!registered_fb[con2fb.framebuffer])
44429 request_module("fb%d", con2fb.framebuffer);
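
Several nearby hunks are the same signedness cleanup seen in cxacru.c and fbcmap.c: fields such as con2fb.framebuffer and cmap->start are unsigned, so `x < 0` tests are dead code and get deleted, while the fb_do_show_logo() loop bounds genuinely need a signed view and gain an explicit (__s32) cast. The loop case in miniature:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t dx = 4;   /* image->dx is unsigned in the kernel */
        int steps = 0;

        /* Plain `dx >= 0` is vacuously true for an unsigned type; the
         * signed cast stops the loop once the coordinate would go
         * negative (two's-complement conversion assumed). */
        while ((int32_t)dx >= 0 && steps < 100) {
            dx -= 8;       /* image->dx -= image->width + 8 */
            steps++;
        }
        printf("steps=%d\n", steps);
        return 0;
    }
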
44430diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
44431index 7672d2e..b56437f 100644
44432--- a/drivers/video/i810/i810_accel.c
44433+++ b/drivers/video/i810/i810_accel.c
44434@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
44435 }
44436 }
44437 printk("ringbuffer lockup!!!\n");
44438+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
44439 i810_report_error(mmio);
44440 par->dev_flags |= LOCKUP;
44441 info->pixmap.scan_align = 1;
44442diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
44443index 3c14e43..eafa544 100644
44444--- a/drivers/video/logo/logo_linux_clut224.ppm
44445+++ b/drivers/video/logo/logo_linux_clut224.ppm
44446@@ -1,1604 +1,1123 @@
44447 P3
44448-# Standard 224-color Linux logo
44449 80 80
44450 255
[the remainder of this hunk is raw image data: roughly 1,600 removed and 1,100 added rows of "R G B" pixel triplets that swap the stock 80x80 224-color boot logo for the patch's replacement; the pixel rows are omitted here, and this capture of the patch breaks off partway through the removed rows]
45224- 6 6 6 14 14 14 50 50 50 14 14 14
45225- 2 2 6 2 2 6 2 2 6 2 2 6
45226- 2 2 6 6 6 6 86 86 86 46 46 46
45227- 18 18 18 6 6 6 0 0 0 0 0 0
45228- 0 0 0 0 0 0 0 0 0 0 0 0
45229- 0 0 0 0 0 0 0 0 0 0 0 0
45230- 0 0 0 0 0 0 0 0 0 0 0 0
45231- 0 0 0 0 0 0 0 0 0 0 0 0
45232- 0 0 0 0 0 0 0 0 0 0 0 0
45233- 0 0 0 0 0 0 0 0 0 0 0 0
45234- 0 0 0 0 0 0 0 0 0 0 0 0
45235- 0 0 0 0 0 0 0 0 0 6 6 6
45236- 22 22 22 54 54 54 70 70 70 2 2 6
45237- 2 2 6 10 10 10 2 2 6 22 22 22
45238-166 166 166 231 231 231 250 250 250 253 253 253
45239-253 253 253 253 253 253 253 253 253 250 250 250
45240-242 242 242 253 253 253 253 253 253 253 253 253
45241-253 253 253 253 253 253 253 253 253 253 253 253
45242-253 253 253 253 253 253 253 253 253 246 246 246
45243-231 231 231 206 206 206 198 198 198 226 226 226
45244- 94 94 94 2 2 6 6 6 6 38 38 38
45245- 30 30 30 2 2 6 2 2 6 2 2 6
45246- 2 2 6 2 2 6 62 62 62 66 66 66
45247- 26 26 26 10 10 10 0 0 0 0 0 0
45248- 0 0 0 0 0 0 0 0 0 0 0 0
45249- 0 0 0 0 0 0 0 0 0 0 0 0
45250- 0 0 0 0 0 0 0 0 0 0 0 0
45251- 0 0 0 0 0 0 0 0 0 0 0 0
45252- 0 0 0 0 0 0 0 0 0 0 0 0
45253- 0 0 0 0 0 0 0 0 0 0 0 0
45254- 0 0 0 0 0 0 0 0 0 0 0 0
45255- 0 0 0 0 0 0 0 0 0 10 10 10
45256- 30 30 30 74 74 74 50 50 50 2 2 6
45257- 26 26 26 26 26 26 2 2 6 106 106 106
45258-238 238 238 253 253 253 253 253 253 253 253 253
45259-253 253 253 253 253 253 253 253 253 253 253 253
45260-253 253 253 253 253 253 253 253 253 253 253 253
45261-253 253 253 253 253 253 253 253 253 253 253 253
45262-253 253 253 253 253 253 253 253 253 253 253 253
45263-253 253 253 246 246 246 218 218 218 202 202 202
45264-210 210 210 14 14 14 2 2 6 2 2 6
45265- 30 30 30 22 22 22 2 2 6 2 2 6
45266- 2 2 6 2 2 6 18 18 18 86 86 86
45267- 42 42 42 14 14 14 0 0 0 0 0 0
45268- 0 0 0 0 0 0 0 0 0 0 0 0
45269- 0 0 0 0 0 0 0 0 0 0 0 0
45270- 0 0 0 0 0 0 0 0 0 0 0 0
45271- 0 0 0 0 0 0 0 0 0 0 0 0
45272- 0 0 0 0 0 0 0 0 0 0 0 0
45273- 0 0 0 0 0 0 0 0 0 0 0 0
45274- 0 0 0 0 0 0 0 0 0 0 0 0
45275- 0 0 0 0 0 0 0 0 0 14 14 14
45276- 42 42 42 90 90 90 22 22 22 2 2 6
45277- 42 42 42 2 2 6 18 18 18 218 218 218
45278-253 253 253 253 253 253 253 253 253 253 253 253
45279-253 253 253 253 253 253 253 253 253 253 253 253
45280-253 253 253 253 253 253 253 253 253 253 253 253
45281-253 253 253 253 253 253 253 253 253 253 253 253
45282-253 253 253 253 253 253 253 253 253 253 253 253
45283-253 253 253 253 253 253 250 250 250 221 221 221
45284-218 218 218 101 101 101 2 2 6 14 14 14
45285- 18 18 18 38 38 38 10 10 10 2 2 6
45286- 2 2 6 2 2 6 2 2 6 78 78 78
45287- 58 58 58 22 22 22 6 6 6 0 0 0
45288- 0 0 0 0 0 0 0 0 0 0 0 0
45289- 0 0 0 0 0 0 0 0 0 0 0 0
45290- 0 0 0 0 0 0 0 0 0 0 0 0
45291- 0 0 0 0 0 0 0 0 0 0 0 0
45292- 0 0 0 0 0 0 0 0 0 0 0 0
45293- 0 0 0 0 0 0 0 0 0 0 0 0
45294- 0 0 0 0 0 0 0 0 0 0 0 0
45295- 0 0 0 0 0 0 6 6 6 18 18 18
45296- 54 54 54 82 82 82 2 2 6 26 26 26
45297- 22 22 22 2 2 6 123 123 123 253 253 253
45298-253 253 253 253 253 253 253 253 253 253 253 253
45299-253 253 253 253 253 253 253 253 253 253 253 253
45300-253 253 253 253 253 253 253 253 253 253 253 253
45301-253 253 253 253 253 253 253 253 253 253 253 253
45302-253 253 253 253 253 253 253 253 253 253 253 253
45303-253 253 253 253 253 253 253 253 253 250 250 250
45304-238 238 238 198 198 198 6 6 6 38 38 38
45305- 58 58 58 26 26 26 38 38 38 2 2 6
45306- 2 2 6 2 2 6 2 2 6 46 46 46
45307- 78 78 78 30 30 30 10 10 10 0 0 0
45308- 0 0 0 0 0 0 0 0 0 0 0 0
45309- 0 0 0 0 0 0 0 0 0 0 0 0
45310- 0 0 0 0 0 0 0 0 0 0 0 0
45311- 0 0 0 0 0 0 0 0 0 0 0 0
45312- 0 0 0 0 0 0 0 0 0 0 0 0
45313- 0 0 0 0 0 0 0 0 0 0 0 0
45314- 0 0 0 0 0 0 0 0 0 0 0 0
45315- 0 0 0 0 0 0 10 10 10 30 30 30
45316- 74 74 74 58 58 58 2 2 6 42 42 42
45317- 2 2 6 22 22 22 231 231 231 253 253 253
45318-253 253 253 253 253 253 253 253 253 253 253 253
45319-253 253 253 253 253 253 253 253 253 250 250 250
45320-253 253 253 253 253 253 253 253 253 253 253 253
45321-253 253 253 253 253 253 253 253 253 253 253 253
45322-253 253 253 253 253 253 253 253 253 253 253 253
45323-253 253 253 253 253 253 253 253 253 253 253 253
45324-253 253 253 246 246 246 46 46 46 38 38 38
45325- 42 42 42 14 14 14 38 38 38 14 14 14
45326- 2 2 6 2 2 6 2 2 6 6 6 6
45327- 86 86 86 46 46 46 14 14 14 0 0 0
45328- 0 0 0 0 0 0 0 0 0 0 0 0
45329- 0 0 0 0 0 0 0 0 0 0 0 0
45330- 0 0 0 0 0 0 0 0 0 0 0 0
45331- 0 0 0 0 0 0 0 0 0 0 0 0
45332- 0 0 0 0 0 0 0 0 0 0 0 0
45333- 0 0 0 0 0 0 0 0 0 0 0 0
45334- 0 0 0 0 0 0 0 0 0 0 0 0
45335- 0 0 0 6 6 6 14 14 14 42 42 42
45336- 90 90 90 18 18 18 18 18 18 26 26 26
45337- 2 2 6 116 116 116 253 253 253 253 253 253
45338-253 253 253 253 253 253 253 253 253 253 253 253
45339-253 253 253 253 253 253 250 250 250 238 238 238
45340-253 253 253 253 253 253 253 253 253 253 253 253
45341-253 253 253 253 253 253 253 253 253 253 253 253
45342-253 253 253 253 253 253 253 253 253 253 253 253
45343-253 253 253 253 253 253 253 253 253 253 253 253
45344-253 253 253 253 253 253 94 94 94 6 6 6
45345- 2 2 6 2 2 6 10 10 10 34 34 34
45346- 2 2 6 2 2 6 2 2 6 2 2 6
45347- 74 74 74 58 58 58 22 22 22 6 6 6
45348- 0 0 0 0 0 0 0 0 0 0 0 0
45349- 0 0 0 0 0 0 0 0 0 0 0 0
45350- 0 0 0 0 0 0 0 0 0 0 0 0
45351- 0 0 0 0 0 0 0 0 0 0 0 0
45352- 0 0 0 0 0 0 0 0 0 0 0 0
45353- 0 0 0 0 0 0 0 0 0 0 0 0
45354- 0 0 0 0 0 0 0 0 0 0 0 0
45355- 0 0 0 10 10 10 26 26 26 66 66 66
45356- 82 82 82 2 2 6 38 38 38 6 6 6
45357- 14 14 14 210 210 210 253 253 253 253 253 253
45358-253 253 253 253 253 253 253 253 253 253 253 253
45359-253 253 253 253 253 253 246 246 246 242 242 242
45360-253 253 253 253 253 253 253 253 253 253 253 253
45361-253 253 253 253 253 253 253 253 253 253 253 253
45362-253 253 253 253 253 253 253 253 253 253 253 253
45363-253 253 253 253 253 253 253 253 253 253 253 253
45364-253 253 253 253 253 253 144 144 144 2 2 6
45365- 2 2 6 2 2 6 2 2 6 46 46 46
45366- 2 2 6 2 2 6 2 2 6 2 2 6
45367- 42 42 42 74 74 74 30 30 30 10 10 10
45368- 0 0 0 0 0 0 0 0 0 0 0 0
45369- 0 0 0 0 0 0 0 0 0 0 0 0
45370- 0 0 0 0 0 0 0 0 0 0 0 0
45371- 0 0 0 0 0 0 0 0 0 0 0 0
45372- 0 0 0 0 0 0 0 0 0 0 0 0
45373- 0 0 0 0 0 0 0 0 0 0 0 0
45374- 0 0 0 0 0 0 0 0 0 0 0 0
45375- 6 6 6 14 14 14 42 42 42 90 90 90
45376- 26 26 26 6 6 6 42 42 42 2 2 6
45377- 74 74 74 250 250 250 253 253 253 253 253 253
45378-253 253 253 253 253 253 253 253 253 253 253 253
45379-253 253 253 253 253 253 242 242 242 242 242 242
45380-253 253 253 253 253 253 253 253 253 253 253 253
45381-253 253 253 253 253 253 253 253 253 253 253 253
45382-253 253 253 253 253 253 253 253 253 253 253 253
45383-253 253 253 253 253 253 253 253 253 253 253 253
45384-253 253 253 253 253 253 182 182 182 2 2 6
45385- 2 2 6 2 2 6 2 2 6 46 46 46
45386- 2 2 6 2 2 6 2 2 6 2 2 6
45387- 10 10 10 86 86 86 38 38 38 10 10 10
45388- 0 0 0 0 0 0 0 0 0 0 0 0
45389- 0 0 0 0 0 0 0 0 0 0 0 0
45390- 0 0 0 0 0 0 0 0 0 0 0 0
45391- 0 0 0 0 0 0 0 0 0 0 0 0
45392- 0 0 0 0 0 0 0 0 0 0 0 0
45393- 0 0 0 0 0 0 0 0 0 0 0 0
45394- 0 0 0 0 0 0 0 0 0 0 0 0
45395- 10 10 10 26 26 26 66 66 66 82 82 82
45396- 2 2 6 22 22 22 18 18 18 2 2 6
45397-149 149 149 253 253 253 253 253 253 253 253 253
45398-253 253 253 253 253 253 253 253 253 253 253 253
45399-253 253 253 253 253 253 234 234 234 242 242 242
45400-253 253 253 253 253 253 253 253 253 253 253 253
45401-253 253 253 253 253 253 253 253 253 253 253 253
45402-253 253 253 253 253 253 253 253 253 253 253 253
45403-253 253 253 253 253 253 253 253 253 253 253 253
45404-253 253 253 253 253 253 206 206 206 2 2 6
45405- 2 2 6 2 2 6 2 2 6 38 38 38
45406- 2 2 6 2 2 6 2 2 6 2 2 6
45407- 6 6 6 86 86 86 46 46 46 14 14 14
45408- 0 0 0 0 0 0 0 0 0 0 0 0
45409- 0 0 0 0 0 0 0 0 0 0 0 0
45410- 0 0 0 0 0 0 0 0 0 0 0 0
45411- 0 0 0 0 0 0 0 0 0 0 0 0
45412- 0 0 0 0 0 0 0 0 0 0 0 0
45413- 0 0 0 0 0 0 0 0 0 0 0 0
45414- 0 0 0 0 0 0 0 0 0 6 6 6
45415- 18 18 18 46 46 46 86 86 86 18 18 18
45416- 2 2 6 34 34 34 10 10 10 6 6 6
45417-210 210 210 253 253 253 253 253 253 253 253 253
45418-253 253 253 253 253 253 253 253 253 253 253 253
45419-253 253 253 253 253 253 234 234 234 242 242 242
45420-253 253 253 253 253 253 253 253 253 253 253 253
45421-253 253 253 253 253 253 253 253 253 253 253 253
45422-253 253 253 253 253 253 253 253 253 253 253 253
45423-253 253 253 253 253 253 253 253 253 253 253 253
45424-253 253 253 253 253 253 221 221 221 6 6 6
45425- 2 2 6 2 2 6 6 6 6 30 30 30
45426- 2 2 6 2 2 6 2 2 6 2 2 6
45427- 2 2 6 82 82 82 54 54 54 18 18 18
45428- 6 6 6 0 0 0 0 0 0 0 0 0
45429- 0 0 0 0 0 0 0 0 0 0 0 0
45430- 0 0 0 0 0 0 0 0 0 0 0 0
45431- 0 0 0 0 0 0 0 0 0 0 0 0
45432- 0 0 0 0 0 0 0 0 0 0 0 0
45433- 0 0 0 0 0 0 0 0 0 0 0 0
45434- 0 0 0 0 0 0 0 0 0 10 10 10
45435- 26 26 26 66 66 66 62 62 62 2 2 6
45436- 2 2 6 38 38 38 10 10 10 26 26 26
45437-238 238 238 253 253 253 253 253 253 253 253 253
45438-253 253 253 253 253 253 253 253 253 253 253 253
45439-253 253 253 253 253 253 231 231 231 238 238 238
45440-253 253 253 253 253 253 253 253 253 253 253 253
45441-253 253 253 253 253 253 253 253 253 253 253 253
45442-253 253 253 253 253 253 253 253 253 253 253 253
45443-253 253 253 253 253 253 253 253 253 253 253 253
45444-253 253 253 253 253 253 231 231 231 6 6 6
45445- 2 2 6 2 2 6 10 10 10 30 30 30
45446- 2 2 6 2 2 6 2 2 6 2 2 6
45447- 2 2 6 66 66 66 58 58 58 22 22 22
45448- 6 6 6 0 0 0 0 0 0 0 0 0
45449- 0 0 0 0 0 0 0 0 0 0 0 0
45450- 0 0 0 0 0 0 0 0 0 0 0 0
45451- 0 0 0 0 0 0 0 0 0 0 0 0
45452- 0 0 0 0 0 0 0 0 0 0 0 0
45453- 0 0 0 0 0 0 0 0 0 0 0 0
45454- 0 0 0 0 0 0 0 0 0 10 10 10
45455- 38 38 38 78 78 78 6 6 6 2 2 6
45456- 2 2 6 46 46 46 14 14 14 42 42 42
45457-246 246 246 253 253 253 253 253 253 253 253 253
45458-253 253 253 253 253 253 253 253 253 253 253 253
45459-253 253 253 253 253 253 231 231 231 242 242 242
45460-253 253 253 253 253 253 253 253 253 253 253 253
45461-253 253 253 253 253 253 253 253 253 253 253 253
45462-253 253 253 253 253 253 253 253 253 253 253 253
45463-253 253 253 253 253 253 253 253 253 253 253 253
45464-253 253 253 253 253 253 234 234 234 10 10 10
45465- 2 2 6 2 2 6 22 22 22 14 14 14
45466- 2 2 6 2 2 6 2 2 6 2 2 6
45467- 2 2 6 66 66 66 62 62 62 22 22 22
45468- 6 6 6 0 0 0 0 0 0 0 0 0
45469- 0 0 0 0 0 0 0 0 0 0 0 0
45470- 0 0 0 0 0 0 0 0 0 0 0 0
45471- 0 0 0 0 0 0 0 0 0 0 0 0
45472- 0 0 0 0 0 0 0 0 0 0 0 0
45473- 0 0 0 0 0 0 0 0 0 0 0 0
45474- 0 0 0 0 0 0 6 6 6 18 18 18
45475- 50 50 50 74 74 74 2 2 6 2 2 6
45476- 14 14 14 70 70 70 34 34 34 62 62 62
45477-250 250 250 253 253 253 253 253 253 253 253 253
45478-253 253 253 253 253 253 253 253 253 253 253 253
45479-253 253 253 253 253 253 231 231 231 246 246 246
45480-253 253 253 253 253 253 253 253 253 253 253 253
45481-253 253 253 253 253 253 253 253 253 253 253 253
45482-253 253 253 253 253 253 253 253 253 253 253 253
45483-253 253 253 253 253 253 253 253 253 253 253 253
45484-253 253 253 253 253 253 234 234 234 14 14 14
45485- 2 2 6 2 2 6 30 30 30 2 2 6
45486- 2 2 6 2 2 6 2 2 6 2 2 6
45487- 2 2 6 66 66 66 62 62 62 22 22 22
45488- 6 6 6 0 0 0 0 0 0 0 0 0
45489- 0 0 0 0 0 0 0 0 0 0 0 0
45490- 0 0 0 0 0 0 0 0 0 0 0 0
45491- 0 0 0 0 0 0 0 0 0 0 0 0
45492- 0 0 0 0 0 0 0 0 0 0 0 0
45493- 0 0 0 0 0 0 0 0 0 0 0 0
45494- 0 0 0 0 0 0 6 6 6 18 18 18
45495- 54 54 54 62 62 62 2 2 6 2 2 6
45496- 2 2 6 30 30 30 46 46 46 70 70 70
45497-250 250 250 253 253 253 253 253 253 253 253 253
45498-253 253 253 253 253 253 253 253 253 253 253 253
45499-253 253 253 253 253 253 231 231 231 246 246 246
45500-253 253 253 253 253 253 253 253 253 253 253 253
45501-253 253 253 253 253 253 253 253 253 253 253 253
45502-253 253 253 253 253 253 253 253 253 253 253 253
45503-253 253 253 253 253 253 253 253 253 253 253 253
45504-253 253 253 253 253 253 226 226 226 10 10 10
45505- 2 2 6 6 6 6 30 30 30 2 2 6
45506- 2 2 6 2 2 6 2 2 6 2 2 6
45507- 2 2 6 66 66 66 58 58 58 22 22 22
45508- 6 6 6 0 0 0 0 0 0 0 0 0
45509- 0 0 0 0 0 0 0 0 0 0 0 0
45510- 0 0 0 0 0 0 0 0 0 0 0 0
45511- 0 0 0 0 0 0 0 0 0 0 0 0
45512- 0 0 0 0 0 0 0 0 0 0 0 0
45513- 0 0 0 0 0 0 0 0 0 0 0 0
45514- 0 0 0 0 0 0 6 6 6 22 22 22
45515- 58 58 58 62 62 62 2 2 6 2 2 6
45516- 2 2 6 2 2 6 30 30 30 78 78 78
45517-250 250 250 253 253 253 253 253 253 253 253 253
45518-253 253 253 253 253 253 253 253 253 253 253 253
45519-253 253 253 253 253 253 231 231 231 246 246 246
45520-253 253 253 253 253 253 253 253 253 253 253 253
45521-253 253 253 253 253 253 253 253 253 253 253 253
45522-253 253 253 253 253 253 253 253 253 253 253 253
45523-253 253 253 253 253 253 253 253 253 253 253 253
45524-253 253 253 253 253 253 206 206 206 2 2 6
45525- 22 22 22 34 34 34 18 14 6 22 22 22
45526- 26 26 26 18 18 18 6 6 6 2 2 6
45527- 2 2 6 82 82 82 54 54 54 18 18 18
45528- 6 6 6 0 0 0 0 0 0 0 0 0
45529- 0 0 0 0 0 0 0 0 0 0 0 0
45530- 0 0 0 0 0 0 0 0 0 0 0 0
45531- 0 0 0 0 0 0 0 0 0 0 0 0
45532- 0 0 0 0 0 0 0 0 0 0 0 0
45533- 0 0 0 0 0 0 0 0 0 0 0 0
45534- 0 0 0 0 0 0 6 6 6 26 26 26
45535- 62 62 62 106 106 106 74 54 14 185 133 11
45536-210 162 10 121 92 8 6 6 6 62 62 62
45537-238 238 238 253 253 253 253 253 253 253 253 253
45538-253 253 253 253 253 253 253 253 253 253 253 253
45539-253 253 253 253 253 253 231 231 231 246 246 246
45540-253 253 253 253 253 253 253 253 253 253 253 253
45541-253 253 253 253 253 253 253 253 253 253 253 253
45542-253 253 253 253 253 253 253 253 253 253 253 253
45543-253 253 253 253 253 253 253 253 253 253 253 253
45544-253 253 253 253 253 253 158 158 158 18 18 18
45545- 14 14 14 2 2 6 2 2 6 2 2 6
45546- 6 6 6 18 18 18 66 66 66 38 38 38
45547- 6 6 6 94 94 94 50 50 50 18 18 18
45548- 6 6 6 0 0 0 0 0 0 0 0 0
45549- 0 0 0 0 0 0 0 0 0 0 0 0
45550- 0 0 0 0 0 0 0 0 0 0 0 0
45551- 0 0 0 0 0 0 0 0 0 0 0 0
45552- 0 0 0 0 0 0 0 0 0 0 0 0
45553- 0 0 0 0 0 0 0 0 0 6 6 6
45554- 10 10 10 10 10 10 18 18 18 38 38 38
45555- 78 78 78 142 134 106 216 158 10 242 186 14
45556-246 190 14 246 190 14 156 118 10 10 10 10
45557- 90 90 90 238 238 238 253 253 253 253 253 253
45558-253 253 253 253 253 253 253 253 253 253 253 253
45559-253 253 253 253 253 253 231 231 231 250 250 250
45560-253 253 253 253 253 253 253 253 253 253 253 253
45561-253 253 253 253 253 253 253 253 253 253 253 253
45562-253 253 253 253 253 253 253 253 253 253 253 253
45563-253 253 253 253 253 253 253 253 253 246 230 190
45564-238 204 91 238 204 91 181 142 44 37 26 9
45565- 2 2 6 2 2 6 2 2 6 2 2 6
45566- 2 2 6 2 2 6 38 38 38 46 46 46
45567- 26 26 26 106 106 106 54 54 54 18 18 18
45568- 6 6 6 0 0 0 0 0 0 0 0 0
45569- 0 0 0 0 0 0 0 0 0 0 0 0
45570- 0 0 0 0 0 0 0 0 0 0 0 0
45571- 0 0 0 0 0 0 0 0 0 0 0 0
45572- 0 0 0 0 0 0 0 0 0 0 0 0
45573- 0 0 0 6 6 6 14 14 14 22 22 22
45574- 30 30 30 38 38 38 50 50 50 70 70 70
45575-106 106 106 190 142 34 226 170 11 242 186 14
45576-246 190 14 246 190 14 246 190 14 154 114 10
45577- 6 6 6 74 74 74 226 226 226 253 253 253
45578-253 253 253 253 253 253 253 253 253 253 253 253
45579-253 253 253 253 253 253 231 231 231 250 250 250
45580-253 253 253 253 253 253 253 253 253 253 253 253
45581-253 253 253 253 253 253 253 253 253 253 253 253
45582-253 253 253 253 253 253 253 253 253 253 253 253
45583-253 253 253 253 253 253 253 253 253 228 184 62
45584-241 196 14 241 208 19 232 195 16 38 30 10
45585- 2 2 6 2 2 6 2 2 6 2 2 6
45586- 2 2 6 6 6 6 30 30 30 26 26 26
45587-203 166 17 154 142 90 66 66 66 26 26 26
45588- 6 6 6 0 0 0 0 0 0 0 0 0
45589- 0 0 0 0 0 0 0 0 0 0 0 0
45590- 0 0 0 0 0 0 0 0 0 0 0 0
45591- 0 0 0 0 0 0 0 0 0 0 0 0
45592- 0 0 0 0 0 0 0 0 0 0 0 0
45593- 6 6 6 18 18 18 38 38 38 58 58 58
45594- 78 78 78 86 86 86 101 101 101 123 123 123
45595-175 146 61 210 150 10 234 174 13 246 186 14
45596-246 190 14 246 190 14 246 190 14 238 190 10
45597-102 78 10 2 2 6 46 46 46 198 198 198
45598-253 253 253 253 253 253 253 253 253 253 253 253
45599-253 253 253 253 253 253 234 234 234 242 242 242
45600-253 253 253 253 253 253 253 253 253 253 253 253
45601-253 253 253 253 253 253 253 253 253 253 253 253
45602-253 253 253 253 253 253 253 253 253 253 253 253
45603-253 253 253 253 253 253 253 253 253 224 178 62
45604-242 186 14 241 196 14 210 166 10 22 18 6
45605- 2 2 6 2 2 6 2 2 6 2 2 6
45606- 2 2 6 2 2 6 6 6 6 121 92 8
45607-238 202 15 232 195 16 82 82 82 34 34 34
45608- 10 10 10 0 0 0 0 0 0 0 0 0
45609- 0 0 0 0 0 0 0 0 0 0 0 0
45610- 0 0 0 0 0 0 0 0 0 0 0 0
45611- 0 0 0 0 0 0 0 0 0 0 0 0
45612- 0 0 0 0 0 0 0 0 0 0 0 0
45613- 14 14 14 38 38 38 70 70 70 154 122 46
45614-190 142 34 200 144 11 197 138 11 197 138 11
45615-213 154 11 226 170 11 242 186 14 246 190 14
45616-246 190 14 246 190 14 246 190 14 246 190 14
45617-225 175 15 46 32 6 2 2 6 22 22 22
45618-158 158 158 250 250 250 253 253 253 253 253 253
45619-253 253 253 253 253 253 253 253 253 253 253 253
45620-253 253 253 253 253 253 253 253 253 253 253 253
45621-253 253 253 253 253 253 253 253 253 253 253 253
45622-253 253 253 253 253 253 253 253 253 253 253 253
45623-253 253 253 250 250 250 242 242 242 224 178 62
45624-239 182 13 236 186 11 213 154 11 46 32 6
45625- 2 2 6 2 2 6 2 2 6 2 2 6
45626- 2 2 6 2 2 6 61 42 6 225 175 15
45627-238 190 10 236 186 11 112 100 78 42 42 42
45628- 14 14 14 0 0 0 0 0 0 0 0 0
45629- 0 0 0 0 0 0 0 0 0 0 0 0
45630- 0 0 0 0 0 0 0 0 0 0 0 0
45631- 0 0 0 0 0 0 0 0 0 0 0 0
45632- 0 0 0 0 0 0 0 0 0 6 6 6
45633- 22 22 22 54 54 54 154 122 46 213 154 11
45634-226 170 11 230 174 11 226 170 11 226 170 11
45635-236 178 12 242 186 14 246 190 14 246 190 14
45636-246 190 14 246 190 14 246 190 14 246 190 14
45637-241 196 14 184 144 12 10 10 10 2 2 6
45638- 6 6 6 116 116 116 242 242 242 253 253 253
45639-253 253 253 253 253 253 253 253 253 253 253 253
45640-253 253 253 253 253 253 253 253 253 253 253 253
45641-253 253 253 253 253 253 253 253 253 253 253 253
45642-253 253 253 253 253 253 253 253 253 253 253 253
45643-253 253 253 231 231 231 198 198 198 214 170 54
45644-236 178 12 236 178 12 210 150 10 137 92 6
45645- 18 14 6 2 2 6 2 2 6 2 2 6
45646- 6 6 6 70 47 6 200 144 11 236 178 12
45647-239 182 13 239 182 13 124 112 88 58 58 58
45648- 22 22 22 6 6 6 0 0 0 0 0 0
45649- 0 0 0 0 0 0 0 0 0 0 0 0
45650- 0 0 0 0 0 0 0 0 0 0 0 0
45651- 0 0 0 0 0 0 0 0 0 0 0 0
45652- 0 0 0 0 0 0 0 0 0 10 10 10
45653- 30 30 30 70 70 70 180 133 36 226 170 11
45654-239 182 13 242 186 14 242 186 14 246 186 14
45655-246 190 14 246 190 14 246 190 14 246 190 14
45656-246 190 14 246 190 14 246 190 14 246 190 14
45657-246 190 14 232 195 16 98 70 6 2 2 6
45658- 2 2 6 2 2 6 66 66 66 221 221 221
45659-253 253 253 253 253 253 253 253 253 253 253 253
45660-253 253 253 253 253 253 253 253 253 253 253 253
45661-253 253 253 253 253 253 253 253 253 253 253 253
45662-253 253 253 253 253 253 253 253 253 253 253 253
45663-253 253 253 206 206 206 198 198 198 214 166 58
45664-230 174 11 230 174 11 216 158 10 192 133 9
45665-163 110 8 116 81 8 102 78 10 116 81 8
45666-167 114 7 197 138 11 226 170 11 239 182 13
45667-242 186 14 242 186 14 162 146 94 78 78 78
45668- 34 34 34 14 14 14 6 6 6 0 0 0
45669- 0 0 0 0 0 0 0 0 0 0 0 0
45670- 0 0 0 0 0 0 0 0 0 0 0 0
45671- 0 0 0 0 0 0 0 0 0 0 0 0
45672- 0 0 0 0 0 0 0 0 0 6 6 6
45673- 30 30 30 78 78 78 190 142 34 226 170 11
45674-239 182 13 246 190 14 246 190 14 246 190 14
45675-246 190 14 246 190 14 246 190 14 246 190 14
45676-246 190 14 246 190 14 246 190 14 246 190 14
45677-246 190 14 241 196 14 203 166 17 22 18 6
45678- 2 2 6 2 2 6 2 2 6 38 38 38
45679-218 218 218 253 253 253 253 253 253 253 253 253
45680-253 253 253 253 253 253 253 253 253 253 253 253
45681-253 253 253 253 253 253 253 253 253 253 253 253
45682-253 253 253 253 253 253 253 253 253 253 253 253
45683-250 250 250 206 206 206 198 198 198 202 162 69
45684-226 170 11 236 178 12 224 166 10 210 150 10
45685-200 144 11 197 138 11 192 133 9 197 138 11
45686-210 150 10 226 170 11 242 186 14 246 190 14
45687-246 190 14 246 186 14 225 175 15 124 112 88
45688- 62 62 62 30 30 30 14 14 14 6 6 6
45689- 0 0 0 0 0 0 0 0 0 0 0 0
45690- 0 0 0 0 0 0 0 0 0 0 0 0
45691- 0 0 0 0 0 0 0 0 0 0 0 0
45692- 0 0 0 0 0 0 0 0 0 10 10 10
45693- 30 30 30 78 78 78 174 135 50 224 166 10
45694-239 182 13 246 190 14 246 190 14 246 190 14
45695-246 190 14 246 190 14 246 190 14 246 190 14
45696-246 190 14 246 190 14 246 190 14 246 190 14
45697-246 190 14 246 190 14 241 196 14 139 102 15
45698- 2 2 6 2 2 6 2 2 6 2 2 6
45699- 78 78 78 250 250 250 253 253 253 253 253 253
45700-253 253 253 253 253 253 253 253 253 253 253 253
45701-253 253 253 253 253 253 253 253 253 253 253 253
45702-253 253 253 253 253 253 253 253 253 253 253 253
45703-250 250 250 214 214 214 198 198 198 190 150 46
45704-219 162 10 236 178 12 234 174 13 224 166 10
45705-216 158 10 213 154 11 213 154 11 216 158 10
45706-226 170 11 239 182 13 246 190 14 246 190 14
45707-246 190 14 246 190 14 242 186 14 206 162 42
45708-101 101 101 58 58 58 30 30 30 14 14 14
45709- 6 6 6 0 0 0 0 0 0 0 0 0
45710- 0 0 0 0 0 0 0 0 0 0 0 0
45711- 0 0 0 0 0 0 0 0 0 0 0 0
45712- 0 0 0 0 0 0 0 0 0 10 10 10
45713- 30 30 30 74 74 74 174 135 50 216 158 10
45714-236 178 12 246 190 14 246 190 14 246 190 14
45715-246 190 14 246 190 14 246 190 14 246 190 14
45716-246 190 14 246 190 14 246 190 14 246 190 14
45717-246 190 14 246 190 14 241 196 14 226 184 13
45718- 61 42 6 2 2 6 2 2 6 2 2 6
45719- 22 22 22 238 238 238 253 253 253 253 253 253
45720-253 253 253 253 253 253 253 253 253 253 253 253
45721-253 253 253 253 253 253 253 253 253 253 253 253
45722-253 253 253 253 253 253 253 253 253 253 253 253
45723-253 253 253 226 226 226 187 187 187 180 133 36
45724-216 158 10 236 178 12 239 182 13 236 178 12
45725-230 174 11 226 170 11 226 170 11 230 174 11
45726-236 178 12 242 186 14 246 190 14 246 190 14
45727-246 190 14 246 190 14 246 186 14 239 182 13
45728-206 162 42 106 106 106 66 66 66 34 34 34
45729- 14 14 14 6 6 6 0 0 0 0 0 0
45730- 0 0 0 0 0 0 0 0 0 0 0 0
45731- 0 0 0 0 0 0 0 0 0 0 0 0
45732- 0 0 0 0 0 0 0 0 0 6 6 6
45733- 26 26 26 70 70 70 163 133 67 213 154 11
45734-236 178 12 246 190 14 246 190 14 246 190 14
45735-246 190 14 246 190 14 246 190 14 246 190 14
45736-246 190 14 246 190 14 246 190 14 246 190 14
45737-246 190 14 246 190 14 246 190 14 241 196 14
45738-190 146 13 18 14 6 2 2 6 2 2 6
45739- 46 46 46 246 246 246 253 253 253 253 253 253
45740-253 253 253 253 253 253 253 253 253 253 253 253
45741-253 253 253 253 253 253 253 253 253 253 253 253
45742-253 253 253 253 253 253 253 253 253 253 253 253
45743-253 253 253 221 221 221 86 86 86 156 107 11
45744-216 158 10 236 178 12 242 186 14 246 186 14
45745-242 186 14 239 182 13 239 182 13 242 186 14
45746-242 186 14 246 186 14 246 190 14 246 190 14
45747-246 190 14 246 190 14 246 190 14 246 190 14
45748-242 186 14 225 175 15 142 122 72 66 66 66
45749- 30 30 30 10 10 10 0 0 0 0 0 0
45750- 0 0 0 0 0 0 0 0 0 0 0 0
45751- 0 0 0 0 0 0 0 0 0 0 0 0
45752- 0 0 0 0 0 0 0 0 0 6 6 6
45753- 26 26 26 70 70 70 163 133 67 210 150 10
45754-236 178 12 246 190 14 246 190 14 246 190 14
45755-246 190 14 246 190 14 246 190 14 246 190 14
45756-246 190 14 246 190 14 246 190 14 246 190 14
45757-246 190 14 246 190 14 246 190 14 246 190 14
45758-232 195 16 121 92 8 34 34 34 106 106 106
45759-221 221 221 253 253 253 253 253 253 253 253 253
45760-253 253 253 253 253 253 253 253 253 253 253 253
45761-253 253 253 253 253 253 253 253 253 253 253 253
45762-253 253 253 253 253 253 253 253 253 253 253 253
45763-242 242 242 82 82 82 18 14 6 163 110 8
45764-216 158 10 236 178 12 242 186 14 246 190 14
45765-246 190 14 246 190 14 246 190 14 246 190 14
45766-246 190 14 246 190 14 246 190 14 246 190 14
45767-246 190 14 246 190 14 246 190 14 246 190 14
45768-246 190 14 246 190 14 242 186 14 163 133 67
45769- 46 46 46 18 18 18 6 6 6 0 0 0
45770- 0 0 0 0 0 0 0 0 0 0 0 0
45771- 0 0 0 0 0 0 0 0 0 0 0 0
45772- 0 0 0 0 0 0 0 0 0 10 10 10
45773- 30 30 30 78 78 78 163 133 67 210 150 10
45774-236 178 12 246 186 14 246 190 14 246 190 14
45775-246 190 14 246 190 14 246 190 14 246 190 14
45776-246 190 14 246 190 14 246 190 14 246 190 14
45777-246 190 14 246 190 14 246 190 14 246 190 14
45778-241 196 14 215 174 15 190 178 144 253 253 253
45779-253 253 253 253 253 253 253 253 253 253 253 253
45780-253 253 253 253 253 253 253 253 253 253 253 253
45781-253 253 253 253 253 253 253 253 253 253 253 253
45782-253 253 253 253 253 253 253 253 253 218 218 218
45783- 58 58 58 2 2 6 22 18 6 167 114 7
45784-216 158 10 236 178 12 246 186 14 246 190 14
45785-246 190 14 246 190 14 246 190 14 246 190 14
45786-246 190 14 246 190 14 246 190 14 246 190 14
45787-246 190 14 246 190 14 246 190 14 246 190 14
45788-246 190 14 246 186 14 242 186 14 190 150 46
45789- 54 54 54 22 22 22 6 6 6 0 0 0
45790- 0 0 0 0 0 0 0 0 0 0 0 0
45791- 0 0 0 0 0 0 0 0 0 0 0 0
45792- 0 0 0 0 0 0 0 0 0 14 14 14
45793- 38 38 38 86 86 86 180 133 36 213 154 11
45794-236 178 12 246 186 14 246 190 14 246 190 14
45795-246 190 14 246 190 14 246 190 14 246 190 14
45796-246 190 14 246 190 14 246 190 14 246 190 14
45797-246 190 14 246 190 14 246 190 14 246 190 14
45798-246 190 14 232 195 16 190 146 13 214 214 214
45799-253 253 253 253 253 253 253 253 253 253 253 253
45800-253 253 253 253 253 253 253 253 253 253 253 253
45801-253 253 253 253 253 253 253 253 253 253 253 253
45802-253 253 253 250 250 250 170 170 170 26 26 26
45803- 2 2 6 2 2 6 37 26 9 163 110 8
45804-219 162 10 239 182 13 246 186 14 246 190 14
45805-246 190 14 246 190 14 246 190 14 246 190 14
45806-246 190 14 246 190 14 246 190 14 246 190 14
45807-246 190 14 246 190 14 246 190 14 246 190 14
45808-246 186 14 236 178 12 224 166 10 142 122 72
45809- 46 46 46 18 18 18 6 6 6 0 0 0
45810- 0 0 0 0 0 0 0 0 0 0 0 0
45811- 0 0 0 0 0 0 0 0 0 0 0 0
45812- 0 0 0 0 0 0 6 6 6 18 18 18
45813- 50 50 50 109 106 95 192 133 9 224 166 10
45814-242 186 14 246 190 14 246 190 14 246 190 14
45815-246 190 14 246 190 14 246 190 14 246 190 14
45816-246 190 14 246 190 14 246 190 14 246 190 14
45817-246 190 14 246 190 14 246 190 14 246 190 14
45818-242 186 14 226 184 13 210 162 10 142 110 46
45819-226 226 226 253 253 253 253 253 253 253 253 253
45820-253 253 253 253 253 253 253 253 253 253 253 253
45821-253 253 253 253 253 253 253 253 253 253 253 253
45822-198 198 198 66 66 66 2 2 6 2 2 6
45823- 2 2 6 2 2 6 50 34 6 156 107 11
45824-219 162 10 239 182 13 246 186 14 246 190 14
45825-246 190 14 246 190 14 246 190 14 246 190 14
45826-246 190 14 246 190 14 246 190 14 246 190 14
45827-246 190 14 246 190 14 246 190 14 242 186 14
45828-234 174 13 213 154 11 154 122 46 66 66 66
45829- 30 30 30 10 10 10 0 0 0 0 0 0
45830- 0 0 0 0 0 0 0 0 0 0 0 0
45831- 0 0 0 0 0 0 0 0 0 0 0 0
45832- 0 0 0 0 0 0 6 6 6 22 22 22
45833- 58 58 58 154 121 60 206 145 10 234 174 13
45834-242 186 14 246 186 14 246 190 14 246 190 14
45835-246 190 14 246 190 14 246 190 14 246 190 14
45836-246 190 14 246 190 14 246 190 14 246 190 14
45837-246 190 14 246 190 14 246 190 14 246 190 14
45838-246 186 14 236 178 12 210 162 10 163 110 8
45839- 61 42 6 138 138 138 218 218 218 250 250 250
45840-253 253 253 253 253 253 253 253 253 250 250 250
45841-242 242 242 210 210 210 144 144 144 66 66 66
45842- 6 6 6 2 2 6 2 2 6 2 2 6
45843- 2 2 6 2 2 6 61 42 6 163 110 8
45844-216 158 10 236 178 12 246 190 14 246 190 14
45845-246 190 14 246 190 14 246 190 14 246 190 14
45846-246 190 14 246 190 14 246 190 14 246 190 14
45847-246 190 14 239 182 13 230 174 11 216 158 10
45848-190 142 34 124 112 88 70 70 70 38 38 38
45849- 18 18 18 6 6 6 0 0 0 0 0 0
45850- 0 0 0 0 0 0 0 0 0 0 0 0
45851- 0 0 0 0 0 0 0 0 0 0 0 0
45852- 0 0 0 0 0 0 6 6 6 22 22 22
45853- 62 62 62 168 124 44 206 145 10 224 166 10
45854-236 178 12 239 182 13 242 186 14 242 186 14
45855-246 186 14 246 190 14 246 190 14 246 190 14
45856-246 190 14 246 190 14 246 190 14 246 190 14
45857-246 190 14 246 190 14 246 190 14 246 190 14
45858-246 190 14 236 178 12 216 158 10 175 118 6
45859- 80 54 7 2 2 6 6 6 6 30 30 30
45860- 54 54 54 62 62 62 50 50 50 38 38 38
45861- 14 14 14 2 2 6 2 2 6 2 2 6
45862- 2 2 6 2 2 6 2 2 6 2 2 6
45863- 2 2 6 6 6 6 80 54 7 167 114 7
45864-213 154 11 236 178 12 246 190 14 246 190 14
45865-246 190 14 246 190 14 246 190 14 246 190 14
45866-246 190 14 242 186 14 239 182 13 239 182 13
45867-230 174 11 210 150 10 174 135 50 124 112 88
45868- 82 82 82 54 54 54 34 34 34 18 18 18
45869- 6 6 6 0 0 0 0 0 0 0 0 0
45870- 0 0 0 0 0 0 0 0 0 0 0 0
45871- 0 0 0 0 0 0 0 0 0 0 0 0
45872- 0 0 0 0 0 0 6 6 6 18 18 18
45873- 50 50 50 158 118 36 192 133 9 200 144 11
45874-216 158 10 219 162 10 224 166 10 226 170 11
45875-230 174 11 236 178 12 239 182 13 239 182 13
45876-242 186 14 246 186 14 246 190 14 246 190 14
45877-246 190 14 246 190 14 246 190 14 246 190 14
45878-246 186 14 230 174 11 210 150 10 163 110 8
45879-104 69 6 10 10 10 2 2 6 2 2 6
45880- 2 2 6 2 2 6 2 2 6 2 2 6
45881- 2 2 6 2 2 6 2 2 6 2 2 6
45882- 2 2 6 2 2 6 2 2 6 2 2 6
45883- 2 2 6 6 6 6 91 60 6 167 114 7
45884-206 145 10 230 174 11 242 186 14 246 190 14
45885-246 190 14 246 190 14 246 186 14 242 186 14
45886-239 182 13 230 174 11 224 166 10 213 154 11
45887-180 133 36 124 112 88 86 86 86 58 58 58
45888- 38 38 38 22 22 22 10 10 10 6 6 6
45889- 0 0 0 0 0 0 0 0 0 0 0 0
45890- 0 0 0 0 0 0 0 0 0 0 0 0
45891- 0 0 0 0 0 0 0 0 0 0 0 0
45892- 0 0 0 0 0 0 0 0 0 14 14 14
45893- 34 34 34 70 70 70 138 110 50 158 118 36
45894-167 114 7 180 123 7 192 133 9 197 138 11
45895-200 144 11 206 145 10 213 154 11 219 162 10
45896-224 166 10 230 174 11 239 182 13 242 186 14
45897-246 186 14 246 186 14 246 186 14 246 186 14
45898-239 182 13 216 158 10 185 133 11 152 99 6
45899-104 69 6 18 14 6 2 2 6 2 2 6
45900- 2 2 6 2 2 6 2 2 6 2 2 6
45901- 2 2 6 2 2 6 2 2 6 2 2 6
45902- 2 2 6 2 2 6 2 2 6 2 2 6
45903- 2 2 6 6 6 6 80 54 7 152 99 6
45904-192 133 9 219 162 10 236 178 12 239 182 13
45905-246 186 14 242 186 14 239 182 13 236 178 12
45906-224 166 10 206 145 10 192 133 9 154 121 60
45907- 94 94 94 62 62 62 42 42 42 22 22 22
45908- 14 14 14 6 6 6 0 0 0 0 0 0
45909- 0 0 0 0 0 0 0 0 0 0 0 0
45910- 0 0 0 0 0 0 0 0 0 0 0 0
45911- 0 0 0 0 0 0 0 0 0 0 0 0
45912- 0 0 0 0 0 0 0 0 0 6 6 6
45913- 18 18 18 34 34 34 58 58 58 78 78 78
45914-101 98 89 124 112 88 142 110 46 156 107 11
45915-163 110 8 167 114 7 175 118 6 180 123 7
45916-185 133 11 197 138 11 210 150 10 219 162 10
45917-226 170 11 236 178 12 236 178 12 234 174 13
45918-219 162 10 197 138 11 163 110 8 130 83 6
45919- 91 60 6 10 10 10 2 2 6 2 2 6
45920- 18 18 18 38 38 38 38 38 38 38 38 38
45921- 38 38 38 38 38 38 38 38 38 38 38 38
45922- 38 38 38 38 38 38 26 26 26 2 2 6
45923- 2 2 6 6 6 6 70 47 6 137 92 6
45924-175 118 6 200 144 11 219 162 10 230 174 11
45925-234 174 13 230 174 11 219 162 10 210 150 10
45926-192 133 9 163 110 8 124 112 88 82 82 82
45927- 50 50 50 30 30 30 14 14 14 6 6 6
45928- 0 0 0 0 0 0 0 0 0 0 0 0
45929- 0 0 0 0 0 0 0 0 0 0 0 0
45930- 0 0 0 0 0 0 0 0 0 0 0 0
45931- 0 0 0 0 0 0 0 0 0 0 0 0
45932- 0 0 0 0 0 0 0 0 0 0 0 0
45933- 6 6 6 14 14 14 22 22 22 34 34 34
45934- 42 42 42 58 58 58 74 74 74 86 86 86
45935-101 98 89 122 102 70 130 98 46 121 87 25
45936-137 92 6 152 99 6 163 110 8 180 123 7
45937-185 133 11 197 138 11 206 145 10 200 144 11
45938-180 123 7 156 107 11 130 83 6 104 69 6
45939- 50 34 6 54 54 54 110 110 110 101 98 89
45940- 86 86 86 82 82 82 78 78 78 78 78 78
45941- 78 78 78 78 78 78 78 78 78 78 78 78
45942- 78 78 78 82 82 82 86 86 86 94 94 94
45943-106 106 106 101 101 101 86 66 34 124 80 6
45944-156 107 11 180 123 7 192 133 9 200 144 11
45945-206 145 10 200 144 11 192 133 9 175 118 6
45946-139 102 15 109 106 95 70 70 70 42 42 42
45947- 22 22 22 10 10 10 0 0 0 0 0 0
45948- 0 0 0 0 0 0 0 0 0 0 0 0
45949- 0 0 0 0 0 0 0 0 0 0 0 0
45950- 0 0 0 0 0 0 0 0 0 0 0 0
45951- 0 0 0 0 0 0 0 0 0 0 0 0
45952- 0 0 0 0 0 0 0 0 0 0 0 0
45953- 0 0 0 0 0 0 6 6 6 10 10 10
45954- 14 14 14 22 22 22 30 30 30 38 38 38
45955- 50 50 50 62 62 62 74 74 74 90 90 90
45956-101 98 89 112 100 78 121 87 25 124 80 6
45957-137 92 6 152 99 6 152 99 6 152 99 6
45958-138 86 6 124 80 6 98 70 6 86 66 30
45959-101 98 89 82 82 82 58 58 58 46 46 46
45960- 38 38 38 34 34 34 34 34 34 34 34 34
45961- 34 34 34 34 34 34 34 34 34 34 34 34
45962- 34 34 34 34 34 34 38 38 38 42 42 42
45963- 54 54 54 82 82 82 94 86 76 91 60 6
45964-134 86 6 156 107 11 167 114 7 175 118 6
45965-175 118 6 167 114 7 152 99 6 121 87 25
45966-101 98 89 62 62 62 34 34 34 18 18 18
45967- 6 6 6 0 0 0 0 0 0 0 0 0
45968- 0 0 0 0 0 0 0 0 0 0 0 0
45969- 0 0 0 0 0 0 0 0 0 0 0 0
45970- 0 0 0 0 0 0 0 0 0 0 0 0
45971- 0 0 0 0 0 0 0 0 0 0 0 0
45972- 0 0 0 0 0 0 0 0 0 0 0 0
45973- 0 0 0 0 0 0 0 0 0 0 0 0
45974- 0 0 0 6 6 6 6 6 6 10 10 10
45975- 18 18 18 22 22 22 30 30 30 42 42 42
45976- 50 50 50 66 66 66 86 86 86 101 98 89
45977-106 86 58 98 70 6 104 69 6 104 69 6
45978-104 69 6 91 60 6 82 62 34 90 90 90
45979- 62 62 62 38 38 38 22 22 22 14 14 14
45980- 10 10 10 10 10 10 10 10 10 10 10 10
45981- 10 10 10 10 10 10 6 6 6 10 10 10
45982- 10 10 10 10 10 10 10 10 10 14 14 14
45983- 22 22 22 42 42 42 70 70 70 89 81 66
45984- 80 54 7 104 69 6 124 80 6 137 92 6
45985-134 86 6 116 81 8 100 82 52 86 86 86
45986- 58 58 58 30 30 30 14 14 14 6 6 6
45987- 0 0 0 0 0 0 0 0 0 0 0 0
45988- 0 0 0 0 0 0 0 0 0 0 0 0
45989- 0 0 0 0 0 0 0 0 0 0 0 0
45990- 0 0 0 0 0 0 0 0 0 0 0 0
45991- 0 0 0 0 0 0 0 0 0 0 0 0
45992- 0 0 0 0 0 0 0 0 0 0 0 0
45993- 0 0 0 0 0 0 0 0 0 0 0 0
45994- 0 0 0 0 0 0 0 0 0 0 0 0
45995- 0 0 0 6 6 6 10 10 10 14 14 14
45996- 18 18 18 26 26 26 38 38 38 54 54 54
45997- 70 70 70 86 86 86 94 86 76 89 81 66
45998- 89 81 66 86 86 86 74 74 74 50 50 50
45999- 30 30 30 14 14 14 6 6 6 0 0 0
46000- 0 0 0 0 0 0 0 0 0 0 0 0
46001- 0 0 0 0 0 0 0 0 0 0 0 0
46002- 0 0 0 0 0 0 0 0 0 0 0 0
46003- 6 6 6 18 18 18 34 34 34 58 58 58
46004- 82 82 82 89 81 66 89 81 66 89 81 66
46005- 94 86 66 94 86 76 74 74 74 50 50 50
46006- 26 26 26 14 14 14 6 6 6 0 0 0
46007- 0 0 0 0 0 0 0 0 0 0 0 0
46008- 0 0 0 0 0 0 0 0 0 0 0 0
46009- 0 0 0 0 0 0 0 0 0 0 0 0
46010- 0 0 0 0 0 0 0 0 0 0 0 0
46011- 0 0 0 0 0 0 0 0 0 0 0 0
46012- 0 0 0 0 0 0 0 0 0 0 0 0
46013- 0 0 0 0 0 0 0 0 0 0 0 0
46014- 0 0 0 0 0 0 0 0 0 0 0 0
46015- 0 0 0 0 0 0 0 0 0 0 0 0
46016- 6 6 6 6 6 6 14 14 14 18 18 18
46017- 30 30 30 38 38 38 46 46 46 54 54 54
46018- 50 50 50 42 42 42 30 30 30 18 18 18
46019- 10 10 10 0 0 0 0 0 0 0 0 0
46020- 0 0 0 0 0 0 0 0 0 0 0 0
46021- 0 0 0 0 0 0 0 0 0 0 0 0
46022- 0 0 0 0 0 0 0 0 0 0 0 0
46023- 0 0 0 6 6 6 14 14 14 26 26 26
46024- 38 38 38 50 50 50 58 58 58 58 58 58
46025- 54 54 54 42 42 42 30 30 30 18 18 18
46026- 10 10 10 0 0 0 0 0 0 0 0 0
46027- 0 0 0 0 0 0 0 0 0 0 0 0
46028- 0 0 0 0 0 0 0 0 0 0 0 0
46029- 0 0 0 0 0 0 0 0 0 0 0 0
46030- 0 0 0 0 0 0 0 0 0 0 0 0
46031- 0 0 0 0 0 0 0 0 0 0 0 0
46032- 0 0 0 0 0 0 0 0 0 0 0 0
46033- 0 0 0 0 0 0 0 0 0 0 0 0
46034- 0 0 0 0 0 0 0 0 0 0 0 0
46035- 0 0 0 0 0 0 0 0 0 0 0 0
46036- 0 0 0 0 0 0 0 0 0 6 6 6
46037- 6 6 6 10 10 10 14 14 14 18 18 18
46038- 18 18 18 14 14 14 10 10 10 6 6 6
46039- 0 0 0 0 0 0 0 0 0 0 0 0
46040- 0 0 0 0 0 0 0 0 0 0 0 0
46041- 0 0 0 0 0 0 0 0 0 0 0 0
46042- 0 0 0 0 0 0 0 0 0 0 0 0
46043- 0 0 0 0 0 0 0 0 0 6 6 6
46044- 14 14 14 18 18 18 22 22 22 22 22 22
46045- 18 18 18 14 14 14 10 10 10 6 6 6
46046- 0 0 0 0 0 0 0 0 0 0 0 0
46047- 0 0 0 0 0 0 0 0 0 0 0 0
46048- 0 0 0 0 0 0 0 0 0 0 0 0
46049- 0 0 0 0 0 0 0 0 0 0 0 0
46050- 0 0 0 0 0 0 0 0 0 0 0 0
[424 added lines elided: ASCII PPM raster rows for the replacement image data supplied by the patch, mostly a near-uniform "4 4 4" background with scattered colour detail.]
46475+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
46476+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
46477+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
46478+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
46479+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
46480+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
46481+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
46482+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
46483+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46484+4 4 4 4 4 4
46485+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
46486+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
46487+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
46488+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
46489+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
46490+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
46491+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
46492+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
46493+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
46494+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
46495+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
46496+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46497+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46498+4 4 4 4 4 4
46499+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
46500+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
46501+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
46502+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
46503+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
46504+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
46505+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
46506+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
46507+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
46508+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
46509+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
46510+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46511+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46512+4 4 4 4 4 4
46513+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
46514+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
46515+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
46516+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
46517+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
46518+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
46519+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
46520+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
46521+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
46522+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
46523+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46524+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46525+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46526+4 4 4 4 4 4
46527+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
46528+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
46529+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
46530+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
46531+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
46532+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
46533+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
46534+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
46535+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
46536+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
46537+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
46538+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46539+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46540+4 4 4 4 4 4
46541+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
46542+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
46543+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
46544+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
46545+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
46546+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
46547+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
46548+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
46549+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
46550+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
46551+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
46552+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46553+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46554+4 4 4 4 4 4
46555+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
46556+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
46557+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
46558+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
46559+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
46560+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
46561+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
46562+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
46563+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
46564+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
46565+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46566+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46567+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46568+4 4 4 4 4 4
46569+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
46570+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
46571+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
46572+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
46573+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
46574+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
46575+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
46576+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
46577+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
46578+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
46579+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46580+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46581+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46582+4 4 4 4 4 4
46583+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
46584+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
46585+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
46586+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
46587+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
46588+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
46589+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
46590+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
46591+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
46592+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
46593+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46594+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46595+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46596+4 4 4 4 4 4
46597+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
46598+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
46599+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
46600+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
46601+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
46602+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
46603+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
46604+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
46605+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
46606+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46607+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46608+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46609+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46610+4 4 4 4 4 4
46611+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
46612+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
46613+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
46614+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
46615+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
46616+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
46617+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
46618+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
46619+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
46620+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46621+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46622+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46623+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46624+4 4 4 4 4 4
46625+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
46626+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
46627+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
46628+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
46629+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
46630+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
46631+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
46632+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
46633+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
46634+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46635+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46636+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46637+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46638+4 4 4 4 4 4
46639+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
46640+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
46641+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
46642+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
46643+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
46644+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
46645+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
46646+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
46647+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
46648+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46649+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46650+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46651+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46652+4 4 4 4 4 4
46653+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
46654+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
46655+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
46656+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
46657+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
46658+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
46659+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
46660+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
46661+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
46662+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46663+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46664+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46665+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46666+4 4 4 4 4 4
46667+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
46668+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
46669+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
46670+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
46671+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
46672+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
46673+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
46674+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
46675+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
46676+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46677+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46678+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46679+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46680+4 4 4 4 4 4
46681+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
46682+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
46683+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
46684+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
46685+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
46686+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
46687+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
46688+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
46689+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
46690+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46691+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46692+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46693+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46694+4 4 4 4 4 4
46695+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
46696+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
46697+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
46698+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
46699+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
46700+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
46701+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
46702+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
46703+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
46704+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46705+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46706+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46707+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46708+4 4 4 4 4 4
46709+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
46710+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
46711+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
46712+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
46713+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
46714+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
46715+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
46716+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
46717+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
46718+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46719+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46720+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46721+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46722+4 4 4 4 4 4
46723+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
46724+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
46725+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
46726+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
46727+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
46728+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
46729+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
46730+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
46731+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
46732+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46733+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46734+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46735+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46736+4 4 4 4 4 4
46737+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
46738+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
46739+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
46740+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
46741+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
46742+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
46743+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
46744+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
46745+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
46746+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46747+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46748+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46749+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46750+4 4 4 4 4 4
46751+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
46752+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
46753+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
46754+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
46755+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
46756+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
46757+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
46758+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
46759+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
46760+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46761+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46762+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46763+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46764+4 4 4 4 4 4
46765+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
46766+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
46767+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
46768+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
46769+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
46770+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
46771+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
46772+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
46773+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
46774+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46775+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46776+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46777+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46778+4 4 4 4 4 4
46779+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
46780+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
46781+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
46782+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
46783+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
46784+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
46785+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
46786+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
46787+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
46788+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46789+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46790+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46791+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46792+4 4 4 4 4 4
46793+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
46794+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
46795+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
46796+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
46797+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
46798+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
46799+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
46800+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
46801+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
46802+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46803+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46804+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46805+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46806+4 4 4 4 4 4
46807+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
46808+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
46809+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
46810+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
46811+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
46812+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
46813+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
46814+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
46815+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
46816+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
46817+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46818+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46819+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46820+4 4 4 4 4 4
46821+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
46822+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
46823+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
46824+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
46825+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
46826+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
46827+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
46828+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
46829+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
46830+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
46831+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46832+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46833+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46834+4 4 4 4 4 4
46835+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
46836+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
46837+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
46838+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
46839+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
46840+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
46841+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46842+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
46843+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
46844+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
46845+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
46846+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46847+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46848+4 4 4 4 4 4
46849+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
46850+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
46851+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
46852+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
46853+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
46854+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
46855+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
46856+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
46857+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
46858+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
46859+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46860+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46861+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46862+4 4 4 4 4 4
46863+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
46864+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
46865+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
46866+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
46867+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
46868+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
46869+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
46870+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
46871+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
46872+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
46873+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46874+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46875+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46876+4 4 4 4 4 4
46877+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
46878+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
46879+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
46880+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
46881+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
46882+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
46883+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
46884+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
46885+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
46886+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
46887+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46888+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46889+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46890+4 4 4 4 4 4
46891+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
46892+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
46893+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
46894+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
46895+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
46896+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
46897+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
46898+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
46899+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
46900+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
46901+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46902+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46903+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46904+4 4 4 4 4 4
46905+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
46906+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
46907+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
46908+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
46909+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
46910+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
46911+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
46912+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
46913+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
46914+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
46915+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46916+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46917+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46918+4 4 4 4 4 4
46919+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
46920+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
46921+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
46922+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
46923+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
46924+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
46925+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
46926+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
46927+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
46928+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46929+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46930+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46931+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46932+4 4 4 4 4 4
46933+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
46934+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
46935+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
46936+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
46937+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
46938+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
46939+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
46940+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
46941+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
46942+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46943+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46944+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46945+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46946+4 4 4 4 4 4
46947+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
46948+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
46949+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
46950+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
46951+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
46952+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
46953+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
46954+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
46955+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46956+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46957+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46958+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46959+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46960+4 4 4 4 4 4
46961+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
46962+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
46963+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
46964+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
46965+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
46966+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
46967+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
46968+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
46969+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46970+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46971+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46972+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46973+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46974+4 4 4 4 4 4
46975+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
46976+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
46977+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
46978+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
46979+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
46980+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
46981+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
46982+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
46983+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46984+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46985+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46986+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46987+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46988+4 4 4 4 4 4
46989+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
46990+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
46991+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
46992+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
46993+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
46994+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
46995+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
46996+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
46997+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46998+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46999+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47000+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47001+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47002+4 4 4 4 4 4
47003+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47004+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
47005+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
47006+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
47007+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
47008+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
47009+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
47010+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
47011+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47012+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47013+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47014+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47015+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47016+4 4 4 4 4 4
47017+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47018+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
47019+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
47020+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
47021+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
47022+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
47023+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
47024+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
47025+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47026+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47027+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47028+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47029+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47030+4 4 4 4 4 4
47031+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47032+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47033+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
47034+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
47035+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
47036+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
47037+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
47038+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
47039+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47040+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47041+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47042+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47043+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47044+4 4 4 4 4 4
47045+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47046+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47047+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
47048+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
47049+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
47050+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
47051+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
47052+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47053+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47054+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47055+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47056+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47057+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47058+4 4 4 4 4 4
47059+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47060+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47061+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47062+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
47063+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
47064+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
47065+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
47066+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47067+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47068+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47069+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47070+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47071+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47072+4 4 4 4 4 4
47073+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47074+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47075+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47076+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
47077+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
47078+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
47079+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
47080+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47081+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47082+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47083+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47084+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47085+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47086+4 4 4 4 4 4
47087+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47088+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47089+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47090+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
47091+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
47092+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
47093+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
47094+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47095+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47096+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47097+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47098+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47099+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47100+4 4 4 4 4 4
47101+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47102+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47103+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47104+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
47105+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
47106+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
47107+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47108+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47109+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47110+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47111+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47112+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47113+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47114+4 4 4 4 4 4
47115+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47116+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47117+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47118+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47119+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
47120+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
47121+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
47122+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47123+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47124+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47125+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47126+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47127+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47128+4 4 4 4 4 4
47129+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47130+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47131+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47132+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47133+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
47134+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
47135+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47136+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47137+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47138+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47139+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47140+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47141+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47142+4 4 4 4 4 4
47143+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47144+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47145+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47146+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47147+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
47148+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
47149+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47150+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47151+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47152+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47153+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47154+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47155+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47156+4 4 4 4 4 4
47157+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47158+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47159+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47160+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47161+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
47162+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
47163+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47164+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47165+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47166+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47167+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47168+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47169+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47170+4 4 4 4 4 4
47171diff --git a/drivers/video/mb862xx/mb862xxfb_accel.c b/drivers/video/mb862xx/mb862xxfb_accel.c
47172index fe92eed..106e085 100644
47173--- a/drivers/video/mb862xx/mb862xxfb_accel.c
47174+++ b/drivers/video/mb862xx/mb862xxfb_accel.c
47175@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
47176 struct mb862xxfb_par *par = info->par;
47177
47178 if (info->var.bits_per_pixel == 32) {
47179- info->fbops->fb_fillrect = cfb_fillrect;
47180- info->fbops->fb_copyarea = cfb_copyarea;
47181- info->fbops->fb_imageblit = cfb_imageblit;
47182+ pax_open_kernel();
47183+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
47184+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
47185+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
47186+ pax_close_kernel();
47187 } else {
47188 outreg(disp, GC_L0EM, 3);
47189- info->fbops->fb_fillrect = mb86290fb_fillrect;
47190- info->fbops->fb_copyarea = mb86290fb_copyarea;
47191- info->fbops->fb_imageblit = mb86290fb_imageblit;
47192+ pax_open_kernel();
47193+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
47194+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
47195+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
47196+ pax_close_kernel();
47197 }
47198 outreg(draw, GDC_REG_DRAW_BASE, 0);
47199 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
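This mb862xx hunk, and the nvidia, s1d13xxx, smscufx, udlfb, uvesafb and vesafb hunks that follow, all apply the same PaX/grsecurity idiom: the constify plugin moves function-pointer tables such as struct fb_ops into read-only memory, so a driver that legitimately swaps handlers at runtime must lift kernel write protection around the store. A minimal sketch of the idiom, not part of the patch (the helper name is hypothetical; pax_open_kernel()/pax_close_kernel() are the PaX primitives the patch itself uses):

/*
 * Sketch only.  pax_open_kernel() temporarily re-enables kernel writes
 * (on x86 typically by toggling CR0.WP), the cast through (void **)
 * discards the const qualifier on the slot, and pax_close_kernel()
 * restores write protection.
 */
static void install_sw_accel_ops(struct fb_info *info)
{
	pax_open_kernel();
	*(void **)&info->fbops->fb_fillrect  = cfb_fillrect;
	*(void **)&info->fbops->fb_copyarea  = cfb_copyarea;
	*(void **)&info->fbops->fb_imageblit = cfb_imageblit;
	pax_close_kernel();
}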
47200diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
47201index ff22871..b129bed 100644
47202--- a/drivers/video/nvidia/nvidia.c
47203+++ b/drivers/video/nvidia/nvidia.c
47204@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
47205 info->fix.line_length = (info->var.xres_virtual *
47206 info->var.bits_per_pixel) >> 3;
47207 if (info->var.accel_flags) {
47208- info->fbops->fb_imageblit = nvidiafb_imageblit;
47209- info->fbops->fb_fillrect = nvidiafb_fillrect;
47210- info->fbops->fb_copyarea = nvidiafb_copyarea;
47211- info->fbops->fb_sync = nvidiafb_sync;
47212+ pax_open_kernel();
47213+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
47214+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
47215+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
47216+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
47217+ pax_close_kernel();
47218 info->pixmap.scan_align = 4;
47219 info->flags &= ~FBINFO_HWACCEL_DISABLED;
47220 info->flags |= FBINFO_READS_FAST;
47221 NVResetGraphics(info);
47222 } else {
47223- info->fbops->fb_imageblit = cfb_imageblit;
47224- info->fbops->fb_fillrect = cfb_fillrect;
47225- info->fbops->fb_copyarea = cfb_copyarea;
47226- info->fbops->fb_sync = NULL;
47227+ pax_open_kernel();
47228+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
47229+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
47230+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
47231+ *(void **)&info->fbops->fb_sync = NULL;
47232+ pax_close_kernel();
47233 info->pixmap.scan_align = 1;
47234 info->flags |= FBINFO_HWACCEL_DISABLED;
47235 info->flags &= ~FBINFO_READS_FAST;
47236@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
47237 info->pixmap.size = 8 * 1024;
47238 info->pixmap.flags = FB_PIXMAP_SYSTEM;
47239
47240- if (!hwcur)
47241- info->fbops->fb_cursor = NULL;
47242+ if (!hwcur) {
47243+ pax_open_kernel();
47244+ *(void **)&info->fbops->fb_cursor = NULL;
47245+ pax_close_kernel();
47246+ }
47247
47248 info->var.accel_flags = (!noaccel);
47249
47250diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
47251index 76d9053..dec2bfd 100644
47252--- a/drivers/video/s1d13xxxfb.c
47253+++ b/drivers/video/s1d13xxxfb.c
47254@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
47255
47256 switch(prod_id) {
47257 case S1D13506_PROD_ID: /* activate acceleration */
47258- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
47259- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
47260+ pax_open_kernel();
47261+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
47262+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
47263+ pax_close_kernel();
47264 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
47265 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
47266 break;
47267diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
47268index 97bd662..39fab85 100644
47269--- a/drivers/video/smscufx.c
47270+++ b/drivers/video/smscufx.c
47271@@ -1171,7 +1171,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
47272 fb_deferred_io_cleanup(info);
47273 kfree(info->fbdefio);
47274 info->fbdefio = NULL;
47275- info->fbops->fb_mmap = ufx_ops_mmap;
47276+ pax_open_kernel();
47277+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
47278+ pax_close_kernel();
47279 }
47280
47281 pr_debug("released /dev/fb%d user=%d count=%d",
47282diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
47283index 86d449e..8e04dc5 100644
47284--- a/drivers/video/udlfb.c
47285+++ b/drivers/video/udlfb.c
47286@@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
47287 dlfb_urb_completion(urb);
47288
47289 error:
47290- atomic_add(bytes_sent, &dev->bytes_sent);
47291- atomic_add(bytes_identical, &dev->bytes_identical);
47292- atomic_add(width*height*2, &dev->bytes_rendered);
47293+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
47294+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
47295+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
47296 end_cycles = get_cycles();
47297- atomic_add(((unsigned int) ((end_cycles - start_cycles)
47298+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
47299 >> 10)), /* Kcycles */
47300 &dev->cpu_kcycles_used);
47301
47302@@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
47303 dlfb_urb_completion(urb);
47304
47305 error:
47306- atomic_add(bytes_sent, &dev->bytes_sent);
47307- atomic_add(bytes_identical, &dev->bytes_identical);
47308- atomic_add(bytes_rendered, &dev->bytes_rendered);
47309+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
47310+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
47311+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
47312 end_cycles = get_cycles();
47313- atomic_add(((unsigned int) ((end_cycles - start_cycles)
47314+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
47315 >> 10)), /* Kcycles */
47316 &dev->cpu_kcycles_used);
47317 }
47318@@ -989,7 +989,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
47319 fb_deferred_io_cleanup(info);
47320 kfree(info->fbdefio);
47321 info->fbdefio = NULL;
47322- info->fbops->fb_mmap = dlfb_ops_mmap;
47323+ pax_open_kernel();
47324+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
47325+ pax_close_kernel();
47326 }
47327
47328 pr_warn("released /dev/fb%d user=%d count=%d\n",
47329@@ -1372,7 +1374,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
47330 struct fb_info *fb_info = dev_get_drvdata(fbdev);
47331 struct dlfb_data *dev = fb_info->par;
47332 return snprintf(buf, PAGE_SIZE, "%u\n",
47333- atomic_read(&dev->bytes_rendered));
47334+ atomic_read_unchecked(&dev->bytes_rendered));
47335 }
47336
47337 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
47338@@ -1380,7 +1382,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
47339 struct fb_info *fb_info = dev_get_drvdata(fbdev);
47340 struct dlfb_data *dev = fb_info->par;
47341 return snprintf(buf, PAGE_SIZE, "%u\n",
47342- atomic_read(&dev->bytes_identical));
47343+ atomic_read_unchecked(&dev->bytes_identical));
47344 }
47345
47346 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
47347@@ -1388,7 +1390,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
47348 struct fb_info *fb_info = dev_get_drvdata(fbdev);
47349 struct dlfb_data *dev = fb_info->par;
47350 return snprintf(buf, PAGE_SIZE, "%u\n",
47351- atomic_read(&dev->bytes_sent));
47352+ atomic_read_unchecked(&dev->bytes_sent));
47353 }
47354
47355 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
47356@@ -1396,7 +1398,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
47357 struct fb_info *fb_info = dev_get_drvdata(fbdev);
47358 struct dlfb_data *dev = fb_info->par;
47359 return snprintf(buf, PAGE_SIZE, "%u\n",
47360- atomic_read(&dev->cpu_kcycles_used));
47361+ atomic_read_unchecked(&dev->cpu_kcycles_used));
47362 }
47363
47364 static ssize_t edid_show(
47365@@ -1456,10 +1458,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
47366 struct fb_info *fb_info = dev_get_drvdata(fbdev);
47367 struct dlfb_data *dev = fb_info->par;
47368
47369- atomic_set(&dev->bytes_rendered, 0);
47370- atomic_set(&dev->bytes_identical, 0);
47371- atomic_set(&dev->bytes_sent, 0);
47372- atomic_set(&dev->cpu_kcycles_used, 0);
47373+ atomic_set_unchecked(&dev->bytes_rendered, 0);
47374+ atomic_set_unchecked(&dev->bytes_identical, 0);
47375+ atomic_set_unchecked(&dev->bytes_sent, 0);
47376+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
47377
47378 return count;
47379 }
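The udlfb conversions above are the opt-out half of PaX's REFCOUNT hardening: under it, plain atomic_t operations trap on overflow so reference counts cannot silently wrap, while counters for which wraparound is harmless, like these byte and cycle statistics, move to atomic_unchecked_t and the matching _unchecked accessors. A hedged sketch of the split (the struct and helper are illustrative, not from the patch):

/* Sketch only.  atomic_unchecked_t behaves like a pre-REFCOUNT
 * atomic_t: it may wrap silently, which is acceptable for throughput
 * metrics but not for object lifetimes. */
struct xmit_stats {
	atomic_unchecked_t bytes_sent;	/* metric: wraparound harmless */
	atomic_t refcount;		/* lifetime: overflow must trap */
};

static void account_xmit(struct xmit_stats *s, int n)
{
	atomic_add_unchecked(n, &s->bytes_sent);
}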
47380diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
47381index b75db01..ad2f34a 100644
47382--- a/drivers/video/uvesafb.c
47383+++ b/drivers/video/uvesafb.c
47384@@ -19,6 +19,7 @@
47385 #include <linux/io.h>
47386 #include <linux/mutex.h>
47387 #include <linux/slab.h>
47388+#include <linux/moduleloader.h>
47389 #include <video/edid.h>
47390 #include <video/uvesafb.h>
47391 #ifdef CONFIG_X86
47392@@ -569,10 +570,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
47393 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
47394 par->pmi_setpal = par->ypan = 0;
47395 } else {
47396+
47397+#ifdef CONFIG_PAX_KERNEXEC
47398+#ifdef CONFIG_MODULES
47399+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
47400+#endif
47401+ if (!par->pmi_code) {
47402+ par->pmi_setpal = par->ypan = 0;
47403+ return 0;
47404+ }
47405+#endif
47406+
47407 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
47408 + task->t.regs.edi);
47409+
47410+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47411+ pax_open_kernel();
47412+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
47413+ pax_close_kernel();
47414+
47415+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
47416+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
47417+#else
47418 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
47419 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
47420+#endif
47421+
47422 printk(KERN_INFO "uvesafb: protected mode interface info at "
47423 "%04x:%04x\n",
47424 (u16)task->t.regs.es, (u16)task->t.regs.edi);
47425@@ -817,13 +840,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
47426 par->ypan = ypan;
47427
47428 if (par->pmi_setpal || par->ypan) {
47429+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
47430 if (__supported_pte_mask & _PAGE_NX) {
47431 par->pmi_setpal = par->ypan = 0;
47432 printk(KERN_WARNING "uvesafb: NX protection is active. "
47433 "Better not to use the PMI.\n");
47434- } else {
47435+ } else
47436+#endif
47437 uvesafb_vbe_getpmi(task, par);
47438- }
47439 }
47440 #else
47441 /* The protected mode interface is not available on non-x86. */
47442@@ -1457,8 +1481,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
47443 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
47444
47445 /* Disable blanking if the user requested so. */
47446- if (!blank)
47447- info->fbops->fb_blank = NULL;
47448+ if (!blank) {
47449+ pax_open_kernel();
47450+ *(void **)&info->fbops->fb_blank = NULL;
47451+ pax_close_kernel();
47452+ }
47453
47454 /*
47455 * Find out how much IO memory is required for the mode with
47456@@ -1534,8 +1561,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
47457 info->flags = FBINFO_FLAG_DEFAULT |
47458 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
47459
47460- if (!par->ypan)
47461- info->fbops->fb_pan_display = NULL;
47462+ if (!par->ypan) {
47463+ pax_open_kernel();
47464+ *(void **)&info->fbops->fb_pan_display = NULL;
47465+ pax_close_kernel();
47466+ }
47467 }
47468
47469 static void uvesafb_init_mtrr(struct fb_info *info)
47470@@ -1836,6 +1866,11 @@ out:
47471 if (par->vbe_modes)
47472 kfree(par->vbe_modes);
47473
47474+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47475+ if (par->pmi_code)
47476+ module_free_exec(NULL, par->pmi_code);
47477+#endif
47478+
47479 framebuffer_release(info);
47480 return err;
47481 }
47482@@ -1862,6 +1897,12 @@ static int uvesafb_remove(struct platform_device *dev)
47483 kfree(par->vbe_state_orig);
47484 if (par->vbe_state_saved)
47485 kfree(par->vbe_state_saved);
47486+
47487+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47488+ if (par->pmi_code)
47489+ module_free_exec(NULL, par->pmi_code);
47490+#endif
47491+
47492 }
47493
47494 framebuffer_release(info);
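Both uvesafb hunks above address the same KERNEXEC consequence: kernel data pages are no longer executable, so the BIOS protected-mode interface (PMI) code cannot be called where phys_to_virt() maps it. The patch instead copies it into an executable allocation and translates the entry points. A condensed sketch of that flow with error handling elided (pmi_size stands in for the (u16)task->t.regs.ecx length the patch uses; ktva_ktla() is the PaX macro that converts the writable alias of the allocation back to its executable address):

/* Sketch only, assuming CONFIG_MODULES && CONFIG_PAX_KERNEXEC. */
static void *pmi_start, *pmi_pal;

static void setup_pmi(u16 *pmi_base, size_t pmi_size)
{
	u8 *pmi_code = module_alloc_exec(pmi_size);	/* executable region */

	if (!pmi_code)
		return;				/* fall back: PMI stays disabled */
	pax_open_kernel();
	memcpy(pmi_code, pmi_base, pmi_size);	/* relocate the BIOS code */
	pax_close_kernel();
	pmi_start = ktva_ktla(pmi_code + pmi_base[1]);
	pmi_pal   = ktva_ktla(pmi_code + pmi_base[2]);
}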
47495diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
47496index 501b340..d80aa17 100644
47497--- a/drivers/video/vesafb.c
47498+++ b/drivers/video/vesafb.c
47499@@ -9,6 +9,7 @@
47500 */
47501
47502 #include <linux/module.h>
47503+#include <linux/moduleloader.h>
47504 #include <linux/kernel.h>
47505 #include <linux/errno.h>
47506 #include <linux/string.h>
47507@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
47508 static int vram_total __initdata; /* Set total amount of memory */
47509 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
47510 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
47511-static void (*pmi_start)(void) __read_mostly;
47512-static void (*pmi_pal) (void) __read_mostly;
47513+static void (*pmi_start)(void) __read_only;
47514+static void (*pmi_pal) (void) __read_only;
47515 static int depth __read_mostly;
47516 static int vga_compat __read_mostly;
47517 /* --------------------------------------------------------------------- */
47518@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
47519 unsigned int size_vmode;
47520 unsigned int size_remap;
47521 unsigned int size_total;
47522+ void *pmi_code = NULL;
47523
47524 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
47525 return -ENODEV;
47526@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
47527 size_remap = size_total;
47528 vesafb_fix.smem_len = size_remap;
47529
47530-#ifndef __i386__
47531- screen_info.vesapm_seg = 0;
47532-#endif
47533-
47534 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
47535 printk(KERN_WARNING
47536 "vesafb: cannot reserve video memory at 0x%lx\n",
47537@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
47538 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
47539 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
47540
47541+#ifdef __i386__
47542+
47543+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47544+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
47545+ if (!pmi_code)
47546+#elif !defined(CONFIG_PAX_KERNEXEC)
47547+ if (0)
47548+#endif
47549+
47550+#endif
47551+ screen_info.vesapm_seg = 0;
47552+
47553 if (screen_info.vesapm_seg) {
47554- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
47555- screen_info.vesapm_seg,screen_info.vesapm_off);
47556+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
47557+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
47558 }
47559
47560 if (screen_info.vesapm_seg < 0xc000)
47561@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
47562
47563 if (ypan || pmi_setpal) {
47564 unsigned short *pmi_base;
47565+
47566 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
47567- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
47568- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
47569+
47570+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47571+ pax_open_kernel();
47572+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
47573+#else
47574+ pmi_code = pmi_base;
47575+#endif
47576+
47577+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
47578+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
47579+
47580+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47581+ pmi_start = ktva_ktla(pmi_start);
47582+ pmi_pal = ktva_ktla(pmi_pal);
47583+ pax_close_kernel();
47584+#endif
47585+
47586 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
47587 if (pmi_base[3]) {
47588 printk(KERN_INFO "vesafb: pmi: ports = ");
47589@@ -472,8 +498,11 @@ static int __init vesafb_probe(struct platform_device *dev)
47590 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
47591 (ypan ? FBINFO_HWACCEL_YPAN : 0);
47592
47593- if (!ypan)
47594- info->fbops->fb_pan_display = NULL;
47595+ if (!ypan) {
47596+ pax_open_kernel();
47597+ *(void **)&info->fbops->fb_pan_display = NULL;
47598+ pax_close_kernel();
47599+ }
47600
47601 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
47602 err = -ENOMEM;
47603@@ -488,6 +517,11 @@ static int __init vesafb_probe(struct platform_device *dev)
47604 info->node, info->fix.id);
47605 return 0;
47606 err:
47607+
47608+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47609+ module_free_exec(NULL, pmi_code);
47610+#endif
47611+
47612 if (info->screen_base)
47613 iounmap(info->screen_base);
47614 framebuffer_release(info);
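One more detail in the vesafb hunk deserves a note: the annotation change from __read_mostly to __read_only on pmi_start and pmi_pal. __read_mostly is only a cache-placement hint, whereas PaX's __read_only places the pointers in a section mapped read-only after boot, so even these file-scope variables can only be stored through the same open/close window. A minimal hedged sketch (the setter is hypothetical):

static void (*pmi_start)(void) __read_only;

/* Sketch only.  Writing a __read_only object after early boot requires
 * lifting write protection, exactly like the const ops structures
 * patched earlier. */
static void set_pmi_start(void (*fn)(void))
{
	pax_open_kernel();
	*(void **)&pmi_start = fn;
	pax_close_kernel();
}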
47615diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
47616index 88714ae..16c2e11 100644
47617--- a/drivers/video/via/via_clock.h
47618+++ b/drivers/video/via/via_clock.h
47619@@ -56,7 +56,7 @@ struct via_clock {
47620
47621 void (*set_engine_pll_state)(u8 state);
47622 void (*set_engine_pll)(struct via_pll_config config);
47623-};
47624+} __no_const;
47625
47626
47627 static inline u32 get_pll_internal_frequency(u32 ref_freq,
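__no_const, applied to via_clock above, is an opt-out marker for the PaX constify GCC plugin: structures consisting only of function pointers are normally forced const at compile time, so a struct whose hooks are assigned at runtime must be annotated. A hedged sketch of the distinction (runtime_ops, init_v1, and init_v2 are hypothetical names):

        /* Sketch: without __no_const the constify plugin would make this
         * ops struct const and the probe-time assignment would not build. */
        static void init_v1(void);
        static void init_v2(void);

        struct runtime_ops {
                void (*init)(void);
        } __no_const;

        static struct runtime_ops ops;

        static void pick_hooks(int new_model)
        {
                ops.init = new_model ? init_v2 : init_v1;
        }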
47628diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
47629index fef20db..d28b1ab 100644
47630--- a/drivers/xen/xenfs/xenstored.c
47631+++ b/drivers/xen/xenfs/xenstored.c
47632@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
47633 static int xsd_kva_open(struct inode *inode, struct file *file)
47634 {
47635 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
47636+#ifdef CONFIG_GRKERNSEC_HIDESYM
47637+ NULL);
47638+#else
47639 xen_store_interface);
47640+#endif
47641+
47642 if (!file->private_data)
47643 return -ENOMEM;
47644 return 0;
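This xenstored change is one of many CONFIG_GRKERNSEC_HIDESYM fixes in the patch: formatting a live kernel pointer with "%p" into user-readable data leaks the kernel's layout, so under HIDESYM the pointer is replaced with NULL before formatting. The pattern, as a stand-alone sketch:

        /* Sketch of the HIDESYM pattern: never let a real kernel address
         * reach userland; substitute NULL when symbol hiding is enabled. */
        #ifdef CONFIG_GRKERNSEC_HIDESYM
        # define user_visible_ptr(p) NULL
        #else
        # define user_visible_ptr(p) (p)
        #endif
        /* e.g.: kasprintf(GFP_KERNEL, "0x%p", user_visible_ptr(iface)); */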
47645diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
47646index 890bed5..17ae73e 100644
47647--- a/fs/9p/vfs_inode.c
47648+++ b/fs/9p/vfs_inode.c
47649@@ -1329,7 +1329,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
47650 void
47651 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
47652 {
47653- char *s = nd_get_link(nd);
47654+ const char *s = nd_get_link(nd);
47655
47656 p9_debug(P9_DEBUG_VFS, " %s %s\n",
47657 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
47658diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
47659index 0efd152..b5802ad 100644
47660--- a/fs/Kconfig.binfmt
47661+++ b/fs/Kconfig.binfmt
47662@@ -89,7 +89,7 @@ config HAVE_AOUT
47663
47664 config BINFMT_AOUT
47665 tristate "Kernel support for a.out and ECOFF binaries"
47666- depends on HAVE_AOUT
47667+ depends on HAVE_AOUT && BROKEN
47668 ---help---
47669 A.out (Assembler.OUTput) is a set of formats for libraries and
47670 executables used in the earliest versions of UNIX. Linux used
47671diff --git a/fs/aio.c b/fs/aio.c
47672index 71f613c..9d01f1f 100644
47673--- a/fs/aio.c
47674+++ b/fs/aio.c
47675@@ -111,7 +111,7 @@ static int aio_setup_ring(struct kioctx *ctx)
47676 size += sizeof(struct io_event) * nr_events;
47677 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
47678
47679- if (nr_pages < 0)
47680+ if (nr_pages <= 0)
47681 return -EINVAL;
47682
47683 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
47684@@ -1373,18 +1373,19 @@ static ssize_t aio_fsync(struct kiocb *iocb)
47685 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
47686 {
47687 ssize_t ret;
47688+ struct iovec iovstack;
47689
47690 #ifdef CONFIG_COMPAT
47691 if (compat)
47692 ret = compat_rw_copy_check_uvector(type,
47693 (struct compat_iovec __user *)kiocb->ki_buf,
47694- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
47695+ kiocb->ki_nbytes, 1, &iovstack,
47696 &kiocb->ki_iovec);
47697 else
47698 #endif
47699 ret = rw_copy_check_uvector(type,
47700 (struct iovec __user *)kiocb->ki_buf,
47701- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
47702+ kiocb->ki_nbytes, 1, &iovstack,
47703 &kiocb->ki_iovec);
47704 if (ret < 0)
47705 goto out;
47706@@ -1393,6 +1394,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
47707 if (ret < 0)
47708 goto out;
47709
47710+ if (kiocb->ki_iovec == &iovstack) {
47711+ kiocb->ki_inline_vec = iovstack;
47712+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
47713+ }
47714 kiocb->ki_nr_segs = kiocb->ki_nbytes;
47715 kiocb->ki_cur_seg = 0;
47716 /* ki_nbytes/left now reflect bytes instead of segs */
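The aio change closes a window in which rw_copy_check_uvector() validated user-supplied iovecs directly into kiocb->ki_inline_vec: the fix validates into a stack copy and commits the result to the kiocb only after validation succeeds. The shape of the fix as a hedged sketch, with validate() standing in for rw_copy_check_uvector():

        /* Sketch: validate into scratch storage first, publish on success. */
        struct iovec iovstack, *vec = &iovstack;
        ssize_t ret;

        ret = validate(user_buf, nbytes, &iovstack, &vec);
        if (ret < 0)
                return ret;
        if (vec == &iovstack) {                   /* single-segment fast path */
                kiocb->ki_inline_vec = iovstack;  /* commit only now */
                vec = &kiocb->ki_inline_vec;
        }
        kiocb->ki_iovec = vec;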
47717diff --git a/fs/attr.c b/fs/attr.c
47718index 1449adb..a2038c2 100644
47719--- a/fs/attr.c
47720+++ b/fs/attr.c
47721@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
47722 unsigned long limit;
47723
47724 limit = rlimit(RLIMIT_FSIZE);
47725+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
47726 if (limit != RLIM_INFINITY && offset > limit)
47727 goto out_sig;
47728 if (offset > inode->i_sb->s_maxbytes)
47729diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
47730index 03bc1d3..6205356 100644
47731--- a/fs/autofs4/waitq.c
47732+++ b/fs/autofs4/waitq.c
47733@@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
47734 {
47735 unsigned long sigpipe, flags;
47736 mm_segment_t fs;
47737- const char *data = (const char *)addr;
47738+ const char __user *data = (const char __force_user *)addr;
47739 ssize_t wr = 0;
47740
47741 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
47742@@ -348,6 +348,10 @@ static int validate_request(struct autofs_wait_queue **wait,
47743 return 1;
47744 }
47745
47746+#ifdef CONFIG_GRKERNSEC_HIDESYM
47747+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
47748+#endif
47749+
47750 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
47751 enum autofs_notify notify)
47752 {
47753@@ -381,7 +385,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
47754
47755 /* If this is a direct mount request create a dummy name */
47756 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
47757+#ifdef CONFIG_GRKERNSEC_HIDESYM
47758+ /* this name does get written to userland via autofs4_write() */
47759+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
47760+#else
47761 qstr.len = sprintf(name, "%p", dentry);
47762+#endif
47763 else {
47764 qstr.len = autofs4_getpath(sbi, dentry, &name);
47765 if (!qstr.len) {
47766diff --git a/fs/befs/endian.h b/fs/befs/endian.h
47767index 2722387..c8dd2a7 100644
47768--- a/fs/befs/endian.h
47769+++ b/fs/befs/endian.h
47770@@ -11,7 +11,7 @@
47771
47772 #include <asm/byteorder.h>
47773
47774-static inline u64
47775+static inline u64 __intentional_overflow(-1)
47776 fs64_to_cpu(const struct super_block *sb, fs64 n)
47777 {
47778 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
47779@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
47780 return (__force fs64)cpu_to_be64(n);
47781 }
47782
47783-static inline u32
47784+static inline u32 __intentional_overflow(-1)
47785 fs32_to_cpu(const struct super_block *sb, fs32 n)
47786 {
47787 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
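__intentional_overflow(-1) is consumed by the size_overflow GCC plugin that this patch adds: byte-order helpers such as fs64_to_cpu() perform arithmetic the plugin would otherwise flag, so they are whitelisted at the declaration. A hedged sketch of how such an annotation is typically wired up (the SIZE_OVERFLOW_PLUGIN guard name here is hypothetical; the real definition lives in the compiler headers elsewhere in this patch):

        /* Sketch: a plugin attribute when instrumenting, a no-op otherwise,
         * so annotated code still builds with a vanilla compiler. */
        #ifdef SIZE_OVERFLOW_PLUGIN
        # define __intentional_overflow(...) \
                __attribute__((intentional_overflow(__VA_ARGS__)))
        #else
        # define __intentional_overflow(...)
        #endif

        static inline u64 __intentional_overflow(-1) fs64_to_cpu_sketch(u64 n)
        {
                return n;       /* byteswap elided in this sketch */
        }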
47788diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
47789index 2b3bda8..6a2d4be 100644
47790--- a/fs/befs/linuxvfs.c
47791+++ b/fs/befs/linuxvfs.c
47792@@ -510,7 +510,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
47793 {
47794 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
47795 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
47796- char *link = nd_get_link(nd);
47797+ const char *link = nd_get_link(nd);
47798 if (!IS_ERR(link))
47799 kfree(link);
47800 }
47801diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
47802index 6043567..16a9239 100644
47803--- a/fs/binfmt_aout.c
47804+++ b/fs/binfmt_aout.c
47805@@ -16,6 +16,7 @@
47806 #include <linux/string.h>
47807 #include <linux/fs.h>
47808 #include <linux/file.h>
47809+#include <linux/security.h>
47810 #include <linux/stat.h>
47811 #include <linux/fcntl.h>
47812 #include <linux/ptrace.h>
47813@@ -59,6 +60,8 @@ static int aout_core_dump(struct coredump_params *cprm)
47814 #endif
47815 # define START_STACK(u) ((void __user *)u.start_stack)
47816
47817+ memset(&dump, 0, sizeof(dump));
47818+
47819 fs = get_fs();
47820 set_fs(KERNEL_DS);
47821 has_dumped = 1;
47822@@ -70,10 +73,12 @@ static int aout_core_dump(struct coredump_params *cprm)
47823
47824 /* If the size of the dump file exceeds the rlimit, then see what would happen
47825 if we wrote the stack, but not the data area. */
47826+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
47827 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
47828 dump.u_dsize = 0;
47829
47830 /* Make sure we have enough room to write the stack and data areas. */
47831+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
47832 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
47833 dump.u_ssize = 0;
47834
47835@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
47836 rlim = rlimit(RLIMIT_DATA);
47837 if (rlim >= RLIM_INFINITY)
47838 rlim = ~0;
47839+
47840+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
47841 if (ex.a_data + ex.a_bss > rlim)
47842 return -ENOMEM;
47843
47844@@ -268,6 +275,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
47845
47846 install_exec_creds(bprm);
47847
47848+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47849+ current->mm->pax_flags = 0UL;
47850+#endif
47851+
47852+#ifdef CONFIG_PAX_PAGEEXEC
47853+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
47854+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
47855+
47856+#ifdef CONFIG_PAX_EMUTRAMP
47857+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
47858+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
47859+#endif
47860+
47861+#ifdef CONFIG_PAX_MPROTECT
47862+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
47863+ current->mm->pax_flags |= MF_PAX_MPROTECT;
47864+#endif
47865+
47866+ }
47867+#endif
47868+
47869 if (N_MAGIC(ex) == OMAGIC) {
47870 unsigned long text_addr, map_size;
47871 loff_t pos;
47872@@ -333,7 +361,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
47873 }
47874
47875 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
47876- PROT_READ | PROT_WRITE | PROT_EXEC,
47877+ PROT_READ | PROT_WRITE,
47878 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
47879 fd_offset + ex.a_text);
47880 if (error != N_DATADDR(ex)) {
47881diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
47882index 0c42cdb..f23778b 100644
47883--- a/fs/binfmt_elf.c
47884+++ b/fs/binfmt_elf.c
47885@@ -33,6 +33,7 @@
47886 #include <linux/elf.h>
47887 #include <linux/utsname.h>
47888 #include <linux/coredump.h>
47889+#include <linux/xattr.h>
47890 #include <asm/uaccess.h>
47891 #include <asm/param.h>
47892 #include <asm/page.h>
47893@@ -59,6 +60,10 @@ static int elf_core_dump(struct coredump_params *cprm);
47894 #define elf_core_dump NULL
47895 #endif
47896
47897+#ifdef CONFIG_PAX_MPROTECT
47898+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
47899+#endif
47900+
47901 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
47902 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
47903 #else
47904@@ -78,6 +83,11 @@ static struct linux_binfmt elf_format = {
47905 .load_binary = load_elf_binary,
47906 .load_shlib = load_elf_library,
47907 .core_dump = elf_core_dump,
47908+
47909+#ifdef CONFIG_PAX_MPROTECT
47910+ .handle_mprotect= elf_handle_mprotect,
47911+#endif
47912+
47913 .min_coredump = ELF_EXEC_PAGESIZE,
47914 };
47915
47916@@ -85,6 +95,8 @@ static struct linux_binfmt elf_format = {
47917
47918 static int set_brk(unsigned long start, unsigned long end)
47919 {
47920+ unsigned long e = end;
47921+
47922 start = ELF_PAGEALIGN(start);
47923 end = ELF_PAGEALIGN(end);
47924 if (end > start) {
47925@@ -93,7 +105,7 @@ static int set_brk(unsigned long start, unsigned long end)
47926 if (BAD_ADDR(addr))
47927 return addr;
47928 }
47929- current->mm->start_brk = current->mm->brk = end;
47930+ current->mm->start_brk = current->mm->brk = e;
47931 return 0;
47932 }
47933
47934@@ -154,12 +166,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
47935 elf_addr_t __user *u_rand_bytes;
47936 const char *k_platform = ELF_PLATFORM;
47937 const char *k_base_platform = ELF_BASE_PLATFORM;
47938- unsigned char k_rand_bytes[16];
47939+ u32 k_rand_bytes[4];
47940 int items;
47941 elf_addr_t *elf_info;
47942 int ei_index = 0;
47943 const struct cred *cred = current_cred();
47944 struct vm_area_struct *vma;
47945+ unsigned long saved_auxv[AT_VECTOR_SIZE];
47946
47947 /*
47948 * In some cases (e.g. Hyper-Threading), we want to avoid L1
47949@@ -201,8 +214,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
47950 * Generate 16 random bytes for userspace PRNG seeding.
47951 */
47952 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
47953- u_rand_bytes = (elf_addr_t __user *)
47954- STACK_ALLOC(p, sizeof(k_rand_bytes));
47955+ srandom32(k_rand_bytes[0] ^ random32());
47956+ srandom32(k_rand_bytes[1] ^ random32());
47957+ srandom32(k_rand_bytes[2] ^ random32());
47958+ srandom32(k_rand_bytes[3] ^ random32());
47959+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
47960+ u_rand_bytes = (elf_addr_t __user *) p;
47961 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
47962 return -EFAULT;
47963
47964@@ -314,9 +331,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
47965 return -EFAULT;
47966 current->mm->env_end = p;
47967
47968+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
47969+
47970 /* Put the elf_info on the stack in the right place. */
47971 sp = (elf_addr_t __user *)envp + 1;
47972- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
47973+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
47974 return -EFAULT;
47975 return 0;
47976 }
47977@@ -380,15 +399,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
47978 an ELF header */
47979
47980 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
47981- struct file *interpreter, unsigned long *interp_map_addr,
47982- unsigned long no_base)
47983+ struct file *interpreter, unsigned long no_base)
47984 {
47985 struct elf_phdr *elf_phdata;
47986 struct elf_phdr *eppnt;
47987- unsigned long load_addr = 0;
47988+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
47989 int load_addr_set = 0;
47990 unsigned long last_bss = 0, elf_bss = 0;
47991- unsigned long error = ~0UL;
47992+ unsigned long error = -EINVAL;
47993 unsigned long total_size;
47994 int retval, i, size;
47995
47996@@ -434,6 +452,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
47997 goto out_close;
47998 }
47999
48000+#ifdef CONFIG_PAX_SEGMEXEC
48001+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
48002+ pax_task_size = SEGMEXEC_TASK_SIZE;
48003+#endif
48004+
48005 eppnt = elf_phdata;
48006 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
48007 if (eppnt->p_type == PT_LOAD) {
48008@@ -457,8 +480,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
48009 map_addr = elf_map(interpreter, load_addr + vaddr,
48010 eppnt, elf_prot, elf_type, total_size);
48011 total_size = 0;
48012- if (!*interp_map_addr)
48013- *interp_map_addr = map_addr;
48014 error = map_addr;
48015 if (BAD_ADDR(map_addr))
48016 goto out_close;
48017@@ -477,8 +498,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
48018 k = load_addr + eppnt->p_vaddr;
48019 if (BAD_ADDR(k) ||
48020 eppnt->p_filesz > eppnt->p_memsz ||
48021- eppnt->p_memsz > TASK_SIZE ||
48022- TASK_SIZE - eppnt->p_memsz < k) {
48023+ eppnt->p_memsz > pax_task_size ||
48024+ pax_task_size - eppnt->p_memsz < k) {
48025 error = -ENOMEM;
48026 goto out_close;
48027 }
48028@@ -530,6 +551,315 @@ out:
48029 return error;
48030 }
48031
48032+#ifdef CONFIG_PAX_PT_PAX_FLAGS
48033+#ifdef CONFIG_PAX_SOFTMODE
48034+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
48035+{
48036+ unsigned long pax_flags = 0UL;
48037+
48038+#ifdef CONFIG_PAX_PAGEEXEC
48039+ if (elf_phdata->p_flags & PF_PAGEEXEC)
48040+ pax_flags |= MF_PAX_PAGEEXEC;
48041+#endif
48042+
48043+#ifdef CONFIG_PAX_SEGMEXEC
48044+ if (elf_phdata->p_flags & PF_SEGMEXEC)
48045+ pax_flags |= MF_PAX_SEGMEXEC;
48046+#endif
48047+
48048+#ifdef CONFIG_PAX_EMUTRAMP
48049+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
48050+ pax_flags |= MF_PAX_EMUTRAMP;
48051+#endif
48052+
48053+#ifdef CONFIG_PAX_MPROTECT
48054+ if (elf_phdata->p_flags & PF_MPROTECT)
48055+ pax_flags |= MF_PAX_MPROTECT;
48056+#endif
48057+
48058+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
48059+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
48060+ pax_flags |= MF_PAX_RANDMMAP;
48061+#endif
48062+
48063+ return pax_flags;
48064+}
48065+#endif
48066+
48067+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
48068+{
48069+ unsigned long pax_flags = 0UL;
48070+
48071+#ifdef CONFIG_PAX_PAGEEXEC
48072+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
48073+ pax_flags |= MF_PAX_PAGEEXEC;
48074+#endif
48075+
48076+#ifdef CONFIG_PAX_SEGMEXEC
48077+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
48078+ pax_flags |= MF_PAX_SEGMEXEC;
48079+#endif
48080+
48081+#ifdef CONFIG_PAX_EMUTRAMP
48082+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
48083+ pax_flags |= MF_PAX_EMUTRAMP;
48084+#endif
48085+
48086+#ifdef CONFIG_PAX_MPROTECT
48087+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
48088+ pax_flags |= MF_PAX_MPROTECT;
48089+#endif
48090+
48091+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
48092+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
48093+ pax_flags |= MF_PAX_RANDMMAP;
48094+#endif
48095+
48096+ return pax_flags;
48097+}
48098+#endif
48099+
48100+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
48101+#ifdef CONFIG_PAX_SOFTMODE
48102+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
48103+{
48104+ unsigned long pax_flags = 0UL;
48105+
48106+#ifdef CONFIG_PAX_PAGEEXEC
48107+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
48108+ pax_flags |= MF_PAX_PAGEEXEC;
48109+#endif
48110+
48111+#ifdef CONFIG_PAX_SEGMEXEC
48112+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
48113+ pax_flags |= MF_PAX_SEGMEXEC;
48114+#endif
48115+
48116+#ifdef CONFIG_PAX_EMUTRAMP
48117+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
48118+ pax_flags |= MF_PAX_EMUTRAMP;
48119+#endif
48120+
48121+#ifdef CONFIG_PAX_MPROTECT
48122+ if (pax_flags_softmode & MF_PAX_MPROTECT)
48123+ pax_flags |= MF_PAX_MPROTECT;
48124+#endif
48125+
48126+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
48127+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
48128+ pax_flags |= MF_PAX_RANDMMAP;
48129+#endif
48130+
48131+ return pax_flags;
48132+}
48133+#endif
48134+
48135+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
48136+{
48137+ unsigned long pax_flags = 0UL;
48138+
48139+#ifdef CONFIG_PAX_PAGEEXEC
48140+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
48141+ pax_flags |= MF_PAX_PAGEEXEC;
48142+#endif
48143+
48144+#ifdef CONFIG_PAX_SEGMEXEC
48145+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
48146+ pax_flags |= MF_PAX_SEGMEXEC;
48147+#endif
48148+
48149+#ifdef CONFIG_PAX_EMUTRAMP
48150+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
48151+ pax_flags |= MF_PAX_EMUTRAMP;
48152+#endif
48153+
48154+#ifdef CONFIG_PAX_MPROTECT
48155+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
48156+ pax_flags |= MF_PAX_MPROTECT;
48157+#endif
48158+
48159+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
48160+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
48161+ pax_flags |= MF_PAX_RANDMMAP;
48162+#endif
48163+
48164+ return pax_flags;
48165+}
48166+#endif
48167+
48168+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48169+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
48170+{
48171+ unsigned long pax_flags = 0UL;
48172+
48173+#ifdef CONFIG_PAX_EI_PAX
48174+
48175+#ifdef CONFIG_PAX_PAGEEXEC
48176+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
48177+ pax_flags |= MF_PAX_PAGEEXEC;
48178+#endif
48179+
48180+#ifdef CONFIG_PAX_SEGMEXEC
48181+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
48182+ pax_flags |= MF_PAX_SEGMEXEC;
48183+#endif
48184+
48185+#ifdef CONFIG_PAX_EMUTRAMP
48186+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
48187+ pax_flags |= MF_PAX_EMUTRAMP;
48188+#endif
48189+
48190+#ifdef CONFIG_PAX_MPROTECT
48191+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
48192+ pax_flags |= MF_PAX_MPROTECT;
48193+#endif
48194+
48195+#ifdef CONFIG_PAX_ASLR
48196+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
48197+ pax_flags |= MF_PAX_RANDMMAP;
48198+#endif
48199+
48200+#else
48201+
48202+#ifdef CONFIG_PAX_PAGEEXEC
48203+ pax_flags |= MF_PAX_PAGEEXEC;
48204+#endif
48205+
48206+#ifdef CONFIG_PAX_SEGMEXEC
48207+ pax_flags |= MF_PAX_SEGMEXEC;
48208+#endif
48209+
48210+#ifdef CONFIG_PAX_MPROTECT
48211+ pax_flags |= MF_PAX_MPROTECT;
48212+#endif
48213+
48214+#ifdef CONFIG_PAX_RANDMMAP
48215+ if (randomize_va_space)
48216+ pax_flags |= MF_PAX_RANDMMAP;
48217+#endif
48218+
48219+#endif
48220+
48221+ return pax_flags;
48222+}
48223+
48224+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
48225+{
48226+
48227+#ifdef CONFIG_PAX_PT_PAX_FLAGS
48228+ unsigned long i;
48229+
48230+ for (i = 0UL; i < elf_ex->e_phnum; i++)
48231+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
48232+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
48233+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
48234+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
48235+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
48236+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
48237+ return ~0UL;
48238+
48239+#ifdef CONFIG_PAX_SOFTMODE
48240+ if (pax_softmode)
48241+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
48242+ else
48243+#endif
48244+
48245+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
48246+ break;
48247+ }
48248+#endif
48249+
48250+ return ~0UL;
48251+}
48252+
48253+static unsigned long pax_parse_xattr_pax(struct file * const file)
48254+{
48255+
48256+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
48257+ ssize_t xattr_size, i;
48258+ unsigned char xattr_value[5];
48259+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
48260+
48261+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
48262+ if (xattr_size <= 0 || xattr_size > 5)
48263+ return ~0UL;
48264+
48265+ for (i = 0; i < xattr_size; i++)
48266+ switch (xattr_value[i]) {
48267+ default:
48268+ return ~0UL;
48269+
48270+#define parse_flag(option1, option2, flag) \
48271+ case option1: \
48272+ if (pax_flags_hardmode & MF_PAX_##flag) \
48273+ return ~0UL; \
48274+ pax_flags_hardmode |= MF_PAX_##flag; \
48275+ break; \
48276+ case option2: \
48277+ if (pax_flags_softmode & MF_PAX_##flag) \
48278+ return ~0UL; \
48279+ pax_flags_softmode |= MF_PAX_##flag; \
48280+ break;
48281+
48282+ parse_flag('p', 'P', PAGEEXEC);
48283+ parse_flag('e', 'E', EMUTRAMP);
48284+ parse_flag('m', 'M', MPROTECT);
48285+ parse_flag('r', 'R', RANDMMAP);
48286+ parse_flag('s', 'S', SEGMEXEC);
48287+
48288+#undef parse_flag
48289+ }
48290+
48291+ if (pax_flags_hardmode & pax_flags_softmode)
48292+ return ~0UL;
48293+
48294+#ifdef CONFIG_PAX_SOFTMODE
48295+ if (pax_softmode)
48296+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
48297+ else
48298+#endif
48299+
48300+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
48301+#else
48302+ return ~0UL;
48303+#endif
48304+
48305+}
48306+
48307+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
48308+{
48309+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
48310+
48311+ pax_flags = pax_parse_ei_pax(elf_ex);
48312+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
48313+ xattr_pax_flags = pax_parse_xattr_pax(file);
48314+
48315+ if (pt_pax_flags == ~0UL)
48316+ pt_pax_flags = xattr_pax_flags;
48317+ else if (xattr_pax_flags == ~0UL)
48318+ xattr_pax_flags = pt_pax_flags;
48319+ if (pt_pax_flags != xattr_pax_flags)
48320+ return -EINVAL;
48321+ if (pt_pax_flags != ~0UL)
48322+ pax_flags = pt_pax_flags;
48323+
48324+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
48325+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
48326+ if ((__supported_pte_mask & _PAGE_NX))
48327+ pax_flags &= ~MF_PAX_SEGMEXEC;
48328+ else
48329+ pax_flags &= ~MF_PAX_PAGEEXEC;
48330+ }
48331+#endif
48332+
48333+ if (0 > pax_check_flags(&pax_flags))
48334+ return -EINVAL;
48335+
48336+ current->mm->pax_flags = pax_flags;
48337+ return 0;
48338+}
48339+#endif
48340+
48341 /*
48342 * These are the functions used to load ELF style executables and shared
48343 * libraries. There is no binary dependent code anywhere else.
48344@@ -546,6 +876,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
48345 {
48346 unsigned int random_variable = 0;
48347
48348+#ifdef CONFIG_PAX_RANDUSTACK
48349+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
48350+ return stack_top - current->mm->delta_stack;
48351+#endif
48352+
48353 if ((current->flags & PF_RANDOMIZE) &&
48354 !(current->personality & ADDR_NO_RANDOMIZE)) {
48355 random_variable = get_random_int() & STACK_RND_MASK;
48356@@ -564,7 +899,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
48357 unsigned long load_addr = 0, load_bias = 0;
48358 int load_addr_set = 0;
48359 char * elf_interpreter = NULL;
48360- unsigned long error;
48361+ unsigned long error = 0;
48362 struct elf_phdr *elf_ppnt, *elf_phdata;
48363 unsigned long elf_bss, elf_brk;
48364 int retval, i;
48365@@ -574,12 +909,12 @@ static int load_elf_binary(struct linux_binprm *bprm)
48366 unsigned long start_code, end_code, start_data, end_data;
48367 unsigned long reloc_func_desc __maybe_unused = 0;
48368 int executable_stack = EXSTACK_DEFAULT;
48369- unsigned long def_flags = 0;
48370 struct pt_regs *regs = current_pt_regs();
48371 struct {
48372 struct elfhdr elf_ex;
48373 struct elfhdr interp_elf_ex;
48374 } *loc;
48375+ unsigned long pax_task_size = TASK_SIZE;
48376
48377 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
48378 if (!loc) {
48379@@ -715,11 +1050,81 @@ static int load_elf_binary(struct linux_binprm *bprm)
48380 goto out_free_dentry;
48381
48382 /* OK, This is the point of no return */
48383- current->mm->def_flags = def_flags;
48384+
48385+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48386+ current->mm->pax_flags = 0UL;
48387+#endif
48388+
48389+#ifdef CONFIG_PAX_DLRESOLVE
48390+ current->mm->call_dl_resolve = 0UL;
48391+#endif
48392+
48393+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
48394+ current->mm->call_syscall = 0UL;
48395+#endif
48396+
48397+#ifdef CONFIG_PAX_ASLR
48398+ current->mm->delta_mmap = 0UL;
48399+ current->mm->delta_stack = 0UL;
48400+#endif
48401+
48402+ current->mm->def_flags = 0;
48403+
48404+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48405+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
48406+ send_sig(SIGKILL, current, 0);
48407+ goto out_free_dentry;
48408+ }
48409+#endif
48410+
48411+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
48412+ pax_set_initial_flags(bprm);
48413+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
48414+ if (pax_set_initial_flags_func)
48415+ (pax_set_initial_flags_func)(bprm);
48416+#endif
48417+
48418+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48419+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
48420+ current->mm->context.user_cs_limit = PAGE_SIZE;
48421+ current->mm->def_flags |= VM_PAGEEXEC;
48422+ }
48423+#endif
48424+
48425+#ifdef CONFIG_PAX_SEGMEXEC
48426+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
48427+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
48428+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
48429+ pax_task_size = SEGMEXEC_TASK_SIZE;
48430+ current->mm->def_flags |= VM_NOHUGEPAGE;
48431+ }
48432+#endif
48433+
48434+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
48435+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
48436+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
48437+ put_cpu();
48438+ }
48439+#endif
48440
48441 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
48442 may depend on the personality. */
48443 SET_PERSONALITY(loc->elf_ex);
48444+
48445+#ifdef CONFIG_PAX_ASLR
48446+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
48447+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
48448+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
48449+ }
48450+#endif
48451+
48452+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
48453+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
48454+ executable_stack = EXSTACK_DISABLE_X;
48455+ current->personality &= ~READ_IMPLIES_EXEC;
48456+ } else
48457+#endif
48458+
48459 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
48460 current->personality |= READ_IMPLIES_EXEC;
48461
48462@@ -810,6 +1215,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
48463 #else
48464 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
48465 #endif
48466+
48467+#ifdef CONFIG_PAX_RANDMMAP
48468+ /* PaX: randomize base address at the default exe base if requested */
48469+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
48470+#ifdef CONFIG_SPARC64
48471+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
48472+#else
48473+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
48474+#endif
48475+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
48476+ elf_flags |= MAP_FIXED;
48477+ }
48478+#endif
48479+
48480 }
48481
48482 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
48483@@ -842,9 +1261,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
48484 * allowed task size. Note that p_filesz must always be
48485 * <= p_memsz so it is only necessary to check p_memsz.
48486 */
48487- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
48488- elf_ppnt->p_memsz > TASK_SIZE ||
48489- TASK_SIZE - elf_ppnt->p_memsz < k) {
48490+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
48491+ elf_ppnt->p_memsz > pax_task_size ||
48492+ pax_task_size - elf_ppnt->p_memsz < k) {
48493 /* set_brk can never work. Avoid overflows. */
48494 send_sig(SIGKILL, current, 0);
48495 retval = -EINVAL;
48496@@ -883,17 +1302,44 @@ static int load_elf_binary(struct linux_binprm *bprm)
48497 goto out_free_dentry;
48498 }
48499 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
48500- send_sig(SIGSEGV, current, 0);
48501- retval = -EFAULT; /* Nobody gets to see this, but.. */
48502- goto out_free_dentry;
48503+ /*
48504+ * This bss-zeroing can fail if the ELF
48505+ * file specifies odd protections. So
48506+ * we don't check the return value
48507+ */
48508 }
48509
48510+#ifdef CONFIG_PAX_RANDMMAP
48511+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
48512+ unsigned long start, size, flags, vm_flags;
48513+
48514+ start = ELF_PAGEALIGN(elf_brk);
48515+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
48516+ flags = MAP_FIXED | MAP_PRIVATE;
48517+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
48518+
48519+ down_write(&current->mm->mmap_sem);
48520+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
48521+ retval = -ENOMEM;
48522+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
48523+// if (current->personality & ADDR_NO_RANDOMIZE)
48524+// vm_flags |= VM_READ | VM_MAYREAD;
48525+ start = mmap_region(NULL, start, PAGE_ALIGN(size), flags, vm_flags, 0);
48526+ retval = IS_ERR_VALUE(start) ? start : 0;
48527+ }
48528+ up_write(&current->mm->mmap_sem);
48529+ if (retval == 0)
48530+ retval = set_brk(start + size, start + size + PAGE_SIZE);
48531+ if (retval < 0) {
48532+ send_sig(SIGKILL, current, 0);
48533+ goto out_free_dentry;
48534+ }
48535+ }
48536+#endif
48537+
48538 if (elf_interpreter) {
48539- unsigned long interp_map_addr = 0;
48540-
48541 elf_entry = load_elf_interp(&loc->interp_elf_ex,
48542 interpreter,
48543- &interp_map_addr,
48544 load_bias);
48545 if (!IS_ERR((void *)elf_entry)) {
48546 /*
48547@@ -1115,7 +1561,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
48548 * Decide what to dump of a segment, part, all or none.
48549 */
48550 static unsigned long vma_dump_size(struct vm_area_struct *vma,
48551- unsigned long mm_flags)
48552+ unsigned long mm_flags, long signr)
48553 {
48554 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
48555
48556@@ -1152,7 +1598,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
48557 if (vma->vm_file == NULL)
48558 return 0;
48559
48560- if (FILTER(MAPPED_PRIVATE))
48561+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
48562 goto whole;
48563
48564 /*
48565@@ -1374,9 +1820,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
48566 {
48567 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
48568 int i = 0;
48569- do
48570+ do {
48571 i += 2;
48572- while (auxv[i - 2] != AT_NULL);
48573+ } while (auxv[i - 2] != AT_NULL);
48574 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
48575 }
48576
48577@@ -2006,14 +2452,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
48578 }
48579
48580 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
48581- unsigned long mm_flags)
48582+ struct coredump_params *cprm)
48583 {
48584 struct vm_area_struct *vma;
48585 size_t size = 0;
48586
48587 for (vma = first_vma(current, gate_vma); vma != NULL;
48588 vma = next_vma(vma, gate_vma))
48589- size += vma_dump_size(vma, mm_flags);
48590+ size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
48591 return size;
48592 }
48593
48594@@ -2107,7 +2553,7 @@ static int elf_core_dump(struct coredump_params *cprm)
48595
48596 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
48597
48598- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
48599+ offset += elf_core_vma_data_size(gate_vma, cprm);
48600 offset += elf_core_extra_data_size();
48601 e_shoff = offset;
48602
48603@@ -2121,10 +2567,12 @@ static int elf_core_dump(struct coredump_params *cprm)
48604 offset = dataoff;
48605
48606 size += sizeof(*elf);
48607+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
48608 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
48609 goto end_coredump;
48610
48611 size += sizeof(*phdr4note);
48612+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
48613 if (size > cprm->limit
48614 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
48615 goto end_coredump;
48616@@ -2138,7 +2586,7 @@ static int elf_core_dump(struct coredump_params *cprm)
48617 phdr.p_offset = offset;
48618 phdr.p_vaddr = vma->vm_start;
48619 phdr.p_paddr = 0;
48620- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
48621+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
48622 phdr.p_memsz = vma->vm_end - vma->vm_start;
48623 offset += phdr.p_filesz;
48624 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
48625@@ -2149,6 +2597,7 @@ static int elf_core_dump(struct coredump_params *cprm)
48626 phdr.p_align = ELF_EXEC_PAGESIZE;
48627
48628 size += sizeof(phdr);
48629+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
48630 if (size > cprm->limit
48631 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
48632 goto end_coredump;
48633@@ -2173,7 +2622,7 @@ static int elf_core_dump(struct coredump_params *cprm)
48634 unsigned long addr;
48635 unsigned long end;
48636
48637- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
48638+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
48639
48640 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
48641 struct page *page;
48642@@ -2182,6 +2631,7 @@ static int elf_core_dump(struct coredump_params *cprm)
48643 page = get_dump_page(addr);
48644 if (page) {
48645 void *kaddr = kmap(page);
48646+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
48647 stop = ((size += PAGE_SIZE) > cprm->limit) ||
48648 !dump_write(cprm->file, kaddr,
48649 PAGE_SIZE);
48650@@ -2199,6 +2649,7 @@ static int elf_core_dump(struct coredump_params *cprm)
48651
48652 if (e_phnum == PN_XNUM) {
48653 size += sizeof(*shdr4extnum);
48654+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
48655 if (size > cprm->limit
48656 || !dump_write(cprm->file, shdr4extnum,
48657 sizeof(*shdr4extnum)))
48658@@ -2219,6 +2670,97 @@ out:
48659
48660 #endif /* CONFIG_ELF_CORE */
48661
48662+#ifdef CONFIG_PAX_MPROTECT
48663+/* PaX: non-PIC ELF libraries need relocations on their executable segments
48664+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
48665+ * we'll remove VM_MAYWRITE for good on RELRO segments.
48666+ *
48667+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
48668+ * basis because we want to allow the common case and not the special ones.
48669+ */
48670+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
48671+{
48672+ struct elfhdr elf_h;
48673+ struct elf_phdr elf_p;
48674+ unsigned long i;
48675+ unsigned long oldflags;
48676+ bool is_textrel_rw, is_textrel_rx, is_relro;
48677+
48678+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
48679+ return;
48680+
48681+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
48682+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
48683+
48684+#ifdef CONFIG_PAX_ELFRELOCS
48685+ /* possible TEXTREL */
48686+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
48687+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
48688+#else
48689+ is_textrel_rw = false;
48690+ is_textrel_rx = false;
48691+#endif
48692+
48693+ /* possible RELRO */
48694+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
48695+
48696+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
48697+ return;
48698+
48699+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
48700+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
48701+
48702+#ifdef CONFIG_PAX_ETEXECRELOCS
48703+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
48704+#else
48705+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
48706+#endif
48707+
48708+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
48709+ !elf_check_arch(&elf_h) ||
48710+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
48711+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
48712+ return;
48713+
48714+ for (i = 0UL; i < elf_h.e_phnum; i++) {
48715+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
48716+ return;
48717+ switch (elf_p.p_type) {
48718+ case PT_DYNAMIC:
48719+ if (!is_textrel_rw && !is_textrel_rx)
48720+ continue;
48721+ i = 0UL;
48722+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
48723+ elf_dyn dyn;
48724+
48725+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
48726+ return;
48727+ if (dyn.d_tag == DT_NULL)
48728+ return;
48729+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
48730+ gr_log_textrel(vma);
48731+ if (is_textrel_rw)
48732+ vma->vm_flags |= VM_MAYWRITE;
48733+ else
48734+				/* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
48735+ vma->vm_flags &= ~VM_MAYWRITE;
48736+ return;
48737+ }
48738+ i++;
48739+ }
48740+ return;
48741+
48742+ case PT_GNU_RELRO:
48743+ if (!is_relro)
48744+ continue;
48745+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
48746+ vma->vm_flags &= ~VM_MAYWRITE;
48747+ return;
48748+ }
48749+ }
48750+}
48751+#endif
48752+
48753 static int __init init_elf_binfmt(void)
48754 {
48755 register_binfmt(&elf_format);
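For reference, pax_parse_xattr_pax() above reads per-binary overrides from the user.pax.flags extended attribute, one character per feature: a lowercase character feeds the hard-mode parser (which treats the listed feature as disabled), an uppercase one feeds the soft-mode parser (which treats it as enabled). A hedged userspace sketch of tagging a binary this way; the path is hypothetical, and 'm' is parsed above as the hard-mode marker that clears MPROTECT:

        /* Illustrative only: set the xattr that the loader code parses. */
        #include <stdio.h>
        #include <sys/xattr.h>

        int main(void)
        {
                const char *path = "/usr/bin/some-jit";   /* hypothetical */

                if (setxattr(path, "user.pax.flags", "m", 1, 0) != 0) {
                        perror("setxattr");
                        return 1;
                }
                return 0;
        }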
48756diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
48757index b563719..3868998 100644
48758--- a/fs/binfmt_flat.c
48759+++ b/fs/binfmt_flat.c
48760@@ -562,7 +562,9 @@ static int load_flat_file(struct linux_binprm * bprm,
48761 realdatastart = (unsigned long) -ENOMEM;
48762 printk("Unable to allocate RAM for process data, errno %d\n",
48763 (int)-realdatastart);
48764+ down_write(&current->mm->mmap_sem);
48765 vm_munmap(textpos, text_len);
48766+ up_write(&current->mm->mmap_sem);
48767 ret = realdatastart;
48768 goto err;
48769 }
48770@@ -586,8 +588,10 @@ static int load_flat_file(struct linux_binprm * bprm,
48771 }
48772 if (IS_ERR_VALUE(result)) {
48773 printk("Unable to read data+bss, errno %d\n", (int)-result);
48774+ down_write(&current->mm->mmap_sem);
48775 vm_munmap(textpos, text_len);
48776 vm_munmap(realdatastart, len);
48777+ up_write(&current->mm->mmap_sem);
48778 ret = result;
48779 goto err;
48780 }
48781@@ -654,8 +658,10 @@ static int load_flat_file(struct linux_binprm * bprm,
48782 }
48783 if (IS_ERR_VALUE(result)) {
48784 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
48785+ down_write(&current->mm->mmap_sem);
48786 vm_munmap(textpos, text_len + data_len + extra +
48787 MAX_SHARED_LIBS * sizeof(unsigned long));
48788+ up_write(&current->mm->mmap_sem);
48789 ret = result;
48790 goto err;
48791 }
48792diff --git a/fs/bio.c b/fs/bio.c
48793index b96fc6c..431d628 100644
48794--- a/fs/bio.c
48795+++ b/fs/bio.c
48796@@ -818,7 +818,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
48797 /*
48798 * Overflow, abort
48799 */
48800- if (end < start)
48801+ if (end < start || end - start > INT_MAX - nr_pages)
48802 return ERR_PTR(-EINVAL);
48803
48804 nr_pages += end - start;
48805@@ -952,7 +952,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
48806 /*
48807 * Overflow, abort
48808 */
48809- if (end < start)
48810+ if (end < start || end - start > INT_MAX - nr_pages)
48811 return ERR_PTR(-EINVAL);
48812
48813 nr_pages += end - start;
48814@@ -1214,7 +1214,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
48815 const int read = bio_data_dir(bio) == READ;
48816 struct bio_map_data *bmd = bio->bi_private;
48817 int i;
48818- char *p = bmd->sgvecs[0].iov_base;
48819+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
48820
48821 __bio_for_each_segment(bvec, bio, i, 0) {
48822 char *addr = page_address(bvec->bv_page);
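Both bio hunks harden the same computation: nr_pages is an int, so in addition to the existing wrap check on the user-supplied range, the patch ensures the running total cannot exceed INT_MAX. The pattern as a stand-alone sketch:

        /* Sketch: reject a range whose page count could wrap the counter. */
        #include <limits.h>

        static int add_pages(int nr_pages, unsigned long start, unsigned long end)
        {
                if (end < start || end - start > (unsigned long)(INT_MAX - nr_pages))
                        return -1;      /* the kernel code returns -EINVAL */
                return nr_pages + (int)(end - start);
        }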
48823diff --git a/fs/block_dev.c b/fs/block_dev.c
48824index 78edf76..da14f3f 100644
48825--- a/fs/block_dev.c
48826+++ b/fs/block_dev.c
48827@@ -651,7 +651,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
48828 else if (bdev->bd_contains == bdev)
48829 return true; /* is a whole device which isn't held */
48830
48831- else if (whole->bd_holder == bd_may_claim)
48832+ else if (whole->bd_holder == (void *)bd_may_claim)
48833 return true; /* is a partition of a device that is being partitioned */
48834 else if (whole->bd_holder != NULL)
48835 return false; /* is a partition of a held device */
48836diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
48837index eea5da7..88fead70 100644
48838--- a/fs/btrfs/ctree.c
48839+++ b/fs/btrfs/ctree.c
48840@@ -1033,9 +1033,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
48841 free_extent_buffer(buf);
48842 add_root_to_dirty_list(root);
48843 } else {
48844- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
48845- parent_start = parent->start;
48846- else
48847+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
48848+ if (parent)
48849+ parent_start = parent->start;
48850+ else
48851+ parent_start = 0;
48852+ } else
48853 parent_start = 0;
48854
48855 WARN_ON(trans->transid != btrfs_header_generation(parent));
48856diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
48857index 659ea81..0f63c1a 100644
48858--- a/fs/btrfs/inode.c
48859+++ b/fs/btrfs/inode.c
48860@@ -7300,7 +7300,7 @@ fail:
48861 return -ENOMEM;
48862 }
48863
48864-static int btrfs_getattr(struct vfsmount *mnt,
48865+int btrfs_getattr(struct vfsmount *mnt,
48866 struct dentry *dentry, struct kstat *stat)
48867 {
48868 struct inode *inode = dentry->d_inode;
48869@@ -7314,6 +7314,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
48870 return 0;
48871 }
48872
48873+EXPORT_SYMBOL(btrfs_getattr);
48874+
48875+dev_t get_btrfs_dev_from_inode(struct inode *inode)
48876+{
48877+ return BTRFS_I(inode)->root->anon_dev;
48878+}
48879+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
48880+
48881 /*
48882 * If a file is moved, it will inherit the cow and compression flags of the new
48883 * directory.
48884diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
48885index 338f259..b657640 100644
48886--- a/fs/btrfs/ioctl.c
48887+++ b/fs/btrfs/ioctl.c
48888@@ -3033,9 +3033,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
48889 for (i = 0; i < num_types; i++) {
48890 struct btrfs_space_info *tmp;
48891
48892+ /* Don't copy in more than we allocated */
48893 if (!slot_count)
48894 break;
48895
48896+ slot_count--;
48897+
48898 info = NULL;
48899 rcu_read_lock();
48900 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
48901@@ -3057,10 +3060,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
48902 memcpy(dest, &space, sizeof(space));
48903 dest++;
48904 space_args.total_spaces++;
48905- slot_count--;
48906 }
48907- if (!slot_count)
48908- break;
48909 }
48910 up_read(&info->groups_sem);
48911 }
48912diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
48913index 300e09a..9fe4539 100644
48914--- a/fs/btrfs/relocation.c
48915+++ b/fs/btrfs/relocation.c
48916@@ -1269,7 +1269,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
48917 }
48918 spin_unlock(&rc->reloc_root_tree.lock);
48919
48920- BUG_ON((struct btrfs_root *)node->data != root);
48921+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
48922
48923 if (!del) {
48924 spin_lock(&rc->reloc_root_tree.lock);
48925diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
48926index d8982e9..29a85fa 100644
48927--- a/fs/btrfs/super.c
48928+++ b/fs/btrfs/super.c
48929@@ -267,7 +267,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
48930 function, line, errstr);
48931 return;
48932 }
48933- ACCESS_ONCE(trans->transaction->aborted) = errno;
48934+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
48935 __btrfs_std_error(root->fs_info, function, line, errno, NULL);
48936 }
48937 /*
48938diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
48939index 622f469..e8d2d55 100644
48940--- a/fs/cachefiles/bind.c
48941+++ b/fs/cachefiles/bind.c
48942@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
48943 args);
48944
48945 /* start by checking things over */
48946- ASSERT(cache->fstop_percent >= 0 &&
48947- cache->fstop_percent < cache->fcull_percent &&
48948+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
48949 cache->fcull_percent < cache->frun_percent &&
48950 cache->frun_percent < 100);
48951
48952- ASSERT(cache->bstop_percent >= 0 &&
48953- cache->bstop_percent < cache->bcull_percent &&
48954+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
48955 cache->bcull_percent < cache->brun_percent &&
48956 cache->brun_percent < 100);
48957
48958diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
48959index 0a1467b..6a53245 100644
48960--- a/fs/cachefiles/daemon.c
48961+++ b/fs/cachefiles/daemon.c
48962@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
48963 if (n > buflen)
48964 return -EMSGSIZE;
48965
48966- if (copy_to_user(_buffer, buffer, n) != 0)
48967+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
48968 return -EFAULT;
48969
48970 return n;
48971@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
48972 if (test_bit(CACHEFILES_DEAD, &cache->flags))
48973 return -EIO;
48974
48975- if (datalen < 0 || datalen > PAGE_SIZE - 1)
48976+ if (datalen > PAGE_SIZE - 1)
48977 return -EOPNOTSUPP;
48978
48979 /* drag the command string into the kernel so we can parse it */
48980@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
48981 if (args[0] != '%' || args[1] != '\0')
48982 return -EINVAL;
48983
48984- if (fstop < 0 || fstop >= cache->fcull_percent)
48985+ if (fstop >= cache->fcull_percent)
48986 return cachefiles_daemon_range_error(cache, args);
48987
48988 cache->fstop_percent = fstop;
48989@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
48990 if (args[0] != '%' || args[1] != '\0')
48991 return -EINVAL;
48992
48993- if (bstop < 0 || bstop >= cache->bcull_percent)
48994+ if (bstop >= cache->bcull_percent)
48995 return cachefiles_daemon_range_error(cache, args);
48996
48997 cache->bstop_percent = bstop;
48998diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
48999index 4938251..7e01445 100644
49000--- a/fs/cachefiles/internal.h
49001+++ b/fs/cachefiles/internal.h
49002@@ -59,7 +59,7 @@ struct cachefiles_cache {
49003 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
49004 struct rb_root active_nodes; /* active nodes (can't be culled) */
49005 rwlock_t active_lock; /* lock for active_nodes */
49006- atomic_t gravecounter; /* graveyard uniquifier */
49007+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
49008 unsigned frun_percent; /* when to stop culling (% files) */
49009 unsigned fcull_percent; /* when to start culling (% files) */
49010 unsigned fstop_percent; /* when to stop allocating (% files) */
49011@@ -171,19 +171,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
49012 * proc.c
49013 */
49014 #ifdef CONFIG_CACHEFILES_HISTOGRAM
49015-extern atomic_t cachefiles_lookup_histogram[HZ];
49016-extern atomic_t cachefiles_mkdir_histogram[HZ];
49017-extern atomic_t cachefiles_create_histogram[HZ];
49018+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
49019+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
49020+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
49021
49022 extern int __init cachefiles_proc_init(void);
49023 extern void cachefiles_proc_cleanup(void);
49024 static inline
49025-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
49026+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
49027 {
49028 unsigned long jif = jiffies - start_jif;
49029 if (jif >= HZ)
49030 jif = HZ - 1;
49031- atomic_inc(&histogram[jif]);
49032+ atomic_inc_unchecked(&histogram[jif]);
49033 }
49034
49035 #else
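The cachefiles conversions follow a split applied throughout this patch: under PAX_REFCOUNT, plain atomic_t traps on overflow to stop reference-count exploits, so counters that may legitimately wrap (statistics, uniquifiers such as gravecounter) move to the _unchecked variants. A minimal sketch of the split:

        /* Sketch: refcounts stay atomic_t and trap on overflow; counters
         * that are allowed to wrap use the unchecked API this patch adds. */
        static atomic_t obj_refcount = ATOMIC_INIT(1);               /* must not wrap */
        static atomic_unchecked_t name_uniquifier = ATOMIC_INIT(0);  /* may wrap */

        static u32 next_name_id(void)
        {
                return (u32)atomic_inc_return_unchecked(&name_uniquifier);
        }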
49036diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
49037index 8c01c5fc..15f982e 100644
49038--- a/fs/cachefiles/namei.c
49039+++ b/fs/cachefiles/namei.c
49040@@ -317,7 +317,7 @@ try_again:
49041 /* first step is to make up a grave dentry in the graveyard */
49042 sprintf(nbuffer, "%08x%08x",
49043 (uint32_t) get_seconds(),
49044- (uint32_t) atomic_inc_return(&cache->gravecounter));
49045+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
49046
49047 /* do the multiway lock magic */
49048 trap = lock_rename(cache->graveyard, dir);
49049diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
49050index eccd339..4c1d995 100644
49051--- a/fs/cachefiles/proc.c
49052+++ b/fs/cachefiles/proc.c
49053@@ -14,9 +14,9 @@
49054 #include <linux/seq_file.h>
49055 #include "internal.h"
49056
49057-atomic_t cachefiles_lookup_histogram[HZ];
49058-atomic_t cachefiles_mkdir_histogram[HZ];
49059-atomic_t cachefiles_create_histogram[HZ];
49060+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
49061+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
49062+atomic_unchecked_t cachefiles_create_histogram[HZ];
49063
49064 /*
49065 * display the latency histogram
49066@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
49067 return 0;
49068 default:
49069 index = (unsigned long) v - 3;
49070- x = atomic_read(&cachefiles_lookup_histogram[index]);
49071- y = atomic_read(&cachefiles_mkdir_histogram[index]);
49072- z = atomic_read(&cachefiles_create_histogram[index]);
49073+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
49074+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
49075+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
49076 if (x == 0 && y == 0 && z == 0)
49077 return 0;
49078
49079diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
49080index 4809922..aab2c39 100644
49081--- a/fs/cachefiles/rdwr.c
49082+++ b/fs/cachefiles/rdwr.c
49083@@ -965,7 +965,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
49084 old_fs = get_fs();
49085 set_fs(KERNEL_DS);
49086 ret = file->f_op->write(
49087- file, (const void __user *) data, len, &pos);
49088+ file, (const void __force_user *) data, len, &pos);
49089 set_fs(old_fs);
49090 kunmap(page);
49091 if (ret != len)
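The rdwr.c cast belongs to a patch-wide cleanup of set_fs(KERNEL_DS) call sites: once the address limit is raised, a kernel buffer is deliberately passed where a __user pointer is expected, and __force_user records that override for sparse instead of silently discarding the address-space annotation. The idiom in isolation:

        /* Sketch: writing a kernel buffer through a __user-typed file op
         * while the address limit is temporarily widened to KERNEL_DS. */
        mm_segment_t old_fs = get_fs();
        ssize_t ret;

        set_fs(KERNEL_DS);
        ret = file->f_op->write(file, (const void __force_user *)kbuf, len, &pos);
        set_fs(old_fs);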
49092diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
49093index 8c1aabe..bbf856a 100644
49094--- a/fs/ceph/dir.c
49095+++ b/fs/ceph/dir.c
49096@@ -243,7 +243,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
49097 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
49098 struct ceph_mds_client *mdsc = fsc->mdsc;
49099 unsigned frag = fpos_frag(filp->f_pos);
49100- int off = fpos_off(filp->f_pos);
49101+ unsigned int off = fpos_off(filp->f_pos);
49102 int err;
49103 u32 ftype;
49104 struct ceph_mds_reply_info_parsed *rinfo;
49105diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
49106index cfd1ce3..1d36db1 100644
49107--- a/fs/cifs/asn1.c
49108+++ b/fs/cifs/asn1.c
49109@@ -614,53 +614,10 @@ decode_negTokenInit(unsigned char *security_blob, int length,
49110 }
49111 }
49112
49113- /* mechlistMIC */
49114- if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
49115- /* Check if we have reached the end of the blob, but with
49116- no mechListMic (e.g. NTLMSSP instead of KRB5) */
49117- if (ctx.error == ASN1_ERR_DEC_EMPTY)
49118- goto decode_negtoken_exit;
49119- cFYI(1, "Error decoding last part negTokenInit exit3");
49120- return 0;
49121- } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {
49122- /* tag = 3 indicating mechListMIC */
49123- cFYI(1, "Exit 4 cls = %d con = %d tag = %d end = %p (%d)",
49124- cls, con, tag, end, *end);
49125- return 0;
49126- }
49127-
49128- /* sequence */
49129- if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
49130- cFYI(1, "Error decoding last part negTokenInit exit5");
49131- return 0;
49132- } else if ((cls != ASN1_UNI) || (con != ASN1_CON)
49133- || (tag != ASN1_SEQ)) {
49134- cFYI(1, "cls = %d con = %d tag = %d end = %p (%d)",
49135- cls, con, tag, end, *end);
49136- }
49137-
49138- /* sequence of */
49139- if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
49140- cFYI(1, "Error decoding last part negTokenInit exit 7");
49141- return 0;
49142- } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {
49143- cFYI(1, "Exit 8 cls = %d con = %d tag = %d end = %p (%d)",
49144- cls, con, tag, end, *end);
49145- return 0;
49146- }
49147-
49148- /* general string */
49149- if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
49150- cFYI(1, "Error decoding last part negTokenInit exit9");
49151- return 0;
49152- } else if ((cls != ASN1_UNI) || (con != ASN1_PRI)
49153- || (tag != ASN1_GENSTR)) {
49154- cFYI(1, "Exit10 cls = %d con = %d tag = %d end = %p (%d)",
49155- cls, con, tag, end, *end);
49156- return 0;
49157- }
49158- cFYI(1, "Need to call asn1_octets_decode() function for %s",
49159- ctx.pointer); /* is this UTF-8 or ASCII? */
49160-decode_negtoken_exit:
49161+ /*
49162+ * We currently ignore anything at the end of the SPNEGO blob after
49163+ * the mechTypes have been parsed, since none of that info is
49164+ * used at the moment.
49165+ */
49166 return 1;
49167 }
49168diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
49169index d9ea6ed..1e6c8ac 100644
49170--- a/fs/cifs/cifs_debug.c
49171+++ b/fs/cifs/cifs_debug.c
49172@@ -267,8 +267,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
49173
49174 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
49175 #ifdef CONFIG_CIFS_STATS2
49176- atomic_set(&totBufAllocCount, 0);
49177- atomic_set(&totSmBufAllocCount, 0);
49178+ atomic_set_unchecked(&totBufAllocCount, 0);
49179+ atomic_set_unchecked(&totSmBufAllocCount, 0);
49180 #endif /* CONFIG_CIFS_STATS2 */
49181 spin_lock(&cifs_tcp_ses_lock);
49182 list_for_each(tmp1, &cifs_tcp_ses_list) {
49183@@ -281,7 +281,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
49184 tcon = list_entry(tmp3,
49185 struct cifs_tcon,
49186 tcon_list);
49187- atomic_set(&tcon->num_smbs_sent, 0);
49188+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
49189 if (server->ops->clear_stats)
49190 server->ops->clear_stats(tcon);
49191 }
49192@@ -313,8 +313,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
49193 smBufAllocCount.counter, cifs_min_small);
49194 #ifdef CONFIG_CIFS_STATS2
49195 seq_printf(m, "Total Large %d Small %d Allocations\n",
49196- atomic_read(&totBufAllocCount),
49197- atomic_read(&totSmBufAllocCount));
49198+ atomic_read_unchecked(&totBufAllocCount),
49199+ atomic_read_unchecked(&totSmBufAllocCount));
49200 #endif /* CONFIG_CIFS_STATS2 */
49201
49202 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
49203@@ -343,7 +343,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
49204 if (tcon->need_reconnect)
49205 seq_puts(m, "\tDISCONNECTED ");
49206 seq_printf(m, "\nSMBs: %d",
49207- atomic_read(&tcon->num_smbs_sent));
49208+ atomic_read_unchecked(&tcon->num_smbs_sent));
49209 if (server->ops->print_stats)
49210 server->ops->print_stats(m, tcon);
49211 }
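[annotation] This is the first of many statistics conversions in the CIFS code: counters whose only job is reporting are switched to atomic_unchecked_t so that PAX_REFCOUNT's overflow trap never fires on them. A minimal sketch of what these names fall back to when CONFIG_PAX_REFCOUNT is disabled (bodies illustrative; the real x86 versions are locked asm):

	typedef struct {
		int counter;
	} atomic_unchecked_t;

	static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
	{
		return (*(volatile const int *)&v->counter);
	}

	static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
	{
		v->counter = i;
	}

	static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
	{
		v->counter++;	/* deliberately no overflow detection */
	}

With PAX_REFCOUNT enabled, plain atomic_inc() traps on signed overflow, so anything that may legitimately wrap -- pure statistics like totBufAllocCount -- must opt out via the _unchecked variants.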
49212diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
49213index e328339..322228b 100644
49214--- a/fs/cifs/cifsfs.c
49215+++ b/fs/cifs/cifsfs.c
49216@@ -1002,7 +1002,7 @@ cifs_init_request_bufs(void)
49217 /* cERROR(1, "CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize); */
49218 cifs_req_cachep = kmem_cache_create("cifs_request",
49219 CIFSMaxBufSize + max_hdr_size, 0,
49220- SLAB_HWCACHE_ALIGN, NULL);
49221+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
49222 if (cifs_req_cachep == NULL)
49223 return -ENOMEM;
49224
49225@@ -1029,7 +1029,7 @@ cifs_init_request_bufs(void)
49226 efficient to alloc 1 per page off the slab compared to 17K (5page)
49227 alloc of large cifs buffers even when page debugging is on */
49228 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
49229- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
49230+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
49231 NULL);
49232 if (cifs_sm_req_cachep == NULL) {
49233 mempool_destroy(cifs_req_poolp);
49234@@ -1114,8 +1114,8 @@ init_cifs(void)
49235 atomic_set(&bufAllocCount, 0);
49236 atomic_set(&smBufAllocCount, 0);
49237 #ifdef CONFIG_CIFS_STATS2
49238- atomic_set(&totBufAllocCount, 0);
49239- atomic_set(&totSmBufAllocCount, 0);
49240+ atomic_set_unchecked(&totBufAllocCount, 0);
49241+ atomic_set_unchecked(&totSmBufAllocCount, 0);
49242 #endif /* CONFIG_CIFS_STATS2 */
49243
49244 atomic_set(&midCount, 0);
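[annotation] SLAB_USERCOPY is PaX's whitelist flag: under CONFIG_PAX_USERCOPY, only objects from flagged caches may be copied to or from userland, and CIFS request buffers cross that boundary on every read and write. A rough sketch of the cache-level part of the check (illustrative only; the full check elsewhere in this patch also validates the copy's offset and length within the object):

	static const char *usercopy_check_slab(const void *ptr)
	{
		/* assumes ptr is a slab-allocated object */
		struct page *page = virt_to_head_page(ptr);
		struct kmem_cache *s = page->slab_cache;

		if (!(s->flags & SLAB_USERCOPY))
			return s->name;	/* report the offending cache */
		return NULL;		/* whitelisted: copy may proceed */
	}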
49245diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
49246index e6899ce..d6b2920 100644
49247--- a/fs/cifs/cifsglob.h
49248+++ b/fs/cifs/cifsglob.h
49249@@ -751,35 +751,35 @@ struct cifs_tcon {
49250 __u16 Flags; /* optional support bits */
49251 enum statusEnum tidStatus;
49252 #ifdef CONFIG_CIFS_STATS
49253- atomic_t num_smbs_sent;
49254+ atomic_unchecked_t num_smbs_sent;
49255 union {
49256 struct {
49257- atomic_t num_writes;
49258- atomic_t num_reads;
49259- atomic_t num_flushes;
49260- atomic_t num_oplock_brks;
49261- atomic_t num_opens;
49262- atomic_t num_closes;
49263- atomic_t num_deletes;
49264- atomic_t num_mkdirs;
49265- atomic_t num_posixopens;
49266- atomic_t num_posixmkdirs;
49267- atomic_t num_rmdirs;
49268- atomic_t num_renames;
49269- atomic_t num_t2renames;
49270- atomic_t num_ffirst;
49271- atomic_t num_fnext;
49272- atomic_t num_fclose;
49273- atomic_t num_hardlinks;
49274- atomic_t num_symlinks;
49275- atomic_t num_locks;
49276- atomic_t num_acl_get;
49277- atomic_t num_acl_set;
49278+ atomic_unchecked_t num_writes;
49279+ atomic_unchecked_t num_reads;
49280+ atomic_unchecked_t num_flushes;
49281+ atomic_unchecked_t num_oplock_brks;
49282+ atomic_unchecked_t num_opens;
49283+ atomic_unchecked_t num_closes;
49284+ atomic_unchecked_t num_deletes;
49285+ atomic_unchecked_t num_mkdirs;
49286+ atomic_unchecked_t num_posixopens;
49287+ atomic_unchecked_t num_posixmkdirs;
49288+ atomic_unchecked_t num_rmdirs;
49289+ atomic_unchecked_t num_renames;
49290+ atomic_unchecked_t num_t2renames;
49291+ atomic_unchecked_t num_ffirst;
49292+ atomic_unchecked_t num_fnext;
49293+ atomic_unchecked_t num_fclose;
49294+ atomic_unchecked_t num_hardlinks;
49295+ atomic_unchecked_t num_symlinks;
49296+ atomic_unchecked_t num_locks;
49297+ atomic_unchecked_t num_acl_get;
49298+ atomic_unchecked_t num_acl_set;
49299 } cifs_stats;
49300 #ifdef CONFIG_CIFS_SMB2
49301 struct {
49302- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
49303- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
49304+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
49305+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
49306 } smb2_stats;
49307 #endif /* CONFIG_CIFS_SMB2 */
49308 } stats;
49309@@ -1080,7 +1080,7 @@ convert_delimiter(char *path, char delim)
49310 }
49311
49312 #ifdef CONFIG_CIFS_STATS
49313-#define cifs_stats_inc atomic_inc
49314+#define cifs_stats_inc atomic_inc_unchecked
49315
49316 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
49317 unsigned int bytes)
49318@@ -1445,8 +1445,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
49319 /* Various Debug counters */
49320 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
49321 #ifdef CONFIG_CIFS_STATS2
49322-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
49323-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
49324+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
49325+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
49326 #endif
49327 GLOBAL_EXTERN atomic_t smBufAllocCount;
49328 GLOBAL_EXTERN atomic_t midCount;
49329diff --git a/fs/cifs/link.c b/fs/cifs/link.c
49330index 51dc2fb..1e12a33 100644
49331--- a/fs/cifs/link.c
49332+++ b/fs/cifs/link.c
49333@@ -616,7 +616,7 @@ symlink_exit:
49334
49335 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
49336 {
49337- char *p = nd_get_link(nd);
49338+ const char *p = nd_get_link(nd);
49339 if (!IS_ERR(p))
49340 kfree(p);
49341 }
49342diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
49343index 3a00c0d..42d901c 100644
49344--- a/fs/cifs/misc.c
49345+++ b/fs/cifs/misc.c
49346@@ -169,7 +169,7 @@ cifs_buf_get(void)
49347 memset(ret_buf, 0, buf_size + 3);
49348 atomic_inc(&bufAllocCount);
49349 #ifdef CONFIG_CIFS_STATS2
49350- atomic_inc(&totBufAllocCount);
49351+ atomic_inc_unchecked(&totBufAllocCount);
49352 #endif /* CONFIG_CIFS_STATS2 */
49353 }
49354
49355@@ -204,7 +204,7 @@ cifs_small_buf_get(void)
49356 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
49357 atomic_inc(&smBufAllocCount);
49358 #ifdef CONFIG_CIFS_STATS2
49359- atomic_inc(&totSmBufAllocCount);
49360+ atomic_inc_unchecked(&totSmBufAllocCount);
49361 #endif /* CONFIG_CIFS_STATS2 */
49362
49363 }
49364diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
49365index 47bc5a8..10decbe 100644
49366--- a/fs/cifs/smb1ops.c
49367+++ b/fs/cifs/smb1ops.c
49368@@ -586,27 +586,27 @@ static void
49369 cifs_clear_stats(struct cifs_tcon *tcon)
49370 {
49371 #ifdef CONFIG_CIFS_STATS
49372- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
49373- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
49374- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
49375- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
49376- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
49377- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
49378- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
49379- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
49380- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
49381- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
49382- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
49383- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
49384- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
49385- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
49386- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
49387- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
49388- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
49389- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
49390- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
49391- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
49392- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
49393+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
49394+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
49395+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
49396+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
49397+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
49398+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
49399+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
49400+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
49401+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
49402+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
49403+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
49404+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
49405+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
49406+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
49407+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
49408+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
49409+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
49410+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
49411+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
49412+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
49413+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
49414 #endif
49415 }
49416
49417@@ -615,36 +615,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
49418 {
49419 #ifdef CONFIG_CIFS_STATS
49420 seq_printf(m, " Oplocks breaks: %d",
49421- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
49422+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
49423 seq_printf(m, "\nReads: %d Bytes: %llu",
49424- atomic_read(&tcon->stats.cifs_stats.num_reads),
49425+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
49426 (long long)(tcon->bytes_read));
49427 seq_printf(m, "\nWrites: %d Bytes: %llu",
49428- atomic_read(&tcon->stats.cifs_stats.num_writes),
49429+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
49430 (long long)(tcon->bytes_written));
49431 seq_printf(m, "\nFlushes: %d",
49432- atomic_read(&tcon->stats.cifs_stats.num_flushes));
49433+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
49434 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
49435- atomic_read(&tcon->stats.cifs_stats.num_locks),
49436- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
49437- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
49438+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
49439+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
49440+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
49441 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
49442- atomic_read(&tcon->stats.cifs_stats.num_opens),
49443- atomic_read(&tcon->stats.cifs_stats.num_closes),
49444- atomic_read(&tcon->stats.cifs_stats.num_deletes));
49445+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
49446+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
49447+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
49448 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
49449- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
49450- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
49451+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
49452+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
49453 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
49454- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
49455- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
49456+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
49457+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
49458 seq_printf(m, "\nRenames: %d T2 Renames %d",
49459- atomic_read(&tcon->stats.cifs_stats.num_renames),
49460- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
49461+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
49462+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
49463 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
49464- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
49465- atomic_read(&tcon->stats.cifs_stats.num_fnext),
49466- atomic_read(&tcon->stats.cifs_stats.num_fclose));
49467+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
49468+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
49469+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
49470 #endif
49471 }
49472
49473diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
49474index bceffe7..cd1ae59 100644
49475--- a/fs/cifs/smb2ops.c
49476+++ b/fs/cifs/smb2ops.c
49477@@ -274,8 +274,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
49478 #ifdef CONFIG_CIFS_STATS
49479 int i;
49480 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
49481- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
49482- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
49483+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
49484+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
49485 }
49486 #endif
49487 }
49488@@ -284,66 +284,66 @@ static void
49489 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
49490 {
49491 #ifdef CONFIG_CIFS_STATS
49492- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
49493- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
49494+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
49495+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
49496 seq_printf(m, "\nNegotiates: %d sent %d failed",
49497- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
49498- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
49499+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
49500+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
49501 seq_printf(m, "\nSessionSetups: %d sent %d failed",
49502- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
49503- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
49504+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
49505+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
49506 #define SMB2LOGOFF 0x0002 /* trivial request/resp */
49507 seq_printf(m, "\nLogoffs: %d sent %d failed",
49508- atomic_read(&sent[SMB2_LOGOFF_HE]),
49509- atomic_read(&failed[SMB2_LOGOFF_HE]));
49510+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
49511+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
49512 seq_printf(m, "\nTreeConnects: %d sent %d failed",
49513- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
49514- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
49515+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
49516+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
49517 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
49518- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
49519- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
49520+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
49521+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
49522 seq_printf(m, "\nCreates: %d sent %d failed",
49523- atomic_read(&sent[SMB2_CREATE_HE]),
49524- atomic_read(&failed[SMB2_CREATE_HE]));
49525+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
49526+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
49527 seq_printf(m, "\nCloses: %d sent %d failed",
49528- atomic_read(&sent[SMB2_CLOSE_HE]),
49529- atomic_read(&failed[SMB2_CLOSE_HE]));
49530+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
49531+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
49532 seq_printf(m, "\nFlushes: %d sent %d failed",
49533- atomic_read(&sent[SMB2_FLUSH_HE]),
49534- atomic_read(&failed[SMB2_FLUSH_HE]));
49535+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
49536+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
49537 seq_printf(m, "\nReads: %d sent %d failed",
49538- atomic_read(&sent[SMB2_READ_HE]),
49539- atomic_read(&failed[SMB2_READ_HE]));
49540+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
49541+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
49542 seq_printf(m, "\nWrites: %d sent %d failed",
49543- atomic_read(&sent[SMB2_WRITE_HE]),
49544- atomic_read(&failed[SMB2_WRITE_HE]));
49545+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
49546+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
49547 seq_printf(m, "\nLocks: %d sent %d failed",
49548- atomic_read(&sent[SMB2_LOCK_HE]),
49549- atomic_read(&failed[SMB2_LOCK_HE]));
49550+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
49551+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
49552 seq_printf(m, "\nIOCTLs: %d sent %d failed",
49553- atomic_read(&sent[SMB2_IOCTL_HE]),
49554- atomic_read(&failed[SMB2_IOCTL_HE]));
49555+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
49556+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
49557 seq_printf(m, "\nCancels: %d sent %d failed",
49558- atomic_read(&sent[SMB2_CANCEL_HE]),
49559- atomic_read(&failed[SMB2_CANCEL_HE]));
49560+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
49561+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
49562 seq_printf(m, "\nEchos: %d sent %d failed",
49563- atomic_read(&sent[SMB2_ECHO_HE]),
49564- atomic_read(&failed[SMB2_ECHO_HE]));
49565+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
49566+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
49567 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
49568- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
49569- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
49570+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
49571+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
49572 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
49573- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
49574- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
49575+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
49576+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
49577 seq_printf(m, "\nQueryInfos: %d sent %d failed",
49578- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
49579- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
49580+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
49581+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
49582 seq_printf(m, "\nSetInfos: %d sent %d failed",
49583- atomic_read(&sent[SMB2_SET_INFO_HE]),
49584- atomic_read(&failed[SMB2_SET_INFO_HE]));
49585+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
49586+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
49587 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
49588- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
49589- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
49590+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
49591+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
49592 #endif
49593 }
49594
49595diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
49596index 41d9d07..dbb4772 100644
49597--- a/fs/cifs/smb2pdu.c
49598+++ b/fs/cifs/smb2pdu.c
49599@@ -1761,8 +1761,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
49600 default:
49601 cERROR(1, "info level %u isn't supported",
49602 srch_inf->info_level);
49603- rc = -EINVAL;
49604- goto qdir_exit;
49605+ return -EINVAL;
49606 }
49607
49608 req->FileIndex = cpu_to_le32(index);
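[annotation] The direct return replaces a jump to qdir_exit, which at this point would run cleanup against locals (the response buffer and its type) that are not yet initialized -- apparently the motivation for the change. The pitfall in miniature (all names hypothetical, illustrative only):

	int example(void)
	{
		void *buf;		/* not yet initialized */
		int rc;

		if (input_is_bad()) {	/* hypothetical */
			rc = -EINVAL;
			goto out;	/* buf is still garbage here */
		}
		buf = make_buffer();	/* hypothetical */
		rc = consume(buf);	/* hypothetical */
	out:
		release(buf);		/* undefined behaviour on the early path */
		return rc;
	}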
49609diff --git a/fs/coda/cache.c b/fs/coda/cache.c
49610index 958ae0e..505c9d0 100644
49611--- a/fs/coda/cache.c
49612+++ b/fs/coda/cache.c
49613@@ -24,7 +24,7 @@
49614 #include "coda_linux.h"
49615 #include "coda_cache.h"
49616
49617-static atomic_t permission_epoch = ATOMIC_INIT(0);
49618+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
49619
49620 /* replace or extend an acl cache hit */
49621 void coda_cache_enter(struct inode *inode, int mask)
49622@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
49623 struct coda_inode_info *cii = ITOC(inode);
49624
49625 spin_lock(&cii->c_lock);
49626- cii->c_cached_epoch = atomic_read(&permission_epoch);
49627+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
49628 if (cii->c_uid != current_fsuid()) {
49629 cii->c_uid = current_fsuid();
49630 cii->c_cached_perm = mask;
49631@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
49632 {
49633 struct coda_inode_info *cii = ITOC(inode);
49634 spin_lock(&cii->c_lock);
49635- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
49636+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
49637 spin_unlock(&cii->c_lock);
49638 }
49639
49640 /* remove all acl caches */
49641 void coda_cache_clear_all(struct super_block *sb)
49642 {
49643- atomic_inc(&permission_epoch);
49644+ atomic_inc_unchecked(&permission_epoch);
49645 }
49646
49647
49648@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
49649 spin_lock(&cii->c_lock);
49650 hit = (mask & cii->c_cached_perm) == mask &&
49651 cii->c_uid == current_fsuid() &&
49652- cii->c_cached_epoch == atomic_read(&permission_epoch);
49653+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
49654 spin_unlock(&cii->c_lock);
49655
49656 return hit;
49657diff --git a/fs/compat.c b/fs/compat.c
49658index a06dcbc..dacb6d3 100644
49659--- a/fs/compat.c
49660+++ b/fs/compat.c
49661@@ -54,7 +54,7 @@
49662 #include <asm/ioctls.h>
49663 #include "internal.h"
49664
49665-int compat_log = 1;
49666+int compat_log = 0;
49667
49668 int compat_printk(const char *fmt, ...)
49669 {
49670@@ -490,7 +490,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
49671
49672 set_fs(KERNEL_DS);
49673 /* The __user pointer cast is valid because of the set_fs() */
49674- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
49675+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
49676 set_fs(oldfs);
49677 /* truncating is ok because it's a user address */
49678 if (!ret)
49679@@ -548,7 +548,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
49680 goto out;
49681
49682 ret = -EINVAL;
49683- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
49684+ if (nr_segs > UIO_MAXIOV)
49685 goto out;
49686 if (nr_segs > fast_segs) {
49687 ret = -ENOMEM;
49688@@ -835,6 +835,7 @@ struct compat_old_linux_dirent {
49689
49690 struct compat_readdir_callback {
49691 struct compat_old_linux_dirent __user *dirent;
49692+ struct file * file;
49693 int result;
49694 };
49695
49696@@ -852,6 +853,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
49697 buf->result = -EOVERFLOW;
49698 return -EOVERFLOW;
49699 }
49700+
49701+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49702+ return 0;
49703+
49704 buf->result++;
49705 dirent = buf->dirent;
49706 if (!access_ok(VERIFY_WRITE, dirent,
49707@@ -882,6 +887,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
49708
49709 buf.result = 0;
49710 buf.dirent = dirent;
49711+ buf.file = f.file;
49712
49713 error = vfs_readdir(f.file, compat_fillonedir, &buf);
49714 if (buf.result)
49715@@ -901,6 +907,7 @@ struct compat_linux_dirent {
49716 struct compat_getdents_callback {
49717 struct compat_linux_dirent __user *current_dir;
49718 struct compat_linux_dirent __user *previous;
49719+ struct file * file;
49720 int count;
49721 int error;
49722 };
49723@@ -922,6 +929,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
49724 buf->error = -EOVERFLOW;
49725 return -EOVERFLOW;
49726 }
49727+
49728+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49729+ return 0;
49730+
49731 dirent = buf->previous;
49732 if (dirent) {
49733 if (__put_user(offset, &dirent->d_off))
49734@@ -967,6 +978,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
49735 buf.previous = NULL;
49736 buf.count = count;
49737 buf.error = 0;
49738+ buf.file = f.file;
49739
49740 error = vfs_readdir(f.file, compat_filldir, &buf);
49741 if (error >= 0)
49742@@ -987,6 +999,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
49743 struct compat_getdents_callback64 {
49744 struct linux_dirent64 __user *current_dir;
49745 struct linux_dirent64 __user *previous;
49746+ struct file * file;
49747 int count;
49748 int error;
49749 };
49750@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
49751 buf->error = -EINVAL; /* only used if we fail.. */
49752 if (reclen > buf->count)
49753 return -EINVAL;
49754+
49755+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49756+ return 0;
49757+
49758 dirent = buf->previous;
49759
49760 if (dirent) {
49761@@ -1052,13 +1069,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
49762 buf.previous = NULL;
49763 buf.count = count;
49764 buf.error = 0;
49765+ buf.file = f.file;
49766
49767 error = vfs_readdir(f.file, compat_filldir64, &buf);
49768 if (error >= 0)
49769 error = buf.error;
49770 lastdirent = buf.previous;
49771 if (lastdirent) {
49772- typeof(lastdirent->d_off) d_off = f.file->f_pos;
49773+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
49774 if (__put_user_unaligned(d_off, &lastdirent->d_off))
49775 error = -EFAULT;
49776 else
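[annotation] Several separate concerns in this file: compat_log now defaults to off, presumably to keep unprivileged-triggerable warnings out of the kernel log; the nr_segs < 0 test is dropped because nr_segs is an unsigned long, so the comparison was always false; the d_off typeof is rewritten against a null struct pointer, apparently so static checkers never see a __user pointer inside typeof(); and each readdir callback gains a struct file pointer so the grsecurity RBAC hook can filter entries. The filtering pattern, sketched (struct cb and emit_entry are hypothetical stand-ins):

	static int filldir_with_policy(void *__buf, const char *name, int namlen,
				       loff_t offset, u64 ino, unsigned int d_type)
	{
		struct cb *buf = __buf;		/* hypothetical callback state */

		if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
			return 0;	/* skip this entry silently */

		return emit_entry(buf, name, namlen, offset, ino, d_type);	/* hypothetical */
	}

Returning 0 from a filldir_t means "consumed, keep going", so denied names simply vanish from listings instead of aborting the whole getdents call.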
49777diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
49778index a81147e..20bf2b5 100644
49779--- a/fs/compat_binfmt_elf.c
49780+++ b/fs/compat_binfmt_elf.c
49781@@ -30,11 +30,13 @@
49782 #undef elf_phdr
49783 #undef elf_shdr
49784 #undef elf_note
49785+#undef elf_dyn
49786 #undef elf_addr_t
49787 #define elfhdr elf32_hdr
49788 #define elf_phdr elf32_phdr
49789 #define elf_shdr elf32_shdr
49790 #define elf_note elf32_note
49791+#define elf_dyn Elf32_Dyn
49792 #define elf_addr_t Elf32_Addr
49793
49794 /*
49795diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
49796index e2f57a0..3c78771 100644
49797--- a/fs/compat_ioctl.c
49798+++ b/fs/compat_ioctl.c
49799@@ -623,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
49800 return -EFAULT;
49801 if (__get_user(udata, &ss32->iomem_base))
49802 return -EFAULT;
49803- ss.iomem_base = compat_ptr(udata);
49804+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
49805 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
49806 __get_user(ss.port_high, &ss32->port_high))
49807 return -EFAULT;
49808@@ -798,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
49809 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
49810 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
49811 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
49812- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
49813+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
49814 return -EFAULT;
49815
49816 return ioctl_preallocate(file, p);
49817@@ -1620,8 +1620,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
49818 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
49819 {
49820 unsigned int a, b;
49821- a = *(unsigned int *)p;
49822- b = *(unsigned int *)q;
49823+ a = *(const unsigned int *)p;
49824+ b = *(const unsigned int *)q;
49825 if (a > b)
49826 return 1;
49827 if (a < b)
49828diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
49829index 712b10f..c33c4ca 100644
49830--- a/fs/configfs/dir.c
49831+++ b/fs/configfs/dir.c
49832@@ -1037,10 +1037,11 @@ static int configfs_dump(struct configfs_dirent *sd, int level)
49833 static int configfs_depend_prep(struct dentry *origin,
49834 struct config_item *target)
49835 {
49836- struct configfs_dirent *child_sd, *sd = origin->d_fsdata;
49837+ struct configfs_dirent *child_sd, *sd;
49838 int ret = 0;
49839
49840- BUG_ON(!origin || !sd);
49841+ BUG_ON(!origin || !origin->d_fsdata);
49842+ sd = origin->d_fsdata;
49843
49844 if (sd->s_element == target) /* Boo-yah */
49845 goto out;
49846@@ -1564,7 +1565,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
49847 }
49848 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
49849 struct configfs_dirent *next;
49850- const char * name;
49851+ const unsigned char * name;
49852+ char d_name[sizeof(next->s_dentry->d_iname)];
49853 int len;
49854 struct inode *inode = NULL;
49855
49856@@ -1574,7 +1576,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
49857 continue;
49858
49859 name = configfs_get_name(next);
49860- len = strlen(name);
49861+ if (next->s_dentry && name == next->s_dentry->d_iname) {
49862+ len = next->s_dentry->d_name.len;
49863+ memcpy(d_name, name, len);
49864+ name = d_name;
49865+ } else
49866+ len = strlen(name);
49867
49868 /*
49869 * We'll have a dentry and an inode for
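[annotation] Two fixes in configfs. First, the old code dereferenced origin (via the initializer reading origin->d_fsdata) before BUG_ON checked it for NULL -- an ordering GCC is entitled to exploit by deleting the "provably dead" NULL test. Second, configfs_readdir now copies d_iname into a local buffer when the name is stored inline in the dentry, so a concurrent rename cannot change the string mid-strlen. The first anti-pattern in miniature:

	/*
	 *   struct foo *f = p->field;   // dereferences p ...
	 *   BUG_ON(!p || !f);           // ... then checks p for NULL
	 *
	 * After the dereference the compiler may assume p != NULL and drop
	 * the !p test entirely (-fdelete-null-pointer-checks).
	 */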
49870diff --git a/fs/coredump.c b/fs/coredump.c
49871index 1774932..5812106 100644
49872--- a/fs/coredump.c
49873+++ b/fs/coredump.c
49874@@ -52,7 +52,7 @@ struct core_name {
49875 char *corename;
49876 int used, size;
49877 };
49878-static atomic_t call_count = ATOMIC_INIT(1);
49879+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
49880
49881 /* The maximal length of core_pattern is also specified in sysctl.c */
49882
49883@@ -60,7 +60,7 @@ static int expand_corename(struct core_name *cn)
49884 {
49885 char *old_corename = cn->corename;
49886
49887- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
49888+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
49889 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
49890
49891 if (!cn->corename) {
49892@@ -157,7 +157,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm)
49893 int pid_in_pattern = 0;
49894 int err = 0;
49895
49896- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
49897+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
49898 cn->corename = kmalloc(cn->size, GFP_KERNEL);
49899 cn->used = 0;
49900
49901@@ -414,17 +414,17 @@ static void wait_for_dump_helpers(struct file *file)
49902 pipe = file->f_path.dentry->d_inode->i_pipe;
49903
49904 pipe_lock(pipe);
49905- pipe->readers++;
49906- pipe->writers--;
49907+ atomic_inc(&pipe->readers);
49908+ atomic_dec(&pipe->writers);
49909
49910- while ((pipe->readers > 1) && (!signal_pending(current))) {
49911+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
49912 wake_up_interruptible_sync(&pipe->wait);
49913 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
49914 pipe_wait(pipe);
49915 }
49916
49917- pipe->readers--;
49918- pipe->writers++;
49919+ atomic_dec(&pipe->readers);
49920+ atomic_inc(&pipe->writers);
49921 pipe_unlock(pipe);
49922
49923 }
49924@@ -471,7 +471,8 @@ void do_coredump(siginfo_t *siginfo)
49925 int ispipe;
49926 struct files_struct *displaced;
49927 bool need_nonrelative = false;
49928- static atomic_t core_dump_count = ATOMIC_INIT(0);
49929+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
49930+ long signr = siginfo->si_signo;
49931 struct coredump_params cprm = {
49932 .siginfo = siginfo,
49933 .regs = signal_pt_regs(),
49934@@ -484,7 +485,10 @@ void do_coredump(siginfo_t *siginfo)
49935 .mm_flags = mm->flags,
49936 };
49937
49938- audit_core_dumps(siginfo->si_signo);
49939+ audit_core_dumps(signr);
49940+
49941+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
49942+ gr_handle_brute_attach(cprm.mm_flags);
49943
49944 binfmt = mm->binfmt;
49945 if (!binfmt || !binfmt->core_dump)
49946@@ -508,7 +512,7 @@ void do_coredump(siginfo_t *siginfo)
49947 need_nonrelative = true;
49948 }
49949
49950- retval = coredump_wait(siginfo->si_signo, &core_state);
49951+ retval = coredump_wait(signr, &core_state);
49952 if (retval < 0)
49953 goto fail_creds;
49954
49955@@ -556,7 +560,7 @@ void do_coredump(siginfo_t *siginfo)
49956 }
49957 cprm.limit = RLIM_INFINITY;
49958
49959- dump_count = atomic_inc_return(&core_dump_count);
49960+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
49961 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
49962 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
49963 task_tgid_vnr(current), current->comm);
49964@@ -583,6 +587,8 @@ void do_coredump(siginfo_t *siginfo)
49965 } else {
49966 struct inode *inode;
49967
49968+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
49969+
49970 if (cprm.limit < binfmt->min_coredump)
49971 goto fail_unlock;
49972
49973@@ -640,7 +646,7 @@ close_fail:
49974 filp_close(cprm.file, NULL);
49975 fail_dropcount:
49976 if (ispipe)
49977- atomic_dec(&core_dump_count);
49978+ atomic_dec_unchecked(&core_dump_count);
49979 fail_unlock:
49980 kfree(cn.corename);
49981 fail_corename:
49982@@ -659,7 +665,7 @@ fail:
49983 */
49984 int dump_write(struct file *file, const void *addr, int nr)
49985 {
49986- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
49987+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
49988 }
49989 EXPORT_SYMBOL(dump_write);
49990
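[annotation] Beyond the usual _unchecked conversions, two things happen in coredump.c: crash signals (SIGSEGV, SIGBUS, SIGKILL, SIGILL) trigger gr_handle_brute_attach(), the GRKERNSEC_BRUTE deterrent against brute-forcing ASLR through respawning daemons, and the pipe reader/writer bookkeeping becomes atomic -- this hunk only compiles because the patch converts those struct fields tree-wide. The struct change assumed here (sketched from the pipe_fs_i.h portion of the patch, not shown in this excerpt; surrounding members abbreviated):

	struct pipe_inode_info {
		wait_queue_head_t wait;
		unsigned int nrbufs, curbuf, buffers;
		atomic_t readers;	/* was: unsigned int readers; */
		atomic_t writers;	/* was: unsigned int writers; */
		/* ... remaining members unchanged ... */
	};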
49991diff --git a/fs/dcache.c b/fs/dcache.c
49992index 19153a0..5b71101 100644
49993--- a/fs/dcache.c
49994+++ b/fs/dcache.c
49995@@ -2552,7 +2552,6 @@ static int prepend_path(const struct path *path,
49996 bool slash = false;
49997 int error = 0;
49998
49999- br_read_lock(&vfsmount_lock);
50000 while (dentry != root->dentry || vfsmnt != root->mnt) {
50001 struct dentry * parent;
50002
50003@@ -2582,8 +2581,6 @@ static int prepend_path(const struct path *path,
50004 if (!error && !slash)
50005 error = prepend(buffer, buflen, "/", 1);
50006
50007-out:
50008- br_read_unlock(&vfsmount_lock);
50009 return error;
50010
50011 global_root:
50012@@ -2600,7 +2597,7 @@ global_root:
50013 error = prepend(buffer, buflen, "/", 1);
50014 if (!error)
50015 error = is_mounted(vfsmnt) ? 1 : 2;
50016- goto out;
50017+ return error;
50018 }
50019
50020 /**
50021@@ -2627,9 +2624,11 @@ char *__d_path(const struct path *path,
50022 int error;
50023
50024 prepend(&res, &buflen, "\0", 1);
50025+ br_read_lock(&vfsmount_lock);
50026 write_seqlock(&rename_lock);
50027 error = prepend_path(path, root, &res, &buflen);
50028 write_sequnlock(&rename_lock);
50029+ br_read_unlock(&vfsmount_lock);
50030
50031 if (error < 0)
50032 return ERR_PTR(error);
50033@@ -2646,9 +2645,11 @@ char *d_absolute_path(const struct path *path,
50034 int error;
50035
50036 prepend(&res, &buflen, "\0", 1);
50037+ br_read_lock(&vfsmount_lock);
50038 write_seqlock(&rename_lock);
50039 error = prepend_path(path, &root, &res, &buflen);
50040 write_sequnlock(&rename_lock);
50041+ br_read_unlock(&vfsmount_lock);
50042
50043 if (error > 1)
50044 error = -EINVAL;
50045@@ -2712,11 +2713,13 @@ char *d_path(const struct path *path, char *buf, int buflen)
50046 return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
50047
50048 get_fs_root(current->fs, &root);
50049+ br_read_lock(&vfsmount_lock);
50050 write_seqlock(&rename_lock);
50051 error = path_with_deleted(path, &root, &res, &buflen);
50052+ write_sequnlock(&rename_lock);
50053+ br_read_unlock(&vfsmount_lock);
50054 if (error < 0)
50055 res = ERR_PTR(error);
50056- write_sequnlock(&rename_lock);
50057 path_put(&root);
50058 return res;
50059 }
50060@@ -2871,6 +2874,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
50061 get_fs_root_and_pwd(current->fs, &root, &pwd);
50062
50063 error = -ENOENT;
50064+ br_read_lock(&vfsmount_lock);
50065 write_seqlock(&rename_lock);
50066 if (!d_unlinked(pwd.dentry)) {
50067 unsigned long len;
50068@@ -2880,6 +2884,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
50069 prepend(&cwd, &buflen, "\0", 1);
50070 error = prepend_path(&pwd, &root, &cwd, &buflen);
50071 write_sequnlock(&rename_lock);
50072+ br_read_unlock(&vfsmount_lock);
50073
50074 if (error < 0)
50075 goto out;
50076@@ -2900,6 +2905,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
50077 }
50078 } else {
50079 write_sequnlock(&rename_lock);
50080+ br_read_unlock(&vfsmount_lock);
50081 }
50082
50083 out:
50084@@ -3133,7 +3139,7 @@ void __init vfs_caches_init(unsigned long mempages)
50085 mempages -= reserve;
50086
50087 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
50088- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
50089+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
50090
50091 dcache_init();
50092 inode_init();
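[annotation] The vfsmount_lock acquisition moves out of prepend_path() and into its callers, taken before rename_lock; since prepend_path() walks mounts while following dentry parents, every caller must now bracket it with both locks itself. names_cache also joins the SLAB_USERCOPY whitelist, since pathname buffers are filled straight from userland. The locking discipline, as this patch applies it at each call site:

	br_read_lock(&vfsmount_lock);	/* outer: mount topology stable */
	write_seqlock(&rename_lock);	/* inner: dentry names stable */
	error = prepend_path(path, root, &res, &buflen);
	write_sequnlock(&rename_lock);
	br_read_unlock(&vfsmount_lock);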
50093diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
50094index a5f12b7..4ee8a6f 100644
50095--- a/fs/debugfs/inode.c
50096+++ b/fs/debugfs/inode.c
50097@@ -415,7 +415,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
50098 */
50099 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
50100 {
50101+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
50102+ return __create_file(name, S_IFDIR | S_IRWXU,
50103+#else
50104 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
50105+#endif
50106 parent, NULL, NULL);
50107 }
50108 EXPORT_SYMBOL_GPL(debugfs_create_dir);
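[annotation] Under GRKERNSEC_SYSFS_RESTRICT, debugfs directories drop group/other access entirely. The mode arithmetic:

	/*
	 *   S_IFDIR | S_IRWXU                       -> drwx------  (0700)
	 *   S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO   -> drwxr-xr-x  (0755)
	 */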
50109diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
50110index cc7709e..7e7211f 100644
50111--- a/fs/ecryptfs/inode.c
50112+++ b/fs/ecryptfs/inode.c
50113@@ -674,7 +674,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
50114 old_fs = get_fs();
50115 set_fs(get_ds());
50116 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
50117- (char __user *)lower_buf,
50118+ (char __force_user *)lower_buf,
50119 PATH_MAX);
50120 set_fs(old_fs);
50121 if (rc < 0)
50122@@ -706,7 +706,7 @@ out:
50123 static void
50124 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
50125 {
50126- char *buf = nd_get_link(nd);
50127+ const char *buf = nd_get_link(nd);
50128 if (!IS_ERR(buf)) {
50129 /* Free the char* */
50130 kfree(buf);
50131diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
50132index 412e6ed..4292d22 100644
50133--- a/fs/ecryptfs/miscdev.c
50134+++ b/fs/ecryptfs/miscdev.c
50135@@ -315,7 +315,7 @@ check_list:
50136 goto out_unlock_msg_ctx;
50137 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
50138 if (msg_ctx->msg) {
50139- if (copy_to_user(&buf[i], packet_length, packet_length_size))
50140+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
50141 goto out_unlock_msg_ctx;
50142 i += packet_length_size;
50143 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
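[annotation] The added length test guards a stack buffer: packet_length is a small local array, and without the bound a larger packet_length_size would make copy_to_user() disclose adjacent stack memory to userland. The general pattern (local_buf, len, dst illustrative):

	if (len > sizeof(local_buf))
		return -EINVAL;		/* never copy past the backing storage */
	if (copy_to_user(dst, local_buf, len))
		return -EFAULT;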
50144diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
50145index b2a34a1..162fa69 100644
50146--- a/fs/ecryptfs/read_write.c
50147+++ b/fs/ecryptfs/read_write.c
50148@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
50149 return -EIO;
50150 fs_save = get_fs();
50151 set_fs(get_ds());
50152- rc = vfs_write(lower_file, data, size, &offset);
50153+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
50154 set_fs(fs_save);
50155 mark_inode_dirty_sync(ecryptfs_inode);
50156 return rc;
50157@@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
50158 return -EIO;
50159 fs_save = get_fs();
50160 set_fs(get_ds());
50161- rc = vfs_read(lower_file, data, size, &offset);
50162+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
50163 set_fs(fs_save);
50164 return rc;
50165 }
50166diff --git a/fs/exec.c b/fs/exec.c
50167index 20df02c..09b65a1 100644
50168--- a/fs/exec.c
50169+++ b/fs/exec.c
50170@@ -55,6 +55,17 @@
50171 #include <linux/pipe_fs_i.h>
50172 #include <linux/oom.h>
50173 #include <linux/compat.h>
50174+#include <linux/random.h>
50175+#include <linux/seq_file.h>
50176+#include <linux/coredump.h>
50177+#include <linux/mman.h>
50178+
50179+#ifdef CONFIG_PAX_REFCOUNT
50180+#include <linux/kallsyms.h>
50181+#include <linux/kdebug.h>
50182+#endif
50183+
50184+#include <trace/events/fs.h>
50185
50186 #include <asm/uaccess.h>
50187 #include <asm/mmu_context.h>
50188@@ -66,6 +77,18 @@
50189
50190 #include <trace/events/sched.h>
50191
50192+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
50193+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
50194+{
50195+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
50196+}
50197+#endif
50198+
50199+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
50200+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
50201+EXPORT_SYMBOL(pax_set_initial_flags_func);
50202+#endif
50203+
50204 int suid_dumpable = 0;
50205
50206 static LIST_HEAD(formats);
50207@@ -75,8 +98,8 @@ void __register_binfmt(struct linux_binfmt * fmt, int insert)
50208 {
50209 BUG_ON(!fmt);
50210 write_lock(&binfmt_lock);
50211- insert ? list_add(&fmt->lh, &formats) :
50212- list_add_tail(&fmt->lh, &formats);
50213+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
50214+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
50215 write_unlock(&binfmt_lock);
50216 }
50217
50218@@ -85,7 +108,7 @@ EXPORT_SYMBOL(__register_binfmt);
50219 void unregister_binfmt(struct linux_binfmt * fmt)
50220 {
50221 write_lock(&binfmt_lock);
50222- list_del(&fmt->lh);
50223+ pax_list_del((struct list_head *)&fmt->lh);
50224 write_unlock(&binfmt_lock);
50225 }
50226
50227@@ -180,18 +203,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
50228 int write)
50229 {
50230 struct page *page;
50231- int ret;
50232
50233-#ifdef CONFIG_STACK_GROWSUP
50234- if (write) {
50235- ret = expand_downwards(bprm->vma, pos);
50236- if (ret < 0)
50237- return NULL;
50238- }
50239-#endif
50240- ret = get_user_pages(current, bprm->mm, pos,
50241- 1, write, 1, &page, NULL);
50242- if (ret <= 0)
50243+ if (0 > expand_downwards(bprm->vma, pos))
50244+ return NULL;
50245+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
50246 return NULL;
50247
50248 if (write) {
50249@@ -207,6 +222,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
50250 if (size <= ARG_MAX)
50251 return page;
50252
50253+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50254+ // only allow 512KB for argv+env on suid/sgid binaries
50255+ // to prevent easy ASLR exhaustion
50256+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
50257+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
50258+ (size > (512 * 1024))) {
50259+ put_page(page);
50260+ return NULL;
50261+ }
50262+#endif
50263+
50264 /*
50265 * Limit to 1/4-th the stack size for the argv+env strings.
50266 * This ensures that:
50267@@ -266,6 +292,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
50268 vma->vm_end = STACK_TOP_MAX;
50269 vma->vm_start = vma->vm_end - PAGE_SIZE;
50270 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
50271+
50272+#ifdef CONFIG_PAX_SEGMEXEC
50273+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
50274+#endif
50275+
50276 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
50277 INIT_LIST_HEAD(&vma->anon_vma_chain);
50278
50279@@ -276,6 +307,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
50280 mm->stack_vm = mm->total_vm = 1;
50281 up_write(&mm->mmap_sem);
50282 bprm->p = vma->vm_end - sizeof(void *);
50283+
50284+#ifdef CONFIG_PAX_RANDUSTACK
50285+ if (randomize_va_space)
50286+ bprm->p ^= random32() & ~PAGE_MASK;
50287+#endif
50288+
50289 return 0;
50290 err:
50291 up_write(&mm->mmap_sem);
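[annotation] PAX_RANDUSTACK (applied only when randomize_va_space is set, per the hunk above) perturbs where the argument area starts within its page. With 4 KiB pages the arithmetic works out as:

	/*
	 * PAGE_MASK  == ~(PAGE_SIZE - 1) == ~0xfffUL
	 * ~PAGE_MASK == 0xfffUL
	 *
	 * bprm->p ^= random32() & 0xfff;
	 *
	 * XORing only the low 12 bits randomizes the in-page offset of the
	 * stack top without moving it to a different page.
	 */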
50292@@ -396,7 +433,7 @@ struct user_arg_ptr {
50293 } ptr;
50294 };
50295
50296-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
50297+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
50298 {
50299 const char __user *native;
50300
50301@@ -405,14 +442,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
50302 compat_uptr_t compat;
50303
50304 if (get_user(compat, argv.ptr.compat + nr))
50305- return ERR_PTR(-EFAULT);
50306+ return (const char __force_user *)ERR_PTR(-EFAULT);
50307
50308 return compat_ptr(compat);
50309 }
50310 #endif
50311
50312 if (get_user(native, argv.ptr.native + nr))
50313- return ERR_PTR(-EFAULT);
50314+ return (const char __force_user *)ERR_PTR(-EFAULT);
50315
50316 return native;
50317 }
50318@@ -431,7 +468,7 @@ static int count(struct user_arg_ptr argv, int max)
50319 if (!p)
50320 break;
50321
50322- if (IS_ERR(p))
50323+ if (IS_ERR((const char __force_kernel *)p))
50324 return -EFAULT;
50325
50326 if (i >= max)
50327@@ -466,7 +503,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
50328
50329 ret = -EFAULT;
50330 str = get_user_arg_ptr(argv, argc);
50331- if (IS_ERR(str))
50332+ if (IS_ERR((const char __force_kernel *)str))
50333 goto out;
50334
50335 len = strnlen_user(str, MAX_ARG_STRLEN);
50336@@ -548,7 +585,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
50337 int r;
50338 mm_segment_t oldfs = get_fs();
50339 struct user_arg_ptr argv = {
50340- .ptr.native = (const char __user *const __user *)__argv,
50341+ .ptr.native = (const char __force_user *const __force_user *)__argv,
50342 };
50343
50344 set_fs(KERNEL_DS);
50345@@ -583,7 +620,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
50346 unsigned long new_end = old_end - shift;
50347 struct mmu_gather tlb;
50348
50349- BUG_ON(new_start > new_end);
50350+ if (new_start >= new_end || new_start < mmap_min_addr)
50351+ return -ENOMEM;
50352
50353 /*
50354 * ensure there are no vmas between where we want to go
50355@@ -592,6 +630,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
50356 if (vma != find_vma(mm, new_start))
50357 return -EFAULT;
50358
50359+#ifdef CONFIG_PAX_SEGMEXEC
50360+ BUG_ON(pax_find_mirror_vma(vma));
50361+#endif
50362+
50363 /*
50364 * cover the whole range: [new_start, old_end)
50365 */
50366@@ -672,10 +714,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
50367 stack_top = arch_align_stack(stack_top);
50368 stack_top = PAGE_ALIGN(stack_top);
50369
50370- if (unlikely(stack_top < mmap_min_addr) ||
50371- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
50372- return -ENOMEM;
50373-
50374 stack_shift = vma->vm_end - stack_top;
50375
50376 bprm->p -= stack_shift;
50377@@ -687,8 +725,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
50378 bprm->exec -= stack_shift;
50379
50380 down_write(&mm->mmap_sem);
50381+
50382+ /* Move stack pages down in memory. */
50383+ if (stack_shift) {
50384+ ret = shift_arg_pages(vma, stack_shift);
50385+ if (ret)
50386+ goto out_unlock;
50387+ }
50388+
50389 vm_flags = VM_STACK_FLAGS;
50390
50391+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
50392+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
50393+ vm_flags &= ~VM_EXEC;
50394+
50395+#ifdef CONFIG_PAX_MPROTECT
50396+ if (mm->pax_flags & MF_PAX_MPROTECT)
50397+ vm_flags &= ~VM_MAYEXEC;
50398+#endif
50399+
50400+ }
50401+#endif
50402+
50403 /*
50404 * Adjust stack execute permissions; explicitly enable for
50405 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
50406@@ -707,13 +765,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
50407 goto out_unlock;
50408 BUG_ON(prev != vma);
50409
50410- /* Move stack pages down in memory. */
50411- if (stack_shift) {
50412- ret = shift_arg_pages(vma, stack_shift);
50413- if (ret)
50414- goto out_unlock;
50415- }
50416-
50417 /* mprotect_fixup is overkill to remove the temporary stack flags */
50418 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
50419
50420@@ -737,6 +788,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
50421 #endif
50422 current->mm->start_stack = bprm->p;
50423 ret = expand_stack(vma, stack_base);
50424+
50425+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
50426+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
50427+ unsigned long size, flags, vm_flags;
50428+
50429+ size = STACK_TOP - vma->vm_end;
50430+ flags = MAP_FIXED | MAP_PRIVATE;
50431+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
50432+
50433+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, flags, vm_flags, 0);
50434+
50435+#ifdef CONFIG_X86
50436+ if (!ret) {
50437+ size = mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
50438+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), flags, vm_flags, 0);
50439+ }
50440+#endif
50441+
50442+ }
50443+#endif
50444+
50445 if (ret)
50446 ret = -EFAULT;
50447
50448@@ -772,6 +844,8 @@ struct file *open_exec(const char *name)
50449
50450 fsnotify_open(file);
50451
50452+ trace_open_exec(name);
50453+
50454 err = deny_write_access(file);
50455 if (err)
50456 goto exit;
50457@@ -795,7 +869,7 @@ int kernel_read(struct file *file, loff_t offset,
50458 old_fs = get_fs();
50459 set_fs(get_ds());
50460 /* The cast to a user pointer is valid due to the set_fs() */
50461- result = vfs_read(file, (void __user *)addr, count, &pos);
50462+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
50463 set_fs(old_fs);
50464 return result;
50465 }
50466@@ -1247,7 +1321,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
50467 }
50468 rcu_read_unlock();
50469
50470- if (p->fs->users > n_fs) {
50471+ if (atomic_read(&p->fs->users) > n_fs) {
50472 bprm->unsafe |= LSM_UNSAFE_SHARE;
50473 } else {
50474 res = -EAGAIN;
50475@@ -1447,6 +1521,31 @@ int search_binary_handler(struct linux_binprm *bprm)
50476
50477 EXPORT_SYMBOL(search_binary_handler);
50478
50479+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50480+static DEFINE_PER_CPU(u64, exec_counter);
50481+static int __init init_exec_counters(void)
50482+{
50483+ unsigned int cpu;
50484+
50485+ for_each_possible_cpu(cpu) {
50486+ per_cpu(exec_counter, cpu) = (u64)cpu;
50487+ }
50488+
50489+ return 0;
50490+}
50491+early_initcall(init_exec_counters);
50492+static inline void increment_exec_counter(void)
50493+{
50494+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
50495+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
50496+}
50497+#else
50498+static inline void increment_exec_counter(void) {}
50499+#endif
50500+
50501+extern void gr_handle_exec_args(struct linux_binprm *bprm,
50502+ struct user_arg_ptr argv);
50503+
50504 /*
50505 * sys_execve() executes a new program.
50506 */
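[annotation] The per-cpu exec counter above produces a task-unique exec_id without any global synchronization: the low 16 bits are seeded with the CPU number and every exec on that CPU adds 1 << 16, so counters from different CPUs can never collide. Worked example:

	/*
	 * BUILD_BUG_ON(NR_CPUS > (1 << 16)) guarantees the CPU id fits
	 * below the increment.
	 *
	 *   CPU 3, 1st exec:  3 + 1*(1 << 16) = 0x00010003
	 *   CPU 3, 2nd exec:  3 + 2*(1 << 16) = 0x00020003
	 *   CPU 7, 1st exec:  7 + 1*(1 << 16) = 0x00010007
	 *
	 * exec_id is a u64, so a single CPU would have to perform 2^48
	 * execs before its counter could wrap into another CPU's range.
	 */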
50507@@ -1454,6 +1553,11 @@ static int do_execve_common(const char *filename,
50508 struct user_arg_ptr argv,
50509 struct user_arg_ptr envp)
50510 {
50511+#ifdef CONFIG_GRKERNSEC
50512+ struct file *old_exec_file;
50513+ struct acl_subject_label *old_acl;
50514+ struct rlimit old_rlim[RLIM_NLIMITS];
50515+#endif
50516 struct linux_binprm *bprm;
50517 struct file *file;
50518 struct files_struct *displaced;
50519@@ -1461,6 +1565,8 @@ static int do_execve_common(const char *filename,
50520 int retval;
50521 const struct cred *cred = current_cred();
50522
50523+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
50524+
50525 /*
50526 * We move the actual failure in case of RLIMIT_NPROC excess from
50527 * set*uid() to execve() because too many poorly written programs
50528@@ -1501,12 +1607,27 @@ static int do_execve_common(const char *filename,
50529 if (IS_ERR(file))
50530 goto out_unmark;
50531
50532+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
50533+ retval = -EPERM;
50534+ goto out_file;
50535+ }
50536+
50537 sched_exec();
50538
50539 bprm->file = file;
50540 bprm->filename = filename;
50541 bprm->interp = filename;
50542
50543+ if (gr_process_user_ban()) {
50544+ retval = -EPERM;
50545+ goto out_file;
50546+ }
50547+
50548+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
50549+ retval = -EACCES;
50550+ goto out_file;
50551+ }
50552+
50553 retval = bprm_mm_init(bprm);
50554 if (retval)
50555 goto out_file;
50556@@ -1523,24 +1644,65 @@ static int do_execve_common(const char *filename,
50557 if (retval < 0)
50558 goto out;
50559
50560+#ifdef CONFIG_GRKERNSEC
50561+ old_acl = current->acl;
50562+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
50563+ old_exec_file = current->exec_file;
50564+ get_file(file);
50565+ current->exec_file = file;
50566+#endif
50567+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50568+ /* limit suid stack to 8MB
50569+ * we saved the old limits above and will restore them if this exec fails
50570+ */
50571+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
50572+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
50573+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
50574+#endif
50575+
50576+ if (!gr_tpe_allow(file)) {
50577+ retval = -EACCES;
50578+ goto out_fail;
50579+ }
50580+
50581+ if (gr_check_crash_exec(file)) {
50582+ retval = -EACCES;
50583+ goto out_fail;
50584+ }
50585+
50586+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
50587+ bprm->unsafe);
50588+ if (retval < 0)
50589+ goto out_fail;
50590+
50591 retval = copy_strings_kernel(1, &bprm->filename, bprm);
50592 if (retval < 0)
50593- goto out;
50594+ goto out_fail;
50595
50596 bprm->exec = bprm->p;
50597 retval = copy_strings(bprm->envc, envp, bprm);
50598 if (retval < 0)
50599- goto out;
50600+ goto out_fail;
50601
50602 retval = copy_strings(bprm->argc, argv, bprm);
50603 if (retval < 0)
50604- goto out;
50605+ goto out_fail;
50606+
50607+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
50608+
50609+ gr_handle_exec_args(bprm, argv);
50610
50611 retval = search_binary_handler(bprm);
50612 if (retval < 0)
50613- goto out;
50614+ goto out_fail;
50615+#ifdef CONFIG_GRKERNSEC
50616+ if (old_exec_file)
50617+ fput(old_exec_file);
50618+#endif
50619
50620 /* execve succeeded */
50621+
50622+ increment_exec_counter();
50623 current->fs->in_exec = 0;
50624 current->in_execve = 0;
50625 acct_update_integrals(current);
50626@@ -1549,6 +1711,14 @@ static int do_execve_common(const char *filename,
50627 put_files_struct(displaced);
50628 return retval;
50629
50630+out_fail:
50631+#ifdef CONFIG_GRKERNSEC
50632+ current->acl = old_acl;
50633+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
50634+ fput(current->exec_file);
50635+ current->exec_file = old_exec_file;
50636+#endif
50637+
50638 out:
50639 if (bprm->mm) {
50640 acct_arg_size(bprm, 0);
50641@@ -1697,3 +1867,253 @@ asmlinkage long compat_sys_execve(const char __user * filename,
50642 return error;
50643 }
50644 #endif
50645+
50646+int pax_check_flags(unsigned long *flags)
50647+{
50648+ int retval = 0;
50649+
50650+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
50651+ if (*flags & MF_PAX_SEGMEXEC)
50652+ {
50653+ *flags &= ~MF_PAX_SEGMEXEC;
50654+ retval = -EINVAL;
50655+ }
50656+#endif
50657+
50658+ if ((*flags & MF_PAX_PAGEEXEC)
50659+
50660+#ifdef CONFIG_PAX_PAGEEXEC
50661+ && (*flags & MF_PAX_SEGMEXEC)
50662+#endif
50663+
50664+ )
50665+ {
50666+ *flags &= ~MF_PAX_PAGEEXEC;
50667+ retval = -EINVAL;
50668+ }
50669+
50670+ if ((*flags & MF_PAX_MPROTECT)
50671+
50672+#ifdef CONFIG_PAX_MPROTECT
50673+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
50674+#endif
50675+
50676+ )
50677+ {
50678+ *flags &= ~MF_PAX_MPROTECT;
50679+ retval = -EINVAL;
50680+ }
50681+
50682+ if ((*flags & MF_PAX_EMUTRAMP)
50683+
50684+#ifdef CONFIG_PAX_EMUTRAMP
50685+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
50686+#endif
50687+
50688+ )
50689+ {
50690+ *flags &= ~MF_PAX_EMUTRAMP;
50691+ retval = -EINVAL;
50692+ }
50693+
50694+ return retval;
50695+}
50696+
50697+EXPORT_SYMBOL(pax_check_flags);
50698+
50699+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
50700+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
50701+{
50702+ struct task_struct *tsk = current;
50703+ struct mm_struct *mm = current->mm;
50704+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
50705+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
50706+ char *path_exec = NULL;
50707+ char *path_fault = NULL;
50708+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
50709+ siginfo_t info = { };
50710+
50711+ if (buffer_exec && buffer_fault) {
50712+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
50713+
50714+ down_read(&mm->mmap_sem);
50715+ vma = mm->mmap;
50716+ while (vma && (!vma_exec || !vma_fault)) {
50717+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
50718+ vma_exec = vma;
50719+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
50720+ vma_fault = vma;
50721+ vma = vma->vm_next;
50722+ }
50723+ if (vma_exec) {
50724+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
50725+ if (IS_ERR(path_exec))
50726+ path_exec = "<path too long>";
50727+ else {
50728+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
50729+ if (path_exec) {
50730+ *path_exec = 0;
50731+ path_exec = buffer_exec;
50732+ } else
50733+ path_exec = "<path too long>";
50734+ }
50735+ }
50736+ if (vma_fault) {
50737+ start = vma_fault->vm_start;
50738+ end = vma_fault->vm_end;
50739+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
50740+ if (vma_fault->vm_file) {
50741+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
50742+ if (IS_ERR(path_fault))
50743+ path_fault = "<path too long>";
50744+ else {
50745+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
50746+ if (path_fault) {
50747+ *path_fault = 0;
50748+ path_fault = buffer_fault;
50749+ } else
50750+ path_fault = "<path too long>";
50751+ }
50752+ } else
50753+ path_fault = "<anonymous mapping>";
50754+ }
50755+ up_read(&mm->mmap_sem);
50756+ }
50757+ if (tsk->signal->curr_ip)
50758+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
50759+ else
50760+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
50761+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
50762+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
50763+ free_page((unsigned long)buffer_exec);
50764+ free_page((unsigned long)buffer_fault);
50765+ pax_report_insns(regs, pc, sp);
50766+ info.si_signo = SIGKILL;
50767+ info.si_errno = 0;
50768+ info.si_code = SI_KERNEL;
50769+ info.si_pid = 0;
50770+ info.si_uid = 0;
50771+ do_coredump(&info);
50772+}
50773+#endif
50774+
50775+#ifdef CONFIG_PAX_REFCOUNT
50776+void pax_report_refcount_overflow(struct pt_regs *regs)
50777+{
50778+ if (current->signal->curr_ip)
50779+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
50780+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
50781+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
50782+ else
50783+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
50784+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
50785+	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
50786+ show_regs(regs);
50787+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
50788+}
50789+#endif
50790+
50791+#ifdef CONFIG_PAX_USERCOPY
50792+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
50793+static noinline int check_stack_object(const void *obj, unsigned long len)
50794+{
50795+ const void * const stack = task_stack_page(current);
50796+ const void * const stackend = stack + THREAD_SIZE;
50797+
50798+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
50799+ const void *frame = NULL;
50800+ const void *oldframe;
50801+#endif
50802+
50803+ if (obj + len < obj)
50804+ return -1;
50805+
50806+ if (obj + len <= stack || stackend <= obj)
50807+ return 0;
50808+
50809+ if (obj < stack || stackend < obj + len)
50810+ return -1;
50811+
50812+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
50813+ oldframe = __builtin_frame_address(1);
50814+ if (oldframe)
50815+ frame = __builtin_frame_address(2);
50816+ /*
50817+ low ----------------------------------------------> high
50818+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
50819+ ^----------------^
50820+ allow copies only within here
50821+ */
50822+ while (stack <= frame && frame < stackend) {
50823+ /* if obj + len extends past the last frame, this
50824+ check won't pass and the next frame will be 0,
50825+ causing us to bail out and correctly report
50826+ the copy as invalid
50827+ */
50828+ if (obj + len <= frame)
50829+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
50830+ oldframe = frame;
50831+ frame = *(const void * const *)frame;
50832+ }
50833+ return -1;
50834+#else
50835+ return 1;
50836+#endif
50837+}
50838+
50839+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
50840+{
50841+ if (current->signal->curr_ip)
50842+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
50843+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
50844+ else
50845+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
50846+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
50847+ dump_stack();
50848+ gr_handle_kernel_exploit();
50849+ do_group_exit(SIGKILL);
50850+}
50851+#endif
50852+
50853+void __check_object_size(const void *ptr, unsigned long n, bool to)
50854+{
50855+
50856+#ifdef CONFIG_PAX_USERCOPY
50857+ const char *type;
50858+
50859+ if (!n)
50860+ return;
50861+
50862+ type = check_heap_object(ptr, n);
50863+ if (!type) {
50864+ if (check_stack_object(ptr, n) != -1)
50865+ return;
50866+ type = "<process stack>";
50867+ }
50868+
50869+ pax_report_usercopy(ptr, n, to, type);
50870+#endif
50871+
50872+}
50873+EXPORT_SYMBOL(__check_object_size);
50874+
50875+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
50876+void pax_track_stack(void)
50877+{
50878+ unsigned long sp = (unsigned long)&sp;
50879+ if (sp < current_thread_info()->lowest_stack &&
50880+ sp > (unsigned long)task_stack_page(current))
50881+ current_thread_info()->lowest_stack = sp;
50882+}
50883+EXPORT_SYMBOL(pax_track_stack);
50884+#endif
50885+
50886+#ifdef CONFIG_PAX_SIZE_OVERFLOW
50887+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
50888+{
50889+	printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s\n", func, file, line, ssa_name);
50890+ dump_stack();
50891+ do_group_exit(SIGKILL);
50892+}
50893+EXPORT_SYMBOL(report_size_overflow);
50894+#endif
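
The check_stack_object() helper added above classifies a candidate copy range [obj, obj+len) against the task stack: 0 means fully outside, 1 fully inside, 2 fully inside a single stack frame (when frame pointers allow walking the saved-bp chain), and -1 a partial overlap or pointer wraparound, which is then reported as an attack. A minimal user-space sketch of the interval classification, with illustrative names and without the frame walk:

#include <stdint.h>
#include <stdio.h>

/* Classify [obj, obj+len) against [stack, stackend):
 * 0: disjoint, 1: fully inside, -1: wraparound or partial overlap. */
static int classify_range(uintptr_t obj, size_t len,
                          uintptr_t stack, uintptr_t stackend)
{
	if (obj + len < obj)                       /* pointer wraparound */
		return -1;
	if (obj + len <= stack || stackend <= obj) /* fully outside */
		return 0;
	if (stack <= obj && obj + len <= stackend) /* fully inside */
		return 1;
	return -1;                                 /* straddles a boundary */
}

int main(void)
{
	uintptr_t stack = 0x1000, stackend = 0x3000;
	printf("%d %d %d\n",
	       classify_range(0x0100, 0x10, stack, stackend),  /* 0  */
	       classify_range(0x1100, 0x10, stack, stackend),  /* 1  */
	       classify_range(0x2ff0, 0x20, stack, stackend)); /* -1 */
	return 0;
}
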
50895diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
50896index 2616d0e..2ffdec9 100644
50897--- a/fs/ext2/balloc.c
50898+++ b/fs/ext2/balloc.c
50899@@ -1190,10 +1190,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
50900
50901 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
50902 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
50903- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
50904+ if (free_blocks < root_blocks + 1 &&
50905 !uid_eq(sbi->s_resuid, current_fsuid()) &&
50906 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
50907- !in_group_p (sbi->s_resgid))) {
50908+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
50909 return 0;
50910 }
50911 return 1;
50912diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
50913index 22548f5..41521d8 100644
50914--- a/fs/ext3/balloc.c
50915+++ b/fs/ext3/balloc.c
50916@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
50917
50918 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
50919 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
50920- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
50921+ if (free_blocks < root_blocks + 1 &&
50922 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
50923 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
50924- !in_group_p (sbi->s_resgid))) {
50925+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
50926 return 0;
50927 }
50928 return 1;
50929diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
50930index 92e68b3..115d987 100644
50931--- a/fs/ext4/balloc.c
50932+++ b/fs/ext4/balloc.c
50933@@ -505,8 +505,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
50934 /* Hm, nope. Are (enough) root reserved clusters available? */
50935 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
50936 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
50937- capable(CAP_SYS_RESOURCE) ||
50938- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
50939+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
50940+ capable_nolog(CAP_SYS_RESOURCE)) {
50941
50942 if (free_clusters >= (nclusters + dirty_clusters))
50943 return 1;
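
The ext2/ext3/ext4 reserved-block hunks above all apply the same transformation: the capability test is moved to the end of the condition and switched to capable_nolog(). capable() has side effects (it can raise an audit record and marks the task as having used privilege), so under short-circuit evaluation it should only run when the cheap owner/group tests were not decisive; capable_nolog() is the grsecurity variant that additionally skips grsec's own logging. A small sketch of the ordering principle, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

static int audit_events;

static bool has_capability(void)   /* stand-in for capable()/capable_nolog() */
{
	audit_events++;                /* side effect: an audit record */
	return true;
}

static bool may_use_reserve(bool is_res_owner, bool in_res_group)
{
	/* Cheap, side-effect-free checks first; the logging check last. */
	return is_res_owner || in_res_group || has_capability();
}

int main(void)
{
	may_use_reserve(true, false);   /* decided early: no audit event */
	may_use_reserve(false, false);  /* falls through to the capability check */
	printf("audit events: %d\n", audit_events);  /* prints 1 */
	return 0;
}
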
50944diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
50945index 8462eb3..4a71af6 100644
50946--- a/fs/ext4/ext4.h
50947+++ b/fs/ext4/ext4.h
50948@@ -1265,19 +1265,19 @@ struct ext4_sb_info {
50949 unsigned long s_mb_last_start;
50950
50951 /* stats for buddy allocator */
50952- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
50953- atomic_t s_bal_success; /* we found long enough chunks */
50954- atomic_t s_bal_allocated; /* in blocks */
50955- atomic_t s_bal_ex_scanned; /* total extents scanned */
50956- atomic_t s_bal_goals; /* goal hits */
50957- atomic_t s_bal_breaks; /* too long searches */
50958- atomic_t s_bal_2orders; /* 2^order hits */
50959+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
50960+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
50961+ atomic_unchecked_t s_bal_allocated; /* in blocks */
50962+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
50963+ atomic_unchecked_t s_bal_goals; /* goal hits */
50964+ atomic_unchecked_t s_bal_breaks; /* too long searches */
50965+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
50966 spinlock_t s_bal_lock;
50967 unsigned long s_mb_buddies_generated;
50968 unsigned long long s_mb_generation_time;
50969- atomic_t s_mb_lost_chunks;
50970- atomic_t s_mb_preallocated;
50971- atomic_t s_mb_discarded;
50972+ atomic_unchecked_t s_mb_lost_chunks;
50973+ atomic_unchecked_t s_mb_preallocated;
50974+ atomic_unchecked_t s_mb_discarded;
50975 atomic_t s_lock_busy;
50976
50977 /* locality groups */
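
The atomic_t -> atomic_unchecked_t conversions here (and throughout this patch) pair with PAX_REFCOUNT: ordinary atomic_t operations are instrumented so reference counts cannot silently wrap on overflow, while pure statistics counters, where wraparound is harmless, opt out via the unchecked type and the matching atomic_*_unchecked() helpers. A user-space C11 sketch of the two behaviours (saturation stands in for the kernel's overflow handling; names are illustrative):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static int checked_inc(_Atomic int *v)
{
	int old = atomic_load(v);
	for (;;) {
		if (old == INT_MAX)       /* would overflow: saturate instead */
			return old;
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return old + 1;
	}
}

static void unchecked_inc(_Atomic int *v)
{
	atomic_fetch_add(v, 1);       /* C11 atomic arithmetic wraps silently */
}

int main(void)
{
	_Atomic int refs = INT_MAX;   /* a refcount: must not wrap */
	_Atomic int stat = INT_MAX;   /* a statistic: wrap is harmless */

	checked_inc(&refs);
	unchecked_inc(&stat);
	printf("checked: %d, unchecked: %d\n",
	       atomic_load(&refs), atomic_load(&stat));
	/* prints: checked: 2147483647, unchecked: -2147483648 */
	return 0;
}
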
50978diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
50979index 28bbf9b..75ca7c1 100644
50980--- a/fs/ext4/mballoc.c
50981+++ b/fs/ext4/mballoc.c
50982@@ -1747,7 +1747,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
50983 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
50984
50985 if (EXT4_SB(sb)->s_mb_stats)
50986- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
50987+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
50988
50989 break;
50990 }
50991@@ -2044,7 +2044,7 @@ repeat:
50992 ac->ac_status = AC_STATUS_CONTINUE;
50993 ac->ac_flags |= EXT4_MB_HINT_FIRST;
50994 cr = 3;
50995- atomic_inc(&sbi->s_mb_lost_chunks);
50996+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
50997 goto repeat;
50998 }
50999 }
51000@@ -2552,25 +2552,25 @@ int ext4_mb_release(struct super_block *sb)
51001 if (sbi->s_mb_stats) {
51002 ext4_msg(sb, KERN_INFO,
51003 "mballoc: %u blocks %u reqs (%u success)",
51004- atomic_read(&sbi->s_bal_allocated),
51005- atomic_read(&sbi->s_bal_reqs),
51006- atomic_read(&sbi->s_bal_success));
51007+ atomic_read_unchecked(&sbi->s_bal_allocated),
51008+ atomic_read_unchecked(&sbi->s_bal_reqs),
51009+ atomic_read_unchecked(&sbi->s_bal_success));
51010 ext4_msg(sb, KERN_INFO,
51011 "mballoc: %u extents scanned, %u goal hits, "
51012 "%u 2^N hits, %u breaks, %u lost",
51013- atomic_read(&sbi->s_bal_ex_scanned),
51014- atomic_read(&sbi->s_bal_goals),
51015- atomic_read(&sbi->s_bal_2orders),
51016- atomic_read(&sbi->s_bal_breaks),
51017- atomic_read(&sbi->s_mb_lost_chunks));
51018+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
51019+ atomic_read_unchecked(&sbi->s_bal_goals),
51020+ atomic_read_unchecked(&sbi->s_bal_2orders),
51021+ atomic_read_unchecked(&sbi->s_bal_breaks),
51022+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
51023 ext4_msg(sb, KERN_INFO,
51024 "mballoc: %lu generated and it took %Lu",
51025 sbi->s_mb_buddies_generated,
51026 sbi->s_mb_generation_time);
51027 ext4_msg(sb, KERN_INFO,
51028 "mballoc: %u preallocated, %u discarded",
51029- atomic_read(&sbi->s_mb_preallocated),
51030- atomic_read(&sbi->s_mb_discarded));
51031+ atomic_read_unchecked(&sbi->s_mb_preallocated),
51032+ atomic_read_unchecked(&sbi->s_mb_discarded));
51033 }
51034
51035 free_percpu(sbi->s_locality_groups);
51036@@ -3060,16 +3060,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
51037 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
51038
51039 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
51040- atomic_inc(&sbi->s_bal_reqs);
51041- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
51042+ atomic_inc_unchecked(&sbi->s_bal_reqs);
51043+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
51044 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
51045- atomic_inc(&sbi->s_bal_success);
51046- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
51047+ atomic_inc_unchecked(&sbi->s_bal_success);
51048+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
51049 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
51050 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
51051- atomic_inc(&sbi->s_bal_goals);
51052+ atomic_inc_unchecked(&sbi->s_bal_goals);
51053 if (ac->ac_found > sbi->s_mb_max_to_scan)
51054- atomic_inc(&sbi->s_bal_breaks);
51055+ atomic_inc_unchecked(&sbi->s_bal_breaks);
51056 }
51057
51058 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
51059@@ -3469,7 +3469,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
51060 trace_ext4_mb_new_inode_pa(ac, pa);
51061
51062 ext4_mb_use_inode_pa(ac, pa);
51063- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
51064+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
51065
51066 ei = EXT4_I(ac->ac_inode);
51067 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
51068@@ -3529,7 +3529,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
51069 trace_ext4_mb_new_group_pa(ac, pa);
51070
51071 ext4_mb_use_group_pa(ac, pa);
51072- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
51073+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
51074
51075 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
51076 lg = ac->ac_lg;
51077@@ -3618,7 +3618,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
51078 * from the bitmap and continue.
51079 */
51080 }
51081- atomic_add(free, &sbi->s_mb_discarded);
51082+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
51083
51084 return err;
51085 }
51086@@ -3636,7 +3636,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
51087 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
51088 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
51089 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
51090- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
51091+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
51092 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
51093
51094 return 0;
51095diff --git a/fs/ext4/super.c b/fs/ext4/super.c
51096index 5fa223d..12fa738 100644
51097--- a/fs/ext4/super.c
51098+++ b/fs/ext4/super.c
51099@@ -2429,7 +2429,7 @@ struct ext4_attr {
51100 ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *,
51101 const char *, size_t);
51102 int offset;
51103-};
51104+} __do_const;
51105
51106 static int parse_strtoul(const char *buf,
51107 unsigned long max, unsigned long *value)
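
The __do_const annotation added to struct ext4_attr marks it for the PaX constify plugin: structures consisting largely of function pointers are forced into read-only memory, so a kernel write primitive cannot redirect them at runtime. A plain-C approximation of the effect, where const places the instance in .rodata:

#include <stdio.h>

struct attr_ops {
	const char *name;
	void (*show)(void);           /* pointer an attacker might overwrite */
};

static void show_stats(void) { puts("stats"); }

/* const puts the table in read-only memory; a stray write faults
 * instead of silently redirecting the function pointer. */
static const struct attr_ops demo_attr = { "stats", show_stats };

int main(void)
{
	demo_attr.show();
	return 0;
}
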
51108diff --git a/fs/fcntl.c b/fs/fcntl.c
51109index 71a600a..20d87b1 100644
51110--- a/fs/fcntl.c
51111+++ b/fs/fcntl.c
51112@@ -107,6 +107,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
51113 if (err)
51114 return err;
51115
51116+ if (gr_handle_chroot_fowner(pid, type))
51117+ return -ENOENT;
51118+ if (gr_check_protected_task_fowner(pid, type))
51119+ return -EACCES;
51120+
51121 f_modown(filp, pid, type, force);
51122 return 0;
51123 }
51124diff --git a/fs/fhandle.c b/fs/fhandle.c
51125index 999ff5c..41f4109 100644
51126--- a/fs/fhandle.c
51127+++ b/fs/fhandle.c
51128@@ -67,8 +67,7 @@ static long do_sys_name_to_handle(struct path *path,
51129 } else
51130 retval = 0;
51131 /* copy the mount id */
51132- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
51133- sizeof(*mnt_id)) ||
51134+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
51135 copy_to_user(ufh, handle,
51136 sizeof(struct file_handle) + handle_bytes))
51137 retval = -EFAULT;
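
In the fhandle.c hunk, a copy_to_user() of a single int-sized field becomes put_user(), which stores one simple scalar of statically known type and size; for such cases it is the simpler and typically cheaper primitive, with no length-parameterised copy loop. A trivial user-space analogue of the distinction:

#include <stdio.h>
#include <string.h>

static int store_scalar(int *dst, int val)            /* ~ put_user() */
{
	*dst = val;                   /* one typed store, size known statically */
	return 0;
}

static int store_bytes(void *dst, const void *src, size_t n) /* ~ copy_to_user() */
{
	memcpy(dst, src, n);          /* general length-parameterised copy */
	return 0;
}

int main(void)
{
	int out = 0, mnt_id = 42;
	store_scalar(&out, mnt_id);
	printf("%d\n", out);
	store_bytes(&out, &mnt_id, sizeof(out));
	printf("%d\n", out);
	return 0;
}
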
51138diff --git a/fs/fifo.c b/fs/fifo.c
51139index cf6f434..3d7942c 100644
51140--- a/fs/fifo.c
51141+++ b/fs/fifo.c
51142@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
51143 */
51144 filp->f_op = &read_pipefifo_fops;
51145 pipe->r_counter++;
51146- if (pipe->readers++ == 0)
51147+ if (atomic_inc_return(&pipe->readers) == 1)
51148 wake_up_partner(inode);
51149
51150- if (!pipe->writers) {
51151+ if (!atomic_read(&pipe->writers)) {
51152 if ((filp->f_flags & O_NONBLOCK)) {
51153 /* suppress POLLHUP until we have
51154 * seen a writer */
51155@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
51156 * errno=ENXIO when there is no process reading the FIFO.
51157 */
51158 ret = -ENXIO;
51159- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
51160+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
51161 goto err;
51162
51163 filp->f_op = &write_pipefifo_fops;
51164 pipe->w_counter++;
51165- if (!pipe->writers++)
51166+ if (atomic_inc_return(&pipe->writers) == 1)
51167 wake_up_partner(inode);
51168
51169- if (!pipe->readers) {
51170+ if (!atomic_read(&pipe->readers)) {
51171 if (wait_for_partner(inode, &pipe->r_counter))
51172 goto err_wr;
51173 }
51174@@ -104,11 +104,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
51175 */
51176 filp->f_op = &rdwr_pipefifo_fops;
51177
51178- pipe->readers++;
51179- pipe->writers++;
51180+ atomic_inc(&pipe->readers);
51181+ atomic_inc(&pipe->writers);
51182 pipe->r_counter++;
51183 pipe->w_counter++;
51184- if (pipe->readers == 1 || pipe->writers == 1)
51185+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
51186 wake_up_partner(inode);
51187 break;
51188
51189@@ -122,19 +122,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
51190 return 0;
51191
51192 err_rd:
51193- if (!--pipe->readers)
51194+ if (atomic_dec_and_test(&pipe->readers))
51195 wake_up_interruptible(&pipe->wait);
51196 ret = -ERESTARTSYS;
51197 goto err;
51198
51199 err_wr:
51200- if (!--pipe->writers)
51201+ if (atomic_dec_and_test(&pipe->writers))
51202 wake_up_interruptible(&pipe->wait);
51203 ret = -ERESTARTSYS;
51204 goto err;
51205
51206 err:
51207- if (!pipe->readers && !pipe->writers)
51208+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
51209 free_pipe_info(inode);
51210
51211 err_nocleanup:
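
The fifo.c hunk converts the bare readers/writers counters to atomic_t, folding each read-modify-write into a single atomic primitive: `pipe->readers++ == 0` becomes `atomic_inc_return(&pipe->readers) == 1`, and `!--pipe->writers` becomes `atomic_dec_and_test(&pipe->writers)`, so the first-opener and last-closer transitions are observed race-free. A user-space C11 sketch of the same pattern:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int readers;

static void open_reader(void)
{
	/* kernel: if (atomic_inc_return(&pipe->readers) == 1) */
	if (atomic_fetch_add(&readers, 1) + 1 == 1)
		puts("first reader: wake partner");
}

static void close_reader(void)
{
	/* kernel: if (atomic_dec_and_test(&pipe->readers)) */
	if (atomic_fetch_sub(&readers, 1) - 1 == 0)
		puts("last reader: wake waiters");
}

int main(void)
{
	open_reader();   /* first reader */
	open_reader();
	close_reader();
	close_reader();  /* last reader */
	return 0;
}
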
51212diff --git a/fs/file.c b/fs/file.c
51213index 2b3570b..c57924b 100644
51214--- a/fs/file.c
51215+++ b/fs/file.c
51216@@ -16,6 +16,7 @@
51217 #include <linux/slab.h>
51218 #include <linux/vmalloc.h>
51219 #include <linux/file.h>
51220+#include <linux/security.h>
51221 #include <linux/fdtable.h>
51222 #include <linux/bitops.h>
51223 #include <linux/interrupt.h>
51224@@ -892,6 +893,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
51225 if (!file)
51226 return __close_fd(files, fd);
51227
51228+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
51229 if (fd >= rlimit(RLIMIT_NOFILE))
51230 return -EBADF;
51231
51232@@ -918,6 +920,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
51233 if (unlikely(oldfd == newfd))
51234 return -EINVAL;
51235
51236+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
51237 if (newfd >= rlimit(RLIMIT_NOFILE))
51238 return -EBADF;
51239
51240@@ -973,6 +976,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
51241 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
51242 {
51243 int err;
51244+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
51245 if (from >= rlimit(RLIMIT_NOFILE))
51246 return -EINVAL;
51247 err = alloc_fd(from, flags);
51248diff --git a/fs/filesystems.c b/fs/filesystems.c
51249index da165f6..3671bdb 100644
51250--- a/fs/filesystems.c
51251+++ b/fs/filesystems.c
51252@@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
51253 int len = dot ? dot - name : strlen(name);
51254
51255 fs = __get_fs_type(name, len);
51256+
51257+#ifdef CONFIG_GRKERNSEC_MODHARDEN
51258+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
51259+#else
51260 if (!fs && (request_module("%.*s", len, name) == 0))
51261+#endif
51262 fs = __get_fs_type(name, len);
51263
51264 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
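
Under CONFIG_GRKERNSEC_MODHARDEN, the filesystem-name module auto-load above is routed through ___request_module() with a hardened alias, so module loading triggered by unprivileged userspace can be restricted and logged rather than honouring arbitrary names directly. A rough sketch of the gating idea (all names illustrative):

#include <stdbool.h>
#include <stdio.h>

static bool requester_privileged;     /* stand-in for the real policy check */

static int load_module(const char *name)
{
	printf("loading module '%s'\n", name);
	return 0;
}

static int request_module_hardened(const char *name)
{
	if (!requester_privileged) {
		printf("denied auto-load of '%s' for unprivileged requester\n", name);
		return -1;
	}
	return load_module(name);
}

int main(void)
{
	request_module_hardened("ext4");  /* denied and logged */
	requester_privileged = true;
	request_module_hardened("ext4");  /* allowed */
	return 0;
}
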
51265diff --git a/fs/fs_struct.c b/fs/fs_struct.c
51266index fe6ca58..65318cf 100644
51267--- a/fs/fs_struct.c
51268+++ b/fs/fs_struct.c
51269@@ -4,6 +4,7 @@
51270 #include <linux/path.h>
51271 #include <linux/slab.h>
51272 #include <linux/fs_struct.h>
51273+#include <linux/grsecurity.h>
51274 #include "internal.h"
51275
51276 /*
51277@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
51278 write_seqcount_begin(&fs->seq);
51279 old_root = fs->root;
51280 fs->root = *path;
51281+ gr_set_chroot_entries(current, path);
51282 write_seqcount_end(&fs->seq);
51283 spin_unlock(&fs->lock);
51284 if (old_root.dentry)
51285@@ -53,6 +55,21 @@ static inline int replace_path(struct path *p, const struct path *old, const str
51286 return 1;
51287 }
51288
51289+static inline int replace_root_path(struct task_struct *task, struct path *p, const struct path *old, struct path *new)
51290+{
51291+ if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
51292+ return 0;
51293+ *p = *new;
51294+
51295+ /* This function is only called from pivot_root(). Leave our
51296+ gr_chroot_dentry and is_chrooted flags as-is, so that a
51297+ pivoted root isn't treated as a chroot
51298+ */
51299+ //gr_set_chroot_entries(task, new);
51300+
51301+ return 1;
51302+}
51303+
51304 void chroot_fs_refs(struct path *old_root, struct path *new_root)
51305 {
51306 struct task_struct *g, *p;
51307@@ -67,7 +84,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
51308 int hits = 0;
51309 spin_lock(&fs->lock);
51310 write_seqcount_begin(&fs->seq);
51311- hits += replace_path(&fs->root, old_root, new_root);
51312+ hits += replace_root_path(p, &fs->root, old_root, new_root);
51313 hits += replace_path(&fs->pwd, old_root, new_root);
51314 write_seqcount_end(&fs->seq);
51315 while (hits--) {
51316@@ -99,7 +116,8 @@ void exit_fs(struct task_struct *tsk)
51317 task_lock(tsk);
51318 spin_lock(&fs->lock);
51319 tsk->fs = NULL;
51320- kill = !--fs->users;
51321+ gr_clear_chroot_entries(tsk);
51322+ kill = !atomic_dec_return(&fs->users);
51323 spin_unlock(&fs->lock);
51324 task_unlock(tsk);
51325 if (kill)
51326@@ -112,7 +130,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
51327 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
51328 /* We don't need to lock fs - think why ;-) */
51329 if (fs) {
51330- fs->users = 1;
51331+ atomic_set(&fs->users, 1);
51332 fs->in_exec = 0;
51333 spin_lock_init(&fs->lock);
51334 seqcount_init(&fs->seq);
51335@@ -121,6 +139,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
51336 spin_lock(&old->lock);
51337 fs->root = old->root;
51338 path_get(&fs->root);
51339+ /* instead of calling gr_set_chroot_entries here,
51340+ we call it from every caller of this function
51341+ */
51342 fs->pwd = old->pwd;
51343 path_get(&fs->pwd);
51344 spin_unlock(&old->lock);
51345@@ -139,8 +160,9 @@ int unshare_fs_struct(void)
51346
51347 task_lock(current);
51348 spin_lock(&fs->lock);
51349- kill = !--fs->users;
51350+ kill = !atomic_dec_return(&fs->users);
51351 current->fs = new_fs;
51352+ gr_set_chroot_entries(current, &new_fs->root);
51353 spin_unlock(&fs->lock);
51354 task_unlock(current);
51355
51356@@ -153,13 +175,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
51357
51358 int current_umask(void)
51359 {
51360- return current->fs->umask;
51361+ return current->fs->umask | gr_acl_umask();
51362 }
51363 EXPORT_SYMBOL(current_umask);
51364
51365 /* to be mentioned only in INIT_TASK */
51366 struct fs_struct init_fs = {
51367- .users = 1,
51368+ .users = ATOMIC_INIT(1),
51369 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
51370 .seq = SEQCNT_ZERO,
51371 .umask = 0022,
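
Besides converting fs->users to an atomic refcount, the fs_struct.c hunk changes current_umask() to OR in gr_acl_umask(), so a grsecurity ACL can only ever clear permission bits on newly created files, never grant extra ones. A sketch of the effective-umask composition (the policy value is illustrative):

#include <stdio.h>
#include <sys/types.h>

static mode_t policy_umask(void)      /* stand-in for gr_acl_umask() */
{
	return 0077;
}

static mode_t effective_umask(mode_t process_umask)
{
	/* OR means policy can only remove permission bits, never add them */
	return process_umask | policy_umask();
}

int main(void)
{
	mode_t m = effective_umask(0022);
	printf("mode for a 0666 create: %04o\n",
	       (unsigned)(0666 & ~m));    /* prints 0600 */
	return 0;
}
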
51372diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
51373index 8dcb114..b1072e2 100644
51374--- a/fs/fscache/cookie.c
51375+++ b/fs/fscache/cookie.c
51376@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
51377 parent ? (char *) parent->def->name : "<no-parent>",
51378 def->name, netfs_data);
51379
51380- fscache_stat(&fscache_n_acquires);
51381+ fscache_stat_unchecked(&fscache_n_acquires);
51382
51383 /* if there's no parent cookie, then we don't create one here either */
51384 if (!parent) {
51385- fscache_stat(&fscache_n_acquires_null);
51386+ fscache_stat_unchecked(&fscache_n_acquires_null);
51387 _leave(" [no parent]");
51388 return NULL;
51389 }
51390@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
51391 /* allocate and initialise a cookie */
51392 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
51393 if (!cookie) {
51394- fscache_stat(&fscache_n_acquires_oom);
51395+ fscache_stat_unchecked(&fscache_n_acquires_oom);
51396 _leave(" [ENOMEM]");
51397 return NULL;
51398 }
51399@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
51400
51401 switch (cookie->def->type) {
51402 case FSCACHE_COOKIE_TYPE_INDEX:
51403- fscache_stat(&fscache_n_cookie_index);
51404+ fscache_stat_unchecked(&fscache_n_cookie_index);
51405 break;
51406 case FSCACHE_COOKIE_TYPE_DATAFILE:
51407- fscache_stat(&fscache_n_cookie_data);
51408+ fscache_stat_unchecked(&fscache_n_cookie_data);
51409 break;
51410 default:
51411- fscache_stat(&fscache_n_cookie_special);
51412+ fscache_stat_unchecked(&fscache_n_cookie_special);
51413 break;
51414 }
51415
51416@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
51417 if (fscache_acquire_non_index_cookie(cookie) < 0) {
51418 atomic_dec(&parent->n_children);
51419 __fscache_cookie_put(cookie);
51420- fscache_stat(&fscache_n_acquires_nobufs);
51421+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
51422 _leave(" = NULL");
51423 return NULL;
51424 }
51425 }
51426
51427- fscache_stat(&fscache_n_acquires_ok);
51428+ fscache_stat_unchecked(&fscache_n_acquires_ok);
51429 _leave(" = %p", cookie);
51430 return cookie;
51431 }
51432@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
51433 cache = fscache_select_cache_for_object(cookie->parent);
51434 if (!cache) {
51435 up_read(&fscache_addremove_sem);
51436- fscache_stat(&fscache_n_acquires_no_cache);
51437+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
51438 _leave(" = -ENOMEDIUM [no cache]");
51439 return -ENOMEDIUM;
51440 }
51441@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
51442 object = cache->ops->alloc_object(cache, cookie);
51443 fscache_stat_d(&fscache_n_cop_alloc_object);
51444 if (IS_ERR(object)) {
51445- fscache_stat(&fscache_n_object_no_alloc);
51446+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
51447 ret = PTR_ERR(object);
51448 goto error;
51449 }
51450
51451- fscache_stat(&fscache_n_object_alloc);
51452+ fscache_stat_unchecked(&fscache_n_object_alloc);
51453
51454 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
51455
51456@@ -378,7 +378,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
51457
51458 _enter("{%s}", cookie->def->name);
51459
51460- fscache_stat(&fscache_n_invalidates);
51461+ fscache_stat_unchecked(&fscache_n_invalidates);
51462
51463 /* Only permit invalidation of data files. Invalidating an index will
51464 * require the caller to release all its attachments to the tree rooted
51465@@ -437,10 +437,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
51466 struct fscache_object *object;
51467 struct hlist_node *_p;
51468
51469- fscache_stat(&fscache_n_updates);
51470+ fscache_stat_unchecked(&fscache_n_updates);
51471
51472 if (!cookie) {
51473- fscache_stat(&fscache_n_updates_null);
51474+ fscache_stat_unchecked(&fscache_n_updates_null);
51475 _leave(" [no cookie]");
51476 return;
51477 }
51478@@ -474,12 +474,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
51479 struct fscache_object *object;
51480 unsigned long event;
51481
51482- fscache_stat(&fscache_n_relinquishes);
51483+ fscache_stat_unchecked(&fscache_n_relinquishes);
51484 if (retire)
51485- fscache_stat(&fscache_n_relinquishes_retire);
51486+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
51487
51488 if (!cookie) {
51489- fscache_stat(&fscache_n_relinquishes_null);
51490+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
51491 _leave(" [no cookie]");
51492 return;
51493 }
51494@@ -495,7 +495,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
51495
51496 /* wait for the cookie to finish being instantiated (or to fail) */
51497 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
51498- fscache_stat(&fscache_n_relinquishes_waitcrt);
51499+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
51500 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
51501 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
51502 }
51503diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
51504index ee38fef..0a326d4 100644
51505--- a/fs/fscache/internal.h
51506+++ b/fs/fscache/internal.h
51507@@ -148,101 +148,101 @@ extern void fscache_proc_cleanup(void);
51508 * stats.c
51509 */
51510 #ifdef CONFIG_FSCACHE_STATS
51511-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
51512-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
51513+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
51514+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
51515
51516-extern atomic_t fscache_n_op_pend;
51517-extern atomic_t fscache_n_op_run;
51518-extern atomic_t fscache_n_op_enqueue;
51519-extern atomic_t fscache_n_op_deferred_release;
51520-extern atomic_t fscache_n_op_release;
51521-extern atomic_t fscache_n_op_gc;
51522-extern atomic_t fscache_n_op_cancelled;
51523-extern atomic_t fscache_n_op_rejected;
51524+extern atomic_unchecked_t fscache_n_op_pend;
51525+extern atomic_unchecked_t fscache_n_op_run;
51526+extern atomic_unchecked_t fscache_n_op_enqueue;
51527+extern atomic_unchecked_t fscache_n_op_deferred_release;
51528+extern atomic_unchecked_t fscache_n_op_release;
51529+extern atomic_unchecked_t fscache_n_op_gc;
51530+extern atomic_unchecked_t fscache_n_op_cancelled;
51531+extern atomic_unchecked_t fscache_n_op_rejected;
51532
51533-extern atomic_t fscache_n_attr_changed;
51534-extern atomic_t fscache_n_attr_changed_ok;
51535-extern atomic_t fscache_n_attr_changed_nobufs;
51536-extern atomic_t fscache_n_attr_changed_nomem;
51537-extern atomic_t fscache_n_attr_changed_calls;
51538+extern atomic_unchecked_t fscache_n_attr_changed;
51539+extern atomic_unchecked_t fscache_n_attr_changed_ok;
51540+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
51541+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
51542+extern atomic_unchecked_t fscache_n_attr_changed_calls;
51543
51544-extern atomic_t fscache_n_allocs;
51545-extern atomic_t fscache_n_allocs_ok;
51546-extern atomic_t fscache_n_allocs_wait;
51547-extern atomic_t fscache_n_allocs_nobufs;
51548-extern atomic_t fscache_n_allocs_intr;
51549-extern atomic_t fscache_n_allocs_object_dead;
51550-extern atomic_t fscache_n_alloc_ops;
51551-extern atomic_t fscache_n_alloc_op_waits;
51552+extern atomic_unchecked_t fscache_n_allocs;
51553+extern atomic_unchecked_t fscache_n_allocs_ok;
51554+extern atomic_unchecked_t fscache_n_allocs_wait;
51555+extern atomic_unchecked_t fscache_n_allocs_nobufs;
51556+extern atomic_unchecked_t fscache_n_allocs_intr;
51557+extern atomic_unchecked_t fscache_n_allocs_object_dead;
51558+extern atomic_unchecked_t fscache_n_alloc_ops;
51559+extern atomic_unchecked_t fscache_n_alloc_op_waits;
51560
51561-extern atomic_t fscache_n_retrievals;
51562-extern atomic_t fscache_n_retrievals_ok;
51563-extern atomic_t fscache_n_retrievals_wait;
51564-extern atomic_t fscache_n_retrievals_nodata;
51565-extern atomic_t fscache_n_retrievals_nobufs;
51566-extern atomic_t fscache_n_retrievals_intr;
51567-extern atomic_t fscache_n_retrievals_nomem;
51568-extern atomic_t fscache_n_retrievals_object_dead;
51569-extern atomic_t fscache_n_retrieval_ops;
51570-extern atomic_t fscache_n_retrieval_op_waits;
51571+extern atomic_unchecked_t fscache_n_retrievals;
51572+extern atomic_unchecked_t fscache_n_retrievals_ok;
51573+extern atomic_unchecked_t fscache_n_retrievals_wait;
51574+extern atomic_unchecked_t fscache_n_retrievals_nodata;
51575+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
51576+extern atomic_unchecked_t fscache_n_retrievals_intr;
51577+extern atomic_unchecked_t fscache_n_retrievals_nomem;
51578+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
51579+extern atomic_unchecked_t fscache_n_retrieval_ops;
51580+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
51581
51582-extern atomic_t fscache_n_stores;
51583-extern atomic_t fscache_n_stores_ok;
51584-extern atomic_t fscache_n_stores_again;
51585-extern atomic_t fscache_n_stores_nobufs;
51586-extern atomic_t fscache_n_stores_oom;
51587-extern atomic_t fscache_n_store_ops;
51588-extern atomic_t fscache_n_store_calls;
51589-extern atomic_t fscache_n_store_pages;
51590-extern atomic_t fscache_n_store_radix_deletes;
51591-extern atomic_t fscache_n_store_pages_over_limit;
51592+extern atomic_unchecked_t fscache_n_stores;
51593+extern atomic_unchecked_t fscache_n_stores_ok;
51594+extern atomic_unchecked_t fscache_n_stores_again;
51595+extern atomic_unchecked_t fscache_n_stores_nobufs;
51596+extern atomic_unchecked_t fscache_n_stores_oom;
51597+extern atomic_unchecked_t fscache_n_store_ops;
51598+extern atomic_unchecked_t fscache_n_store_calls;
51599+extern atomic_unchecked_t fscache_n_store_pages;
51600+extern atomic_unchecked_t fscache_n_store_radix_deletes;
51601+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
51602
51603-extern atomic_t fscache_n_store_vmscan_not_storing;
51604-extern atomic_t fscache_n_store_vmscan_gone;
51605-extern atomic_t fscache_n_store_vmscan_busy;
51606-extern atomic_t fscache_n_store_vmscan_cancelled;
51607-extern atomic_t fscache_n_store_vmscan_wait;
51608+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
51609+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
51610+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
51611+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
51612+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
51613
51614-extern atomic_t fscache_n_marks;
51615-extern atomic_t fscache_n_uncaches;
51616+extern atomic_unchecked_t fscache_n_marks;
51617+extern atomic_unchecked_t fscache_n_uncaches;
51618
51619-extern atomic_t fscache_n_acquires;
51620-extern atomic_t fscache_n_acquires_null;
51621-extern atomic_t fscache_n_acquires_no_cache;
51622-extern atomic_t fscache_n_acquires_ok;
51623-extern atomic_t fscache_n_acquires_nobufs;
51624-extern atomic_t fscache_n_acquires_oom;
51625+extern atomic_unchecked_t fscache_n_acquires;
51626+extern atomic_unchecked_t fscache_n_acquires_null;
51627+extern atomic_unchecked_t fscache_n_acquires_no_cache;
51628+extern atomic_unchecked_t fscache_n_acquires_ok;
51629+extern atomic_unchecked_t fscache_n_acquires_nobufs;
51630+extern atomic_unchecked_t fscache_n_acquires_oom;
51631
51632-extern atomic_t fscache_n_invalidates;
51633-extern atomic_t fscache_n_invalidates_run;
51634+extern atomic_unchecked_t fscache_n_invalidates;
51635+extern atomic_unchecked_t fscache_n_invalidates_run;
51636
51637-extern atomic_t fscache_n_updates;
51638-extern atomic_t fscache_n_updates_null;
51639-extern atomic_t fscache_n_updates_run;
51640+extern atomic_unchecked_t fscache_n_updates;
51641+extern atomic_unchecked_t fscache_n_updates_null;
51642+extern atomic_unchecked_t fscache_n_updates_run;
51643
51644-extern atomic_t fscache_n_relinquishes;
51645-extern atomic_t fscache_n_relinquishes_null;
51646-extern atomic_t fscache_n_relinquishes_waitcrt;
51647-extern atomic_t fscache_n_relinquishes_retire;
51648+extern atomic_unchecked_t fscache_n_relinquishes;
51649+extern atomic_unchecked_t fscache_n_relinquishes_null;
51650+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
51651+extern atomic_unchecked_t fscache_n_relinquishes_retire;
51652
51653-extern atomic_t fscache_n_cookie_index;
51654-extern atomic_t fscache_n_cookie_data;
51655-extern atomic_t fscache_n_cookie_special;
51656+extern atomic_unchecked_t fscache_n_cookie_index;
51657+extern atomic_unchecked_t fscache_n_cookie_data;
51658+extern atomic_unchecked_t fscache_n_cookie_special;
51659
51660-extern atomic_t fscache_n_object_alloc;
51661-extern atomic_t fscache_n_object_no_alloc;
51662-extern atomic_t fscache_n_object_lookups;
51663-extern atomic_t fscache_n_object_lookups_negative;
51664-extern atomic_t fscache_n_object_lookups_positive;
51665-extern atomic_t fscache_n_object_lookups_timed_out;
51666-extern atomic_t fscache_n_object_created;
51667-extern atomic_t fscache_n_object_avail;
51668-extern atomic_t fscache_n_object_dead;
51669+extern atomic_unchecked_t fscache_n_object_alloc;
51670+extern atomic_unchecked_t fscache_n_object_no_alloc;
51671+extern atomic_unchecked_t fscache_n_object_lookups;
51672+extern atomic_unchecked_t fscache_n_object_lookups_negative;
51673+extern atomic_unchecked_t fscache_n_object_lookups_positive;
51674+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
51675+extern atomic_unchecked_t fscache_n_object_created;
51676+extern atomic_unchecked_t fscache_n_object_avail;
51677+extern atomic_unchecked_t fscache_n_object_dead;
51678
51679-extern atomic_t fscache_n_checkaux_none;
51680-extern atomic_t fscache_n_checkaux_okay;
51681-extern atomic_t fscache_n_checkaux_update;
51682-extern atomic_t fscache_n_checkaux_obsolete;
51683+extern atomic_unchecked_t fscache_n_checkaux_none;
51684+extern atomic_unchecked_t fscache_n_checkaux_okay;
51685+extern atomic_unchecked_t fscache_n_checkaux_update;
51686+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
51687
51688 extern atomic_t fscache_n_cop_alloc_object;
51689 extern atomic_t fscache_n_cop_lookup_object;
51690@@ -267,6 +267,11 @@ static inline void fscache_stat(atomic_t *stat)
51691 atomic_inc(stat);
51692 }
51693
51694+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
51695+{
51696+ atomic_inc_unchecked(stat);
51697+}
51698+
51699 static inline void fscache_stat_d(atomic_t *stat)
51700 {
51701 atomic_dec(stat);
51702@@ -279,6 +284,7 @@ extern const struct file_operations fscache_stats_fops;
51703
51704 #define __fscache_stat(stat) (NULL)
51705 #define fscache_stat(stat) do {} while (0)
51706+#define fscache_stat_unchecked(stat) do {} while (0)
51707 #define fscache_stat_d(stat) do {} while (0)
51708 #endif
51709
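
The internal.h hunk extends fscache's existing stats pattern: with CONFIG_FSCACHE_STATS, fscache_stat_unchecked() bumps a wraparound-tolerant counter; without it, the same call sites expand to an empty do {} while (0) macro whose argument is discarded by the preprocessor, so the counters need not even exist and call sites stay free of #ifdefs. A minimal stand-alone illustration of the pattern:

#include <stdio.h>

#define ENABLE_STATS 1   /* set to 0 and the call sites compile to nothing */

#if ENABLE_STATS
static unsigned long n_acquires;
#define stat_unchecked(c) do { (*(c))++; } while (0)
#else
/* The macro argument is dropped by the preprocessor, so the counter
 * need not exist at all in this configuration. */
#define stat_unchecked(c) do {} while (0)
#endif

static void acquire_cookie(void)
{
	stat_unchecked(&n_acquires);     /* no #ifdef needed at call sites */
}

int main(void)
{
	acquire_cookie();
#if ENABLE_STATS
	printf("acquires: %lu\n", n_acquires);
#endif
	return 0;
}
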
51710diff --git a/fs/fscache/object.c b/fs/fscache/object.c
51711index 50d41c1..10ee117 100644
51712--- a/fs/fscache/object.c
51713+++ b/fs/fscache/object.c
51714@@ -143,7 +143,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
51715 /* Invalidate an object on disk */
51716 case FSCACHE_OBJECT_INVALIDATING:
51717 clear_bit(FSCACHE_OBJECT_EV_INVALIDATE, &object->events);
51718- fscache_stat(&fscache_n_invalidates_run);
51719+ fscache_stat_unchecked(&fscache_n_invalidates_run);
51720 fscache_stat(&fscache_n_cop_invalidate_object);
51721 fscache_invalidate_object(object);
51722 fscache_stat_d(&fscache_n_cop_invalidate_object);
51723@@ -153,7 +153,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
51724 /* update the object metadata on disk */
51725 case FSCACHE_OBJECT_UPDATING:
51726 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
51727- fscache_stat(&fscache_n_updates_run);
51728+ fscache_stat_unchecked(&fscache_n_updates_run);
51729 fscache_stat(&fscache_n_cop_update_object);
51730 object->cache->ops->update_object(object);
51731 fscache_stat_d(&fscache_n_cop_update_object);
51732@@ -242,7 +242,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
51733 spin_lock(&object->lock);
51734 object->state = FSCACHE_OBJECT_DEAD;
51735 spin_unlock(&object->lock);
51736- fscache_stat(&fscache_n_object_dead);
51737+ fscache_stat_unchecked(&fscache_n_object_dead);
51738 goto terminal_transit;
51739
51740 /* handle the parent cache of this object being withdrawn from
51741@@ -257,7 +257,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
51742 spin_lock(&object->lock);
51743 object->state = FSCACHE_OBJECT_DEAD;
51744 spin_unlock(&object->lock);
51745- fscache_stat(&fscache_n_object_dead);
51746+ fscache_stat_unchecked(&fscache_n_object_dead);
51747 goto terminal_transit;
51748
51749 /* complain about the object being woken up once it is
51750@@ -495,7 +495,7 @@ static void fscache_lookup_object(struct fscache_object *object)
51751 parent->cookie->def->name, cookie->def->name,
51752 object->cache->tag->name);
51753
51754- fscache_stat(&fscache_n_object_lookups);
51755+ fscache_stat_unchecked(&fscache_n_object_lookups);
51756 fscache_stat(&fscache_n_cop_lookup_object);
51757 ret = object->cache->ops->lookup_object(object);
51758 fscache_stat_d(&fscache_n_cop_lookup_object);
51759@@ -506,7 +506,7 @@ static void fscache_lookup_object(struct fscache_object *object)
51760 if (ret == -ETIMEDOUT) {
51761 /* probably stuck behind another object, so move this one to
51762 * the back of the queue */
51763- fscache_stat(&fscache_n_object_lookups_timed_out);
51764+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
51765 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
51766 }
51767
51768@@ -529,7 +529,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
51769
51770 spin_lock(&object->lock);
51771 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
51772- fscache_stat(&fscache_n_object_lookups_negative);
51773+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
51774
51775 /* transit here to allow write requests to begin stacking up
51776 * and read requests to begin returning ENODATA */
51777@@ -575,7 +575,7 @@ void fscache_obtained_object(struct fscache_object *object)
51778 * result, in which case there may be data available */
51779 spin_lock(&object->lock);
51780 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
51781- fscache_stat(&fscache_n_object_lookups_positive);
51782+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
51783
51784 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
51785
51786@@ -589,7 +589,7 @@ void fscache_obtained_object(struct fscache_object *object)
51787 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
51788 } else {
51789 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
51790- fscache_stat(&fscache_n_object_created);
51791+ fscache_stat_unchecked(&fscache_n_object_created);
51792
51793 object->state = FSCACHE_OBJECT_AVAILABLE;
51794 spin_unlock(&object->lock);
51795@@ -634,7 +634,7 @@ static void fscache_object_available(struct fscache_object *object)
51796 fscache_enqueue_dependents(object);
51797
51798 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
51799- fscache_stat(&fscache_n_object_avail);
51800+ fscache_stat_unchecked(&fscache_n_object_avail);
51801
51802 _leave("");
51803 }
51804@@ -894,7 +894,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
51805 enum fscache_checkaux result;
51806
51807 if (!object->cookie->def->check_aux) {
51808- fscache_stat(&fscache_n_checkaux_none);
51809+ fscache_stat_unchecked(&fscache_n_checkaux_none);
51810 return FSCACHE_CHECKAUX_OKAY;
51811 }
51812
51813@@ -903,17 +903,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
51814 switch (result) {
51815 /* entry okay as is */
51816 case FSCACHE_CHECKAUX_OKAY:
51817- fscache_stat(&fscache_n_checkaux_okay);
51818+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
51819 break;
51820
51821 /* entry requires update */
51822 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
51823- fscache_stat(&fscache_n_checkaux_update);
51824+ fscache_stat_unchecked(&fscache_n_checkaux_update);
51825 break;
51826
51827 /* entry requires deletion */
51828 case FSCACHE_CHECKAUX_OBSOLETE:
51829- fscache_stat(&fscache_n_checkaux_obsolete);
51830+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
51831 break;
51832
51833 default:
51834diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
51835index 762a9ec..2023284 100644
51836--- a/fs/fscache/operation.c
51837+++ b/fs/fscache/operation.c
51838@@ -17,7 +17,7 @@
51839 #include <linux/slab.h>
51840 #include "internal.h"
51841
51842-atomic_t fscache_op_debug_id;
51843+atomic_unchecked_t fscache_op_debug_id;
51844 EXPORT_SYMBOL(fscache_op_debug_id);
51845
51846 /**
51847@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
51848 ASSERTCMP(atomic_read(&op->usage), >, 0);
51849 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
51850
51851- fscache_stat(&fscache_n_op_enqueue);
51852+ fscache_stat_unchecked(&fscache_n_op_enqueue);
51853 switch (op->flags & FSCACHE_OP_TYPE) {
51854 case FSCACHE_OP_ASYNC:
51855 _debug("queue async");
51856@@ -73,7 +73,7 @@ static void fscache_run_op(struct fscache_object *object,
51857 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
51858 if (op->processor)
51859 fscache_enqueue_operation(op);
51860- fscache_stat(&fscache_n_op_run);
51861+ fscache_stat_unchecked(&fscache_n_op_run);
51862 }
51863
51864 /*
51865@@ -105,11 +105,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
51866 if (object->n_in_progress > 0) {
51867 atomic_inc(&op->usage);
51868 list_add_tail(&op->pend_link, &object->pending_ops);
51869- fscache_stat(&fscache_n_op_pend);
51870+ fscache_stat_unchecked(&fscache_n_op_pend);
51871 } else if (!list_empty(&object->pending_ops)) {
51872 atomic_inc(&op->usage);
51873 list_add_tail(&op->pend_link, &object->pending_ops);
51874- fscache_stat(&fscache_n_op_pend);
51875+ fscache_stat_unchecked(&fscache_n_op_pend);
51876 fscache_start_operations(object);
51877 } else {
51878 ASSERTCMP(object->n_in_progress, ==, 0);
51879@@ -125,7 +125,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
51880 object->n_exclusive++; /* reads and writes must wait */
51881 atomic_inc(&op->usage);
51882 list_add_tail(&op->pend_link, &object->pending_ops);
51883- fscache_stat(&fscache_n_op_pend);
51884+ fscache_stat_unchecked(&fscache_n_op_pend);
51885 ret = 0;
51886 } else {
51887 /* If we're in any other state, there must have been an I/O
51888@@ -215,11 +215,11 @@ int fscache_submit_op(struct fscache_object *object,
51889 if (object->n_exclusive > 0) {
51890 atomic_inc(&op->usage);
51891 list_add_tail(&op->pend_link, &object->pending_ops);
51892- fscache_stat(&fscache_n_op_pend);
51893+ fscache_stat_unchecked(&fscache_n_op_pend);
51894 } else if (!list_empty(&object->pending_ops)) {
51895 atomic_inc(&op->usage);
51896 list_add_tail(&op->pend_link, &object->pending_ops);
51897- fscache_stat(&fscache_n_op_pend);
51898+ fscache_stat_unchecked(&fscache_n_op_pend);
51899 fscache_start_operations(object);
51900 } else {
51901 ASSERTCMP(object->n_exclusive, ==, 0);
51902@@ -231,12 +231,12 @@ int fscache_submit_op(struct fscache_object *object,
51903 object->n_ops++;
51904 atomic_inc(&op->usage);
51905 list_add_tail(&op->pend_link, &object->pending_ops);
51906- fscache_stat(&fscache_n_op_pend);
51907+ fscache_stat_unchecked(&fscache_n_op_pend);
51908 ret = 0;
51909 } else if (object->state == FSCACHE_OBJECT_DYING ||
51910 object->state == FSCACHE_OBJECT_LC_DYING ||
51911 object->state == FSCACHE_OBJECT_WITHDRAWING) {
51912- fscache_stat(&fscache_n_op_rejected);
51913+ fscache_stat_unchecked(&fscache_n_op_rejected);
51914 op->state = FSCACHE_OP_ST_CANCELLED;
51915 ret = -ENOBUFS;
51916 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
51917@@ -315,7 +315,7 @@ int fscache_cancel_op(struct fscache_operation *op,
51918 ret = -EBUSY;
51919 if (op->state == FSCACHE_OP_ST_PENDING) {
51920 ASSERT(!list_empty(&op->pend_link));
51921- fscache_stat(&fscache_n_op_cancelled);
51922+ fscache_stat_unchecked(&fscache_n_op_cancelled);
51923 list_del_init(&op->pend_link);
51924 if (do_cancel)
51925 do_cancel(op);
51926@@ -347,7 +347,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
51927 while (!list_empty(&object->pending_ops)) {
51928 op = list_entry(object->pending_ops.next,
51929 struct fscache_operation, pend_link);
51930- fscache_stat(&fscache_n_op_cancelled);
51931+ fscache_stat_unchecked(&fscache_n_op_cancelled);
51932 list_del_init(&op->pend_link);
51933
51934 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
51935@@ -419,7 +419,7 @@ void fscache_put_operation(struct fscache_operation *op)
51936 op->state, ==, FSCACHE_OP_ST_CANCELLED);
51937 op->state = FSCACHE_OP_ST_DEAD;
51938
51939- fscache_stat(&fscache_n_op_release);
51940+ fscache_stat_unchecked(&fscache_n_op_release);
51941
51942 if (op->release) {
51943 op->release(op);
51944@@ -442,7 +442,7 @@ void fscache_put_operation(struct fscache_operation *op)
51945 * lock, and defer it otherwise */
51946 if (!spin_trylock(&object->lock)) {
51947 _debug("defer put");
51948- fscache_stat(&fscache_n_op_deferred_release);
51949+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
51950
51951 cache = object->cache;
51952 spin_lock(&cache->op_gc_list_lock);
51953@@ -495,7 +495,7 @@ void fscache_operation_gc(struct work_struct *work)
51954
51955 _debug("GC DEFERRED REL OBJ%x OP%x",
51956 object->debug_id, op->debug_id);
51957- fscache_stat(&fscache_n_op_gc);
51958+ fscache_stat_unchecked(&fscache_n_op_gc);
51959
51960 ASSERTCMP(atomic_read(&op->usage), ==, 0);
51961 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
51962diff --git a/fs/fscache/page.c b/fs/fscache/page.c
51963index ff000e5..c44ec6d 100644
51964--- a/fs/fscache/page.c
51965+++ b/fs/fscache/page.c
51966@@ -61,7 +61,7 @@ try_again:
51967 val = radix_tree_lookup(&cookie->stores, page->index);
51968 if (!val) {
51969 rcu_read_unlock();
51970- fscache_stat(&fscache_n_store_vmscan_not_storing);
51971+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
51972 __fscache_uncache_page(cookie, page);
51973 return true;
51974 }
51975@@ -91,11 +91,11 @@ try_again:
51976 spin_unlock(&cookie->stores_lock);
51977
51978 if (xpage) {
51979- fscache_stat(&fscache_n_store_vmscan_cancelled);
51980- fscache_stat(&fscache_n_store_radix_deletes);
51981+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
51982+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
51983 ASSERTCMP(xpage, ==, page);
51984 } else {
51985- fscache_stat(&fscache_n_store_vmscan_gone);
51986+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
51987 }
51988
51989 wake_up_bit(&cookie->flags, 0);
51990@@ -110,11 +110,11 @@ page_busy:
51991 * sleeping on memory allocation, so we may need to impose a timeout
51992 * too. */
51993 if (!(gfp & __GFP_WAIT)) {
51994- fscache_stat(&fscache_n_store_vmscan_busy);
51995+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
51996 return false;
51997 }
51998
51999- fscache_stat(&fscache_n_store_vmscan_wait);
52000+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
52001 __fscache_wait_on_page_write(cookie, page);
52002 gfp &= ~__GFP_WAIT;
52003 goto try_again;
52004@@ -140,7 +140,7 @@ static void fscache_end_page_write(struct fscache_object *object,
52005 FSCACHE_COOKIE_STORING_TAG);
52006 if (!radix_tree_tag_get(&cookie->stores, page->index,
52007 FSCACHE_COOKIE_PENDING_TAG)) {
52008- fscache_stat(&fscache_n_store_radix_deletes);
52009+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
52010 xpage = radix_tree_delete(&cookie->stores, page->index);
52011 }
52012 spin_unlock(&cookie->stores_lock);
52013@@ -161,7 +161,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
52014
52015 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
52016
52017- fscache_stat(&fscache_n_attr_changed_calls);
52018+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
52019
52020 if (fscache_object_is_active(object)) {
52021 fscache_stat(&fscache_n_cop_attr_changed);
52022@@ -187,11 +187,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
52023
52024 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
52025
52026- fscache_stat(&fscache_n_attr_changed);
52027+ fscache_stat_unchecked(&fscache_n_attr_changed);
52028
52029 op = kzalloc(sizeof(*op), GFP_KERNEL);
52030 if (!op) {
52031- fscache_stat(&fscache_n_attr_changed_nomem);
52032+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
52033 _leave(" = -ENOMEM");
52034 return -ENOMEM;
52035 }
52036@@ -209,7 +209,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
52037 if (fscache_submit_exclusive_op(object, op) < 0)
52038 goto nobufs;
52039 spin_unlock(&cookie->lock);
52040- fscache_stat(&fscache_n_attr_changed_ok);
52041+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
52042 fscache_put_operation(op);
52043 _leave(" = 0");
52044 return 0;
52045@@ -217,7 +217,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
52046 nobufs:
52047 spin_unlock(&cookie->lock);
52048 kfree(op);
52049- fscache_stat(&fscache_n_attr_changed_nobufs);
52050+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
52051 _leave(" = %d", -ENOBUFS);
52052 return -ENOBUFS;
52053 }
52054@@ -255,7 +255,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
52055 /* allocate a retrieval operation and attempt to submit it */
52056 op = kzalloc(sizeof(*op), GFP_NOIO);
52057 if (!op) {
52058- fscache_stat(&fscache_n_retrievals_nomem);
52059+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
52060 return NULL;
52061 }
52062
52063@@ -283,13 +283,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
52064 return 0;
52065 }
52066
52067- fscache_stat(&fscache_n_retrievals_wait);
52068+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
52069
52070 jif = jiffies;
52071 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
52072 fscache_wait_bit_interruptible,
52073 TASK_INTERRUPTIBLE) != 0) {
52074- fscache_stat(&fscache_n_retrievals_intr);
52075+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
52076 _leave(" = -ERESTARTSYS");
52077 return -ERESTARTSYS;
52078 }
52079@@ -318,8 +318,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
52080 */
52081 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
52082 struct fscache_retrieval *op,
52083- atomic_t *stat_op_waits,
52084- atomic_t *stat_object_dead)
52085+ atomic_unchecked_t *stat_op_waits,
52086+ atomic_unchecked_t *stat_object_dead)
52087 {
52088 int ret;
52089
52090@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
52091 goto check_if_dead;
52092
52093 _debug(">>> WT");
52094- fscache_stat(stat_op_waits);
52095+ fscache_stat_unchecked(stat_op_waits);
52096 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
52097 fscache_wait_bit_interruptible,
52098 TASK_INTERRUPTIBLE) != 0) {
52099@@ -344,14 +344,14 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
52100
52101 check_if_dead:
52102 if (op->op.state == FSCACHE_OP_ST_CANCELLED) {
52103- fscache_stat(stat_object_dead);
52104+ fscache_stat_unchecked(stat_object_dead);
52105 _leave(" = -ENOBUFS [cancelled]");
52106 return -ENOBUFS;
52107 }
52108 if (unlikely(fscache_object_is_dead(object))) {
52109 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->op.state);
52110 fscache_cancel_op(&op->op, fscache_do_cancel_retrieval);
52111- fscache_stat(stat_object_dead);
52112+ fscache_stat_unchecked(stat_object_dead);
52113 return -ENOBUFS;
52114 }
52115 return 0;
52116@@ -378,7 +378,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
52117
52118 _enter("%p,%p,,,", cookie, page);
52119
52120- fscache_stat(&fscache_n_retrievals);
52121+ fscache_stat_unchecked(&fscache_n_retrievals);
52122
52123 if (hlist_empty(&cookie->backing_objects))
52124 goto nobufs;
52125@@ -417,7 +417,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
52126 goto nobufs_unlock_dec;
52127 spin_unlock(&cookie->lock);
52128
52129- fscache_stat(&fscache_n_retrieval_ops);
52130+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
52131
52132 /* pin the netfs read context in case we need to do the actual netfs
52133 * read because we've encountered a cache read failure */
52134@@ -447,15 +447,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
52135
52136 error:
52137 if (ret == -ENOMEM)
52138- fscache_stat(&fscache_n_retrievals_nomem);
52139+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
52140 else if (ret == -ERESTARTSYS)
52141- fscache_stat(&fscache_n_retrievals_intr);
52142+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
52143 else if (ret == -ENODATA)
52144- fscache_stat(&fscache_n_retrievals_nodata);
52145+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
52146 else if (ret < 0)
52147- fscache_stat(&fscache_n_retrievals_nobufs);
52148+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
52149 else
52150- fscache_stat(&fscache_n_retrievals_ok);
52151+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
52152
52153 fscache_put_retrieval(op);
52154 _leave(" = %d", ret);
52155@@ -467,7 +467,7 @@ nobufs_unlock:
52156 spin_unlock(&cookie->lock);
52157 kfree(op);
52158 nobufs:
52159- fscache_stat(&fscache_n_retrievals_nobufs);
52160+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
52161 _leave(" = -ENOBUFS");
52162 return -ENOBUFS;
52163 }
52164@@ -505,7 +505,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
52165
52166 _enter("%p,,%d,,,", cookie, *nr_pages);
52167
52168- fscache_stat(&fscache_n_retrievals);
52169+ fscache_stat_unchecked(&fscache_n_retrievals);
52170
52171 if (hlist_empty(&cookie->backing_objects))
52172 goto nobufs;
52173@@ -541,7 +541,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
52174 goto nobufs_unlock_dec;
52175 spin_unlock(&cookie->lock);
52176
52177- fscache_stat(&fscache_n_retrieval_ops);
52178+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
52179
52180 /* pin the netfs read context in case we need to do the actual netfs
52181 * read because we've encountered a cache read failure */
52182@@ -571,15 +571,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
52183
52184 error:
52185 if (ret == -ENOMEM)
52186- fscache_stat(&fscache_n_retrievals_nomem);
52187+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
52188 else if (ret == -ERESTARTSYS)
52189- fscache_stat(&fscache_n_retrievals_intr);
52190+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
52191 else if (ret == -ENODATA)
52192- fscache_stat(&fscache_n_retrievals_nodata);
52193+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
52194 else if (ret < 0)
52195- fscache_stat(&fscache_n_retrievals_nobufs);
52196+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
52197 else
52198- fscache_stat(&fscache_n_retrievals_ok);
52199+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
52200
52201 fscache_put_retrieval(op);
52202 _leave(" = %d", ret);
52203@@ -591,7 +591,7 @@ nobufs_unlock:
52204 spin_unlock(&cookie->lock);
52205 kfree(op);
52206 nobufs:
52207- fscache_stat(&fscache_n_retrievals_nobufs);
52208+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
52209 _leave(" = -ENOBUFS");
52210 return -ENOBUFS;
52211 }
52212@@ -615,7 +615,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
52213
52214 _enter("%p,%p,,,", cookie, page);
52215
52216- fscache_stat(&fscache_n_allocs);
52217+ fscache_stat_unchecked(&fscache_n_allocs);
52218
52219 if (hlist_empty(&cookie->backing_objects))
52220 goto nobufs;
52221@@ -647,7 +647,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
52222 goto nobufs_unlock;
52223 spin_unlock(&cookie->lock);
52224
52225- fscache_stat(&fscache_n_alloc_ops);
52226+ fscache_stat_unchecked(&fscache_n_alloc_ops);
52227
52228 ret = fscache_wait_for_retrieval_activation(
52229 object, op,
52230@@ -663,11 +663,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
52231
52232 error:
52233 if (ret == -ERESTARTSYS)
52234- fscache_stat(&fscache_n_allocs_intr);
52235+ fscache_stat_unchecked(&fscache_n_allocs_intr);
52236 else if (ret < 0)
52237- fscache_stat(&fscache_n_allocs_nobufs);
52238+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
52239 else
52240- fscache_stat(&fscache_n_allocs_ok);
52241+ fscache_stat_unchecked(&fscache_n_allocs_ok);
52242
52243 fscache_put_retrieval(op);
52244 _leave(" = %d", ret);
52245@@ -677,7 +677,7 @@ nobufs_unlock:
52246 spin_unlock(&cookie->lock);
52247 kfree(op);
52248 nobufs:
52249- fscache_stat(&fscache_n_allocs_nobufs);
52250+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
52251 _leave(" = -ENOBUFS");
52252 return -ENOBUFS;
52253 }
52254@@ -736,7 +736,7 @@ static void fscache_write_op(struct fscache_operation *_op)
52255
52256 spin_lock(&cookie->stores_lock);
52257
52258- fscache_stat(&fscache_n_store_calls);
52259+ fscache_stat_unchecked(&fscache_n_store_calls);
52260
52261 /* find a page to store */
52262 page = NULL;
52263@@ -747,7 +747,7 @@ static void fscache_write_op(struct fscache_operation *_op)
52264 page = results[0];
52265 _debug("gang %d [%lx]", n, page->index);
52266 if (page->index > op->store_limit) {
52267- fscache_stat(&fscache_n_store_pages_over_limit);
52268+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
52269 goto superseded;
52270 }
52271
52272@@ -759,7 +759,7 @@ static void fscache_write_op(struct fscache_operation *_op)
52273 spin_unlock(&cookie->stores_lock);
52274 spin_unlock(&object->lock);
52275
52276- fscache_stat(&fscache_n_store_pages);
52277+ fscache_stat_unchecked(&fscache_n_store_pages);
52278 fscache_stat(&fscache_n_cop_write_page);
52279 ret = object->cache->ops->write_page(op, page);
52280 fscache_stat_d(&fscache_n_cop_write_page);
52281@@ -860,7 +860,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
52282 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
52283 ASSERT(PageFsCache(page));
52284
52285- fscache_stat(&fscache_n_stores);
52286+ fscache_stat_unchecked(&fscache_n_stores);
52287
52288 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
52289 _leave(" = -ENOBUFS [invalidating]");
52290@@ -916,7 +916,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
52291 spin_unlock(&cookie->stores_lock);
52292 spin_unlock(&object->lock);
52293
52294- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
52295+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
52296 op->store_limit = object->store_limit;
52297
52298 if (fscache_submit_op(object, &op->op) < 0)
52299@@ -924,8 +924,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
52300
52301 spin_unlock(&cookie->lock);
52302 radix_tree_preload_end();
52303- fscache_stat(&fscache_n_store_ops);
52304- fscache_stat(&fscache_n_stores_ok);
52305+ fscache_stat_unchecked(&fscache_n_store_ops);
52306+ fscache_stat_unchecked(&fscache_n_stores_ok);
52307
52308 /* the work queue now carries its own ref on the object */
52309 fscache_put_operation(&op->op);
52310@@ -933,14 +933,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
52311 return 0;
52312
52313 already_queued:
52314- fscache_stat(&fscache_n_stores_again);
52315+ fscache_stat_unchecked(&fscache_n_stores_again);
52316 already_pending:
52317 spin_unlock(&cookie->stores_lock);
52318 spin_unlock(&object->lock);
52319 spin_unlock(&cookie->lock);
52320 radix_tree_preload_end();
52321 kfree(op);
52322- fscache_stat(&fscache_n_stores_ok);
52323+ fscache_stat_unchecked(&fscache_n_stores_ok);
52324 _leave(" = 0");
52325 return 0;
52326
52327@@ -959,14 +959,14 @@ nobufs:
52328 spin_unlock(&cookie->lock);
52329 radix_tree_preload_end();
52330 kfree(op);
52331- fscache_stat(&fscache_n_stores_nobufs);
52332+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
52333 _leave(" = -ENOBUFS");
52334 return -ENOBUFS;
52335
52336 nomem_free:
52337 kfree(op);
52338 nomem:
52339- fscache_stat(&fscache_n_stores_oom);
52340+ fscache_stat_unchecked(&fscache_n_stores_oom);
52341 _leave(" = -ENOMEM");
52342 return -ENOMEM;
52343 }
52344@@ -984,7 +984,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
52345 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
52346 ASSERTCMP(page, !=, NULL);
52347
52348- fscache_stat(&fscache_n_uncaches);
52349+ fscache_stat_unchecked(&fscache_n_uncaches);
52350
52351 /* cache withdrawal may beat us to it */
52352 if (!PageFsCache(page))
52353@@ -1035,7 +1035,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
52354 struct fscache_cookie *cookie = op->op.object->cookie;
52355
52356 #ifdef CONFIG_FSCACHE_STATS
52357- atomic_inc(&fscache_n_marks);
52358+ atomic_inc_unchecked(&fscache_n_marks);
52359 #endif
52360
52361 _debug("- mark %p{%lx}", page, page->index);
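
/*
 * Note on the fscache conversions above: under PaX's REFCOUNT feature
 * (enabled elsewhere in this patch), the ordinary atomic_t operations
 * are instrumented to detect overflow and saturate/trap, defeating
 * reference-count-overflow exploits.  Event counters that may
 * legitimately wrap -- such as these statistics -- are opted out by
 * moving them to atomic_unchecked_t and the *_unchecked accessors.
 * A minimal sketch of the idea; the real definitions are per-arch and
 * live elsewhere in the patch, so take this as illustrative only:
 */
typedef struct { int counter; } atomic_unchecked_t;

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	__sync_fetch_and_add(&v->counter, 1);	/* plain wrapping add, no overflow check */
}

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return *(volatile const int *)&v->counter;	/* plain load */
}

/* fscache_stat_unchecked() is then simply atomic_inc_unchecked() on the counter. */
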
52362diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
52363index 8179e8b..5072cc7 100644
52364--- a/fs/fscache/stats.c
52365+++ b/fs/fscache/stats.c
52366@@ -18,99 +18,99 @@
52367 /*
52368 * operation counters
52369 */
52370-atomic_t fscache_n_op_pend;
52371-atomic_t fscache_n_op_run;
52372-atomic_t fscache_n_op_enqueue;
52373-atomic_t fscache_n_op_requeue;
52374-atomic_t fscache_n_op_deferred_release;
52375-atomic_t fscache_n_op_release;
52376-atomic_t fscache_n_op_gc;
52377-atomic_t fscache_n_op_cancelled;
52378-atomic_t fscache_n_op_rejected;
52379+atomic_unchecked_t fscache_n_op_pend;
52380+atomic_unchecked_t fscache_n_op_run;
52381+atomic_unchecked_t fscache_n_op_enqueue;
52382+atomic_unchecked_t fscache_n_op_requeue;
52383+atomic_unchecked_t fscache_n_op_deferred_release;
52384+atomic_unchecked_t fscache_n_op_release;
52385+atomic_unchecked_t fscache_n_op_gc;
52386+atomic_unchecked_t fscache_n_op_cancelled;
52387+atomic_unchecked_t fscache_n_op_rejected;
52388
52389-atomic_t fscache_n_attr_changed;
52390-atomic_t fscache_n_attr_changed_ok;
52391-atomic_t fscache_n_attr_changed_nobufs;
52392-atomic_t fscache_n_attr_changed_nomem;
52393-atomic_t fscache_n_attr_changed_calls;
52394+atomic_unchecked_t fscache_n_attr_changed;
52395+atomic_unchecked_t fscache_n_attr_changed_ok;
52396+atomic_unchecked_t fscache_n_attr_changed_nobufs;
52397+atomic_unchecked_t fscache_n_attr_changed_nomem;
52398+atomic_unchecked_t fscache_n_attr_changed_calls;
52399
52400-atomic_t fscache_n_allocs;
52401-atomic_t fscache_n_allocs_ok;
52402-atomic_t fscache_n_allocs_wait;
52403-atomic_t fscache_n_allocs_nobufs;
52404-atomic_t fscache_n_allocs_intr;
52405-atomic_t fscache_n_allocs_object_dead;
52406-atomic_t fscache_n_alloc_ops;
52407-atomic_t fscache_n_alloc_op_waits;
52408+atomic_unchecked_t fscache_n_allocs;
52409+atomic_unchecked_t fscache_n_allocs_ok;
52410+atomic_unchecked_t fscache_n_allocs_wait;
52411+atomic_unchecked_t fscache_n_allocs_nobufs;
52412+atomic_unchecked_t fscache_n_allocs_intr;
52413+atomic_unchecked_t fscache_n_allocs_object_dead;
52414+atomic_unchecked_t fscache_n_alloc_ops;
52415+atomic_unchecked_t fscache_n_alloc_op_waits;
52416
52417-atomic_t fscache_n_retrievals;
52418-atomic_t fscache_n_retrievals_ok;
52419-atomic_t fscache_n_retrievals_wait;
52420-atomic_t fscache_n_retrievals_nodata;
52421-atomic_t fscache_n_retrievals_nobufs;
52422-atomic_t fscache_n_retrievals_intr;
52423-atomic_t fscache_n_retrievals_nomem;
52424-atomic_t fscache_n_retrievals_object_dead;
52425-atomic_t fscache_n_retrieval_ops;
52426-atomic_t fscache_n_retrieval_op_waits;
52427+atomic_unchecked_t fscache_n_retrievals;
52428+atomic_unchecked_t fscache_n_retrievals_ok;
52429+atomic_unchecked_t fscache_n_retrievals_wait;
52430+atomic_unchecked_t fscache_n_retrievals_nodata;
52431+atomic_unchecked_t fscache_n_retrievals_nobufs;
52432+atomic_unchecked_t fscache_n_retrievals_intr;
52433+atomic_unchecked_t fscache_n_retrievals_nomem;
52434+atomic_unchecked_t fscache_n_retrievals_object_dead;
52435+atomic_unchecked_t fscache_n_retrieval_ops;
52436+atomic_unchecked_t fscache_n_retrieval_op_waits;
52437
52438-atomic_t fscache_n_stores;
52439-atomic_t fscache_n_stores_ok;
52440-atomic_t fscache_n_stores_again;
52441-atomic_t fscache_n_stores_nobufs;
52442-atomic_t fscache_n_stores_oom;
52443-atomic_t fscache_n_store_ops;
52444-atomic_t fscache_n_store_calls;
52445-atomic_t fscache_n_store_pages;
52446-atomic_t fscache_n_store_radix_deletes;
52447-atomic_t fscache_n_store_pages_over_limit;
52448+atomic_unchecked_t fscache_n_stores;
52449+atomic_unchecked_t fscache_n_stores_ok;
52450+atomic_unchecked_t fscache_n_stores_again;
52451+atomic_unchecked_t fscache_n_stores_nobufs;
52452+atomic_unchecked_t fscache_n_stores_oom;
52453+atomic_unchecked_t fscache_n_store_ops;
52454+atomic_unchecked_t fscache_n_store_calls;
52455+atomic_unchecked_t fscache_n_store_pages;
52456+atomic_unchecked_t fscache_n_store_radix_deletes;
52457+atomic_unchecked_t fscache_n_store_pages_over_limit;
52458
52459-atomic_t fscache_n_store_vmscan_not_storing;
52460-atomic_t fscache_n_store_vmscan_gone;
52461-atomic_t fscache_n_store_vmscan_busy;
52462-atomic_t fscache_n_store_vmscan_cancelled;
52463-atomic_t fscache_n_store_vmscan_wait;
52464+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
52465+atomic_unchecked_t fscache_n_store_vmscan_gone;
52466+atomic_unchecked_t fscache_n_store_vmscan_busy;
52467+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
52468+atomic_unchecked_t fscache_n_store_vmscan_wait;
52469
52470-atomic_t fscache_n_marks;
52471-atomic_t fscache_n_uncaches;
52472+atomic_unchecked_t fscache_n_marks;
52473+atomic_unchecked_t fscache_n_uncaches;
52474
52475-atomic_t fscache_n_acquires;
52476-atomic_t fscache_n_acquires_null;
52477-atomic_t fscache_n_acquires_no_cache;
52478-atomic_t fscache_n_acquires_ok;
52479-atomic_t fscache_n_acquires_nobufs;
52480-atomic_t fscache_n_acquires_oom;
52481+atomic_unchecked_t fscache_n_acquires;
52482+atomic_unchecked_t fscache_n_acquires_null;
52483+atomic_unchecked_t fscache_n_acquires_no_cache;
52484+atomic_unchecked_t fscache_n_acquires_ok;
52485+atomic_unchecked_t fscache_n_acquires_nobufs;
52486+atomic_unchecked_t fscache_n_acquires_oom;
52487
52488-atomic_t fscache_n_invalidates;
52489-atomic_t fscache_n_invalidates_run;
52490+atomic_unchecked_t fscache_n_invalidates;
52491+atomic_unchecked_t fscache_n_invalidates_run;
52492
52493-atomic_t fscache_n_updates;
52494-atomic_t fscache_n_updates_null;
52495-atomic_t fscache_n_updates_run;
52496+atomic_unchecked_t fscache_n_updates;
52497+atomic_unchecked_t fscache_n_updates_null;
52498+atomic_unchecked_t fscache_n_updates_run;
52499
52500-atomic_t fscache_n_relinquishes;
52501-atomic_t fscache_n_relinquishes_null;
52502-atomic_t fscache_n_relinquishes_waitcrt;
52503-atomic_t fscache_n_relinquishes_retire;
52504+atomic_unchecked_t fscache_n_relinquishes;
52505+atomic_unchecked_t fscache_n_relinquishes_null;
52506+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
52507+atomic_unchecked_t fscache_n_relinquishes_retire;
52508
52509-atomic_t fscache_n_cookie_index;
52510-atomic_t fscache_n_cookie_data;
52511-atomic_t fscache_n_cookie_special;
52512+atomic_unchecked_t fscache_n_cookie_index;
52513+atomic_unchecked_t fscache_n_cookie_data;
52514+atomic_unchecked_t fscache_n_cookie_special;
52515
52516-atomic_t fscache_n_object_alloc;
52517-atomic_t fscache_n_object_no_alloc;
52518-atomic_t fscache_n_object_lookups;
52519-atomic_t fscache_n_object_lookups_negative;
52520-atomic_t fscache_n_object_lookups_positive;
52521-atomic_t fscache_n_object_lookups_timed_out;
52522-atomic_t fscache_n_object_created;
52523-atomic_t fscache_n_object_avail;
52524-atomic_t fscache_n_object_dead;
52525+atomic_unchecked_t fscache_n_object_alloc;
52526+atomic_unchecked_t fscache_n_object_no_alloc;
52527+atomic_unchecked_t fscache_n_object_lookups;
52528+atomic_unchecked_t fscache_n_object_lookups_negative;
52529+atomic_unchecked_t fscache_n_object_lookups_positive;
52530+atomic_unchecked_t fscache_n_object_lookups_timed_out;
52531+atomic_unchecked_t fscache_n_object_created;
52532+atomic_unchecked_t fscache_n_object_avail;
52533+atomic_unchecked_t fscache_n_object_dead;
52534
52535-atomic_t fscache_n_checkaux_none;
52536-atomic_t fscache_n_checkaux_okay;
52537-atomic_t fscache_n_checkaux_update;
52538-atomic_t fscache_n_checkaux_obsolete;
52539+atomic_unchecked_t fscache_n_checkaux_none;
52540+atomic_unchecked_t fscache_n_checkaux_okay;
52541+atomic_unchecked_t fscache_n_checkaux_update;
52542+atomic_unchecked_t fscache_n_checkaux_obsolete;
52543
52544 atomic_t fscache_n_cop_alloc_object;
52545 atomic_t fscache_n_cop_lookup_object;
52546@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
52547 seq_puts(m, "FS-Cache statistics\n");
52548
52549 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
52550- atomic_read(&fscache_n_cookie_index),
52551- atomic_read(&fscache_n_cookie_data),
52552- atomic_read(&fscache_n_cookie_special));
52553+ atomic_read_unchecked(&fscache_n_cookie_index),
52554+ atomic_read_unchecked(&fscache_n_cookie_data),
52555+ atomic_read_unchecked(&fscache_n_cookie_special));
52556
52557 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
52558- atomic_read(&fscache_n_object_alloc),
52559- atomic_read(&fscache_n_object_no_alloc),
52560- atomic_read(&fscache_n_object_avail),
52561- atomic_read(&fscache_n_object_dead));
52562+ atomic_read_unchecked(&fscache_n_object_alloc),
52563+ atomic_read_unchecked(&fscache_n_object_no_alloc),
52564+ atomic_read_unchecked(&fscache_n_object_avail),
52565+ atomic_read_unchecked(&fscache_n_object_dead));
52566 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
52567- atomic_read(&fscache_n_checkaux_none),
52568- atomic_read(&fscache_n_checkaux_okay),
52569- atomic_read(&fscache_n_checkaux_update),
52570- atomic_read(&fscache_n_checkaux_obsolete));
52571+ atomic_read_unchecked(&fscache_n_checkaux_none),
52572+ atomic_read_unchecked(&fscache_n_checkaux_okay),
52573+ atomic_read_unchecked(&fscache_n_checkaux_update),
52574+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
52575
52576 seq_printf(m, "Pages : mrk=%u unc=%u\n",
52577- atomic_read(&fscache_n_marks),
52578- atomic_read(&fscache_n_uncaches));
52579+ atomic_read_unchecked(&fscache_n_marks),
52580+ atomic_read_unchecked(&fscache_n_uncaches));
52581
52582 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
52583 " oom=%u\n",
52584- atomic_read(&fscache_n_acquires),
52585- atomic_read(&fscache_n_acquires_null),
52586- atomic_read(&fscache_n_acquires_no_cache),
52587- atomic_read(&fscache_n_acquires_ok),
52588- atomic_read(&fscache_n_acquires_nobufs),
52589- atomic_read(&fscache_n_acquires_oom));
52590+ atomic_read_unchecked(&fscache_n_acquires),
52591+ atomic_read_unchecked(&fscache_n_acquires_null),
52592+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
52593+ atomic_read_unchecked(&fscache_n_acquires_ok),
52594+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
52595+ atomic_read_unchecked(&fscache_n_acquires_oom));
52596
52597 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
52598- atomic_read(&fscache_n_object_lookups),
52599- atomic_read(&fscache_n_object_lookups_negative),
52600- atomic_read(&fscache_n_object_lookups_positive),
52601- atomic_read(&fscache_n_object_created),
52602- atomic_read(&fscache_n_object_lookups_timed_out));
52603+ atomic_read_unchecked(&fscache_n_object_lookups),
52604+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
52605+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
52606+ atomic_read_unchecked(&fscache_n_object_created),
52607+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
52608
52609 seq_printf(m, "Invals : n=%u run=%u\n",
52610- atomic_read(&fscache_n_invalidates),
52611- atomic_read(&fscache_n_invalidates_run));
52612+ atomic_read_unchecked(&fscache_n_invalidates),
52613+ atomic_read_unchecked(&fscache_n_invalidates_run));
52614
52615 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
52616- atomic_read(&fscache_n_updates),
52617- atomic_read(&fscache_n_updates_null),
52618- atomic_read(&fscache_n_updates_run));
52619+ atomic_read_unchecked(&fscache_n_updates),
52620+ atomic_read_unchecked(&fscache_n_updates_null),
52621+ atomic_read_unchecked(&fscache_n_updates_run));
52622
52623 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
52624- atomic_read(&fscache_n_relinquishes),
52625- atomic_read(&fscache_n_relinquishes_null),
52626- atomic_read(&fscache_n_relinquishes_waitcrt),
52627- atomic_read(&fscache_n_relinquishes_retire));
52628+ atomic_read_unchecked(&fscache_n_relinquishes),
52629+ atomic_read_unchecked(&fscache_n_relinquishes_null),
52630+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
52631+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
52632
52633 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
52634- atomic_read(&fscache_n_attr_changed),
52635- atomic_read(&fscache_n_attr_changed_ok),
52636- atomic_read(&fscache_n_attr_changed_nobufs),
52637- atomic_read(&fscache_n_attr_changed_nomem),
52638- atomic_read(&fscache_n_attr_changed_calls));
52639+ atomic_read_unchecked(&fscache_n_attr_changed),
52640+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
52641+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
52642+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
52643+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
52644
52645 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
52646- atomic_read(&fscache_n_allocs),
52647- atomic_read(&fscache_n_allocs_ok),
52648- atomic_read(&fscache_n_allocs_wait),
52649- atomic_read(&fscache_n_allocs_nobufs),
52650- atomic_read(&fscache_n_allocs_intr));
52651+ atomic_read_unchecked(&fscache_n_allocs),
52652+ atomic_read_unchecked(&fscache_n_allocs_ok),
52653+ atomic_read_unchecked(&fscache_n_allocs_wait),
52654+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
52655+ atomic_read_unchecked(&fscache_n_allocs_intr));
52656 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
52657- atomic_read(&fscache_n_alloc_ops),
52658- atomic_read(&fscache_n_alloc_op_waits),
52659- atomic_read(&fscache_n_allocs_object_dead));
52660+ atomic_read_unchecked(&fscache_n_alloc_ops),
52661+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
52662+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
52663
52664 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
52665 " int=%u oom=%u\n",
52666- atomic_read(&fscache_n_retrievals),
52667- atomic_read(&fscache_n_retrievals_ok),
52668- atomic_read(&fscache_n_retrievals_wait),
52669- atomic_read(&fscache_n_retrievals_nodata),
52670- atomic_read(&fscache_n_retrievals_nobufs),
52671- atomic_read(&fscache_n_retrievals_intr),
52672- atomic_read(&fscache_n_retrievals_nomem));
52673+ atomic_read_unchecked(&fscache_n_retrievals),
52674+ atomic_read_unchecked(&fscache_n_retrievals_ok),
52675+ atomic_read_unchecked(&fscache_n_retrievals_wait),
52676+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
52677+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
52678+ atomic_read_unchecked(&fscache_n_retrievals_intr),
52679+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
52680 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
52681- atomic_read(&fscache_n_retrieval_ops),
52682- atomic_read(&fscache_n_retrieval_op_waits),
52683- atomic_read(&fscache_n_retrievals_object_dead));
52684+ atomic_read_unchecked(&fscache_n_retrieval_ops),
52685+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
52686+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
52687
52688 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
52689- atomic_read(&fscache_n_stores),
52690- atomic_read(&fscache_n_stores_ok),
52691- atomic_read(&fscache_n_stores_again),
52692- atomic_read(&fscache_n_stores_nobufs),
52693- atomic_read(&fscache_n_stores_oom));
52694+ atomic_read_unchecked(&fscache_n_stores),
52695+ atomic_read_unchecked(&fscache_n_stores_ok),
52696+ atomic_read_unchecked(&fscache_n_stores_again),
52697+ atomic_read_unchecked(&fscache_n_stores_nobufs),
52698+ atomic_read_unchecked(&fscache_n_stores_oom));
52699 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
52700- atomic_read(&fscache_n_store_ops),
52701- atomic_read(&fscache_n_store_calls),
52702- atomic_read(&fscache_n_store_pages),
52703- atomic_read(&fscache_n_store_radix_deletes),
52704- atomic_read(&fscache_n_store_pages_over_limit));
52705+ atomic_read_unchecked(&fscache_n_store_ops),
52706+ atomic_read_unchecked(&fscache_n_store_calls),
52707+ atomic_read_unchecked(&fscache_n_store_pages),
52708+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
52709+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
52710
52711 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
52712- atomic_read(&fscache_n_store_vmscan_not_storing),
52713- atomic_read(&fscache_n_store_vmscan_gone),
52714- atomic_read(&fscache_n_store_vmscan_busy),
52715- atomic_read(&fscache_n_store_vmscan_cancelled),
52716- atomic_read(&fscache_n_store_vmscan_wait));
52717+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
52718+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
52719+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
52720+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
52721+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
52722
52723 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
52724- atomic_read(&fscache_n_op_pend),
52725- atomic_read(&fscache_n_op_run),
52726- atomic_read(&fscache_n_op_enqueue),
52727- atomic_read(&fscache_n_op_cancelled),
52728- atomic_read(&fscache_n_op_rejected));
52729+ atomic_read_unchecked(&fscache_n_op_pend),
52730+ atomic_read_unchecked(&fscache_n_op_run),
52731+ atomic_read_unchecked(&fscache_n_op_enqueue),
52732+ atomic_read_unchecked(&fscache_n_op_cancelled),
52733+ atomic_read_unchecked(&fscache_n_op_rejected));
52734 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
52735- atomic_read(&fscache_n_op_deferred_release),
52736- atomic_read(&fscache_n_op_release),
52737- atomic_read(&fscache_n_op_gc));
52738+ atomic_read_unchecked(&fscache_n_op_deferred_release),
52739+ atomic_read_unchecked(&fscache_n_op_release),
52740+ atomic_read_unchecked(&fscache_n_op_gc));
52741
52742 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
52743 atomic_read(&fscache_n_cop_alloc_object),
52744diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
52745index e397b67..b0d8709 100644
52746--- a/fs/fuse/cuse.c
52747+++ b/fs/fuse/cuse.c
52748@@ -593,10 +593,12 @@ static int __init cuse_init(void)
52749 INIT_LIST_HEAD(&cuse_conntbl[i]);
52750
52751 /* inherit and extend fuse_dev_operations */
52752- cuse_channel_fops = fuse_dev_operations;
52753- cuse_channel_fops.owner = THIS_MODULE;
52754- cuse_channel_fops.open = cuse_channel_open;
52755- cuse_channel_fops.release = cuse_channel_release;
52756+ pax_open_kernel();
52757+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
52758+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
52759+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
52760+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
52761+ pax_close_kernel();
52762
52763 cuse_class = class_create(THIS_MODULE, "cuse");
52764 if (IS_ERR(cuse_class))
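
/*
 * Note: with the PaX constify/KERNEXEC features, structures of function
 * pointers such as file_operations live in read-only memory after boot.
 * Code that must legitimately rewrite one -- here, CUSE cloning
 * fuse_dev_operations and overriding three methods -- wraps the stores
 * in pax_open_kernel()/pax_close_kernel(), which briefly lift the write
 * protection on the local CPU, and casts through (void **) to bypass
 * the const qualifier.  The recurring pattern, sketched with
 * hypothetical names (some_fops and my_open are placeholders):
 */
pax_open_kernel();
*(void **)&some_fops.owner = THIS_MODULE;
*(void **)&some_fops.open  = my_open;	/* store into otherwise read-only data */
pax_close_kernel();
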
52765diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
52766index e83351a..41e3c9c 100644
52767--- a/fs/fuse/dev.c
52768+++ b/fs/fuse/dev.c
52769@@ -1236,7 +1236,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
52770 ret = 0;
52771 pipe_lock(pipe);
52772
52773- if (!pipe->readers) {
52774+ if (!atomic_read(&pipe->readers)) {
52775 send_sig(SIGPIPE, current, 0);
52776 if (!ret)
52777 ret = -EPIPE;
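
/*
 * Note: the fuse_dev_splice_read() hunk assumes a change made elsewhere
 * in this patch: the readers/writers bookkeeping in struct
 * pipe_inode_info is converted to atomic_t, so every open-coded access
 * becomes atomic_read()/atomic_inc()/atomic_dec(), as here.  Roughly:
 */
struct pipe_inode_info {
	/* ... */
	atomic_t readers;	/* was: unsigned int readers; */
	atomic_t writers;	/* was: unsigned int writers; */
	/* ... */
};
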
52778diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
52779index 315e1f8..91f890c 100644
52780--- a/fs/fuse/dir.c
52781+++ b/fs/fuse/dir.c
52782@@ -1233,7 +1233,7 @@ static char *read_link(struct dentry *dentry)
52783 return link;
52784 }
52785
52786-static void free_link(char *link)
52787+static void free_link(const char *link)
52788 {
52789 if (!IS_ERR(link))
52790 free_page((unsigned long) link);
52791diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
52792index 2b6f569..fcb4d1f 100644
52793--- a/fs/gfs2/inode.c
52794+++ b/fs/gfs2/inode.c
52795@@ -1499,7 +1499,7 @@ out:
52796
52797 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
52798 {
52799- char *s = nd_get_link(nd);
52800+ const char *s = nd_get_link(nd);
52801 if (!IS_ERR(s))
52802 kfree(s);
52803 }
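
/*
 * Note: the fuse and gfs2 hunks above are const-correctness fallout,
 * not behaviour changes.  Elsewhere in the patch the nd_get_link() /
 * ->follow_link() return path is presumably const-ified, so the symlink
 * body arrives as const char * and the cast back to a mutable address
 * happens only at the allocator boundary, e.g.:
 */
static void put_link_body(const char *link)	/* hypothetical helper mirroring free_link() */
{
	if (!IS_ERR(link))
		free_page((unsigned long)link);	/* the cast to unsigned long drops const */
}
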
52804diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
52805index 78bde32..767e906 100644
52806--- a/fs/hugetlbfs/inode.c
52807+++ b/fs/hugetlbfs/inode.c
52808@@ -152,6 +152,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
52809 struct mm_struct *mm = current->mm;
52810 struct vm_area_struct *vma;
52811 struct hstate *h = hstate_file(file);
52812+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
52813 struct vm_unmapped_area_info info;
52814
52815 if (len & ~huge_page_mask(h))
52816@@ -165,17 +166,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
52817 return addr;
52818 }
52819
52820+#ifdef CONFIG_PAX_RANDMMAP
52821+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
52822+#endif
52823+
52824 if (addr) {
52825 addr = ALIGN(addr, huge_page_size(h));
52826 vma = find_vma(mm, addr);
52827- if (TASK_SIZE - len >= addr &&
52828- (!vma || addr + len <= vma->vm_start))
52829+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
52830 return addr;
52831 }
52832
52833 info.flags = 0;
52834 info.length = len;
52835 info.low_limit = TASK_UNMAPPED_BASE;
52836+
52837+#ifdef CONFIG_PAX_RANDMMAP
52838+ if (mm->pax_flags & MF_PAX_RANDMMAP)
52839+ info.low_limit += mm->delta_mmap;
52840+#endif
52841+
52842 info.high_limit = TASK_SIZE;
52843 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
52844 info.align_offset = 0;
52845@@ -897,7 +907,7 @@ static struct file_system_type hugetlbfs_fs_type = {
52846 .kill_sb = kill_litter_super,
52847 };
52848
52849-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
52850+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
52851
52852 static int can_do_hugetlb_shm(void)
52853 {
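
/*
 * Note on the hugetlbfs hunk: two independent hardenings.  First, under
 * PAX_RANDMMAP the caller-supplied hint address is ignored and the
 * search floor (TASK_UNMAPPED_BASE) is shifted by the per-mm random
 * delta_mmap, randomising where hugepage mappings land.  Second, the
 * plain "does it overlap the next vma" test is replaced by
 * check_heap_stack_gap(), which additionally enforces a guard gap below
 * stack vmas, randomised per thread stack via
 * gr_rand_threadstack_offset().  Illustrative sketch only (gap_ok is a
 * hypothetical name; the real check also honours a sysctl-controlled
 * minimum gap):
 */
static bool gap_ok(const struct vm_area_struct *vma, unsigned long addr,
		   unsigned long len, unsigned long offset)
{
	if (!vma)
		return true;
	if (vma->vm_flags & VM_GROWSDOWN)	/* stack: demand a gap below it */
		return addr + len + offset <= vma->vm_start;
	return addr + len <= vma->vm_start;	/* ordinary overlap test */
}
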
52854diff --git a/fs/inode.c b/fs/inode.c
52855index 14084b7..29af1d9 100644
52856--- a/fs/inode.c
52857+++ b/fs/inode.c
52858@@ -880,8 +880,8 @@ unsigned int get_next_ino(void)
52859
52860 #ifdef CONFIG_SMP
52861 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
52862- static atomic_t shared_last_ino;
52863- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
52864+ static atomic_unchecked_t shared_last_ino;
52865+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
52866
52867 res = next - LAST_INO_BATCH;
52868 }
52869diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
52870index 4a6cf28..d3a29d3 100644
52871--- a/fs/jffs2/erase.c
52872+++ b/fs/jffs2/erase.c
52873@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
52874 struct jffs2_unknown_node marker = {
52875 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
52876 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
52877- .totlen = cpu_to_je32(c->cleanmarker_size)
52878+ .totlen = cpu_to_je32(c->cleanmarker_size),
52879+ .hdr_crc = cpu_to_je32(0)
52880 };
52881
52882 jffs2_prealloc_raw_node_refs(c, jeb, 1);
52883diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
52884index a6597d6..41b30ec 100644
52885--- a/fs/jffs2/wbuf.c
52886+++ b/fs/jffs2/wbuf.c
52887@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
52888 {
52889 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
52890 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
52891- .totlen = constant_cpu_to_je32(8)
52892+ .totlen = constant_cpu_to_je32(8),
52893+ .hdr_crc = constant_cpu_to_je32(0)
52894 };
52895
52896 /*
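
/*
 * Note on the two jffs2 hunks: in C, a designated initializer
 * zero-initialises every member that is not explicitly named, so
 * ".hdr_crc = cpu_to_je32(0)" does not change the bytes produced --
 * hdr_crc was already zero.  The patch merely makes the zero CRC
 * explicit for a node that is written to flash verbatim (the motivation
 * is an assumption here; the language guarantee is not):
 */
struct jffs2_unknown_node m = {
	.magic    = cpu_to_je16(JFFS2_MAGIC_BITMASK),
	.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
	.totlen   = cpu_to_je32(c->cleanmarker_size),
};	/* m.hdr_crc == 0 already, by C99/C11 initialization rules */
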
52897diff --git a/fs/jfs/super.c b/fs/jfs/super.c
52898index 1a543be..a4e1363 100644
52899--- a/fs/jfs/super.c
52900+++ b/fs/jfs/super.c
52901@@ -225,7 +225,7 @@ static const match_table_t tokens = {
52902 static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
52903 int *flag)
52904 {
52905- void *nls_map = (void *)-1; /* -1: no change; NULL: none */
52906+ const void *nls_map = (const void *)-1; /* -1: no change; NULL: none */
52907 char *p;
52908 struct jfs_sb_info *sbi = JFS_SBI(sb);
52909
52910@@ -253,7 +253,7 @@ static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
52911 /* Don't do anything ;-) */
52912 break;
52913 case Opt_iocharset:
52914- if (nls_map && nls_map != (void *) -1)
52915+ if (nls_map && nls_map != (const void *) -1)
52916 unload_nls(nls_map);
52917 if (!strcmp(args[0].from, "none"))
52918 nls_map = NULL;
52919@@ -855,7 +855,7 @@ static int __init init_jfs_fs(void)
52920
52921 jfs_inode_cachep =
52922 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
52923- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
52924+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
52925 init_once);
52926 if (jfs_inode_cachep == NULL)
52927 return -ENOMEM;
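
/*
 * Note: SLAB_USERCOPY is a PaX USERCOPY annotation.  With USERCOPY
 * enabled, copy_to_user()/copy_from_user() refuse to touch slab objects
 * unless their cache is explicitly whitelisted; the jfs inode cache is
 * whitelisted here, presumably because jfs inodes carry inline
 * data/symlink bodies that get copied to userspace.  The recurring
 * pattern (example_cache and struct example are placeholders):
 */
cachep = kmem_cache_create("example_cache", sizeof(struct example), 0,
			   SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_USERCOPY,
			   NULL);
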
52928diff --git a/fs/libfs.c b/fs/libfs.c
52929index 916da8c..1588998 100644
52930--- a/fs/libfs.c
52931+++ b/fs/libfs.c
52932@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
52933
52934 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
52935 struct dentry *next;
52936+ char d_name[sizeof(next->d_iname)];
52937+ const unsigned char *name;
52938+
52939 next = list_entry(p, struct dentry, d_u.d_child);
52940 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
52941 if (!simple_positive(next)) {
52942@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
52943
52944 spin_unlock(&next->d_lock);
52945 spin_unlock(&dentry->d_lock);
52946- if (filldir(dirent, next->d_name.name,
52947+ name = next->d_name.name;
52948+ if (name == next->d_iname) {
52949+ memcpy(d_name, name, next->d_name.len);
52950+ name = d_name;
52951+ }
52952+ if (filldir(dirent, name,
52953 next->d_name.len, filp->f_pos,
52954 next->d_inode->i_ino,
52955 dt_type(next->d_inode)) < 0)
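
/*
 * Note on the dcache_readdir() hunk: filldir() copies the name to
 * userspace and may block, and by that point both d_lock and the
 * parent's lock have been dropped.  For short names stored inline in
 * dentry->d_iname a concurrent rename can rewrite those bytes in place,
 * so the patch snapshots the inline name into a correctly sized stack
 * buffer (char d_name[sizeof(next->d_iname)]) and hands filldir() the
 * stable copy; only the inline case is stabilised here.
 */
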
52956diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
52957index 52e5120..808936e 100644
52958--- a/fs/lockd/clntproc.c
52959+++ b/fs/lockd/clntproc.c
52960@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
52961 /*
52962 * Cookie counter for NLM requests
52963 */
52964-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
52965+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
52966
52967 void nlmclnt_next_cookie(struct nlm_cookie *c)
52968 {
52969- u32 cookie = atomic_inc_return(&nlm_cookie);
52970+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
52971
52972 memcpy(c->data, &cookie, 4);
52973 c->len=4;
52974diff --git a/fs/locks.c b/fs/locks.c
52975index a94e331..060bce3 100644
52976--- a/fs/locks.c
52977+++ b/fs/locks.c
52978@@ -2064,16 +2064,16 @@ void locks_remove_flock(struct file *filp)
52979 return;
52980
52981 if (filp->f_op && filp->f_op->flock) {
52982- struct file_lock fl = {
52983+ struct file_lock flock = {
52984 .fl_pid = current->tgid,
52985 .fl_file = filp,
52986 .fl_flags = FL_FLOCK,
52987 .fl_type = F_UNLCK,
52988 .fl_end = OFFSET_MAX,
52989 };
52990- filp->f_op->flock(filp, F_SETLKW, &fl);
52991- if (fl.fl_ops && fl.fl_ops->fl_release_private)
52992- fl.fl_ops->fl_release_private(&fl);
52993+ filp->f_op->flock(filp, F_SETLKW, &flock);
52994+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
52995+ flock.fl_ops->fl_release_private(&flock);
52996 }
52997
52998 lock_flocks();
52999diff --git a/fs/namei.c b/fs/namei.c
53000index ec97aef..e67718d 100644
53001--- a/fs/namei.c
53002+++ b/fs/namei.c
53003@@ -319,16 +319,32 @@ int generic_permission(struct inode *inode, int mask)
53004 if (ret != -EACCES)
53005 return ret;
53006
53007+#ifdef CONFIG_GRKERNSEC
53008+ /* we'll block if we have to log due to a denied capability use */
53009+ if (mask & MAY_NOT_BLOCK)
53010+ return -ECHILD;
53011+#endif
53012+
53013 if (S_ISDIR(inode->i_mode)) {
53014 /* DACs are overridable for directories */
53015- if (inode_capable(inode, CAP_DAC_OVERRIDE))
53016- return 0;
53017 if (!(mask & MAY_WRITE))
53018- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
53019+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
53020+ inode_capable(inode, CAP_DAC_READ_SEARCH))
53021 return 0;
53022+ if (inode_capable(inode, CAP_DAC_OVERRIDE))
53023+ return 0;
53024 return -EACCES;
53025 }
53026 /*
53027+ * Searching includes executable on directories, else just read.
53028+ */
53029+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
53030+ if (mask == MAY_READ)
53031+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
53032+ inode_capable(inode, CAP_DAC_READ_SEARCH))
53033+ return 0;
53034+
53035+ /*
53036 * Read/write DACs are always overridable.
53037 * Executable DACs are overridable when there is
53038 * at least one exec bit set.
53039@@ -337,14 +353,6 @@ int generic_permission(struct inode *inode, int mask)
53040 if (inode_capable(inode, CAP_DAC_OVERRIDE))
53041 return 0;
53042
53043- /*
53044- * Searching includes executable on directories, else just read.
53045- */
53046- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
53047- if (mask == MAY_READ)
53048- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
53049- return 0;
53050-
53051 return -EACCES;
53052 }
53053
53054@@ -824,7 +832,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
53055 {
53056 struct dentry *dentry = link->dentry;
53057 int error;
53058- char *s;
53059+ const char *s;
53060
53061 BUG_ON(nd->flags & LOOKUP_RCU);
53062
53063@@ -845,6 +853,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
53064 if (error)
53065 goto out_put_nd_path;
53066
53067+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
53068+ dentry->d_inode, dentry, nd->path.mnt)) {
53069+ error = -EACCES;
53070+ goto out_put_nd_path;
53071+ }
53072+
53073 nd->last_type = LAST_BIND;
53074 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
53075 error = PTR_ERR(*p);
53076@@ -1594,6 +1608,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
53077 break;
53078 res = walk_component(nd, path, &nd->last,
53079 nd->last_type, LOOKUP_FOLLOW);
53080+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
53081+ res = -EACCES;
53082 put_link(nd, &link, cookie);
53083 } while (res > 0);
53084
53085@@ -1692,7 +1708,7 @@ EXPORT_SYMBOL(full_name_hash);
53086 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
53087 {
53088 unsigned long a, b, adata, bdata, mask, hash, len;
53089- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
53090+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
53091
53092 hash = a = 0;
53093 len = -sizeof(unsigned long);
53094@@ -1977,6 +1993,8 @@ static int path_lookupat(int dfd, const char *name,
53095 if (err)
53096 break;
53097 err = lookup_last(nd, &path);
53098+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
53099+ err = -EACCES;
53100 put_link(nd, &link, cookie);
53101 }
53102 }
53103@@ -1984,6 +2002,13 @@ static int path_lookupat(int dfd, const char *name,
53104 if (!err)
53105 err = complete_walk(nd);
53106
53107+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
53108+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
53109+ path_put(&nd->path);
53110+ err = -ENOENT;
53111+ }
53112+ }
53113+
53114 if (!err && nd->flags & LOOKUP_DIRECTORY) {
53115 if (!nd->inode->i_op->lookup) {
53116 path_put(&nd->path);
53117@@ -2011,8 +2036,15 @@ static int filename_lookup(int dfd, struct filename *name,
53118 retval = path_lookupat(dfd, name->name,
53119 flags | LOOKUP_REVAL, nd);
53120
53121- if (likely(!retval))
53122+ if (likely(!retval)) {
53123 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
53124+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
53125+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
53126+ path_put(&nd->path);
53127+ return -ENOENT;
53128+ }
53129+ }
53130+ }
53131 return retval;
53132 }
53133
53134@@ -2390,6 +2422,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
53135 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
53136 return -EPERM;
53137
53138+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
53139+ return -EPERM;
53140+ if (gr_handle_rawio(inode))
53141+ return -EPERM;
53142+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
53143+ return -EACCES;
53144+
53145 return 0;
53146 }
53147
53148@@ -2611,7 +2650,7 @@ looked_up:
53149 * cleared otherwise prior to returning.
53150 */
53151 static int lookup_open(struct nameidata *nd, struct path *path,
53152- struct file *file,
53153+ struct path *link, struct file *file,
53154 const struct open_flags *op,
53155 bool got_write, int *opened)
53156 {
53157@@ -2646,6 +2685,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
53158 /* Negative dentry, just create the file */
53159 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
53160 umode_t mode = op->mode;
53161+
53162+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
53163+ error = -EACCES;
53164+ goto out_dput;
53165+ }
53166+
53167+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
53168+ error = -EACCES;
53169+ goto out_dput;
53170+ }
53171+
53172 if (!IS_POSIXACL(dir->d_inode))
53173 mode &= ~current_umask();
53174 /*
53175@@ -2667,6 +2717,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
53176 nd->flags & LOOKUP_EXCL);
53177 if (error)
53178 goto out_dput;
53179+ else
53180+ gr_handle_create(dentry, nd->path.mnt);
53181 }
53182 out_no_open:
53183 path->dentry = dentry;
53184@@ -2681,7 +2733,7 @@ out_dput:
53185 /*
53186 * Handle the last step of open()
53187 */
53188-static int do_last(struct nameidata *nd, struct path *path,
53189+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
53190 struct file *file, const struct open_flags *op,
53191 int *opened, struct filename *name)
53192 {
53193@@ -2710,16 +2762,32 @@ static int do_last(struct nameidata *nd, struct path *path,
53194 error = complete_walk(nd);
53195 if (error)
53196 return error;
53197+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
53198+ error = -ENOENT;
53199+ goto out;
53200+ }
53201 audit_inode(name, nd->path.dentry, 0);
53202 if (open_flag & O_CREAT) {
53203 error = -EISDIR;
53204 goto out;
53205 }
53206+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
53207+ error = -EACCES;
53208+ goto out;
53209+ }
53210 goto finish_open;
53211 case LAST_BIND:
53212 error = complete_walk(nd);
53213 if (error)
53214 return error;
53215+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
53216+ error = -ENOENT;
53217+ goto out;
53218+ }
53219+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
53220+ error = -EACCES;
53221+ goto out;
53222+ }
53223 audit_inode(name, dir, 0);
53224 goto finish_open;
53225 }
53226@@ -2768,7 +2836,7 @@ retry_lookup:
53227 */
53228 }
53229 mutex_lock(&dir->d_inode->i_mutex);
53230- error = lookup_open(nd, path, file, op, got_write, opened);
53231+ error = lookup_open(nd, path, link, file, op, got_write, opened);
53232 mutex_unlock(&dir->d_inode->i_mutex);
53233
53234 if (error <= 0) {
53235@@ -2792,11 +2860,28 @@ retry_lookup:
53236 goto finish_open_created;
53237 }
53238
53239+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
53240+ error = -ENOENT;
53241+ goto exit_dput;
53242+ }
53243+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
53244+ error = -EACCES;
53245+ goto exit_dput;
53246+ }
53247+
53248 /*
53249 * create/update audit record if it already exists.
53250 */
53251- if (path->dentry->d_inode)
53252+ if (path->dentry->d_inode) {
53253+ /* only check if O_CREAT is specified, all other checks need to go
53254+ into may_open */
53255+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
53256+ error = -EACCES;
53257+ goto exit_dput;
53258+ }
53259+
53260 audit_inode(name, path->dentry, 0);
53261+ }
53262
53263 /*
53264 * If atomic_open() acquired write access it is dropped now due to
53265@@ -2837,6 +2922,11 @@ finish_lookup:
53266 }
53267 }
53268 BUG_ON(inode != path->dentry->d_inode);
53269+ /* if we're resolving a symlink to another symlink */
53270+ if (link && gr_handle_symlink_owner(link, inode)) {
53271+ error = -EACCES;
53272+ goto out;
53273+ }
53274 return 1;
53275 }
53276
53277@@ -2846,7 +2936,6 @@ finish_lookup:
53278 save_parent.dentry = nd->path.dentry;
53279 save_parent.mnt = mntget(path->mnt);
53280 nd->path.dentry = path->dentry;
53281-
53282 }
53283 nd->inode = inode;
53284 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
53285@@ -2855,6 +2944,16 @@ finish_lookup:
53286 path_put(&save_parent);
53287 return error;
53288 }
53289+
53290+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
53291+ error = -ENOENT;
53292+ goto out;
53293+ }
53294+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
53295+ error = -EACCES;
53296+ goto out;
53297+ }
53298+
53299 error = -EISDIR;
53300 if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
53301 goto out;
53302@@ -2953,7 +3052,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
53303 if (unlikely(error))
53304 goto out;
53305
53306- error = do_last(nd, &path, file, op, &opened, pathname);
53307+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
53308 while (unlikely(error > 0)) { /* trailing symlink */
53309 struct path link = path;
53310 void *cookie;
53311@@ -2971,7 +3070,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
53312 error = follow_link(&link, nd, &cookie);
53313 if (unlikely(error))
53314 break;
53315- error = do_last(nd, &path, file, op, &opened, pathname);
53316+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
53317 put_link(nd, &link, cookie);
53318 }
53319 out:
53320@@ -3071,8 +3170,12 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
53321 goto unlock;
53322
53323 error = -EEXIST;
53324- if (dentry->d_inode)
53325+ if (dentry->d_inode) {
53326+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
53327+ error = -ENOENT;
53328+ }
53329 goto fail;
53330+ }
53331 /*
53332 * Special case - lookup gave negative, but... we had foo/bar/
53333 * From the vfs_mknod() POV we just have a negative dentry -
53334@@ -3124,6 +3227,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
53335 }
53336 EXPORT_SYMBOL(user_path_create);
53337
53338+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
53339+{
53340+ struct filename *tmp = getname(pathname);
53341+ struct dentry *res;
53342+ if (IS_ERR(tmp))
53343+ return ERR_CAST(tmp);
53344+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
53345+ if (IS_ERR(res))
53346+ putname(tmp);
53347+ else
53348+ *to = tmp;
53349+ return res;
53350+}
53351+
53352 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
53353 {
53354 int error = may_create(dir, dentry);
53355@@ -3186,6 +3303,17 @@ retry:
53356
53357 if (!IS_POSIXACL(path.dentry->d_inode))
53358 mode &= ~current_umask();
53359+
53360+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
53361+ error = -EPERM;
53362+ goto out;
53363+ }
53364+
53365+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
53366+ error = -EACCES;
53367+ goto out;
53368+ }
53369+
53370 error = security_path_mknod(&path, dentry, mode, dev);
53371 if (error)
53372 goto out;
53373@@ -3202,6 +3330,8 @@ retry:
53374 break;
53375 }
53376 out:
53377+ if (!error)
53378+ gr_handle_create(dentry, path.mnt);
53379 done_path_create(&path, dentry);
53380 if (retry_estale(error, lookup_flags)) {
53381 lookup_flags |= LOOKUP_REVAL;
53382@@ -3254,9 +3384,16 @@ retry:
53383
53384 if (!IS_POSIXACL(path.dentry->d_inode))
53385 mode &= ~current_umask();
53386+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
53387+ error = -EACCES;
53388+ goto out;
53389+ }
53390 error = security_path_mkdir(&path, dentry, mode);
53391 if (!error)
53392 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
53393+ if (!error)
53394+ gr_handle_create(dentry, path.mnt);
53395+out:
53396 done_path_create(&path, dentry);
53397 if (retry_estale(error, lookup_flags)) {
53398 lookup_flags |= LOOKUP_REVAL;
53399@@ -3337,6 +3474,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
53400 struct filename *name;
53401 struct dentry *dentry;
53402 struct nameidata nd;
53403+ ino_t saved_ino = 0;
53404+ dev_t saved_dev = 0;
53405 unsigned int lookup_flags = 0;
53406 retry:
53407 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
53408@@ -3369,10 +3508,21 @@ retry:
53409 error = -ENOENT;
53410 goto exit3;
53411 }
53412+
53413+ saved_ino = dentry->d_inode->i_ino;
53414+ saved_dev = gr_get_dev_from_dentry(dentry);
53415+
53416+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
53417+ error = -EACCES;
53418+ goto exit3;
53419+ }
53420+
53421 error = security_path_rmdir(&nd.path, dentry);
53422 if (error)
53423 goto exit3;
53424 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
53425+ if (!error && (saved_dev || saved_ino))
53426+ gr_handle_delete(saved_ino, saved_dev);
53427 exit3:
53428 dput(dentry);
53429 exit2:
53430@@ -3438,6 +3588,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
53431 struct dentry *dentry;
53432 struct nameidata nd;
53433 struct inode *inode = NULL;
53434+ ino_t saved_ino = 0;
53435+ dev_t saved_dev = 0;
53436 unsigned int lookup_flags = 0;
53437 retry:
53438 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
53439@@ -3464,10 +3616,22 @@ retry:
53440 if (!inode)
53441 goto slashes;
53442 ihold(inode);
53443+
53444+ if (inode->i_nlink <= 1) {
53445+ saved_ino = inode->i_ino;
53446+ saved_dev = gr_get_dev_from_dentry(dentry);
53447+ }
53448+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
53449+ error = -EACCES;
53450+ goto exit2;
53451+ }
53452+
53453 error = security_path_unlink(&nd.path, dentry);
53454 if (error)
53455 goto exit2;
53456 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
53457+ if (!error && (saved_ino || saved_dev))
53458+ gr_handle_delete(saved_ino, saved_dev);
53459 exit2:
53460 dput(dentry);
53461 }
53462@@ -3545,9 +3709,17 @@ retry:
53463 if (IS_ERR(dentry))
53464 goto out_putname;
53465
53466+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
53467+ error = -EACCES;
53468+ goto out;
53469+ }
53470+
53471 error = security_path_symlink(&path, dentry, from->name);
53472 if (!error)
53473 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
53474+ if (!error)
53475+ gr_handle_create(dentry, path.mnt);
53476+out:
53477 done_path_create(&path, dentry);
53478 if (retry_estale(error, lookup_flags)) {
53479 lookup_flags |= LOOKUP_REVAL;
53480@@ -3621,6 +3793,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
53481 {
53482 struct dentry *new_dentry;
53483 struct path old_path, new_path;
53484+ struct filename *to = NULL;
53485 int how = 0;
53486 int error;
53487
53488@@ -3644,7 +3817,7 @@ retry:
53489 if (error)
53490 return error;
53491
53492- new_dentry = user_path_create(newdfd, newname, &new_path,
53493+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
53494 (how & LOOKUP_REVAL));
53495 error = PTR_ERR(new_dentry);
53496 if (IS_ERR(new_dentry))
53497@@ -3656,11 +3829,28 @@ retry:
53498 error = may_linkat(&old_path);
53499 if (unlikely(error))
53500 goto out_dput;
53501+
53502+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
53503+ old_path.dentry->d_inode,
53504+ old_path.dentry->d_inode->i_mode, to)) {
53505+ error = -EACCES;
53506+ goto out_dput;
53507+ }
53508+
53509+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
53510+ old_path.dentry, old_path.mnt, to)) {
53511+ error = -EACCES;
53512+ goto out_dput;
53513+ }
53514+
53515 error = security_path_link(old_path.dentry, &new_path, new_dentry);
53516 if (error)
53517 goto out_dput;
53518 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
53519+ if (!error)
53520+ gr_handle_create(new_dentry, new_path.mnt);
53521 out_dput:
53522+ putname(to);
53523 done_path_create(&new_path, new_dentry);
53524 if (retry_estale(error, how)) {
53525 how |= LOOKUP_REVAL;
53526@@ -3906,12 +4096,21 @@ retry:
53527 if (new_dentry == trap)
53528 goto exit5;
53529
53530+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
53531+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
53532+ to);
53533+ if (error)
53534+ goto exit5;
53535+
53536 error = security_path_rename(&oldnd.path, old_dentry,
53537 &newnd.path, new_dentry);
53538 if (error)
53539 goto exit5;
53540 error = vfs_rename(old_dir->d_inode, old_dentry,
53541 new_dir->d_inode, new_dentry);
53542+ if (!error)
53543+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
53544+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
53545 exit5:
53546 dput(new_dentry);
53547 exit4:
53548@@ -3943,6 +4142,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
53549
53550 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
53551 {
53552+ char tmpbuf[64];
53553+ const char *newlink;
53554 int len;
53555
53556 len = PTR_ERR(link);
53557@@ -3952,7 +4153,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
53558 len = strlen(link);
53559 if (len > (unsigned) buflen)
53560 len = buflen;
53561- if (copy_to_user(buffer, link, len))
53562+
53563+ if (len < sizeof(tmpbuf)) {
53564+ memcpy(tmpbuf, link, len);
53565+ newlink = tmpbuf;
53566+ } else
53567+ newlink = link;
53568+
53569+ if (copy_to_user(buffer, newlink, len))
53570 len = -EFAULT;
53571 out:
53572 return len;
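
/*
 * Note on the fs/namei.c hunks above: this is the grsecurity RBAC and
 * link-hardening layer woven into path walking.  The hooks visible in
 * the diff follow two calling conventions: gr_handle_*() helpers return
 * nonzero to deny (mapped to -EPERM/-EACCES by the caller), while
 * gr_acl_handle_*() helpers return zero to deny.  Notably,
 * gr_acl_handle_hidden_file() turns objects the RBAC policy hides into
 * -ENOENT, as if they did not exist:
 */
if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
	path_put(&nd->path);
	err = -ENOENT;	/* the object exists, but policy says it must not be visible */
}
/*
 * gr_handle_symlink_owner() enforces symlink-ownership restrictions at
 * every point a trailing symlink is followed (hence do_last() and
 * lookup_open() growing a "struct path *link" parameter), and the
 * gr_handle_create()/gr_handle_delete()/gr_handle_rename() calls update
 * RBAC state and audit logs after successful namespace changes.  The
 * vfs_readlink() hunk at the end bounces short link bodies through a
 * 64-byte stack buffer before copy_to_user(), presumably so PaX
 * USERCOPY need not whitelist every filesystem's inode slab for the
 * common short-symlink case.
 */
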
53573diff --git a/fs/namespace.c b/fs/namespace.c
53574index a51054f..f9b53e5 100644
53575--- a/fs/namespace.c
53576+++ b/fs/namespace.c
53577@@ -1215,6 +1215,9 @@ static int do_umount(struct mount *mnt, int flags)
53578 if (!(sb->s_flags & MS_RDONLY))
53579 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
53580 up_write(&sb->s_umount);
53581+
53582+ gr_log_remount(mnt->mnt_devname, retval);
53583+
53584 return retval;
53585 }
53586
53587@@ -1234,6 +1237,9 @@ static int do_umount(struct mount *mnt, int flags)
53588 br_write_unlock(&vfsmount_lock);
53589 up_write(&namespace_sem);
53590 release_mounts(&umount_list);
53591+
53592+ gr_log_unmount(mnt->mnt_devname, retval);
53593+
53594 return retval;
53595 }
53596
53597@@ -2287,6 +2293,16 @@ long do_mount(const char *dev_name, const char *dir_name,
53598 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
53599 MS_STRICTATIME);
53600
53601+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
53602+ retval = -EPERM;
53603+ goto dput_out;
53604+ }
53605+
53606+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
53607+ retval = -EPERM;
53608+ goto dput_out;
53609+ }
53610+
53611 if (flags & MS_REMOUNT)
53612 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
53613 data_page);
53614@@ -2301,6 +2317,9 @@ long do_mount(const char *dev_name, const char *dir_name,
53615 dev_name, data_page);
53616 dput_out:
53617 path_put(&path);
53618+
53619+ gr_log_mount(dev_name, dir_name, retval);
53620+
53621 return retval;
53622 }
53623
53624@@ -2587,6 +2606,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
53625 if (error)
53626 goto out2;
53627
53628+ if (gr_handle_chroot_pivot()) {
53629+ error = -EPERM;
53630+ goto out2;
53631+ }
53632+
53633 get_fs_root(current->fs, &root);
53634 error = lock_mount(&old);
53635 if (error)
53636@@ -2790,7 +2814,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
53637 !nsown_capable(CAP_SYS_ADMIN))
53638 return -EPERM;
53639
53640- if (fs->users != 1)
53641+ if (atomic_read(&fs->users) != 1)
53642 return -EINVAL;
53643
53644 get_mnt_ns(mnt_ns);
53645diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
53646index 59461c9..b17c57e 100644
53647--- a/fs/nfs/callback_xdr.c
53648+++ b/fs/nfs/callback_xdr.c
53649@@ -51,7 +51,7 @@ struct callback_op {
53650 callback_decode_arg_t decode_args;
53651 callback_encode_res_t encode_res;
53652 long res_maxsize;
53653-};
53654+} __do_const;
53655
53656 static struct callback_op callback_ops[];
53657
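
/*
 * Note: __do_const is consumed by the PaX "constify" gcc plugin.  A
 * structure so marked -- typically one consisting of function pointers,
 * like callback_op here and nfsd4_operation below -- is forced into
 * read-only memory even where instances are not declared const,
 * removing convenient function-pointer overwrite targets.  Roughly
 * (a sketch; the real definition is conditional on the plugin):
 */
#ifdef CONSTIFY_PLUGIN
#define __do_const __attribute__((do_const))
#else
#define __do_const
#endif
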
53658diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
53659index ebeb94c..ff35337 100644
53660--- a/fs/nfs/inode.c
53661+++ b/fs/nfs/inode.c
53662@@ -1042,16 +1042,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
53663 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
53664 }
53665
53666-static atomic_long_t nfs_attr_generation_counter;
53667+static atomic_long_unchecked_t nfs_attr_generation_counter;
53668
53669 static unsigned long nfs_read_attr_generation_counter(void)
53670 {
53671- return atomic_long_read(&nfs_attr_generation_counter);
53672+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
53673 }
53674
53675 unsigned long nfs_inc_attr_generation_counter(void)
53676 {
53677- return atomic_long_inc_return(&nfs_attr_generation_counter);
53678+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
53679 }
53680
53681 void nfs_fattr_init(struct nfs_fattr *fattr)
53682diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
53683index 9d1c5db..1e13db8 100644
53684--- a/fs/nfsd/nfs4proc.c
53685+++ b/fs/nfsd/nfs4proc.c
53686@@ -1097,7 +1097,7 @@ struct nfsd4_operation {
53687 nfsd4op_rsize op_rsize_bop;
53688 stateid_getter op_get_currentstateid;
53689 stateid_setter op_set_currentstateid;
53690-};
53691+} __do_const;
53692
53693 static struct nfsd4_operation nfsd4_ops[];
53694
53695diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
53696index 0dc1158..ccf0338 100644
53697--- a/fs/nfsd/nfs4xdr.c
53698+++ b/fs/nfsd/nfs4xdr.c
53699@@ -1456,7 +1456,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
53700
53701 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
53702
53703-static nfsd4_dec nfsd4_dec_ops[] = {
53704+static const nfsd4_dec nfsd4_dec_ops[] = {
53705 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
53706 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
53707 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
53708@@ -1496,7 +1496,7 @@ static nfsd4_dec nfsd4_dec_ops[] = {
53709 [OP_RELEASE_LOCKOWNER] = (nfsd4_dec)nfsd4_decode_release_lockowner,
53710 };
53711
53712-static nfsd4_dec nfsd41_dec_ops[] = {
53713+static const nfsd4_dec nfsd41_dec_ops[] = {
53714 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
53715 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
53716 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
53717@@ -1558,7 +1558,7 @@ static nfsd4_dec nfsd41_dec_ops[] = {
53718 };
53719
53720 struct nfsd4_minorversion_ops {
53721- nfsd4_dec *decoders;
53722+ const nfsd4_dec *decoders;
53723 int nops;
53724 };
53725
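
Constifying the decoder tables forces a type change downstream: a const array decays to a pointer-to-const, so the decoders member of nfsd4_minorversion_ops has to change type as well. A compilable sketch of that propagation (all names invented):

typedef int (*dec_fn)(const char *buf);

static int dec_noop(const char *buf)
{
	(void)buf;
	return 0;
}

/* once the table itself is const ... */
static const dec_fn dec_ops_sketch[] = { dec_noop };

/* ... any struct holding a pointer into it must use pointer-to-const,
 * which is exactly why the 'decoders' member changes type above */
struct minorversion_ops_sketch {
	const dec_fn *decoders;
	int nops;
};

int main(void)
{
	struct minorversion_ops_sketch v = { dec_ops_sketch, 1 };
	return v.decoders[0]("x");
}
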
53726diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
53727index d586117..143d568 100644
53728--- a/fs/nfsd/vfs.c
53729+++ b/fs/nfsd/vfs.c
53730@@ -939,7 +939,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
53731 } else {
53732 oldfs = get_fs();
53733 set_fs(KERNEL_DS);
53734- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
53735+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
53736 set_fs(oldfs);
53737 }
53738
53739@@ -1025,7 +1025,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
53740
53741 /* Write the data. */
53742 oldfs = get_fs(); set_fs(KERNEL_DS);
53743- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
53744+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
53745 set_fs(oldfs);
53746 if (host_err < 0)
53747 goto out_nfserr;
53748@@ -1571,7 +1571,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
53749 */
53750
53751 oldfs = get_fs(); set_fs(KERNEL_DS);
53752- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
53753+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
53754 set_fs(oldfs);
53755
53756 if (host_err < 0)
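
__force_user addresses the sparse address-space checker: under set_fs(KERNEL_DS) these call sites deliberately pass kernel buffers where __user pointers are expected, and the __force-qualified cast documents that the violation is intentional rather than a bug. A self-contained sketch of the annotation mechanics; the macros are modeled on the kernel's, and copy_from_user_sketch is a stand-in:

#include <string.h>

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user	__force __user

/* stand-in for a user-copy primitive; the real one validates the range */
static size_t copy_from_user_sketch(void *dst,
				    const void __user *src, size_t n)
{
	memcpy(dst, (const void __force *)src, n);
	return 0;
}

int main(void)
{
	char kbuf[8] = "data", out[8];

	/* a kernel buffer deliberately passed where a __user pointer is
	 * expected -- legal under set_fs(KERNEL_DS), but the checker must
	 * be told the cast is intentional, hence __force_user */
	copy_from_user_sketch(out, (const void __force_user *)kbuf,
			      sizeof(out));
	return out[0] != 'd';
}
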
53757diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
53758index fea6bd5..8ee9d81 100644
53759--- a/fs/nls/nls_base.c
53760+++ b/fs/nls/nls_base.c
53761@@ -234,20 +234,22 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
53762
53763 int register_nls(struct nls_table * nls)
53764 {
53765- struct nls_table ** tmp = &tables;
53766+ struct nls_table *tmp = tables;
53767
53768 if (nls->next)
53769 return -EBUSY;
53770
53771 spin_lock(&nls_lock);
53772- while (*tmp) {
53773- if (nls == *tmp) {
53774+ while (tmp) {
53775+ if (nls == tmp) {
53776 spin_unlock(&nls_lock);
53777 return -EBUSY;
53778 }
53779- tmp = &(*tmp)->next;
53780+ tmp = tmp->next;
53781 }
53782- nls->next = tables;
53783+ pax_open_kernel();
53784+ *(struct nls_table **)&nls->next = tables;
53785+ pax_close_kernel();
53786 tables = nls;
53787 spin_unlock(&nls_lock);
53788 return 0;
53789@@ -255,12 +257,14 @@ int register_nls(struct nls_table * nls)
53790
53791 int unregister_nls(struct nls_table * nls)
53792 {
53793- struct nls_table ** tmp = &tables;
53794+ struct nls_table * const * tmp = &tables;
53795
53796 spin_lock(&nls_lock);
53797 while (*tmp) {
53798 if (nls == *tmp) {
53799- *tmp = nls->next;
53800+ pax_open_kernel();
53801+ *(struct nls_table **)tmp = nls->next;
53802+ pax_close_kernel();
53803 spin_unlock(&nls_lock);
53804 return 0;
53805 }
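
register_nls()/unregister_nls() now write the table's next pointer through pax_open_kernel()/pax_close_kernel(), the PaX primitives that briefly lift write protection on otherwise read-only kernel data (the euc-jp and koi8-ru hunks below use the same pattern for charset2upper/charset2lower). A userspace analogue using mprotect(); the kernel versions flip CR0.WP or PTE bits instead:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	char *tbl = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (tbl == MAP_FAILED)
		return 1;
	strcpy(tbl, "old");
	mprotect(tbl, pagesz, PROT_READ);		/* steady state: read-only */

	mprotect(tbl, pagesz, PROT_READ | PROT_WRITE);	/* pax_open_kernel() */
	strcpy(tbl, "new");				/* the one sanctioned write */
	mprotect(tbl, pagesz, PROT_READ);		/* pax_close_kernel() */

	puts(tbl);
	return 0;
}

Keeping the writable window this narrow is the point: an attacker with a stray write primitive finds the data read-only at almost every instant.
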
53806diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
53807index 7424929..35f6be5 100644
53808--- a/fs/nls/nls_euc-jp.c
53809+++ b/fs/nls/nls_euc-jp.c
53810@@ -561,8 +561,10 @@ static int __init init_nls_euc_jp(void)
53811 p_nls = load_nls("cp932");
53812
53813 if (p_nls) {
53814- table.charset2upper = p_nls->charset2upper;
53815- table.charset2lower = p_nls->charset2lower;
53816+ pax_open_kernel();
53817+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
53818+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
53819+ pax_close_kernel();
53820 return register_nls(&table);
53821 }
53822
53823diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
53824index e7bc1d7..06bd4bb 100644
53825--- a/fs/nls/nls_koi8-ru.c
53826+++ b/fs/nls/nls_koi8-ru.c
53827@@ -63,8 +63,10 @@ static int __init init_nls_koi8_ru(void)
53828 p_nls = load_nls("koi8-u");
53829
53830 if (p_nls) {
53831- table.charset2upper = p_nls->charset2upper;
53832- table.charset2lower = p_nls->charset2lower;
53833+ pax_open_kernel();
53834+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
53835+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
53836+ pax_close_kernel();
53837 return register_nls(&table);
53838 }
53839
53840diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
53841index 9ff4a5e..deb1f0f 100644
53842--- a/fs/notify/fanotify/fanotify_user.c
53843+++ b/fs/notify/fanotify/fanotify_user.c
53844@@ -251,8 +251,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
53845
53846 fd = fanotify_event_metadata.fd;
53847 ret = -EFAULT;
53848- if (copy_to_user(buf, &fanotify_event_metadata,
53849- fanotify_event_metadata.event_len))
53850+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
53851+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
53852 goto out_close_fd;
53853
53854 ret = prepare_for_access_response(group, event, fd);
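
The fanotify change closes an information leak: event_len comes from the event object itself, and copying that many bytes without validation could read stack memory beyond fanotify_event_metadata. The fix clamps the claimed length to the size of the source buffer before copy_to_user(). A standalone sketch with memcpy standing in for copy_to_user() (struct and function names invented):

#include <stdint.h>
#include <string.h>

struct event_metadata {
	uint32_t event_len;	/* claims how many bytes to copy out */
	int32_t  fd;
};

static int copy_event_out(char *dst, size_t dstsz,
			  const struct event_metadata *ev)
{
	if (ev->event_len > sizeof(*ev))	/* the added check */
		return -1;			/* would leak stack past 'ev' */
	if (ev->event_len > dstsz)
		return -1;
	memcpy(dst, ev, ev->event_len);
	return 0;
}

int main(void)
{
	struct event_metadata ev = { sizeof(ev), 3 };
	char buf[64];

	return copy_event_out(buf, sizeof(buf), &ev);
}

Running it with an inflated event_len would trip the first check and return an error instead of copying adjacent memory.
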
53855diff --git a/fs/notify/notification.c b/fs/notify/notification.c
53856index 7b51b05..5ea5ef6 100644
53857--- a/fs/notify/notification.c
53858+++ b/fs/notify/notification.c
53859@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
53860 * get set to 0 so it will never get 'freed'
53861 */
53862 static struct fsnotify_event *q_overflow_event;
53863-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
53864+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
53865
53866 /**
53867 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
53868@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
53869 */
53870 u32 fsnotify_get_cookie(void)
53871 {
53872- return atomic_inc_return(&fsnotify_sync_cookie);
53873+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
53874 }
53875 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
53876
53877diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
53878index 99e3610..02c1068 100644
53879--- a/fs/ntfs/dir.c
53880+++ b/fs/ntfs/dir.c
53881@@ -1329,7 +1329,7 @@ find_next_index_buffer:
53882 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
53883 ~(s64)(ndir->itype.index.block_size - 1)));
53884 /* Bounds checks. */
53885- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
53886+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
53887 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
53888 "inode 0x%lx or driver bug.", vdir->i_ino);
53889 goto err_out;
53890diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
53891index 5b2d4f0..c6de396 100644
53892--- a/fs/ntfs/file.c
53893+++ b/fs/ntfs/file.c
53894@@ -2242,6 +2242,6 @@ const struct inode_operations ntfs_file_inode_ops = {
53895 #endif /* NTFS_RW */
53896 };
53897
53898-const struct file_operations ntfs_empty_file_ops = {};
53899+const struct file_operations ntfs_empty_file_ops __read_only;
53900
53901-const struct inode_operations ntfs_empty_inode_ops = {};
53902+const struct inode_operations ntfs_empty_inode_ops __read_only;
53903diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
53904index a9f78c7..ed8a381 100644
53905--- a/fs/ocfs2/localalloc.c
53906+++ b/fs/ocfs2/localalloc.c
53907@@ -1279,7 +1279,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
53908 goto bail;
53909 }
53910
53911- atomic_inc(&osb->alloc_stats.moves);
53912+ atomic_inc_unchecked(&osb->alloc_stats.moves);
53913
53914 bail:
53915 if (handle)
53916diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
53917index d355e6e..578d905 100644
53918--- a/fs/ocfs2/ocfs2.h
53919+++ b/fs/ocfs2/ocfs2.h
53920@@ -235,11 +235,11 @@ enum ocfs2_vol_state
53921
53922 struct ocfs2_alloc_stats
53923 {
53924- atomic_t moves;
53925- atomic_t local_data;
53926- atomic_t bitmap_data;
53927- atomic_t bg_allocs;
53928- atomic_t bg_extends;
53929+ atomic_unchecked_t moves;
53930+ atomic_unchecked_t local_data;
53931+ atomic_unchecked_t bitmap_data;
53932+ atomic_unchecked_t bg_allocs;
53933+ atomic_unchecked_t bg_extends;
53934 };
53935
53936 enum ocfs2_local_alloc_state
53937diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
53938index b7e74b5..19c6536 100644
53939--- a/fs/ocfs2/suballoc.c
53940+++ b/fs/ocfs2/suballoc.c
53941@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
53942 mlog_errno(status);
53943 goto bail;
53944 }
53945- atomic_inc(&osb->alloc_stats.bg_extends);
53946+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
53947
53948 /* You should never ask for this much metadata */
53949 BUG_ON(bits_wanted >
53950@@ -2007,7 +2007,7 @@ int ocfs2_claim_metadata(handle_t *handle,
53951 mlog_errno(status);
53952 goto bail;
53953 }
53954- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
53955+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
53956
53957 *suballoc_loc = res.sr_bg_blkno;
53958 *suballoc_bit_start = res.sr_bit_offset;
53959@@ -2171,7 +2171,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
53960 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
53961 res->sr_bits);
53962
53963- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
53964+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
53965
53966 BUG_ON(res->sr_bits != 1);
53967
53968@@ -2213,7 +2213,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
53969 mlog_errno(status);
53970 goto bail;
53971 }
53972- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
53973+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
53974
53975 BUG_ON(res.sr_bits != 1);
53976
53977@@ -2317,7 +2317,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
53978 cluster_start,
53979 num_clusters);
53980 if (!status)
53981- atomic_inc(&osb->alloc_stats.local_data);
53982+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
53983 } else {
53984 if (min_clusters > (osb->bitmap_cpg - 1)) {
53985 /* The only paths asking for contiguousness
53986@@ -2343,7 +2343,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
53987 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
53988 res.sr_bg_blkno,
53989 res.sr_bit_offset);
53990- atomic_inc(&osb->alloc_stats.bitmap_data);
53991+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
53992 *num_clusters = res.sr_bits;
53993 }
53994 }
53995diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
53996index 0e91ec2..f4b3fc6 100644
53997--- a/fs/ocfs2/super.c
53998+++ b/fs/ocfs2/super.c
53999@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
54000 "%10s => GlobalAllocs: %d LocalAllocs: %d "
54001 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
54002 "Stats",
54003- atomic_read(&osb->alloc_stats.bitmap_data),
54004- atomic_read(&osb->alloc_stats.local_data),
54005- atomic_read(&osb->alloc_stats.bg_allocs),
54006- atomic_read(&osb->alloc_stats.moves),
54007- atomic_read(&osb->alloc_stats.bg_extends));
54008+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
54009+ atomic_read_unchecked(&osb->alloc_stats.local_data),
54010+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
54011+ atomic_read_unchecked(&osb->alloc_stats.moves),
54012+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
54013
54014 out += snprintf(buf + out, len - out,
54015 "%10s => State: %u Descriptor: %llu Size: %u bits "
54016@@ -2121,11 +2121,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
54017 spin_lock_init(&osb->osb_xattr_lock);
54018 ocfs2_init_steal_slots(osb);
54019
54020- atomic_set(&osb->alloc_stats.moves, 0);
54021- atomic_set(&osb->alloc_stats.local_data, 0);
54022- atomic_set(&osb->alloc_stats.bitmap_data, 0);
54023- atomic_set(&osb->alloc_stats.bg_allocs, 0);
54024- atomic_set(&osb->alloc_stats.bg_extends, 0);
54025+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
54026+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
54027+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
54028+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
54029+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
54030
54031 /* Copy the blockcheck stats from the superblock probe */
54032 osb->osb_ecc_stats = *stats;
54033diff --git a/fs/open.c b/fs/open.c
54034index 9b33c0c..2ffcca2 100644
54035--- a/fs/open.c
54036+++ b/fs/open.c
54037@@ -31,6 +31,8 @@
54038 #include <linux/ima.h>
54039 #include <linux/dnotify.h>
54040
54041+#define CREATE_TRACE_POINTS
54042+#include <trace/events/fs.h>
54043 #include "internal.h"
54044
54045 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
54046@@ -101,6 +103,8 @@ long vfs_truncate(struct path *path, loff_t length)
54047 error = locks_verify_truncate(inode, NULL, length);
54048 if (!error)
54049 error = security_path_truncate(path);
54050+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
54051+ error = -EACCES;
54052 if (!error)
54053 error = do_truncate(path->dentry, length, 0, NULL);
54054
54055@@ -178,6 +182,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
54056 error = locks_verify_truncate(inode, f.file, length);
54057 if (!error)
54058 error = security_path_truncate(&f.file->f_path);
54059+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
54060+ error = -EACCES;
54061 if (!error)
54062 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
54063 sb_end_write(inode->i_sb);
54064@@ -373,6 +379,9 @@ retry:
54065 if (__mnt_is_readonly(path.mnt))
54066 res = -EROFS;
54067
54068+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
54069+ res = -EACCES;
54070+
54071 out_path_release:
54072 path_put(&path);
54073 if (retry_estale(res, lookup_flags)) {
54074@@ -404,6 +413,8 @@ retry:
54075 if (error)
54076 goto dput_and_out;
54077
54078+ gr_log_chdir(path.dentry, path.mnt);
54079+
54080 set_fs_pwd(current->fs, &path);
54081
54082 dput_and_out:
54083@@ -433,6 +444,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
54084 goto out_putf;
54085
54086 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
54087+
54088+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
54089+ error = -EPERM;
54090+
54091+ if (!error)
54092+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
54093+
54094 if (!error)
54095 set_fs_pwd(current->fs, &f.file->f_path);
54096 out_putf:
54097@@ -462,7 +480,13 @@ retry:
54098 if (error)
54099 goto dput_and_out;
54100
54101+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
54102+ goto dput_and_out;
54103+
54104 set_fs_root(current->fs, &path);
54105+
54106+ gr_handle_chroot_chdir(&path);
54107+
54108 error = 0;
54109 dput_and_out:
54110 path_put(&path);
54111@@ -484,6 +508,16 @@ static int chmod_common(struct path *path, umode_t mode)
54112 if (error)
54113 return error;
54114 mutex_lock(&inode->i_mutex);
54115+
54116+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
54117+ error = -EACCES;
54118+ goto out_unlock;
54119+ }
54120+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
54121+ error = -EACCES;
54122+ goto out_unlock;
54123+ }
54124+
54125 error = security_path_chmod(path, mode);
54126 if (error)
54127 goto out_unlock;
54128@@ -544,6 +578,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
54129 uid = make_kuid(current_user_ns(), user);
54130 gid = make_kgid(current_user_ns(), group);
54131
54132+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
54133+ return -EACCES;
54134+
54135 newattrs.ia_valid = ATTR_CTIME;
54136 if (user != (uid_t) -1) {
54137 if (!uid_valid(uid))
54138@@ -960,6 +997,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
54139 } else {
54140 fsnotify_open(f);
54141 fd_install(fd, f);
54142+ trace_do_sys_open(tmp->name, flags, mode);
54143 }
54144 }
54145 putname(tmp);
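
The fs/open.c hunks share one splice pattern: each grsecurity check is inserted into an existing error cascade as `if (!error && !gr_...) error = -EACCES;`, so it runs only when every prior check passed and can only tighten the outcome, never loosen it. A compact sketch of that shape (all function names invented):

#include <errno.h>

static int security_check(void)   { return 0; }	/* 0 = ok */
static int gr_policy_allows(void) { return 1; }	/* nonzero = allow */
static int do_operation(void)     { return 0; }

int guarded_op(void)
{
	int error = security_check();

	if (!error && !gr_policy_allows())
		error = -EACCES;	/* the inserted grsecurity check */
	if (!error)
		error = do_operation();
	return error;
}

int main(void)
{
	return guarded_op();
}
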
54146diff --git a/fs/pipe.c b/fs/pipe.c
54147index 8e2e73f..1ef1048 100644
54148--- a/fs/pipe.c
54149+++ b/fs/pipe.c
54150@@ -438,9 +438,9 @@ redo:
54151 }
54152 if (bufs) /* More to do? */
54153 continue;
54154- if (!pipe->writers)
54155+ if (!atomic_read(&pipe->writers))
54156 break;
54157- if (!pipe->waiting_writers) {
54158+ if (!atomic_read(&pipe->waiting_writers)) {
54159 /* syscall merging: Usually we must not sleep
54160 * if O_NONBLOCK is set, or if we got some data.
54161 * But if a writer sleeps in kernel space, then
54162@@ -504,7 +504,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
54163 mutex_lock(&inode->i_mutex);
54164 pipe = inode->i_pipe;
54165
54166- if (!pipe->readers) {
54167+ if (!atomic_read(&pipe->readers)) {
54168 send_sig(SIGPIPE, current, 0);
54169 ret = -EPIPE;
54170 goto out;
54171@@ -553,7 +553,7 @@ redo1:
54172 for (;;) {
54173 int bufs;
54174
54175- if (!pipe->readers) {
54176+ if (!atomic_read(&pipe->readers)) {
54177 send_sig(SIGPIPE, current, 0);
54178 if (!ret)
54179 ret = -EPIPE;
54180@@ -644,9 +644,9 @@ redo2:
54181 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
54182 do_wakeup = 0;
54183 }
54184- pipe->waiting_writers++;
54185+ atomic_inc(&pipe->waiting_writers);
54186 pipe_wait(pipe);
54187- pipe->waiting_writers--;
54188+ atomic_dec(&pipe->waiting_writers);
54189 }
54190 out:
54191 mutex_unlock(&inode->i_mutex);
54192@@ -716,7 +716,7 @@ pipe_poll(struct file *filp, poll_table *wait)
54193 mask = 0;
54194 if (filp->f_mode & FMODE_READ) {
54195 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
54196- if (!pipe->writers && filp->f_version != pipe->w_counter)
54197+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
54198 mask |= POLLHUP;
54199 }
54200
54201@@ -726,7 +726,7 @@ pipe_poll(struct file *filp, poll_table *wait)
54202 * Most Unices do not set POLLERR for FIFOs but on Linux they
54203 * behave exactly like pipes for poll().
54204 */
54205- if (!pipe->readers)
54206+ if (!atomic_read(&pipe->readers))
54207 mask |= POLLERR;
54208 }
54209
54210@@ -740,10 +740,10 @@ pipe_release(struct inode *inode, int decr, int decw)
54211
54212 mutex_lock(&inode->i_mutex);
54213 pipe = inode->i_pipe;
54214- pipe->readers -= decr;
54215- pipe->writers -= decw;
54216+ atomic_sub(decr, &pipe->readers);
54217+ atomic_sub(decw, &pipe->writers);
54218
54219- if (!pipe->readers && !pipe->writers) {
54220+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
54221 free_pipe_info(inode);
54222 } else {
54223 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
54224@@ -833,7 +833,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
54225
54226 if (inode->i_pipe) {
54227 ret = 0;
54228- inode->i_pipe->readers++;
54229+ atomic_inc(&inode->i_pipe->readers);
54230 }
54231
54232 mutex_unlock(&inode->i_mutex);
54233@@ -850,7 +850,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
54234
54235 if (inode->i_pipe) {
54236 ret = 0;
54237- inode->i_pipe->writers++;
54238+ atomic_inc(&inode->i_pipe->writers);
54239 }
54240
54241 mutex_unlock(&inode->i_mutex);
54242@@ -871,9 +871,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
54243 if (inode->i_pipe) {
54244 ret = 0;
54245 if (filp->f_mode & FMODE_READ)
54246- inode->i_pipe->readers++;
54247+ atomic_inc(&inode->i_pipe->readers);
54248 if (filp->f_mode & FMODE_WRITE)
54249- inode->i_pipe->writers++;
54250+ atomic_inc(&inode->i_pipe->writers);
54251 }
54252
54253 mutex_unlock(&inode->i_mutex);
54254@@ -965,7 +965,7 @@ void free_pipe_info(struct inode *inode)
54255 inode->i_pipe = NULL;
54256 }
54257
54258-static struct vfsmount *pipe_mnt __read_mostly;
54259+struct vfsmount *pipe_mnt __read_mostly;
54260
54261 /*
54262 * pipefs_dname() is called from d_path().
54263@@ -995,7 +995,8 @@ static struct inode * get_pipe_inode(void)
54264 goto fail_iput;
54265 inode->i_pipe = pipe;
54266
54267- pipe->readers = pipe->writers = 1;
54268+ atomic_set(&pipe->readers, 1);
54269+ atomic_set(&pipe->writers, 1);
54270 inode->i_fop = &rdwr_pipefifo_fops;
54271
54272 /*
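
The pipe counters readers, writers, and waiting_writers are converted from plain ints to atomic_t so every ++, -- and read goes through the atomic helpers and can be covered by the REFCOUNT/overflow instrumentation. A C11 userspace sketch of the conversion pattern (pipe_sketch and its helpers are invented names):

#include <stdatomic.h>
#include <stdio.h>

struct pipe_sketch {
	atomic_int readers;
	atomic_int writers;
};

static void pipe_open_reader(struct pipe_sketch *p)
{
	atomic_fetch_add(&p->readers, 1);	/* was: p->readers++ */
}

static int pipe_has_readers(struct pipe_sketch *p)
{
	return atomic_load(&p->readers) != 0;	/* was: p->readers */
}

int main(void)
{
	struct pipe_sketch p = { 0, 0 };

	pipe_open_reader(&p);
	printf("readers: %d\n", pipe_has_readers(&p));
	return 0;
}
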
54273diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
54274index 15af622..0e9f4467 100644
54275--- a/fs/proc/Kconfig
54276+++ b/fs/proc/Kconfig
54277@@ -30,12 +30,12 @@ config PROC_FS
54278
54279 config PROC_KCORE
54280 bool "/proc/kcore support" if !ARM
54281- depends on PROC_FS && MMU
54282+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
54283
54284 config PROC_VMCORE
54285 bool "/proc/vmcore support"
54286- depends on PROC_FS && CRASH_DUMP
54287- default y
54288+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
54289+ default n
54290 help
54291 Exports the dump image of crashed kernel in ELF format.
54292
54293@@ -59,8 +59,8 @@ config PROC_SYSCTL
54294 limited in memory.
54295
54296 config PROC_PAGE_MONITOR
54297- default y
54298- depends on PROC_FS && MMU
54299+ default n
54300+ depends on PROC_FS && MMU && !GRKERNSEC
54301 bool "Enable /proc page monitoring" if EXPERT
54302 help
54303 Various /proc files exist to monitor process memory utilization:
54304diff --git a/fs/proc/array.c b/fs/proc/array.c
54305index 6a91e6f..e54dbc14 100644
54306--- a/fs/proc/array.c
54307+++ b/fs/proc/array.c
54308@@ -60,6 +60,7 @@
54309 #include <linux/tty.h>
54310 #include <linux/string.h>
54311 #include <linux/mman.h>
54312+#include <linux/grsecurity.h>
54313 #include <linux/proc_fs.h>
54314 #include <linux/ioport.h>
54315 #include <linux/uaccess.h>
54316@@ -362,6 +363,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
54317 seq_putc(m, '\n');
54318 }
54319
54320+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
54321+static inline void task_pax(struct seq_file *m, struct task_struct *p)
54322+{
54323+ if (p->mm)
54324+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
54325+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
54326+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
54327+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
54328+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
54329+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
54330+ else
54331+ seq_printf(m, "PaX:\t-----\n");
54332+}
54333+#endif
54334+
54335 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
54336 struct pid *pid, struct task_struct *task)
54337 {
54338@@ -380,9 +396,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
54339 task_cpus_allowed(m, task);
54340 cpuset_task_status_allowed(m, task);
54341 task_context_switch_counts(m, task);
54342+
54343+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
54344+ task_pax(m, task);
54345+#endif
54346+
54347+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
54348+ task_grsec_rbac(m, task);
54349+#endif
54350+
54351 return 0;
54352 }
54353
54354+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54355+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
54356+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
54357+ _mm->pax_flags & MF_PAX_SEGMEXEC))
54358+#endif
54359+
54360 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
54361 struct pid *pid, struct task_struct *task, int whole)
54362 {
54363@@ -404,6 +435,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
54364 char tcomm[sizeof(task->comm)];
54365 unsigned long flags;
54366
54367+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54368+ if (current->exec_id != m->exec_id) {
54369+ gr_log_badprocpid("stat");
54370+ return 0;
54371+ }
54372+#endif
54373+
54374 state = *get_task_state(task);
54375 vsize = eip = esp = 0;
54376 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
54377@@ -475,6 +513,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
54378 gtime = task->gtime;
54379 }
54380
54381+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54382+ if (PAX_RAND_FLAGS(mm)) {
54383+ eip = 0;
54384+ esp = 0;
54385+ wchan = 0;
54386+ }
54387+#endif
54388+#ifdef CONFIG_GRKERNSEC_HIDESYM
54389+ wchan = 0;
54390+ eip = 0;
54391+ esp = 0;
54392+#endif
54393+
54394 /* scale priority and nice values from timeslices to -20..20 */
54395 /* to make it look like a "normal" Unix priority/nice value */
54396 priority = task_prio(task);
54397@@ -511,9 +562,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
54398 seq_put_decimal_ull(m, ' ', vsize);
54399 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
54400 seq_put_decimal_ull(m, ' ', rsslim);
54401+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54402+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
54403+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
54404+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
54405+#else
54406 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
54407 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
54408 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
54409+#endif
54410 seq_put_decimal_ull(m, ' ', esp);
54411 seq_put_decimal_ull(m, ' ', eip);
54412 /* The signal information here is obsolete.
54413@@ -535,7 +592,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
54414 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
54415 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
54416
54417- if (mm && permitted) {
54418+ if (mm && permitted
54419+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54420+ && !PAX_RAND_FLAGS(mm)
54421+#endif
54422+ ) {
54423 seq_put_decimal_ull(m, ' ', mm->start_data);
54424 seq_put_decimal_ull(m, ' ', mm->end_data);
54425 seq_put_decimal_ull(m, ' ', mm->start_brk);
54426@@ -573,8 +634,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
54427 struct pid *pid, struct task_struct *task)
54428 {
54429 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
54430- struct mm_struct *mm = get_task_mm(task);
54431+ struct mm_struct *mm;
54432
54433+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54434+ if (current->exec_id != m->exec_id) {
54435+ gr_log_badprocpid("statm");
54436+ return 0;
54437+ }
54438+#endif
54439+ mm = get_task_mm(task);
54440 if (mm) {
54441 size = task_statm(mm, &shared, &text, &data, &resident);
54442 mmput(mm);
54443@@ -597,6 +665,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
54444 return 0;
54445 }
54446
54447+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
54448+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
54449+{
54450+ return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
54451+}
54452+#endif
54453+
54454 #ifdef CONFIG_CHECKPOINT_RESTORE
54455 static struct pid *
54456 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
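
PAX_RAND_FLAGS() gates address disclosure in /proc: when the target mm has RANDMMAP or SEGMEXEC set and is not the caller's own, fields like eip, esp, wchan and the code/stack bounds are reported as 0/1 sentinels so the randomized layout cannot be read back out of /proc/<pid>/stat. A sketch of the redaction decision, mirroring the macro (the mm struct here is a stand-in):

#include <stdio.h>

#define MF_PAX_RANDMMAP  0x01
#define MF_PAX_SEGMEXEC  0x02

struct mm_sketch { unsigned long pax_flags; };

static int pax_rand_flags(const struct mm_sketch *mm,
			  const struct mm_sketch *current_mm)
{
	return mm && mm != current_mm &&
	       (mm->pax_flags & (MF_PAX_RANDMMAP | MF_PAX_SEGMEXEC));
}

int main(void)
{
	struct mm_sketch target = { MF_PAX_RANDMMAP }, self = { 0 };
	unsigned long eip = 0xdeadbeef;

	if (pax_rand_flags(&target, &self))
		eip = 0;	/* redact: don't leak randomized addresses */
	printf("eip: %#lx\n", eip);
	return 0;
}
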
54457diff --git a/fs/proc/base.c b/fs/proc/base.c
54458index 9b43ff77..ba3e990 100644
54459--- a/fs/proc/base.c
54460+++ b/fs/proc/base.c
54461@@ -111,6 +111,14 @@ struct pid_entry {
54462 union proc_op op;
54463 };
54464
54465+struct getdents_callback {
54466+ struct linux_dirent __user * current_dir;
54467+ struct linux_dirent __user * previous;
54468+ struct file * file;
54469+ int count;
54470+ int error;
54471+};
54472+
54473 #define NOD(NAME, MODE, IOP, FOP, OP) { \
54474 .name = (NAME), \
54475 .len = sizeof(NAME) - 1, \
54476@@ -208,6 +216,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
54477 if (!mm->arg_end)
54478 goto out_mm; /* Shh! No looking before we're done */
54479
54480+ if (gr_acl_handle_procpidmem(task))
54481+ goto out_mm;
54482+
54483 len = mm->arg_end - mm->arg_start;
54484
54485 if (len > PAGE_SIZE)
54486@@ -235,12 +246,28 @@ out:
54487 return res;
54488 }
54489
54490+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54491+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
54492+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
54493+ _mm->pax_flags & MF_PAX_SEGMEXEC))
54494+#endif
54495+
54496 static int proc_pid_auxv(struct task_struct *task, char *buffer)
54497 {
54498 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
54499 int res = PTR_ERR(mm);
54500 if (mm && !IS_ERR(mm)) {
54501 unsigned int nwords = 0;
54502+
54503+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54504+ /* allow if we're currently ptracing this task */
54505+ if (PAX_RAND_FLAGS(mm) &&
54506+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
54507+ mmput(mm);
54508+ return 0;
54509+ }
54510+#endif
54511+
54512 do {
54513 nwords += 2;
54514 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
54515@@ -254,7 +281,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
54516 }
54517
54518
54519-#ifdef CONFIG_KALLSYMS
54520+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
54521 /*
54522 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
54523 * Returns the resolved symbol. If that fails, simply return the address.
54524@@ -293,7 +320,7 @@ static void unlock_trace(struct task_struct *task)
54525 mutex_unlock(&task->signal->cred_guard_mutex);
54526 }
54527
54528-#ifdef CONFIG_STACKTRACE
54529+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
54530
54531 #define MAX_STACK_TRACE_DEPTH 64
54532
54533@@ -485,7 +512,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
54534 return count;
54535 }
54536
54537-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
54538+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
54539 static int proc_pid_syscall(struct task_struct *task, char *buffer)
54540 {
54541 long nr;
54542@@ -514,7 +541,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
54543 /************************************************************************/
54544
54545 /* permission checks */
54546-static int proc_fd_access_allowed(struct inode *inode)
54547+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
54548 {
54549 struct task_struct *task;
54550 int allowed = 0;
54551@@ -524,7 +551,10 @@ static int proc_fd_access_allowed(struct inode *inode)
54552 */
54553 task = get_proc_task(inode);
54554 if (task) {
54555- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
54556+ if (log)
54557+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
54558+ else
54559+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
54560 put_task_struct(task);
54561 }
54562 return allowed;
54563@@ -555,10 +585,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
54564 struct task_struct *task,
54565 int hide_pid_min)
54566 {
54567+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
54568+ return false;
54569+
54570+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54571+ rcu_read_lock();
54572+ {
54573+ const struct cred *tmpcred = current_cred();
54574+ const struct cred *cred = __task_cred(task);
54575+
54576+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
54577+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
54578+ || in_group_p(grsec_proc_gid)
54579+#endif
54580+ ) {
54581+ rcu_read_unlock();
54582+ return true;
54583+ }
54584+ }
54585+ rcu_read_unlock();
54586+
54587+ if (!pid->hide_pid)
54588+ return false;
54589+#endif
54590+
54591 if (pid->hide_pid < hide_pid_min)
54592 return true;
54593 if (in_group_p(pid->pid_gid))
54594 return true;
54595+
54596 return ptrace_may_access(task, PTRACE_MODE_READ);
54597 }
54598
54599@@ -576,7 +631,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
54600 put_task_struct(task);
54601
54602 if (!has_perms) {
54603+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54604+ {
54605+#else
54606 if (pid->hide_pid == 2) {
54607+#endif
54608 /*
54609 * Let's make getdents(), stat(), and open()
54610 * consistent with each other. If a process
54611@@ -674,6 +733,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
54612 if (!task)
54613 return -ESRCH;
54614
54615+ if (gr_acl_handle_procpidmem(task)) {
54616+ put_task_struct(task);
54617+ return -EPERM;
54618+ }
54619+
54620 mm = mm_access(task, mode);
54621 put_task_struct(task);
54622
54623@@ -689,6 +753,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
54624
54625 file->private_data = mm;
54626
54627+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54628+ file->f_version = current->exec_id;
54629+#endif
54630+
54631 return 0;
54632 }
54633
54634@@ -710,6 +778,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
54635 ssize_t copied;
54636 char *page;
54637
54638+#ifdef CONFIG_GRKERNSEC
54639+ if (write)
54640+ return -EPERM;
54641+#endif
54642+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54643+ if (file->f_version != current->exec_id) {
54644+ gr_log_badprocpid("mem");
54645+ return 0;
54646+ }
54647+#endif
54648+
54649 if (!mm)
54650 return 0;
54651
54652@@ -814,6 +893,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
54653 if (!mm)
54654 return 0;
54655
54656+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54657+ if (file->f_version != current->exec_id) {
54658+ gr_log_badprocpid("environ");
54659+ return 0;
54660+ }
54661+#endif
54662+
54663 page = (char *)__get_free_page(GFP_TEMPORARY);
54664 if (!page)
54665 return -ENOMEM;
54666@@ -1429,7 +1515,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
54667 int error = -EACCES;
54668
54669 /* Are we allowed to snoop on the tasks file descriptors? */
54670- if (!proc_fd_access_allowed(inode))
54671+ if (!proc_fd_access_allowed(inode, 0))
54672 goto out;
54673
54674 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
54675@@ -1473,8 +1559,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
54676 struct path path;
54677
54678 /* Are we allowed to snoop on the tasks file descriptors? */
54679- if (!proc_fd_access_allowed(inode))
54680- goto out;
54681+ /* Logging this access is needed for learning mode to work properly
54682+ on Chromium, but we don't want to flood the logs from 'ps', which
54683+ does a readlink on /proc/<pid>/fd/2 of every task in the listing,
54684+ nor do we want 'ps' to learn CAP_SYS_PTRACE, as that isn't
54685+ necessary for its basic functionality. */
54686+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
54687+ if (!proc_fd_access_allowed(inode, 0))
54688+ goto out;
54689+ } else {
54690+ if (!proc_fd_access_allowed(inode, 1))
54691+ goto out;
54692+ }
54693
54694 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
54695 if (error)
54696@@ -1524,7 +1620,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
54697 rcu_read_lock();
54698 cred = __task_cred(task);
54699 inode->i_uid = cred->euid;
54700+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
54701+ inode->i_gid = grsec_proc_gid;
54702+#else
54703 inode->i_gid = cred->egid;
54704+#endif
54705 rcu_read_unlock();
54706 }
54707 security_task_to_inode(task, inode);
54708@@ -1560,10 +1660,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
54709 return -ENOENT;
54710 }
54711 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
54712+#ifdef CONFIG_GRKERNSEC_PROC_USER
54713+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
54714+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54715+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
54716+#endif
54717 task_dumpable(task)) {
54718 cred = __task_cred(task);
54719 stat->uid = cred->euid;
54720+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
54721+ stat->gid = grsec_proc_gid;
54722+#else
54723 stat->gid = cred->egid;
54724+#endif
54725 }
54726 }
54727 rcu_read_unlock();
54728@@ -1601,11 +1710,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
54729
54730 if (task) {
54731 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
54732+#ifdef CONFIG_GRKERNSEC_PROC_USER
54733+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
54734+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54735+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
54736+#endif
54737 task_dumpable(task)) {
54738 rcu_read_lock();
54739 cred = __task_cred(task);
54740 inode->i_uid = cred->euid;
54741+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
54742+ inode->i_gid = grsec_proc_gid;
54743+#else
54744 inode->i_gid = cred->egid;
54745+#endif
54746 rcu_read_unlock();
54747 } else {
54748 inode->i_uid = GLOBAL_ROOT_UID;
54749@@ -2058,6 +2176,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
54750 if (!task)
54751 goto out_no_task;
54752
54753+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
54754+ goto out;
54755+
54756 /*
54757 * Yes, it does not scale. And it should not. Don't add
54758 * new entries into /proc/<tgid>/ without very good reasons.
54759@@ -2102,6 +2223,9 @@ static int proc_pident_readdir(struct file *filp,
54760 if (!task)
54761 goto out_no_task;
54762
54763+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
54764+ goto out;
54765+
54766 ret = 0;
54767 i = filp->f_pos;
54768 switch (i) {
54769@@ -2515,7 +2639,7 @@ static const struct pid_entry tgid_base_stuff[] = {
54770 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
54771 #endif
54772 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
54773-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
54774+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
54775 INF("syscall", S_IRUGO, proc_pid_syscall),
54776 #endif
54777 INF("cmdline", S_IRUGO, proc_pid_cmdline),
54778@@ -2540,10 +2664,10 @@ static const struct pid_entry tgid_base_stuff[] = {
54779 #ifdef CONFIG_SECURITY
54780 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
54781 #endif
54782-#ifdef CONFIG_KALLSYMS
54783+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
54784 INF("wchan", S_IRUGO, proc_pid_wchan),
54785 #endif
54786-#ifdef CONFIG_STACKTRACE
54787+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
54788 ONE("stack", S_IRUGO, proc_pid_stack),
54789 #endif
54790 #ifdef CONFIG_SCHEDSTATS
54791@@ -2577,6 +2701,9 @@ static const struct pid_entry tgid_base_stuff[] = {
54792 #ifdef CONFIG_HARDWALL
54793 INF("hardwall", S_IRUGO, proc_pid_hardwall),
54794 #endif
54795+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
54796+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
54797+#endif
54798 #ifdef CONFIG_USER_NS
54799 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
54800 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
54801@@ -2705,7 +2832,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
54802 if (!inode)
54803 goto out;
54804
54805+#ifdef CONFIG_GRKERNSEC_PROC_USER
54806+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
54807+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54808+ inode->i_gid = grsec_proc_gid;
54809+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
54810+#else
54811 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
54812+#endif
54813 inode->i_op = &proc_tgid_base_inode_operations;
54814 inode->i_fop = &proc_tgid_base_operations;
54815 inode->i_flags|=S_IMMUTABLE;
54816@@ -2743,7 +2877,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
54817 if (!task)
54818 goto out;
54819
54820+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
54821+ goto out_put_task;
54822+
54823 result = proc_pid_instantiate(dir, dentry, task, NULL);
54824+out_put_task:
54825 put_task_struct(task);
54826 out:
54827 return result;
54828@@ -2806,6 +2944,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
54829 static int fake_filldir(void *buf, const char *name, int namelen,
54830 loff_t offset, u64 ino, unsigned d_type)
54831 {
54832+ struct getdents_callback * __buf = (struct getdents_callback *) buf;
54833+ __buf->error = -EINVAL;
54834 return 0;
54835 }
54836
54837@@ -2857,7 +2997,7 @@ static const struct pid_entry tid_base_stuff[] = {
54838 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
54839 #endif
54840 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
54841-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
54842+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
54843 INF("syscall", S_IRUGO, proc_pid_syscall),
54844 #endif
54845 INF("cmdline", S_IRUGO, proc_pid_cmdline),
54846@@ -2884,10 +3024,10 @@ static const struct pid_entry tid_base_stuff[] = {
54847 #ifdef CONFIG_SECURITY
54848 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
54849 #endif
54850-#ifdef CONFIG_KALLSYMS
54851+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
54852 INF("wchan", S_IRUGO, proc_pid_wchan),
54853 #endif
54854-#ifdef CONFIG_STACKTRACE
54855+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
54856 ONE("stack", S_IRUGO, proc_pid_stack),
54857 #endif
54858 #ifdef CONFIG_SCHEDSTATS
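
The exec_id checks in base.c guard against file descriptors held open across execve(): __mem_open() stamps the opener's exec generation into f_version, and mem_rw()/environ_read() refuse (returning an empty read and logging via gr_log_badprocpid()) when the caller's current generation no longer matches. A sketch of the gate (all names invented):

#include <stdio.h>

struct task_sketch { unsigned long exec_id; };
struct file_sketch { unsigned long opener_exec_id; };

static int proc_read(struct file_sketch *f, struct task_sketch *current)
{
	if (f->opener_exec_id != current->exec_id) {
		fprintf(stderr, "badprocpid: stale fd across exec\n");
		return 0;	/* silently empty, as the patch does */
	}
	return 1;		/* proceed with the real read */
}

int main(void)
{
	struct task_sketch t = { 1 };
	struct file_sketch f = { 1 };

	proc_read(&f, &t);	/* same generation: allowed */
	t.exec_id = 2;		/* caller execed a new image */
	return proc_read(&f, &t);
}
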
54859diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
54860index 82676e3..5f8518a 100644
54861--- a/fs/proc/cmdline.c
54862+++ b/fs/proc/cmdline.c
54863@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
54864
54865 static int __init proc_cmdline_init(void)
54866 {
54867+#ifdef CONFIG_GRKERNSEC_PROC_ADD
54868+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
54869+#else
54870 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
54871+#endif
54872 return 0;
54873 }
54874 module_init(proc_cmdline_init);
54875diff --git a/fs/proc/devices.c b/fs/proc/devices.c
54876index b143471..bb105e5 100644
54877--- a/fs/proc/devices.c
54878+++ b/fs/proc/devices.c
54879@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
54880
54881 static int __init proc_devices_init(void)
54882 {
54883+#ifdef CONFIG_GRKERNSEC_PROC_ADD
54884+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
54885+#else
54886 proc_create("devices", 0, NULL, &proc_devinfo_operations);
54887+#endif
54888 return 0;
54889 }
54890 module_init(proc_devices_init);
54891diff --git a/fs/proc/fd.c b/fs/proc/fd.c
54892index d7a4a28..0201742 100644
54893--- a/fs/proc/fd.c
54894+++ b/fs/proc/fd.c
54895@@ -25,7 +25,8 @@ static int seq_show(struct seq_file *m, void *v)
54896 if (!task)
54897 return -ENOENT;
54898
54899- files = get_files_struct(task);
54900+ if (!gr_acl_handle_procpidmem(task))
54901+ files = get_files_struct(task);
54902 put_task_struct(task);
54903
54904 if (files) {
54905@@ -302,11 +303,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
54906 */
54907 int proc_fd_permission(struct inode *inode, int mask)
54908 {
54909+ struct task_struct *task;
54910 int rv = generic_permission(inode, mask);
54911- if (rv == 0)
54912- return 0;
54913+
54914 if (task_pid(current) == proc_pid(inode))
54915 rv = 0;
54916+
54917+ task = get_proc_task(inode);
54918+ if (task == NULL)
54919+ return rv;
54920+
54921+ if (gr_acl_handle_procpidmem(task))
54922+ rv = -EACCES;
54923+
54924+ put_task_struct(task);
54925+
54926 return rv;
54927 }
54928
54929diff --git a/fs/proc/inode.c b/fs/proc/inode.c
54930index 439ae688..c0e4d63 100644
54931--- a/fs/proc/inode.c
54932+++ b/fs/proc/inode.c
54933@@ -21,11 +21,17 @@
54934 #include <linux/seq_file.h>
54935 #include <linux/slab.h>
54936 #include <linux/mount.h>
54937+#include <linux/grsecurity.h>
54938
54939 #include <asm/uaccess.h>
54940
54941 #include "internal.h"
54942
54943+#ifdef CONFIG_PROC_SYSCTL
54944+extern const struct inode_operations proc_sys_inode_operations;
54945+extern const struct inode_operations proc_sys_dir_operations;
54946+#endif
54947+
54948 static void proc_evict_inode(struct inode *inode)
54949 {
54950 struct proc_dir_entry *de;
54951@@ -53,6 +59,13 @@ static void proc_evict_inode(struct inode *inode)
54952 ns = PROC_I(inode)->ns;
54953 if (ns_ops && ns)
54954 ns_ops->put(ns);
54955+
54956+#ifdef CONFIG_PROC_SYSCTL
54957+ if (inode->i_op == &proc_sys_inode_operations ||
54958+ inode->i_op == &proc_sys_dir_operations)
54959+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
54960+#endif
54961+
54962 }
54963
54964 static struct kmem_cache * proc_inode_cachep;
54965@@ -445,19 +458,21 @@ static const struct file_operations proc_reg_file_ops_no_compat = {
54966
54967 struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
54968 {
54969- struct inode * inode;
54970+ struct inode *inode = new_inode_pseudo(sb);
54971
54972- inode = iget_locked(sb, de->low_ino);
54973- if (!inode)
54974- return NULL;
54975- if (inode->i_state & I_NEW) {
54976+ if (inode) {
54977+ inode->i_ino = de->low_ino;
54978 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
54979 PROC_I(inode)->pde = de;
54980
54981 if (de->mode) {
54982 inode->i_mode = de->mode;
54983 inode->i_uid = de->uid;
54984+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
54985+ inode->i_gid = grsec_proc_gid;
54986+#else
54987 inode->i_gid = de->gid;
54988+#endif
54989 }
54990 if (de->size)
54991 inode->i_size = de->size;
54992@@ -478,7 +493,6 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
54993 inode->i_fop = de->proc_fops;
54994 }
54995 }
54996- unlock_new_inode(inode);
54997 } else
54998 pde_put(de);
54999 return inode;
55000diff --git a/fs/proc/internal.h b/fs/proc/internal.h
55001index 252544c..04395b9 100644
55002--- a/fs/proc/internal.h
55003+++ b/fs/proc/internal.h
55004@@ -55,6 +55,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
55005 struct pid *pid, struct task_struct *task);
55006 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
55007 struct pid *pid, struct task_struct *task);
55008+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
55009+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
55010+#endif
55011 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
55012
55013 extern const struct file_operations proc_tid_children_operations;
55014diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
55015index e96d4f1..8b116ed 100644
55016--- a/fs/proc/kcore.c
55017+++ b/fs/proc/kcore.c
55018@@ -480,9 +480,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
55019 * the addresses in the elf_phdr on our list.
55020 */
55021 start = kc_offset_to_vaddr(*fpos - elf_buflen);
55022- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
55023+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
55024+ if (tsz > buflen)
55025 tsz = buflen;
55026-
55027+
55028 while (buflen) {
55029 struct kcore_list *m;
55030
55031@@ -511,20 +512,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
55032 kfree(elf_buf);
55033 } else {
55034 if (kern_addr_valid(start)) {
55035- unsigned long n;
55036+ char *elf_buf;
55037+ mm_segment_t oldfs;
55038
55039- n = copy_to_user(buffer, (char *)start, tsz);
55040- /*
55041- * We cannot distinguish between fault on source
55042- * and fault on destination. When this happens
55043- * we clear too and hope it will trigger the
55044- * EFAULT again.
55045- */
55046- if (n) {
55047- if (clear_user(buffer + tsz - n,
55048- n))
55049+ elf_buf = kmalloc(tsz, GFP_KERNEL);
55050+ if (!elf_buf)
55051+ return -ENOMEM;
55052+ oldfs = get_fs();
55053+ set_fs(KERNEL_DS);
55054+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
55055+ set_fs(oldfs);
55056+ if (copy_to_user(buffer, elf_buf, tsz)) {
55057+ kfree(elf_buf);
55058 return -EFAULT;
55059+ }
55060 }
55061+ set_fs(oldfs);
55062+ kfree(elf_buf);
55063 } else {
55064 if (clear_user(buffer, tsz))
55065 return -EFAULT;
55066@@ -544,6 +548,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
55067
55068 static int open_kcore(struct inode *inode, struct file *filp)
55069 {
55070+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
55071+ return -EPERM;
55072+#endif
55073 if (!capable(CAP_SYS_RAWIO))
55074 return -EPERM;
55075 if (kcore_need_update)
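
The read_kcore() rewrite replaces a direct copy_to_user() from a raw kernel address, where a fault on the source is indistinguishable from a fault on the destination, with a bounce buffer: the region is first pulled into a kmalloc'd scratch buffer under KERNEL_DS, then copied out, so each direction fails cleanly on its own. A userspace sketch of that staging, with memcpy standing in for both kernel copy primitives:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

static int copy_region_out(char *user_dst, const char *kern_src, size_t tsz)
{
	char *bounce = malloc(tsz);		/* kmalloc(tsz, GFP_KERNEL) */

	if (!bounce)
		return -ENOMEM;
	memcpy(bounce, kern_src, tsz);		/* __copy_from_user under KERNEL_DS */
	memcpy(user_dst, bounce, tsz);		/* copy_to_user */
	free(bounce);
	return 0;
}

int main(void)
{
	char src[16] = "kcore", dst[16];

	return copy_region_out(dst, src, sizeof(src));
}
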
55076diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
55077index 80e4645..53e5fcf 100644
55078--- a/fs/proc/meminfo.c
55079+++ b/fs/proc/meminfo.c
55080@@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
55081 vmi.used >> 10,
55082 vmi.largest_chunk >> 10
55083 #ifdef CONFIG_MEMORY_FAILURE
55084- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
55085+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
55086 #endif
55087 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
55088 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
55089diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
55090index b1822dd..df622cb 100644
55091--- a/fs/proc/nommu.c
55092+++ b/fs/proc/nommu.c
55093@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
55094 if (len < 1)
55095 len = 1;
55096 seq_printf(m, "%*c", len, ' ');
55097- seq_path(m, &file->f_path, "");
55098+ seq_path(m, &file->f_path, "\n\\");
55099 }
55100
55101 seq_putc(m, '\n');
55102diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
55103index fe72cd0..21b52ff 100644
55104--- a/fs/proc/proc_net.c
55105+++ b/fs/proc/proc_net.c
55106@@ -23,6 +23,7 @@
55107 #include <linux/nsproxy.h>
55108 #include <net/net_namespace.h>
55109 #include <linux/seq_file.h>
55110+#include <linux/grsecurity.h>
55111
55112 #include "internal.h"
55113
55114@@ -105,6 +106,17 @@ static struct net *get_proc_task_net(struct inode *dir)
55115 struct task_struct *task;
55116 struct nsproxy *ns;
55117 struct net *net = NULL;
55118+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55119+ const struct cred *cred = current_cred();
55120+#endif
55121+
55122+#ifdef CONFIG_GRKERNSEC_PROC_USER
55123+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
55124+ return net;
55125+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55126+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
55127+ return net;
55128+#endif
55129
55130 rcu_read_lock();
55131 task = pid_task(proc_pid(dir), PIDTYPE_PID);
55132diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
55133index 1827d88..43b0279 100644
55134--- a/fs/proc/proc_sysctl.c
55135+++ b/fs/proc/proc_sysctl.c
55136@@ -12,11 +12,15 @@
55137 #include <linux/module.h>
55138 #include "internal.h"
55139
55140+extern int gr_handle_chroot_sysctl(const int op);
55141+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
55142+ const int op);
55143+
55144 static const struct dentry_operations proc_sys_dentry_operations;
55145 static const struct file_operations proc_sys_file_operations;
55146-static const struct inode_operations proc_sys_inode_operations;
55147+const struct inode_operations proc_sys_inode_operations;
55148 static const struct file_operations proc_sys_dir_file_operations;
55149-static const struct inode_operations proc_sys_dir_operations;
55150+const struct inode_operations proc_sys_dir_operations;
55151
55152 void proc_sys_poll_notify(struct ctl_table_poll *poll)
55153 {
55154@@ -466,6 +470,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
55155
55156 err = NULL;
55157 d_set_d_op(dentry, &proc_sys_dentry_operations);
55158+
55159+ gr_handle_proc_create(dentry, inode);
55160+
55161 d_add(dentry, inode);
55162
55163 out:
55164@@ -481,6 +488,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
55165 struct inode *inode = filp->f_path.dentry->d_inode;
55166 struct ctl_table_header *head = grab_header(inode);
55167 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
55168+ int op = write ? MAY_WRITE : MAY_READ;
55169 ssize_t error;
55170 size_t res;
55171
55172@@ -492,7 +500,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
55173 * and won't be until we finish.
55174 */
55175 error = -EPERM;
55176- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
55177+ if (sysctl_perm(head, table, op))
55178 goto out;
55179
55180 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
55181@@ -500,6 +508,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
55182 if (!table->proc_handler)
55183 goto out;
55184
55185+#ifdef CONFIG_GRKERNSEC
55186+ error = -EPERM;
55187+ if (gr_handle_chroot_sysctl(op))
55188+ goto out;
55189+ dget(filp->f_path.dentry);
55190+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
55191+ dput(filp->f_path.dentry);
55192+ goto out;
55193+ }
55194+ dput(filp->f_path.dentry);
55195+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
55196+ goto out;
55197+ if (write && !capable(CAP_SYS_ADMIN))
55198+ goto out;
55199+#endif
55200+
55201 /* careful: calling conventions are nasty here */
55202 res = count;
55203 error = table->proc_handler(table, write, buf, &res, ppos);
55204@@ -597,6 +621,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
55205 return -ENOMEM;
55206 } else {
55207 d_set_d_op(child, &proc_sys_dentry_operations);
55208+
55209+ gr_handle_proc_create(child, inode);
55210+
55211 d_add(child, inode);
55212 }
55213 } else {
55214@@ -640,6 +667,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
55215 if ((*pos)++ < file->f_pos)
55216 return 0;
55217
55218+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
55219+ return 0;
55220+
55221 if (unlikely(S_ISLNK(table->mode)))
55222 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
55223 else
55224@@ -750,6 +780,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
55225 if (IS_ERR(head))
55226 return PTR_ERR(head);
55227
55228+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
55229+ return -ENOENT;
55230+
55231 generic_fillattr(inode, stat);
55232 if (table)
55233 stat->mode = (stat->mode & S_IFMT) | table->mode;
55234@@ -772,13 +805,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
55235 .llseek = generic_file_llseek,
55236 };
55237
55238-static const struct inode_operations proc_sys_inode_operations = {
55239+const struct inode_operations proc_sys_inode_operations = {
55240 .permission = proc_sys_permission,
55241 .setattr = proc_sys_setattr,
55242 .getattr = proc_sys_getattr,
55243 };
55244
55245-static const struct inode_operations proc_sys_dir_operations = {
55246+const struct inode_operations proc_sys_dir_operations = {
55247 .lookup = proc_sys_lookup,
55248 .permission = proc_sys_permission,
55249 .setattr = proc_sys_setattr,
55250@@ -854,7 +887,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
55251 static struct ctl_dir *new_dir(struct ctl_table_set *set,
55252 const char *name, int namelen)
55253 {
55254- struct ctl_table *table;
55255+ ctl_table_no_const *table;
55256 struct ctl_dir *new;
55257 struct ctl_node *node;
55258 char *new_name;
55259@@ -866,7 +899,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
55260 return NULL;
55261
55262 node = (struct ctl_node *)(new + 1);
55263- table = (struct ctl_table *)(node + 1);
55264+ table = (ctl_table_no_const *)(node + 1);
55265 new_name = (char *)(table + 2);
55266 memcpy(new_name, name, namelen);
55267 new_name[namelen] = '\0';
55268@@ -1035,7 +1068,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
55269 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
55270 struct ctl_table_root *link_root)
55271 {
55272- struct ctl_table *link_table, *entry, *link;
55273+ ctl_table_no_const *link_table, *link;
55274+ struct ctl_table *entry;
55275 struct ctl_table_header *links;
55276 struct ctl_node *node;
55277 char *link_name;
55278@@ -1058,7 +1092,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
55279 return NULL;
55280
55281 node = (struct ctl_node *)(links + 1);
55282- link_table = (struct ctl_table *)(node + nr_entries);
55283+ link_table = (ctl_table_no_const *)(node + nr_entries);
55284 link_name = (char *)&link_table[nr_entries + 1];
55285
55286 for (link = link_table, entry = table; entry->procname; link++, entry++) {
55287@@ -1306,8 +1340,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
55288 struct ctl_table_header ***subheader, struct ctl_table_set *set,
55289 struct ctl_table *table)
55290 {
55291- struct ctl_table *ctl_table_arg = NULL;
55292- struct ctl_table *entry, *files;
55293+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
55294+ struct ctl_table *entry;
55295 int nr_files = 0;
55296 int nr_dirs = 0;
55297 int err = -ENOMEM;
55298@@ -1319,10 +1353,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
55299 nr_files++;
55300 }
55301
55302- files = table;
55303 /* If there are mixed files and directories we need a new table */
55304 if (nr_dirs && nr_files) {
55305- struct ctl_table *new;
55306+ ctl_table_no_const *new;
55307 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
55308 GFP_KERNEL);
55309 if (!files)
55310@@ -1340,7 +1373,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
55311 /* Register everything except a directory full of subdirectories */
55312 if (nr_files || !nr_dirs) {
55313 struct ctl_table_header *header;
55314- header = __register_sysctl_table(set, path, files);
55315+ header = __register_sysctl_table(set, path, files ? files : table);
55316 if (!header) {
55317 kfree(ctl_table_arg);
55318 goto out;
55319diff --git a/fs/proc/root.c b/fs/proc/root.c
55320index c6e9fac..a740964 100644
55321--- a/fs/proc/root.c
55322+++ b/fs/proc/root.c
55323@@ -176,7 +176,15 @@ void __init proc_root_init(void)
55324 #ifdef CONFIG_PROC_DEVICETREE
55325 proc_device_tree_init();
55326 #endif
55327+#ifdef CONFIG_GRKERNSEC_PROC_ADD
55328+#ifdef CONFIG_GRKERNSEC_PROC_USER
55329+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
55330+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55331+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
55332+#endif
55333+#else
55334 proc_mkdir("bus", NULL);
55335+#endif
55336 proc_sys_init();
55337 }
55338
55339diff --git a/fs/proc/self.c b/fs/proc/self.c
55340index aa5cc3b..c91a5d0 100644
55341--- a/fs/proc/self.c
55342+++ b/fs/proc/self.c
55343@@ -37,7 +37,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
55344 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
55345 void *cookie)
55346 {
55347- char *s = nd_get_link(nd);
55348+ const char *s = nd_get_link(nd);
55349 if (!IS_ERR(s))
55350 kfree(s);
55351 }
55352diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
55353index ca5ce7f..02c1cf0 100644
55354--- a/fs/proc/task_mmu.c
55355+++ b/fs/proc/task_mmu.c
55356@@ -11,12 +11,19 @@
55357 #include <linux/rmap.h>
55358 #include <linux/swap.h>
55359 #include <linux/swapops.h>
55360+#include <linux/grsecurity.h>
55361
55362 #include <asm/elf.h>
55363 #include <asm/uaccess.h>
55364 #include <asm/tlbflush.h>
55365 #include "internal.h"
55366
55367+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55368+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
55369+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
55370+ _mm->pax_flags & MF_PAX_SEGMEXEC))
55371+#endif
55372+
55373 void task_mem(struct seq_file *m, struct mm_struct *mm)
55374 {
55375 unsigned long data, text, lib, swap;
55376@@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
55377 "VmExe:\t%8lu kB\n"
55378 "VmLib:\t%8lu kB\n"
55379 "VmPTE:\t%8lu kB\n"
55380- "VmSwap:\t%8lu kB\n",
55381- hiwater_vm << (PAGE_SHIFT-10),
55382+ "VmSwap:\t%8lu kB\n"
55383+
55384+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
55385+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
55386+#endif
55387+
55388+ ,hiwater_vm << (PAGE_SHIFT-10),
55389 total_vm << (PAGE_SHIFT-10),
55390 mm->locked_vm << (PAGE_SHIFT-10),
55391 mm->pinned_vm << (PAGE_SHIFT-10),
55392@@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
55393 data << (PAGE_SHIFT-10),
55394 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
55395 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
55396- swap << (PAGE_SHIFT-10));
55397+ swap << (PAGE_SHIFT-10)
55398+
55399+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
55400+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55401+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
55402+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
55403+#else
55404+ , mm->context.user_cs_base
55405+ , mm->context.user_cs_limit
55406+#endif
55407+#endif
55408+
55409+ );
55410 }
55411
55412 unsigned long task_vsize(struct mm_struct *mm)
55413@@ -277,13 +301,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
55414 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
55415 }
55416
55417- /* We don't show the stack guard page in /proc/maps */
55418+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55419+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
55420+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
55421+#else
55422 start = vma->vm_start;
55423- if (stack_guard_page_start(vma, start))
55424- start += PAGE_SIZE;
55425 end = vma->vm_end;
55426- if (stack_guard_page_end(vma, end))
55427- end -= PAGE_SIZE;
55428+#endif
55429
55430 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
55431 start,
55432@@ -292,7 +316,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
55433 flags & VM_WRITE ? 'w' : '-',
55434 flags & VM_EXEC ? 'x' : '-',
55435 flags & VM_MAYSHARE ? 's' : 'p',
55436+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55437+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
55438+#else
55439 pgoff,
55440+#endif
55441 MAJOR(dev), MINOR(dev), ino, &len);
55442
55443 /*
55444@@ -301,7 +329,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
55445 */
55446 if (file) {
55447 pad_len_spaces(m, len);
55448- seq_path(m, &file->f_path, "\n");
55449+ seq_path(m, &file->f_path, "\n\\");
55450 goto done;
55451 }
55452
55453@@ -327,8 +355,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
55454 * Thread stack in /proc/PID/task/TID/maps or
55455 * the main process stack.
55456 */
55457- if (!is_pid || (vma->vm_start <= mm->start_stack &&
55458- vma->vm_end >= mm->start_stack)) {
55459+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
55460+ (vma->vm_start <= mm->start_stack &&
55461+ vma->vm_end >= mm->start_stack)) {
55462 name = "[stack]";
55463 } else {
55464 /* Thread stack in /proc/PID/maps */
55465@@ -352,6 +381,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
55466 struct proc_maps_private *priv = m->private;
55467 struct task_struct *task = priv->task;
55468
55469+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55470+ if (current->exec_id != m->exec_id) {
55471+ gr_log_badprocpid("maps");
55472+ return 0;
55473+ }
55474+#endif
55475+
55476 show_map_vma(m, vma, is_pid);
55477
55478 if (m->count < m->size) /* vma is copied successfully */
55479@@ -589,12 +625,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
55480 .private = &mss,
55481 };
55482
55483+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55484+ if (current->exec_id != m->exec_id) {
55485+ gr_log_badprocpid("smaps");
55486+ return 0;
55487+ }
55488+#endif
55489 memset(&mss, 0, sizeof mss);
55490- mss.vma = vma;
55491- /* mmap_sem is held in m_start */
55492- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
55493- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
55494-
55495+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55496+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
55497+#endif
55498+ mss.vma = vma;
55499+ /* mmap_sem is held in m_start */
55500+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
55501+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
55502+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55503+ }
55504+#endif
55505 show_map_vma(m, vma, is_pid);
55506
55507 seq_printf(m,
55508@@ -612,7 +659,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
55509 "KernelPageSize: %8lu kB\n"
55510 "MMUPageSize: %8lu kB\n"
55511 "Locked: %8lu kB\n",
55512+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55513+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
55514+#else
55515 (vma->vm_end - vma->vm_start) >> 10,
55516+#endif
55517 mss.resident >> 10,
55518 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
55519 mss.shared_clean >> 10,
55520@@ -1264,6 +1315,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
55521 int n;
55522 char buffer[50];
55523
55524+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55525+ if (current->exec_id != m->exec_id) {
55526+ gr_log_badprocpid("numa_maps");
55527+ return 0;
55528+ }
55529+#endif
55530+
55531 if (!mm)
55532 return 0;
55533
55534@@ -1281,11 +1339,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
55535 mpol_to_str(buffer, sizeof(buffer), pol);
55536 mpol_cond_put(pol);
55537
55538+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55539+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
55540+#else
55541 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
55542+#endif
55543
55544 if (file) {
55545 seq_printf(m, " file=");
55546- seq_path(m, &file->f_path, "\n\t= ");
55547+ seq_path(m, &file->f_path, "\n\t\\= ");
55548 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
55549 seq_printf(m, " heap");
55550 } else {
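
The task_mmu.c hunks above all follow one sanitization pattern: when PAX_RAND_FLAGS() reports that the owning mm has address randomization active, every address-bearing field (vm_start, vm_end, pgoff, segment bases) is replaced with 0 before being formatted, so the /proc output leaks no layout information to other tasks. A minimal user-space sketch of that pattern, with mm_ctx and MF_RANDMMAP as hypothetical stand-ins for struct mm_struct and the PaX flag bits:

#include <stdio.h>

#define MF_RANDMMAP 0x1UL               /* hypothetical stand-in for MF_PAX_RANDMMAP */

struct mm_ctx {                         /* hypothetical stand-in for struct mm_struct */
        unsigned long pax_flags;
        unsigned long vm_start, vm_end;
};

/* Print one maps-style line, masking addresses when randomization is active. */
static void show_vma(const struct mm_ctx *mm)
{
        int rand = (mm->pax_flags & MF_RANDMMAP) != 0;
        unsigned long start = rand ? 0UL : mm->vm_start;
        unsigned long end   = rand ? 0UL : mm->vm_end;

        printf("%08lx-%08lx\n", start, end);
}

int main(void)
{
        struct mm_ctx mm = { MF_RANDMMAP, 0x400000UL, 0x401000UL };

        show_vma(&mm);                  /* prints 00000000-00000000 */
        return 0;
}
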
55551diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
55552index 1ccfa53..0848f95 100644
55553--- a/fs/proc/task_nommu.c
55554+++ b/fs/proc/task_nommu.c
55555@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
55556 else
55557 bytes += kobjsize(mm);
55558
55559- if (current->fs && current->fs->users > 1)
55560+ if (current->fs && atomic_read(&current->fs->users) > 1)
55561 sbytes += kobjsize(current->fs);
55562 else
55563 bytes += kobjsize(current->fs);
55564@@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
55565
55566 if (file) {
55567 pad_len_spaces(m, len);
55568- seq_path(m, &file->f_path, "");
55569+ seq_path(m, &file->f_path, "\n\\");
55570 } else if (mm) {
55571 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
55572
55573diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
55574index b00fcc9..e0c6381 100644
55575--- a/fs/qnx6/qnx6.h
55576+++ b/fs/qnx6/qnx6.h
55577@@ -74,7 +74,7 @@ enum {
55578 BYTESEX_BE,
55579 };
55580
55581-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
55582+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
55583 {
55584 if (sbi->s_bytesex == BYTESEX_LE)
55585 return le64_to_cpu((__force __le64)n);
55586@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
55587 return (__force __fs64)cpu_to_be64(n);
55588 }
55589
55590-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
55591+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
55592 {
55593 if (sbi->s_bytesex == BYTESEX_LE)
55594 return le32_to_cpu((__force __le32)n);
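
The only change to these qnx6 helpers is the __intentional_overflow(-1) attribute, which tells the PaX size_overflow gcc plugin that arithmetic inside byte-order conversion may wrap without that being a bug. For reference, the conversion pattern itself in standalone form (the little-endian-host assumption is hard-coded for the sketch):

#include <stdint.h>
#include <stdio.h>

enum bytesex { BYTESEX_LE, BYTESEX_BE };

static uint32_t bswap32(uint32_t x)
{
        return (x >> 24) | ((x >> 8) & 0x0000ff00u) |
               ((x << 8) & 0x00ff0000u) | (x << 24);
}

/* fs32_to_cpu pattern: pick the conversion by the superblock's byte sex. */
static uint32_t fs32_to_cpu(enum bytesex sex, uint32_t on_disk)
{
        const int host_is_le = 1;       /* assumption: little-endian host */
        int disk_is_le = (sex == BYTESEX_LE);

        return (host_is_le == disk_is_le) ? on_disk : bswap32(on_disk);
}

int main(void)
{
        printf("%08x\n", (unsigned)fs32_to_cpu(BYTESEX_BE, 0x01020304u)); /* 04030201 */
        return 0;
}
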
55595diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
55596index 16e8abb..2dcf914 100644
55597--- a/fs/quota/netlink.c
55598+++ b/fs/quota/netlink.c
55599@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
55600 void quota_send_warning(struct kqid qid, dev_t dev,
55601 const char warntype)
55602 {
55603- static atomic_t seq;
55604+ static atomic_unchecked_t seq;
55605 struct sk_buff *skb;
55606 void *msg_head;
55607 int ret;
55608@@ -49,7 +49,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
55609 "VFS: Not enough memory to send quota warning.\n");
55610 return;
55611 }
55612- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
55613+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
55614 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
55615 if (!msg_head) {
55616 printk(KERN_ERR
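
The quota warning counter only has to yield distinct netlink sequence numbers, so wrap-around is harmless; switching it to atomic_unchecked_t exempts it from PaX's reference-count overflow detection rather than changing its behavior. A rough user-space analogue of such a free-running sequence counter, using C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq;                 /* wrap-around is harmless by design */

static unsigned int next_msg_id(void)
{
        /* mirrors atomic_add_return_unchecked(1, &seq): return the new value */
        return atomic_fetch_add(&seq, 1) + 1;
}

int main(void)
{
        unsigned int a = next_msg_id();
        unsigned int b = next_msg_id();

        printf("%u %u\n", a, b);        /* 1 2 */
        return 0;
}
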
55617diff --git a/fs/readdir.c b/fs/readdir.c
55618index 5e69ef5..e5d9099 100644
55619--- a/fs/readdir.c
55620+++ b/fs/readdir.c
55621@@ -17,6 +17,7 @@
55622 #include <linux/security.h>
55623 #include <linux/syscalls.h>
55624 #include <linux/unistd.h>
55625+#include <linux/namei.h>
55626
55627 #include <asm/uaccess.h>
55628
55629@@ -67,6 +68,7 @@ struct old_linux_dirent {
55630
55631 struct readdir_callback {
55632 struct old_linux_dirent __user * dirent;
55633+ struct file * file;
55634 int result;
55635 };
55636
55637@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
55638 buf->result = -EOVERFLOW;
55639 return -EOVERFLOW;
55640 }
55641+
55642+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
55643+ return 0;
55644+
55645 buf->result++;
55646 dirent = buf->dirent;
55647 if (!access_ok(VERIFY_WRITE, dirent,
55648@@ -114,6 +120,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
55649
55650 buf.result = 0;
55651 buf.dirent = dirent;
55652+ buf.file = f.file;
55653
55654 error = vfs_readdir(f.file, fillonedir, &buf);
55655 if (buf.result)
55656@@ -139,6 +146,7 @@ struct linux_dirent {
55657 struct getdents_callback {
55658 struct linux_dirent __user * current_dir;
55659 struct linux_dirent __user * previous;
55660+ struct file * file;
55661 int count;
55662 int error;
55663 };
55664@@ -160,6 +168,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
55665 buf->error = -EOVERFLOW;
55666 return -EOVERFLOW;
55667 }
55668+
55669+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
55670+ return 0;
55671+
55672 dirent = buf->previous;
55673 if (dirent) {
55674 if (__put_user(offset, &dirent->d_off))
55675@@ -205,6 +217,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
55676 buf.previous = NULL;
55677 buf.count = count;
55678 buf.error = 0;
55679+ buf.file = f.file;
55680
55681 error = vfs_readdir(f.file, filldir, &buf);
55682 if (error >= 0)
55683@@ -223,6 +236,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
55684 struct getdents_callback64 {
55685 struct linux_dirent64 __user * current_dir;
55686 struct linux_dirent64 __user * previous;
55687+ struct file *file;
55688 int count;
55689 int error;
55690 };
55691@@ -238,6 +252,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
55692 buf->error = -EINVAL; /* only used if we fail.. */
55693 if (reclen > buf->count)
55694 return -EINVAL;
55695+
55696+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
55697+ return 0;
55698+
55699 dirent = buf->previous;
55700 if (dirent) {
55701 if (__put_user(offset, &dirent->d_off))
55702@@ -283,6 +301,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
55703
55704 buf.current_dir = dirent;
55705 buf.previous = NULL;
55706+ buf.file = f.file;
55707 buf.count = count;
55708 buf.error = 0;
55709
55710@@ -291,7 +310,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
55711 error = buf.error;
55712 lastdirent = buf.previous;
55713 if (lastdirent) {
55714- typeof(lastdirent->d_off) d_off = f.file->f_pos;
55715+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
55716 if (__put_user(d_off, &lastdirent->d_off))
55717 error = -EFAULT;
55718 else
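
All three readdir callback variants gain the same filter: stash the struct file in the callback state, then have the fill function return 0 -- entry consumed, nothing emitted -- whenever the gr_acl_handle_filldir() policy hook hides the name. A compact user-space sketch of that filtering contract, with a hypothetical policy function standing in for the grsecurity hook:

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the gr_acl_handle_filldir() policy hook. */
static int policy_may_list(const char *name)
{
        return strcmp(name, "hidden") != 0;
}

/* Return 0 to keep the directory walk going, exactly as filldir does. */
static int filter_filldir(const char *name, int namlen)
{
        if (!policy_may_list(name))
                return 0;               /* drop the entry without an error */
        printf("%.*s\n", namlen, name);
        return 0;
}

int main(void)
{
        const char *names[] = { "a", "hidden", "b" };
        int i;

        for (i = 0; i < 3; i++)
                filter_filldir(names[i], (int)strlen(names[i]));
        return 0;                       /* lists "a" and "b" only */
}
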
55719diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
55720index 2b7882b..1c5ef48 100644
55721--- a/fs/reiserfs/do_balan.c
55722+++ b/fs/reiserfs/do_balan.c
55723@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
55724 return;
55725 }
55726
55727- atomic_inc(&(fs_generation(tb->tb_sb)));
55728+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
55729 do_balance_starts(tb);
55730
55731 /* balance leaf returns 0 except if combining L R and S into
55732diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
55733index e60e870..f40ac16 100644
55734--- a/fs/reiserfs/procfs.c
55735+++ b/fs/reiserfs/procfs.c
55736@@ -112,7 +112,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
55737 "SMALL_TAILS " : "NO_TAILS ",
55738 replay_only(sb) ? "REPLAY_ONLY " : "",
55739 convert_reiserfs(sb) ? "CONV " : "",
55740- atomic_read(&r->s_generation_counter),
55741+ atomic_read_unchecked(&r->s_generation_counter),
55742 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
55743 SF(s_do_balance), SF(s_unneeded_left_neighbor),
55744 SF(s_good_search_by_key_reada), SF(s_bmaps),
55745diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
55746index 157e474..65a6114 100644
55747--- a/fs/reiserfs/reiserfs.h
55748+++ b/fs/reiserfs/reiserfs.h
55749@@ -453,7 +453,7 @@ struct reiserfs_sb_info {
55750 /* Comment? -Hans */
55751 wait_queue_head_t s_wait;
55752 /* To be obsoleted soon by per buffer seals.. -Hans */
55753- atomic_t s_generation_counter; // increased by one every time the
55754+ atomic_unchecked_t s_generation_counter; // increased by one every time the
55755 // tree gets re-balanced
55756 unsigned long s_properties; /* File system properties. Currently holds
55757 on-disk FS format */
55758@@ -1978,7 +1978,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
55759 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
55760
55761 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
55762-#define get_generation(s) atomic_read (&fs_generation(s))
55763+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
55764 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
55765 #define __fs_changed(gen,s) (gen != get_generation (s))
55766 #define fs_changed(gen,s) \
55767diff --git a/fs/select.c b/fs/select.c
55768index 2ef72d9..f213b17 100644
55769--- a/fs/select.c
55770+++ b/fs/select.c
55771@@ -20,6 +20,7 @@
55772 #include <linux/export.h>
55773 #include <linux/slab.h>
55774 #include <linux/poll.h>
55775+#include <linux/security.h>
55776 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
55777 #include <linux/file.h>
55778 #include <linux/fdtable.h>
55779@@ -826,6 +827,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
55780 struct poll_list *walk = head;
55781 unsigned long todo = nfds;
55782
55783+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
55784 if (nfds > rlimit(RLIMIT_NOFILE))
55785 return -EINVAL;
55786
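
gr_learn_resource() only feeds the requested descriptor count into grsecurity's learning mode; the rlimit comparison that follows it is what actually rejects oversized polls. The same check, reproduced with the standard getrlimit() API:

#include <stdio.h>
#include <sys/resource.h>

/* Mirrors: if (nfds > rlimit(RLIMIT_NOFILE)) return -EINVAL; */
static int poll_nfds_ok(unsigned long nfds)
{
        struct rlimit rl;

        if (getrlimit(RLIMIT_NOFILE, &rl) != 0)
                return 0;
        return nfds <= rl.rlim_cur;
}

int main(void)
{
        printf("1024 fds: %d\n", poll_nfds_ok(1024));
        printf("2^30 fds: %d\n", poll_nfds_ok(1UL << 30));
        return 0;
}
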
55787diff --git a/fs/seq_file.c b/fs/seq_file.c
55788index f2bc3df..239d4f6 100644
55789--- a/fs/seq_file.c
55790+++ b/fs/seq_file.c
55791@@ -10,6 +10,7 @@
55792 #include <linux/seq_file.h>
55793 #include <linux/slab.h>
55794 #include <linux/cred.h>
55795+#include <linux/sched.h>
55796
55797 #include <asm/uaccess.h>
55798 #include <asm/page.h>
55799@@ -60,6 +61,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
55800 #ifdef CONFIG_USER_NS
55801 p->user_ns = file->f_cred->user_ns;
55802 #endif
55803+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55804+ p->exec_id = current->exec_id;
55805+#endif
55806
55807 /*
55808 * Wrappers around seq_open(e.g. swaps_open) need to be
55809@@ -96,7 +100,7 @@ static int traverse(struct seq_file *m, loff_t offset)
55810 return 0;
55811 }
55812 if (!m->buf) {
55813- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
55814+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
55815 if (!m->buf)
55816 return -ENOMEM;
55817 }
55818@@ -136,7 +140,7 @@ static int traverse(struct seq_file *m, loff_t offset)
55819 Eoverflow:
55820 m->op->stop(m, p);
55821 kfree(m->buf);
55822- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
55823+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
55824 return !m->buf ? -ENOMEM : -EAGAIN;
55825 }
55826
55827@@ -191,7 +195,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
55828
55829 /* grab buffer if we didn't have one */
55830 if (!m->buf) {
55831- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
55832+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
55833 if (!m->buf)
55834 goto Enomem;
55835 }
55836@@ -232,7 +236,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
55837 goto Fill;
55838 m->op->stop(m, p);
55839 kfree(m->buf);
55840- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
55841+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
55842 if (!m->buf)
55843 goto Enomem;
55844 m->count = 0;
55845@@ -581,7 +585,7 @@ static void single_stop(struct seq_file *p, void *v)
55846 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
55847 void *data)
55848 {
55849- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
55850+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
55851 int res = -ENOMEM;
55852
55853 if (op) {
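
The exec_id scheme pairs with the /proc checks earlier in this patch: seq_open() records an identifier of the opening task, and each show handler compares it against the current reader, so a file descriptor passed across an exec boundary reads back empty. A schematic user-space analogue, where exec_id is a hypothetical counter bumped on every exec:

#include <stdio.h>

static unsigned long current_exec_id = 1;       /* hypothetical: bumped on exec */

struct seq_ctx {
        unsigned long exec_id;                  /* captured at open time */
};

static void seq_ctx_open(struct seq_ctx *m)
{
        m->exec_id = current_exec_id;
}

static int seq_ctx_show(const struct seq_ctx *m)
{
        if (current_exec_id != m->exec_id)
                return 0;                       /* opener != reader: emit nothing */
        printf("sensitive contents\n");
        return 0;
}

int main(void)
{
        struct seq_ctx m;

        seq_ctx_open(&m);
        seq_ctx_show(&m);                       /* prints */
        current_exec_id++;                      /* simulate an exec in between */
        seq_ctx_show(&m);                       /* silently refuses */
        return 0;
}
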
55854diff --git a/fs/splice.c b/fs/splice.c
55855index 6909d89..5b2e8f9 100644
55856--- a/fs/splice.c
55857+++ b/fs/splice.c
55858@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
55859 pipe_lock(pipe);
55860
55861 for (;;) {
55862- if (!pipe->readers) {
55863+ if (!atomic_read(&pipe->readers)) {
55864 send_sig(SIGPIPE, current, 0);
55865 if (!ret)
55866 ret = -EPIPE;
55867@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
55868 do_wakeup = 0;
55869 }
55870
55871- pipe->waiting_writers++;
55872+ atomic_inc(&pipe->waiting_writers);
55873 pipe_wait(pipe);
55874- pipe->waiting_writers--;
55875+ atomic_dec(&pipe->waiting_writers);
55876 }
55877
55878 pipe_unlock(pipe);
55879@@ -563,7 +563,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
55880 old_fs = get_fs();
55881 set_fs(get_ds());
55882 /* The cast to a user pointer is valid due to the set_fs() */
55883- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
55884+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
55885 set_fs(old_fs);
55886
55887 return res;
55888@@ -578,7 +578,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
55889 old_fs = get_fs();
55890 set_fs(get_ds());
55891 /* The cast to a user pointer is valid due to the set_fs() */
55892- res = vfs_write(file, (const char __user *)buf, count, &pos);
55893+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
55894 set_fs(old_fs);
55895
55896 return res;
55897@@ -630,7 +630,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
55898 goto err;
55899
55900 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
55901- vec[i].iov_base = (void __user *) page_address(page);
55902+ vec[i].iov_base = (void __force_user *) page_address(page);
55903 vec[i].iov_len = this_len;
55904 spd.pages[i] = page;
55905 spd.nr_pages++;
55906@@ -851,10 +851,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
55907 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
55908 {
55909 while (!pipe->nrbufs) {
55910- if (!pipe->writers)
55911+ if (!atomic_read(&pipe->writers))
55912 return 0;
55913
55914- if (!pipe->waiting_writers && sd->num_spliced)
55915+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
55916 return 0;
55917
55918 if (sd->flags & SPLICE_F_NONBLOCK)
55919@@ -1189,7 +1189,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
55920 * out of the pipe right after the splice_to_pipe(). So set
55921 * PIPE_READERS appropriately.
55922 */
55923- pipe->readers = 1;
55924+ atomic_set(&pipe->readers, 1);
55925
55926 current->splice_pipe = pipe;
55927 }
55928@@ -1738,9 +1738,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
55929 ret = -ERESTARTSYS;
55930 break;
55931 }
55932- if (!pipe->writers)
55933+ if (!atomic_read(&pipe->writers))
55934 break;
55935- if (!pipe->waiting_writers) {
55936+ if (!atomic_read(&pipe->waiting_writers)) {
55937 if (flags & SPLICE_F_NONBLOCK) {
55938 ret = -EAGAIN;
55939 break;
55940@@ -1772,7 +1772,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
55941 pipe_lock(pipe);
55942
55943 while (pipe->nrbufs >= pipe->buffers) {
55944- if (!pipe->readers) {
55945+ if (!atomic_read(&pipe->readers)) {
55946 send_sig(SIGPIPE, current, 0);
55947 ret = -EPIPE;
55948 break;
55949@@ -1785,9 +1785,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
55950 ret = -ERESTARTSYS;
55951 break;
55952 }
55953- pipe->waiting_writers++;
55954+ atomic_inc(&pipe->waiting_writers);
55955 pipe_wait(pipe);
55956- pipe->waiting_writers--;
55957+ atomic_dec(&pipe->waiting_writers);
55958 }
55959
55960 pipe_unlock(pipe);
55961@@ -1823,14 +1823,14 @@ retry:
55962 pipe_double_lock(ipipe, opipe);
55963
55964 do {
55965- if (!opipe->readers) {
55966+ if (!atomic_read(&opipe->readers)) {
55967 send_sig(SIGPIPE, current, 0);
55968 if (!ret)
55969 ret = -EPIPE;
55970 break;
55971 }
55972
55973- if (!ipipe->nrbufs && !ipipe->writers)
55974+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
55975 break;
55976
55977 /*
55978@@ -1927,7 +1927,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
55979 pipe_double_lock(ipipe, opipe);
55980
55981 do {
55982- if (!opipe->readers) {
55983+ if (!atomic_read(&opipe->readers)) {
55984 send_sig(SIGPIPE, current, 0);
55985 if (!ret)
55986 ret = -EPIPE;
55987@@ -1972,7 +1972,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
55988 * return EAGAIN if we have the potential of some data in the
55989 * future, otherwise just return 0
55990 */
55991- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
55992+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
55993 ret = -EAGAIN;
55994
55995 pipe_unlock(ipipe);
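
The splice.c changes convert the pipe reader/writer counts from plain ints to atomic_t so the hardened kernel can treat them as audited counters; the __force_user casts in the same hunks are sparse-checker annotations only and generate no code. The lock-free check pattern in miniature:

#include <stdatomic.h>
#include <stdio.h>

struct pipe_state {                     /* reduced stand-in for pipe_inode_info */
        atomic_int readers;
        atomic_int waiting_writers;
};

/* Mirrors: if (!atomic_read(&pipe->readers)) -> SIGPIPE/-EPIPE */
static int writer_should_abort(struct pipe_state *p)
{
        return atomic_load(&p->readers) == 0;
}

int main(void)
{
        struct pipe_state p;

        atomic_init(&p.readers, 1);
        atomic_init(&p.waiting_writers, 0);
        printf("abort=%d\n", writer_should_abort(&p)); /* abort=0 */
        atomic_store(&p.readers, 0);
        printf("abort=%d\n", writer_should_abort(&p)); /* abort=1 */
        return 0;
}
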
55996diff --git a/fs/stat.c b/fs/stat.c
55997index 14f4545..9b7f55b 100644
55998--- a/fs/stat.c
55999+++ b/fs/stat.c
56000@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
56001 stat->gid = inode->i_gid;
56002 stat->rdev = inode->i_rdev;
56003 stat->size = i_size_read(inode);
56004- stat->atime = inode->i_atime;
56005- stat->mtime = inode->i_mtime;
56006+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
56007+ stat->atime = inode->i_ctime;
56008+ stat->mtime = inode->i_ctime;
56009+ } else {
56010+ stat->atime = inode->i_atime;
56011+ stat->mtime = inode->i_mtime;
56012+ }
56013 stat->ctime = inode->i_ctime;
56014 stat->blksize = (1 << inode->i_blkbits);
56015 stat->blocks = inode->i_blocks;
56016@@ -46,8 +51,14 @@ int vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
56017 if (retval)
56018 return retval;
56019
56020- if (inode->i_op->getattr)
56021- return inode->i_op->getattr(mnt, dentry, stat);
56022+ if (inode->i_op->getattr) {
56023+ retval = inode->i_op->getattr(mnt, dentry, stat);
56024+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
56025+ stat->atime = stat->ctime;
56026+ stat->mtime = stat->ctime;
56027+ }
56028+ return retval;
56029+ }
56030
56031 generic_fillattr(inode, stat);
56032 return 0;
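
For device nodes, the stat() changes collapse atime and mtime to ctime whenever the caller lacks CAP_MKNOD, so watching timestamps on something like /dev/ptmx reveals nothing about other users' activity. A hedged sketch of just the masking step, with hypothetical stand-ins for the is_sidechannel_device() and capability helpers:

#include <stdio.h>
#include <time.h>

struct kstat_lite {                     /* reduced stand-in for struct kstat */
        time_t atime, mtime, ctime;
};

/* Hypothetical stand-ins for is_sidechannel_device()/capable_nolog(). */
static int is_sidechannel_device(void) { return 1; }
static int caller_may_mknod(void)      { return 0; }

static void mask_times(struct kstat_lite *st)
{
        if (is_sidechannel_device() && !caller_may_mknod()) {
                st->atime = st->ctime;  /* hide the real access time */
                st->mtime = st->ctime;  /* hide the real modify time */
        }
}

int main(void)
{
        struct kstat_lite st = { .atime = 100, .mtime = 200, .ctime = 50 };

        mask_times(&st);
        printf("%ld %ld %ld\n", (long)st.atime, (long)st.mtime, (long)st.ctime);
        return 0;                       /* prints 50 50 50 */
}
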
56033diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
56034index 2fbdff6..5530a61 100644
56035--- a/fs/sysfs/dir.c
56036+++ b/fs/sysfs/dir.c
56037@@ -685,6 +685,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
56038 struct sysfs_dirent *sd;
56039 int rc;
56040
56041+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
56042+ const char *parent_name = parent_sd->s_name;
56043+
56044+ mode = S_IFDIR | S_IRWXU;
56045+
56046+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
56047+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
56048+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
56049+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
56050+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
56051+#endif
56052+
56053 /* allocate */
56054 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
56055 if (!sd)
56056diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
56057index 602f56d..6853db8 100644
56058--- a/fs/sysfs/file.c
56059+++ b/fs/sysfs/file.c
56060@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
56061
56062 struct sysfs_open_dirent {
56063 atomic_t refcnt;
56064- atomic_t event;
56065+ atomic_unchecked_t event;
56066 wait_queue_head_t poll;
56067 struct list_head buffers; /* goes through sysfs_buffer.list */
56068 };
56069@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
56070 if (!sysfs_get_active(attr_sd))
56071 return -ENODEV;
56072
56073- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
56074+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
56075 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
56076
56077 sysfs_put_active(attr_sd);
56078@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
56079 return -ENOMEM;
56080
56081 atomic_set(&new_od->refcnt, 0);
56082- atomic_set(&new_od->event, 1);
56083+ atomic_set_unchecked(&new_od->event, 1);
56084 init_waitqueue_head(&new_od->poll);
56085 INIT_LIST_HEAD(&new_od->buffers);
56086 goto retry;
56087@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
56088
56089 sysfs_put_active(attr_sd);
56090
56091- if (buffer->event != atomic_read(&od->event))
56092+ if (buffer->event != atomic_read_unchecked(&od->event))
56093 goto trigger;
56094
56095 return DEFAULT_POLLMASK;
56096@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
56097
56098 od = sd->s_attr.open;
56099 if (od) {
56100- atomic_inc(&od->event);
56101+ atomic_inc_unchecked(&od->event);
56102 wake_up_interruptible(&od->poll);
56103 }
56104
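
The sysfs poll machinery is a snapshot-and-compare scheme: fill_read_buffer() records the per-attribute event counter, sysfs_notify_dirent() bumps it, and poll reports an event whenever the two differ; the _unchecked variants again only opt the counter out of overflow detection. The scheme in miniature:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint event = 1;           /* per-attribute event counter */

struct buffer {
        unsigned int event;             /* snapshot taken while filling */
};

static void fill_read_buffer(struct buffer *b)
{
        b->event = atomic_load(&event);
}

static void notify(void)                /* sysfs_notify_dirent() analogue */
{
        atomic_fetch_add(&event, 1);
}

static int needs_repoll(const struct buffer *b)
{
        return b->event != atomic_load(&event);
}

int main(void)
{
        struct buffer b;

        fill_read_buffer(&b);
        printf("%d\n", needs_repoll(&b)); /* 0: nothing new */
        notify();
        printf("%d\n", needs_repoll(&b)); /* 1: attribute changed */
        return 0;
}
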
56105diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
56106index 3c9eb56..9dea5be 100644
56107--- a/fs/sysfs/symlink.c
56108+++ b/fs/sysfs/symlink.c
56109@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
56110
56111 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
56112 {
56113- char *page = nd_get_link(nd);
56114+ const char *page = nd_get_link(nd);
56115 if (!IS_ERR(page))
56116 free_page((unsigned long)page);
56117 }
56118diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
56119index 69d4889..a810bd4 100644
56120--- a/fs/sysv/sysv.h
56121+++ b/fs/sysv/sysv.h
56122@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
56123 #endif
56124 }
56125
56126-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
56127+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
56128 {
56129 if (sbi->s_bytesex == BYTESEX_PDP)
56130 return PDP_swab((__force __u32)n);
56131diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
56132index e18b988..f1d4ad0f 100644
56133--- a/fs/ubifs/io.c
56134+++ b/fs/ubifs/io.c
56135@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
56136 return err;
56137 }
56138
56139-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
56140+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
56141 {
56142 int err;
56143
56144diff --git a/fs/udf/misc.c b/fs/udf/misc.c
56145index c175b4d..8f36a16 100644
56146--- a/fs/udf/misc.c
56147+++ b/fs/udf/misc.c
56148@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
56149
56150 u8 udf_tag_checksum(const struct tag *t)
56151 {
56152- u8 *data = (u8 *)t;
56153+ const u8 *data = (const u8 *)t;
56154 u8 checksum = 0;
56155 int i;
56156 for (i = 0; i < sizeof(struct tag); ++i)
56157diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
56158index 8d974c4..b82f6ec 100644
56159--- a/fs/ufs/swab.h
56160+++ b/fs/ufs/swab.h
56161@@ -22,7 +22,7 @@ enum {
56162 BYTESEX_BE
56163 };
56164
56165-static inline u64
56166+static inline u64 __intentional_overflow(-1)
56167 fs64_to_cpu(struct super_block *sbp, __fs64 n)
56168 {
56169 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
56170@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
56171 return (__force __fs64)cpu_to_be64(n);
56172 }
56173
56174-static inline u32
56175+static inline u32 __intentional_overflow(-1)
56176 fs32_to_cpu(struct super_block *sbp, __fs32 n)
56177 {
56178 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
56179diff --git a/fs/utimes.c b/fs/utimes.c
56180index f4fb7ec..3fe03c0 100644
56181--- a/fs/utimes.c
56182+++ b/fs/utimes.c
56183@@ -1,6 +1,7 @@
56184 #include <linux/compiler.h>
56185 #include <linux/file.h>
56186 #include <linux/fs.h>
56187+#include <linux/security.h>
56188 #include <linux/linkage.h>
56189 #include <linux/mount.h>
56190 #include <linux/namei.h>
56191@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
56192 goto mnt_drop_write_and_out;
56193 }
56194 }
56195+
56196+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
56197+ error = -EACCES;
56198+ goto mnt_drop_write_and_out;
56199+ }
56200+
56201 mutex_lock(&inode->i_mutex);
56202 error = notify_change(path->dentry, &newattrs);
56203 mutex_unlock(&inode->i_mutex);
56204diff --git a/fs/xattr.c b/fs/xattr.c
56205index 3377dff..4feded6 100644
56206--- a/fs/xattr.c
56207+++ b/fs/xattr.c
56208@@ -319,7 +319,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
56209 * Extended attribute SET operations
56210 */
56211 static long
56212-setxattr(struct dentry *d, const char __user *name, const void __user *value,
56213+setxattr(struct path *path, const char __user *name, const void __user *value,
56214 size_t size, int flags)
56215 {
56216 int error;
56217@@ -355,7 +355,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
56218 posix_acl_fix_xattr_from_user(kvalue, size);
56219 }
56220
56221- error = vfs_setxattr(d, kname, kvalue, size, flags);
56222+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
56223+ error = -EACCES;
56224+ goto out;
56225+ }
56226+
56227+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
56228 out:
56229 if (vvalue)
56230 vfree(vvalue);
56231@@ -377,7 +382,7 @@ retry:
56232 return error;
56233 error = mnt_want_write(path.mnt);
56234 if (!error) {
56235- error = setxattr(path.dentry, name, value, size, flags);
56236+ error = setxattr(&path, name, value, size, flags);
56237 mnt_drop_write(path.mnt);
56238 }
56239 path_put(&path);
56240@@ -401,7 +406,7 @@ retry:
56241 return error;
56242 error = mnt_want_write(path.mnt);
56243 if (!error) {
56244- error = setxattr(path.dentry, name, value, size, flags);
56245+ error = setxattr(&path, name, value, size, flags);
56246 mnt_drop_write(path.mnt);
56247 }
56248 path_put(&path);
56249@@ -416,16 +421,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
56250 const void __user *,value, size_t, size, int, flags)
56251 {
56252 struct fd f = fdget(fd);
56253- struct dentry *dentry;
56254 int error = -EBADF;
56255
56256 if (!f.file)
56257 return error;
56258- dentry = f.file->f_path.dentry;
56259- audit_inode(NULL, dentry, 0);
56260+ audit_inode(NULL, f.file->f_path.dentry, 0);
56261 error = mnt_want_write_file(f.file);
56262 if (!error) {
56263- error = setxattr(dentry, name, value, size, flags);
56264+ error = setxattr(&f.file->f_path, name, value, size, flags);
56265 mnt_drop_write_file(f.file);
56266 }
56267 fdput(f);
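
The xattr refactor widens setxattr()'s first argument from a bare dentry to the full struct path so the added grsecurity hook can evaluate both the dentry and the vfsmount before vfs_setxattr() runs. A schematic of the same API shape, with heavily reduced stand-in types:

#include <stdio.h>

struct dentry   { const char *name; };
struct vfsmount { const char *devname; };
struct path     { struct dentry *dentry; struct vfsmount *mnt; };

/* Hypothetical policy hook: needs both halves of the path to decide. */
static int acl_allows_setxattr(const struct path *p)
{
        return p->dentry != NULL && p->mnt != NULL;     /* placeholder rule */
}

static long do_setxattr(const struct path *p, const char *name)
{
        if (!acl_allows_setxattr(p))
                return -13;                             /* -EACCES */
        printf("setxattr %s on %s\n", name, p->dentry->name);
        return 0;
}

int main(void)
{
        struct dentry d = { "file" };
        struct vfsmount m = { "/dev/sda1" };
        struct path p = { &d, &m };

        return (int)do_setxattr(&p, "user.test");
}
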
56268diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
56269index 9fbea87..6b19972 100644
56270--- a/fs/xattr_acl.c
56271+++ b/fs/xattr_acl.c
56272@@ -76,8 +76,8 @@ struct posix_acl *
56273 posix_acl_from_xattr(struct user_namespace *user_ns,
56274 const void *value, size_t size)
56275 {
56276- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
56277- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
56278+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
56279+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
56280 int count;
56281 struct posix_acl *acl;
56282 struct posix_acl_entry *acl_e;
56283diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
56284index 572a858..12a9b0d 100644
56285--- a/fs/xfs/xfs_bmap.c
56286+++ b/fs/xfs/xfs_bmap.c
56287@@ -192,7 +192,7 @@ xfs_bmap_validate_ret(
56288 int nmap,
56289 int ret_nmap);
56290 #else
56291-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
56292+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
56293 #endif /* DEBUG */
56294
56295 STATIC int
56296diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
56297index 1b9fc3e..e1bdde0 100644
56298--- a/fs/xfs/xfs_dir2_sf.c
56299+++ b/fs/xfs/xfs_dir2_sf.c
56300@@ -851,7 +851,15 @@ xfs_dir2_sf_getdents(
56301 }
56302
56303 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
56304- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
56305+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
56306+ char name[sfep->namelen];
56307+ memcpy(name, sfep->name, sfep->namelen);
56308+ if (filldir(dirent, name, sfep->namelen,
56309+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
56310+ *offset = off & 0x7fffffff;
56311+ return 0;
56312+ }
56313+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
56314 off & 0x7fffffff, ino, DT_UNKNOWN)) {
56315 *offset = off & 0x7fffffff;
56316 return 0;
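
When a short-form directory's names live in the inode's inline data area, the hunk above first copies each name into a stack buffer before handing it to filldir; the apparent intent is that the callback never operates on a pointer into storage xfs may relocate while it runs. The defensive copy in isolation, with a fixed bound instead of the patch's variable-length array:

#include <stdio.h>
#include <string.h>

/* Hand filldir-style callbacks a stable copy of a possibly-moving name. */
static int emit_entry(int (*filldir)(const char *, int),
                      const char *unstable_name, int namelen)
{
        char name[256];                 /* fixed bound instead of a VLA */

        if (namelen < 0 || namelen > (int)sizeof(name))
                return -1;
        memcpy(name, unstable_name, (size_t)namelen);
        return filldir(name, namelen);
}

static int print_name(const char *n, int len)
{
        printf("%.*s\n", len, n);
        return 0;
}

int main(void)
{
        return emit_entry(print_name, "inline-name", 11);
}
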
56317diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
56318index c1c3ef8..0952438 100644
56319--- a/fs/xfs/xfs_ioctl.c
56320+++ b/fs/xfs/xfs_ioctl.c
56321@@ -127,7 +127,7 @@ xfs_find_handle(
56322 }
56323
56324 error = -EFAULT;
56325- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
56326+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
56327 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
56328 goto out_put;
56329
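
The one-line xfs_find_handle() fix caps the user-controlled hsize at sizeof(handle) before the copy-out, the standard guard against over-reading a kernel stack object. The same guard in miniature, with memcpy standing in for copy_to_user():

#include <stdio.h>
#include <string.h>

struct handle { char bytes[16]; };

/* Copy out at most sizeof(struct handle), whatever size the caller claims. */
static int copy_handle_out(char *dst, const struct handle *h, size_t hsize)
{
        if (hsize > sizeof(*h))
                return -14;             /* -EFAULT, as in the patched check */
        memcpy(dst, h, hsize);          /* stand-in for copy_to_user() */
        return 0;
}

int main(void)
{
        struct handle h = { "abc" };
        char out[64];

        printf("%d\n", copy_handle_out(out, &h, sizeof(out))); /* -14 */
        printf("%d\n", copy_handle_out(out, &h, sizeof(h)));   /*   0 */
        return 0;
}
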
56330diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
56331index d82efaa..0904a8e 100644
56332--- a/fs/xfs/xfs_iops.c
56333+++ b/fs/xfs/xfs_iops.c
56334@@ -395,7 +395,7 @@ xfs_vn_put_link(
56335 struct nameidata *nd,
56336 void *p)
56337 {
56338- char *s = nd_get_link(nd);
56339+ const char *s = nd_get_link(nd);
56340
56341 if (!IS_ERR(s))
56342 kfree(s);
56343diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
56344new file mode 100644
56345index 0000000..92247e4
56346--- /dev/null
56347+++ b/grsecurity/Kconfig
56348@@ -0,0 +1,1021 @@
56349+#
56350+# grsecurity configuration
56351+#
56352+menu "Memory Protections"
56353+depends on GRKERNSEC
56354+
56355+config GRKERNSEC_KMEM
56356+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
56357+ default y if GRKERNSEC_CONFIG_AUTO
56358+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
56359+ help
56360+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
56361+ be written to or read from to modify or leak the contents of the running
56362+ kernel. /dev/port will also not be allowed to be opened and support
56363+ for /dev/cpu/*/msr will be removed. If you have module
56364+ support disabled, enabling this will close up five ways that are
56365+ currently used to insert malicious code into the running kernel.
56366+
56367+ Even with all these features enabled, we still highly recommend that
56368+ you use the RBAC system, as it is still possible for an attacker to
56369+ modify the running kernel through privileged I/O granted by ioperm/iopl.
56370+
56371+ If you are not using XFree86, you may be able to stop this additional
56372+ case by enabling the 'Disable privileged I/O' option. Though nothing
56373+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
56374+ but only to video memory, which is the only writing we allow in this
56375+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
56376+ not be allowed to mprotect it with PROT_WRITE later.
56377+ Enabling this feature will prevent the "cpupower" and "powertop" tools
56378+ from working.
56379+
56380+ It is highly recommended that you say Y here if you meet all the
56381+ conditions above.
56382+
56383+config GRKERNSEC_VM86
56384+ bool "Restrict VM86 mode"
56385+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
56386+ depends on X86_32
56387+
56388+ help
56389+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
56390+ make use of a special execution mode on 32bit x86 processors called
56391+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
56392+ video cards and will still work with this option enabled. The purpose
56393+ of the option is to prevent exploitation of emulation errors in
56394+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
56395+ Nearly all users should be able to enable this option.
56396+
56397+config GRKERNSEC_IO
56398+ bool "Disable privileged I/O"
56399+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
56400+ depends on X86
56401+ select RTC_CLASS
56402+ select RTC_INTF_DEV
56403+ select RTC_DRV_CMOS
56404+
56405+ help
56406+ If you say Y here, all ioperm and iopl calls will return an error.
56407+ Ioperm and iopl can be used to modify the running kernel.
56408+	  the most notable of which are XFree86 and hwclock. hwclock's need can be
56409+ the most notable of which are XFree86 and hwclock. hwclock can be
56410+ remedied by having RTC support in the kernel, so real-time
56411+ clock support is enabled if this option is enabled, to ensure
56412+ that hwclock operates correctly. XFree86 still will not
56413+ operate correctly with this option enabled, so DO NOT CHOOSE Y
56414+ IF YOU USE XFree86. If you use XFree86 and you still want to
56415+ protect your kernel against modification, use the RBAC system.
56416+
56417+config GRKERNSEC_JIT_HARDEN
56418+ bool "Harden BPF JIT against spray attacks"
56419+ default y if GRKERNSEC_CONFIG_AUTO
56420+ depends on BPF_JIT
56421+ help
56422+ If you say Y here, the native code generated by the kernel's Berkeley
56423+ Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
56424+ attacks that attempt to fit attacker-beneficial instructions in
56425+ 32bit immediate fields of JIT-generated native instructions. The
56426+ attacker will generally aim to cause an unintended instruction sequence
56427+ of JIT-generated native code to execute by jumping into the middle of
56428+ a generated instruction. This feature effectively randomizes the 32bit
56429+ immediate constants present in the generated code to thwart such attacks.
56430+
56431+ If you're using KERNEXEC, it's recommended that you enable this option
56432+ to supplement the hardening of the kernel.
56433+
56434+config GRKERNSEC_RAND_THREADSTACK
56435+ bool "Insert random gaps between thread stacks"
56436+ default y if GRKERNSEC_CONFIG_AUTO
56437+ depends on PAX_RANDMMAP && !PPC
56438+ help
56439+ If you say Y here, a random-sized gap will be enforced between allocated
56440+ thread stacks. Glibc's NPTL and other threading libraries that
56441+ pass MAP_STACK to the kernel for thread stack allocation are supported.
56442+ The implementation currently provides 8 bits of entropy for the gap.
56443+
56444+ Many distributions do not compile threaded remote services with the
56445+ -fstack-check argument to GCC, causing the variable-sized stack-based
56446+ allocator, alloca(), to not probe the stack on allocation. This
56447+ permits an unbounded alloca() to skip over any guard page and potentially
56448+ modify another thread's stack reliably. An enforced random gap
56449+ reduces the reliability of such an attack and increases the chance
56450+ that such a read/write to another thread's stack instead lands in
56451+ an unmapped area, causing a crash and triggering grsecurity's
56452+ anti-bruteforcing logic.
56453+
56454+config GRKERNSEC_PROC_MEMMAP
56455+ bool "Harden ASLR against information leaks and entropy reduction"
56456+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
56457+ depends on PAX_NOEXEC || PAX_ASLR
56458+ help
56459+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
56460+	  give no information about the addresses of a task's mappings if
56461+ PaX features that rely on random addresses are enabled on the task.
56462+ In addition to sanitizing this information and disabling other
56463+ dangerous sources of information, this option causes reads of sensitive
56464+ /proc/<pid> entries where the file descriptor was opened in a different
56465+	  task than the one performing the read to return no data. Such attempts are logged.
56466+ This option also limits argv/env strings for suid/sgid binaries
56467+ to 512KB to prevent a complete exhaustion of the stack entropy provided
56468+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
56469+ binaries to prevent alternative mmap layouts from being abused.
56470+
56471+ If you use PaX it is essential that you say Y here as it closes up
56472+ several holes that make full ASLR useless locally.
56473+
56474+config GRKERNSEC_BRUTE
56475+ bool "Deter exploit bruteforcing"
56476+ default y if GRKERNSEC_CONFIG_AUTO
56477+ help
56478+ If you say Y here, attempts to bruteforce exploits against forking
56479+ daemons such as apache or sshd, as well as against suid/sgid binaries
56480+ will be deterred. When a child of a forking daemon is killed by PaX
56481+ or crashes due to an illegal instruction or other suspicious signal,
56482+ the parent process will be delayed 30 seconds upon every subsequent
56483+ fork until the administrator is able to assess the situation and
56484+ restart the daemon.
56485+ In the suid/sgid case, the attempt is logged, the user has all their
56486+ processes terminated, and they are prevented from executing any further
56487+ processes for 15 minutes.
56488+ It is recommended that you also enable signal logging in the auditing
56489+ section so that logs are generated when a process triggers a suspicious
56490+ signal.
56491+ If the sysctl option is enabled, a sysctl option with name
56492+ "deter_bruteforce" is created.
56493+
56494+
56495+config GRKERNSEC_MODHARDEN
56496+ bool "Harden module auto-loading"
56497+ default y if GRKERNSEC_CONFIG_AUTO
56498+ depends on MODULES
56499+ help
56500+ If you say Y here, module auto-loading in response to use of some
56501+ feature implemented by an unloaded module will be restricted to
56502+ root users. Enabling this option helps defend against attacks
56503+ by unprivileged users who abuse the auto-loading behavior to
56504+ cause a vulnerable module to load that is then exploited.
56505+
56506+ If this option prevents a legitimate use of auto-loading for a
56507+ non-root user, the administrator can execute modprobe manually
56508+ with the exact name of the module mentioned in the alert log.
56509+ Alternatively, the administrator can add the module to the list
56510+ of modules loaded at boot by modifying init scripts.
56511+
56512+ Modification of init scripts will most likely be needed on
56513+ Ubuntu servers with encrypted home directory support enabled,
56514+ as the first non-root user logging in will cause the ecb(aes),
56515+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
56516+
56517+config GRKERNSEC_HIDESYM
56518+ bool "Hide kernel symbols"
56519+ default y if GRKERNSEC_CONFIG_AUTO
56520+ select PAX_USERCOPY_SLABS
56521+ help
56522+	  If you say Y here, getting information on loaded modules and
56523+ displaying all kernel symbols through a syscall will be restricted
56524+ to users with CAP_SYS_MODULE. For software compatibility reasons,
56525+ /proc/kallsyms will be restricted to the root user. The RBAC
56526+ system can hide that entry even from root.
56527+
56528+ This option also prevents leaking of kernel addresses through
56529+ several /proc entries.
56530+
56531+ Note that this option is only effective provided the following
56532+ conditions are met:
56533+ 1) The kernel using grsecurity is not precompiled by some distribution
56534+ 2) You have also enabled GRKERNSEC_DMESG
56535+ 3) You are using the RBAC system and hiding other files such as your
56536+ kernel image and System.map. Alternatively, enabling this option
56537+ causes the permissions on /boot, /lib/modules, and the kernel
56538+ source directory to change at compile time to prevent
56539+ reading by non-root users.
56540+ If the above conditions are met, this option will aid in providing a
56541+ useful protection against local kernel exploitation of overflows
56542+ and arbitrary read/write vulnerabilities.
56543+
56544+config GRKERNSEC_KERN_LOCKOUT
56545+ bool "Active kernel exploit response"
56546+ default y if GRKERNSEC_CONFIG_AUTO
56547+ depends on X86 || ARM || PPC || SPARC
56548+ help
56549+ If you say Y here, when a PaX alert is triggered due to suspicious
56550+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
56551+ or an OOPS occurs due to bad memory accesses, instead of just
56552+ terminating the offending process (and potentially allowing
56553+ a subsequent exploit from the same user), we will take one of two
56554+ actions:
56555+ If the user was root, we will panic the system
56556+ If the user was non-root, we will log the attempt, terminate
56557+ all processes owned by the user, then prevent them from creating
56558+ any new processes until the system is restarted
56559+ This deters repeated kernel exploitation/bruteforcing attempts
56560+ and is useful for later forensics.
56561+
56562+endmenu
56563+menu "Role Based Access Control Options"
56564+depends on GRKERNSEC
56565+
56566+config GRKERNSEC_RBAC_DEBUG
56567+ bool
56568+
56569+config GRKERNSEC_NO_RBAC
56570+ bool "Disable RBAC system"
56571+ help
56572+ If you say Y here, the /dev/grsec device will be removed from the kernel,
56573+ preventing the RBAC system from being enabled. You should only say Y
56574+ here if you have no intention of using the RBAC system, so as to prevent
56575+ an attacker with root access from misusing the RBAC system to hide files
56576+ and processes when loadable module support and /dev/[k]mem have been
56577+ locked down.
56578+
56579+config GRKERNSEC_ACL_HIDEKERN
56580+ bool "Hide kernel processes"
56581+ help
56582+ If you say Y here, all kernel threads will be hidden to all
56583+ processes but those whose subject has the "view hidden processes"
56584+ flag.
56585+
56586+config GRKERNSEC_ACL_MAXTRIES
56587+ int "Maximum tries before password lockout"
56588+ default 3
56589+ help
56590+ This option enforces the maximum number of times a user can attempt
56591+ to authorize themselves with the grsecurity RBAC system before being
56592+ denied the ability to attempt authorization again for a specified time.
56593+ The lower the number, the harder it will be to brute-force a password.
56594+
56595+config GRKERNSEC_ACL_TIMEOUT
56596+ int "Time to wait after max password tries, in seconds"
56597+ default 30
56598+ help
56599+ This option specifies the time the user must wait after attempting to
56600+ authorize to the RBAC system with the maximum number of invalid
56601+ passwords. The higher the number, the harder it will be to brute-force
56602+ a password.
56603+
56604+endmenu
56605+menu "Filesystem Protections"
56606+depends on GRKERNSEC
56607+
56608+config GRKERNSEC_PROC
56609+ bool "Proc restrictions"
56610+ default y if GRKERNSEC_CONFIG_AUTO
56611+ help
56612+ If you say Y here, the permissions of the /proc filesystem
56613+ will be altered to enhance system security and privacy. You MUST
56614+ choose either a user only restriction or a user and group restriction.
56615+	  Depending upon the option you choose, you can either restrict users to
56616+	  see only the processes they themselves run (the user only restriction),
56617+	  or choose a group that can view all processes and files normally
56618+	  restricted to root (the user and group restriction). NOTE: If you're running identd or
56619+ ntpd as a non-root user, you will have to run it as the group you
56620+ specify here.
56621+
56622+config GRKERNSEC_PROC_USER
56623+ bool "Restrict /proc to user only"
56624+ depends on GRKERNSEC_PROC
56625+ help
56626+ If you say Y here, non-root users will only be able to view their own
56627+	  processes, and will be restricted from viewing network-related
56628+	  information and kernel symbol and module information.
56629+
56630+config GRKERNSEC_PROC_USERGROUP
56631+ bool "Allow special group"
56632+ default y if GRKERNSEC_CONFIG_AUTO
56633+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
56634+ help
56635+ If you say Y here, you will be able to select a group that will be
56636+ able to view all processes and network-related information. If you've
56637+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
56638+ remain hidden. This option is useful if you want to run identd as
56639+ a non-root user. The group you select may also be chosen at boot time
56640+ via "grsec_proc_gid=" on the kernel commandline.
56641+
56642+config GRKERNSEC_PROC_GID
56643+ int "GID for special group"
56644+ depends on GRKERNSEC_PROC_USERGROUP
56645+ default 1001
56646+
56647+config GRKERNSEC_PROC_ADD
56648+ bool "Additional restrictions"
56649+ default y if GRKERNSEC_CONFIG_AUTO
56650+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
56651+ help
56652+ If you say Y here, additional restrictions will be placed on
56653+ /proc that keep normal users from viewing device information and
56654+ slabinfo information that could be useful for exploits.
56655+
56656+config GRKERNSEC_LINK
56657+ bool "Linking restrictions"
56658+ default y if GRKERNSEC_CONFIG_AUTO
56659+ help
56660+ If you say Y here, /tmp race exploits will be prevented, since users
56661+ will no longer be able to follow symlinks owned by other users in
56662+ world-writable +t directories (e.g. /tmp), unless the owner of the
56663+	  symlink is the owner of the directory. Users will also not be
56664+ able to hardlink to files they do not own. If the sysctl option is
56665+ enabled, a sysctl option with name "linking_restrictions" is created.
56666+
56667+config GRKERNSEC_SYMLINKOWN
56668+ bool "Kernel-enforced SymlinksIfOwnerMatch"
56669+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
56670+ help
56671+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
56672+ that prevents it from being used as a security feature. As Apache
56673+ verifies the symlink by performing a stat() against the target of
56674+ the symlink before it is followed, an attacker can setup a symlink
56675+ to point to a same-owned file, then replace the symlink with one
56676+ that targets another user's file just after Apache "validates" the
56677+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
56678+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
56679+ will be in place for the group you specify. If the sysctl option
56680+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
56681+ created.
56682+
56683+config GRKERNSEC_SYMLINKOWN_GID
56684+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
56685+ depends on GRKERNSEC_SYMLINKOWN
56686+ default 1006
56687+ help
56688+ Setting this GID determines what group kernel-enforced
56689+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
56690+ is enabled, a sysctl option with name "symlinkown_gid" is created.
56691+
56692+config GRKERNSEC_FIFO
56693+ bool "FIFO restrictions"
56694+ default y if GRKERNSEC_CONFIG_AUTO
56695+ help
56696+ If you say Y here, users will not be able to write to FIFOs they don't
56697+ own in world-writable +t directories (e.g. /tmp), unless the owner of
56698+	  the FIFO is also the owner of the directory it's held in. If the sysctl
56699+ option is enabled, a sysctl option with name "fifo_restrictions" is
56700+ created.
56701+
56702+config GRKERNSEC_SYSFS_RESTRICT
56703+ bool "Sysfs/debugfs restriction"
56704+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
56705+ depends on SYSFS
56706+ help
56707+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
56708+ any filesystem normally mounted under it (e.g. debugfs) will be
56709+ mostly accessible only by root. These filesystems generally provide access
56710+ to hardware and debug information that isn't appropriate for unprivileged
56711+ users of the system. Sysfs and debugfs have also become a large source
56712+ of new vulnerabilities, ranging from infoleaks to local compromise.
56713+ There has been very little oversight with an eye toward security involved
56714+ in adding new exporters of information to these filesystems, so their
56715+ use is discouraged.
56716+ For reasons of compatibility, a few directories have been whitelisted
56717+ for access by non-root users:
56718+ /sys/fs/selinux
56719+ /sys/fs/fuse
56720+ /sys/devices/system/cpu
56721+
56722+config GRKERNSEC_ROFS
56723+ bool "Runtime read-only mount protection"
56724+ help
56725+ If you say Y here, a sysctl option with name "romount_protect" will
56726+ be created. By setting this option to 1 at runtime, filesystems
56727+ will be protected in the following ways:
56728+ * No new writable mounts will be allowed
56729+ * Existing read-only mounts won't be able to be remounted read/write
56730+ * Write operations will be denied on all block devices
56731+ This option acts independently of grsec_lock: once it is set to 1,
56732+ it cannot be turned off. Therefore, please be mindful of the resulting
56733+ behavior if this option is enabled in an init script on a read-only
56734+ filesystem. This feature is mainly intended for secure embedded systems.
56735+
56736+config GRKERNSEC_DEVICE_SIDECHANNEL
56737+ bool "Eliminate stat/notify-based device sidechannels"
56738+ default y if GRKERNSEC_CONFIG_AUTO
56739+ help
56740+ If you say Y here, timing analyses on block or character
56741+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
56742+ will be thwarted for unprivileged users. If a process without
56743+ CAP_MKNOD stats such a device, the last access and last modify times
56744+ will match the device's create time. No access or modify events
56745+ will be triggered through inotify/dnotify/fanotify for such devices.
56746+ This feature will prevent attacks that may at a minimum
56747+ allow an attacker to determine the administrator's password length.
56748+
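A sketch of the sidechannel being closed: an unprivileged watcher polls the
device's modify time and correlates changes with administrator keystrokes
(illustrative only):

    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>
    #include <sys/stat.h>

    int main(void)
    {
            struct stat st;
            time_t last = 0;

            for (;;) {
                    /* with this option enabled, st_mtime stays pinned to
                     * the create time for callers lacking CAP_MKNOD,
                     * defeating this probe */
                    if (stat("/dev/ptmx", &st) == 0 && st.st_mtime != last) {
                            last = st.st_mtime;
                            printf("tty activity at %ld\n", (long)last);
                    }
                    usleep(10000);
            }
    }
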
56749+config GRKERNSEC_CHROOT
56750+ bool "Chroot jail restrictions"
56751+ default y if GRKERNSEC_CONFIG_AUTO
56752+ help
56753+ If you say Y here, you will be able to choose several options that will
56754+ make breaking out of a chrooted jail much more difficult. If you
56755+ encounter no software incompatibilities with the following options, it
56756+ is recommended that you enable each one.
56757+
56758+config GRKERNSEC_CHROOT_MOUNT
56759+ bool "Deny mounts"
56760+ default y if GRKERNSEC_CONFIG_AUTO
56761+ depends on GRKERNSEC_CHROOT
56762+ help
56763+ If you say Y here, processes inside a chroot will not be able to
56764+ mount or remount filesystems. If the sysctl option is enabled, a
56765+ sysctl option with name "chroot_deny_mount" is created.
56766+
56767+config GRKERNSEC_CHROOT_DOUBLE
56768+ bool "Deny double-chroots"
56769+ default y if GRKERNSEC_CONFIG_AUTO
56770+ depends on GRKERNSEC_CHROOT
56771+ help
56772+ If you say Y here, processes inside a chroot will not be able to chroot
56773+ again outside the chroot. This is a widely used method of breaking
56774+ out of a chroot jail and should not be allowed. If the sysctl
56775+ option is enabled, a sysctl option with name
56776+ "chroot_deny_chroot" is created.
56777+
56778+config GRKERNSEC_CHROOT_PIVOT
56779+ bool "Deny pivot_root in chroot"
56780+ default y if GRKERNSEC_CONFIG_AUTO
56781+ depends on GRKERNSEC_CHROOT
56782+ help
56783+ If you say Y here, processes inside a chroot will not be able to use
56784+ a function called pivot_root() that was introduced in Linux 2.3.41. It
56785+ works similarly to chroot in that it changes the root filesystem. This
56786+ function could be misused in a chrooted process to attempt to break out
56787+ of the chroot, and therefore should not be allowed. If the sysctl
56788+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
56789+ created.
56790+
56791+config GRKERNSEC_CHROOT_CHDIR
56792+ bool "Enforce chdir(\"/\") on all chroots"
56793+ default y if GRKERNSEC_CONFIG_AUTO
56794+ depends on GRKERNSEC_CHROOT
56795+ help
56796+ If you say Y here, the current working directory of all newly-chrooted
56797+ applications will be set to the root directory of the chroot.
56798+ The man page on chroot(2) states:
56799+ Note that this call does not change the current working
56800+ directory, so that `.' can be outside the tree rooted at
56801+ `/'. In particular, the super-user can escape from a
56802+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
56803+
56804+ It is recommended that you say Y here, since it's not known to break
56805+ any software. If the sysctl option is enabled, a sysctl option with
56806+ name "chroot_enforce_chdir" is created.
56807+
56808+config GRKERNSEC_CHROOT_CHMOD
56809+ bool "Deny (f)chmod +s"
56810+ default y if GRKERNSEC_CONFIG_AUTO
56811+ depends on GRKERNSEC_CHROOT
56812+ help
56813+ If you say Y here, processes inside a chroot will not be able to chmod
56814+ or fchmod files to make them have suid or sgid bits. This protects
56815+ against another published method of breaking a chroot. If the sysctl
56816+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
56817+ created.
56818+
56819+config GRKERNSEC_CHROOT_FCHDIR
56820+ bool "Deny fchdir out of chroot"
56821+ default y if GRKERNSEC_CONFIG_AUTO
56822+ depends on GRKERNSEC_CHROOT
56823+ help
56824+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
56825+ to a file descriptor of the chrooting process that points to a directory
56826+ outside the chroot will be stopped. If the sysctl option
56827+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
56828+
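The breakout being stopped, sketched (run as root inside a chroot;
illustrative only):

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/stat.h>
    #include <sys/types.h>

    int main(void)
    {
            int fd = open(".", O_RDONLY);   /* dir handle outside the new root */
            int i;

            mkdir("jail", 0700);
            chroot("jail");                 /* fd now points outside the chroot */
            fchdir(fd);                     /* the escape this option denies */
            for (i = 0; i < 64; i++)
                    chdir("..");            /* walk up to the real root */
            chroot(".");
            return execl("/bin/sh", "sh", (char *)NULL);
    }
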
56829+config GRKERNSEC_CHROOT_MKNOD
56830+ bool "Deny mknod"
56831+ default y if GRKERNSEC_CONFIG_AUTO
56832+ depends on GRKERNSEC_CHROOT
56833+ help
56834+ If you say Y here, processes inside a chroot will not be allowed to
56835+ mknod. The problem with using mknod inside a chroot is that it
56836+ would allow an attacker to create a device entry that is the same
56837+ as one on the physical root of your system, which could be
56838+ anything from the console device to a device for your hard drive (which
56839+ they could then use to wipe the drive or steal data). It is recommended
56840+ that you say Y here, unless you run into software incompatibilities.
56841+ If the sysctl option is enabled, a sysctl option with name
56842+ "chroot_deny_mknod" is created.
56843+
56844+config GRKERNSEC_CHROOT_SHMAT
56845+ bool "Deny shmat() out of chroot"
56846+ default y if GRKERNSEC_CONFIG_AUTO
56847+ depends on GRKERNSEC_CHROOT
56848+ help
56849+ If you say Y here, processes inside a chroot will not be able to attach
56850+ to shared memory segments that were created outside of the chroot jail.
56851+ It is recommended that you say Y here. If the sysctl option is enabled,
56852+ a sysctl option with name "chroot_deny_shmat" is created.
56853+
56854+config GRKERNSEC_CHROOT_UNIX
56855+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
56856+ default y if GRKERNSEC_CONFIG_AUTO
56857+ depends on GRKERNSEC_CHROOT
56858+ help
56859+ If you say Y here, processes inside a chroot will not be able to
56860+ connect to abstract (meaning not belonging to a filesystem) Unix
56861+ domain sockets that were bound outside of a chroot. It is recommended
56862+ that you say Y here. If the sysctl option is enabled, a sysctl option
56863+ with name "chroot_deny_unix" is created.
56864+
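What "abstract" means here, sketched: the socket's name begins with a NUL
byte and lives in a kernel namespace rather than on any filesystem, so a
chroot's filesystem view alone cannot hide it (hypothetical helper):

    #include <stddef.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/un.h>

    static int bind_abstract(const char *name)
    {
            struct sockaddr_un sun;
            socklen_t len;
            int fd = socket(AF_UNIX, SOCK_STREAM, 0);

            memset(&sun, 0, sizeof(sun));
            sun.sun_family = AF_UNIX;
            sun.sun_path[0] = '\0';         /* leading NUL = abstract name */
            strncpy(sun.sun_path + 1, name, sizeof(sun.sun_path) - 2);
            len = offsetof(struct sockaddr_un, sun_path) + 1 + strlen(name);
            bind(fd, (struct sockaddr *)&sun, len);
            return fd;
    }
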
56865+config GRKERNSEC_CHROOT_FINDTASK
56866+ bool "Protect outside processes"
56867+ default y if GRKERNSEC_CONFIG_AUTO
56868+ depends on GRKERNSEC_CHROOT
56869+ help
56870+ If you say Y here, processes inside a chroot will not be able to
56871+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
56872+ getsid, or view any process outside of the chroot. If the sysctl
56873+ option is enabled, a sysctl option with name "chroot_findtask" is
56874+ created.
56875+
56876+config GRKERNSEC_CHROOT_NICE
56877+ bool "Restrict priority changes"
56878+ default y if GRKERNSEC_CONFIG_AUTO
56879+ depends on GRKERNSEC_CHROOT
56880+ help
56881+ If you say Y here, processes inside a chroot will not be able to raise
56882+ the priority of processes in the chroot, or alter the priority of
56883+ processes outside the chroot. This provides more security than simply
56884+ removing CAP_SYS_NICE from the process' capability set. If the
56885+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
56886+ is created.
56887+
56888+config GRKERNSEC_CHROOT_SYSCTL
56889+ bool "Deny sysctl writes"
56890+ default y if GRKERNSEC_CONFIG_AUTO
56891+ depends on GRKERNSEC_CHROOT
56892+ help
56893+ If you say Y here, an attacker in a chroot will not be able to
56894+ write to sysctl entries, either by sysctl(2) or through a /proc
56895+ interface. It is strongly recommended that you say Y here. If the
56896+ sysctl option is enabled, a sysctl option with name
56897+ "chroot_deny_sysctl" is created.
56898+
56899+config GRKERNSEC_CHROOT_CAPS
56900+ bool "Capability restrictions"
56901+ default y if GRKERNSEC_CONFIG_AUTO
56902+ depends on GRKERNSEC_CHROOT
56903+ help
56904+ If you say Y here, the capabilities on all processes within a
56905+ chroot jail will be lowered to stop module insertion, raw i/o,
56906+ system and net admin tasks, rebooting the system, modifying immutable
56907+ files, modifying IPC owned by another, and changing the system time.
56908+ This is left as an option because it can break some apps. Disable this
56909+ if your chrooted apps are having problems performing those kinds of
56910+ tasks. If the sysctl option is enabled, a sysctl option with
56911+ name "chroot_caps" is created.
56912+
56913+endmenu
56914+menu "Kernel Auditing"
56915+depends on GRKERNSEC
56916+
56917+config GRKERNSEC_AUDIT_GROUP
56918+ bool "Single group for auditing"
56919+ help
56920+ If you say Y here, the exec and chdir logging features will only operate
56921+ on a group you specify. This option is recommended if you only want to
56922+ watch certain users instead of having a large amount of logs from the
56923+ entire system. If the sysctl option is enabled, a sysctl option with
56924+ name "audit_group" is created.
56925+
56926+config GRKERNSEC_AUDIT_GID
56927+ int "GID for auditing"
56928+ depends on GRKERNSEC_AUDIT_GROUP
56929+ default 1007
56930+
56931+config GRKERNSEC_EXECLOG
56932+ bool "Exec logging"
56933+ help
56934+ If you say Y here, all execve() calls will be logged (since the
56935+ other exec*() calls are frontends to execve(), all execution
56936+ will be logged). Useful for shell-servers that like to keep track
56937+ of their users. If the sysctl option is enabled, a sysctl option with
56938+ name "exec_logging" is created.
56939+ WARNING: This option when enabled will produce a LOT of logs, especially
56940+ on an active system.
56941+
56942+config GRKERNSEC_RESLOG
56943+ bool "Resource logging"
56944+ default y if GRKERNSEC_CONFIG_AUTO
56945+ help
56946+ If you say Y here, all attempts to overstep resource limits will
56947+ be logged with the resource name, the requested size, and the current
56948+ limit. It is highly recommended that you say Y here. If the sysctl
56949+ option is enabled, a sysctl option with name "resource_logging" is
56950+ created. If the RBAC system is enabled, the sysctl value is ignored.
56951+
56952+config GRKERNSEC_CHROOT_EXECLOG
56953+ bool "Log execs within chroot"
56954+ help
56955+ If you say Y here, all executions inside a chroot jail will be logged
56956+ to syslog. This can cause a large amount of logs if certain
56957+ applications (e.g. djb's daemontools) are installed on the system, and
56958+ is therefore left as an option. If the sysctl option is enabled, a
56959+ sysctl option with name "chroot_execlog" is created.
56960+
56961+config GRKERNSEC_AUDIT_PTRACE
56962+ bool "Ptrace logging"
56963+ help
56964+ If you say Y here, all attempts to attach to a process via ptrace
56965+ will be logged. If the sysctl option is enabled, a sysctl option
56966+ with name "audit_ptrace" is created.
56967+
56968+config GRKERNSEC_AUDIT_CHDIR
56969+ bool "Chdir logging"
56970+ help
56971+ If you say Y here, all chdir() calls will be logged. If the sysctl
56972+ option is enabled, a sysctl option with name "audit_chdir" is created.
56973+
56974+config GRKERNSEC_AUDIT_MOUNT
56975+ bool "(Un)Mount logging"
56976+ help
56977+ If you say Y here, all mounts and unmounts will be logged. If the
56978+ sysctl option is enabled, a sysctl option with name "audit_mount" is
56979+ created.
56980+
56981+config GRKERNSEC_SIGNAL
56982+ bool "Signal logging"
56983+ default y if GRKERNSEC_CONFIG_AUTO
56984+ help
56985+ If you say Y here, certain important signals will be logged, such as
56986+ SIGSEGV, which will inform you when an error has occurred in a
56987+ program, which in some cases could indicate an exploit attempt.
56988+ If the sysctl option is enabled, a sysctl option with name
56989+ "signal_logging" is created.
56990+
56991+config GRKERNSEC_FORKFAIL
56992+ bool "Fork failure logging"
56993+ help
56994+ If you say Y here, all failed fork() attempts will be logged.
56995+ This could suggest a fork bomb, or someone attempting to overstep
56996+ their process limit. If the sysctl option is enabled, a sysctl option
56997+ with name "forkfail_logging" is created.
56998+
56999+config GRKERNSEC_TIME
57000+ bool "Time change logging"
57001+ default y if GRKERNSEC_CONFIG_AUTO
57002+ help
57003+ If you say Y here, any changes of the system clock will be logged.
57004+ If the sysctl option is enabled, a sysctl option with name
57005+ "timechange_logging" is created.
57006+
57007+config GRKERNSEC_PROC_IPADDR
57008+ bool "/proc/<pid>/ipaddr support"
57009+ default y if GRKERNSEC_CONFIG_AUTO
57010+ help
57011+ If you say Y here, a new entry will be added to each /proc/<pid>
57012+ directory that contains the IP address of the user connected to the task.
57013+ The IP is carried across local TCP and AF_UNIX stream sockets.
57014+ This information can be useful for IDS/IPSes to perform remote response
57015+ to a local attack. The entry is readable by only the owner of the
57016+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
57017+ the RBAC system), and thus does not create privacy concerns.
57018+
57019+config GRKERNSEC_RWXMAP_LOG
57020+ bool 'Denied RWX mmap/mprotect logging'
57021+ default y if GRKERNSEC_CONFIG_AUTO
57022+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
57023+ help
57024+ If you say Y here, calls to mmap() and mprotect() with explicit
57025+ usage of PROT_WRITE and PROT_EXEC together will be logged when
57026+ denied by the PAX_MPROTECT feature. If the sysctl option is
57027+ enabled, a sysctl option with name "rwxmap_logging" is created.
57028+
57029+config GRKERNSEC_AUDIT_TEXTREL
57030+ bool 'ELF text relocations logging (READ HELP)'
57031+ depends on PAX_MPROTECT
57032+ help
57033+ If you say Y here, text relocations will be logged with the filename
57034+ of the offending library or binary. The purpose of the feature is
57035+ to help Linux distribution developers get rid of libraries and
57036+ binaries that need text relocations which hinder the future progress
57037+ of PaX. Only Linux distribution developers should say Y here, and
57038+ never on a production machine, as this option creates an information
57039+ leak that could aid an attacker in defeating the randomization of
57040+ a single memory region. If the sysctl option is enabled, a sysctl
57041+ option with name "audit_textrel" is created.
57042+
57043+endmenu
57044+
57045+menu "Executable Protections"
57046+depends on GRKERNSEC
57047+
57048+config GRKERNSEC_DMESG
57049+ bool "Dmesg(8) restriction"
57050+ default y if GRKERNSEC_CONFIG_AUTO
57051+ help
57052+ If you say Y here, non-root users will not be able to use dmesg(8)
57053+ to view the contents of the kernel's circular log buffer.
57054+ The kernel's log buffer often contains kernel addresses and other
57055+ identifying information useful to an attacker in fingerprinting a
57056+ system for a targeted exploit.
57057+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
57058+ created.
57059+
57060+config GRKERNSEC_HARDEN_PTRACE
57061+ bool "Deter ptrace-based process snooping"
57062+ default y if GRKERNSEC_CONFIG_AUTO
57063+ help
57064+ If you say Y here, TTY sniffers and other malicious monitoring
57065+ programs implemented through ptrace will be defeated. If you
57066+ have been using the RBAC system, this option has already been
57067+ enabled for several years for all users, with the ability to make
57068+ fine-grained exceptions.
57069+
57070+ This option only affects the ability of non-root users to ptrace
57071+ processes that are not a descendant of the ptracing process.
57072+ This means that strace ./binary and gdb ./binary will still work,
57073+ but attaching to arbitrary processes will not. If the sysctl
57074+ option is enabled, a sysctl option with name "harden_ptrace" is
57075+ created.
57076+
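The distinction, sketched: tracing one's own child still works, while
attaching to an arbitrary pid does not (pid 1234 is illustrative):

    #include <stdio.h>
    #include <sys/ptrace.h>

    int main(void)
    {
            /* as an unprivileged user with harden_ptrace on, expect EPERM */
            if (ptrace(PTRACE_ATTACH, 1234, NULL, NULL) == -1)
                    perror("ptrace attach to non-descendant");
            return 0;
    }
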
57077+config GRKERNSEC_PTRACE_READEXEC
57078+ bool "Require read access to ptrace sensitive binaries"
57079+ default y if GRKERNSEC_CONFIG_AUTO
57080+ help
57081+ If you say Y here, unprivileged users will not be able to ptrace unreadable
57082+ binaries. This option is useful in environments that
57083+ remove the read bits (e.g. file mode 4711) from suid binaries to
57084+ prevent infoleaking of their contents. This option adds
57085+ consistency to the use of that file mode, as otherwise an unprivileged
57086+ user could read the binary's contents out by ptracing it as it runs.
57087+
57088+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
57089+ is created.
57090+
57091+config GRKERNSEC_SETXID
57092+ bool "Enforce consistent multithreaded privileges"
57093+ default y if GRKERNSEC_CONFIG_AUTO
57094+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
57095+ help
57096+ If you say Y here, a change from a root uid to a non-root uid
57097+ in a multithreaded application will cause the resulting uids,
57098+ gids, supplementary groups, and capabilities in that thread
57099+ to be propagated to the other threads of the process. In most
57100+ cases this is unnecessary, as glibc will emulate this behavior
57101+ on behalf of the application. Other libcs do not act in the
57102+ same way, allowing the other threads of the process to continue
57103+ running with root privileges. If the sysctl option is enabled,
57104+ a sysctl option with name "consistent_setxid" is created.
57105+
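A sketch of the hazard on a libc without cross-thread propagation: dropping
root via the raw syscall affects only the calling thread, leaving siblings
running as root (assumes the program starts as root; illustrative only):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    static void *worker(void *arg)
    {
            sleep(2);
            /* without propagation, this thread still reports uid 0 */
            printf("worker runs as uid %d\n", (int)getuid());
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, worker, NULL);
            syscall(SYS_setuid, 65534);     /* raw syscall: this thread only */
            pthread_join(t, NULL);
            return 0;
    }
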
57106+config GRKERNSEC_TPE
57107+ bool "Trusted Path Execution (TPE)"
57108+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
57109+ help
57110+ If you say Y here, you will be able to choose a gid to add to the
57111+ supplementary groups of users you want to mark as "untrusted."
57112+ These users will not be able to execute any files that are not in
57113+ root-owned directories writable only by root. If the sysctl option
57114+ is enabled, a sysctl option with name "tpe" is created.
57115+
57116+config GRKERNSEC_TPE_ALL
57117+ bool "Partially restrict all non-root users"
57118+ depends on GRKERNSEC_TPE
57119+ help
57120+ If you say Y here, all non-root users will be covered under
57121+ a weaker TPE restriction. This is separate from, and in addition to,
57122+ the main TPE options that you have selected elsewhere. Thus, if a
57123+ "trusted" GID is chosen, this restriction applies to even that GID.
57124+ Under this restriction, all non-root users will only be allowed to
57125+ execute files in directories they own that are not group or
57126+ world-writable, or in directories owned by root and writable only by
57127+ root. If the sysctl option is enabled, a sysctl option with name
57128+ "tpe_restrict_all" is created.
57129+
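The directory test applied under this weaker restriction, sketched as a
predicate (hypothetical names, not the kernel's actual code):

    #include <sys/stat.h>
    #include <sys/types.h>

    static int tpe_all_allows_exec(const struct stat *dir_st, uid_t uid)
    {
            /* root-owned and writable only by root */
            if (dir_st->st_uid == 0 &&
                !(dir_st->st_mode & (S_IWGRP | S_IWOTH)))
                    return 1;
            /* or owned by the user and not group/world-writable */
            return dir_st->st_uid == uid &&
                   !(dir_st->st_mode & (S_IWGRP | S_IWOTH));
    }
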
57130+config GRKERNSEC_TPE_INVERT
57131+ bool "Invert GID option"
57132+ depends on GRKERNSEC_TPE
57133+ help
57134+ If you say Y here, the group you specify in the TPE configuration will
57135+ decide what group TPE restrictions will be *disabled* for. This
57136+ option is useful if you want TPE restrictions to be applied to most
57137+ users on the system. If the sysctl option is enabled, a sysctl option
57138+ with name "tpe_invert" is created. Unlike other sysctl options, this
57139+ entry will default to on for backward-compatibility.
57140+
57141+config GRKERNSEC_TPE_GID
57142+ int
57143+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
57144+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
57145+
57146+config GRKERNSEC_TPE_UNTRUSTED_GID
57147+ int "GID for TPE-untrusted users"
57148+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
57149+ default 1005
57150+ help
57151+ Setting this GID determines what group TPE restrictions will be
57152+ *enabled* for. If the sysctl option is enabled, a sysctl option
57153+ with name "tpe_gid" is created.
57154+
57155+config GRKERNSEC_TPE_TRUSTED_GID
57156+ int "GID for TPE-trusted users"
57157+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
57158+ default 1005
57159+ help
57160+ Setting this GID determines what group TPE restrictions will be
57161+ *disabled* for. If the sysctl option is enabled, a sysctl option
57162+ with name "tpe_gid" is created.
57163+
57164+endmenu
57165+menu "Network Protections"
57166+depends on GRKERNSEC
57167+
57168+config GRKERNSEC_RANDNET
57169+ bool "Larger entropy pools"
57170+ default y if GRKERNSEC_CONFIG_AUTO
57171+ help
57172+ If you say Y here, the entropy pools used for many features of Linux
57173+ and grsecurity will be doubled in size. Since several grsecurity
57174+ features use additional randomness, it is recommended that you say Y
57175+ here. Saying Y here has a similar effect as modifying
57176+ /proc/sys/kernel/random/poolsize.
57177+
57178+config GRKERNSEC_BLACKHOLE
57179+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
57180+ default y if GRKERNSEC_CONFIG_AUTO
57181+ depends on NET
57182+ help
57183+ If you say Y here, neither TCP resets nor ICMP
57184+ destination-unreachable packets will be sent in response to packets
57185+ sent to ports for which no associated listening process exists.
57186+ This feature supports both IPv4 and IPv6 and exempts the
57187+ loopback interface from blackholing. Enabling this feature
57188+ makes a host more resilient to DoS attacks and reduces network
57189+ visibility against scanners.
57190+
57191+ The blackhole feature as-implemented is equivalent to the FreeBSD
57192+ blackhole feature, as it prevents RST responses to all packets, not
57193+ just SYNs. Under most application behavior this causes no
57194+ problems, but applications (like haproxy) may not close certain
57195+ connections in a way that cleanly terminates them on the remote
57196+ end, leaving the remote host in LAST_ACK state. Because of this
57197+ side-effect and to prevent intentional LAST_ACK DoSes, this
57198+ feature also adds automatic mitigation against such attacks.
57199+ The mitigation drastically reduces the amount of time a socket
57200+ can spend in LAST_ACK state. If you're using haproxy and not
57201+ all servers it connects to have this option enabled, consider
57202+ disabling this feature on the haproxy host.
57203+
57204+ If the sysctl option is enabled, two sysctl options with names
57205+ "ip_blackhole" and "lastack_retries" will be created.
57206+ While "ip_blackhole" takes the standard zero/non-zero on/off
57207+ toggle, "lastack_retries" uses the same kinds of values as
57208+ "tcp_retries1" and "tcp_retries2". The default value of 4
57209+ prevents a socket from lasting more than 45 seconds in LAST_ACK
57210+ state.
57211+
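Assuming the classic 3-second initial retransmission timeout that doubles on
each retry (an editorial assumption), the default of 4 retries spans at most
3 + 6 + 12 + 24 = 45 seconds, matching the figure above.
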
57212+config GRKERNSEC_NO_SIMULT_CONNECT
57213+ bool "Disable TCP Simultaneous Connect"
57214+ default y if GRKERNSEC_CONFIG_AUTO
57215+ depends on NET
57216+ help
57217+ If you say Y here, a feature by Willy Tarreau will be enabled that
57218+ removes a weakness in Linux's strict implementation of TCP that
57219+ allows two clients to connect to each other without either entering
57220+ a listening state. The weakness allows an attacker to easily prevent
57221+ a client from connecting to a known server provided the source port
57222+ for the connection is guessed correctly.
57223+
57224+ As the weakness could be used to prevent an antivirus or IPS from
57225+ fetching updates, or prevent an SSL gateway from fetching a CRL,
57226+ it should be eliminated by enabling this option. Though Linux is
57227+ one of few operating systems supporting simultaneous connect, it
57228+ has no legitimate use in practice and is rarely supported by firewalls.
57229+
57230+config GRKERNSEC_SOCKET
57231+ bool "Socket restrictions"
57232+ depends on NET
57233+ help
57234+ If you say Y here, you will be able to choose from several options.
57235+ If you assign a GID on your system and add it to the supplementary
57236+ groups of users you want to restrict socket access to, this patch
57237+ will enforce up to three restrictions, based on the option(s) you choose.
57238+
57239+config GRKERNSEC_SOCKET_ALL
57240+ bool "Deny any sockets to group"
57241+ depends on GRKERNSEC_SOCKET
57242+ help
57243+ If you say Y here, you will be able to choose a GID whose users will
57244+ be unable to connect to other hosts from your machine or run server
57245+ applications from your machine. If the sysctl option is enabled, a
57246+ sysctl option with name "socket_all" is created.
57247+
57248+config GRKERNSEC_SOCKET_ALL_GID
57249+ int "GID to deny all sockets for"
57250+ depends on GRKERNSEC_SOCKET_ALL
57251+ default 1004
57252+ help
57253+ Here you can choose the GID to disable socket access for. Remember to
57254+ add the users you want socket access disabled for to the GID
57255+ specified here. If the sysctl option is enabled, a sysctl option
57256+ with name "socket_all_gid" is created.
57257+
57258+config GRKERNSEC_SOCKET_CLIENT
57259+ bool "Deny client sockets to group"
57260+ depends on GRKERNSEC_SOCKET
57261+ help
57262+ If you say Y here, you will be able to choose a GID whose users will
57263+ be unable to connect to other hosts from your machine, but will be
57264+ able to run servers. If this option is enabled, all users in the group
57265+ you specify will have to use passive mode when initiating ftp transfers
57266+ from the shell on your machine. If the sysctl option is enabled, a
57267+ sysctl option with name "socket_client" is created.
57268+
57269+config GRKERNSEC_SOCKET_CLIENT_GID
57270+ int "GID to deny client sockets for"
57271+ depends on GRKERNSEC_SOCKET_CLIENT
57272+ default 1003
57273+ help
57274+ Here you can choose the GID to disable client socket access for.
57275+ Remember to add the users you want client socket access disabled for to
57276+ the GID specified here. If the sysctl option is enabled, a sysctl
57277+ option with name "socket_client_gid" is created.
57278+
57279+config GRKERNSEC_SOCKET_SERVER
57280+ bool "Deny server sockets to group"
57281+ depends on GRKERNSEC_SOCKET
57282+ help
57283+ If you say Y here, you will be able to choose a GID whose users will
57284+ be unable to run server applications from your machine. If the sysctl
57285+ option is enabled, a sysctl option with name "socket_server" is created.
57286+
57287+config GRKERNSEC_SOCKET_SERVER_GID
57288+ int "GID to deny server sockets for"
57289+ depends on GRKERNSEC_SOCKET_SERVER
57290+ default 1002
57291+ help
57292+ Here you can choose the GID to disable server socket access for.
57293+ Remember to add the users you want server socket access disabled for to
57294+ the GID specified here. If the sysctl option is enabled, a sysctl
57295+ option with name "socket_server_gid" is created.
57296+
57297+endmenu
57298+menu "Sysctl Support"
57299+depends on GRKERNSEC && SYSCTL
57300+
57301+config GRKERNSEC_SYSCTL
57302+ bool "Sysctl support"
57303+ default y if GRKERNSEC_CONFIG_AUTO
57304+ help
57305+ If you say Y here, you will be able to change the options that
57306+ grsecurity runs with at bootup, without having to recompile your
57307+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
57308+ to enable (1) or disable (0) various features. All the sysctl entries
57309+ are mutable until the "grsec_lock" entry is set to a non-zero value.
57310+ All features enabled in the kernel configuration are disabled at boot
57311+ if you do not say Y to the "Turn on features by default" option.
57312+ All options should be set at startup, and the grsec_lock entry should
57313+ be set to a non-zero value after all the options are set.
57314+ *THIS IS EXTREMELY IMPORTANT*
57315+
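The recommended startup sequence, sketched in C (an init script echoing to
the same paths is equivalent; the feature name below is just an example):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static void grsec_set(const char *entry, const char *val)
    {
            char path[128];
            int fd;

            snprintf(path, sizeof(path),
                     "/proc/sys/kernel/grsecurity/%s", entry);
            fd = open(path, O_WRONLY);
            if (fd >= 0) {
                    write(fd, val, strlen(val));
                    close(fd);
            }
    }

    int main(void)
    {
            grsec_set("exec_logging", "1");  /* set desired features first... */
            grsec_set("grsec_lock", "1");    /* ...then lock; cannot be undone */
            return 0;
    }
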
57316+config GRKERNSEC_SYSCTL_DISTRO
57317+ bool "Extra sysctl support for distro makers (READ HELP)"
57318+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
57319+ help
57320+ If you say Y here, additional sysctl options will be created
57321+ for features that affect processes running as root. Therefore,
57322+ it is critical when using this option that the grsec_lock entry be
57323+ enabled after boot. Only distros shipping prebuilt kernel packages
57324+ with this option enabled, and that can ensure grsec_lock is set
57325+ after boot, should use this option.
57326+ *Failure to set grsec_lock after boot makes all grsec features
57327+ this option covers useless*
57328+
57329+ Currently this option creates the following sysctl entries:
57330+ "Disable Privileged I/O": "disable_priv_io"
57331+
57332+config GRKERNSEC_SYSCTL_ON
57333+ bool "Turn on features by default"
57334+ default y if GRKERNSEC_CONFIG_AUTO
57335+ depends on GRKERNSEC_SYSCTL
57336+ help
57337+ If you say Y here, instead of having all features enabled in the
57338+ kernel configuration disabled at boot time, the features will be
57339+ enabled at boot time. It is recommended you say Y here unless
57340+ there is some reason you would want all sysctl-tunable features to
57341+ be disabled by default. As mentioned elsewhere, it is important
57342+ to enable the grsec_lock entry once you have finished modifying
57343+ the sysctl entries.
57344+
57345+endmenu
57346+menu "Logging Options"
57347+depends on GRKERNSEC
57348+
57349+config GRKERNSEC_FLOODTIME
57350+ int "Seconds in between log messages (minimum)"
57351+ default 10
57352+ help
57353+ This option allows you to enforce the number of seconds between
57354+ grsecurity log messages. The default should be suitable for most
57355+ people; however, if you choose to change it, choose a value small enough
57356+ to allow informative logs to be produced, but large enough to
57357+ prevent flooding.
57358+
57359+config GRKERNSEC_FLOODBURST
57360+ int "Number of messages in a burst (maximum)"
57361+ default 6
57362+ help
57363+ This option allows you to choose the maximum number of messages allowed
57364+ within the flood time interval you chose in a separate option. The
57365+ default should be suitable for most people; however, if you find that
57366+ many of your logs are being interpreted as flooding, you may want to
57367+ raise this value.
57368+
57369+endmenu
57370diff --git a/grsecurity/Makefile b/grsecurity/Makefile
57371new file mode 100644
57372index 0000000..1b9afa9
57373--- /dev/null
57374+++ b/grsecurity/Makefile
57375@@ -0,0 +1,38 @@
57376+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
57377+# during 2001-2009 it was completely redesigned by Brad Spengler
57378+# into an RBAC system
57379+#
57380+# All code in this directory and various hooks inserted throughout the kernel
57381+# are copyright Brad Spengler - Open Source Security, Inc., and released
57382+# under the GPL v2 or higher
57383+
57384+KBUILD_CFLAGS += -Werror
57385+
57386+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
57387+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
57388+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
57389+
57390+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
57391+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
57392+ gracl_learn.o grsec_log.o
57393+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
57394+
57395+ifdef CONFIG_NET
57396+obj-y += grsec_sock.o
57397+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
57398+endif
57399+
57400+ifndef CONFIG_GRKERNSEC
57401+obj-y += grsec_disabled.o
57402+endif
57403+
57404+ifdef CONFIG_GRKERNSEC_HIDESYM
57405+extra-y := grsec_hidesym.o
57406+$(obj)/grsec_hidesym.o:
57407+ @-chmod -f 500 /boot
57408+ @-chmod -f 500 /lib/modules
57409+ @-chmod -f 500 /lib64/modules
57410+ @-chmod -f 500 /lib32/modules
57411+ @-chmod -f 700 .
57412+ @echo ' grsec: protected kernel image paths'
57413+endif
57414diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
57415new file mode 100644
57416index 0000000..d0e7b38
57417--- /dev/null
57418+++ b/grsecurity/gracl.c
57419@@ -0,0 +1,4071 @@
57420+#include <linux/kernel.h>
57421+#include <linux/module.h>
57422+#include <linux/sched.h>
57423+#include <linux/mm.h>
57424+#include <linux/file.h>
57425+#include <linux/fs.h>
57426+#include <linux/namei.h>
57427+#include <linux/mount.h>
57428+#include <linux/tty.h>
57429+#include <linux/proc_fs.h>
57430+#include <linux/lglock.h>
57431+#include <linux/slab.h>
57432+#include <linux/vmalloc.h>
57433+#include <linux/types.h>
57434+#include <linux/sysctl.h>
57435+#include <linux/netdevice.h>
57436+#include <linux/ptrace.h>
57437+#include <linux/gracl.h>
57438+#include <linux/gralloc.h>
57439+#include <linux/security.h>
57440+#include <linux/grinternal.h>
57441+#include <linux/pid_namespace.h>
57442+#include <linux/stop_machine.h>
57443+#include <linux/fdtable.h>
57444+#include <linux/percpu.h>
57445+#include <linux/lglock.h>
57446+#include <linux/hugetlb.h>
57447+#include <linux/posix-timers.h>
57448+#include "../fs/mount.h"
57449+
57450+#include <asm/uaccess.h>
57451+#include <asm/errno.h>
57452+#include <asm/mman.h>
57453+
57454+extern struct lglock vfsmount_lock;
57455+
57456+static struct acl_role_db acl_role_set;
57457+static struct name_db name_set;
57458+static struct inodev_db inodev_set;
57459+
57460+/* for keeping track of userspace pointers used for subjects, so we
57461+ can share references in the kernel as well
57462+*/
57463+
57464+static struct path real_root;
57465+
57466+static struct acl_subj_map_db subj_map_set;
57467+
57468+static struct acl_role_label *default_role;
57469+
57470+static struct acl_role_label *role_list;
57471+
57472+static u16 acl_sp_role_value;
57473+
57474+extern char *gr_shared_page[4];
57475+static DEFINE_MUTEX(gr_dev_mutex);
57476+DEFINE_RWLOCK(gr_inode_lock);
57477+
57478+struct gr_arg *gr_usermode;
57479+
57480+static unsigned int gr_status __read_only = GR_STATUS_INIT;
57481+
57482+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
57483+extern void gr_clear_learn_entries(void);
57484+
57485+unsigned char *gr_system_salt;
57486+unsigned char *gr_system_sum;
57487+
57488+static struct sprole_pw **acl_special_roles = NULL;
57489+static __u16 num_sprole_pws = 0;
57490+
57491+static struct acl_role_label *kernel_role = NULL;
57492+
57493+static unsigned int gr_auth_attempts = 0;
57494+static unsigned long gr_auth_expires = 0UL;
57495+
57496+#ifdef CONFIG_NET
57497+extern struct vfsmount *sock_mnt;
57498+#endif
57499+
57500+extern struct vfsmount *pipe_mnt;
57501+extern struct vfsmount *shm_mnt;
57502+
57503+#ifdef CONFIG_HUGETLBFS
57504+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
57505+#endif
57506+
57507+static struct acl_object_label *fakefs_obj_rw;
57508+static struct acl_object_label *fakefs_obj_rwx;
57509+
57510+extern int gr_init_uidset(void);
57511+extern void gr_free_uidset(void);
57512+extern void gr_remove_uid(uid_t uid);
57513+extern int gr_find_uid(uid_t uid);
57514+
57515+__inline__ int
57516+gr_acl_is_enabled(void)
57517+{
57518+ return (gr_status & GR_READY);
57519+}
57520+
57521+#ifdef CONFIG_BTRFS_FS
57522+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
57523+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
57524+#endif
57525+
57526+static inline dev_t __get_dev(const struct dentry *dentry)
57527+{
57528+#ifdef CONFIG_BTRFS_FS
57529+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
57530+ return get_btrfs_dev_from_inode(dentry->d_inode);
57531+ else
57532+#endif
57533+ return dentry->d_inode->i_sb->s_dev;
57534+}
57535+
57536+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
57537+{
57538+ return __get_dev(dentry);
57539+}
57540+
57541+static char gr_task_roletype_to_char(struct task_struct *task)
57542+{
57543+ switch (task->role->roletype &
57544+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
57545+ GR_ROLE_SPECIAL)) {
57546+ case GR_ROLE_DEFAULT:
57547+ return 'D';
57548+ case GR_ROLE_USER:
57549+ return 'U';
57550+ case GR_ROLE_GROUP:
57551+ return 'G';
57552+ case GR_ROLE_SPECIAL:
57553+ return 'S';
57554+ }
57555+
57556+ return 'X';
57557+}
57558+
57559+char gr_roletype_to_char(void)
57560+{
57561+ return gr_task_roletype_to_char(current);
57562+}
57563+
57564+__inline__ int
57565+gr_acl_tpe_check(void)
57566+{
57567+ if (unlikely(!(gr_status & GR_READY)))
57568+ return 0;
57569+ if (current->role->roletype & GR_ROLE_TPE)
57570+ return 1;
57571+ else
57572+ return 0;
57573+}
57574+
57575+int
57576+gr_handle_rawio(const struct inode *inode)
57577+{
57578+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57579+ if (inode && S_ISBLK(inode->i_mode) &&
57580+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
57581+ !capable(CAP_SYS_RAWIO))
57582+ return 1;
57583+#endif
57584+ return 0;
57585+}
57586+
57587+static int
57588+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
57589+{
57590+ if (likely(lena != lenb))
57591+ return 0;
57592+
57593+ return !memcmp(a, b, lena);
57594+}
57595+
57596+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
57597+{
57598+ *buflen -= namelen;
57599+ if (*buflen < 0)
57600+ return -ENAMETOOLONG;
57601+ *buffer -= namelen;
57602+ memcpy(*buffer, str, namelen);
57603+ return 0;
57604+}
57605+
57606+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
57607+{
57608+ return prepend(buffer, buflen, name->name, name->len);
57609+}
57610+
57611+static int prepend_path(const struct path *path, struct path *root,
57612+ char **buffer, int *buflen)
57613+{
57614+ struct dentry *dentry = path->dentry;
57615+ struct vfsmount *vfsmnt = path->mnt;
57616+ struct mount *mnt = real_mount(vfsmnt);
57617+ bool slash = false;
57618+ int error = 0;
57619+
57620+ while (dentry != root->dentry || vfsmnt != root->mnt) {
57621+ struct dentry * parent;
57622+
57623+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
57624+ /* Global root? */
57625+ if (!mnt_has_parent(mnt)) {
57626+ goto out;
57627+ }
57628+ dentry = mnt->mnt_mountpoint;
57629+ mnt = mnt->mnt_parent;
57630+ vfsmnt = &mnt->mnt;
57631+ continue;
57632+ }
57633+ parent = dentry->d_parent;
57634+ prefetch(parent);
57635+ spin_lock(&dentry->d_lock);
57636+ error = prepend_name(buffer, buflen, &dentry->d_name);
57637+ spin_unlock(&dentry->d_lock);
57638+ if (!error)
57639+ error = prepend(buffer, buflen, "/", 1);
57640+ if (error)
57641+ break;
57642+
57643+ slash = true;
57644+ dentry = parent;
57645+ }
57646+
57647+out:
57648+ if (!error && !slash)
57649+ error = prepend(buffer, buflen, "/", 1);
57650+
57651+ return error;
57652+}
57653+
57654+/* this must be called with vfsmount_lock and rename_lock held */
57655+
57656+static char *__our_d_path(const struct path *path, struct path *root,
57657+ char *buf, int buflen)
57658+{
57659+ char *res = buf + buflen;
57660+ int error;
57661+
57662+ prepend(&res, &buflen, "\0", 1);
57663+ error = prepend_path(path, root, &res, &buflen);
57664+ if (error)
57665+ return ERR_PTR(error);
57666+
57667+ return res;
57668+}
57669+
57670+static char *
57671+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
57672+{
57673+ char *retval;
57674+
57675+ retval = __our_d_path(path, root, buf, buflen);
57676+ if (unlikely(IS_ERR(retval)))
57677+ retval = strcpy(buf, "<path too long>");
57678+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
57679+ retval[1] = '\0';
57680+
57681+ return retval;
57682+}
57683+
57684+static char *
57685+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
57686+ char *buf, int buflen)
57687+{
57688+ struct path path;
57689+ char *res;
57690+
57691+ path.dentry = (struct dentry *)dentry;
57692+ path.mnt = (struct vfsmount *)vfsmnt;
57693+
57694+ /* we can use real_root.dentry, real_root.mnt, because this is only called
57695+ by the RBAC system */
57696+ res = gen_full_path(&path, &real_root, buf, buflen);
57697+
57698+ return res;
57699+}
57700+
57701+static char *
57702+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
57703+ char *buf, int buflen)
57704+{
57705+ char *res;
57706+ struct path path;
57707+ struct path root;
57708+ struct task_struct *reaper = init_pid_ns.child_reaper;
57709+
57710+ path.dentry = (struct dentry *)dentry;
57711+ path.mnt = (struct vfsmount *)vfsmnt;
57712+
57713+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
57714+ get_fs_root(reaper->fs, &root);
57715+
57716+ br_read_lock(&vfsmount_lock);
57717+ write_seqlock(&rename_lock);
57718+ res = gen_full_path(&path, &root, buf, buflen);
57719+ write_sequnlock(&rename_lock);
57720+ br_read_unlock(&vfsmount_lock);
57721+
57722+ path_put(&root);
57723+ return res;
57724+}
57725+
57726+static char *
57727+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
57728+{
57729+ char *ret;
57730+ br_read_lock(&vfsmount_lock);
57731+ write_seqlock(&rename_lock);
57732+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
57733+ PAGE_SIZE);
57734+ write_sequnlock(&rename_lock);
57735+ br_read_unlock(&vfsmount_lock);
57736+ return ret;
57737+}
57738+
57739+static char *
57740+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
57741+{
57742+ char *ret;
57743+ char *buf;
57744+ int buflen;
57745+
57746+ br_read_lock(&vfsmount_lock);
57747+ write_seqlock(&rename_lock);
57748+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
57749+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
57750+ buflen = (int)(ret - buf);
57751+ if (buflen >= 5)
57752+ prepend(&ret, &buflen, "/proc", 5);
57753+ else
57754+ ret = strcpy(buf, "<path too long>");
57755+ write_sequnlock(&rename_lock);
57756+ br_read_unlock(&vfsmount_lock);
57757+ return ret;
57758+}
57759+
57760+char *
57761+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
57762+{
57763+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
57764+ PAGE_SIZE);
57765+}
57766+
57767+char *
57768+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
57769+{
57770+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
57771+ PAGE_SIZE);
57772+}
57773+
57774+char *
57775+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
57776+{
57777+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
57778+ PAGE_SIZE);
57779+}
57780+
57781+char *
57782+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
57783+{
57784+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
57785+ PAGE_SIZE);
57786+}
57787+
57788+char *
57789+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
57790+{
57791+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
57792+ PAGE_SIZE);
57793+}
57794+
57795+__inline__ __u32
57796+to_gr_audit(const __u32 reqmode)
57797+{
57798+ /* masks off auditable permission flags, then shifts them to create
57799+ auditing flags, and adds the special case of append auditing if
57800+ we're requesting write */
57801+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
57802+}
57803+
57804+struct acl_subject_label *
57805+lookup_subject_map(const struct acl_subject_label *userp)
57806+{
57807+ unsigned int index = gr_shash(userp, subj_map_set.s_size);
57808+ struct subject_map *match;
57809+
57810+ match = subj_map_set.s_hash[index];
57811+
57812+ while (match && match->user != userp)
57813+ match = match->next;
57814+
57815+ if (match != NULL)
57816+ return match->kernel;
57817+ else
57818+ return NULL;
57819+}
57820+
57821+static void
57822+insert_subj_map_entry(struct subject_map *subjmap)
57823+{
57824+ unsigned int index = gr_shash(subjmap->user, subj_map_set.s_size);
57825+ struct subject_map **curr;
57826+
57827+ subjmap->prev = NULL;
57828+
57829+ curr = &subj_map_set.s_hash[index];
57830+ if (*curr != NULL)
57831+ (*curr)->prev = subjmap;
57832+
57833+ subjmap->next = *curr;
57834+ *curr = subjmap;
57835+
57836+ return;
57837+}
57838+
57839+static struct acl_role_label *
57840+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
57841+ const gid_t gid)
57842+{
57843+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
57844+ struct acl_role_label *match;
57845+ struct role_allowed_ip *ipp;
57846+ unsigned int x;
57847+ u32 curr_ip = task->signal->curr_ip;
57848+
57849+ task->signal->saved_ip = curr_ip;
57850+
57851+ match = acl_role_set.r_hash[index];
57852+
57853+ while (match) {
57854+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
57855+ for (x = 0; x < match->domain_child_num; x++) {
57856+ if (match->domain_children[x] == uid)
57857+ goto found;
57858+ }
57859+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
57860+ break;
57861+ match = match->next;
57862+ }
57863+found:
57864+ if (match == NULL) {
57865+ try_group:
57866+ index = gr_rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
57867+ match = acl_role_set.r_hash[index];
57868+
57869+ while (match) {
57870+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
57871+ for (x = 0; x < match->domain_child_num; x++) {
57872+ if (match->domain_children[x] == gid)
57873+ goto found2;
57874+ }
57875+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
57876+ break;
57877+ match = match->next;
57878+ }
57879+found2:
57880+ if (match == NULL)
57881+ match = default_role;
57882+ if (match->allowed_ips == NULL)
57883+ return match;
57884+ else {
57885+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
57886+ if (likely
57887+ ((ntohl(curr_ip) & ipp->netmask) ==
57888+ (ntohl(ipp->addr) & ipp->netmask)))
57889+ return match;
57890+ }
57891+ match = default_role;
57892+ }
57893+ } else if (match->allowed_ips == NULL) {
57894+ return match;
57895+ } else {
57896+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
57897+ if (likely
57898+ ((ntohl(curr_ip) & ipp->netmask) ==
57899+ (ntohl(ipp->addr) & ipp->netmask)))
57900+ return match;
57901+ }
57902+ goto try_group;
57903+ }
57904+
57905+ return match;
57906+}
57907+
57908+struct acl_subject_label *
57909+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
57910+ const struct acl_role_label *role)
57911+{
57912+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
57913+ struct acl_subject_label *match;
57914+
57915+ match = role->subj_hash[index];
57916+
57917+ while (match && (match->inode != ino || match->device != dev ||
57918+ (match->mode & GR_DELETED))) {
57919+ match = match->next;
57920+ }
57921+
57922+ if (match && !(match->mode & GR_DELETED))
57923+ return match;
57924+ else
57925+ return NULL;
57926+}
57927+
57928+struct acl_subject_label *
57929+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
57930+ const struct acl_role_label *role)
57931+{
57932+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
57933+ struct acl_subject_label *match;
57934+
57935+ match = role->subj_hash[index];
57936+
57937+ while (match && (match->inode != ino || match->device != dev ||
57938+ !(match->mode & GR_DELETED))) {
57939+ match = match->next;
57940+ }
57941+
57942+ if (match && (match->mode & GR_DELETED))
57943+ return match;
57944+ else
57945+ return NULL;
57946+}
57947+
57948+static struct acl_object_label *
57949+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
57950+ const struct acl_subject_label *subj)
57951+{
57952+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
57953+ struct acl_object_label *match;
57954+
57955+ match = subj->obj_hash[index];
57956+
57957+ while (match && (match->inode != ino || match->device != dev ||
57958+ (match->mode & GR_DELETED))) {
57959+ match = match->next;
57960+ }
57961+
57962+ if (match && !(match->mode & GR_DELETED))
57963+ return match;
57964+ else
57965+ return NULL;
57966+}
57967+
57968+static struct acl_object_label *
57969+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
57970+ const struct acl_subject_label *subj)
57971+{
57972+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
57973+ struct acl_object_label *match;
57974+
57975+ match = subj->obj_hash[index];
57976+
57977+ while (match && (match->inode != ino || match->device != dev ||
57978+ !(match->mode & GR_DELETED))) {
57979+ match = match->next;
57980+ }
57981+
57982+ if (match && (match->mode & GR_DELETED))
57983+ return match;
57984+
57985+ match = subj->obj_hash[index];
57986+
57987+ while (match && (match->inode != ino || match->device != dev ||
57988+ (match->mode & GR_DELETED))) {
57989+ match = match->next;
57990+ }
57991+
57992+ if (match && !(match->mode & GR_DELETED))
57993+ return match;
57994+ else
57995+ return NULL;
57996+}
57997+
57998+static struct name_entry *
57999+lookup_name_entry(const char *name)
58000+{
58001+ unsigned int len = strlen(name);
58002+ unsigned int key = full_name_hash(name, len);
58003+ unsigned int index = key % name_set.n_size;
58004+ struct name_entry *match;
58005+
58006+ match = name_set.n_hash[index];
58007+
58008+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
58009+ match = match->next;
58010+
58011+ return match;
58012+}
58013+
58014+static struct name_entry *
58015+lookup_name_entry_create(const char *name)
58016+{
58017+ unsigned int len = strlen(name);
58018+ unsigned int key = full_name_hash(name, len);
58019+ unsigned int index = key % name_set.n_size;
58020+ struct name_entry *match;
58021+
58022+ match = name_set.n_hash[index];
58023+
58024+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
58025+ !match->deleted))
58026+ match = match->next;
58027+
58028+ if (match && match->deleted)
58029+ return match;
58030+
58031+ match = name_set.n_hash[index];
58032+
58033+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
58034+ match->deleted))
58035+ match = match->next;
58036+
58037+ if (match && !match->deleted)
58038+ return match;
58039+ else
58040+ return NULL;
58041+}
58042+
58043+static struct inodev_entry *
58044+lookup_inodev_entry(const ino_t ino, const dev_t dev)
58045+{
58046+ unsigned int index = gr_fhash(ino, dev, inodev_set.i_size);
58047+ struct inodev_entry *match;
58048+
58049+ match = inodev_set.i_hash[index];
58050+
58051+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
58052+ match = match->next;
58053+
58054+ return match;
58055+}
58056+
58057+static void
58058+insert_inodev_entry(struct inodev_entry *entry)
58059+{
58060+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
58061+ inodev_set.i_size);
58062+ struct inodev_entry **curr;
58063+
58064+ entry->prev = NULL;
58065+
58066+ curr = &inodev_set.i_hash[index];
58067+ if (*curr != NULL)
58068+ (*curr)->prev = entry;
58069+
58070+ entry->next = *curr;
58071+ *curr = entry;
58072+
58073+ return;
58074+}
58075+
58076+static void
58077+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
58078+{
58079+ unsigned int index =
58080+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
58081+ struct acl_role_label **curr;
58082+ struct acl_role_label *tmp, *tmp2;
58083+
58084+ curr = &acl_role_set.r_hash[index];
58085+
58086+ /* simple case, slot is empty, just set it to our role */
58087+ if (*curr == NULL) {
58088+ *curr = role;
58089+ } else {
58090+ /* example:
58091+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
58092+ 2 -> 3
58093+ */
58094+ /* first check to see if we can already be reached via this slot */
58095+ tmp = *curr;
58096+ while (tmp && tmp != role)
58097+ tmp = tmp->next;
58098+ if (tmp == role) {
58099+ /* we don't need to add ourselves to this slot's chain */
58100+ return;
58101+ }
58102+ /* we need to add ourselves to this chain, two cases */
58103+ if (role->next == NULL) {
58104+ /* simple case, append the current chain to our role */
58105+ role->next = *curr;
58106+ *curr = role;
58107+ } else {
58108+ /* 1 -> 2 -> 3 -> 4
58109+ 2 -> 3 -> 4
58110+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
58111+ */
58112+ /* trickier case: walk our role's chain until we find
58113+ the role for the start of the current slot's chain */
58114+ tmp = role;
58115+ tmp2 = *curr;
58116+ while (tmp->next && tmp->next != tmp2)
58117+ tmp = tmp->next;
58118+ if (tmp->next == tmp2) {
58119+ /* from example above, we found 3, so just
58120+ replace this slot's chain with ours */
58121+ *curr = role;
58122+ } else {
58123+ /* we didn't find a subset of our role's chain
58124+ in the current slot's chain, so append their
58125+ chain to ours, and set us as the first role in
58126+ the slot's chain
58127+
58128+ we could fold this case with the case above,
58129+ but making it explicit for clarity
58130+ */
58131+ tmp->next = tmp2;
58132+ *curr = role;
58133+ }
58134+ }
58135+ }
58136+
58137+ return;
58138+}
58139+
58140+static void
58141+insert_acl_role_label(struct acl_role_label *role)
58142+{
58143+ int i;
58144+
58145+ if (role_list == NULL) {
58146+ role_list = role;
58147+ role->prev = NULL;
58148+ } else {
58149+ role->prev = role_list;
58150+ role_list = role;
58151+ }
58152+
58153+ /* used for hash chains */
58154+ role->next = NULL;
58155+
58156+ if (role->roletype & GR_ROLE_DOMAIN) {
58157+ for (i = 0; i < role->domain_child_num; i++)
58158+ __insert_acl_role_label(role, role->domain_children[i]);
58159+ } else
58160+ __insert_acl_role_label(role, role->uidgid);
58161+}
58162+
58163+static int
58164+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
58165+{
58166+ struct name_entry **curr, *nentry;
58167+ struct inodev_entry *ientry;
58168+ unsigned int len = strlen(name);
58169+ unsigned int key = full_name_hash(name, len);
58170+ unsigned int index = key % name_set.n_size;
58171+
58172+ curr = &name_set.n_hash[index];
58173+
58174+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
58175+ curr = &((*curr)->next);
58176+
58177+ if (*curr != NULL)
58178+ return 1;
58179+
58180+ nentry = acl_alloc(sizeof (struct name_entry));
58181+ if (nentry == NULL)
58182+ return 0;
58183+ ientry = acl_alloc(sizeof (struct inodev_entry));
58184+ if (ientry == NULL)
58185+ return 0;
58186+ ientry->nentry = nentry;
58187+
58188+ nentry->key = key;
58189+ nentry->name = name;
58190+ nentry->inode = inode;
58191+ nentry->device = device;
58192+ nentry->len = len;
58193+ nentry->deleted = deleted;
58194+
58195+ nentry->prev = NULL;
58196+ curr = &name_set.n_hash[index];
58197+ if (*curr != NULL)
58198+ (*curr)->prev = nentry;
58199+ nentry->next = *curr;
58200+ *curr = nentry;
58201+
58202+ /* insert us into the table searchable by inode/dev */
58203+ insert_inodev_entry(ientry);
58204+
58205+ return 1;
58206+}
58207+
58208+static void
58209+insert_acl_obj_label(struct acl_object_label *obj,
58210+ struct acl_subject_label *subj)
58211+{
58212+ unsigned int index =
58213+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
58214+ struct acl_object_label **curr;
58215+
58216+
58217+ obj->prev = NULL;
58218+
58219+ curr = &subj->obj_hash[index];
58220+ if (*curr != NULL)
58221+ (*curr)->prev = obj;
58222+
58223+ obj->next = *curr;
58224+ *curr = obj;
58225+
58226+ return;
58227+}
58228+
58229+static void
58230+insert_acl_subj_label(struct acl_subject_label *obj,
58231+ struct acl_role_label *role)
58232+{
58233+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
58234+ struct acl_subject_label **curr;
58235+
58236+ obj->prev = NULL;
58237+
58238+ curr = &role->subj_hash[index];
58239+ if (*curr != NULL)
58240+ (*curr)->prev = obj;
58241+
58242+ obj->next = *curr;
58243+ *curr = obj;
58244+
58245+ return;
58246+}
58247+
58248+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
58249+
58250+static void *
58251+create_table(__u32 * len, int elementsize)
58252+{
58253+ unsigned int table_sizes[] = {
58254+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
58255+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
58256+ 4194301, 8388593, 16777213, 33554393, 67108859
58257+ };
58258+ void *newtable = NULL;
58259+ unsigned int pwr = 0;
58260+
58261+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
58262+ table_sizes[pwr] <= *len)
58263+ pwr++;
58264+
58265+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
58266+ return newtable;
58267+
58268+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
58269+ newtable =
58270+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
58271+ else
58272+ newtable = vmalloc(table_sizes[pwr] * elementsize);
58273+
58274+ *len = table_sizes[pwr];
58275+
58276+ return newtable;
58277+}
58278+
58279+static int
58280+init_variables(const struct gr_arg *arg)
58281+{
58282+ struct task_struct *reaper = init_pid_ns.child_reaper;
58283+ unsigned int stacksize;
58284+
58285+ subj_map_set.s_size = arg->role_db.num_subjects;
58286+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
58287+ name_set.n_size = arg->role_db.num_objects;
58288+ inodev_set.i_size = arg->role_db.num_objects;
58289+
58290+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
58291+ !name_set.n_size || !inodev_set.i_size)
58292+ return 1;
58293+
58294+ if (!gr_init_uidset())
58295+ return 1;
58296+
58297+ /* set up the stack that holds allocation info */
58298+
58299+ stacksize = arg->role_db.num_pointers + 5;
58300+
58301+ if (!acl_alloc_stack_init(stacksize))
58302+ return 1;
58303+
58304+ /* grab reference for the real root dentry and vfsmount */
58305+ get_fs_root(reaper->fs, &real_root);
58306+
58307+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58308+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
58309+#endif
58310+
58311+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
58312+ if (fakefs_obj_rw == NULL)
58313+ return 1;
58314+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
58315+
58316+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
58317+ if (fakefs_obj_rwx == NULL)
58318+ return 1;
58319+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
58320+
58321+ subj_map_set.s_hash =
58322+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
58323+ acl_role_set.r_hash =
58324+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
58325+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
58326+ inodev_set.i_hash =
58327+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
58328+
58329+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
58330+ !name_set.n_hash || !inodev_set.i_hash)
58331+ return 1;
58332+
58333+ memset(subj_map_set.s_hash, 0,
58334+ sizeof(struct subject_map *) * subj_map_set.s_size);
58335+ memset(acl_role_set.r_hash, 0,
58336+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
58337+ memset(name_set.n_hash, 0,
58338+ sizeof (struct name_entry *) * name_set.n_size);
58339+ memset(inodev_set.i_hash, 0,
58340+ sizeof (struct inodev_entry *) * inodev_set.i_size);
58341+
58342+ return 0;
58343+}
58344+
58345+/* free information that is not needed after startup;
58346+   currently this is the user->kernel pointer mappings for subjects
58347+*/
58348+
58349+static void
58350+free_init_variables(void)
58351+{
58352+ __u32 i;
58353+
58354+ if (subj_map_set.s_hash) {
58355+ for (i = 0; i < subj_map_set.s_size; i++) {
58356+ if (subj_map_set.s_hash[i]) {
58357+ kfree(subj_map_set.s_hash[i]);
58358+ subj_map_set.s_hash[i] = NULL;
58359+ }
58360+ }
58361+
58362+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
58363+ PAGE_SIZE)
58364+ kfree(subj_map_set.s_hash);
58365+ else
58366+ vfree(subj_map_set.s_hash);
58367+ }
58368+
58369+ return;
58370+}
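
free_init_variables() pairs its frees with how create_table() allocated: tables at or under PAGE_SIZE came from kmalloc() and return via kfree(), larger ones via vfree(). The same size test recurs throughout free_variables() below; a hypothetical helper capturing the pattern (later kernels provide kvfree() for exactly this):

static void table_free(void *table, unsigned long bytes)
{
	if (bytes <= PAGE_SIZE)
		kfree(table);		/* small table: slab allocation */
	else
		vfree(table);		/* large table: vmalloc range */
}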
58371+
58372+static void
58373+free_variables(void)
58374+{
58375+ struct acl_subject_label *s;
58376+ struct acl_role_label *r;
58377+ struct task_struct *task, *task2;
58378+ unsigned int x;
58379+
58380+ gr_clear_learn_entries();
58381+
58382+ read_lock(&tasklist_lock);
58383+ do_each_thread(task2, task) {
58384+ task->acl_sp_role = 0;
58385+ task->acl_role_id = 0;
58386+ task->acl = NULL;
58387+ task->role = NULL;
58388+ } while_each_thread(task2, task);
58389+ read_unlock(&tasklist_lock);
58390+
58391+ /* release the reference to the real root dentry and vfsmount */
58392+ path_put(&real_root);
58393+ memset(&real_root, 0, sizeof(real_root));
58394+
58395+ /* free all object hash tables */
58396+
58397+ FOR_EACH_ROLE_START(r)
58398+ if (r->subj_hash == NULL)
58399+ goto next_role;
58400+ FOR_EACH_SUBJECT_START(r, s, x)
58401+ if (s->obj_hash == NULL)
58402+ break;
58403+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
58404+ kfree(s->obj_hash);
58405+ else
58406+ vfree(s->obj_hash);
58407+ FOR_EACH_SUBJECT_END(s, x)
58408+ FOR_EACH_NESTED_SUBJECT_START(r, s)
58409+ if (s->obj_hash == NULL)
58410+ break;
58411+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
58412+ kfree(s->obj_hash);
58413+ else
58414+ vfree(s->obj_hash);
58415+ FOR_EACH_NESTED_SUBJECT_END(s)
58416+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
58417+ kfree(r->subj_hash);
58418+ else
58419+ vfree(r->subj_hash);
58420+ r->subj_hash = NULL;
58421+next_role:
58422+ FOR_EACH_ROLE_END(r)
58423+
58424+ acl_free_all();
58425+
58426+ if (acl_role_set.r_hash) {
58427+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
58428+ PAGE_SIZE)
58429+ kfree(acl_role_set.r_hash);
58430+ else
58431+ vfree(acl_role_set.r_hash);
58432+ }
58433+ if (name_set.n_hash) {
58434+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
58435+ PAGE_SIZE)
58436+ kfree(name_set.n_hash);
58437+ else
58438+ vfree(name_set.n_hash);
58439+ }
58440+
58441+ if (inodev_set.i_hash) {
58442+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
58443+ PAGE_SIZE)
58444+ kfree(inodev_set.i_hash);
58445+ else
58446+ vfree(inodev_set.i_hash);
58447+ }
58448+
58449+ gr_free_uidset();
58450+
58451+ memset(&name_set, 0, sizeof (struct name_db));
58452+ memset(&inodev_set, 0, sizeof (struct inodev_db));
58453+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
58454+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
58455+
58456+ default_role = NULL;
58457+ kernel_role = NULL;
58458+ role_list = NULL;
58459+
58460+ return;
58461+}
58462+
58463+static __u32
58464+count_user_objs(struct acl_object_label *userp)
58465+{
58466+ struct acl_object_label o_tmp;
58467+ __u32 num = 0;
58468+
58469+ while (userp) {
58470+ if (copy_from_user(&o_tmp, userp,
58471+ sizeof (struct acl_object_label)))
58472+ break;
58473+
58474+ userp = o_tmp.prev;
58475+ num++;
58476+ }
58477+
58478+ return num;
58479+}
58480+
58481+static struct acl_subject_label *
58482+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
58483+
58484+static int
58485+copy_user_glob(struct acl_object_label *obj)
58486+{
58487+ struct acl_object_label *g_tmp, **guser;
58488+ unsigned int len;
58489+ char *tmp;
58490+
58491+ if (obj->globbed == NULL)
58492+ return 0;
58493+
58494+ guser = &obj->globbed;
58495+ while (*guser) {
58496+ g_tmp = (struct acl_object_label *)
58497+ acl_alloc(sizeof (struct acl_object_label));
58498+ if (g_tmp == NULL)
58499+ return -ENOMEM;
58500+
58501+ if (copy_from_user(g_tmp, *guser,
58502+ sizeof (struct acl_object_label)))
58503+ return -EFAULT;
58504+
58505+ len = strnlen_user(g_tmp->filename, PATH_MAX);
58506+
58507+ if (!len || len >= PATH_MAX)
58508+ return -EINVAL;
58509+
58510+ if ((tmp = (char *) acl_alloc(len)) == NULL)
58511+ return -ENOMEM;
58512+
58513+ if (copy_from_user(tmp, g_tmp->filename, len))
58514+ return -EFAULT;
58515+ tmp[len-1] = '\0';
58516+ g_tmp->filename = tmp;
58517+
58518+ *guser = g_tmp;
58519+ guser = &(g_tmp->next);
58520+ }
58521+
58522+ return 0;
58523+}
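
copy_user_glob() and the copy routines that follow repeat one idiom for pulling a user-space string into kernel memory: size it with strnlen_user(), reject empty or over-long results, allocate exactly len bytes, copy, and rewrite the final byte with a NUL, since a racing user thread could have grown the string between the two calls. A hedged sketch of the idiom as a standalone helper (hypothetical copy_user_string(), using kmalloc() where the patch uses its acl_alloc() pool):

static char *copy_user_string(const char __user *ustr, unsigned int maxlen)
{
	unsigned int len;
	char *tmp;

	len = strnlen_user(ustr, maxlen);	/* length including the NUL */
	if (!len || len >= maxlen)
		return ERR_PTR(-EINVAL);

	tmp = kmalloc(len, GFP_KERNEL);
	if (tmp == NULL)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(tmp, ustr, len)) {
		kfree(tmp);
		return ERR_PTR(-EFAULT);
	}
	tmp[len - 1] = '\0';	/* re-terminate: the source may have changed */

	return tmp;
}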
58524+
58525+static int
58526+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
58527+ struct acl_role_label *role)
58528+{
58529+ struct acl_object_label *o_tmp;
58530+ unsigned int len;
58531+ int ret;
58532+ char *tmp;
58533+
58534+ while (userp) {
58535+ if ((o_tmp = (struct acl_object_label *)
58536+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
58537+ return -ENOMEM;
58538+
58539+ if (copy_from_user(o_tmp, userp,
58540+ sizeof (struct acl_object_label)))
58541+ return -EFAULT;
58542+
58543+ userp = o_tmp->prev;
58544+
58545+ len = strnlen_user(o_tmp->filename, PATH_MAX);
58546+
58547+ if (!len || len >= PATH_MAX)
58548+ return -EINVAL;
58549+
58550+ if ((tmp = (char *) acl_alloc(len)) == NULL)
58551+ return -ENOMEM;
58552+
58553+ if (copy_from_user(tmp, o_tmp->filename, len))
58554+ return -EFAULT;
58555+ tmp[len-1] = '\0';
58556+ o_tmp->filename = tmp;
58557+
58558+ insert_acl_obj_label(o_tmp, subj);
58559+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
58560+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
58561+ return -ENOMEM;
58562+
58563+ ret = copy_user_glob(o_tmp);
58564+ if (ret)
58565+ return ret;
58566+
58567+ if (o_tmp->nested) {
58568+ int already_copied;
58569+
58570+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
58571+ if (IS_ERR(o_tmp->nested))
58572+ return PTR_ERR(o_tmp->nested);
58573+
58574+ /* insert into nested subject list if we haven't copied this one yet
58575+ to prevent duplicate entries */
58576+ if (!already_copied) {
58577+ o_tmp->nested->next = role->hash->first;
58578+ role->hash->first = o_tmp->nested;
58579+ }
58580+ }
58581+ }
58582+
58583+ return 0;
58584+}
58585+
58586+static __u32
58587+count_user_subjs(struct acl_subject_label *userp)
58588+{
58589+ struct acl_subject_label s_tmp;
58590+ __u32 num = 0;
58591+
58592+ while (userp) {
58593+ if (copy_from_user(&s_tmp, userp,
58594+ sizeof (struct acl_subject_label)))
58595+ break;
58596+
58597+		userp = s_tmp.prev;
+		num++;
58598+ }
58599+
58600+ return num;
58601+}
58602+
58603+static int
58604+copy_user_allowedips(struct acl_role_label *rolep)
58605+{
58606+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
58607+
58608+ ruserip = rolep->allowed_ips;
58609+
58610+ while (ruserip) {
58611+ rlast = rtmp;
58612+
58613+ if ((rtmp = (struct role_allowed_ip *)
58614+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
58615+ return -ENOMEM;
58616+
58617+ if (copy_from_user(rtmp, ruserip,
58618+ sizeof (struct role_allowed_ip)))
58619+ return -EFAULT;
58620+
58621+ ruserip = rtmp->prev;
58622+
58623+ if (!rlast) {
58624+ rtmp->prev = NULL;
58625+ rolep->allowed_ips = rtmp;
58626+ } else {
58627+ rlast->next = rtmp;
58628+ rtmp->prev = rlast;
58629+ }
58630+
58631+ if (!ruserip)
58632+ rtmp->next = NULL;
58633+ }
58634+
58635+ return 0;
58636+}
58637+
58638+static int
58639+copy_user_transitions(struct acl_role_label *rolep)
58640+{
58641+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
58642+
58643+ unsigned int len;
58644+ char *tmp;
58645+
58646+ rusertp = rolep->transitions;
58647+
58648+ while (rusertp) {
58649+ rlast = rtmp;
58650+
58651+ if ((rtmp = (struct role_transition *)
58652+ acl_alloc(sizeof (struct role_transition))) == NULL)
58653+ return -ENOMEM;
58654+
58655+ if (copy_from_user(rtmp, rusertp,
58656+ sizeof (struct role_transition)))
58657+ return -EFAULT;
58658+
58659+ rusertp = rtmp->prev;
58660+
58661+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
58662+
58663+ if (!len || len >= GR_SPROLE_LEN)
58664+ return -EINVAL;
58665+
58666+ if ((tmp = (char *) acl_alloc(len)) == NULL)
58667+ return -ENOMEM;
58668+
58669+ if (copy_from_user(tmp, rtmp->rolename, len))
58670+ return -EFAULT;
58671+ tmp[len-1] = '\0';
58672+ rtmp->rolename = tmp;
58673+
58674+ if (!rlast) {
58675+ rtmp->prev = NULL;
58676+ rolep->transitions = rtmp;
58677+ } else {
58678+ rlast->next = rtmp;
58679+ rtmp->prev = rlast;
58680+ }
58681+
58682+ if (!rusertp)
58683+ rtmp->next = NULL;
58684+ }
58685+
58686+ return 0;
58687+}
58688+
58689+static struct acl_subject_label *
58690+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
58691+{
58692+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
58693+ unsigned int len;
58694+ char *tmp;
58695+ __u32 num_objs;
58696+ struct acl_ip_label **i_tmp, *i_utmp2;
58697+ struct gr_hash_struct ghash;
58698+ struct subject_map *subjmap;
58699+ unsigned int i_num;
58700+ int err;
58701+
58702+ if (already_copied != NULL)
58703+ *already_copied = 0;
58704+
58705+ s_tmp = lookup_subject_map(userp);
58706+
58707+ /* we've already copied this subject into the kernel, just return
58708+ the reference to it, and don't copy it over again
58709+ */
58710+ if (s_tmp) {
58711+ if (already_copied != NULL)
58712+ *already_copied = 1;
58713+		return s_tmp;
58714+ }
58715+
58716+ if ((s_tmp = (struct acl_subject_label *)
58717+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
58718+ return ERR_PTR(-ENOMEM);
58719+
58720+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
58721+ if (subjmap == NULL)
58722+ return ERR_PTR(-ENOMEM);
58723+
58724+ subjmap->user = userp;
58725+ subjmap->kernel = s_tmp;
58726+ insert_subj_map_entry(subjmap);
58727+
58728+ if (copy_from_user(s_tmp, userp,
58729+ sizeof (struct acl_subject_label)))
58730+ return ERR_PTR(-EFAULT);
58731+
58732+ len = strnlen_user(s_tmp->filename, PATH_MAX);
58733+
58734+ if (!len || len >= PATH_MAX)
58735+ return ERR_PTR(-EINVAL);
58736+
58737+ if ((tmp = (char *) acl_alloc(len)) == NULL)
58738+ return ERR_PTR(-ENOMEM);
58739+
58740+ if (copy_from_user(tmp, s_tmp->filename, len))
58741+ return ERR_PTR(-EFAULT);
58742+ tmp[len-1] = '\0';
58743+ s_tmp->filename = tmp;
58744+
58745+ if (!strcmp(s_tmp->filename, "/"))
58746+ role->root_label = s_tmp;
58747+
58748+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
58749+ return ERR_PTR(-EFAULT);
58750+
58751+ /* copy user and group transition tables */
58752+
58753+ if (s_tmp->user_trans_num) {
58754+ uid_t *uidlist;
58755+
58756+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
58757+ if (uidlist == NULL)
58758+ return ERR_PTR(-ENOMEM);
58759+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
58760+ return ERR_PTR(-EFAULT);
58761+
58762+ s_tmp->user_transitions = uidlist;
58763+ }
58764+
58765+ if (s_tmp->group_trans_num) {
58766+ gid_t *gidlist;
58767+
58768+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
58769+ if (gidlist == NULL)
58770+ return ERR_PTR(-ENOMEM);
58771+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
58772+ return ERR_PTR(-EFAULT);
58773+
58774+ s_tmp->group_transitions = gidlist;
58775+ }
58776+
58777+ /* set up object hash table */
58778+ num_objs = count_user_objs(ghash.first);
58779+
58780+ s_tmp->obj_hash_size = num_objs;
58781+ s_tmp->obj_hash =
58782+ (struct acl_object_label **)
58783+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
58784+
58785+ if (!s_tmp->obj_hash)
58786+ return ERR_PTR(-ENOMEM);
58787+
58788+ memset(s_tmp->obj_hash, 0,
58789+ s_tmp->obj_hash_size *
58790+ sizeof (struct acl_object_label *));
58791+
58792+ /* add in objects */
58793+ err = copy_user_objs(ghash.first, s_tmp, role);
58794+
58795+ if (err)
58796+ return ERR_PTR(err);
58797+
58798+ /* set pointer for parent subject */
58799+ if (s_tmp->parent_subject) {
58800+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
58801+
58802+ if (IS_ERR(s_tmp2))
58803+ return s_tmp2;
58804+
58805+ s_tmp->parent_subject = s_tmp2;
58806+ }
58807+
58808+ /* add in ip acls */
58809+
58810+ if (!s_tmp->ip_num) {
58811+ s_tmp->ips = NULL;
58812+ goto insert;
58813+ }
58814+
58815+ i_tmp =
58816+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
58817+ sizeof (struct acl_ip_label *));
58818+
58819+ if (!i_tmp)
58820+ return ERR_PTR(-ENOMEM);
58821+
58822+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
58823+ *(i_tmp + i_num) =
58824+ (struct acl_ip_label *)
58825+ acl_alloc(sizeof (struct acl_ip_label));
58826+ if (!*(i_tmp + i_num))
58827+ return ERR_PTR(-ENOMEM);
58828+
58829+ if (copy_from_user
58830+ (&i_utmp2, s_tmp->ips + i_num,
58831+ sizeof (struct acl_ip_label *)))
58832+ return ERR_PTR(-EFAULT);
58833+
58834+ if (copy_from_user
58835+ (*(i_tmp + i_num), i_utmp2,
58836+ sizeof (struct acl_ip_label)))
58837+ return ERR_PTR(-EFAULT);
58838+
58839+ if ((*(i_tmp + i_num))->iface == NULL)
58840+ continue;
58841+
58842+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
58843+ if (!len || len >= IFNAMSIZ)
58844+ return ERR_PTR(-EINVAL);
58845+ tmp = acl_alloc(len);
58846+ if (tmp == NULL)
58847+ return ERR_PTR(-ENOMEM);
58848+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
58849+ return ERR_PTR(-EFAULT);
58850+ (*(i_tmp + i_num))->iface = tmp;
58851+ }
58852+
58853+ s_tmp->ips = i_tmp;
58854+
58855+insert:
58856+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
58857+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
58858+ return ERR_PTR(-ENOMEM);
58859+
58860+ return s_tmp;
58861+}
58862+
58863+static int
58864+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
58865+{
58866+ struct acl_subject_label s_pre;
58867+ struct acl_subject_label * ret;
58868+ int err;
58869+
58870+ while (userp) {
58871+ if (copy_from_user(&s_pre, userp,
58872+ sizeof (struct acl_subject_label)))
58873+ return -EFAULT;
58874+
58875+ ret = do_copy_user_subj(userp, role, NULL);
58876+
58877+ err = PTR_ERR(ret);
58878+ if (IS_ERR(ret))
58879+ return err;
58880+
58881+ insert_acl_subj_label(ret, role);
58882+
58883+ userp = s_pre.prev;
58884+ }
58885+
58886+ return 0;
58887+}
58888+
58889+static int
58890+copy_user_acl(struct gr_arg *arg)
58891+{
58892+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
58893+ struct acl_subject_label *subj_list;
58894+ struct sprole_pw *sptmp;
58895+ struct gr_hash_struct *ghash;
58896+ uid_t *domainlist;
58897+ unsigned int r_num;
58898+ unsigned int len;
58899+ char *tmp;
58900+ int err = 0;
58901+ __u16 i;
58902+ __u32 num_subjs;
58903+
58904+ /* we need a default and kernel role */
58905+ if (arg->role_db.num_roles < 2)
58906+ return -EINVAL;
58907+
58908+ /* copy special role authentication info from userspace */
58909+
58910+ num_sprole_pws = arg->num_sprole_pws;
58911+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
58912+
58913+ if (!acl_special_roles && num_sprole_pws)
58914+ return -ENOMEM;
58915+
58916+ for (i = 0; i < num_sprole_pws; i++) {
58917+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
58918+ if (!sptmp)
58919+ return -ENOMEM;
58920+ if (copy_from_user(sptmp, arg->sprole_pws + i,
58921+ sizeof (struct sprole_pw)))
58922+ return -EFAULT;
58923+
58924+ len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
58925+
58926+ if (!len || len >= GR_SPROLE_LEN)
58927+ return -EINVAL;
58928+
58929+ if ((tmp = (char *) acl_alloc(len)) == NULL)
58930+ return -ENOMEM;
58931+
58932+ if (copy_from_user(tmp, sptmp->rolename, len))
58933+ return -EFAULT;
58934+
58935+ tmp[len-1] = '\0';
58936+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58937+ printk(KERN_ALERT "Copying special role %s\n", tmp);
58938+#endif
58939+ sptmp->rolename = tmp;
58940+ acl_special_roles[i] = sptmp;
58941+ }
58942+
58943+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
58944+
58945+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
58946+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
58947+
58948+ if (!r_tmp)
58949+ return -ENOMEM;
58950+
58951+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
58952+ sizeof (struct acl_role_label *)))
58953+ return -EFAULT;
58954+
58955+ if (copy_from_user(r_tmp, r_utmp2,
58956+ sizeof (struct acl_role_label)))
58957+ return -EFAULT;
58958+
58959+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
58960+
58961+		if (!len || len >= GR_SPROLE_LEN)
58962+ return -EINVAL;
58963+
58964+ if ((tmp = (char *) acl_alloc(len)) == NULL)
58965+ return -ENOMEM;
58966+
58967+ if (copy_from_user(tmp, r_tmp->rolename, len))
58968+ return -EFAULT;
58969+
58970+ tmp[len-1] = '\0';
58971+ r_tmp->rolename = tmp;
58972+
58973+ if (!strcmp(r_tmp->rolename, "default")
58974+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
58975+ default_role = r_tmp;
58976+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
58977+ kernel_role = r_tmp;
58978+ }
58979+
58980+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
58981+ return -ENOMEM;
58982+
58983+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
58984+ return -EFAULT;
58985+
58986+ r_tmp->hash = ghash;
58987+
58988+ num_subjs = count_user_subjs(r_tmp->hash->first);
58989+
58990+ r_tmp->subj_hash_size = num_subjs;
58991+ r_tmp->subj_hash =
58992+ (struct acl_subject_label **)
58993+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
58994+
58995+ if (!r_tmp->subj_hash)
58996+ return -ENOMEM;
58997+
58998+ err = copy_user_allowedips(r_tmp);
58999+ if (err)
59000+ return err;
59001+
59002+ /* copy domain info */
59003+ if (r_tmp->domain_children != NULL) {
59004+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
59005+ if (domainlist == NULL)
59006+ return -ENOMEM;
59007+
59008+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
59009+ return -EFAULT;
59010+
59011+ r_tmp->domain_children = domainlist;
59012+ }
59013+
59014+ err = copy_user_transitions(r_tmp);
59015+ if (err)
59016+ return err;
59017+
59018+ memset(r_tmp->subj_hash, 0,
59019+ r_tmp->subj_hash_size *
59020+ sizeof (struct acl_subject_label *));
59021+
59022+ /* acquire the list of subjects, then NULL out
59023+ the list prior to parsing the subjects for this role,
59024+ as during this parsing the list is replaced with a list
59025+ of *nested* subjects for the role
59026+ */
59027+ subj_list = r_tmp->hash->first;
59028+
59029+ /* set nested subject list to null */
59030+ r_tmp->hash->first = NULL;
59031+
59032+ err = copy_user_subjs(subj_list, r_tmp);
59033+
59034+ if (err)
59035+ return err;
59036+
59037+ insert_acl_role_label(r_tmp);
59038+ }
59039+
59040+ if (default_role == NULL || kernel_role == NULL)
59041+ return -EINVAL;
59042+
59043+ return err;
59044+}
59045+
59046+static int
59047+gracl_init(struct gr_arg *args)
59048+{
59049+ int error = 0;
59050+
59051+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
59052+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
59053+
59054+ if (init_variables(args)) {
59055+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
59056+ error = -ENOMEM;
59057+ free_variables();
59058+ goto out;
59059+ }
59060+
59061+ error = copy_user_acl(args);
59062+ free_init_variables();
59063+ if (error) {
59064+ free_variables();
59065+ goto out;
59066+ }
59067+
59068+ if ((error = gr_set_acls(0))) {
59069+ free_variables();
59070+ goto out;
59071+ }
59072+
59073+ pax_open_kernel();
59074+ gr_status |= GR_READY;
59075+ pax_close_kernel();
59076+
59077+ out:
59078+ return error;
59079+}
59080+
59081+/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
59082+
59083+static int
59084+glob_match(const char *p, const char *n)
59085+{
59086+ char c;
59087+
59088+ while ((c = *p++) != '\0') {
59089+ switch (c) {
59090+ case '?':
59091+ if (*n == '\0')
59092+ return 1;
59093+ else if (*n == '/')
59094+ return 1;
59095+ break;
59096+ case '\\':
59097+ if (*n != c)
59098+ return 1;
59099+ break;
59100+ case '*':
59101+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
59102+ if (*n == '/')
59103+ return 1;
59104+ else if (c == '?') {
59105+ if (*n == '\0')
59106+ return 1;
59107+ else
59108+ ++n;
59109+ }
59110+ }
59111+ if (c == '\0') {
59112+ return 0;
59113+ } else {
59114+ const char *endp;
59115+
59116+ if ((endp = strchr(n, '/')) == NULL)
59117+ endp = n + strlen(n);
59118+
59119+ if (c == '[') {
59120+ for (--p; n < endp; ++n)
59121+ if (!glob_match(p, n))
59122+ return 0;
59123+ } else if (c == '/') {
59124+ while (*n != '\0' && *n != '/')
59125+ ++n;
59126+ if (*n == '/' && !glob_match(p, n + 1))
59127+ return 0;
59128+ } else {
59129+ for (--p; n < endp; ++n)
59130+ if (*n == c && !glob_match(p, n))
59131+ return 0;
59132+ }
59133+
59134+ return 1;
59135+ }
59136+ case '[':
59137+ {
59138+ int not;
59139+ char cold;
59140+
59141+ if (*n == '\0' || *n == '/')
59142+ return 1;
59143+
59144+ not = (*p == '!' || *p == '^');
59145+ if (not)
59146+ ++p;
59147+
59148+ c = *p++;
59149+ for (;;) {
59150+ unsigned char fn = (unsigned char)*n;
59151+
59152+ if (c == '\0')
59153+ return 1;
59154+ else {
59155+ if (c == fn)
59156+ goto matched;
59157+ cold = c;
59158+ c = *p++;
59159+
59160+ if (c == '-' && *p != ']') {
59161+ unsigned char cend = *p++;
59162+
59163+ if (cend == '\0')
59164+ return 1;
59165+
59166+ if (cold <= fn && fn <= cend)
59167+ goto matched;
59168+
59169+ c = *p++;
59170+ }
59171+ }
59172+
59173+ if (c == ']')
59174+ break;
59175+ }
59176+ if (!not)
59177+ return 1;
59178+ break;
59179+ matched:
59180+ while (c != ']') {
59181+ if (c == '\0')
59182+ return 1;
59183+
59184+ c = *p++;
59185+ }
59186+ if (not)
59187+ return 1;
59188+ }
59189+ break;
59190+ default:
59191+ if (c != *n)
59192+ return 1;
59193+ }
59194+
59195+ ++n;
59196+ }
59197+
59198+ if (*n == '\0')
59199+ return 0;
59200+
59201+ if (*n == '/')
59202+ return 0;
59203+
59204+ return 1;
59205+}
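
The matcher follows fnmatch() conventions but is path-aware. A few illustrative calls, assuming glob_match() as defined above (each comment gives the return value; 0 means match):

static void glob_match_examples(void)
{
	glob_match("/tmp/*.log", "/tmp/a.log");     /* 0: '*' matches within one component */
	glob_match("/tmp/*.log", "/tmp/dir/a.log"); /* 1: an interior '*' will not cross '/' */
	glob_match("/tmp/f?le", "/tmp/file");       /* 0: '?' matches one non-'/' character */
	glob_match("/tmp", "/tmp/core");            /* 0: a pattern ending at a '/' boundary
						       covers the whole subtree */
}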
59206+
59207+static struct acl_object_label *
59208+chk_glob_label(struct acl_object_label *globbed,
59209+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
59210+{
59211+ struct acl_object_label *tmp;
59212+
59213+ if (*path == NULL)
59214+ *path = gr_to_filename_nolock(dentry, mnt);
59215+
59216+ tmp = globbed;
59217+
59218+ while (tmp) {
59219+ if (!glob_match(tmp->filename, *path))
59220+ return tmp;
59221+ tmp = tmp->next;
59222+ }
59223+
59224+ return NULL;
59225+}
59226+
59227+static struct acl_object_label *
59228+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
59229+ const ino_t curr_ino, const dev_t curr_dev,
59230+ const struct acl_subject_label *subj, char **path, const int checkglob)
59231+{
59232+ struct acl_subject_label *tmpsubj;
59233+ struct acl_object_label *retval;
59234+ struct acl_object_label *retval2;
59235+
59236+ tmpsubj = (struct acl_subject_label *) subj;
59237+ read_lock(&gr_inode_lock);
59238+ do {
59239+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
59240+ if (retval) {
59241+ if (checkglob && retval->globbed) {
59242+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
59243+ if (retval2)
59244+ retval = retval2;
59245+ }
59246+ break;
59247+ }
59248+ } while ((tmpsubj = tmpsubj->parent_subject));
59249+ read_unlock(&gr_inode_lock);
59250+
59251+ return retval;
59252+}
59253+
59254+static __inline__ struct acl_object_label *
59255+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
59256+ struct dentry *curr_dentry,
59257+ const struct acl_subject_label *subj, char **path, const int checkglob)
59258+{
59259+ int newglob = checkglob;
59260+ ino_t inode;
59261+ dev_t device;
59262+
59263+	/* if we aren't yet checking a subdirectory of the original path, skip the
59264+	   glob checking, as we don't want a / * rule to match instead of the / object.
59265+	   don't do this for the create lookups that call this function, though, since
59266+	   they look up on the parent and thus need glob checks on all paths
59267+	*/
59268+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
59269+ newglob = GR_NO_GLOB;
59270+
59271+ spin_lock(&curr_dentry->d_lock);
59272+ inode = curr_dentry->d_inode->i_ino;
59273+ device = __get_dev(curr_dentry);
59274+ spin_unlock(&curr_dentry->d_lock);
59275+
59276+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
59277+}
59278+
59279+#ifdef CONFIG_HUGETLBFS
59280+static inline bool
59281+is_hugetlbfs_mnt(const struct vfsmount *mnt)
59282+{
59283+ int i;
59284+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
59285+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
59286+ return true;
59287+ }
59288+
59289+ return false;
59290+}
59291+#endif
59292+
59293+static struct acl_object_label *
59294+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
59295+ const struct acl_subject_label *subj, char *path, const int checkglob)
59296+{
59297+ struct dentry *dentry = (struct dentry *) l_dentry;
59298+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
59299+ struct mount *real_mnt = real_mount(mnt);
59300+ struct acl_object_label *retval;
59301+ struct dentry *parent;
59302+
59303+ br_read_lock(&vfsmount_lock);
59304+ write_seqlock(&rename_lock);
59305+
59306+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
59307+#ifdef CONFIG_NET
59308+ mnt == sock_mnt ||
59309+#endif
59310+#ifdef CONFIG_HUGETLBFS
59311+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
59312+#endif
59313+ /* ignore Eric Biederman */
59314+ IS_PRIVATE(l_dentry->d_inode))) {
59315+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
59316+ goto out;
59317+ }
59318+
59319+ for (;;) {
59320+ if (dentry == real_root.dentry && mnt == real_root.mnt)
59321+ break;
59322+
59323+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
59324+ if (!mnt_has_parent(real_mnt))
59325+ break;
59326+
59327+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
59328+ if (retval != NULL)
59329+ goto out;
59330+
59331+ dentry = real_mnt->mnt_mountpoint;
59332+ real_mnt = real_mnt->mnt_parent;
59333+ mnt = &real_mnt->mnt;
59334+ continue;
59335+ }
59336+
59337+ parent = dentry->d_parent;
59338+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
59339+ if (retval != NULL)
59340+ goto out;
59341+
59342+ dentry = parent;
59343+ }
59344+
59345+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
59346+
59347+ /* real_root is pinned so we don't have to hold a reference */
59348+ if (retval == NULL)
59349+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
59350+out:
59351+ write_sequnlock(&rename_lock);
59352+ br_read_unlock(&vfsmount_lock);
59353+
59354+ BUG_ON(retval == NULL);
59355+
59356+ return retval;
59357+}
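
The core of __chk_obj_label() is the climb from the target dentry to the pinned real root, checking for a labeled ancestor at every step and crossing mount boundaries via the mountpoint. A skeleton of just that walk, stripped of the rename_lock/vfsmount_lock protection and the per-step label lookups; illustrative only, assuming the same real_root global pinned in init_variables():

static void walk_to_real_root(struct dentry *dentry, struct vfsmount *mnt)
{
	struct mount *real_mnt = real_mount(mnt);

	for (;;) {
		if (dentry == real_root.dentry && mnt == real_root.mnt)
			break;			/* reached the pinned real root */

		if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
			if (!mnt_has_parent(real_mnt))
				break;		/* detached mount: nowhere left to climb */
			/* cross the mount boundary: continue from the
			 * directory this mount is mounted on */
			dentry = real_mnt->mnt_mountpoint;
			real_mnt = real_mnt->mnt_parent;
			mnt = &real_mnt->mnt;
			continue;
		}

		dentry = dentry->d_parent;	/* one path component up */
	}
}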
59358+
59359+static __inline__ struct acl_object_label *
59360+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
59361+ const struct acl_subject_label *subj)
59362+{
59363+ char *path = NULL;
59364+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
59365+}
59366+
59367+static __inline__ struct acl_object_label *
59368+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
59369+ const struct acl_subject_label *subj)
59370+{
59371+ char *path = NULL;
59372+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
59373+}
59374+
59375+static __inline__ struct acl_object_label *
59376+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
59377+ const struct acl_subject_label *subj, char *path)
59378+{
59379+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
59380+}
59381+
59382+static struct acl_subject_label *
59383+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
59384+ const struct acl_role_label *role)
59385+{
59386+ struct dentry *dentry = (struct dentry *) l_dentry;
59387+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
59388+ struct mount *real_mnt = real_mount(mnt);
59389+ struct acl_subject_label *retval;
59390+ struct dentry *parent;
59391+
59392+ br_read_lock(&vfsmount_lock);
59393+ write_seqlock(&rename_lock);
59394+
59395+ for (;;) {
59396+ if (dentry == real_root.dentry && mnt == real_root.mnt)
59397+ break;
59398+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
59399+ if (!mnt_has_parent(real_mnt))
59400+ break;
59401+
59402+ spin_lock(&dentry->d_lock);
59403+ read_lock(&gr_inode_lock);
59404+ retval =
59405+ lookup_acl_subj_label(dentry->d_inode->i_ino,
59406+ __get_dev(dentry), role);
59407+ read_unlock(&gr_inode_lock);
59408+ spin_unlock(&dentry->d_lock);
59409+ if (retval != NULL)
59410+ goto out;
59411+
59412+ dentry = real_mnt->mnt_mountpoint;
59413+ real_mnt = real_mnt->mnt_parent;
59414+ mnt = &real_mnt->mnt;
59415+ continue;
59416+ }
59417+
59418+ spin_lock(&dentry->d_lock);
59419+ read_lock(&gr_inode_lock);
59420+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
59421+ __get_dev(dentry), role);
59422+ read_unlock(&gr_inode_lock);
59423+ parent = dentry->d_parent;
59424+ spin_unlock(&dentry->d_lock);
59425+
59426+ if (retval != NULL)
59427+ goto out;
59428+
59429+ dentry = parent;
59430+ }
59431+
59432+ spin_lock(&dentry->d_lock);
59433+ read_lock(&gr_inode_lock);
59434+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
59435+ __get_dev(dentry), role);
59436+ read_unlock(&gr_inode_lock);
59437+ spin_unlock(&dentry->d_lock);
59438+
59439+ if (unlikely(retval == NULL)) {
59440+ /* real_root is pinned, we don't need to hold a reference */
59441+ read_lock(&gr_inode_lock);
59442+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
59443+ __get_dev(real_root.dentry), role);
59444+ read_unlock(&gr_inode_lock);
59445+ }
59446+out:
59447+ write_sequnlock(&rename_lock);
59448+ br_read_unlock(&vfsmount_lock);
59449+
59450+ BUG_ON(retval == NULL);
59451+
59452+ return retval;
59453+}
59454+
59455+static void
59456+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
59457+{
59458+ struct task_struct *task = current;
59459+ const struct cred *cred = current_cred();
59460+
59461+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
59462+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
59463+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
59464+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
59465+
59466+ return;
59467+}
59468+
59469+static void
59470+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
59471+{
59472+ struct task_struct *task = current;
59473+ const struct cred *cred = current_cred();
59474+
59475+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
59476+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
59477+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
59478+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
59479+
59480+ return;
59481+}
59482+
59483+static void
59484+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
59485+{
59486+ struct task_struct *task = current;
59487+ const struct cred *cred = current_cred();
59488+
59489+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
59490+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
59491+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
59492+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
59493+
59494+ return;
59495+}
59496+
59497+__u32
59498+gr_search_file(const struct dentry * dentry, const __u32 mode,
59499+ const struct vfsmount * mnt)
59500+{
59501+ __u32 retval = mode;
59502+ struct acl_subject_label *curracl;
59503+ struct acl_object_label *currobj;
59504+
59505+ if (unlikely(!(gr_status & GR_READY)))
59506+ return (mode & ~GR_AUDITS);
59507+
59508+ curracl = current->acl;
59509+
59510+ currobj = chk_obj_label(dentry, mnt, curracl);
59511+ retval = currobj->mode & mode;
59512+
59513+ /* if we're opening a specified transfer file for writing
59514+ (e.g. /dev/initctl), then transfer our role to init
59515+ */
59516+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
59517+ current->role->roletype & GR_ROLE_PERSIST)) {
59518+ struct task_struct *task = init_pid_ns.child_reaper;
59519+
59520+ if (task->role != current->role) {
59521+ task->acl_sp_role = 0;
59522+ task->acl_role_id = current->acl_role_id;
59523+ task->role = current->role;
59524+ rcu_read_lock();
59525+ read_lock(&grsec_exec_file_lock);
59526+ gr_apply_subject_to_task(task);
59527+ read_unlock(&grsec_exec_file_lock);
59528+ rcu_read_unlock();
59529+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
59530+ }
59531+ }
59532+
59533+ if (unlikely
59534+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
59535+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
59536+ __u32 new_mode = mode;
59537+
59538+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
59539+
59540+ retval = new_mode;
59541+
59542+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
59543+ new_mode |= GR_INHERIT;
59544+
59545+ if (!(mode & GR_NOLEARN))
59546+ gr_log_learn(dentry, mnt, new_mode);
59547+ }
59548+
59549+ return retval;
59550+}
59551+
59552+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
59553+ const struct dentry *parent,
59554+ const struct vfsmount *mnt)
59555+{
59556+ struct name_entry *match;
59557+ struct acl_object_label *matchpo;
59558+ struct acl_subject_label *curracl;
59559+ char *path;
59560+
59561+ if (unlikely(!(gr_status & GR_READY)))
59562+ return NULL;
59563+
59564+ preempt_disable();
59565+ path = gr_to_filename_rbac(new_dentry, mnt);
59566+ match = lookup_name_entry_create(path);
59567+
59568+ curracl = current->acl;
59569+
59570+ if (match) {
59571+ read_lock(&gr_inode_lock);
59572+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
59573+ read_unlock(&gr_inode_lock);
59574+
59575+ if (matchpo) {
59576+ preempt_enable();
59577+ return matchpo;
59578+ }
59579+ }
59580+
59581+ // lookup parent
59582+
59583+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
59584+
59585+ preempt_enable();
59586+ return matchpo;
59587+}
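
gr_get_create_object() resolves the label for a name that does not exist yet in two steps. An illustrative trace with a hypothetical path:

/* illustrative trace for creat("/home/user/new.txt"):
 * 1. path  = gr_to_filename_rbac(new_dentry, mnt)  -> "/home/user/new.txt"
 * 2. match = lookup_name_entry_create(path): hits only if the policy names
 *    this exact path; if the current subject has an object label for that
 *    (inode, device), return it directly
 * 3. otherwise, derive the label from the parent directory via
 *    chk_obj_create_label() with GR_CREATE_GLOB, so an object rule such as
 *    "/home/user/*" can supply the mode
 */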
59588+
59589+__u32
59590+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
59591+ const struct vfsmount * mnt, const __u32 mode)
59592+{
59593+ struct acl_object_label *matchpo;
59594+ __u32 retval;
59595+
59596+ if (unlikely(!(gr_status & GR_READY)))
59597+ return (mode & ~GR_AUDITS);
59598+
59599+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
59600+
59601+ retval = matchpo->mode & mode;
59602+
59603+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
59604+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
59605+ __u32 new_mode = mode;
59606+
59607+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
59608+
59609+ gr_log_learn(new_dentry, mnt, new_mode);
59610+ return new_mode;
59611+ }
59612+
59613+ return retval;
59614+}
59615+
59616+__u32
59617+gr_check_link(const struct dentry * new_dentry,
59618+ const struct dentry * parent_dentry,
59619+ const struct vfsmount * parent_mnt,
59620+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
59621+{
59622+ struct acl_object_label *obj;
59623+ __u32 oldmode, newmode;
59624+ __u32 needmode;
59625+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
59626+ GR_DELETE | GR_INHERIT;
59627+
59628+ if (unlikely(!(gr_status & GR_READY)))
59629+ return (GR_CREATE | GR_LINK);
59630+
59631+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
59632+ oldmode = obj->mode;
59633+
59634+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
59635+ newmode = obj->mode;
59636+
59637+ needmode = newmode & checkmodes;
59638+
59639+ // old name for hardlink must have at least the permissions of the new name
59640+ if ((oldmode & needmode) != needmode)
59641+ goto bad;
59642+
59643+ // if old name had restrictions/auditing, make sure the new name does as well
59644+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
59645+
59646+ // don't allow hardlinking of suid/sgid/fcapped files without permission
59647+ if (is_privileged_binary(old_dentry))
59648+ needmode |= GR_SETID;
59649+
59650+ if ((newmode & needmode) != needmode)
59651+ goto bad;
59652+
59653+ // enforce minimum permissions
59654+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
59655+ return newmode;
59656+bad:
59657+ needmode = oldmode;
59658+ if (is_privileged_binary(old_dentry))
59659+ needmode |= GR_SETID;
59660+
59661+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
59662+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
59663+ return (GR_CREATE | GR_LINK);
59664+ } else if (newmode & GR_SUPPRESS)
59665+ return GR_SUPPRESS;
59666+ else
59667+ return 0;
59668+}
59669+
59670+int
59671+gr_check_hidden_task(const struct task_struct *task)
59672+{
59673+ if (unlikely(!(gr_status & GR_READY)))
59674+ return 0;
59675+
59676+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
59677+ return 1;
59678+
59679+ return 0;
59680+}
59681+
59682+int
59683+gr_check_protected_task(const struct task_struct *task)
59684+{
59685+ if (unlikely(!(gr_status & GR_READY) || !task))
59686+ return 0;
59687+
59688+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
59689+ task->acl != current->acl)
59690+ return 1;
59691+
59692+ return 0;
59693+}
59694+
59695+int
59696+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
59697+{
59698+ struct task_struct *p;
59699+ int ret = 0;
59700+
59701+ if (unlikely(!(gr_status & GR_READY) || !pid))
59702+ return ret;
59703+
59704+ read_lock(&tasklist_lock);
59705+ do_each_pid_task(pid, type, p) {
59706+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
59707+ p->acl != current->acl) {
59708+ ret = 1;
59709+ goto out;
59710+ }
59711+ } while_each_pid_task(pid, type, p);
59712+out:
59713+ read_unlock(&tasklist_lock);
59714+
59715+ return ret;
59716+}
59717+
59718+void
59719+gr_copy_label(struct task_struct *tsk)
59720+{
59721+ tsk->signal->used_accept = 0;
59722+ tsk->acl_sp_role = 0;
59723+ tsk->acl_role_id = current->acl_role_id;
59724+ tsk->acl = current->acl;
59725+ tsk->role = current->role;
59726+ tsk->signal->curr_ip = current->signal->curr_ip;
59727+ tsk->signal->saved_ip = current->signal->saved_ip;
59728+ if (current->exec_file)
59729+ get_file(current->exec_file);
59730+ tsk->exec_file = current->exec_file;
59731+ tsk->is_writable = current->is_writable;
59732+ if (unlikely(current->signal->used_accept)) {
59733+ current->signal->curr_ip = 0;
59734+ current->signal->saved_ip = 0;
59735+ }
59736+
59737+ return;
59738+}
59739+
59740+static void
59741+gr_set_proc_res(struct task_struct *task)
59742+{
59743+ struct acl_subject_label *proc;
59744+ unsigned short i;
59745+
59746+ proc = task->acl;
59747+
59748+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
59749+ return;
59750+
59751+ for (i = 0; i < RLIM_NLIMITS; i++) {
59752+ if (!(proc->resmask & (1 << i)))
59753+ continue;
59754+
59755+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
59756+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
59757+
59758+ if (i == RLIMIT_CPU)
59759+ update_rlimit_cpu(task, proc->res[i].rlim_cur);
59760+ }
59761+
59762+ return;
59763+}
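
resmask is a plain bitmap over the RLIM_NLIMITS resource indices, so only the limits a subject's policy actually configures are pushed into the task. Illustrative policy state (hypothetical values, not initialization code from the patch):

proc->resmask = (1 << RLIMIT_NOFILE) | (1 << RLIMIT_CPU);
proc->res[RLIMIT_NOFILE] = (struct rlimit){ .rlim_cur = 1024, .rlim_max = 4096 };
proc->res[RLIMIT_CPU]    = (struct rlimit){ .rlim_cur = 60,   .rlim_max = 120 };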
59764+
59765+extern int __gr_process_user_ban(struct user_struct *user);
59766+
59767+int
59768+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
59769+{
59770+ unsigned int i;
59771+ __u16 num;
59772+ uid_t *uidlist;
59773+ uid_t curuid;
59774+ int realok = 0;
59775+ int effectiveok = 0;
59776+ int fsok = 0;
59777+ uid_t globalreal, globaleffective, globalfs;
59778+
59779+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
59780+ struct user_struct *user;
59781+
59782+ if (!uid_valid(real))
59783+ goto skipit;
59784+
59785+ /* find user based on global namespace */
59786+
59787+ globalreal = GR_GLOBAL_UID(real);
59788+
59789+ user = find_user(make_kuid(&init_user_ns, globalreal));
59790+ if (user == NULL)
59791+ goto skipit;
59792+
59793+ if (__gr_process_user_ban(user)) {
59794+ /* for find_user */
59795+ free_uid(user);
59796+ return 1;
59797+ }
59798+
59799+ /* for find_user */
59800+ free_uid(user);
59801+
59802+skipit:
59803+#endif
59804+
59805+ if (unlikely(!(gr_status & GR_READY)))
59806+ return 0;
59807+
59808+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
59809+ gr_log_learn_uid_change(real, effective, fs);
59810+
59811+ num = current->acl->user_trans_num;
59812+ uidlist = current->acl->user_transitions;
59813+
59814+ if (uidlist == NULL)
59815+ return 0;
59816+
59817+ if (!uid_valid(real)) {
59818+ realok = 1;
59819+ globalreal = (uid_t)-1;
59820+ } else {
59821+ globalreal = GR_GLOBAL_UID(real);
59822+ }
59823+ if (!uid_valid(effective)) {
59824+ effectiveok = 1;
59825+ globaleffective = (uid_t)-1;
59826+ } else {
59827+ globaleffective = GR_GLOBAL_UID(effective);
59828+ }
59829+ if (!uid_valid(fs)) {
59830+ fsok = 1;
59831+ globalfs = (uid_t)-1;
59832+ } else {
59833+ globalfs = GR_GLOBAL_UID(fs);
59834+ }
59835+
59836+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
59837+ for (i = 0; i < num; i++) {
59838+ curuid = uidlist[i];
59839+ if (globalreal == curuid)
59840+ realok = 1;
59841+ if (globaleffective == curuid)
59842+ effectiveok = 1;
59843+ if (globalfs == curuid)
59844+ fsok = 1;
59845+ }
59846+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
59847+ for (i = 0; i < num; i++) {
59848+ curuid = uidlist[i];
59849+ if (globalreal == curuid)
59850+ break;
59851+ if (globaleffective == curuid)
59852+ break;
59853+ if (globalfs == curuid)
59854+ break;
59855+ }
59856+ /* not in deny list */
59857+ if (i == num) {
59858+ realok = 1;
59859+ effectiveok = 1;
59860+ fsok = 1;
59861+ }
59862+ }
59863+
59864+ if (realok && effectiveok && fsok)
59865+ return 0;
59866+ else {
59867+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
59868+ return 1;
59869+ }
59870+}
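
The allow/deny semantics above are strict in opposite directions: with GR_ID_ALLOW, each of the real, effective, and fs ids must individually appear on the list (ids left unchanged arrive as invalid kuids and are pre-approved); with GR_ID_DENY, the change is rejected if any one of them appears. A worked example with hypothetical uids:

/* hypothetical policy: user_trans_type = GR_ID_ALLOW,
 *                      user_transitions = { 33, 48 }, user_trans_num = 2
 *   setresuid(33, 48, 48) -> permitted: every requested uid is listed
 *   setresuid(33,  0, 33) -> denied:    effective uid 0 is not listed
 *
 * hypothetical policy: user_trans_type = GR_ID_DENY,
 *                      user_transitions = { 0 }, user_trans_num = 1
 *   setresuid(33, 33, 33) -> permitted: no requested uid is on the list
 *   setresuid(33,  0, 33) -> denied:    uid 0 is on the deny list
 */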
59871+
59872+int
59873+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
59874+{
59875+ unsigned int i;
59876+ __u16 num;
59877+ gid_t *gidlist;
59878+ gid_t curgid;
59879+ int realok = 0;
59880+ int effectiveok = 0;
59881+ int fsok = 0;
59882+ gid_t globalreal, globaleffective, globalfs;
59883+
59884+ if (unlikely(!(gr_status & GR_READY)))
59885+ return 0;
59886+
59887+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
59888+ gr_log_learn_gid_change(real, effective, fs);
59889+
59890+ num = current->acl->group_trans_num;
59891+ gidlist = current->acl->group_transitions;
59892+
59893+ if (gidlist == NULL)
59894+ return 0;
59895+
59896+ if (!gid_valid(real)) {
59897+ realok = 1;
59898+ globalreal = (gid_t)-1;
59899+ } else {
59900+ globalreal = GR_GLOBAL_GID(real);
59901+ }
59902+ if (!gid_valid(effective)) {
59903+ effectiveok = 1;
59904+ globaleffective = (gid_t)-1;
59905+ } else {
59906+ globaleffective = GR_GLOBAL_GID(effective);
59907+ }
59908+ if (!gid_valid(fs)) {
59909+ fsok = 1;
59910+ globalfs = (gid_t)-1;
59911+ } else {
59912+ globalfs = GR_GLOBAL_GID(fs);
59913+ }
59914+
59915+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
59916+ for (i = 0; i < num; i++) {
59917+ curgid = gidlist[i];
59918+ if (globalreal == curgid)
59919+ realok = 1;
59920+ if (globaleffective == curgid)
59921+ effectiveok = 1;
59922+ if (globalfs == curgid)
59923+ fsok = 1;
59924+ }
59925+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
59926+ for (i = 0; i < num; i++) {
59927+ curgid = gidlist[i];
59928+ if (globalreal == curgid)
59929+ break;
59930+ if (globaleffective == curgid)
59931+ break;
59932+ if (globalfs == curgid)
59933+ break;
59934+ }
59935+ /* not in deny list */
59936+ if (i == num) {
59937+ realok = 1;
59938+ effectiveok = 1;
59939+ fsok = 1;
59940+ }
59941+ }
59942+
59943+ if (realok && effectiveok && fsok)
59944+ return 0;
59945+ else {
59946+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
59947+ return 1;
59948+ }
59949+}
59950+
59951+extern int gr_acl_is_capable(const int cap);
59952+
59953+void
59954+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
59955+{
59956+ struct acl_role_label *role = task->role;
59957+ struct acl_subject_label *subj = NULL;
59958+ struct acl_object_label *obj;
59959+ struct file *filp;
59960+ uid_t uid;
59961+ gid_t gid;
59962+
59963+ if (unlikely(!(gr_status & GR_READY)))
59964+ return;
59965+
59966+ uid = GR_GLOBAL_UID(kuid);
59967+ gid = GR_GLOBAL_GID(kgid);
59968+
59969+ filp = task->exec_file;
59970+
59971+ /* kernel process, we'll give them the kernel role */
59972+ if (unlikely(!filp)) {
59973+ task->role = kernel_role;
59974+ task->acl = kernel_role->root_label;
59975+ return;
59976+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
59977+ role = lookup_acl_role_label(task, uid, gid);
59978+
59979+ /* don't change the role if we're not a privileged process */
59980+ if (role && task->role != role &&
59981+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
59982+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
59983+ return;
59984+
59985+ /* perform subject lookup in possibly new role
59986+ we can use this result below in the case where role == task->role
59987+ */
59988+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
59989+
59990+	/* if we changed the uid/gid but ended up in the same role and are
59991+	   using inheritance, don't lose the inherited subject: if the current
59992+	   subject is other than what the normal lookup would yield, we
59993+	   arrived here via inheritance, so don't lose that subject
59994+	*/
59996+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
59997+ (subj == task->acl)))
59998+ task->acl = subj;
59999+
60000+ task->role = role;
60001+
60002+ task->is_writable = 0;
60003+
60004+ /* ignore additional mmap checks for processes that are writable
60005+ by the default ACL */
60006+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
60007+ if (unlikely(obj->mode & GR_WRITE))
60008+ task->is_writable = 1;
60009+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
60010+ if (unlikely(obj->mode & GR_WRITE))
60011+ task->is_writable = 1;
60012+
60013+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
60014+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
60015+#endif
60016+
60017+ gr_set_proc_res(task);
60018+
60019+ return;
60020+}
60021+
60022+int
60023+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
60024+ const int unsafe_flags)
60025+{
60026+ struct task_struct *task = current;
60027+ struct acl_subject_label *newacl;
60028+ struct acl_object_label *obj;
60029+ __u32 retmode;
60030+
60031+ if (unlikely(!(gr_status & GR_READY)))
60032+ return 0;
60033+
60034+ newacl = chk_subj_label(dentry, mnt, task->role);
60035+
60036+	/* special handling for the case where we did an strace -f -p <pid> from
60037+	   an admin role, and that pid then did an exec
60038+	*/
60039+ rcu_read_lock();
60040+ read_lock(&tasklist_lock);
60041+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
60042+ (task->parent->acl->mode & GR_POVERRIDE))) {
60043+ read_unlock(&tasklist_lock);
60044+ rcu_read_unlock();
60045+ goto skip_check;
60046+ }
60047+ read_unlock(&tasklist_lock);
60048+ rcu_read_unlock();
60049+
60050+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
60051+ !(task->role->roletype & GR_ROLE_GOD) &&
60052+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
60053+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
60054+ if (unsafe_flags & LSM_UNSAFE_SHARE)
60055+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
60056+ else
60057+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
60058+ return -EACCES;
60059+ }
60060+
60061+skip_check:
60062+
60063+ obj = chk_obj_label(dentry, mnt, task->acl);
60064+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
60065+
60066+ if (!(task->acl->mode & GR_INHERITLEARN) &&
60067+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
60068+ if (obj->nested)
60069+ task->acl = obj->nested;
60070+ else
60071+ task->acl = newacl;
60072+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
60073+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
60074+
60075+ task->is_writable = 0;
60076+
60077+ /* ignore additional mmap checks for processes that are writable
60078+ by the default ACL */
60079+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
60080+ if (unlikely(obj->mode & GR_WRITE))
60081+ task->is_writable = 1;
60082+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
60083+ if (unlikely(obj->mode & GR_WRITE))
60084+ task->is_writable = 1;
60085+
60086+ gr_set_proc_res(task);
60087+
60088+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
60089+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
60090+#endif
60091+ return 0;
60092+}
60093+
60094+/* always called with valid inodev ptr */
60095+static void
60096+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
60097+{
60098+ struct acl_object_label *matchpo;
60099+ struct acl_subject_label *matchps;
60100+ struct acl_subject_label *subj;
60101+ struct acl_role_label *role;
60102+ unsigned int x;
60103+
60104+ FOR_EACH_ROLE_START(role)
60105+ FOR_EACH_SUBJECT_START(role, subj, x)
60106+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
60107+ matchpo->mode |= GR_DELETED;
60108+ FOR_EACH_SUBJECT_END(subj,x)
60109+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
60110+ /* nested subjects aren't in the role's subj_hash table */
60111+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
60112+ matchpo->mode |= GR_DELETED;
60113+ FOR_EACH_NESTED_SUBJECT_END(subj)
60114+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
60115+ matchps->mode |= GR_DELETED;
60116+ FOR_EACH_ROLE_END(role)
60117+
60118+ inodev->nentry->deleted = 1;
60119+
60120+ return;
60121+}
60122+
60123+void
60124+gr_handle_delete(const ino_t ino, const dev_t dev)
60125+{
60126+ struct inodev_entry *inodev;
60127+
60128+ if (unlikely(!(gr_status & GR_READY)))
60129+ return;
60130+
60131+ write_lock(&gr_inode_lock);
60132+ inodev = lookup_inodev_entry(ino, dev);
60133+ if (inodev != NULL)
60134+ do_handle_delete(inodev, ino, dev);
60135+ write_unlock(&gr_inode_lock);
60136+
60137+ return;
60138+}
60139+
60140+static void
60141+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
60142+ const ino_t newinode, const dev_t newdevice,
60143+ struct acl_subject_label *subj)
60144+{
60145+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
60146+ struct acl_object_label *match;
60147+
60148+ match = subj->obj_hash[index];
60149+
60150+ while (match && (match->inode != oldinode ||
60151+ match->device != olddevice ||
60152+ !(match->mode & GR_DELETED)))
60153+ match = match->next;
60154+
60155+ if (match && (match->inode == oldinode)
60156+ && (match->device == olddevice)
60157+ && (match->mode & GR_DELETED)) {
60158+ if (match->prev == NULL) {
60159+ subj->obj_hash[index] = match->next;
60160+ if (match->next != NULL)
60161+ match->next->prev = NULL;
60162+ } else {
60163+ match->prev->next = match->next;
60164+ if (match->next != NULL)
60165+ match->next->prev = match->prev;
60166+ }
60167+ match->prev = NULL;
60168+ match->next = NULL;
60169+ match->inode = newinode;
60170+ match->device = newdevice;
60171+ match->mode &= ~GR_DELETED;
60172+
60173+ insert_acl_obj_label(match, subj);
60174+ }
60175+
60176+ return;
60177+}
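
update_acl_obj_label() and the two near-identical functions that follow all perform the same move: because the bucket index is a function of (inode, device), rebinding an entry to a new key means unlinking it from its old chain, rewriting the key fields, and re-running the head insertion. A generic sketch of the unlink half, reusing the hypothetical struct node from the earlier bucket_insert_head() sketch:

static void bucket_unlink(struct node **bucket, struct node *n)
{
	if (n->prev == NULL) {			/* n is the chain head */
		*bucket = n->next;
		if (n->next != NULL)
			n->next->prev = NULL;
	} else {				/* n is mid-chain or the tail */
		n->prev->next = n->next;
		if (n->next != NULL)
			n->next->prev = n->prev;
	}
	n->prev = NULL;
	n->next = NULL;
	/* the caller rewrites the key fields, then calls bucket_insert_head()
	 * on the bucket computed from the new key */
}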
60178+
60179+static void
60180+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
60181+ const ino_t newinode, const dev_t newdevice,
60182+ struct acl_role_label *role)
60183+{
60184+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
60185+ struct acl_subject_label *match;
60186+
60187+ match = role->subj_hash[index];
60188+
60189+ while (match && (match->inode != oldinode ||
60190+ match->device != olddevice ||
60191+ !(match->mode & GR_DELETED)))
60192+ match = match->next;
60193+
60194+ if (match && (match->inode == oldinode)
60195+ && (match->device == olddevice)
60196+ && (match->mode & GR_DELETED)) {
60197+ if (match->prev == NULL) {
60198+ role->subj_hash[index] = match->next;
60199+ if (match->next != NULL)
60200+ match->next->prev = NULL;
60201+ } else {
60202+ match->prev->next = match->next;
60203+ if (match->next != NULL)
60204+ match->next->prev = match->prev;
60205+ }
60206+ match->prev = NULL;
60207+ match->next = NULL;
60208+ match->inode = newinode;
60209+ match->device = newdevice;
60210+ match->mode &= ~GR_DELETED;
60211+
60212+ insert_acl_subj_label(match, role);
60213+ }
60214+
60215+ return;
60216+}
60217+
60218+static void
60219+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
60220+ const ino_t newinode, const dev_t newdevice)
60221+{
60222+ unsigned int index = gr_fhash(oldinode, olddevice, inodev_set.i_size);
60223+ struct inodev_entry *match;
60224+
60225+ match = inodev_set.i_hash[index];
60226+
60227+ while (match && (match->nentry->inode != oldinode ||
60228+ match->nentry->device != olddevice || !match->nentry->deleted))
60229+ match = match->next;
60230+
60231+ if (match && (match->nentry->inode == oldinode)
60232+ && (match->nentry->device == olddevice) &&
60233+ match->nentry->deleted) {
60234+ if (match->prev == NULL) {
60235+ inodev_set.i_hash[index] = match->next;
60236+ if (match->next != NULL)
60237+ match->next->prev = NULL;
60238+ } else {
60239+ match->prev->next = match->next;
60240+ if (match->next != NULL)
60241+ match->next->prev = match->prev;
60242+ }
60243+ match->prev = NULL;
60244+ match->next = NULL;
60245+ match->nentry->inode = newinode;
60246+ match->nentry->device = newdevice;
60247+ match->nentry->deleted = 0;
60248+
60249+ insert_inodev_entry(match);
60250+ }
60251+
60252+ return;
60253+}
60254+
60255+static void
60256+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
60257+{
60258+ struct acl_subject_label *subj;
60259+ struct acl_role_label *role;
60260+ unsigned int x;
60261+
60262+ FOR_EACH_ROLE_START(role)
60263+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
60264+
60265+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
60266+ if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
60267+ subj->inode = ino;
60268+ subj->device = dev;
60269+ }
60270+ /* nested subjects aren't in the role's subj_hash table */
60271+ update_acl_obj_label(matchn->inode, matchn->device,
60272+ ino, dev, subj);
60273+ FOR_EACH_NESTED_SUBJECT_END(subj)
60274+ FOR_EACH_SUBJECT_START(role, subj, x)
60275+ update_acl_obj_label(matchn->inode, matchn->device,
60276+ ino, dev, subj);
60277+ FOR_EACH_SUBJECT_END(subj,x)
60278+ FOR_EACH_ROLE_END(role)
60279+
60280+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
60281+
60282+ return;
60283+}
60284+
60285+static void
60286+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
60287+ const struct vfsmount *mnt)
60288+{
60289+ ino_t ino = dentry->d_inode->i_ino;
60290+ dev_t dev = __get_dev(dentry);
60291+
60292+ __do_handle_create(matchn, ino, dev);
60293+
60294+ return;
60295+}
60296+
60297+void
60298+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
60299+{
60300+ struct name_entry *matchn;
60301+
60302+ if (unlikely(!(gr_status & GR_READY)))
60303+ return;
60304+
60305+ preempt_disable();
60306+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
60307+
60308+ if (unlikely((unsigned long)matchn)) {
60309+ write_lock(&gr_inode_lock);
60310+ do_handle_create(matchn, dentry, mnt);
60311+ write_unlock(&gr_inode_lock);
60312+ }
60313+ preempt_enable();
60314+
60315+ return;
60316+}
60317+
60318+void
60319+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
60320+{
60321+ struct name_entry *matchn;
60322+
60323+ if (unlikely(!(gr_status & GR_READY)))
60324+ return;
60325+
60326+ preempt_disable();
60327+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
60328+
60329+ if (unlikely((unsigned long)matchn)) {
60330+ write_lock(&gr_inode_lock);
60331+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
60332+ write_unlock(&gr_inode_lock);
60333+ }
60334+ preempt_enable();
60335+
60336+ return;
60337+}
60338+
60339+void
60340+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
60341+ struct dentry *old_dentry,
60342+ struct dentry *new_dentry,
60343+ struct vfsmount *mnt, const __u8 replace)
60344+{
60345+ struct name_entry *matchn;
60346+ struct inodev_entry *inodev;
60347+ struct inode *inode = new_dentry->d_inode;
60348+ ino_t old_ino = old_dentry->d_inode->i_ino;
60349+ dev_t old_dev = __get_dev(old_dentry);
60350+
60351+ /* vfs_rename swaps the name and parent link for old_dentry and
60352+ new_dentry.
60353+ At this point, old_dentry has the new name, parent link, and inode
60354+ for the renamed file.
60355+ If a file is being replaced by the rename, new_dentry has the inode
60356+ and name of the replaced file.
60357+ */
60358+
60359+ if (unlikely(!(gr_status & GR_READY)))
60360+ return;
60361+
60362+ preempt_disable();
60363+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
60364+
60365+ /* we wouldn't have to check d_inode if it weren't for
60366+ NFS silly-renaming
60367+ */
60368+
60369+ write_lock(&gr_inode_lock);
60370+ if (unlikely(replace && inode)) {
60371+ ino_t new_ino = inode->i_ino;
60372+ dev_t new_dev = __get_dev(new_dentry);
60373+
60374+ inodev = lookup_inodev_entry(new_ino, new_dev);
60375+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
60376+ do_handle_delete(inodev, new_ino, new_dev);
60377+ }
60378+
60379+ inodev = lookup_inodev_entry(old_ino, old_dev);
60380+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
60381+ do_handle_delete(inodev, old_ino, old_dev);
60382+
60383+ if (unlikely((unsigned long)matchn))
60384+ do_handle_create(matchn, old_dentry, mnt);
60385+
60386+ write_unlock(&gr_inode_lock);
60387+ preempt_enable();
60388+
60389+ return;
60390+}
60391+
60392+static int
60393+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
60394+ unsigned char **sum)
60395+{
60396+ struct acl_role_label *r;
60397+ struct role_allowed_ip *ipp;
60398+ struct role_transition *trans;
60399+ unsigned int i;
60400+ int found = 0;
60401+ u32 curr_ip = current->signal->curr_ip;
60402+
60403+ current->signal->saved_ip = curr_ip;
60404+
60405+ /* check transition table */
60406+
60407+ for (trans = current->role->transitions; trans; trans = trans->next) {
60408+ if (!strcmp(rolename, trans->rolename)) {
60409+ found = 1;
60410+ break;
60411+ }
60412+ }
60413+
60414+ if (!found)
60415+ return 0;
60416+
60417+ /* handle special roles that do not require authentication
60418+ and check ip */
60419+
60420+ FOR_EACH_ROLE_START(r)
60421+ if (!strcmp(rolename, r->rolename) &&
60422+ (r->roletype & GR_ROLE_SPECIAL)) {
60423+ found = 0;
60424+ if (r->allowed_ips != NULL) {
60425+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
60426+ if ((ntohl(curr_ip) & ipp->netmask) ==
60427+ (ntohl(ipp->addr) & ipp->netmask))
60428+ found = 1;
60429+ }
60430+ } else
60431+ found = 2;
60432+ if (!found)
60433+ return 0;
60434+
60435+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
60436+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
60437+ *salt = NULL;
60438+ *sum = NULL;
60439+ return 1;
60440+ }
60441+ }
60442+ FOR_EACH_ROLE_END(r)
60443+
60444+ for (i = 0; i < num_sprole_pws; i++) {
60445+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
60446+ *salt = acl_special_roles[i]->salt;
60447+ *sum = acl_special_roles[i]->sum;
60448+ return 1;
60449+ }
60450+ }
60451+
60452+ return 0;
60453+}
60454+
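The allowed_ips walk above is a masked comparison: a client matches an entry when client and entry agree on every bit covered by the netmask (stored in host byte order here, hence the ntohl() on both addresses). A standalone sketch of the same test, with made-up addresses:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    /* returns 1 when addr falls inside net/mask, as in the allowed_ips walk */
    static int ip_match(uint32_t addr, uint32_t net, uint32_t mask)
    {
        return (ntohl(addr) & mask) == (ntohl(net) & mask);
    }

    int main(void)
    {
        uint32_t addr, net;

        inet_pton(AF_INET, "192.168.1.77", &addr);
        inet_pton(AF_INET, "192.168.1.0", &net);

        /* 0xffffff00 == a /24 netmask in host order */
        printf("%d\n", ip_match(addr, net, 0xffffff00)); /* prints 1 */
        return 0;
    }
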
60455+static void
60456+assign_special_role(char *rolename)
60457+{
60458+ struct acl_object_label *obj;
60459+ struct acl_role_label *r;
60460+ struct acl_role_label *assigned = NULL;
60461+ struct task_struct *tsk;
60462+ struct file *filp;
60463+
60464+ FOR_EACH_ROLE_START(r)
60465+ if (!strcmp(rolename, r->rolename) &&
60466+ (r->roletype & GR_ROLE_SPECIAL)) {
60467+ assigned = r;
60468+ break;
60469+ }
60470+ FOR_EACH_ROLE_END(r)
60471+
60472+ if (!assigned)
60473+ return;
60474+
60475+ read_lock(&tasklist_lock);
60476+ read_lock(&grsec_exec_file_lock);
60477+
60478+ tsk = current->real_parent;
60479+ if (tsk == NULL)
60480+ goto out_unlock;
60481+
60482+ filp = tsk->exec_file;
60483+ if (filp == NULL)
60484+ goto out_unlock;
60485+
60486+ tsk->is_writable = 0;
60487+
60488+ tsk->acl_sp_role = 1;
60489+ tsk->acl_role_id = ++acl_sp_role_value;
60490+ tsk->role = assigned;
60491+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
60492+
60493+ /* ignore additional mmap checks for processes that are writable
60494+ by the default ACL */
60495+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
60496+ if (unlikely(obj->mode & GR_WRITE))
60497+ tsk->is_writable = 1;
60498+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
60499+ if (unlikely(obj->mode & GR_WRITE))
60500+ tsk->is_writable = 1;
60501+
60502+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
60503+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
60504+#endif
60505+
60506+out_unlock:
60507+ read_unlock(&grsec_exec_file_lock);
60508+ read_unlock(&tasklist_lock);
60509+ return;
60510+}
60511+
60512+int gr_check_secure_terminal(struct task_struct *task)
60513+{
60514+ struct task_struct *p, *p2, *p3;
60515+ struct files_struct *files;
60516+ struct fdtable *fdt;
60517+ struct file *our_file = NULL, *file;
60518+ int i;
60519+
60520+ if (task->signal->tty == NULL)
60521+ return 1;
60522+
60523+ files = get_files_struct(task);
60524+ if (files != NULL) {
60525+ rcu_read_lock();
60526+ fdt = files_fdtable(files);
60527+ for (i=0; i < fdt->max_fds; i++) {
60528+ file = fcheck_files(files, i);
60529+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
60530+ get_file(file);
60531+ our_file = file;
60532+ }
60533+ }
60534+ rcu_read_unlock();
60535+ put_files_struct(files);
60536+ }
60537+
60538+ if (our_file == NULL)
60539+ return 1;
60540+
60541+ read_lock(&tasklist_lock);
60542+ do_each_thread(p2, p) {
60543+ files = get_files_struct(p);
60544+ if (files == NULL ||
60545+ (p->signal && p->signal->tty == task->signal->tty)) {
60546+ if (files != NULL)
60547+ put_files_struct(files);
60548+ continue;
60549+ }
60550+ rcu_read_lock();
60551+ fdt = files_fdtable(files);
60552+ for (i=0; i < fdt->max_fds; i++) {
60553+ file = fcheck_files(files, i);
60554+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
60555+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
60556+ p3 = task;
60557+ while (task_pid_nr(p3) > 0) {
60558+ if (p3 == p)
60559+ break;
60560+ p3 = p3->real_parent;
60561+ }
60562+ if (p3 == p)
60563+ break;
60564+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
60565+ gr_handle_alertkill(p);
60566+ rcu_read_unlock();
60567+ put_files_struct(files);
60568+ read_unlock(&tasklist_lock);
60569+ fput(our_file);
60570+ return 0;
60571+ }
60572+ }
60573+ rcu_read_unlock();
60574+ put_files_struct(files);
60575+ } while_each_thread(p2, p);
60576+ read_unlock(&tasklist_lock);
60577+
60578+ fput(our_file);
60579+ return 1;
60580+}
60581+
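The p3 loop in gr_check_secure_terminal is an ancestor walk: follow real_parent links upward from the checking task and skip the alert if the process holding the tty is one of its ancestors. A toy version over a parent-pointer tree (hypothetical struct proc, not task_struct):

    #include <stdio.h>
    #include <stddef.h>

    struct proc {
        int pid;
        struct proc *parent; /* real_parent analogue; pid 0 acts as the root */
    };

    /* returns 1 when "anc" is "t" itself or one of its ancestors */
    static int is_ancestor(const struct proc *t, const struct proc *anc)
    {
        while (t->pid > 0) {       /* same stop condition as task_pid_nr(p3) > 0 */
            if (t == anc)
                return 1;
            t = t->parent;
        }
        return t == anc;
    }

    int main(void)
    {
        struct proc init  = { 0, NULL };
        struct proc shell = { 100, &init };
        struct proc child = { 200, &shell };

        printf("%d %d\n", is_ancestor(&child, &shell),
               is_ancestor(&shell, &child)); /* prints "1 0" */
        return 0;
    }
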
60582+static int gr_rbac_disable(void *unused)
60583+{
60584+ pax_open_kernel();
60585+ gr_status &= ~GR_READY;
60586+ pax_close_kernel();
60587+
60588+ return 0;
60589+}
60590+
60591+ssize_t
60592+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
60593+{
60594+ struct gr_arg_wrapper uwrap;
60595+ unsigned char *sprole_salt = NULL;
60596+ unsigned char *sprole_sum = NULL;
60597+ int error = sizeof (struct gr_arg_wrapper);
60598+ int error2 = 0;
60599+
60600+ mutex_lock(&gr_dev_mutex);
60601+
60602+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
60603+ error = -EPERM;
60604+ goto out;
60605+ }
60606+
60607+ if (count != sizeof (struct gr_arg_wrapper)) {
60608+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
60609+ error = -EINVAL;
60610+ goto out;
60611+ }
60612+
60613+
60614+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
60615+ gr_auth_expires = 0;
60616+ gr_auth_attempts = 0;
60617+ }
60618+
60619+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
60620+ error = -EFAULT;
60621+ goto out;
60622+ }
60623+
60624+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
60625+ error = -EINVAL;
60626+ goto out;
60627+ }
60628+
60629+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
60630+ error = -EFAULT;
60631+ goto out;
60632+ }
60633+
60634+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
60635+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
60636+ time_after(gr_auth_expires, get_seconds())) {
60637+ error = -EBUSY;
60638+ goto out;
60639+ }
60640+
60641+ /* if a non-root user is trying to do anything other than use a
60642+ special role, do not attempt authentication and do not count the
60643+ attempt towards authentication lockout
60644+ */
60645+
60646+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
60647+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
60648+ gr_is_global_nonroot(current_uid())) {
60649+ error = -EPERM;
60650+ goto out;
60651+ }
60652+
60653+ /* ensure pw and special role name are null terminated */
60654+
60655+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
60656+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
60657+
60658+ /* Okay.
60659+ * We have enough of the argument structure (we have yet
60660+ * to copy_from_user the tables themselves). Copy the tables
60661+ * only if we need them, i.e. for loading operations. */
60662+
60663+ switch (gr_usermode->mode) {
60664+ case GR_STATUS:
60665+ if (gr_status & GR_READY) {
60666+ error = 1;
60667+ if (!gr_check_secure_terminal(current))
60668+ error = 3;
60669+ } else
60670+ error = 2;
60671+ goto out;
60672+ case GR_SHUTDOWN:
60673+ if ((gr_status & GR_READY)
60674+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
60675+ stop_machine(gr_rbac_disable, NULL, NULL);
60676+ free_variables();
60677+ memset(gr_usermode, 0, sizeof (struct gr_arg));
60678+ memset(gr_system_salt, 0, GR_SALT_LEN);
60679+ memset(gr_system_sum, 0, GR_SHA_LEN);
60680+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
60681+ } else if (gr_status & GR_READY) {
60682+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
60683+ error = -EPERM;
60684+ } else {
60685+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
60686+ error = -EAGAIN;
60687+ }
60688+ break;
60689+ case GR_ENABLE:
60690+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
60691+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
60692+ else {
60693+ if (gr_status & GR_READY)
60694+ error = -EAGAIN;
60695+ else
60696+ error = error2;
60697+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
60698+ }
60699+ break;
60700+ case GR_RELOAD:
60701+ if (!(gr_status & GR_READY)) {
60702+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
60703+ error = -EAGAIN;
60704+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
60705+ stop_machine(gr_rbac_disable, NULL, NULL);
60706+ free_variables();
60707+ error2 = gracl_init(gr_usermode);
60708+ if (!error2)
60709+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
60710+ else {
60711+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
60712+ error = error2;
60713+ }
60714+ } else {
60715+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
60716+ error = -EPERM;
60717+ }
60718+ break;
60719+ case GR_SEGVMOD:
60720+ if (unlikely(!(gr_status & GR_READY))) {
60721+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
60722+ error = -EAGAIN;
60723+ break;
60724+ }
60725+
60726+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
60727+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
60728+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
60729+ struct acl_subject_label *segvacl;
60730+ segvacl =
60731+ lookup_acl_subj_label(gr_usermode->segv_inode,
60732+ gr_usermode->segv_device,
60733+ current->role);
60734+ if (segvacl) {
60735+ segvacl->crashes = 0;
60736+ segvacl->expires = 0;
60737+ }
60738+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
60739+ gr_remove_uid(gr_usermode->segv_uid);
60740+ }
60741+ } else {
60742+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
60743+ error = -EPERM;
60744+ }
60745+ break;
60746+ case GR_SPROLE:
60747+ case GR_SPROLEPAM:
60748+ if (unlikely(!(gr_status & GR_READY))) {
60749+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
60750+ error = -EAGAIN;
60751+ break;
60752+ }
60753+
60754+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
60755+ current->role->expires = 0;
60756+ current->role->auth_attempts = 0;
60757+ }
60758+
60759+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
60760+ time_after(current->role->expires, get_seconds())) {
60761+ error = -EBUSY;
60762+ goto out;
60763+ }
60764+
60765+ if (lookup_special_role_auth
60766+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
60767+ && ((!sprole_salt && !sprole_sum)
60768+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
60769+ char *p = "";
60770+ assign_special_role(gr_usermode->sp_role);
60771+ read_lock(&tasklist_lock);
60772+ if (current->real_parent)
60773+ p = current->real_parent->role->rolename;
60774+ read_unlock(&tasklist_lock);
60775+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
60776+ p, acl_sp_role_value);
60777+ } else {
60778+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
60779+ error = -EPERM;
60780+ if(!(current->role->auth_attempts++))
60781+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
60782+
60783+ goto out;
60784+ }
60785+ break;
60786+ case GR_UNSPROLE:
60787+ if (unlikely(!(gr_status & GR_READY))) {
60788+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
60789+ error = -EAGAIN;
60790+ break;
60791+ }
60792+
60793+ if (current->role->roletype & GR_ROLE_SPECIAL) {
60794+ char *p = "";
60795+ int i = 0;
60796+
60797+ read_lock(&tasklist_lock);
60798+ if (current->real_parent) {
60799+ p = current->real_parent->role->rolename;
60800+ i = current->real_parent->acl_role_id;
60801+ }
60802+ read_unlock(&tasklist_lock);
60803+
60804+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
60805+ gr_set_acls(1);
60806+ } else {
60807+ error = -EPERM;
60808+ goto out;
60809+ }
60810+ break;
60811+ default:
60812+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
60813+ error = -EINVAL;
60814+ break;
60815+ }
60816+
60817+ if (error != -EPERM)
60818+ goto out;
60819+
60820+ if(!(gr_auth_attempts++))
60821+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
60822+
60823+ out:
60824+ mutex_unlock(&gr_dev_mutex);
60825+ return error;
60826+}
60827+
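The failed-authentication bookkeeping in write_grsec_handler is a simple lockout: the first failure arms a timeout window, and once CONFIG_GRKERNSEC_ACL_MAXTRIES failures accumulate, attempts inside the window fail with -EBUSY; the reset at the top mirrors the time_after_eq() check. A compressed userspace model of that policy (invented constants, an integer "seconds" counter in place of get_seconds()):

    #include <stdio.h>

    #define MAXTRIES 3
    #define TIMEOUT  30 /* seconds, stand-in for CONFIG_GRKERNSEC_ACL_TIMEOUT */

    static unsigned long auth_attempts, auth_expires;

    /* returns 0 if an attempt may proceed, -1 if locked out (the -EBUSY case) */
    static int auth_gate(unsigned long now)
    {
        if (auth_expires && now >= auth_expires) { /* lockout window over: reset */
            auth_expires = 0;
            auth_attempts = 0;
        }
        if (auth_attempts >= MAXTRIES && now < auth_expires)
            return -1;
        return 0;
    }

    /* mirrors "if (!(gr_auth_attempts++)) gr_auth_expires = now + TIMEOUT" */
    static void auth_failed(unsigned long now)
    {
        if (!(auth_attempts++))
            auth_expires = now + TIMEOUT;
    }

    int main(void)
    {
        unsigned long t;

        for (t = 0; t < 5; t++) {
            if (auth_gate(t) == 0)
                auth_failed(t); /* pretend every attempt fails */
            printf("t=%lu attempts=%lu locked=%d\n",
                   t, auth_attempts, auth_gate(t));
        }
        return 0; /* lockout engages after the third failure */
    }
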
60828+/* must be called with
60829+ rcu_read_lock();
60830+ read_lock(&tasklist_lock);
60831+ read_lock(&grsec_exec_file_lock);
60832+*/
60833+int gr_apply_subject_to_task(struct task_struct *task)
60834+{
60835+ struct acl_object_label *obj;
60836+ char *tmpname;
60837+ struct acl_subject_label *tmpsubj;
60838+ struct file *filp;
60839+ struct name_entry *nmatch;
60840+
60841+ filp = task->exec_file;
60842+ if (filp == NULL)
60843+ return 0;
60844+
60845+ /* the following applies the correct subject to binaries
60846+ that were already running when the RBAC system
60847+ was enabled, and that have been
60848+ replaced or deleted since their execution
60849+ -----
60850+ when the RBAC system starts, the inode/dev pair
60851+ recorded in exec_file will be one the RBAC system
60852+ is unaware of: it only knows the inode/dev
60853+ of the file currently present on disk, or the absence
60854+ of it.
60855+ */
60856+ preempt_disable();
60857+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
60858+
60859+ nmatch = lookup_name_entry(tmpname);
60860+ preempt_enable();
60861+ tmpsubj = NULL;
60862+ if (nmatch) {
60863+ if (nmatch->deleted)
60864+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
60865+ else
60866+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
60867+ if (tmpsubj != NULL)
60868+ task->acl = tmpsubj;
60869+ }
60870+ if (tmpsubj == NULL)
60871+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
60872+ task->role);
60873+ if (task->acl) {
60874+ task->is_writable = 0;
60875+ /* ignore additional mmap checks for processes that are writable
60876+ by the default ACL */
60877+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
60878+ if (unlikely(obj->mode & GR_WRITE))
60879+ task->is_writable = 1;
60880+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
60881+ if (unlikely(obj->mode & GR_WRITE))
60882+ task->is_writable = 1;
60883+
60884+ gr_set_proc_res(task);
60885+
60886+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
60887+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
60888+#endif
60889+ } else {
60890+ return 1;
60891+ }
60892+
60893+ return 0;
60894+}
60895+
60896+int
60897+gr_set_acls(const int type)
60898+{
60899+ struct task_struct *task, *task2;
60900+ struct acl_role_label *role = current->role;
60901+ __u16 acl_role_id = current->acl_role_id;
60902+ const struct cred *cred;
60903+ int ret;
60904+
60905+ rcu_read_lock();
60906+ read_lock(&tasklist_lock);
60907+ read_lock(&grsec_exec_file_lock);
60908+ do_each_thread(task2, task) {
60909+ /* check to see if we're called from the exit handler,
60910+ if so, only replace ACLs that have inherited the admin
60911+ ACL */
60912+
60913+ if (type && (task->role != role ||
60914+ task->acl_role_id != acl_role_id))
60915+ continue;
60916+
60917+ task->acl_role_id = 0;
60918+ task->acl_sp_role = 0;
60919+
60920+ if (task->exec_file) {
60921+ cred = __task_cred(task);
60922+ task->role = lookup_acl_role_label(task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
60923+ ret = gr_apply_subject_to_task(task);
60924+ if (ret) {
60925+ read_unlock(&grsec_exec_file_lock);
60926+ read_unlock(&tasklist_lock);
60927+ rcu_read_unlock();
60928+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
60929+ return ret;
60930+ }
60931+ } else {
60932+ // it's a kernel process
60933+ task->role = kernel_role;
60934+ task->acl = kernel_role->root_label;
60935+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
60936+ task->acl->mode &= ~GR_PROCFIND;
60937+#endif
60938+ }
60939+ } while_each_thread(task2, task);
60940+ read_unlock(&grsec_exec_file_lock);
60941+ read_unlock(&tasklist_lock);
60942+ rcu_read_unlock();
60943+
60944+ return 0;
60945+}
60946+
60947+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
60948+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
60949+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
60950+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
60951+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
60952+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
60953+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
60954+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
60955+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
60956+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
60957+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
60958+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
60959+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
60960+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
60961+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
60962+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
60963+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
60964+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
60965+};
60966+
60967+void
60968+gr_learn_resource(const struct task_struct *task,
60969+ const int res, const unsigned long wanted, const int gt)
60970+{
60971+ struct acl_subject_label *acl;
60972+ const struct cred *cred;
60973+
60974+ if (unlikely((gr_status & GR_READY) &&
60975+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
60976+ goto skip_reslog;
60977+
60978+ gr_log_resource(task, res, wanted, gt);
60979+skip_reslog:
60980+
60981+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
60982+ return;
60983+
60984+ acl = task->acl;
60985+
60986+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
60987+ !(acl->resmask & (1 << (unsigned short) res))))
60988+ return;
60989+
60990+ if (wanted >= acl->res[res].rlim_cur) {
60991+ unsigned long res_add;
60992+
60993+ res_add = wanted + res_learn_bumps[res];
60994+
60995+ acl->res[res].rlim_cur = res_add;
60996+
60997+ if (wanted > acl->res[res].rlim_max)
60998+ acl->res[res].rlim_max = res_add;
60999+
61000+ /* only log the subject filename, since resource logging is supported for
61001+ single-subject learning only */
61002+ rcu_read_lock();
61003+ cred = __task_cred(task);
61004+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
61005+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
61006+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
61007+ "", (unsigned long) res, &task->signal->saved_ip);
61008+ rcu_read_unlock();
61009+ }
61010+
61011+ return;
61012+}
61013+EXPORT_SYMBOL(gr_learn_resource);
61014+#endif
61015+
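gr_learn_resource() grows a learned limit whenever a request reaches the current soft limit: the new soft limit becomes wanted plus the per-resource bump from res_learn_bumps[], and the hard limit follows only if the request exceeded it. The adjustment in isolation, with invented numbers:

    #include <stdio.h>

    struct learned_rlim {
        unsigned long cur, max;
    };

    /* grow a learned limit the way gr_learn_resource does; "bump" stands
     * in for the per-resource GR_RLIM_*_BUMP constant */
    static void learn_bump(struct learned_rlim *r, unsigned long wanted,
                           unsigned long bump)
    {
        if (wanted >= r->cur) {
            unsigned long grown = wanted + bump;

            r->cur = grown;
            if (wanted > r->max)
                r->max = grown;
        }
    }

    int main(void)
    {
        struct learned_rlim nofile = { 1024, 4096 };

        learn_bump(&nofile, 2000, 64); /* within max: only cur grows */
        printf("cur=%lu max=%lu\n", nofile.cur, nofile.max); /* 2064 4096 */

        learn_bump(&nofile, 5000, 64); /* beyond max: both grow */
        printf("cur=%lu max=%lu\n", nofile.cur, nofile.max); /* 5064 5064 */
        return 0;
    }
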
61016+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
61017+void
61018+pax_set_initial_flags(struct linux_binprm *bprm)
61019+{
61020+ struct task_struct *task = current;
61021+ struct acl_subject_label *proc;
61022+ unsigned long flags;
61023+
61024+ if (unlikely(!(gr_status & GR_READY)))
61025+ return;
61026+
61027+ flags = pax_get_flags(task);
61028+
61029+ proc = task->acl;
61030+
61031+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
61032+ flags &= ~MF_PAX_PAGEEXEC;
61033+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
61034+ flags &= ~MF_PAX_SEGMEXEC;
61035+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
61036+ flags &= ~MF_PAX_RANDMMAP;
61037+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
61038+ flags &= ~MF_PAX_EMUTRAMP;
61039+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
61040+ flags &= ~MF_PAX_MPROTECT;
61041+
61042+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
61043+ flags |= MF_PAX_PAGEEXEC;
61044+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
61045+ flags |= MF_PAX_SEGMEXEC;
61046+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
61047+ flags |= MF_PAX_RANDMMAP;
61048+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
61049+ flags |= MF_PAX_EMUTRAMP;
61050+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
61051+ flags |= MF_PAX_MPROTECT;
61052+
61053+ pax_set_flags(task, flags);
61054+
61055+ return;
61056+}
61057+#endif
61058+
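pax_set_initial_flags() applies per-subject overrides in two passes: GR_PAX_DISABLE_* bits clear the matching MF_PAX_* flags first, then GR_PAX_ENABLE_* bits set them, so an explicit enable takes precedence over the inherited state. The same clear-then-set idiom in miniature (made-up flag names):

    #include <stdio.h>

    #define F_A (1u << 0)
    #define F_B (1u << 1)

    struct override {
        unsigned int disable; /* bits to force off */
        unsigned int enable;  /* bits to force on */
    };

    static unsigned int apply_override(unsigned int flags, struct override o)
    {
        flags &= ~o.disable; /* pass 1: disables */
        flags |= o.enable;   /* pass 2: enables win */
        return flags;
    }

    int main(void)
    {
        struct override o = { .disable = F_A, .enable = F_B };

        printf("%#x\n", apply_override(F_A, o)); /* F_A cleared, F_B set: 0x2 */
        return 0;
    }
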
61059+int
61060+gr_handle_proc_ptrace(struct task_struct *task)
61061+{
61062+ struct file *filp;
61063+ struct task_struct *tmp = task;
61064+ struct task_struct *curtemp = current;
61065+ __u32 retmode;
61066+
61067+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
61068+ if (unlikely(!(gr_status & GR_READY)))
61069+ return 0;
61070+#endif
61071+
61072+ read_lock(&tasklist_lock);
61073+ read_lock(&grsec_exec_file_lock);
61074+ filp = task->exec_file;
61075+
61076+ while (task_pid_nr(tmp) > 0) {
61077+ if (tmp == curtemp)
61078+ break;
61079+ tmp = tmp->real_parent;
61080+ }
61081+
61082+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
61083+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
61084+ read_unlock(&grsec_exec_file_lock);
61085+ read_unlock(&tasklist_lock);
61086+ return 1;
61087+ }
61088+
61089+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
61090+ if (!(gr_status & GR_READY)) {
61091+ read_unlock(&grsec_exec_file_lock);
61092+ read_unlock(&tasklist_lock);
61093+ return 0;
61094+ }
61095+#endif
61096+
61097+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
61098+ read_unlock(&grsec_exec_file_lock);
61099+ read_unlock(&tasklist_lock);
61100+
61101+ if (retmode & GR_NOPTRACE)
61102+ return 1;
61103+
61104+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
61105+ && (current->acl != task->acl || (current->acl != current->role->root_label
61106+ && task_pid_nr(current) != task_pid_nr(task))))
61107+ return 1;
61108+
61109+ return 0;
61110+}
61111+
61112+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
61113+{
61114+ if (unlikely(!(gr_status & GR_READY)))
61115+ return;
61116+
61117+ if (!(current->role->roletype & GR_ROLE_GOD))
61118+ return;
61119+
61120+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
61121+ p->role->rolename, gr_task_roletype_to_char(p),
61122+ p->acl->filename);
61123+}
61124+
61125+int
61126+gr_handle_ptrace(struct task_struct *task, const long request)
61127+{
61128+ struct task_struct *tmp = task;
61129+ struct task_struct *curtemp = current;
61130+ __u32 retmode;
61131+
61132+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
61133+ if (unlikely(!(gr_status & GR_READY)))
61134+ return 0;
61135+#endif
61136+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
61137+ read_lock(&tasklist_lock);
61138+ while (task_pid_nr(tmp) > 0) {
61139+ if (tmp == curtemp)
61140+ break;
61141+ tmp = tmp->real_parent;
61142+ }
61143+
61144+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
61145+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
61146+ read_unlock(&tasklist_lock);
61147+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
61148+ return 1;
61149+ }
61150+ read_unlock(&tasklist_lock);
61151+ }
61152+
61153+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
61154+ if (!(gr_status & GR_READY))
61155+ return 0;
61156+#endif
61157+
61158+ read_lock(&grsec_exec_file_lock);
61159+ if (unlikely(!task->exec_file)) {
61160+ read_unlock(&grsec_exec_file_lock);
61161+ return 0;
61162+ }
61163+
61164+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
61165+ read_unlock(&grsec_exec_file_lock);
61166+
61167+ if (retmode & GR_NOPTRACE) {
61168+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
61169+ return 1;
61170+ }
61171+
61172+ if (retmode & GR_PTRACERD) {
61173+ switch (request) {
61174+ case PTRACE_SEIZE:
61175+ case PTRACE_POKETEXT:
61176+ case PTRACE_POKEDATA:
61177+ case PTRACE_POKEUSR:
61178+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
61179+ case PTRACE_SETREGS:
61180+ case PTRACE_SETFPREGS:
61181+#endif
61182+#ifdef CONFIG_X86
61183+ case PTRACE_SETFPXREGS:
61184+#endif
61185+#ifdef CONFIG_ALTIVEC
61186+ case PTRACE_SETVRREGS:
61187+#endif
61188+ return 1;
61189+ default:
61190+ return 0;
61191+ }
61192+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
61193+ !(current->role->roletype & GR_ROLE_GOD) &&
61194+ (current->acl != task->acl)) {
61195+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
61196+ return 1;
61197+ }
61198+
61199+ return 0;
61200+}
61201+
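For an object marked GR_PTRACERD (read-only ptrace), the switch above only has to separate requests that can modify the tracee from ones that merely observe it. A reduced classifier in the same spirit -- only a few portable request values are shown; the real switch covers more and is arch-dependent:

    #include <stdio.h>
    #include <sys/ptrace.h> /* Linux-specific header */

    /* deny (return 1) requests that could modify the tracee */
    static int ptracerd_deny(long request)
    {
        switch (request) {
        case PTRACE_POKETEXT:
        case PTRACE_POKEDATA:
        case PTRACE_POKEUSER:
            return 1;
        default: /* reads such as PTRACE_PEEKDATA pass */
            return 0;
        }
    }

    int main(void)
    {
        printf("%d %d\n", ptracerd_deny(PTRACE_PEEKDATA),
               ptracerd_deny(PTRACE_POKEDATA)); /* prints "0 1" */
        return 0;
    }
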
61202+static int is_writable_mmap(const struct file *filp)
61203+{
61204+ struct task_struct *task = current;
61205+ struct acl_object_label *obj, *obj2;
61206+
61207+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
61208+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
61209+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
61210+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
61211+ task->role->root_label);
61212+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
61213+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
61214+ return 1;
61215+ }
61216+ }
61217+ return 0;
61218+}
61219+
61220+int
61221+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
61222+{
61223+ __u32 mode;
61224+
61225+ if (unlikely(!file || !(prot & PROT_EXEC)))
61226+ return 1;
61227+
61228+ if (is_writable_mmap(file))
61229+ return 0;
61230+
61231+ mode =
61232+ gr_search_file(file->f_path.dentry,
61233+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
61234+ file->f_path.mnt);
61235+
61236+ if (!gr_tpe_allow(file))
61237+ return 0;
61238+
61239+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
61240+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
61241+ return 0;
61242+ } else if (unlikely(!(mode & GR_EXEC))) {
61243+ return 0;
61244+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
61245+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
61246+ return 1;
61247+ }
61248+
61249+ return 1;
61250+}
61251+
61252+int
61253+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
61254+{
61255+ __u32 mode;
61256+
61257+ if (unlikely(!file || !(prot & PROT_EXEC)))
61258+ return 1;
61259+
61260+ if (is_writable_mmap(file))
61261+ return 0;
61262+
61263+ mode =
61264+ gr_search_file(file->f_path.dentry,
61265+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
61266+ file->f_path.mnt);
61267+
61268+ if (!gr_tpe_allow(file))
61269+ return 0;
61270+
61271+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
61272+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
61273+ return 0;
61274+ } else if (unlikely(!(mode & GR_EXEC))) {
61275+ return 0;
61276+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
61277+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
61278+ return 1;
61279+ }
61280+
61281+ return 1;
61282+}
61283+
61284+void
61285+gr_acl_handle_psacct(struct task_struct *task, const long code)
61286+{
61287+ unsigned long runtime;
61288+ unsigned long cputime;
61289+ unsigned int wday, cday;
61290+ __u8 whr, chr;
61291+ __u8 wmin, cmin;
61292+ __u8 wsec, csec;
61293+ struct timespec timeval;
61294+
61295+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
61296+ !(task->acl->mode & GR_PROCACCT)))
61297+ return;
61298+
61299+ do_posix_clock_monotonic_gettime(&timeval);
61300+ runtime = timeval.tv_sec - task->start_time.tv_sec;
61301+ wday = runtime / (3600 * 24);
61302+ runtime -= wday * (3600 * 24);
61303+ whr = runtime / 3600;
61304+ runtime -= whr * 3600;
61305+ wmin = runtime / 60;
61306+ runtime -= wmin * 60;
61307+ wsec = runtime;
61308+
61309+ cputime = (task->utime + task->stime) / HZ;
61310+ cday = cputime / (3600 * 24);
61311+ cputime -= cday * (3600 * 24);
61312+ chr = cputime / 3600;
61313+ cputime -= chr * 3600;
61314+ cmin = cputime / 60;
61315+ cputime -= cmin * 60;
61316+ csec = cputime;
61317+
61318+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
61319+
61320+ return;
61321+}
61322+
61323+void gr_set_kernel_label(struct task_struct *task)
61324+{
61325+ if (gr_status & GR_READY) {
61326+ task->role = kernel_role;
61327+ task->acl = kernel_role->root_label;
61328+ }
61329+ return;
61330+}
61331+
61332+#ifdef CONFIG_TASKSTATS
61333+int gr_is_taskstats_denied(int pid)
61334+{
61335+ struct task_struct *task;
61336+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61337+ const struct cred *cred;
61338+#endif
61339+ int ret = 0;
61340+
61341+ /* restrict taskstats viewing to un-chrooted root users
61342+ who have the 'view' subject flag if the RBAC system is enabled
61343+ */
61344+
61345+ rcu_read_lock();
61346+ read_lock(&tasklist_lock);
61347+ task = find_task_by_vpid(pid);
61348+ if (task) {
61349+#ifdef CONFIG_GRKERNSEC_CHROOT
61350+ if (proc_is_chrooted(task))
61351+ ret = -EACCES;
61352+#endif
61353+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61354+ cred = __task_cred(task);
61355+#ifdef CONFIG_GRKERNSEC_PROC_USER
61356+ if (gr_is_global_nonroot(cred->uid))
61357+ ret = -EACCES;
61358+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61359+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
61360+ ret = -EACCES;
61361+#endif
61362+#endif
61363+ if (gr_status & GR_READY) {
61364+ if (!(task->acl->mode & GR_VIEW))
61365+ ret = -EACCES;
61366+ }
61367+ } else
61368+ ret = -ENOENT;
61369+
61370+ read_unlock(&tasklist_lock);
61371+ rcu_read_unlock();
61372+
61373+ return ret;
61374+}
61375+#endif
61376+
61377+/* AUXV entries are filled via a descendant of search_binary_handler
61378+ after we've already applied the subject for the target
61379+*/
61380+int gr_acl_enable_at_secure(void)
61381+{
61382+ if (unlikely(!(gr_status & GR_READY)))
61383+ return 0;
61384+
61385+ if (current->acl->mode & GR_ATSECURE)
61386+ return 1;
61387+
61388+ return 0;
61389+}
61390+
61391+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
61392+{
61393+ struct task_struct *task = current;
61394+ struct dentry *dentry = file->f_path.dentry;
61395+ struct vfsmount *mnt = file->f_path.mnt;
61396+ struct acl_object_label *obj, *tmp;
61397+ struct acl_subject_label *subj;
61398+ unsigned int bufsize;
61399+ int is_not_root;
61400+ char *path;
61401+ dev_t dev = __get_dev(dentry);
61402+
61403+ if (unlikely(!(gr_status & GR_READY)))
61404+ return 1;
61405+
61406+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
61407+ return 1;
61408+
61409+ /* ignore Eric Biederman */
61410+ if (IS_PRIVATE(dentry->d_inode))
61411+ return 1;
61412+
61413+ subj = task->acl;
61414+ read_lock(&gr_inode_lock);
61415+ do {
61416+ obj = lookup_acl_obj_label(ino, dev, subj);
61417+ if (obj != NULL) {
61418+ read_unlock(&gr_inode_lock);
61419+ return (obj->mode & GR_FIND) ? 1 : 0;
61420+ }
61421+ } while ((subj = subj->parent_subject));
61422+ read_unlock(&gr_inode_lock);
61423+
61424+ /* this is purely an optimization, since we're looking up an object
61425+ for the directory we're doing a readdir on.
61426+ If it's possible for any globbed object to match the entry we're
61427+ filling into the directory, then the object we find here will be
61428+ an anchor point with attached globbed objects.
61429+ */
61430+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
61431+ if (obj->globbed == NULL)
61432+ return (obj->mode & GR_FIND) ? 1 : 0;
61433+
61434+ is_not_root = ((obj->filename[0] == '/') &&
61435+ (obj->filename[1] == '\0')) ? 0 : 1;
61436+ bufsize = PAGE_SIZE - namelen - is_not_root;
61437+
61438+ /* reject bufsize > PAGE_SIZE or bufsize == 0: (bufsize - 1) wraps around for bufsize == 0, so one unsigned compare covers both */
61439+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
61440+ return 1;
61441+
61442+ preempt_disable();
61443+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
61444+ bufsize);
61445+
61446+ bufsize = strlen(path);
61447+
61448+ /* if base is "/", don't append an additional slash */
61449+ if (is_not_root)
61450+ *(path + bufsize) = '/';
61451+ memcpy(path + bufsize + is_not_root, name, namelen);
61452+ *(path + bufsize + namelen + is_not_root) = '\0';
61453+
61454+ tmp = obj->globbed;
61455+ while (tmp) {
61456+ if (!glob_match(tmp->filename, path)) {
61457+ preempt_enable();
61458+ return (tmp->mode & GR_FIND) ? 1 : 0;
61459+ }
61460+ tmp = tmp->next;
61461+ }
61462+ preempt_enable();
61463+ return (obj->mode & GR_FIND) ? 1 : 0;
61464+}
61465+
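The bufsize test in gr_acl_handle_filldir packs two range checks into a single unsigned comparison: for bufsize == 0 the subtraction wraps to the type's maximum, which is well above PAGE_SIZE - 1, and any value larger than PAGE_SIZE fails directly. The idiom on its own:

    #include <stdio.h>

    #define LIMIT 4096u /* stands in for PAGE_SIZE */

    /* reject n == 0 and n > LIMIT with a single unsigned compare */
    static int in_range(unsigned int n)
    {
        return !((n - 1) > (LIMIT - 1));
    }

    int main(void)
    {
        printf("%d %d %d %d\n",
               in_range(0),          /* 0: wraps to UINT_MAX, rejected */
               in_range(1),          /* 1: accepted */
               in_range(LIMIT),      /* LIMIT: accepted */
               in_range(LIMIT + 1)); /* LIMIT+1: rejected */
        return 0; /* prints "0 1 1 0" */
    }
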
61466+void gr_put_exec_file(struct task_struct *task)
61467+{
61468+ struct file *filp;
61469+
61470+ write_lock(&grsec_exec_file_lock);
61471+ filp = task->exec_file;
61472+ task->exec_file = NULL;
61473+ write_unlock(&grsec_exec_file_lock);
61474+
61475+ if (filp)
61476+ fput(filp);
61477+
61478+ return;
61479+}
61480+
61481+
61482+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
61483+EXPORT_SYMBOL(gr_acl_is_enabled);
61484+#endif
61485+EXPORT_SYMBOL(gr_set_kernel_label);
61486+#ifdef CONFIG_SECURITY
61487+EXPORT_SYMBOL(gr_check_user_change);
61488+EXPORT_SYMBOL(gr_check_group_change);
61489+#endif
61490+
61491diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
61492new file mode 100644
61493index 0000000..34fefda
61494--- /dev/null
61495+++ b/grsecurity/gracl_alloc.c
61496@@ -0,0 +1,105 @@
61497+#include <linux/kernel.h>
61498+#include <linux/mm.h>
61499+#include <linux/slab.h>
61500+#include <linux/vmalloc.h>
61501+#include <linux/gracl.h>
61502+#include <linux/grsecurity.h>
61503+
61504+static unsigned long alloc_stack_next = 1;
61505+static unsigned long alloc_stack_size = 1;
61506+static void **alloc_stack;
61507+
61508+static __inline__ int
61509+alloc_pop(void)
61510+{
61511+ if (alloc_stack_next == 1)
61512+ return 0;
61513+
61514+ kfree(alloc_stack[alloc_stack_next - 2]);
61515+
61516+ alloc_stack_next--;
61517+
61518+ return 1;
61519+}
61520+
61521+static __inline__ int
61522+alloc_push(void *buf)
61523+{
61524+ if (alloc_stack_next >= alloc_stack_size)
61525+ return 1;
61526+
61527+ alloc_stack[alloc_stack_next - 1] = buf;
61528+
61529+ alloc_stack_next++;
61530+
61531+ return 0;
61532+}
61533+
61534+void *
61535+acl_alloc(unsigned long len)
61536+{
61537+ void *ret = NULL;
61538+
61539+ if (!len || len > PAGE_SIZE)
61540+ goto out;
61541+
61542+ ret = kmalloc(len, GFP_KERNEL);
61543+
61544+ if (ret) {
61545+ if (alloc_push(ret)) {
61546+ kfree(ret);
61547+ ret = NULL;
61548+ }
61549+ }
61550+
61551+out:
61552+ return ret;
61553+}
61554+
61555+void *
61556+acl_alloc_num(unsigned long num, unsigned long len)
61557+{
61558+ if (!len || (num > (PAGE_SIZE / len)))
61559+ return NULL;
61560+
61561+ return acl_alloc(num * len);
61562+}
61563+
61564+void
61565+acl_free_all(void)
61566+{
61567+ if (gr_acl_is_enabled() || !alloc_stack)
61568+ return;
61569+
61570+ while (alloc_pop()) ;
61571+
61572+ if (alloc_stack) {
61573+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
61574+ kfree(alloc_stack);
61575+ else
61576+ vfree(alloc_stack);
61577+ }
61578+
61579+ alloc_stack = NULL;
61580+ alloc_stack_size = 1;
61581+ alloc_stack_next = 1;
61582+
61583+ return;
61584+}
61585+
61586+int
61587+acl_alloc_stack_init(unsigned long size)
61588+{
61589+ if ((size * sizeof (void *)) <= PAGE_SIZE)
61590+ alloc_stack =
61591+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
61592+ else
61593+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
61594+
61595+ alloc_stack_size = size;
61596+
61597+ if (!alloc_stack)
61598+ return 0;
61599+ else
61600+ return 1;
61601+}
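
gracl_alloc.c is a one-shot allocation tracker: every acl_alloc() pushes its pointer onto a preallocated stack so the whole policy can be torn down with a single acl_free_all(), and acl_alloc_num() refuses num > PAGE_SIZE / len to keep the multiplication from overflowing. A userspace miniature of the same design, with malloc/free standing in for kmalloc/vmalloc:

    #include <stdio.h>
    #include <stdlib.h>

    static void **stack;
    static unsigned long next = 1, size = 1;

    static int tracker_init(unsigned long n)
    {
        stack = malloc(n * sizeof(void *));
        size = n;
        return stack != NULL;
    }

    /* allocate and remember the pointer for bulk teardown */
    static void *tracked_alloc(unsigned long len)
    {
        void *p;

        if (!len || next >= size)
            return NULL;
        p = malloc(len);
        if (p)
            stack[next++ - 1] = p;
        return p;
    }

    /* free everything in LIFO order, then the stack itself */
    static void tracked_free_all(void)
    {
        while (next > 1)
            free(stack[--next - 1]);
        free(stack);
        stack = NULL;
        size = next = 1;
    }

    int main(void)
    {
        if (!tracker_init(8))
            return 1;
        tracked_alloc(16);
        tracked_alloc(32);
        tracked_free_all(); /* both blocks released in one call */
        printf("done\n");
        return 0;
    }
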
61602diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
61603new file mode 100644
61604index 0000000..bdd51ea
61605--- /dev/null
61606+++ b/grsecurity/gracl_cap.c
61607@@ -0,0 +1,110 @@
61608+#include <linux/kernel.h>
61609+#include <linux/module.h>
61610+#include <linux/sched.h>
61611+#include <linux/gracl.h>
61612+#include <linux/grsecurity.h>
61613+#include <linux/grinternal.h>
61614+
61615+extern const char *captab_log[];
61616+extern int captab_log_entries;
61617+
61618+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
61619+{
61620+ struct acl_subject_label *curracl;
61621+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
61622+ kernel_cap_t cap_audit = __cap_empty_set;
61623+
61624+ if (!gr_acl_is_enabled())
61625+ return 1;
61626+
61627+ curracl = task->acl;
61628+
61629+ cap_drop = curracl->cap_lower;
61630+ cap_mask = curracl->cap_mask;
61631+ cap_audit = curracl->cap_invert_audit;
61632+
61633+ while ((curracl = curracl->parent_subject)) {
61634+ /* if the cap isn't covered by the accumulated mask yet but is
61635+ specified at the current subject level, mark it as covered and
61636+ record whether this level drops the capability (and whether it
61637+ inverts auditing for it)
61638+ */
61639+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
61640+ cap_raise(cap_mask, cap);
61641+ if (cap_raised(curracl->cap_lower, cap))
61642+ cap_raise(cap_drop, cap);
61643+ if (cap_raised(curracl->cap_invert_audit, cap))
61644+ cap_raise(cap_audit, cap);
61645+ }
61646+ }
61647+
61648+ if (!cap_raised(cap_drop, cap)) {
61649+ if (cap_raised(cap_audit, cap))
61650+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
61651+ return 1;
61652+ }
61653+
61654+ curracl = task->acl;
61655+
61656+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
61657+ && cap_raised(cred->cap_effective, cap)) {
61658+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
61659+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
61660+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
61661+ gr_to_filename(task->exec_file->f_path.dentry,
61662+ task->exec_file->f_path.mnt) : curracl->filename,
61663+ curracl->filename, 0UL,
61664+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
61665+ return 1;
61666+ }
61667+
61668+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
61669+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
61670+
61671+ return 0;
61672+}
61673+
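The parent_subject walk above resolves capabilities by proximity: the nearest enclosing subject that specifies a cap decides it, which the loop enforces by consulting a level only for caps not yet present in the accumulated cap_mask. A bitmask model of that resolution order (toy subject type, plain unsigned ints instead of kernel_cap_t):

    #include <stdio.h>
    #include <stddef.h>

    struct subj {
        unsigned int mask;  /* caps this level specifies */
        unsigned int lower; /* caps this level drops */
        struct subj *parent;
    };

    /* nearest level that specifies a cap wins, as in the parent_subject walk */
    static unsigned int effective_drop(struct subj *s)
    {
        unsigned int mask = s->mask, drop = s->lower;

        for (s = s->parent; s; s = s->parent) {
            unsigned int newbits = s->mask & ~mask; /* caps seen here first */

            drop |= s->lower & newbits;
            mask |= newbits;
        }
        return drop;
    }

    int main(void)
    {
        /* parent drops cap 0; child specifies cap 0 without dropping it */
        struct subj parent = { .mask = 0x1, .lower = 0x1, .parent = NULL };
        struct subj child  = { .mask = 0x1, .lower = 0x0, .parent = &parent };

        printf("%#x\n", effective_drop(&child)); /* child's grant wins: 0 */
        return 0;
    }
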
61674+int
61675+gr_acl_is_capable(const int cap)
61676+{
61677+ return gr_task_acl_is_capable(current, current_cred(), cap);
61678+}
61679+
61680+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
61681+{
61682+ struct acl_subject_label *curracl;
61683+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
61684+
61685+ if (!gr_acl_is_enabled())
61686+ return 1;
61687+
61688+ curracl = task->acl;
61689+
61690+ cap_drop = curracl->cap_lower;
61691+ cap_mask = curracl->cap_mask;
61692+
61693+ while ((curracl = curracl->parent_subject)) {
61694+ /* same walk as in gr_task_acl_is_capable(), minus auditing:
61695+ the nearest subject level that specifies a cap decides it,
61696+ so a drop is only recorded for caps not yet covered by the
61697+ accumulated mask
61698+ */
61699+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
61700+ cap_raise(cap_mask, cap);
61701+ if (cap_raised(curracl->cap_lower, cap))
61702+ cap_raise(cap_drop, cap);
61703+ }
61704+ }
61705+
61706+ if (!cap_raised(cap_drop, cap))
61707+ return 1;
61708+
61709+ return 0;
61710+}
61711+
61712+int
61713+gr_acl_is_capable_nolog(const int cap)
61714+{
61715+ return gr_task_acl_is_capable_nolog(current, cap);
61716+}
61717+
61718diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
61719new file mode 100644
61720index 0000000..a340c17
61721--- /dev/null
61722+++ b/grsecurity/gracl_fs.c
61723@@ -0,0 +1,431 @@
61724+#include <linux/kernel.h>
61725+#include <linux/sched.h>
61726+#include <linux/types.h>
61727+#include <linux/fs.h>
61728+#include <linux/file.h>
61729+#include <linux/stat.h>
61730+#include <linux/grsecurity.h>
61731+#include <linux/grinternal.h>
61732+#include <linux/gracl.h>
61733+
61734+umode_t
61735+gr_acl_umask(void)
61736+{
61737+ if (unlikely(!gr_acl_is_enabled()))
61738+ return 0;
61739+
61740+ return current->role->umask;
61741+}
61742+
61743+__u32
61744+gr_acl_handle_hidden_file(const struct dentry * dentry,
61745+ const struct vfsmount * mnt)
61746+{
61747+ __u32 mode;
61748+
61749+ if (unlikely(!dentry->d_inode))
61750+ return GR_FIND;
61751+
61752+ mode =
61753+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
61754+
61755+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
61756+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
61757+ return mode;
61758+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
61759+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
61760+ return 0;
61761+ } else if (unlikely(!(mode & GR_FIND)))
61762+ return 0;
61763+
61764+ return GR_FIND;
61765+}
61766+
61767+__u32
61768+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
61769+ int acc_mode)
61770+{
61771+ __u32 reqmode = GR_FIND;
61772+ __u32 mode;
61773+
61774+ if (unlikely(!dentry->d_inode))
61775+ return reqmode;
61776+
61777+ if (acc_mode & MAY_APPEND)
61778+ reqmode |= GR_APPEND;
61779+ else if (acc_mode & MAY_WRITE)
61780+ reqmode |= GR_WRITE;
61781+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
61782+ reqmode |= GR_READ;
61783+
61784+ mode =
61785+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
61786+ mnt);
61787+
61788+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
61789+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
61790+ reqmode & GR_READ ? " reading" : "",
61791+ reqmode & GR_WRITE ? " writing" : reqmode &
61792+ GR_APPEND ? " appending" : "");
61793+ return reqmode;
61794+ } else
61795+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
61796+ {
61797+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
61798+ reqmode & GR_READ ? " reading" : "",
61799+ reqmode & GR_WRITE ? " writing" : reqmode &
61800+ GR_APPEND ? " appending" : "");
61801+ return 0;
61802+ } else if (unlikely((mode & reqmode) != reqmode))
61803+ return 0;
61804+
61805+ return reqmode;
61806+}
61807+
61808+__u32
61809+gr_acl_handle_creat(const struct dentry * dentry,
61810+ const struct dentry * p_dentry,
61811+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
61812+ const int imode)
61813+{
61814+ __u32 reqmode = GR_WRITE | GR_CREATE;
61815+ __u32 mode;
61816+
61817+ if (acc_mode & MAY_APPEND)
61818+ reqmode |= GR_APPEND;
61819+ // if a directory was required or the directory already exists, then
61820+ // don't count this open as a read
61821+ if ((acc_mode & MAY_READ) &&
61822+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
61823+ reqmode |= GR_READ;
61824+ if ((open_flags & O_CREAT) &&
61825+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
61826+ reqmode |= GR_SETID;
61827+
61828+ mode =
61829+ gr_check_create(dentry, p_dentry, p_mnt,
61830+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
61831+
61832+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
61833+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
61834+ reqmode & GR_READ ? " reading" : "",
61835+ reqmode & GR_WRITE ? " writing" : reqmode &
61836+ GR_APPEND ? " appending" : "");
61837+ return reqmode;
61838+ } else
61839+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
61840+ {
61841+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
61842+ reqmode & GR_READ ? " reading" : "",
61843+ reqmode & GR_WRITE ? " writing" : reqmode &
61844+ GR_APPEND ? " appending" : "");
61845+ return 0;
61846+ } else if (unlikely((mode & reqmode) != reqmode))
61847+ return 0;
61848+
61849+ return reqmode;
61850+}
61851+
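gr_acl_handle_creat (like the chmod and mknod handlers below) treats a mode as setid-granting when it sets setuid, or setgid together with group-execute; setgid without S_IXGRP denotes mandatory locking rather than a setgid executable, so it is deliberately excluded. The predicate on its own:

    #include <stdio.h>
    #include <sys/stat.h>

    /* matches the (imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) ==
     * (S_ISGID | S_IXGRP)) tests used by the create/chmod/mknod handlers */
    static int mode_grants_setid(mode_t m)
    {
        return (m & S_ISUID) ||
               ((m & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP));
    }

    int main(void)
    {
        printf("%d %d %d\n",
               mode_grants_setid(04755),  /* setuid: 1 */
               mode_grants_setid(02755),  /* setgid + group exec: 1 */
               mode_grants_setid(02644)); /* setgid, no group exec: 0 */
        return 0;
    }
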
61852+__u32
61853+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
61854+ const int fmode)
61855+{
61856+ __u32 mode, reqmode = GR_FIND;
61857+
61858+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
61859+ reqmode |= GR_EXEC;
61860+ if (fmode & S_IWOTH)
61861+ reqmode |= GR_WRITE;
61862+ if (fmode & S_IROTH)
61863+ reqmode |= GR_READ;
61864+
61865+ mode =
61866+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
61867+ mnt);
61868+
61869+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
61870+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
61871+ reqmode & GR_READ ? " reading" : "",
61872+ reqmode & GR_WRITE ? " writing" : "",
61873+ reqmode & GR_EXEC ? " executing" : "");
61874+ return reqmode;
61875+ } else
61876+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
61877+ {
61878+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
61879+ reqmode & GR_READ ? " reading" : "",
61880+ reqmode & GR_WRITE ? " writing" : "",
61881+ reqmode & GR_EXEC ? " executing" : "");
61882+ return 0;
61883+ } else if (unlikely((mode & reqmode) != reqmode))
61884+ return 0;
61885+
61886+ return reqmode;
61887+}
61888+
61889+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
61890+{
61891+ __u32 mode;
61892+
61893+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
61894+
61895+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
61896+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
61897+ return mode;
61898+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
61899+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
61900+ return 0;
61901+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
61902+ return 0;
61903+
61904+ return (reqmode);
61905+}
61906+
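generic_fs_handler() condenses the decision table repeated throughout this file: allowed with an audit bit set logs and grants; denied without GR_SUPPRESS logs and refuses; denied with suppress refuses silently; anything else grants quietly. The same truth table as a standalone check (invented flag values):

    #include <stdio.h>

    #define REQ      0x1 /* required access */
    #define AUDIT    0x2 /* audit-on-success */
    #define SUPPRESS 0x4 /* suppress denial logging */

    /* returns the granted mode, logging like generic_fs_handler would */
    static unsigned int handle(unsigned int mode, unsigned int req)
    {
        if ((mode & req) == req && (mode & AUDIT)) {
            puts("audit: allowed");
            return req;
        }
        if ((mode & req) != req && !(mode & SUPPRESS)) {
            puts("alert: denied");
            return 0;
        }
        if ((mode & req) != req)
            return 0; /* denied, suppressed: no log */
        return req;   /* allowed, no audit */
    }

    int main(void)
    {
        handle(REQ | AUDIT, REQ); /* audit: allowed */
        handle(0, REQ);           /* alert: denied */
        handle(SUPPRESS, REQ);    /* silent denial */
        return 0;
    }
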
61907+__u32
61908+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
61909+{
61910+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
61911+}
61912+
61913+__u32
61914+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
61915+{
61916+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
61917+}
61918+
61919+__u32
61920+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
61921+{
61922+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
61923+}
61924+
61925+__u32
61926+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
61927+{
61928+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
61929+}
61930+
61931+__u32
61932+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
61933+ umode_t *modeptr)
61934+{
61935+ umode_t mode;
61936+
61937+ *modeptr &= ~gr_acl_umask();
61938+ mode = *modeptr;
61939+
61940+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
61941+ return 1;
61942+
61943+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
61944+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
61945+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
61946+ GR_CHMOD_ACL_MSG);
61947+ } else {
61948+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
61949+ }
61950+}
61951+
61952+__u32
61953+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
61954+{
61955+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
61956+}
61957+
61958+__u32
61959+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
61960+{
61961+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
61962+}
61963+
61964+__u32
61965+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
61966+{
61967+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
61968+}
61969+
61970+__u32
61971+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
61972+{
61973+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
61974+ GR_UNIXCONNECT_ACL_MSG);
61975+}
61976+
61977+/* hardlinks require at minimum create and link permission;
61978+ any additional privilege required depends on the
61979+ privilege of the file being linked to
61980+*/
61981+__u32
61982+gr_acl_handle_link(const struct dentry * new_dentry,
61983+ const struct dentry * parent_dentry,
61984+ const struct vfsmount * parent_mnt,
61985+ const struct dentry * old_dentry,
61986+ const struct vfsmount * old_mnt, const struct filename *to)
61987+{
61988+ __u32 mode;
61989+ __u32 needmode = GR_CREATE | GR_LINK;
61990+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
61991+
61992+ mode =
61993+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
61994+ old_mnt);
61995+
61996+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
61997+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
61998+ return mode;
61999+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
62000+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
62001+ return 0;
62002+ } else if (unlikely((mode & needmode) != needmode))
62003+ return 0;
62004+
62005+ return 1;
62006+}
62007+
62008+__u32
62009+gr_acl_handle_symlink(const struct dentry * new_dentry,
62010+ const struct dentry * parent_dentry,
62011+ const struct vfsmount * parent_mnt, const struct filename *from)
62012+{
62013+ __u32 needmode = GR_WRITE | GR_CREATE;
62014+ __u32 mode;
62015+
62016+ mode =
62017+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
62018+ GR_CREATE | GR_AUDIT_CREATE |
62019+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
62020+
62021+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
62022+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
62023+ return mode;
62024+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
62025+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
62026+ return 0;
62027+ } else if (unlikely((mode & needmode) != needmode))
62028+ return 0;
62029+
62030+ return (GR_WRITE | GR_CREATE);
62031+}
62032+
62033+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
62034+{
62035+ __u32 mode;
62036+
62037+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
62038+
62039+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
62040+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
62041+ return mode;
62042+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
62043+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
62044+ return 0;
62045+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
62046+ return 0;
62047+
62048+ return (reqmode);
62049+}
62050+
62051+__u32
62052+gr_acl_handle_mknod(const struct dentry * new_dentry,
62053+ const struct dentry * parent_dentry,
62054+ const struct vfsmount * parent_mnt,
62055+ const int mode)
62056+{
62057+ __u32 reqmode = GR_WRITE | GR_CREATE;
62058+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
62059+ reqmode |= GR_SETID;
62060+
62061+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
62062+ reqmode, GR_MKNOD_ACL_MSG);
62063+}
62064+
62065+__u32
62066+gr_acl_handle_mkdir(const struct dentry *new_dentry,
62067+ const struct dentry *parent_dentry,
62068+ const struct vfsmount *parent_mnt)
62069+{
62070+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
62071+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
62072+}
62073+
62074+#define RENAME_CHECK_SUCCESS(old, new) \
62075+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
62076+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
62077+
62078+int
62079+gr_acl_handle_rename(struct dentry *new_dentry,
62080+ struct dentry *parent_dentry,
62081+ const struct vfsmount *parent_mnt,
62082+ struct dentry *old_dentry,
62083+ struct inode *old_parent_inode,
62084+ struct vfsmount *old_mnt, const struct filename *newname)
62085+{
62086+ __u32 comp1, comp2;
62087+ int error = 0;
62088+
62089+ if (unlikely(!gr_acl_is_enabled()))
62090+ return 0;
62091+
62092+ if (!new_dentry->d_inode) {
62093+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
62094+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
62095+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
62096+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
62097+ GR_DELETE | GR_AUDIT_DELETE |
62098+ GR_AUDIT_READ | GR_AUDIT_WRITE |
62099+ GR_SUPPRESS, old_mnt);
62100+ } else {
62101+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
62102+ GR_CREATE | GR_DELETE |
62103+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
62104+ GR_AUDIT_READ | GR_AUDIT_WRITE |
62105+ GR_SUPPRESS, parent_mnt);
62106+ comp2 =
62107+ gr_search_file(old_dentry,
62108+ GR_READ | GR_WRITE | GR_AUDIT_READ |
62109+ GR_DELETE | GR_AUDIT_DELETE |
62110+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
62111+ }
62112+
62113+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
62114+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
62115+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
62116+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
62117+ && !(comp2 & GR_SUPPRESS)) {
62118+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
62119+ error = -EACCES;
62120+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
62121+ error = -EACCES;
62122+
62123+ return error;
62124+}
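+
+/* Worked example (descriptive): in gr_acl_handle_rename(), when the rename
+   target already exists it is checked for GR_CREATE | GR_DELETE on top of
+   read/write, since the operation both unlinks the old name and creates a
+   new one in place; RENAME_CHECK_SUCCESS() then only tests the common
+   GR_READ | GR_WRITE bits that both comp1 and comp2 must carry for the
+   rename to succeed. */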
62125+
62126+void
62127+gr_acl_handle_exit(void)
62128+{
62129+ u16 id;
62130+ char *rolename;
62131+
62132+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
62133+ !(current->role->roletype & GR_ROLE_PERSIST))) {
62134+ id = current->acl_role_id;
62135+ rolename = current->role->rolename;
62136+ gr_set_acls(1);
62137+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
62138+ }
62139+
62140+ gr_put_exec_file(current);
62141+ return;
62142+}
62143+
62144+int
62145+gr_acl_handle_procpidmem(const struct task_struct *task)
62146+{
62147+ if (unlikely(!gr_acl_is_enabled()))
62148+ return 0;
62149+
62150+ if (task != current && task->acl->mode & GR_PROTPROCFD)
62151+ return -EACCES;
62152+
62153+ return 0;
62154+}
62155diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
62156new file mode 100644
62157index 0000000..4699807
62158--- /dev/null
62159+++ b/grsecurity/gracl_ip.c
62160@@ -0,0 +1,384 @@
62161+#include <linux/kernel.h>
62162+#include <asm/uaccess.h>
62163+#include <asm/errno.h>
62164+#include <net/sock.h>
62165+#include <linux/file.h>
62166+#include <linux/fs.h>
62167+#include <linux/net.h>
62168+#include <linux/in.h>
62169+#include <linux/skbuff.h>
62170+#include <linux/ip.h>
62171+#include <linux/udp.h>
62172+#include <linux/types.h>
62173+#include <linux/sched.h>
62174+#include <linux/netdevice.h>
62175+#include <linux/inetdevice.h>
62176+#include <linux/gracl.h>
62177+#include <linux/grsecurity.h>
62178+#include <linux/grinternal.h>
62179+
62180+#define GR_BIND 0x01
62181+#define GR_CONNECT 0x02
62182+#define GR_INVERT 0x04
62183+#define GR_BINDOVERRIDE 0x08
62184+#define GR_CONNECTOVERRIDE 0x10
62185+#define GR_SOCK_FAMILY 0x20
62186+
62187+static const char * gr_protocols[IPPROTO_MAX] = {
62188+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
62189+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
62190+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
62191+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
62192+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
62193+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
62194+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
62195+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
62196+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
62197+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
62198+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
62199+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
62200+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
62201+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
62202+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
62203+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
62204+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
62205+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
62206+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
62207+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
62208+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
62209+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
62210+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
62211+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
62212+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
62213+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
62214+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
62215+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
62216+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
62217+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
62218+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
62219+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
62220+ };
62221+
62222+static const char * gr_socktypes[SOCK_MAX] = {
62223+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
62224+ "unknown:7", "unknown:8", "unknown:9", "packet"
62225+ };
62226+
62227+static const char * gr_sockfamilies[AF_MAX+1] = {
62228+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
62229+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
62230+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
62231+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
62232+ };
62233+
62234+const char *
62235+gr_proto_to_name(unsigned char proto)
62236+{
62237+ return gr_protocols[proto];
62238+}
62239+
62240+const char *
62241+gr_socktype_to_name(unsigned char type)
62242+{
62243+ return gr_socktypes[type];
62244+}
62245+
62246+const char *
62247+gr_sockfamily_to_name(unsigned char family)
62248+{
62249+ return gr_sockfamilies[family];
62250+}
62251+
62252+int
62253+gr_search_socket(const int domain, const int type, const int protocol)
62254+{
62255+ struct acl_subject_label *curr;
62256+ const struct cred *cred = current_cred();
62257+
62258+ if (unlikely(!gr_acl_is_enabled()))
62259+ goto exit;
62260+
62261+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
62262+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
62263+ goto exit; // let the kernel handle it
62264+
62265+ curr = current->acl;
62266+
62267+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
62268+ /* the family is allowed; if this is PF_INET, allow it only if
62269+ the extra sock type/protocol checks pass */
62270+ if (domain == PF_INET)
62271+ goto inet_check;
62272+ goto exit;
62273+ } else {
62274+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
62275+ __u32 fakeip = 0;
62276+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
62277+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
62278+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
62279+ gr_to_filename(current->exec_file->f_path.dentry,
62280+ current->exec_file->f_path.mnt) :
62281+ curr->filename, curr->filename,
62282+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
62283+ &current->signal->saved_ip);
62284+ goto exit;
62285+ }
62286+ goto exit_fail;
62287+ }
62288+
62289+inet_check:
62290+ /* the rest of this checking is for IPv4 only */
62291+ if (!curr->ips)
62292+ goto exit;
62293+
62294+ if ((curr->ip_type & (1 << type)) &&
62295+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
62296+ goto exit;
62297+
62298+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
62299+ /* we don't place ACLs on raw sockets, and sometimes
62300+ dgram/ip sockets are opened for ioctl and not
62301+ bind/connect, so we'll fake a bind learn log */
62302+ if (type == SOCK_RAW || type == SOCK_PACKET) {
62303+ __u32 fakeip = 0;
62304+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
62305+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
62306+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
62307+ gr_to_filename(current->exec_file->f_path.dentry,
62308+ current->exec_file->f_path.mnt) :
62309+ curr->filename, curr->filename,
62310+ &fakeip, 0, type,
62311+ protocol, GR_CONNECT, &current->signal->saved_ip);
62312+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
62313+ __u32 fakeip = 0;
62314+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
62315+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
62316+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
62317+ gr_to_filename(current->exec_file->f_path.dentry,
62318+ current->exec_file->f_path.mnt) :
62319+ curr->filename, curr->filename,
62320+ &fakeip, 0, type,
62321+ protocol, GR_BIND, &current->signal->saved_ip);
62322+ }
62323+ /* we'll log when they use connect or bind */
62324+ goto exit;
62325+ }
62326+
62327+exit_fail:
62328+ if (domain == PF_INET)
62329+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
62330+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
62331+ else
62332+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
62333+ gr_socktype_to_name(type), protocol);
62334+
62335+ return 0;
62336+exit:
62337+ return 1;
62338+}
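+
+/* Bitmap lookup sketch (descriptive): sock_families in gr_search_socket()
+   is an array of 32-bit words indexed by family, so for PF_INET6 (10) the
+   check reduces to
+
+	curr->sock_families[0] & (1 << 10)
+
+   since 10 / 32 == 0 and 10 % 32 == 10.  The ip_proto test works the same
+   way over all 256 protocol bits, e.g. IPPROTO_TCP (6) is word 0, bit 6. */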
62339+
62340+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
62341+{
62342+ if ((ip->mode & mode) &&
62343+ (ip_port >= ip->low) &&
62344+ (ip_port <= ip->high) &&
62345+ ((ntohl(ip_addr) & our_netmask) ==
62346+ (ntohl(our_addr) & our_netmask))
62347+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
62348+ && (ip->type & (1 << type))) {
62349+ if (ip->mode & GR_INVERT)
62350+ return 2; // specifically denied
62351+ else
62352+ return 1; // allowed
62353+ }
62354+
62355+ return 0; // not specifically allowed, may continue parsing
62356+}
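+
+/* Worked example (hypothetical policy line, for illustration): a rule like
+
+	connect 192.168.1.0/24:22 stream tcp
+
+   compiles to ip->low == ip->high == 22, our_netmask == 0xffffff00, the tcp
+   bit set in ip->proto and the SOCK_STREAM bit in ip->type.  A TCP connect
+   to 192.168.1.5:22 then passes every test above and returns 1; the same
+   rule with GR_INVERT set would return 2 and the caller denies it. */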
62357+
62358+static int
62359+gr_search_connectbind(const int full_mode, struct sock *sk,
62360+ struct sockaddr_in *addr, const int type)
62361+{
62362+ char iface[IFNAMSIZ] = {0};
62363+ struct acl_subject_label *curr;
62364+ struct acl_ip_label *ip;
62365+ struct inet_sock *isk;
62366+ struct net_device *dev;
62367+ struct in_device *idev;
62368+ unsigned long i;
62369+ int ret;
62370+ int mode = full_mode & (GR_BIND | GR_CONNECT);
62371+ __u32 ip_addr = 0;
62372+ __u32 our_addr;
62373+ __u32 our_netmask;
62374+ char *p;
62375+ __u16 ip_port = 0;
62376+ const struct cred *cred = current_cred();
62377+
62378+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
62379+ return 0;
62380+
62381+ curr = current->acl;
62382+ isk = inet_sk(sk);
62383+
62384+ /* INADDR_ANY overriding for binds; inaddr_any_override is already in network order */
62385+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
62386+ addr->sin_addr.s_addr = curr->inaddr_any_override;
62387+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
62388+ struct sockaddr_in saddr;
62389+ int err;
62390+
62391+ saddr.sin_family = AF_INET;
62392+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
62393+ saddr.sin_port = isk->inet_sport;
62394+
62395+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
62396+ if (err)
62397+ return err;
62398+
62399+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
62400+ if (err)
62401+ return err;
62402+ }
62403+
62404+ if (!curr->ips)
62405+ return 0;
62406+
62407+ ip_addr = addr->sin_addr.s_addr;
62408+ ip_port = ntohs(addr->sin_port);
62409+
62410+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
62411+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
62412+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
62413+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
62414+ gr_to_filename(current->exec_file->f_path.dentry,
62415+ current->exec_file->f_path.mnt) :
62416+ curr->filename, curr->filename,
62417+ &ip_addr, ip_port, type,
62418+ sk->sk_protocol, mode, &current->signal->saved_ip);
62419+ return 0;
62420+ }
62421+
62422+ for (i = 0; i < curr->ip_num; i++) {
62423+ ip = *(curr->ips + i);
62424+ if (ip->iface != NULL) {
62425+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
62426+ p = strchr(iface, ':');
62427+ if (p != NULL)
62428+ *p = '\0';
62429+ dev = dev_get_by_name(sock_net(sk), iface);
62430+ if (dev == NULL)
62431+ continue;
62432+ idev = in_dev_get(dev);
62433+ if (idev == NULL) {
62434+ dev_put(dev);
62435+ continue;
62436+ }
62437+ rcu_read_lock();
62438+ for_ifa(idev) {
62439+ if (!strcmp(ip->iface, ifa->ifa_label)) {
62440+ our_addr = ifa->ifa_address;
62441+ our_netmask = 0xffffffff;
62442+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
62443+ if (ret == 1) {
62444+ rcu_read_unlock();
62445+ in_dev_put(idev);
62446+ dev_put(dev);
62447+ return 0;
62448+ } else if (ret == 2) {
62449+ rcu_read_unlock();
62450+ in_dev_put(idev);
62451+ dev_put(dev);
62452+ goto denied;
62453+ }
62454+ }
62455+ } endfor_ifa(idev);
62456+ rcu_read_unlock();
62457+ in_dev_put(idev);
62458+ dev_put(dev);
62459+ } else {
62460+ our_addr = ip->addr;
62461+ our_netmask = ip->netmask;
62462+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
62463+ if (ret == 1)
62464+ return 0;
62465+ else if (ret == 2)
62466+ goto denied;
62467+ }
62468+ }
62469+
62470+denied:
62471+ if (mode == GR_BIND)
62472+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
62473+ else if (mode == GR_CONNECT)
62474+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
62475+
62476+ return -EACCES;
62477+}
62478+
62479+int
62480+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
62481+{
62482+ /* always allow disconnection of dgram sockets with connect */
62483+ if (addr->sin_family == AF_UNSPEC)
62484+ return 0;
62485+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
62486+}
62487+
62488+int
62489+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
62490+{
62491+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
62492+}
62493+
62494+int gr_search_listen(struct socket *sock)
62495+{
62496+ struct sock *sk = sock->sk;
62497+ struct sockaddr_in addr;
62498+
62499+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
62500+ addr.sin_port = inet_sk(sk)->inet_sport;
62501+
62502+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
62503+}
62504+
62505+int gr_search_accept(struct socket *sock)
62506+{
62507+ struct sock *sk = sock->sk;
62508+ struct sockaddr_in addr;
62509+
62510+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
62511+ addr.sin_port = inet_sk(sk)->inet_sport;
62512+
62513+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
62514+}
62515+
62516+int
62517+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
62518+{
62519+ if (addr)
62520+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
62521+ else {
62522+ struct sockaddr_in sin;
62523+ const struct inet_sock *inet = inet_sk(sk);
62524+
62525+ sin.sin_addr.s_addr = inet->inet_daddr;
62526+ sin.sin_port = inet->inet_dport;
62527+
62528+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
62529+ }
62530+}
62531+
62532+int
62533+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
62534+{
62535+ struct sockaddr_in sin;
62536+
62537+ if (unlikely(skb->len < sizeof (struct udphdr)))
62538+ return 0; // skip this packet
62539+
62540+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
62541+ sin.sin_port = udp_hdr(skb)->source;
62542+
62543+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
62544+}
62545diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
62546new file mode 100644
62547index 0000000..25f54ef
62548--- /dev/null
62549+++ b/grsecurity/gracl_learn.c
62550@@ -0,0 +1,207 @@
62551+#include <linux/kernel.h>
62552+#include <linux/mm.h>
62553+#include <linux/sched.h>
62554+#include <linux/poll.h>
62555+#include <linux/string.h>
62556+#include <linux/file.h>
62557+#include <linux/types.h>
62558+#include <linux/vmalloc.h>
62559+#include <linux/grinternal.h>
62560+
62561+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
62562+ size_t count, loff_t *ppos);
62563+extern int gr_acl_is_enabled(void);
62564+
62565+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
62566+static int gr_learn_attached;
62567+
62568+/* use a 512k buffer */
62569+#define LEARN_BUFFER_SIZE (512 * 1024)
62570+
62571+static DEFINE_SPINLOCK(gr_learn_lock);
62572+static DEFINE_MUTEX(gr_learn_user_mutex);
62573+
62574+/* we need to maintain two buffers, so that the kernel context of grlearn
62575+ uses a mutex around the userspace copying, and the other kernel contexts
62576+ use a spinlock when copying into the buffer, since they cannot sleep
62577+*/
62578+static char *learn_buffer;
62579+static char *learn_buffer_user;
62580+static int learn_buffer_len;
62581+static int learn_buffer_user_len;
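+
+/* Concurrency sketch (descriptive): writers such as gr_add_learn_entry()
+   may run in atomic context, so they only take the gr_learn_lock spinlock
+   while appending to learn_buffer.  read_learn() runs in process context:
+   it snapshots the data into learn_buffer_user while holding the spinlock,
+   drops it, and only then calls copy_to_user(), which may sleep and is
+   serialized against other readers by gr_learn_user_mutex. */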
62582+
62583+static ssize_t
62584+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
62585+{
62586+ DECLARE_WAITQUEUE(wait, current);
62587+ ssize_t retval = 0;
62588+
62589+ add_wait_queue(&learn_wait, &wait);
62590+ set_current_state(TASK_INTERRUPTIBLE);
62591+ do {
62592+ mutex_lock(&gr_learn_user_mutex);
62593+ spin_lock(&gr_learn_lock);
62594+ if (learn_buffer_len)
62595+ break;
62596+ spin_unlock(&gr_learn_lock);
62597+ mutex_unlock(&gr_learn_user_mutex);
62598+ if (file->f_flags & O_NONBLOCK) {
62599+ retval = -EAGAIN;
62600+ goto out;
62601+ }
62602+ if (signal_pending(current)) {
62603+ retval = -ERESTARTSYS;
62604+ goto out;
62605+ }
62606+
62607+ schedule();
62608+ } while (1);
62609+
62610+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
62611+ learn_buffer_user_len = learn_buffer_len;
62612+ retval = learn_buffer_len;
62613+ learn_buffer_len = 0;
62614+
62615+ spin_unlock(&gr_learn_lock);
62616+
62617+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
62618+ retval = -EFAULT;
62619+
62620+ mutex_unlock(&gr_learn_user_mutex);
62621+out:
62622+ set_current_state(TASK_RUNNING);
62623+ remove_wait_queue(&learn_wait, &wait);
62624+ return retval;
62625+}
62626+
62627+static unsigned int
62628+poll_learn(struct file * file, poll_table * wait)
62629+{
62630+ poll_wait(file, &learn_wait, wait);
62631+
62632+ if (learn_buffer_len)
62633+ return (POLLIN | POLLRDNORM);
62634+
62635+ return 0;
62636+}
62637+
62638+void
62639+gr_clear_learn_entries(void)
62640+{
62641+ char *tmp;
62642+
62643+ mutex_lock(&gr_learn_user_mutex);
62644+ spin_lock(&gr_learn_lock);
62645+ tmp = learn_buffer;
62646+ learn_buffer = NULL;
62647+ spin_unlock(&gr_learn_lock);
62648+ if (tmp)
62649+ vfree(tmp);
62650+ if (learn_buffer_user != NULL) {
62651+ vfree(learn_buffer_user);
62652+ learn_buffer_user = NULL;
62653+ }
62654+ learn_buffer_len = 0;
62655+ mutex_unlock(&gr_learn_user_mutex);
62656+
62657+ return;
62658+}
62659+
62660+void
62661+gr_add_learn_entry(const char *fmt, ...)
62662+{
62663+ va_list args;
62664+ unsigned int len;
62665+
62666+ if (!gr_learn_attached)
62667+ return;
62668+
62669+ spin_lock(&gr_learn_lock);
62670+
62671+ /* leave a gap at the end so we know when it's "full" but don't have to
62672+ compute the exact length of the string we're trying to append
62673+ */
62674+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
62675+ spin_unlock(&gr_learn_lock);
62676+ wake_up_interruptible(&learn_wait);
62677+ return;
62678+ }
62679+ if (learn_buffer == NULL) {
62680+ spin_unlock(&gr_learn_lock);
62681+ return;
62682+ }
62683+
62684+ va_start(args, fmt);
62685+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
62686+ va_end(args);
62687+
62688+ learn_buffer_len += len + 1;
62689+
62690+ spin_unlock(&gr_learn_lock);
62691+ wake_up_interruptible(&learn_wait);
62692+
62693+ return;
62694+}
62695+
62696+static int
62697+open_learn(struct inode *inode, struct file *file)
62698+{
62699+ if (file->f_mode & FMODE_READ && gr_learn_attached)
62700+ return -EBUSY;
62701+ if (file->f_mode & FMODE_READ) {
62702+ int retval = 0;
62703+ mutex_lock(&gr_learn_user_mutex);
62704+ if (learn_buffer == NULL)
62705+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
62706+ if (learn_buffer_user == NULL)
62707+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
62708+ if (learn_buffer == NULL) {
62709+ retval = -ENOMEM;
62710+ goto out_error;
62711+ }
62712+ if (learn_buffer_user == NULL) {
62713+ retval = -ENOMEM;
62714+ goto out_error;
62715+ }
62716+ learn_buffer_len = 0;
62717+ learn_buffer_user_len = 0;
62718+ gr_learn_attached = 1;
62719+out_error:
62720+ mutex_unlock(&gr_learn_user_mutex);
62721+ return retval;
62722+ }
62723+ return 0;
62724+}
62725+
62726+static int
62727+close_learn(struct inode *inode, struct file *file)
62728+{
62729+ if (file->f_mode & FMODE_READ) {
62730+ char *tmp = NULL;
62731+ mutex_lock(&gr_learn_user_mutex);
62732+ spin_lock(&gr_learn_lock);
62733+ tmp = learn_buffer;
62734+ learn_buffer = NULL;
62735+ spin_unlock(&gr_learn_lock);
62736+ if (tmp)
62737+ vfree(tmp);
62738+ if (learn_buffer_user != NULL) {
62739+ vfree(learn_buffer_user);
62740+ learn_buffer_user = NULL;
62741+ }
62742+ learn_buffer_len = 0;
62743+ learn_buffer_user_len = 0;
62744+ gr_learn_attached = 0;
62745+ mutex_unlock(&gr_learn_user_mutex);
62746+ }
62747+
62748+ return 0;
62749+}
62750+
62751+const struct file_operations grsec_fops = {
62752+ .read = read_learn,
62753+ .write = write_grsec_handler,
62754+ .open = open_learn,
62755+ .release = close_learn,
62756+ .poll = poll_learn,
62757+};
62758diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
62759new file mode 100644
62760index 0000000..39645c9
62761--- /dev/null
62762+++ b/grsecurity/gracl_res.c
62763@@ -0,0 +1,68 @@
62764+#include <linux/kernel.h>
62765+#include <linux/sched.h>
62766+#include <linux/gracl.h>
62767+#include <linux/grinternal.h>
62768+
62769+static const char *restab_log[] = {
62770+ [RLIMIT_CPU] = "RLIMIT_CPU",
62771+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
62772+ [RLIMIT_DATA] = "RLIMIT_DATA",
62773+ [RLIMIT_STACK] = "RLIMIT_STACK",
62774+ [RLIMIT_CORE] = "RLIMIT_CORE",
62775+ [RLIMIT_RSS] = "RLIMIT_RSS",
62776+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
62777+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
62778+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
62779+ [RLIMIT_AS] = "RLIMIT_AS",
62780+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
62781+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
62782+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
62783+ [RLIMIT_NICE] = "RLIMIT_NICE",
62784+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
62785+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
62786+ [GR_CRASH_RES] = "RLIMIT_CRASH"
62787+};
62788+
62789+void
62790+gr_log_resource(const struct task_struct *task,
62791+ const int res, const unsigned long wanted, const int gt)
62792+{
62793+ const struct cred *cred;
62794+ unsigned long rlim;
62795+
62796+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
62797+ return;
62798+
62799+ // not yet supported resource
62800+ if (unlikely(!restab_log[res]))
62801+ return;
62802+
62803+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
62804+ rlim = task_rlimit_max(task, res);
62805+ else
62806+ rlim = task_rlimit(task, res);
62807+
62808+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
62809+ return;
62810+
62811+ rcu_read_lock();
62812+ cred = __task_cred(task);
62813+
62814+ if (res == RLIMIT_NPROC &&
62815+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
62816+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
62817+ goto out_rcu_unlock;
62818+ else if (res == RLIMIT_MEMLOCK &&
62819+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
62820+ goto out_rcu_unlock;
62821+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
62822+ goto out_rcu_unlock;
62823+ rcu_read_unlock();
62824+
62825+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
62826+
62827+ return;
62828+out_rcu_unlock:
62829+ rcu_read_unlock();
62830+ return;
62831+}
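+
+/* Worked example (descriptive): with gt == 0, a task whose RLIMIT_NOFILE
+   rlim_cur is 1024 requesting descriptor number 1025 reaches
+   gr_log_resource() with wanted == 1025 and rlim == 1024; "wanted < rlim"
+   is false, none of the capability exemptions apply to RLIMIT_NOFILE, and
+   the overrun is logged via gr_log_res_ulong2_str(). */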
62832diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
62833new file mode 100644
62834index 0000000..8c8fc9d
62835--- /dev/null
62836+++ b/grsecurity/gracl_segv.c
62837@@ -0,0 +1,303 @@
62838+#include <linux/kernel.h>
62839+#include <linux/mm.h>
62840+#include <asm/uaccess.h>
62841+#include <asm/errno.h>
62842+#include <asm/mman.h>
62843+#include <net/sock.h>
62844+#include <linux/file.h>
62845+#include <linux/fs.h>
62846+#include <linux/net.h>
62847+#include <linux/in.h>
62848+#include <linux/slab.h>
62849+#include <linux/types.h>
62850+#include <linux/sched.h>
62851+#include <linux/timer.h>
62852+#include <linux/gracl.h>
62853+#include <linux/grsecurity.h>
62854+#include <linux/grinternal.h>
62855+
62856+static struct crash_uid *uid_set;
62857+static unsigned short uid_used;
62858+static DEFINE_SPINLOCK(gr_uid_lock);
62859+extern rwlock_t gr_inode_lock;
62860+extern struct acl_subject_label *
62861+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
62862+ struct acl_role_label *role);
62863+
62864+#ifdef CONFIG_BTRFS_FS
62865+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
62866+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
62867+#endif
62868+
62869+static inline dev_t __get_dev(const struct dentry *dentry)
62870+{
62871+#ifdef CONFIG_BTRFS_FS
62872+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
62873+ return get_btrfs_dev_from_inode(dentry->d_inode);
62874+ else
62875+#endif
62876+ return dentry->d_inode->i_sb->s_dev;
62877+}
62878+
62879+int
62880+gr_init_uidset(void)
62881+{
62882+ uid_set =
62883+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
62884+ uid_used = 0;
62885+
62886+ return uid_set ? 1 : 0;
62887+}
62888+
62889+void
62890+gr_free_uidset(void)
62891+{
62892+ if (uid_set)
62893+ kfree(uid_set);
62894+
62895+ return;
62896+}
62897+
62898+int
62899+gr_find_uid(const uid_t uid)
62900+{
62901+ struct crash_uid *tmp = uid_set;
62902+ uid_t buid;
62903+ int low = 0, high = uid_used - 1, mid;
62904+
62905+ while (high >= low) {
62906+ mid = (low + high) >> 1;
62907+ buid = tmp[mid].uid;
62908+ if (buid == uid)
62909+ return mid;
62910+ if (buid > uid)
62911+ high = mid - 1;
62912+ if (buid < uid)
62913+ low = mid + 1;
62914+ }
62915+
62916+ return -1;
62917+}
62918+
62919+static __inline__ void
62920+gr_insertsort(void)
62921+{
62922+ unsigned short i, j;
62923+ struct crash_uid index;
62924+
62925+ for (i = 1; i < uid_used; i++) {
62926+ index = uid_set[i];
62927+ j = i;
62928+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
62929+ uid_set[j] = uid_set[j - 1];
62930+ j--;
62931+ }
62932+ uid_set[j] = index;
62933+ }
62934+
62935+ return;
62936+}
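+
+/* Note (descriptive): uid_set is kept sorted by uid so gr_find_uid() above
+   can binary-search it; gr_insert_uid() below appends the new entry and
+   calls this insertion sort, which is cheap because at most one element
+   (the freshly appended one) is out of place. */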
62937+
62938+static __inline__ void
62939+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
62940+{
62941+ int loc;
62942+ uid_t uid = GR_GLOBAL_UID(kuid);
62943+
62944+ if (uid_used == GR_UIDTABLE_MAX)
62945+ return;
62946+
62947+ loc = gr_find_uid(uid);
62948+
62949+ if (loc >= 0) {
62950+ uid_set[loc].expires = expires;
62951+ return;
62952+ }
62953+
62954+ uid_set[uid_used].uid = uid;
62955+ uid_set[uid_used].expires = expires;
62956+ uid_used++;
62957+
62958+ gr_insertsort();
62959+
62960+ return;
62961+}
62962+
62963+void
62964+gr_remove_uid(const unsigned short loc)
62965+{
62966+ unsigned short i;
62967+
62968+ for (i = loc + 1; i < uid_used; i++)
62969+ uid_set[i - 1] = uid_set[i];
62970+
62971+ uid_used--;
62972+
62973+ return;
62974+}
62975+
62976+int
62977+gr_check_crash_uid(const kuid_t kuid)
62978+{
62979+ int loc;
62980+ int ret = 0;
62981+ uid_t uid;
62982+
62983+ if (unlikely(!gr_acl_is_enabled()))
62984+ return 0;
62985+
62986+ uid = GR_GLOBAL_UID(kuid);
62987+
62988+ spin_lock(&gr_uid_lock);
62989+ loc = gr_find_uid(uid);
62990+
62991+ if (loc < 0)
62992+ goto out_unlock;
62993+
62994+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
62995+ gr_remove_uid(loc);
62996+ else
62997+ ret = 1;
62998+
62999+out_unlock:
63000+ spin_unlock(&gr_uid_lock);
63001+ return ret;
63002+}
63003+
63004+static __inline__ int
63005+proc_is_setxid(const struct cred *cred)
63006+{
63007+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
63008+ !uid_eq(cred->uid, cred->fsuid))
63009+ return 1;
63010+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
63011+ !gid_eq(cred->gid, cred->fsgid))
63012+ return 1;
63013+
63014+ return 0;
63015+}
63016+
63017+extern int gr_fake_force_sig(int sig, struct task_struct *t);
63018+
63019+void
63020+gr_handle_crash(struct task_struct *task, const int sig)
63021+{
63022+ struct acl_subject_label *curr;
63023+ struct task_struct *tsk, *tsk2;
63024+ const struct cred *cred;
63025+ const struct cred *cred2;
63026+
63027+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
63028+ return;
63029+
63030+ if (unlikely(!gr_acl_is_enabled()))
63031+ return;
63032+
63033+ curr = task->acl;
63034+
63035+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
63036+ return;
63037+
63038+ if (time_before_eq(curr->expires, get_seconds())) {
63039+ curr->expires = 0;
63040+ curr->crashes = 0;
63041+ }
63042+
63043+ curr->crashes++;
63044+
63045+ if (!curr->expires)
63046+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
63047+
63048+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
63049+ time_after(curr->expires, get_seconds())) {
63050+ rcu_read_lock();
63051+ cred = __task_cred(task);
63052+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
63053+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
63054+ spin_lock(&gr_uid_lock);
63055+ gr_insert_uid(cred->uid, curr->expires);
63056+ spin_unlock(&gr_uid_lock);
63057+ curr->expires = 0;
63058+ curr->crashes = 0;
63059+ read_lock(&tasklist_lock);
63060+ do_each_thread(tsk2, tsk) {
63061+ cred2 = __task_cred(tsk);
63062+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
63063+ gr_fake_force_sig(SIGKILL, tsk);
63064+ } while_each_thread(tsk2, tsk);
63065+ read_unlock(&tasklist_lock);
63066+ } else {
63067+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
63068+ read_lock(&tasklist_lock);
63069+ read_lock(&grsec_exec_file_lock);
63070+ do_each_thread(tsk2, tsk) {
63071+ if (likely(tsk != task)) {
63072+ // if this thread has the same subject as the one that triggered
63073+ // RES_CRASH and it's the same binary, kill it
63074+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
63075+ gr_fake_force_sig(SIGKILL, tsk);
63076+ }
63077+ } while_each_thread(tsk2, tsk);
63078+ read_unlock(&grsec_exec_file_lock);
63079+ read_unlock(&tasklist_lock);
63080+ }
63081+ rcu_read_unlock();
63082+ }
63083+
63084+ return;
63085+}
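+
+/* Illustrative policy (hypothetical syntax, for illustration): a subject
+   resource line such as
+
+	RES_CRASH 3 30m
+
+   would set res[GR_CRASH_RES].rlim_cur == 3 and rlim_max == 1800 seconds;
+   three crashes under that subject within the window then either ban the
+   non-root, setxid uid via gr_insert_uid() and kill that uid's other
+   tasks, or SIGKILL every task sharing the same subject and binary. */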
63086+
63087+int
63088+gr_check_crash_exec(const struct file *filp)
63089+{
63090+ struct acl_subject_label *curr;
63091+
63092+ if (unlikely(!gr_acl_is_enabled()))
63093+ return 0;
63094+
63095+ read_lock(&gr_inode_lock);
63096+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
63097+ __get_dev(filp->f_path.dentry),
63098+ current->role);
63099+ read_unlock(&gr_inode_lock);
63100+
63101+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
63102+ (!curr->crashes && !curr->expires))
63103+ return 0;
63104+
63105+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
63106+ time_after(curr->expires, get_seconds()))
63107+ return 1;
63108+ else if (time_before_eq(curr->expires, get_seconds())) {
63109+ curr->crashes = 0;
63110+ curr->expires = 0;
63111+ }
63112+
63113+ return 0;
63114+}
63115+
63116+void
63117+gr_handle_alertkill(struct task_struct *task)
63118+{
63119+ struct acl_subject_label *curracl;
63120+ __u32 curr_ip;
63121+ struct task_struct *p, *p2;
63122+
63123+ if (unlikely(!gr_acl_is_enabled()))
63124+ return;
63125+
63126+ curracl = task->acl;
63127+ curr_ip = task->signal->curr_ip;
63128+
63129+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
63130+ read_lock(&tasklist_lock);
63131+ do_each_thread(p2, p) {
63132+ if (p->signal->curr_ip == curr_ip)
63133+ gr_fake_force_sig(SIGKILL, p);
63134+ } while_each_thread(p2, p);
63135+ read_unlock(&tasklist_lock);
63136+ } else if (curracl->mode & GR_KILLPROC)
63137+ gr_fake_force_sig(SIGKILL, task);
63138+
63139+ return;
63140+}
63141diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
63142new file mode 100644
63143index 0000000..98011b0
63144--- /dev/null
63145+++ b/grsecurity/gracl_shm.c
63146@@ -0,0 +1,40 @@
63147+#include <linux/kernel.h>
63148+#include <linux/mm.h>
63149+#include <linux/sched.h>
63150+#include <linux/file.h>
63151+#include <linux/ipc.h>
63152+#include <linux/gracl.h>
63153+#include <linux/grsecurity.h>
63154+#include <linux/grinternal.h>
63155+
63156+int
63157+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
63158+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
63159+{
63160+ struct task_struct *task;
63161+
63162+ if (!gr_acl_is_enabled())
63163+ return 1;
63164+
63165+ rcu_read_lock();
63166+ read_lock(&tasklist_lock);
63167+
63168+ task = find_task_by_vpid(shm_cprid);
63169+
63170+ if (unlikely(!task))
63171+ task = find_task_by_vpid(shm_lapid);
63172+
63173+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
63174+ (task_pid_nr(task) == shm_lapid)) &&
63175+ (task->acl->mode & GR_PROTSHM) &&
63176+ (task->acl != current->acl))) {
63177+ read_unlock(&tasklist_lock);
63178+ rcu_read_unlock();
63179+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
63180+ return 0;
63181+ }
63182+ read_unlock(&tasklist_lock);
63183+ rcu_read_unlock();
63184+
63185+ return 1;
63186+}
63187diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
63188new file mode 100644
63189index 0000000..bc0be01
63190--- /dev/null
63191+++ b/grsecurity/grsec_chdir.c
63192@@ -0,0 +1,19 @@
63193+#include <linux/kernel.h>
63194+#include <linux/sched.h>
63195+#include <linux/fs.h>
63196+#include <linux/file.h>
63197+#include <linux/grsecurity.h>
63198+#include <linux/grinternal.h>
63199+
63200+void
63201+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
63202+{
63203+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
63204+ if ((grsec_enable_chdir && grsec_enable_group &&
63205+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
63206+ !grsec_enable_group)) {
63207+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
63208+ }
63209+#endif
63210+ return;
63211+}
63212diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
63213new file mode 100644
63214index 0000000..6d2de57
63215--- /dev/null
63216+++ b/grsecurity/grsec_chroot.c
63217@@ -0,0 +1,357 @@
63218+#include <linux/kernel.h>
63219+#include <linux/module.h>
63220+#include <linux/sched.h>
63221+#include <linux/file.h>
63222+#include <linux/fs.h>
63223+#include <linux/mount.h>
63224+#include <linux/types.h>
63225+#include "../fs/mount.h"
63226+#include <linux/grsecurity.h>
63227+#include <linux/grinternal.h>
63228+
63229+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
63230+{
63231+#ifdef CONFIG_GRKERNSEC
63232+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
63233+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
63234+ task->gr_is_chrooted = 1;
63235+ else
63236+ task->gr_is_chrooted = 0;
63237+
63238+ task->gr_chroot_dentry = path->dentry;
63239+#endif
63240+ return;
63241+}
63242+
63243+void gr_clear_chroot_entries(struct task_struct *task)
63244+{
63245+#ifdef CONFIG_GRKERNSEC
63246+ task->gr_is_chrooted = 0;
63247+ task->gr_chroot_dentry = NULL;
63248+#endif
63249+ return;
63250+}
63251+
63252+int
63253+gr_handle_chroot_unix(const pid_t pid)
63254+{
63255+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
63256+ struct task_struct *p;
63257+
63258+ if (unlikely(!grsec_enable_chroot_unix))
63259+ return 1;
63260+
63261+ if (likely(!proc_is_chrooted(current)))
63262+ return 1;
63263+
63264+ rcu_read_lock();
63265+ read_lock(&tasklist_lock);
63266+ p = find_task_by_vpid_unrestricted(pid);
63267+ if (unlikely(p && !have_same_root(current, p))) {
63268+ read_unlock(&tasklist_lock);
63269+ rcu_read_unlock();
63270+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
63271+ return 0;
63272+ }
63273+ read_unlock(&tasklist_lock);
63274+ rcu_read_unlock();
63275+#endif
63276+ return 1;
63277+}
63278+
63279+int
63280+gr_handle_chroot_nice(void)
63281+{
63282+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
63283+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
63284+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
63285+ return -EPERM;
63286+ }
63287+#endif
63288+ return 0;
63289+}
63290+
63291+int
63292+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
63293+{
63294+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
63295+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
63296+ && proc_is_chrooted(current)) {
63297+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
63298+ return -EACCES;
63299+ }
63300+#endif
63301+ return 0;
63302+}
63303+
63304+int
63305+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
63306+{
63307+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
63308+ struct task_struct *p;
63309+ int ret = 0;
63310+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
63311+ return ret;
63312+
63313+ read_lock(&tasklist_lock);
63314+ do_each_pid_task(pid, type, p) {
63315+ if (!have_same_root(current, p)) {
63316+ ret = 1;
63317+ goto out;
63318+ }
63319+ } while_each_pid_task(pid, type, p);
63320+out:
63321+ read_unlock(&tasklist_lock);
63322+ return ret;
63323+#endif
63324+ return 0;
63325+}
63326+
63327+int
63328+gr_pid_is_chrooted(struct task_struct *p)
63329+{
63330+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
63331+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
63332+ return 0;
63333+
63334+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
63335+ !have_same_root(current, p)) {
63336+ return 1;
63337+ }
63338+#endif
63339+ return 0;
63340+}
63341+
63342+EXPORT_SYMBOL(gr_pid_is_chrooted);
63343+
63344+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
63345+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
63346+{
63347+ struct path path, currentroot;
63348+ int ret = 0;
63349+
63350+ path.dentry = (struct dentry *)u_dentry;
63351+ path.mnt = (struct vfsmount *)u_mnt;
63352+ get_fs_root(current->fs, &currentroot);
63353+ if (path_is_under(&path, &currentroot))
63354+ ret = 1;
63355+ path_put(&currentroot);
63356+
63357+ return ret;
63358+}
63359+#endif
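+
+/* Naming note (descriptive): despite its name, gr_is_outside_chroot()
+   returns 1 when the path lies beneath the caller's current root; both
+   callers deny when it returns 0, i.e. when an open directory fd or a
+   second chroot() would let the process reach outside the chroot. */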
63360+
63361+int
63362+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
63363+{
63364+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
63365+ if (!grsec_enable_chroot_fchdir)
63366+ return 1;
63367+
63368+ if (!proc_is_chrooted(current))
63369+ return 1;
63370+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
63371+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
63372+ return 0;
63373+ }
63374+#endif
63375+ return 1;
63376+}
63377+
63378+int
63379+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
63380+ const time_t shm_createtime)
63381+{
63382+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
63383+ struct task_struct *p;
63384+ time_t starttime;
63385+
63386+ if (unlikely(!grsec_enable_chroot_shmat))
63387+ return 1;
63388+
63389+ if (likely(!proc_is_chrooted(current)))
63390+ return 1;
63391+
63392+ rcu_read_lock();
63393+ read_lock(&tasklist_lock);
63394+
63395+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
63396+ starttime = p->start_time.tv_sec;
63397+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
63398+ if (have_same_root(current, p)) {
63399+ goto allow;
63400+ } else {
63401+ read_unlock(&tasklist_lock);
63402+ rcu_read_unlock();
63403+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
63404+ return 0;
63405+ }
63406+ }
63407+ /* creator exited or pid was reused; fall through to the next check */
63408+ }
63409+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
63410+ if (unlikely(!have_same_root(current, p))) {
63411+ read_unlock(&tasklist_lock);
63412+ rcu_read_unlock();
63413+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
63414+ return 0;
63415+ }
63416+ }
63417+
63418+allow:
63419+ read_unlock(&tasklist_lock);
63420+ rcu_read_unlock();
63421+#endif
63422+ return 1;
63423+}
63424+
63425+void
63426+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
63427+{
63428+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
63429+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
63430+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
63431+#endif
63432+ return;
63433+}
63434+
63435+int
63436+gr_handle_chroot_mknod(const struct dentry *dentry,
63437+ const struct vfsmount *mnt, const int mode)
63438+{
63439+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
63440+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
63441+ proc_is_chrooted(current)) {
63442+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
63443+ return -EPERM;
63444+ }
63445+#endif
63446+ return 0;
63447+}
63448+
63449+int
63450+gr_handle_chroot_mount(const struct dentry *dentry,
63451+ const struct vfsmount *mnt, const char *dev_name)
63452+{
63453+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
63454+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
63455+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
63456+ return -EPERM;
63457+ }
63458+#endif
63459+ return 0;
63460+}
63461+
63462+int
63463+gr_handle_chroot_pivot(void)
63464+{
63465+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
63466+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
63467+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
63468+ return -EPERM;
63469+ }
63470+#endif
63471+ return 0;
63472+}
63473+
63474+int
63475+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
63476+{
63477+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
63478+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
63479+ !gr_is_outside_chroot(dentry, mnt)) {
63480+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
63481+ return -EPERM;
63482+ }
63483+#endif
63484+ return 0;
63485+}
63486+
63487+extern const char *captab_log[];
63488+extern int captab_log_entries;
63489+
63490+int
63491+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
63492+{
63493+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63494+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
63495+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
63496+ if (cap_raised(chroot_caps, cap)) {
63497+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
63498+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
63499+ }
63500+ return 0;
63501+ }
63502+ }
63503+#endif
63504+ return 1;
63505+}
63506+
63507+int
63508+gr_chroot_is_capable(const int cap)
63509+{
63510+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63511+ return gr_task_chroot_is_capable(current, current_cred(), cap);
63512+#endif
63513+ return 1;
63514+}
63515+
63516+int
63517+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
63518+{
63519+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63520+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
63521+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
63522+ if (cap_raised(chroot_caps, cap)) {
63523+ return 0;
63524+ }
63525+ }
63526+#endif
63527+ return 1;
63528+}
63529+
63530+int
63531+gr_chroot_is_capable_nolog(const int cap)
63532+{
63533+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63534+ return gr_task_chroot_is_capable_nolog(current, cap);
63535+#endif
63536+ return 1;
63537+}
63538+
63539+int
63540+gr_handle_chroot_sysctl(const int op)
63541+{
63542+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
63543+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
63544+ proc_is_chrooted(current))
63545+ return -EACCES;
63546+#endif
63547+ return 0;
63548+}
63549+
63550+void
63551+gr_handle_chroot_chdir(struct path *path)
63552+{
63553+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
63554+ if (grsec_enable_chroot_chdir)
63555+ set_fs_pwd(current->fs, path);
63556+#endif
63557+ return;
63558+}
63559+
63560+int
63561+gr_handle_chroot_chmod(const struct dentry *dentry,
63562+ const struct vfsmount *mnt, const int mode)
63563+{
63564+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
63565+ /* allow chmod +s on directories, but not files */
63566+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
63567+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
63568+ proc_is_chrooted(current)) {
63569+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
63570+ return -EPERM;
63571+ }
63572+#endif
63573+ return 0;
63574+}
63575diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
63576new file mode 100644
63577index 0000000..207d409
63578--- /dev/null
63579+++ b/grsecurity/grsec_disabled.c
63580@@ -0,0 +1,434 @@
63581+#include <linux/kernel.h>
63582+#include <linux/module.h>
63583+#include <linux/sched.h>
63584+#include <linux/file.h>
63585+#include <linux/fs.h>
63586+#include <linux/kdev_t.h>
63587+#include <linux/net.h>
63588+#include <linux/in.h>
63589+#include <linux/ip.h>
63590+#include <linux/skbuff.h>
63591+#include <linux/sysctl.h>
63592+
63593+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
63594+void
63595+pax_set_initial_flags(struct linux_binprm *bprm)
63596+{
63597+ return;
63598+}
63599+#endif
63600+
63601+#ifdef CONFIG_SYSCTL
63602+__u32
63603+gr_handle_sysctl(const struct ctl_table * table, const int op)
63604+{
63605+ return 0;
63606+}
63607+#endif
63608+
63609+#ifdef CONFIG_TASKSTATS
63610+int gr_is_taskstats_denied(int pid)
63611+{
63612+ return 0;
63613+}
63614+#endif
63615+
63616+int
63617+gr_acl_is_enabled(void)
63618+{
63619+ return 0;
63620+}
63621+
63622+void
63623+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
63624+{
63625+ return;
63626+}
63627+
63628+int
63629+gr_handle_rawio(const struct inode *inode)
63630+{
63631+ return 0;
63632+}
63633+
63634+void
63635+gr_acl_handle_psacct(struct task_struct *task, const long code)
63636+{
63637+ return;
63638+}
63639+
63640+int
63641+gr_handle_ptrace(struct task_struct *task, const long request)
63642+{
63643+ return 0;
63644+}
63645+
63646+int
63647+gr_handle_proc_ptrace(struct task_struct *task)
63648+{
63649+ return 0;
63650+}
63651+
63652+int
63653+gr_set_acls(const int type)
63654+{
63655+ return 0;
63656+}
63657+
63658+int
63659+gr_check_hidden_task(const struct task_struct *tsk)
63660+{
63661+ return 0;
63662+}
63663+
63664+int
63665+gr_check_protected_task(const struct task_struct *task)
63666+{
63667+ return 0;
63668+}
63669+
63670+int
63671+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
63672+{
63673+ return 0;
63674+}
63675+
63676+void
63677+gr_copy_label(struct task_struct *tsk)
63678+{
63679+ return;
63680+}
63681+
63682+void
63683+gr_set_pax_flags(struct task_struct *task)
63684+{
63685+ return;
63686+}
63687+
63688+int
63689+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
63690+ const int unsafe_share)
63691+{
63692+ return 0;
63693+}
63694+
63695+void
63696+gr_handle_delete(const ino_t ino, const dev_t dev)
63697+{
63698+ return;
63699+}
63700+
63701+void
63702+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
63703+{
63704+ return;
63705+}
63706+
63707+void
63708+gr_handle_crash(struct task_struct *task, const int sig)
63709+{
63710+ return;
63711+}
63712+
63713+int
63714+gr_check_crash_exec(const struct file *filp)
63715+{
63716+ return 0;
63717+}
63718+
63719+int
63720+gr_check_crash_uid(const kuid_t uid)
63721+{
63722+ return 0;
63723+}
63724+
63725+void
63726+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
63727+ struct dentry *old_dentry,
63728+ struct dentry *new_dentry,
63729+ struct vfsmount *mnt, const __u8 replace)
63730+{
63731+ return;
63732+}
63733+
63734+int
63735+gr_search_socket(const int family, const int type, const int protocol)
63736+{
63737+ return 1;
63738+}
63739+
63740+int
63741+gr_search_connectbind(const int mode, const struct socket *sock,
63742+ const struct sockaddr_in *addr)
63743+{
63744+ return 0;
63745+}
63746+
63747+void
63748+gr_handle_alertkill(struct task_struct *task)
63749+{
63750+ return;
63751+}
63752+
63753+__u32
63754+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
63755+{
63756+ return 1;
63757+}
63758+
63759+__u32
63760+gr_acl_handle_hidden_file(const struct dentry * dentry,
63761+ const struct vfsmount * mnt)
63762+{
63763+ return 1;
63764+}
63765+
63766+__u32
63767+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
63768+ int acc_mode)
63769+{
63770+ return 1;
63771+}
63772+
63773+__u32
63774+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
63775+{
63776+ return 1;
63777+}
63778+
63779+__u32
63780+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
63781+{
63782+ return 1;
63783+}
63784+
63785+int
63786+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
63787+ unsigned int *vm_flags)
63788+{
63789+ return 1;
63790+}
63791+
63792+__u32
63793+gr_acl_handle_truncate(const struct dentry * dentry,
63794+ const struct vfsmount * mnt)
63795+{
63796+ return 1;
63797+}
63798+
63799+__u32
63800+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
63801+{
63802+ return 1;
63803+}
63804+
63805+__u32
63806+gr_acl_handle_access(const struct dentry * dentry,
63807+ const struct vfsmount * mnt, const int fmode)
63808+{
63809+ return 1;
63810+}
63811+
63812+__u32
63813+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
63814+ umode_t *mode)
63815+{
63816+ return 1;
63817+}
63818+
63819+__u32
63820+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
63821+{
63822+ return 1;
63823+}
63824+
63825+__u32
63826+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
63827+{
63828+ return 1;
63829+}
63830+
63831+void
63832+grsecurity_init(void)
63833+{
63834+ return;
63835+}
63836+
63837+umode_t gr_acl_umask(void)
63838+{
63839+ return 0;
63840+}
63841+
63842+__u32
63843+gr_acl_handle_mknod(const struct dentry * new_dentry,
63844+ const struct dentry * parent_dentry,
63845+ const struct vfsmount * parent_mnt,
63846+ const int mode)
63847+{
63848+ return 1;
63849+}
63850+
63851+__u32
63852+gr_acl_handle_mkdir(const struct dentry * new_dentry,
63853+ const struct dentry * parent_dentry,
63854+ const struct vfsmount * parent_mnt)
63855+{
63856+ return 1;
63857+}
63858+
63859+__u32
63860+gr_acl_handle_symlink(const struct dentry * new_dentry,
63861+ const struct dentry * parent_dentry,
63862+ const struct vfsmount * parent_mnt, const struct filename *from)
63863+{
63864+ return 1;
63865+}
63866+
63867+__u32
63868+gr_acl_handle_link(const struct dentry * new_dentry,
63869+ const struct dentry * parent_dentry,
63870+ const struct vfsmount * parent_mnt,
63871+ const struct dentry * old_dentry,
63872+ const struct vfsmount * old_mnt, const struct filename *to)
63873+{
63874+ return 1;
63875+}
63876+
63877+int
63878+gr_acl_handle_rename(const struct dentry *new_dentry,
63879+ const struct dentry *parent_dentry,
63880+ const struct vfsmount *parent_mnt,
63881+ const struct dentry *old_dentry,
63882+ const struct inode *old_parent_inode,
63883+ const struct vfsmount *old_mnt, const struct filename *newname)
63884+{
63885+ return 0;
63886+}
63887+
63888+int
63889+gr_acl_handle_filldir(const struct file *file, const char *name,
63890+ const int namelen, const ino_t ino)
63891+{
63892+ return 1;
63893+}
63894+
63895+int
63896+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
63897+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
63898+{
63899+ return 1;
63900+}
63901+
63902+int
63903+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
63904+{
63905+ return 0;
63906+}
63907+
63908+int
63909+gr_search_accept(const struct socket *sock)
63910+{
63911+ return 0;
63912+}
63913+
63914+int
63915+gr_search_listen(const struct socket *sock)
63916+{
63917+ return 0;
63918+}
63919+
63920+int
63921+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
63922+{
63923+ return 0;
63924+}
63925+
63926+__u32
63927+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
63928+{
63929+ return 1;
63930+}
63931+
63932+__u32
63933+gr_acl_handle_creat(const struct dentry * dentry,
63934+ const struct dentry * p_dentry,
63935+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
63936+ const int imode)
63937+{
63938+ return 1;
63939+}
63940+
63941+void
63942+gr_acl_handle_exit(void)
63943+{
63944+ return;
63945+}
63946+
63947+int
63948+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
63949+{
63950+ return 1;
63951+}
63952+
63953+void
63954+gr_set_role_label(const kuid_t uid, const kgid_t gid)
63955+{
63956+ return;
63957+}
63958+
63959+int
63960+gr_acl_handle_procpidmem(const struct task_struct *task)
63961+{
63962+ return 0;
63963+}
63964+
63965+int
63966+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
63967+{
63968+ return 0;
63969+}
63970+
63971+int
63972+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
63973+{
63974+ return 0;
63975+}
63976+
63977+void
63978+gr_set_kernel_label(struct task_struct *task)
63979+{
63980+ return;
63981+}
63982+
63983+int
63984+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
63985+{
63986+ return 0;
63987+}
63988+
63989+int
63990+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
63991+{
63992+ return 0;
63993+}
63994+
63995+int gr_acl_enable_at_secure(void)
63996+{
63997+ return 0;
63998+}
63999+
64000+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
64001+{
64002+ return dentry->d_inode->i_sb->s_dev;
64003+}
64004+
64005+void gr_put_exec_file(struct task_struct *task)
64006+{
64007+ return;
64008+}
64009+
64010+EXPORT_SYMBOL(gr_set_kernel_label);
64011+#ifdef CONFIG_SECURITY
64012+EXPORT_SYMBOL(gr_check_user_change);
64013+EXPORT_SYMBOL(gr_check_group_change);
64014+#endif
64015diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
64016new file mode 100644
64017index 0000000..387032b
64018--- /dev/null
64019+++ b/grsecurity/grsec_exec.c
64020@@ -0,0 +1,187 @@
64021+#include <linux/kernel.h>
64022+#include <linux/sched.h>
64023+#include <linux/file.h>
64024+#include <linux/binfmts.h>
64025+#include <linux/fs.h>
64026+#include <linux/types.h>
64027+#include <linux/grdefs.h>
64028+#include <linux/grsecurity.h>
64029+#include <linux/grinternal.h>
64030+#include <linux/capability.h>
64031+#include <linux/module.h>
64032+#include <linux/compat.h>
64033+
64034+#include <asm/uaccess.h>
64035+
64036+#ifdef CONFIG_GRKERNSEC_EXECLOG
64037+static char gr_exec_arg_buf[132];
64038+static DEFINE_MUTEX(gr_exec_arg_mutex);
64039+#endif
64040+
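+/*
+ * NOTE: this struct must stay layout-identical to struct user_arg_ptr
+ * in fs/exec.c, since get_user_arg_ptr() (declared extern below and
+ * presumably made non-static by this patch's fs/exec.c hunk) is reused
+ * here to walk a native or compat argv array.
+ */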
64041+struct user_arg_ptr {
64042+#ifdef CONFIG_COMPAT
64043+ bool is_compat;
64044+#endif
64045+ union {
64046+ const char __user *const __user *native;
64047+#ifdef CONFIG_COMPAT
64048+ const compat_uptr_t __user *compat;
64049+#endif
64050+ } ptr;
64051+};
64052+
64053+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
64054+
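+/*
+ * Copies up to 128 bytes of the argv strings into the static 132-byte
+ * buffer (the slack absorbs the separating spaces and trailing NUL),
+ * serialized by gr_exec_arg_mutex.  strnlen_user() counts the
+ * terminating NUL, hence the len-- before copy_from_user().
+ */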
64055+void
64056+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
64057+{
64058+#ifdef CONFIG_GRKERNSEC_EXECLOG
64059+ char *grarg = gr_exec_arg_buf;
64060+ unsigned int i, x, execlen = 0;
64061+ char c;
64062+
64063+ if (!((grsec_enable_execlog && grsec_enable_group &&
64064+ in_group_p(grsec_audit_gid))
64065+ || (grsec_enable_execlog && !grsec_enable_group)))
64066+ return;
64067+
64068+ mutex_lock(&gr_exec_arg_mutex);
64069+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
64070+
64071+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
64072+ const char __user *p;
64073+ unsigned int len;
64074+
64075+ p = get_user_arg_ptr(argv, i);
64076+ if (IS_ERR(p))
64077+ goto log;
64078+
64079+ len = strnlen_user(p, 128 - execlen);
64080+ if (len > 128 - execlen)
64081+ len = 128 - execlen;
64082+ else if (len > 0)
64083+ len--;
64084+ if (copy_from_user(grarg + execlen, p, len))
64085+ goto log;
64086+
64087+ /* rewrite unprintable characters */
64088+ for (x = 0; x < len; x++) {
64089+ c = *(grarg + execlen + x);
64090+ if (c < 32 || c > 126)
64091+ *(grarg + execlen + x) = ' ';
64092+ }
64093+
64094+ execlen += len;
64095+ *(grarg + execlen) = ' ';
64096+ *(grarg + execlen + 1) = '\0';
64097+ execlen++;
64098+ }
64099+
64100+ log:
64101+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
64102+ bprm->file->f_path.mnt, grarg);
64103+ mutex_unlock(&gr_exec_arg_mutex);
64104+#endif
64105+ return;
64106+}
64107+
64108+#ifdef CONFIG_GRKERNSEC
64109+extern int gr_acl_is_capable(const int cap);
64110+extern int gr_acl_is_capable_nolog(const int cap);
64111+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
64112+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
64113+extern int gr_chroot_is_capable(const int cap);
64114+extern int gr_chroot_is_capable_nolog(const int cap);
64115+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
64116+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
64117+#endif
64118+
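+/*
+ * Indexed directly by CAP_* number, so it must track the capability
+ * list in include/uapi/linux/capability.h; callers are expected to
+ * bounds-check against captab_log_entries before dereferencing.
+ */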
64119+const char *captab_log[] = {
64120+ "CAP_CHOWN",
64121+ "CAP_DAC_OVERRIDE",
64122+ "CAP_DAC_READ_SEARCH",
64123+ "CAP_FOWNER",
64124+ "CAP_FSETID",
64125+ "CAP_KILL",
64126+ "CAP_SETGID",
64127+ "CAP_SETUID",
64128+ "CAP_SETPCAP",
64129+ "CAP_LINUX_IMMUTABLE",
64130+ "CAP_NET_BIND_SERVICE",
64131+ "CAP_NET_BROADCAST",
64132+ "CAP_NET_ADMIN",
64133+ "CAP_NET_RAW",
64134+ "CAP_IPC_LOCK",
64135+ "CAP_IPC_OWNER",
64136+ "CAP_SYS_MODULE",
64137+ "CAP_SYS_RAWIO",
64138+ "CAP_SYS_CHROOT",
64139+ "CAP_SYS_PTRACE",
64140+ "CAP_SYS_PACCT",
64141+ "CAP_SYS_ADMIN",
64142+ "CAP_SYS_BOOT",
64143+ "CAP_SYS_NICE",
64144+ "CAP_SYS_RESOURCE",
64145+ "CAP_SYS_TIME",
64146+ "CAP_SYS_TTY_CONFIG",
64147+ "CAP_MKNOD",
64148+ "CAP_LEASE",
64149+ "CAP_AUDIT_WRITE",
64150+ "CAP_AUDIT_CONTROL",
64151+ "CAP_SETFCAP",
64152+ "CAP_MAC_OVERRIDE",
64153+ "CAP_MAC_ADMIN",
64154+ "CAP_SYSLOG",
64155+ "CAP_WAKE_ALARM"
64156+};
64157+
64158+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
64159+
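+/*
+ * The wrappers below AND together the RBAC capability check and the
+ * chroot capability mask: both must grant the capability.  The _nolog
+ * variants serve callers that probe capabilities speculatively, where
+ * a denial is expected and should not spam the log.
+ */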
64160+int gr_is_capable(const int cap)
64161+{
64162+#ifdef CONFIG_GRKERNSEC
64163+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
64164+ return 1;
64165+ return 0;
64166+#else
64167+ return 1;
64168+#endif
64169+}
64170+
64171+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
64172+{
64173+#ifdef CONFIG_GRKERNSEC
64174+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
64175+ return 1;
64176+ return 0;
64177+#else
64178+ return 1;
64179+#endif
64180+}
64181+
64182+int gr_is_capable_nolog(const int cap)
64183+{
64184+#ifdef CONFIG_GRKERNSEC
64185+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
64186+ return 1;
64187+ return 0;
64188+#else
64189+ return 1;
64190+#endif
64191+}
64192+
64193+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
64194+{
64195+#ifdef CONFIG_GRKERNSEC
64196+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
64197+ return 1;
64198+ return 0;
64199+#else
64200+ return 1;
64201+#endif
64202+}
64203+
64204+EXPORT_SYMBOL(gr_is_capable);
64205+EXPORT_SYMBOL(gr_is_capable_nolog);
64206+EXPORT_SYMBOL(gr_task_is_capable);
64207+EXPORT_SYMBOL(gr_task_is_capable_nolog);
64208diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
64209new file mode 100644
64210index 0000000..06cc6ea
64211--- /dev/null
64212+++ b/grsecurity/grsec_fifo.c
64213@@ -0,0 +1,24 @@
64214+#include <linux/kernel.h>
64215+#include <linux/sched.h>
64216+#include <linux/fs.h>
64217+#include <linux/file.h>
64218+#include <linux/grinternal.h>
64219+
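+/*
+ * FIFO restriction: refuse to open a FIFO in a sticky directory when
+ * the FIFO is owned by neither the directory owner nor the opener,
+ * unless O_EXCL is set (the caller is creating it).  This blunts
+ * /tmp FIFO spoofing attacks.
+ */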
64220+int
64221+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
64222+ const struct dentry *dir, const int flag, const int acc_mode)
64223+{
64224+#ifdef CONFIG_GRKERNSEC_FIFO
64225+ const struct cred *cred = current_cred();
64226+
64227+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
64228+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
64229+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
64230+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
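+ /* audit only when plain DAC would have allowed it; deny either way */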
64231+ if (!inode_permission(dentry->d_inode, acc_mode))
64232+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
64233+ return -EACCES;
64234+ }
64235+#endif
64236+ return 0;
64237+}
64238diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
64239new file mode 100644
64240index 0000000..8ca18bf
64241--- /dev/null
64242+++ b/grsecurity/grsec_fork.c
64243@@ -0,0 +1,23 @@
64244+#include <linux/kernel.h>
64245+#include <linux/sched.h>
64246+#include <linux/grsecurity.h>
64247+#include <linux/grinternal.h>
64248+#include <linux/errno.h>
64249+
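+/*
+ * Only resource-exhaustion failures (-EAGAIN/-ENOMEM) are logged:
+ * they are the signature of a fork bomb or of a task/memory limit
+ * being hit.
+ */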
64250+void
64251+gr_log_forkfail(const int retval)
64252+{
64253+#ifdef CONFIG_GRKERNSEC_FORKFAIL
64254+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
64255+ switch (retval) {
64256+ case -EAGAIN:
64257+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
64258+ break;
64259+ case -ENOMEM:
64260+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
64261+ break;
64262+ }
64263+ }
64264+#endif
64265+ return;
64266+}
64267diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
64268new file mode 100644
64269index 0000000..a862e9f
64270--- /dev/null
64271+++ b/grsecurity/grsec_init.c
64272@@ -0,0 +1,283 @@
64273+#include <linux/kernel.h>
64274+#include <linux/sched.h>
64275+#include <linux/mm.h>
64276+#include <linux/gracl.h>
64277+#include <linux/slab.h>
64278+#include <linux/vmalloc.h>
64279+#include <linux/percpu.h>
64280+#include <linux/module.h>
64281+
64282+int grsec_enable_ptrace_readexec;
64283+int grsec_enable_setxid;
64284+int grsec_enable_symlinkown;
64285+kgid_t grsec_symlinkown_gid;
64286+int grsec_enable_brute;
64287+int grsec_enable_link;
64288+int grsec_enable_dmesg;
64289+int grsec_enable_harden_ptrace;
64290+int grsec_enable_fifo;
64291+int grsec_enable_execlog;
64292+int grsec_enable_signal;
64293+int grsec_enable_forkfail;
64294+int grsec_enable_audit_ptrace;
64295+int grsec_enable_time;
64296+int grsec_enable_audit_textrel;
64297+int grsec_enable_group;
64298+kgid_t grsec_audit_gid;
64299+int grsec_enable_chdir;
64300+int grsec_enable_mount;
64301+int grsec_enable_rofs;
64302+int grsec_enable_chroot_findtask;
64303+int grsec_enable_chroot_mount;
64304+int grsec_enable_chroot_shmat;
64305+int grsec_enable_chroot_fchdir;
64306+int grsec_enable_chroot_double;
64307+int grsec_enable_chroot_pivot;
64308+int grsec_enable_chroot_chdir;
64309+int grsec_enable_chroot_chmod;
64310+int grsec_enable_chroot_mknod;
64311+int grsec_enable_chroot_nice;
64312+int grsec_enable_chroot_execlog;
64313+int grsec_enable_chroot_caps;
64314+int grsec_enable_chroot_sysctl;
64315+int grsec_enable_chroot_unix;
64316+int grsec_enable_tpe;
64317+kgid_t grsec_tpe_gid;
64318+int grsec_enable_blackhole;
64319+#ifdef CONFIG_IPV6_MODULE
64320+EXPORT_SYMBOL(grsec_enable_blackhole);
64321+#endif
64322+int grsec_lastack_retries;
64323+int grsec_enable_tpe_all;
64324+int grsec_enable_tpe_invert;
64325+int grsec_enable_socket_all;
64326+kgid_t grsec_socket_all_gid;
64327+int grsec_enable_socket_client;
64328+kgid_t grsec_socket_client_gid;
64329+int grsec_enable_socket_server;
64330+kgid_t grsec_socket_server_gid;
64331+int grsec_resource_logging;
64332+int grsec_disable_privio;
64333+int grsec_enable_log_rwxmaps;
64334+int grsec_lock;
64335+
64336+DEFINE_SPINLOCK(grsec_alert_lock);
64337+unsigned long grsec_alert_wtime = 0;
64338+unsigned long grsec_alert_fyet = 0;
64339+
64340+DEFINE_SPINLOCK(grsec_audit_lock);
64341+
64342+DEFINE_RWLOCK(grsec_exec_file_lock);
64343+
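+/*
+ * Per-CPU page-sized scratch buffers, allocated in grsecurity_init()
+ * below and presumably consumed by the pathname-building helpers
+ * (gr_to_filename*() and friends) elsewhere in this patch.
+ */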
64344+char *gr_shared_page[4];
64345+
64346+char *gr_alert_log_fmt;
64347+char *gr_audit_log_fmt;
64348+char *gr_alert_log_buf;
64349+char *gr_audit_log_buf;
64350+
64351+extern struct gr_arg *gr_usermode;
64352+extern unsigned char *gr_system_salt;
64353+extern unsigned char *gr_system_sum;
64354+
64355+void __init
64356+grsecurity_init(void)
64357+{
64358+ int j;
64359+ /* create the per-cpu shared pages */
64360+
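+ /*
+ * Likely the BIOS keyboard buffer (BDA bytes 0x41a-0x43d): wipe it
+ * so keystrokes typed to the bootloader, e.g. a boot password, do
+ * not linger in low physical memory.
+ */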
64361+#ifdef CONFIG_X86
64362+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
64363+#endif
64364+
64365+ for (j = 0; j < 4; j++) {
64366+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
64367+ if (gr_shared_page[j] == NULL) {
64368+ panic("Unable to allocate grsecurity shared page");
64369+ return;
64370+ }
64371+ }
64372+
64373+ /* allocate log buffers */
64374+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
64375+ if (!gr_alert_log_fmt) {
64376+ panic("Unable to allocate grsecurity alert log format buffer");
64377+ return;
64378+ }
64379+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
64380+ if (!gr_audit_log_fmt) {
64381+ panic("Unable to allocate grsecurity audit log format buffer");
64382+ return;
64383+ }
64384+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
64385+ if (!gr_alert_log_buf) {
64386+ panic("Unable to allocate grsecurity alert log buffer");
64387+ return;
64388+ }
64389+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
64390+ if (!gr_audit_log_buf) {
64391+ panic("Unable to allocate grsecurity audit log buffer");
64392+ return;
64393+ }
64394+
64395+ /* allocate memory for authentication structure */
64396+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
64397+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
64398+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
64399+
64400+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
64401+ panic("Unable to allocate grsecurity authentication structure");
64402+ return;
64403+ }
64404+
64406+#ifdef CONFIG_GRKERNSEC_IO
64407+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
64408+ grsec_disable_privio = 1;
64409+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
64410+ grsec_disable_privio = 1;
64411+#else
64412+ grsec_disable_privio = 0;
64413+#endif
64414+#endif
64415+
64416+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
64417+ /* for backward compatibility, tpe_invert always defaults to on
64418+ * if enabled in the kernel
64419+ */
64420+ grsec_enable_tpe_invert = 1;
64421+#endif
64422+
64423+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
64424+#ifndef CONFIG_GRKERNSEC_SYSCTL
64425+ grsec_lock = 1;
64426+#endif
64427+
64428+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
64429+ grsec_enable_audit_textrel = 1;
64430+#endif
64431+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64432+ grsec_enable_log_rwxmaps = 1;
64433+#endif
64434+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
64435+ grsec_enable_group = 1;
64436+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
64437+#endif
64438+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
64439+ grsec_enable_ptrace_readexec = 1;
64440+#endif
64441+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
64442+ grsec_enable_chdir = 1;
64443+#endif
64444+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
64445+ grsec_enable_harden_ptrace = 1;
64446+#endif
64447+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64448+ grsec_enable_mount = 1;
64449+#endif
64450+#ifdef CONFIG_GRKERNSEC_LINK
64451+ grsec_enable_link = 1;
64452+#endif
64453+#ifdef CONFIG_GRKERNSEC_BRUTE
64454+ grsec_enable_brute = 1;
64455+#endif
64456+#ifdef CONFIG_GRKERNSEC_DMESG
64457+ grsec_enable_dmesg = 1;
64458+#endif
64459+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
64460+ grsec_enable_blackhole = 1;
64461+ grsec_lastack_retries = 4;
64462+#endif
64463+#ifdef CONFIG_GRKERNSEC_FIFO
64464+ grsec_enable_fifo = 1;
64465+#endif
64466+#ifdef CONFIG_GRKERNSEC_EXECLOG
64467+ grsec_enable_execlog = 1;
64468+#endif
64469+#ifdef CONFIG_GRKERNSEC_SETXID
64470+ grsec_enable_setxid = 1;
64471+#endif
64472+#ifdef CONFIG_GRKERNSEC_SIGNAL
64473+ grsec_enable_signal = 1;
64474+#endif
64475+#ifdef CONFIG_GRKERNSEC_FORKFAIL
64476+ grsec_enable_forkfail = 1;
64477+#endif
64478+#ifdef CONFIG_GRKERNSEC_TIME
64479+ grsec_enable_time = 1;
64480+#endif
64481+#ifdef CONFIG_GRKERNSEC_RESLOG
64482+ grsec_resource_logging = 1;
64483+#endif
64484+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
64485+ grsec_enable_chroot_findtask = 1;
64486+#endif
64487+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
64488+ grsec_enable_chroot_unix = 1;
64489+#endif
64490+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
64491+ grsec_enable_chroot_mount = 1;
64492+#endif
64493+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
64494+ grsec_enable_chroot_fchdir = 1;
64495+#endif
64496+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
64497+ grsec_enable_chroot_shmat = 1;
64498+#endif
64499+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
64500+ grsec_enable_audit_ptrace = 1;
64501+#endif
64502+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
64503+ grsec_enable_chroot_double = 1;
64504+#endif
64505+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
64506+ grsec_enable_chroot_pivot = 1;
64507+#endif
64508+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
64509+ grsec_enable_chroot_chdir = 1;
64510+#endif
64511+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
64512+ grsec_enable_chroot_chmod = 1;
64513+#endif
64514+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
64515+ grsec_enable_chroot_mknod = 1;
64516+#endif
64517+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
64518+ grsec_enable_chroot_nice = 1;
64519+#endif
64520+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
64521+ grsec_enable_chroot_execlog = 1;
64522+#endif
64523+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64524+ grsec_enable_chroot_caps = 1;
64525+#endif
64526+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
64527+ grsec_enable_chroot_sysctl = 1;
64528+#endif
64529+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
64530+ grsec_enable_symlinkown = 1;
64531+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
64532+#endif
64533+#ifdef CONFIG_GRKERNSEC_TPE
64534+ grsec_enable_tpe = 1;
64535+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
64536+#ifdef CONFIG_GRKERNSEC_TPE_ALL
64537+ grsec_enable_tpe_all = 1;
64538+#endif
64539+#endif
64540+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
64541+ grsec_enable_socket_all = 1;
64542+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
64543+#endif
64544+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
64545+ grsec_enable_socket_client = 1;
64546+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
64547+#endif
64548+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64549+ grsec_enable_socket_server = 1;
64550+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
64551+#endif
64552+#endif
64553+
64554+ return;
64555+}
64556diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
64557new file mode 100644
64558index 0000000..5e05e20
64559--- /dev/null
64560+++ b/grsecurity/grsec_link.c
64561@@ -0,0 +1,58 @@
64562+#include <linux/kernel.h>
64563+#include <linux/sched.h>
64564+#include <linux/fs.h>
64565+#include <linux/file.h>
64566+#include <linux/grinternal.h>
64567+
64568+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
64569+{
64570+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
64571+ const struct inode *link_inode = link->dentry->d_inode;
64572+
64573+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
64574+ /* ignore root-owned links, e.g. /proc/self */
64575+ gr_is_global_nonroot(link_inode->i_uid) && target &&
64576+ !uid_eq(link_inode->i_uid, target->i_uid)) {
64577+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
64578+ return 1;
64579+ }
64580+#endif
64581+ return 0;
64582+}
64583+
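+/*
+ * Sticky-directory symlink protection: following a symlink located in
+ * a world-writable +t directory (/tmp et al.) is denied unless the
+ * link's owner matches the directory owner or the follower's fsuid.
+ */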
64584+int
64585+gr_handle_follow_link(const struct inode *parent,
64586+ const struct inode *inode,
64587+ const struct dentry *dentry, const struct vfsmount *mnt)
64588+{
64589+#ifdef CONFIG_GRKERNSEC_LINK
64590+ const struct cred *cred = current_cred();
64591+
64592+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
64593+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
64594+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
64595+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
64596+ return -EACCES;
64597+ }
64598+#endif
64599+ return 0;
64600+}
64601+
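+/*
+ * Hardlink restriction: an unprivileged user (no CAP_FOWNER) may only
+ * hardlink files they own, or regular non-setuid/setgid files they
+ * can already both read and write.
+ */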
64602+int
64603+gr_handle_hardlink(const struct dentry *dentry,
64604+ const struct vfsmount *mnt,
64605+ struct inode *inode, const int mode, const struct filename *to)
64606+{
64607+#ifdef CONFIG_GRKERNSEC_LINK
64608+ const struct cred *cred = current_cred();
64609+
64610+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
64611+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
64612+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
64613+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
64614+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
64615+ return -EPERM;
64616+ }
64617+#endif
64618+ return 0;
64619+}
64620diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
64621new file mode 100644
64622index 0000000..7c06085
64623--- /dev/null
64624+++ b/grsecurity/grsec_log.c
64625@@ -0,0 +1,326 @@
64626+#include <linux/kernel.h>
64627+#include <linux/sched.h>
64628+#include <linux/file.h>
64629+#include <linux/tty.h>
64630+#include <linux/fs.h>
64631+#include <linux/grinternal.h>
64632+
64633+#ifdef CONFIG_TREE_PREEMPT_RCU
64634+#define DISABLE_PREEMPT() preempt_disable()
64635+#define ENABLE_PREEMPT() preempt_enable()
64636+#else
64637+#define DISABLE_PREEMPT()
64638+#define ENABLE_PREEMPT()
64639+#endif
64640+
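+/*
+ * BEGIN_LOCKS/END_LOCKS pin everything the formatting helpers touch:
+ * RCU plus tasklist_lock keep real_parent walks stable,
+ * grsec_exec_file_lock keeps task->exec_file stable for pathname
+ * lookup, and one of the two spinlocks serializes the shared log
+ * buffers.  GR_DONT_AUDIT denials additionally invoke
+ * gr_handle_alertkill() once the locks are dropped.
+ */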
64641+#define BEGIN_LOCKS(x) \
64642+ DISABLE_PREEMPT(); \
64643+ rcu_read_lock(); \
64644+ read_lock(&tasklist_lock); \
64645+ read_lock(&grsec_exec_file_lock); \
64646+ if (x != GR_DO_AUDIT) \
64647+ spin_lock(&grsec_alert_lock); \
64648+ else \
64649+ spin_lock(&grsec_audit_lock)
64650+
64651+#define END_LOCKS(x) \
64652+ if (x != GR_DO_AUDIT) \
64653+ spin_unlock(&grsec_alert_lock); \
64654+ else \
64655+ spin_unlock(&grsec_audit_lock); \
64656+ read_unlock(&grsec_exec_file_lock); \
64657+ read_unlock(&tasklist_lock); \
64658+ rcu_read_unlock(); \
64659+ ENABLE_PREEMPT(); \
64660+ if (x == GR_DONT_AUDIT) \
64661+ gr_handle_alertkill(current)
64662+
64663+enum {
64664+ FLOODING,
64665+ NO_FLOODING
64666+};
64667+
64668+extern char *gr_alert_log_fmt;
64669+extern char *gr_audit_log_fmt;
64670+extern char *gr_alert_log_buf;
64671+extern char *gr_audit_log_buf;
64672+
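+/*
+ * Rate limiter: at most CONFIG_GRKERNSEC_FLOODBURST alerts per
+ * CONFIG_GRKERNSEC_FLOODTIME-second window; once the burst is spent,
+ * further alerts are dropped (FLOODING) until a fresh window starts.
+ * Audit (GR_DO_AUDIT) messages bypass the limiter entirely.
+ */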
64673+static int gr_log_start(int audit)
64674+{
64675+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
64676+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
64677+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
64678+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
64679+ unsigned long curr_secs = get_seconds();
64680+
64681+ if (audit == GR_DO_AUDIT)
64682+ goto set_fmt;
64683+
64684+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
64685+ grsec_alert_wtime = curr_secs;
64686+ grsec_alert_fyet = 0;
64687+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
64688+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
64689+ grsec_alert_fyet++;
64690+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
64691+ grsec_alert_wtime = curr_secs;
64692+ grsec_alert_fyet++;
64693+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
64694+ return FLOODING;
64695+ } else
+ return FLOODING;
64697+
64698+set_fmt:
64699+#endif
64700+ memset(buf, 0, PAGE_SIZE);
64701+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
64702+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
64703+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
64704+ } else if (current->signal->curr_ip) {
64705+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
64706+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
64707+ } else if (gr_acl_is_enabled()) {
64708+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
64709+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
64710+ } else {
64711+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
64712+ strcpy(buf, fmt);
64713+ }
64714+
64715+ return NO_FLOODING;
64716+}
64717+
64718+static void gr_log_middle(int audit, const char *msg, va_list ap)
64719+ __attribute__ ((format (printf, 2, 0)));
64720+
64721+static void gr_log_middle(int audit, const char *msg, va_list ap)
64722+{
64723+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
64724+ unsigned int len = strlen(buf);
64725+
64726+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
64727+
64728+ return;
64729+}
64730+
64731+static void gr_log_middle_varargs(int audit, const char *msg, ...)
64732+ __attribute__ ((format (printf, 2, 3)));
64733+
64734+static void gr_log_middle_varargs(int audit, const char *msg, ...)
64735+{
64736+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
64737+ unsigned int len = strlen(buf);
64738+ va_list ap;
64739+
64740+ va_start(ap, msg);
64741+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
64742+ va_end(ap);
64743+
64744+ return;
64745+}
64746+
64747+static void gr_log_end(int audit, int append_default)
64748+{
64749+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
64750+ if (append_default) {
64751+ struct task_struct *task = current;
64752+ struct task_struct *parent = task->real_parent;
64753+ const struct cred *cred = __task_cred(task);
64754+ const struct cred *pcred = __task_cred(parent);
64755+ unsigned int len = strlen(buf);
64756+
64757+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
64758+ }
64759+
64760+ printk("%s\n", buf);
64761+
64762+ return;
64763+}
64764+
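+/*
+ * argtypes selects how the va_list is unpacked; each GR_* case must
+ * match the argument layout that its corresponding GR_*_MSG format
+ * string expects, so the two have to be changed in lockstep.
+ */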
64765+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
64766+{
64767+ int logtype;
64768+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
64769+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
64770+ void *voidptr = NULL;
64771+ int num1 = 0, num2 = 0;
64772+ unsigned long ulong1 = 0, ulong2 = 0;
64773+ struct dentry *dentry = NULL;
64774+ struct vfsmount *mnt = NULL;
64775+ struct file *file = NULL;
64776+ struct task_struct *task = NULL;
64777+ const struct cred *cred, *pcred;
64778+ va_list ap;
64779+
64780+ BEGIN_LOCKS(audit);
64781+ logtype = gr_log_start(audit);
64782+ if (logtype == FLOODING) {
64783+ END_LOCKS(audit);
64784+ return;
64785+ }
64786+ va_start(ap, argtypes);
64787+ switch (argtypes) {
64788+ case GR_TTYSNIFF:
64789+ task = va_arg(ap, struct task_struct *);
64790+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
64791+ break;
64792+ case GR_SYSCTL_HIDDEN:
64793+ str1 = va_arg(ap, char *);
64794+ gr_log_middle_varargs(audit, msg, result, str1);
64795+ break;
64796+ case GR_RBAC:
64797+ dentry = va_arg(ap, struct dentry *);
64798+ mnt = va_arg(ap, struct vfsmount *);
64799+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
64800+ break;
64801+ case GR_RBAC_STR:
64802+ dentry = va_arg(ap, struct dentry *);
64803+ mnt = va_arg(ap, struct vfsmount *);
64804+ str1 = va_arg(ap, char *);
64805+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
64806+ break;
64807+ case GR_STR_RBAC:
64808+ str1 = va_arg(ap, char *);
64809+ dentry = va_arg(ap, struct dentry *);
64810+ mnt = va_arg(ap, struct vfsmount *);
64811+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
64812+ break;
64813+ case GR_RBAC_MODE2:
64814+ dentry = va_arg(ap, struct dentry *);
64815+ mnt = va_arg(ap, struct vfsmount *);
64816+ str1 = va_arg(ap, char *);
64817+ str2 = va_arg(ap, char *);
64818+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
64819+ break;
64820+ case GR_RBAC_MODE3:
64821+ dentry = va_arg(ap, struct dentry *);
64822+ mnt = va_arg(ap, struct vfsmount *);
64823+ str1 = va_arg(ap, char *);
64824+ str2 = va_arg(ap, char *);
64825+ str3 = va_arg(ap, char *);
64826+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
64827+ break;
64828+ case GR_FILENAME:
64829+ dentry = va_arg(ap, struct dentry *);
64830+ mnt = va_arg(ap, struct vfsmount *);
64831+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
64832+ break;
64833+ case GR_STR_FILENAME:
64834+ str1 = va_arg(ap, char *);
64835+ dentry = va_arg(ap, struct dentry *);
64836+ mnt = va_arg(ap, struct vfsmount *);
64837+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
64838+ break;
64839+ case GR_FILENAME_STR:
64840+ dentry = va_arg(ap, struct dentry *);
64841+ mnt = va_arg(ap, struct vfsmount *);
64842+ str1 = va_arg(ap, char *);
64843+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
64844+ break;
64845+ case GR_FILENAME_TWO_INT:
64846+ dentry = va_arg(ap, struct dentry *);
64847+ mnt = va_arg(ap, struct vfsmount *);
64848+ num1 = va_arg(ap, int);
64849+ num2 = va_arg(ap, int);
64850+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
64851+ break;
64852+ case GR_FILENAME_TWO_INT_STR:
64853+ dentry = va_arg(ap, struct dentry *);
64854+ mnt = va_arg(ap, struct vfsmount *);
64855+ num1 = va_arg(ap, int);
64856+ num2 = va_arg(ap, int);
64857+ str1 = va_arg(ap, char *);
64858+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
64859+ break;
64860+ case GR_TEXTREL:
64861+ file = va_arg(ap, struct file *);
64862+ ulong1 = va_arg(ap, unsigned long);
64863+ ulong2 = va_arg(ap, unsigned long);
64864+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
64865+ break;
64866+ case GR_PTRACE:
64867+ task = va_arg(ap, struct task_struct *);
64868+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
64869+ break;
64870+ case GR_RESOURCE:
64871+ task = va_arg(ap, struct task_struct *);
64872+ cred = __task_cred(task);
64873+ pcred = __task_cred(task->real_parent);
64874+ ulong1 = va_arg(ap, unsigned long);
64875+ str1 = va_arg(ap, char *);
64876+ ulong2 = va_arg(ap, unsigned long);
64877+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
64878+ break;
64879+ case GR_CAP:
64880+ task = va_arg(ap, struct task_struct *);
64881+ cred = __task_cred(task);
64882+ pcred = __task_cred(task->real_parent);
64883+ str1 = va_arg(ap, char *);
64884+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
64885+ break;
64886+ case GR_SIG:
64887+ str1 = va_arg(ap, char *);
64888+ voidptr = va_arg(ap, void *);
64889+ gr_log_middle_varargs(audit, msg, str1, voidptr);
64890+ break;
64891+ case GR_SIG2:
64892+ task = va_arg(ap, struct task_struct *);
64893+ cred = __task_cred(task);
64894+ pcred = __task_cred(task->real_parent);
64895+ num1 = va_arg(ap, int);
64896+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
64897+ break;
64898+ case GR_CRASH1:
64899+ task = va_arg(ap, struct task_struct *);
64900+ cred = __task_cred(task);
64901+ pcred = __task_cred(task->real_parent);
64902+ ulong1 = va_arg(ap, unsigned long);
64903+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
64904+ break;
64905+ case GR_CRASH2:
64906+ task = va_arg(ap, struct task_struct *);
64907+ cred = __task_cred(task);
64908+ pcred = __task_cred(task->real_parent);
64909+ ulong1 = va_arg(ap, unsigned long);
64910+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
64911+ break;
64912+ case GR_RWXMAP:
64913+ file = va_arg(ap, struct file *);
64914+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
64915+ break;
64916+ case GR_PSACCT:
64917+ {
64918+ unsigned int wday, cday;
64919+ __u8 whr, chr;
64920+ __u8 wmin, cmin;
64921+ __u8 wsec, csec;
64922+ char cur_tty[64] = { 0 };
64923+ char parent_tty[64] = { 0 };
64924+
64925+ task = va_arg(ap, struct task_struct *);
64926+ wday = va_arg(ap, unsigned int);
64927+ cday = va_arg(ap, unsigned int);
64928+ whr = va_arg(ap, int);
64929+ chr = va_arg(ap, int);
64930+ wmin = va_arg(ap, int);
64931+ cmin = va_arg(ap, int);
64932+ wsec = va_arg(ap, int);
64933+ csec = va_arg(ap, int);
64934+ ulong1 = va_arg(ap, unsigned long);
64935+ cred = __task_cred(task);
64936+ pcred = __task_cred(task->real_parent);
64937+
64938+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
64939+ }
64940+ break;
64941+ default:
64942+ gr_log_middle(audit, msg, ap);
64943+ }
64944+ va_end(ap);
64945+ /* these don't need DEFAULTSECARGS printed on the end */
64946+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
64947+ gr_log_end(audit, 0);
64948+ else
64949+ gr_log_end(audit, 1);
64950+ END_LOCKS(audit);
64951+}
64952diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
64953new file mode 100644
64954index 0000000..f536303
64955--- /dev/null
64956+++ b/grsecurity/grsec_mem.c
64957@@ -0,0 +1,40 @@
64958+#include <linux/kernel.h>
64959+#include <linux/sched.h>
64960+#include <linux/mm.h>
64961+#include <linux/mman.h>
64962+#include <linux/grinternal.h>
64963+
64964+void
64965+gr_handle_ioperm(void)
64966+{
64967+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
64968+ return;
64969+}
64970+
64971+void
64972+gr_handle_iopl(void)
64973+{
64974+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
64975+ return;
64976+}
64977+
64978+void
64979+gr_handle_mem_readwrite(u64 from, u64 to)
64980+{
64981+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
64982+ return;
64983+}
64984+
64985+void
64986+gr_handle_vm86(void)
64987+{
64988+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
64989+ return;
64990+}
64991+
64992+void
64993+gr_log_badprocpid(const char *entry)
64994+{
64995+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
64996+ return;
64997+}
64998diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
64999new file mode 100644
65000index 0000000..2131422
65001--- /dev/null
65002+++ b/grsecurity/grsec_mount.c
65003@@ -0,0 +1,62 @@
65004+#include <linux/kernel.h>
65005+#include <linux/sched.h>
65006+#include <linux/mount.h>
65007+#include <linux/grsecurity.h>
65008+#include <linux/grinternal.h>
65009+
65010+void
65011+gr_log_remount(const char *devname, const int retval)
65012+{
65013+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65014+ if (grsec_enable_mount && (retval >= 0))
65015+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
65016+#endif
65017+ return;
65018+}
65019+
65020+void
65021+gr_log_unmount(const char *devname, const int retval)
65022+{
65023+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65024+ if (grsec_enable_mount && (retval >= 0))
65025+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
65026+#endif
65027+ return;
65028+}
65029+
65030+void
65031+gr_log_mount(const char *from, const char *to, const int retval)
65032+{
65033+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65034+ if (grsec_enable_mount && (retval >= 0))
65035+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
65036+#endif
65037+ return;
65038+}
65039+
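+/*
+ * Runtime read-only enforcement: while the rofs sysctl is set, new
+ * writable (re)mounts and writes opened directly against block
+ * devices are refused, so the read-only state cannot be undone from
+ * userspace.
+ */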
65040+int
65041+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
65042+{
65043+#ifdef CONFIG_GRKERNSEC_ROFS
65044+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
65045+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
65046+ return -EPERM;
65047+ } else
65048+ return 0;
65049+#endif
65050+ return 0;
65051+}
65052+
65053+int
65054+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
65055+{
65056+#ifdef CONFIG_GRKERNSEC_ROFS
65057+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
65058+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
65059+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
65060+ return -EPERM;
65061+ } else
65062+ return 0;
65063+#endif
65064+ return 0;
65065+}
65066diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
65067new file mode 100644
65068index 0000000..a3b12a0
65069--- /dev/null
65070+++ b/grsecurity/grsec_pax.c
65071@@ -0,0 +1,36 @@
65072+#include <linux/kernel.h>
65073+#include <linux/sched.h>
65074+#include <linux/mm.h>
65075+#include <linux/file.h>
65076+#include <linux/grinternal.h>
65077+#include <linux/grsecurity.h>
65078+
65079+void
65080+gr_log_textrel(struct vm_area_struct * vma)
65081+{
65082+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
65083+ if (grsec_enable_audit_textrel)
65084+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
65085+#endif
65086+ return;
65087+}
65088+
65089+void
65090+gr_log_rwxmmap(struct file *file)
65091+{
65092+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
65093+ if (grsec_enable_log_rwxmaps)
65094+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
65095+#endif
65096+ return;
65097+}
65098+
65099+void
65100+gr_log_rwxmprotect(struct file *file)
65101+{
65102+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
65103+ if (grsec_enable_log_rwxmaps)
65104+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
65105+#endif
65106+ return;
65107+}
65108diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
65109new file mode 100644
65110index 0000000..f7f29aa
65111--- /dev/null
65112+++ b/grsecurity/grsec_ptrace.c
65113@@ -0,0 +1,30 @@
65114+#include <linux/kernel.h>
65115+#include <linux/sched.h>
65116+#include <linux/grinternal.h>
65117+#include <linux/security.h>
65118+
65119+void
65120+gr_audit_ptrace(struct task_struct *task)
65121+{
65122+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
65123+ if (grsec_enable_audit_ptrace)
65124+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
65125+#endif
65126+ return;
65127+}
65128+
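+/*
+ * PTRACE_READEXEC: while a task is ptraced by an unsafe tracer, deny
+ * exec of any binary the tracer lacks read permission for; otherwise
+ * ptrace could be used to dump the text of read-protected
+ * executables.
+ */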
65129+int
65130+gr_ptrace_readexec(struct file *file, int unsafe_flags)
65131+{
65132+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
65133+ const struct dentry *dentry = file->f_path.dentry;
65134+ const struct vfsmount *mnt = file->f_path.mnt;
65135+
65136+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
65137+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
65138+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
65139+ return -EACCES;
65140+ }
65141+#endif
65142+ return 0;
65143+}
65144diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
65145new file mode 100644
65146index 0000000..e09715a
65147--- /dev/null
65148+++ b/grsecurity/grsec_sig.c
65149@@ -0,0 +1,222 @@
65150+#include <linux/kernel.h>
65151+#include <linux/sched.h>
65152+#include <linux/delay.h>
65153+#include <linux/grsecurity.h>
65154+#include <linux/grinternal.h>
65155+#include <linux/hardirq.h>
65156+
65157+char *signames[] = {
65158+ [SIGSEGV] = "Segmentation fault",
65159+ [SIGILL] = "Illegal instruction",
65160+ [SIGABRT] = "Abort",
65161+ [SIGBUS] = "Invalid alignment/Bus error"
65162+};
65163+
65164+void
65165+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
65166+{
65167+#ifdef CONFIG_GRKERNSEC_SIGNAL
65168+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
65169+ (sig == SIGABRT) || (sig == SIGBUS))) {
65170+ if (task_pid_nr(t) == task_pid_nr(current)) {
65171+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
65172+ } else {
65173+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
65174+ }
65175+ }
65176+#endif
65177+ return;
65178+}
65179+
65180+int
65181+gr_handle_signal(const struct task_struct *p, const int sig)
65182+{
65183+#ifdef CONFIG_GRKERNSEC
65184+ /* ignore the 0 signal for protected task checks */
65185+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
65186+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
65187+ return -EPERM;
65188+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
65189+ return -EPERM;
65190+ }
65191+#endif
65192+ return 0;
65193+}
65194+
65195+#ifdef CONFIG_GRKERNSEC
65196+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
65197+
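+/*
+ * Deliver sig even if the target blocks or ignores it: reset the
+ * handler to SIG_DFL, unblock the signal, and strip
+ * SIGNAL_UNKILLABLE so that even an init-like task cannot survive
+ * the forced signal.
+ */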
65198+int gr_fake_force_sig(int sig, struct task_struct *t)
65199+{
65200+ unsigned long int flags;
65201+ int ret, blocked, ignored;
65202+ struct k_sigaction *action;
65203+
65204+ spin_lock_irqsave(&t->sighand->siglock, flags);
65205+ action = &t->sighand->action[sig-1];
65206+ ignored = action->sa.sa_handler == SIG_IGN;
65207+ blocked = sigismember(&t->blocked, sig);
65208+ if (blocked || ignored) {
65209+ action->sa.sa_handler = SIG_DFL;
65210+ if (blocked) {
65211+ sigdelset(&t->blocked, sig);
65212+ recalc_sigpending_and_wake(t);
65213+ }
65214+ }
65215+ if (action->sa.sa_handler == SIG_DFL)
65216+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
65217+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
65218+
65219+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
65220+
65221+ return ret;
65222+}
65223+#endif
65224+
65225+#ifdef CONFIG_GRKERNSEC_BRUTE
65226+#define GR_USER_BAN_TIME (15 * 60)
65227+#define GR_DAEMON_BRUTE_TIME (30 * 60)
65228+
65229+static int __get_dumpable(unsigned long mm_flags)
65230+{
65231+ int ret;
65232+
65233+ ret = mm_flags & MMF_DUMPABLE_MASK;
65234+ return (ret >= 2) ? 2 : ret;
65235+}
65236+#endif
65237+
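+/*
+ * Called on a suspicious crash.  A forking daemon (child sharing the
+ * parent's exec_file) has its re-forks delayed (30s sleep, see
+ * gr_handle_brute_check()) for the next 30 minutes via brute_expires;
+ * otherwise, if the core dump was suppressed for a non-root uid
+ * (e.g. a crashed setuid binary), that uid is banned for 15 minutes
+ * and all its tasks are killed.
+ */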
65238+void gr_handle_brute_attach(unsigned long mm_flags)
65239+{
65240+#ifdef CONFIG_GRKERNSEC_BRUTE
65241+ struct task_struct *p = current;
65242+ kuid_t uid = GLOBAL_ROOT_UID;
65243+ int daemon = 0;
65244+
65245+ if (!grsec_enable_brute)
65246+ return;
65247+
65248+ rcu_read_lock();
65249+ read_lock(&tasklist_lock);
65250+ read_lock(&grsec_exec_file_lock);
65251+ if (p->real_parent && p->real_parent->exec_file == p->exec_file) {
65252+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
65253+ p->real_parent->brute = 1;
65254+ daemon = 1;
65255+ } else {
65256+ const struct cred *cred = __task_cred(p), *cred2;
65257+ struct task_struct *tsk, *tsk2;
65258+
65259+ if (!__get_dumpable(mm_flags) && gr_is_global_nonroot(cred->uid)) {
65260+ struct user_struct *user;
65261+
65262+ uid = cred->uid;
65263+
65264+ /* this ref is put via free_uid() in __gr_process_user_ban()
+ on the first exec after the ban expires */
65265+ user = find_user(uid);
65266+ if (user == NULL)
65267+ goto unlock;
65268+ user->banned = 1;
65269+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
65270+ if (user->ban_expires == ~0UL)
65271+ user->ban_expires--;
65272+
65273+ do_each_thread(tsk2, tsk) {
65274+ cred2 = __task_cred(tsk);
65275+ if (tsk != p && uid_eq(cred2->uid, uid))
65276+ gr_fake_force_sig(SIGKILL, tsk);
65277+ } while_each_thread(tsk2, tsk);
65278+ }
65279+ }
65280+unlock:
65281+ read_unlock(&grsec_exec_file_lock);
65282+ read_unlock(&tasklist_lock);
65283+ rcu_read_unlock();
65284+
65285+ if (gr_is_global_nonroot(uid))
65286+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n",
65287+ GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
65288+ else if (daemon)
65289+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
65290+
65291+#endif
65292+ return;
65293+}
65294+
65295+void gr_handle_brute_check(void)
65296+{
65297+#ifdef CONFIG_GRKERNSEC_BRUTE
65298+ struct task_struct *p = current;
65299+
65300+ if (unlikely(p->brute)) {
65301+ if (!grsec_enable_brute)
65302+ p->brute = 0;
65303+ else if (time_before(get_seconds(), p->brute_expires))
65304+ msleep(30 * 1000);
65305+ }
65306+#endif
65307+ return;
65308+}
65309+
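+/*
+ * Active response to a detected kernel exploit attempt: panic if the
+ * offender is root or we are in interrupt context (nothing can be
+ * contained), otherwise ban the uid until reboot (ban_expires = ~0UL
+ * is treated as never-expiring by __gr_process_user_ban()) and kill
+ * all of its tasks.
+ */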
65310+void gr_handle_kernel_exploit(void)
65311+{
65312+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
65313+ const struct cred *cred;
65314+ struct task_struct *tsk, *tsk2;
65315+ struct user_struct *user;
65316+ kuid_t uid;
65317+
65318+ if (in_irq() || in_serving_softirq() || in_nmi())
65319+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
65320+
65321+ uid = current_uid();
65322+
65323+ if (gr_is_global_root(uid))
65324+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
65325+ else {
65326+ /* kill all the processes of this user, hold a reference
65327+ * to their creds struct, and prevent them from creating
65328+ * another process until system reset
65329+ */
65330+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
65331+ GR_GLOBAL_UID(uid));
65332+ /* we intentionally leak this ref */
65333+ user = get_uid(current->cred->user);
65334+ if (user) {
65335+ user->banned = 1;
65336+ user->ban_expires = ~0UL;
65337+ }
65338+
65339+ read_lock(&tasklist_lock);
65340+ do_each_thread(tsk2, tsk) {
65341+ cred = __task_cred(tsk);
65342+ if (uid_eq(cred->uid, uid))
65343+ gr_fake_force_sig(SIGKILL, tsk);
65344+ } while_each_thread(tsk2, tsk);
65345+ read_unlock(&tasklist_lock);
65346+ }
65347+#endif
65348+}
65349+
65350+int __gr_process_user_ban(struct user_struct *user)
65351+{
65352+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
65353+ if (unlikely(user->banned)) {
65354+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
65355+ user->banned = 0;
65356+ user->ban_expires = 0;
65357+ free_uid(user);
65358+ } else
65359+ return -EPERM;
65360+ }
65361+#endif
65362+ return 0;
65363+}
65364+
65365+int gr_process_user_ban(void)
65366+{
65367+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
65368+ return __gr_process_user_ban(current->cred->user);
65369+#endif
65370+ return 0;
65371+}
65372diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
65373new file mode 100644
65374index 0000000..4030d57
65375--- /dev/null
65376+++ b/grsecurity/grsec_sock.c
65377@@ -0,0 +1,244 @@
65378+#include <linux/kernel.h>
65379+#include <linux/module.h>
65380+#include <linux/sched.h>
65381+#include <linux/file.h>
65382+#include <linux/net.h>
65383+#include <linux/in.h>
65384+#include <linux/ip.h>
65385+#include <net/sock.h>
65386+#include <net/inet_sock.h>
65387+#include <linux/grsecurity.h>
65388+#include <linux/grinternal.h>
65389+#include <linux/gracl.h>
65390+
65391+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
65392+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
65393+
65394+EXPORT_SYMBOL(gr_search_udp_recvmsg);
65395+EXPORT_SYMBOL(gr_search_udp_sendmsg);
65396+
65397+#ifdef CONFIG_UNIX_MODULE
65398+EXPORT_SYMBOL(gr_acl_handle_unix);
65399+EXPORT_SYMBOL(gr_acl_handle_mknod);
65400+EXPORT_SYMBOL(gr_handle_chroot_unix);
65401+EXPORT_SYMBOL(gr_handle_create);
65402+#endif
65403+
65404+#ifdef CONFIG_GRKERNSEC
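+/*
+ * Task/IP association: a hash table keyed by TCP 4-tuple maps a
+ * connection to the signal_struct of the task that owns it, so that
+ * when a local peer's connection is accept()ed, the accepting task
+ * can inherit the peer's logged source IP (signal->curr_ip) in
+ * gr_attach_curr_ip() below.
+ */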
65405+#define gr_conn_table_size 32749
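+/* 32749 is prime, presumably so conn_hash()'s modulo distributes
+ * 4-tuples evenly across the buckets */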
65406+struct conn_table_entry {
65407+ struct conn_table_entry *next;
65408+ struct signal_struct *sig;
65409+};
65410+
65411+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
65412+DEFINE_SPINLOCK(gr_conn_table_lock);
65413+
65414+extern const char * gr_socktype_to_name(unsigned char type);
65415+extern const char * gr_proto_to_name(unsigned char proto);
65416+extern const char * gr_sockfamily_to_name(unsigned char family);
65417+
65418+static __inline__ int
65419+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
65420+{
65421+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
65422+}
65423+
65424+static __inline__ int
65425+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
65426+ __u16 sport, __u16 dport)
65427+{
65428+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
65429+ sig->gr_sport == sport && sig->gr_dport == dport))
65430+ return 1;
65431+ else
65432+ return 0;
65433+}
65434+
65435+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
65436+{
65437+ struct conn_table_entry **match;
65438+ unsigned int index;
65439+
65440+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
65441+ sig->gr_sport, sig->gr_dport,
65442+ gr_conn_table_size);
65443+
65444+ newent->sig = sig;
65445+
65446+ match = &gr_conn_table[index];
65447+ newent->next = *match;
65448+ *match = newent;
65449+
65450+ return;
65451+}
65452+
65453+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
65454+{
65455+ struct conn_table_entry *match, *last = NULL;
65456+ unsigned int index;
65457+
65458+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
65459+ sig->gr_sport, sig->gr_dport,
65460+ gr_conn_table_size);
65461+
65462+ match = gr_conn_table[index];
65463+ while (match && !conn_match(match->sig,
65464+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
65465+ sig->gr_dport)) {
65466+ last = match;
65467+ match = match->next;
65468+ }
65469+
65470+ if (match) {
65471+ if (last)
65472+ last->next = match->next;
65473+ else
65474+ gr_conn_table[index] = NULL;
65475+ kfree(match);
65476+ }
65477+
65478+ return;
65479+}
65480+
65481+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
65482+ __u16 sport, __u16 dport)
65483+{
65484+ struct conn_table_entry *match;
65485+ unsigned int index;
65486+
65487+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
65488+
65489+ match = gr_conn_table[index];
65490+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
65491+ match = match->next;
65492+
65493+ if (match)
65494+ return match->sig;
65495+ else
65496+ return NULL;
65497+}
65498+
65499+#endif
65500+
65501+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
65502+{
65503+#ifdef CONFIG_GRKERNSEC
65504+ struct signal_struct *sig = task->signal;
65505+ struct conn_table_entry *newent;
65506+
65507+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
65508+ if (newent == NULL)
65509+ return;
65510+ /* no bh lock needed since we are called with bh disabled */
65511+ spin_lock(&gr_conn_table_lock);
65512+ gr_del_task_from_ip_table_nolock(sig);
65513+ sig->gr_saddr = inet->inet_rcv_saddr;
65514+ sig->gr_daddr = inet->inet_daddr;
65515+ sig->gr_sport = inet->inet_sport;
65516+ sig->gr_dport = inet->inet_dport;
65517+ gr_add_to_task_ip_table_nolock(sig, newent);
65518+ spin_unlock(&gr_conn_table_lock);
65519+#endif
65520+ return;
65521+}
65522+
65523+void gr_del_task_from_ip_table(struct task_struct *task)
65524+{
65525+#ifdef CONFIG_GRKERNSEC
65526+ spin_lock_bh(&gr_conn_table_lock);
65527+ gr_del_task_from_ip_table_nolock(task->signal);
65528+ spin_unlock_bh(&gr_conn_table_lock);
65529+#endif
65530+ return;
65531+}
65532+
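+/*
+ * On accept(): look the peer up with the tuple swapped (our
+ * daddr/dport are the peer's saddr/sport).  A hit means the peer is
+ * a tracked local task, so inherit its curr_ip and retire its entry;
+ * on a miss, fall back to the peer address on the accepted socket.
+ */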
65533+void
65534+gr_attach_curr_ip(const struct sock *sk)
65535+{
65536+#ifdef CONFIG_GRKERNSEC
65537+ struct signal_struct *p, *set;
65538+ const struct inet_sock *inet = inet_sk(sk);
65539+
65540+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
65541+ return;
65542+
65543+ set = current->signal;
65544+
65545+ spin_lock_bh(&gr_conn_table_lock);
65546+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
65547+ inet->inet_dport, inet->inet_sport);
65548+ if (unlikely(p != NULL)) {
65549+ set->curr_ip = p->curr_ip;
65550+ set->used_accept = 1;
65551+ gr_del_task_from_ip_table_nolock(p);
65552+ spin_unlock_bh(&gr_conn_table_lock);
65553+ return;
65554+ }
65555+ spin_unlock_bh(&gr_conn_table_lock);
65556+
65557+ set->curr_ip = inet->inet_daddr;
65558+ set->used_accept = 1;
65559+#endif
65560+ return;
65561+}
65562+
65563+int
65564+gr_handle_sock_all(const int family, const int type, const int protocol)
65565+{
65566+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
65567+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
65568+ (family != AF_UNIX)) {
65569+ if (family == AF_INET)
65570+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
65571+ else
65572+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
65573+ return -EACCES;
65574+ }
65575+#endif
65576+ return 0;
65577+}
65578+
65579+int
65580+gr_handle_sock_server(const struct sockaddr *sck)
65581+{
65582+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
65583+ if (grsec_enable_socket_server &&
65584+ in_group_p(grsec_socket_server_gid) &&
65585+ sck && (sck->sa_family != AF_UNIX) &&
65586+ (sck->sa_family != AF_LOCAL)) {
65587+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
65588+ return -EACCES;
65589+ }
65590+#endif
65591+ return 0;
65592+}
65593+
65594+int
65595+gr_handle_sock_server_other(const struct sock *sck)
65596+{
65597+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
65598+ if (grsec_enable_socket_server &&
65599+ in_group_p(grsec_socket_server_gid) &&
65600+ sck && (sck->sk_family != AF_UNIX) &&
65601+ (sck->sk_family != AF_LOCAL)) {
65602+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
65603+ return -EACCES;
65604+ }
65605+#endif
65606+ return 0;
65607+}
65608+
65609+int
65610+gr_handle_sock_client(const struct sockaddr *sck)
65611+{
65612+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
65613+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
65614+ sck && (sck->sa_family != AF_UNIX) &&
65615+ (sck->sa_family != AF_LOCAL)) {
65616+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
65617+ return -EACCES;
65618+ }
65619+#endif
65620+ return 0;
65621+}
65622diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
65623new file mode 100644
65624index 0000000..f55ef0f
65625--- /dev/null
65626+++ b/grsecurity/grsec_sysctl.c
65627@@ -0,0 +1,469 @@
65628+#include <linux/kernel.h>
65629+#include <linux/sched.h>
65630+#include <linux/sysctl.h>
65631+#include <linux/grsecurity.h>
65632+#include <linux/grinternal.h>
65633+
65634+int
65635+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
65636+{
65637+#ifdef CONFIG_GRKERNSEC_SYSCTL
65638+ if (dirname == NULL || name == NULL)
65639+ return 0;
65640+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
65641+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
65642+ return -EACCES;
65643+ }
65644+#endif
65645+ return 0;
65646+}
65647+
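+/*
+ * All tunables below are root-only (mode 0600) and live under
+ * /proc/sys/kernel/grsecurity/ (the table is presumably registered
+ * there by another hunk of this patch).  Once grsec_lock is set,
+ * gr_handle_sysctl_mod() above makes the whole directory immutable,
+ * e.g.:
+ *
+ *	echo 1 > /proc/sys/kernel/grsecurity/deter_bruteforce
+ *	echo 1 > /proc/sys/kernel/grsecurity/grsec_lock
+ */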
65648+#ifdef CONFIG_GRKERNSEC_ROFS
65649+static int __maybe_unused one = 1;
65650+#endif
65651+
65652+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
65653+struct ctl_table grsecurity_table[] = {
65654+#ifdef CONFIG_GRKERNSEC_SYSCTL
65655+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
65656+#ifdef CONFIG_GRKERNSEC_IO
65657+ {
65658+ .procname = "disable_priv_io",
65659+ .data = &grsec_disable_privio,
65660+ .maxlen = sizeof(int),
65661+ .mode = 0600,
65662+ .proc_handler = &proc_dointvec,
65663+ },
65664+#endif
65665+#endif
65666+#ifdef CONFIG_GRKERNSEC_LINK
65667+ {
65668+ .procname = "linking_restrictions",
65669+ .data = &grsec_enable_link,
65670+ .maxlen = sizeof(int),
65671+ .mode = 0600,
65672+ .proc_handler = &proc_dointvec,
65673+ },
65674+#endif
65675+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
65676+ {
65677+ .procname = "enforce_symlinksifowner",
65678+ .data = &grsec_enable_symlinkown,
65679+ .maxlen = sizeof(int),
65680+ .mode = 0600,
65681+ .proc_handler = &proc_dointvec,
65682+ },
65683+ {
65684+ .procname = "symlinkown_gid",
65685+ .data = &grsec_symlinkown_gid,
65686+ .maxlen = sizeof(int),
65687+ .mode = 0600,
65688+ .proc_handler = &proc_dointvec,
65689+ },
65690+#endif
65691+#ifdef CONFIG_GRKERNSEC_BRUTE
65692+ {
65693+ .procname = "deter_bruteforce",
65694+ .data = &grsec_enable_brute,
65695+ .maxlen = sizeof(int),
65696+ .mode = 0600,
65697+ .proc_handler = &proc_dointvec,
65698+ },
65699+#endif
65700+#ifdef CONFIG_GRKERNSEC_FIFO
65701+ {
65702+ .procname = "fifo_restrictions",
65703+ .data = &grsec_enable_fifo,
65704+ .maxlen = sizeof(int),
65705+ .mode = 0600,
65706+ .proc_handler = &proc_dointvec,
65707+ },
65708+#endif
65709+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
65710+ {
65711+ .procname = "ptrace_readexec",
65712+ .data = &grsec_enable_ptrace_readexec,
65713+ .maxlen = sizeof(int),
65714+ .mode = 0600,
65715+ .proc_handler = &proc_dointvec,
65716+ },
65717+#endif
65718+#ifdef CONFIG_GRKERNSEC_SETXID
65719+ {
65720+ .procname = "consistent_setxid",
65721+ .data = &grsec_enable_setxid,
65722+ .maxlen = sizeof(int),
65723+ .mode = 0600,
65724+ .proc_handler = &proc_dointvec,
65725+ },
65726+#endif
65727+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65728+ {
65729+ .procname = "ip_blackhole",
65730+ .data = &grsec_enable_blackhole,
65731+ .maxlen = sizeof(int),
65732+ .mode = 0600,
65733+ .proc_handler = &proc_dointvec,
65734+ },
65735+ {
65736+ .procname = "lastack_retries",
65737+ .data = &grsec_lastack_retries,
65738+ .maxlen = sizeof(int),
65739+ .mode = 0600,
65740+ .proc_handler = &proc_dointvec,
65741+ },
65742+#endif
65743+#ifdef CONFIG_GRKERNSEC_EXECLOG
65744+ {
65745+ .procname = "exec_logging",
65746+ .data = &grsec_enable_execlog,
65747+ .maxlen = sizeof(int),
65748+ .mode = 0600,
65749+ .proc_handler = &proc_dointvec,
65750+ },
65751+#endif
65752+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
65753+ {
65754+ .procname = "rwxmap_logging",
65755+ .data = &grsec_enable_log_rwxmaps,
65756+ .maxlen = sizeof(int),
65757+ .mode = 0600,
65758+ .proc_handler = &proc_dointvec,
65759+ },
65760+#endif
65761+#ifdef CONFIG_GRKERNSEC_SIGNAL
65762+ {
65763+ .procname = "signal_logging",
65764+ .data = &grsec_enable_signal,
65765+ .maxlen = sizeof(int),
65766+ .mode = 0600,
65767+ .proc_handler = &proc_dointvec,
65768+ },
65769+#endif
65770+#ifdef CONFIG_GRKERNSEC_FORKFAIL
65771+ {
65772+ .procname = "forkfail_logging",
65773+ .data = &grsec_enable_forkfail,
65774+ .maxlen = sizeof(int),
65775+ .mode = 0600,
65776+ .proc_handler = &proc_dointvec,
65777+ },
65778+#endif
65779+#ifdef CONFIG_GRKERNSEC_TIME
65780+ {
65781+ .procname = "timechange_logging",
65782+ .data = &grsec_enable_time,
65783+ .maxlen = sizeof(int),
65784+ .mode = 0600,
65785+ .proc_handler = &proc_dointvec,
65786+ },
65787+#endif
65788+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
65789+ {
65790+ .procname = "chroot_deny_shmat",
65791+ .data = &grsec_enable_chroot_shmat,
65792+ .maxlen = sizeof(int),
65793+ .mode = 0600,
65794+ .proc_handler = &proc_dointvec,
65795+ },
65796+#endif
65797+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
65798+ {
65799+ .procname = "chroot_deny_unix",
65800+ .data = &grsec_enable_chroot_unix,
65801+ .maxlen = sizeof(int),
65802+ .mode = 0600,
65803+ .proc_handler = &proc_dointvec,
65804+ },
65805+#endif
65806+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
65807+ {
65808+ .procname = "chroot_deny_mount",
65809+ .data = &grsec_enable_chroot_mount,
65810+ .maxlen = sizeof(int),
65811+ .mode = 0600,
65812+ .proc_handler = &proc_dointvec,
65813+ },
65814+#endif
65815+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
65816+ {
65817+ .procname = "chroot_deny_fchdir",
65818+ .data = &grsec_enable_chroot_fchdir,
65819+ .maxlen = sizeof(int),
65820+ .mode = 0600,
65821+ .proc_handler = &proc_dointvec,
65822+ },
65823+#endif
65824+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
65825+ {
65826+ .procname = "chroot_deny_chroot",
65827+ .data = &grsec_enable_chroot_double,
65828+ .maxlen = sizeof(int),
65829+ .mode = 0600,
65830+ .proc_handler = &proc_dointvec,
65831+ },
65832+#endif
65833+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
65834+ {
65835+ .procname = "chroot_deny_pivot",
65836+ .data = &grsec_enable_chroot_pivot,
65837+ .maxlen = sizeof(int),
65838+ .mode = 0600,
65839+ .proc_handler = &proc_dointvec,
65840+ },
65841+#endif
65842+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
65843+ {
65844+ .procname = "chroot_enforce_chdir",
65845+ .data = &grsec_enable_chroot_chdir,
65846+ .maxlen = sizeof(int),
65847+ .mode = 0600,
65848+ .proc_handler = &proc_dointvec,
65849+ },
65850+#endif
65851+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
65852+ {
65853+ .procname = "chroot_deny_chmod",
65854+ .data = &grsec_enable_chroot_chmod,
65855+ .maxlen = sizeof(int),
65856+ .mode = 0600,
65857+ .proc_handler = &proc_dointvec,
65858+ },
65859+#endif
65860+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
65861+ {
65862+ .procname = "chroot_deny_mknod",
65863+ .data = &grsec_enable_chroot_mknod,
65864+ .maxlen = sizeof(int),
65865+ .mode = 0600,
65866+ .proc_handler = &proc_dointvec,
65867+ },
65868+#endif
65869+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
65870+ {
65871+ .procname = "chroot_restrict_nice",
65872+ .data = &grsec_enable_chroot_nice,
65873+ .maxlen = sizeof(int),
65874+ .mode = 0600,
65875+ .proc_handler = &proc_dointvec,
65876+ },
65877+#endif
65878+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
65879+ {
65880+ .procname = "chroot_execlog",
65881+ .data = &grsec_enable_chroot_execlog,
65882+ .maxlen = sizeof(int),
65883+ .mode = 0600,
65884+ .proc_handler = &proc_dointvec,
65885+ },
65886+#endif
65887+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
65888+ {
65889+ .procname = "chroot_caps",
65890+ .data = &grsec_enable_chroot_caps,
65891+ .maxlen = sizeof(int),
65892+ .mode = 0600,
65893+ .proc_handler = &proc_dointvec,
65894+ },
65895+#endif
65896+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
65897+ {
65898+ .procname = "chroot_deny_sysctl",
65899+ .data = &grsec_enable_chroot_sysctl,
65900+ .maxlen = sizeof(int),
65901+ .mode = 0600,
65902+ .proc_handler = &proc_dointvec,
65903+ },
65904+#endif
65905+#ifdef CONFIG_GRKERNSEC_TPE
65906+ {
65907+ .procname = "tpe",
65908+ .data = &grsec_enable_tpe,
65909+ .maxlen = sizeof(int),
65910+ .mode = 0600,
65911+ .proc_handler = &proc_dointvec,
65912+ },
65913+ {
65914+ .procname = "tpe_gid",
65915+ .data = &grsec_tpe_gid,
65916+ .maxlen = sizeof(int),
65917+ .mode = 0600,
65918+ .proc_handler = &proc_dointvec,
65919+ },
65920+#endif
65921+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
65922+ {
65923+ .procname = "tpe_invert",
65924+ .data = &grsec_enable_tpe_invert,
65925+ .maxlen = sizeof(int),
65926+ .mode = 0600,
65927+ .proc_handler = &proc_dointvec,
65928+ },
65929+#endif
65930+#ifdef CONFIG_GRKERNSEC_TPE_ALL
65931+ {
65932+ .procname = "tpe_restrict_all",
65933+ .data = &grsec_enable_tpe_all,
65934+ .maxlen = sizeof(int),
65935+ .mode = 0600,
65936+ .proc_handler = &proc_dointvec,
65937+ },
65938+#endif
65939+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
65940+ {
65941+ .procname = "socket_all",
65942+ .data = &grsec_enable_socket_all,
65943+ .maxlen = sizeof(int),
65944+ .mode = 0600,
65945+ .proc_handler = &proc_dointvec,
65946+ },
65947+ {
65948+ .procname = "socket_all_gid",
65949+ .data = &grsec_socket_all_gid,
65950+ .maxlen = sizeof(int),
65951+ .mode = 0600,
65952+ .proc_handler = &proc_dointvec,
65953+ },
65954+#endif
65955+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
65956+ {
65957+ .procname = "socket_client",
65958+ .data = &grsec_enable_socket_client,
65959+ .maxlen = sizeof(int),
65960+ .mode = 0600,
65961+ .proc_handler = &proc_dointvec,
65962+ },
65963+ {
65964+ .procname = "socket_client_gid",
65965+ .data = &grsec_socket_client_gid,
65966+ .maxlen = sizeof(int),
65967+ .mode = 0600,
65968+ .proc_handler = &proc_dointvec,
65969+ },
65970+#endif
65971+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
65972+ {
65973+ .procname = "socket_server",
65974+ .data = &grsec_enable_socket_server,
65975+ .maxlen = sizeof(int),
65976+ .mode = 0600,
65977+ .proc_handler = &proc_dointvec,
65978+ },
65979+ {
65980+ .procname = "socket_server_gid",
65981+ .data = &grsec_socket_server_gid,
65982+ .maxlen = sizeof(int),
65983+ .mode = 0600,
65984+ .proc_handler = &proc_dointvec,
65985+ },
65986+#endif
65987+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
65988+ {
65989+ .procname = "audit_group",
65990+ .data = &grsec_enable_group,
65991+ .maxlen = sizeof(int),
65992+ .mode = 0600,
65993+ .proc_handler = &proc_dointvec,
65994+ },
65995+ {
65996+ .procname = "audit_gid",
65997+ .data = &grsec_audit_gid,
65998+ .maxlen = sizeof(int),
65999+ .mode = 0600,
66000+ .proc_handler = &proc_dointvec,
66001+ },
66002+#endif
66003+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
66004+ {
66005+ .procname = "audit_chdir",
66006+ .data = &grsec_enable_chdir,
66007+ .maxlen = sizeof(int),
66008+ .mode = 0600,
66009+ .proc_handler = &proc_dointvec,
66010+ },
66011+#endif
66012+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
66013+ {
66014+ .procname = "audit_mount",
66015+ .data = &grsec_enable_mount,
66016+ .maxlen = sizeof(int),
66017+ .mode = 0600,
66018+ .proc_handler = &proc_dointvec,
66019+ },
66020+#endif
66021+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
66022+ {
66023+ .procname = "audit_textrel",
66024+ .data = &grsec_enable_audit_textrel,
66025+ .maxlen = sizeof(int),
66026+ .mode = 0600,
66027+ .proc_handler = &proc_dointvec,
66028+ },
66029+#endif
66030+#ifdef CONFIG_GRKERNSEC_DMESG
66031+ {
66032+ .procname = "dmesg",
66033+ .data = &grsec_enable_dmesg,
66034+ .maxlen = sizeof(int),
66035+ .mode = 0600,
66036+ .proc_handler = &proc_dointvec,
66037+ },
66038+#endif
66039+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
66040+ {
66041+ .procname = "chroot_findtask",
66042+ .data = &grsec_enable_chroot_findtask,
66043+ .maxlen = sizeof(int),
66044+ .mode = 0600,
66045+ .proc_handler = &proc_dointvec,
66046+ },
66047+#endif
66048+#ifdef CONFIG_GRKERNSEC_RESLOG
66049+ {
66050+ .procname = "resource_logging",
66051+ .data = &grsec_resource_logging,
66052+ .maxlen = sizeof(int),
66053+ .mode = 0600,
66054+ .proc_handler = &proc_dointvec,
66055+ },
66056+#endif
66057+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
66058+ {
66059+ .procname = "audit_ptrace",
66060+ .data = &grsec_enable_audit_ptrace,
66061+ .maxlen = sizeof(int),
66062+ .mode = 0600,
66063+ .proc_handler = &proc_dointvec,
66064+ },
66065+#endif
66066+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
66067+ {
66068+ .procname = "harden_ptrace",
66069+ .data = &grsec_enable_harden_ptrace,
66070+ .maxlen = sizeof(int),
66071+ .mode = 0600,
66072+ .proc_handler = &proc_dointvec,
66073+ },
66074+#endif
66075+ {
66076+ .procname = "grsec_lock",
66077+ .data = &grsec_lock,
66078+ .maxlen = sizeof(int),
66079+ .mode = 0600,
66080+ .proc_handler = &proc_dointvec,
66081+ },
66082+#endif
66083+#ifdef CONFIG_GRKERNSEC_ROFS
66084+ {
66085+ .procname = "romount_protect",
66086+ .data = &grsec_enable_rofs,
66087+ .maxlen = sizeof(int),
66088+ .mode = 0600,
66089+ .proc_handler = &proc_dointvec_minmax,
66090+ .extra1 = &one,
66091+ .extra2 = &one,
66092+ },
66093+#endif
66094+ { }
66095+};
66096+#endif
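grsecurity_table above only defines the entries; the registration that hooks it under /proc/sys/kernel/grsecurity lives elsewhere in the patch. Two details worth noting: every entry is mode 0600, so only root can read or toggle it, and romount_protect uses proc_dointvec_minmax with extra1 and extra2 both pointing at the static one, clamping the knob so it can only ever be written to 1 (a one-way enable). A minimal sketch of how a table like this is attached with the stock 3.8 sysctl API, with the wrapper tables invented for illustration:

/* Sketch only: the directory-entry names and init function here are
 * hypothetical; register_sysctl_table() is the real 3.8-era API. */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sysctl.h>

extern struct ctl_table grsecurity_table[];	/* from grsec_sysctl.c */

static struct ctl_table grsec_dir_table[] = {
	{
		.procname	= "grsecurity",
		.mode		= 0500,
		.child		= grsecurity_table,
	},
	{ }
};

static struct ctl_table grsec_root_table[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= grsec_dir_table,
	},
	{ }
};

static int __init grsec_sysctl_example_init(void)
{
	/* writes later pass through gr_handle_sysctl_mod(), which
	 * rejects them once grsec_lock has been set nonzero */
	return register_sysctl_table(grsec_root_table) ? 0 : -ENOMEM;
}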
66097diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
66098new file mode 100644
66099index 0000000..0dc13c3
66100--- /dev/null
66101+++ b/grsecurity/grsec_time.c
66102@@ -0,0 +1,16 @@
66103+#include <linux/kernel.h>
66104+#include <linux/sched.h>
66105+#include <linux/grinternal.h>
66106+#include <linux/module.h>
66107+
66108+void
66109+gr_log_timechange(void)
66110+{
66111+#ifdef CONFIG_GRKERNSEC_TIME
66112+ if (grsec_enable_time)
66113+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
66114+#endif
66115+ return;
66116+}
66117+
66118+EXPORT_SYMBOL(gr_log_timechange);
66119diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
66120new file mode 100644
66121index 0000000..ee57dcf
66122--- /dev/null
66123+++ b/grsecurity/grsec_tpe.c
66124@@ -0,0 +1,73 @@
66125+#include <linux/kernel.h>
66126+#include <linux/sched.h>
66127+#include <linux/file.h>
66128+#include <linux/fs.h>
66129+#include <linux/grinternal.h>
66130+
66131+extern int gr_acl_tpe_check(void);
66132+
66133+int
66134+gr_tpe_allow(const struct file *file)
66135+{
66136+#ifdef CONFIG_GRKERNSEC
66137+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
66138+ const struct cred *cred = current_cred();
66139+ char *msg = NULL;
66140+ char *msg2 = NULL;
66141+
66142+ // never restrict root
66143+ if (gr_is_global_root(cred->uid))
66144+ return 1;
66145+
66146+ if (grsec_enable_tpe) {
66147+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
66148+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
66149+ msg = "not being in trusted group";
66150+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
66151+ msg = "being in untrusted group";
66152+#else
66153+ if (in_group_p(grsec_tpe_gid))
66154+ msg = "being in untrusted group";
66155+#endif
66156+ }
66157+ if (!msg && gr_acl_tpe_check())
66158+ msg = "being in untrusted role";
66159+
66160+ // not in any affected group/role
66161+ if (!msg)
66162+ goto next_check;
66163+
66164+ if (gr_is_global_nonroot(inode->i_uid))
66165+ msg2 = "file in non-root-owned directory";
66166+ else if (inode->i_mode & S_IWOTH)
66167+ msg2 = "file in world-writable directory";
66168+ else if (inode->i_mode & S_IWGRP)
66169+ msg2 = "file in group-writable directory";
66170+
66171+ if (msg && msg2) {
66172+ char fullmsg[70] = {0};
66173+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
66174+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
66175+ return 0;
66176+ }
66177+ msg = NULL;
66178+next_check:
66179+#ifdef CONFIG_GRKERNSEC_TPE_ALL
66180+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
66181+ return 1;
66182+
66183+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
66184+ msg = "directory not owned by user";
66185+ else if (inode->i_mode & S_IWOTH)
66186+ msg = "file in world-writable directory";
66187+ else if (inode->i_mode & S_IWGRP)
66188+ msg = "file in group-writable directory";
66189+
66190+ if (msg) {
66191+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
66192+ return 0;
66193+ }
66194+#endif
66195+#endif
66196+ return 1;
66197+}
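gr_tpe_allow() is a two-stage test on the directory containing the binary (note that inode is the parent's d_inode, not the file's own). Stage one applies only to callers in a TPE-affected group or role and denies execution from any directory that is non-root-owned, group-writable, or world-writable; stage two, compiled in with CONFIG_GRKERNSEC_TPE_ALL, tightens the directory check for everyone, additionally rejecting directories owned by a non-root user other than the caller. A condensed restatement, for illustration only, with the logging removed and plain uid_t values standing in for the kuid_t helpers (gr_is_global_nonroot, uid_eq) used in the patch:

/* Illustrative summary of the decision above; "tpe_all" stands for
 * grsec_enable_tpe && grsec_enable_tpe_all both being set. */
#include <linux/stat.h>
#include <linux/types.h>

static int tpe_denies(uid_t dir_uid, umode_t dir_mode,
		      int caller_affected, int tpe_all, uid_t caller_uid)
{
	int dir_writable = dir_mode & (S_IWGRP | S_IWOTH);

	/* stage 1: caller is in an untrusted group/role */
	if (caller_affected && (dir_uid != 0 || dir_writable))
		return 1;
	/* stage 2: TPE_ALL tightens the directory test for everyone */
	if (tpe_all &&
	    ((dir_uid != 0 && dir_uid != caller_uid) || dir_writable))
		return 1;
	return 0;
}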
66198diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
66199new file mode 100644
66200index 0000000..9f7b1ac
66201--- /dev/null
66202+++ b/grsecurity/grsum.c
66203@@ -0,0 +1,61 @@
66204+#include <linux/err.h>
66205+#include <linux/kernel.h>
66206+#include <linux/sched.h>
66207+#include <linux/mm.h>
66208+#include <linux/scatterlist.h>
66209+#include <linux/crypto.h>
66210+#include <linux/gracl.h>
66211+
66212+
66213+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
66214+#error "crypto and sha256 must be built into the kernel"
66215+#endif
66216+
66217+int
66218+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
66219+{
66220+ char *p;
66221+ struct crypto_hash *tfm;
66222+ struct hash_desc desc;
66223+ struct scatterlist sg;
66224+ unsigned char temp_sum[GR_SHA_LEN];
66225+ volatile int retval = 0;
66226+ volatile int dummy = 0;
66227+ unsigned int i;
66228+
66229+ sg_init_table(&sg, 1);
66230+
66231+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
66232+ if (IS_ERR(tfm)) {
66233+ /* should never happen, since sha256 should be built in */
66234+ return 1;
66235+ }
66236+
66237+ desc.tfm = tfm;
66238+ desc.flags = 0;
66239+
66240+ crypto_hash_init(&desc);
66241+
66242+ p = salt;
66243+ sg_set_buf(&sg, p, GR_SALT_LEN);
66244+ crypto_hash_update(&desc, &sg, sg.length);
66245+
66246+ p = entry->pw;
66247+ sg_set_buf(&sg, p, strlen(p));
66248+
66249+ crypto_hash_update(&desc, &sg, sg.length);
66250+
66251+ crypto_hash_final(&desc, temp_sum);
66252+
66253+ memset(entry->pw, 0, GR_PW_LEN);
66254+
66255+ for (i = 0; i < GR_SHA_LEN; i++)
66256+ if (sum[i] != temp_sum[i])
66257+ retval = 1;
66258+ else
66259+ dummy = 1; // waste a cycle
66260+
66261+ crypto_free_hash(tfm);
66262+
66263+ return retval;
66264+}
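The comparison loop in chkpw() deliberately walks all GR_SHA_LEN bytes instead of returning at the first mismatch; the volatile qualifiers and the dummy write in the else branch keep the compiler from optimizing the loop back into an early-exit memcmp(), so hash comparison time does not leak how many leading bytes matched. Note also that entry->pw is zeroed before the verdict is computed. The same idea in a generic routine (a sketch; later kernels grew crypto_memneq() for this purpose):

/* Generic constant-time comparison in the same spirit as the chkpw()
 * loop above (illustration only). */
static int ct_memneq(const unsigned char *a, const unsigned char *b,
		     unsigned int len)
{
	unsigned char diff = 0;
	unsigned int i;

	for (i = 0; i < len; i++)
		diff |= a[i] ^ b[i];	/* no early exit on mismatch */

	return diff != 0;		/* 1 if the buffers differ */
}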
66265diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
66266index 77ff547..181834f 100644
66267--- a/include/asm-generic/4level-fixup.h
66268+++ b/include/asm-generic/4level-fixup.h
66269@@ -13,8 +13,10 @@
66270 #define pmd_alloc(mm, pud, address) \
66271 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
66272 NULL: pmd_offset(pud, address))
66273+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
66274
66275 #define pud_alloc(mm, pgd, address) (pgd)
66276+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
66277 #define pud_offset(pgd, start) (pgd)
66278 #define pud_none(pud) 0
66279 #define pud_bad(pud) 0
66280diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
66281index b7babf0..04ad282 100644
66282--- a/include/asm-generic/atomic-long.h
66283+++ b/include/asm-generic/atomic-long.h
66284@@ -22,6 +22,12 @@
66285
66286 typedef atomic64_t atomic_long_t;
66287
66288+#ifdef CONFIG_PAX_REFCOUNT
66289+typedef atomic64_unchecked_t atomic_long_unchecked_t;
66290+#else
66291+typedef atomic64_t atomic_long_unchecked_t;
66292+#endif
66293+
66294 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
66295
66296 static inline long atomic_long_read(atomic_long_t *l)
66297@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
66298 return (long)atomic64_read(v);
66299 }
66300
66301+#ifdef CONFIG_PAX_REFCOUNT
66302+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
66303+{
66304+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66305+
66306+ return (long)atomic64_read_unchecked(v);
66307+}
66308+#endif
66309+
66310 static inline void atomic_long_set(atomic_long_t *l, long i)
66311 {
66312 atomic64_t *v = (atomic64_t *)l;
66313@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
66314 atomic64_set(v, i);
66315 }
66316
66317+#ifdef CONFIG_PAX_REFCOUNT
66318+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
66319+{
66320+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66321+
66322+ atomic64_set_unchecked(v, i);
66323+}
66324+#endif
66325+
66326 static inline void atomic_long_inc(atomic_long_t *l)
66327 {
66328 atomic64_t *v = (atomic64_t *)l;
66329@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
66330 atomic64_inc(v);
66331 }
66332
66333+#ifdef CONFIG_PAX_REFCOUNT
66334+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
66335+{
66336+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66337+
66338+ atomic64_inc_unchecked(v);
66339+}
66340+#endif
66341+
66342 static inline void atomic_long_dec(atomic_long_t *l)
66343 {
66344 atomic64_t *v = (atomic64_t *)l;
66345@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
66346 atomic64_dec(v);
66347 }
66348
66349+#ifdef CONFIG_PAX_REFCOUNT
66350+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
66351+{
66352+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66353+
66354+ atomic64_dec_unchecked(v);
66355+}
66356+#endif
66357+
66358 static inline void atomic_long_add(long i, atomic_long_t *l)
66359 {
66360 atomic64_t *v = (atomic64_t *)l;
66361@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
66362 atomic64_add(i, v);
66363 }
66364
66365+#ifdef CONFIG_PAX_REFCOUNT
66366+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
66367+{
66368+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66369+
66370+ atomic64_add_unchecked(i, v);
66371+}
66372+#endif
66373+
66374 static inline void atomic_long_sub(long i, atomic_long_t *l)
66375 {
66376 atomic64_t *v = (atomic64_t *)l;
66377@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
66378 atomic64_sub(i, v);
66379 }
66380
66381+#ifdef CONFIG_PAX_REFCOUNT
66382+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
66383+{
66384+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66385+
66386+ atomic64_sub_unchecked(i, v);
66387+}
66388+#endif
66389+
66390 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
66391 {
66392 atomic64_t *v = (atomic64_t *)l;
66393@@ -101,6 +161,15 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
66394 return (long)atomic64_add_return(i, v);
66395 }
66396
66397+#ifdef CONFIG_PAX_REFCOUNT
66398+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
66399+{
66400+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66401+
66402+ return (long)atomic64_add_return_unchecked(i, v);
66403+}
66404+#endif
66405+
66406 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
66407 {
66408 atomic64_t *v = (atomic64_t *)l;
66409@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
66410 return (long)atomic64_inc_return(v);
66411 }
66412
66413+#ifdef CONFIG_PAX_REFCOUNT
66414+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
66415+{
66416+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66417+
66418+ return (long)atomic64_inc_return_unchecked(v);
66419+}
66420+#endif
66421+
66422 static inline long atomic_long_dec_return(atomic_long_t *l)
66423 {
66424 atomic64_t *v = (atomic64_t *)l;
66425@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
66426
66427 typedef atomic_t atomic_long_t;
66428
66429+#ifdef CONFIG_PAX_REFCOUNT
66430+typedef atomic_unchecked_t atomic_long_unchecked_t;
66431+#else
66432+typedef atomic_t atomic_long_unchecked_t;
66433+#endif
66434+
66435 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
66436 static inline long atomic_long_read(atomic_long_t *l)
66437 {
66438@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
66439 return (long)atomic_read(v);
66440 }
66441
66442+#ifdef CONFIG_PAX_REFCOUNT
66443+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
66444+{
66445+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66446+
66447+ return (long)atomic_read_unchecked(v);
66448+}
66449+#endif
66450+
66451 static inline void atomic_long_set(atomic_long_t *l, long i)
66452 {
66453 atomic_t *v = (atomic_t *)l;
66454@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
66455 atomic_set(v, i);
66456 }
66457
66458+#ifdef CONFIG_PAX_REFCOUNT
66459+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
66460+{
66461+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66462+
66463+ atomic_set_unchecked(v, i);
66464+}
66465+#endif
66466+
66467 static inline void atomic_long_inc(atomic_long_t *l)
66468 {
66469 atomic_t *v = (atomic_t *)l;
66470@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
66471 atomic_inc(v);
66472 }
66473
66474+#ifdef CONFIG_PAX_REFCOUNT
66475+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
66476+{
66477+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66478+
66479+ atomic_inc_unchecked(v);
66480+}
66481+#endif
66482+
66483 static inline void atomic_long_dec(atomic_long_t *l)
66484 {
66485 atomic_t *v = (atomic_t *)l;
66486@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
66487 atomic_dec(v);
66488 }
66489
66490+#ifdef CONFIG_PAX_REFCOUNT
66491+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
66492+{
66493+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66494+
66495+ atomic_dec_unchecked(v);
66496+}
66497+#endif
66498+
66499 static inline void atomic_long_add(long i, atomic_long_t *l)
66500 {
66501 atomic_t *v = (atomic_t *)l;
66502@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
66503 atomic_add(i, v);
66504 }
66505
66506+#ifdef CONFIG_PAX_REFCOUNT
66507+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
66508+{
66509+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66510+
66511+ atomic_add_unchecked(i, v);
66512+}
66513+#endif
66514+
66515 static inline void atomic_long_sub(long i, atomic_long_t *l)
66516 {
66517 atomic_t *v = (atomic_t *)l;
66518@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
66519 atomic_sub(i, v);
66520 }
66521
66522+#ifdef CONFIG_PAX_REFCOUNT
66523+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
66524+{
66525+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66526+
66527+ atomic_sub_unchecked(i, v);
66528+}
66529+#endif
66530+
66531 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
66532 {
66533 atomic_t *v = (atomic_t *)l;
66534@@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
66535 return (long)atomic_add_return(i, v);
66536 }
66537
66538+#ifdef CONFIG_PAX_REFCOUNT
66539+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
66540+{
66541+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66542+
66543+ return (long)atomic_add_return_unchecked(i, v);
66544+}
66545+
66546+#endif
66547+
66548 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
66549 {
66550 atomic_t *v = (atomic_t *)l;
66551@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
66552 return (long)atomic_inc_return(v);
66553 }
66554
66555+#ifdef CONFIG_PAX_REFCOUNT
66556+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
66557+{
66558+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66559+
66560+ return (long)atomic_inc_return_unchecked(v);
66561+}
66562+#endif
66563+
66564 static inline long atomic_long_dec_return(atomic_long_t *l)
66565 {
66566 atomic_t *v = (atomic_t *)l;
66567@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
66568
66569 #endif /* BITS_PER_LONG == 64 */
66570
66571+#ifdef CONFIG_PAX_REFCOUNT
66572+static inline void pax_refcount_needs_these_functions(void)
66573+{
66574+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
66575+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
66576+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
66577+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
66578+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
66579+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
66580+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
66581+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
66582+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
66583+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
66584+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
66585+#ifdef CONFIG_X86
66586+ atomic_clear_mask_unchecked(0, NULL);
66587+ atomic_set_mask_unchecked(0, NULL);
66588+#endif
66589+
66590+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
66591+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
66592+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
66593+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
66594+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
66595+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
66596+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
66597+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
66598+}
66599+#else
66600+#define atomic_read_unchecked(v) atomic_read(v)
66601+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
66602+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
66603+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
66604+#define atomic_inc_unchecked(v) atomic_inc(v)
66605+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
66606+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
66607+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
66608+#define atomic_dec_unchecked(v) atomic_dec(v)
66609+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
66610+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
66611+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
66612+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
66613+
66614+#define atomic_long_read_unchecked(v) atomic_long_read(v)
66615+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
66616+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
66617+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
66618+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
66619+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
66620+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
66621+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
66622+#endif
66623+
66624 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
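The pattern in this header repeats across the patch: under CONFIG_PAX_REFCOUNT the ordinary atomic types trap on overflow, and the *_unchecked variants opt out for counters where wraparound is expected and harmless, such as statistics; without the feature, the #define block at the end aliases every unchecked operation straight back to the checked one. pax_refcount_needs_these_functions() is never called — it exists only so that an architecture missing any of the unchecked primitives fails at compile time rather than at link or run time. An illustrative (hypothetical) use of the two flavours:

/* Hypothetical consumer: a reference count must trap on overflow, so
 * it stays a checked atomic_long_t; a statistics counter may
 * legitimately wrap, so it uses the _unchecked type to avoid false
 * positives from PAX_REFCOUNT. */
#include <linux/atomic.h>

struct example_obj {
	atomic_long_t		refcnt;		/* overflow = bug, checked */
	atomic_long_unchecked_t	rx_packets;	/* may wrap, unchecked */
};

static void example_get(struct example_obj *obj)
{
	atomic_long_inc(&obj->refcnt);			/* checked */
}

static void example_count_rx(struct example_obj *obj)
{
	atomic_long_inc_unchecked(&obj->rx_packets);	/* never traps */
}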
66625diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
66626index 1ced641..c896ee8 100644
66627--- a/include/asm-generic/atomic.h
66628+++ b/include/asm-generic/atomic.h
66629@@ -159,7 +159,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
66630 * Atomically clears the bits set in @mask from @v
66631 */
66632 #ifndef atomic_clear_mask
66633-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
66634+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
66635 {
66636 unsigned long flags;
66637
66638diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
66639index b18ce4f..2ee2843 100644
66640--- a/include/asm-generic/atomic64.h
66641+++ b/include/asm-generic/atomic64.h
66642@@ -16,6 +16,8 @@ typedef struct {
66643 long long counter;
66644 } atomic64_t;
66645
66646+typedef atomic64_t atomic64_unchecked_t;
66647+
66648 #define ATOMIC64_INIT(i) { (i) }
66649
66650 extern long long atomic64_read(const atomic64_t *v);
66651@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
66652 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
66653 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
66654
66655+#define atomic64_read_unchecked(v) atomic64_read(v)
66656+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
66657+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
66658+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
66659+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
66660+#define atomic64_inc_unchecked(v) atomic64_inc(v)
66661+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
66662+#define atomic64_dec_unchecked(v) atomic64_dec(v)
66663+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
66664+
66665 #endif /* _ASM_GENERIC_ATOMIC64_H */
66666diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
66667index 1bfcfe5..e04c5c9 100644
66668--- a/include/asm-generic/cache.h
66669+++ b/include/asm-generic/cache.h
66670@@ -6,7 +6,7 @@
66671 * cache lines need to provide their own cache.h.
66672 */
66673
66674-#define L1_CACHE_SHIFT 5
66675-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
66676+#define L1_CACHE_SHIFT 5UL
66677+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
66678
66679 #endif /* __ASM_GENERIC_CACHE_H */
66680diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
66681index 0d68a1e..b74a761 100644
66682--- a/include/asm-generic/emergency-restart.h
66683+++ b/include/asm-generic/emergency-restart.h
66684@@ -1,7 +1,7 @@
66685 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
66686 #define _ASM_GENERIC_EMERGENCY_RESTART_H
66687
66688-static inline void machine_emergency_restart(void)
66689+static inline __noreturn void machine_emergency_restart(void)
66690 {
66691 machine_restart(NULL);
66692 }
66693diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
66694index 90f99c7..00ce236 100644
66695--- a/include/asm-generic/kmap_types.h
66696+++ b/include/asm-generic/kmap_types.h
66697@@ -2,9 +2,9 @@
66698 #define _ASM_GENERIC_KMAP_TYPES_H
66699
66700 #ifdef __WITH_KM_FENCE
66701-# define KM_TYPE_NR 41
66702+# define KM_TYPE_NR 42
66703 #else
66704-# define KM_TYPE_NR 20
66705+# define KM_TYPE_NR 21
66706 #endif
66707
66708 #endif
66709diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
66710index 9ceb03b..62b0b8f 100644
66711--- a/include/asm-generic/local.h
66712+++ b/include/asm-generic/local.h
66713@@ -23,24 +23,37 @@ typedef struct
66714 atomic_long_t a;
66715 } local_t;
66716
66717+typedef struct {
66718+ atomic_long_unchecked_t a;
66719+} local_unchecked_t;
66720+
66721 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
66722
66723 #define local_read(l) atomic_long_read(&(l)->a)
66724+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
66725 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
66726+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
66727 #define local_inc(l) atomic_long_inc(&(l)->a)
66728+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
66729 #define local_dec(l) atomic_long_dec(&(l)->a)
66730+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
66731 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
66732+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
66733 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
66734+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
66735
66736 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
66737 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
66738 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
66739 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
66740 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
66741+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
66742 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
66743 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
66744+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
66745
66746 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
66747+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
66748 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
66749 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
66750 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
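local_unchecked_t mirrors the atomic_long split for per-CPU counters. One subtlety visible above: local_cmpxchg_unchecked() intentionally maps to the plain atomic_long_cmpxchg(), since cmpxchg stores a caller-supplied value and cannot silently overflow a refcount, so no separate unchecked backend is needed. A small sketch (names hypothetical) of the intended consumer, a wrap-tolerant per-CPU event counter:

/* Hypothetical per-CPU event counter that may wrap, so it uses
 * local_unchecked_t; updated only by the owning CPU. */
#include <asm/local.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(local_unchecked_t, example_events);

static void example_mark_event(void)
{
	local_inc_unchecked(this_cpu_ptr(&example_events));
}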
66751diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
66752index 725612b..9cc513a 100644
66753--- a/include/asm-generic/pgtable-nopmd.h
66754+++ b/include/asm-generic/pgtable-nopmd.h
66755@@ -1,14 +1,19 @@
66756 #ifndef _PGTABLE_NOPMD_H
66757 #define _PGTABLE_NOPMD_H
66758
66759-#ifndef __ASSEMBLY__
66760-
66761 #include <asm-generic/pgtable-nopud.h>
66762
66763-struct mm_struct;
66764-
66765 #define __PAGETABLE_PMD_FOLDED
66766
66767+#define PMD_SHIFT PUD_SHIFT
66768+#define PTRS_PER_PMD 1
66769+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
66770+#define PMD_MASK (~(PMD_SIZE-1))
66771+
66772+#ifndef __ASSEMBLY__
66773+
66774+struct mm_struct;
66775+
66776 /*
66777 * Having the pmd type consist of a pud gets the size right, and allows
66778 * us to conceptually access the pud entry that this pmd is folded into
66779@@ -16,11 +21,6 @@ struct mm_struct;
66780 */
66781 typedef struct { pud_t pud; } pmd_t;
66782
66783-#define PMD_SHIFT PUD_SHIFT
66784-#define PTRS_PER_PMD 1
66785-#define PMD_SIZE (1UL << PMD_SHIFT)
66786-#define PMD_MASK (~(PMD_SIZE-1))
66787-
66788 /*
66789 * The "pud_xxx()" functions here are trivial for a folded two-level
66790 * setup: the pmd is never bad, and a pmd always exists (as it's folded
66791diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
66792index 810431d..0ec4804f 100644
66793--- a/include/asm-generic/pgtable-nopud.h
66794+++ b/include/asm-generic/pgtable-nopud.h
66795@@ -1,10 +1,15 @@
66796 #ifndef _PGTABLE_NOPUD_H
66797 #define _PGTABLE_NOPUD_H
66798
66799-#ifndef __ASSEMBLY__
66800-
66801 #define __PAGETABLE_PUD_FOLDED
66802
66803+#define PUD_SHIFT PGDIR_SHIFT
66804+#define PTRS_PER_PUD 1
66805+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
66806+#define PUD_MASK (~(PUD_SIZE-1))
66807+
66808+#ifndef __ASSEMBLY__
66809+
66810 /*
66811 * Having the pud type consist of a pgd gets the size right, and allows
66812 * us to conceptually access the pgd entry that this pud is folded into
66813@@ -12,11 +17,6 @@
66814 */
66815 typedef struct { pgd_t pgd; } pud_t;
66816
66817-#define PUD_SHIFT PGDIR_SHIFT
66818-#define PTRS_PER_PUD 1
66819-#define PUD_SIZE (1UL << PUD_SHIFT)
66820-#define PUD_MASK (~(PUD_SIZE-1))
66821-
66822 /*
66823 * The "pgd_xxx()" functions here are trivial for a folded two-level
66824 * setup: the pud is never bad, and a pud always exists (as it's folded
66825@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
66826 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
66827
66828 #define pgd_populate(mm, pgd, pud) do { } while (0)
66829+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
66830 /*
66831 * (puds are folded into pgds so this doesn't get actually called,
66832 * but the define is needed for a generic inline function.)
66833diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
66834index 5cf680a..4b74d62 100644
66835--- a/include/asm-generic/pgtable.h
66836+++ b/include/asm-generic/pgtable.h
66837@@ -688,6 +688,14 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
66838 }
66839 #endif /* CONFIG_NUMA_BALANCING */
66840
66841+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
66842+static inline unsigned long pax_open_kernel(void) { return 0; }
66843+#endif
66844+
66845+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
66846+static inline unsigned long pax_close_kernel(void) { return 0; }
66847+#endif
66848+
66849 #endif /* CONFIG_MMU */
66850
66851 #endif /* !__ASSEMBLY__ */
66852diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
66853index d1ea7ce..b1ebf2a 100644
66854--- a/include/asm-generic/vmlinux.lds.h
66855+++ b/include/asm-generic/vmlinux.lds.h
66856@@ -218,6 +218,7 @@
66857 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
66858 VMLINUX_SYMBOL(__start_rodata) = .; \
66859 *(.rodata) *(.rodata.*) \
66860+ *(.data..read_only) \
66861 *(__vermagic) /* Kernel version magic */ \
66862 . = ALIGN(8); \
66863 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
66864@@ -725,17 +726,18 @@
66865 * section in the linker script will go there too. @phdr should have
66866 * a leading colon.
66867 *
66868- * Note that this macro defines __per_cpu_load as an absolute symbol.
66869+ * Note that this macro defines per_cpu_load as an absolute symbol.
66870 * If there is no need to put the percpu section at a predetermined
66871 * address, use PERCPU_SECTION.
66872 */
66873 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
66874- VMLINUX_SYMBOL(__per_cpu_load) = .; \
66875- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
66876+ per_cpu_load = .; \
66877+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
66878 - LOAD_OFFSET) { \
66879+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
66880 PERCPU_INPUT(cacheline) \
66881 } phdr \
66882- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
66883+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
66884
66885 /**
66886 * PERCPU_SECTION - define output section for percpu area, simple version
66887diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
66888index 418d270..bfd2794 100644
66889--- a/include/crypto/algapi.h
66890+++ b/include/crypto/algapi.h
66891@@ -34,7 +34,7 @@ struct crypto_type {
66892 unsigned int maskclear;
66893 unsigned int maskset;
66894 unsigned int tfmsize;
66895-};
66896+} __do_const;
66897
66898 struct crypto_instance {
66899 struct crypto_alg alg;
66900diff --git a/include/drm/drmP.h b/include/drm/drmP.h
66901index fad21c9..ab858bc 100644
66902--- a/include/drm/drmP.h
66903+++ b/include/drm/drmP.h
66904@@ -72,6 +72,7 @@
66905 #include <linux/workqueue.h>
66906 #include <linux/poll.h>
66907 #include <asm/pgalloc.h>
66908+#include <asm/local.h>
66909 #include <drm/drm.h>
66910 #include <drm/drm_sarea.h>
66911
66912@@ -293,10 +294,12 @@ do { \
66913 * \param cmd command.
66914 * \param arg argument.
66915 */
66916-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
66917+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
66918+ struct drm_file *file_priv);
66919+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
66920 struct drm_file *file_priv);
66921
66922-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
66923+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
66924 unsigned long arg);
66925
66926 #define DRM_IOCTL_NR(n) _IOC_NR(n)
66927@@ -311,9 +314,9 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
66928 struct drm_ioctl_desc {
66929 unsigned int cmd;
66930 int flags;
66931- drm_ioctl_t *func;
66932+ drm_ioctl_t func;
66933 unsigned int cmd_drv;
66934-};
66935+} __do_const;
66936
66937 /**
66938 * Creates a driver or general drm_ioctl_desc array entry for the given
66939@@ -995,7 +998,7 @@ struct drm_info_list {
66940 int (*show)(struct seq_file*, void*); /** show callback */
66941 u32 driver_features; /**< Required driver features for this entry */
66942 void *data;
66943-};
66944+} __do_const;
66945
66946 /**
66947 * debugfs node structure. This structure represents a debugfs file.
66948@@ -1068,7 +1071,7 @@ struct drm_device {
66949
66950 /** \name Usage Counters */
66951 /*@{ */
66952- int open_count; /**< Outstanding files open */
66953+ local_t open_count; /**< Outstanding files open */
66954 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
66955 atomic_t vma_count; /**< Outstanding vma areas open */
66956 int buf_use; /**< Buffers in use -- cannot alloc */
66957@@ -1079,7 +1082,7 @@ struct drm_device {
66958 /*@{ */
66959 unsigned long counters;
66960 enum drm_stat_type types[15];
66961- atomic_t counts[15];
66962+ atomic_unchecked_t counts[15];
66963 /*@} */
66964
66965 struct list_head filelist;
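The drmP.h changes are a representative constification site: drm_ioctl_t becomes a typedef for a const function pointer so that drm_ioctl_desc tables (now __do_const) can live in read-only memory, with drm_ioctl_no_const_t kept for the few fields that are genuinely reassigned at runtime; open_count becomes a local_t, and the per-device statistics counters become atomic_unchecked_t since they may wrap. A sketch of what the typedef split expresses, with hypothetical names:

/* Illustration only: a handler table uses the const typedef and can
 * be moved into rodata, while a field that is chosen at runtime keeps
 * the _no_const variant. */
#include <drm/drmP.h>

static int example_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	return 0;
}

static const struct drm_ioctl_desc example_ioctls[] = {
	/* .func has type drm_ioctl_t: a const function pointer */
	{ .cmd = 0, .flags = 0, .func = example_ioctl, .cmd_drv = 0 },
};

struct example_drv {
	drm_ioctl_no_const_t selected;	/* reassigned at runtime, writable */
};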
66966diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
66967index f43d556..94d9343 100644
66968--- a/include/drm/drm_crtc_helper.h
66969+++ b/include/drm/drm_crtc_helper.h
66970@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
66971 struct drm_connector *connector);
66972 /* disable encoder when not in use - more explicit than dpms off */
66973 void (*disable)(struct drm_encoder *encoder);
66974-};
66975+} __no_const;
66976
66977 /**
66978 * drm_connector_helper_funcs - helper operations for connectors
66979diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
66980index 72dcbe8..8db58d7 100644
66981--- a/include/drm/ttm/ttm_memory.h
66982+++ b/include/drm/ttm/ttm_memory.h
66983@@ -48,7 +48,7 @@
66984
66985 struct ttm_mem_shrink {
66986 int (*do_shrink) (struct ttm_mem_shrink *);
66987-};
66988+} __no_const;
66989
66990 /**
66991 * struct ttm_mem_global - Global memory accounting structure.
66992diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
66993index 4b840e8..155d235 100644
66994--- a/include/keys/asymmetric-subtype.h
66995+++ b/include/keys/asymmetric-subtype.h
66996@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
66997 /* Verify the signature on a key of this subtype (optional) */
66998 int (*verify_signature)(const struct key *key,
66999 const struct public_key_signature *sig);
67000-};
67001+} __do_const;
67002
67003 /**
67004 * asymmetric_key_subtype - Get the subtype from an asymmetric key
67005diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
67006index c1da539..1dcec55 100644
67007--- a/include/linux/atmdev.h
67008+++ b/include/linux/atmdev.h
67009@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
67010 #endif
67011
67012 struct k_atm_aal_stats {
67013-#define __HANDLE_ITEM(i) atomic_t i
67014+#define __HANDLE_ITEM(i) atomic_unchecked_t i
67015 __AAL_STAT_ITEMS
67016 #undef __HANDLE_ITEM
67017 };
67018@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
67019 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
67020 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
67021 struct module *owner;
67022-};
67023+} __do_const;
67024
67025 struct atmphy_ops {
67026 int (*start)(struct atm_dev *dev);
67027diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
67028index 0530b98..96a8ac0 100644
67029--- a/include/linux/binfmts.h
67030+++ b/include/linux/binfmts.h
67031@@ -73,8 +73,9 @@ struct linux_binfmt {
67032 int (*load_binary)(struct linux_binprm *);
67033 int (*load_shlib)(struct file *);
67034 int (*core_dump)(struct coredump_params *cprm);
67035+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
67036 unsigned long min_coredump; /* minimal dump size */
67037-};
67038+} __do_const;
67039
67040 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
67041
67042diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
67043index f94bc83..62b9cfe 100644
67044--- a/include/linux/blkdev.h
67045+++ b/include/linux/blkdev.h
67046@@ -1498,7 +1498,7 @@ struct block_device_operations {
67047 /* this callback is with swap_lock and sometimes page table lock held */
67048 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
67049 struct module *owner;
67050-};
67051+} __do_const;
67052
67053 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
67054 unsigned long);
67055diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
67056index 7c2e030..b72475d 100644
67057--- a/include/linux/blktrace_api.h
67058+++ b/include/linux/blktrace_api.h
67059@@ -23,7 +23,7 @@ struct blk_trace {
67060 struct dentry *dir;
67061 struct dentry *dropped_file;
67062 struct dentry *msg_file;
67063- atomic_t dropped;
67064+ atomic_unchecked_t dropped;
67065 };
67066
67067 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
67068diff --git a/include/linux/cache.h b/include/linux/cache.h
67069index 4c57065..4307975 100644
67070--- a/include/linux/cache.h
67071+++ b/include/linux/cache.h
67072@@ -16,6 +16,10 @@
67073 #define __read_mostly
67074 #endif
67075
67076+#ifndef __read_only
67077+#define __read_only __read_mostly
67078+#endif
67079+
67080 #ifndef ____cacheline_aligned
67081 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
67082 #endif
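Taken together with the earlier pieces, this defines the patch's write-rarely idiom: data tagged __read_only is emitted into .data..read_only, which the vmlinux.lds.h hunk above folds into the read-only rodata output section, and the pax_open_kernel()/pax_close_kernel() stubs from asm-generic/pgtable.h (overridden per-architecture by PaX) bracket the rare legitimate writes. A sketch of the intended pattern, assumed from these pieces rather than quoted from the patch:

/* Hypothetical example of the __read_only write pattern. */
#include <linux/cache.h>
#include <asm/pgtable.h>

static int example_setting __read_only;

static void example_update_setting(int v)
{
	pax_open_kernel();	/* temporarily allow the write */
	example_setting = v;
	pax_close_kernel();	/* restore write protection */
}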
67083diff --git a/include/linux/capability.h b/include/linux/capability.h
67084index 98503b7..cc36d18 100644
67085--- a/include/linux/capability.h
67086+++ b/include/linux/capability.h
67087@@ -211,8 +211,13 @@ extern bool capable(int cap);
67088 extern bool ns_capable(struct user_namespace *ns, int cap);
67089 extern bool nsown_capable(int cap);
67090 extern bool inode_capable(const struct inode *inode, int cap);
67091+extern bool capable_nolog(int cap);
67092+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
67093+extern bool inode_capable_nolog(const struct inode *inode, int cap);
67094
67095 /* audit system wants to get cap info from files as well */
67096 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
67097
67098+extern int is_privileged_binary(const struct dentry *dentry);
67099+
67100 #endif /* !_LINUX_CAPABILITY_H */
67101diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
67102index 8609d57..86e4d79 100644
67103--- a/include/linux/cdrom.h
67104+++ b/include/linux/cdrom.h
67105@@ -87,7 +87,6 @@ struct cdrom_device_ops {
67106
67107 /* driver specifications */
67108 const int capability; /* capability flags */
67109- int n_minors; /* number of active minor devices */
67110 /* handle uniform packets for scsi type devices (scsi,atapi) */
67111 int (*generic_packet) (struct cdrom_device_info *,
67112 struct packet_command *);
67113diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
67114index 42e55de..1cd0e66 100644
67115--- a/include/linux/cleancache.h
67116+++ b/include/linux/cleancache.h
67117@@ -31,7 +31,7 @@ struct cleancache_ops {
67118 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
67119 void (*invalidate_inode)(int, struct cleancache_filekey);
67120 void (*invalidate_fs)(int);
67121-};
67122+} __no_const;
67123
67124 extern struct cleancache_ops
67125 cleancache_register_ops(struct cleancache_ops *ops);
67126diff --git a/include/linux/compat.h b/include/linux/compat.h
67127index dec7e2d..45db13f 100644
67128--- a/include/linux/compat.h
67129+++ b/include/linux/compat.h
67130@@ -311,14 +311,14 @@ long compat_sys_msgsnd(int first, int second, int third, void __user *uptr);
67131 long compat_sys_msgrcv(int first, int second, int msgtyp, int third,
67132 int version, void __user *uptr);
67133 long compat_sys_shmat(int first, int second, compat_uptr_t third, int version,
67134- void __user *uptr);
67135+ void __user *uptr) __intentional_overflow(0);
67136 #else
67137 long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
67138 long compat_sys_msgsnd(int msqid, struct compat_msgbuf __user *msgp,
67139 compat_ssize_t msgsz, int msgflg);
67140 long compat_sys_msgrcv(int msqid, struct compat_msgbuf __user *msgp,
67141 compat_ssize_t msgsz, long msgtyp, int msgflg);
67142-long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
67143+long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
67144 #endif
67145 long compat_sys_msgctl(int first, int second, void __user *uptr);
67146 long compat_sys_shmctl(int first, int second, void __user *uptr);
67147@@ -414,7 +414,7 @@ extern int compat_ptrace_request(struct task_struct *child,
67148 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
67149 compat_ulong_t addr, compat_ulong_t data);
67150 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
67151- compat_long_t addr, compat_long_t data);
67152+ compat_ulong_t addr, compat_ulong_t data);
67153
67154 /*
67155 * epoll (fs/eventpoll.c) compat bits follow ...
67156diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
67157index 662fd1b..e801992 100644
67158--- a/include/linux/compiler-gcc4.h
67159+++ b/include/linux/compiler-gcc4.h
67160@@ -34,6 +34,21 @@
67161 #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
67162
67163 #if __GNUC_MINOR__ >= 5
67164+
67165+#ifdef CONSTIFY_PLUGIN
67166+#define __no_const __attribute__((no_const))
67167+#define __do_const __attribute__((do_const))
67168+#endif
67169+
67170+#ifdef SIZE_OVERFLOW_PLUGIN
67171+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
67172+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
67173+#endif
67174+
67175+#ifdef LATENT_ENTROPY_PLUGIN
67176+#define __latent_entropy __attribute__((latent_entropy))
67177+#endif
67178+
67179 /*
67180 * Mark a position in code as unreachable. This can be used to
67181 * suppress control flow warnings after asm blocks that transfer
67182@@ -49,6 +64,11 @@
67183 #define __noclone __attribute__((__noclone__))
67184
67185 #endif
67186+
67187+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
67188+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
67189+#define __bos0(ptr) __bos((ptr), 0)
67190+#define __bos1(ptr) __bos((ptr), 1)
67191 #endif
67192
67193 #if __GNUC_MINOR__ >= 6
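These attributes are consumed by the gcc plugins added elsewhere in the patch: __do_const and __no_const drive the constify plugin (structures of function pointers forced into rodata, with an explicit opt-out), __size_overflow and __intentional_overflow feed the size_overflow plugin, __latent_entropy marks functions for the latent_entropy plugin, and the __alloc_size/__bos* helpers support builtin object-size checking. What the constify pair expresses, on hypothetical structures:

/* Illustration only: an ops table of function pointers is made const
 * kernel-wide by the constify plugin via __do_const, while a structure
 * that must stay writable opts out with __no_const.  Both expand to
 * nothing when the plugin is not in use. */
#include <linux/compiler.h>

struct example_ops {
	int (*open)(void *priv);
	void (*close)(void *priv);
} __do_const;		/* plugin moves all instances into rodata */

struct example_mutable_ops {
	int (*handler)(void *priv);
} __no_const;		/* exempt: reassigned at runtime */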
67194diff --git a/include/linux/compiler.h b/include/linux/compiler.h
67195index dd852b7..72924c0 100644
67196--- a/include/linux/compiler.h
67197+++ b/include/linux/compiler.h
67198@@ -5,11 +5,14 @@
67199
67200 #ifdef __CHECKER__
67201 # define __user __attribute__((noderef, address_space(1)))
67202+# define __force_user __force __user
67203 # define __kernel __attribute__((address_space(0)))
67204+# define __force_kernel __force __kernel
67205 # define __safe __attribute__((safe))
67206 # define __force __attribute__((force))
67207 # define __nocast __attribute__((nocast))
67208 # define __iomem __attribute__((noderef, address_space(2)))
67209+# define __force_iomem __force __iomem
67210 # define __must_hold(x) __attribute__((context(x,1,1)))
67211 # define __acquires(x) __attribute__((context(x,0,1)))
67212 # define __releases(x) __attribute__((context(x,1,0)))
67213@@ -17,20 +20,48 @@
67214 # define __release(x) __context__(x,-1)
67215 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
67216 # define __percpu __attribute__((noderef, address_space(3)))
67217+# define __force_percpu __force __percpu
67218 #ifdef CONFIG_SPARSE_RCU_POINTER
67219 # define __rcu __attribute__((noderef, address_space(4)))
67220+# define __force_rcu __force __rcu
67221 #else
67222 # define __rcu
67223+# define __force_rcu
67224 #endif
67225 extern void __chk_user_ptr(const volatile void __user *);
67226 extern void __chk_io_ptr(const volatile void __iomem *);
67227+#elif defined(CHECKER_PLUGIN)
67228+//# define __user
67229+//# define __force_user
67230+//# define __kernel
67231+//# define __force_kernel
67232+# define __safe
67233+# define __force
67234+# define __nocast
67235+# define __iomem
67236+# define __force_iomem
67237+# define __chk_user_ptr(x) (void)0
67238+# define __chk_io_ptr(x) (void)0
67239+# define __builtin_warning(x, y...) (1)
67240+# define __acquires(x)
67241+# define __releases(x)
67242+# define __acquire(x) (void)0
67243+# define __release(x) (void)0
67244+# define __cond_lock(x,c) (c)
67245+# define __percpu
67246+# define __force_percpu
67247+# define __rcu
67248+# define __force_rcu
67249 #else
67250 # define __user
67251+# define __force_user
67252 # define __kernel
67253+# define __force_kernel
67254 # define __safe
67255 # define __force
67256 # define __nocast
67257 # define __iomem
67258+# define __force_iomem
67259 # define __chk_user_ptr(x) (void)0
67260 # define __chk_io_ptr(x) (void)0
67261 # define __builtin_warning(x, y...) (1)
67262@@ -41,7 +72,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
67263 # define __release(x) (void)0
67264 # define __cond_lock(x,c) (c)
67265 # define __percpu
67266+# define __force_percpu
67267 # define __rcu
67268+# define __force_rcu
67269 #endif
67270
67271 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
67272@@ -275,6 +308,26 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
67273 # define __attribute_const__ /* unimplemented */
67274 #endif
67275
67276+#ifndef __no_const
67277+# define __no_const
67278+#endif
67279+
67280+#ifndef __do_const
67281+# define __do_const
67282+#endif
67283+
67284+#ifndef __size_overflow
67285+# define __size_overflow(...)
67286+#endif
67287+
67288+#ifndef __intentional_overflow
67289+# define __intentional_overflow(...)
67290+#endif
67291+
67292+#ifndef __latent_entropy
67293+# define __latent_entropy
67294+#endif
67295+
67296 /*
67297 * Tell gcc if a function is cold. The compiler will assume any path
67298 * directly leading to the call is unlikely.
67299@@ -284,6 +337,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
67300 #define __cold
67301 #endif
67302
67303+#ifndef __alloc_size
67304+#define __alloc_size(...)
67305+#endif
67306+
67307+#ifndef __bos
67308+#define __bos(ptr, arg)
67309+#endif
67310+
67311+#ifndef __bos0
67312+#define __bos0(ptr)
67313+#endif
67314+
67315+#ifndef __bos1
67316+#define __bos1(ptr)
67317+#endif
67318+
67319 /* Simple shorthand for a section definition */
67320 #ifndef __section
67321 # define __section(S) __attribute__ ((__section__(#S)))
67322@@ -323,6 +392,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
67323 * use is to mediate communication between process-level code and irq/NMI
67324 * handlers, all running on the same CPU.
67325 */
67326-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
67327+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
67328+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
67329
67330 #endif /* __LINUX_COMPILER_H */
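The ACCESS_ONCE() change is subtle but load-bearing: the added const qualifier makes the classic macro usable only for reads, and every site that performs a volatile store must be converted to the new ACCESS_ONCE_RW(), which is why so many hunks in this patch swap one for the other. A sketch of the resulting read/write split:

/* Illustration of the reader/writer split introduced above. */
#include <linux/compiler.h>

static int shared_flag;

static int example_poll(void)
{
	return ACCESS_ONCE(shared_flag);	/* volatile read only */
}

static void example_publish(int v)
{
	ACCESS_ONCE_RW(shared_flag) = v;	/* volatile write, explicit */
}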
67331diff --git a/include/linux/completion.h b/include/linux/completion.h
67332index 51494e6..0fd1b61 100644
67333--- a/include/linux/completion.h
67334+++ b/include/linux/completion.h
67335@@ -78,13 +78,13 @@ static inline void init_completion(struct completion *x)
67336
67337 extern void wait_for_completion(struct completion *);
67338 extern int wait_for_completion_interruptible(struct completion *x);
67339-extern int wait_for_completion_killable(struct completion *x);
67340+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
67341 extern unsigned long wait_for_completion_timeout(struct completion *x,
67342 unsigned long timeout);
67343 extern long wait_for_completion_interruptible_timeout(
67344- struct completion *x, unsigned long timeout);
67345+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
67346 extern long wait_for_completion_killable_timeout(
67347- struct completion *x, unsigned long timeout);
67348+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
67349 extern bool try_wait_for_completion(struct completion *x);
67350 extern bool completion_done(struct completion *x);
67351
67352diff --git a/include/linux/configfs.h b/include/linux/configfs.h
67353index 34025df..d94bbbc 100644
67354--- a/include/linux/configfs.h
67355+++ b/include/linux/configfs.h
67356@@ -125,7 +125,7 @@ struct configfs_attribute {
67357 const char *ca_name;
67358 struct module *ca_owner;
67359 umode_t ca_mode;
67360-};
67361+} __do_const;
67362
67363 /*
67364 * Users often need to create attribute structures for their configurable
67365diff --git a/include/linux/cpu.h b/include/linux/cpu.h
67366index ce7a074..01ab8ac 100644
67367--- a/include/linux/cpu.h
67368+++ b/include/linux/cpu.h
67369@@ -115,7 +115,7 @@ enum {
67370 /* Need to know about CPUs going up/down? */
67371 #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
67372 #define cpu_notifier(fn, pri) { \
67373- static struct notifier_block fn##_nb __cpuinitdata = \
67374+ static struct notifier_block fn##_nb = \
67375 { .notifier_call = fn, .priority = pri }; \
67376 register_cpu_notifier(&fn##_nb); \
67377 }
67378diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
67379index a55b88e..fba90c5 100644
67380--- a/include/linux/cpufreq.h
67381+++ b/include/linux/cpufreq.h
67382@@ -240,7 +240,7 @@ struct cpufreq_driver {
67383 int (*suspend) (struct cpufreq_policy *policy);
67384 int (*resume) (struct cpufreq_policy *policy);
67385 struct freq_attr **attr;
67386-};
67387+} __do_const;
67388
67389 /* flags */
67390
67391@@ -299,6 +299,7 @@ struct global_attr {
67392 ssize_t (*store)(struct kobject *a, struct attribute *b,
67393 const char *c, size_t count);
67394 };
67395+typedef struct global_attr __no_const global_attr_no_const;
67396
67397 #define define_one_global_ro(_name) \
67398 static struct global_attr _name = \
67399diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
67400index 24cd1037..20a63aae 100644
67401--- a/include/linux/cpuidle.h
67402+++ b/include/linux/cpuidle.h
67403@@ -54,7 +54,8 @@ struct cpuidle_state {
67404 int index);
67405
67406 int (*enter_dead) (struct cpuidle_device *dev, int index);
67407-};
67408+} __do_const;
67409+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
67410
67411 /* Idle State Flags */
67412 #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
67413@@ -216,7 +217,7 @@ struct cpuidle_governor {
67414 void (*reflect) (struct cpuidle_device *dev, int index);
67415
67416 struct module *owner;
67417-};
67418+} __do_const;
67419
67420 #ifdef CONFIG_CPU_IDLE
67421
67422diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
67423index 0325602..5e9feff 100644
67424--- a/include/linux/cpumask.h
67425+++ b/include/linux/cpumask.h
67426@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
67427 }
67428
67429 /* Valid inputs for n are -1 and 0. */
67430-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
67431+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
67432 {
67433 return n+1;
67434 }
67435
67436-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
67437+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
67438 {
67439 return n+1;
67440 }
67441
67442-static inline unsigned int cpumask_next_and(int n,
67443+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
67444 const struct cpumask *srcp,
67445 const struct cpumask *andp)
67446 {
67447@@ -167,7 +167,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
67448 *
67449 * Returns >= nr_cpu_ids if no further cpus set.
67450 */
67451-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
67452+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
67453 {
67454 /* -1 is a legal arg here. */
67455 if (n != -1)
67456@@ -182,7 +182,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
67457 *
67458 * Returns >= nr_cpu_ids if no further cpus unset.
67459 */
67460-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
67461+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
67462 {
67463 /* -1 is a legal arg here. */
67464 if (n != -1)
67465@@ -190,7 +190,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
67466 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
67467 }
67468
67469-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
67470+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
67471 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
67472
67473 /**
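cpumask_next(-1, ...) relies on n + 1 wrapping -1 around to 0, which is exactly the arithmetic the size_overflow plugin would otherwise flag; __intentional_overflow(-1) marks the wrap as deliberate. A small userspace analog of the idiom (the annotation stubs to nothing without the plugin, as in compiler.h above):

#include <assert.h>

#define __intentional_overflow(...)  /* no-op without the size_overflow plugin */

/* Valid inputs for n are -1 and 0, mirroring cpumask_next(). */
static inline unsigned int __intentional_overflow(-1) next_index(int n)
{
	return n + 1;  /* n == -1 deliberately wraps to 0 */
}

int main(void)
{
	assert(next_index(-1) == 0);  /* -1 is the start-of-iteration sentinel */
	assert(next_index(0) == 1);
	return 0;
}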
67474diff --git a/include/linux/cred.h b/include/linux/cred.h
67475index 04421e8..6bce4ef 100644
67476--- a/include/linux/cred.h
67477+++ b/include/linux/cred.h
67478@@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
67479 static inline void validate_process_creds(void)
67480 {
67481 }
67482+static inline void validate_task_creds(struct task_struct *task)
67483+{
67484+}
67485 #endif
67486
67487 /**
67488diff --git a/include/linux/crypto.h b/include/linux/crypto.h
67489index b92eadf..b4ecdc1 100644
67490--- a/include/linux/crypto.h
67491+++ b/include/linux/crypto.h
67492@@ -373,7 +373,7 @@ struct cipher_tfm {
67493 const u8 *key, unsigned int keylen);
67494 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
67495 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
67496-};
67497+} __no_const;
67498
67499 struct hash_tfm {
67500 int (*init)(struct hash_desc *desc);
67501@@ -394,13 +394,13 @@ struct compress_tfm {
67502 int (*cot_decompress)(struct crypto_tfm *tfm,
67503 const u8 *src, unsigned int slen,
67504 u8 *dst, unsigned int *dlen);
67505-};
67506+} __no_const;
67507
67508 struct rng_tfm {
67509 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
67510 unsigned int dlen);
67511 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
67512-};
67513+} __no_const;
67514
67515 #define crt_ablkcipher crt_u.ablkcipher
67516 #define crt_aead crt_u.aead
67517diff --git a/include/linux/ctype.h b/include/linux/ctype.h
67518index 8acfe31..6ffccd63 100644
67519--- a/include/linux/ctype.h
67520+++ b/include/linux/ctype.h
67521@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
67522 * Fast implementation of tolower() for internal usage. Do not use in your
67523 * code.
67524 */
67525-static inline char _tolower(const char c)
67526+static inline unsigned char _tolower(const unsigned char c)
67527 {
67528 return c | 0x20;
67529 }
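The char -> unsigned char switch matters on ABIs where plain char is signed: bytes >= 0x80 promote to negative ints, and the negative result of c | 0x20 then misbehaves as an array index or in comparisons. A quick demonstration of the difference:

#include <stdio.h>

static char tolower_signed(const char c) { return c | 0x20; }
static unsigned char tolower_unsigned(const unsigned char c) { return c | 0x20; }

int main(void)
{
	char hi = (char)0xC3;  /* e.g. the lead byte of a UTF-8 sequence */

	/* On signed-char ABIs the first result promotes to a negative int,
	 * which is poison as an array index; the second stays in 0..255. */
	printf("signed:   %d\n", tolower_signed(hi));    /* -29 on signed-char ABIs */
	printf("unsigned: %d\n", tolower_unsigned(hi));  /* 227 */
	return 0;
}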
67530diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
67531index 7925bf0..d5143d2 100644
67532--- a/include/linux/decompress/mm.h
67533+++ b/include/linux/decompress/mm.h
67534@@ -77,7 +77,7 @@ static void free(void *where)
67535 * warnings when not needed (indeed large_malloc / large_free are not
67536 * needed by inflate */
67537
67538-#define malloc(a) kmalloc(a, GFP_KERNEL)
67539+#define malloc(a) kmalloc((a), GFP_KERNEL)
67540 #define free(a) kfree(a)
67541
67542 #define large_malloc(a) vmalloc(a)
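Wrapping the macro argument as (a) is standard macro hygiene: it keeps an argument that is itself an expression from re-associating with surrounding tokens after expansion. Here the argument already sits alone in an argument list, so the change is purely defensive, but the generic hazard it guards against looks like this:

#include <assert.h>

#define DOUBLE_BAD(a)  a * 2        /* argument pasted in unprotected */
#define DOUBLE_GOOD(a) ((a) * 2)    /* hygienic: argument parenthesized */

int main(void)
{
	assert(DOUBLE_BAD(1 + 2)  == 5);   /* expands to 1 + 2 * 2 */
	assert(DOUBLE_GOOD(1 + 2) == 6);   /* expands to ((1 + 2) * 2) */
	return 0;
}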
67543diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
67544index e83ef39..33e0eb3 100644
67545--- a/include/linux/devfreq.h
67546+++ b/include/linux/devfreq.h
67547@@ -114,7 +114,7 @@ struct devfreq_governor {
67548 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
67549 int (*event_handler)(struct devfreq *devfreq,
67550 unsigned int event, void *data);
67551-};
67552+} __do_const;
67553
67554 /**
67555 * struct devfreq - Device devfreq structure
67556diff --git a/include/linux/device.h b/include/linux/device.h
67557index 43dcda9..7a1fb65 100644
67558--- a/include/linux/device.h
67559+++ b/include/linux/device.h
67560@@ -294,7 +294,7 @@ struct subsys_interface {
67561 struct list_head node;
67562 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
67563 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
67564-};
67565+} __do_const;
67566
67567 int subsys_interface_register(struct subsys_interface *sif);
67568 void subsys_interface_unregister(struct subsys_interface *sif);
67569@@ -474,7 +474,7 @@ struct device_type {
67570 void (*release)(struct device *dev);
67571
67572 const struct dev_pm_ops *pm;
67573-};
67574+} __do_const;
67575
67576 /* interface for exporting device attributes */
67577 struct device_attribute {
67578@@ -484,11 +484,12 @@ struct device_attribute {
67579 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
67580 const char *buf, size_t count);
67581 };
67582+typedef struct device_attribute __no_const device_attribute_no_const;
67583
67584 struct dev_ext_attribute {
67585 struct device_attribute attr;
67586 void *var;
67587-};
67588+} __do_const;
67589
67590 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
67591 char *buf);
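struct device_attribute deliberately stays writable here but gains a device_attribute_no_const alias: the recurring pattern in this patch is to constify ops/attr structs by default (__do_const) and fall back to a __no_const typedef for the few instances that are filled in at runtime, as the extcon diff below does for d_attrs_muex. A hedged userspace sketch of the idea, with the annotations stubbed out as they are when the constify plugin is absent:

#define __do_const  /* constify plugin would force instances read-only */
#define __no_const  /* opt-out for instances built up at runtime */

struct ops {
	int (*show)(void);
} __do_const;

typedef struct ops __no_const ops_no_const;

static int hello(void) { return 0; }

/* Default case: instances are const and can live in read-only memory... */
static const struct ops fixed_ops = { .show = hello };

int main(void)
{
	/* ...while a _no_const instance can still be assembled at runtime. */
	ops_no_const dynamic_ops;
	dynamic_ops.show = hello;
	return fixed_ops.show() | dynamic_ops.show();
}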
67592diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
67593index 94af418..b1ca7a2 100644
67594--- a/include/linux/dma-mapping.h
67595+++ b/include/linux/dma-mapping.h
67596@@ -54,7 +54,7 @@ struct dma_map_ops {
67597 u64 (*get_required_mask)(struct device *dev);
67598 #endif
67599 int is_phys;
67600-};
67601+} __do_const;
67602
67603 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
67604
67605diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
67606index d3201e4..8281e63 100644
67607--- a/include/linux/dmaengine.h
67608+++ b/include/linux/dmaengine.h
67609@@ -1018,9 +1018,9 @@ struct dma_pinned_list {
67610 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
67611 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
67612
67613-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
67614+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
67615 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
67616-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
67617+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
67618 struct dma_pinned_list *pinned_list, struct page *page,
67619 unsigned int offset, size_t len);
67620
67621diff --git a/include/linux/efi.h b/include/linux/efi.h
67622index 7a9498a..155713d 100644
67623--- a/include/linux/efi.h
67624+++ b/include/linux/efi.h
67625@@ -733,6 +733,7 @@ struct efivar_operations {
67626 efi_set_variable_t *set_variable;
67627 efi_query_variable_info_t *query_variable_info;
67628 };
67629+typedef struct efivar_operations __no_const efivar_operations_no_const;
67630
67631 struct efivars {
67632 /*
67633diff --git a/include/linux/elf.h b/include/linux/elf.h
67634index 8c9048e..16a4665 100644
67635--- a/include/linux/elf.h
67636+++ b/include/linux/elf.h
67637@@ -20,6 +20,7 @@ extern Elf32_Dyn _DYNAMIC [];
67638 #define elf_note elf32_note
67639 #define elf_addr_t Elf32_Off
67640 #define Elf_Half Elf32_Half
67641+#define elf_dyn Elf32_Dyn
67642
67643 #else
67644
67645@@ -30,6 +31,7 @@ extern Elf64_Dyn _DYNAMIC [];
67646 #define elf_note elf64_note
67647 #define elf_addr_t Elf64_Off
67648 #define Elf_Half Elf64_Half
67649+#define elf_dyn Elf64_Dyn
67650
67651 #endif
67652
67653diff --git a/include/linux/err.h b/include/linux/err.h
67654index f2edce2..cc2082c 100644
67655--- a/include/linux/err.h
67656+++ b/include/linux/err.h
67657@@ -19,12 +19,12 @@
67658
67659 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
67660
67661-static inline void * __must_check ERR_PTR(long error)
67662+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
67663 {
67664 return (void *) error;
67665 }
67666
67667-static inline long __must_check PTR_ERR(const void *ptr)
67668+static inline long __must_check __intentional_overflow(-1) PTR_ERR(const void *ptr)
67669 {
67670 return (long) ptr;
67671 }
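ERR_PTR() encodes a small negative errno into the top page of the address space and PTR_ERR() decodes it again; both casts wrap around the pointer range, hence the __intentional_overflow(-1) markings. A userspace sketch of the encoding, assuming the kernel's MAX_ERRNO of 4095:

#include <assert.h>

#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)  { return IS_ERR_VALUE((unsigned long)ptr); }

int main(void)
{
	void *p = ERR_PTR(-12 /* -ENOMEM */);

	assert(IS_ERR(p));          /* lands in the reserved top 4095 values */
	assert(PTR_ERR(p) == -12);  /* round-trips back to the errno */
	assert(!IS_ERR(&p));        /* an ordinary userspace address does not */
	return 0;
}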
67672diff --git a/include/linux/extcon.h b/include/linux/extcon.h
67673index fcb51c8..bdafcf6 100644
67674--- a/include/linux/extcon.h
67675+++ b/include/linux/extcon.h
67676@@ -134,7 +134,7 @@ struct extcon_dev {
67677 /* /sys/class/extcon/.../mutually_exclusive/... */
67678 struct attribute_group attr_g_muex;
67679 struct attribute **attrs_muex;
67680- struct device_attribute *d_attrs_muex;
67681+ device_attribute_no_const *d_attrs_muex;
67682 };
67683
67684 /**
67685diff --git a/include/linux/fb.h b/include/linux/fb.h
67686index c7a9571..02eeffe 100644
67687--- a/include/linux/fb.h
67688+++ b/include/linux/fb.h
67689@@ -302,7 +302,7 @@ struct fb_ops {
67690 /* called at KDB enter and leave time to prepare the console */
67691 int (*fb_debug_enter)(struct fb_info *info);
67692 int (*fb_debug_leave)(struct fb_info *info);
67693-};
67694+} __do_const;
67695
67696 #ifdef CONFIG_FB_TILEBLITTING
67697 #define FB_TILE_CURSOR_NONE 0
67698diff --git a/include/linux/filter.h b/include/linux/filter.h
67699index c45eabc..baa0be5 100644
67700--- a/include/linux/filter.h
67701+++ b/include/linux/filter.h
67702@@ -20,6 +20,7 @@ struct compat_sock_fprog {
67703
67704 struct sk_buff;
67705 struct sock;
67706+struct bpf_jit_work;
67707
67708 struct sk_filter
67709 {
67710@@ -27,6 +28,9 @@ struct sk_filter
67711 unsigned int len; /* Number of filter blocks */
67712 unsigned int (*bpf_func)(const struct sk_buff *skb,
67713 const struct sock_filter *filter);
67714+#ifdef CONFIG_BPF_JIT
67715+ struct bpf_jit_work *work;
67716+#endif
67717 struct rcu_head rcu;
67718 struct sock_filter insns[0];
67719 };
67720diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
67721index 3044254..9767f41 100644
67722--- a/include/linux/frontswap.h
67723+++ b/include/linux/frontswap.h
67724@@ -11,7 +11,7 @@ struct frontswap_ops {
67725 int (*load)(unsigned, pgoff_t, struct page *);
67726 void (*invalidate_page)(unsigned, pgoff_t);
67727 void (*invalidate_area)(unsigned);
67728-};
67729+} __no_const;
67730
67731 extern bool frontswap_enabled;
67732 extern struct frontswap_ops
67733diff --git a/include/linux/fs.h b/include/linux/fs.h
67734index 7617ee0..b575199 100644
67735--- a/include/linux/fs.h
67736+++ b/include/linux/fs.h
67737@@ -1541,7 +1541,8 @@ struct file_operations {
67738 long (*fallocate)(struct file *file, int mode, loff_t offset,
67739 loff_t len);
67740 int (*show_fdinfo)(struct seq_file *m, struct file *f);
67741-};
67742+} __do_const;
67743+typedef struct file_operations __no_const file_operations_no_const;
67744
67745 struct inode_operations {
67746 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
67747@@ -2665,4 +2666,14 @@ static inline void inode_has_no_xattr(struct inode *inode)
67748 inode->i_flags |= S_NOSEC;
67749 }
67750
67751+static inline bool is_sidechannel_device(const struct inode *inode)
67752+{
67753+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
67754+ umode_t mode = inode->i_mode;
67755+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
67756+#else
67757+ return false;
67758+#endif
67759+}
67760+
67761 #endif /* _LINUX_FS_H */
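The new is_sidechannel_device() flags world-readable or world-writable character and block devices when CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL is set; the fsnotify hooks below use it to suppress access/modify events on such nodes, so that activity on a shared device (a tty, say) is not observable by other users. The predicate itself is plain stat-mode arithmetic, as this userspace check shows:

#include <stdio.h>
#include <sys/stat.h>

/* Same mode test as is_sidechannel_device(), applied to a stat result. */
static int is_sidechannel_mode(mode_t mode)
{
	return (S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH));
}

int main(int argc, char **argv)
{
	struct stat st;

	if (argc < 2 || stat(argv[1], &st) != 0) {
		fprintf(stderr, "usage: %s <path>\n", argv[0]);
		return 1;
	}
	printf("%s: %s\n", argv[1],
	       is_sidechannel_mode(st.st_mode) ? "side-channel device" : "not flagged");
	return 0;
}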
67762diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
67763index d0ae3a8..0244b34 100644
67764--- a/include/linux/fs_struct.h
67765+++ b/include/linux/fs_struct.h
67766@@ -6,7 +6,7 @@
67767 #include <linux/seqlock.h>
67768
67769 struct fs_struct {
67770- int users;
67771+ atomic_t users;
67772 spinlock_t lock;
67773 seqcount_t seq;
67774 int umask;
67775diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
67776index 5dfa0aa..6acf322 100644
67777--- a/include/linux/fscache-cache.h
67778+++ b/include/linux/fscache-cache.h
67779@@ -112,7 +112,7 @@ struct fscache_operation {
67780 fscache_operation_release_t release;
67781 };
67782
67783-extern atomic_t fscache_op_debug_id;
67784+extern atomic_unchecked_t fscache_op_debug_id;
67785 extern void fscache_op_work_func(struct work_struct *work);
67786
67787 extern void fscache_enqueue_operation(struct fscache_operation *);
67788@@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
67789 INIT_WORK(&op->work, fscache_op_work_func);
67790 atomic_set(&op->usage, 1);
67791 op->state = FSCACHE_OP_ST_INITIALISED;
67792- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
67793+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
67794 op->processor = processor;
67795 op->release = release;
67796 INIT_LIST_HEAD(&op->pend_link);
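atomic_unchecked_t is the escape hatch from PaX's REFCOUNT hardening: with that feature enabled, plain atomic_t arithmetic traps on overflow, and counters like this debug ID, where wrapping is harmless, opt out via the *_unchecked variants. A userspace analog of the two behaviors (a GCC/Clang builtin standing in for the instrumented increment):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Analog of a REFCOUNT-checked increment: trap on signed overflow. */
static int inc_return_checked(int *v)
{
	int r;

	if (__builtin_add_overflow(*v, 1, &r))
		abort();  /* the hardened kernel would trap here */
	return *v = r;
}

/* Analog of atomic_inc_return_unchecked(): wraparound is tolerated. */
static int inc_return_unchecked(int *v)
{
	*v = (int)((unsigned int)*v + 1u);  /* wrap computed in unsigned arithmetic */
	return *v;
}

int main(void)
{
	int id = INT_MAX;

	printf("unchecked wraps to %d\n", inc_return_unchecked(&id));
	id = 0;
	printf("checked increments to %d\n", inc_return_checked(&id));
	/* inc_return_checked() on INT_MAX would abort() instead of wrapping. */
	return 0;
}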
67797diff --git a/include/linux/fscache.h b/include/linux/fscache.h
67798index 7a08623..4c07b0f 100644
67799--- a/include/linux/fscache.h
67800+++ b/include/linux/fscache.h
67801@@ -152,7 +152,7 @@ struct fscache_cookie_def {
67802 * - this is mandatory for any object that may have data
67803 */
67804 void (*now_uncached)(void *cookie_netfs_data);
67805-};
67806+} __do_const;
67807
67808 /*
67809 * fscache cached network filesystem type
67810diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
67811index 0fbfb46..508eb0d 100644
67812--- a/include/linux/fsnotify.h
67813+++ b/include/linux/fsnotify.h
67814@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
67815 struct inode *inode = path->dentry->d_inode;
67816 __u32 mask = FS_ACCESS;
67817
67818+ if (is_sidechannel_device(inode))
67819+ return;
67820+
67821 if (S_ISDIR(inode->i_mode))
67822 mask |= FS_ISDIR;
67823
67824@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
67825 struct inode *inode = path->dentry->d_inode;
67826 __u32 mask = FS_MODIFY;
67827
67828+ if (is_sidechannel_device(inode))
67829+ return;
67830+
67831 if (S_ISDIR(inode->i_mode))
67832 mask |= FS_ISDIR;
67833
67834@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
67835 */
67836 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
67837 {
67838- return kstrdup(name, GFP_KERNEL);
67839+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
67840 }
67841
67842 /*
67843diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
67844index a3d4895..ddd2a50 100644
67845--- a/include/linux/ftrace_event.h
67846+++ b/include/linux/ftrace_event.h
67847@@ -272,7 +272,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
67848 extern int trace_add_event_call(struct ftrace_event_call *call);
67849 extern void trace_remove_event_call(struct ftrace_event_call *call);
67850
67851-#define is_signed_type(type) (((type)(-1)) < 0)
67852+#define is_signed_type(type) (((type)(-1)) < (type)1)
67853
67854 int trace_set_clr_event(const char *system, const char *event, int set);
67855
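Comparing against (type)1 instead of a bare 0 avoids "comparison is always false" warnings when the type is unsigned or bool, without changing the result for any type:

#include <assert.h>
#include <stdbool.h>

#define is_signed_type(type) (((type)(-1)) < (type)1)

int main(void)
{
	assert(is_signed_type(int));            /* (int)-1 < 1 */
	assert(is_signed_type(signed char));
	assert(!is_signed_type(unsigned int));  /* UINT_MAX < 1u is false */
	assert(!is_signed_type(bool));          /* (bool)-1 == 1, not less than 1 */
	return 0;
}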
67856diff --git a/include/linux/genhd.h b/include/linux/genhd.h
67857index 79b8bba..86b539e 100644
67858--- a/include/linux/genhd.h
67859+++ b/include/linux/genhd.h
67860@@ -194,7 +194,7 @@ struct gendisk {
67861 struct kobject *slave_dir;
67862
67863 struct timer_rand_state *random;
67864- atomic_t sync_io; /* RAID */
67865+ atomic_unchecked_t sync_io; /* RAID */
67866 struct disk_events *ev;
67867 #ifdef CONFIG_BLK_DEV_INTEGRITY
67868 struct blk_integrity *integrity;
67869diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
67870index 023bc34..b02b46a 100644
67871--- a/include/linux/genl_magic_func.h
67872+++ b/include/linux/genl_magic_func.h
67873@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
67874 },
67875
67876 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
67877-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
67878+static struct genl_ops ZZZ_genl_ops[] = {
67879 #include GENL_MAGIC_INCLUDE_FILE
67880 };
67881
67882diff --git a/include/linux/gfp.h b/include/linux/gfp.h
67883index 0f615eb..5c3832f 100644
67884--- a/include/linux/gfp.h
67885+++ b/include/linux/gfp.h
67886@@ -35,6 +35,13 @@ struct vm_area_struct;
67887 #define ___GFP_NO_KSWAPD 0x400000u
67888 #define ___GFP_OTHER_NODE 0x800000u
67889 #define ___GFP_WRITE 0x1000000u
67890+
67891+#ifdef CONFIG_PAX_USERCOPY_SLABS
67892+#define ___GFP_USERCOPY 0x2000000u
67893+#else
67894+#define ___GFP_USERCOPY 0
67895+#endif
67896+
67897 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
67898
67899 /*
67900@@ -92,6 +99,7 @@ struct vm_area_struct;
67901 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
67902 #define __GFP_KMEMCG ((__force gfp_t)___GFP_KMEMCG) /* Allocation comes from a memcg-accounted resource */
67903 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
67904+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
67905
67906 /*
67907 * This may seem redundant, but it's a way of annotating false positives vs.
67908@@ -99,7 +107,7 @@ struct vm_area_struct;
67909 */
67910 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
67911
67912-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
67913+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
67914 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
67915
67916 /* This equals 0, but use constants in case they ever change */
67917@@ -153,6 +161,8 @@ struct vm_area_struct;
67918 /* 4GB DMA on some platforms */
67919 #define GFP_DMA32 __GFP_DMA32
67920
67921+#define GFP_USERCOPY __GFP_USERCOPY
67922+
67923 /* Convert GFP flags to their corresponding migrate type */
67924 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
67925 {
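___GFP_USERCOPY claims the next free flag bit (0x2000000u, i.e. bit 25), so __GFP_BITS_SHIFT has to grow from 25 to 26 or __GFP_BITS_MASK would strip the new flag. The arithmetic, checked in isolation:

#include <assert.h>

#define ___GFP_USERCOPY   0x2000000u
#define __GFP_BITS_SHIFT  26
#define __GFP_BITS_MASK   ((1u << __GFP_BITS_SHIFT) - 1)

int main(void)
{
	assert(___GFP_USERCOPY == 1u << 25);            /* bit 25 */
	assert(___GFP_USERCOPY & __GFP_BITS_MASK);      /* survives the 26-bit mask */
	assert(!(___GFP_USERCOPY & ((1u << 25) - 1)));  /* a 25-bit mask would drop it */
	return 0;
}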
67926diff --git a/include/linux/gracl.h b/include/linux/gracl.h
67927new file mode 100644
67928index 0000000..ebe6d72
67929--- /dev/null
67930+++ b/include/linux/gracl.h
67931@@ -0,0 +1,319 @@
67932+#ifndef GR_ACL_H
67933+#define GR_ACL_H
67934+
67935+#include <linux/grdefs.h>
67936+#include <linux/resource.h>
67937+#include <linux/capability.h>
67938+#include <linux/dcache.h>
67939+#include <asm/resource.h>
67940+
67941+/* Major status information */
67942+
67943+#define GR_VERSION "grsecurity 2.9.1"
67944+#define GRSECURITY_VERSION 0x2901
67945+
67946+enum {
67947+ GR_SHUTDOWN = 0,
67948+ GR_ENABLE = 1,
67949+ GR_SPROLE = 2,
67950+ GR_RELOAD = 3,
67951+ GR_SEGVMOD = 4,
67952+ GR_STATUS = 5,
67953+ GR_UNSPROLE = 6,
67954+ GR_PASSSET = 7,
67955+ GR_SPROLEPAM = 8,
67956+};
67957+
67958+/* Password setup definitions
67959+ * kernel/grhash.c */
67960+enum {
67961+ GR_PW_LEN = 128,
67962+ GR_SALT_LEN = 16,
67963+ GR_SHA_LEN = 32,
67964+};
67965+
67966+enum {
67967+ GR_SPROLE_LEN = 64,
67968+};
67969+
67970+enum {
67971+ GR_NO_GLOB = 0,
67972+ GR_REG_GLOB,
67973+ GR_CREATE_GLOB
67974+};
67975+
67976+#define GR_NLIMITS 32
67977+
67978+/* Begin Data Structures */
67979+
67980+struct sprole_pw {
67981+ unsigned char *rolename;
67982+ unsigned char salt[GR_SALT_LEN];
67983+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
67984+};
67985+
67986+struct name_entry {
67987+ __u32 key;
67988+ ino_t inode;
67989+ dev_t device;
67990+ char *name;
67991+ __u16 len;
67992+ __u8 deleted;
67993+ struct name_entry *prev;
67994+ struct name_entry *next;
67995+};
67996+
67997+struct inodev_entry {
67998+ struct name_entry *nentry;
67999+ struct inodev_entry *prev;
68000+ struct inodev_entry *next;
68001+};
68002+
68003+struct acl_role_db {
68004+ struct acl_role_label **r_hash;
68005+ __u32 r_size;
68006+};
68007+
68008+struct inodev_db {
68009+ struct inodev_entry **i_hash;
68010+ __u32 i_size;
68011+};
68012+
68013+struct name_db {
68014+ struct name_entry **n_hash;
68015+ __u32 n_size;
68016+};
68017+
68018+struct crash_uid {
68019+ uid_t uid;
68020+ unsigned long expires;
68021+};
68022+
68023+struct gr_hash_struct {
68024+ void **table;
68025+ void **nametable;
68026+ void *first;
68027+ __u32 table_size;
68028+ __u32 used_size;
68029+ int type;
68030+};
68031+
68032+/* Userspace Grsecurity ACL data structures */
68033+
68034+struct acl_subject_label {
68035+ char *filename;
68036+ ino_t inode;
68037+ dev_t device;
68038+ __u32 mode;
68039+ kernel_cap_t cap_mask;
68040+ kernel_cap_t cap_lower;
68041+ kernel_cap_t cap_invert_audit;
68042+
68043+ struct rlimit res[GR_NLIMITS];
68044+ __u32 resmask;
68045+
68046+ __u8 user_trans_type;
68047+ __u8 group_trans_type;
68048+ uid_t *user_transitions;
68049+ gid_t *group_transitions;
68050+ __u16 user_trans_num;
68051+ __u16 group_trans_num;
68052+
68053+ __u32 sock_families[2];
68054+ __u32 ip_proto[8];
68055+ __u32 ip_type;
68056+ struct acl_ip_label **ips;
68057+ __u32 ip_num;
68058+ __u32 inaddr_any_override;
68059+
68060+ __u32 crashes;
68061+ unsigned long expires;
68062+
68063+ struct acl_subject_label *parent_subject;
68064+ struct gr_hash_struct *hash;
68065+ struct acl_subject_label *prev;
68066+ struct acl_subject_label *next;
68067+
68068+ struct acl_object_label **obj_hash;
68069+ __u32 obj_hash_size;
68070+ __u16 pax_flags;
68071+};
68072+
68073+struct role_allowed_ip {
68074+ __u32 addr;
68075+ __u32 netmask;
68076+
68077+ struct role_allowed_ip *prev;
68078+ struct role_allowed_ip *next;
68079+};
68080+
68081+struct role_transition {
68082+ char *rolename;
68083+
68084+ struct role_transition *prev;
68085+ struct role_transition *next;
68086+};
68087+
68088+struct acl_role_label {
68089+ char *rolename;
68090+ uid_t uidgid;
68091+ __u16 roletype;
68092+
68093+ __u16 auth_attempts;
68094+ unsigned long expires;
68095+
68096+ struct acl_subject_label *root_label;
68097+ struct gr_hash_struct *hash;
68098+
68099+ struct acl_role_label *prev;
68100+ struct acl_role_label *next;
68101+
68102+ struct role_transition *transitions;
68103+ struct role_allowed_ip *allowed_ips;
68104+ uid_t *domain_children;
68105+ __u16 domain_child_num;
68106+
68107+ umode_t umask;
68108+
68109+ struct acl_subject_label **subj_hash;
68110+ __u32 subj_hash_size;
68111+};
68112+
68113+struct user_acl_role_db {
68114+ struct acl_role_label **r_table;
68115+ __u32 num_pointers; /* Number of allocations to track */
68116+ __u32 num_roles; /* Number of roles */
68117+ __u32 num_domain_children; /* Number of domain children */
68118+ __u32 num_subjects; /* Number of subjects */
68119+ __u32 num_objects; /* Number of objects */
68120+};
68121+
68122+struct acl_object_label {
68123+ char *filename;
68124+ ino_t inode;
68125+ dev_t device;
68126+ __u32 mode;
68127+
68128+ struct acl_subject_label *nested;
68129+ struct acl_object_label *globbed;
68130+
68131+ /* next two structures not used */
68132+
68133+ struct acl_object_label *prev;
68134+ struct acl_object_label *next;
68135+};
68136+
68137+struct acl_ip_label {
68138+ char *iface;
68139+ __u32 addr;
68140+ __u32 netmask;
68141+ __u16 low, high;
68142+ __u8 mode;
68143+ __u32 type;
68144+ __u32 proto[8];
68145+
68146+ /* next two structures not used */
68147+
68148+ struct acl_ip_label *prev;
68149+ struct acl_ip_label *next;
68150+};
68151+
68152+struct gr_arg {
68153+ struct user_acl_role_db role_db;
68154+ unsigned char pw[GR_PW_LEN];
68155+ unsigned char salt[GR_SALT_LEN];
68156+ unsigned char sum[GR_SHA_LEN];
68157+ unsigned char sp_role[GR_SPROLE_LEN];
68158+ struct sprole_pw *sprole_pws;
68159+ dev_t segv_device;
68160+ ino_t segv_inode;
68161+ uid_t segv_uid;
68162+ __u16 num_sprole_pws;
68163+ __u16 mode;
68164+};
68165+
68166+struct gr_arg_wrapper {
68167+ struct gr_arg *arg;
68168+ __u32 version;
68169+ __u32 size;
68170+};
68171+
68172+struct subject_map {
68173+ struct acl_subject_label *user;
68174+ struct acl_subject_label *kernel;
68175+ struct subject_map *prev;
68176+ struct subject_map *next;
68177+};
68178+
68179+struct acl_subj_map_db {
68180+ struct subject_map **s_hash;
68181+ __u32 s_size;
68182+};
68183+
68184+/* End Data Structures Section */
68185+
68186+/* Hash functions arrived at through empirical testing by Brad Spengler.
68187+   They make good use of the low bits of the inode: generally 0-1 loop
68188+   iterations for a successful match and 0-3 for an unsuccessful one.
68189+   Shift/add algorithm with a modulus of the table size and an XOR. */
68190+
68191+static __inline__ unsigned int
68192+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
68193+{
68194+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
68195+}
68196+
68197+static __inline__ unsigned int
68198+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
68199+{
68200+ return ((const unsigned long)userp % sz);
68201+}
68202+
68203+static __inline__ unsigned int
68204+gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
68205+{
68206+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
68207+}
68208+
68209+static __inline__ unsigned int
68210+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
68211+{
68212+ return full_name_hash((const unsigned char *)name, len) % sz;
68213+}
68214+
68215+#define FOR_EACH_ROLE_START(role) \
68216+ role = role_list; \
68217+ while (role) {
68218+
68219+#define FOR_EACH_ROLE_END(role) \
68220+ role = role->prev; \
68221+ }
68222+
68223+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
68224+ subj = NULL; \
68225+ iter = 0; \
68226+ while (iter < role->subj_hash_size) { \
68227+ if (subj == NULL) \
68228+ subj = role->subj_hash[iter]; \
68229+ if (subj == NULL) { \
68230+ iter++; \
68231+ continue; \
68232+ }
68233+
68234+#define FOR_EACH_SUBJECT_END(subj,iter) \
68235+ subj = subj->next; \
68236+ if (subj == NULL) \
68237+ iter++; \
68238+ }
68239+
68240+
68241+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
68242+ subj = role->hash->first; \
68243+ while (subj != NULL) {
68244+
68245+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
68246+ subj = subj->next; \
68247+ }
68248+
68249+#endif
68250+
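These inline hash helpers are what the RBAC lookup tables key on; gr_fhash(), for example, mixes the inode and device numbers with shifts and adds before reducing modulo the table size. The same function run standalone, with ino_t/dev_t swapped for fixed-width types:

#include <stdint.h>
#include <stdio.h>

static unsigned int fhash(uint64_t ino, uint64_t dev, unsigned int sz)
{
	return (unsigned int)(((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
}

int main(void)
{
	unsigned int sz = 256;  /* hypothetical obj_hash_size */
	uint64_t ino;

	/* Consecutive inodes on one device (dev 8:1 == 2049) spread out: */
	for (ino = 1000; ino < 1005; ino++)
		printf("inode %llu -> bucket %u\n",
		       (unsigned long long)ino, fhash(ino, 2049, sz));
	return 0;
}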
68251diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
68252new file mode 100644
68253index 0000000..323ecf2
68254--- /dev/null
68255+++ b/include/linux/gralloc.h
68256@@ -0,0 +1,9 @@
68257+#ifndef __GRALLOC_H
68258+#define __GRALLOC_H
68259+
68260+void acl_free_all(void);
68261+int acl_alloc_stack_init(unsigned long size);
68262+void *acl_alloc(unsigned long len);
68263+void *acl_alloc_num(unsigned long num, unsigned long len);
68264+
68265+#endif
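Only prototypes appear here; going by the names, the policy loader sizes one allocation stack up front with acl_alloc_stack_init(), carves individual objects out of it, and releases everything at once with acl_free_all(). A hedged usage sketch (the return conventions below are assumptions, since the implementation is not part of this header):

#include <linux/gracl.h>
#include <linux/gralloc.h>

/* Hypothetical caller, assuming acl_alloc_stack_init() returns nonzero on
 * success and acl_alloc() returns NULL on exhaustion. */
static int load_policy_objects(unsigned long nobjects)
{
	struct acl_object_label *obj;

	if (!acl_alloc_stack_init(nobjects))
		return -1;

	obj = acl_alloc(sizeof(*obj));
	if (obj == NULL) {
		acl_free_all();  /* one call tears down the whole stack */
		return -1;
	}

	/* ... populate obj; batches would go through acl_alloc_num() ... */
	return 0;
}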
68266diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
68267new file mode 100644
68268index 0000000..be66033
68269--- /dev/null
68270+++ b/include/linux/grdefs.h
68271@@ -0,0 +1,140 @@
68272+#ifndef GRDEFS_H
68273+#define GRDEFS_H
68274+
68275+/* Begin grsecurity status declarations */
68276+
68277+enum {
68278+ GR_READY = 0x01,
68279+ GR_STATUS_INIT = 0x00 // disabled state
68280+};
68281+
68282+/* Begin ACL declarations */
68283+
68284+/* Role flags */
68285+
68286+enum {
68287+ GR_ROLE_USER = 0x0001,
68288+ GR_ROLE_GROUP = 0x0002,
68289+ GR_ROLE_DEFAULT = 0x0004,
68290+ GR_ROLE_SPECIAL = 0x0008,
68291+ GR_ROLE_AUTH = 0x0010,
68292+ GR_ROLE_NOPW = 0x0020,
68293+ GR_ROLE_GOD = 0x0040,
68294+ GR_ROLE_LEARN = 0x0080,
68295+ GR_ROLE_TPE = 0x0100,
68296+ GR_ROLE_DOMAIN = 0x0200,
68297+ GR_ROLE_PAM = 0x0400,
68298+ GR_ROLE_PERSIST = 0x0800
68299+};
68300+
68301+/* ACL Subject and Object mode flags */
68302+enum {
68303+ GR_DELETED = 0x80000000
68304+};
68305+
68306+/* ACL Object-only mode flags */
68307+enum {
68308+ GR_READ = 0x00000001,
68309+ GR_APPEND = 0x00000002,
68310+ GR_WRITE = 0x00000004,
68311+ GR_EXEC = 0x00000008,
68312+ GR_FIND = 0x00000010,
68313+ GR_INHERIT = 0x00000020,
68314+ GR_SETID = 0x00000040,
68315+ GR_CREATE = 0x00000080,
68316+ GR_DELETE = 0x00000100,
68317+ GR_LINK = 0x00000200,
68318+ GR_AUDIT_READ = 0x00000400,
68319+ GR_AUDIT_APPEND = 0x00000800,
68320+ GR_AUDIT_WRITE = 0x00001000,
68321+ GR_AUDIT_EXEC = 0x00002000,
68322+ GR_AUDIT_FIND = 0x00004000,
68323+ GR_AUDIT_INHERIT= 0x00008000,
68324+ GR_AUDIT_SETID = 0x00010000,
68325+ GR_AUDIT_CREATE = 0x00020000,
68326+ GR_AUDIT_DELETE = 0x00040000,
68327+ GR_AUDIT_LINK = 0x00080000,
68328+ GR_PTRACERD = 0x00100000,
68329+ GR_NOPTRACE = 0x00200000,
68330+ GR_SUPPRESS = 0x00400000,
68331+ GR_NOLEARN = 0x00800000,
68332+ GR_INIT_TRANSFER= 0x01000000
68333+};
68334+
68335+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
68336+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
68337+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
68338+
68339+/* ACL subject-only mode flags */
68340+enum {
68341+ GR_KILL = 0x00000001,
68342+ GR_VIEW = 0x00000002,
68343+ GR_PROTECTED = 0x00000004,
68344+ GR_LEARN = 0x00000008,
68345+ GR_OVERRIDE = 0x00000010,
68346+ /* just a placeholder, this mode is only used in userspace */
68347+ GR_DUMMY = 0x00000020,
68348+ GR_PROTSHM = 0x00000040,
68349+ GR_KILLPROC = 0x00000080,
68350+ GR_KILLIPPROC = 0x00000100,
68351+ /* just a placeholder, this mode is only used in userspace */
68352+ GR_NOTROJAN = 0x00000200,
68353+ GR_PROTPROCFD = 0x00000400,
68354+ GR_PROCACCT = 0x00000800,
68355+ GR_RELAXPTRACE = 0x00001000,
68356+ //GR_NESTED = 0x00002000,
68357+ GR_INHERITLEARN = 0x00004000,
68358+ GR_PROCFIND = 0x00008000,
68359+ GR_POVERRIDE = 0x00010000,
68360+ GR_KERNELAUTH = 0x00020000,
68361+ GR_ATSECURE = 0x00040000,
68362+ GR_SHMEXEC = 0x00080000
68363+};
68364+
68365+enum {
68366+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
68367+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
68368+ GR_PAX_ENABLE_MPROTECT = 0x0004,
68369+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
68370+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
68371+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
68372+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
68373+ GR_PAX_DISABLE_MPROTECT = 0x0400,
68374+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
68375+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
68376+};
68377+
68378+enum {
68379+ GR_ID_USER = 0x01,
68380+ GR_ID_GROUP = 0x02,
68381+};
68382+
68383+enum {
68384+ GR_ID_ALLOW = 0x01,
68385+ GR_ID_DENY = 0x02,
68386+};
68387+
68388+#define GR_CRASH_RES 31
68389+#define GR_UIDTABLE_MAX 500
68390+
68391+/* begin resource learning section */
68392+enum {
68393+ GR_RLIM_CPU_BUMP = 60,
68394+ GR_RLIM_FSIZE_BUMP = 50000,
68395+ GR_RLIM_DATA_BUMP = 10000,
68396+ GR_RLIM_STACK_BUMP = 1000,
68397+ GR_RLIM_CORE_BUMP = 10000,
68398+ GR_RLIM_RSS_BUMP = 500000,
68399+ GR_RLIM_NPROC_BUMP = 1,
68400+ GR_RLIM_NOFILE_BUMP = 5,
68401+ GR_RLIM_MEMLOCK_BUMP = 50000,
68402+ GR_RLIM_AS_BUMP = 500000,
68403+ GR_RLIM_LOCKS_BUMP = 2,
68404+ GR_RLIM_SIGPENDING_BUMP = 5,
68405+ GR_RLIM_MSGQUEUE_BUMP = 10000,
68406+ GR_RLIM_NICE_BUMP = 1,
68407+ GR_RLIM_RTPRIO_BUMP = 1,
68408+ GR_RLIM_RTTIME_BUMP = 1000000
68409+};
68410+
68411+#endif
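Note the fixed relationship between the object modes and their audit twins: each GR_AUDIT_* flag is the corresponding base flag shifted left by ten bits (GR_READ 0x1 -> GR_AUDIT_READ 0x400, and so on through GR_LINK 0x200 -> GR_AUDIT_LINK 0x80000), which is presumably what to_gr_audit() in grinternal.h exploits. Checked in isolation:

#include <assert.h>

enum {
	GR_READ        = 0x00000001,
	GR_WRITE       = 0x00000004,
	GR_LINK        = 0x00000200,
	GR_AUDIT_READ  = 0x00000400,
	GR_AUDIT_WRITE = 0x00001000,
	GR_AUDIT_LINK  = 0x00080000,
};

int main(void)
{
	assert(GR_AUDIT_READ  == GR_READ  << 10);
	assert(GR_AUDIT_WRITE == GR_WRITE << 10);
	assert(GR_AUDIT_LINK  == GR_LINK  << 10);
	return 0;
}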
68412diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
68413new file mode 100644
68414index 0000000..9bb6662
68415--- /dev/null
68416+++ b/include/linux/grinternal.h
68417@@ -0,0 +1,215 @@
68418+#ifndef __GRINTERNAL_H
68419+#define __GRINTERNAL_H
68420+
68421+#ifdef CONFIG_GRKERNSEC
68422+
68423+#include <linux/fs.h>
68424+#include <linux/mnt_namespace.h>
68425+#include <linux/nsproxy.h>
68426+#include <linux/gracl.h>
68427+#include <linux/grdefs.h>
68428+#include <linux/grmsg.h>
68429+
68430+void gr_add_learn_entry(const char *fmt, ...)
68431+ __attribute__ ((format (printf, 1, 2)));
68432+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
68433+ const struct vfsmount *mnt);
68434+__u32 gr_check_create(const struct dentry *new_dentry,
68435+ const struct dentry *parent,
68436+ const struct vfsmount *mnt, const __u32 mode);
68437+int gr_check_protected_task(const struct task_struct *task);
68438+__u32 to_gr_audit(const __u32 reqmode);
68439+int gr_set_acls(const int type);
68440+int gr_apply_subject_to_task(struct task_struct *task);
68441+int gr_acl_is_enabled(void);
68442+char gr_roletype_to_char(void);
68443+
68444+void gr_handle_alertkill(struct task_struct *task);
68445+char *gr_to_filename(const struct dentry *dentry,
68446+ const struct vfsmount *mnt);
68447+char *gr_to_filename1(const struct dentry *dentry,
68448+ const struct vfsmount *mnt);
68449+char *gr_to_filename2(const struct dentry *dentry,
68450+ const struct vfsmount *mnt);
68451+char *gr_to_filename3(const struct dentry *dentry,
68452+ const struct vfsmount *mnt);
68453+
68454+extern int grsec_enable_ptrace_readexec;
68455+extern int grsec_enable_harden_ptrace;
68456+extern int grsec_enable_link;
68457+extern int grsec_enable_fifo;
68458+extern int grsec_enable_execve;
68459+extern int grsec_enable_shm;
68460+extern int grsec_enable_execlog;
68461+extern int grsec_enable_signal;
68462+extern int grsec_enable_audit_ptrace;
68463+extern int grsec_enable_forkfail;
68464+extern int grsec_enable_time;
68465+extern int grsec_enable_rofs;
68466+extern int grsec_enable_chroot_shmat;
68467+extern int grsec_enable_chroot_mount;
68468+extern int grsec_enable_chroot_double;
68469+extern int grsec_enable_chroot_pivot;
68470+extern int grsec_enable_chroot_chdir;
68471+extern int grsec_enable_chroot_chmod;
68472+extern int grsec_enable_chroot_mknod;
68473+extern int grsec_enable_chroot_fchdir;
68474+extern int grsec_enable_chroot_nice;
68475+extern int grsec_enable_chroot_execlog;
68476+extern int grsec_enable_chroot_caps;
68477+extern int grsec_enable_chroot_sysctl;
68478+extern int grsec_enable_chroot_unix;
68479+extern int grsec_enable_symlinkown;
68480+extern kgid_t grsec_symlinkown_gid;
68481+extern int grsec_enable_tpe;
68482+extern kgid_t grsec_tpe_gid;
68483+extern int grsec_enable_tpe_all;
68484+extern int grsec_enable_tpe_invert;
68485+extern int grsec_enable_socket_all;
68486+extern kgid_t grsec_socket_all_gid;
68487+extern int grsec_enable_socket_client;
68488+extern kgid_t grsec_socket_client_gid;
68489+extern int grsec_enable_socket_server;
68490+extern kgid_t grsec_socket_server_gid;
68491+extern kgid_t grsec_audit_gid;
68492+extern int grsec_enable_group;
68493+extern int grsec_enable_audit_textrel;
68494+extern int grsec_enable_log_rwxmaps;
68495+extern int grsec_enable_mount;
68496+extern int grsec_enable_chdir;
68497+extern int grsec_resource_logging;
68498+extern int grsec_enable_blackhole;
68499+extern int grsec_lastack_retries;
68500+extern int grsec_enable_brute;
68501+extern int grsec_lock;
68502+
68503+extern spinlock_t grsec_alert_lock;
68504+extern unsigned long grsec_alert_wtime;
68505+extern unsigned long grsec_alert_fyet;
68506+
68507+extern spinlock_t grsec_audit_lock;
68508+
68509+extern rwlock_t grsec_exec_file_lock;
68510+
68511+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
68512+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
68513+ (tsk)->exec_file->f_vfsmnt) : "/")
68514+
68515+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
68516+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
68517+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
68518+
68519+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
68520+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
68521+ (tsk)->exec_file->f_vfsmnt) : "/")
68522+
68523+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
68524+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
68525+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
68526+
68527+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
68528+
68529+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
68530+
68531+#define GR_CHROOT_CAPS {{ \
68532+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
68533+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
68534+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
68535+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
68536+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
68537+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
68538+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
68539+
68540+#define security_learn(normal_msg,args...) \
68541+({ \
68542+ read_lock(&grsec_exec_file_lock); \
68543+ gr_add_learn_entry(normal_msg "\n", ## args); \
68544+ read_unlock(&grsec_exec_file_lock); \
68545+})
68546+
68547+enum {
68548+ GR_DO_AUDIT,
68549+ GR_DONT_AUDIT,
68550+ /* used for non-audit messages that we shouldn't kill the task on */
68551+ GR_DONT_AUDIT_GOOD
68552+};
68553+
68554+enum {
68555+ GR_TTYSNIFF,
68556+ GR_RBAC,
68557+ GR_RBAC_STR,
68558+ GR_STR_RBAC,
68559+ GR_RBAC_MODE2,
68560+ GR_RBAC_MODE3,
68561+ GR_FILENAME,
68562+ GR_SYSCTL_HIDDEN,
68563+ GR_NOARGS,
68564+ GR_ONE_INT,
68565+ GR_ONE_INT_TWO_STR,
68566+ GR_ONE_STR,
68567+ GR_STR_INT,
68568+ GR_TWO_STR_INT,
68569+ GR_TWO_INT,
68570+ GR_TWO_U64,
68571+ GR_THREE_INT,
68572+ GR_FIVE_INT_TWO_STR,
68573+ GR_TWO_STR,
68574+ GR_THREE_STR,
68575+ GR_FOUR_STR,
68576+ GR_STR_FILENAME,
68577+ GR_FILENAME_STR,
68578+ GR_FILENAME_TWO_INT,
68579+ GR_FILENAME_TWO_INT_STR,
68580+ GR_TEXTREL,
68581+ GR_PTRACE,
68582+ GR_RESOURCE,
68583+ GR_CAP,
68584+ GR_SIG,
68585+ GR_SIG2,
68586+ GR_CRASH1,
68587+ GR_CRASH2,
68588+ GR_PSACCT,
68589+ GR_RWXMAP
68590+};
68591+
68592+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
68593+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
68594+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
68595+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
68596+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
68597+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
68598+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
68599+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
68600+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
68601+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
68602+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
68603+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
68604+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
68605+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
68606+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
68607+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
68608+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
68609+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
68610+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
68611+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
68612+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
68613+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
68614+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
68615+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
68616+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
68617+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
68618+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
68619+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
68620+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
68621+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
68622+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
68623+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
68624+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
68625+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
68626+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
68627+
68628+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
68629+
68630+#endif
68631+
68632+#endif
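All of the gr_log_* convenience macros funnel into the single varargs sink gr_log_varargs(), with the argtypes enum telling it how to pull values off the list. A compact userspace analog of that dispatch pattern:

#include <stdarg.h>
#include <stdio.h>

enum { LOG_NOARGS, LOG_ONE_INT, LOG_TWO_STR };

/* Analog of gr_log_varargs(): argtypes selects how the va_list is consumed. */
static void log_varargs(const char *msg, int argtypes, ...)
{
	va_list ap;

	va_start(ap, argtypes);
	switch (argtypes) {
	case LOG_ONE_INT:
		printf(msg, va_arg(ap, int));
		break;
	case LOG_TWO_STR: {
		const char *s1 = va_arg(ap, const char *);
		const char *s2 = va_arg(ap, const char *);
		printf(msg, s1, s2);
		break;
	}
	default:
		fputs(msg, stdout);
		break;
	}
	va_end(ap);
}

#define log_int(msg, num)      log_varargs(msg, LOG_ONE_INT, num)
#define log_str_str(msg, a, b) log_varargs(msg, LOG_TWO_STR, a, b)

int main(void)
{
	log_int("signal %d sent\n", 9);
	log_str_str("mount of %s to %s\n", "/dev/sda1", "/mnt");
	return 0;
}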
68633diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
68634new file mode 100644
68635index 0000000..2bd4c8d
68636--- /dev/null
68637+++ b/include/linux/grmsg.h
68638@@ -0,0 +1,111 @@
68639+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
68640+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
68641+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
68642+#define GR_STOPMOD_MSG "denied modification of module state by "
68643+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
68644+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
68645+#define GR_IOPERM_MSG "denied use of ioperm() by "
68646+#define GR_IOPL_MSG "denied use of iopl() by "
68647+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
68648+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
68649+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
68650+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
68651+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
68652+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
68653+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
68654+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
68655+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
68656+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
68657+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
68658+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
68659+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
68660+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
68661+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
68662+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
68663+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
68664+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
68665+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
68666+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
68667+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
68668+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
68669+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
68670+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
68671+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
68672+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
68673+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
68674+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
68675+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
68676+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
68677+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
68678+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
68679+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
68680+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
68681+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
68682+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
68683+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
68684+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
68685+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
68686+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
68687+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
68688+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
68689+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
68690+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
68691+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
68692+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
68693+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
68694+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
68695+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
68696+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
68697+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
68698+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
68699+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
68700+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
68701+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
68702+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
68703+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
68704+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
68705+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
68706+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
68707+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
68708+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
68709+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
68710+#define GR_FAILFORK_MSG "failed fork with errno %s by "
68711+#define GR_NICE_CHROOT_MSG "denied priority change by "
68712+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
68713+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
68714+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
68715+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
68716+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
68717+#define GR_TIME_MSG "time set by "
68718+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
68719+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
68720+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
68721+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
68722+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
68723+#define GR_BIND_MSG "denied bind() by "
68724+#define GR_CONNECT_MSG "denied connect() by "
68725+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
68726+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
68727+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
68728+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
68729+#define GR_CAP_ACL_MSG "use of %s denied for "
68730+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
68731+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
68732+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
68733+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
68734+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
68735+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
68736+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
68737+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
68738+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
68739+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
68740+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
68741+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
68742+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
68743+#define GR_VM86_MSG "denied use of vm86 by "
68744+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
68745+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
68746+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
68747+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
68748+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
68749+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
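Several of these formats splice DEFAULTSECMSG in through C's adjacent-string-literal concatenation, so e.g. GR_DUALSIGLOG_MSG is really one long format with DEFAULTSECMSG's conversion specifiers embedded mid-string, and callers must supply arguments for both halves. The mechanism in miniature (with a trimmed-down stand-in format):

#include <stdio.h>

#define DEFAULTSECMSG  "%.16s:%d"  /* stand-in for the much longer real format */
#define DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "

int main(void)
{
	/* The literals concatenate at compile time into a single format: */
	printf(DUALSIGLOG_MSG "%s\n", 9, "sshd", 1234, "root");
	return 0;
}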
68750diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
68751new file mode 100644
68752index 0000000..8da63a4
68753--- /dev/null
68754+++ b/include/linux/grsecurity.h
68755@@ -0,0 +1,242 @@
68756+#ifndef GR_SECURITY_H
68757+#define GR_SECURITY_H
68758+#include <linux/fs.h>
68759+#include <linux/fs_struct.h>
68760+#include <linux/binfmts.h>
68761+#include <linux/gracl.h>
68762+
68763+/* notify of brain-dead configs */
68764+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68765+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
68766+#endif
68767+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
68768+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
68769+#endif
68770+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
68771+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
68772+#endif
68773+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
68774+#error "CONFIG_PAX enabled, but no PaX options are enabled."
68775+#endif
68776+
68777+void gr_handle_brute_attach(unsigned long mm_flags);
68778+void gr_handle_brute_check(void);
68779+void gr_handle_kernel_exploit(void);
68780+int gr_process_user_ban(void);
68781+
68782+char gr_roletype_to_char(void);
68783+
68784+int gr_acl_enable_at_secure(void);
68785+
68786+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
68787+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
68788+
68789+void gr_del_task_from_ip_table(struct task_struct *p);
68790+
68791+int gr_pid_is_chrooted(struct task_struct *p);
68792+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
68793+int gr_handle_chroot_nice(void);
68794+int gr_handle_chroot_sysctl(const int op);
68795+int gr_handle_chroot_setpriority(struct task_struct *p,
68796+ const int niceval);
68797+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
68798+int gr_handle_chroot_chroot(const struct dentry *dentry,
68799+ const struct vfsmount *mnt);
68800+void gr_handle_chroot_chdir(struct path *path);
68801+int gr_handle_chroot_chmod(const struct dentry *dentry,
68802+ const struct vfsmount *mnt, const int mode);
68803+int gr_handle_chroot_mknod(const struct dentry *dentry,
68804+ const struct vfsmount *mnt, const int mode);
68805+int gr_handle_chroot_mount(const struct dentry *dentry,
68806+ const struct vfsmount *mnt,
68807+ const char *dev_name);
68808+int gr_handle_chroot_pivot(void);
68809+int gr_handle_chroot_unix(const pid_t pid);
68810+
68811+int gr_handle_rawio(const struct inode *inode);
68812+
68813+void gr_handle_ioperm(void);
68814+void gr_handle_iopl(void);
68815+
68816+umode_t gr_acl_umask(void);
68817+
68818+int gr_tpe_allow(const struct file *file);
68819+
68820+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
68821+void gr_clear_chroot_entries(struct task_struct *task);
68822+
68823+void gr_log_forkfail(const int retval);
68824+void gr_log_timechange(void);
68825+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
68826+void gr_log_chdir(const struct dentry *dentry,
68827+ const struct vfsmount *mnt);
68828+void gr_log_chroot_exec(const struct dentry *dentry,
68829+ const struct vfsmount *mnt);
68830+void gr_log_remount(const char *devname, const int retval);
68831+void gr_log_unmount(const char *devname, const int retval);
68832+void gr_log_mount(const char *from, const char *to, const int retval);
68833+void gr_log_textrel(struct vm_area_struct *vma);
68834+void gr_log_rwxmmap(struct file *file);
68835+void gr_log_rwxmprotect(struct file *file);
68836+
68837+int gr_handle_follow_link(const struct inode *parent,
68838+ const struct inode *inode,
68839+ const struct dentry *dentry,
68840+ const struct vfsmount *mnt);
68841+int gr_handle_fifo(const struct dentry *dentry,
68842+ const struct vfsmount *mnt,
68843+ const struct dentry *dir, const int flag,
68844+ const int acc_mode);
68845+int gr_handle_hardlink(const struct dentry *dentry,
68846+ const struct vfsmount *mnt,
68847+ struct inode *inode,
68848+ const int mode, const struct filename *to);
68849+
68850+int gr_is_capable(const int cap);
68851+int gr_is_capable_nolog(const int cap);
68852+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
68853+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
68854+
68855+void gr_copy_label(struct task_struct *tsk);
68856+void gr_handle_crash(struct task_struct *task, const int sig);
68857+int gr_handle_signal(const struct task_struct *p, const int sig);
68858+int gr_check_crash_uid(const kuid_t uid);
68859+int gr_check_protected_task(const struct task_struct *task);
68860+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
68861+int gr_acl_handle_mmap(const struct file *file,
68862+ const unsigned long prot);
68863+int gr_acl_handle_mprotect(const struct file *file,
68864+ const unsigned long prot);
68865+int gr_check_hidden_task(const struct task_struct *tsk);
68866+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
68867+ const struct vfsmount *mnt);
68868+__u32 gr_acl_handle_utime(const struct dentry *dentry,
68869+ const struct vfsmount *mnt);
68870+__u32 gr_acl_handle_access(const struct dentry *dentry,
68871+ const struct vfsmount *mnt, const int fmode);
68872+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
68873+ const struct vfsmount *mnt, umode_t *mode);
68874+__u32 gr_acl_handle_chown(const struct dentry *dentry,
68875+ const struct vfsmount *mnt);
68876+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
68877+ const struct vfsmount *mnt);
68878+int gr_handle_ptrace(struct task_struct *task, const long request);
68879+int gr_handle_proc_ptrace(struct task_struct *task);
68880+__u32 gr_acl_handle_execve(const struct dentry *dentry,
68881+ const struct vfsmount *mnt);
68882+int gr_check_crash_exec(const struct file *filp);
68883+int gr_acl_is_enabled(void);
68884+void gr_set_kernel_label(struct task_struct *task);
68885+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
68886+ const kgid_t gid);
68887+int gr_set_proc_label(const struct dentry *dentry,
68888+ const struct vfsmount *mnt,
68889+ const int unsafe_flags);
68890+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
68891+ const struct vfsmount *mnt);
68892+__u32 gr_acl_handle_open(const struct dentry *dentry,
68893+ const struct vfsmount *mnt, int acc_mode);
68894+__u32 gr_acl_handle_creat(const struct dentry *dentry,
68895+ const struct dentry *p_dentry,
68896+ const struct vfsmount *p_mnt,
68897+ int open_flags, int acc_mode, const int imode);
68898+void gr_handle_create(const struct dentry *dentry,
68899+ const struct vfsmount *mnt);
68900+void gr_handle_proc_create(const struct dentry *dentry,
68901+ const struct inode *inode);
68902+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
68903+ const struct dentry *parent_dentry,
68904+ const struct vfsmount *parent_mnt,
68905+ const int mode);
68906+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
68907+ const struct dentry *parent_dentry,
68908+ const struct vfsmount *parent_mnt);
68909+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
68910+ const struct vfsmount *mnt);
68911+void gr_handle_delete(const ino_t ino, const dev_t dev);
68912+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
68913+ const struct vfsmount *mnt);
68914+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
68915+ const struct dentry *parent_dentry,
68916+ const struct vfsmount *parent_mnt,
68917+ const struct filename *from);
68918+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
68919+ const struct dentry *parent_dentry,
68920+ const struct vfsmount *parent_mnt,
68921+ const struct dentry *old_dentry,
68922+ const struct vfsmount *old_mnt, const struct filename *to);
68923+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
68924+int gr_acl_handle_rename(struct dentry *new_dentry,
68925+ struct dentry *parent_dentry,
68926+ const struct vfsmount *parent_mnt,
68927+ struct dentry *old_dentry,
68928+ struct inode *old_parent_inode,
68929+ struct vfsmount *old_mnt, const struct filename *newname);
68930+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
68931+ struct dentry *old_dentry,
68932+ struct dentry *new_dentry,
68933+ struct vfsmount *mnt, const __u8 replace);
68934+__u32 gr_check_link(const struct dentry *new_dentry,
68935+ const struct dentry *parent_dentry,
68936+ const struct vfsmount *parent_mnt,
68937+ const struct dentry *old_dentry,
68938+ const struct vfsmount *old_mnt);
68939+int gr_acl_handle_filldir(const struct file *file, const char *name,
68940+ const unsigned int namelen, const ino_t ino);
68941+
68942+__u32 gr_acl_handle_unix(const struct dentry *dentry,
68943+ const struct vfsmount *mnt);
68944+void gr_acl_handle_exit(void);
68945+void gr_acl_handle_psacct(struct task_struct *task, const long code);
68946+int gr_acl_handle_procpidmem(const struct task_struct *task);
68947+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
68948+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
68949+void gr_audit_ptrace(struct task_struct *task);
68950+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
68951+void gr_put_exec_file(struct task_struct *task);
68952+
68953+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
68954+
68955+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
68956+extern void gr_learn_resource(const struct task_struct *task, const int res,
68957+ const unsigned long wanted, const int gt);
68958+#else
68959+static inline void gr_learn_resource(const struct task_struct *task, const int res,
68960+ const unsigned long wanted, const int gt)
68961+{
68962+}
68963+#endif
68964+
68965+#ifdef CONFIG_GRKERNSEC_RESLOG
68966+extern void gr_log_resource(const struct task_struct *task, const int res,
68967+ const unsigned long wanted, const int gt);
68968+#else
68969+static inline void gr_log_resource(const struct task_struct *task, const int res,
68970+ const unsigned long wanted, const int gt)
68971+{
68972+}
68973+#endif
68974+
68975+#ifdef CONFIG_GRKERNSEC
68976+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
68977+void gr_handle_vm86(void);
68978+void gr_handle_mem_readwrite(u64 from, u64 to);
68979+
68980+void gr_log_badprocpid(const char *entry);
68981+
68982+extern int grsec_enable_dmesg;
68983+extern int grsec_disable_privio;
68984+
68985+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
68986+extern kgid_t grsec_proc_gid;
68987+#endif
68988+
68989+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
68990+extern int grsec_enable_chroot_findtask;
68991+#endif
68992+#ifdef CONFIG_GRKERNSEC_SETXID
68993+extern int grsec_enable_setxid;
68994+#endif
68995+#endif
68996+
68997+#endif
68998diff --git a/include/linux/grsock.h b/include/linux/grsock.h
68999new file mode 100644
69000index 0000000..e7ffaaf
69001--- /dev/null
69002+++ b/include/linux/grsock.h
69003@@ -0,0 +1,19 @@
69004+#ifndef __GRSOCK_H
69005+#define __GRSOCK_H
69006+
69007+extern void gr_attach_curr_ip(const struct sock *sk);
69008+extern int gr_handle_sock_all(const int family, const int type,
69009+ const int protocol);
69010+extern int gr_handle_sock_server(const struct sockaddr *sck);
69011+extern int gr_handle_sock_server_other(const struct sock *sck);
69012+extern int gr_handle_sock_client(const struct sockaddr *sck);
69013+extern int gr_search_connect(struct socket * sock,
69014+ struct sockaddr_in * addr);
69015+extern int gr_search_bind(struct socket * sock,
69016+ struct sockaddr_in * addr);
69017+extern int gr_search_listen(struct socket * sock);
69018+extern int gr_search_accept(struct socket * sock);
69019+extern int gr_search_socket(const int domain, const int type,
69020+ const int protocol);
69021+
69022+#endif
69023diff --git a/include/linux/highmem.h b/include/linux/highmem.h
69024index ef788b5..ac41b7b 100644
69025--- a/include/linux/highmem.h
69026+++ b/include/linux/highmem.h
69027@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
69028 kunmap_atomic(kaddr);
69029 }
69030
69031+static inline void sanitize_highpage(struct page *page)
69032+{
69033+ void *kaddr;
69034+ unsigned long flags;
69035+
69036+ local_irq_save(flags);
69037+ kaddr = kmap_atomic(page);
69038+ clear_page(kaddr);
69039+ kunmap_atomic(kaddr);
69040+ local_irq_restore(flags);
69041+}
69042+
69043 static inline void zero_user_segments(struct page *page,
69044 unsigned start1, unsigned end1,
69045 unsigned start2, unsigned end2)
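Unlike clear_highpage() just above, the new sanitize_highpage() wraps the clear in local_irq_save()/local_irq_restore() so the atomic kmap cannot be disturbed by an interrupt. It backs page scrubbing on the free path; a minimal sketch of such a caller, assuming CONFIG_PAX_MEMORY_SANITIZE (the option and the call site are not part of this hunk):

/* sketch (assumed caller): scrub an order-N block as it is freed so
 * stale contents cannot leak into the next user of the pages */
static void sanitize_free_block(struct page *page, unsigned int order)
{
	unsigned long i;

	for (i = 0; i < (1UL << order); i++)
		sanitize_highpage(page + i);
}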
69046diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
69047index 1c7b89a..7f52502 100644
69048--- a/include/linux/hwmon-sysfs.h
69049+++ b/include/linux/hwmon-sysfs.h
69050@@ -25,7 +25,8 @@
69051 struct sensor_device_attribute{
69052 struct device_attribute dev_attr;
69053 int index;
69054-};
69055+} __do_const;
69056+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
69057 #define to_sensor_dev_attr(_dev_attr) \
69058 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
69059
69060@@ -41,7 +42,7 @@ struct sensor_device_attribute_2 {
69061 struct device_attribute dev_attr;
69062 u8 index;
69063 u8 nr;
69064-};
69065+} __do_const;
69066 #define to_sensor_dev_attr_2(_dev_attr) \
69067 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
69068
69069diff --git a/include/linux/i2c.h b/include/linux/i2c.h
69070index d0c4db7..61b3577 100644
69071--- a/include/linux/i2c.h
69072+++ b/include/linux/i2c.h
69073@@ -369,6 +369,7 @@ struct i2c_algorithm {
69074 /* To determine what the adapter supports */
69075 u32 (*functionality) (struct i2c_adapter *);
69076 };
69077+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
69078
69079 /*
69080 * i2c_adapter is the structure used to identify a physical i2c bus along
69081diff --git a/include/linux/i2o.h b/include/linux/i2o.h
69082index d23c3c2..eb63c81 100644
69083--- a/include/linux/i2o.h
69084+++ b/include/linux/i2o.h
69085@@ -565,7 +565,7 @@ struct i2o_controller {
69086 struct i2o_device *exec; /* Executive */
69087 #if BITS_PER_LONG == 64
69088 spinlock_t context_list_lock; /* lock for context_list */
69089- atomic_t context_list_counter; /* needed for unique contexts */
69090+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
69091 struct list_head context_list; /* list of context id's
69092 and pointers */
69093 #endif
69094diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
69095index aff7ad8..3942bbd 100644
69096--- a/include/linux/if_pppox.h
69097+++ b/include/linux/if_pppox.h
69098@@ -76,7 +76,7 @@ struct pppox_proto {
69099 int (*ioctl)(struct socket *sock, unsigned int cmd,
69100 unsigned long arg);
69101 struct module *owner;
69102-};
69103+} __do_const;
69104
69105 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
69106 extern void unregister_pppox_proto(int proto_num);
69107diff --git a/include/linux/init.h b/include/linux/init.h
69108index 10ed4f4..8e8490d 100644
69109--- a/include/linux/init.h
69110+++ b/include/linux/init.h
69111@@ -39,9 +39,36 @@
69112 * Also note, that this data cannot be "const".
69113 */
69114
69115+#ifdef MODULE
69116+#define add_init_latent_entropy
69117+#define add_devinit_latent_entropy
69118+#define add_cpuinit_latent_entropy
69119+#define add_meminit_latent_entropy
69120+#else
69121+#define add_init_latent_entropy __latent_entropy
69122+
69123+#ifdef CONFIG_HOTPLUG
69124+#define add_devinit_latent_entropy
69125+#else
69126+#define add_devinit_latent_entropy __latent_entropy
69127+#endif
69128+
69129+#ifdef CONFIG_HOTPLUG_CPU
69130+#define add_cpuinit_latent_entropy
69131+#else
69132+#define add_cpuinit_latent_entropy __latent_entropy
69133+#endif
69134+
69135+#ifdef CONFIG_MEMORY_HOTPLUG
69136+#define add_meminit_latent_entropy
69137+#else
69138+#define add_meminit_latent_entropy __latent_entropy
69139+#endif
69140+#endif
69141+
69142 /* These are for everybody (although not all archs will actually
69143 discard it in modules) */
69144-#define __init __section(.init.text) __cold notrace
69145+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
69146 #define __initdata __section(.init.data)
69147 #define __initconst __constsection(.init.rodata)
69148 #define __exitdata __section(.exit.data)
69149@@ -94,7 +121,7 @@
69150 #define __exit __section(.exit.text) __exitused __cold notrace
69151
69152 /* Used for HOTPLUG_CPU */
69153-#define __cpuinit __section(.cpuinit.text) __cold notrace
69154+#define __cpuinit __section(.cpuinit.text) __cold notrace add_cpuinit_latent_entropy
69155 #define __cpuinitdata __section(.cpuinit.data)
69156 #define __cpuinitconst __constsection(.cpuinit.rodata)
69157 #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
69158@@ -102,7 +129,7 @@
69159 #define __cpuexitconst __constsection(.cpuexit.rodata)
69160
69161 /* Used for MEMORY_HOTPLUG */
69162-#define __meminit __section(.meminit.text) __cold notrace
69163+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
69164 #define __meminitdata __section(.meminit.data)
69165 #define __meminitconst __constsection(.meminit.rodata)
69166 #define __memexit __section(.memexit.text) __exitused __cold notrace
69167diff --git a/include/linux/init_task.h b/include/linux/init_task.h
69168index 6d087c5..401cab8 100644
69169--- a/include/linux/init_task.h
69170+++ b/include/linux/init_task.h
69171@@ -143,6 +143,12 @@ extern struct task_group root_task_group;
69172
69173 #define INIT_TASK_COMM "swapper"
69174
69175+#ifdef CONFIG_X86
69176+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
69177+#else
69178+#define INIT_TASK_THREAD_INFO
69179+#endif
69180+
69181 /*
69182 * INIT_TASK is used to set up the first task table, touch at
69183 * your own risk!. Base=0, limit=0x1fffff (=2MB)
69184@@ -182,6 +188,7 @@ extern struct task_group root_task_group;
69185 RCU_POINTER_INITIALIZER(cred, &init_cred), \
69186 .comm = INIT_TASK_COMM, \
69187 .thread = INIT_THREAD, \
69188+ INIT_TASK_THREAD_INFO \
69189 .fs = &init_fs, \
69190 .files = &init_files, \
69191 .signal = &init_signals, \
69192diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
69193index 5fa5afe..ac55b25 100644
69194--- a/include/linux/interrupt.h
69195+++ b/include/linux/interrupt.h
69196@@ -430,7 +430,7 @@ enum
69197 /* map softirq index to softirq name. update 'softirq_to_name' in
69198 * kernel/softirq.c when adding a new softirq.
69199 */
69200-extern char *softirq_to_name[NR_SOFTIRQS];
69201+extern const char * const softirq_to_name[NR_SOFTIRQS];
69202
69203 /* softirq mask and active fields moved to irq_cpustat_t in
69204 * asm/hardirq.h to get better cache usage. KAO
69205@@ -438,12 +438,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
69206
69207 struct softirq_action
69208 {
69209- void (*action)(struct softirq_action *);
69210-};
69211+ void (*action)(void);
69212+} __no_const;
69213
69214 asmlinkage void do_softirq(void);
69215 asmlinkage void __do_softirq(void);
69216-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
69217+extern void open_softirq(int nr, void (*action)(void));
69218 extern void softirq_init(void);
69219 extern void __raise_softirq_irqoff(unsigned int nr);
69220
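Since no in-tree handler used the struct softirq_action * argument, the patch simplifies the callback above to take no arguments, and the added __no_const keeps the runtime-populated softirq vector writable while other ops tables are constified. A minimal registration sketch against the new signature (EXAMPLE_SOFTIRQ and the handler name are hypothetical):

/* sketch: a handler matching the new void (*action)(void) signature */
static void example_softirq_action(void)
{
	/* bottom-half work runs here in softirq context */
}

/* registered once during boot; EXAMPLE_SOFTIRQ is a placeholder index */
open_softirq(EXAMPLE_SOFTIRQ, example_softirq_action);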
69221diff --git a/include/linux/iommu.h b/include/linux/iommu.h
69222index f3b99e1..9b73cee 100644
69223--- a/include/linux/iommu.h
69224+++ b/include/linux/iommu.h
69225@@ -101,7 +101,7 @@ struct iommu_ops {
69226 int (*domain_set_attr)(struct iommu_domain *domain,
69227 enum iommu_attr attr, void *data);
69228 unsigned long pgsize_bitmap;
69229-};
69230+} __do_const;
69231
69232 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
69233 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
69234diff --git a/include/linux/irq.h b/include/linux/irq.h
69235index fdf2c4a..5332486 100644
69236--- a/include/linux/irq.h
69237+++ b/include/linux/irq.h
69238@@ -328,7 +328,8 @@ struct irq_chip {
69239 void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
69240
69241 unsigned long flags;
69242-};
69243+} __do_const;
69244+typedef struct irq_chip __no_const irq_chip_no_const;
69245
69246 /*
69247 * irq_chip specific flags
69248diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
69249index 6883e19..06992b1 100644
69250--- a/include/linux/kallsyms.h
69251+++ b/include/linux/kallsyms.h
69252@@ -15,7 +15,8 @@
69253
69254 struct module;
69255
69256-#ifdef CONFIG_KALLSYMS
69257+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
69258+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
69259 /* Lookup the address for a symbol. Returns 0 if not found. */
69260 unsigned long kallsyms_lookup_name(const char *name);
69261
69262@@ -106,6 +107,17 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
69263 /* Stupid that this does nothing, but I didn't create this mess. */
69264 #define __print_symbol(fmt, addr)
69265 #endif /*CONFIG_KALLSYMS*/
69266+#else /* when included by kallsyms.c, vsnprintf.c, or
69267+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
69268+extern void __print_symbol(const char *fmt, unsigned long address);
69269+extern int sprint_backtrace(char *buffer, unsigned long address);
69270+extern int sprint_symbol(char *buffer, unsigned long address);
69271+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
69272+const char *kallsyms_lookup(unsigned long addr,
69273+ unsigned long *symbolsize,
69274+ unsigned long *offset,
69275+ char **modname, char *namebuf);
69276+#endif
69277
69278 /* This macro allows us to keep printk typechecking */
69279 static __printf(1, 2)
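Under CONFIG_GRKERNSEC_HIDESYM, the double #if above hands ordinary includers only the stub macros, while the few files that legitimately resolve symbols opt back in through the guard named in the hunk's comment. A minimal sketch of that opt-in, as done near the top of kernel/kallsyms.c per the comment (exact placement assumed, not shown in this hunk):

/* sketch: opt back in to the real kallsyms prototypes under HIDESYM */
#define __INCLUDED_BY_HIDESYM 1
#include <linux/kallsyms.h>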
69280diff --git a/include/linux/key-type.h b/include/linux/key-type.h
69281index 518a53a..5e28358 100644
69282--- a/include/linux/key-type.h
69283+++ b/include/linux/key-type.h
69284@@ -125,7 +125,7 @@ struct key_type {
69285 /* internal fields */
69286 struct list_head link; /* link in types list */
69287 struct lock_class_key lock_class; /* key->sem lock class */
69288-};
69289+} __do_const;
69290
69291 extern struct key_type key_type_keyring;
69292
69293diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
69294index 4dff0c6..1ca9b72 100644
69295--- a/include/linux/kgdb.h
69296+++ b/include/linux/kgdb.h
69297@@ -53,7 +53,7 @@ extern int kgdb_connected;
69298 extern int kgdb_io_module_registered;
69299
69300 extern atomic_t kgdb_setting_breakpoint;
69301-extern atomic_t kgdb_cpu_doing_single_step;
69302+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
69303
69304 extern struct task_struct *kgdb_usethread;
69305 extern struct task_struct *kgdb_contthread;
69306@@ -255,7 +255,7 @@ struct kgdb_arch {
69307 void (*correct_hw_break)(void);
69308
69309 void (*enable_nmi)(bool on);
69310-};
69311+} __do_const;
69312
69313 /**
69314 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
69315@@ -280,7 +280,7 @@ struct kgdb_io {
69316 void (*pre_exception) (void);
69317 void (*post_exception) (void);
69318 int is_console;
69319-};
69320+} __do_const;
69321
69322 extern struct kgdb_arch arch_kgdb_ops;
69323
69324diff --git a/include/linux/kmod.h b/include/linux/kmod.h
69325index 5398d58..5883a34 100644
69326--- a/include/linux/kmod.h
69327+++ b/include/linux/kmod.h
69328@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
69329 * usually useless though. */
69330 extern __printf(2, 3)
69331 int __request_module(bool wait, const char *name, ...);
69332+extern __printf(3, 4)
69333+int ___request_module(bool wait, char *param_name, const char *name, ...);
69334 #define request_module(mod...) __request_module(true, mod)
69335 #define request_module_nowait(mod...) __request_module(false, mod)
69336 #define try_then_request_module(x, mod...) \
69337diff --git a/include/linux/kobject.h b/include/linux/kobject.h
69338index 939b112..ed6ed51 100644
69339--- a/include/linux/kobject.h
69340+++ b/include/linux/kobject.h
69341@@ -111,7 +111,7 @@ struct kobj_type {
69342 struct attribute **default_attrs;
69343 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
69344 const void *(*namespace)(struct kobject *kobj);
69345-};
69346+} __do_const;
69347
69348 struct kobj_uevent_env {
69349 char *envp[UEVENT_NUM_ENVP];
69350@@ -134,6 +134,7 @@ struct kobj_attribute {
69351 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
69352 const char *buf, size_t count);
69353 };
69354+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
69355
69356 extern const struct sysfs_ops kobj_sysfs_ops;
69357
69358diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
69359index f66b065..c2c29b4 100644
69360--- a/include/linux/kobject_ns.h
69361+++ b/include/linux/kobject_ns.h
69362@@ -43,7 +43,7 @@ struct kobj_ns_type_operations {
69363 const void *(*netlink_ns)(struct sock *sk);
69364 const void *(*initial_ns)(void);
69365 void (*drop_ns)(void *);
69366-};
69367+} __do_const;
69368
69369 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
69370 int kobj_ns_type_registered(enum kobj_ns_type type);
69371diff --git a/include/linux/kref.h b/include/linux/kref.h
69372index 4972e6e..de4d19b 100644
69373--- a/include/linux/kref.h
69374+++ b/include/linux/kref.h
69375@@ -64,7 +64,7 @@ static inline void kref_get(struct kref *kref)
69376 static inline int kref_sub(struct kref *kref, unsigned int count,
69377 void (*release)(struct kref *kref))
69378 {
69379- WARN_ON(release == NULL);
69380+ BUG_ON(release == NULL);
69381
69382 if (atomic_sub_and_test((int) count, &kref->refcount)) {
69383 release(kref);
69384diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
69385index 2c497ab..afe32f5 100644
69386--- a/include/linux/kvm_host.h
69387+++ b/include/linux/kvm_host.h
69388@@ -418,7 +418,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
69389 int __must_check vcpu_load(struct kvm_vcpu *vcpu);
69390 void vcpu_put(struct kvm_vcpu *vcpu);
69391
69392-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
69393+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
69394 struct module *module);
69395 void kvm_exit(void);
69396
69397@@ -574,7 +574,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
69398 struct kvm_guest_debug *dbg);
69399 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
69400
69401-int kvm_arch_init(void *opaque);
69402+int kvm_arch_init(const void *opaque);
69403 void kvm_arch_exit(void);
69404
69405 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
69406diff --git a/include/linux/libata.h b/include/linux/libata.h
69407index 649e5f8..ead5194 100644
69408--- a/include/linux/libata.h
69409+++ b/include/linux/libata.h
69410@@ -915,7 +915,7 @@ struct ata_port_operations {
69411 * fields must be pointers.
69412 */
69413 const struct ata_port_operations *inherits;
69414-};
69415+} __do_const;
69416
69417 struct ata_port_info {
69418 unsigned long flags;
69419diff --git a/include/linux/list.h b/include/linux/list.h
69420index cc6d2aa..c10ee83 100644
69421--- a/include/linux/list.h
69422+++ b/include/linux/list.h
69423@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
69424 extern void list_del(struct list_head *entry);
69425 #endif
69426
69427+extern void __pax_list_add(struct list_head *new,
69428+ struct list_head *prev,
69429+ struct list_head *next);
69430+static inline void pax_list_add(struct list_head *new, struct list_head *head)
69431+{
69432+ __pax_list_add(new, head, head->next);
69433+}
69434+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
69435+{
69436+ __pax_list_add(new, head->prev, head);
69437+}
69438+extern void pax_list_del(struct list_head *entry);
69439+
69440 /**
69441 * list_replace - replace old entry by new one
69442 * @old : the element to be replaced
69443@@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
69444 INIT_LIST_HEAD(entry);
69445 }
69446
69447+extern void pax_list_del_init(struct list_head *entry);
69448+
69449 /**
69450 * list_move - delete from one list and add as another's head
69451 * @list: the entry to move
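The pax_list_* wrappers declared above exist because KERNEXEC can leave list heads in read-only kernel memory, where the plain stores done by the ordinary list helpers would fault. Their out-of-line definitions live elsewhere in this patch; a minimal sketch, assuming the pax_open_kernel()/pax_close_kernel() primitives:

/* sketch of the out-of-line definition: splice a node while the
 * normally read-only mapping is temporarily writable */
void __pax_list_add(struct list_head *new, struct list_head *prev,
		    struct list_head *next)
{
	pax_open_kernel();	/* lift KERNEXEC write protection */
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
	pax_close_kernel();	/* restore read-only state */
}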
69452diff --git a/include/linux/math64.h b/include/linux/math64.h
69453index b8ba855..0148090 100644
69454--- a/include/linux/math64.h
69455+++ b/include/linux/math64.h
69456@@ -14,7 +14,7 @@
69457 * This is commonly provided by 32bit archs to provide an optimized 64bit
69458 * divide.
69459 */
69460-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
69461+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
69462 {
69463 *remainder = dividend % divisor;
69464 return dividend / divisor;
69465@@ -50,7 +50,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
69466 #define div64_long(x,y) div_s64((x),(y))
69467
69468 #ifndef div_u64_rem
69469-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
69470+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
69471 {
69472 *remainder = do_div(dividend, divisor);
69473 return dividend;
69474@@ -79,7 +79,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
69475 * divide.
69476 */
69477 #ifndef div_u64
69478-static inline u64 div_u64(u64 dividend, u32 divisor)
69479+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
69480 {
69481 u32 remainder;
69482 return div_u64_rem(dividend, divisor, &remainder);
69483diff --git a/include/linux/mm.h b/include/linux/mm.h
69484index 66e2f7c..a398fb2 100644
69485--- a/include/linux/mm.h
69486+++ b/include/linux/mm.h
69487@@ -101,6 +101,11 @@ extern unsigned int kobjsize(const void *objp);
69488 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
69489 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
69490 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
69491+
69492+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
69493+#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
69494+#endif
69495+
69496 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
69497
69498 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
69499@@ -231,6 +236,7 @@ struct vm_operations_struct {
69500 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
69501 unsigned long size, pgoff_t pgoff);
69502 };
69503+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
69504
69505 struct mmu_gather;
69506 struct inode;
69507@@ -1068,34 +1074,6 @@ int set_page_dirty(struct page *page);
69508 int set_page_dirty_lock(struct page *page);
69509 int clear_page_dirty_for_io(struct page *page);
69510
69511-/* Is the vma a continuation of the stack vma above it? */
69512-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
69513-{
69514- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
69515-}
69516-
69517-static inline int stack_guard_page_start(struct vm_area_struct *vma,
69518- unsigned long addr)
69519-{
69520- return (vma->vm_flags & VM_GROWSDOWN) &&
69521- (vma->vm_start == addr) &&
69522- !vma_growsdown(vma->vm_prev, addr);
69523-}
69524-
69525-/* Is the vma a continuation of the stack vma below it? */
69526-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
69527-{
69528- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
69529-}
69530-
69531-static inline int stack_guard_page_end(struct vm_area_struct *vma,
69532- unsigned long addr)
69533-{
69534- return (vma->vm_flags & VM_GROWSUP) &&
69535- (vma->vm_end == addr) &&
69536- !vma_growsup(vma->vm_next, addr);
69537-}
69538-
69539 extern pid_t
69540 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
69541
69542@@ -1198,6 +1176,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
69543 }
69544 #endif
69545
69546+#ifdef CONFIG_MMU
69547+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
69548+#else
69549+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
69550+{
69551+ return __pgprot(0);
69552+}
69553+#endif
69554+
69555 int vma_wants_writenotify(struct vm_area_struct *vma);
69556
69557 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
69558@@ -1216,8 +1203,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
69559 {
69560 return 0;
69561 }
69562+
69563+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
69564+ unsigned long address)
69565+{
69566+ return 0;
69567+}
69568 #else
69569 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
69570+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
69571 #endif
69572
69573 #ifdef __PAGETABLE_PMD_FOLDED
69574@@ -1226,8 +1220,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
69575 {
69576 return 0;
69577 }
69578+
69579+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
69580+ unsigned long address)
69581+{
69582+ return 0;
69583+}
69584 #else
69585 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
69586+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
69587 #endif
69588
69589 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
69590@@ -1245,11 +1246,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
69591 NULL: pud_offset(pgd, address);
69592 }
69593
69594+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
69595+{
69596+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
69597+ NULL: pud_offset(pgd, address);
69598+}
69599+
69600 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
69601 {
69602 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
69603 NULL: pmd_offset(pud, address);
69604 }
69605+
69606+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
69607+{
69608+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
69609+ NULL: pmd_offset(pud, address);
69610+}
69611 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
69612
69613 #if USE_SPLIT_PTLOCKS
69614@@ -1479,6 +1492,7 @@ extern unsigned long do_mmap_pgoff(struct file *, unsigned long,
69615 unsigned long, unsigned long,
69616 unsigned long, unsigned long);
69617 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
69618+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
69619
69620 /* These take the mm semaphore themselves */
69621 extern unsigned long vm_brk(unsigned long, unsigned long);
69622@@ -1573,6 +1587,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
69623 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
69624 struct vm_area_struct **pprev);
69625
69626+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
69627+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
69628+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
69629+
69630 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
69631 NULL if none. Assume start_addr < end_addr. */
69632 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
69633@@ -1601,15 +1619,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
69634 return vma;
69635 }
69636
69637-#ifdef CONFIG_MMU
69638-pgprot_t vm_get_page_prot(unsigned long vm_flags);
69639-#else
69640-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
69641-{
69642- return __pgprot(0);
69643-}
69644-#endif
69645-
69646 #ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
69647 unsigned long change_prot_numa(struct vm_area_struct *vma,
69648 unsigned long start, unsigned long end);
69649@@ -1649,6 +1658,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
69650 static inline void vm_stat_account(struct mm_struct *mm,
69651 unsigned long flags, struct file *file, long pages)
69652 {
69653+
69654+#ifdef CONFIG_PAX_RANDMMAP
69655+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
69656+#endif
69657+
69658 mm->total_vm += pages;
69659 }
69660 #endif /* CONFIG_PROC_FS */
69661@@ -1721,7 +1735,7 @@ extern int unpoison_memory(unsigned long pfn);
69662 extern int sysctl_memory_failure_early_kill;
69663 extern int sysctl_memory_failure_recovery;
69664 extern void shake_page(struct page *p, int access);
69665-extern atomic_long_t mce_bad_pages;
69666+extern atomic_long_unchecked_t mce_bad_pages;
69667 extern int soft_offline_page(struct page *page, int flags);
69668
69669 extern void dump_page(struct page *page);
69670@@ -1752,5 +1766,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
69671 static inline bool page_is_guard(struct page *page) { return false; }
69672 #endif /* CONFIG_DEBUG_PAGEALLOC */
69673
69674+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69675+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
69676+#else
69677+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
69678+#endif
69679+
69680 #endif /* __KERNEL__ */
69681 #endif /* _LINUX_MM_H */
69682diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
69683index f8f5162..3aaf20f 100644
69684--- a/include/linux/mm_types.h
69685+++ b/include/linux/mm_types.h
69686@@ -288,6 +288,8 @@ struct vm_area_struct {
69687 #ifdef CONFIG_NUMA
69688 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
69689 #endif
69690+
69691+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
69692 };
69693
69694 struct core_thread {
69695@@ -436,6 +438,24 @@ struct mm_struct {
69696 int first_nid;
69697 #endif
69698 struct uprobes_state uprobes_state;
69699+
69700+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
69701+ unsigned long pax_flags;
69702+#endif
69703+
69704+#ifdef CONFIG_PAX_DLRESOLVE
69705+ unsigned long call_dl_resolve;
69706+#endif
69707+
69708+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
69709+ unsigned long call_syscall;
69710+#endif
69711+
69712+#ifdef CONFIG_PAX_ASLR
69713+ unsigned long delta_mmap; /* randomized offset */
69714+ unsigned long delta_stack; /* randomized offset */
69715+#endif
69716+
69717 };
69718
69719 /* first nid will either be a valid NID or one of these values */
69720diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
69721index c5d5278..f0b68c8 100644
69722--- a/include/linux/mmiotrace.h
69723+++ b/include/linux/mmiotrace.h
69724@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
69725 /* Called from ioremap.c */
69726 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
69727 void __iomem *addr);
69728-extern void mmiotrace_iounmap(volatile void __iomem *addr);
69729+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
69730
69731 /* For anyone to insert markers. Remember trailing newline. */
69732 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
69733@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
69734 {
69735 }
69736
69737-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
69738+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
69739 {
69740 }
69741
69742diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
69743index 73b64a3..6562925 100644
69744--- a/include/linux/mmzone.h
69745+++ b/include/linux/mmzone.h
69746@@ -412,7 +412,7 @@ struct zone {
69747 unsigned long flags; /* zone flags, see below */
69748
69749 /* Zone statistics */
69750- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
69751+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
69752
69753 /*
69754 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
69755diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
69756index fed3def..c933f99 100644
69757--- a/include/linux/mod_devicetable.h
69758+++ b/include/linux/mod_devicetable.h
69759@@ -12,7 +12,7 @@
69760 typedef unsigned long kernel_ulong_t;
69761 #endif
69762
69763-#define PCI_ANY_ID (~0)
69764+#define PCI_ANY_ID ((__u16)~0)
69765
69766 struct pci_device_id {
69767 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
69768@@ -139,7 +139,7 @@ struct usb_device_id {
69769 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
69770 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
69771
69772-#define HID_ANY_ID (~0)
69773+#define HID_ANY_ID (~0U)
69774 #define HID_BUS_ANY 0xffff
69775 #define HID_GROUP_ANY 0x0000
69776
69777@@ -498,7 +498,7 @@ struct dmi_system_id {
69778 const char *ident;
69779 struct dmi_strmatch matches[4];
69780 void *driver_data;
69781-};
69782+} __do_const;
69783 /*
69784 * struct dmi_device_id appears during expansion of
69785 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
69786diff --git a/include/linux/module.h b/include/linux/module.h
69787index 1375ee3..ced8177 100644
69788--- a/include/linux/module.h
69789+++ b/include/linux/module.h
69790@@ -17,9 +17,11 @@
69791 #include <linux/moduleparam.h>
69792 #include <linux/tracepoint.h>
69793 #include <linux/export.h>
69794+#include <linux/fs.h>
69795
69796 #include <linux/percpu.h>
69797 #include <asm/module.h>
69798+#include <asm/pgtable.h>
69799
69800 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
69801 #define MODULE_SIG_STRING "~Module signature appended~\n"
69802@@ -54,12 +56,13 @@ struct module_attribute {
69803 int (*test)(struct module *);
69804 void (*free)(struct module *);
69805 };
69806+typedef struct module_attribute __no_const module_attribute_no_const;
69807
69808 struct module_version_attribute {
69809 struct module_attribute mattr;
69810 const char *module_name;
69811 const char *version;
69812-} __attribute__ ((__aligned__(sizeof(void *))));
69813+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
69814
69815 extern ssize_t __modver_version_show(struct module_attribute *,
69816 struct module_kobject *, char *);
69817@@ -232,7 +235,7 @@ struct module
69818
69819 /* Sysfs stuff. */
69820 struct module_kobject mkobj;
69821- struct module_attribute *modinfo_attrs;
69822+ module_attribute_no_const *modinfo_attrs;
69823 const char *version;
69824 const char *srcversion;
69825 struct kobject *holders_dir;
69826@@ -281,19 +284,16 @@ struct module
69827 int (*init)(void);
69828
69829 /* If this is non-NULL, vfree after init() returns */
69830- void *module_init;
69831+ void *module_init_rx, *module_init_rw;
69832
69833 /* Here is the actual code + data, vfree'd on unload. */
69834- void *module_core;
69835+ void *module_core_rx, *module_core_rw;
69836
69837 /* Here are the sizes of the init and core sections */
69838- unsigned int init_size, core_size;
69839+ unsigned int init_size_rw, core_size_rw;
69840
69841 /* The size of the executable code in each section. */
69842- unsigned int init_text_size, core_text_size;
69843-
69844- /* Size of RO sections of the module (text+rodata) */
69845- unsigned int init_ro_size, core_ro_size;
69846+ unsigned int init_size_rx, core_size_rx;
69847
69848 /* Arch-specific module values */
69849 struct mod_arch_specific arch;
69850@@ -349,6 +349,10 @@ struct module
69851 #ifdef CONFIG_EVENT_TRACING
69852 struct ftrace_event_call **trace_events;
69853 unsigned int num_trace_events;
69854+ struct file_operations trace_id;
69855+ struct file_operations trace_enable;
69856+ struct file_operations trace_format;
69857+ struct file_operations trace_filter;
69858 #endif
69859 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
69860 unsigned int num_ftrace_callsites;
69861@@ -396,16 +400,46 @@ bool is_module_address(unsigned long addr);
69862 bool is_module_percpu_address(unsigned long addr);
69863 bool is_module_text_address(unsigned long addr);
69864
69865+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
69866+{
69867+
69868+#ifdef CONFIG_PAX_KERNEXEC
69869+ if (ktla_ktva(addr) >= (unsigned long)start &&
69870+ ktla_ktva(addr) < (unsigned long)start + size)
69871+ return 1;
69872+#endif
69873+
69874+ return ((void *)addr >= start && (void *)addr < start + size);
69875+}
69876+
69877+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
69878+{
69879+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
69880+}
69881+
69882+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
69883+{
69884+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
69885+}
69886+
69887+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
69888+{
69889+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
69890+}
69891+
69892+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
69893+{
69894+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
69895+}
69896+
69897 static inline int within_module_core(unsigned long addr, struct module *mod)
69898 {
69899- return (unsigned long)mod->module_core <= addr &&
69900- addr < (unsigned long)mod->module_core + mod->core_size;
69901+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
69902 }
69903
69904 static inline int within_module_init(unsigned long addr, struct module *mod)
69905 {
69906- return (unsigned long)mod->module_init <= addr &&
69907- addr < (unsigned long)mod->module_init + mod->init_size;
69908+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
69909 }
69910
69911 /* Search for module by name: must hold module_mutex. */
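Because KERNEXEC accesses module text through a shifted alias, within_module_range() above matches either the raw address or its ktla_ktva() translation, and the legacy within_module_core()/within_module_init() entry points now simply OR together the RX and RW halves. A short usage sketch against the split sections (the helper is hypothetical; the caller is assumed to hold a valid module reference):

/* sketch: classify an address against a module's split sections */
static const char *mod_region(unsigned long addr, struct module *mod)
{
	if (within_module_core_rx(addr, mod))
		return "core RX (text/rodata)";
	if (within_module_core_rw(addr, mod))
		return "core RW (data)";
	if (within_module_init_rx(addr, mod) ||
	    within_module_init_rw(addr, mod))
		return "init (discarded after load)";
	return "outside this module";
}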
69912diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
69913index 560ca53..5ee8d73 100644
69914--- a/include/linux/moduleloader.h
69915+++ b/include/linux/moduleloader.h
69916@@ -23,11 +23,23 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
69917
69918 /* Allocator used for allocating struct module, core sections and init
69919 sections. Returns NULL on failure. */
69920-void *module_alloc(unsigned long size);
69921+void *module_alloc(unsigned long size) __size_overflow(1);
69922+
69923+#ifdef CONFIG_PAX_KERNEXEC
69924+void *module_alloc_exec(unsigned long size) __size_overflow(1);
69925+#else
69926+#define module_alloc_exec(x) module_alloc(x)
69927+#endif
69928
69929 /* Free memory returned from module_alloc. */
69930 void module_free(struct module *mod, void *module_region);
69931
69932+#ifdef CONFIG_PAX_KERNEXEC
69933+void module_free_exec(struct module *mod, void *module_region);
69934+#else
69935+#define module_free_exec(x, y) module_free((x), (y))
69936+#endif
69937+
69938 /*
69939 * Apply the given relocation to the (simplified) ELF. Return -error
69940 * or 0.
69941@@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
69942 unsigned int relsec,
69943 struct module *me)
69944 {
69945+#ifdef CONFIG_MODULES
69946 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
69947+#endif
69948 return -ENOEXEC;
69949 }
69950 #endif
69951@@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
69952 unsigned int relsec,
69953 struct module *me)
69954 {
69955+#ifdef CONFIG_MODULES
69956 	printk(KERN_ERR "module %s: RELA relocation unsupported\n", me->name);
69957+#endif
69958 return -ENOEXEC;
69959 }
69960 #endif
69961diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
69962index 137b419..fe663ec 100644
69963--- a/include/linux/moduleparam.h
69964+++ b/include/linux/moduleparam.h
69965@@ -284,7 +284,7 @@ static inline void __kernel_param_unlock(void)
69966 * @len is usually just sizeof(string).
69967 */
69968 #define module_param_string(name, string, len, perm) \
69969- static const struct kparam_string __param_string_##name \
69970+ static const struct kparam_string __param_string_##name __used \
69971 = { len, string }; \
69972 __module_param_call(MODULE_PARAM_PREFIX, name, \
69973 &param_ops_string, \
69974@@ -423,7 +423,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
69975 */
69976 #define module_param_array_named(name, array, type, nump, perm) \
69977 param_check_##type(name, &(array)[0]); \
69978- static const struct kparam_array __param_arr_##name \
69979+ static const struct kparam_array __param_arr_##name __used \
69980 = { .max = ARRAY_SIZE(array), .num = nump, \
69981 .ops = &param_ops_##type, \
69982 .elemsize = sizeof(array[0]), .elem = array }; \
69983diff --git a/include/linux/namei.h b/include/linux/namei.h
69984index 5a5ff57..5ae5070 100644
69985--- a/include/linux/namei.h
69986+++ b/include/linux/namei.h
69987@@ -19,7 +19,7 @@ struct nameidata {
69988 unsigned seq;
69989 int last_type;
69990 unsigned depth;
69991- char *saved_names[MAX_NESTED_LINKS + 1];
69992+ const char *saved_names[MAX_NESTED_LINKS + 1];
69993 };
69994
69995 /*
69996@@ -84,12 +84,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
69997
69998 extern void nd_jump_link(struct nameidata *nd, struct path *path);
69999
70000-static inline void nd_set_link(struct nameidata *nd, char *path)
70001+static inline void nd_set_link(struct nameidata *nd, const char *path)
70002 {
70003 nd->saved_names[nd->depth] = path;
70004 }
70005
70006-static inline char *nd_get_link(struct nameidata *nd)
70007+static inline const char *nd_get_link(const struct nameidata *nd)
70008 {
70009 return nd->saved_names[nd->depth];
70010 }
70011diff --git a/include/linux/net.h b/include/linux/net.h
70012index aa16731..514b875 100644
70013--- a/include/linux/net.h
70014+++ b/include/linux/net.h
70015@@ -183,7 +183,7 @@ struct net_proto_family {
70016 int (*create)(struct net *net, struct socket *sock,
70017 int protocol, int kern);
70018 struct module *owner;
70019-};
70020+} __do_const;
70021
70022 struct iovec;
70023 struct kvec;
70024diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
70025index 9ef07d0..130a5d9 100644
70026--- a/include/linux/netdevice.h
70027+++ b/include/linux/netdevice.h
70028@@ -1012,6 +1012,7 @@ struct net_device_ops {
70029 u32 pid, u32 seq,
70030 struct net_device *dev);
70031 };
70032+typedef struct net_device_ops __no_const net_device_ops_no_const;
70033
70034 /*
70035 * The DEVICE structure.
70036@@ -1078,7 +1079,7 @@ struct net_device {
70037 int iflink;
70038
70039 struct net_device_stats stats;
70040- atomic_long_t rx_dropped; /* dropped packets by core network
70041+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
70042 * Do not use this in drivers.
70043 */
70044
70045diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
70046index ee14284..bc65d63 100644
70047--- a/include/linux/netfilter.h
70048+++ b/include/linux/netfilter.h
70049@@ -82,7 +82,7 @@ struct nf_sockopt_ops {
70050 #endif
70051 /* Use the module struct to lock set/get code in place */
70052 struct module *owner;
70053-};
70054+} __do_const;
70055
70056 /* Function to register/unregister hook points. */
70057 int nf_register_hook(struct nf_hook_ops *reg);
70058diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
70059index 7958e84..ed74d7a 100644
70060--- a/include/linux/netfilter/ipset/ip_set.h
70061+++ b/include/linux/netfilter/ipset/ip_set.h
70062@@ -98,7 +98,7 @@ struct ip_set_type_variant {
70063 /* Return true if "b" set is the same as "a"
70064 * according to the create set parameters */
70065 bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
70066-};
70067+} __do_const;
70068
70069 /* The core set type structure */
70070 struct ip_set_type {
70071diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
70072index 4966dde..7d8ce06 100644
70073--- a/include/linux/netfilter/nfnetlink.h
70074+++ b/include/linux/netfilter/nfnetlink.h
70075@@ -16,7 +16,7 @@ struct nfnl_callback {
70076 const struct nlattr * const cda[]);
70077 const struct nla_policy *policy; /* netlink attribute policy */
70078 const u_int16_t attr_count; /* number of nlattr's */
70079-};
70080+} __do_const;
70081
70082 struct nfnetlink_subsystem {
70083 const char *name;
70084diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
70085new file mode 100644
70086index 0000000..33f4af8
70087--- /dev/null
70088+++ b/include/linux/netfilter/xt_gradm.h
70089@@ -0,0 +1,9 @@
70090+#ifndef _LINUX_NETFILTER_XT_GRADM_H
70091+#define _LINUX_NETFILTER_XT_GRADM_H 1
70092+
70093+struct xt_gradm_mtinfo {
70094+ __u16 flags;
70095+ __u16 invflags;
70096+};
70097+
70098+#endif
70099diff --git a/include/linux/nls.h b/include/linux/nls.h
70100index 5dc635f..35f5e11 100644
70101--- a/include/linux/nls.h
70102+++ b/include/linux/nls.h
70103@@ -31,7 +31,7 @@ struct nls_table {
70104 const unsigned char *charset2upper;
70105 struct module *owner;
70106 struct nls_table *next;
70107-};
70108+} __do_const;
70109
70110 /* this value hold the maximum octet of charset */
70111 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
70112diff --git a/include/linux/notifier.h b/include/linux/notifier.h
70113index d65746e..62e72c2 100644
70114--- a/include/linux/notifier.h
70115+++ b/include/linux/notifier.h
70116@@ -51,7 +51,8 @@ struct notifier_block {
70117 int (*notifier_call)(struct notifier_block *, unsigned long, void *);
70118 struct notifier_block __rcu *next;
70119 int priority;
70120-};
70121+} __do_const;
70122+typedef struct notifier_block __no_const notifier_block_no_const;
70123
70124 struct atomic_notifier_head {
70125 spinlock_t lock;
70126diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
70127index a4c5624..79d6d88 100644
70128--- a/include/linux/oprofile.h
70129+++ b/include/linux/oprofile.h
70130@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
70131 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
70132 char const * name, ulong * val);
70133
70134-/** Create a file for read-only access to an atomic_t. */
70135+/** Create a file for read-only access to an atomic_unchecked_t. */
70136 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
70137- char const * name, atomic_t * val);
70138+ char const * name, atomic_unchecked_t * val);
70139
70140 /** create a directory */
70141 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
70142diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
70143index 45fc162..01a4068 100644
70144--- a/include/linux/pci_hotplug.h
70145+++ b/include/linux/pci_hotplug.h
70146@@ -80,7 +80,8 @@ struct hotplug_slot_ops {
70147 int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
70148 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
70149 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
70150-};
70151+} __do_const;
70152+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
70153
70154 /**
70155 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
70156diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
70157index a280650..2b67b91 100644
70158--- a/include/linux/perf_event.h
70159+++ b/include/linux/perf_event.h
70160@@ -328,8 +328,8 @@ struct perf_event {
70161
70162 enum perf_event_active_state state;
70163 unsigned int attach_state;
70164- local64_t count;
70165- atomic64_t child_count;
70166+ local64_t count; /* PaX: fix it one day */
70167+ atomic64_unchecked_t child_count;
70168
70169 /*
70170 * These are the total time in nanoseconds that the event
70171@@ -380,8 +380,8 @@ struct perf_event {
70172 * These accumulate total time (in nanoseconds) that children
70173 * events have been enabled and running, respectively.
70174 */
70175- atomic64_t child_total_time_enabled;
70176- atomic64_t child_total_time_running;
70177+ atomic64_unchecked_t child_total_time_enabled;
70178+ atomic64_unchecked_t child_total_time_running;
70179
70180 /*
70181 * Protect attach/detach and child_list:
70182@@ -807,7 +807,7 @@ static inline void perf_restore_debug_store(void) { }
70183 */
70184 #define perf_cpu_notifier(fn) \
70185 do { \
70186- static struct notifier_block fn##_nb __cpuinitdata = \
70187+ static struct notifier_block fn##_nb = \
70188 { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
70189 unsigned long cpu = smp_processor_id(); \
70190 unsigned long flags; \
70191diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
70192index ad1a427..6419649 100644
70193--- a/include/linux/pipe_fs_i.h
70194+++ b/include/linux/pipe_fs_i.h
70195@@ -45,9 +45,9 @@ struct pipe_buffer {
70196 struct pipe_inode_info {
70197 wait_queue_head_t wait;
70198 unsigned int nrbufs, curbuf, buffers;
70199- unsigned int readers;
70200- unsigned int writers;
70201- unsigned int waiting_writers;
70202+ atomic_t readers;
70203+ atomic_t writers;
70204+ atomic_t waiting_writers;
70205 unsigned int r_counter;
70206 unsigned int w_counter;
70207 struct page *tmp_page;
70208diff --git a/include/linux/platform_data/usb-ehci-s5p.h b/include/linux/platform_data/usb-ehci-s5p.h
70209index 5f28cae..3d23723 100644
70210--- a/include/linux/platform_data/usb-ehci-s5p.h
70211+++ b/include/linux/platform_data/usb-ehci-s5p.h
70212@@ -14,7 +14,7 @@
70213 struct s5p_ehci_platdata {
70214 int (*phy_init)(struct platform_device *pdev, int type);
70215 int (*phy_exit)(struct platform_device *pdev, int type);
70216-};
70217+} __no_const;
70218
70219 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
70220
70221diff --git a/include/linux/platform_data/usb-exynos.h b/include/linux/platform_data/usb-exynos.h
70222index c256c59..8ea94c7 100644
70223--- a/include/linux/platform_data/usb-exynos.h
70224+++ b/include/linux/platform_data/usb-exynos.h
70225@@ -14,7 +14,7 @@
70226 struct exynos4_ohci_platdata {
70227 int (*phy_init)(struct platform_device *pdev, int type);
70228 int (*phy_exit)(struct platform_device *pdev, int type);
70229-};
70230+} __no_const;
70231
70232 extern void exynos4_ohci_set_platdata(struct exynos4_ohci_platdata *pd);
70233
70234diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
70235index 7c1d252..c5c773e 100644
70236--- a/include/linux/pm_domain.h
70237+++ b/include/linux/pm_domain.h
70238@@ -48,7 +48,7 @@ struct gpd_dev_ops {
70239
70240 struct gpd_cpu_data {
70241 unsigned int saved_exit_latency;
70242- struct cpuidle_state *idle_state;
70243+ cpuidle_state_no_const *idle_state;
70244 };
70245
70246 struct generic_pm_domain {
70247diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
70248index f271860..6b3bec5 100644
70249--- a/include/linux/pm_runtime.h
70250+++ b/include/linux/pm_runtime.h
70251@@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
70252
70253 static inline void pm_runtime_mark_last_busy(struct device *dev)
70254 {
70255- ACCESS_ONCE(dev->power.last_busy) = jiffies;
70256+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
70257 }
70258
70259 #else /* !CONFIG_PM_RUNTIME */
70260diff --git a/include/linux/pnp.h b/include/linux/pnp.h
70261index 195aafc..49a7bc2 100644
70262--- a/include/linux/pnp.h
70263+++ b/include/linux/pnp.h
70264@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
70265 struct pnp_fixup {
70266 char id[7];
70267 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
70268-};
70269+} __do_const;
70270
70271 /* config parameters */
70272 #define PNP_CONFIG_NORMAL 0x0001
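
__do_const, added above and on many ops structures below, is consumed by the constify GCC plugin: structures that hold only function pointers and setup data, like this quirk-table entry, are moved into read-only memory so an arbitrary-write primitive cannot redirect their callbacks; __no_const (seen on the platdata structs above) opts out the few instances that must be written at runtime. A sketch of the effect using plain const in place of the plugin (all names here are illustrative, not from the patch):

#include <stdio.h>

struct fixup_ops {
    const char *id;
    void (*quirk)(int dev);     /* the callback an attacker would target */
};

static void quirk_example(int dev) { printf("fixing up device %d\n", dev); }

/* 'const' places the table in .rodata, the same effect the constify
 * plugin achieves automatically for __do_const structures. */
static const struct fixup_ops fixups[] = {
    { "PNP0001", quirk_example },
};

int main(void)
{
    fixups[0].quirk(42);
    /* fixups[0].quirk = NULL;  -- compile error, and a write fault at
     * runtime even if forced through a cast. */
    return 0;
}
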
70273diff --git a/include/linux/poison.h b/include/linux/poison.h
70274index 2110a81..13a11bb 100644
70275--- a/include/linux/poison.h
70276+++ b/include/linux/poison.h
70277@@ -19,8 +19,8 @@
70278 * under normal circumstances, used to verify that nobody uses
70279 * non-initialized list entries.
70280 */
70281-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
70282-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
70283+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
70284+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
70285
70286 /********** include/linux/timer.h **********/
70287 /*
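
Upstream's LIST_POISON values (0x00100100/0x00200200 plus POISON_POINTER_DELTA) fall in a low address range that unprivileged code can sometimes map, so a use-after-list_del dereference may land in attacker-controlled memory; the replacement constants are chosen so the dereference should always fault instead. A self-contained model of the poisoning that list_del() performs:

#include <stdio.h>

#define LIST_POISON1 ((void *)(long)0xFFFFFF01)
#define LIST_POISON2 ((void *)(long)0xFFFFFF02)

struct list_head { struct list_head *next, *prev; };

static void list_del_demo(struct list_head *entry)
{
    entry->next->prev = entry->prev;
    entry->prev->next = entry->next;
    /* Stale users now fault on an address no one should be able to
     * map, instead of walking potentially user-claimable low memory. */
    entry->next = (struct list_head *)LIST_POISON1;
    entry->prev = (struct list_head *)LIST_POISON2;
}

int main(void)
{
    struct list_head a, b, c;

    a.next = &b; a.prev = &c;
    b.next = &c; b.prev = &a;
    c.next = &a; c.prev = &b;
    list_del_demo(&b);
    printf("b.next=%p b.prev=%p\n", (void *)b.next, (void *)b.prev);
    return 0;
}
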
70288diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
70289index c0f44c2..1572583 100644
70290--- a/include/linux/power/smartreflex.h
70291+++ b/include/linux/power/smartreflex.h
70292@@ -238,7 +238,7 @@ struct omap_sr_class_data {
70293 int (*notify)(struct omap_sr *sr, u32 status);
70294 u8 notify_flags;
70295 u8 class_type;
70296-};
70297+} __do_const;
70298
70299 /**
70300 * struct omap_sr_nvalue_table - Smartreflex n-target value info
70301diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
70302index 4ea1d37..80f4b33 100644
70303--- a/include/linux/ppp-comp.h
70304+++ b/include/linux/ppp-comp.h
70305@@ -84,7 +84,7 @@ struct compressor {
70306 struct module *owner;
70307 /* Extra skb space needed by the compressor algorithm */
70308 unsigned int comp_extra;
70309-};
70310+} __do_const;
70311
70312 /*
70313 * The return value from decompress routine is the length of the
70314diff --git a/include/linux/printk.h b/include/linux/printk.h
70315index 9afc01e..92c32e8 100644
70316--- a/include/linux/printk.h
70317+++ b/include/linux/printk.h
70318@@ -101,6 +101,8 @@ void early_printk(const char *fmt, ...);
70319 extern int printk_needs_cpu(int cpu);
70320 extern void printk_tick(void);
70321
70322+extern int kptr_restrict;
70323+
70324 #ifdef CONFIG_PRINTK
70325 asmlinkage __printf(5, 0)
70326 int vprintk_emit(int facility, int level,
70327@@ -135,7 +137,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
70328
70329 extern int printk_delay_msec;
70330 extern int dmesg_restrict;
70331-extern int kptr_restrict;
70332
70333 void log_buf_kexec_setup(void);
70334 void __init setup_log_buf(int early);
70335diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
70336index 32676b3..8f7a182 100644
70337--- a/include/linux/proc_fs.h
70338+++ b/include/linux/proc_fs.h
70339@@ -159,6 +159,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
70340 return proc_create_data(name, mode, parent, proc_fops, NULL);
70341 }
70342
70343+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
70344+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
70345+{
70346+#ifdef CONFIG_GRKERNSEC_PROC_USER
70347+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
70348+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
70349+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
70350+#else
70351+ return proc_create_data(name, mode, parent, proc_fops, NULL);
70352+#endif
70353+}
70354+
70355 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
70356 umode_t mode, struct proc_dir_entry *base,
70357 read_proc_t *read_proc, void * data)
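
proc_create_grsec() clamps the mode of a new /proc entry according to the grsec Kconfig: owner-only (0400) under GRKERNSEC_PROC_USER, owner plus group (0440) under GRKERNSEC_PROC_USERGROUP, otherwise the caller's mode unchanged. A userspace model of just the selection logic (the CONFIG_* macros stand in for the Kconfig options):

#include <stdio.h>

#define S_IRUSR 0400
#define S_IRGRP 0040

/* Define one of these to mimic the corresponding Kconfig option. */
/* #define CONFIG_GRKERNSEC_PROC_USER */
/* #define CONFIG_GRKERNSEC_PROC_USERGROUP */

static unsigned int grsec_proc_mode(unsigned int requested)
{
#ifdef CONFIG_GRKERNSEC_PROC_USER
    return S_IRUSR;                 /* owner-only, 0400 */
#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
    return S_IRUSR | S_IRGRP;       /* owner + group, 0440 */
#else
    return requested;               /* unrestricted kernels keep the mode */
#endif
}

int main(void)
{
    printf("requested 0444 -> created 0%o\n", grsec_proc_mode(0444));
    return 0;
}
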
70358diff --git a/include/linux/random.h b/include/linux/random.h
70359index d984608..d6f0042 100644
70360--- a/include/linux/random.h
70361+++ b/include/linux/random.h
70362@@ -39,6 +39,11 @@ void prandom_seed(u32 seed);
70363 u32 prandom_u32_state(struct rnd_state *);
70364 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
70365
70366+static inline unsigned long pax_get_random_long(void)
70367+{
70368+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
70369+}
70370+
70371 /*
70372 * Handle minimum values for seeds
70373 */
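
pax_get_random_long() widens the 32-bit prandom_u32() to a full long: on 64-bit builds a second draw fills the high word, while on 32-bit builds sizeof(long) > 4 is false at compile time and the shift branch disappears. A userspace model (prandom_u32() is kernel-only, so a stand-in generator is used; the demo assumes a 64-bit long):

#include <stdio.h>
#include <stdlib.h>

static unsigned int prng_u32(void)          /* stand-in for prandom_u32() */
{
    return ((unsigned int)rand() << 16) ^ (unsigned int)rand();
}

static unsigned long get_random_long_demo(void)
{
    /* On LP64 the second draw fills bits 32..63; on ILP32 the
     * conditional is constant-false and the shift is never taken. */
    return prng_u32() +
           (sizeof(long) > 4 ? (unsigned long)prng_u32() << 32 : 0);
}

int main(void)
{
    srand(1);
    printf("%#lx\n", get_random_long_demo());
    return 0;
}
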
70374diff --git a/include/linux/rculist.h b/include/linux/rculist.h
70375index c92dd28..08f4eab 100644
70376--- a/include/linux/rculist.h
70377+++ b/include/linux/rculist.h
70378@@ -44,6 +44,9 @@ extern void __list_add_rcu(struct list_head *new,
70379 struct list_head *prev, struct list_head *next);
70380 #endif
70381
70382+extern void __pax_list_add_rcu(struct list_head *new,
70383+ struct list_head *prev, struct list_head *next);
70384+
70385 /**
70386 * list_add_rcu - add a new entry to rcu-protected list
70387 * @new: new entry to be added
70388@@ -65,6 +68,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
70389 __list_add_rcu(new, head, head->next);
70390 }
70391
70392+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
70393+{
70394+ __pax_list_add_rcu(new, head, head->next);
70395+}
70396+
70397 /**
70398 * list_add_tail_rcu - add a new entry to rcu-protected list
70399 * @new: new entry to be added
70400@@ -87,6 +95,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
70401 __list_add_rcu(new, head->prev, head);
70402 }
70403
70404+static inline void pax_list_add_tail_rcu(struct list_head *new,
70405+ struct list_head *head)
70406+{
70407+ __pax_list_add_rcu(new, head->prev, head);
70408+}
70409+
70410 /**
70411 * list_del_rcu - deletes entry from list without re-initialization
70412 * @entry: the element to delete from the list.
70413@@ -117,6 +131,8 @@ static inline void list_del_rcu(struct list_head *entry)
70414 entry->prev = LIST_POISON2;
70415 }
70416
70417+extern void pax_list_del_rcu(struct list_head *entry);
70418+
70419 /**
70420 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
70421 * @n: the element to delete from the hash list.
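
The pax_list_add_rcu()/pax_list_del_rcu() wrappers declared here mirror the stock RCU list primitives but go through out-of-line helpers; elsewhere in the patch (not shown in this excerpt) those helpers presumably lift the KERNEXEC/constify write protection around the store, so list nodes embedded in otherwise read-only objects can still be linked and unlinked. A hedged kernel-context usage sketch; the list head and both callers are invented for illustration:

/* "hardened_hooks" lives in an object the constify plugin made
 * read-only, so a direct list_add_rcu() store would fault. */
static struct list_head hardened_hooks = { &hardened_hooks, &hardened_hooks };

static void hook_register(struct list_head *node)
{
    pax_list_add_rcu(node, &hardened_hooks);
}

static void hook_unregister(struct list_head *node)
{
    pax_list_del_rcu(node);
}
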
70422diff --git a/include/linux/reboot.h b/include/linux/reboot.h
70423index 23b3630..e1bc12b 100644
70424--- a/include/linux/reboot.h
70425+++ b/include/linux/reboot.h
70426@@ -18,9 +18,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
70427 * Architecture-specific implementations of sys_reboot commands.
70428 */
70429
70430-extern void machine_restart(char *cmd);
70431-extern void machine_halt(void);
70432-extern void machine_power_off(void);
70433+extern void machine_restart(char *cmd) __noreturn;
70434+extern void machine_halt(void) __noreturn;
70435+extern void machine_power_off(void) __noreturn;
70436
70437 extern void machine_shutdown(void);
70438 struct pt_regs;
70439@@ -31,9 +31,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
70440 */
70441
70442 extern void kernel_restart_prepare(char *cmd);
70443-extern void kernel_restart(char *cmd);
70444-extern void kernel_halt(void);
70445-extern void kernel_power_off(void);
70446+extern void kernel_restart(char *cmd) __noreturn;
70447+extern void kernel_halt(void) __noreturn;
70448+extern void kernel_power_off(void) __noreturn;
70449
70450 extern int C_A_D; /* for sysctl */
70451 void ctrl_alt_del(void);
70452@@ -47,7 +47,7 @@ extern int orderly_poweroff(bool force);
70453 * Emergency restart, callable from an interrupt handler.
70454 */
70455
70456-extern void emergency_restart(void);
70457+extern void emergency_restart(void) __noreturn;
70458 #include <asm/emergency-restart.h>
70459
70460 #endif /* _LINUX_REBOOT_H */
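
Annotating the restart/halt/power-off entry points __noreturn documents that they never come back, lets the compiler prune unreachable fallthrough code, and avoids spurious missing-return warnings in callers. A minimal demonstration of the attribute:

#include <stdio.h>
#include <stdlib.h>

__attribute__((noreturn)) static void die(const char *msg)
{
    fprintf(stderr, "fatal: %s\n", msg);
    exit(1);
}

static int parse_digit(int c)
{
    if (c >= '0' && c <= '9')
        return c - '0';
    die("not a digit");
    /* no return needed: the compiler knows die() never comes back */
}

int main(void)
{
    printf("%d\n", parse_digit('7'));
    return 0;
}
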
70461diff --git a/include/linux/regset.h b/include/linux/regset.h
70462index 8e0c9fe..ac4d221 100644
70463--- a/include/linux/regset.h
70464+++ b/include/linux/regset.h
70465@@ -161,7 +161,8 @@ struct user_regset {
70466 unsigned int align;
70467 unsigned int bias;
70468 unsigned int core_note_type;
70469-};
70470+} __do_const;
70471+typedef struct user_regset __no_const user_regset_no_const;
70472
70473 /**
70474 * struct user_regset_view - available regsets
70475diff --git a/include/linux/relay.h b/include/linux/relay.h
70476index 91cacc3..b55ff74 100644
70477--- a/include/linux/relay.h
70478+++ b/include/linux/relay.h
70479@@ -160,7 +160,7 @@ struct rchan_callbacks
70480 * The callback should return 0 if successful, negative if not.
70481 */
70482 int (*remove_buf_file)(struct dentry *dentry);
70483-};
70484+} __no_const;
70485
70486 /*
70487 * CONFIG_RELAY kernel API, kernel/relay.c
70488diff --git a/include/linux/rio.h b/include/linux/rio.h
70489index a3e7842..d973ca6 100644
70490--- a/include/linux/rio.h
70491+++ b/include/linux/rio.h
70492@@ -339,7 +339,7 @@ struct rio_ops {
70493 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
70494 u64 rstart, u32 size, u32 flags);
70495 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
70496-};
70497+} __no_const;
70498
70499 #define RIO_RESOURCE_MEM 0x00000100
70500 #define RIO_RESOURCE_DOORBELL 0x00000200
70501diff --git a/include/linux/rmap.h b/include/linux/rmap.h
70502index c20635c..2f5def4 100644
70503--- a/include/linux/rmap.h
70504+++ b/include/linux/rmap.h
70505@@ -145,8 +145,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
70506 void anon_vma_init(void); /* create anon_vma_cachep */
70507 int anon_vma_prepare(struct vm_area_struct *);
70508 void unlink_anon_vmas(struct vm_area_struct *);
70509-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
70510-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
70511+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
70512+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
70513
70514 static inline void anon_vma_merge(struct vm_area_struct *vma,
70515 struct vm_area_struct *next)
70516diff --git a/include/linux/sched.h b/include/linux/sched.h
70517index d211247..eac6c2c 100644
70518--- a/include/linux/sched.h
70519+++ b/include/linux/sched.h
70520@@ -61,6 +61,7 @@ struct bio_list;
70521 struct fs_struct;
70522 struct perf_event_context;
70523 struct blk_plug;
70524+struct linux_binprm;
70525
70526 /*
70527 * List of flags we want to share for kernel threads,
70528@@ -327,7 +328,7 @@ extern char __sched_text_start[], __sched_text_end[];
70529 extern int in_sched_functions(unsigned long addr);
70530
70531 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
70532-extern signed long schedule_timeout(signed long timeout);
70533+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
70534 extern signed long schedule_timeout_interruptible(signed long timeout);
70535 extern signed long schedule_timeout_killable(signed long timeout);
70536 extern signed long schedule_timeout_uninterruptible(signed long timeout);
70537@@ -354,10 +355,23 @@ struct user_namespace;
70538 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
70539
70540 extern int sysctl_max_map_count;
70541+extern unsigned long sysctl_heap_stack_gap;
70542
70543 #include <linux/aio.h>
70544
70545 #ifdef CONFIG_MMU
70546+
70547+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
70548+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
70549+#else
70550+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
70551+{
70552+ return 0;
70553+}
70554+#endif
70555+
70556+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
70557+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
70558 extern void arch_pick_mmap_layout(struct mm_struct *mm);
70559 extern unsigned long
70560 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
70561@@ -639,6 +653,17 @@ struct signal_struct {
70562 #ifdef CONFIG_TASKSTATS
70563 struct taskstats *stats;
70564 #endif
70565+
70566+#ifdef CONFIG_GRKERNSEC
70567+ u32 curr_ip;
70568+ u32 saved_ip;
70569+ u32 gr_saddr;
70570+ u32 gr_daddr;
70571+ u16 gr_sport;
70572+ u16 gr_dport;
70573+ u8 used_accept:1;
70574+#endif
70575+
70576 #ifdef CONFIG_AUDIT
70577 unsigned audit_tty;
70578 struct tty_audit_buf *tty_audit_buf;
70579@@ -717,6 +742,11 @@ struct user_struct {
70580 struct key *session_keyring; /* UID's default session keyring */
70581 #endif
70582
70583+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
70584+ unsigned int banned;
70585+ unsigned long ban_expires;
70586+#endif
70587+
70588 /* Hash table maintenance information */
70589 struct hlist_node uidhash_node;
70590 kuid_t uid;
70591@@ -1116,7 +1146,7 @@ struct sched_class {
70592 #ifdef CONFIG_FAIR_GROUP_SCHED
70593 void (*task_move_group) (struct task_struct *p, int on_rq);
70594 #endif
70595-};
70596+} __do_const;
70597
70598 struct load_weight {
70599 unsigned long weight, inv_weight;
70600@@ -1360,8 +1390,8 @@ struct task_struct {
70601 struct list_head thread_group;
70602
70603 struct completion *vfork_done; /* for vfork() */
70604- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
70605- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
70606+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
70607+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
70608
70609 cputime_t utime, stime, utimescaled, stimescaled;
70610 cputime_t gtime;
70611@@ -1377,11 +1407,6 @@ struct task_struct {
70612 struct task_cputime cputime_expires;
70613 struct list_head cpu_timers[3];
70614
70615-/* process credentials */
70616- const struct cred __rcu *real_cred; /* objective and real subjective task
70617- * credentials (COW) */
70618- const struct cred __rcu *cred; /* effective (overridable) subjective task
70619- * credentials (COW) */
70620 char comm[TASK_COMM_LEN]; /* executable name excluding path
70621 - access with [gs]et_task_comm (which lock
70622 it with task_lock())
70623@@ -1398,6 +1423,10 @@ struct task_struct {
70624 #endif
70625 /* CPU-specific state of this task */
70626 struct thread_struct thread;
70627+/* thread_info moved to task_struct */
70628+#ifdef CONFIG_X86
70629+ struct thread_info tinfo;
70630+#endif
70631 /* filesystem information */
70632 struct fs_struct *fs;
70633 /* open file information */
70634@@ -1471,6 +1500,10 @@ struct task_struct {
70635 gfp_t lockdep_reclaim_gfp;
70636 #endif
70637
70638+/* process credentials */
70639+ const struct cred __rcu *real_cred; /* objective and real subjective task
70640+ * credentials (COW) */
70641+
70642 /* journalling filesystem info */
70643 void *journal_info;
70644
70645@@ -1509,6 +1542,10 @@ struct task_struct {
70646 /* cg_list protected by css_set_lock and tsk->alloc_lock */
70647 struct list_head cg_list;
70648 #endif
70649+
70650+ const struct cred __rcu *cred; /* effective (overridable) subjective task
70651+ * credentials (COW) */
70652+
70653 #ifdef CONFIG_FUTEX
70654 struct robust_list_head __user *robust_list;
70655 #ifdef CONFIG_COMPAT
70656@@ -1605,8 +1642,74 @@ struct task_struct {
70657 #ifdef CONFIG_UPROBES
70658 struct uprobe_task *utask;
70659 #endif
70660+
70661+#ifdef CONFIG_GRKERNSEC
70662+ /* grsecurity */
70663+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
70664+ u64 exec_id;
70665+#endif
70666+#ifdef CONFIG_GRKERNSEC_SETXID
70667+ const struct cred *delayed_cred;
70668+#endif
70669+ struct dentry *gr_chroot_dentry;
70670+ struct acl_subject_label *acl;
70671+ struct acl_role_label *role;
70672+ struct file *exec_file;
70673+ unsigned long brute_expires;
70674+ u16 acl_role_id;
70675+ /* is this the task that authenticated to the special role */
70676+ u8 acl_sp_role;
70677+ u8 is_writable;
70678+ u8 brute;
70679+ u8 gr_is_chrooted;
70680+#endif
70681+
70682 };
70683
70684+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
70685+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
70686+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
70687+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
70688+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
70689+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
70690+
70691+#ifdef CONFIG_PAX_SOFTMODE
70692+extern int pax_softmode;
70693+#endif
70694+
70695+extern int pax_check_flags(unsigned long *);
70696+
70697+/* if tsk != current then task_lock must be held on it */
70698+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
70699+static inline unsigned long pax_get_flags(struct task_struct *tsk)
70700+{
70701+ if (likely(tsk->mm))
70702+ return tsk->mm->pax_flags;
70703+ else
70704+ return 0UL;
70705+}
70706+
70707+/* if tsk != current then task_lock must be held on it */
70708+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
70709+{
70710+ if (likely(tsk->mm)) {
70711+ tsk->mm->pax_flags = flags;
70712+ return 0;
70713+ }
70714+ return -EINVAL;
70715+}
70716+#endif
70717+
70718+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
70719+extern void pax_set_initial_flags(struct linux_binprm *bprm);
70720+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
70721+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
70722+#endif
70723+
70724+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
70725+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
70726+extern void pax_report_refcount_overflow(struct pt_regs *regs);
70727+
70728 /* Future-safe accessor for struct task_struct's cpus_allowed. */
70729 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
70730
70731@@ -1696,7 +1799,7 @@ struct pid_namespace;
70732 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
70733 struct pid_namespace *ns);
70734
70735-static inline pid_t task_pid_nr(struct task_struct *tsk)
70736+static inline pid_t task_pid_nr(const struct task_struct *tsk)
70737 {
70738 return tsk->pid;
70739 }
70740@@ -2155,7 +2258,9 @@ void yield(void);
70741 extern struct exec_domain default_exec_domain;
70742
70743 union thread_union {
70744+#ifndef CONFIG_X86
70745 struct thread_info thread_info;
70746+#endif
70747 unsigned long stack[THREAD_SIZE/sizeof(long)];
70748 };
70749
70750@@ -2188,6 +2293,7 @@ extern struct pid_namespace init_pid_ns;
70751 */
70752
70753 extern struct task_struct *find_task_by_vpid(pid_t nr);
70754+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
70755 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
70756 struct pid_namespace *ns);
70757
70758@@ -2344,7 +2450,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
70759 extern void exit_itimers(struct signal_struct *);
70760 extern void flush_itimer_signals(void);
70761
70762-extern void do_group_exit(int);
70763+extern __noreturn void do_group_exit(int);
70764
70765 extern int allow_signal(int);
70766 extern int disallow_signal(int);
70767@@ -2545,9 +2651,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
70768
70769 #endif
70770
70771-static inline int object_is_on_stack(void *obj)
70772+static inline int object_starts_on_stack(void *obj)
70773 {
70774- void *stack = task_stack_page(current);
70775+ const void *stack = task_stack_page(current);
70776
70777 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
70778 }
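
The MF_PAX_* masks defined above occupy the high bits of mm->pax_flags, and pax_get_flags()/pax_set_flags() are the accessors (task_lock required when tsk != current). A hedged sketch of how a caller might test one flag; the function and its policy are invented for illustration:

/* Illustrative kernel-context sketch (assumes CONFIG_PAX_NOEXEC or
 * CONFIG_PAX_ASLR so that pax_get_flags() is defined). */
static bool task_wants_mprotect_restrictions(struct task_struct *tsk)
{
    /* tsk == current here, so no task_lock() is needed. */
    return (pax_get_flags(tsk) & MF_PAX_MPROTECT) != 0;
}
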
70779diff --git a/include/linux/security.h b/include/linux/security.h
70780index eee7478..290f7ba 100644
70781--- a/include/linux/security.h
70782+++ b/include/linux/security.h
70783@@ -26,6 +26,7 @@
70784 #include <linux/capability.h>
70785 #include <linux/slab.h>
70786 #include <linux/err.h>
70787+#include <linux/grsecurity.h>
70788
70789 struct linux_binprm;
70790 struct cred;
70791diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
70792index 68a04a3..866e6a1 100644
70793--- a/include/linux/seq_file.h
70794+++ b/include/linux/seq_file.h
70795@@ -26,6 +26,9 @@ struct seq_file {
70796 struct mutex lock;
70797 const struct seq_operations *op;
70798 int poll_event;
70799+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
70800+ u64 exec_id;
70801+#endif
70802 #ifdef CONFIG_USER_NS
70803 struct user_namespace *user_ns;
70804 #endif
70805@@ -38,6 +41,7 @@ struct seq_operations {
70806 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
70807 int (*show) (struct seq_file *m, void *v);
70808 };
70809+typedef struct seq_operations __no_const seq_operations_no_const;
70810
70811 #define SEQ_SKIP 1
70812
70813diff --git a/include/linux/shm.h b/include/linux/shm.h
70814index 429c199..4d42e38 100644
70815--- a/include/linux/shm.h
70816+++ b/include/linux/shm.h
70817@@ -21,6 +21,10 @@ struct shmid_kernel /* private to the kernel */
70818
70819 /* The task created the shm object. NULL if the task is dead. */
70820 struct task_struct *shm_creator;
70821+#ifdef CONFIG_GRKERNSEC
70822+ time_t shm_createtime;
70823+ pid_t shm_lapid;
70824+#endif
70825 };
70826
70827 /* shm_mode upper byte flags */
70828diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
70829index 320e976..fd52553 100644
70830--- a/include/linux/skbuff.h
70831+++ b/include/linux/skbuff.h
70832@@ -590,7 +590,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
70833 extern struct sk_buff *__alloc_skb(unsigned int size,
70834 gfp_t priority, int flags, int node);
70835 extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
70836-static inline struct sk_buff *alloc_skb(unsigned int size,
70837+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
70838 gfp_t priority)
70839 {
70840 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
70841@@ -700,7 +700,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
70842 */
70843 static inline int skb_queue_empty(const struct sk_buff_head *list)
70844 {
70845- return list->next == (struct sk_buff *)list;
70846+ return list->next == (const struct sk_buff *)list;
70847 }
70848
70849 /**
70850@@ -713,7 +713,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
70851 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
70852 const struct sk_buff *skb)
70853 {
70854- return skb->next == (struct sk_buff *)list;
70855+ return skb->next == (const struct sk_buff *)list;
70856 }
70857
70858 /**
70859@@ -726,7 +726,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
70860 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
70861 const struct sk_buff *skb)
70862 {
70863- return skb->prev == (struct sk_buff *)list;
70864+ return skb->prev == (const struct sk_buff *)list;
70865 }
70866
70867 /**
70868@@ -1722,7 +1722,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
70869 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
70870 */
70871 #ifndef NET_SKB_PAD
70872-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
70873+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
70874 #endif
70875
70876 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
70877@@ -2300,7 +2300,7 @@ extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
70878 int noblock, int *err);
70879 extern unsigned int datagram_poll(struct file *file, struct socket *sock,
70880 struct poll_table_struct *wait);
70881-extern int skb_copy_datagram_iovec(const struct sk_buff *from,
70882+extern int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from,
70883 int offset, struct iovec *to,
70884 int size);
70885 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
70886diff --git a/include/linux/slab.h b/include/linux/slab.h
70887index 5d168d7..720bff3 100644
70888--- a/include/linux/slab.h
70889+++ b/include/linux/slab.h
70890@@ -12,13 +12,20 @@
70891 #include <linux/gfp.h>
70892 #include <linux/types.h>
70893 #include <linux/workqueue.h>
70894-
70895+#include <linux/err.h>
70896
70897 /*
70898 * Flags to pass to kmem_cache_create().
70899 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
70900 */
70901 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
70902+
70903+#ifdef CONFIG_PAX_USERCOPY_SLABS
70904+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
70905+#else
70906+#define SLAB_USERCOPY 0x00000000UL
70907+#endif
70908+
70909 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
70910 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
70911 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
70912@@ -89,10 +96,13 @@
70913 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
70914 * Both make kfree a no-op.
70915 */
70916-#define ZERO_SIZE_PTR ((void *)16)
70917+#define ZERO_SIZE_PTR \
70918+({ \
70919+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
70920+ (void *)(-MAX_ERRNO-1L); \
70921+})
70922
70923-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
70924- (unsigned long)ZERO_SIZE_PTR)
70925+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
70926
70927 /*
70928 * Common fields provided in kmem_cache by all slab allocators
70929@@ -112,7 +122,7 @@ struct kmem_cache {
70930 unsigned int align; /* Alignment as calculated */
70931 unsigned long flags; /* Active flags on the slab */
70932 const char *name; /* Slab name for sysfs */
70933- int refcount; /* Use counter */
70934+ atomic_t refcount; /* Use counter */
70935 void (*ctor)(void *); /* Called on object slot creation */
70936 struct list_head list; /* List of all slab caches on the system */
70937 };
70938@@ -232,6 +242,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
70939 void kfree(const void *);
70940 void kzfree(const void *);
70941 size_t ksize(const void *);
70942+const char *check_heap_object(const void *ptr, unsigned long n);
70943+bool is_usercopy_object(const void *ptr);
70944
70945 /*
70946 * Allocator specific definitions. These are mainly used to establish optimized
70947@@ -311,6 +323,7 @@ size_t ksize(const void *);
70948 * for general use, and so are not documented here. For a full list of
70949 * potential flags, always refer to linux/gfp.h.
70950 */
70951+
70952 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
70953 {
70954 if (size != 0 && n > SIZE_MAX / size)
70955@@ -370,7 +383,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
70956 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
70957 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
70958 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
70959-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
70960+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
70961 #define kmalloc_track_caller(size, flags) \
70962 __kmalloc_track_caller(size, flags, _RET_IP_)
70963 #else
70964@@ -390,7 +403,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
70965 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
70966 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
70967 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
70968-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
70969+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
70970 #define kmalloc_node_track_caller(size, flags, node) \
70971 __kmalloc_node_track_caller(size, flags, node, \
70972 _RET_IP_)
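
Two related changes above: ZERO_SIZE_PTR moves from 16 to -MAX_ERRNO-1, one byte below the ERR_PTR encoding range, so the kmalloc(0) cookie still faults on dereference but can no longer be confused with a tiny valid address (the BUILD_BUG_ON guards the MAX_ERRNO/PAGE_MASK relationship this relies on). ZERO_OR_NULL_PTR then collapses into a single unsigned range check: subtracting 1 wraps NULL around to ULONG_MAX, so one comparison covers NULL and everything at or above ZERO_SIZE_PTR. A runnable demonstration:

#include <stdio.h>

#define MAX_ERRNO 4095
#define ZERO_SIZE_PTR ((void *)(-MAX_ERRNO - 1L))

#define ZERO_OR_NULL_PTR(x) \
    ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)

int main(void)
{
    int obj;

    /* NULL - 1 wraps to ULONG_MAX, so the single unsigned comparison
     * flags NULL, ZERO_SIZE_PTR, and the ERR_PTR range above it. */
    printf("NULL          -> %d\n", ZERO_OR_NULL_PTR((void *)0));
    printf("ZERO_SIZE_PTR -> %d\n", ZERO_OR_NULL_PTR(ZERO_SIZE_PTR));
    printf("ERR_PTR(-12)  -> %d\n", ZERO_OR_NULL_PTR((void *)-12L));
    printf("real pointer  -> %d\n", ZERO_OR_NULL_PTR(&obj));
    return 0;
}
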
70973diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
70974index 8bb6e0e..8eb0dbe 100644
70975--- a/include/linux/slab_def.h
70976+++ b/include/linux/slab_def.h
70977@@ -52,7 +52,7 @@ struct kmem_cache {
70978 /* 4) cache creation/removal */
70979 const char *name;
70980 struct list_head list;
70981- int refcount;
70982+ atomic_t refcount;
70983 int object_size;
70984 int align;
70985
70986@@ -68,10 +68,10 @@ struct kmem_cache {
70987 unsigned long node_allocs;
70988 unsigned long node_frees;
70989 unsigned long node_overflow;
70990- atomic_t allochit;
70991- atomic_t allocmiss;
70992- atomic_t freehit;
70993- atomic_t freemiss;
70994+ atomic_unchecked_t allochit;
70995+ atomic_unchecked_t allocmiss;
70996+ atomic_unchecked_t freehit;
70997+ atomic_unchecked_t freemiss;
70998
70999 /*
71000 * If debugging is enabled, then the allocator can add additional
71001@@ -111,11 +111,16 @@ struct cache_sizes {
71002 #ifdef CONFIG_ZONE_DMA
71003 struct kmem_cache *cs_dmacachep;
71004 #endif
71005+
71006+#ifdef CONFIG_PAX_USERCOPY_SLABS
71007+ struct kmem_cache *cs_usercopycachep;
71008+#endif
71009+
71010 };
71011 extern struct cache_sizes malloc_sizes[];
71012
71013 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
71014-void *__kmalloc(size_t size, gfp_t flags);
71015+void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
71016
71017 #ifdef CONFIG_TRACING
71018 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
71019@@ -152,6 +157,13 @@ found:
71020 cachep = malloc_sizes[i].cs_dmacachep;
71021 else
71022 #endif
71023+
71024+#ifdef CONFIG_PAX_USERCOPY_SLABS
71025+ if (flags & GFP_USERCOPY)
71026+ cachep = malloc_sizes[i].cs_usercopycachep;
71027+ else
71028+#endif
71029+
71030 cachep = malloc_sizes[i].cs_cachep;
71031
71032 ret = kmem_cache_alloc_trace(cachep, flags, size);
71033@@ -162,7 +174,7 @@ found:
71034 }
71035
71036 #ifdef CONFIG_NUMA
71037-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
71038+extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
71039 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
71040
71041 #ifdef CONFIG_TRACING
71042@@ -205,6 +217,13 @@ found:
71043 cachep = malloc_sizes[i].cs_dmacachep;
71044 else
71045 #endif
71046+
71047+#ifdef CONFIG_PAX_USERCOPY_SLABS
71048+ if (flags & GFP_USERCOPY)
71049+ cachep = malloc_sizes[i].cs_usercopycachep;
71050+ else
71051+#endif
71052+
71053 cachep = malloc_sizes[i].cs_cachep;
71054
71055 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
71056diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
71057index f28e14a..7831211 100644
71058--- a/include/linux/slob_def.h
71059+++ b/include/linux/slob_def.h
71060@@ -11,7 +11,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
71061 return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
71062 }
71063
71064-void *__kmalloc_node(size_t size, gfp_t flags, int node);
71065+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
71066
71067 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
71068 {
71069@@ -31,7 +31,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
71070 return __kmalloc_node(size, flags, NUMA_NO_NODE);
71071 }
71072
71073-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
71074+static __always_inline __size_overflow(1) void *__kmalloc(size_t size, gfp_t flags)
71075 {
71076 return kmalloc(size, flags);
71077 }
71078diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
71079index 9db4825..ed42fb5 100644
71080--- a/include/linux/slub_def.h
71081+++ b/include/linux/slub_def.h
71082@@ -91,7 +91,7 @@ struct kmem_cache {
71083 struct kmem_cache_order_objects max;
71084 struct kmem_cache_order_objects min;
71085 gfp_t allocflags; /* gfp flags to use on each alloc */
71086- int refcount; /* Refcount for slab cache destroy */
71087+ atomic_t refcount; /* Refcount for slab cache destroy */
71088 void (*ctor)(void *);
71089 int inuse; /* Offset to metadata */
71090 int align; /* Alignment */
71091@@ -156,7 +156,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
71092 * Sorry that the following has to be that ugly but some versions of GCC
71093 * have trouble with constant propagation and loops.
71094 */
71095-static __always_inline int kmalloc_index(size_t size)
71096+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
71097 {
71098 if (!size)
71099 return 0;
71100@@ -221,7 +221,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
71101 }
71102
71103 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
71104-void *__kmalloc(size_t size, gfp_t flags);
71105+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
71106
71107 static __always_inline void *
71108 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
71109@@ -265,7 +265,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
71110 }
71111 #endif
71112
71113-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
71114+static __always_inline __size_overflow(1) void *kmalloc_large(size_t size, gfp_t flags)
71115 {
71116 unsigned int order = get_order(size);
71117 return kmalloc_order_trace(size, flags, order);
71118@@ -290,7 +290,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
71119 }
71120
71121 #ifdef CONFIG_NUMA
71122-void *__kmalloc_node(size_t size, gfp_t flags, int node);
71123+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
71124 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
71125
71126 #ifdef CONFIG_TRACING
71127diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
71128index e8d702e..0a56eb4 100644
71129--- a/include/linux/sock_diag.h
71130+++ b/include/linux/sock_diag.h
71131@@ -10,7 +10,7 @@ struct sock;
71132 struct sock_diag_handler {
71133 __u8 family;
71134 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
71135-};
71136+} __do_const;
71137
71138 int sock_diag_register(const struct sock_diag_handler *h);
71139 void sock_diag_unregister(const struct sock_diag_handler *h);
71140diff --git a/include/linux/sonet.h b/include/linux/sonet.h
71141index 680f9a3..f13aeb0 100644
71142--- a/include/linux/sonet.h
71143+++ b/include/linux/sonet.h
71144@@ -7,7 +7,7 @@
71145 #include <uapi/linux/sonet.h>
71146
71147 struct k_sonet_stats {
71148-#define __HANDLE_ITEM(i) atomic_t i
71149+#define __HANDLE_ITEM(i) atomic_unchecked_t i
71150 __SONET_ITEMS
71151 #undef __HANDLE_ITEM
71152 };
71153diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
71154index 34206b8..3db7f1c 100644
71155--- a/include/linux/sunrpc/clnt.h
71156+++ b/include/linux/sunrpc/clnt.h
71157@@ -96,7 +96,7 @@ struct rpc_procinfo {
71158 unsigned int p_timer; /* Which RTT timer to use */
71159 u32 p_statidx; /* Which procedure to account */
71160 const char * p_name; /* name of procedure */
71161-};
71162+} __do_const;
71163
71164 #ifdef __KERNEL__
71165
71166@@ -176,9 +176,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
71167 {
71168 switch (sap->sa_family) {
71169 case AF_INET:
71170- return ntohs(((struct sockaddr_in *)sap)->sin_port);
71171+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
71172 case AF_INET6:
71173- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
71174+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
71175 }
71176 return 0;
71177 }
71178@@ -211,7 +211,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
71179 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
71180 const struct sockaddr *src)
71181 {
71182- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
71183+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
71184 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
71185
71186 dsin->sin_family = ssin->sin_family;
71187@@ -314,7 +314,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
71188 if (sa->sa_family != AF_INET6)
71189 return 0;
71190
71191- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
71192+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
71193 }
71194
71195 #endif /* __KERNEL__ */
71196diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
71197index 676ddf5..4c519a1 100644
71198--- a/include/linux/sunrpc/svc.h
71199+++ b/include/linux/sunrpc/svc.h
71200@@ -410,7 +410,7 @@ struct svc_procedure {
71201 unsigned int pc_count; /* call count */
71202 unsigned int pc_cachetype; /* cache info (NFS) */
71203 unsigned int pc_xdrressize; /* maximum size of XDR reply */
71204-};
71205+} __do_const;
71206
71207 /*
71208 * Function prototypes.
71209diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
71210index 0b8e3e6..33e0a01 100644
71211--- a/include/linux/sunrpc/svc_rdma.h
71212+++ b/include/linux/sunrpc/svc_rdma.h
71213@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
71214 extern unsigned int svcrdma_max_requests;
71215 extern unsigned int svcrdma_max_req_size;
71216
71217-extern atomic_t rdma_stat_recv;
71218-extern atomic_t rdma_stat_read;
71219-extern atomic_t rdma_stat_write;
71220-extern atomic_t rdma_stat_sq_starve;
71221-extern atomic_t rdma_stat_rq_starve;
71222-extern atomic_t rdma_stat_rq_poll;
71223-extern atomic_t rdma_stat_rq_prod;
71224-extern atomic_t rdma_stat_sq_poll;
71225-extern atomic_t rdma_stat_sq_prod;
71226+extern atomic_unchecked_t rdma_stat_recv;
71227+extern atomic_unchecked_t rdma_stat_read;
71228+extern atomic_unchecked_t rdma_stat_write;
71229+extern atomic_unchecked_t rdma_stat_sq_starve;
71230+extern atomic_unchecked_t rdma_stat_rq_starve;
71231+extern atomic_unchecked_t rdma_stat_rq_poll;
71232+extern atomic_unchecked_t rdma_stat_rq_prod;
71233+extern atomic_unchecked_t rdma_stat_sq_poll;
71234+extern atomic_unchecked_t rdma_stat_sq_prod;
71235
71236 #define RPCRDMA_VERSION 1
71237
71238diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
71239index dd74084a..7f509d5 100644
71240--- a/include/linux/sunrpc/svcauth.h
71241+++ b/include/linux/sunrpc/svcauth.h
71242@@ -109,7 +109,7 @@ struct auth_ops {
71243 int (*release)(struct svc_rqst *rq);
71244 void (*domain_release)(struct auth_domain *);
71245 int (*set_client)(struct svc_rqst *rq);
71246-};
71247+} __do_const;
71248
71249 #define SVC_GARBAGE 1
71250 #define SVC_SYSERR 2
71251diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
71252index 071d62c..4ccc7ac 100644
71253--- a/include/linux/swiotlb.h
71254+++ b/include/linux/swiotlb.h
71255@@ -59,7 +59,8 @@ extern void
71256
71257 extern void
71258 swiotlb_free_coherent(struct device *hwdev, size_t size,
71259- void *vaddr, dma_addr_t dma_handle);
71260+ void *vaddr, dma_addr_t dma_handle,
71261+ struct dma_attrs *attrs);
71262
71263 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
71264 unsigned long offset, size_t size,
71265diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
71266index 27b3b0b..e093dd9 100644
71267--- a/include/linux/syscore_ops.h
71268+++ b/include/linux/syscore_ops.h
71269@@ -16,7 +16,7 @@ struct syscore_ops {
71270 int (*suspend)(void);
71271 void (*resume)(void);
71272 void (*shutdown)(void);
71273-};
71274+} __do_const;
71275
71276 extern void register_syscore_ops(struct syscore_ops *ops);
71277 extern void unregister_syscore_ops(struct syscore_ops *ops);
71278diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
71279index 14a8ff2..af52bad 100644
71280--- a/include/linux/sysctl.h
71281+++ b/include/linux/sysctl.h
71282@@ -34,13 +34,13 @@ struct ctl_table_root;
71283 struct ctl_table_header;
71284 struct ctl_dir;
71285
71286-typedef struct ctl_table ctl_table;
71287-
71288 typedef int proc_handler (struct ctl_table *ctl, int write,
71289 void __user *buffer, size_t *lenp, loff_t *ppos);
71290
71291 extern int proc_dostring(struct ctl_table *, int,
71292 void __user *, size_t *, loff_t *);
71293+extern int proc_dostring_modpriv(struct ctl_table *, int,
71294+ void __user *, size_t *, loff_t *);
71295 extern int proc_dointvec(struct ctl_table *, int,
71296 void __user *, size_t *, loff_t *);
71297 extern int proc_dointvec_minmax(struct ctl_table *, int,
71298@@ -115,7 +115,9 @@ struct ctl_table
71299 struct ctl_table_poll *poll;
71300 void *extra1;
71301 void *extra2;
71302-};
71303+} __do_const;
71304+typedef struct ctl_table __no_const ctl_table_no_const;
71305+typedef struct ctl_table ctl_table;
71306
71307 struct ctl_node {
71308 struct rb_node node;
71309diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
71310index 381f06d..dc16cc7 100644
71311--- a/include/linux/sysfs.h
71312+++ b/include/linux/sysfs.h
71313@@ -31,7 +31,8 @@ struct attribute {
71314 struct lock_class_key *key;
71315 struct lock_class_key skey;
71316 #endif
71317-};
71318+} __do_const;
71319+typedef struct attribute __no_const attribute_no_const;
71320
71321 /**
71322 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
71323@@ -59,8 +60,8 @@ struct attribute_group {
71324 umode_t (*is_visible)(struct kobject *,
71325 struct attribute *, int);
71326 struct attribute **attrs;
71327-};
71328-
71329+} __do_const;
71330+typedef struct attribute_group __no_const attribute_group_no_const;
71331
71332
71333 /**
71334@@ -107,7 +108,8 @@ struct bin_attribute {
71335 char *, loff_t, size_t);
71336 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
71337 struct vm_area_struct *vma);
71338-};
71339+} __do_const;
71340+typedef struct bin_attribute __no_const bin_attribute_no_const;
71341
71342 /**
71343 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
71344diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
71345index 7faf933..4657127 100644
71346--- a/include/linux/sysrq.h
71347+++ b/include/linux/sysrq.h
71348@@ -15,7 +15,9 @@
71349 #define _LINUX_SYSRQ_H
71350
71351 #include <linux/errno.h>
71352+#include <linux/compiler.h>
71353 #include <linux/types.h>
71354+#include <linux/compiler.h>
71355
71356 /* Enable/disable SYSRQ support by default (0==no, 1==yes). */
71357 #define SYSRQ_DEFAULT_ENABLE 1
71358@@ -36,7 +38,7 @@ struct sysrq_key_op {
71359 char *help_msg;
71360 char *action_msg;
71361 int enable_mask;
71362-};
71363+} __do_const;
71364
71365 #ifdef CONFIG_MAGIC_SYSRQ
71366
71367diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
71368index e7e0473..39b7b52 100644
71369--- a/include/linux/thread_info.h
71370+++ b/include/linux/thread_info.h
71371@@ -148,6 +148,15 @@ static inline bool test_and_clear_restore_sigmask(void)
71372 #error "no set_restore_sigmask() provided and default one won't work"
71373 #endif
71374
71375+extern void __check_object_size(const void *ptr, unsigned long n, bool to);
71376+static inline void check_object_size(const void *ptr, unsigned long n, bool to)
71377+{
71378+#ifndef CONFIG_PAX_USERCOPY_DEBUG
71379+ if (!__builtin_constant_p(n))
71380+#endif
71381+ __check_object_size(ptr, n, to);
71382+}
71383+
71384 #endif /* __KERNEL__ */
71385
71386 #endif /* _LINUX_THREAD_INFO_H */
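
check_object_size() is the PAX_USERCOPY entry point: compile-time-constant copy lengths, the common struct-copy case, skip the expensive slab/stack walk in __check_object_size(), while variable, potentially attacker-influenced lengths always take it; PAX_USERCOPY_DEBUG removes the shortcut so everything is checked. The same gate, modelled as a macro so the constant folding is visible even without inlining (the kernel's static inline relies on inlining for the same effect):

#include <stdio.h>

static void expensive_runtime_check(const void *ptr, unsigned long n)
{
    (void)ptr;
    printf("runtime-checking %lu byte(s)\n", n);
}

#define check_object_size_demo(ptr, n)          \
    do {                                        \
        if (!__builtin_constant_p(n))           \
            expensive_runtime_check(ptr, n);    \
    } while (0)

int main(int argc, char **argv)
{
    char buf[16];

    (void)argv;
    check_object_size_demo(buf, sizeof(buf));         /* constant: skipped */
    check_object_size_demo(buf, (unsigned long)argc); /* variable: checked */
    return 0;
}
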
71387diff --git a/include/linux/tty.h b/include/linux/tty.h
71388index 8db1b56..c16a040 100644
71389--- a/include/linux/tty.h
71390+++ b/include/linux/tty.h
71391@@ -194,7 +194,7 @@ struct tty_port {
71392 const struct tty_port_operations *ops; /* Port operations */
71393 spinlock_t lock; /* Lock protecting tty field */
71394 int blocked_open; /* Waiting to open */
71395- int count; /* Usage count */
71396+ atomic_t count; /* Usage count */
71397 wait_queue_head_t open_wait; /* Open waiters */
71398 wait_queue_head_t close_wait; /* Close waiters */
71399 wait_queue_head_t delta_msr_wait; /* Modem status change */
71400@@ -490,7 +490,7 @@ extern int tty_port_open(struct tty_port *port,
71401 struct tty_struct *tty, struct file *filp);
71402 static inline int tty_port_users(struct tty_port *port)
71403 {
71404- return port->count + port->blocked_open;
71405+ return atomic_read(&port->count) + port->blocked_open;
71406 }
71407
71408 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
71409diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
71410index dd976cf..e272742 100644
71411--- a/include/linux/tty_driver.h
71412+++ b/include/linux/tty_driver.h
71413@@ -284,7 +284,7 @@ struct tty_operations {
71414 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
71415 #endif
71416 const struct file_operations *proc_fops;
71417-};
71418+} __do_const;
71419
71420 struct tty_driver {
71421 int magic; /* magic number for this structure */
71422diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
71423index fb79dd8d..07d4773 100644
71424--- a/include/linux/tty_ldisc.h
71425+++ b/include/linux/tty_ldisc.h
71426@@ -149,7 +149,7 @@ struct tty_ldisc_ops {
71427
71428 struct module *owner;
71429
71430- int refcount;
71431+ atomic_t refcount;
71432 };
71433
71434 struct tty_ldisc {
71435diff --git a/include/linux/types.h b/include/linux/types.h
71436index 4d118ba..c3ee9bf 100644
71437--- a/include/linux/types.h
71438+++ b/include/linux/types.h
71439@@ -176,10 +176,26 @@ typedef struct {
71440 int counter;
71441 } atomic_t;
71442
71443+#ifdef CONFIG_PAX_REFCOUNT
71444+typedef struct {
71445+ int counter;
71446+} atomic_unchecked_t;
71447+#else
71448+typedef atomic_t atomic_unchecked_t;
71449+#endif
71450+
71451 #ifdef CONFIG_64BIT
71452 typedef struct {
71453 long counter;
71454 } atomic64_t;
71455+
71456+#ifdef CONFIG_PAX_REFCOUNT
71457+typedef struct {
71458+ long counter;
71459+} atomic64_unchecked_t;
71460+#else
71461+typedef atomic64_t atomic64_unchecked_t;
71462+#endif
71463 #endif
71464
71465 struct list_head {
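
atomic_unchecked_t is the opt-out type for PAX_REFCOUNT: layout-identical to atomic_t, but its operations carry no overflow trap, which is what the many atomic_t to atomic_unchecked_t conversions in this patch (statistics such as the rdma_stat_* counters above) select. Checked counters trap on wrap so a reference count can never come back around to zero and trigger a premature free. A userspace model of the distinction, using __builtin_add_overflow in place of the arch-specific trap:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

typedef struct { int counter; } atomic_demo_t;            /* checked */
typedef struct { int counter; } atomic_unchecked_demo_t;  /* may wrap */

static void atomic_inc_demo(atomic_demo_t *v)
{
    int next;
    /* Model of the PAX_REFCOUNT trap: a wrap here would let a
     * refcount reach 0 again while the object is still in use. */
    if (__builtin_add_overflow(v->counter, 1, &next)) {
        fprintf(stderr, "refcount overflow detected\n");
        abort();
    }
    v->counter = next;
}

static void atomic_inc_unchecked_demo(atomic_unchecked_demo_t *v)
{
    v->counter = (int)((unsigned int)v->counter + 1); /* defined wrap */
}

int main(void)
{
    atomic_demo_t ref = { INT_MAX };
    atomic_unchecked_demo_t stat = { INT_MAX };

    atomic_inc_unchecked_demo(&stat);   /* statistics: wrap is harmless */
    atomic_inc_demo(&ref);              /* refcount: aborts on overflow */
    return 0;
}
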
71466diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
71467index 5ca0951..ab496a5 100644
71468--- a/include/linux/uaccess.h
71469+++ b/include/linux/uaccess.h
71470@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
71471 long ret; \
71472 mm_segment_t old_fs = get_fs(); \
71473 \
71474- set_fs(KERNEL_DS); \
71475 pagefault_disable(); \
71476- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
71477- pagefault_enable(); \
71478+ set_fs(KERNEL_DS); \
71479+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
71480 set_fs(old_fs); \
71481+ pagefault_enable(); \
71482 ret; \
71483 })
71484
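
The reordered probe-and-copy macro closes two windows: previously faults were still serviceable between set_fs(KERNEL_DS) and pagefault_disable(), and again between pagefault_enable() and the restoring set_fs(old_fs), so a fault handler could run while the widened KERNEL_DS limit was live. The new nesting disables faults first and re-enables them last. A toy state model of just that ordering, assuming nothing beyond the sequence shown above:

#include <stdio.h>

static int faults_allowed = 1;
static const char *fs = "USER_DS";

static void pagefault_disable_demo(void) { faults_allowed = 0; }
static void pagefault_enable_demo(void)  { faults_allowed = 1; }
static void set_fs_demo(const char *lim) { fs = lim; }

int main(void)
{
    const char *old_fs = fs;

    pagefault_disable_demo();
    set_fs_demo("KERNEL_DS");
    printf("copy runs: fs=%s, faults allowed=%d\n", fs, faults_allowed);
    set_fs_demo(old_fs);
    pagefault_enable_demo();
    printf("after:     fs=%s, faults allowed=%d\n", fs, faults_allowed);
    return 0;   /* at no point were faults allowed under KERNEL_DS */
}
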
71485diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
71486index 8e522cbc..aa8572d 100644
71487--- a/include/linux/uidgid.h
71488+++ b/include/linux/uidgid.h
71489@@ -197,4 +197,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
71490
71491 #endif /* CONFIG_USER_NS */
71492
71493+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
71494+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
71495+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
71496+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
71497+
71498 #endif /* _LINUX_UIDGID_H */
71499diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
71500index 99c1b4d..562e6f3 100644
71501--- a/include/linux/unaligned/access_ok.h
71502+++ b/include/linux/unaligned/access_ok.h
71503@@ -4,34 +4,34 @@
71504 #include <linux/kernel.h>
71505 #include <asm/byteorder.h>
71506
71507-static inline u16 get_unaligned_le16(const void *p)
71508+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
71509 {
71510- return le16_to_cpup((__le16 *)p);
71511+ return le16_to_cpup((const __le16 *)p);
71512 }
71513
71514-static inline u32 get_unaligned_le32(const void *p)
71515+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
71516 {
71517- return le32_to_cpup((__le32 *)p);
71518+ return le32_to_cpup((const __le32 *)p);
71519 }
71520
71521-static inline u64 get_unaligned_le64(const void *p)
71522+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
71523 {
71524- return le64_to_cpup((__le64 *)p);
71525+ return le64_to_cpup((const __le64 *)p);
71526 }
71527
71528-static inline u16 get_unaligned_be16(const void *p)
71529+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
71530 {
71531- return be16_to_cpup((__be16 *)p);
71532+ return be16_to_cpup((const __be16 *)p);
71533 }
71534
71535-static inline u32 get_unaligned_be32(const void *p)
71536+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
71537 {
71538- return be32_to_cpup((__be32 *)p);
71539+ return be32_to_cpup((const __be32 *)p);
71540 }
71541
71542-static inline u64 get_unaligned_be64(const void *p)
71543+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
71544 {
71545- return be64_to_cpup((__be64 *)p);
71546+ return be64_to_cpup((const __be64 *)p);
71547 }
71548
71549 static inline void put_unaligned_le16(u16 val, void *p)
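
These are the access_ok variants of the unaligned helpers, used where the architecture tolerates misaligned loads, so they are just casts plus byte-order conversion; the patch makes the casts const-correct and tags the getters __intentional_overflow(-1), which (as used throughout this patch) appears to tell the size_overflow plugin that arithmetic wrap-around involving their return values is expected and should not be instrumented. A portable, runnable equivalent of the 32-bit little-endian accessor:

#include <stdio.h>
#include <stdint.h>

/* Byte-wise equivalent of get_unaligned_le32(); the kernel's
 * access_ok flavour is the direct cast-and-load seen above. */
static uint32_t get_unaligned_le32_demo(const void *p)
{
    const uint8_t *b = p;
    return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
           ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
    /* Reading at offset 1 of this buffer is an unaligned access. */
    uint8_t buf[5] = { 0xAA, 0x78, 0x56, 0x34, 0x12 };

    printf("%#x\n", get_unaligned_le32_demo(buf + 1));  /* 0x12345678 */
    return 0;
}
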
71550diff --git a/include/linux/usb.h b/include/linux/usb.h
71551index 4d22d0f..8d0e8f8 100644
71552--- a/include/linux/usb.h
71553+++ b/include/linux/usb.h
71554@@ -554,7 +554,7 @@ struct usb_device {
71555 int maxchild;
71556
71557 u32 quirks;
71558- atomic_t urbnum;
71559+ atomic_unchecked_t urbnum;
71560
71561 unsigned long active_duration;
71562
71563@@ -1604,7 +1604,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
71564
71565 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
71566 __u8 request, __u8 requesttype, __u16 value, __u16 index,
71567- void *data, __u16 size, int timeout);
71568+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
71569 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
71570 void *data, int len, int *actual_length, int timeout);
71571 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
71572diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
71573index c5d36c6..108f4f9 100644
71574--- a/include/linux/usb/renesas_usbhs.h
71575+++ b/include/linux/usb/renesas_usbhs.h
71576@@ -39,7 +39,7 @@ enum {
71577 */
71578 struct renesas_usbhs_driver_callback {
71579 int (*notify_hotplug)(struct platform_device *pdev);
71580-};
71581+} __no_const;
71582
71583 /*
71584 * callback functions for platform
71585diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
71586index b9bd2e6..4ce0093 100644
71587--- a/include/linux/user_namespace.h
71588+++ b/include/linux/user_namespace.h
71589@@ -21,7 +21,7 @@ struct user_namespace {
71590 struct uid_gid_map uid_map;
71591 struct uid_gid_map gid_map;
71592 struct uid_gid_map projid_map;
71593- struct kref kref;
71594+ atomic_t count;
71595 struct user_namespace *parent;
71596 kuid_t owner;
71597 kgid_t group;
71598@@ -35,18 +35,18 @@ extern struct user_namespace init_user_ns;
71599 static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
71600 {
71601 if (ns)
71602- kref_get(&ns->kref);
71603+ atomic_inc(&ns->count);
71604 return ns;
71605 }
71606
71607 extern int create_user_ns(struct cred *new);
71608 extern int unshare_userns(unsigned long unshare_flags, struct cred **new_cred);
71609-extern void free_user_ns(struct kref *kref);
71610+extern void free_user_ns(struct user_namespace *ns);
71611
71612 static inline void put_user_ns(struct user_namespace *ns)
71613 {
71614- if (ns)
71615- kref_put(&ns->kref, free_user_ns);
71616+ if (ns && atomic_dec_and_test(&ns->count))
71617+ free_user_ns(ns);
71618 }
71619
71620 struct seq_operations;
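
Dropping the kref in favour of a raw atomic_t (overflow-checked under PAX_REFCOUNT) lets put_user_ns() free directly on the final decrement instead of going through a kref release callback. A self-contained C11 model of the same get/put shape:

#include <stdio.h>
#include <stdlib.h>
#include <stdatomic.h>

struct user_ns_demo {
    atomic_int count;
    /* ... uid/gid maps would live here ... */
};

static void free_user_ns_demo(struct user_ns_demo *ns)
{
    printf("freeing namespace %p\n", (void *)ns);
    free(ns);
}

static struct user_ns_demo *get_user_ns_demo(struct user_ns_demo *ns)
{
    if (ns)
        atomic_fetch_add(&ns->count, 1);
    return ns;
}

static void put_user_ns_demo(struct user_ns_demo *ns)
{
    /* Same shape as the patched put_user_ns(): last put frees. */
    if (ns && atomic_fetch_sub(&ns->count, 1) == 1)
        free_user_ns_demo(ns);
}

int main(void)
{
    struct user_ns_demo *ns = malloc(sizeof(*ns));

    if (!ns)
        return 1;
    atomic_init(&ns->count, 1);
    get_user_ns_demo(ns);
    put_user_ns_demo(ns);   /* count 2 -> 1 */
    put_user_ns_demo(ns);   /* count 1 -> 0: freed */
    return 0;
}
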
71621diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
71622index 6f8fbcf..8259001 100644
71623--- a/include/linux/vermagic.h
71624+++ b/include/linux/vermagic.h
71625@@ -25,9 +25,35 @@
71626 #define MODULE_ARCH_VERMAGIC ""
71627 #endif
71628
71629+#ifdef CONFIG_PAX_REFCOUNT
71630+#define MODULE_PAX_REFCOUNT "REFCOUNT "
71631+#else
71632+#define MODULE_PAX_REFCOUNT ""
71633+#endif
71634+
71635+#ifdef CONSTIFY_PLUGIN
71636+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
71637+#else
71638+#define MODULE_CONSTIFY_PLUGIN ""
71639+#endif
71640+
71641+#ifdef STACKLEAK_PLUGIN
71642+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
71643+#else
71644+#define MODULE_STACKLEAK_PLUGIN ""
71645+#endif
71646+
71647+#ifdef CONFIG_GRKERNSEC
71648+#define MODULE_GRSEC "GRSEC "
71649+#else
71650+#define MODULE_GRSEC ""
71651+#endif
71652+
71653 #define VERMAGIC_STRING \
71654 UTS_RELEASE " " \
71655 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
71656 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
71657- MODULE_ARCH_VERMAGIC
71658+ MODULE_ARCH_VERMAGIC \
71659+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
71660+ MODULE_GRSEC
71661
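
Folding the MODULE_* fragments into VERMAGIC_STRING means a module built without the same REFCOUNT/CONSTIFY/STACKLEAK/GRSEC configuration fails the vermagic comparison at load time instead of crashing later on ABI-incompatible structures (atomic_unchecked_t layouts, constified ops, and so on); the composed string ends up something like "3.8.4 SMP mod_unload REFCOUNT CONSTIFY_PLUGIN GRSEC", with the exact contents depending on configuration. A tiny demo of the string-literal concatenation the kernel relies on (the release string here is hypothetical):

#include <stdio.h>

#define UTS_RELEASE "3.8.4-grsec"   /* hypothetical release string */
#define MODULE_VERMAGIC_SMP "SMP "
#define MODULE_PAX_REFCOUNT "REFCOUNT "
#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
#define MODULE_GRSEC "GRSEC "

/* Adjacent string literals concatenate at compile time, exactly how
 * the kernel assembles VERMAGIC_STRING. */
#define VERMAGIC_STRING \
    UTS_RELEASE " " MODULE_VERMAGIC_SMP \
    MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_GRSEC

int main(void)
{
    puts(VERMAGIC_STRING);
    return 0;
}
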
71662diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
71663index 6071e91..ca6a489 100644
71664--- a/include/linux/vmalloc.h
71665+++ b/include/linux/vmalloc.h
71666@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
71667 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
71668 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
71669 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
71670+
71671+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71672+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
71673+#endif
71674+
71675 /* bits [20..32] reserved for arch specific ioremap internals */
71676
71677 /*
71678@@ -62,7 +67,7 @@ extern void *vmalloc_32_user(unsigned long size);
71679 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
71680 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
71681 unsigned long start, unsigned long end, gfp_t gfp_mask,
71682- pgprot_t prot, int node, const void *caller);
71683+ pgprot_t prot, int node, const void *caller) __size_overflow(1);
71684 extern void vfree(const void *addr);
71685
71686 extern void *vmap(struct page **pages, unsigned int count,
71687@@ -124,8 +129,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
71688 extern void free_vm_area(struct vm_struct *area);
71689
71690 /* for /dev/kmem */
71691-extern long vread(char *buf, char *addr, unsigned long count);
71692-extern long vwrite(char *buf, char *addr, unsigned long count);
71693+extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
71694+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
71695
71696 /*
71697 * Internals. Don't use..
71698diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
71699index a13291f..af51fa3 100644
71700--- a/include/linux/vmstat.h
71701+++ b/include/linux/vmstat.h
71702@@ -95,18 +95,18 @@ static inline void vm_events_fold_cpu(int cpu)
71703 /*
71704 * Zone based page accounting with per cpu differentials.
71705 */
71706-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
71707+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
71708
71709 static inline void zone_page_state_add(long x, struct zone *zone,
71710 enum zone_stat_item item)
71711 {
71712- atomic_long_add(x, &zone->vm_stat[item]);
71713- atomic_long_add(x, &vm_stat[item]);
71714+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
71715+ atomic_long_add_unchecked(x, &vm_stat[item]);
71716 }
71717
71718 static inline unsigned long global_page_state(enum zone_stat_item item)
71719 {
71720- long x = atomic_long_read(&vm_stat[item]);
71721+ long x = atomic_long_read_unchecked(&vm_stat[item]);
71722 #ifdef CONFIG_SMP
71723 if (x < 0)
71724 x = 0;
71725@@ -117,7 +117,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
71726 static inline unsigned long zone_page_state(struct zone *zone,
71727 enum zone_stat_item item)
71728 {
71729- long x = atomic_long_read(&zone->vm_stat[item]);
71730+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
71731 #ifdef CONFIG_SMP
71732 if (x < 0)
71733 x = 0;
71734@@ -134,7 +134,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
71735 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
71736 enum zone_stat_item item)
71737 {
71738- long x = atomic_long_read(&zone->vm_stat[item]);
71739+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
71740
71741 #ifdef CONFIG_SMP
71742 int cpu;
71743@@ -226,8 +226,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
71744
71745 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
71746 {
71747- atomic_long_inc(&zone->vm_stat[item]);
71748- atomic_long_inc(&vm_stat[item]);
71749+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
71750+ atomic_long_inc_unchecked(&vm_stat[item]);
71751 }
71752
71753 static inline void __inc_zone_page_state(struct page *page,
71754@@ -238,8 +238,8 @@ static inline void __inc_zone_page_state(struct page *page,
71755
71756 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
71757 {
71758- atomic_long_dec(&zone->vm_stat[item]);
71759- atomic_long_dec(&vm_stat[item]);
71760+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
71761+ atomic_long_dec_unchecked(&vm_stat[item]);
71762 }
71763
71764 static inline void __dec_zone_page_state(struct page *page,
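The _unchecked types exist because PaX REFCOUNT makes the ordinary atomics trap on signed overflow to stop reference-count exploits; pure statistics like vm_stat may wrap harmlessly, so they move to the unchecked variants. A simplified sketch of the distinction on x86 (close to, but not quoted from, the arch code earlier in this patch):

    static inline void atomic_add(int i, atomic_t *v)
    {
        asm volatile(LOCK_PREFIX "addl %1,%0\n"
    #ifdef CONFIG_PAX_REFCOUNT
                     "jno 0f\n"
                     LOCK_PREFIX "subl %1,%0\n" /* undo the add on overflow */
                     "int $4\n0:\n"             /* raise the overflow trap */
                     _ASM_EXTABLE(0b, 0b)
    #endif
                     : "+m" (v->counter) : "ir" (i));
    }

    static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
    {
        asm volatile(LOCK_PREFIX "addl %1,%0"   /* plain add, no check */
                     : "+m" (v->counter) : "ir" (i));
    }
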
71765diff --git a/include/linux/xattr.h b/include/linux/xattr.h
71766index fdbafc6..b7ffd47 100644
71767--- a/include/linux/xattr.h
71768+++ b/include/linux/xattr.h
71769@@ -28,7 +28,7 @@ struct xattr_handler {
71770 size_t size, int handler_flags);
71771 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
71772 size_t size, int flags, int handler_flags);
71773-};
71774+} __do_const;
71775
71776 struct xattr {
71777 char *name;
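__do_const drives grsecurity's constify gcc plugin: ops structures consisting of function pointers become const and land in read-only memory, so their callbacks cannot be overwritten and hijacked. Assumed plumbing, matching how compiler.h defines these elsewhere in the patch:

    #ifdef CONSTIFY_PLUGIN
    #define __do_const __attribute__((do_const))
    #define __no_const __attribute__((no_const)) /* per-object opt-out, cf. cfctrl.h below */
    #else
    #define __do_const
    #define __no_const
    #endif
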
71778diff --git a/include/linux/zlib.h b/include/linux/zlib.h
71779index 9c5a6b4..09c9438 100644
71780--- a/include/linux/zlib.h
71781+++ b/include/linux/zlib.h
71782@@ -31,6 +31,7 @@
71783 #define _ZLIB_H
71784
71785 #include <linux/zconf.h>
71786+#include <linux/compiler.h>
71787
71788 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
71789 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
71790@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
71791
71792 /* basic functions */
71793
71794-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
71795+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
71796 /*
71797 Returns the number of bytes that needs to be allocated for a per-
71798 stream workspace with the specified parameters. A pointer to this
71799diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
71800index 95d1c91..6798cca 100644
71801--- a/include/media/v4l2-dev.h
71802+++ b/include/media/v4l2-dev.h
71803@@ -76,7 +76,7 @@ struct v4l2_file_operations {
71804 int (*mmap) (struct file *, struct vm_area_struct *);
71805 int (*open) (struct file *);
71806 int (*release) (struct file *);
71807-};
71808+} __do_const;
71809
71810 /*
71811 * Newer version of video_device, handled by videodev2.c
71812diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
71813index 4118ad1..cb7e25f 100644
71814--- a/include/media/v4l2-ioctl.h
71815+++ b/include/media/v4l2-ioctl.h
71816@@ -284,7 +284,6 @@ struct v4l2_ioctl_ops {
71817 bool valid_prio, int cmd, void *arg);
71818 };
71819
71820-
71821 /* v4l debugging and diagnostics */
71822
71823 /* Debug bitmask flags to be used on V4L2 */
71824diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
71825index adcbb20..62c2559 100644
71826--- a/include/net/9p/transport.h
71827+++ b/include/net/9p/transport.h
71828@@ -57,7 +57,7 @@ struct p9_trans_module {
71829 int (*cancel) (struct p9_client *, struct p9_req_t *req);
71830 int (*zc_request)(struct p9_client *, struct p9_req_t *,
71831 char *, char *, int , int, int, int);
71832-};
71833+} __do_const;
71834
71835 void v9fs_register_trans(struct p9_trans_module *m);
71836 void v9fs_unregister_trans(struct p9_trans_module *m);
71837diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
71838index 7588ef4..e62d35f 100644
71839--- a/include/net/bluetooth/l2cap.h
71840+++ b/include/net/bluetooth/l2cap.h
71841@@ -552,7 +552,7 @@ struct l2cap_ops {
71842 void (*defer) (struct l2cap_chan *chan);
71843 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
71844 unsigned long len, int nb);
71845-};
71846+} __do_const;
71847
71848 struct l2cap_conn {
71849 struct hci_conn *hcon;
71850diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
71851index 9e5425b..8136ffc 100644
71852--- a/include/net/caif/cfctrl.h
71853+++ b/include/net/caif/cfctrl.h
71854@@ -52,7 +52,7 @@ struct cfctrl_rsp {
71855 void (*radioset_rsp)(void);
71856 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
71857 struct cflayer *client_layer);
71858-};
71859+} __no_const;
71860
71861 /* Link Setup Parameters for CAIF-Links. */
71862 struct cfctrl_link_param {
71863@@ -101,8 +101,8 @@ struct cfctrl_request_info {
71864 struct cfctrl {
71865 struct cfsrvl serv;
71866 struct cfctrl_rsp res;
71867- atomic_t req_seq_no;
71868- atomic_t rsp_seq_no;
71869+ atomic_unchecked_t req_seq_no;
71870+ atomic_unchecked_t rsp_seq_no;
71871 struct list_head list;
71872 /* Protects from simultaneous access to first_req list */
71873 spinlock_t info_list_lock;
71874diff --git a/include/net/flow.h b/include/net/flow.h
71875index 628e11b..4c475df 100644
71876--- a/include/net/flow.h
71877+++ b/include/net/flow.h
71878@@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
71879
71880 extern void flow_cache_flush(void);
71881 extern void flow_cache_flush_deferred(void);
71882-extern atomic_t flow_cache_genid;
71883+extern atomic_unchecked_t flow_cache_genid;
71884
71885 #endif
71886diff --git a/include/net/genetlink.h b/include/net/genetlink.h
71887index bdfbe68..4402ebe 100644
71888--- a/include/net/genetlink.h
71889+++ b/include/net/genetlink.h
71890@@ -118,7 +118,7 @@ struct genl_ops {
71891 struct netlink_callback *cb);
71892 int (*done)(struct netlink_callback *cb);
71893 struct list_head ops_list;
71894-};
71895+} __do_const;
71896
71897 extern int genl_register_family(struct genl_family *family);
71898 extern int genl_register_family_with_ops(struct genl_family *family,
71899diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
71900index e5062c9..48a9a4b 100644
71901--- a/include/net/gro_cells.h
71902+++ b/include/net/gro_cells.h
71903@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
71904 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
71905
71906 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
71907- atomic_long_inc(&dev->rx_dropped);
71908+ atomic_long_inc_unchecked(&dev->rx_dropped);
71909 kfree_skb(skb);
71910 return;
71911 }
71912@@ -73,8 +73,8 @@ static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *de
71913 int i;
71914
71915 gcells->gro_cells_mask = roundup_pow_of_two(netif_get_num_default_rss_queues()) - 1;
71916- gcells->cells = kcalloc(sizeof(struct gro_cell),
71917- gcells->gro_cells_mask + 1,
71918+ gcells->cells = kcalloc(gcells->gro_cells_mask + 1,
71919+ sizeof(struct gro_cell),
71920 GFP_KERNEL);
71921 if (!gcells->cells)
71922 return -ENOMEM;
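Beyond the hardening churn, this hunk fixes a genuine argument swap: the signature is kcalloc(n, size, flags). The swapped call allocated the same number of bytes, but the patch restores the documented order that kcalloc's n * size overflow check is written for. A userspace sketch of that check (illustrative, my names):

    #include <stdlib.h>
    #include <stdint.h>

    static void *calloc_checked(size_t n, size_t size)
    {
        if (size && n > SIZE_MAX / size)  /* n * size would wrap */
            return NULL;
        return calloc(n, size);
    }
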
71923diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
71924index 1832927..ce39aea 100644
71925--- a/include/net/inet_connection_sock.h
71926+++ b/include/net/inet_connection_sock.h
71927@@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
71928 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
71929 int (*bind_conflict)(const struct sock *sk,
71930 const struct inet_bind_bucket *tb, bool relax);
71931-};
71932+} __do_const;
71933
71934 /** inet_connection_sock - INET connection oriented sock
71935 *
71936diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
71937index 53f464d..ba76aaa 100644
71938--- a/include/net/inetpeer.h
71939+++ b/include/net/inetpeer.h
71940@@ -47,8 +47,8 @@ struct inet_peer {
71941 */
71942 union {
71943 struct {
71944- atomic_t rid; /* Frag reception counter */
71945- atomic_t ip_id_count; /* IP ID for the next packet */
71946+ atomic_unchecked_t rid; /* Frag reception counter */
71947+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
71948 };
71949 struct rcu_head rcu;
71950 struct inet_peer *gc_next;
71951@@ -182,11 +182,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
71952 more++;
71953 inet_peer_refcheck(p);
71954 do {
71955- old = atomic_read(&p->ip_id_count);
71956+ old = atomic_read_unchecked(&p->ip_id_count);
71957 new = old + more;
71958 if (!new)
71959 new = 1;
71960- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
71961+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
71962 return new;
71963 }
71964
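inet_getid() above is a classic lock-free read-modify-write: retry the cmpxchg until the counter advances, skipping 0 so a zero IP ID is never handed out; only the primitives change to their unchecked forms. The same loop rendered as standalone C11 (userspace sketch, not kernel code):

    #include <stdatomic.h>

    static int get_id(atomic_int *id_count, int more)
    {
        int old = atomic_load(id_count);
        int new;

        more++;
        do {
            new = old + more;
            if (!new)
                new = 1;  /* never return 0 */
        } while (!atomic_compare_exchange_weak(id_count, &old, new));
        return new;
    }
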
71965diff --git a/include/net/ip.h b/include/net/ip.h
71966index a68f838..74518ab 100644
71967--- a/include/net/ip.h
71968+++ b/include/net/ip.h
71969@@ -202,7 +202,7 @@ extern struct local_ports {
71970 } sysctl_local_ports;
71971 extern void inet_get_local_port_range(int *low, int *high);
71972
71973-extern unsigned long *sysctl_local_reserved_ports;
71974+extern unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
71975 static inline int inet_is_reserved_local_port(int port)
71976 {
71977 return test_bit(port, sysctl_local_reserved_ports);
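Turning the pointer into a fixed-size array removes a writable kernel data pointer and sizes the bitmap statically: 65536 ports / 8 bits per byte / sizeof(unsigned long) bytes per word. A quick userspace sketch of the arithmetic and the test_bit() semantics it serves:

    #define PORT_BITS 65536
    #define WORD_BITS (8 * sizeof(unsigned long))

    static unsigned long reserved[PORT_BITS / 8 / sizeof(unsigned long)];

    static int port_is_reserved(unsigned int port)
    {
        return (reserved[port / WORD_BITS] >> (port % WORD_BITS)) & 1UL;
    }
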
71978diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
71979index 9497be1..5a4fafe 100644
71980--- a/include/net/ip_fib.h
71981+++ b/include/net/ip_fib.h
71982@@ -169,7 +169,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
71983
71984 #define FIB_RES_SADDR(net, res) \
71985 ((FIB_RES_NH(res).nh_saddr_genid == \
71986- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
71987+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
71988 FIB_RES_NH(res).nh_saddr : \
71989 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
71990 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
71991diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
71992index 68c69d5..bdab192 100644
71993--- a/include/net/ip_vs.h
71994+++ b/include/net/ip_vs.h
71995@@ -599,7 +599,7 @@ struct ip_vs_conn {
71996 struct ip_vs_conn *control; /* Master control connection */
71997 atomic_t n_control; /* Number of controlled ones */
71998 struct ip_vs_dest *dest; /* real server */
71999- atomic_t in_pkts; /* incoming packet counter */
72000+ atomic_unchecked_t in_pkts; /* incoming packet counter */
72001
72002 /* packet transmitter for different forwarding methods. If it
72003 mangles the packet, it must return NF_DROP or better NF_STOLEN,
72004@@ -737,7 +737,7 @@ struct ip_vs_dest {
72005 __be16 port; /* port number of the server */
72006 union nf_inet_addr addr; /* IP address of the server */
72007 volatile unsigned int flags; /* dest status flags */
72008- atomic_t conn_flags; /* flags to copy to conn */
72009+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
72010 atomic_t weight; /* server weight */
72011
72012 atomic_t refcnt; /* reference counter */
72013@@ -980,11 +980,11 @@ struct netns_ipvs {
72014 /* ip_vs_lblc */
72015 int sysctl_lblc_expiration;
72016 struct ctl_table_header *lblc_ctl_header;
72017- struct ctl_table *lblc_ctl_table;
72018+ ctl_table_no_const *lblc_ctl_table;
72019 /* ip_vs_lblcr */
72020 int sysctl_lblcr_expiration;
72021 struct ctl_table_header *lblcr_ctl_header;
72022- struct ctl_table *lblcr_ctl_table;
72023+ ctl_table_no_const *lblcr_ctl_table;
72024 /* ip_vs_est */
72025 struct list_head est_list; /* estimator list */
72026 spinlock_t est_lock;
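ctl_table_no_const is the constify escape hatch in type form: struct ctl_table becomes const-by-default under the plugin, but these per-netns tables are duplicated and patched at runtime, so they need a writable alias, presumably along the lines of (the real definition lives in include/linux/sysctl.h earlier in the patch, not in this hunk):

    typedef struct ctl_table __no_const ctl_table_no_const;
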
72027diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
72028index 80ffde3..968b0f4 100644
72029--- a/include/net/irda/ircomm_tty.h
72030+++ b/include/net/irda/ircomm_tty.h
72031@@ -35,6 +35,7 @@
72032 #include <linux/termios.h>
72033 #include <linux/timer.h>
72034 #include <linux/tty.h> /* struct tty_struct */
72035+#include <asm/local.h>
72036
72037 #include <net/irda/irias_object.h>
72038 #include <net/irda/ircomm_core.h>
72039diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
72040index cc7c197..9f2da2a 100644
72041--- a/include/net/iucv/af_iucv.h
72042+++ b/include/net/iucv/af_iucv.h
72043@@ -141,7 +141,7 @@ struct iucv_sock {
72044 struct iucv_sock_list {
72045 struct hlist_head head;
72046 rwlock_t lock;
72047- atomic_t autobind_name;
72048+ atomic_unchecked_t autobind_name;
72049 };
72050
72051 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
72052diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
72053index df83f69..9b640b8 100644
72054--- a/include/net/llc_c_ac.h
72055+++ b/include/net/llc_c_ac.h
72056@@ -87,7 +87,7 @@
72057 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
72058 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
72059
72060-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
72061+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
72062
72063 extern int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
72064 extern int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
72065diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
72066index 6ca3113..f8026dd 100644
72067--- a/include/net/llc_c_ev.h
72068+++ b/include/net/llc_c_ev.h
72069@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
72070 return (struct llc_conn_state_ev *)skb->cb;
72071 }
72072
72073-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
72074-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
72075+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
72076+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
72077
72078 extern int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
72079 extern int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
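Moving const inside the function-pointer typedef makes every element of the LLC state-machine dispatch tables immutable after initialization, pushing them into rodata. The difference in miniature (my names):

    typedef int (*handler_t)(int);
    typedef int (* const handler_const_t)(int);

    static int h(int x) { return x; }

    static handler_t       rw_tbl[] = { h };  /* rw_tbl[0] may be reassigned */
    static handler_const_t ro_tbl[] = { h };  /* ro_tbl[0] = h; is a compile error */
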
72080diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
72081index 0e79cfb..f46db31 100644
72082--- a/include/net/llc_c_st.h
72083+++ b/include/net/llc_c_st.h
72084@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
72085 u8 next_state;
72086 llc_conn_ev_qfyr_t *ev_qualifiers;
72087 llc_conn_action_t *ev_actions;
72088-};
72089+} __do_const;
72090
72091 struct llc_conn_state {
72092 u8 current_state;
72093diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
72094index 37a3bbd..55a4241 100644
72095--- a/include/net/llc_s_ac.h
72096+++ b/include/net/llc_s_ac.h
72097@@ -23,7 +23,7 @@
72098 #define SAP_ACT_TEST_IND 9
72099
72100 /* All action functions must look like this */
72101-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
72102+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
72103
72104 extern int llc_sap_action_unitdata_ind(struct llc_sap *sap,
72105 struct sk_buff *skb);
72106diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
72107index 567c681..cd73ac0 100644
72108--- a/include/net/llc_s_st.h
72109+++ b/include/net/llc_s_st.h
72110@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
72111 llc_sap_ev_t ev;
72112 u8 next_state;
72113 llc_sap_action_t *ev_actions;
72114-};
72115+} __do_const;
72116
72117 struct llc_sap_state {
72118 u8 curr_state;
72119diff --git a/include/net/mac80211.h b/include/net/mac80211.h
72120index ee50c5e..1bc3b1a 100644
72121--- a/include/net/mac80211.h
72122+++ b/include/net/mac80211.h
72123@@ -3996,7 +3996,7 @@ struct rate_control_ops {
72124 void (*add_sta_debugfs)(void *priv, void *priv_sta,
72125 struct dentry *dir);
72126 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
72127-};
72128+} __do_const;
72129
72130 static inline int rate_supported(struct ieee80211_sta *sta,
72131 enum ieee80211_band band,
72132diff --git a/include/net/neighbour.h b/include/net/neighbour.h
72133index 0dab173..1b76af0 100644
72134--- a/include/net/neighbour.h
72135+++ b/include/net/neighbour.h
72136@@ -123,7 +123,7 @@ struct neigh_ops {
72137 void (*error_report)(struct neighbour *, struct sk_buff *);
72138 int (*output)(struct neighbour *, struct sk_buff *);
72139 int (*connected_output)(struct neighbour *, struct sk_buff *);
72140-};
72141+} __do_const;
72142
72143 struct pneigh_entry {
72144 struct pneigh_entry *next;
72145diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
72146index de644bc..351fd4e 100644
72147--- a/include/net/net_namespace.h
72148+++ b/include/net/net_namespace.h
72149@@ -115,7 +115,7 @@ struct net {
72150 #endif
72151 struct netns_ipvs *ipvs;
72152 struct sock *diag_nlsk;
72153- atomic_t rt_genid;
72154+ atomic_unchecked_t rt_genid;
72155 };
72156
72157 /*
72158@@ -282,7 +282,7 @@ struct pernet_operations {
72159 void (*exit_batch)(struct list_head *net_exit_list);
72160 int *id;
72161 size_t size;
72162-};
72163+} __do_const;
72164
72165 /*
72166 * Use these carefully. If you implement a network device and it
72167@@ -330,12 +330,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
72168
72169 static inline int rt_genid(struct net *net)
72170 {
72171- return atomic_read(&net->rt_genid);
72172+ return atomic_read_unchecked(&net->rt_genid);
72173 }
72174
72175 static inline void rt_genid_bump(struct net *net)
72176 {
72177- atomic_inc(&net->rt_genid);
72178+ atomic_inc_unchecked(&net->rt_genid);
72179 }
72180
72181 #endif /* __NET_NET_NAMESPACE_H */
72182diff --git a/include/net/netdma.h b/include/net/netdma.h
72183index 8ba8ce2..99b7fff 100644
72184--- a/include/net/netdma.h
72185+++ b/include/net/netdma.h
72186@@ -24,7 +24,7 @@
72187 #include <linux/dmaengine.h>
72188 #include <linux/skbuff.h>
72189
72190-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
72191+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
72192 struct sk_buff *skb, int offset, struct iovec *to,
72193 size_t len, struct dma_pinned_list *pinned_list);
72194
72195diff --git a/include/net/netlink.h b/include/net/netlink.h
72196index 9690b0f..87aded7 100644
72197--- a/include/net/netlink.h
72198+++ b/include/net/netlink.h
72199@@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
72200 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
72201 {
72202 if (mark)
72203- skb_trim(skb, (unsigned char *) mark - skb->data);
72204+ skb_trim(skb, (const unsigned char *) mark - skb->data);
72205 }
72206
72207 /**
72208diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
72209index 923cb20..deae816 100644
72210--- a/include/net/netns/conntrack.h
72211+++ b/include/net/netns/conntrack.h
72212@@ -12,10 +12,10 @@ struct nf_conntrack_ecache;
72213 struct nf_proto_net {
72214 #ifdef CONFIG_SYSCTL
72215 struct ctl_table_header *ctl_table_header;
72216- struct ctl_table *ctl_table;
72217+ ctl_table_no_const *ctl_table;
72218 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
72219 struct ctl_table_header *ctl_compat_header;
72220- struct ctl_table *ctl_compat_table;
72221+ ctl_table_no_const *ctl_compat_table;
72222 #endif
72223 #endif
72224 unsigned int users;
72225@@ -58,7 +58,7 @@ struct nf_ip_net {
72226 struct nf_icmp_net icmpv6;
72227 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
72228 struct ctl_table_header *ctl_table_header;
72229- struct ctl_table *ctl_table;
72230+ ctl_table_no_const *ctl_table;
72231 #endif
72232 };
72233
72234diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
72235index 2ae2b83..dbdc85e 100644
72236--- a/include/net/netns/ipv4.h
72237+++ b/include/net/netns/ipv4.h
72238@@ -64,7 +64,7 @@ struct netns_ipv4 {
72239 kgid_t sysctl_ping_group_range[2];
72240 long sysctl_tcp_mem[3];
72241
72242- atomic_t dev_addr_genid;
72243+ atomic_unchecked_t dev_addr_genid;
72244
72245 #ifdef CONFIG_IP_MROUTE
72246 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
72247diff --git a/include/net/protocol.h b/include/net/protocol.h
72248index 047c047..b9dad15 100644
72249--- a/include/net/protocol.h
72250+++ b/include/net/protocol.h
72251@@ -44,7 +44,7 @@ struct net_protocol {
72252 void (*err_handler)(struct sk_buff *skb, u32 info);
72253 unsigned int no_policy:1,
72254 netns_ok:1;
72255-};
72256+} __do_const;
72257
72258 #if IS_ENABLED(CONFIG_IPV6)
72259 struct inet6_protocol {
72260@@ -57,7 +57,7 @@ struct inet6_protocol {
72261 u8 type, u8 code, int offset,
72262 __be32 info);
72263 unsigned int flags; /* INET6_PROTO_xxx */
72264-};
72265+} __do_const;
72266
72267 #define INET6_PROTO_NOPOLICY 0x1
72268 #define INET6_PROTO_FINAL 0x2
72269diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
72270index 5a15fab..d799ea7 100644
72271--- a/include/net/rtnetlink.h
72272+++ b/include/net/rtnetlink.h
72273@@ -81,7 +81,7 @@ struct rtnl_link_ops {
72274 const struct net_device *dev);
72275 unsigned int (*get_num_tx_queues)(void);
72276 unsigned int (*get_num_rx_queues)(void);
72277-};
72278+} __do_const;
72279
72280 extern int __rtnl_link_register(struct rtnl_link_ops *ops);
72281 extern void __rtnl_link_unregister(struct rtnl_link_ops *ops);
72282diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
72283index 7fdf298..197e9f7 100644
72284--- a/include/net/sctp/sctp.h
72285+++ b/include/net/sctp/sctp.h
72286@@ -330,9 +330,9 @@ do { \
72287
72288 #else /* SCTP_DEBUG */
72289
72290-#define SCTP_DEBUG_PRINTK(whatever...)
72291-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
72292-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
72293+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
72294+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
72295+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
72296 #define SCTP_ENABLE_DEBUG
72297 #define SCTP_DISABLE_DEBUG
72298 #define SCTP_ASSERT(expr, str, func)
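Swapping the empty debug macros for do {} while (0) gives the disabled form statement semantics: a macro that expands to nothing leaves a bare `;` behind, which trips -Wempty-body and reads ambiguously inside if/else chains. Illustration (userspace, my macro names):

    #define DBG_EMPTY(fmt, ...)                 /* expands to nothing: `if (c) DBG_EMPTY("x");` leaves a null statement */
    #define DBG_SAFE(fmt, ...)  do {} while (0) /* expands to one well-formed statement */

    void f(int c)
    {
        if (c)
            DBG_SAFE("on\n");  /* a single statement; no warning, no dangling-else surprises */
    }
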
72299diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
72300index 2a82d13..62a31c2 100644
72301--- a/include/net/sctp/sm.h
72302+++ b/include/net/sctp/sm.h
72303@@ -87,7 +87,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
72304 typedef struct {
72305 sctp_state_fn_t *fn;
72306 const char *name;
72307-} sctp_sm_table_entry_t;
72308+} __do_const sctp_sm_table_entry_t;
72309
72310 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
72311 * currently in use.
72312@@ -299,7 +299,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
72313 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
72314
72315 /* Extern declarations for major data structures. */
72316-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
72317+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
72318
72319
72320 /* Get the size of a DATA chunk payload. */
72321diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
72322index fdeb85a..1329d95 100644
72323--- a/include/net/sctp/structs.h
72324+++ b/include/net/sctp/structs.h
72325@@ -517,7 +517,7 @@ struct sctp_pf {
72326 struct sctp_association *asoc);
72327 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
72328 struct sctp_af *af;
72329-};
72330+} __do_const;
72331
72332
72333 /* Structure to track chunk fragments that have been acked, but peer
72334diff --git a/include/net/sock.h b/include/net/sock.h
72335index 25afaa0..8bb0070 100644
72336--- a/include/net/sock.h
72337+++ b/include/net/sock.h
72338@@ -322,7 +322,7 @@ struct sock {
72339 #ifdef CONFIG_RPS
72340 __u32 sk_rxhash;
72341 #endif
72342- atomic_t sk_drops;
72343+ atomic_unchecked_t sk_drops;
72344 int sk_rcvbuf;
72345
72346 struct sk_filter __rcu *sk_filter;
72347@@ -1781,7 +1781,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
72348 }
72349
72350 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
72351- char __user *from, char *to,
72352+ char __user *from, unsigned char *to,
72353 int copy, int offset)
72354 {
72355 if (skb->ip_summed == CHECKSUM_NONE) {
72356@@ -2040,7 +2040,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
72357 }
72358 }
72359
72360-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
72361+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
72362
72363 /**
72364 * sk_page_frag - return an appropriate page_frag
72365diff --git a/include/net/tcp.h b/include/net/tcp.h
72366index aed42c7..43890c6 100644
72367--- a/include/net/tcp.h
72368+++ b/include/net/tcp.h
72369@@ -530,7 +530,7 @@ extern void tcp_retransmit_timer(struct sock *sk);
72370 extern void tcp_xmit_retransmit_queue(struct sock *);
72371 extern void tcp_simple_retransmit(struct sock *);
72372 extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
72373-extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
72374+extern int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
72375
72376 extern void tcp_send_probe0(struct sock *);
72377 extern void tcp_send_partial(struct sock *);
72378@@ -701,8 +701,8 @@ struct tcp_skb_cb {
72379 struct inet6_skb_parm h6;
72380 #endif
72381 } header; /* For incoming frames */
72382- __u32 seq; /* Starting sequence number */
72383- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
72384+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
72385+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
72386 __u32 when; /* used to compute rtt's */
72387 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
72388
72389@@ -716,7 +716,7 @@ struct tcp_skb_cb {
72390
72391 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
72392 /* 1 byte hole */
72393- __u32 ack_seq; /* Sequence number ACK'd */
72394+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
72395 };
72396
72397 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
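TCP sequence numbers are 32-bit and compare modulo 2^32, so the wraparound whitelisted by __intentional_overflow(0) is by design; the kernel's before()/after() helpers depend on it. The modular comparison, mirroring before() with userspace types:

    #include <stdint.h>

    static inline int seq_before(uint32_t a, uint32_t b)
    {
        return (int32_t)(a - b) < 0;  /* correct across the 2^32 wrap */
    }
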
72398diff --git a/include/net/xfrm.h b/include/net/xfrm.h
72399index 63445ed..d6fc34f 100644
72400--- a/include/net/xfrm.h
72401+++ b/include/net/xfrm.h
72402@@ -304,7 +304,7 @@ struct xfrm_policy_afinfo {
72403 struct net_device *dev,
72404 const struct flowi *fl);
72405 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
72406-};
72407+} __do_const;
72408
72409 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
72410 extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
72411@@ -340,7 +340,7 @@ struct xfrm_state_afinfo {
72412 struct sk_buff *skb);
72413 int (*transport_finish)(struct sk_buff *skb,
72414 int async);
72415-};
72416+} __do_const;
72417
72418 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
72419 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
72420@@ -423,7 +423,7 @@ struct xfrm_mode {
72421 struct module *owner;
72422 unsigned int encap;
72423 int flags;
72424-};
72425+} __do_const;
72426
72427 /* Flags for xfrm_mode. */
72428 enum {
72429@@ -514,7 +514,7 @@ struct xfrm_policy {
72430 struct timer_list timer;
72431
72432 struct flow_cache_object flo;
72433- atomic_t genid;
72434+ atomic_unchecked_t genid;
72435 u32 priority;
72436 u32 index;
72437 struct xfrm_mark mark;
72438diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
72439index 1a046b1..ee0bef0 100644
72440--- a/include/rdma/iw_cm.h
72441+++ b/include/rdma/iw_cm.h
72442@@ -122,7 +122,7 @@ struct iw_cm_verbs {
72443 int backlog);
72444
72445 int (*destroy_listen)(struct iw_cm_id *cm_id);
72446-};
72447+} __no_const;
72448
72449 /**
72450 * iw_create_cm_id - Create an IW CM identifier.
72451diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
72452index 399162b..b337f1a 100644
72453--- a/include/scsi/libfc.h
72454+++ b/include/scsi/libfc.h
72455@@ -762,6 +762,7 @@ struct libfc_function_template {
72456 */
72457 void (*disc_stop_final) (struct fc_lport *);
72458 };
72459+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
72460
72461 /**
72462 * struct fc_disc - Discovery context
72463@@ -866,7 +867,7 @@ struct fc_lport {
72464 struct fc_vport *vport;
72465
72466 /* Operational Information */
72467- struct libfc_function_template tt;
72468+ libfc_function_template_no_const tt;
72469 u8 link_up;
72470 u8 qfull;
72471 enum fc_lport_state state;
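Here the constify opt-out takes its typedef form: the template type stays constifiable for static per-driver instances, while the copy embedded in struct fc_lport, which libfc fills in at runtime, uses the __no_const alias. The pattern in miniature (my struct names):

    struct ops_template { int (*fn)(void); };             /* constified by the plugin */
    typedef struct ops_template __no_const ops_writable;  /* alias for runtime-filled copies */
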
72472diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
72473index e65c62e..aa2e5a2 100644
72474--- a/include/scsi/scsi_device.h
72475+++ b/include/scsi/scsi_device.h
72476@@ -170,9 +170,9 @@ struct scsi_device {
72477 unsigned int max_device_blocked; /* what device_blocked counts down from */
72478 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
72479
72480- atomic_t iorequest_cnt;
72481- atomic_t iodone_cnt;
72482- atomic_t ioerr_cnt;
72483+ atomic_unchecked_t iorequest_cnt;
72484+ atomic_unchecked_t iodone_cnt;
72485+ atomic_unchecked_t ioerr_cnt;
72486
72487 struct device sdev_gendev,
72488 sdev_dev;
72489diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
72490index b797e8f..8e2c3aa 100644
72491--- a/include/scsi/scsi_transport_fc.h
72492+++ b/include/scsi/scsi_transport_fc.h
72493@@ -751,7 +751,8 @@ struct fc_function_template {
72494 unsigned long show_host_system_hostname:1;
72495
72496 unsigned long disable_target_scan:1;
72497-};
72498+} __do_const;
72499+typedef struct fc_function_template __no_const fc_function_template_no_const;
72500
72501
72502 /**
72503diff --git a/include/sound/soc.h b/include/sound/soc.h
72504index bc56738..a4be132 100644
72505--- a/include/sound/soc.h
72506+++ b/include/sound/soc.h
72507@@ -771,7 +771,7 @@ struct snd_soc_codec_driver {
72508 /* probe ordering - for components with runtime dependencies */
72509 int probe_order;
72510 int remove_order;
72511-};
72512+} __do_const;
72513
72514 /* SoC platform interface */
72515 struct snd_soc_platform_driver {
72516@@ -817,7 +817,7 @@ struct snd_soc_platform_driver {
72517 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
72518 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
72519 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
72520-};
72521+} __do_const;
72522
72523 struct snd_soc_platform {
72524 const char *name;
72525diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
72526index 663e34a..91b306a 100644
72527--- a/include/target/target_core_base.h
72528+++ b/include/target/target_core_base.h
72529@@ -654,7 +654,7 @@ struct se_device {
72530 spinlock_t stats_lock;
72531 /* Active commands on this virtual SE device */
72532 atomic_t simple_cmds;
72533- atomic_t dev_ordered_id;
72534+ atomic_unchecked_t dev_ordered_id;
72535 atomic_t dev_ordered_sync;
72536 atomic_t dev_qf_count;
72537 int export_count;
72538diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
72539new file mode 100644
72540index 0000000..fb634b7
72541--- /dev/null
72542+++ b/include/trace/events/fs.h
72543@@ -0,0 +1,53 @@
72544+#undef TRACE_SYSTEM
72545+#define TRACE_SYSTEM fs
72546+
72547+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
72548+#define _TRACE_FS_H
72549+
72550+#include <linux/fs.h>
72551+#include <linux/tracepoint.h>
72552+
72553+TRACE_EVENT(do_sys_open,
72554+
72555+ TP_PROTO(const char *filename, int flags, int mode),
72556+
72557+ TP_ARGS(filename, flags, mode),
72558+
72559+ TP_STRUCT__entry(
72560+ __string( filename, filename )
72561+ __field( int, flags )
72562+ __field( int, mode )
72563+ ),
72564+
72565+ TP_fast_assign(
72566+ __assign_str(filename, filename);
72567+ __entry->flags = flags;
72568+ __entry->mode = mode;
72569+ ),
72570+
72571+ TP_printk("\"%s\" %x %o",
72572+ __get_str(filename), __entry->flags, __entry->mode)
72573+);
72574+
72575+TRACE_EVENT(open_exec,
72576+
72577+ TP_PROTO(const char *filename),
72578+
72579+ TP_ARGS(filename),
72580+
72581+ TP_STRUCT__entry(
72582+ __string( filename, filename )
72583+ ),
72584+
72585+ TP_fast_assign(
72586+ __assign_str(filename, filename);
72587+ ),
72588+
72589+ TP_printk("\"%s\"",
72590+ __get_str(filename))
72591+);
72592+
72593+#endif /* _TRACE_FS_H */
72594+
72595+/* This part must be outside protection */
72596+#include <trace/define_trace.h>
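TRACE_EVENT(do_sys_open, ...) expands into a trace_do_sys_open() static inline plus the ftrace ring-buffer plumbing; callers fire it with the TP_PROTO arguments. An illustrative call site only (the patch's actual hooks live elsewhere, in the fs/ portion of the patch):

    #include <trace/events/fs.h>

    static void report_open(const char *name, int flags, int mode)
    {
        trace_do_sys_open(name, flags, mode);  /* emits: "name" flags mode */
    }
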
72597diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
72598index 1c09820..7f5ec79 100644
72599--- a/include/trace/events/irq.h
72600+++ b/include/trace/events/irq.h
72601@@ -36,7 +36,7 @@ struct softirq_action;
72602 */
72603 TRACE_EVENT(irq_handler_entry,
72604
72605- TP_PROTO(int irq, struct irqaction *action),
72606+ TP_PROTO(int irq, const struct irqaction *action),
72607
72608 TP_ARGS(irq, action),
72609
72610@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
72611 */
72612 TRACE_EVENT(irq_handler_exit,
72613
72614- TP_PROTO(int irq, struct irqaction *action, int ret),
72615+ TP_PROTO(int irq, const struct irqaction *action, int ret),
72616
72617 TP_ARGS(irq, action, ret),
72618
72619diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
72620index 7caf44c..23c6f27 100644
72621--- a/include/uapi/linux/a.out.h
72622+++ b/include/uapi/linux/a.out.h
72623@@ -39,6 +39,14 @@ enum machine_type {
72624 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
72625 };
72626
72627+/* Constants for the N_FLAGS field */
72628+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
72629+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
72630+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
72631+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
72632+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
72633+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
72634+
72635 #if !defined (N_MAGIC)
72636 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
72637 #endif
72638diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
72639index d876736..ccce5c0 100644
72640--- a/include/uapi/linux/byteorder/little_endian.h
72641+++ b/include/uapi/linux/byteorder/little_endian.h
72642@@ -42,51 +42,51 @@
72643
72644 static inline __le64 __cpu_to_le64p(const __u64 *p)
72645 {
72646- return (__force __le64)*p;
72647+ return (__force const __le64)*p;
72648 }
72649-static inline __u64 __le64_to_cpup(const __le64 *p)
72650+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
72651 {
72652- return (__force __u64)*p;
72653+ return (__force const __u64)*p;
72654 }
72655 static inline __le32 __cpu_to_le32p(const __u32 *p)
72656 {
72657- return (__force __le32)*p;
72658+ return (__force const __le32)*p;
72659 }
72660 static inline __u32 __le32_to_cpup(const __le32 *p)
72661 {
72662- return (__force __u32)*p;
72663+ return (__force const __u32)*p;
72664 }
72665 static inline __le16 __cpu_to_le16p(const __u16 *p)
72666 {
72667- return (__force __le16)*p;
72668+ return (__force const __le16)*p;
72669 }
72670 static inline __u16 __le16_to_cpup(const __le16 *p)
72671 {
72672- return (__force __u16)*p;
72673+ return (__force const __u16)*p;
72674 }
72675 static inline __be64 __cpu_to_be64p(const __u64 *p)
72676 {
72677- return (__force __be64)__swab64p(p);
72678+ return (__force const __be64)__swab64p(p);
72679 }
72680 static inline __u64 __be64_to_cpup(const __be64 *p)
72681 {
72682- return __swab64p((__u64 *)p);
72683+ return __swab64p((const __u64 *)p);
72684 }
72685 static inline __be32 __cpu_to_be32p(const __u32 *p)
72686 {
72687- return (__force __be32)__swab32p(p);
72688+ return (__force const __be32)__swab32p(p);
72689 }
72690-static inline __u32 __be32_to_cpup(const __be32 *p)
72691+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
72692 {
72693- return __swab32p((__u32 *)p);
72694+ return __swab32p((const __u32 *)p);
72695 }
72696 static inline __be16 __cpu_to_be16p(const __u16 *p)
72697 {
72698- return (__force __be16)__swab16p(p);
72699+ return (__force const __be16)__swab16p(p);
72700 }
72701 static inline __u16 __be16_to_cpup(const __be16 *p)
72702 {
72703- return __swab16p((__u16 *)p);
72704+ return __swab16p((const __u16 *)p);
72705 }
72706 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
72707 #define __le64_to_cpus(x) do { (void)(x); } while (0)
72708diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
72709index 126a817..d522bd1 100644
72710--- a/include/uapi/linux/elf.h
72711+++ b/include/uapi/linux/elf.h
72712@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
72713 #define PT_GNU_EH_FRAME 0x6474e550
72714
72715 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
72716+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
72717+
72718+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
72719+
72720+/* Constants for the e_flags field */
72721+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
72722+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
72723+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
72724+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
72725+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
72726+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
72727
72728 /*
72729 * Extended Numbering
72730@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
72731 #define DT_DEBUG 21
72732 #define DT_TEXTREL 22
72733 #define DT_JMPREL 23
72734+#define DT_FLAGS 30
72735+ #define DF_TEXTREL 0x00000004
72736 #define DT_ENCODING 32
72737 #define OLD_DT_LOOS 0x60000000
72738 #define DT_LOOS 0x6000000d
72739@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
72740 #define PF_W 0x2
72741 #define PF_X 0x1
72742
72743+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
72744+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
72745+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
72746+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
72747+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
72748+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
72749+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
72750+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
72751+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
72752+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
72753+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
72754+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
72755+
72756 typedef struct elf32_phdr{
72757 Elf32_Word p_type;
72758 Elf32_Off p_offset;
72759@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
72760 #define EI_OSABI 7
72761 #define EI_PAD 8
72762
72763+#define EI_PAX 14
72764+
72765 #define ELFMAG0 0x7f /* EI_MAG */
72766 #define ELFMAG1 'E'
72767 #define ELFMAG2 'L'
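Each PaX feature gets an enable/disable bit pair, so an ELF marking can force a feature on, force it off, or leave it to the system default, which is where the pax_softmode knob configured later in init/main.c comes in. A hedged sketch of the three-way decode, not the kernel's actual code:

    static int pax_feature_enabled(unsigned long p_flags,
                                   unsigned long on, unsigned long off,
                                   int softmode)
    {
        if (p_flags & off)
            return 0;         /* explicitly disabled */
        if (p_flags & on)
            return 1;         /* explicitly enabled */
        return !softmode;     /* unmarked: on, unless running in soft mode */
    }
    /* e.g. pax_feature_enabled(phdr->p_flags, PF_MPROTECT, PF_NOMPROTECT, pax_softmode) */
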
72768diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
72769index aa169c4..6a2771d 100644
72770--- a/include/uapi/linux/personality.h
72771+++ b/include/uapi/linux/personality.h
72772@@ -30,6 +30,7 @@ enum {
72773 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
72774 ADDR_NO_RANDOMIZE | \
72775 ADDR_COMPAT_LAYOUT | \
72776+ ADDR_LIMIT_3GB | \
72777 MMAP_PAGE_ZERO)
72778
72779 /*
72780diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
72781index 7530e74..e714828 100644
72782--- a/include/uapi/linux/screen_info.h
72783+++ b/include/uapi/linux/screen_info.h
72784@@ -43,7 +43,8 @@ struct screen_info {
72785 __u16 pages; /* 0x32 */
72786 __u16 vesa_attributes; /* 0x34 */
72787 __u32 capabilities; /* 0x36 */
72788- __u8 _reserved[6]; /* 0x3a */
72789+ __u16 vesapm_size; /* 0x3a */
72790+ __u8 _reserved[4]; /* 0x3c */
72791 } __attribute__((packed));
72792
72793 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
72794diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
72795index 0e011eb..82681b1 100644
72796--- a/include/uapi/linux/swab.h
72797+++ b/include/uapi/linux/swab.h
72798@@ -43,7 +43,7 @@
72799 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
72800 */
72801
72802-static inline __attribute_const__ __u16 __fswab16(__u16 val)
72803+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
72804 {
72805 #ifdef __HAVE_BUILTIN_BSWAP16__
72806 return __builtin_bswap16(val);
72807@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
72808 #endif
72809 }
72810
72811-static inline __attribute_const__ __u32 __fswab32(__u32 val)
72812+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
72813 {
72814 #ifdef __HAVE_BUILTIN_BSWAP32__
72815 return __builtin_bswap32(val);
72816@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
72817 #endif
72818 }
72819
72820-static inline __attribute_const__ __u64 __fswab64(__u64 val)
72821+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
72822 {
72823 #ifdef __HAVE_BUILTIN_BSWAP64__
72824 return __builtin_bswap64(val);
72825diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
72826index 6d67213..8dab561 100644
72827--- a/include/uapi/linux/sysctl.h
72828+++ b/include/uapi/linux/sysctl.h
72829@@ -155,7 +155,11 @@ enum
72830 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
72831 };
72832
72833-
72834+#ifdef CONFIG_PAX_SOFTMODE
72835+enum {
72836+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
72837+};
72838+#endif
72839
72840 /* CTL_VM names: */
72841 enum
72842diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
72843index 26607bd..588b65f 100644
72844--- a/include/uapi/linux/xattr.h
72845+++ b/include/uapi/linux/xattr.h
72846@@ -60,5 +60,9 @@
72847 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
72848 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
72849
72850+/* User namespace */
72851+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
72852+#define XATTR_PAX_FLAGS_SUFFIX "flags"
72853+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
72854
72855 #endif /* _UAPI_LINUX_XATTR_H */
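The macros concatenate to the literal name "user.pax.flags", which PaX-aware tools write with setxattr(2). Userspace sketch; the one-letter value convention (lowercase disables, uppercase enables a feature, as in paxctl-style tooling) is an assumption, not something this hunk defines:

    #include <sys/xattr.h>

    static int pax_disable_mprotect(const char *path)
    {
        /* "m" assumed to mean "MPROTECT off" per the paxctl letter scheme */
        return setxattr(path, "user.pax.flags", "m", 1, 0);
    }
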
72856diff --git a/include/video/udlfb.h b/include/video/udlfb.h
72857index f9466fa..f4e2b81 100644
72858--- a/include/video/udlfb.h
72859+++ b/include/video/udlfb.h
72860@@ -53,10 +53,10 @@ struct dlfb_data {
72861 u32 pseudo_palette[256];
72862 int blank_mode; /*one of FB_BLANK_ */
72863 /* blit-only rendering path metrics, exposed through sysfs */
72864- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
72865- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
72866- atomic_t bytes_sent; /* to usb, after compression including overhead */
72867- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
72868+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
72869+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
72870+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
72871+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
72872 };
72873
72874 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
72875diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
72876index 0993a22..32ba2fe 100644
72877--- a/include/video/uvesafb.h
72878+++ b/include/video/uvesafb.h
72879@@ -177,6 +177,7 @@ struct uvesafb_par {
72880 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
72881 u8 pmi_setpal; /* PMI for palette changes */
72882 u16 *pmi_base; /* protected mode interface location */
72883+ u8 *pmi_code; /* protected mode code location */
72884 void *pmi_start;
72885 void *pmi_pal;
72886 u8 *vbe_state_orig; /*
72887diff --git a/init/Kconfig b/init/Kconfig
72888index be8b7f5..1eeca9b 100644
72889--- a/init/Kconfig
72890+++ b/init/Kconfig
72891@@ -990,6 +990,7 @@ endif # CGROUPS
72892
72893 config CHECKPOINT_RESTORE
72894 bool "Checkpoint/restore support" if EXPERT
72895+ depends on !GRKERNSEC
72896 default n
72897 help
72898 Enables additional kernel features in a sake of checkpoint/restore.
72899@@ -1468,7 +1469,7 @@ config SLUB_DEBUG
72900
72901 config COMPAT_BRK
72902 bool "Disable heap randomization"
72903- default y
72904+ default n
72905 help
72906 Randomizing heap placement makes heap exploits harder, but it
72907 also breaks ancient binaries (including anything libc5 based).
72908@@ -1711,7 +1712,7 @@ config INIT_ALL_POSSIBLE
72909 config STOP_MACHINE
72910 bool
72911 default y
72912- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
72913+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
72914 help
72915 Need stop_machine() primitive.
72916
72917diff --git a/init/Makefile b/init/Makefile
72918index 7bc47ee..6da2dc7 100644
72919--- a/init/Makefile
72920+++ b/init/Makefile
72921@@ -2,6 +2,9 @@
72922 # Makefile for the linux kernel.
72923 #
72924
72925+ccflags-y := $(GCC_PLUGINS_CFLAGS)
72926+asflags-y := $(GCC_PLUGINS_AFLAGS)
72927+
72928 obj-y := main.o version.o mounts.o
72929 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
72930 obj-y += noinitramfs.o
72931diff --git a/init/do_mounts.c b/init/do_mounts.c
72932index 1d1b634..a1c810f 100644
72933--- a/init/do_mounts.c
72934+++ b/init/do_mounts.c
72935@@ -355,11 +355,11 @@ static void __init get_fs_names(char *page)
72936 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
72937 {
72938 struct super_block *s;
72939- int err = sys_mount(name, "/root", fs, flags, data);
72940+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
72941 if (err)
72942 return err;
72943
72944- sys_chdir("/root");
72945+ sys_chdir((const char __force_user *)"/root");
72946 s = current->fs->pwd.dentry->d_sb;
72947 ROOT_DEV = s->s_dev;
72948 printk(KERN_INFO
72949@@ -480,18 +480,18 @@ void __init change_floppy(char *fmt, ...)
72950 va_start(args, fmt);
72951 vsprintf(buf, fmt, args);
72952 va_end(args);
72953- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
72954+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
72955 if (fd >= 0) {
72956 sys_ioctl(fd, FDEJECT, 0);
72957 sys_close(fd);
72958 }
72959 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
72960- fd = sys_open("/dev/console", O_RDWR, 0);
72961+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
72962 if (fd >= 0) {
72963 sys_ioctl(fd, TCGETS, (long)&termios);
72964 termios.c_lflag &= ~ICANON;
72965 sys_ioctl(fd, TCSETSF, (long)&termios);
72966- sys_read(fd, &c, 1);
72967+ sys_read(fd, (char __user *)&c, 1);
72968 termios.c_lflag |= ICANON;
72969 sys_ioctl(fd, TCSETSF, (long)&termios);
72970 sys_close(fd);
72971@@ -585,6 +585,6 @@ void __init prepare_namespace(void)
72972 mount_root();
72973 out:
72974 devtmpfs_mount("dev");
72975- sys_mount(".", "/", NULL, MS_MOVE, NULL);
72976- sys_chroot(".");
72977+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
72978+ sys_chroot((const char __force_user *)".");
72979 }
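These casts exist because the patch teaches sparse (and PaX UDEREF) that kernel and user pointers live in different address spaces: sys_mount() and friends are declared with __user parameters, so early-init code passing kernel strings needs an explicit, audited override. Assumed shape of the plumbing (the real definitions sit in include/linux/compiler.h earlier in the patch):

    #ifdef __CHECKER__
    # define __user  __attribute__((noderef, address_space(1)))
    # define __force __attribute__((force))
    #else
    # define __user
    # define __force
    #endif
    #define __force_user __force __user  /* audited kernel-to-user-space cast */
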
72980diff --git a/init/do_mounts.h b/init/do_mounts.h
72981index f5b978a..69dbfe8 100644
72982--- a/init/do_mounts.h
72983+++ b/init/do_mounts.h
72984@@ -15,15 +15,15 @@ extern int root_mountflags;
72985
72986 static inline int create_dev(char *name, dev_t dev)
72987 {
72988- sys_unlink(name);
72989- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
72990+ sys_unlink((char __force_user *)name);
72991+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
72992 }
72993
72994 #if BITS_PER_LONG == 32
72995 static inline u32 bstat(char *name)
72996 {
72997 struct stat64 stat;
72998- if (sys_stat64(name, &stat) != 0)
72999+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
73000 return 0;
73001 if (!S_ISBLK(stat.st_mode))
73002 return 0;
73003@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
73004 static inline u32 bstat(char *name)
73005 {
73006 struct stat stat;
73007- if (sys_newstat(name, &stat) != 0)
73008+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
73009 return 0;
73010 if (!S_ISBLK(stat.st_mode))
73011 return 0;
73012diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
73013index f9acf71..1e19144 100644
73014--- a/init/do_mounts_initrd.c
73015+++ b/init/do_mounts_initrd.c
73016@@ -58,8 +58,8 @@ static void __init handle_initrd(void)
73017 create_dev("/dev/root.old", Root_RAM0);
73018 /* mount initrd on rootfs' /root */
73019 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
73020- sys_mkdir("/old", 0700);
73021- sys_chdir("/old");
73022+ sys_mkdir((const char __force_user *)"/old", 0700);
73023+ sys_chdir((const char __force_user *)"/old");
73024
73025 /*
73026 * In case that a resume from disk is carried out by linuxrc or one of
73027@@ -73,31 +73,31 @@ static void __init handle_initrd(void)
73028 current->flags &= ~PF_FREEZER_SKIP;
73029
73030 /* move initrd to rootfs' /old */
73031- sys_mount("..", ".", NULL, MS_MOVE, NULL);
73032+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
73033 /* switch root and cwd back to / of rootfs */
73034- sys_chroot("..");
73035+ sys_chroot((const char __force_user *)"..");
73036
73037 if (new_decode_dev(real_root_dev) == Root_RAM0) {
73038- sys_chdir("/old");
73039+ sys_chdir((const char __force_user *)"/old");
73040 return;
73041 }
73042
73043- sys_chdir("/");
73044+ sys_chdir((const char __force_user *)"/");
73045 ROOT_DEV = new_decode_dev(real_root_dev);
73046 mount_root();
73047
73048 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
73049- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
73050+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
73051 if (!error)
73052 printk("okay\n");
73053 else {
73054- int fd = sys_open("/dev/root.old", O_RDWR, 0);
73055+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
73056 if (error == -ENOENT)
73057 printk("/initrd does not exist. Ignored.\n");
73058 else
73059 printk("failed\n");
73060 printk(KERN_NOTICE "Unmounting old root\n");
73061- sys_umount("/old", MNT_DETACH);
73062+ sys_umount((char __force_user *)"/old", MNT_DETACH);
73063 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
73064 if (fd < 0) {
73065 error = fd;
73066@@ -120,11 +120,11 @@ int __init initrd_load(void)
73067 * mounted in the normal path.
73068 */
73069 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
73070- sys_unlink("/initrd.image");
73071+ sys_unlink((const char __force_user *)"/initrd.image");
73072 handle_initrd();
73073 return 1;
73074 }
73075 }
73076- sys_unlink("/initrd.image");
73077+ sys_unlink((const char __force_user *)"/initrd.image");
73078 return 0;
73079 }
73080diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
73081index 8cb6db5..d729f50 100644
73082--- a/init/do_mounts_md.c
73083+++ b/init/do_mounts_md.c
73084@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
73085 partitioned ? "_d" : "", minor,
73086 md_setup_args[ent].device_names);
73087
73088- fd = sys_open(name, 0, 0);
73089+ fd = sys_open((char __force_user *)name, 0, 0);
73090 if (fd < 0) {
73091 printk(KERN_ERR "md: open failed - cannot start "
73092 "array %s\n", name);
73093@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
73094 * array without it
73095 */
73096 sys_close(fd);
73097- fd = sys_open(name, 0, 0);
73098+ fd = sys_open((char __force_user *)name, 0, 0);
73099 sys_ioctl(fd, BLKRRPART, 0);
73100 }
73101 sys_close(fd);
73102@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
73103
73104 wait_for_device_probe();
73105
73106- fd = sys_open("/dev/md0", 0, 0);
73107+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
73108 if (fd >= 0) {
73109 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
73110 sys_close(fd);
73111diff --git a/init/init_task.c b/init/init_task.c
73112index 8b2f399..f0797c9 100644
73113--- a/init/init_task.c
73114+++ b/init/init_task.c
73115@@ -20,5 +20,9 @@ EXPORT_SYMBOL(init_task);
73116 * Initial thread structure. Alignment of this is handled by a special
73117 * linker map entry.
73118 */
73119+#ifdef CONFIG_X86
73120+union thread_union init_thread_union __init_task_data;
73121+#else
73122 union thread_union init_thread_union __init_task_data =
73123 { INIT_THREAD_INFO(init_task) };
73124+#endif
73125diff --git a/init/initramfs.c b/init/initramfs.c
73126index 84c6bf1..8899338 100644
73127--- a/init/initramfs.c
73128+++ b/init/initramfs.c
73129@@ -84,7 +84,7 @@ static void __init free_hash(void)
73130 }
73131 }
73132
73133-static long __init do_utime(char *filename, time_t mtime)
73134+static long __init do_utime(char __force_user *filename, time_t mtime)
73135 {
73136 struct timespec t[2];
73137
73138@@ -119,7 +119,7 @@ static void __init dir_utime(void)
73139 struct dir_entry *de, *tmp;
73140 list_for_each_entry_safe(de, tmp, &dir_list, list) {
73141 list_del(&de->list);
73142- do_utime(de->name, de->mtime);
73143+ do_utime((char __force_user *)de->name, de->mtime);
73144 kfree(de->name);
73145 kfree(de);
73146 }
73147@@ -281,7 +281,7 @@ static int __init maybe_link(void)
73148 if (nlink >= 2) {
73149 char *old = find_link(major, minor, ino, mode, collected);
73150 if (old)
73151- return (sys_link(old, collected) < 0) ? -1 : 1;
73152+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
73153 }
73154 return 0;
73155 }
73156@@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
73157 {
73158 struct stat st;
73159
73160- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
73161+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
73162 if (S_ISDIR(st.st_mode))
73163- sys_rmdir(path);
73164+ sys_rmdir((char __force_user *)path);
73165 else
73166- sys_unlink(path);
73167+ sys_unlink((char __force_user *)path);
73168 }
73169 }
73170
73171@@ -315,7 +315,7 @@ static int __init do_name(void)
73172 int openflags = O_WRONLY|O_CREAT;
73173 if (ml != 1)
73174 openflags |= O_TRUNC;
73175- wfd = sys_open(collected, openflags, mode);
73176+ wfd = sys_open((char __force_user *)collected, openflags, mode);
73177
73178 if (wfd >= 0) {
73179 sys_fchown(wfd, uid, gid);
73180@@ -327,17 +327,17 @@ static int __init do_name(void)
73181 }
73182 }
73183 } else if (S_ISDIR(mode)) {
73184- sys_mkdir(collected, mode);
73185- sys_chown(collected, uid, gid);
73186- sys_chmod(collected, mode);
73187+ sys_mkdir((char __force_user *)collected, mode);
73188+ sys_chown((char __force_user *)collected, uid, gid);
73189+ sys_chmod((char __force_user *)collected, mode);
73190 dir_add(collected, mtime);
73191 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
73192 S_ISFIFO(mode) || S_ISSOCK(mode)) {
73193 if (maybe_link() == 0) {
73194- sys_mknod(collected, mode, rdev);
73195- sys_chown(collected, uid, gid);
73196- sys_chmod(collected, mode);
73197- do_utime(collected, mtime);
73198+ sys_mknod((char __force_user *)collected, mode, rdev);
73199+ sys_chown((char __force_user *)collected, uid, gid);
73200+ sys_chmod((char __force_user *)collected, mode);
73201+ do_utime((char __force_user *)collected, mtime);
73202 }
73203 }
73204 return 0;
73205@@ -346,15 +346,15 @@ static int __init do_name(void)
73206 static int __init do_copy(void)
73207 {
73208 if (count >= body_len) {
73209- sys_write(wfd, victim, body_len);
73210+ sys_write(wfd, (char __force_user *)victim, body_len);
73211 sys_close(wfd);
73212- do_utime(vcollected, mtime);
73213+ do_utime((char __force_user *)vcollected, mtime);
73214 kfree(vcollected);
73215 eat(body_len);
73216 state = SkipIt;
73217 return 0;
73218 } else {
73219- sys_write(wfd, victim, count);
73220+ sys_write(wfd, (char __force_user *)victim, count);
73221 body_len -= count;
73222 eat(count);
73223 return 1;
73224@@ -365,9 +365,9 @@ static int __init do_symlink(void)
73225 {
73226 collected[N_ALIGN(name_len) + body_len] = '\0';
73227 clean_path(collected, 0);
73228- sys_symlink(collected + N_ALIGN(name_len), collected);
73229- sys_lchown(collected, uid, gid);
73230- do_utime(collected, mtime);
73231+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
73232+ sys_lchown((char __force_user *)collected, uid, gid);
73233+ do_utime((char __force_user *)collected, mtime);
73234 state = SkipIt;
73235 next_state = Reset;
73236 return 0;
73237diff --git a/init/main.c b/init/main.c
73238index cee4b5c..360e10a 100644
73239--- a/init/main.c
73240+++ b/init/main.c
73241@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
73242 extern void tc_init(void);
73243 #endif
73244
73245+extern void grsecurity_init(void);
73246+
73247 /*
73248 * Debug helper: via this flag we know that we are in 'early bootup code'
73249 * where only the boot processor is running with IRQ disabled. This means
73250@@ -149,6 +151,61 @@ static int __init set_reset_devices(char *str)
73251
73252 __setup("reset_devices", set_reset_devices);
73253
73254+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
73255+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
73256+static int __init setup_grsec_proc_gid(char *str)
73257+{
73258+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
73259+ return 1;
73260+}
73261+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
73262+#endif
73263+
73264+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
73265+extern char pax_enter_kernel_user[];
73266+extern char pax_exit_kernel_user[];
73267+extern pgdval_t clone_pgd_mask;
73268+#endif
73269+
73270+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
73271+static int __init setup_pax_nouderef(char *str)
73272+{
73273+#ifdef CONFIG_X86_32
73274+ unsigned int cpu;
73275+ struct desc_struct *gdt;
73276+
73277+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
73278+ gdt = get_cpu_gdt_table(cpu);
73279+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
73280+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
73281+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
73282+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
73283+ }
73284+ loadsegment(ds, __KERNEL_DS);
73285+ loadsegment(es, __KERNEL_DS);
73286+ loadsegment(ss, __KERNEL_DS);
73287+#else
73288+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
73289+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
73290+ clone_pgd_mask = ~(pgdval_t)0UL;
73291+#endif
73292+
73293+ return 0;
73294+}
73295+early_param("pax_nouderef", setup_pax_nouderef);
73296+#endif
73297+
73298+#ifdef CONFIG_PAX_SOFTMODE
73299+int pax_softmode;
73300+
73301+static int __init setup_pax_softmode(char *str)
73302+{
73303+ get_option(&str, &pax_softmode);
73304+ return 1;
73305+}
73306+__setup("pax_softmode=", setup_pax_softmode);
73307+#endif
73308+
73309 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
73310 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
73311 static const char *panic_later, *panic_param;
73312@@ -681,6 +738,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
73313 {
73314 int count = preempt_count();
73315 int ret;
73316+ const char *msg1 = "", *msg2 = "";
73317
73318 if (initcall_debug)
73319 ret = do_one_initcall_debug(fn);
73320@@ -693,15 +751,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
73321 sprintf(msgbuf, "error code %d ", ret);
73322
73323 if (preempt_count() != count) {
73324- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
73325+ msg1 = " preemption imbalance";
73326 preempt_count() = count;
73327 }
73328 if (irqs_disabled()) {
73329- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
73330+ msg2 = " disabled interrupts";
73331 local_irq_enable();
73332 }
73333- if (msgbuf[0]) {
73334- printk("initcall %pF returned with %s\n", fn, msgbuf);
73335+ if (msgbuf[0] || *msg1 || *msg2) {
73336+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
73337 }
73338
73339 return ret;
73340@@ -755,8 +813,14 @@ static void __init do_initcall_level(int level)
73341 level, level,
73342 &repair_env_string);
73343
73344- for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
73345+ for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) {
73346 do_one_initcall(*fn);
73347+
73348+#ifdef LATENT_ENTROPY_PLUGIN
73349+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
73350+#endif
73351+
73352+ }
73353 }
73354
73355 static void __init do_initcalls(void)
73356@@ -790,8 +854,14 @@ static void __init do_pre_smp_initcalls(void)
73357 {
73358 initcall_t *fn;
73359
73360- for (fn = __initcall_start; fn < __initcall0_start; fn++)
73361+ for (fn = __initcall_start; fn < __initcall0_start; fn++) {
73362 do_one_initcall(*fn);
73363+
73364+#ifdef LATENT_ENTROPY_PLUGIN
73365+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
73366+#endif
73367+
73368+ }
73369 }
73370
73371 static int run_init_process(const char *init_filename)
73372@@ -877,7 +947,7 @@ static noinline void __init kernel_init_freeable(void)
73373 do_basic_setup();
73374
73375 /* Open the /dev/console on the rootfs, this should never fail */
73376- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
73377+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
73378 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
73379
73380 (void) sys_dup(0);
73381@@ -890,11 +960,13 @@ static noinline void __init kernel_init_freeable(void)
73382 if (!ramdisk_execute_command)
73383 ramdisk_execute_command = "/init";
73384
73385- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
73386+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
73387 ramdisk_execute_command = NULL;
73388 prepare_namespace();
73389 }
73390
73391+ grsecurity_init();
73392+
73393 /*
73394 * Ok, we have completed the initial bootup, and
73395 * we're essentially up and running. Get rid of the
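
Note on the init/main.c hunks: grsec_proc_gid= and pax_softmode= use the standard __setup() boot-parameter hook, while pax_nouderef uses early_param() because it must patch the GDT (or the amd64 entry stubs) before the setting could matter anywhere else. The general __setup() shape, with illustrative names:

static int example_flag __read_mostly;

static int __init setup_example_flag(char *str)
{
	get_option(&str, &example_flag);	/* parse "example_flag=N" */
	return 1;	/* non-zero: consumed, not passed on to init */
}
__setup("example_flag=", setup_example_flag);
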
73396diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
73397index 130dfec..cc88451 100644
73398--- a/ipc/ipc_sysctl.c
73399+++ b/ipc/ipc_sysctl.c
73400@@ -30,7 +30,7 @@ static void *get_ipc(ctl_table *table)
73401 static int proc_ipc_dointvec(ctl_table *table, int write,
73402 void __user *buffer, size_t *lenp, loff_t *ppos)
73403 {
73404- struct ctl_table ipc_table;
73405+ ctl_table_no_const ipc_table;
73406
73407 memcpy(&ipc_table, table, sizeof(ipc_table));
73408 ipc_table.data = get_ipc(table);
73409@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(ctl_table *table, int write,
73410 static int proc_ipc_dointvec_minmax(ctl_table *table, int write,
73411 void __user *buffer, size_t *lenp, loff_t *ppos)
73412 {
73413- struct ctl_table ipc_table;
73414+ ctl_table_no_const ipc_table;
73415
73416 memcpy(&ipc_table, table, sizeof(ipc_table));
73417 ipc_table.data = get_ipc(table);
73418@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
73419 static int proc_ipc_callback_dointvec(ctl_table *table, int write,
73420 void __user *buffer, size_t *lenp, loff_t *ppos)
73421 {
73422- struct ctl_table ipc_table;
73423+ ctl_table_no_const ipc_table;
73424 size_t lenp_bef = *lenp;
73425 int rc;
73426
73427@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec(ctl_table *table, int write,
73428 static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
73429 void __user *buffer, size_t *lenp, loff_t *ppos)
73430 {
73431- struct ctl_table ipc_table;
73432+ ctl_table_no_const ipc_table;
73433 memcpy(&ipc_table, table, sizeof(ipc_table));
73434 ipc_table.data = get_ipc(table);
73435
73436@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
73437 static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
73438 void __user *buffer, size_t *lenp, loff_t *ppos)
73439 {
73440- struct ctl_table ipc_table;
73441+ ctl_table_no_const ipc_table;
73442 size_t lenp_bef = *lenp;
73443 int oldval;
73444 int rc;
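
Note on the ipc_sysctl.c hunks: every handler here takes a writable scratch copy of its ctl_table so it can point .data at per-namespace storage. With the constification plugin the real tables become read-only, so the copy is declared ctl_table_no_const, a non-const typedef of struct ctl_table introduced elsewhere in this patch. The pattern in isolation (example_get_data() is a hypothetical stand-in for the per-namespace lookup):

static int example_dointvec(ctl_table *table, int write,
			    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	ctl_table_no_const scratch;

	memcpy(&scratch, table, sizeof(scratch));
	scratch.data = example_get_data(table);	/* hypothetical lookup */
	return proc_dointvec(&scratch, write, buffer, lenp, ppos);
}
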
73445diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
73446index 383d638..943fdbb 100644
73447--- a/ipc/mq_sysctl.c
73448+++ b/ipc/mq_sysctl.c
73449@@ -25,7 +25,7 @@ static void *get_mq(ctl_table *table)
73450 static int proc_mq_dointvec_minmax(ctl_table *table, int write,
73451 void __user *buffer, size_t *lenp, loff_t *ppos)
73452 {
73453- struct ctl_table mq_table;
73454+ ctl_table_no_const mq_table;
73455 memcpy(&mq_table, table, sizeof(mq_table));
73456 mq_table.data = get_mq(table);
73457
73458diff --git a/ipc/mqueue.c b/ipc/mqueue.c
73459index 71a3ca1..c750fff 100644
73460--- a/ipc/mqueue.c
73461+++ b/ipc/mqueue.c
73462@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
73463 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
73464 info->attr.mq_msgsize);
73465
73466+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
73467 spin_lock(&mq_lock);
73468 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
73469 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
73470@@ -840,7 +841,8 @@ out_putfd:
73471 fd = error;
73472 }
73473 mutex_unlock(&root->d_inode->i_mutex);
73474- mnt_drop_write(mnt);
73475+ if (!ro)
73476+ mnt_drop_write(mnt);
73477 out_putname:
73478 putname(name);
73479 return fd;
73480diff --git a/ipc/msg.c b/ipc/msg.c
73481index 31cd1bf..362ea07 100644
73482--- a/ipc/msg.c
73483+++ b/ipc/msg.c
73484@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
73485 return security_msg_queue_associate(msq, msgflg);
73486 }
73487
73488+static struct ipc_ops msg_ops = {
73489+ .getnew = newque,
73490+ .associate = msg_security,
73491+ .more_checks = NULL
73492+};
73493+
73494 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
73495 {
73496 struct ipc_namespace *ns;
73497- struct ipc_ops msg_ops;
73498 struct ipc_params msg_params;
73499
73500 ns = current->nsproxy->ipc_ns;
73501
73502- msg_ops.getnew = newque;
73503- msg_ops.associate = msg_security;
73504- msg_ops.more_checks = NULL;
73505-
73506 msg_params.key = key;
73507 msg_params.flg = msgflg;
73508
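
Note: the same conversion repeats in sem.c and shm.c below. The ipc_ops function-pointer table moves from the syscall's stack frame to a file-scope static, which the constification plugin can then place in read-only memory, removing a writable function-pointer target. The consumer is ipcget() in ipc/util.c; a distilled sketch:

static struct ipc_ops example_ops = {
	.getnew      = newque,		/* create a queue for a new key */
	.associate   = msg_security,	/* permission check on an existing key */
	.more_checks = NULL,
};

/* inside the syscall body: */
return ipcget(ns, &msg_ids(ns), &example_ops, &msg_params);
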
73509diff --git a/ipc/sem.c b/ipc/sem.c
73510index 58d31f1..cce7a55 100644
73511--- a/ipc/sem.c
73512+++ b/ipc/sem.c
73513@@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
73514 return 0;
73515 }
73516
73517+static struct ipc_ops sem_ops = {
73518+ .getnew = newary,
73519+ .associate = sem_security,
73520+ .more_checks = sem_more_checks
73521+};
73522+
73523 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
73524 {
73525 struct ipc_namespace *ns;
73526- struct ipc_ops sem_ops;
73527 struct ipc_params sem_params;
73528
73529 ns = current->nsproxy->ipc_ns;
73530@@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
73531 if (nsems < 0 || nsems > ns->sc_semmsl)
73532 return -EINVAL;
73533
73534- sem_ops.getnew = newary;
73535- sem_ops.associate = sem_security;
73536- sem_ops.more_checks = sem_more_checks;
73537-
73538 sem_params.key = key;
73539 sem_params.flg = semflg;
73540 sem_params.u.nsems = nsems;
73541diff --git a/ipc/shm.c b/ipc/shm.c
73542index 4fa6d8f..55cff14 100644
73543--- a/ipc/shm.c
73544+++ b/ipc/shm.c
73545@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
73546 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
73547 #endif
73548
73549+#ifdef CONFIG_GRKERNSEC
73550+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
73551+ const time_t shm_createtime, const kuid_t cuid,
73552+ const int shmid);
73553+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
73554+ const time_t shm_createtime);
73555+#endif
73556+
73557 void shm_init_ns(struct ipc_namespace *ns)
73558 {
73559 ns->shm_ctlmax = SHMMAX;
73560@@ -521,6 +529,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
73561 shp->shm_lprid = 0;
73562 shp->shm_atim = shp->shm_dtim = 0;
73563 shp->shm_ctim = get_seconds();
73564+#ifdef CONFIG_GRKERNSEC
73565+ {
73566+ struct timespec timeval;
73567+ do_posix_clock_monotonic_gettime(&timeval);
73568+
73569+ shp->shm_createtime = timeval.tv_sec;
73570+ }
73571+#endif
73572 shp->shm_segsz = size;
73573 shp->shm_nattch = 0;
73574 shp->shm_file = file;
73575@@ -572,18 +588,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
73576 return 0;
73577 }
73578
73579+static struct ipc_ops shm_ops = {
73580+ .getnew = newseg,
73581+ .associate = shm_security,
73582+ .more_checks = shm_more_checks
73583+};
73584+
73585 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
73586 {
73587 struct ipc_namespace *ns;
73588- struct ipc_ops shm_ops;
73589 struct ipc_params shm_params;
73590
73591 ns = current->nsproxy->ipc_ns;
73592
73593- shm_ops.getnew = newseg;
73594- shm_ops.associate = shm_security;
73595- shm_ops.more_checks = shm_more_checks;
73596-
73597 shm_params.key = key;
73598 shm_params.flg = shmflg;
73599 shm_params.u.size = size;
73600@@ -1004,6 +1021,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
73601 f_mode = FMODE_READ | FMODE_WRITE;
73602 }
73603 if (shmflg & SHM_EXEC) {
73604+
73605+#ifdef CONFIG_PAX_MPROTECT
73606+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
73607+ goto out;
73608+#endif
73609+
73610 prot |= PROT_EXEC;
73611 acc_mode |= S_IXUGO;
73612 }
73613@@ -1027,9 +1050,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
73614 if (err)
73615 goto out_unlock;
73616
73617+#ifdef CONFIG_GRKERNSEC
73618+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
73619+ shp->shm_perm.cuid, shmid) ||
73620+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
73621+ err = -EACCES;
73622+ goto out_unlock;
73623+ }
73624+#endif
73625+
73626 path = shp->shm_file->f_path;
73627 path_get(&path);
73628 shp->shm_nattch++;
73629+#ifdef CONFIG_GRKERNSEC
73630+ shp->shm_lapid = current->pid;
73631+#endif
73632 size = i_size_read(path.dentry->d_inode);
73633 shm_unlock(shp);
73634
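
Note on the SHM_EXEC hunk above: a task running under PaX MPROTECT can no longer attach executable SysV shared memory. A runnable userspace probe (illustrative only, not part of the patch; SHM_EXEC may need defining on older headers):

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

#ifndef SHM_EXEC
# define SHM_EXEC 0100000	/* from linux/shm.h */
#endif

int main(void)
{
	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	void *p;

	if (id < 0)
		return 1;
	p = shmat(id, NULL, SHM_EXEC);	/* EACCES when MPROTECT is active */
	printf("shmat(SHM_EXEC): %s\n", p == (void *)-1 ? "denied" : "allowed");
	if (p != (void *)-1)
		shmdt(p);
	shmctl(id, IPC_RMID, NULL);
	return 0;
}
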
73635diff --git a/kernel/acct.c b/kernel/acct.c
73636index 051e071..15e0920 100644
73637--- a/kernel/acct.c
73638+++ b/kernel/acct.c
73639@@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
73640 */
73641 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
73642 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
73643- file->f_op->write(file, (char *)&ac,
73644+ file->f_op->write(file, (char __force_user *)&ac,
73645 sizeof(acct_t), &file->f_pos);
73646 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
73647 set_fs(fs);
73648diff --git a/kernel/audit.c b/kernel/audit.c
73649index d596e53..dbef3c3 100644
73650--- a/kernel/audit.c
73651+++ b/kernel/audit.c
73652@@ -116,7 +116,7 @@ u32 audit_sig_sid = 0;
73653 3) suppressed due to audit_rate_limit
73654 4) suppressed due to audit_backlog_limit
73655 */
73656-static atomic_t audit_lost = ATOMIC_INIT(0);
73657+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
73658
73659 /* The netlink socket. */
73660 static struct sock *audit_sock;
73661@@ -238,7 +238,7 @@ void audit_log_lost(const char *message)
73662 unsigned long now;
73663 int print;
73664
73665- atomic_inc(&audit_lost);
73666+ atomic_inc_unchecked(&audit_lost);
73667
73668 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
73669
73670@@ -257,7 +257,7 @@ void audit_log_lost(const char *message)
73671 printk(KERN_WARNING
73672 "audit: audit_lost=%d audit_rate_limit=%d "
73673 "audit_backlog_limit=%d\n",
73674- atomic_read(&audit_lost),
73675+ atomic_read_unchecked(&audit_lost),
73676 audit_rate_limit,
73677 audit_backlog_limit);
73678 audit_panic(message);
73679@@ -681,7 +681,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
73680 status_set.pid = audit_pid;
73681 status_set.rate_limit = audit_rate_limit;
73682 status_set.backlog_limit = audit_backlog_limit;
73683- status_set.lost = atomic_read(&audit_lost);
73684+ status_set.lost = atomic_read_unchecked(&audit_lost);
73685 status_set.backlog = skb_queue_len(&audit_skb_queue);
73686 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
73687 &status_set, sizeof(status_set));
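
Note on the audit.c hunks: audit_lost is a pure statistics counter, so wrap-around is harmless. Under the PaX REFCOUNT feature, plain atomic_t overflows trap in order to stop reference-count overflows, and counters like this are converted to atomic_unchecked_t with mirrored accessors. The idiom, assuming the PaX type definitions from elsewhere in this patch:

static atomic_unchecked_t example_lost = ATOMIC_INIT(0);

static void example_account_loss(void)
{
	atomic_inc_unchecked(&example_lost);	/* overflow is acceptable here */
	printk(KERN_WARNING "example: lost=%d\n",
	       atomic_read_unchecked(&example_lost));
}
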
73688diff --git a/kernel/auditsc.c b/kernel/auditsc.c
73689index a371f85..da826c1 100644
73690--- a/kernel/auditsc.c
73691+++ b/kernel/auditsc.c
73692@@ -2292,7 +2292,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
73693 }
73694
73695 /* global counter which is incremented every time something logs in */
73696-static atomic_t session_id = ATOMIC_INIT(0);
73697+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
73698
73699 /**
73700 * audit_set_loginuid - set current task's audit_context loginuid
73701@@ -2316,7 +2316,7 @@ int audit_set_loginuid(kuid_t loginuid)
73702 return -EPERM;
73703 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
73704
73705- sessionid = atomic_inc_return(&session_id);
73706+ sessionid = atomic_inc_return_unchecked(&session_id);
73707 if (context && context->in_syscall) {
73708 struct audit_buffer *ab;
73709
73710diff --git a/kernel/capability.c b/kernel/capability.c
73711index 493d972..f87dfbd 100644
73712--- a/kernel/capability.c
73713+++ b/kernel/capability.c
73714@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
73715 * before modification is attempted and the application
73716 * fails.
73717 */
73718+ if (tocopy > ARRAY_SIZE(kdata))
73719+ return -EFAULT;
73720+
73721 if (copy_to_user(dataptr, kdata, tocopy
73722 * sizeof(struct __user_cap_data_struct))) {
73723 return -EFAULT;
73724@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
73725 int ret;
73726
73727 rcu_read_lock();
73728- ret = security_capable(__task_cred(t), ns, cap);
73729+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
73730+ gr_task_is_capable(t, __task_cred(t), cap);
73731 rcu_read_unlock();
73732
73733- return (ret == 0);
73734+ return ret;
73735 }
73736
73737 /**
73738@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
73739 int ret;
73740
73741 rcu_read_lock();
73742- ret = security_capable_noaudit(__task_cred(t), ns, cap);
73743+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
73744 rcu_read_unlock();
73745
73746- return (ret == 0);
73747+ return ret;
73748 }
73749
73750 /**
73751@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
73752 BUG();
73753 }
73754
73755- if (security_capable(current_cred(), ns, cap) == 0) {
73756+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
73757 current->flags |= PF_SUPERPRIV;
73758 return true;
73759 }
73760@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
73761 }
73762 EXPORT_SYMBOL(ns_capable);
73763
73764+bool ns_capable_nolog(struct user_namespace *ns, int cap)
73765+{
73766+ if (unlikely(!cap_valid(cap))) {
73767+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
73768+ BUG();
73769+ }
73770+
73771+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
73772+ current->flags |= PF_SUPERPRIV;
73773+ return true;
73774+ }
73775+ return false;
73776+}
73777+EXPORT_SYMBOL(ns_capable_nolog);
73778+
73779 /**
73780 * capable - Determine if the current task has a superior capability in effect
73781 * @cap: The capability to be tested for
73782@@ -408,6 +427,12 @@ bool capable(int cap)
73783 }
73784 EXPORT_SYMBOL(capable);
73785
73786+bool capable_nolog(int cap)
73787+{
73788+ return ns_capable_nolog(&init_user_ns, cap);
73789+}
73790+EXPORT_SYMBOL(capable_nolog);
73791+
73792 /**
73793 * nsown_capable - Check superior capability to one's own user_ns
73794 * @cap: The capability in question
73795@@ -440,3 +465,10 @@ bool inode_capable(const struct inode *inode, int cap)
73796
73797 return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
73798 }
73799+
73800+bool inode_capable_nolog(const struct inode *inode, int cap)
73801+{
73802+ struct user_namespace *ns = current_user_ns();
73803+
73804+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
73805+}
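
Note on the capability.c hunks: two distinct changes. capget() gains a bounds check so tocopy can never index past the kernel-side array before copy_to_user(), and the capability predicates gain _nolog variants that skip audit logging so grsecurity's frequent internal checks don't flood the log. The bounds-check shape in isolation:

static long example_export(void __user *dst, unsigned int nblocks)
{
	u32 kdata[4] = { };

	if (nblocks > ARRAY_SIZE(kdata))	/* reject before any copy */
		return -EFAULT;
	if (copy_to_user(dst, kdata, nblocks * sizeof(kdata[0])))
		return -EFAULT;
	return 0;
}
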
73806diff --git a/kernel/cgroup.c b/kernel/cgroup.c
73807index 1e23664..570a83d 100644
73808--- a/kernel/cgroup.c
73809+++ b/kernel/cgroup.c
73810@@ -5543,7 +5543,7 @@ static int cgroup_css_links_read(struct cgroup *cont,
73811 struct css_set *cg = link->cg;
73812 struct task_struct *task;
73813 int count = 0;
73814- seq_printf(seq, "css_set %p\n", cg);
73815+ seq_printf(seq, "css_set %pK\n", cg);
73816 list_for_each_entry(task, &cg->tasks, cg_list) {
73817 if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
73818 seq_puts(seq, " ...\n");
73819diff --git a/kernel/compat.c b/kernel/compat.c
73820index 36700e9..73d770c 100644
73821--- a/kernel/compat.c
73822+++ b/kernel/compat.c
73823@@ -13,6 +13,7 @@
73824
73825 #include <linux/linkage.h>
73826 #include <linux/compat.h>
73827+#include <linux/module.h>
73828 #include <linux/errno.h>
73829 #include <linux/time.h>
73830 #include <linux/signal.h>
73831@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
73832 mm_segment_t oldfs;
73833 long ret;
73834
73835- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
73836+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
73837 oldfs = get_fs();
73838 set_fs(KERNEL_DS);
73839 ret = hrtimer_nanosleep_restart(restart);
73840@@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
73841 oldfs = get_fs();
73842 set_fs(KERNEL_DS);
73843 ret = hrtimer_nanosleep(&tu,
73844- rmtp ? (struct timespec __user *)&rmt : NULL,
73845+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
73846 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
73847 set_fs(oldfs);
73848
73849@@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
73850 mm_segment_t old_fs = get_fs();
73851
73852 set_fs(KERNEL_DS);
73853- ret = sys_sigpending((old_sigset_t __user *) &s);
73854+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
73855 set_fs(old_fs);
73856 if (ret == 0)
73857 ret = put_user(s, set);
73858@@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
73859 mm_segment_t old_fs = get_fs();
73860
73861 set_fs(KERNEL_DS);
73862- ret = sys_old_getrlimit(resource, &r);
73863+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
73864 set_fs(old_fs);
73865
73866 if (!ret) {
73867@@ -523,7 +524,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
73868 mm_segment_t old_fs = get_fs();
73869
73870 set_fs(KERNEL_DS);
73871- ret = sys_getrusage(who, (struct rusage __user *) &r);
73872+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
73873 set_fs(old_fs);
73874
73875 if (ret)
73876@@ -552,8 +553,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
73877 set_fs (KERNEL_DS);
73878 ret = sys_wait4(pid,
73879 (stat_addr ?
73880- (unsigned int __user *) &status : NULL),
73881- options, (struct rusage __user *) &r);
73882+ (unsigned int __force_user *) &status : NULL),
73883+ options, (struct rusage __force_user *) &r);
73884 set_fs (old_fs);
73885
73886 if (ret > 0) {
73887@@ -579,8 +580,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
73888 memset(&info, 0, sizeof(info));
73889
73890 set_fs(KERNEL_DS);
73891- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
73892- uru ? (struct rusage __user *)&ru : NULL);
73893+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
73894+ uru ? (struct rusage __force_user *)&ru : NULL);
73895 set_fs(old_fs);
73896
73897 if ((ret < 0) || (info.si_signo == 0))
73898@@ -714,8 +715,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
73899 oldfs = get_fs();
73900 set_fs(KERNEL_DS);
73901 err = sys_timer_settime(timer_id, flags,
73902- (struct itimerspec __user *) &newts,
73903- (struct itimerspec __user *) &oldts);
73904+ (struct itimerspec __force_user *) &newts,
73905+ (struct itimerspec __force_user *) &oldts);
73906 set_fs(oldfs);
73907 if (!err && old && put_compat_itimerspec(old, &oldts))
73908 return -EFAULT;
73909@@ -732,7 +733,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
73910 oldfs = get_fs();
73911 set_fs(KERNEL_DS);
73912 err = sys_timer_gettime(timer_id,
73913- (struct itimerspec __user *) &ts);
73914+ (struct itimerspec __force_user *) &ts);
73915 set_fs(oldfs);
73916 if (!err && put_compat_itimerspec(setting, &ts))
73917 return -EFAULT;
73918@@ -751,7 +752,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
73919 oldfs = get_fs();
73920 set_fs(KERNEL_DS);
73921 err = sys_clock_settime(which_clock,
73922- (struct timespec __user *) &ts);
73923+ (struct timespec __force_user *) &ts);
73924 set_fs(oldfs);
73925 return err;
73926 }
73927@@ -766,7 +767,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
73928 oldfs = get_fs();
73929 set_fs(KERNEL_DS);
73930 err = sys_clock_gettime(which_clock,
73931- (struct timespec __user *) &ts);
73932+ (struct timespec __force_user *) &ts);
73933 set_fs(oldfs);
73934 if (!err && put_compat_timespec(&ts, tp))
73935 return -EFAULT;
73936@@ -786,7 +787,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
73937
73938 oldfs = get_fs();
73939 set_fs(KERNEL_DS);
73940- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
73941+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
73942 set_fs(oldfs);
73943
73944 err = compat_put_timex(utp, &txc);
73945@@ -806,7 +807,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
73946 oldfs = get_fs();
73947 set_fs(KERNEL_DS);
73948 err = sys_clock_getres(which_clock,
73949- (struct timespec __user *) &ts);
73950+ (struct timespec __force_user *) &ts);
73951 set_fs(oldfs);
73952 if (!err && tp && put_compat_timespec(&ts, tp))
73953 return -EFAULT;
73954@@ -818,9 +819,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
73955 long err;
73956 mm_segment_t oldfs;
73957 struct timespec tu;
73958- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
73959+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
73960
73961- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
73962+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
73963 oldfs = get_fs();
73964 set_fs(KERNEL_DS);
73965 err = clock_nanosleep_restart(restart);
73966@@ -852,8 +853,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
73967 oldfs = get_fs();
73968 set_fs(KERNEL_DS);
73969 err = sys_clock_nanosleep(which_clock, flags,
73970- (struct timespec __user *) &in,
73971- (struct timespec __user *) &out);
73972+ (struct timespec __force_user *) &in,
73973+ (struct timespec __force_user *) &out);
73974 set_fs(oldfs);
73975
73976 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
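
Note: all of the compat.c hunks are one idiom. A compat syscall builds the native structure on its kernel stack, widens the address limit, calls the native syscall with a __force_user cast, and restores the limit. Distilled from the hunks above:

mm_segment_t old_fs = get_fs();
long err;

set_fs(KERNEL_DS);	/* kernel addresses now pass access_ok() */
err = sys_clock_gettime(which_clock,
			(struct timespec __force_user *)&ts);
set_fs(old_fs);		/* always restore the caller's limit */
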
73977diff --git a/kernel/configs.c b/kernel/configs.c
73978index 42e8fa0..9e7406b 100644
73979--- a/kernel/configs.c
73980+++ b/kernel/configs.c
73981@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
73982 struct proc_dir_entry *entry;
73983
73984 /* create the current config file */
73985+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
73986+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
73987+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
73988+ &ikconfig_file_ops);
73989+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73990+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
73991+ &ikconfig_file_ops);
73992+#endif
73993+#else
73994 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
73995 &ikconfig_file_ops);
73996+#endif
73997+
73998 if (!entry)
73999 return -ENOMEM;
74000
74001diff --git a/kernel/cred.c b/kernel/cred.c
74002index e0573a4..3874e41 100644
74003--- a/kernel/cred.c
74004+++ b/kernel/cred.c
74005@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
74006 validate_creds(cred);
74007 alter_cred_subscribers(cred, -1);
74008 put_cred(cred);
74009+
74010+#ifdef CONFIG_GRKERNSEC_SETXID
74011+ cred = (struct cred *) tsk->delayed_cred;
74012+ if (cred != NULL) {
74013+ tsk->delayed_cred = NULL;
74014+ validate_creds(cred);
74015+ alter_cred_subscribers(cred, -1);
74016+ put_cred(cred);
74017+ }
74018+#endif
74019 }
74020
74021 /**
74022@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
74023 * Always returns 0 thus allowing this function to be tail-called at the end
74024 * of, say, sys_setgid().
74025 */
74026-int commit_creds(struct cred *new)
74027+static int __commit_creds(struct cred *new)
74028 {
74029 struct task_struct *task = current;
74030 const struct cred *old = task->real_cred;
74031@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
74032
74033 get_cred(new); /* we will require a ref for the subj creds too */
74034
74035+ gr_set_role_label(task, new->uid, new->gid);
74036+
74037 /* dumpability changes */
74038 if (!uid_eq(old->euid, new->euid) ||
74039 !gid_eq(old->egid, new->egid) ||
74040@@ -479,6 +491,102 @@ int commit_creds(struct cred *new)
74041 put_cred(old);
74042 return 0;
74043 }
74044+#ifdef CONFIG_GRKERNSEC_SETXID
74045+extern int set_user(struct cred *new);
74046+
74047+void gr_delayed_cred_worker(void)
74048+{
74049+ const struct cred *new = current->delayed_cred;
74050+ struct cred *ncred;
74051+
74052+ current->delayed_cred = NULL;
74053+
74054+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
74055+ // drop the reference taken by get_cred() when this cred was queued
74056+ put_cred(new);
74057+ return;
74058+ } else if (new == NULL)
74059+ return;
74060+
74061+ ncred = prepare_creds();
74062+ if (!ncred)
74063+ goto die;
74064+ // uids
74065+ ncred->uid = new->uid;
74066+ ncred->euid = new->euid;
74067+ ncred->suid = new->suid;
74068+ ncred->fsuid = new->fsuid;
74069+ // gids
74070+ ncred->gid = new->gid;
74071+ ncred->egid = new->egid;
74072+ ncred->sgid = new->sgid;
74073+ ncred->fsgid = new->fsgid;
74074+ // groups
74075+ if (set_groups(ncred, new->group_info) < 0) {
74076+ abort_creds(ncred);
74077+ goto die;
74078+ }
74079+ // caps
74080+ ncred->securebits = new->securebits;
74081+ ncred->cap_inheritable = new->cap_inheritable;
74082+ ncred->cap_permitted = new->cap_permitted;
74083+ ncred->cap_effective = new->cap_effective;
74084+ ncred->cap_bset = new->cap_bset;
74085+
74086+ if (set_user(ncred)) {
74087+ abort_creds(ncred);
74088+ goto die;
74089+ }
74090+
74091+ // drop the reference taken by get_cred() when this cred was queued
74092+ put_cred(new);
74093+
74094+ __commit_creds(ncred);
74095+ return;
74096+die:
74097+ // drop the reference taken by get_cred() when this cred was queued
74098+ put_cred(new);
74099+ do_group_exit(SIGKILL);
74100+}
74101+#endif
74102+
74103+int commit_creds(struct cred *new)
74104+{
74105+#ifdef CONFIG_GRKERNSEC_SETXID
74106+ int ret;
74107+ int schedule_it = 0;
74108+ struct task_struct *t;
74109+
74110+ /* we won't get called with tasklist_lock held for writing
74111+ and interrupts disabled, as the cred struct in that case
74112+ is init_cred
74113+ */
74114+ if (grsec_enable_setxid && !current_is_single_threaded() &&
74115+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
74116+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
74117+ schedule_it = 1;
74118+ }
74119+ ret = __commit_creds(new);
74120+ if (schedule_it) {
74121+ rcu_read_lock();
74122+ read_lock(&tasklist_lock);
74123+ for (t = next_thread(current); t != current;
74124+ t = next_thread(t)) {
74125+ if (t->delayed_cred == NULL) {
74126+ t->delayed_cred = get_cred(new);
74127+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
74128+ set_tsk_need_resched(t);
74129+ }
74130+ }
74131+ read_unlock(&tasklist_lock);
74132+ rcu_read_unlock();
74133+ }
74134+ return ret;
74135+#else
74136+ return __commit_creds(new);
74137+#endif
74138+}
74139+
74140 EXPORT_SYMBOL(commit_creds);
74141
74142 /**
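
Note on the SETXID machinery above: when a multithreaded root process drops privileges, __commit_creds() applies the change to the calling thread, then queues a get_cred()'d copy on each sibling via ->delayed_cred, sets TIF_GRSEC_SETXID, and forces a reschedule, so no thread keeps running as root. The consumer side is defined elsewhere in this patch; it plausibly amounts to a check on the return-to-userspace path (an assumption, sketched here):

/* assumed call site: the architecture's resume-to-user work loop */
if (test_thread_flag(TIF_GRSEC_SETXID))
	gr_delayed_cred_worker();	/* applies current->delayed_cred */
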
74143diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
74144index 9a61738..c5c8f3a 100644
74145--- a/kernel/debug/debug_core.c
74146+++ b/kernel/debug/debug_core.c
74147@@ -122,7 +122,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
74148 */
74149 static atomic_t masters_in_kgdb;
74150 static atomic_t slaves_in_kgdb;
74151-static atomic_t kgdb_break_tasklet_var;
74152+static atomic_unchecked_t kgdb_break_tasklet_var;
74153 atomic_t kgdb_setting_breakpoint;
74154
74155 struct task_struct *kgdb_usethread;
74156@@ -132,7 +132,7 @@ int kgdb_single_step;
74157 static pid_t kgdb_sstep_pid;
74158
74159 /* to keep track of the CPU which is doing the single stepping*/
74160-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
74161+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
74162
74163 /*
74164 * If you are debugging a problem where roundup (the collection of
74165@@ -540,7 +540,7 @@ return_normal:
74166 * kernel will only try for the value of sstep_tries before
74167 * giving up and continuing on.
74168 */
74169- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
74170+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
74171 (kgdb_info[cpu].task &&
74172 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
74173 atomic_set(&kgdb_active, -1);
74174@@ -634,8 +634,8 @@ cpu_master_loop:
74175 }
74176
74177 kgdb_restore:
74178- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
74179- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
74180+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
74181+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
74182 if (kgdb_info[sstep_cpu].task)
74183 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
74184 else
74185@@ -887,18 +887,18 @@ static void kgdb_unregister_callbacks(void)
74186 static void kgdb_tasklet_bpt(unsigned long ing)
74187 {
74188 kgdb_breakpoint();
74189- atomic_set(&kgdb_break_tasklet_var, 0);
74190+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
74191 }
74192
74193 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
74194
74195 void kgdb_schedule_breakpoint(void)
74196 {
74197- if (atomic_read(&kgdb_break_tasklet_var) ||
74198+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
74199 atomic_read(&kgdb_active) != -1 ||
74200 atomic_read(&kgdb_setting_breakpoint))
74201 return;
74202- atomic_inc(&kgdb_break_tasklet_var);
74203+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
74204 tasklet_schedule(&kgdb_tasklet_breakpoint);
74205 }
74206 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
74207diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
74208index 8875254..7cf4928 100644
74209--- a/kernel/debug/kdb/kdb_main.c
74210+++ b/kernel/debug/kdb/kdb_main.c
74211@@ -1974,7 +1974,7 @@ static int kdb_lsmod(int argc, const char **argv)
74212 continue;
74213
74214 kdb_printf("%-20s%8u 0x%p ", mod->name,
74215- mod->core_size, (void *)mod);
74216+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
74217 #ifdef CONFIG_MODULE_UNLOAD
74218 kdb_printf("%4ld ", module_refcount(mod));
74219 #endif
74220@@ -1984,7 +1984,7 @@ static int kdb_lsmod(int argc, const char **argv)
74221 kdb_printf(" (Loading)");
74222 else
74223 kdb_printf(" (Live)");
74224- kdb_printf(" 0x%p", mod->module_core);
74225+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
74226
74227 #ifdef CONFIG_MODULE_UNLOAD
74228 {
74229diff --git a/kernel/events/core.c b/kernel/events/core.c
74230index 7b6646a..3cb1135 100644
74231--- a/kernel/events/core.c
74232+++ b/kernel/events/core.c
74233@@ -182,7 +182,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
74234 return 0;
74235 }
74236
74237-static atomic64_t perf_event_id;
74238+static atomic64_unchecked_t perf_event_id;
74239
74240 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
74241 enum event_type_t event_type);
74242@@ -2677,7 +2677,7 @@ static void __perf_event_read(void *info)
74243
74244 static inline u64 perf_event_count(struct perf_event *event)
74245 {
74246- return local64_read(&event->count) + atomic64_read(&event->child_count);
74247+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
74248 }
74249
74250 static u64 perf_event_read(struct perf_event *event)
74251@@ -3007,9 +3007,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
74252 mutex_lock(&event->child_mutex);
74253 total += perf_event_read(event);
74254 *enabled += event->total_time_enabled +
74255- atomic64_read(&event->child_total_time_enabled);
74256+ atomic64_read_unchecked(&event->child_total_time_enabled);
74257 *running += event->total_time_running +
74258- atomic64_read(&event->child_total_time_running);
74259+ atomic64_read_unchecked(&event->child_total_time_running);
74260
74261 list_for_each_entry(child, &event->child_list, child_list) {
74262 total += perf_event_read(child);
74263@@ -3412,10 +3412,10 @@ void perf_event_update_userpage(struct perf_event *event)
74264 userpg->offset -= local64_read(&event->hw.prev_count);
74265
74266 userpg->time_enabled = enabled +
74267- atomic64_read(&event->child_total_time_enabled);
74268+ atomic64_read_unchecked(&event->child_total_time_enabled);
74269
74270 userpg->time_running = running +
74271- atomic64_read(&event->child_total_time_running);
74272+ atomic64_read_unchecked(&event->child_total_time_running);
74273
74274 arch_perf_update_userpage(userpg, now);
74275
74276@@ -3974,11 +3974,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
74277 values[n++] = perf_event_count(event);
74278 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
74279 values[n++] = enabled +
74280- atomic64_read(&event->child_total_time_enabled);
74281+ atomic64_read_unchecked(&event->child_total_time_enabled);
74282 }
74283 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
74284 values[n++] = running +
74285- atomic64_read(&event->child_total_time_running);
74286+ atomic64_read_unchecked(&event->child_total_time_running);
74287 }
74288 if (read_format & PERF_FORMAT_ID)
74289 values[n++] = primary_event_id(event);
74290@@ -4721,12 +4721,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
74291 * need to add enough zero bytes after the string to handle
74292 * the 64bit alignment we do later.
74293 */
74294- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
74295+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
74296 if (!buf) {
74297 name = strncpy(tmp, "//enomem", sizeof(tmp));
74298 goto got_name;
74299 }
74300- name = d_path(&file->f_path, buf, PATH_MAX);
74301+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
74302 if (IS_ERR(name)) {
74303 name = strncpy(tmp, "//toolong", sizeof(tmp));
74304 goto got_name;
74305@@ -6165,7 +6165,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
74306 event->parent = parent_event;
74307
74308 event->ns = get_pid_ns(task_active_pid_ns(current));
74309- event->id = atomic64_inc_return(&perf_event_id);
74310+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
74311
74312 event->state = PERF_EVENT_STATE_INACTIVE;
74313
74314@@ -6790,10 +6790,10 @@ static void sync_child_event(struct perf_event *child_event,
74315 /*
74316 * Add back the child's count to the parent's count:
74317 */
74318- atomic64_add(child_val, &parent_event->child_count);
74319- atomic64_add(child_event->total_time_enabled,
74320+ atomic64_add_unchecked(child_val, &parent_event->child_count);
74321+ atomic64_add_unchecked(child_event->total_time_enabled,
74322 &parent_event->child_total_time_enabled);
74323- atomic64_add(child_event->total_time_running,
74324+ atomic64_add_unchecked(child_event->total_time_running,
74325 &parent_event->child_total_time_running);
74326
74327 /*
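
Note on the mmap-event hunk: it replaces an over-sized allocation with an in-bounds reserve. Rather than allocating PATH_MAX + sizeof(u64) and letting d_path() fill all of PATH_MAX, it allocates exactly PATH_MAX and caps d_path() at PATH_MAX - sizeof(u64), so the 64-bit zero padding appended afterwards always stays inside the buffer. Distilled:

char *buf = kzalloc(PATH_MAX, GFP_KERNEL);	/* exact size, zeroed */
char *name = NULL;

if (buf)	/* keep sizeof(u64) bytes of zeroed tail for padding */
	name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
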
74328diff --git a/kernel/exit.c b/kernel/exit.c
74329index b4df219..f13c02d 100644
74330--- a/kernel/exit.c
74331+++ b/kernel/exit.c
74332@@ -170,6 +170,10 @@ void release_task(struct task_struct * p)
74333 struct task_struct *leader;
74334 int zap_leader;
74335 repeat:
74336+#ifdef CONFIG_NET
74337+ gr_del_task_from_ip_table(p);
74338+#endif
74339+
74340 /* don't need to get the RCU readlock here - the process is dead and
74341 * can't be modifying its own credentials. But shut RCU-lockdep up */
74342 rcu_read_lock();
74343@@ -338,7 +342,7 @@ int allow_signal(int sig)
74344 * know it'll be handled, so that they don't get converted to
74345 * SIGKILL or just silently dropped.
74346 */
74347- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
74348+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
74349 recalc_sigpending();
74350 spin_unlock_irq(&current->sighand->siglock);
74351 return 0;
74352@@ -708,6 +712,8 @@ void do_exit(long code)
74353 struct task_struct *tsk = current;
74354 int group_dead;
74355
74356+ set_fs(USER_DS);
74357+
74358 profile_task_exit(tsk);
74359
74360 WARN_ON(blk_needs_flush_plug(tsk));
74361@@ -724,7 +730,6 @@ void do_exit(long code)
74362 * mm_release()->clear_child_tid() from writing to a user-controlled
74363 * kernel address.
74364 */
74365- set_fs(USER_DS);
74366
74367 ptrace_event(PTRACE_EVENT_EXIT, code);
74368
74369@@ -783,6 +788,9 @@ void do_exit(long code)
74370 tsk->exit_code = code;
74371 taskstats_exit(tsk, group_dead);
74372
74373+ gr_acl_handle_psacct(tsk, code);
74374+ gr_acl_handle_exit();
74375+
74376 exit_mm(tsk);
74377
74378 if (group_dead)
74379@@ -903,7 +911,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
74380 * Take down every thread in the group. This is called by fatal signals
74381 * as well as by sys_exit_group (below).
74382 */
74383-void
74384+__noreturn void
74385 do_group_exit(int exit_code)
74386 {
74387 struct signal_struct *sig = current->signal;
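
Note on the exit.c hunks: two hardening tweaks. set_fs(USER_DS) moves to the top of do_exit(), before the PTRACE_EVENT_EXIT stop, so a tracer can never interact with a dying task that still holds a KERNEL_DS address limit; and do_group_exit() is annotated __noreturn so the compiler can verify no caller expects it back. The ordering, sketched (not compilable in isolation):

set_fs(USER_DS);			/* first: drop any widened limit */
profile_task_exit(tsk);
/* ... */
ptrace_event(PTRACE_EVENT_EXIT, code);	/* then: the traceable stop */
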
74388diff --git a/kernel/fork.c b/kernel/fork.c
74389index 5630e52..0cee608 100644
74390--- a/kernel/fork.c
74391+++ b/kernel/fork.c
74392@@ -318,7 +318,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
74393 *stackend = STACK_END_MAGIC; /* for overflow detection */
74394
74395 #ifdef CONFIG_CC_STACKPROTECTOR
74396- tsk->stack_canary = get_random_int();
74397+ tsk->stack_canary = pax_get_random_long();
74398 #endif
74399
74400 /*
74401@@ -344,13 +344,81 @@ free_tsk:
74402 }
74403
74404 #ifdef CONFIG_MMU
74405+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
74406+{
74407+ struct vm_area_struct *tmp;
74408+ unsigned long charge;
74409+ struct mempolicy *pol;
74410+ struct file *file;
74411+
74412+ charge = 0;
74413+ if (mpnt->vm_flags & VM_ACCOUNT) {
74414+ unsigned long len = vma_pages(mpnt);
74415+
74416+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
74417+ goto fail_nomem;
74418+ charge = len;
74419+ }
74420+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
74421+ if (!tmp)
74422+ goto fail_nomem;
74423+ *tmp = *mpnt;
74424+ tmp->vm_mm = mm;
74425+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
74426+ pol = mpol_dup(vma_policy(mpnt));
74427+ if (IS_ERR(pol))
74428+ goto fail_nomem_policy;
74429+ vma_set_policy(tmp, pol);
74430+ if (anon_vma_fork(tmp, mpnt))
74431+ goto fail_nomem_anon_vma_fork;
74432+ tmp->vm_flags &= ~VM_LOCKED;
74433+ tmp->vm_next = tmp->vm_prev = NULL;
74434+ tmp->vm_mirror = NULL;
74435+ file = tmp->vm_file;
74436+ if (file) {
74437+ struct inode *inode = file->f_path.dentry->d_inode;
74438+ struct address_space *mapping = file->f_mapping;
74439+
74440+ get_file(file);
74441+ if (tmp->vm_flags & VM_DENYWRITE)
74442+ atomic_dec(&inode->i_writecount);
74443+ mutex_lock(&mapping->i_mmap_mutex);
74444+ if (tmp->vm_flags & VM_SHARED)
74445+ mapping->i_mmap_writable++;
74446+ flush_dcache_mmap_lock(mapping);
74447+ /* insert tmp into the share list, just after mpnt */
74448+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
74449+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
74450+ else
74451+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
74452+ flush_dcache_mmap_unlock(mapping);
74453+ mutex_unlock(&mapping->i_mmap_mutex);
74454+ }
74455+
74456+ /*
74457+ * Clear hugetlb-related page reserves for children. This only
74458+ * affects MAP_PRIVATE mappings. Faults generated by the child
74459+ * are not guaranteed to succeed, even if read-only
74460+ */
74461+ if (is_vm_hugetlb_page(tmp))
74462+ reset_vma_resv_huge_pages(tmp);
74463+
74464+ return tmp;
74465+
74466+fail_nomem_anon_vma_fork:
74467+ mpol_put(pol);
74468+fail_nomem_policy:
74469+ kmem_cache_free(vm_area_cachep, tmp);
74470+fail_nomem:
74471+ vm_unacct_memory(charge);
74472+ return NULL;
74473+}
74474+
74475 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
74476 {
74477 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
74478 struct rb_node **rb_link, *rb_parent;
74479 int retval;
74480- unsigned long charge;
74481- struct mempolicy *pol;
74482
74483 uprobe_start_dup_mmap();
74484 down_write(&oldmm->mmap_sem);
74485@@ -364,8 +432,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
74486 mm->locked_vm = 0;
74487 mm->mmap = NULL;
74488 mm->mmap_cache = NULL;
74489- mm->free_area_cache = oldmm->mmap_base;
74490- mm->cached_hole_size = ~0UL;
74491+ mm->free_area_cache = oldmm->free_area_cache;
74492+ mm->cached_hole_size = oldmm->cached_hole_size;
74493 mm->map_count = 0;
74494 cpumask_clear(mm_cpumask(mm));
74495 mm->mm_rb = RB_ROOT;
74496@@ -381,57 +449,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
74497
74498 prev = NULL;
74499 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
74500- struct file *file;
74501-
74502 if (mpnt->vm_flags & VM_DONTCOPY) {
74503 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
74504 -vma_pages(mpnt));
74505 continue;
74506 }
74507- charge = 0;
74508- if (mpnt->vm_flags & VM_ACCOUNT) {
74509- unsigned long len = vma_pages(mpnt);
74510-
74511- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
74512- goto fail_nomem;
74513- charge = len;
74514- }
74515- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
74516- if (!tmp)
74517- goto fail_nomem;
74518- *tmp = *mpnt;
74519- INIT_LIST_HEAD(&tmp->anon_vma_chain);
74520- pol = mpol_dup(vma_policy(mpnt));
74521- retval = PTR_ERR(pol);
74522- if (IS_ERR(pol))
74523- goto fail_nomem_policy;
74524- vma_set_policy(tmp, pol);
74525- tmp->vm_mm = mm;
74526- if (anon_vma_fork(tmp, mpnt))
74527- goto fail_nomem_anon_vma_fork;
74528- tmp->vm_flags &= ~VM_LOCKED;
74529- tmp->vm_next = tmp->vm_prev = NULL;
74530- file = tmp->vm_file;
74531- if (file) {
74532- struct inode *inode = file->f_path.dentry->d_inode;
74533- struct address_space *mapping = file->f_mapping;
74534-
74535- get_file(file);
74536- if (tmp->vm_flags & VM_DENYWRITE)
74537- atomic_dec(&inode->i_writecount);
74538- mutex_lock(&mapping->i_mmap_mutex);
74539- if (tmp->vm_flags & VM_SHARED)
74540- mapping->i_mmap_writable++;
74541- flush_dcache_mmap_lock(mapping);
74542- /* insert tmp into the share list, just after mpnt */
74543- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
74544- vma_nonlinear_insert(tmp,
74545- &mapping->i_mmap_nonlinear);
74546- else
74547- vma_interval_tree_insert_after(tmp, mpnt,
74548- &mapping->i_mmap);
74549- flush_dcache_mmap_unlock(mapping);
74550- mutex_unlock(&mapping->i_mmap_mutex);
74551+ tmp = dup_vma(mm, oldmm, mpnt);
74552+ if (!tmp) {
74553+ retval = -ENOMEM;
74554+ goto out;
74555 }
74556
74557 /*
74558@@ -463,6 +489,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
74559 if (retval)
74560 goto out;
74561 }
74562+
74563+#ifdef CONFIG_PAX_SEGMEXEC
74564+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
74565+ struct vm_area_struct *mpnt_m;
74566+
74567+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
74568+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
74569+
74570+ if (!mpnt->vm_mirror)
74571+ continue;
74572+
74573+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
74574+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
74575+ mpnt->vm_mirror = mpnt_m;
74576+ } else {
74577+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
74578+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
74579+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
74580+ mpnt->vm_mirror->vm_mirror = mpnt;
74581+ }
74582+ }
74583+ BUG_ON(mpnt_m);
74584+ }
74585+#endif
74586+
74587 /* a new mm has just been created */
74588 arch_dup_mmap(oldmm, mm);
74589 retval = 0;
74590@@ -472,14 +523,6 @@ out:
74591 up_write(&oldmm->mmap_sem);
74592 uprobe_end_dup_mmap();
74593 return retval;
74594-fail_nomem_anon_vma_fork:
74595- mpol_put(pol);
74596-fail_nomem_policy:
74597- kmem_cache_free(vm_area_cachep, tmp);
74598-fail_nomem:
74599- retval = -ENOMEM;
74600- vm_unacct_memory(charge);
74601- goto out;
74602 }
74603
74604 static inline int mm_alloc_pgd(struct mm_struct *mm)
74605@@ -694,8 +737,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
74606 return ERR_PTR(err);
74607
74608 mm = get_task_mm(task);
74609- if (mm && mm != current->mm &&
74610- !ptrace_may_access(task, mode)) {
74611+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
74612+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
74613 mmput(mm);
74614 mm = ERR_PTR(-EACCES);
74615 }
74616@@ -917,13 +960,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
74617 spin_unlock(&fs->lock);
74618 return -EAGAIN;
74619 }
74620- fs->users++;
74621+ atomic_inc(&fs->users);
74622 spin_unlock(&fs->lock);
74623 return 0;
74624 }
74625 tsk->fs = copy_fs_struct(fs);
74626 if (!tsk->fs)
74627 return -ENOMEM;
74628+ /* Carry through gr_chroot_dentry and is_chrooted instead
74629+ of recomputing them here; they were already copied when the
74630+ task struct was duplicated. This keeps pivot_root from being
74631+ treated as a chroot.
74632+ */
74633+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
74634+
74635 return 0;
74636 }
74637
74638@@ -1196,6 +1246,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
74639 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
74640 #endif
74641 retval = -EAGAIN;
74642+
74643+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
74644+
74645 if (atomic_read(&p->real_cred->user->processes) >=
74646 task_rlimit(p, RLIMIT_NPROC)) {
74647 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
74648@@ -1435,6 +1488,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
74649 goto bad_fork_free_pid;
74650 }
74651
74652+ /* synchronizes with gr_set_acls();
74653+ this must be called past fork()'s point of no return
74654+ */
74655+ gr_copy_label(p);
74656+
74657 if (clone_flags & CLONE_THREAD) {
74658 current->signal->nr_threads++;
74659 atomic_inc(&current->signal->live);
74660@@ -1518,6 +1576,8 @@ bad_fork_cleanup_count:
74661 bad_fork_free:
74662 free_task(p);
74663 fork_out:
74664+ gr_log_forkfail(retval);
74665+
74666 return ERR_PTR(retval);
74667 }
74668
74669@@ -1568,6 +1628,23 @@ long do_fork(unsigned long clone_flags,
74670 return -EINVAL;
74671 }
74672
74673+#ifdef CONFIG_GRKERNSEC
74674+ if (clone_flags & CLONE_NEWUSER) {
74675+ /*
74676+ * This doesn't really inspire confidence:
74677+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
74678+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
74679+ * Increases kernel attack surface in areas developers
74680+ * previously cared little about ("low importance due
74681+ * to requiring 'root' capability").
74682+ * To be removed when this code receives *proper* review.
74683+ */
74684+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
74685+ !capable(CAP_SETGID))
74686+ return -EPERM;
74687+ }
74688+#endif
74689+
74690 /*
74691 * Determine whether and which event to report to ptracer. When
74692 * called from kernel_thread or CLONE_UNTRACED is explicitly
74693@@ -1602,6 +1679,8 @@ long do_fork(unsigned long clone_flags,
74694 if (clone_flags & CLONE_PARENT_SETTID)
74695 put_user(nr, parent_tidptr);
74696
74697+ gr_handle_brute_check();
74698+
74699 if (clone_flags & CLONE_VFORK) {
74700 p->vfork_done = &vfork;
74701 init_completion(&vfork);
74702@@ -1755,7 +1834,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
74703 return 0;
74704
74705 /* don't need lock here; in the worst case we'll do useless copy */
74706- if (fs->users == 1)
74707+ if (atomic_read(&fs->users) == 1)
74708 return 0;
74709
74710 *new_fsp = copy_fs_struct(fs);
74711@@ -1869,7 +1948,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
74712 fs = current->fs;
74713 spin_lock(&fs->lock);
74714 current->fs = new_fs;
74715- if (--fs->users)
74716+ gr_set_chroot_entries(current, &current->fs->root);
74717+ if (atomic_dec_return(&fs->users))
74718 new_fs = NULL;
74719 else
74720 new_fs = fs;
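
Note on the fork.c hunks: two threads run through them. The per-vma duplication body is factored out into dup_vma() so the SEGMEXEC mirror fixup can walk both vma lists after the copy, and fs_struct's users count becomes an atomic_t (the struct change lives elsewhere in this patch), making the final-put lock-free. The final-put idiom, sketched against free_fs_struct() from fs/fs_struct.c:

/* drop one reference; free only when the last user is gone */
if (atomic_dec_return(&fs->users) == 0)
	free_fs_struct(fs);
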
74721diff --git a/kernel/futex.c b/kernel/futex.c
74722index 8879430..31696f1 100644
74723--- a/kernel/futex.c
74724+++ b/kernel/futex.c
74725@@ -54,6 +54,7 @@
74726 #include <linux/mount.h>
74727 #include <linux/pagemap.h>
74728 #include <linux/syscalls.h>
74729+#include <linux/ptrace.h>
74730 #include <linux/signal.h>
74731 #include <linux/export.h>
74732 #include <linux/magic.h>
74733@@ -239,6 +240,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
74734 struct page *page, *page_head;
74735 int err, ro = 0;
74736
74737+#ifdef CONFIG_PAX_SEGMEXEC
74738+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
74739+ return -EFAULT;
74740+#endif
74741+
74742 /*
74743 * The futex address must be "naturally" aligned.
74744 */
74745@@ -2731,6 +2737,7 @@ static int __init futex_init(void)
74746 {
74747 u32 curval;
74748 int i;
74749+ mm_segment_t oldfs;
74750
74751 /*
74752 * This will fail and we want it. Some arch implementations do
74753@@ -2742,8 +2749,11 @@ static int __init futex_init(void)
74754 * implementation, the non-functional ones will return
74755 * -ENOSYS.
74756 */
74757+ oldfs = get_fs();
74758+ set_fs(USER_DS);
74759 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
74760 futex_cmpxchg_enabled = 1;
74761+ set_fs(oldfs);
74762
74763 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
74764 plist_head_init(&futex_queues[i].chain);
74765diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
74766index 9b22d03..6295b62 100644
74767--- a/kernel/gcov/base.c
74768+++ b/kernel/gcov/base.c
74769@@ -102,11 +102,6 @@ void gcov_enable_events(void)
74770 }
74771
74772 #ifdef CONFIG_MODULES
74773-static inline int within(void *addr, void *start, unsigned long size)
74774-{
74775- return ((addr >= start) && (addr < start + size));
74776-}
74777-
74778 /* Update list and generate events when modules are unloaded. */
74779 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
74780 void *data)
74781@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
74782 prev = NULL;
74783 /* Remove entries located in module from linked list. */
74784 for (info = gcov_info_head; info; info = info->next) {
74785- if (within(info, mod->module_core, mod->core_size)) {
74786+ if (within_module_core_rw((unsigned long)info, mod)) {
74787 if (prev)
74788 prev->next = info->next;
74789 else
74790diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
74791index cdd5607..c3fc919 100644
74792--- a/kernel/hrtimer.c
74793+++ b/kernel/hrtimer.c
74794@@ -1407,7 +1407,7 @@ void hrtimer_peek_ahead_timers(void)
74795 local_irq_restore(flags);
74796 }
74797
74798-static void run_hrtimer_softirq(struct softirq_action *h)
74799+static void run_hrtimer_softirq(void)
74800 {
74801 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
74802
74803@@ -1751,7 +1751,7 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
74804 return NOTIFY_OK;
74805 }
74806
74807-static struct notifier_block __cpuinitdata hrtimers_nb = {
74808+static struct notifier_block hrtimers_nb = {
74809 .notifier_call = hrtimer_cpu_notify,
74810 };
74811
74812diff --git a/kernel/jump_label.c b/kernel/jump_label.c
74813index 60f48fa..7f3a770 100644
74814--- a/kernel/jump_label.c
74815+++ b/kernel/jump_label.c
74816@@ -13,6 +13,7 @@
74817 #include <linux/sort.h>
74818 #include <linux/err.h>
74819 #include <linux/static_key.h>
74820+#include <linux/mm.h>
74821
74822 #ifdef HAVE_JUMP_LABEL
74823
74824@@ -50,7 +51,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
74825
74826 size = (((unsigned long)stop - (unsigned long)start)
74827 / sizeof(struct jump_entry));
74828+ pax_open_kernel();
74829 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
74830+ pax_close_kernel();
74831 }
74832
74833 static void jump_label_update(struct static_key *key, int enable);
74834@@ -357,10 +360,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
74835 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
74836 struct jump_entry *iter;
74837
74838+ pax_open_kernel();
74839 for (iter = iter_start; iter < iter_stop; iter++) {
74840 if (within_module_init(iter->code, mod))
74841 iter->code = 0;
74842 }
74843+ pax_close_kernel();
74844 }
74845
74846 static int
74847diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
74848index 2169fee..706ccca 100644
74849--- a/kernel/kallsyms.c
74850+++ b/kernel/kallsyms.c
74851@@ -11,6 +11,9 @@
74852 * Changed the compression method from stem compression to "table lookup"
74853 * compression (see scripts/kallsyms.c for a more complete description)
74854 */
74855+#ifdef CONFIG_GRKERNSEC_HIDESYM
74856+#define __INCLUDED_BY_HIDESYM 1
74857+#endif
74858 #include <linux/kallsyms.h>
74859 #include <linux/module.h>
74860 #include <linux/init.h>
74861@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
74862
74863 static inline int is_kernel_inittext(unsigned long addr)
74864 {
74865+ if (system_state != SYSTEM_BOOTING)
74866+ return 0;
74867+
74868 if (addr >= (unsigned long)_sinittext
74869 && addr <= (unsigned long)_einittext)
74870 return 1;
74871 return 0;
74872 }
74873
74874+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74875+#ifdef CONFIG_MODULES
74876+static inline int is_module_text(unsigned long addr)
74877+{
74878+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
74879+ return 1;
74880+
74881+ addr = ktla_ktva(addr);
74882+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
74883+}
74884+#else
74885+static inline int is_module_text(unsigned long addr)
74886+{
74887+ return 0;
74888+}
74889+#endif
74890+#endif
74891+
74892 static inline int is_kernel_text(unsigned long addr)
74893 {
74894 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
74895@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
74896
74897 static inline int is_kernel(unsigned long addr)
74898 {
74899+
74900+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74901+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
74902+ return 1;
74903+
74904+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
74905+#else
74906 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
74907+#endif
74908+
74909 return 1;
74910 return in_gate_area_no_mm(addr);
74911 }
74912
74913 static int is_ksym_addr(unsigned long addr)
74914 {
74915+
74916+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74917+ if (is_module_text(addr))
74918+ return 0;
74919+#endif
74920+
74921 if (all_var)
74922 return is_kernel(addr);
74923
74924@@ -470,7 +509,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
74925
74926 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
74927 {
74928- iter->name[0] = '\0';
74929 iter->nameoff = get_symbol_offset(new_pos);
74930 iter->pos = new_pos;
74931 }
74932@@ -518,6 +556,11 @@ static int s_show(struct seq_file *m, void *p)
74933 {
74934 struct kallsym_iter *iter = m->private;
74935
74936+#ifdef CONFIG_GRKERNSEC_HIDESYM
74937+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
74938+ return 0;
74939+#endif
74940+
74941 /* Some debugging symbols have no name. Ignore them. */
74942 if (!iter->name[0])
74943 return 0;
74944@@ -531,6 +574,7 @@ static int s_show(struct seq_file *m, void *p)
74945 */
74946 type = iter->exported ? toupper(iter->type) :
74947 tolower(iter->type);
74948+
74949 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
74950 type, iter->name, iter->module_name);
74951 } else
74952@@ -556,7 +600,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
74953 struct kallsym_iter *iter;
74954 int ret;
74955
74956- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
74957+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
74958 if (!iter)
74959 return -ENOMEM;
74960 reset_iter(iter, 0);
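
Two of the kallsyms hunks are coupled: reset_iter() no longer clears iter->name[0], so kallsyms_open() must hand out zeroed memory — hence kmalloc() becoming kzalloc(). The userspace shape of that trade-off, with an illustrative struct rather than the kernel's kallsym_iter:

#include <stdio.h>
#include <stdlib.h>

struct sym_iter {
    char name[128];   /* s_show() skips entries whose name[0] == '\0' */
    long pos;
};

int main(void)
{
    /* malloc() would leave name[] holding stale heap bytes; unless every
     * consumer re-clears name[0], the "skip unnamed symbol" test reads
     * garbage.  calloc() (kzalloc() in the kernel) zeroes the whole
     * object once, at allocation. */
    struct sym_iter *iter = calloc(1, sizeof(*iter));
    if (!iter)
        return 1;
    printf("name[0] == %d, safe to test\n", iter->name[0]);
    free(iter);
    return 0;
}
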
74961diff --git a/kernel/kcmp.c b/kernel/kcmp.c
74962index e30ac0f..3528cac 100644
74963--- a/kernel/kcmp.c
74964+++ b/kernel/kcmp.c
74965@@ -99,6 +99,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
74966 struct task_struct *task1, *task2;
74967 int ret;
74968
74969+#ifdef CONFIG_GRKERNSEC
74970+ return -ENOSYS;
74971+#endif
74972+
74973 rcu_read_lock();
74974
74975 /*
74976diff --git a/kernel/kexec.c b/kernel/kexec.c
74977index 5e4bd78..00c5b91 100644
74978--- a/kernel/kexec.c
74979+++ b/kernel/kexec.c
74980@@ -1045,7 +1045,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
74981 unsigned long flags)
74982 {
74983 struct compat_kexec_segment in;
74984- struct kexec_segment out, __user *ksegments;
74985+ struct kexec_segment out;
74986+ struct kexec_segment __user *ksegments;
74987 unsigned long i, result;
74988
74989 /* Don't allow clients that don't understand the native
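
The kexec change is purely about declarator binding: in the combined declaration, the __user annotation belonged to the pointer declarator alone, which is easy to misread, so the patch splits it into two declarations. A compilable reminder of how qualifiers bind per declarator, with plain const standing in for sparse's __user:

#include <stdio.h>

int main(void)
{
    int n = 1, m = 2;
    /* In "int a, *p;" the '*' binds to p alone; likewise in the kexec
     * declaration, "__user" annotated only the pointer, never 'out'.
     * One declarator per line makes that unmistakable: */
    int out;                 /* plain object */
    const int *ksegments;    /* qualified pointer */

    out = n;
    ksegments = &m;
    printf("%d %d\n", out, *ksegments);
    return 0;
}
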
74990diff --git a/kernel/kmod.c b/kernel/kmod.c
74991index 0023a87..9c0c068 100644
74992--- a/kernel/kmod.c
74993+++ b/kernel/kmod.c
74994@@ -74,7 +74,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
74995 kfree(info->argv);
74996 }
74997
74998-static int call_modprobe(char *module_name, int wait)
74999+static int call_modprobe(char *module_name, char *module_param, int wait)
75000 {
75001 static char *envp[] = {
75002 "HOME=/",
75003@@ -83,7 +83,7 @@ static int call_modprobe(char *module_name, int wait)
75004 NULL
75005 };
75006
75007- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
75008+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
75009 if (!argv)
75010 goto out;
75011
75012@@ -95,7 +95,8 @@ static int call_modprobe(char *module_name, int wait)
75013 argv[1] = "-q";
75014 argv[2] = "--";
75015 argv[3] = module_name; /* check free_modprobe_argv() */
75016- argv[4] = NULL;
75017+ argv[4] = module_param;
75018+ argv[5] = NULL;
75019
75020 return call_usermodehelper_fns(modprobe_path, argv, envp,
75021 wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
75022@@ -120,9 +121,8 @@ out:
75023 * If module auto-loading support is disabled then this function
75024 * becomes a no-operation.
75025 */
75026-int __request_module(bool wait, const char *fmt, ...)
75027+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
75028 {
75029- va_list args;
75030 char module_name[MODULE_NAME_LEN];
75031 unsigned int max_modprobes;
75032 int ret;
75033@@ -130,9 +130,7 @@ int __request_module(bool wait, const char *fmt, ...)
75034 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
75035 static int kmod_loop_msg;
75036
75037- va_start(args, fmt);
75038- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
75039- va_end(args);
75040+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
75041 if (ret >= MODULE_NAME_LEN)
75042 return -ENAMETOOLONG;
75043
75044@@ -140,6 +138,20 @@ int __request_module(bool wait, const char *fmt, ...)
75045 if (ret)
75046 return ret;
75047
75048+#ifdef CONFIG_GRKERNSEC_MODHARDEN
75049+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
75050+ /* hack to workaround consolekit/udisks stupidity */
75051+ read_lock(&tasklist_lock);
75052+ if (!strcmp(current->comm, "mount") &&
75053+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
75054+ read_unlock(&tasklist_lock);
75055+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
75056+ return -EPERM;
75057+ }
75058+ read_unlock(&tasklist_lock);
75059+ }
75060+#endif
75061+
75062 /* If modprobe needs a service that is in a module, we get a recursive
75063 * loop. Limit the number of running kmod threads to max_threads/2 or
75064 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
75065@@ -168,11 +180,52 @@ int __request_module(bool wait, const char *fmt, ...)
75066
75067 trace_module_request(module_name, wait, _RET_IP_);
75068
75069- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
75070+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
75071
75072 atomic_dec(&kmod_concurrent);
75073 return ret;
75074 }
75075+
75076+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
75077+{
75078+ va_list args;
75079+ int ret;
75080+
75081+ va_start(args, fmt);
75082+ ret = ____request_module(wait, module_param, fmt, args);
75083+ va_end(args);
75084+
75085+ return ret;
75086+}
75087+
75088+int __request_module(bool wait, const char *fmt, ...)
75089+{
75090+ va_list args;
75091+ int ret;
75092+
75093+#ifdef CONFIG_GRKERNSEC_MODHARDEN
75094+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
75095+ char module_param[MODULE_NAME_LEN];
75096+
75097+ memset(module_param, 0, sizeof(module_param));
75098+
75099+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
75100+
75101+ va_start(args, fmt);
75102+ ret = ____request_module(wait, module_param, fmt, args);
75103+ va_end(args);
75104+
75105+ return ret;
75106+ }
75107+#endif
75108+
75109+ va_start(args, fmt);
75110+ ret = ____request_module(wait, NULL, fmt, args);
75111+ va_end(args);
75112+
75113+ return ret;
75114+}
75115+
75116 EXPORT_SYMBOL(__request_module);
75117 #endif /* CONFIG_MODULES */
75118
75119@@ -283,7 +336,7 @@ static int wait_for_helper(void *data)
75120 *
75121 * Thus the __user pointer cast is valid here.
75122 */
75123- sys_wait4(pid, (int __user *)&ret, 0, NULL);
75124+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
75125
75126 /*
75127 * If ret is 0, either ____call_usermodehelper failed and the
75128@@ -635,7 +688,7 @@ EXPORT_SYMBOL(call_usermodehelper_fns);
75129 static int proc_cap_handler(struct ctl_table *table, int write,
75130 void __user *buffer, size_t *lenp, loff_t *ppos)
75131 {
75132- struct ctl_table t;
75133+ ctl_table_no_const t;
75134 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
75135 kernel_cap_t new_cap;
75136 int err, i;
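
The MODHARDEN plumbing above widens call_modprobe()'s argv from five slots to six so a synthesized parameter (e.g. "grsec_modharden_normal<uid>_") rides along with the module name. A sketch of that argv construction in plain C — the path, constant value, and parameter string are illustrative:

#include <stdio.h>
#include <stdlib.h>

#define MODULE_NAME_LEN 64   /* illustrative size, not the kernel's exact value */

/* Builds the same shape of vector the patched call_modprobe() hands to the
 * usermode helper: NULL-terminated, with one optional extra slot. */
static char **build_modprobe_argv(const char *name, const char *module_param)
{
    char **argv = malloc(sizeof(char *) * 6);   /* was 5 before the patch */
    if (!argv)
        return NULL;
    argv[0] = "/sbin/modprobe";
    argv[1] = "-q";
    argv[2] = "--";
    argv[3] = (char *)name;
    argv[4] = (char *)module_param;             /* may be NULL */
    argv[5] = NULL;                             /* exec terminator */
    return argv;
}

int main(void)
{
    char param[MODULE_NAME_LEN];
    snprintf(param, sizeof(param), "grsec_modharden_normal%u_", 1000u);

    char **argv = build_modprobe_argv("fuse", param);
    for (int i = 0; argv && argv[i]; i++)
        printf("argv[%d] = %s\n", i, argv[i]);
    free(argv);
    return 0;
}

When module_param is NULL the vector simply terminates one slot earlier, so unhardened callers behave exactly as before.
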
75137diff --git a/kernel/kprobes.c b/kernel/kprobes.c
75138index 098f396..fe85ff1 100644
75139--- a/kernel/kprobes.c
75140+++ b/kernel/kprobes.c
75141@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
75142 * kernel image and loaded module images reside. This is required
75143 * so x86_64 can correctly handle the %rip-relative fixups.
75144 */
75145- kip->insns = module_alloc(PAGE_SIZE);
75146+ kip->insns = module_alloc_exec(PAGE_SIZE);
75147 if (!kip->insns) {
75148 kfree(kip);
75149 return NULL;
75150@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
75151 */
75152 if (!list_is_singular(&kip->list)) {
75153 list_del(&kip->list);
75154- module_free(NULL, kip->insns);
75155+ module_free_exec(NULL, kip->insns);
75156 kfree(kip);
75157 }
75158 return 1;
75159@@ -2063,7 +2063,7 @@ static int __init init_kprobes(void)
75160 {
75161 int i, err = 0;
75162 unsigned long offset = 0, size = 0;
75163- char *modname, namebuf[128];
75164+ char *modname, namebuf[KSYM_NAME_LEN];
75165 const char *symbol_name;
75166 void *addr;
75167 struct kprobe_blackpoint *kb;
75168@@ -2148,11 +2148,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
75169 kprobe_type = "k";
75170
75171 if (sym)
75172- seq_printf(pi, "%p %s %s+0x%x %s ",
75173+ seq_printf(pi, "%pK %s %s+0x%x %s ",
75174 p->addr, kprobe_type, sym, offset,
75175 (modname ? modname : " "));
75176 else
75177- seq_printf(pi, "%p %s %p ",
75178+ seq_printf(pi, "%pK %s %pK ",
75179 p->addr, kprobe_type, p->addr);
75180
75181 if (!pp)
75182@@ -2190,7 +2190,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
75183 const char *sym = NULL;
75184 unsigned int i = *(loff_t *) v;
75185 unsigned long offset = 0;
75186- char *modname, namebuf[128];
75187+ char *modname, namebuf[KSYM_NAME_LEN];
75188
75189 head = &kprobe_table[i];
75190 preempt_disable();
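
Two small hardening patterns recur in the kprobes hunks: debugfs output switches from %p to %pK so pointer values are censored for unprivileged readers under kptr_restrict, and symbol-name buffers are sized with KSYM_NAME_LEN instead of a bare 128, so one named constant governs every lookup buffer. The buffer-sizing half in miniature (the constant's value here is illustrative):

#include <stdio.h>

#define KSYM_NAME_LEN 128   /* single authoritative constant */

static void lookup_symbol(char *namebuf, size_t buflen)
{
    /* truncates safely however the constant changes later */
    snprintf(namebuf, buflen, "%s", "some_very_long_kernel_symbol_name");
}

int main(void)
{
    char namebuf[KSYM_NAME_LEN];
    lookup_symbol(namebuf, sizeof(namebuf));
    printf("%s\n", namebuf);
    return 0;
}
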
75191diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
75192index 6ada93c..dce7d5d 100644
75193--- a/kernel/ksysfs.c
75194+++ b/kernel/ksysfs.c
75195@@ -46,6 +46,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
75196 {
75197 if (count+1 > UEVENT_HELPER_PATH_LEN)
75198 return -ENOENT;
75199+ if (!capable(CAP_SYS_ADMIN))
75200+ return -EPERM;
75201 memcpy(uevent_helper, buf, count);
75202 uevent_helper[count] = '\0';
75203 if (count && uevent_helper[count-1] == '\n')
75204@@ -172,7 +174,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
75205 return count;
75206 }
75207
75208-static struct bin_attribute notes_attr = {
75209+static bin_attribute_no_const notes_attr __read_only = {
75210 .attr = {
75211 .name = "notes",
75212 .mode = S_IRUGO,
75213diff --git a/kernel/lockdep.c b/kernel/lockdep.c
75214index 7981e5b..7f2105c 100644
75215--- a/kernel/lockdep.c
75216+++ b/kernel/lockdep.c
75217@@ -590,6 +590,10 @@ static int static_obj(void *obj)
75218 end = (unsigned long) &_end,
75219 addr = (unsigned long) obj;
75220
75221+#ifdef CONFIG_PAX_KERNEXEC
75222+ start = ktla_ktva(start);
75223+#endif
75224+
75225 /*
75226 * static variable?
75227 */
75228@@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
75229 if (!static_obj(lock->key)) {
75230 debug_locks_off();
75231 printk("INFO: trying to register non-static key.\n");
75232+ printk("lock:%pS key:%pS.\n", lock, lock->key);
75233 printk("the code is fine but needs lockdep annotation.\n");
75234 printk("turning off the locking correctness validator.\n");
75235 dump_stack();
75236@@ -3078,7 +3083,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
75237 if (!class)
75238 return 0;
75239 }
75240- atomic_inc((atomic_t *)&class->ops);
75241+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
75242 if (very_verbose(class)) {
75243 printk("\nacquire class [%p] %s", class->key, class->name);
75244 if (class->name_version > 1)
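
static_obj() decides whether a lock key lives in the kernel image by a simple range test; under KERNEXEC the image's text is relocated, so the patch runs start through ktla_ktva() before comparing. The containment test reduced to its essentials — the fixed offset is an illustrative stand-in for ktla_ktva():

#include <stdio.h>

#define RELOC_OFFSET 0x1000UL   /* illustrative relocation distance */

static int static_obj(unsigned long start, unsigned long end,
                      unsigned long addr)
{
    start += RELOC_OFFSET;   /* mirrors: start = ktla_ktva(start) */
    return addr >= start && addr < end;
}

int main(void)
{
    unsigned long _stext = 0x100000, _end = 0x180000;
    printf("%d\n", static_obj(_stext, _end, 0x140000));  /* inside  -> 1 */
    printf("%d\n", static_obj(_stext, _end, 0x040000));  /* outside -> 0 */
    return 0;
}
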
75245diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
75246index b2c71c5..7b88d63 100644
75247--- a/kernel/lockdep_proc.c
75248+++ b/kernel/lockdep_proc.c
75249@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
75250 return 0;
75251 }
75252
75253- seq_printf(m, "%p", class->key);
75254+ seq_printf(m, "%pK", class->key);
75255 #ifdef CONFIG_DEBUG_LOCKDEP
75256 seq_printf(m, " OPS:%8ld", class->ops);
75257 #endif
75258@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
75259
75260 list_for_each_entry(entry, &class->locks_after, entry) {
75261 if (entry->distance == 1) {
75262- seq_printf(m, " -> [%p] ", entry->class->key);
75263+ seq_printf(m, " -> [%pK] ", entry->class->key);
75264 print_name(m, entry->class);
75265 seq_puts(m, "\n");
75266 }
75267@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
75268 if (!class->key)
75269 continue;
75270
75271- seq_printf(m, "[%p] ", class->key);
75272+ seq_printf(m, "[%pK] ", class->key);
75273 print_name(m, class);
75274 seq_puts(m, "\n");
75275 }
75276@@ -495,7 +495,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
75277 if (!i)
75278 seq_line(m, '-', 40-namelen, namelen);
75279
75280- snprintf(ip, sizeof(ip), "[<%p>]",
75281+ snprintf(ip, sizeof(ip), "[<%pK>]",
75282 (void *)class->contention_point[i]);
75283 seq_printf(m, "%40s %14lu %29s %pS\n",
75284 name, stats->contention_point[i],
75285@@ -510,7 +510,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
75286 if (!i)
75287 seq_line(m, '-', 40-namelen, namelen);
75288
75289- snprintf(ip, sizeof(ip), "[<%p>]",
75290+ snprintf(ip, sizeof(ip), "[<%pK>]",
75291 (void *)class->contending_point[i]);
75292 seq_printf(m, "%40s %14lu %29s %pS\n",
75293 name, stats->contending_point[i],
75294diff --git a/kernel/module.c b/kernel/module.c
75295index eab0827..f488603 100644
75296--- a/kernel/module.c
75297+++ b/kernel/module.c
75298@@ -61,6 +61,7 @@
75299 #include <linux/pfn.h>
75300 #include <linux/bsearch.h>
75301 #include <linux/fips.h>
75302+#include <linux/grsecurity.h>
75303 #include <uapi/linux/module.h>
75304 #include "module-internal.h"
75305
75306@@ -156,7 +157,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
75307
75308 /* Bounds of module allocation, for speeding __module_address.
75309 * Protected by module_mutex. */
75310-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
75311+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
75312+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
75313
75314 int register_module_notifier(struct notifier_block * nb)
75315 {
75316@@ -322,7 +324,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
75317 return true;
75318
75319 list_for_each_entry_rcu(mod, &modules, list) {
75320- struct symsearch arr[] = {
75321+ struct symsearch modarr[] = {
75322 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
75323 NOT_GPL_ONLY, false },
75324 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
75325@@ -347,7 +349,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
75326 if (mod->state == MODULE_STATE_UNFORMED)
75327 continue;
75328
75329- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
75330+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
75331 return true;
75332 }
75333 return false;
75334@@ -484,7 +486,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
75335 static int percpu_modalloc(struct module *mod,
75336 unsigned long size, unsigned long align)
75337 {
75338- if (align > PAGE_SIZE) {
75339+ if (align-1 >= PAGE_SIZE) {
75340 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
75341 mod->name, align, PAGE_SIZE);
75342 align = PAGE_SIZE;
75343@@ -1088,7 +1090,7 @@ struct module_attribute module_uevent =
75344 static ssize_t show_coresize(struct module_attribute *mattr,
75345 struct module_kobject *mk, char *buffer)
75346 {
75347- return sprintf(buffer, "%u\n", mk->mod->core_size);
75348+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
75349 }
75350
75351 static struct module_attribute modinfo_coresize =
75352@@ -1097,7 +1099,7 @@ static struct module_attribute modinfo_coresize =
75353 static ssize_t show_initsize(struct module_attribute *mattr,
75354 struct module_kobject *mk, char *buffer)
75355 {
75356- return sprintf(buffer, "%u\n", mk->mod->init_size);
75357+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
75358 }
75359
75360 static struct module_attribute modinfo_initsize =
75361@@ -1311,7 +1313,7 @@ resolve_symbol_wait(struct module *mod,
75362 */
75363 #ifdef CONFIG_SYSFS
75364
75365-#ifdef CONFIG_KALLSYMS
75366+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
75367 static inline bool sect_empty(const Elf_Shdr *sect)
75368 {
75369 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
75370@@ -1451,7 +1453,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
75371 {
75372 unsigned int notes, loaded, i;
75373 struct module_notes_attrs *notes_attrs;
75374- struct bin_attribute *nattr;
75375+ bin_attribute_no_const *nattr;
75376
75377 /* failed to create section attributes, so can't create notes */
75378 if (!mod->sect_attrs)
75379@@ -1563,7 +1565,7 @@ static void del_usage_links(struct module *mod)
75380 static int module_add_modinfo_attrs(struct module *mod)
75381 {
75382 struct module_attribute *attr;
75383- struct module_attribute *temp_attr;
75384+ module_attribute_no_const *temp_attr;
75385 int error = 0;
75386 int i;
75387
75388@@ -1777,21 +1779,21 @@ static void set_section_ro_nx(void *base,
75389
75390 static void unset_module_core_ro_nx(struct module *mod)
75391 {
75392- set_page_attributes(mod->module_core + mod->core_text_size,
75393- mod->module_core + mod->core_size,
75394+ set_page_attributes(mod->module_core_rw,
75395+ mod->module_core_rw + mod->core_size_rw,
75396 set_memory_x);
75397- set_page_attributes(mod->module_core,
75398- mod->module_core + mod->core_ro_size,
75399+ set_page_attributes(mod->module_core_rx,
75400+ mod->module_core_rx + mod->core_size_rx,
75401 set_memory_rw);
75402 }
75403
75404 static void unset_module_init_ro_nx(struct module *mod)
75405 {
75406- set_page_attributes(mod->module_init + mod->init_text_size,
75407- mod->module_init + mod->init_size,
75408+ set_page_attributes(mod->module_init_rw,
75409+ mod->module_init_rw + mod->init_size_rw,
75410 set_memory_x);
75411- set_page_attributes(mod->module_init,
75412- mod->module_init + mod->init_ro_size,
75413+ set_page_attributes(mod->module_init_rx,
75414+ mod->module_init_rx + mod->init_size_rx,
75415 set_memory_rw);
75416 }
75417
75418@@ -1804,14 +1806,14 @@ void set_all_modules_text_rw(void)
75419 list_for_each_entry_rcu(mod, &modules, list) {
75420 if (mod->state == MODULE_STATE_UNFORMED)
75421 continue;
75422- if ((mod->module_core) && (mod->core_text_size)) {
75423- set_page_attributes(mod->module_core,
75424- mod->module_core + mod->core_text_size,
75425+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
75426+ set_page_attributes(mod->module_core_rx,
75427+ mod->module_core_rx + mod->core_size_rx,
75428 set_memory_rw);
75429 }
75430- if ((mod->module_init) && (mod->init_text_size)) {
75431- set_page_attributes(mod->module_init,
75432- mod->module_init + mod->init_text_size,
75433+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
75434+ set_page_attributes(mod->module_init_rx,
75435+ mod->module_init_rx + mod->init_size_rx,
75436 set_memory_rw);
75437 }
75438 }
75439@@ -1827,14 +1829,14 @@ void set_all_modules_text_ro(void)
75440 list_for_each_entry_rcu(mod, &modules, list) {
75441 if (mod->state == MODULE_STATE_UNFORMED)
75442 continue;
75443- if ((mod->module_core) && (mod->core_text_size)) {
75444- set_page_attributes(mod->module_core,
75445- mod->module_core + mod->core_text_size,
75446+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
75447+ set_page_attributes(mod->module_core_rx,
75448+ mod->module_core_rx + mod->core_size_rx,
75449 set_memory_ro);
75450 }
75451- if ((mod->module_init) && (mod->init_text_size)) {
75452- set_page_attributes(mod->module_init,
75453- mod->module_init + mod->init_text_size,
75454+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
75455+ set_page_attributes(mod->module_init_rx,
75456+ mod->module_init_rx + mod->init_size_rx,
75457 set_memory_ro);
75458 }
75459 }
75460@@ -1880,16 +1882,19 @@ static void free_module(struct module *mod)
75461
75462 /* This may be NULL, but that's OK */
75463 unset_module_init_ro_nx(mod);
75464- module_free(mod, mod->module_init);
75465+ module_free(mod, mod->module_init_rw);
75466+ module_free_exec(mod, mod->module_init_rx);
75467 kfree(mod->args);
75468 percpu_modfree(mod);
75469
75470 /* Free lock-classes: */
75471- lockdep_free_key_range(mod->module_core, mod->core_size);
75472+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
75473+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
75474
75475 /* Finally, free the core (containing the module structure) */
75476 unset_module_core_ro_nx(mod);
75477- module_free(mod, mod->module_core);
75478+ module_free_exec(mod, mod->module_core_rx);
75479+ module_free(mod, mod->module_core_rw);
75480
75481 #ifdef CONFIG_MPU
75482 update_protections(current->mm);
75483@@ -1959,9 +1964,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
75484 int ret = 0;
75485 const struct kernel_symbol *ksym;
75486
75487+#ifdef CONFIG_GRKERNSEC_MODHARDEN
75488+ int is_fs_load = 0;
75489+ int register_filesystem_found = 0;
75490+ char *p;
75491+
75492+ p = strstr(mod->args, "grsec_modharden_fs");
75493+ if (p) {
75494+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
75495+ /* copy \0 as well */
75496+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
75497+ is_fs_load = 1;
75498+ }
75499+#endif
75500+
75501 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
75502 const char *name = info->strtab + sym[i].st_name;
75503
75504+#ifdef CONFIG_GRKERNSEC_MODHARDEN
75505+ /* it's a real shame this will never get ripped and copied
75506+ upstream! ;(
75507+ */
75508+ if (is_fs_load && !strcmp(name, "register_filesystem"))
75509+ register_filesystem_found = 1;
75510+#endif
75511+
75512 switch (sym[i].st_shndx) {
75513 case SHN_COMMON:
75514 /* We compiled with -fno-common. These are not
75515@@ -1982,7 +2009,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
75516 ksym = resolve_symbol_wait(mod, info, name);
75517 /* Ok if resolved. */
75518 if (ksym && !IS_ERR(ksym)) {
75519+ pax_open_kernel();
75520 sym[i].st_value = ksym->value;
75521+ pax_close_kernel();
75522 break;
75523 }
75524
75525@@ -2001,11 +2030,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
75526 secbase = (unsigned long)mod_percpu(mod);
75527 else
75528 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
75529+ pax_open_kernel();
75530 sym[i].st_value += secbase;
75531+ pax_close_kernel();
75532 break;
75533 }
75534 }
75535
75536+#ifdef CONFIG_GRKERNSEC_MODHARDEN
75537+ if (is_fs_load && !register_filesystem_found) {
75538+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
75539+ ret = -EPERM;
75540+ }
75541+#endif
75542+
75543 return ret;
75544 }
75545
75546@@ -2089,22 +2127,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
75547 || s->sh_entsize != ~0UL
75548 || strstarts(sname, ".init"))
75549 continue;
75550- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
75551+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
75552+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
75553+ else
75554+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
75555 pr_debug("\t%s\n", sname);
75556 }
75557- switch (m) {
75558- case 0: /* executable */
75559- mod->core_size = debug_align(mod->core_size);
75560- mod->core_text_size = mod->core_size;
75561- break;
75562- case 1: /* RO: text and ro-data */
75563- mod->core_size = debug_align(mod->core_size);
75564- mod->core_ro_size = mod->core_size;
75565- break;
75566- case 3: /* whole core */
75567- mod->core_size = debug_align(mod->core_size);
75568- break;
75569- }
75570 }
75571
75572 pr_debug("Init section allocation order:\n");
75573@@ -2118,23 +2146,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
75574 || s->sh_entsize != ~0UL
75575 || !strstarts(sname, ".init"))
75576 continue;
75577- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
75578- | INIT_OFFSET_MASK);
75579+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
75580+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
75581+ else
75582+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
75583+ s->sh_entsize |= INIT_OFFSET_MASK;
75584 pr_debug("\t%s\n", sname);
75585 }
75586- switch (m) {
75587- case 0: /* executable */
75588- mod->init_size = debug_align(mod->init_size);
75589- mod->init_text_size = mod->init_size;
75590- break;
75591- case 1: /* RO: text and ro-data */
75592- mod->init_size = debug_align(mod->init_size);
75593- mod->init_ro_size = mod->init_size;
75594- break;
75595- case 3: /* whole init */
75596- mod->init_size = debug_align(mod->init_size);
75597- break;
75598- }
75599 }
75600 }
75601
75602@@ -2306,7 +2324,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
75603
75604 /* Put symbol section at end of init part of module. */
75605 symsect->sh_flags |= SHF_ALLOC;
75606- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
75607+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
75608 info->index.sym) | INIT_OFFSET_MASK;
75609 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
75610
75611@@ -2323,13 +2341,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
75612 }
75613
75614 /* Append room for core symbols at end of core part. */
75615- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
75616- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
75617- mod->core_size += strtab_size;
75618+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
75619+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
75620+ mod->core_size_rx += strtab_size;
75621
75622 /* Put string table section at end of init part of module. */
75623 strsect->sh_flags |= SHF_ALLOC;
75624- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
75625+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
75626 info->index.str) | INIT_OFFSET_MASK;
75627 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
75628 }
75629@@ -2347,12 +2365,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
75630 /* Make sure we get permanent strtab: don't use info->strtab. */
75631 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
75632
75633+ pax_open_kernel();
75634+
75635 /* Set types up while we still have access to sections. */
75636 for (i = 0; i < mod->num_symtab; i++)
75637 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
75638
75639- mod->core_symtab = dst = mod->module_core + info->symoffs;
75640- mod->core_strtab = s = mod->module_core + info->stroffs;
75641+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
75642+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
75643 src = mod->symtab;
75644 for (ndst = i = 0; i < mod->num_symtab; i++) {
75645 if (i == 0 ||
75646@@ -2364,6 +2384,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
75647 }
75648 }
75649 mod->core_num_syms = ndst;
75650+
75651+ pax_close_kernel();
75652 }
75653 #else
75654 static inline void layout_symtab(struct module *mod, struct load_info *info)
75655@@ -2397,17 +2419,33 @@ void * __weak module_alloc(unsigned long size)
75656 return vmalloc_exec(size);
75657 }
75658
75659-static void *module_alloc_update_bounds(unsigned long size)
75660+static void *module_alloc_update_bounds_rw(unsigned long size)
75661 {
75662 void *ret = module_alloc(size);
75663
75664 if (ret) {
75665 mutex_lock(&module_mutex);
75666 /* Update module bounds. */
75667- if ((unsigned long)ret < module_addr_min)
75668- module_addr_min = (unsigned long)ret;
75669- if ((unsigned long)ret + size > module_addr_max)
75670- module_addr_max = (unsigned long)ret + size;
75671+ if ((unsigned long)ret < module_addr_min_rw)
75672+ module_addr_min_rw = (unsigned long)ret;
75673+ if ((unsigned long)ret + size > module_addr_max_rw)
75674+ module_addr_max_rw = (unsigned long)ret + size;
75675+ mutex_unlock(&module_mutex);
75676+ }
75677+ return ret;
75678+}
75679+
75680+static void *module_alloc_update_bounds_rx(unsigned long size)
75681+{
75682+ void *ret = module_alloc_exec(size);
75683+
75684+ if (ret) {
75685+ mutex_lock(&module_mutex);
75686+ /* Update module bounds. */
75687+ if ((unsigned long)ret < module_addr_min_rx)
75688+ module_addr_min_rx = (unsigned long)ret;
75689+ if ((unsigned long)ret + size > module_addr_max_rx)
75690+ module_addr_max_rx = (unsigned long)ret + size;
75691 mutex_unlock(&module_mutex);
75692 }
75693 return ret;
75694@@ -2683,8 +2721,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
75695 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
75696 {
75697 const char *modmagic = get_modinfo(info, "vermagic");
75698+ const char *license = get_modinfo(info, "license");
75699 int err;
75700
75701+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
75702+ if (!license || !license_is_gpl_compatible(license))
75703+ return -ENOEXEC;
75704+#endif
75705+
75706 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
75707 modmagic = NULL;
75708
75709@@ -2710,7 +2754,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
75710 }
75711
75712 /* Set up license info based on the info section */
75713- set_license(mod, get_modinfo(info, "license"));
75714+ set_license(mod, license);
75715
75716 return 0;
75717 }
75718@@ -2804,7 +2848,7 @@ static int move_module(struct module *mod, struct load_info *info)
75719 void *ptr;
75720
75721 /* Do the allocs. */
75722- ptr = module_alloc_update_bounds(mod->core_size);
75723+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
75724 /*
75725 * The pointer to this block is stored in the module structure
75726 * which is inside the block. Just mark it as not being a
75727@@ -2814,11 +2858,11 @@ static int move_module(struct module *mod, struct load_info *info)
75728 if (!ptr)
75729 return -ENOMEM;
75730
75731- memset(ptr, 0, mod->core_size);
75732- mod->module_core = ptr;
75733+ memset(ptr, 0, mod->core_size_rw);
75734+ mod->module_core_rw = ptr;
75735
75736- if (mod->init_size) {
75737- ptr = module_alloc_update_bounds(mod->init_size);
75738+ if (mod->init_size_rw) {
75739+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
75740 /*
75741 * The pointer to this block is stored in the module structure
75742 * which is inside the block. This block doesn't need to be
75743@@ -2827,13 +2871,45 @@ static int move_module(struct module *mod, struct load_info *info)
75744 */
75745 kmemleak_ignore(ptr);
75746 if (!ptr) {
75747- module_free(mod, mod->module_core);
75748+ module_free(mod, mod->module_core_rw);
75749 return -ENOMEM;
75750 }
75751- memset(ptr, 0, mod->init_size);
75752- mod->module_init = ptr;
75753+ memset(ptr, 0, mod->init_size_rw);
75754+ mod->module_init_rw = ptr;
75755 } else
75756- mod->module_init = NULL;
75757+ mod->module_init_rw = NULL;
75758+
75759+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
75760+ kmemleak_not_leak(ptr);
75761+ if (!ptr) {
75762+ if (mod->module_init_rw)
75763+ module_free(mod, mod->module_init_rw);
75764+ module_free(mod, mod->module_core_rw);
75765+ return -ENOMEM;
75766+ }
75767+
75768+ pax_open_kernel();
75769+ memset(ptr, 0, mod->core_size_rx);
75770+ pax_close_kernel();
75771+ mod->module_core_rx = ptr;
75772+
75773+ if (mod->init_size_rx) {
75774+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
75775+ kmemleak_ignore(ptr);
75776+ if (!ptr && mod->init_size_rx) {
75777+ module_free_exec(mod, mod->module_core_rx);
75778+ if (mod->module_init_rw)
75779+ module_free(mod, mod->module_init_rw);
75780+ module_free(mod, mod->module_core_rw);
75781+ return -ENOMEM;
75782+ }
75783+
75784+ pax_open_kernel();
75785+ memset(ptr, 0, mod->init_size_rx);
75786+ pax_close_kernel();
75787+ mod->module_init_rx = ptr;
75788+ } else
75789+ mod->module_init_rx = NULL;
75790
75791 /* Transfer each section which specifies SHF_ALLOC */
75792 pr_debug("final section addresses:\n");
75793@@ -2844,16 +2920,45 @@ static int move_module(struct module *mod, struct load_info *info)
75794 if (!(shdr->sh_flags & SHF_ALLOC))
75795 continue;
75796
75797- if (shdr->sh_entsize & INIT_OFFSET_MASK)
75798- dest = mod->module_init
75799- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
75800- else
75801- dest = mod->module_core + shdr->sh_entsize;
75802+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
75803+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
75804+ dest = mod->module_init_rw
75805+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
75806+ else
75807+ dest = mod->module_init_rx
75808+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
75809+ } else {
75810+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
75811+ dest = mod->module_core_rw + shdr->sh_entsize;
75812+ else
75813+ dest = mod->module_core_rx + shdr->sh_entsize;
75814+ }
75815+
75816+ if (shdr->sh_type != SHT_NOBITS) {
75817+
75818+#ifdef CONFIG_PAX_KERNEXEC
75819+#ifdef CONFIG_X86_64
75820+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
75821+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
75822+#endif
75823+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
75824+ pax_open_kernel();
75825+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
75826+ pax_close_kernel();
75827+ } else
75828+#endif
75829
75830- if (shdr->sh_type != SHT_NOBITS)
75831 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
75832+ }
75833 /* Update sh_addr to point to copy in image. */
75834- shdr->sh_addr = (unsigned long)dest;
75835+
75836+#ifdef CONFIG_PAX_KERNEXEC
75837+ if (shdr->sh_flags & SHF_EXECINSTR)
75838+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
75839+ else
75840+#endif
75841+
75842+ shdr->sh_addr = (unsigned long)dest;
75843 pr_debug("\t0x%lx %s\n",
75844 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
75845 }
75846@@ -2908,12 +3013,12 @@ static void flush_module_icache(const struct module *mod)
75847 * Do it before processing of module parameters, so the module
75848 * can provide parameter accessor functions of its own.
75849 */
75850- if (mod->module_init)
75851- flush_icache_range((unsigned long)mod->module_init,
75852- (unsigned long)mod->module_init
75853- + mod->init_size);
75854- flush_icache_range((unsigned long)mod->module_core,
75855- (unsigned long)mod->module_core + mod->core_size);
75856+ if (mod->module_init_rx)
75857+ flush_icache_range((unsigned long)mod->module_init_rx,
75858+ (unsigned long)mod->module_init_rx
75859+ + mod->init_size_rx);
75860+ flush_icache_range((unsigned long)mod->module_core_rx,
75861+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
75862
75863 set_fs(old_fs);
75864 }
75865@@ -2983,8 +3088,10 @@ out:
75866 static void module_deallocate(struct module *mod, struct load_info *info)
75867 {
75868 percpu_modfree(mod);
75869- module_free(mod, mod->module_init);
75870- module_free(mod, mod->module_core);
75871+ module_free_exec(mod, mod->module_init_rx);
75872+ module_free_exec(mod, mod->module_core_rx);
75873+ module_free(mod, mod->module_init_rw);
75874+ module_free(mod, mod->module_core_rw);
75875 }
75876
75877 int __weak module_finalize(const Elf_Ehdr *hdr,
75878@@ -2997,7 +3104,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
75879 static int post_relocation(struct module *mod, const struct load_info *info)
75880 {
75881 /* Sort exception table now relocations are done. */
75882+ pax_open_kernel();
75883 sort_extable(mod->extable, mod->extable + mod->num_exentries);
75884+ pax_close_kernel();
75885
75886 /* Copy relocated percpu area over. */
75887 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
75888@@ -3051,16 +3160,16 @@ static int do_init_module(struct module *mod)
75889 MODULE_STATE_COMING, mod);
75890
75891 /* Set RO and NX regions for core */
75892- set_section_ro_nx(mod->module_core,
75893- mod->core_text_size,
75894- mod->core_ro_size,
75895- mod->core_size);
75896+ set_section_ro_nx(mod->module_core_rx,
75897+ mod->core_size_rx,
75898+ mod->core_size_rx,
75899+ mod->core_size_rx);
75900
75901 /* Set RO and NX regions for init */
75902- set_section_ro_nx(mod->module_init,
75903- mod->init_text_size,
75904- mod->init_ro_size,
75905- mod->init_size);
75906+ set_section_ro_nx(mod->module_init_rx,
75907+ mod->init_size_rx,
75908+ mod->init_size_rx,
75909+ mod->init_size_rx);
75910
75911 do_mod_ctors(mod);
75912 /* Start the module */
75913@@ -3122,11 +3231,12 @@ static int do_init_module(struct module *mod)
75914 mod->strtab = mod->core_strtab;
75915 #endif
75916 unset_module_init_ro_nx(mod);
75917- module_free(mod, mod->module_init);
75918- mod->module_init = NULL;
75919- mod->init_size = 0;
75920- mod->init_ro_size = 0;
75921- mod->init_text_size = 0;
75922+ module_free(mod, mod->module_init_rw);
75923+ module_free_exec(mod, mod->module_init_rx);
75924+ mod->module_init_rw = NULL;
75925+ mod->module_init_rx = NULL;
75926+ mod->init_size_rw = 0;
75927+ mod->init_size_rx = 0;
75928 mutex_unlock(&module_mutex);
75929 wake_up_all(&module_wq);
75930
75931@@ -3209,9 +3319,38 @@ again:
75932 if (err)
75933 goto free_unload;
75934
75935+ /* Now copy in args */
75936+ mod->args = strndup_user(uargs, ~0UL >> 1);
75937+ if (IS_ERR(mod->args)) {
75938+ err = PTR_ERR(mod->args);
75939+ goto free_unload;
75940+ }
75941+
75942 /* Set up MODINFO_ATTR fields */
75943 setup_modinfo(mod, info);
75944
75945+#ifdef CONFIG_GRKERNSEC_MODHARDEN
75946+ {
75947+ char *p, *p2;
75948+
75949+ if (strstr(mod->args, "grsec_modharden_netdev")) {
75950+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
75951+ err = -EPERM;
75952+ goto free_modinfo;
75953+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
75954+ p += sizeof("grsec_modharden_normal") - 1;
75955+ p2 = strstr(p, "_");
75956+ if (p2) {
75957+ *p2 = '\0';
75958+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
75959+ *p2 = '_';
75960+ }
75961+ err = -EPERM;
75962+ goto free_modinfo;
75963+ }
75964+ }
75965+#endif
75966+
75967 /* Fix up syms, so that st_value is a pointer to location. */
75968 err = simplify_symbols(mod, info);
75969 if (err < 0)
75970@@ -3227,13 +3366,6 @@ again:
75971
75972 flush_module_icache(mod);
75973
75974- /* Now copy in args */
75975- mod->args = strndup_user(uargs, ~0UL >> 1);
75976- if (IS_ERR(mod->args)) {
75977- err = PTR_ERR(mod->args);
75978- goto free_arch_cleanup;
75979- }
75980-
75981 dynamic_debug_setup(info->debug, info->num_debug);
75982
75983 mutex_lock(&module_mutex);
75984@@ -3278,11 +3410,10 @@ again:
75985 mutex_unlock(&module_mutex);
75986 dynamic_debug_remove(info->debug);
75987 synchronize_sched();
75988- kfree(mod->args);
75989- free_arch_cleanup:
75990 module_arch_cleanup(mod);
75991 free_modinfo:
75992 free_modinfo(mod);
75993+ kfree(mod->args);
75994 free_unload:
75995 module_unload_free(mod);
75996 unlink_mod:
75997@@ -3365,10 +3496,16 @@ static const char *get_ksymbol(struct module *mod,
75998 unsigned long nextval;
75999
76000 /* At worse, next value is at end of module */
76001- if (within_module_init(addr, mod))
76002- nextval = (unsigned long)mod->module_init+mod->init_text_size;
76003+ if (within_module_init_rx(addr, mod))
76004+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
76005+ else if (within_module_init_rw(addr, mod))
76006+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
76007+ else if (within_module_core_rx(addr, mod))
76008+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
76009+ else if (within_module_core_rw(addr, mod))
76010+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
76011 else
76012- nextval = (unsigned long)mod->module_core+mod->core_text_size;
76013+ return NULL;
76014
76015 /* Scan for closest preceding symbol, and next symbol. (ELF
76016 starts real symbols at 1). */
76017@@ -3621,7 +3758,7 @@ static int m_show(struct seq_file *m, void *p)
76018 return 0;
76019
76020 seq_printf(m, "%s %u",
76021- mod->name, mod->init_size + mod->core_size);
76022+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
76023 print_unload_info(m, mod);
76024
76025 /* Informative for users. */
76026@@ -3630,7 +3767,7 @@ static int m_show(struct seq_file *m, void *p)
76027 mod->state == MODULE_STATE_COMING ? "Loading":
76028 "Live");
76029 /* Used by oprofile and other similar tools. */
76030- seq_printf(m, " 0x%pK", mod->module_core);
76031+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
76032
76033 /* Taints info */
76034 if (mod->taints)
76035@@ -3666,7 +3803,17 @@ static const struct file_operations proc_modules_operations = {
76036
76037 static int __init proc_modules_init(void)
76038 {
76039+#ifndef CONFIG_GRKERNSEC_HIDESYM
76040+#ifdef CONFIG_GRKERNSEC_PROC_USER
76041+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
76042+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
76043+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
76044+#else
76045 proc_create("modules", 0, NULL, &proc_modules_operations);
76046+#endif
76047+#else
76048+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
76049+#endif
76050 return 0;
76051 }
76052 module_init(proc_modules_init);
76053@@ -3727,14 +3874,14 @@ struct module *__module_address(unsigned long addr)
76054 {
76055 struct module *mod;
76056
76057- if (addr < module_addr_min || addr > module_addr_max)
76058+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
76059+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
76060 return NULL;
76061
76062 list_for_each_entry_rcu(mod, &modules, list) {
76063 if (mod->state == MODULE_STATE_UNFORMED)
76064 continue;
76065- if (within_module_core(addr, mod)
76066- || within_module_init(addr, mod))
76067+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
76068 return mod;
76069 }
76070 return NULL;
76071@@ -3769,11 +3916,20 @@ bool is_module_text_address(unsigned long addr)
76072 */
76073 struct module *__module_text_address(unsigned long addr)
76074 {
76075- struct module *mod = __module_address(addr);
76076+ struct module *mod;
76077+
76078+#ifdef CONFIG_X86_32
76079+ addr = ktla_ktva(addr);
76080+#endif
76081+
76082+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
76083+ return NULL;
76084+
76085+ mod = __module_address(addr);
76086+
76087 if (mod) {
76088 /* Make sure it's within the text section. */
76089- if (!within(addr, mod->module_init, mod->init_text_size)
76090- && !within(addr, mod->module_core, mod->core_text_size))
76091+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
76092 mod = NULL;
76093 }
76094 return mod;
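
The module.c rework is the core of the patch: every module gets two allocations — module_core_rw/module_init_rw for writable data and module_core_rx/module_init_rx for executable text — and every place that sized, protected, freed, or range-checked the old single region is doubled up accordingly. The resulting fast-path address check keeps a min/max pair per region class, along the lines of this sketch (the bounds-variable names follow the patch; everything else is illustrative):

#include <stdio.h>

/* One bounds pair per region class, as in the patched module_addr_min_rw/
 * _rx globals; in the kernel these are updated under module_mutex. */
static unsigned long module_addr_min_rw = ~0UL, module_addr_max_rw;
static unsigned long module_addr_min_rx = ~0UL, module_addr_max_rx;

static void update_bounds(unsigned long *min, unsigned long *max,
                          unsigned long addr, unsigned long size)
{
    if (addr < *min)
        *min = addr;
    if (addr + size > *max)
        *max = addr + size;
}

/* Mirrors the patched __module_address() prefilter: an address must fall
 * inside at least one of the two tracked windows before the (slow) module
 * list walk is worth doing. */
static int maybe_module_address(unsigned long addr)
{
    if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
        (addr < module_addr_min_rw || addr > module_addr_max_rw))
        return 0;
    return 1;
}

int main(void)
{
    update_bounds(&module_addr_min_rw, &module_addr_max_rw, 0x9000, 0x1000);
    update_bounds(&module_addr_min_rx, &module_addr_max_rx, 0x5000, 0x1000);
    printf("%d %d %d\n", maybe_module_address(0x5800),   /* rx hit  -> 1 */
           maybe_module_address(0x9800),                  /* rw hit  -> 1 */
           maybe_module_address(0x2000));                 /* miss    -> 0 */
    return 0;
}

Splitting the regions is what lets the RX side stay permanently non-writable while the RW side stays permanently non-executable, instead of the stock kernel's text/ro/size offsets within one mapping.
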
76095diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
76096index 7e3443f..b2a1e6b 100644
76097--- a/kernel/mutex-debug.c
76098+++ b/kernel/mutex-debug.c
76099@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
76100 }
76101
76102 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
76103- struct thread_info *ti)
76104+ struct task_struct *task)
76105 {
76106 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
76107
76108 /* Mark the current thread as blocked on the lock: */
76109- ti->task->blocked_on = waiter;
76110+ task->blocked_on = waiter;
76111 }
76112
76113 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
76114- struct thread_info *ti)
76115+ struct task_struct *task)
76116 {
76117 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
76118- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
76119- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
76120- ti->task->blocked_on = NULL;
76121+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
76122+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
76123+ task->blocked_on = NULL;
76124
76125 list_del_init(&waiter->list);
76126 waiter->task = NULL;
76127diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
76128index 0799fd3..d06ae3b 100644
76129--- a/kernel/mutex-debug.h
76130+++ b/kernel/mutex-debug.h
76131@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
76132 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
76133 extern void debug_mutex_add_waiter(struct mutex *lock,
76134 struct mutex_waiter *waiter,
76135- struct thread_info *ti);
76136+ struct task_struct *task);
76137 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
76138- struct thread_info *ti);
76139+ struct task_struct *task);
76140 extern void debug_mutex_unlock(struct mutex *lock);
76141 extern void debug_mutex_init(struct mutex *lock, const char *name,
76142 struct lock_class_key *key);
76143diff --git a/kernel/mutex.c b/kernel/mutex.c
76144index a307cc9..27fd2e9 100644
76145--- a/kernel/mutex.c
76146+++ b/kernel/mutex.c
76147@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
76148 spin_lock_mutex(&lock->wait_lock, flags);
76149
76150 debug_mutex_lock_common(lock, &waiter);
76151- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
76152+ debug_mutex_add_waiter(lock, &waiter, task);
76153
76154 /* add waiting tasks to the end of the waitqueue (FIFO): */
76155 list_add_tail(&waiter.list, &lock->wait_list);
76156@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
76157 * TASK_UNINTERRUPTIBLE case.)
76158 */
76159 if (unlikely(signal_pending_state(state, task))) {
76160- mutex_remove_waiter(lock, &waiter,
76161- task_thread_info(task));
76162+ mutex_remove_waiter(lock, &waiter, task);
76163 mutex_release(&lock->dep_map, 1, ip);
76164 spin_unlock_mutex(&lock->wait_lock, flags);
76165
76166@@ -247,7 +246,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
76167 done:
76168 lock_acquired(&lock->dep_map, ip);
76169 /* got the lock - rejoice! */
76170- mutex_remove_waiter(lock, &waiter, current_thread_info());
76171+ mutex_remove_waiter(lock, &waiter, task);
76172 mutex_set_owner(lock);
76173
76174 /* set it to 0 if there are no waiters left: */
76175diff --git a/kernel/notifier.c b/kernel/notifier.c
76176index 2d5cc4c..d9ea600 100644
76177--- a/kernel/notifier.c
76178+++ b/kernel/notifier.c
76179@@ -5,6 +5,7 @@
76180 #include <linux/rcupdate.h>
76181 #include <linux/vmalloc.h>
76182 #include <linux/reboot.h>
76183+#include <linux/mm.h>
76184
76185 /*
76186 * Notifier list for kernel code which wants to be called
76187@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
76188 while ((*nl) != NULL) {
76189 if (n->priority > (*nl)->priority)
76190 break;
76191- nl = &((*nl)->next);
76192+ nl = (struct notifier_block **)&((*nl)->next);
76193 }
76194- n->next = *nl;
76195+ pax_open_kernel();
76196+ *(const void **)&n->next = *nl;
76197 rcu_assign_pointer(*nl, n);
76198+ pax_close_kernel();
76199 return 0;
76200 }
76201
76202@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
76203 return 0;
76204 if (n->priority > (*nl)->priority)
76205 break;
76206- nl = &((*nl)->next);
76207+ nl = (struct notifier_block **)&((*nl)->next);
76208 }
76209- n->next = *nl;
76210+ pax_open_kernel();
76211+ *(const void **)&n->next = *nl;
76212 rcu_assign_pointer(*nl, n);
76213+ pax_close_kernel();
76214 return 0;
76215 }
76216
76217@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
76218 {
76219 while ((*nl) != NULL) {
76220 if ((*nl) == n) {
76221+ pax_open_kernel();
76222 rcu_assign_pointer(*nl, n->next);
76223+ pax_close_kernel();
76224 return 0;
76225 }
76226- nl = &((*nl)->next);
76227+ nl = (struct notifier_block **)&((*nl)->next);
76228 }
76229 return -ENOENT;
76230 }
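
With notifier_block chains moved into read-only memory, the patch can no longer assign n->next directly; it writes through a recast lvalue inside a pax_open_kernel()/pax_close_kernel() window. The *(const void **)&n->next = *nl; idiom shown in isolation — userspace here, so no write protection is actually involved:

#include <stdio.h>

struct notifier_block {
    int (*notifier_call)(void);
    struct notifier_block *next;
};

int main(void)
{
    struct notifier_block a = { 0 }, b = { 0 };

    /* Equivalent of the patched chain insert: store the next pointer
     * through a cast, as done between pax_open/close_kernel() when the
     * structure itself lives in a write-protected section. */
    *(const void **)&b.next = &a;

    printf("b.next == &a? %d\n", b.next == &a);
    return 0;
}
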
76231diff --git a/kernel/panic.c b/kernel/panic.c
76232index e1b2822..5edc1d9 100644
76233--- a/kernel/panic.c
76234+++ b/kernel/panic.c
76235@@ -410,7 +410,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
76236 const char *board;
76237
76238 printk(KERN_WARNING "------------[ cut here ]------------\n");
76239- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
76240+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
76241 board = dmi_get_system_info(DMI_PRODUCT_NAME);
76242 if (board)
76243 printk(KERN_WARNING "Hardware name: %s\n", board);
76244@@ -465,7 +465,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
76245 */
76246 void __stack_chk_fail(void)
76247 {
76248- panic("stack-protector: Kernel stack is corrupted in: %p\n",
76249+ dump_stack();
76250+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
76251 __builtin_return_address(0));
76252 }
76253 EXPORT_SYMBOL(__stack_chk_fail);
76254diff --git a/kernel/pid.c b/kernel/pid.c
76255index f2c6a68..4922d97 100644
76256--- a/kernel/pid.c
76257+++ b/kernel/pid.c
76258@@ -33,6 +33,7 @@
76259 #include <linux/rculist.h>
76260 #include <linux/bootmem.h>
76261 #include <linux/hash.h>
76262+#include <linux/security.h>
76263 #include <linux/pid_namespace.h>
76264 #include <linux/init_task.h>
76265 #include <linux/syscalls.h>
76266@@ -46,7 +47,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
76267
76268 int pid_max = PID_MAX_DEFAULT;
76269
76270-#define RESERVED_PIDS 300
76271+#define RESERVED_PIDS 500
76272
76273 int pid_max_min = RESERVED_PIDS + 1;
76274 int pid_max_max = PID_MAX_LIMIT;
76275@@ -441,10 +442,18 @@ EXPORT_SYMBOL(pid_task);
76276 */
76277 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
76278 {
76279+ struct task_struct *task;
76280+
76281 rcu_lockdep_assert(rcu_read_lock_held(),
76282 "find_task_by_pid_ns() needs rcu_read_lock()"
76283 " protection");
76284- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
76285+
76286+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
76287+
76288+ if (gr_pid_is_chrooted(task))
76289+ return NULL;
76290+
76291+ return task;
76292 }
76293
76294 struct task_struct *find_task_by_vpid(pid_t vnr)
76295@@ -452,6 +461,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
76296 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
76297 }
76298
76299+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
76300+{
76301+ rcu_lockdep_assert(rcu_read_lock_held(),
76302+ "find_task_by_pid_ns() needs rcu_read_lock()"
76303+ " protection");
76304+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
76305+}
76306+
76307 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
76308 {
76309 struct pid *pid;
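
find_task_by_pid_ns() now post-filters its result: a task outside the caller's chroot (per gr_pid_is_chrooted()) is reported as nonexistent, while the new find_task_by_vpid_unrestricted() keeps the raw behaviour for callers that must see everything. The wrapper-plus-predicate shape in miniature — the predicate here is a placeholder for gr_pid_is_chrooted():

#include <stdio.h>
#include <stddef.h>

struct task { int pid; int chrooted; };

static struct task tasks[] = { { 1, 0 }, { 100, 1 } };

static struct task *find_task_raw(int pid)          /* the _unrestricted path */
{
    for (size_t i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++)
        if (tasks[i].pid == pid)
            return &tasks[i];
    return NULL;
}

static struct task *find_task(int pid)              /* the filtered lookup */
{
    struct task *t = find_task_raw(pid);

    if (t && t->chrooted)   /* stand-in for gr_pid_is_chrooted(t) */
        return NULL;        /* hidden from this caller */
    return t;
}

int main(void)
{
    printf("pid 1:   %s\n", find_task(1)   ? "visible" : "hidden");
    printf("pid 100: %s\n", find_task(100) ? "visible" : "hidden");
    return 0;
}
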
76310diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
76311index c1c3dc1..bbeaf31 100644
76312--- a/kernel/pid_namespace.c
76313+++ b/kernel/pid_namespace.c
76314@@ -248,7 +248,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
76315 void __user *buffer, size_t *lenp, loff_t *ppos)
76316 {
76317 struct pid_namespace *pid_ns = task_active_pid_ns(current);
76318- struct ctl_table tmp = *table;
76319+ ctl_table_no_const tmp = *table;
76320
76321 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
76322 return -EPERM;
76323diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
76324index 942ca27..111e609 100644
76325--- a/kernel/posix-cpu-timers.c
76326+++ b/kernel/posix-cpu-timers.c
76327@@ -1576,14 +1576,14 @@ struct k_clock clock_posix_cpu = {
76328
76329 static __init int init_posix_cpu_timers(void)
76330 {
76331- struct k_clock process = {
76332+ static struct k_clock process = {
76333 .clock_getres = process_cpu_clock_getres,
76334 .clock_get = process_cpu_clock_get,
76335 .timer_create = process_cpu_timer_create,
76336 .nsleep = process_cpu_nsleep,
76337 .nsleep_restart = process_cpu_nsleep_restart,
76338 };
76339- struct k_clock thread = {
76340+ static struct k_clock thread = {
76341 .clock_getres = thread_cpu_clock_getres,
76342 .clock_get = thread_cpu_clock_get,
76343 .timer_create = thread_cpu_timer_create,
76344diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
76345index e885be1..380fe76 100644
76346--- a/kernel/posix-timers.c
76347+++ b/kernel/posix-timers.c
76348@@ -43,6 +43,7 @@
76349 #include <linux/idr.h>
76350 #include <linux/posix-clock.h>
76351 #include <linux/posix-timers.h>
76352+#include <linux/grsecurity.h>
76353 #include <linux/syscalls.h>
76354 #include <linux/wait.h>
76355 #include <linux/workqueue.h>
76356@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
76357 * which we beg off on and pass to do_sys_settimeofday().
76358 */
76359
76360-static struct k_clock posix_clocks[MAX_CLOCKS];
76361+static struct k_clock *posix_clocks[MAX_CLOCKS];
76362
76363 /*
76364 * These ones are defined below.
76365@@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
76366 */
76367 static __init int init_posix_timers(void)
76368 {
76369- struct k_clock clock_realtime = {
76370+ static struct k_clock clock_realtime = {
76371 .clock_getres = hrtimer_get_res,
76372 .clock_get = posix_clock_realtime_get,
76373 .clock_set = posix_clock_realtime_set,
76374@@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
76375 .timer_get = common_timer_get,
76376 .timer_del = common_timer_del,
76377 };
76378- struct k_clock clock_monotonic = {
76379+ static struct k_clock clock_monotonic = {
76380 .clock_getres = hrtimer_get_res,
76381 .clock_get = posix_ktime_get_ts,
76382 .nsleep = common_nsleep,
76383@@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
76384 .timer_get = common_timer_get,
76385 .timer_del = common_timer_del,
76386 };
76387- struct k_clock clock_monotonic_raw = {
76388+ static struct k_clock clock_monotonic_raw = {
76389 .clock_getres = hrtimer_get_res,
76390 .clock_get = posix_get_monotonic_raw,
76391 };
76392- struct k_clock clock_realtime_coarse = {
76393+ static struct k_clock clock_realtime_coarse = {
76394 .clock_getres = posix_get_coarse_res,
76395 .clock_get = posix_get_realtime_coarse,
76396 };
76397- struct k_clock clock_monotonic_coarse = {
76398+ static struct k_clock clock_monotonic_coarse = {
76399 .clock_getres = posix_get_coarse_res,
76400 .clock_get = posix_get_monotonic_coarse,
76401 };
76402- struct k_clock clock_boottime = {
76403+ static struct k_clock clock_boottime = {
76404 .clock_getres = hrtimer_get_res,
76405 .clock_get = posix_get_boottime,
76406 .nsleep = common_nsleep,
76407@@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
76408 return;
76409 }
76410
76411- posix_clocks[clock_id] = *new_clock;
76412+ posix_clocks[clock_id] = new_clock;
76413 }
76414 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
76415
76416@@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
76417 return (id & CLOCKFD_MASK) == CLOCKFD ?
76418 &clock_posix_dynamic : &clock_posix_cpu;
76419
76420- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
76421+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
76422 return NULL;
76423- return &posix_clocks[id];
76424+ return posix_clocks[id];
76425 }
76426
76427 static int common_timer_create(struct k_itimer *new_timer)
76428@@ -966,6 +967,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
76429 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
76430 return -EFAULT;
76431
76432+ /* only the CLOCK_REALTIME clock can be set, all other clocks
76433+ have their clock_set fptr set to a nosettime dummy function
76434+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
76435+ call common_clock_set, which calls do_sys_settimeofday, which
76436+ we hook
76437+ */
76438+
76439 return kc->clock_set(which_clock, &new_tp);
76440 }
76441
76442diff --git a/kernel/power/process.c b/kernel/power/process.c
76443index d5a258b..4271191 100644
76444--- a/kernel/power/process.c
76445+++ b/kernel/power/process.c
76446@@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
76447 u64 elapsed_csecs64;
76448 unsigned int elapsed_csecs;
76449 bool wakeup = false;
76450+ bool timedout = false;
76451
76452 do_gettimeofday(&start);
76453
76454@@ -43,13 +44,20 @@ static int try_to_freeze_tasks(bool user_only)
76455
76456 while (true) {
76457 todo = 0;
76458+ if (time_after(jiffies, end_time))
76459+ timedout = true;
76460 read_lock(&tasklist_lock);
76461 do_each_thread(g, p) {
76462 if (p == current || !freeze_task(p))
76463 continue;
76464
76465- if (!freezer_should_skip(p))
76466+ if (!freezer_should_skip(p)) {
76467 todo++;
76468+ if (timedout) {
76469+ printk(KERN_ERR "Task refusing to freeze:\n");
76470+ sched_show_task(p);
76471+ }
76472+ }
76473 } while_each_thread(g, p);
76474 read_unlock(&tasklist_lock);
76475
76476@@ -58,7 +66,7 @@ static int try_to_freeze_tasks(bool user_only)
76477 todo += wq_busy;
76478 }
76479
76480- if (!todo || time_after(jiffies, end_time))
76481+ if (!todo || timedout)
76482 break;
76483
76484 if (pm_wakeup_pending()) {
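
The timedout flag above is latched once per scan, so the final pass both names every task still refusing to freeze and then breaks out; previously the deadline was only tested at the break, so the offenders were never reported. A userspace sketch of the pattern, with report_task() standing in for sched_show_task():

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static void report_task(int id)
{
	fprintf(stderr, "task %d refusing to stop\n", id);
}

/* tasks[i] != 0 means task i is still running; returns true on success. */
static bool stop_all(int *tasks, int n, time_t deadline)
{
	for (;;) {
		bool timedout = time(NULL) > deadline;	/* latched per pass */
		int todo = 0;

		for (int i = 0; i < n; i++) {
			if (!tasks[i])
				continue;
			todo++;
			if (timedout)	/* final pass: name each holdout */
				report_task(i);
		}
		if (!todo || timedout)
			return todo == 0;
		/* the real loop sleeps briefly here before rescanning */
	}
}
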
76485diff --git a/kernel/printk.c b/kernel/printk.c
76486index 267ce78..2487112 100644
76487--- a/kernel/printk.c
76488+++ b/kernel/printk.c
76489@@ -609,11 +609,17 @@ static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
76490 return ret;
76491 }
76492
76493+static int check_syslog_permissions(int type, bool from_file);
76494+
76495 static int devkmsg_open(struct inode *inode, struct file *file)
76496 {
76497 struct devkmsg_user *user;
76498 int err;
76499
76500+ err = check_syslog_permissions(SYSLOG_ACTION_OPEN, SYSLOG_FROM_FILE);
76501+ if (err)
76502+ return err;
76503+
76504 /* write-only does not need any file context */
76505 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
76506 return 0;
76507@@ -822,7 +828,7 @@ static int syslog_action_restricted(int type)
76508 if (dmesg_restrict)
76509 return 1;
76510 /* Unless restricted, we allow "read all" and "get buffer size" for everybody */
76511- return type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER;
76512+ return type != SYSLOG_ACTION_OPEN && type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER;
76513 }
76514
76515 static int check_syslog_permissions(int type, bool from_file)
76516@@ -834,6 +840,11 @@ static int check_syslog_permissions(int type, bool from_file)
76517 if (from_file && type != SYSLOG_ACTION_OPEN)
76518 return 0;
76519
76520+#ifdef CONFIG_GRKERNSEC_DMESG
76521+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
76522+ return -EPERM;
76523+#endif
76524+
76525 if (syslog_action_restricted(type)) {
76526 if (capable(CAP_SYSLOG))
76527 return 0;
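
Routing devkmsg_open() through check_syslog_permissions() with SYSLOG_ACTION_OPEN closes the pre-opened-fd loophole: a reader must pass the policy check at open time, not merely at read time, and under CONFIG_GRKERNSEC_DMESG that check demands CAP_SYSLOG. A simplified sketch of the open-time gate (has_cap() is an invented stand-in for capable()):

#include <errno.h>
#include <stdbool.h>

enum { ACT_OPEN, ACT_READ_ALL, ACT_SIZE_BUFFER };

static bool restrict_dmesg = true;		/* like grsec_enable_dmesg */

static bool has_cap(void) { return false; }	/* pretend unprivileged */

static int check_log_permission(int action)
{
	(void)action;
	if (restrict_dmesg && !has_cap())
		return -EPERM;
	return 0;
}

static int devkmsg_open_like(void)
{
	/* Gate before allocating any per-open state, exactly as the
	 * patched devkmsg_open() does. */
	int err = check_log_permission(ACT_OPEN);
	if (err)
		return err;
	/* ... set up the reader ... */
	return 0;
}
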
76528diff --git a/kernel/profile.c b/kernel/profile.c
76529index 1f39181..86093471 100644
76530--- a/kernel/profile.c
76531+++ b/kernel/profile.c
76532@@ -40,7 +40,7 @@ struct profile_hit {
76533 /* Oprofile timer tick hook */
76534 static int (*timer_hook)(struct pt_regs *) __read_mostly;
76535
76536-static atomic_t *prof_buffer;
76537+static atomic_unchecked_t *prof_buffer;
76538 static unsigned long prof_len, prof_shift;
76539
76540 int prof_on __read_mostly;
76541@@ -282,7 +282,7 @@ static void profile_flip_buffers(void)
76542 hits[i].pc = 0;
76543 continue;
76544 }
76545- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
76546+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
76547 hits[i].hits = hits[i].pc = 0;
76548 }
76549 }
76550@@ -343,9 +343,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
76551 * Add the current hit(s) and flush the write-queue out
76552 * to the global buffer:
76553 */
76554- atomic_add(nr_hits, &prof_buffer[pc]);
76555+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
76556 for (i = 0; i < NR_PROFILE_HIT; ++i) {
76557- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
76558+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
76559 hits[i].pc = hits[i].hits = 0;
76560 }
76561 out:
76562@@ -420,7 +420,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
76563 {
76564 unsigned long pc;
76565 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
76566- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
76567+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
76568 }
76569 #endif /* !CONFIG_SMP */
76570
76571@@ -518,7 +518,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
76572 return -EFAULT;
76573 buf++; p++; count--; read++;
76574 }
76575- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
76576+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
76577 if (copy_to_user(buf, (void *)pnt, count))
76578 return -EFAULT;
76579 read += count;
76580@@ -549,7 +549,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
76581 }
76582 #endif
76583 profile_discard_flip_buffers();
76584- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
76585+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
76586 return count;
76587 }
76588
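
Every counter converted in this file is a statistics counter whose wrap-around is harmless. Under the PaX REFCOUNT hardening this patch assumes, plain atomic_t operations detect overflow (to stop reference-count wraps and the use-after-free bugs they enable), so counters that may legitimately overflow are opted out via the _unchecked variants. A conceptual sketch with invented minimal types:

#include <limits.h>
#include <stdio.h>

typedef struct { volatile int counter; } atomic_checked_t;
/* unsigned, so wrap-around is well-defined in this sketch */
typedef struct { volatile unsigned int counter; } atomic_unchecked_t;

/* Checked increment: refuses to wrap, which is what reference counts
 * need (a wrapped refcount frees an object that is still in use). The
 * real implementation traps and kills the offender instead. */
static void atomic_inc_checked(atomic_checked_t *v)
{
	if (v->counter == INT_MAX) {
		fprintf(stderr, "refcount overflow detected\n");
		return;
	}
	v->counter++;
}

/* Unchecked increment: ordinary wrap-around semantics, fine for pure
 * statistics such as the prof_buffer hit counts above. */
static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	v->counter++;
}
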
76589diff --git a/kernel/ptrace.c b/kernel/ptrace.c
76590index 6cbeaae..cfe7ff0 100644
76591--- a/kernel/ptrace.c
76592+++ b/kernel/ptrace.c
76593@@ -324,7 +324,7 @@ static int ptrace_attach(struct task_struct *task, long request,
76594 if (seize)
76595 flags |= PT_SEIZED;
76596 rcu_read_lock();
76597- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
76598+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
76599 flags |= PT_PTRACE_CAP;
76600 rcu_read_unlock();
76601 task->ptrace = flags;
76602@@ -535,7 +535,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
76603 break;
76604 return -EIO;
76605 }
76606- if (copy_to_user(dst, buf, retval))
76607+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
76608 return -EFAULT;
76609 copied += retval;
76610 src += retval;
76611@@ -720,7 +720,7 @@ int ptrace_request(struct task_struct *child, long request,
76612 bool seized = child->ptrace & PT_SEIZED;
76613 int ret = -EIO;
76614 siginfo_t siginfo, *si;
76615- void __user *datavp = (void __user *) data;
76616+ void __user *datavp = (__force void __user *) data;
76617 unsigned long __user *datalp = datavp;
76618 unsigned long flags;
76619
76620@@ -922,14 +922,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
76621 goto out;
76622 }
76623
76624+ if (gr_handle_ptrace(child, request)) {
76625+ ret = -EPERM;
76626+ goto out_put_task_struct;
76627+ }
76628+
76629 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
76630 ret = ptrace_attach(child, request, addr, data);
76631 /*
76632 * Some architectures need to do book-keeping after
76633 * a ptrace attach.
76634 */
76635- if (!ret)
76636+ if (!ret) {
76637 arch_ptrace_attach(child);
76638+ gr_audit_ptrace(child);
76639+ }
76640 goto out_put_task_struct;
76641 }
76642
76643@@ -957,7 +964,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
76644 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
76645 if (copied != sizeof(tmp))
76646 return -EIO;
76647- return put_user(tmp, (unsigned long __user *)data);
76648+ return put_user(tmp, (__force unsigned long __user *)data);
76649 }
76650
76651 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
76652@@ -1051,7 +1058,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
76653 }
76654
76655 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
76656- compat_long_t addr, compat_long_t data)
76657+ compat_ulong_t addr, compat_ulong_t data)
76658 {
76659 struct task_struct *child;
76660 long ret;
76661@@ -1067,14 +1074,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
76662 goto out;
76663 }
76664
76665+ if (gr_handle_ptrace(child, request)) {
76666+ ret = -EPERM;
76667+ goto out_put_task_struct;
76668+ }
76669+
76670 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
76671 ret = ptrace_attach(child, request, addr, data);
76672 /*
76673 * Some architectures need to do book-keeping after
76674 * a ptrace attach.
76675 */
76676- if (!ret)
76677+ if (!ret) {
76678 arch_ptrace_attach(child);
76679+ gr_audit_ptrace(child);
76680+ }
76681 goto out_put_task_struct;
76682 }
76683
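
Two defensive themes run through the ptrace changes above: never pass copy_to_user() a length that could exceed the local buffer, and take addr/data as unsigned compat types so they are zero- rather than sign-extended. A userspace sketch of the clamp-check (copy_out() is an invented stand-in for copy_to_user()):

#include <errno.h>
#include <string.h>

/* Invented stand-in for copy_to_user(): returns 0 on success. */
static int copy_out(char *dst, const char *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static int read_chunk(char *dst, const char *src, size_t want)
{
	char buf[128];
	size_t got = want < sizeof(buf) ? want : sizeof(buf);

	memcpy(buf, src, got);
	/* Belt-and-suspenders: if 'got' were ever miscomputed larger than
	 * the stack buffer, refuse rather than leak adjacent stack memory.
	 * This mirrors the added check in ptrace_readdata(). */
	if (got > sizeof(buf) || copy_out(dst, buf, got))
		return -EFAULT;
	return (int)got;
}
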
76684diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
76685index e7dce58..ad0d7b7 100644
76686--- a/kernel/rcutiny.c
76687+++ b/kernel/rcutiny.c
76688@@ -46,7 +46,7 @@
76689 struct rcu_ctrlblk;
76690 static void invoke_rcu_callbacks(void);
76691 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
76692-static void rcu_process_callbacks(struct softirq_action *unused);
76693+static void rcu_process_callbacks(void);
76694 static void __call_rcu(struct rcu_head *head,
76695 void (*func)(struct rcu_head *rcu),
76696 struct rcu_ctrlblk *rcp);
76697@@ -310,7 +310,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
76698 rcu_is_callbacks_kthread()));
76699 }
76700
76701-static void rcu_process_callbacks(struct softirq_action *unused)
76702+static void rcu_process_callbacks(void)
76703 {
76704 __rcu_process_callbacks(&rcu_sched_ctrlblk);
76705 __rcu_process_callbacks(&rcu_bh_ctrlblk);
76706diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
76707index f85016a..91cb03b 100644
76708--- a/kernel/rcutiny_plugin.h
76709+++ b/kernel/rcutiny_plugin.h
76710@@ -896,7 +896,7 @@ static int rcu_kthread(void *arg)
76711 have_rcu_kthread_work = morework;
76712 local_irq_restore(flags);
76713 if (work)
76714- rcu_process_callbacks(NULL);
76715+ rcu_process_callbacks();
76716 schedule_timeout_interruptible(1); /* Leave CPU for others. */
76717 }
76718
76719diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
76720index 31dea01..ad91ffb 100644
76721--- a/kernel/rcutorture.c
76722+++ b/kernel/rcutorture.c
76723@@ -163,12 +163,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
76724 { 0 };
76725 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
76726 { 0 };
76727-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
76728-static atomic_t n_rcu_torture_alloc;
76729-static atomic_t n_rcu_torture_alloc_fail;
76730-static atomic_t n_rcu_torture_free;
76731-static atomic_t n_rcu_torture_mberror;
76732-static atomic_t n_rcu_torture_error;
76733+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
76734+static atomic_unchecked_t n_rcu_torture_alloc;
76735+static atomic_unchecked_t n_rcu_torture_alloc_fail;
76736+static atomic_unchecked_t n_rcu_torture_free;
76737+static atomic_unchecked_t n_rcu_torture_mberror;
76738+static atomic_unchecked_t n_rcu_torture_error;
76739 static long n_rcu_torture_barrier_error;
76740 static long n_rcu_torture_boost_ktrerror;
76741 static long n_rcu_torture_boost_rterror;
76742@@ -272,11 +272,11 @@ rcu_torture_alloc(void)
76743
76744 spin_lock_bh(&rcu_torture_lock);
76745 if (list_empty(&rcu_torture_freelist)) {
76746- atomic_inc(&n_rcu_torture_alloc_fail);
76747+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
76748 spin_unlock_bh(&rcu_torture_lock);
76749 return NULL;
76750 }
76751- atomic_inc(&n_rcu_torture_alloc);
76752+ atomic_inc_unchecked(&n_rcu_torture_alloc);
76753 p = rcu_torture_freelist.next;
76754 list_del_init(p);
76755 spin_unlock_bh(&rcu_torture_lock);
76756@@ -289,7 +289,7 @@ rcu_torture_alloc(void)
76757 static void
76758 rcu_torture_free(struct rcu_torture *p)
76759 {
76760- atomic_inc(&n_rcu_torture_free);
76761+ atomic_inc_unchecked(&n_rcu_torture_free);
76762 spin_lock_bh(&rcu_torture_lock);
76763 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
76764 spin_unlock_bh(&rcu_torture_lock);
76765@@ -409,7 +409,7 @@ rcu_torture_cb(struct rcu_head *p)
76766 i = rp->rtort_pipe_count;
76767 if (i > RCU_TORTURE_PIPE_LEN)
76768 i = RCU_TORTURE_PIPE_LEN;
76769- atomic_inc(&rcu_torture_wcount[i]);
76770+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
76771 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
76772 rp->rtort_mbtest = 0;
76773 rcu_torture_free(rp);
76774@@ -457,7 +457,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
76775 i = rp->rtort_pipe_count;
76776 if (i > RCU_TORTURE_PIPE_LEN)
76777 i = RCU_TORTURE_PIPE_LEN;
76778- atomic_inc(&rcu_torture_wcount[i]);
76779+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
76780 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
76781 rp->rtort_mbtest = 0;
76782 list_del(&rp->rtort_free);
76783@@ -975,7 +975,7 @@ rcu_torture_writer(void *arg)
76784 i = old_rp->rtort_pipe_count;
76785 if (i > RCU_TORTURE_PIPE_LEN)
76786 i = RCU_TORTURE_PIPE_LEN;
76787- atomic_inc(&rcu_torture_wcount[i]);
76788+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
76789 old_rp->rtort_pipe_count++;
76790 cur_ops->deferred_free(old_rp);
76791 }
76792@@ -1060,7 +1060,7 @@ static void rcu_torture_timer(unsigned long unused)
76793 }
76794 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
76795 if (p->rtort_mbtest == 0)
76796- atomic_inc(&n_rcu_torture_mberror);
76797+ atomic_inc_unchecked(&n_rcu_torture_mberror);
76798 spin_lock(&rand_lock);
76799 cur_ops->read_delay(&rand);
76800 n_rcu_torture_timers++;
76801@@ -1124,7 +1124,7 @@ rcu_torture_reader(void *arg)
76802 }
76803 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
76804 if (p->rtort_mbtest == 0)
76805- atomic_inc(&n_rcu_torture_mberror);
76806+ atomic_inc_unchecked(&n_rcu_torture_mberror);
76807 cur_ops->read_delay(&rand);
76808 preempt_disable();
76809 pipe_count = p->rtort_pipe_count;
76810@@ -1183,11 +1183,11 @@ rcu_torture_printk(char *page)
76811 rcu_torture_current,
76812 rcu_torture_current_version,
76813 list_empty(&rcu_torture_freelist),
76814- atomic_read(&n_rcu_torture_alloc),
76815- atomic_read(&n_rcu_torture_alloc_fail),
76816- atomic_read(&n_rcu_torture_free));
76817+ atomic_read_unchecked(&n_rcu_torture_alloc),
76818+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
76819+ atomic_read_unchecked(&n_rcu_torture_free));
76820 cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
76821- atomic_read(&n_rcu_torture_mberror),
76822+ atomic_read_unchecked(&n_rcu_torture_mberror),
76823 n_rcu_torture_boost_ktrerror,
76824 n_rcu_torture_boost_rterror);
76825 cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
76826@@ -1206,14 +1206,14 @@ rcu_torture_printk(char *page)
76827 n_barrier_attempts,
76828 n_rcu_torture_barrier_error);
76829 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
76830- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
76831+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
76832 n_rcu_torture_barrier_error != 0 ||
76833 n_rcu_torture_boost_ktrerror != 0 ||
76834 n_rcu_torture_boost_rterror != 0 ||
76835 n_rcu_torture_boost_failure != 0 ||
76836 i > 1) {
76837 cnt += sprintf(&page[cnt], "!!! ");
76838- atomic_inc(&n_rcu_torture_error);
76839+ atomic_inc_unchecked(&n_rcu_torture_error);
76840 WARN_ON_ONCE(1);
76841 }
76842 cnt += sprintf(&page[cnt], "Reader Pipe: ");
76843@@ -1227,7 +1227,7 @@ rcu_torture_printk(char *page)
76844 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
76845 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
76846 cnt += sprintf(&page[cnt], " %d",
76847- atomic_read(&rcu_torture_wcount[i]));
76848+ atomic_read_unchecked(&rcu_torture_wcount[i]));
76849 }
76850 cnt += sprintf(&page[cnt], "\n");
76851 if (cur_ops->stats)
76852@@ -1920,7 +1920,7 @@ rcu_torture_cleanup(void)
76853
76854 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
76855
76856- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
76857+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
76858 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
76859 else if (n_online_successes != n_online_attempts ||
76860 n_offline_successes != n_offline_attempts)
76861@@ -1989,18 +1989,18 @@ rcu_torture_init(void)
76862
76863 rcu_torture_current = NULL;
76864 rcu_torture_current_version = 0;
76865- atomic_set(&n_rcu_torture_alloc, 0);
76866- atomic_set(&n_rcu_torture_alloc_fail, 0);
76867- atomic_set(&n_rcu_torture_free, 0);
76868- atomic_set(&n_rcu_torture_mberror, 0);
76869- atomic_set(&n_rcu_torture_error, 0);
76870+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
76871+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
76872+ atomic_set_unchecked(&n_rcu_torture_free, 0);
76873+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
76874+ atomic_set_unchecked(&n_rcu_torture_error, 0);
76875 n_rcu_torture_barrier_error = 0;
76876 n_rcu_torture_boost_ktrerror = 0;
76877 n_rcu_torture_boost_rterror = 0;
76878 n_rcu_torture_boost_failure = 0;
76879 n_rcu_torture_boosts = 0;
76880 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
76881- atomic_set(&rcu_torture_wcount[i], 0);
76882+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
76883 for_each_possible_cpu(cpu) {
76884 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
76885 per_cpu(rcu_torture_count, cpu)[i] = 0;
76886diff --git a/kernel/rcutree.c b/kernel/rcutree.c
76887index e441b77..dd54f17 100644
76888--- a/kernel/rcutree.c
76889+++ b/kernel/rcutree.c
76890@@ -349,9 +349,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
76891 rcu_prepare_for_idle(smp_processor_id());
76892 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
76893 smp_mb__before_atomic_inc(); /* See above. */
76894- atomic_inc(&rdtp->dynticks);
76895+ atomic_inc_unchecked(&rdtp->dynticks);
76896 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
76897- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
76898+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
76899
76900 /*
76901 * It is illegal to enter an extended quiescent state while
76902@@ -487,10 +487,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
76903 int user)
76904 {
76905 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
76906- atomic_inc(&rdtp->dynticks);
76907+ atomic_inc_unchecked(&rdtp->dynticks);
76908 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
76909 smp_mb__after_atomic_inc(); /* See above. */
76910- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
76911+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
76912 rcu_cleanup_after_idle(smp_processor_id());
76913 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
76914 if (!user && !is_idle_task(current)) {
76915@@ -629,14 +629,14 @@ void rcu_nmi_enter(void)
76916 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
76917
76918 if (rdtp->dynticks_nmi_nesting == 0 &&
76919- (atomic_read(&rdtp->dynticks) & 0x1))
76920+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
76921 return;
76922 rdtp->dynticks_nmi_nesting++;
76923 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
76924- atomic_inc(&rdtp->dynticks);
76925+ atomic_inc_unchecked(&rdtp->dynticks);
76926 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
76927 smp_mb__after_atomic_inc(); /* See above. */
76928- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
76929+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
76930 }
76931
76932 /**
76933@@ -655,9 +655,9 @@ void rcu_nmi_exit(void)
76934 return;
76935 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
76936 smp_mb__before_atomic_inc(); /* See above. */
76937- atomic_inc(&rdtp->dynticks);
76938+ atomic_inc_unchecked(&rdtp->dynticks);
76939 smp_mb__after_atomic_inc(); /* Force delay to next write. */
76940- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
76941+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
76942 }
76943
76944 /**
76945@@ -671,7 +671,7 @@ int rcu_is_cpu_idle(void)
76946 int ret;
76947
76948 preempt_disable();
76949- ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
76950+ ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
76951 preempt_enable();
76952 return ret;
76953 }
76954@@ -739,7 +739,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
76955 */
76956 static int dyntick_save_progress_counter(struct rcu_data *rdp)
76957 {
76958- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
76959+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
76960 return (rdp->dynticks_snap & 0x1) == 0;
76961 }
76962
76963@@ -754,7 +754,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
76964 unsigned int curr;
76965 unsigned int snap;
76966
76967- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
76968+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
76969 snap = (unsigned int)rdp->dynticks_snap;
76970
76971 /*
76972@@ -802,10 +802,10 @@ static int jiffies_till_stall_check(void)
76973 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
76974 */
76975 if (till_stall_check < 3) {
76976- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
76977+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
76978 till_stall_check = 3;
76979 } else if (till_stall_check > 300) {
76980- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
76981+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
76982 till_stall_check = 300;
76983 }
76984 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
76985@@ -1592,7 +1592,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
76986 rsp->qlen += rdp->qlen;
76987 rdp->n_cbs_orphaned += rdp->qlen;
76988 rdp->qlen_lazy = 0;
76989- ACCESS_ONCE(rdp->qlen) = 0;
76990+ ACCESS_ONCE_RW(rdp->qlen) = 0;
76991 }
76992
76993 /*
76994@@ -1838,7 +1838,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
76995 }
76996 smp_mb(); /* List handling before counting for rcu_barrier(). */
76997 rdp->qlen_lazy -= count_lazy;
76998- ACCESS_ONCE(rdp->qlen) -= count;
76999+ ACCESS_ONCE_RW(rdp->qlen) -= count;
77000 rdp->n_cbs_invoked += count;
77001
77002 /* Reinstate batch limit if we have worked down the excess. */
77003@@ -2031,7 +2031,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
77004 /*
77005 * Do RCU core processing for the current CPU.
77006 */
77007-static void rcu_process_callbacks(struct softirq_action *unused)
77008+static void rcu_process_callbacks(void)
77009 {
77010 struct rcu_state *rsp;
77011
77012@@ -2154,7 +2154,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
77013 local_irq_restore(flags);
77014 return;
77015 }
77016- ACCESS_ONCE(rdp->qlen)++;
77017+ ACCESS_ONCE_RW(rdp->qlen)++;
77018 if (lazy)
77019 rdp->qlen_lazy++;
77020 else
77021@@ -2363,11 +2363,11 @@ void synchronize_sched_expedited(void)
77022 * counter wrap on a 32-bit system. Quite a few more CPUs would of
77023 * course be required on a 64-bit system.
77024 */
77025- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
77026+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
77027 (ulong)atomic_long_read(&rsp->expedited_done) +
77028 ULONG_MAX / 8)) {
77029 synchronize_sched();
77030- atomic_long_inc(&rsp->expedited_wrap);
77031+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
77032 return;
77033 }
77034
77035@@ -2375,7 +2375,7 @@ void synchronize_sched_expedited(void)
77036 * Take a ticket. Note that atomic_inc_return() implies a
77037 * full memory barrier.
77038 */
77039- snap = atomic_long_inc_return(&rsp->expedited_start);
77040+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
77041 firstsnap = snap;
77042 get_online_cpus();
77043 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
77044@@ -2388,14 +2388,14 @@ void synchronize_sched_expedited(void)
77045 synchronize_sched_expedited_cpu_stop,
77046 NULL) == -EAGAIN) {
77047 put_online_cpus();
77048- atomic_long_inc(&rsp->expedited_tryfail);
77049+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
77050
77051 /* Check to see if someone else did our work for us. */
77052 s = atomic_long_read(&rsp->expedited_done);
77053 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
77054 /* ensure test happens before caller kfree */
77055 smp_mb__before_atomic_inc(); /* ^^^ */
77056- atomic_long_inc(&rsp->expedited_workdone1);
77057+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
77058 return;
77059 }
77060
77061@@ -2404,7 +2404,7 @@ void synchronize_sched_expedited(void)
77062 udelay(trycount * num_online_cpus());
77063 } else {
77064 wait_rcu_gp(call_rcu_sched);
77065- atomic_long_inc(&rsp->expedited_normal);
77066+ atomic_long_inc_unchecked(&rsp->expedited_normal);
77067 return;
77068 }
77069
77070@@ -2413,7 +2413,7 @@ void synchronize_sched_expedited(void)
77071 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
77072 /* ensure test happens before caller kfree */
77073 smp_mb__before_atomic_inc(); /* ^^^ */
77074- atomic_long_inc(&rsp->expedited_workdone2);
77075+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
77076 return;
77077 }
77078
77079@@ -2425,10 +2425,10 @@ void synchronize_sched_expedited(void)
77080 * period works for us.
77081 */
77082 get_online_cpus();
77083- snap = atomic_long_read(&rsp->expedited_start);
77084+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
77085 smp_mb(); /* ensure read is before try_stop_cpus(). */
77086 }
77087- atomic_long_inc(&rsp->expedited_stoppedcpus);
77088+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
77089
77090 /*
77091 * Everyone up to our most recent fetch is covered by our grace
77092@@ -2437,16 +2437,16 @@ void synchronize_sched_expedited(void)
77093 * than we did already did their update.
77094 */
77095 do {
77096- atomic_long_inc(&rsp->expedited_done_tries);
77097+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
77098 s = atomic_long_read(&rsp->expedited_done);
77099 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
77100 /* ensure test happens before caller kfree */
77101 smp_mb__before_atomic_inc(); /* ^^^ */
77102- atomic_long_inc(&rsp->expedited_done_lost);
77103+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
77104 break;
77105 }
77106 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
77107- atomic_long_inc(&rsp->expedited_done_exit);
77108+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
77109
77110 put_online_cpus();
77111 }
77112@@ -2620,7 +2620,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
77113 * ACCESS_ONCE() to prevent the compiler from speculating
77114 * the increment to precede the early-exit check.
77115 */
77116- ACCESS_ONCE(rsp->n_barrier_done)++;
77117+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
77118 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
77119 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
77120 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
77121@@ -2670,7 +2670,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
77122
77123 /* Increment ->n_barrier_done to prevent duplicate work. */
77124 smp_mb(); /* Keep increment after above mechanism. */
77125- ACCESS_ONCE(rsp->n_barrier_done)++;
77126+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
77127 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
77128 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
77129 smp_mb(); /* Keep increment before caller's subsequent code. */
77130@@ -2715,10 +2715,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
77131 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
77132 init_callback_list(rdp);
77133 rdp->qlen_lazy = 0;
77134- ACCESS_ONCE(rdp->qlen) = 0;
77135+ ACCESS_ONCE_RW(rdp->qlen) = 0;
77136 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
77137 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
77138- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
77139+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
77140 #ifdef CONFIG_RCU_USER_QS
77141 WARN_ON_ONCE(rdp->dynticks->in_user);
77142 #endif
77143@@ -2754,8 +2754,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
77144 rdp->blimit = blimit;
77145 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
77146 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
77147- atomic_set(&rdp->dynticks->dynticks,
77148- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
77149+ atomic_set_unchecked(&rdp->dynticks->dynticks,
77150+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
77151 rcu_prepare_for_idle_init(cpu);
77152 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
77153
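
ACCESS_ONCE_RW, used throughout the RCU changes, exists because this patch set redefines ACCESS_ONCE with a const-qualified cast, so a stray write through it fails to compile and intentional writes must say so explicitly. A sketch with the definitions paraphrased from the grsecurity headers (the exact macros may differ):

#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

static unsigned long qlen;

static void example(void)
{
	unsigned long snap = ACCESS_ONCE(qlen);	/* read: const is fine */

	ACCESS_ONCE_RW(qlen) = snap + 1;	/* write: needs the _RW form */
	/* ACCESS_ONCE(qlen) = 0; -- rejected: assignment to const lvalue */
}
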
77154diff --git a/kernel/rcutree.h b/kernel/rcutree.h
77155index 4b69291..704c92e 100644
77156--- a/kernel/rcutree.h
77157+++ b/kernel/rcutree.h
77158@@ -86,7 +86,7 @@ struct rcu_dynticks {
77159 long long dynticks_nesting; /* Track irq/process nesting level. */
77160 /* Process level is worth LLONG_MAX/2. */
77161 int dynticks_nmi_nesting; /* Track NMI nesting level. */
77162- atomic_t dynticks; /* Even value for idle, else odd. */
77163+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
77164 #ifdef CONFIG_RCU_FAST_NO_HZ
77165 int dyntick_drain; /* Prepare-for-idle state variable. */
77166 unsigned long dyntick_holdoff;
77167@@ -423,17 +423,17 @@ struct rcu_state {
77168 /* _rcu_barrier(). */
77169 /* End of fields guarded by barrier_mutex. */
77170
77171- atomic_long_t expedited_start; /* Starting ticket. */
77172- atomic_long_t expedited_done; /* Done ticket. */
77173- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
77174- atomic_long_t expedited_tryfail; /* # acquisition failures. */
77175- atomic_long_t expedited_workdone1; /* # done by others #1. */
77176- atomic_long_t expedited_workdone2; /* # done by others #2. */
77177- atomic_long_t expedited_normal; /* # fallbacks to normal. */
77178- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
77179- atomic_long_t expedited_done_tries; /* # tries to update _done. */
77180- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
77181- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
77182+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
77183+ atomic_long_t expedited_done; /* Done ticket. */
77184+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
77185+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
77186+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
77187+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
77188+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
77189+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
77190+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
77191+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
77192+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
77193
77194 unsigned long jiffies_force_qs; /* Time at which to invoke */
77195 /* force_quiescent_state(). */
77196diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
77197index c1cc7e1..f62e436 100644
77198--- a/kernel/rcutree_plugin.h
77199+++ b/kernel/rcutree_plugin.h
77200@@ -892,7 +892,7 @@ void synchronize_rcu_expedited(void)
77201
77202 /* Clean up and exit. */
77203 smp_mb(); /* ensure expedited GP seen before counter increment. */
77204- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
77205+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
77206 unlock_mb_ret:
77207 mutex_unlock(&sync_rcu_preempt_exp_mutex);
77208 mb_ret:
77209@@ -1440,7 +1440,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
77210 free_cpumask_var(cm);
77211 }
77212
77213-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
77214+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
77215 .store = &rcu_cpu_kthread_task,
77216 .thread_should_run = rcu_cpu_kthread_should_run,
77217 .thread_fn = rcu_cpu_kthread,
77218@@ -2072,7 +2072,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
77219 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
77220 printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n",
77221 cpu, ticks_value, ticks_title,
77222- atomic_read(&rdtp->dynticks) & 0xfff,
77223+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
77224 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
77225 fast_no_hz);
77226 }
77227@@ -2192,7 +2192,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
77228
77229 /* Enqueue the callback on the nocb list and update counts. */
77230 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
77231- ACCESS_ONCE(*old_rhpp) = rhp;
77232+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
77233 atomic_long_add(rhcount, &rdp->nocb_q_count);
77234 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
77235
77236@@ -2384,12 +2384,12 @@ static int rcu_nocb_kthread(void *arg)
77237 * Extract queued callbacks, update counts, and wait
77238 * for a grace period to elapse.
77239 */
77240- ACCESS_ONCE(rdp->nocb_head) = NULL;
77241+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
77242 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
77243 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
77244 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
77245- ACCESS_ONCE(rdp->nocb_p_count) += c;
77246- ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
77247+ ACCESS_ONCE_RW(rdp->nocb_p_count) += c;
77248+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) += cl;
77249 wait_rcu_gp(rdp->rsp->call_remote);
77250
77251 /* Each pass through the following loop invokes a callback. */
77252@@ -2411,8 +2411,8 @@ static int rcu_nocb_kthread(void *arg)
77253 list = next;
77254 }
77255 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
77256- ACCESS_ONCE(rdp->nocb_p_count) -= c;
77257- ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
77258+ ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
77259+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
77260 rdp->n_nocbs_invoked += c;
77261 }
77262 return 0;
77263@@ -2438,7 +2438,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
77264 rdp = per_cpu_ptr(rsp->rda, cpu);
77265 t = kthread_run(rcu_nocb_kthread, rdp, "rcuo%d", cpu);
77266 BUG_ON(IS_ERR(t));
77267- ACCESS_ONCE(rdp->nocb_kthread) = t;
77268+ ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
77269 }
77270 }
77271
77272diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
77273index 0d095dc..1985b19 100644
77274--- a/kernel/rcutree_trace.c
77275+++ b/kernel/rcutree_trace.c
77276@@ -123,7 +123,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
77277 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
77278 rdp->passed_quiesce, rdp->qs_pending);
77279 seq_printf(m, " dt=%d/%llx/%d df=%lu",
77280- atomic_read(&rdp->dynticks->dynticks),
77281+ atomic_read_unchecked(&rdp->dynticks->dynticks),
77282 rdp->dynticks->dynticks_nesting,
77283 rdp->dynticks->dynticks_nmi_nesting,
77284 rdp->dynticks_fqs);
77285@@ -184,17 +184,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
77286 struct rcu_state *rsp = (struct rcu_state *)m->private;
77287
77288 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
77289- atomic_long_read(&rsp->expedited_start),
77290+ atomic_long_read_unchecked(&rsp->expedited_start),
77291 atomic_long_read(&rsp->expedited_done),
77292- atomic_long_read(&rsp->expedited_wrap),
77293- atomic_long_read(&rsp->expedited_tryfail),
77294- atomic_long_read(&rsp->expedited_workdone1),
77295- atomic_long_read(&rsp->expedited_workdone2),
77296- atomic_long_read(&rsp->expedited_normal),
77297- atomic_long_read(&rsp->expedited_stoppedcpus),
77298- atomic_long_read(&rsp->expedited_done_tries),
77299- atomic_long_read(&rsp->expedited_done_lost),
77300- atomic_long_read(&rsp->expedited_done_exit));
77301+ atomic_long_read_unchecked(&rsp->expedited_wrap),
77302+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
77303+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
77304+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
77305+ atomic_long_read_unchecked(&rsp->expedited_normal),
77306+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
77307+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
77308+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
77309+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
77310 return 0;
77311 }
77312
77313diff --git a/kernel/resource.c b/kernel/resource.c
77314index 73f35d4..4684fc4 100644
77315--- a/kernel/resource.c
77316+++ b/kernel/resource.c
77317@@ -143,8 +143,18 @@ static const struct file_operations proc_iomem_operations = {
77318
77319 static int __init ioresources_init(void)
77320 {
77321+#ifdef CONFIG_GRKERNSEC_PROC_ADD
77322+#ifdef CONFIG_GRKERNSEC_PROC_USER
77323+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
77324+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
77325+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
77326+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
77327+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
77328+#endif
77329+#else
77330 proc_create("ioports", 0, NULL, &proc_ioports_operations);
77331 proc_create("iomem", 0, NULL, &proc_iomem_operations);
77332+#endif
77333 return 0;
77334 }
77335 __initcall(ioresources_init);
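
For reference, the permission modes chosen above; the macro values are standard POSIX, while the claim that mode 0 falls back to a world-readable proc default is stated here as an assumption:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	/* GRKERNSEC_PROC_USER: only root may read iomem/ioports. */
	printf("S_IRUSR         = %04o\n", (unsigned)S_IRUSR);
	/* GRKERNSEC_PROC_USERGROUP: root plus one trusted group. */
	printf("S_IRUSR|S_IRGRP = %04o\n", (unsigned)(S_IRUSR | S_IRGRP));
	/* mode 0 presumably expands to proc's world-readable default. */
	printf("default         = %04o\n",
	       (unsigned)(S_IRUSR | S_IRGRP | S_IROTH));
	return 0;
}
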
77336diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
77337index 98ec494..4241d6d 100644
77338--- a/kernel/rtmutex-tester.c
77339+++ b/kernel/rtmutex-tester.c
77340@@ -20,7 +20,7 @@
77341 #define MAX_RT_TEST_MUTEXES 8
77342
77343 static spinlock_t rttest_lock;
77344-static atomic_t rttest_event;
77345+static atomic_unchecked_t rttest_event;
77346
77347 struct test_thread_data {
77348 int opcode;
77349@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
77350
77351 case RTTEST_LOCKCONT:
77352 td->mutexes[td->opdata] = 1;
77353- td->event = atomic_add_return(1, &rttest_event);
77354+ td->event = atomic_add_return_unchecked(1, &rttest_event);
77355 return 0;
77356
77357 case RTTEST_RESET:
77358@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
77359 return 0;
77360
77361 case RTTEST_RESETEVENT:
77362- atomic_set(&rttest_event, 0);
77363+ atomic_set_unchecked(&rttest_event, 0);
77364 return 0;
77365
77366 default:
77367@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
77368 return ret;
77369
77370 td->mutexes[id] = 1;
77371- td->event = atomic_add_return(1, &rttest_event);
77372+ td->event = atomic_add_return_unchecked(1, &rttest_event);
77373 rt_mutex_lock(&mutexes[id]);
77374- td->event = atomic_add_return(1, &rttest_event);
77375+ td->event = atomic_add_return_unchecked(1, &rttest_event);
77376 td->mutexes[id] = 4;
77377 return 0;
77378
77379@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
77380 return ret;
77381
77382 td->mutexes[id] = 1;
77383- td->event = atomic_add_return(1, &rttest_event);
77384+ td->event = atomic_add_return_unchecked(1, &rttest_event);
77385 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
77386- td->event = atomic_add_return(1, &rttest_event);
77387+ td->event = atomic_add_return_unchecked(1, &rttest_event);
77388 td->mutexes[id] = ret ? 0 : 4;
77389 return ret ? -EINTR : 0;
77390
77391@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
77392 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
77393 return ret;
77394
77395- td->event = atomic_add_return(1, &rttest_event);
77396+ td->event = atomic_add_return_unchecked(1, &rttest_event);
77397 rt_mutex_unlock(&mutexes[id]);
77398- td->event = atomic_add_return(1, &rttest_event);
77399+ td->event = atomic_add_return_unchecked(1, &rttest_event);
77400 td->mutexes[id] = 0;
77401 return 0;
77402
77403@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
77404 break;
77405
77406 td->mutexes[dat] = 2;
77407- td->event = atomic_add_return(1, &rttest_event);
77408+ td->event = atomic_add_return_unchecked(1, &rttest_event);
77409 break;
77410
77411 default:
77412@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
77413 return;
77414
77415 td->mutexes[dat] = 3;
77416- td->event = atomic_add_return(1, &rttest_event);
77417+ td->event = atomic_add_return_unchecked(1, &rttest_event);
77418 break;
77419
77420 case RTTEST_LOCKNOWAIT:
77421@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
77422 return;
77423
77424 td->mutexes[dat] = 1;
77425- td->event = atomic_add_return(1, &rttest_event);
77426+ td->event = atomic_add_return_unchecked(1, &rttest_event);
77427 return;
77428
77429 default:
77430diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
77431index 0984a21..939f183 100644
77432--- a/kernel/sched/auto_group.c
77433+++ b/kernel/sched/auto_group.c
77434@@ -11,7 +11,7 @@
77435
77436 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
77437 static struct autogroup autogroup_default;
77438-static atomic_t autogroup_seq_nr;
77439+static atomic_unchecked_t autogroup_seq_nr;
77440
77441 void __init autogroup_init(struct task_struct *init_task)
77442 {
77443@@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
77444
77445 kref_init(&ag->kref);
77446 init_rwsem(&ag->lock);
77447- ag->id = atomic_inc_return(&autogroup_seq_nr);
77448+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
77449 ag->tg = tg;
77450 #ifdef CONFIG_RT_GROUP_SCHED
77451 /*
77452diff --git a/kernel/sched/core.c b/kernel/sched/core.c
77453index 26058d0..e315889 100644
77454--- a/kernel/sched/core.c
77455+++ b/kernel/sched/core.c
77456@@ -3367,7 +3367,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
77457 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
77458 * positive (at least 1, or number of jiffies left till timeout) if completed.
77459 */
77460-long __sched
77461+long __sched __intentional_overflow(-1)
77462 wait_for_completion_interruptible_timeout(struct completion *x,
77463 unsigned long timeout)
77464 {
77465@@ -3384,7 +3384,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
77466 *
77467 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
77468 */
77469-int __sched wait_for_completion_killable(struct completion *x)
77470+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
77471 {
77472 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
77473 if (t == -ERESTARTSYS)
77474@@ -3405,7 +3405,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
77475 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
77476 * positive (at least 1, or number of jiffies left till timeout) if completed.
77477 */
77478-long __sched
77479+long __sched __intentional_overflow(-1)
77480 wait_for_completion_killable_timeout(struct completion *x,
77481 unsigned long timeout)
77482 {
77483@@ -3631,6 +3631,8 @@ int can_nice(const struct task_struct *p, const int nice)
77484 /* convert nice value [19,-20] to rlimit style value [1,40] */
77485 int nice_rlim = 20 - nice;
77486
77487+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
77488+
77489 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
77490 capable(CAP_SYS_NICE));
77491 }
77492@@ -3664,7 +3666,8 @@ SYSCALL_DEFINE1(nice, int, increment)
77493 if (nice > 19)
77494 nice = 19;
77495
77496- if (increment < 0 && !can_nice(current, nice))
77497+ if (increment < 0 && (!can_nice(current, nice) ||
77498+ gr_handle_chroot_nice()))
77499 return -EPERM;
77500
77501 retval = security_task_setnice(current, nice);
77502@@ -3818,6 +3821,7 @@ recheck:
77503 unsigned long rlim_rtprio =
77504 task_rlimit(p, RLIMIT_RTPRIO);
77505
77506+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
77507 /* can't set/change the rt policy */
77508 if (policy != p->policy && !rlim_rtprio)
77509 return -EPERM;
77510@@ -4901,7 +4905,7 @@ static void migrate_tasks(unsigned int dead_cpu)
77511
77512 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
77513
77514-static struct ctl_table sd_ctl_dir[] = {
77515+static ctl_table_no_const sd_ctl_dir[] __read_only = {
77516 {
77517 .procname = "sched_domain",
77518 .mode = 0555,
77519@@ -4918,17 +4922,17 @@ static struct ctl_table sd_ctl_root[] = {
77520 {}
77521 };
77522
77523-static struct ctl_table *sd_alloc_ctl_entry(int n)
77524+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
77525 {
77526- struct ctl_table *entry =
77527+ ctl_table_no_const *entry =
77528 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
77529
77530 return entry;
77531 }
77532
77533-static void sd_free_ctl_entry(struct ctl_table **tablep)
77534+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
77535 {
77536- struct ctl_table *entry;
77537+ ctl_table_no_const *entry;
77538
77539 /*
77540 * In the intermediate directories, both the child directory and
77541@@ -4936,22 +4940,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
77542 * will always be set. In the lowest directory the names are
77543 * static strings and all have proc handlers.
77544 */
77545- for (entry = *tablep; entry->mode; entry++) {
77546- if (entry->child)
77547- sd_free_ctl_entry(&entry->child);
77548+ for (entry = tablep; entry->mode; entry++) {
77549+ if (entry->child) {
77550+ sd_free_ctl_entry(entry->child);
77551+ pax_open_kernel();
77552+ entry->child = NULL;
77553+ pax_close_kernel();
77554+ }
77555 if (entry->proc_handler == NULL)
77556 kfree(entry->procname);
77557 }
77558
77559- kfree(*tablep);
77560- *tablep = NULL;
77561+ kfree(tablep);
77562 }
77563
77564 static int min_load_idx = 0;
77565 static int max_load_idx = CPU_LOAD_IDX_MAX;
77566
77567 static void
77568-set_table_entry(struct ctl_table *entry,
77569+set_table_entry(ctl_table_no_const *entry,
77570 const char *procname, void *data, int maxlen,
77571 umode_t mode, proc_handler *proc_handler,
77572 bool load_idx)
77573@@ -4971,7 +4978,7 @@ set_table_entry(struct ctl_table *entry,
77574 static struct ctl_table *
77575 sd_alloc_ctl_domain_table(struct sched_domain *sd)
77576 {
77577- struct ctl_table *table = sd_alloc_ctl_entry(13);
77578+ ctl_table_no_const *table = sd_alloc_ctl_entry(13);
77579
77580 if (table == NULL)
77581 return NULL;
77582@@ -5006,9 +5013,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
77583 return table;
77584 }
77585
77586-static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
77587+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
77588 {
77589- struct ctl_table *entry, *table;
77590+ ctl_table_no_const *entry, *table;
77591 struct sched_domain *sd;
77592 int domain_num = 0, i;
77593 char buf[32];
77594@@ -5035,11 +5042,13 @@ static struct ctl_table_header *sd_sysctl_header;
77595 static void register_sched_domain_sysctl(void)
77596 {
77597 int i, cpu_num = num_possible_cpus();
77598- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
77599+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
77600 char buf[32];
77601
77602 WARN_ON(sd_ctl_dir[0].child);
77603+ pax_open_kernel();
77604 sd_ctl_dir[0].child = entry;
77605+ pax_close_kernel();
77606
77607 if (entry == NULL)
77608 return;
77609@@ -5062,8 +5071,12 @@ static void unregister_sched_domain_sysctl(void)
77610 if (sd_sysctl_header)
77611 unregister_sysctl_table(sd_sysctl_header);
77612 sd_sysctl_header = NULL;
77613- if (sd_ctl_dir[0].child)
77614- sd_free_ctl_entry(&sd_ctl_dir[0].child);
77615+ if (sd_ctl_dir[0].child) {
77616+ sd_free_ctl_entry(sd_ctl_dir[0].child);
77617+ pax_open_kernel();
77618+ sd_ctl_dir[0].child = NULL;
77619+ pax_close_kernel();
77620+ }
77621 }
77622 #else
77623 static void register_sched_domain_sysctl(void)
77624@@ -5162,7 +5175,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
77625 * happens before everything else. This has to be lower priority than
77626 * the notifier in the perf_event subsystem, though.
77627 */
77628-static struct notifier_block __cpuinitdata migration_notifier = {
77629+static struct notifier_block migration_notifier = {
77630 .notifier_call = migration_call,
77631 .priority = CPU_PRI_MIGRATION,
77632 };
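
The pax_open_kernel()/pax_close_kernel() pairs above bracket the only legitimate writes to the now-__read_only sysctl tables. A userspace analogue using mprotect() — the real primitives toggle a per-arch write-protect mechanism rather than page tables, so this is purely illustrative:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static char *table;	/* stands in for a __read_only kernel object */

static void open_kernel(void)  { mprotect(table, 4096, PROT_READ | PROT_WRITE); }
static void close_kernel(void) { mprotect(table, 4096, PROT_READ); }

int main(void)
{
	table = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	strcpy(table, "initial");
	mprotect(table, 4096, PROT_READ);	/* now effectively .rodata */

	open_kernel();				/* like pax_open_kernel() */
	strcpy(table, "updated");
	close_kernel();				/* like pax_close_kernel() */

	puts(table);
	return 0;
}
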
77633diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
77634index 81fa536..6ccf96a 100644
77635--- a/kernel/sched/fair.c
77636+++ b/kernel/sched/fair.c
77637@@ -830,7 +830,7 @@ void task_numa_fault(int node, int pages, bool migrated)
77638
77639 static void reset_ptenuma_scan(struct task_struct *p)
77640 {
77641- ACCESS_ONCE(p->mm->numa_scan_seq)++;
77642+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
77643 p->mm->numa_scan_offset = 0;
77644 }
77645
77646@@ -3254,25 +3254,18 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
77647 */
77648 static int select_idle_sibling(struct task_struct *p, int target)
77649 {
77650- int cpu = smp_processor_id();
77651- int prev_cpu = task_cpu(p);
77652 struct sched_domain *sd;
77653 struct sched_group *sg;
77654- int i;
77655+ int i = task_cpu(p);
77656
77657- /*
77658- * If the task is going to be woken-up on this cpu and if it is
77659- * already idle, then it is the right target.
77660- */
77661- if (target == cpu && idle_cpu(cpu))
77662- return cpu;
77663+ if (idle_cpu(target))
77664+ return target;
77665
77666 /*
77667- * If the task is going to be woken-up on the cpu where it previously
77668- * ran and if it is currently idle, then it the right target.
76669+	 * If the previous cpu is cache affine and idle, don't be stupid.
77670 */
77671- if (target == prev_cpu && idle_cpu(prev_cpu))
77672- return prev_cpu;
77673+ if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
77674+ return i;
77675
77676 /*
77677 * Otherwise, iterate the domains and find an elegible idle cpu.
77678@@ -3286,7 +3279,7 @@ static int select_idle_sibling(struct task_struct *p, int target)
77679 goto next;
77680
77681 for_each_cpu(i, sched_group_cpus(sg)) {
77682- if (!idle_cpu(i))
77683+ if (i == target || !idle_cpu(i))
77684 goto next;
77685 }
77686
77687@@ -5663,7 +5656,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
77688 * run_rebalance_domains is triggered when needed from the scheduler tick.
77689 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
77690 */
77691-static void run_rebalance_domains(struct softirq_action *h)
77692+static void run_rebalance_domains(void)
77693 {
77694 int this_cpu = smp_processor_id();
77695 struct rq *this_rq = cpu_rq(this_cpu);
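
The select_idle_sibling() rewrite above folds the two hand-rolled special cases into one path: prefer the wake target if idle, then the cache-affine previous CPU, then fall back to scanning the domains. A toy model with invented helpers (idle(), shares_cache(), scan_for_idle()):

#include <stdbool.h>
#include <stdio.h>

static bool idle(int cpu)              { return cpu == 2; }	/* toy state */
static bool shares_cache(int a, int b) { return a / 4 == b / 4; }
static int  scan_for_idle(int target)  { return target; }	/* placeholder */

static int pick_wake_cpu(int prev, int target)
{
	if (idle(target))
		return target;
	if (prev != target && shares_cache(prev, target) && idle(prev))
		return prev;
	return scan_for_idle(target);	/* the domain walk in the real code */
}

int main(void)
{
	printf("wake on cpu %d\n", pick_wake_cpu(2, 1));	/* prev idle -> 2 */
	return 0;
}
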
77696diff --git a/kernel/signal.c b/kernel/signal.c
77697index 7591ccc..8988390 100644
77698--- a/kernel/signal.c
77699+++ b/kernel/signal.c
77700@@ -50,12 +50,12 @@ static struct kmem_cache *sigqueue_cachep;
77701
77702 int print_fatal_signals __read_mostly;
77703
77704-static void __user *sig_handler(struct task_struct *t, int sig)
77705+static __sighandler_t sig_handler(struct task_struct *t, int sig)
77706 {
77707 return t->sighand->action[sig - 1].sa.sa_handler;
77708 }
77709
77710-static int sig_handler_ignored(void __user *handler, int sig)
77711+static int sig_handler_ignored(__sighandler_t handler, int sig)
77712 {
77713 /* Is it explicitly or implicitly ignored? */
77714 return handler == SIG_IGN ||
77715@@ -64,7 +64,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
77716
77717 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
77718 {
77719- void __user *handler;
77720+ __sighandler_t handler;
77721
77722 handler = sig_handler(t, sig);
77723
77724@@ -368,6 +368,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
77725 atomic_inc(&user->sigpending);
77726 rcu_read_unlock();
77727
77728+ if (!override_rlimit)
77729+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
77730+
77731 if (override_rlimit ||
77732 atomic_read(&user->sigpending) <=
77733 task_rlimit(t, RLIMIT_SIGPENDING)) {
77734@@ -495,7 +498,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
77735
77736 int unhandled_signal(struct task_struct *tsk, int sig)
77737 {
77738- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
77739+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
77740 if (is_global_init(tsk))
77741 return 1;
77742 if (handler != SIG_IGN && handler != SIG_DFL)
77743@@ -815,6 +818,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
77744 }
77745 }
77746
77747+ /* allow glibc communication via tgkill to other threads in our
77748+ thread group */
77749+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
77750+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
77751+ && gr_handle_signal(t, sig))
77752+ return -EPERM;
77753+
77754 return security_task_kill(t, info, sig, 0);
77755 }
77756
77757@@ -1197,7 +1207,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
77758 return send_signal(sig, info, p, 1);
77759 }
77760
77761-static int
77762+int
77763 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
77764 {
77765 return send_signal(sig, info, t, 0);
77766@@ -1234,6 +1244,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
77767 unsigned long int flags;
77768 int ret, blocked, ignored;
77769 struct k_sigaction *action;
77770+ int is_unhandled = 0;
77771
77772 spin_lock_irqsave(&t->sighand->siglock, flags);
77773 action = &t->sighand->action[sig-1];
77774@@ -1248,9 +1259,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
77775 }
77776 if (action->sa.sa_handler == SIG_DFL)
77777 t->signal->flags &= ~SIGNAL_UNKILLABLE;
77778+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
77779+ is_unhandled = 1;
77780 ret = specific_send_sig_info(sig, info, t);
77781 spin_unlock_irqrestore(&t->sighand->siglock, flags);
77782
77783+	/* only deal with unhandled signals; Java etc. trigger SIGSEGV during
77784+	   normal operation */
77785+ if (is_unhandled) {
77786+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
77787+ gr_handle_crash(t, sig);
77788+ }
77789+
77790 return ret;
77791 }
77792
77793@@ -1317,8 +1337,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
77794 ret = check_kill_permission(sig, info, p);
77795 rcu_read_unlock();
77796
77797- if (!ret && sig)
77798+ if (!ret && sig) {
77799 ret = do_send_sig_info(sig, info, p, true);
77800+ if (!ret)
77801+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
77802+ }
77803
77804 return ret;
77805 }
77806@@ -2855,7 +2878,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
77807 int error = -ESRCH;
77808
77809 rcu_read_lock();
77810- p = find_task_by_vpid(pid);
77811+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
77812+ /* allow glibc communication via tgkill to other threads in our
77813+ thread group */
77814+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
77815+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
77816+ p = find_task_by_vpid_unrestricted(pid);
77817+ else
77818+#endif
77819+ p = find_task_by_vpid(pid);
77820 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
77821 error = check_kill_permission(sig, info, p);
77822 /*
77823@@ -3138,8 +3169,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
77824 }
77825 seg = get_fs();
77826 set_fs(KERNEL_DS);
77827- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
77828- (stack_t __force __user *) &uoss,
77829+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
77830+ (stack_t __force_user *) &uoss,
77831 compat_user_stack_pointer());
77832 set_fs(seg);
77833 if (ret >= 0 && uoss_ptr) {
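
force_sig_info() above samples whether the signal is unhandled while still holding siglock — delivery can change the disposition — and defers the grsecurity logging until after the lock is released. A pthread sketch of that latch-then-log shape (names invented):

#include <stdbool.h>
#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t siglock = PTHREAD_MUTEX_INITIALIZER;
static int disposition;	/* 0 = default/ignored, 1 = handler installed */

static void deliver(int sig) { (void)sig; /* ... queue the signal ... */ }

static void force_sig_like(int sig)
{
	bool unhandled;

	pthread_mutex_lock(&siglock);
	unhandled = (disposition == 0);	/* sample while state is stable */
	deliver(sig);
	pthread_mutex_unlock(&siglock);

	if (unhandled)			/* slow logging outside the lock */
		fprintf(stderr, "unhandled signal %d\n", sig);
}
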
77834diff --git a/kernel/smp.c b/kernel/smp.c
77835index 69f38bd..77bbf12 100644
77836--- a/kernel/smp.c
77837+++ b/kernel/smp.c
77838@@ -77,7 +77,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
77839 return NOTIFY_OK;
77840 }
77841
77842-static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
77843+static struct notifier_block hotplug_cfd_notifier = {
77844 .notifier_call = hotplug_cfd,
77845 };
77846
77847diff --git a/kernel/smpboot.c b/kernel/smpboot.c
77848index d6c5fc0..530560c 100644
77849--- a/kernel/smpboot.c
77850+++ b/kernel/smpboot.c
77851@@ -275,7 +275,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
77852 }
77853 smpboot_unpark_thread(plug_thread, cpu);
77854 }
77855- list_add(&plug_thread->list, &hotplug_threads);
77856+ pax_list_add(&plug_thread->list, &hotplug_threads);
77857 out:
77858 mutex_unlock(&smpboot_threads_lock);
77859 return ret;
77860@@ -292,7 +292,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
77861 {
77862 get_online_cpus();
77863 mutex_lock(&smpboot_threads_lock);
77864- list_del(&plug_thread->list);
77865+ pax_list_del(&plug_thread->list);
77866 smpboot_destroy_threads(plug_thread);
77867 mutex_unlock(&smpboot_threads_lock);
77868 put_online_cpus();
77869diff --git a/kernel/softirq.c b/kernel/softirq.c
77870index ed567ba..e71dabf 100644
77871--- a/kernel/softirq.c
77872+++ b/kernel/softirq.c
77873@@ -53,11 +53,11 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
77874 EXPORT_SYMBOL(irq_stat);
77875 #endif
77876
77877-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
77878+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
77879
77880 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
77881
77882-char *softirq_to_name[NR_SOFTIRQS] = {
77883+const char * const softirq_to_name[NR_SOFTIRQS] = {
77884 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
77885 "TASKLET", "SCHED", "HRTIMER", "RCU"
77886 };
77887@@ -244,7 +244,7 @@ restart:
77888 kstat_incr_softirqs_this_cpu(vec_nr);
77889
77890 trace_softirq_entry(vec_nr);
77891- h->action(h);
77892+ h->action();
77893 trace_softirq_exit(vec_nr);
77894 if (unlikely(prev_count != preempt_count())) {
77895 printk(KERN_ERR "huh, entered softirq %u %s %p"
77896@@ -391,7 +391,7 @@ void __raise_softirq_irqoff(unsigned int nr)
77897 or_softirq_pending(1UL << nr);
77898 }
77899
77900-void open_softirq(int nr, void (*action)(struct softirq_action *))
77901+void __init open_softirq(int nr, void (*action)(void))
77902 {
77903 softirq_vec[nr].action = action;
77904 }
77905@@ -447,7 +447,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
77906
77907 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
77908
77909-static void tasklet_action(struct softirq_action *a)
77910+static void tasklet_action(void)
77911 {
77912 struct tasklet_struct *list;
77913
77914@@ -482,7 +482,7 @@ static void tasklet_action(struct softirq_action *a)
77915 }
77916 }
77917
77918-static void tasklet_hi_action(struct softirq_action *a)
77919+static void tasklet_hi_action(void)
77920 {
77921 struct tasklet_struct *list;
77922
77923@@ -718,7 +718,7 @@ static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
77924 return NOTIFY_OK;
77925 }
77926
77927-static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
77928+static struct notifier_block remote_softirq_cpu_notifier = {
77929 .notifier_call = remote_softirq_cpu_notify,
77930 };
77931
77932@@ -835,11 +835,11 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
77933 return NOTIFY_OK;
77934 }
77935
77936-static struct notifier_block __cpuinitdata cpu_nfb = {
77937+static struct notifier_block cpu_nfb = {
77938 .notifier_call = cpu_callback
77939 };
77940
77941-static struct smp_hotplug_thread softirq_threads = {
77942+static struct smp_hotplug_thread softirq_threads __read_only = {
77943 .store = &ksoftirqd,
77944 .thread_should_run = ksoftirqd_should_run,
77945 .thread_fn = run_ksoftirqd,
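Two hardening moves combine in this file: softirq handlers lose the struct softirq_action * argument they never used, and the handler table becomes read-only and page-aligned after init, making open_softirq() __init-only. The net effect on a handler, sketched here (run_timer_softirq is patched the same way later in this diff):

    /* before */ static void run_timer_softirq(struct softirq_action *h);
    /* after  */ static void run_timer_softirq(void);

    /* registration is now only legal during init: */
    open_softirq(TIMER_SOFTIRQ, run_timer_softirq);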
77946diff --git a/kernel/srcu.c b/kernel/srcu.c
77947index 2b85982..d52ab26 100644
77948--- a/kernel/srcu.c
77949+++ b/kernel/srcu.c
77950@@ -305,9 +305,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
77951 preempt_disable();
77952 idx = rcu_dereference_index_check(sp->completed,
77953 rcu_read_lock_sched_held()) & 0x1;
77954- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
77955+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
77956 smp_mb(); /* B */ /* Avoid leaking the critical section. */
77957- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
77958+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
77959 preempt_enable();
77960 return idx;
77961 }
77962@@ -323,7 +323,7 @@ void __srcu_read_unlock(struct srcu_struct *sp, int idx)
77963 {
77964 preempt_disable();
77965 smp_mb(); /* C */ /* Avoid leaking the critical section. */
77966- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
77967+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
77968 preempt_enable();
77969 }
77970 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
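ACCESS_ONCE_RW() is assumed to be the PaX counterpart of ACCESS_ONCE(): the stock macro is redefined with a const-qualified volatile cast so stray writes through it fail to compile, and writes must opt in explicitly. Definitions paraphrased from PaX, not quoted:

    #define ACCESS_ONCE(x)    (*(volatile const typeof(x) *)&(x))
    #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

    /* reads compile with either form; a write must say so, as above: */
    ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;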
77971diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
77972index 2f194e9..2c05ea9 100644
77973--- a/kernel/stop_machine.c
77974+++ b/kernel/stop_machine.c
77975@@ -362,7 +362,7 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
77976 * cpu notifiers. It currently shares the same priority as sched
77977 * migration_notifier.
77978 */
77979-static struct notifier_block __cpuinitdata cpu_stop_cpu_notifier = {
77980+static struct notifier_block cpu_stop_cpu_notifier = {
77981 .notifier_call = cpu_stop_cpu_callback,
77982 .priority = 10,
77983 };
77984diff --git a/kernel/sys.c b/kernel/sys.c
77985index 265b376..4e42ef5 100644
77986--- a/kernel/sys.c
77987+++ b/kernel/sys.c
77988@@ -157,6 +157,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
77989 error = -EACCES;
77990 goto out;
77991 }
77992+
77993+ if (gr_handle_chroot_setpriority(p, niceval)) {
77994+ error = -EACCES;
77995+ goto out;
77996+ }
77997+
77998 no_nice = security_task_setnice(p, niceval);
77999 if (no_nice) {
78000 error = no_nice;
78001@@ -595,6 +601,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
78002 goto error;
78003 }
78004
78005+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
78006+ goto error;
78007+
78008 if (rgid != (gid_t) -1 ||
78009 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
78010 new->sgid = new->egid;
78011@@ -630,6 +639,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
78012 old = current_cred();
78013
78014 retval = -EPERM;
78015+
78016+ if (gr_check_group_change(kgid, kgid, kgid))
78017+ goto error;
78018+
78019 if (nsown_capable(CAP_SETGID))
78020 new->gid = new->egid = new->sgid = new->fsgid = kgid;
78021 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
78022@@ -647,7 +660,7 @@ error:
78023 /*
78024 * change the user struct in a credentials set to match the new UID
78025 */
78026-static int set_user(struct cred *new)
78027+int set_user(struct cred *new)
78028 {
78029 struct user_struct *new_user;
78030
78031@@ -727,6 +740,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
78032 goto error;
78033 }
78034
78035+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
78036+ goto error;
78037+
78038 if (!uid_eq(new->uid, old->uid)) {
78039 retval = set_user(new);
78040 if (retval < 0)
78041@@ -777,6 +793,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
78042 old = current_cred();
78043
78044 retval = -EPERM;
78045+
78046+ if (gr_check_crash_uid(kuid))
78047+ goto error;
78048+ if (gr_check_user_change(kuid, kuid, kuid))
78049+ goto error;
78050+
78051 if (nsown_capable(CAP_SETUID)) {
78052 new->suid = new->uid = kuid;
78053 if (!uid_eq(kuid, old->uid)) {
78054@@ -846,6 +868,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
78055 goto error;
78056 }
78057
78058+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
78059+ goto error;
78060+
78061 if (ruid != (uid_t) -1) {
78062 new->uid = kruid;
78063 if (!uid_eq(kruid, old->uid)) {
78064@@ -928,6 +953,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
78065 goto error;
78066 }
78067
78068+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
78069+ goto error;
78070+
78071 if (rgid != (gid_t) -1)
78072 new->gid = krgid;
78073 if (egid != (gid_t) -1)
78074@@ -981,6 +1009,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
78075 if (!uid_valid(kuid))
78076 return old_fsuid;
78077
78078+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
78079+ goto error;
78080+
78081 new = prepare_creds();
78082 if (!new)
78083 return old_fsuid;
78084@@ -995,6 +1026,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
78085 }
78086 }
78087
78088+error:
78089 abort_creds(new);
78090 return old_fsuid;
78091
78092@@ -1027,12 +1059,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
78093 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
78094 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
78095 nsown_capable(CAP_SETGID)) {
78096+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
78097+ goto error;
78098+
78099 if (!gid_eq(kgid, old->fsgid)) {
78100 new->fsgid = kgid;
78101 goto change_okay;
78102 }
78103 }
78104
78105+error:
78106 abort_creds(new);
78107 return old_fsgid;
78108
78109@@ -1340,19 +1376,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
78110 return -EFAULT;
78111
78112 down_read(&uts_sem);
78113- error = __copy_to_user(&name->sysname, &utsname()->sysname,
78114+ error = __copy_to_user(name->sysname, &utsname()->sysname,
78115 __OLD_UTS_LEN);
78116 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
78117- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
78118+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
78119 __OLD_UTS_LEN);
78120 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
78121- error |= __copy_to_user(&name->release, &utsname()->release,
78122+ error |= __copy_to_user(name->release, &utsname()->release,
78123 __OLD_UTS_LEN);
78124 error |= __put_user(0, name->release + __OLD_UTS_LEN);
78125- error |= __copy_to_user(&name->version, &utsname()->version,
78126+ error |= __copy_to_user(name->version, &utsname()->version,
78127 __OLD_UTS_LEN);
78128 error |= __put_user(0, name->version + __OLD_UTS_LEN);
78129- error |= __copy_to_user(&name->machine, &utsname()->machine,
78130+ error |= __copy_to_user(name->machine, &utsname()->machine,
78131 __OLD_UTS_LEN);
78132 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
78133 up_read(&uts_sem);
78134@@ -2026,7 +2062,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
78135 error = get_dumpable(me->mm);
78136 break;
78137 case PR_SET_DUMPABLE:
78138- if (arg2 < 0 || arg2 > 1) {
78139+ if (arg2 > 1) {
78140 error = -EINVAL;
78141 break;
78142 }
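The gr_check_user_change()/gr_check_group_change() hooks above share one convention: INVALID_UID/INVALID_GID mark credential fields the syscall is not touching, and a nonzero return makes the caller bail out with -EPERM. A hypothetical stub showing the shape of the interface — not grsecurity's actual implementation:

    int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
    {
            /* fields passed as INVALID_UID are left unchanged by the
             * caller; return nonzero to veto the change (-EPERM) */
            return 0;
    }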
78143diff --git a/kernel/sysctl.c b/kernel/sysctl.c
78144index c88878d..e4fa5d1 100644
78145--- a/kernel/sysctl.c
78146+++ b/kernel/sysctl.c
78147@@ -92,7 +92,6 @@
78148
78149
78150 #if defined(CONFIG_SYSCTL)
78151-
78152 /* External variables not in a header file. */
78153 extern int sysctl_overcommit_memory;
78154 extern int sysctl_overcommit_ratio;
78155@@ -172,10 +171,8 @@ static int proc_taint(struct ctl_table *table, int write,
78156 void __user *buffer, size_t *lenp, loff_t *ppos);
78157 #endif
78158
78159-#ifdef CONFIG_PRINTK
78160 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
78161 void __user *buffer, size_t *lenp, loff_t *ppos);
78162-#endif
78163
78164 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
78165 void __user *buffer, size_t *lenp, loff_t *ppos);
78166@@ -206,6 +203,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
78167
78168 #endif
78169
78170+extern struct ctl_table grsecurity_table[];
78171+
78172 static struct ctl_table kern_table[];
78173 static struct ctl_table vm_table[];
78174 static struct ctl_table fs_table[];
78175@@ -220,6 +219,20 @@ extern struct ctl_table epoll_table[];
78176 int sysctl_legacy_va_layout;
78177 #endif
78178
78179+#ifdef CONFIG_PAX_SOFTMODE
78180+static ctl_table pax_table[] = {
78181+ {
78182+ .procname = "softmode",
78183+ .data = &pax_softmode,
78184+ .maxlen = sizeof(unsigned int),
78185+ .mode = 0600,
78186+ .proc_handler = &proc_dointvec,
78187+ },
78188+
78189+ { }
78190+};
78191+#endif
78192+
78193 /* The default sysctl tables: */
78194
78195 static struct ctl_table sysctl_base_table[] = {
78196@@ -268,6 +281,22 @@ static int max_extfrag_threshold = 1000;
78197 #endif
78198
78199 static struct ctl_table kern_table[] = {
78200+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
78201+ {
78202+ .procname = "grsecurity",
78203+ .mode = 0500,
78204+ .child = grsecurity_table,
78205+ },
78206+#endif
78207+
78208+#ifdef CONFIG_PAX_SOFTMODE
78209+ {
78210+ .procname = "pax",
78211+ .mode = 0500,
78212+ .child = pax_table,
78213+ },
78214+#endif
78215+
78216 {
78217 .procname = "sched_child_runs_first",
78218 .data = &sysctl_sched_child_runs_first,
78219@@ -593,7 +622,7 @@ static struct ctl_table kern_table[] = {
78220 .data = &modprobe_path,
78221 .maxlen = KMOD_PATH_LEN,
78222 .mode = 0644,
78223- .proc_handler = proc_dostring,
78224+ .proc_handler = proc_dostring_modpriv,
78225 },
78226 {
78227 .procname = "modules_disabled",
78228@@ -760,16 +789,20 @@ static struct ctl_table kern_table[] = {
78229 .extra1 = &zero,
78230 .extra2 = &one,
78231 },
78232+#endif
78233 {
78234 .procname = "kptr_restrict",
78235 .data = &kptr_restrict,
78236 .maxlen = sizeof(int),
78237 .mode = 0644,
78238 .proc_handler = proc_dointvec_minmax_sysadmin,
78239+#ifdef CONFIG_GRKERNSEC_HIDESYM
78240+ .extra1 = &two,
78241+#else
78242 .extra1 = &zero,
78243+#endif
78244 .extra2 = &two,
78245 },
78246-#endif
78247 {
78248 .procname = "ngroups_max",
78249 .data = &ngroups_max,
78250@@ -1266,6 +1299,13 @@ static struct ctl_table vm_table[] = {
78251 .proc_handler = proc_dointvec_minmax,
78252 .extra1 = &zero,
78253 },
78254+ {
78255+ .procname = "heap_stack_gap",
78256+ .data = &sysctl_heap_stack_gap,
78257+ .maxlen = sizeof(sysctl_heap_stack_gap),
78258+ .mode = 0644,
78259+ .proc_handler = proc_doulongvec_minmax,
78260+ },
78261 #else
78262 {
78263 .procname = "nr_trim_pages",
78264@@ -1716,6 +1756,16 @@ int proc_dostring(struct ctl_table *table, int write,
78265 buffer, lenp, ppos);
78266 }
78267
78268+int proc_dostring_modpriv(struct ctl_table *table, int write,
78269+ void __user *buffer, size_t *lenp, loff_t *ppos)
78270+{
78271+ if (write && !capable(CAP_SYS_MODULE))
78272+ return -EPERM;
78273+
78274+ return _proc_do_string(table->data, table->maxlen, write,
78275+ buffer, lenp, ppos);
78276+}
78277+
78278 static size_t proc_skip_spaces(char **buf)
78279 {
78280 size_t ret;
78281@@ -1821,6 +1871,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
78282 len = strlen(tmp);
78283 if (len > *size)
78284 len = *size;
78285+ if (len > sizeof(tmp))
78286+ len = sizeof(tmp);
78287 if (copy_to_user(*buf, tmp, len))
78288 return -EFAULT;
78289 *size -= len;
78290@@ -1985,7 +2037,7 @@ int proc_dointvec(struct ctl_table *table, int write,
78291 static int proc_taint(struct ctl_table *table, int write,
78292 void __user *buffer, size_t *lenp, loff_t *ppos)
78293 {
78294- struct ctl_table t;
78295+ ctl_table_no_const t;
78296 unsigned long tmptaint = get_taint();
78297 int err;
78298
78299@@ -2013,7 +2065,6 @@ static int proc_taint(struct ctl_table *table, int write,
78300 return err;
78301 }
78302
78303-#ifdef CONFIG_PRINTK
78304 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
78305 void __user *buffer, size_t *lenp, loff_t *ppos)
78306 {
78307@@ -2022,7 +2073,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
78308
78309 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
78310 }
78311-#endif
78312
78313 struct do_proc_dointvec_minmax_conv_param {
78314 int *min;
78315@@ -2169,8 +2219,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
78316 *i = val;
78317 } else {
78318 val = convdiv * (*i) / convmul;
78319- if (!first)
78320+ if (!first) {
78321 err = proc_put_char(&buffer, &left, '\t');
78322+ if (err)
78323+ break;
78324+ }
78325 err = proc_put_long(&buffer, &left, val, false);
78326 if (err)
78327 break;
78328@@ -2562,6 +2615,12 @@ int proc_dostring(struct ctl_table *table, int write,
78329 return -ENOSYS;
78330 }
78331
78332+int proc_dostring_modpriv(struct ctl_table *table, int write,
78333+ void __user *buffer, size_t *lenp, loff_t *ppos)
78334+{
78335+ return -ENOSYS;
78336+}
78337+
78338 int proc_dointvec(struct ctl_table *table, int write,
78339 void __user *buffer, size_t *lenp, loff_t *ppos)
78340 {
78341@@ -2618,5 +2677,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
78342 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
78343 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
78344 EXPORT_SYMBOL(proc_dostring);
78345+EXPORT_SYMBOL(proc_dostring_modpriv);
78346 EXPORT_SYMBOL(proc_doulongvec_minmax);
78347 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
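proc_dostring_modpriv() generalizes: any string sysctl whose writes should require CAP_SYS_MODULE can reuse it, as the modprobe_path entry above now does. A sketch of such a table entry, mirroring that hunk:

    {
            .procname     = "modprobe",
            .data         = &modprobe_path,
            .maxlen       = KMOD_PATH_LEN,
            .mode         = 0644,
            .proc_handler = proc_dostring_modpriv, /* write gated on CAP_SYS_MODULE */
    },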
78348diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
78349index 0ddf3a0..a199f50 100644
78350--- a/kernel/sysctl_binary.c
78351+++ b/kernel/sysctl_binary.c
78352@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
78353 int i;
78354
78355 set_fs(KERNEL_DS);
78356- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
78357+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
78358 set_fs(old_fs);
78359 if (result < 0)
78360 goto out_kfree;
78361@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
78362 }
78363
78364 set_fs(KERNEL_DS);
78365- result = vfs_write(file, buffer, str - buffer, &pos);
78366+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
78367 set_fs(old_fs);
78368 if (result < 0)
78369 goto out_kfree;
78370@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
78371 int i;
78372
78373 set_fs(KERNEL_DS);
78374- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
78375+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
78376 set_fs(old_fs);
78377 if (result < 0)
78378 goto out_kfree;
78379@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
78380 }
78381
78382 set_fs(KERNEL_DS);
78383- result = vfs_write(file, buffer, str - buffer, &pos);
78384+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
78385 set_fs(old_fs);
78386 if (result < 0)
78387 goto out_kfree;
78388@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
78389 int i;
78390
78391 set_fs(KERNEL_DS);
78392- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
78393+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
78394 set_fs(old_fs);
78395 if (result < 0)
78396 goto out;
78397@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
78398 __le16 dnaddr;
78399
78400 set_fs(KERNEL_DS);
78401- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
78402+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
78403 set_fs(old_fs);
78404 if (result < 0)
78405 goto out;
78406@@ -1234,7 +1234,7 @@ static ssize_t bin_dn_node_address(struct file *file,
78407 le16_to_cpu(dnaddr) & 0x3ff);
78408
78409 set_fs(KERNEL_DS);
78410- result = vfs_write(file, buf, len, &pos);
78411+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
78412 set_fs(old_fs);
78413 if (result < 0)
78414 goto out;
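All four casts in this file follow the same set_fs(KERNEL_DS) idiom: with the address limit raised, vfs_read()/vfs_write() may be handed a kernel buffer, and __force_user documents the deliberate address-space conversion for sparse instead of a bare __force cast. The recurring pattern, condensed:

    mm_segment_t old_fs = get_fs();

    set_fs(KERNEL_DS);
    result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
    set_fs(old_fs);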
78415diff --git a/kernel/taskstats.c b/kernel/taskstats.c
78416index 145bb4d..b2aa969 100644
78417--- a/kernel/taskstats.c
78418+++ b/kernel/taskstats.c
78419@@ -28,9 +28,12 @@
78420 #include <linux/fs.h>
78421 #include <linux/file.h>
78422 #include <linux/pid_namespace.h>
78423+#include <linux/grsecurity.h>
78424 #include <net/genetlink.h>
78425 #include <linux/atomic.h>
78426
78427+extern int gr_is_taskstats_denied(int pid);
78428+
78429 /*
78430 * Maximum length of a cpumask that can be specified in
78431 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
78432@@ -570,6 +573,9 @@ err:
78433
78434 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
78435 {
78436+ if (gr_is_taskstats_denied(current->pid))
78437+ return -EACCES;
78438+
78439 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
78440 return cmd_attr_register_cpumask(info);
78441 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
78442diff --git a/kernel/time.c b/kernel/time.c
78443index d226c6a..2f0d217 100644
78444--- a/kernel/time.c
78445+++ b/kernel/time.c
78446@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
78447 return error;
78448
78449 if (tz) {
78450+	/* do_settimeofday(), called below, already logs the
78451+	   change, so don't log it twice here */
78452+ if (!tv)
78453+ gr_log_timechange();
78454+
78455 sys_tz = *tz;
78456 update_vsyscall_tz();
78457 if (firsttime) {
78458@@ -493,7 +498,7 @@ EXPORT_SYMBOL(usecs_to_jiffies);
78459 * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
78460 * value to a scaled second value.
78461 */
78462-unsigned long
78463+unsigned long __intentional_overflow(-1)
78464 timespec_to_jiffies(const struct timespec *value)
78465 {
78466 unsigned long sec = value->tv_sec;
78467diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
78468index f11d83b..d016d91 100644
78469--- a/kernel/time/alarmtimer.c
78470+++ b/kernel/time/alarmtimer.c
78471@@ -750,7 +750,7 @@ static int __init alarmtimer_init(void)
78472 struct platform_device *pdev;
78473 int error = 0;
78474 int i;
78475- struct k_clock alarm_clock = {
78476+ static struct k_clock alarm_clock = {
78477 .clock_getres = alarm_clock_getres,
78478 .clock_get = alarm_clock_get,
78479 .timer_create = alarm_timer_create,
78480diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
78481index f113755..ec24223 100644
78482--- a/kernel/time/tick-broadcast.c
78483+++ b/kernel/time/tick-broadcast.c
78484@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
78485 * then clear the broadcast bit.
78486 */
78487 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
78488- int cpu = smp_processor_id();
78489+ cpu = smp_processor_id();
78490
78491 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
78492 tick_broadcast_clear_oneshot(cpu);
78493diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
78494index cbc6acb..3a77191 100644
78495--- a/kernel/time/timekeeping.c
78496+++ b/kernel/time/timekeeping.c
78497@@ -15,6 +15,7 @@
78498 #include <linux/init.h>
78499 #include <linux/mm.h>
78500 #include <linux/sched.h>
78501+#include <linux/grsecurity.h>
78502 #include <linux/syscore_ops.h>
78503 #include <linux/clocksource.h>
78504 #include <linux/jiffies.h>
78505@@ -412,6 +413,8 @@ int do_settimeofday(const struct timespec *tv)
78506 if (!timespec_valid_strict(tv))
78507 return -EINVAL;
78508
78509+ gr_log_timechange();
78510+
78511 write_seqlock_irqsave(&tk->lock, flags);
78512
78513 timekeeping_forward_now(tk);
78514diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
78515index af5a7e9..715611a 100644
78516--- a/kernel/time/timer_list.c
78517+++ b/kernel/time/timer_list.c
78518@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
78519
78520 static void print_name_offset(struct seq_file *m, void *sym)
78521 {
78522+#ifdef CONFIG_GRKERNSEC_HIDESYM
78523+ SEQ_printf(m, "<%p>", NULL);
78524+#else
78525 char symname[KSYM_NAME_LEN];
78526
78527 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
78528 SEQ_printf(m, "<%pK>", sym);
78529 else
78530 SEQ_printf(m, "%s", symname);
78531+#endif
78532 }
78533
78534 static void
78535@@ -112,7 +116,11 @@ next_one:
78536 static void
78537 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
78538 {
78539+#ifdef CONFIG_GRKERNSEC_HIDESYM
78540+ SEQ_printf(m, " .base: %p\n", NULL);
78541+#else
78542 SEQ_printf(m, " .base: %pK\n", base);
78543+#endif
78544 SEQ_printf(m, " .index: %d\n",
78545 base->index);
78546 SEQ_printf(m, " .resolution: %Lu nsecs\n",
78547@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
78548 {
78549 struct proc_dir_entry *pe;
78550
78551+#ifdef CONFIG_GRKERNSEC_PROC_ADD
78552+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
78553+#else
78554 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
78555+#endif
78556 if (!pe)
78557 return -ENOMEM;
78558 return 0;
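Under GRKERNSEC_HIDESYM the handler symbol is suppressed outright rather than relying on %pK plus kptr_restrict, and /proc/timer_list itself drops from 0444 to 0400. An illustrative (not captured) fragment of the resulting output difference:

    /* without HIDESYM:  #0: <ffff88003782dc40>, tick_sched_timer, ... */
    /* with HIDESYM:     #0: <  (null)>, <  (null)>, ...              */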
78559diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
78560index 0b537f2..40d6c20 100644
78561--- a/kernel/time/timer_stats.c
78562+++ b/kernel/time/timer_stats.c
78563@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
78564 static unsigned long nr_entries;
78565 static struct entry entries[MAX_ENTRIES];
78566
78567-static atomic_t overflow_count;
78568+static atomic_unchecked_t overflow_count;
78569
78570 /*
78571 * The entries are in a hash-table, for fast lookup:
78572@@ -140,7 +140,7 @@ static void reset_entries(void)
78573 nr_entries = 0;
78574 memset(entries, 0, sizeof(entries));
78575 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
78576- atomic_set(&overflow_count, 0);
78577+ atomic_set_unchecked(&overflow_count, 0);
78578 }
78579
78580 static struct entry *alloc_entry(void)
78581@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
78582 if (likely(entry))
78583 entry->count++;
78584 else
78585- atomic_inc(&overflow_count);
78586+ atomic_inc_unchecked(&overflow_count);
78587
78588 out_unlock:
78589 raw_spin_unlock_irqrestore(lock, flags);
78590@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
78591
78592 static void print_name_offset(struct seq_file *m, unsigned long addr)
78593 {
78594+#ifdef CONFIG_GRKERNSEC_HIDESYM
78595+ seq_printf(m, "<%p>", NULL);
78596+#else
78597 char symname[KSYM_NAME_LEN];
78598
78599 if (lookup_symbol_name(addr, symname) < 0)
78600- seq_printf(m, "<%p>", (void *)addr);
78601+ seq_printf(m, "<%pK>", (void *)addr);
78602 else
78603 seq_printf(m, "%s", symname);
78604+#endif
78605 }
78606
78607 static int tstats_show(struct seq_file *m, void *v)
78608@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
78609
78610 seq_puts(m, "Timer Stats Version: v0.2\n");
78611 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
78612- if (atomic_read(&overflow_count))
78613+ if (atomic_read_unchecked(&overflow_count))
78614 seq_printf(m, "Overflow: %d entries\n",
78615- atomic_read(&overflow_count));
78616+ atomic_read_unchecked(&overflow_count));
78617
78618 for (i = 0; i < nr_entries; i++) {
78619 entry = entries + i;
78620@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
78621 {
78622 struct proc_dir_entry *pe;
78623
78624+#ifdef CONFIG_GRKERNSEC_PROC_ADD
78625+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
78626+#else
78627 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
78628+#endif
78629 if (!pe)
78630 return -ENOMEM;
78631 return 0;
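atomic_unchecked_t is assumed to be the PaX REFCOUNT opt-out: plain atomic_t gains overflow trapping, so counters that are pure statistics and may legitimately wrap, like overflow_count here, switch to the unchecked variants. The idiom, as used in this file:

    static atomic_unchecked_t overflow_count;

    atomic_inc_unchecked(&overflow_count);   /* wrapping is acceptable here */
    if (atomic_read_unchecked(&overflow_count))
            seq_printf(m, "Overflow: %d entries\n",
                       atomic_read_unchecked(&overflow_count));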
78632diff --git a/kernel/timer.c b/kernel/timer.c
78633index 367d008..5dee98f 100644
78634--- a/kernel/timer.c
78635+++ b/kernel/timer.c
78636@@ -1363,7 +1363,7 @@ void update_process_times(int user_tick)
78637 /*
78638 * This function runs timers and the timer-tq in bottom half context.
78639 */
78640-static void run_timer_softirq(struct softirq_action *h)
78641+static void run_timer_softirq(void)
78642 {
78643 struct tvec_base *base = __this_cpu_read(tvec_bases);
78644
78645@@ -1481,7 +1481,7 @@ static void process_timeout(unsigned long __data)
78646 *
78647 * In all cases the return value is guaranteed to be non-negative.
78648 */
78649-signed long __sched schedule_timeout(signed long timeout)
78650+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
78651 {
78652 struct timer_list timer;
78653 unsigned long expire;
78654@@ -1772,7 +1772,7 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
78655 return NOTIFY_OK;
78656 }
78657
78658-static struct notifier_block __cpuinitdata timers_nb = {
78659+static struct notifier_block timers_nb = {
78660 .notifier_call = timer_cpu_notify,
78661 };
78662
78663diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
78664index c0bd030..62a1927 100644
78665--- a/kernel/trace/blktrace.c
78666+++ b/kernel/trace/blktrace.c
78667@@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
78668 struct blk_trace *bt = filp->private_data;
78669 char buf[16];
78670
78671- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
78672+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
78673
78674 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
78675 }
78676@@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
78677 return 1;
78678
78679 bt = buf->chan->private_data;
78680- atomic_inc(&bt->dropped);
78681+ atomic_inc_unchecked(&bt->dropped);
78682 return 0;
78683 }
78684
78685@@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
78686
78687 bt->dir = dir;
78688 bt->dev = dev;
78689- atomic_set(&bt->dropped, 0);
78690+ atomic_set_unchecked(&bt->dropped, 0);
78691
78692 ret = -EIO;
78693 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
78694diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
78695index 43defd1..76da436 100644
78696--- a/kernel/trace/ftrace.c
78697+++ b/kernel/trace/ftrace.c
78698@@ -1874,12 +1874,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
78699 if (unlikely(ftrace_disabled))
78700 return 0;
78701
78702+ ret = ftrace_arch_code_modify_prepare();
78703+ FTRACE_WARN_ON(ret);
78704+ if (ret)
78705+ return 0;
78706+
78707 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
78708+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
78709 if (ret) {
78710 ftrace_bug(ret, ip);
78711- return 0;
78712 }
78713- return 1;
78714+ return ret ? 0 : 1;
78715 }
78716
78717 /*
78718@@ -2965,7 +2970,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
78719
78720 int
78721 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
78722- void *data)
78723+ void *data)
78724 {
78725 struct ftrace_func_probe *entry;
78726 struct ftrace_page *pg;
78727@@ -3832,8 +3837,10 @@ static int ftrace_process_locs(struct module *mod,
78728 if (!count)
78729 return 0;
78730
78731+ pax_open_kernel();
78732 sort(start, count, sizeof(*start),
78733 ftrace_cmp_ips, ftrace_swap_ips);
78734+ pax_close_kernel();
78735
78736 start_pg = ftrace_allocate_pages(count);
78737 if (!start_pg)
78738@@ -4559,8 +4566,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
78739 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
78740
78741 static int ftrace_graph_active;
78742-static struct notifier_block ftrace_suspend_notifier;
78743-
78744 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
78745 {
78746 return 0;
78747@@ -4704,6 +4709,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
78748 return NOTIFY_DONE;
78749 }
78750
78751+static struct notifier_block ftrace_suspend_notifier = {
78752+ .notifier_call = ftrace_suspend_notifier_call
78753+};
78754+
78755 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
78756 trace_func_graph_ent_t entryfunc)
78757 {
78758@@ -4717,7 +4726,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
78759 goto out;
78760 }
78761
78762- ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
78763 register_pm_notifier(&ftrace_suspend_notifier);
78764
78765 ftrace_graph_active++;
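pax_open_kernel()/pax_close_kernel() are taken here to briefly disable kernel write protection so data the plugins made read-only, such as the mcount location table being sorted above, can still be written at module-load time. A heavily paraphrased x86/KERNEXEC-style sketch — the real helpers are arch-specific, return the old CR0 value, and degrade to no-ops without KERNEXEC:

    static inline void pax_open_kernel(void)
    {
            preempt_disable();
            barrier();
            write_cr0(read_cr0() & ~X86_CR0_WP);  /* allow writes to RO pages */
    }

    static inline void pax_close_kernel(void)
    {
            barrier();
            write_cr0(read_cr0() | X86_CR0_WP);   /* restore write protection */
            preempt_enable();
    }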
78766diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
78767index ce8514f..8233573 100644
78768--- a/kernel/trace/ring_buffer.c
78769+++ b/kernel/trace/ring_buffer.c
78770@@ -346,9 +346,9 @@ struct buffer_data_page {
78771 */
78772 struct buffer_page {
78773 struct list_head list; /* list of buffer pages */
78774- local_t write; /* index for next write */
78775+ local_unchecked_t write; /* index for next write */
78776 unsigned read; /* index for next read */
78777- local_t entries; /* entries on this page */
78778+ local_unchecked_t entries; /* entries on this page */
78779 unsigned long real_end; /* real end of data */
78780 struct buffer_data_page *page; /* Actual data page */
78781 };
78782@@ -461,8 +461,8 @@ struct ring_buffer_per_cpu {
78783 unsigned long last_overrun;
78784 local_t entries_bytes;
78785 local_t entries;
78786- local_t overrun;
78787- local_t commit_overrun;
78788+ local_unchecked_t overrun;
78789+ local_unchecked_t commit_overrun;
78790 local_t dropped_events;
78791 local_t committing;
78792 local_t commits;
78793@@ -861,8 +861,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
78794 *
78795 * We add a counter to the write field to denote this.
78796 */
78797- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
78798- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
78799+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
78800+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
78801
78802 /*
78803 * Just make sure we have seen our old_write and synchronize
78804@@ -890,8 +890,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
78805 * cmpxchg to only update if an interrupt did not already
78806 * do it for us. If the cmpxchg fails, we don't care.
78807 */
78808- (void)local_cmpxchg(&next_page->write, old_write, val);
78809- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
78810+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
78811+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
78812
78813 /*
78814 * No need to worry about races with clearing out the commit.
78815@@ -1250,12 +1250,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
78816
78817 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
78818 {
78819- return local_read(&bpage->entries) & RB_WRITE_MASK;
78820+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
78821 }
78822
78823 static inline unsigned long rb_page_write(struct buffer_page *bpage)
78824 {
78825- return local_read(&bpage->write) & RB_WRITE_MASK;
78826+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
78827 }
78828
78829 static int
78830@@ -1350,7 +1350,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
78831 * bytes consumed in ring buffer from here.
78832 * Increment overrun to account for the lost events.
78833 */
78834- local_add(page_entries, &cpu_buffer->overrun);
78835+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
78836 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
78837 }
78838
78839@@ -1906,7 +1906,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
78840 * it is our responsibility to update
78841 * the counters.
78842 */
78843- local_add(entries, &cpu_buffer->overrun);
78844+ local_add_unchecked(entries, &cpu_buffer->overrun);
78845 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
78846
78847 /*
78848@@ -2056,7 +2056,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
78849 if (tail == BUF_PAGE_SIZE)
78850 tail_page->real_end = 0;
78851
78852- local_sub(length, &tail_page->write);
78853+ local_sub_unchecked(length, &tail_page->write);
78854 return;
78855 }
78856
78857@@ -2091,7 +2091,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
78858 rb_event_set_padding(event);
78859
78860 /* Set the write back to the previous setting */
78861- local_sub(length, &tail_page->write);
78862+ local_sub_unchecked(length, &tail_page->write);
78863 return;
78864 }
78865
78866@@ -2103,7 +2103,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
78867
78868 /* Set write to end of buffer */
78869 length = (tail + length) - BUF_PAGE_SIZE;
78870- local_sub(length, &tail_page->write);
78871+ local_sub_unchecked(length, &tail_page->write);
78872 }
78873
78874 /*
78875@@ -2129,7 +2129,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
78876 * about it.
78877 */
78878 if (unlikely(next_page == commit_page)) {
78879- local_inc(&cpu_buffer->commit_overrun);
78880+ local_inc_unchecked(&cpu_buffer->commit_overrun);
78881 goto out_reset;
78882 }
78883
78884@@ -2185,7 +2185,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
78885 cpu_buffer->tail_page) &&
78886 (cpu_buffer->commit_page ==
78887 cpu_buffer->reader_page))) {
78888- local_inc(&cpu_buffer->commit_overrun);
78889+ local_inc_unchecked(&cpu_buffer->commit_overrun);
78890 goto out_reset;
78891 }
78892 }
78893@@ -2233,7 +2233,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
78894 length += RB_LEN_TIME_EXTEND;
78895
78896 tail_page = cpu_buffer->tail_page;
78897- write = local_add_return(length, &tail_page->write);
78898+ write = local_add_return_unchecked(length, &tail_page->write);
78899
78900 /* set write to only the index of the write */
78901 write &= RB_WRITE_MASK;
78902@@ -2250,7 +2250,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
78903 kmemcheck_annotate_bitfield(event, bitfield);
78904 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
78905
78906- local_inc(&tail_page->entries);
78907+ local_inc_unchecked(&tail_page->entries);
78908
78909 /*
78910 * If this is the first commit on the page, then update
78911@@ -2283,7 +2283,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
78912
78913 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
78914 unsigned long write_mask =
78915- local_read(&bpage->write) & ~RB_WRITE_MASK;
78916+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
78917 unsigned long event_length = rb_event_length(event);
78918 /*
78919 * This is on the tail page. It is possible that
78920@@ -2293,7 +2293,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
78921 */
78922 old_index += write_mask;
78923 new_index += write_mask;
78924- index = local_cmpxchg(&bpage->write, old_index, new_index);
78925+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
78926 if (index == old_index) {
78927 /* update counters */
78928 local_sub(event_length, &cpu_buffer->entries_bytes);
78929@@ -2632,7 +2632,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
78930
78931 /* Do the likely case first */
78932 if (likely(bpage->page == (void *)addr)) {
78933- local_dec(&bpage->entries);
78934+ local_dec_unchecked(&bpage->entries);
78935 return;
78936 }
78937
78938@@ -2644,7 +2644,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
78939 start = bpage;
78940 do {
78941 if (bpage->page == (void *)addr) {
78942- local_dec(&bpage->entries);
78943+ local_dec_unchecked(&bpage->entries);
78944 return;
78945 }
78946 rb_inc_page(cpu_buffer, &bpage);
78947@@ -2926,7 +2926,7 @@ static inline unsigned long
78948 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
78949 {
78950 return local_read(&cpu_buffer->entries) -
78951- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
78952+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
78953 }
78954
78955 /**
78956@@ -3015,7 +3015,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
78957 return 0;
78958
78959 cpu_buffer = buffer->buffers[cpu];
78960- ret = local_read(&cpu_buffer->overrun);
78961+ ret = local_read_unchecked(&cpu_buffer->overrun);
78962
78963 return ret;
78964 }
78965@@ -3038,7 +3038,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
78966 return 0;
78967
78968 cpu_buffer = buffer->buffers[cpu];
78969- ret = local_read(&cpu_buffer->commit_overrun);
78970+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
78971
78972 return ret;
78973 }
78974@@ -3105,7 +3105,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
78975 /* if you care about this being correct, lock the buffer */
78976 for_each_buffer_cpu(buffer, cpu) {
78977 cpu_buffer = buffer->buffers[cpu];
78978- overruns += local_read(&cpu_buffer->overrun);
78979+ overruns += local_read_unchecked(&cpu_buffer->overrun);
78980 }
78981
78982 return overruns;
78983@@ -3281,8 +3281,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
78984 /*
78985 * Reset the reader page to size zero.
78986 */
78987- local_set(&cpu_buffer->reader_page->write, 0);
78988- local_set(&cpu_buffer->reader_page->entries, 0);
78989+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
78990+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
78991 local_set(&cpu_buffer->reader_page->page->commit, 0);
78992 cpu_buffer->reader_page->real_end = 0;
78993
78994@@ -3316,7 +3316,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
78995 * want to compare with the last_overrun.
78996 */
78997 smp_mb();
78998- overwrite = local_read(&(cpu_buffer->overrun));
78999+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
79000
79001 /*
79002 * Here's the tricky part.
79003@@ -3886,8 +3886,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
79004
79005 cpu_buffer->head_page
79006 = list_entry(cpu_buffer->pages, struct buffer_page, list);
79007- local_set(&cpu_buffer->head_page->write, 0);
79008- local_set(&cpu_buffer->head_page->entries, 0);
79009+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
79010+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
79011 local_set(&cpu_buffer->head_page->page->commit, 0);
79012
79013 cpu_buffer->head_page->read = 0;
79014@@ -3897,14 +3897,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
79015
79016 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
79017 INIT_LIST_HEAD(&cpu_buffer->new_pages);
79018- local_set(&cpu_buffer->reader_page->write, 0);
79019- local_set(&cpu_buffer->reader_page->entries, 0);
79020+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
79021+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
79022 local_set(&cpu_buffer->reader_page->page->commit, 0);
79023 cpu_buffer->reader_page->read = 0;
79024
79025 local_set(&cpu_buffer->entries_bytes, 0);
79026- local_set(&cpu_buffer->overrun, 0);
79027- local_set(&cpu_buffer->commit_overrun, 0);
79028+ local_set_unchecked(&cpu_buffer->overrun, 0);
79029+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
79030 local_set(&cpu_buffer->dropped_events, 0);
79031 local_set(&cpu_buffer->entries, 0);
79032 local_set(&cpu_buffer->committing, 0);
79033@@ -4308,8 +4308,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
79034 rb_init_page(bpage);
79035 bpage = reader->page;
79036 reader->page = *data_page;
79037- local_set(&reader->write, 0);
79038- local_set(&reader->entries, 0);
79039+ local_set_unchecked(&reader->write, 0);
79040+ local_set_unchecked(&reader->entries, 0);
79041 reader->read = 0;
79042 *data_page = bpage;
79043
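local_unchecked_t mirrors atomic_unchecked_t for per-cpu local_t: the ring buffer's write and entries fields deliberately wrap and carry interrupt bookkeeping bits above RB_WRITE_MASK, so overflow trapping must be disabled for them. The masking idiom the conversions preserve, as in __rb_reserve_next() above:

    unsigned long write;

    write = local_add_return_unchecked(length, &tail_page->write);
    write &= RB_WRITE_MASK;   /* strip the interrupt-count bits */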
79044diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
79045index 3c13e46..883d039 100644
79046--- a/kernel/trace/trace.c
79047+++ b/kernel/trace/trace.c
79048@@ -4465,10 +4465,9 @@ static const struct file_operations tracing_dyn_info_fops = {
79049 };
79050 #endif
79051
79052-static struct dentry *d_tracer;
79053-
79054 struct dentry *tracing_init_dentry(void)
79055 {
79056+ static struct dentry *d_tracer;
79057 static int once;
79058
79059 if (d_tracer)
79060@@ -4488,10 +4487,9 @@ struct dentry *tracing_init_dentry(void)
79061 return d_tracer;
79062 }
79063
79064-static struct dentry *d_percpu;
79065-
79066 struct dentry *tracing_dentry_percpu(void)
79067 {
79068+ static struct dentry *d_percpu;
79069 static int once;
79070 struct dentry *d_tracer;
79071
79072diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
79073index 880073d..42db7c3 100644
79074--- a/kernel/trace/trace_events.c
79075+++ b/kernel/trace/trace_events.c
79076@@ -1330,10 +1330,6 @@ static LIST_HEAD(ftrace_module_file_list);
79077 struct ftrace_module_file_ops {
79078 struct list_head list;
79079 struct module *mod;
79080- struct file_operations id;
79081- struct file_operations enable;
79082- struct file_operations format;
79083- struct file_operations filter;
79084 };
79085
79086 static struct ftrace_module_file_ops *
79087@@ -1354,17 +1350,12 @@ trace_create_file_ops(struct module *mod)
79088
79089 file_ops->mod = mod;
79090
79091- file_ops->id = ftrace_event_id_fops;
79092- file_ops->id.owner = mod;
79093-
79094- file_ops->enable = ftrace_enable_fops;
79095- file_ops->enable.owner = mod;
79096-
79097- file_ops->filter = ftrace_event_filter_fops;
79098- file_ops->filter.owner = mod;
79099-
79100- file_ops->format = ftrace_event_format_fops;
79101- file_ops->format.owner = mod;
79102+ pax_open_kernel();
79103+ mod->trace_id.owner = mod;
79104+ mod->trace_enable.owner = mod;
79105+ mod->trace_filter.owner = mod;
79106+ mod->trace_format.owner = mod;
79107+ pax_close_kernel();
79108
79109 list_add(&file_ops->list, &ftrace_module_file_list);
79110
79111@@ -1388,8 +1379,8 @@ static void trace_module_add_events(struct module *mod)
79112
79113 for_each_event(call, start, end) {
79114 __trace_add_event_call(*call, mod,
79115- &file_ops->id, &file_ops->enable,
79116- &file_ops->filter, &file_ops->format);
79117+ &mod->trace_id, &mod->trace_enable,
79118+ &mod->trace_filter, &mod->trace_format);
79119 }
79120 }
79121
79122diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
79123index fd3c8aa..5f324a6 100644
79124--- a/kernel/trace/trace_mmiotrace.c
79125+++ b/kernel/trace/trace_mmiotrace.c
79126@@ -24,7 +24,7 @@ struct header_iter {
79127 static struct trace_array *mmio_trace_array;
79128 static bool overrun_detected;
79129 static unsigned long prev_overruns;
79130-static atomic_t dropped_count;
79131+static atomic_unchecked_t dropped_count;
79132
79133 static void mmio_reset_data(struct trace_array *tr)
79134 {
79135@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
79136
79137 static unsigned long count_overruns(struct trace_iterator *iter)
79138 {
79139- unsigned long cnt = atomic_xchg(&dropped_count, 0);
79140+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
79141 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
79142
79143 if (over > prev_overruns)
79144@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
79145 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
79146 sizeof(*entry), 0, pc);
79147 if (!event) {
79148- atomic_inc(&dropped_count);
79149+ atomic_inc_unchecked(&dropped_count);
79150 return;
79151 }
79152 entry = ring_buffer_event_data(event);
79153@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
79154 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
79155 sizeof(*entry), 0, pc);
79156 if (!event) {
79157- atomic_inc(&dropped_count);
79158+ atomic_inc_unchecked(&dropped_count);
79159 return;
79160 }
79161 entry = ring_buffer_event_data(event);
79162diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
79163index 194d796..76edb8f 100644
79164--- a/kernel/trace/trace_output.c
79165+++ b/kernel/trace/trace_output.c
79166@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
79167
79168 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
79169 if (!IS_ERR(p)) {
79170- p = mangle_path(s->buffer + s->len, p, "\n");
79171+ p = mangle_path(s->buffer + s->len, p, "\n\\");
79172 if (p) {
79173 s->len = p - s->buffer;
79174 return 1;
79175@@ -852,14 +852,16 @@ int register_ftrace_event(struct trace_event *event)
79176 goto out;
79177 }
79178
79179+ pax_open_kernel();
79180 if (event->funcs->trace == NULL)
79181- event->funcs->trace = trace_nop_print;
79182+ *(void **)&event->funcs->trace = trace_nop_print;
79183 if (event->funcs->raw == NULL)
79184- event->funcs->raw = trace_nop_print;
79185+ *(void **)&event->funcs->raw = trace_nop_print;
79186 if (event->funcs->hex == NULL)
79187- event->funcs->hex = trace_nop_print;
79188+ *(void **)&event->funcs->hex = trace_nop_print;
79189 if (event->funcs->binary == NULL)
79190- event->funcs->binary = trace_nop_print;
79191+ *(void **)&event->funcs->binary = trace_nop_print;
79192+ pax_close_kernel();
79193
79194 key = event->type & (EVENT_HASHSIZE - 1);
79195
79196diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
79197index 42ca822..cdcacc6 100644
79198--- a/kernel/trace/trace_stack.c
79199+++ b/kernel/trace/trace_stack.c
79200@@ -52,7 +52,7 @@ static inline void check_stack(void)
79201 return;
79202
79203 /* we do not handle interrupt stacks yet */
79204- if (!object_is_on_stack(&this_size))
79205+ if (!object_starts_on_stack(&this_size))
79206 return;
79207
79208 local_irq_save(flags);
79209diff --git a/kernel/user.c b/kernel/user.c
79210index 33acb5e..57ebfd4 100644
79211--- a/kernel/user.c
79212+++ b/kernel/user.c
79213@@ -47,9 +47,7 @@ struct user_namespace init_user_ns = {
79214 .count = 4294967295U,
79215 },
79216 },
79217- .kref = {
79218- .refcount = ATOMIC_INIT(3),
79219- },
79220+ .count = ATOMIC_INIT(3),
79221 .owner = GLOBAL_ROOT_UID,
79222 .group = GLOBAL_ROOT_GID,
79223 .proc_inum = PROC_USER_INIT_INO,
79224diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
79225index dbfe36a7..2a3c1df 100644
79226--- a/kernel/user_namespace.c
79227+++ b/kernel/user_namespace.c
79228@@ -79,7 +79,7 @@ int create_user_ns(struct cred *new)
79229 return ret;
79230 }
79231
79232- kref_init(&ns->kref);
79233+ atomic_set(&ns->count, 1);
79234 /* Leave the new->user_ns reference with the new user namespace. */
79235 ns->parent = parent_ns;
79236 ns->owner = owner;
79237@@ -105,15 +105,16 @@ int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
79238 return create_user_ns(cred);
79239 }
79240
79241-void free_user_ns(struct kref *kref)
79242+void free_user_ns(struct user_namespace *ns)
79243 {
79244- struct user_namespace *parent, *ns =
79245- container_of(kref, struct user_namespace, kref);
79246+ struct user_namespace *parent;
79247
79248- parent = ns->parent;
79249- proc_free_inum(ns->proc_inum);
79250- kmem_cache_free(user_ns_cachep, ns);
79251- put_user_ns(parent);
79252+ do {
79253+ parent = ns->parent;
79254+ proc_free_inum(ns->proc_inum);
79255+ kmem_cache_free(user_ns_cachep, ns);
79256+ ns = parent;
79257+ } while (atomic_dec_and_test(&parent->count));
79258 }
79259 EXPORT_SYMBOL(free_user_ns);
79260
79261@@ -804,7 +805,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
79262 if (atomic_read(&current->mm->mm_users) > 1)
79263 return -EINVAL;
79264
79265- if (current->fs->users != 1)
79266+ if (atomic_read(&current->fs->users) != 1)
79267 return -EINVAL;
79268
79269 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
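Dropping the kref lets put_user_ns() release whole parent chains iteratively, avoiding recursion through deeply nested namespaces. The companion helper this change assumes, paraphrased from the header change presumably made elsewhere in this patch:

    static inline void put_user_ns(struct user_namespace *ns)
    {
            if (ns && atomic_dec_and_test(&ns->count))
                    free_user_ns(ns);   /* walks and frees the parent chain */
    }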
79270diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
79271index 63da38c..639904e 100644
79272--- a/kernel/utsname_sysctl.c
79273+++ b/kernel/utsname_sysctl.c
79274@@ -46,7 +46,7 @@ static void put_uts(ctl_table *table, int write, void *which)
79275 static int proc_do_uts_string(ctl_table *table, int write,
79276 void __user *buffer, size_t *lenp, loff_t *ppos)
79277 {
79278- struct ctl_table uts_table;
79279+ ctl_table_no_const uts_table;
79280 int r;
79281 memcpy(&uts_table, table, sizeof(uts_table));
79282 uts_table.data = get_uts(table, write);
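ctl_table_no_const is read here as the constify plugin's escape hatch: struct ctl_table instances are made const at compile time, so an on-stack copy that must be mutated, as in proc_do_uts_string() here and proc_taint() earlier, is declared with the non-constified typedef. The pattern:

    ctl_table_no_const uts_table;          /* writable stack copy */

    memcpy(&uts_table, table, sizeof(uts_table));
    uts_table.data = get_uts(table, write);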
79283diff --git a/kernel/watchdog.c b/kernel/watchdog.c
79284index 75a2ab3..5961da7 100644
79285--- a/kernel/watchdog.c
79286+++ b/kernel/watchdog.c
79287@@ -527,7 +527,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
79288 }
79289 #endif /* CONFIG_SYSCTL */
79290
79291-static struct smp_hotplug_thread watchdog_threads = {
79292+static struct smp_hotplug_thread watchdog_threads __read_only = {
79293 .store = &softlockup_watchdog,
79294 .thread_should_run = watchdog_should_run,
79295 .thread_fn = watchdog,
79296diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
79297index 67604e5..fe94fb1 100644
79298--- a/lib/Kconfig.debug
79299+++ b/lib/Kconfig.debug
79300@@ -550,7 +550,7 @@ config DEBUG_MUTEXES
79301
79302 config DEBUG_LOCK_ALLOC
79303 bool "Lock debugging: detect incorrect freeing of live locks"
79304- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
79305+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
79306 select DEBUG_SPINLOCK
79307 select DEBUG_MUTEXES
79308 select LOCKDEP
79309@@ -564,7 +564,7 @@ config DEBUG_LOCK_ALLOC
79310
79311 config PROVE_LOCKING
79312 bool "Lock debugging: prove locking correctness"
79313- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
79314+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
79315 select LOCKDEP
79316 select DEBUG_SPINLOCK
79317 select DEBUG_MUTEXES
79318@@ -670,7 +670,7 @@ config LOCKDEP
79319
79320 config LOCK_STAT
79321 bool "Lock usage statistics"
79322- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
79323+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
79324 select LOCKDEP
79325 select DEBUG_SPINLOCK
79326 select DEBUG_MUTEXES
79327@@ -1278,6 +1278,7 @@ config LATENCYTOP
79328 depends on DEBUG_KERNEL
79329 depends on STACKTRACE_SUPPORT
79330 depends on PROC_FS
79331+ depends on !GRKERNSEC_HIDESYM
79332 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
79333 select KALLSYMS
79334 select KALLSYMS_ALL
79335@@ -1306,7 +1307,7 @@ config INTERVAL_TREE_TEST
79336
79337 config PROVIDE_OHCI1394_DMA_INIT
79338 bool "Remote debugging over FireWire early on boot"
79339- depends on PCI && X86
79340+ depends on PCI && X86 && !GRKERNSEC
79341 help
79342 If you want to debug problems which hang or crash the kernel early
79343 on boot and the crashing machine has a FireWire port, you can use
79344@@ -1335,7 +1336,7 @@ config PROVIDE_OHCI1394_DMA_INIT
79345
79346 config FIREWIRE_OHCI_REMOTE_DMA
79347 bool "Remote debugging over FireWire with firewire-ohci"
79348- depends on FIREWIRE_OHCI
79349+ depends on FIREWIRE_OHCI && !GRKERNSEC
79350 help
79351 This option lets you use the FireWire bus for remote debugging
79352 with help of the firewire-ohci driver. It enables unfiltered
79353diff --git a/lib/Makefile b/lib/Makefile
79354index 02ed6c0..bd243da 100644
79355--- a/lib/Makefile
79356+++ b/lib/Makefile
79357@@ -47,7 +47,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
79358
79359 obj-$(CONFIG_BTREE) += btree.o
79360 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
79361-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
79362+obj-y += list_debug.o
79363 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
79364
79365 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
79366diff --git a/lib/bitmap.c b/lib/bitmap.c
79367index 06f7e4f..f3cf2b0 100644
79368--- a/lib/bitmap.c
79369+++ b/lib/bitmap.c
79370@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
79371 {
79372 int c, old_c, totaldigits, ndigits, nchunks, nbits;
79373 u32 chunk;
79374- const char __user __force *ubuf = (const char __user __force *)buf;
79375+ const char __user *ubuf = (const char __force_user *)buf;
79376
79377 bitmap_zero(maskp, nmaskbits);
79378
79379@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
79380 {
79381 if (!access_ok(VERIFY_READ, ubuf, ulen))
79382 return -EFAULT;
79383- return __bitmap_parse((const char __force *)ubuf,
79384+ return __bitmap_parse((const char __force_kernel *)ubuf,
79385 ulen, 1, maskp, nmaskbits);
79386
79387 }
79388@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
79389 {
79390 unsigned a, b;
79391 int c, old_c, totaldigits;
79392- const char __user __force *ubuf = (const char __user __force *)buf;
79393+ const char __user *ubuf = (const char __force_user *)buf;
79394 int exp_digit, in_range;
79395
79396 totaldigits = c = 0;
79397@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
79398 {
79399 if (!access_ok(VERIFY_READ, ubuf, ulen))
79400 return -EFAULT;
79401- return __bitmap_parselist((const char __force *)ubuf,
79402+ return __bitmap_parselist((const char __force_kernel *)ubuf,
79403 ulen, 1, maskp, nmaskbits);
79404 }
79405 EXPORT_SYMBOL(bitmap_parselist_user);
79406diff --git a/lib/bug.c b/lib/bug.c
79407index d0cdf14..4d07bd2 100644
79408--- a/lib/bug.c
79409+++ b/lib/bug.c
79410@@ -134,6 +134,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
79411 return BUG_TRAP_TYPE_NONE;
79412
79413 bug = find_bug(bugaddr);
79414+ if (!bug)
79415+ return BUG_TRAP_TYPE_NONE;
79416
79417 file = NULL;
79418 line = 0;
79419diff --git a/lib/debugobjects.c b/lib/debugobjects.c
79420index d11808c..dc2d6f8 100644
79421--- a/lib/debugobjects.c
79422+++ b/lib/debugobjects.c
79423@@ -287,7 +287,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
79424 if (limit > 4)
79425 return;
79426
79427- is_on_stack = object_is_on_stack(addr);
79428+ is_on_stack = object_starts_on_stack(addr);
79429 if (is_on_stack == onstack)
79430 return;
79431
79432diff --git a/lib/devres.c b/lib/devres.c
79433index 80b9c76..9e32279 100644
79434--- a/lib/devres.c
79435+++ b/lib/devres.c
79436@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
79437 void devm_iounmap(struct device *dev, void __iomem *addr)
79438 {
79439 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
79440- (void *)addr));
79441+ (void __force *)addr));
79442 iounmap(addr);
79443 }
79444 EXPORT_SYMBOL(devm_iounmap);
79445@@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
79446 {
79447 ioport_unmap(addr);
79448 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
79449- devm_ioport_map_match, (void *)addr));
79450+ devm_ioport_map_match, (void __force *)addr));
79451 }
79452 EXPORT_SYMBOL(devm_ioport_unmap);
79453
79454diff --git a/lib/div64.c b/lib/div64.c
79455index a163b6c..9618fa5 100644
79456--- a/lib/div64.c
79457+++ b/lib/div64.c
79458@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
79459 EXPORT_SYMBOL(__div64_32);
79460
79461 #ifndef div_s64_rem
79462-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
79463+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
79464 {
79465 u64 quotient;
79466
79467@@ -90,7 +90,7 @@ EXPORT_SYMBOL(div_s64_rem);
79468 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
79469 */
79470 #ifndef div64_u64
79471-u64 div64_u64(u64 dividend, u64 divisor)
79472+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
79473 {
79474 u32 high = divisor >> 32;
79475 u64 quot;
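
__intentional_overflow(-1) is an annotation consumed by the size_overflow
gcc plugin that ships with this patch: it exempts the marked function from
overflow instrumentation, here because the 64-bit division helpers
legitimately exercise the full value range. Outside the plugin the attribute
has to compile away; a hedged sketch of that shape (the fallback define and
the example function are illustrative, not from the patch):

#ifndef __intentional_overflow
# define __intentional_overflow(...)    /* no plugin: expands to nothing */
#endif

/* Sequence-number delta that is allowed to wrap modulo 2^32; the
 * annotation tells the plugin not to instrument the subtraction. */
static inline unsigned int __intentional_overflow(-1)
seq_delta(unsigned int now, unsigned int then)
{
        return now - then;
}
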
79476diff --git a/lib/dma-debug.c b/lib/dma-debug.c
79477index 5e396ac..58d5de1 100644
79478--- a/lib/dma-debug.c
79479+++ b/lib/dma-debug.c
79480@@ -768,7 +768,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
79481
79482 void dma_debug_add_bus(struct bus_type *bus)
79483 {
79484- struct notifier_block *nb;
79485+ notifier_block_no_const *nb;
79486
79487 if (global_disable)
79488 return;
79489@@ -942,7 +942,7 @@ out:
79490
79491 static void check_for_stack(struct device *dev, void *addr)
79492 {
79493- if (object_is_on_stack(addr))
79494+ if (object_starts_on_stack(addr))
79495 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
79496 "stack [addr=%p]\n", addr);
79497 }
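
Both lib/debugobjects.c and lib/dma-debug.c are switched from
object_is_on_stack() to object_starts_on_stack(). The rename makes the
semantics explicit: only the object's first byte is tested against the
current task's stack, which is all these callers need. A kernel-context
sketch of the check, assuming the usual task_stack_page()/THREAD_SIZE layout
(the real helper lives in the patched include/linux/sched.h):

#include <linux/sched.h>
#include <linux/thread_info.h>

static inline int object_starts_on_stack(const void *obj)
{
        const void *stack = task_stack_page(current);

        /* only the start address is examined, hence the name */
        return (obj >= stack) && (obj < stack + THREAD_SIZE);
}
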
79498diff --git a/lib/inflate.c b/lib/inflate.c
79499index 013a761..c28f3fc 100644
79500--- a/lib/inflate.c
79501+++ b/lib/inflate.c
79502@@ -269,7 +269,7 @@ static void free(void *where)
79503 malloc_ptr = free_mem_ptr;
79504 }
79505 #else
79506-#define malloc(a) kmalloc(a, GFP_KERNEL)
79507+#define malloc(a) kmalloc((a), GFP_KERNEL)
79508 #define free(a) kfree(a)
79509 #endif
79510
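
The lone lib/inflate.c change is macro hygiene: the argument of the malloc()
wrapper gets its own parentheses so an arbitrary expression passed as "a"
cannot regroup inside the expansion. A runnable userspace illustration of the
general hazard (both macros are hypothetical):

#include <stdio.h>

#define DOUBLE_BAD(a)   a * 2          /* argument not parenthesized */
#define DOUBLE_GOOD(a)  ((a) * 2)

int main(void)
{
        printf("%d\n", DOUBLE_BAD(1 + 2));   /* 1 + 2*2 = 5 */
        printf("%d\n", DOUBLE_GOOD(1 + 2));  /* (1+2)*2 = 6 */
        return 0;
}
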
79511diff --git a/lib/ioremap.c b/lib/ioremap.c
79512index 0c9216c..863bd89 100644
79513--- a/lib/ioremap.c
79514+++ b/lib/ioremap.c
79515@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
79516 unsigned long next;
79517
79518 phys_addr -= addr;
79519- pmd = pmd_alloc(&init_mm, pud, addr);
79520+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
79521 if (!pmd)
79522 return -ENOMEM;
79523 do {
79524@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
79525 unsigned long next;
79526
79527 phys_addr -= addr;
79528- pud = pud_alloc(&init_mm, pgd, addr);
79529+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
79530 if (!pud)
79531 return -ENOMEM;
79532 do {
79533diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
79534index bd2bea9..6b3c95e 100644
79535--- a/lib/is_single_threaded.c
79536+++ b/lib/is_single_threaded.c
79537@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
79538 struct task_struct *p, *t;
79539 bool ret;
79540
79541+ if (!mm)
79542+ return true;
79543+
79544 if (atomic_read(&task->signal->live) != 1)
79545 return false;
79546
79547diff --git a/lib/kobject.c b/lib/kobject.c
79548index e07ee1f..998489d 100644
79549--- a/lib/kobject.c
79550+++ b/lib/kobject.c
79551@@ -852,9 +852,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
79552
79553
79554 static DEFINE_SPINLOCK(kobj_ns_type_lock);
79555-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
79556+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
79557
79558-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
79559+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
79560 {
79561 enum kobj_ns_type type = ops->type;
79562 int error;
79563diff --git a/lib/list_debug.c b/lib/list_debug.c
79564index c24c2f7..0475b78 100644
79565--- a/lib/list_debug.c
79566+++ b/lib/list_debug.c
79567@@ -11,7 +11,9 @@
79568 #include <linux/bug.h>
79569 #include <linux/kernel.h>
79570 #include <linux/rculist.h>
79571+#include <linux/mm.h>
79572
79573+#ifdef CONFIG_DEBUG_LIST
79574 /*
79575 * Insert a new entry between two known consecutive entries.
79576 *
79577@@ -19,21 +21,32 @@
79578 * the prev/next entries already!
79579 */
79580
79581-void __list_add(struct list_head *new,
79582- struct list_head *prev,
79583- struct list_head *next)
79584+static bool __list_add_debug(struct list_head *new,
79585+ struct list_head *prev,
79586+ struct list_head *next)
79587 {
79588- WARN(next->prev != prev,
79589+ if (WARN(next->prev != prev,
79590 "list_add corruption. next->prev should be "
79591 "prev (%p), but was %p. (next=%p).\n",
79592- prev, next->prev, next);
79593- WARN(prev->next != next,
79594+ prev, next->prev, next) ||
79595+ WARN(prev->next != next,
79596 "list_add corruption. prev->next should be "
79597 "next (%p), but was %p. (prev=%p).\n",
79598- next, prev->next, prev);
79599- WARN(new == prev || new == next,
79600+ next, prev->next, prev) ||
79601+ WARN(new == prev || new == next,
79602 "list_add double add: new=%p, prev=%p, next=%p.\n",
79603- new, prev, next);
79604+ new, prev, next))
79605+ return false;
79606+ return true;
79607+}
79608+
79609+void __list_add(struct list_head *new,
79610+ struct list_head *prev,
79611+ struct list_head *next)
79612+{
79613+ if (!__list_add_debug(new, prev, next))
79614+ return;
79615+
79616 next->prev = new;
79617 new->next = next;
79618 new->prev = prev;
79619@@ -41,7 +54,7 @@ void __list_add(struct list_head *new,
79620 }
79621 EXPORT_SYMBOL(__list_add);
79622
79623-void __list_del_entry(struct list_head *entry)
79624+static bool __list_del_entry_debug(struct list_head *entry)
79625 {
79626 struct list_head *prev, *next;
79627
79628@@ -60,9 +73,16 @@ void __list_del_entry(struct list_head *entry)
79629 WARN(next->prev != entry,
79630 "list_del corruption. next->prev should be %p, "
79631 "but was %p\n", entry, next->prev))
79632+ return false;
79633+ return true;
79634+}
79635+
79636+void __list_del_entry(struct list_head *entry)
79637+{
79638+ if (!__list_del_entry_debug(entry))
79639 return;
79640
79641- __list_del(prev, next);
79642+ __list_del(entry->prev, entry->next);
79643 }
79644 EXPORT_SYMBOL(__list_del_entry);
79645
79646@@ -86,15 +106,85 @@ EXPORT_SYMBOL(list_del);
79647 void __list_add_rcu(struct list_head *new,
79648 struct list_head *prev, struct list_head *next)
79649 {
79650- WARN(next->prev != prev,
79651- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
79652- prev, next->prev, next);
79653- WARN(prev->next != next,
79654- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
79655- next, prev->next, prev);
79656+ if (!__list_add_debug(new, prev, next))
79657+ return;
79658+
79659 new->next = next;
79660 new->prev = prev;
79661 rcu_assign_pointer(list_next_rcu(prev), new);
79662 next->prev = new;
79663 }
79664 EXPORT_SYMBOL(__list_add_rcu);
79665+#endif
79666+
79667+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
79668+{
79669+#ifdef CONFIG_DEBUG_LIST
79670+ if (!__list_add_debug(new, prev, next))
79671+ return;
79672+#endif
79673+
79674+ pax_open_kernel();
79675+ next->prev = new;
79676+ new->next = next;
79677+ new->prev = prev;
79678+ prev->next = new;
79679+ pax_close_kernel();
79680+}
79681+EXPORT_SYMBOL(__pax_list_add);
79682+
79683+void pax_list_del(struct list_head *entry)
79684+{
79685+#ifdef CONFIG_DEBUG_LIST
79686+ if (!__list_del_entry_debug(entry))
79687+ return;
79688+#endif
79689+
79690+ pax_open_kernel();
79691+ __list_del(entry->prev, entry->next);
79692+ entry->next = LIST_POISON1;
79693+ entry->prev = LIST_POISON2;
79694+ pax_close_kernel();
79695+}
79696+EXPORT_SYMBOL(pax_list_del);
79697+
79698+void pax_list_del_init(struct list_head *entry)
79699+{
79700+ pax_open_kernel();
79701+ __list_del(entry->prev, entry->next);
79702+ INIT_LIST_HEAD(entry);
79703+ pax_close_kernel();
79704+}
79705+EXPORT_SYMBOL(pax_list_del_init);
79706+
79707+void __pax_list_add_rcu(struct list_head *new,
79708+ struct list_head *prev, struct list_head *next)
79709+{
79710+#ifdef CONFIG_DEBUG_LIST
79711+ if (!__list_add_debug(new, prev, next))
79712+ return;
79713+#endif
79714+
79715+ pax_open_kernel();
79716+ new->next = next;
79717+ new->prev = prev;
79718+ rcu_assign_pointer(list_next_rcu(prev), new);
79719+ next->prev = new;
79720+ pax_close_kernel();
79721+}
79722+EXPORT_SYMBOL(__pax_list_add_rcu);
79723+
79724+void pax_list_del_rcu(struct list_head *entry)
79725+{
79726+#ifdef CONFIG_DEBUG_LIST
79727+ if (!__list_del_entry_debug(entry))
79728+ return;
79729+#endif
79730+
79731+ pax_open_kernel();
79732+ __list_del(entry->prev, entry->next);
79733+ entry->next = LIST_POISON1;
79734+ entry->prev = LIST_POISON2;
79735+ pax_close_kernel();
79736+}
79737+EXPORT_SYMBOL(pax_list_del_rcu);
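
The list_debug rework does two things. First, the corruption checks move into
__list_add_debug()/__list_del_entry_debug() so that a detected corruption
aborts the operation instead of proceeding with the pointer writes. Second,
pax_-prefixed variants are added that bracket those writes with
pax_open_kernel()/pax_close_kernel(), for list heads that live in __read_only
data under KERNEXEC/CONSTIFY. A hedged kernel-context usage sketch (the
handler list and node type are hypothetical):

#include <linux/list.h>

/* The head sits in read-only data, so a plain list_add() would fault;
 * __pax_list_add() opens the write window first. */
static struct list_head handlers __read_only = LIST_HEAD_INIT(handlers);

struct handler {
        struct list_head node;
        void (*fn)(void);
};

static void handler_register(struct handler *h)
{
        /* same insertion point as list_add(): between head and head->next */
        __pax_list_add(&h->node, &handlers, handlers.next);
}
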
79738diff --git a/lib/radix-tree.c b/lib/radix-tree.c
79739index e796429..6e38f9f 100644
79740--- a/lib/radix-tree.c
79741+++ b/lib/radix-tree.c
79742@@ -92,7 +92,7 @@ struct radix_tree_preload {
79743 int nr;
79744 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
79745 };
79746-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
79747+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
79748
79749 static inline void *ptr_to_indirect(void *ptr)
79750 {
79751diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
79752index bb2b201..46abaf9 100644
79753--- a/lib/strncpy_from_user.c
79754+++ b/lib/strncpy_from_user.c
79755@@ -21,7 +21,7 @@
79756 */
79757 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
79758 {
79759- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
79760+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
79761 long res = 0;
79762
79763 /*
79764diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
79765index a28df52..3d55877 100644
79766--- a/lib/strnlen_user.c
79767+++ b/lib/strnlen_user.c
79768@@ -26,7 +26,7 @@
79769 */
79770 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
79771 {
79772- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
79773+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
79774 long align, res = 0;
79775 unsigned long c;
79776
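
In both string helpers the WORD_AT_A_TIME_CONSTANTS table becomes static, so
the bitmasks live in read-only data instead of being rebuilt on the stack on
every call. For reference, the trick those constants exist for is the classic
one-comparison-per-word zero-byte test; a runnable userspace rendition for
64-bit words:

#include <stdint.h>
#include <stdio.h>

/* A byte of x is zero iff its high bit survives
 * (x - 0x01..01) & ~x & 0x80..80; this is the test behind the
 * kernel's has_zero()/word-at-a-time machinery. */
static int has_zero_byte(uint64_t x)
{
        const uint64_t ones = 0x0101010101010101ULL;
        const uint64_t high = 0x8080808080808080ULL;

        return ((x - ones) & ~x & high) != 0;
}

int main(void)
{
        printf("%d\n", has_zero_byte(0x1122334455667788ULL)); /* 0 */
        printf("%d\n", has_zero_byte(0x1122004455667788ULL)); /* 1 */
        return 0;
}
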
79777diff --git a/lib/swiotlb.c b/lib/swiotlb.c
79778index 196b069..358f342 100644
79779--- a/lib/swiotlb.c
79780+++ b/lib/swiotlb.c
79781@@ -642,7 +642,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
79782
79783 void
79784 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
79785- dma_addr_t dev_addr)
79786+ dma_addr_t dev_addr, struct dma_attrs *attrs)
79787 {
79788 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
79789
79790diff --git a/lib/vsprintf.c b/lib/vsprintf.c
79791index fab33a9..3b5fe68 100644
79792--- a/lib/vsprintf.c
79793+++ b/lib/vsprintf.c
79794@@ -16,6 +16,9 @@
79795 * - scnprintf and vscnprintf
79796 */
79797
79798+#ifdef CONFIG_GRKERNSEC_HIDESYM
79799+#define __INCLUDED_BY_HIDESYM 1
79800+#endif
79801 #include <stdarg.h>
79802 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
79803 #include <linux/types.h>
79804@@ -541,7 +544,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
79805 char sym[KSYM_SYMBOL_LEN];
79806 if (ext == 'B')
79807 sprint_backtrace(sym, value);
79808- else if (ext != 'f' && ext != 's')
79809+ else if (ext != 'f' && ext != 's' && ext != 'a')
79810 sprint_symbol(sym, value);
79811 else
79812 sprint_symbol_no_offset(sym, value);
79813@@ -974,7 +977,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
79814 return number(buf, end, *(const netdev_features_t *)addr, spec);
79815 }
79816
79817+#ifdef CONFIG_GRKERNSEC_HIDESYM
79818+int kptr_restrict __read_mostly = 2;
79819+#else
79820 int kptr_restrict __read_mostly;
79821+#endif
79822
79823 /*
79824 * Show a '%p' thing. A kernel extension is that the '%p' is followed
79825@@ -988,6 +995,8 @@ int kptr_restrict __read_mostly;
79826 * - 'S' For symbolic direct pointers with offset
79827 * - 's' For symbolic direct pointers without offset
79828 * - 'B' For backtraced symbolic direct pointers with offset
79829+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
79830+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
79831 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
79832 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
79833 * - 'M' For a 6-byte MAC address, it prints the address in the
79834@@ -1043,12 +1052,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
79835
79836 if (!ptr && *fmt != 'K') {
79837 /*
79838- * Print (null) with the same width as a pointer so it makes
79839+ * Print (nil) with the same width as a pointer so it makes
79840 * tabular output look nice.
79841 */
79842 if (spec.field_width == -1)
79843 spec.field_width = default_width;
79844- return string(buf, end, "(null)", spec);
79845+ return string(buf, end, "(nil)", spec);
79846 }
79847
79848 switch (*fmt) {
79849@@ -1058,6 +1067,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
79850 /* Fallthrough */
79851 case 'S':
79852 case 's':
79853+#ifdef CONFIG_GRKERNSEC_HIDESYM
79854+ break;
79855+#else
79856+ return symbol_string(buf, end, ptr, spec, *fmt);
79857+#endif
79858+ case 'A':
79859+ case 'a':
79860 case 'B':
79861 return symbol_string(buf, end, ptr, spec, *fmt);
79862 case 'R':
79863@@ -1098,6 +1114,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
79864 va_end(va);
79865 return buf;
79866 }
79867+ case 'P':
79868+ break;
79869 case 'K':
79870 /*
79871 * %pK cannot be used in IRQ context because its test
79872@@ -1121,6 +1139,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
79873 }
79874 break;
79875 }
79876+
79877+#ifdef CONFIG_GRKERNSEC_HIDESYM
79878+	/* 'P' = approved pointers that may be copied to userland,
79879+	   as in the /proc/kallsyms case: we make it display nothing
79880+	   for non-root users and the real contents for root users.
79881+	   'K' pointers are likewise ignored, since we force their
79882+	   NULLing for non-root users above.
79883+	 */
79884+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
79885+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
79886+ dump_stack();
79887+ ptr = NULL;
79888+ }
79889+#endif
79890+
79891 spec.flags |= SMALL;
79892 if (spec.field_width == -1) {
79893 spec.field_width = default_width;
79894@@ -1842,11 +1875,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
79895 typeof(type) value; \
79896 if (sizeof(type) == 8) { \
79897 args = PTR_ALIGN(args, sizeof(u32)); \
79898- *(u32 *)&value = *(u32 *)args; \
79899- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
79900+ *(u32 *)&value = *(const u32 *)args; \
79901+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
79902 } else { \
79903 args = PTR_ALIGN(args, sizeof(type)); \
79904- value = *(typeof(type) *)args; \
79905+ value = *(const typeof(type) *)args; \
79906 } \
79907 args += sizeof(type); \
79908 value; \
79909@@ -1909,7 +1942,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
79910 case FORMAT_TYPE_STR: {
79911 const char *str_arg = args;
79912 args += strlen(str_arg) + 1;
79913- str = string(str, end, (char *)str_arg, spec);
79914+ str = string(str, end, str_arg, spec);
79915 break;
79916 }
79917
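
The vsprintf changes are the printk-facing half of GRKERNSEC_HIDESYM: %pA/%pa
become HIDESYM-approved symbol formats, %pP marks pointers explicitly
approved for copying to userland, kptr_restrict defaults to 2, "(null)"
becomes "(nil)", and any other %p whose output lands in a buffer destined for
userland (is_usercopy_object(buf)) is NULLed and logged as an infoleak. A
userspace sketch of just the kptr_restrict gating (the privileged flag stands
in for the kernel's capability check):

#include <stdbool.h>
#include <stdio.h>

static int kptr_restrict = 2;   /* the GRKERNSEC_HIDESYM default above */

static void print_ptr(const void *p, bool privileged)
{
        if (kptr_restrict == 2 || (kptr_restrict == 1 && !privileged))
                p = NULL;                       /* censored */
        printf("%p\n", p);                      /* glibc prints (nil) */
}

int main(void)
{
        int x;

        print_ptr(&x, true);    /* level 2 hides even from the privileged */
        print_ptr(&x, false);
        return 0;
}
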
79918diff --git a/localversion-grsec b/localversion-grsec
79919new file mode 100644
79920index 0000000..7cd6065
79921--- /dev/null
79922+++ b/localversion-grsec
79923@@ -0,0 +1 @@
79924+-grsec
79925diff --git a/mm/Kconfig b/mm/Kconfig
79926index 278e3ab..87c384d 100644
79927--- a/mm/Kconfig
79928+++ b/mm/Kconfig
79929@@ -286,10 +286,10 @@ config KSM
79930 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
79931
79932 config DEFAULT_MMAP_MIN_ADDR
79933- int "Low address space to protect from user allocation"
79934+ int "Low address space to protect from user allocation"
79935 depends on MMU
79936- default 4096
79937- help
79938+ default 65536
79939+ help
79940 This is the portion of low virtual memory which should be protected
79941 from userspace allocation. Keeping a user from writing to low pages
79942 can help reduce the impact of kernel NULL pointer bugs.
79943@@ -320,7 +320,7 @@ config MEMORY_FAILURE
79944
79945 config HWPOISON_INJECT
79946 tristate "HWPoison pages injector"
79947- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
79948+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
79949 select PROC_PAGE_MONITOR
79950
79951 config NOMMU_INITIAL_TRIM_EXCESS
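
Raising DEFAULT_MMAP_MIN_ADDR from 4096 to 65536 widens the unmappable window
at the bottom of the address space, so a kernel NULL-pointer dereference,
including ones through small structure offsets, cannot land in
attacker-controlled memory; HWPOISON_INJECT is additionally masked out
whenever grsecurity is enabled. The floor is easy to observe from userspace:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        /* 0x1000 is below a 65536-byte vm.mmap_min_addr floor, so an
         * unprivileged MAP_FIXED mapping there should fail (EPERM). */
        void *p = mmap((void *)0x1000, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

        if (p == MAP_FAILED)
                perror("mmap");
        else
                printf("mapped at %p\n", p);
        return 0;
}
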
79952diff --git a/mm/filemap.c b/mm/filemap.c
79953index 83efee7..3f99381 100644
79954--- a/mm/filemap.c
79955+++ b/mm/filemap.c
79956@@ -1747,7 +1747,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
79957 struct address_space *mapping = file->f_mapping;
79958
79959 if (!mapping->a_ops->readpage)
79960- return -ENOEXEC;
79961+ return -ENODEV;
79962 file_accessed(file);
79963 vma->vm_ops = &generic_file_vm_ops;
79964 return 0;
79965@@ -2087,6 +2087,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
79966 *pos = i_size_read(inode);
79967
79968 if (limit != RLIM_INFINITY) {
79969+		gr_learn_resource(current, RLIMIT_FSIZE, *pos, 0);
79970 if (*pos >= limit) {
79971 send_sig(SIGXFSZ, current, 0);
79972 return -EFBIG;
79973diff --git a/mm/fremap.c b/mm/fremap.c
79974index a0aaf0e..20325c3 100644
79975--- a/mm/fremap.c
79976+++ b/mm/fremap.c
79977@@ -157,6 +157,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
79978 retry:
79979 vma = find_vma(mm, start);
79980
79981+#ifdef CONFIG_PAX_SEGMEXEC
79982+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
79983+ goto out;
79984+#endif
79985+
79986 /*
79987 * Make sure the vma is shared, that it supports prefaulting,
79988 * and that the remapped range is valid and fully within
79989diff --git a/mm/highmem.c b/mm/highmem.c
79990index b32b70c..e512eb0 100644
79991--- a/mm/highmem.c
79992+++ b/mm/highmem.c
79993@@ -138,8 +138,9 @@ static void flush_all_zero_pkmaps(void)
79994 * So no dangers, even with speculative execution.
79995 */
79996 page = pte_page(pkmap_page_table[i]);
79997+ pax_open_kernel();
79998 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
79999-
80000+ pax_close_kernel();
80001 set_page_address(page, NULL);
80002 need_flush = 1;
80003 }
80004@@ -198,9 +199,11 @@ start:
80005 }
80006 }
80007 vaddr = PKMAP_ADDR(last_pkmap_nr);
80008+
80009+ pax_open_kernel();
80010 set_pte_at(&init_mm, vaddr,
80011 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
80012-
80013+ pax_close_kernel();
80014 pkmap_count[last_pkmap_nr] = 1;
80015 set_page_address(page, (void *)vaddr);
80016
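
The highmem hunks bracket the pkmap page-table updates with
pax_open_kernel()/pax_close_kernel(), because under KERNEXEC the kernel page
tables themselves are mapped read-only. On x86 the pair essentially toggles
CR0.WP with preemption disabled; a kernel-context sketch in that spirit
(names are illustrative, the patch's own x86 implementation lives in its
pgtable headers):

#include <linux/preempt.h>
#include <asm/processor-flags.h>
#include <asm/special_insns.h>

static inline void my_pax_open_kernel(void)
{
        preempt_disable();
        barrier();
        write_cr0(read_cr0() & ~X86_CR0_WP);    /* supervisor writes ignore RO */
}

static inline void my_pax_close_kernel(void)
{
        write_cr0(read_cr0() | X86_CR0_WP);     /* restore write protection */
        barrier();
        preempt_enable();
}
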
80017diff --git a/mm/hugetlb.c b/mm/hugetlb.c
80018index 546db81..34830af 100644
80019--- a/mm/hugetlb.c
80020+++ b/mm/hugetlb.c
80021@@ -2008,15 +2008,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
80022 struct hstate *h = &default_hstate;
80023 unsigned long tmp;
80024 int ret;
80025+ ctl_table_no_const hugetlb_table;
80026
80027 tmp = h->max_huge_pages;
80028
80029 if (write && h->order >= MAX_ORDER)
80030 return -EINVAL;
80031
80032- table->data = &tmp;
80033- table->maxlen = sizeof(unsigned long);
80034- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
80035+ hugetlb_table = *table;
80036+ hugetlb_table.data = &tmp;
80037+ hugetlb_table.maxlen = sizeof(unsigned long);
80038+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
80039 if (ret)
80040 goto out;
80041
80042@@ -2073,15 +2075,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
80043 struct hstate *h = &default_hstate;
80044 unsigned long tmp;
80045 int ret;
80046+ ctl_table_no_const hugetlb_table;
80047
80048 tmp = h->nr_overcommit_huge_pages;
80049
80050 if (write && h->order >= MAX_ORDER)
80051 return -EINVAL;
80052
80053- table->data = &tmp;
80054- table->maxlen = sizeof(unsigned long);
80055- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
80056+ hugetlb_table = *table;
80057+ hugetlb_table.data = &tmp;
80058+ hugetlb_table.maxlen = sizeof(unsigned long);
80059+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
80060 if (ret)
80061 goto out;
80062
80063@@ -2511,6 +2515,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
80064 return 1;
80065 }
80066
80067+#ifdef CONFIG_PAX_SEGMEXEC
80068+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
80069+{
80070+ struct mm_struct *mm = vma->vm_mm;
80071+ struct vm_area_struct *vma_m;
80072+ unsigned long address_m;
80073+ pte_t *ptep_m;
80074+
80075+ vma_m = pax_find_mirror_vma(vma);
80076+ if (!vma_m)
80077+ return;
80078+
80079+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
80080+ address_m = address + SEGMEXEC_TASK_SIZE;
80081+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
80082+ get_page(page_m);
80083+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
80084+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
80085+}
80086+#endif
80087+
80088 /*
80089 * Hugetlb_cow() should be called with page lock of the original hugepage held.
80090 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
80091@@ -2629,6 +2654,11 @@ retry_avoidcopy:
80092 make_huge_pte(vma, new_page, 1));
80093 page_remove_rmap(old_page);
80094 hugepage_add_new_anon_rmap(new_page, vma, address);
80095+
80096+#ifdef CONFIG_PAX_SEGMEXEC
80097+ pax_mirror_huge_pte(vma, address, new_page);
80098+#endif
80099+
80100 /* Make the old page be freed below */
80101 new_page = old_page;
80102 }
80103@@ -2788,6 +2818,10 @@ retry:
80104 && (vma->vm_flags & VM_SHARED)));
80105 set_huge_pte_at(mm, address, ptep, new_pte);
80106
80107+#ifdef CONFIG_PAX_SEGMEXEC
80108+ pax_mirror_huge_pte(vma, address, page);
80109+#endif
80110+
80111 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
80112 /* Optimization, do the COW without a second fault */
80113 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
80114@@ -2817,6 +2851,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
80115 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
80116 struct hstate *h = hstate_vma(vma);
80117
80118+#ifdef CONFIG_PAX_SEGMEXEC
80119+ struct vm_area_struct *vma_m;
80120+#endif
80121+
80122 address &= huge_page_mask(h);
80123
80124 ptep = huge_pte_offset(mm, address);
80125@@ -2830,6 +2868,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
80126 VM_FAULT_SET_HINDEX(hstate_index(h));
80127 }
80128
80129+#ifdef CONFIG_PAX_SEGMEXEC
80130+ vma_m = pax_find_mirror_vma(vma);
80131+ if (vma_m) {
80132+ unsigned long address_m;
80133+
80134+ if (vma->vm_start > vma_m->vm_start) {
80135+ address_m = address;
80136+ address -= SEGMEXEC_TASK_SIZE;
80137+ vma = vma_m;
80138+ h = hstate_vma(vma);
80139+ } else
80140+ address_m = address + SEGMEXEC_TASK_SIZE;
80141+
80142+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
80143+ return VM_FAULT_OOM;
80144+ address_m &= HPAGE_MASK;
80145+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
80146+ }
80147+#endif
80148+
80149 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
80150 if (!ptep)
80151 return VM_FAULT_OOM;
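
Two independent things happen in the hugetlb diff. The sysctl handlers stop
writing through their ctl_table argument, which the constify plugin may have
made read-only, and instead fill in a writable stack copy of type
ctl_table_no_const; and pax_mirror_huge_pte() plus the hugetlb_fault() hunk
extend SEGMEXEC's data/code mirroring to huge pages. The stack-copy pattern
generalizes to any constified sysctl handler; a hedged kernel-context sketch
(ctl_table_no_const is assumed to be a non-constified alias of struct
ctl_table, and the handler itself is hypothetical):

#include <linux/sysctl.h>

static unsigned long my_tunable;

static int my_handler(struct ctl_table *table, int write,
                      void __user *buffer, size_t *lenp, loff_t *ppos)
{
        unsigned long tmp = my_tunable;
        ctl_table_no_const copy = *table;       /* writable local copy */
        int ret;

        copy.data = &tmp;
        copy.maxlen = sizeof(tmp);
        ret = proc_doulongvec_minmax(&copy, write, buffer, lenp, ppos);
        if (!ret && write)
                my_tunable = tmp;
        return ret;
}
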
80152diff --git a/mm/internal.h b/mm/internal.h
80153index 9ba2110..eaf0674 100644
80154--- a/mm/internal.h
80155+++ b/mm/internal.h
80156@@ -100,6 +100,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
80157 * in mm/page_alloc.c
80158 */
80159 extern void __free_pages_bootmem(struct page *page, unsigned int order);
80160+extern void free_compound_page(struct page *page);
80161 extern void prep_compound_page(struct page *page, unsigned long order);
80162 #ifdef CONFIG_MEMORY_FAILURE
80163 extern bool is_free_buddy_page(struct page *page);
80164diff --git a/mm/kmemleak.c b/mm/kmemleak.c
80165index 752a705..6c3102e 100644
80166--- a/mm/kmemleak.c
80167+++ b/mm/kmemleak.c
80168@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
80169
80170 for (i = 0; i < object->trace_len; i++) {
80171 void *ptr = (void *)object->trace[i];
80172- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
80173+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
80174 }
80175 }
80176
80177@@ -1853,7 +1853,7 @@ static int __init kmemleak_late_init(void)
80178 return -ENOMEM;
80179 }
80180
80181- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
80182+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
80183 &kmemleak_fops);
80184 if (!dentry)
80185 pr_warning("Failed to create the debugfs kmemleak file\n");
80186diff --git a/mm/maccess.c b/mm/maccess.c
80187index d53adf9..03a24bf 100644
80188--- a/mm/maccess.c
80189+++ b/mm/maccess.c
80190@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
80191 set_fs(KERNEL_DS);
80192 pagefault_disable();
80193 ret = __copy_from_user_inatomic(dst,
80194- (__force const void __user *)src, size);
80195+ (const void __force_user *)src, size);
80196 pagefault_enable();
80197 set_fs(old_fs);
80198
80199@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
80200
80201 set_fs(KERNEL_DS);
80202 pagefault_disable();
80203- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
80204+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
80205 pagefault_enable();
80206 set_fs(old_fs);
80207
80208diff --git a/mm/madvise.c b/mm/madvise.c
80209index 03dfa5c..b032917 100644
80210--- a/mm/madvise.c
80211+++ b/mm/madvise.c
80212@@ -48,6 +48,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
80213 pgoff_t pgoff;
80214 unsigned long new_flags = vma->vm_flags;
80215
80216+#ifdef CONFIG_PAX_SEGMEXEC
80217+ struct vm_area_struct *vma_m;
80218+#endif
80219+
80220 switch (behavior) {
80221 case MADV_NORMAL:
80222 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
80223@@ -123,6 +127,13 @@ success:
80224 /*
80225 * vm_flags is protected by the mmap_sem held in write mode.
80226 */
80227+
80228+#ifdef CONFIG_PAX_SEGMEXEC
80229+ vma_m = pax_find_mirror_vma(vma);
80230+ if (vma_m)
80231+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
80232+#endif
80233+
80234 vma->vm_flags = new_flags;
80235
80236 out:
80237@@ -181,6 +192,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
80238 struct vm_area_struct ** prev,
80239 unsigned long start, unsigned long end)
80240 {
80241+
80242+#ifdef CONFIG_PAX_SEGMEXEC
80243+ struct vm_area_struct *vma_m;
80244+#endif
80245+
80246 *prev = vma;
80247 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
80248 return -EINVAL;
80249@@ -193,6 +209,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
80250 zap_page_range(vma, start, end - start, &details);
80251 } else
80252 zap_page_range(vma, start, end - start, NULL);
80253+
80254+#ifdef CONFIG_PAX_SEGMEXEC
80255+ vma_m = pax_find_mirror_vma(vma);
80256+ if (vma_m) {
80257+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
80258+ struct zap_details details = {
80259+ .nonlinear_vma = vma_m,
80260+ .last_index = ULONG_MAX,
80261+ };
80262+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
80263+ } else
80264+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
80265+ }
80266+#endif
80267+
80268 return 0;
80269 }
80270
80271@@ -397,6 +428,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
80272 if (end < start)
80273 goto out;
80274
80275+#ifdef CONFIG_PAX_SEGMEXEC
80276+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
80277+ if (end > SEGMEXEC_TASK_SIZE)
80278+ goto out;
80279+ } else
80280+#endif
80281+
80282+ if (end > TASK_SIZE)
80283+ goto out;
80284+
80285 error = 0;
80286 if (end == start)
80287 goto out;
80288diff --git a/mm/memory-failure.c b/mm/memory-failure.c
80289index c6e4dd3..1f41988 100644
80290--- a/mm/memory-failure.c
80291+++ b/mm/memory-failure.c
80292@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
80293
80294 int sysctl_memory_failure_recovery __read_mostly = 1;
80295
80296-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
80297+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
80298
80299 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
80300
80301@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
80302 pfn, t->comm, t->pid);
80303 si.si_signo = SIGBUS;
80304 si.si_errno = 0;
80305- si.si_addr = (void *)addr;
80306+ si.si_addr = (void __user *)addr;
80307 #ifdef __ARCH_SI_TRAPNO
80308 si.si_trapno = trapno;
80309 #endif
80310@@ -760,7 +760,7 @@ static struct page_state {
80311 unsigned long res;
80312 char *msg;
80313 int (*action)(struct page *p, unsigned long pfn);
80314-} error_states[] = {
80315+} __do_const error_states[] = {
80316 { reserved, reserved, "reserved kernel", me_kernel },
80317 /*
80318 * free pages are specially detected outside this table:
80319@@ -1040,7 +1040,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
80320 }
80321
80322 nr_pages = 1 << compound_trans_order(hpage);
80323- atomic_long_add(nr_pages, &mce_bad_pages);
80324+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
80325
80326 /*
80327 * We need/can do nothing about count=0 pages.
80328@@ -1070,7 +1070,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
80329 if (!PageHWPoison(hpage)
80330 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
80331 || (p != hpage && TestSetPageHWPoison(hpage))) {
80332- atomic_long_sub(nr_pages, &mce_bad_pages);
80333+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
80334 return 0;
80335 }
80336 set_page_hwpoison_huge_page(hpage);
80337@@ -1128,7 +1128,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
80338 }
80339 if (hwpoison_filter(p)) {
80340 if (TestClearPageHWPoison(p))
80341- atomic_long_sub(nr_pages, &mce_bad_pages);
80342+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
80343 unlock_page(hpage);
80344 put_page(hpage);
80345 return 0;
80346@@ -1323,7 +1323,7 @@ int unpoison_memory(unsigned long pfn)
80347 return 0;
80348 }
80349 if (TestClearPageHWPoison(p))
80350- atomic_long_sub(nr_pages, &mce_bad_pages);
80351+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
80352 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
80353 return 0;
80354 }
80355@@ -1337,7 +1337,7 @@ int unpoison_memory(unsigned long pfn)
80356 */
80357 if (TestClearPageHWPoison(page)) {
80358 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
80359- atomic_long_sub(nr_pages, &mce_bad_pages);
80360+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
80361 freeit = 1;
80362 if (PageHuge(page))
80363 clear_page_hwpoison_huge_page(page);
80364@@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
80365 }
80366 done:
80367 if (!PageHWPoison(hpage))
80368- atomic_long_add(1 << compound_trans_order(hpage),
80369+ atomic_long_add_unchecked(1 << compound_trans_order(hpage),
80370 &mce_bad_pages);
80371 set_page_hwpoison_huge_page(hpage);
80372 dequeue_hwpoisoned_huge_page(hpage);
80373@@ -1583,7 +1583,7 @@ int soft_offline_page(struct page *page, int flags)
80374 return ret;
80375
80376 done:
80377- atomic_long_add(1, &mce_bad_pages);
80378+ atomic_long_add_unchecked(1, &mce_bad_pages);
80379 SetPageHWPoison(page);
80380 /* keep elevated page count for bad page */
80381 return ret;
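
Every update of the mce_bad_pages statistic moves to the _unchecked atomic
family. Under PAX_REFCOUNT the ordinary atomic operations trap on overflow to
stop reference-count wraparound exploits; counters that are mere statistics,
where wrapping is harmless, are converted to atomic_long_unchecked_t: same
layout, no overflow trap. A userspace-flavored sketch of the split
(illustrative types, not the kernel's asm implementations):

#include <stdio.h>

typedef struct { long counter; } atomic_long_unchecked_t;

/* The checked variant would additionally trap on signed overflow
 * (elided here); the unchecked one simply wraps. */
static void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *v)
{
        __atomic_fetch_add(&v->counter, i, __ATOMIC_RELAXED);
}

int main(void)
{
        atomic_long_unchecked_t bad_pages = { 0 };

        atomic_long_add_unchecked(512, &bad_pages);
        printf("%ld\n", bad_pages.counter);
        return 0;
}
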
80382diff --git a/mm/memory.c b/mm/memory.c
80383index bb1369f..efb96b5 100644
80384--- a/mm/memory.c
80385+++ b/mm/memory.c
80386@@ -433,6 +433,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
80387 free_pte_range(tlb, pmd, addr);
80388 } while (pmd++, addr = next, addr != end);
80389
80390+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
80391 start &= PUD_MASK;
80392 if (start < floor)
80393 return;
80394@@ -447,6 +448,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
80395 pmd = pmd_offset(pud, start);
80396 pud_clear(pud);
80397 pmd_free_tlb(tlb, pmd, start);
80398+#endif
80399+
80400 }
80401
80402 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
80403@@ -466,6 +469,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
80404 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
80405 } while (pud++, addr = next, addr != end);
80406
80407+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
80408 start &= PGDIR_MASK;
80409 if (start < floor)
80410 return;
80411@@ -480,6 +484,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
80412 pud = pud_offset(pgd, start);
80413 pgd_clear(pgd);
80414 pud_free_tlb(tlb, pud, start);
80415+#endif
80416+
80417 }
80418
80419 /*
80420@@ -1618,12 +1624,6 @@ no_page_table:
80421 return page;
80422 }
80423
80424-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
80425-{
80426- return stack_guard_page_start(vma, addr) ||
80427- stack_guard_page_end(vma, addr+PAGE_SIZE);
80428-}
80429-
80430 /**
80431 * __get_user_pages() - pin user pages in memory
80432 * @tsk: task_struct of target task
80433@@ -1709,10 +1709,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
80434
80435 i = 0;
80436
80437- do {
80438+ while (nr_pages) {
80439 struct vm_area_struct *vma;
80440
80441- vma = find_extend_vma(mm, start);
80442+ vma = find_vma(mm, start);
80443 if (!vma && in_gate_area(mm, start)) {
80444 unsigned long pg = start & PAGE_MASK;
80445 pgd_t *pgd;
80446@@ -1760,7 +1760,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
80447 goto next_page;
80448 }
80449
80450- if (!vma ||
80451+ if (!vma || start < vma->vm_start ||
80452 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
80453 !(vm_flags & vma->vm_flags))
80454 return i ? : -EFAULT;
80455@@ -1787,11 +1787,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
80456 int ret;
80457 unsigned int fault_flags = 0;
80458
80459- /* For mlock, just skip the stack guard page. */
80460- if (foll_flags & FOLL_MLOCK) {
80461- if (stack_guard_page(vma, start))
80462- goto next_page;
80463- }
80464 if (foll_flags & FOLL_WRITE)
80465 fault_flags |= FAULT_FLAG_WRITE;
80466 if (nonblocking)
80467@@ -1865,7 +1860,7 @@ next_page:
80468 start += PAGE_SIZE;
80469 nr_pages--;
80470 } while (nr_pages && start < vma->vm_end);
80471- } while (nr_pages);
80472+ }
80473 return i;
80474 }
80475 EXPORT_SYMBOL(__get_user_pages);
80476@@ -2072,6 +2067,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
80477 page_add_file_rmap(page);
80478 set_pte_at(mm, addr, pte, mk_pte(page, prot));
80479
80480+#ifdef CONFIG_PAX_SEGMEXEC
80481+ pax_mirror_file_pte(vma, addr, page, ptl);
80482+#endif
80483+
80484 retval = 0;
80485 pte_unmap_unlock(pte, ptl);
80486 return retval;
80487@@ -2116,9 +2115,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
80488 if (!page_count(page))
80489 return -EINVAL;
80490 if (!(vma->vm_flags & VM_MIXEDMAP)) {
80491+
80492+#ifdef CONFIG_PAX_SEGMEXEC
80493+ struct vm_area_struct *vma_m;
80494+#endif
80495+
80496 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
80497 BUG_ON(vma->vm_flags & VM_PFNMAP);
80498 vma->vm_flags |= VM_MIXEDMAP;
80499+
80500+#ifdef CONFIG_PAX_SEGMEXEC
80501+ vma_m = pax_find_mirror_vma(vma);
80502+ if (vma_m)
80503+ vma_m->vm_flags |= VM_MIXEDMAP;
80504+#endif
80505+
80506 }
80507 return insert_page(vma, addr, page, vma->vm_page_prot);
80508 }
80509@@ -2201,6 +2212,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
80510 unsigned long pfn)
80511 {
80512 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
80513+ BUG_ON(vma->vm_mirror);
80514
80515 if (addr < vma->vm_start || addr >= vma->vm_end)
80516 return -EFAULT;
80517@@ -2401,7 +2413,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
80518
80519 BUG_ON(pud_huge(*pud));
80520
80521- pmd = pmd_alloc(mm, pud, addr);
80522+ pmd = (mm == &init_mm) ?
80523+ pmd_alloc_kernel(mm, pud, addr) :
80524+ pmd_alloc(mm, pud, addr);
80525 if (!pmd)
80526 return -ENOMEM;
80527 do {
80528@@ -2421,7 +2435,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
80529 unsigned long next;
80530 int err;
80531
80532- pud = pud_alloc(mm, pgd, addr);
80533+ pud = (mm == &init_mm) ?
80534+ pud_alloc_kernel(mm, pgd, addr) :
80535+ pud_alloc(mm, pgd, addr);
80536 if (!pud)
80537 return -ENOMEM;
80538 do {
80539@@ -2509,6 +2525,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
80540 copy_user_highpage(dst, src, va, vma);
80541 }
80542
80543+#ifdef CONFIG_PAX_SEGMEXEC
80544+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
80545+{
80546+ struct mm_struct *mm = vma->vm_mm;
80547+ spinlock_t *ptl;
80548+ pte_t *pte, entry;
80549+
80550+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
80551+ entry = *pte;
80552+ if (!pte_present(entry)) {
80553+ if (!pte_none(entry)) {
80554+ BUG_ON(pte_file(entry));
80555+ free_swap_and_cache(pte_to_swp_entry(entry));
80556+ pte_clear_not_present_full(mm, address, pte, 0);
80557+ }
80558+ } else {
80559+ struct page *page;
80560+
80561+ flush_cache_page(vma, address, pte_pfn(entry));
80562+ entry = ptep_clear_flush(vma, address, pte);
80563+ BUG_ON(pte_dirty(entry));
80564+ page = vm_normal_page(vma, address, entry);
80565+ if (page) {
80566+ update_hiwater_rss(mm);
80567+ if (PageAnon(page))
80568+ dec_mm_counter_fast(mm, MM_ANONPAGES);
80569+ else
80570+ dec_mm_counter_fast(mm, MM_FILEPAGES);
80571+ page_remove_rmap(page);
80572+ page_cache_release(page);
80573+ }
80574+ }
80575+ pte_unmap_unlock(pte, ptl);
80576+}
80577+
80578+/* PaX: if the vma is mirrored, synchronize the mirror's PTE
80579+ *
80580+ * the ptl of the lower mapped page is held on entry and is not released on
80581+ * exit or inside, so that the PTE state changes stay atomic (swapout, mremap, munmap, etc)
80582+ */
80583+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
80584+{
80585+ struct mm_struct *mm = vma->vm_mm;
80586+ unsigned long address_m;
80587+ spinlock_t *ptl_m;
80588+ struct vm_area_struct *vma_m;
80589+ pmd_t *pmd_m;
80590+ pte_t *pte_m, entry_m;
80591+
80592+ BUG_ON(!page_m || !PageAnon(page_m));
80593+
80594+ vma_m = pax_find_mirror_vma(vma);
80595+ if (!vma_m)
80596+ return;
80597+
80598+ BUG_ON(!PageLocked(page_m));
80599+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
80600+ address_m = address + SEGMEXEC_TASK_SIZE;
80601+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
80602+ pte_m = pte_offset_map(pmd_m, address_m);
80603+ ptl_m = pte_lockptr(mm, pmd_m);
80604+ if (ptl != ptl_m) {
80605+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
80606+ if (!pte_none(*pte_m))
80607+ goto out;
80608+ }
80609+
80610+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
80611+ page_cache_get(page_m);
80612+ page_add_anon_rmap(page_m, vma_m, address_m);
80613+ inc_mm_counter_fast(mm, MM_ANONPAGES);
80614+ set_pte_at(mm, address_m, pte_m, entry_m);
80615+ update_mmu_cache(vma_m, address_m, entry_m);
80616+out:
80617+ if (ptl != ptl_m)
80618+ spin_unlock(ptl_m);
80619+ pte_unmap(pte_m);
80620+ unlock_page(page_m);
80621+}
80622+
80623+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
80624+{
80625+ struct mm_struct *mm = vma->vm_mm;
80626+ unsigned long address_m;
80627+ spinlock_t *ptl_m;
80628+ struct vm_area_struct *vma_m;
80629+ pmd_t *pmd_m;
80630+ pte_t *pte_m, entry_m;
80631+
80632+ BUG_ON(!page_m || PageAnon(page_m));
80633+
80634+ vma_m = pax_find_mirror_vma(vma);
80635+ if (!vma_m)
80636+ return;
80637+
80638+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
80639+ address_m = address + SEGMEXEC_TASK_SIZE;
80640+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
80641+ pte_m = pte_offset_map(pmd_m, address_m);
80642+ ptl_m = pte_lockptr(mm, pmd_m);
80643+ if (ptl != ptl_m) {
80644+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
80645+ if (!pte_none(*pte_m))
80646+ goto out;
80647+ }
80648+
80649+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
80650+ page_cache_get(page_m);
80651+ page_add_file_rmap(page_m);
80652+ inc_mm_counter_fast(mm, MM_FILEPAGES);
80653+ set_pte_at(mm, address_m, pte_m, entry_m);
80654+ update_mmu_cache(vma_m, address_m, entry_m);
80655+out:
80656+ if (ptl != ptl_m)
80657+ spin_unlock(ptl_m);
80658+ pte_unmap(pte_m);
80659+}
80660+
80661+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
80662+{
80663+ struct mm_struct *mm = vma->vm_mm;
80664+ unsigned long address_m;
80665+ spinlock_t *ptl_m;
80666+ struct vm_area_struct *vma_m;
80667+ pmd_t *pmd_m;
80668+ pte_t *pte_m, entry_m;
80669+
80670+ vma_m = pax_find_mirror_vma(vma);
80671+ if (!vma_m)
80672+ return;
80673+
80674+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
80675+ address_m = address + SEGMEXEC_TASK_SIZE;
80676+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
80677+ pte_m = pte_offset_map(pmd_m, address_m);
80678+ ptl_m = pte_lockptr(mm, pmd_m);
80679+ if (ptl != ptl_m) {
80680+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
80681+ if (!pte_none(*pte_m))
80682+ goto out;
80683+ }
80684+
80685+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
80686+ set_pte_at(mm, address_m, pte_m, entry_m);
80687+out:
80688+ if (ptl != ptl_m)
80689+ spin_unlock(ptl_m);
80690+ pte_unmap(pte_m);
80691+}
80692+
80693+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
80694+{
80695+ struct page *page_m;
80696+ pte_t entry;
80697+
80698+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
80699+ goto out;
80700+
80701+ entry = *pte;
80702+ page_m = vm_normal_page(vma, address, entry);
80703+ if (!page_m)
80704+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
80705+ else if (PageAnon(page_m)) {
80706+ if (pax_find_mirror_vma(vma)) {
80707+ pte_unmap_unlock(pte, ptl);
80708+ lock_page(page_m);
80709+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
80710+ if (pte_same(entry, *pte))
80711+ pax_mirror_anon_pte(vma, address, page_m, ptl);
80712+ else
80713+ unlock_page(page_m);
80714+ }
80715+ } else
80716+ pax_mirror_file_pte(vma, address, page_m, ptl);
80717+
80718+out:
80719+ pte_unmap_unlock(pte, ptl);
80720+}
80721+#endif
80722+
80723 /*
80724 * This routine handles present pages, when users try to write
80725 * to a shared page. It is done by copying the page to a new address
80726@@ -2725,6 +2921,12 @@ gotten:
80727 */
80728 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
80729 if (likely(pte_same(*page_table, orig_pte))) {
80730+
80731+#ifdef CONFIG_PAX_SEGMEXEC
80732+ if (pax_find_mirror_vma(vma))
80733+ BUG_ON(!trylock_page(new_page));
80734+#endif
80735+
80736 if (old_page) {
80737 if (!PageAnon(old_page)) {
80738 dec_mm_counter_fast(mm, MM_FILEPAGES);
80739@@ -2776,6 +2978,10 @@ gotten:
80740 page_remove_rmap(old_page);
80741 }
80742
80743+#ifdef CONFIG_PAX_SEGMEXEC
80744+ pax_mirror_anon_pte(vma, address, new_page, ptl);
80745+#endif
80746+
80747 /* Free the old page.. */
80748 new_page = old_page;
80749 ret |= VM_FAULT_WRITE;
80750@@ -3051,6 +3257,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
80751 swap_free(entry);
80752 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
80753 try_to_free_swap(page);
80754+
80755+#ifdef CONFIG_PAX_SEGMEXEC
80756+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
80757+#endif
80758+
80759 unlock_page(page);
80760 if (swapcache) {
80761 /*
80762@@ -3074,6 +3285,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
80763
80764 /* No need to invalidate - it was non-present before */
80765 update_mmu_cache(vma, address, page_table);
80766+
80767+#ifdef CONFIG_PAX_SEGMEXEC
80768+ pax_mirror_anon_pte(vma, address, page, ptl);
80769+#endif
80770+
80771 unlock:
80772 pte_unmap_unlock(page_table, ptl);
80773 out:
80774@@ -3093,40 +3309,6 @@ out_release:
80775 }
80776
80777 /*
80778- * This is like a special single-page "expand_{down|up}wards()",
80779- * except we must first make sure that 'address{-|+}PAGE_SIZE'
80780- * doesn't hit another vma.
80781- */
80782-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
80783-{
80784- address &= PAGE_MASK;
80785- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
80786- struct vm_area_struct *prev = vma->vm_prev;
80787-
80788- /*
80789- * Is there a mapping abutting this one below?
80790- *
80791- * That's only ok if it's the same stack mapping
80792- * that has gotten split..
80793- */
80794- if (prev && prev->vm_end == address)
80795- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
80796-
80797- expand_downwards(vma, address - PAGE_SIZE);
80798- }
80799- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
80800- struct vm_area_struct *next = vma->vm_next;
80801-
80802- /* As VM_GROWSDOWN but s/below/above/ */
80803- if (next && next->vm_start == address + PAGE_SIZE)
80804- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
80805-
80806- expand_upwards(vma, address + PAGE_SIZE);
80807- }
80808- return 0;
80809-}
80810-
80811-/*
80812 * We enter with non-exclusive mmap_sem (to exclude vma changes,
80813 * but allow concurrent faults), and pte mapped but not yet locked.
80814 * We return with mmap_sem still held, but pte unmapped and unlocked.
80815@@ -3135,27 +3317,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
80816 unsigned long address, pte_t *page_table, pmd_t *pmd,
80817 unsigned int flags)
80818 {
80819- struct page *page;
80820+ struct page *page = NULL;
80821 spinlock_t *ptl;
80822 pte_t entry;
80823
80824- pte_unmap(page_table);
80825-
80826- /* Check if we need to add a guard page to the stack */
80827- if (check_stack_guard_page(vma, address) < 0)
80828- return VM_FAULT_SIGBUS;
80829-
80830- /* Use the zero-page for reads */
80831 if (!(flags & FAULT_FLAG_WRITE)) {
80832 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
80833 vma->vm_page_prot));
80834- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
80835+ ptl = pte_lockptr(mm, pmd);
80836+ spin_lock(ptl);
80837 if (!pte_none(*page_table))
80838 goto unlock;
80839 goto setpte;
80840 }
80841
80842 /* Allocate our own private page. */
80843+ pte_unmap(page_table);
80844+
80845 if (unlikely(anon_vma_prepare(vma)))
80846 goto oom;
80847 page = alloc_zeroed_user_highpage_movable(vma, address);
80848@@ -3174,6 +3352,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
80849 if (!pte_none(*page_table))
80850 goto release;
80851
80852+#ifdef CONFIG_PAX_SEGMEXEC
80853+ if (pax_find_mirror_vma(vma))
80854+ BUG_ON(!trylock_page(page));
80855+#endif
80856+
80857 inc_mm_counter_fast(mm, MM_ANONPAGES);
80858 page_add_new_anon_rmap(page, vma, address);
80859 setpte:
80860@@ -3181,6 +3364,12 @@ setpte:
80861
80862 /* No need to invalidate - it was non-present before */
80863 update_mmu_cache(vma, address, page_table);
80864+
80865+#ifdef CONFIG_PAX_SEGMEXEC
80866+ if (page)
80867+ pax_mirror_anon_pte(vma, address, page, ptl);
80868+#endif
80869+
80870 unlock:
80871 pte_unmap_unlock(page_table, ptl);
80872 return 0;
80873@@ -3324,6 +3513,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
80874 */
80875 /* Only go through if we didn't race with anybody else... */
80876 if (likely(pte_same(*page_table, orig_pte))) {
80877+
80878+#ifdef CONFIG_PAX_SEGMEXEC
80879+ if (anon && pax_find_mirror_vma(vma))
80880+ BUG_ON(!trylock_page(page));
80881+#endif
80882+
80883 flush_icache_page(vma, page);
80884 entry = mk_pte(page, vma->vm_page_prot);
80885 if (flags & FAULT_FLAG_WRITE)
80886@@ -3343,6 +3538,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
80887
80888 /* no need to invalidate: a not-present page won't be cached */
80889 update_mmu_cache(vma, address, page_table);
80890+
80891+#ifdef CONFIG_PAX_SEGMEXEC
80892+ if (anon)
80893+ pax_mirror_anon_pte(vma, address, page, ptl);
80894+ else
80895+ pax_mirror_file_pte(vma, address, page, ptl);
80896+#endif
80897+
80898 } else {
80899 if (cow_page)
80900 mem_cgroup_uncharge_page(cow_page);
80901@@ -3664,6 +3867,12 @@ int handle_pte_fault(struct mm_struct *mm,
80902 if (flags & FAULT_FLAG_WRITE)
80903 flush_tlb_fix_spurious_fault(vma, address);
80904 }
80905+
80906+#ifdef CONFIG_PAX_SEGMEXEC
80907+ pax_mirror_pte(vma, address, pte, pmd, ptl);
80908+ return 0;
80909+#endif
80910+
80911 unlock:
80912 pte_unmap_unlock(pte, ptl);
80913 return 0;
80914@@ -3680,6 +3889,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
80915 pmd_t *pmd;
80916 pte_t *pte;
80917
80918+#ifdef CONFIG_PAX_SEGMEXEC
80919+ struct vm_area_struct *vma_m;
80920+#endif
80921+
80922 __set_current_state(TASK_RUNNING);
80923
80924 count_vm_event(PGFAULT);
80925@@ -3691,6 +3904,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
80926 if (unlikely(is_vm_hugetlb_page(vma)))
80927 return hugetlb_fault(mm, vma, address, flags);
80928
80929+#ifdef CONFIG_PAX_SEGMEXEC
80930+ vma_m = pax_find_mirror_vma(vma);
80931+ if (vma_m) {
80932+ unsigned long address_m;
80933+ pgd_t *pgd_m;
80934+ pud_t *pud_m;
80935+ pmd_t *pmd_m;
80936+
80937+ if (vma->vm_start > vma_m->vm_start) {
80938+ address_m = address;
80939+ address -= SEGMEXEC_TASK_SIZE;
80940+ vma = vma_m;
80941+ } else
80942+ address_m = address + SEGMEXEC_TASK_SIZE;
80943+
80944+ pgd_m = pgd_offset(mm, address_m);
80945+ pud_m = pud_alloc(mm, pgd_m, address_m);
80946+ if (!pud_m)
80947+ return VM_FAULT_OOM;
80948+ pmd_m = pmd_alloc(mm, pud_m, address_m);
80949+ if (!pmd_m)
80950+ return VM_FAULT_OOM;
80951+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
80952+ return VM_FAULT_OOM;
80953+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
80954+ }
80955+#endif
80956+
80957 retry:
80958 pgd = pgd_offset(mm, address);
80959 pud = pud_alloc(mm, pgd, address);
80960@@ -3789,6 +4030,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
80961 spin_unlock(&mm->page_table_lock);
80962 return 0;
80963 }
80964+
80965+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
80966+{
80967+ pud_t *new = pud_alloc_one(mm, address);
80968+ if (!new)
80969+ return -ENOMEM;
80970+
80971+ smp_wmb(); /* See comment in __pte_alloc */
80972+
80973+ spin_lock(&mm->page_table_lock);
80974+ if (pgd_present(*pgd)) /* Another has populated it */
80975+ pud_free(mm, new);
80976+ else
80977+ pgd_populate_kernel(mm, pgd, new);
80978+ spin_unlock(&mm->page_table_lock);
80979+ return 0;
80980+}
80981 #endif /* __PAGETABLE_PUD_FOLDED */
80982
80983 #ifndef __PAGETABLE_PMD_FOLDED
80984@@ -3819,6 +4077,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
80985 spin_unlock(&mm->page_table_lock);
80986 return 0;
80987 }
80988+
80989+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
80990+{
80991+ pmd_t *new = pmd_alloc_one(mm, address);
80992+ if (!new)
80993+ return -ENOMEM;
80994+
80995+ smp_wmb(); /* See comment in __pte_alloc */
80996+
80997+ spin_lock(&mm->page_table_lock);
80998+#ifndef __ARCH_HAS_4LEVEL_HACK
80999+ if (pud_present(*pud)) /* Another has populated it */
81000+ pmd_free(mm, new);
81001+ else
81002+ pud_populate_kernel(mm, pud, new);
81003+#else
81004+ if (pgd_present(*pud)) /* Another has populated it */
81005+ pmd_free(mm, new);
81006+ else
81007+ pgd_populate_kernel(mm, pud, new);
81008+#endif /* __ARCH_HAS_4LEVEL_HACK */
81009+ spin_unlock(&mm->page_table_lock);
81010+ return 0;
81011+}
81012 #endif /* __PAGETABLE_PMD_FOLDED */
81013
81014 int make_pages_present(unsigned long addr, unsigned long end)
81015@@ -3856,7 +4138,7 @@ static int __init gate_vma_init(void)
81016 gate_vma.vm_start = FIXADDR_USER_START;
81017 gate_vma.vm_end = FIXADDR_USER_END;
81018 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
81019- gate_vma.vm_page_prot = __P101;
81020+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
81021
81022 return 0;
81023 }
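
The mm/memory.c diff carries the userland heart of SEGMEXEC. The upstream
stack-guard-page logic is removed in favor of the patch's own gap handling
(so __get_user_pages() drops find_extend_vma() and the FOLL_MLOCK special
case), kernel-only __pud_alloc_kernel()/__pmd_alloc_kernel() populate init_mm
via the *_populate_kernel() helpers, and the pax_mirror_*_pte() family
replays each fault on the mirror vma at a fixed offset. On i386 that offset
halves the user address space; a trivial runnable illustration of the
arithmetic (the constant is an assumption, half of the usual 3 GiB
TASK_SIZE):

#include <stdio.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL /* assumed: TASK_SIZE / 2 on i386 */

int main(void)
{
        unsigned long address = 0x08048000UL;   /* classic i386 ELF text */
        unsigned long address_m = address + SEGMEXEC_TASK_SIZE;

        printf("data view  : %#010lx\n", address);
        printf("exec mirror: %#010lx\n", address_m);
        return 0;
}
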
81024diff --git a/mm/mempolicy.c b/mm/mempolicy.c
81025index 3df6d12..a11056a 100644
81026--- a/mm/mempolicy.c
81027+++ b/mm/mempolicy.c
81028@@ -721,6 +721,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
81029 unsigned long vmstart;
81030 unsigned long vmend;
81031
81032+#ifdef CONFIG_PAX_SEGMEXEC
81033+ struct vm_area_struct *vma_m;
81034+#endif
81035+
81036 vma = find_vma(mm, start);
81037 if (!vma || vma->vm_start > start)
81038 return -EFAULT;
81039@@ -757,9 +761,20 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
81040 if (err)
81041 goto out;
81042 }
81043+
81044 err = vma_replace_policy(vma, new_pol);
81045 if (err)
81046 goto out;
81047+
81048+#ifdef CONFIG_PAX_SEGMEXEC
81049+ vma_m = pax_find_mirror_vma(vma);
81050+ if (vma_m) {
81051+ err = vma_replace_policy(vma_m, new_pol);
81052+ if (err)
81053+ goto out;
81054+ }
81055+#endif
81056+
81057 }
81058
81059 out:
81060@@ -1216,6 +1231,17 @@ static long do_mbind(unsigned long start, unsigned long len,
81061
81062 if (end < start)
81063 return -EINVAL;
81064+
81065+#ifdef CONFIG_PAX_SEGMEXEC
81066+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
81067+ if (end > SEGMEXEC_TASK_SIZE)
81068+ return -EINVAL;
81069+ } else
81070+#endif
81071+
81072+ if (end > TASK_SIZE)
81073+ return -EINVAL;
81074+
81075 if (end == start)
81076 return 0;
81077
81078@@ -1445,8 +1471,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
81079 */
81080 tcred = __task_cred(task);
81081 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
81082- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
81083- !capable(CAP_SYS_NICE)) {
81084+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
81085 rcu_read_unlock();
81086 err = -EPERM;
81087 goto out_put;
81088@@ -1477,6 +1502,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
81089 goto out;
81090 }
81091
81092+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
81093+ if (mm != current->mm &&
81094+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
81095+ mmput(mm);
81096+ err = -EPERM;
81097+ goto out;
81098+ }
81099+#endif
81100+
81101 err = do_migrate_pages(mm, old, new,
81102 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
81103
81104diff --git a/mm/migrate.c b/mm/migrate.c
81105index 2fd8b4a..d70358f 100644
81106--- a/mm/migrate.c
81107+++ b/mm/migrate.c
81108@@ -1401,8 +1401,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
81109 */
81110 tcred = __task_cred(task);
81111 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
81112- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
81113- !capable(CAP_SYS_NICE)) {
81114+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
81115 rcu_read_unlock();
81116 err = -EPERM;
81117 goto out;
81118diff --git a/mm/mlock.c b/mm/mlock.c
81119index c9bd528..da8d069 100644
81120--- a/mm/mlock.c
81121+++ b/mm/mlock.c
81122@@ -13,6 +13,7 @@
81123 #include <linux/pagemap.h>
81124 #include <linux/mempolicy.h>
81125 #include <linux/syscalls.h>
81126+#include <linux/security.h>
81127 #include <linux/sched.h>
81128 #include <linux/export.h>
81129 #include <linux/rmap.h>
81130@@ -369,7 +370,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
81131 {
81132 unsigned long nstart, end, tmp;
81133 struct vm_area_struct * vma, * prev;
81134- int error;
81135+ int error = 0;
81136
81137 VM_BUG_ON(start & ~PAGE_MASK);
81138 VM_BUG_ON(len != PAGE_ALIGN(len));
81139@@ -378,6 +379,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
81140 return -EINVAL;
81141 if (end == start)
81142 return 0;
81143+ if (end > TASK_SIZE)
81144+ return -EINVAL;
81145+
81146 vma = find_vma(current->mm, start);
81147 if (!vma || vma->vm_start > start)
81148 return -ENOMEM;
81149@@ -389,6 +393,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
81150 for (nstart = start ; ; ) {
81151 vm_flags_t newflags;
81152
81153+#ifdef CONFIG_PAX_SEGMEXEC
81154+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
81155+ break;
81156+#endif
81157+
81158 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
81159
81160 newflags = vma->vm_flags | VM_LOCKED;
81161@@ -494,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
81162 lock_limit >>= PAGE_SHIFT;
81163
81164 /* check against resource limits */
81165+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
81166 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
81167 error = do_mlock(start, len, 1);
81168 up_write(&current->mm->mmap_sem);
81169@@ -528,6 +538,12 @@ static int do_mlockall(int flags)
81170 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
81171 vm_flags_t newflags;
81172
81173+#ifdef CONFIG_PAX_SEGMEXEC
81174+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
81175+ break;
81176+#endif
81177+
81178+ BUG_ON(vma->vm_end > TASK_SIZE);
81179 newflags = vma->vm_flags | VM_LOCKED;
81180 if (!(flags & MCL_CURRENT))
81181 newflags &= ~VM_LOCKED;
81182@@ -560,6 +576,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
81183 lock_limit >>= PAGE_SHIFT;
81184
81185 ret = -ENOMEM;
81186+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
81187 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
81188 capable(CAP_IPC_LOCK))
81189 ret = do_mlockall(flags);
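
Beyond the gr_learn_resource() calls, which only report the prospective RLIMIT_MEMLOCK total to grsecurity's learning mode before the unchanged test runs, the mlock hunks reject ranges that extend past TASK_SIZE and stop the VMA walk at the SEGMEXEC boundary so the upper mirror half is never locked separately. A sketch of the limit arithmetic both mlock() and mlockall() feed, assuming a page-aligned length:

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12   /* 4 KiB pages, as on x86 */

    /* Request and limit are both converted to pages before comparison;
     * CAP_IPC_LOCK overrides the limit entirely. */
    static bool mlock_within_limit(uint64_t locked_vm_pages, uint64_t len,
                                   uint64_t rlim_memlock_bytes,
                                   bool cap_ipc_lock)
    {
        uint64_t locked = (len >> PAGE_SHIFT) + locked_vm_pages;
        uint64_t limit  = rlim_memlock_bytes >> PAGE_SHIFT;

        return locked <= limit || cap_ipc_lock;
    }
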
81190diff --git a/mm/mmap.c b/mm/mmap.c
81191index 8832b87..4bbb1b2 100644
81192--- a/mm/mmap.c
81193+++ b/mm/mmap.c
81194@@ -32,6 +32,7 @@
81195 #include <linux/khugepaged.h>
81196 #include <linux/uprobes.h>
81197 #include <linux/rbtree_augmented.h>
81198+#include <linux/random.h>
81199
81200 #include <asm/uaccess.h>
81201 #include <asm/cacheflush.h>
81202@@ -48,6 +49,16 @@
81203 #define arch_rebalance_pgtables(addr, len) (addr)
81204 #endif
81205
81206+static inline void verify_mm_writelocked(struct mm_struct *mm)
81207+{
81208+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
81209+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
81210+ up_read(&mm->mmap_sem);
81211+ BUG();
81212+ }
81213+#endif
81214+}
81215+
81216 static void unmap_region(struct mm_struct *mm,
81217 struct vm_area_struct *vma, struct vm_area_struct *prev,
81218 unsigned long start, unsigned long end);
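
verify_mm_writelocked(), hoisted to the top of the file above (the old CONFIG_DEBUG_VM-only copy is deleted further down and this one also fires under CONFIG_PAX, with BUG() instead of WARN_ON), is a cheap assertion: if a read-trylock on mmap_sem succeeds, no writer can be holding it, so a caller that must run write-locked has violated its contract. The same trick in userspace terms, with a pthread rwlock standing in for the kernel rw-semaphore:

    #include <assert.h>
    #include <pthread.h>

    /* If a read-trylock succeeds, no writer holds the lock, so a caller
     * that promised to hold it for writing is buggy. */
    static void verify_writelocked(pthread_rwlock_t *l)
    {
        if (pthread_rwlock_tryrdlock(l) == 0) {
            pthread_rwlock_unlock(l);
            assert(0 && "caller must hold the lock for writing");
        }
    }
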
81219@@ -67,22 +78,32 @@ static void unmap_region(struct mm_struct *mm,
81220 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
81221 *
81222 */
81223-pgprot_t protection_map[16] = {
81224+pgprot_t protection_map[16] __read_only = {
81225 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
81226 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
81227 };
81228
81229-pgprot_t vm_get_page_prot(unsigned long vm_flags)
81230+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
81231 {
81232- return __pgprot(pgprot_val(protection_map[vm_flags &
81233+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
81234 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
81235 pgprot_val(arch_vm_get_page_prot(vm_flags)));
81236+
81237+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
81238+ if (!(__supported_pte_mask & _PAGE_NX) &&
81239+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
81240+ (vm_flags & (VM_READ | VM_WRITE)))
81241+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
81242+#endif
81243+
81244+ return prot;
81245 }
81246 EXPORT_SYMBOL(vm_get_page_prot);
81247
81248 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
81249 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
81250 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
81251+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
81252 /*
81253 * Make sure vm_committed_as in one cacheline and not cacheline shared with
81254 * other variables. It can be updated by several CPUs frequently.
81255@@ -238,6 +259,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
81256 struct vm_area_struct *next = vma->vm_next;
81257
81258 might_sleep();
81259+ BUG_ON(vma->vm_mirror);
81260 if (vma->vm_ops && vma->vm_ops->close)
81261 vma->vm_ops->close(vma);
81262 if (vma->vm_file)
81263@@ -281,6 +303,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
81264 * not page aligned -Ram Gupta
81265 */
81266 rlim = rlimit(RLIMIT_DATA);
81267+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
81268 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
81269 (mm->end_data - mm->start_data) > rlim)
81270 goto out;
81271@@ -888,6 +911,12 @@ static int
81272 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
81273 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
81274 {
81275+
81276+#ifdef CONFIG_PAX_SEGMEXEC
81277+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
81278+ return 0;
81279+#endif
81280+
81281 if (is_mergeable_vma(vma, file, vm_flags) &&
81282 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
81283 if (vma->vm_pgoff == vm_pgoff)
81284@@ -907,6 +936,12 @@ static int
81285 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
81286 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
81287 {
81288+
81289+#ifdef CONFIG_PAX_SEGMEXEC
81290+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
81291+ return 0;
81292+#endif
81293+
81294 if (is_mergeable_vma(vma, file, vm_flags) &&
81295 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
81296 pgoff_t vm_pglen;
81297@@ -949,13 +984,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
81298 struct vm_area_struct *vma_merge(struct mm_struct *mm,
81299 struct vm_area_struct *prev, unsigned long addr,
81300 unsigned long end, unsigned long vm_flags,
81301- struct anon_vma *anon_vma, struct file *file,
81302+ struct anon_vma *anon_vma, struct file *file,
81303 pgoff_t pgoff, struct mempolicy *policy)
81304 {
81305 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
81306 struct vm_area_struct *area, *next;
81307 int err;
81308
81309+#ifdef CONFIG_PAX_SEGMEXEC
81310+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
81311+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
81312+
81313+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
81314+#endif
81315+
81316 /*
81317 * We later require that vma->vm_flags == vm_flags,
81318 * so this tests vma->vm_flags & VM_SPECIAL, too.
81319@@ -971,6 +1013,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
81320 if (next && next->vm_end == end) /* cases 6, 7, 8 */
81321 next = next->vm_next;
81322
81323+#ifdef CONFIG_PAX_SEGMEXEC
81324+ if (prev)
81325+ prev_m = pax_find_mirror_vma(prev);
81326+ if (area)
81327+ area_m = pax_find_mirror_vma(area);
81328+ if (next)
81329+ next_m = pax_find_mirror_vma(next);
81330+#endif
81331+
81332 /*
81333 * Can it merge with the predecessor?
81334 */
81335@@ -990,9 +1041,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
81336 /* cases 1, 6 */
81337 err = vma_adjust(prev, prev->vm_start,
81338 next->vm_end, prev->vm_pgoff, NULL);
81339- } else /* cases 2, 5, 7 */
81340+
81341+#ifdef CONFIG_PAX_SEGMEXEC
81342+ if (!err && prev_m)
81343+ err = vma_adjust(prev_m, prev_m->vm_start,
81344+ next_m->vm_end, prev_m->vm_pgoff, NULL);
81345+#endif
81346+
81347+ } else { /* cases 2, 5, 7 */
81348 err = vma_adjust(prev, prev->vm_start,
81349 end, prev->vm_pgoff, NULL);
81350+
81351+#ifdef CONFIG_PAX_SEGMEXEC
81352+ if (!err && prev_m)
81353+ err = vma_adjust(prev_m, prev_m->vm_start,
81354+ end_m, prev_m->vm_pgoff, NULL);
81355+#endif
81356+
81357+ }
81358 if (err)
81359 return NULL;
81360 khugepaged_enter_vma_merge(prev);
81361@@ -1006,12 +1072,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
81362 mpol_equal(policy, vma_policy(next)) &&
81363 can_vma_merge_before(next, vm_flags,
81364 anon_vma, file, pgoff+pglen)) {
81365- if (prev && addr < prev->vm_end) /* case 4 */
81366+ if (prev && addr < prev->vm_end) { /* case 4 */
81367 err = vma_adjust(prev, prev->vm_start,
81368 addr, prev->vm_pgoff, NULL);
81369- else /* cases 3, 8 */
81370+
81371+#ifdef CONFIG_PAX_SEGMEXEC
81372+ if (!err && prev_m)
81373+ err = vma_adjust(prev_m, prev_m->vm_start,
81374+ addr_m, prev_m->vm_pgoff, NULL);
81375+#endif
81376+
81377+ } else { /* cases 3, 8 */
81378 err = vma_adjust(area, addr, next->vm_end,
81379 next->vm_pgoff - pglen, NULL);
81380+
81381+#ifdef CONFIG_PAX_SEGMEXEC
81382+ if (!err && area_m)
81383+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
81384+ next_m->vm_pgoff - pglen, NULL);
81385+#endif
81386+
81387+ }
81388 if (err)
81389 return NULL;
81390 khugepaged_enter_vma_merge(area);
81391@@ -1120,8 +1201,10 @@ none:
81392 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
81393 struct file *file, long pages)
81394 {
81395- const unsigned long stack_flags
81396- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
81397+
81398+#ifdef CONFIG_PAX_RANDMMAP
81399+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
81400+#endif
81401
81402 mm->total_vm += pages;
81403
81404@@ -1129,7 +1212,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
81405 mm->shared_vm += pages;
81406 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
81407 mm->exec_vm += pages;
81408- } else if (flags & stack_flags)
81409+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
81410 mm->stack_vm += pages;
81411 }
81412 #endif /* CONFIG_PROC_FS */
81413@@ -1165,7 +1248,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
81414 * (the exception is when the underlying filesystem is noexec
81415 * mounted, in which case we dont add PROT_EXEC.)
81416 */
81417- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
81418+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
81419 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
81420 prot |= PROT_EXEC;
81421
81422@@ -1191,7 +1274,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
81423 /* Obtain the address to map to. we verify (or select) it and ensure
81424 * that it represents a valid section of the address space.
81425 */
81426- addr = get_unmapped_area(file, addr, len, pgoff, flags);
81427+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
81428 if (addr & ~PAGE_MASK)
81429 return addr;
81430
81431@@ -1202,6 +1285,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
81432 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
81433 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
81434
81435+#ifdef CONFIG_PAX_MPROTECT
81436+ if (mm->pax_flags & MF_PAX_MPROTECT) {
81437+#ifndef CONFIG_PAX_MPROTECT_COMPAT
81438+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
81439+ gr_log_rwxmmap(file);
81440+
81441+#ifdef CONFIG_PAX_EMUPLT
81442+ vm_flags &= ~VM_EXEC;
81443+#else
81444+ return -EPERM;
81445+#endif
81446+
81447+ }
81448+
81449+ if (!(vm_flags & VM_EXEC))
81450+ vm_flags &= ~VM_MAYEXEC;
81451+#else
81452+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
81453+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
81454+#endif
81455+ else
81456+ vm_flags &= ~VM_MAYWRITE;
81457+ }
81458+#endif
81459+
81460+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
81461+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
81462+ vm_flags &= ~VM_PAGEEXEC;
81463+#endif
81464+
81465 if (flags & MAP_LOCKED)
81466 if (!can_do_mlock())
81467 return -EPERM;
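
The MPROTECT branch above enforces W^X at map time: a request for simultaneously writable and executable memory is logged and either stripped of exec (EMUPLT) or rejected, and the VM_MAY* bits are trimmed so the mapping can never regain the dropped right through a later mprotect(). A sketch of the non-COMPAT path, using the usual VM_* bit values:

    #define VM_WRITE    0x00000002UL
    #define VM_EXEC     0x00000004UL
    #define VM_MAYWRITE 0x00000020UL
    #define VM_MAYEXEC  0x00000040UL

    /* Rights become one-way: a mapping created non-executable loses
     * VM_MAYEXEC and can never gain exec; one created executable loses
     * VM_MAYWRITE and can never gain write.  (With EMUPLT the kernel
     * strips VM_EXEC from a W|X request instead of failing.) */
    static long mprotect_policy(unsigned long *vm_flags)
    {
        if ((*vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
            return -1;              /* -EPERM in the kernel */
        if (!(*vm_flags & VM_EXEC))
            *vm_flags &= ~VM_MAYEXEC;
        else
            *vm_flags &= ~VM_MAYWRITE;
        return 0;
    }
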
81468@@ -1213,6 +1326,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
81469 locked += mm->locked_vm;
81470 lock_limit = rlimit(RLIMIT_MEMLOCK);
81471 lock_limit >>= PAGE_SHIFT;
81472+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
81473 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
81474 return -EAGAIN;
81475 }
81476@@ -1279,6 +1393,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
81477 }
81478 }
81479
81480+ if (!gr_acl_handle_mmap(file, prot))
81481+ return -EACCES;
81482+
81483 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
81484 }
81485
81486@@ -1356,7 +1473,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
81487 vm_flags_t vm_flags = vma->vm_flags;
81488
81489 /* If it was private or non-writable, the write bit is already clear */
81490- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
81491+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
81492 return 0;
81493
81494 /* The backer wishes to know when pages are first written to? */
81495@@ -1405,16 +1522,30 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
81496 unsigned long charged = 0;
81497 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
81498
81499+#ifdef CONFIG_PAX_SEGMEXEC
81500+ struct vm_area_struct *vma_m = NULL;
81501+#endif
81502+
81503+ /*
81504+ * mm->mmap_sem is required to protect against another thread
81505+ * changing the mappings in case we sleep.
81506+ */
81507+ verify_mm_writelocked(mm);
81508+
81509 /* Clear old maps */
81510 error = -ENOMEM;
81511-munmap_back:
81512 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
81513 if (do_munmap(mm, addr, len))
81514 return -ENOMEM;
81515- goto munmap_back;
81516+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
81517 }
81518
81519 /* Check against address space limit. */
81520+
81521+#ifdef CONFIG_PAX_RANDMMAP
81522+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
81523+#endif
81524+
81525 if (!may_expand_vm(mm, len >> PAGE_SHIFT))
81526 return -ENOMEM;
81527
81528@@ -1460,6 +1591,16 @@ munmap_back:
81529 goto unacct_error;
81530 }
81531
81532+#ifdef CONFIG_PAX_SEGMEXEC
81533+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
81534+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
81535+ if (!vma_m) {
81536+ error = -ENOMEM;
81537+ goto free_vma;
81538+ }
81539+ }
81540+#endif
81541+
81542 vma->vm_mm = mm;
81543 vma->vm_start = addr;
81544 vma->vm_end = addr + len;
81545@@ -1484,6 +1625,13 @@ munmap_back:
81546 if (error)
81547 goto unmap_and_free_vma;
81548
81549+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
81550+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
81551+ vma->vm_flags |= VM_PAGEEXEC;
81552+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
81553+ }
81554+#endif
81555+
81556 /* Can addr have changed??
81557 *
81558 * Answer: Yes, several device drivers can do it in their
81559@@ -1522,6 +1670,11 @@ munmap_back:
81560 vma_link(mm, vma, prev, rb_link, rb_parent);
81561 file = vma->vm_file;
81562
81563+#ifdef CONFIG_PAX_SEGMEXEC
81564+ if (vma_m)
81565+ BUG_ON(pax_mirror_vma(vma_m, vma));
81566+#endif
81567+
81568 /* Once vma denies write, undo our temporary denial count */
81569 if (correct_wcount)
81570 atomic_inc(&inode->i_writecount);
81571@@ -1529,6 +1682,7 @@ out:
81572 perf_event_mmap(vma);
81573
81574 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
81575+ track_exec_limit(mm, addr, addr + len, vm_flags);
81576 if (vm_flags & VM_LOCKED) {
81577 if (!mlock_vma_pages_range(vma, addr, addr + len))
81578 mm->locked_vm += (len >> PAGE_SHIFT);
81579@@ -1550,6 +1704,12 @@ unmap_and_free_vma:
81580 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
81581 charged = 0;
81582 free_vma:
81583+
81584+#ifdef CONFIG_PAX_SEGMEXEC
81585+ if (vma_m)
81586+ kmem_cache_free(vm_area_cachep, vma_m);
81587+#endif
81588+
81589 kmem_cache_free(vm_area_cachep, vma);
81590 unacct_error:
81591 if (charged)
81592@@ -1557,6 +1717,62 @@ unacct_error:
81593 return error;
81594 }
81595
81596+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
81597+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
81598+{
81599+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
81600+ return (random32() & 0xFF) << PAGE_SHIFT;
81601+
81602+ return 0;
81603+}
81604+#endif
81605+
81606+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
81607+{
81608+ if (!vma) {
81609+#ifdef CONFIG_STACK_GROWSUP
81610+ if (addr > sysctl_heap_stack_gap)
81611+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
81612+ else
81613+ vma = find_vma(current->mm, 0);
81614+ if (vma && (vma->vm_flags & VM_GROWSUP))
81615+ return false;
81616+#endif
81617+ return true;
81618+ }
81619+
81620+ if (addr + len > vma->vm_start)
81621+ return false;
81622+
81623+ if (vma->vm_flags & VM_GROWSDOWN)
81624+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
81625+#ifdef CONFIG_STACK_GROWSUP
81626+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
81627+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
81628+#endif
81629+ else if (offset)
81630+ return offset <= vma->vm_start - addr - len;
81631+
81632+ return true;
81633+}
81634+
81635+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
81636+{
81637+ if (vma->vm_start < len)
81638+ return -ENOMEM;
81639+
81640+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
81641+ if (offset <= vma->vm_start - len)
81642+ return vma->vm_start - len - offset;
81643+ else
81644+ return -ENOMEM;
81645+ }
81646+
81647+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
81648+ return vma->vm_start - len - sysctl_heap_stack_gap;
81649+ return -ENOMEM;
81650+}
81651+
81652 unsigned long unmapped_area(struct vm_unmapped_area_info *info)
81653 {
81654 /*
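
check_heap_stack_gap() above is the heart of the heap-stack gap feature: a candidate mapping may not come within sysctl_heap_stack_gap bytes (64 KiB by default, per the earlier sysctl hunk) of a growing stack VMA, and skip_heap_stack_gap() backs a top-down search off by the same amount. The core VM_GROWSDOWN case, modeled in userspace:

    #include <stdbool.h>
    #include <stdint.h>

    /* vm_start is the start of the next VMA at or above addr, or 0 when
     * there is none; gap is sysctl_heap_stack_gap. */
    static bool gap_ok(uint64_t addr, uint64_t len, uint64_t vm_start,
                       bool grows_down, uint64_t gap)
    {
        if (vm_start == 0)
            return true;            /* nothing above to collide with */
        if (addr + len > vm_start)
            return false;           /* overlaps the VMA outright */
        if (grows_down)
            return gap <= vm_start - addr - len;
        return true;
    }

The GROWSUP branches mirror this against the previous VMA on architectures such as PA-RISC whose stacks grow upward.
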
81655@@ -1776,6 +1992,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
81656 struct mm_struct *mm = current->mm;
81657 struct vm_area_struct *vma;
81658 struct vm_unmapped_area_info info;
81659+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
81660
81661 if (len > TASK_SIZE)
81662 return -ENOMEM;
81663@@ -1783,17 +2000,26 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
81664 if (flags & MAP_FIXED)
81665 return addr;
81666
81667+#ifdef CONFIG_PAX_RANDMMAP
81668+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
81669+#endif
81670+
81671 if (addr) {
81672 addr = PAGE_ALIGN(addr);
81673 vma = find_vma(mm, addr);
81674- if (TASK_SIZE - len >= addr &&
81675- (!vma || addr + len <= vma->vm_start))
81676+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
81677 return addr;
81678 }
81679
81680 info.flags = 0;
81681 info.length = len;
81682 info.low_limit = TASK_UNMAPPED_BASE;
81683+
81684+#ifdef CONFIG_PAX_RANDMMAP
81685+ if (mm->pax_flags & MF_PAX_RANDMMAP)
81686+ info.low_limit += mm->delta_mmap;
81687+#endif
81688+
81689 info.high_limit = TASK_SIZE;
81690 info.align_mask = 0;
81691 return vm_unmapped_area(&info);
81692@@ -1802,10 +2028,16 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
81693
81694 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
81695 {
81696+
81697+#ifdef CONFIG_PAX_SEGMEXEC
81698+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
81699+ return;
81700+#endif
81701+
81702 /*
81703 * Is this a new hole at the lowest possible address?
81704 */
81705- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
81706+ if (addr >= mm->mmap_base && addr < mm->free_area_cache)
81707 mm->free_area_cache = addr;
81708 }
81709
81710@@ -1823,6 +2055,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
81711 struct mm_struct *mm = current->mm;
81712 unsigned long addr = addr0;
81713 struct vm_unmapped_area_info info;
81714+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
81715
81716 /* requested length too big for entire address space */
81717 if (len > TASK_SIZE)
81718@@ -1831,12 +2064,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
81719 if (flags & MAP_FIXED)
81720 return addr;
81721
81722+#ifdef CONFIG_PAX_RANDMMAP
81723+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
81724+#endif
81725+
81726 /* requesting a specific address */
81727 if (addr) {
81728 addr = PAGE_ALIGN(addr);
81729 vma = find_vma(mm, addr);
81730- if (TASK_SIZE - len >= addr &&
81731- (!vma || addr + len <= vma->vm_start))
81732+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
81733 return addr;
81734 }
81735
81736@@ -1857,6 +2093,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
81737 VM_BUG_ON(addr != -ENOMEM);
81738 info.flags = 0;
81739 info.low_limit = TASK_UNMAPPED_BASE;
81740+
81741+#ifdef CONFIG_PAX_RANDMMAP
81742+ if (mm->pax_flags & MF_PAX_RANDMMAP)
81743+ info.low_limit += mm->delta_mmap;
81744+#endif
81745+
81746 info.high_limit = TASK_SIZE;
81747 addr = vm_unmapped_area(&info);
81748 }
81749@@ -1867,6 +2109,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
81750
81751 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
81752 {
81753+
81754+#ifdef CONFIG_PAX_SEGMEXEC
81755+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
81756+ return;
81757+#endif
81758+
81759 /*
81760 * Is this a new hole at the highest possible address?
81761 */
81762@@ -1874,8 +2122,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
81763 mm->free_area_cache = addr;
81764
81765 /* dont allow allocations above current base */
81766- if (mm->free_area_cache > mm->mmap_base)
81767+ if (mm->free_area_cache > mm->mmap_base) {
81768 mm->free_area_cache = mm->mmap_base;
81769+ mm->cached_hole_size = ~0UL;
81770+ }
81771 }
81772
81773 unsigned long
81774@@ -1974,6 +2224,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
81775 return vma;
81776 }
81777
81778+#ifdef CONFIG_PAX_SEGMEXEC
81779+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
81780+{
81781+ struct vm_area_struct *vma_m;
81782+
81783+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
81784+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
81785+ BUG_ON(vma->vm_mirror);
81786+ return NULL;
81787+ }
81788+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
81789+ vma_m = vma->vm_mirror;
81790+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
81791+ BUG_ON(vma->vm_file != vma_m->vm_file);
81792+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
81793+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
81794+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
81795+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
81796+ return vma_m;
81797+}
81798+#endif
81799+
81800 /*
81801 * Verify that the stack growth is acceptable and
81802 * update accounting. This is shared with both the
81803@@ -1990,6 +2262,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
81804 return -ENOMEM;
81805
81806 /* Stack limit test */
81807+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
81808 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
81809 return -ENOMEM;
81810
81811@@ -2000,6 +2273,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
81812 locked = mm->locked_vm + grow;
81813 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
81814 limit >>= PAGE_SHIFT;
81815+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
81816 if (locked > limit && !capable(CAP_IPC_LOCK))
81817 return -ENOMEM;
81818 }
81819@@ -2029,37 +2303,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
81820 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
81821 * vma is the last one with address > vma->vm_end. Have to extend vma.
81822 */
81823+#ifndef CONFIG_IA64
81824+static
81825+#endif
81826 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
81827 {
81828 int error;
81829+ bool locknext;
81830
81831 if (!(vma->vm_flags & VM_GROWSUP))
81832 return -EFAULT;
81833
81834+ /* Also guard against wrapping around to address 0. */
81835+ if (address < PAGE_ALIGN(address+1))
81836+ address = PAGE_ALIGN(address+1);
81837+ else
81838+ return -ENOMEM;
81839+
81840 /*
81841 * We must make sure the anon_vma is allocated
81842 * so that the anon_vma locking is not a noop.
81843 */
81844 if (unlikely(anon_vma_prepare(vma)))
81845 return -ENOMEM;
81846+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
81847+ if (locknext && anon_vma_prepare(vma->vm_next))
81848+ return -ENOMEM;
81849 vma_lock_anon_vma(vma);
81850+ if (locknext)
81851+ vma_lock_anon_vma(vma->vm_next);
81852
81853 /*
81854 * vma->vm_start/vm_end cannot change under us because the caller
81855 * is required to hold the mmap_sem in read mode. We need the
81856- * anon_vma lock to serialize against concurrent expand_stacks.
81857- * Also guard against wrapping around to address 0.
81858+ * anon_vma locks to serialize against concurrent expand_stacks
81859+ * and expand_upwards.
81860 */
81861- if (address < PAGE_ALIGN(address+4))
81862- address = PAGE_ALIGN(address+4);
81863- else {
81864- vma_unlock_anon_vma(vma);
81865- return -ENOMEM;
81866- }
81867 error = 0;
81868
81869 /* Somebody else might have raced and expanded it already */
81870- if (address > vma->vm_end) {
81871+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
81872+ error = -ENOMEM;
81873+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
81874 unsigned long size, grow;
81875
81876 size = address - vma->vm_start;
81877@@ -2094,6 +2379,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
81878 }
81879 }
81880 }
81881+ if (locknext)
81882+ vma_unlock_anon_vma(vma->vm_next);
81883 vma_unlock_anon_vma(vma);
81884 khugepaged_enter_vma_merge(vma);
81885 validate_mm(vma->vm_mm);
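
The reworked expand_upwards() moves the wrap-around guard ahead of any locking, refuses growth that would come within sysctl_heap_stack_gap of the next mapping, and takes the next VMA's anon_vma lock so a GROWSUP stack and a neighbouring GROWSDOWN one expand under mutual exclusion. The guard itself is plain alignment arithmetic, sketched here:

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096ULL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    /* Aligning address+1 upward can only yield a smaller value by
     * wrapping past zero, so "aligned result must be larger" doubles
     * as the overflow check the old code did with address+4. */
    static bool grow_target(uint64_t address, uint64_t *aligned)
    {
        uint64_t a = PAGE_ALIGN(address + 1);

        if (address < a) {
            *aligned = a;       /* safe new vm_end */
            return true;
        }
        return false;           /* wrapped: the kernel returns -ENOMEM */
    }
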
81886@@ -2108,6 +2395,8 @@ int expand_downwards(struct vm_area_struct *vma,
81887 unsigned long address)
81888 {
81889 int error;
81890+ bool lockprev = false;
81891+ struct vm_area_struct *prev;
81892
81893 /*
81894 * We must make sure the anon_vma is allocated
81895@@ -2121,6 +2410,15 @@ int expand_downwards(struct vm_area_struct *vma,
81896 if (error)
81897 return error;
81898
81899+ prev = vma->vm_prev;
81900+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
81901+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
81902+#endif
81903+ if (lockprev && anon_vma_prepare(prev))
81904+ return -ENOMEM;
81905+ if (lockprev)
81906+ vma_lock_anon_vma(prev);
81907+
81908 vma_lock_anon_vma(vma);
81909
81910 /*
81911@@ -2130,9 +2428,17 @@ int expand_downwards(struct vm_area_struct *vma,
81912 */
81913
81914 /* Somebody else might have raced and expanded it already */
81915- if (address < vma->vm_start) {
81916+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
81917+ error = -ENOMEM;
81918+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
81919 unsigned long size, grow;
81920
81921+#ifdef CONFIG_PAX_SEGMEXEC
81922+ struct vm_area_struct *vma_m;
81923+
81924+ vma_m = pax_find_mirror_vma(vma);
81925+#endif
81926+
81927 size = vma->vm_end - address;
81928 grow = (vma->vm_start - address) >> PAGE_SHIFT;
81929
81930@@ -2157,6 +2463,18 @@ int expand_downwards(struct vm_area_struct *vma,
81931 vma->vm_pgoff -= grow;
81932 anon_vma_interval_tree_post_update_vma(vma);
81933 vma_gap_update(vma);
81934+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
81935+
81936+#ifdef CONFIG_PAX_SEGMEXEC
81937+ if (vma_m) {
81938+ anon_vma_interval_tree_pre_update_vma(vma_m);
81939+ vma_m->vm_start -= grow << PAGE_SHIFT;
81940+ vma_m->vm_pgoff -= grow;
81941+ anon_vma_interval_tree_post_update_vma(vma_m);
81942+ vma_gap_update(vma_m);
81943+ }
81944+#endif
81945+
81946 spin_unlock(&vma->vm_mm->page_table_lock);
81947
81948 perf_event_mmap(vma);
81949@@ -2263,6 +2581,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
81950 do {
81951 long nrpages = vma_pages(vma);
81952
81953+#ifdef CONFIG_PAX_SEGMEXEC
81954+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
81955+ vma = remove_vma(vma);
81956+ continue;
81957+ }
81958+#endif
81959+
81960 if (vma->vm_flags & VM_ACCOUNT)
81961 nr_accounted += nrpages;
81962 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
81963@@ -2308,6 +2633,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
81964 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
81965 vma->vm_prev = NULL;
81966 do {
81967+
81968+#ifdef CONFIG_PAX_SEGMEXEC
81969+ if (vma->vm_mirror) {
81970+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
81971+ vma->vm_mirror->vm_mirror = NULL;
81972+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
81973+ vma->vm_mirror = NULL;
81974+ }
81975+#endif
81976+
81977 vma_rb_erase(vma, &mm->mm_rb);
81978 mm->map_count--;
81979 tail_vma = vma;
81980@@ -2339,14 +2674,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
81981 struct vm_area_struct *new;
81982 int err = -ENOMEM;
81983
81984+#ifdef CONFIG_PAX_SEGMEXEC
81985+ struct vm_area_struct *vma_m, *new_m = NULL;
81986+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
81987+#endif
81988+
81989 if (is_vm_hugetlb_page(vma) && (addr &
81990 ~(huge_page_mask(hstate_vma(vma)))))
81991 return -EINVAL;
81992
81993+#ifdef CONFIG_PAX_SEGMEXEC
81994+ vma_m = pax_find_mirror_vma(vma);
81995+#endif
81996+
81997 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
81998 if (!new)
81999 goto out_err;
82000
82001+#ifdef CONFIG_PAX_SEGMEXEC
82002+ if (vma_m) {
82003+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
82004+ if (!new_m) {
82005+ kmem_cache_free(vm_area_cachep, new);
82006+ goto out_err;
82007+ }
82008+ }
82009+#endif
82010+
82011 /* most fields are the same, copy all, and then fixup */
82012 *new = *vma;
82013
82014@@ -2359,6 +2713,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
82015 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
82016 }
82017
82018+#ifdef CONFIG_PAX_SEGMEXEC
82019+ if (vma_m) {
82020+ *new_m = *vma_m;
82021+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
82022+ new_m->vm_mirror = new;
82023+ new->vm_mirror = new_m;
82024+
82025+ if (new_below)
82026+ new_m->vm_end = addr_m;
82027+ else {
82028+ new_m->vm_start = addr_m;
82029+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
82030+ }
82031+ }
82032+#endif
82033+
82034 pol = mpol_dup(vma_policy(vma));
82035 if (IS_ERR(pol)) {
82036 err = PTR_ERR(pol);
82037@@ -2381,6 +2751,36 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
82038 else
82039 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
82040
82041+#ifdef CONFIG_PAX_SEGMEXEC
82042+ if (!err && vma_m) {
82043+ if (anon_vma_clone(new_m, vma_m))
82044+ goto out_free_mpol;
82045+
82046+ mpol_get(pol);
82047+ vma_set_policy(new_m, pol);
82048+
82049+ if (new_m->vm_file)
82050+ get_file(new_m->vm_file);
82051+
82052+ if (new_m->vm_ops && new_m->vm_ops->open)
82053+ new_m->vm_ops->open(new_m);
82054+
82055+ if (new_below)
82056+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
82057+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
82058+ else
82059+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
82060+
82061+ if (err) {
82062+ if (new_m->vm_ops && new_m->vm_ops->close)
82063+ new_m->vm_ops->close(new_m);
82064+ if (new_m->vm_file)
82065+ fput(new_m->vm_file);
82066+ mpol_put(pol);
82067+ }
82068+ }
82069+#endif
82070+
82071 /* Success. */
82072 if (!err)
82073 return 0;
82074@@ -2390,10 +2790,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
82075 new->vm_ops->close(new);
82076 if (new->vm_file)
82077 fput(new->vm_file);
82078- unlink_anon_vmas(new);
82079 out_free_mpol:
82080 mpol_put(pol);
82081 out_free_vma:
82082+
82083+#ifdef CONFIG_PAX_SEGMEXEC
82084+ if (new_m) {
82085+ unlink_anon_vmas(new_m);
82086+ kmem_cache_free(vm_area_cachep, new_m);
82087+ }
82088+#endif
82089+
82090+ unlink_anon_vmas(new);
82091 kmem_cache_free(vm_area_cachep, new);
82092 out_err:
82093 return err;
82094@@ -2406,6 +2814,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
82095 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
82096 unsigned long addr, int new_below)
82097 {
82098+
82099+#ifdef CONFIG_PAX_SEGMEXEC
82100+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
82101+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
82102+ if (mm->map_count >= sysctl_max_map_count-1)
82103+ return -ENOMEM;
82104+ } else
82105+#endif
82106+
82107 if (mm->map_count >= sysctl_max_map_count)
82108 return -ENOMEM;
82109
82110@@ -2417,11 +2834,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
82111 * work. This now handles partial unmappings.
82112 * Jeremy Fitzhardinge <jeremy@goop.org>
82113 */
82114+#ifdef CONFIG_PAX_SEGMEXEC
82115 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
82116 {
82117+ int ret = __do_munmap(mm, start, len);
82118+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
82119+ return ret;
82120+
82121+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
82122+}
82123+
82124+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
82125+#else
82126+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
82127+#endif
82128+{
82129 unsigned long end;
82130 struct vm_area_struct *vma, *prev, *last;
82131
82132+ /*
82133+ * mm->mmap_sem is required to protect against another thread
82134+ * changing the mappings in case we sleep.
82135+ */
82136+ verify_mm_writelocked(mm);
82137+
82138 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
82139 return -EINVAL;
82140
82141@@ -2496,6 +2932,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
82142 /* Fix up all other VM information */
82143 remove_vma_list(mm, vma);
82144
82145+ track_exec_limit(mm, start, end, 0UL);
82146+
82147 return 0;
82148 }
82149
82150@@ -2504,6 +2942,13 @@ int vm_munmap(unsigned long start, size_t len)
82151 int ret;
82152 struct mm_struct *mm = current->mm;
82153
82154+
82155+#ifdef CONFIG_PAX_SEGMEXEC
82156+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
82157+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
82158+ return -EINVAL;
82159+#endif
82160+
82161 down_write(&mm->mmap_sem);
82162 ret = do_munmap(mm, start, len);
82163 up_write(&mm->mmap_sem);
82164@@ -2517,16 +2962,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
82165 return vm_munmap(addr, len);
82166 }
82167
82168-static inline void verify_mm_writelocked(struct mm_struct *mm)
82169-{
82170-#ifdef CONFIG_DEBUG_VM
82171- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
82172- WARN_ON(1);
82173- up_read(&mm->mmap_sem);
82174- }
82175-#endif
82176-}
82177-
82178 /*
82179 * This is really a simplified "do_mmap". It only handles
82180 * anonymous maps. Eventually we may be able to do some
82181@@ -2540,6 +2975,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
82182 struct rb_node ** rb_link, * rb_parent;
82183 pgoff_t pgoff = addr >> PAGE_SHIFT;
82184 int error;
82185+ unsigned long charged;
82186
82187 len = PAGE_ALIGN(len);
82188 if (!len)
82189@@ -2547,16 +2983,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
82190
82191 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
82192
82193+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
82194+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
82195+ flags &= ~VM_EXEC;
82196+
82197+#ifdef CONFIG_PAX_MPROTECT
82198+ if (mm->pax_flags & MF_PAX_MPROTECT)
82199+ flags &= ~VM_MAYEXEC;
82200+#endif
82201+
82202+ }
82203+#endif
82204+
82205 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
82206 if (error & ~PAGE_MASK)
82207 return error;
82208
82209+ charged = len >> PAGE_SHIFT;
82210+
82211 /*
82212 * mlock MCL_FUTURE?
82213 */
82214 if (mm->def_flags & VM_LOCKED) {
82215 unsigned long locked, lock_limit;
82216- locked = len >> PAGE_SHIFT;
82217+ locked = charged;
82218 locked += mm->locked_vm;
82219 lock_limit = rlimit(RLIMIT_MEMLOCK);
82220 lock_limit >>= PAGE_SHIFT;
82221@@ -2573,21 +3023,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
82222 /*
82223 * Clear old maps. This also does some error checking for us.
82224 */
82225- munmap_back:
82226 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
82227 if (do_munmap(mm, addr, len))
82228 return -ENOMEM;
82229- goto munmap_back;
82230+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
82231 }
82232
82233 /* Check against address space limits *after* clearing old maps... */
82234- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
82235+ if (!may_expand_vm(mm, charged))
82236 return -ENOMEM;
82237
82238 if (mm->map_count > sysctl_max_map_count)
82239 return -ENOMEM;
82240
82241- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
82242+ if (security_vm_enough_memory_mm(mm, charged))
82243 return -ENOMEM;
82244
82245 /* Can we just expand an old private anonymous mapping? */
82246@@ -2601,7 +3050,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
82247 */
82248 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
82249 if (!vma) {
82250- vm_unacct_memory(len >> PAGE_SHIFT);
82251+ vm_unacct_memory(charged);
82252 return -ENOMEM;
82253 }
82254
82255@@ -2615,11 +3064,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
82256 vma_link(mm, vma, prev, rb_link, rb_parent);
82257 out:
82258 perf_event_mmap(vma);
82259- mm->total_vm += len >> PAGE_SHIFT;
82260+ mm->total_vm += charged;
82261 if (flags & VM_LOCKED) {
82262 if (!mlock_vma_pages_range(vma, addr, addr + len))
82263- mm->locked_vm += (len >> PAGE_SHIFT);
82264+ mm->locked_vm += charged;
82265 }
82266+ track_exec_limit(mm, addr, addr + len, flags);
82267 return addr;
82268 }
82269
82270@@ -2677,6 +3127,7 @@ void exit_mmap(struct mm_struct *mm)
82271 while (vma) {
82272 if (vma->vm_flags & VM_ACCOUNT)
82273 nr_accounted += vma_pages(vma);
82274+ vma->vm_mirror = NULL;
82275 vma = remove_vma(vma);
82276 }
82277 vm_unacct_memory(nr_accounted);
82278@@ -2693,6 +3144,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
82279 struct vm_area_struct *prev;
82280 struct rb_node **rb_link, *rb_parent;
82281
82282+#ifdef CONFIG_PAX_SEGMEXEC
82283+ struct vm_area_struct *vma_m = NULL;
82284+#endif
82285+
82286+ if (security_mmap_addr(vma->vm_start))
82287+ return -EPERM;
82288+
82289 /*
82290 * The vm_pgoff of a purely anonymous vma should be irrelevant
82291 * until its first write fault, when page's anon_vma and index
82292@@ -2716,7 +3174,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
82293 security_vm_enough_memory_mm(mm, vma_pages(vma)))
82294 return -ENOMEM;
82295
82296+#ifdef CONFIG_PAX_SEGMEXEC
82297+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
82298+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
82299+ if (!vma_m)
82300+ return -ENOMEM;
82301+ }
82302+#endif
82303+
82304 vma_link(mm, vma, prev, rb_link, rb_parent);
82305+
82306+#ifdef CONFIG_PAX_SEGMEXEC
82307+ if (vma_m)
82308+ BUG_ON(pax_mirror_vma(vma_m, vma));
82309+#endif
82310+
82311 return 0;
82312 }
82313
82314@@ -2736,6 +3208,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
82315 struct mempolicy *pol;
82316 bool faulted_in_anon_vma = true;
82317
82318+ BUG_ON(vma->vm_mirror);
82319+
82320 /*
82321 * If anonymous vma has not yet been faulted, update new pgoff
82322 * to match new location, to increase its chance of merging.
82323@@ -2802,6 +3276,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
82324 return NULL;
82325 }
82326
82327+#ifdef CONFIG_PAX_SEGMEXEC
82328+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
82329+{
82330+ struct vm_area_struct *prev_m;
82331+ struct rb_node **rb_link_m, *rb_parent_m;
82332+ struct mempolicy *pol_m;
82333+
82334+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
82335+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
82336+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
82337+ *vma_m = *vma;
82338+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
82339+ if (anon_vma_clone(vma_m, vma))
82340+ return -ENOMEM;
82341+ pol_m = vma_policy(vma_m);
82342+ mpol_get(pol_m);
82343+ vma_set_policy(vma_m, pol_m);
82344+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
82345+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
82346+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
82347+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
82348+ if (vma_m->vm_file)
82349+ get_file(vma_m->vm_file);
82350+ if (vma_m->vm_ops && vma_m->vm_ops->open)
82351+ vma_m->vm_ops->open(vma_m);
82352+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
82353+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
82354+ vma_m->vm_mirror = vma;
82355+ vma->vm_mirror = vma_m;
82356+ return 0;
82357+}
82358+#endif
82359+
82360 /*
82361 * Return true if the calling process may expand its vm space by the passed
82362 * number of pages
82363@@ -2813,6 +3320,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
82364
82365 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
82366
82367+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
82368 if (cur + npages > lim)
82369 return 0;
82370 return 1;
82371@@ -2883,6 +3391,22 @@ int install_special_mapping(struct mm_struct *mm,
82372 vma->vm_start = addr;
82373 vma->vm_end = addr + len;
82374
82375+#ifdef CONFIG_PAX_MPROTECT
82376+ if (mm->pax_flags & MF_PAX_MPROTECT) {
82377+#ifndef CONFIG_PAX_MPROTECT_COMPAT
82378+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
82379+ return -EPERM;
82380+ if (!(vm_flags & VM_EXEC))
82381+ vm_flags &= ~VM_MAYEXEC;
82382+#else
82383+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
82384+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
82385+#endif
82386+ else
82387+ vm_flags &= ~VM_MAYWRITE;
82388+ }
82389+#endif
82390+
82391 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
82392 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
82393
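
pax_mirror_vma() and its callers throughout the mmap.c hunks maintain the SEGMEXEC invariant: the i386 user address space is split in half, and every executable VMA in the lower half owns a non-writable mirror at the same offset in the upper half, kept in lockstep through merge, split, munmap and exit. Purely illustrative address arithmetic (HALF stands in for SEGMEXEC_TASK_SIZE, whose real value is arch-defined):

    #include <stdint.h>
    #include <stdio.h>

    #define HALF 0x60000000UL   /* placeholder for SEGMEXEC_TASK_SIZE */

    int main(void)
    {
        uint64_t start = 0x08048000UL, end = 0x08050000UL; /* sample text VMA */

        /* The mirror lives at the same offset in the upper half; the
         * patch clears VM_WRITE/VM_MAYWRITE on it, so it is never
         * writable. */
        printf("vma:    0x%08llx-0x%08llx\n",
               (unsigned long long)start, (unsigned long long)end);
        printf("mirror: 0x%08llx-0x%08llx\n",
               (unsigned long long)(start + HALF),
               (unsigned long long)(end + HALF));
        return 0;
    }
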
82394diff --git a/mm/mprotect.c b/mm/mprotect.c
82395index 94722a4..9837984 100644
82396--- a/mm/mprotect.c
82397+++ b/mm/mprotect.c
82398@@ -23,10 +23,17 @@
82399 #include <linux/mmu_notifier.h>
82400 #include <linux/migrate.h>
82401 #include <linux/perf_event.h>
82402+
82403+#ifdef CONFIG_PAX_MPROTECT
82404+#include <linux/elf.h>
82405+#include <linux/binfmts.h>
82406+#endif
82407+
82408 #include <asm/uaccess.h>
82409 #include <asm/pgtable.h>
82410 #include <asm/cacheflush.h>
82411 #include <asm/tlbflush.h>
82412+#include <asm/mmu_context.h>
82413
82414 #ifndef pgprot_modify
82415 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
82416@@ -233,6 +240,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
82417 return pages;
82418 }
82419
82420+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
82421+/* called while holding the mmap semaphore for writing, except for stack expansion */
82422+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
82423+{
82424+ unsigned long oldlimit, newlimit = 0UL;
82425+
82426+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
82427+ return;
82428+
82429+ spin_lock(&mm->page_table_lock);
82430+ oldlimit = mm->context.user_cs_limit;
82431+ if ((prot & VM_EXEC) && oldlimit < end)
82432+ /* USER_CS limit moved up */
82433+ newlimit = end;
82434+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
82435+ /* USER_CS limit moved down */
82436+ newlimit = start;
82437+
82438+ if (newlimit) {
82439+ mm->context.user_cs_limit = newlimit;
82440+
82441+#ifdef CONFIG_SMP
82442+ wmb();
82443+ cpus_clear(mm->context.cpu_user_cs_mask);
82444+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
82445+#endif
82446+
82447+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
82448+ }
82449+ spin_unlock(&mm->page_table_lock);
82450+ if (newlimit == end) {
82451+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
82452+
82453+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
82454+ if (is_vm_hugetlb_page(vma))
82455+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
82456+ else
82457+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
82458+ }
82459+}
82460+#endif
82461+
82462 int
82463 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
82464 unsigned long start, unsigned long end, unsigned long newflags)
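
track_exec_limit() services the i386 PAGEEXEC segment-limit emulation: when the CPU has no usable NX bit, executable user space is bounded by the CS segment limit, so the limit must rise to cover a newly executable range and fall when the topmost one loses exec, after which the other CPUs are resynchronized via cpu_user_cs_mask. The update decision in isolation:

    #include <stdint.h>

    /* The executable segment limit tracks the end of the highest
     * executable mapping.  Returns the new limit, or 0 when the range
     * leaves it unchanged. */
    static uint64_t new_cs_limit(uint64_t oldlimit, uint64_t start,
                                 uint64_t end, int is_exec)
    {
        if (is_exec && oldlimit < end)
            return end;     /* limit moves up over the new exec range */
        if (!is_exec && start < oldlimit && oldlimit <= end)
            return start;   /* topmost exec range went away: move down */
        return 0;
    }
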
82465@@ -245,11 +294,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
82466 int error;
82467 int dirty_accountable = 0;
82468
82469+#ifdef CONFIG_PAX_SEGMEXEC
82470+ struct vm_area_struct *vma_m = NULL;
82471+ unsigned long start_m, end_m;
82472+
82473+ start_m = start + SEGMEXEC_TASK_SIZE;
82474+ end_m = end + SEGMEXEC_TASK_SIZE;
82475+#endif
82476+
82477 if (newflags == oldflags) {
82478 *pprev = vma;
82479 return 0;
82480 }
82481
82482+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
82483+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
82484+
82485+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
82486+ return -ENOMEM;
82487+
82488+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
82489+ return -ENOMEM;
82490+ }
82491+
82492 /*
82493 * If we make a private mapping writable we increase our commit;
82494 * but (without finer accounting) cannot reduce our commit if we
82495@@ -266,6 +333,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
82496 }
82497 }
82498
82499+#ifdef CONFIG_PAX_SEGMEXEC
82500+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
82501+ if (start != vma->vm_start) {
82502+ error = split_vma(mm, vma, start, 1);
82503+ if (error)
82504+ goto fail;
82505+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
82506+ *pprev = (*pprev)->vm_next;
82507+ }
82508+
82509+ if (end != vma->vm_end) {
82510+ error = split_vma(mm, vma, end, 0);
82511+ if (error)
82512+ goto fail;
82513+ }
82514+
82515+ if (pax_find_mirror_vma(vma)) {
82516+ error = __do_munmap(mm, start_m, end_m - start_m);
82517+ if (error)
82518+ goto fail;
82519+ } else {
82520+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
82521+ if (!vma_m) {
82522+ error = -ENOMEM;
82523+ goto fail;
82524+ }
82525+ vma->vm_flags = newflags;
82526+ error = pax_mirror_vma(vma_m, vma);
82527+ if (error) {
82528+ vma->vm_flags = oldflags;
82529+ goto fail;
82530+ }
82531+ }
82532+ }
82533+#endif
82534+
82535 /*
82536 * First try to merge with previous and/or next vma.
82537 */
82538@@ -296,9 +399,21 @@ success:
82539 * vm_flags and vm_page_prot are protected by the mmap_sem
82540 * held in write mode.
82541 */
82542+
82543+#ifdef CONFIG_PAX_SEGMEXEC
82544+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
82545+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
82546+#endif
82547+
82548 vma->vm_flags = newflags;
82549+
82550+#ifdef CONFIG_PAX_MPROTECT
82551+ if (mm->binfmt && mm->binfmt->handle_mprotect)
82552+ mm->binfmt->handle_mprotect(vma, newflags);
82553+#endif
82554+
82555 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
82556- vm_get_page_prot(newflags));
82557+ vm_get_page_prot(vma->vm_flags));
82558
82559 if (vma_wants_writenotify(vma)) {
82560 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
82561@@ -337,6 +452,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
82562 end = start + len;
82563 if (end <= start)
82564 return -ENOMEM;
82565+
82566+#ifdef CONFIG_PAX_SEGMEXEC
82567+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
82568+ if (end > SEGMEXEC_TASK_SIZE)
82569+ return -EINVAL;
82570+ } else
82571+#endif
82572+
82573+ if (end > TASK_SIZE)
82574+ return -EINVAL;
82575+
82576 if (!arch_validate_prot(prot))
82577 return -EINVAL;
82578
82579@@ -344,7 +470,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
82580 /*
82581 * Does the application expect PROT_READ to imply PROT_EXEC:
82582 */
82583- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
82584+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
82585 prot |= PROT_EXEC;
82586
82587 vm_flags = calc_vm_prot_bits(prot);
82588@@ -376,6 +502,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
82589 if (start > vma->vm_start)
82590 prev = vma;
82591
82592+#ifdef CONFIG_PAX_MPROTECT
82593+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
82594+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
82595+#endif
82596+
82597 for (nstart = start ; ; ) {
82598 unsigned long newflags;
82599
82600@@ -386,6 +517,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
82601
82602 /* newflags >> 4 shift VM_MAY% in place of VM_% */
82603 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
82604+ if (prot & (PROT_WRITE | PROT_EXEC))
82605+ gr_log_rwxmprotect(vma->vm_file);
82606+
82607+ error = -EACCES;
82608+ goto out;
82609+ }
82610+
82611+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
82612 error = -EACCES;
82613 goto out;
82614 }
82615@@ -400,6 +539,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
82616 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
82617 if (error)
82618 goto out;
82619+
82620+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
82621+
82622 nstart = tmp;
82623
82624 if (nstart < prev->vm_end)
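
Both this file's sys_mprotect() hunk and the earlier do_mmap_pgoff() one widen the READ_IMPLIES_EXEC personality test from PROT_READ to PROT_READ|PROT_WRITE. As a standalone helper, under the reading that a writable x86 page is necessarily readable, so legacy binaries relying on the personality expect exec there too:

    #include <sys/mman.h>

    static int effective_prot(int prot, int read_implies_exec)
    {
        if (read_implies_exec && (prot & (PROT_READ | PROT_WRITE)))
            prot |= PROT_EXEC;
        return prot;
    }
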
82625diff --git a/mm/mremap.c b/mm/mremap.c
82626index e1031e1..1f2a0a1 100644
82627--- a/mm/mremap.c
82628+++ b/mm/mremap.c
82629@@ -125,6 +125,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
82630 continue;
82631 pte = ptep_get_and_clear(mm, old_addr, old_pte);
82632 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
82633+
82634+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
82635+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
82636+ pte = pte_exprotect(pte);
82637+#endif
82638+
82639 set_pte_at(mm, new_addr, new_pte, pte);
82640 }
82641
82642@@ -319,6 +325,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
82643 if (is_vm_hugetlb_page(vma))
82644 goto Einval;
82645
82646+#ifdef CONFIG_PAX_SEGMEXEC
82647+ if (pax_find_mirror_vma(vma))
82648+ goto Einval;
82649+#endif
82650+
82651 /* We can't remap across vm area boundaries */
82652 if (old_len > vma->vm_end - addr)
82653 goto Efault;
82654@@ -375,20 +386,25 @@ static unsigned long mremap_to(unsigned long addr,
82655 unsigned long ret = -EINVAL;
82656 unsigned long charged = 0;
82657 unsigned long map_flags;
82658+ unsigned long pax_task_size = TASK_SIZE;
82659
82660 if (new_addr & ~PAGE_MASK)
82661 goto out;
82662
82663- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
82664+#ifdef CONFIG_PAX_SEGMEXEC
82665+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
82666+ pax_task_size = SEGMEXEC_TASK_SIZE;
82667+#endif
82668+
82669+ pax_task_size -= PAGE_SIZE;
82670+
82671+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
82672 goto out;
82673
82674 /* Check if the location we're moving into overlaps the
82675 * old location at all, and fail if it does.
82676 */
82677- if ((new_addr <= addr) && (new_addr+new_len) > addr)
82678- goto out;
82679-
82680- if ((addr <= new_addr) && (addr+old_len) > new_addr)
82681+ if (addr + old_len > new_addr && new_addr + new_len > addr)
82682 goto out;
82683
82684 ret = do_munmap(mm, new_addr, new_len);
82685@@ -456,6 +472,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
82686 struct vm_area_struct *vma;
82687 unsigned long ret = -EINVAL;
82688 unsigned long charged = 0;
82689+ unsigned long pax_task_size = TASK_SIZE;
82690
82691 down_write(&current->mm->mmap_sem);
82692
82693@@ -476,6 +493,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
82694 if (!new_len)
82695 goto out;
82696
82697+#ifdef CONFIG_PAX_SEGMEXEC
82698+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
82699+ pax_task_size = SEGMEXEC_TASK_SIZE;
82700+#endif
82701+
82702+ pax_task_size -= PAGE_SIZE;
82703+
82704+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
82705+ old_len > pax_task_size || addr > pax_task_size-old_len)
82706+ goto out;
82707+
82708 if (flags & MREMAP_FIXED) {
82709 if (flags & MREMAP_MAYMOVE)
82710 ret = mremap_to(addr, old_len, new_addr, new_len);
82711@@ -524,6 +552,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
82712 addr + new_len);
82713 }
82714 ret = addr;
82715+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
82716 goto out;
82717 }
82718 }
82719@@ -547,7 +576,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
82720 goto out;
82721 }
82722
82723+ map_flags = vma->vm_flags;
82724 ret = move_vma(vma, addr, old_len, new_len, new_addr);
82725+ if (!(ret & ~PAGE_MASK)) {
82726+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
82727+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
82728+ }
82729 }
82730 out:
82731 if (ret & ~PAGE_MASK)
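
The mremap hunks shave one page off the usable top of the address space (pax_task_size -= PAGE_SIZE) so a moved mapping can never abut the limit, and replace two lopsided overlap tests in mremap_to() with the canonical half-open interval check, sketched below:

    #include <stdbool.h>
    #include <stdint.h>

    /* Two half-open ranges [a, a+alen) and [b, b+blen) intersect iff
     * each one starts before the other ends. */
    static bool ranges_overlap(uint64_t a, uint64_t alen,
                               uint64_t b, uint64_t blen)
    {
        return a + alen > b && b + blen > a;
    }
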
82732diff --git a/mm/nommu.c b/mm/nommu.c
82733index 79c3cac..4d357e0 100644
82734--- a/mm/nommu.c
82735+++ b/mm/nommu.c
82736@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
82737 int sysctl_overcommit_ratio = 50; /* default is 50% */
82738 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
82739 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
82740-int heap_stack_gap = 0;
82741
82742 atomic_long_t mmap_pages_allocated;
82743
82744@@ -839,15 +838,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
82745 EXPORT_SYMBOL(find_vma);
82746
82747 /*
82748- * find a VMA
82749- * - we don't extend stack VMAs under NOMMU conditions
82750- */
82751-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
82752-{
82753- return find_vma(mm, addr);
82754-}
82755-
82756-/*
82757 * expand a stack to a given address
82758 * - not supported under NOMMU conditions
82759 */
82760@@ -1555,6 +1545,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
82761
82762 /* most fields are the same, copy all, and then fixup */
82763 *new = *vma;
82764+ INIT_LIST_HEAD(&new->anon_vma_chain);
82765 *region = *vma->vm_region;
82766 new->vm_region = region;
82767
82768diff --git a/mm/page-writeback.c b/mm/page-writeback.c
82769index 0713bfb..b95bb87 100644
82770--- a/mm/page-writeback.c
82771+++ b/mm/page-writeback.c
82772@@ -655,7 +655,7 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
82773 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
82774 * - the bdi dirty thresh drops quickly due to change of JBOD workload
82775 */
82776-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
82777+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
82778 unsigned long thresh,
82779 unsigned long bg_thresh,
82780 unsigned long dirty,
82781@@ -1630,7 +1630,7 @@ ratelimit_handler(struct notifier_block *self, unsigned long action,
82782 }
82783 }
82784
82785-static struct notifier_block __cpuinitdata ratelimit_nb = {
82786+static struct notifier_block ratelimit_nb = {
82787 .notifier_call = ratelimit_handler,
82788 .next = NULL,
82789 };
82790diff --git a/mm/page_alloc.c b/mm/page_alloc.c
82791index 6a83cd3..3ab04ef 100644
82792--- a/mm/page_alloc.c
82793+++ b/mm/page_alloc.c
82794@@ -58,6 +58,7 @@
82795 #include <linux/prefetch.h>
82796 #include <linux/migrate.h>
82797 #include <linux/page-debug-flags.h>
82798+#include <linux/random.h>
82799
82800 #include <asm/tlbflush.h>
82801 #include <asm/div64.h>
82802@@ -338,7 +339,7 @@ out:
82803 * This usage means that zero-order pages may not be compound.
82804 */
82805
82806-static void free_compound_page(struct page *page)
82807+void free_compound_page(struct page *page)
82808 {
82809 __free_pages_ok(page, compound_order(page));
82810 }
82811@@ -693,6 +694,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
82812 int i;
82813 int bad = 0;
82814
82815+#ifdef CONFIG_PAX_MEMORY_SANITIZE
82816+ unsigned long index = 1UL << order;
82817+#endif
82818+
82819 trace_mm_page_free(page, order);
82820 kmemcheck_free_shadow(page, order);
82821
82822@@ -708,6 +713,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
82823 debug_check_no_obj_freed(page_address(page),
82824 PAGE_SIZE << order);
82825 }
82826+
82827+#ifdef CONFIG_PAX_MEMORY_SANITIZE
82828+ for (; index; --index)
82829+ sanitize_highpage(page + index - 1);
82830+#endif
82831+
82832 arch_free_page(page, order);
82833 kernel_map_pages(page, 1 << order, 0);
82834
82835@@ -730,6 +741,19 @@ static void __free_pages_ok(struct page *page, unsigned int order)
82836 local_irq_restore(flags);
82837 }
82838
82839+#ifdef CONFIG_PAX_LATENT_ENTROPY
82840+bool __meminitdata extra_latent_entropy;
82841+
82842+static int __init setup_pax_extra_latent_entropy(char *str)
82843+{
82844+ extra_latent_entropy = true;
82845+ return 0;
82846+}
82847+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
82848+
82849+volatile u64 latent_entropy;
82850+#endif
82851+
82852 /*
82853 * Read access to zone->managed_pages is safe because it's unsigned long,
82854 * but we still need to serialize writers. Currently all callers of
82855@@ -752,6 +776,19 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
82856 set_page_count(p, 0);
82857 }
82858
82859+#ifdef CONFIG_PAX_LATENT_ENTROPY
82860+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
82861+ u64 hash = 0;
82862+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
82863+ const u64 *data = lowmem_page_address(page);
82864+
82865+ for (index = 0; index < end; index++)
82866+ hash ^= hash + data[index];
82867+ latent_entropy ^= hash;
82868+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
82869+ }
82870+#endif
82871+
82872 page_zone(page)->managed_pages += 1 << order;
82873 set_page_refcounted(page);
82874 __free_pages(page, order);
82875@@ -861,8 +898,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
82876 arch_alloc_page(page, order);
82877 kernel_map_pages(page, 1 << order, 1);
82878
82879+#ifndef CONFIG_PAX_MEMORY_SANITIZE
82880 if (gfp_flags & __GFP_ZERO)
82881 prep_zero_page(page, order, gfp_flags);
82882+#endif
82883
82884 if (order && (gfp_flags & __GFP_COMP))
82885 prep_compound_page(page, order);
82886@@ -3752,7 +3791,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
82887 unsigned long pfn;
82888
82889 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
82890+#ifdef CONFIG_X86_32
82891+	/* the switch to pfn_valid_within() caused boot failures in
82892+	   VMware 8 on 32bit vanilla kernels, so keep pfn_valid() here */
82893+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
82894+#else
82895 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
82896+#endif
82897 return 1;
82898 }
82899 return 0;
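
The __free_pages_bootmem() hunk above folds the contents of low pages into a running hash (hash ^= hash + data[i]) before mixing the result into the entropy pool via add_device_randomness(). A minimal userspace sketch of the same fold, assuming a 4 KiB page and standing in for the randomness call with a printf:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t fold_page(const uint64_t *data, size_t nwords)
{
	uint64_t hash = 0;
	size_t i;

	/* the patch's update rule: xor the running hash with (hash + word) */
	for (i = 0; i < nwords; i++)
		hash ^= hash + data[i];
	return hash;
}

int main(void)
{
	uint64_t page[4096 / sizeof(uint64_t)];

	memset(page, 0xa5, sizeof(page));	/* stand-in for real page contents */
	printf("folded: %016llx\n",
	       (unsigned long long)fold_page(page, 4096 / sizeof(uint64_t)));
	return 0;
}
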
82900diff --git a/mm/percpu.c b/mm/percpu.c
82901index 8c8e08f..73a5cda 100644
82902--- a/mm/percpu.c
82903+++ b/mm/percpu.c
82904@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
82905 static unsigned int pcpu_high_unit_cpu __read_mostly;
82906
82907 /* the address of the first chunk which starts with the kernel static area */
82908-void *pcpu_base_addr __read_mostly;
82909+void *pcpu_base_addr __read_only;
82910 EXPORT_SYMBOL_GPL(pcpu_base_addr);
82911
82912 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
82913diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
82914index fd26d04..0cea1b0 100644
82915--- a/mm/process_vm_access.c
82916+++ b/mm/process_vm_access.c
82917@@ -13,6 +13,7 @@
82918 #include <linux/uio.h>
82919 #include <linux/sched.h>
82920 #include <linux/highmem.h>
82921+#include <linux/security.h>
82922 #include <linux/ptrace.h>
82923 #include <linux/slab.h>
82924 #include <linux/syscalls.h>
82925@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
82926 size_t iov_l_curr_offset = 0;
82927 ssize_t iov_len;
82928
82929+	return -ENOSYS; /* PaX: until properly audited */
82930+
82931 /*
82932 * Work out how many pages of struct pages we're going to need
82933 * when eventually calling get_user_pages
82934 */
82935 for (i = 0; i < riovcnt; i++) {
82936 iov_len = rvec[i].iov_len;
82937- if (iov_len > 0) {
82938- nr_pages_iov = ((unsigned long)rvec[i].iov_base
82939- + iov_len)
82940- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
82941- / PAGE_SIZE + 1;
82942- nr_pages = max(nr_pages, nr_pages_iov);
82943- }
82944+ if (iov_len <= 0)
82945+ continue;
82946+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
82947+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
82948+ nr_pages = max(nr_pages, nr_pages_iov);
82949 }
82950
82951 if (nr_pages == 0)
82952@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
82953 goto free_proc_pages;
82954 }
82955
82956+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
82957+ rc = -EPERM;
82958+ goto put_task_struct;
82959+ }
82960+
82961 mm = mm_access(task, PTRACE_MODE_ATTACH);
82962 if (!mm || IS_ERR(mm)) {
82963 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
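
The rewritten iovec loop above computes how many pages each buffer can touch: last page index minus first page index, plus one. A standalone sketch of that computation, assuming a 4096-byte page purely for the demo (like the kernel formula, it counts from base to one past the end, so it is a conservative upper bound):

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long pages_spanned(unsigned long base, unsigned long len)
{
	/* last page index minus first page index, plus one */
	return (base + len) / PAGE_SIZE - base / PAGE_SIZE + 1;
}

int main(void)
{
	printf("%lu\n", pages_spanned(4000, 200));	/* crosses one boundary -> 2 */
	printf("%lu\n", pages_spanned(0, 1));		/* single page -> 1 */
	return 0;
}
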
82964diff --git a/mm/rmap.c b/mm/rmap.c
82965index 2c78f8c..9e9c624 100644
82966--- a/mm/rmap.c
82967+++ b/mm/rmap.c
82968@@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
82969 struct anon_vma *anon_vma = vma->anon_vma;
82970 struct anon_vma_chain *avc;
82971
82972+#ifdef CONFIG_PAX_SEGMEXEC
82973+ struct anon_vma_chain *avc_m = NULL;
82974+#endif
82975+
82976 might_sleep();
82977 if (unlikely(!anon_vma)) {
82978 struct mm_struct *mm = vma->vm_mm;
82979@@ -172,6 +176,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
82980 if (!avc)
82981 goto out_enomem;
82982
82983+#ifdef CONFIG_PAX_SEGMEXEC
82984+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
82985+ if (!avc_m)
82986+ goto out_enomem_free_avc;
82987+#endif
82988+
82989 anon_vma = find_mergeable_anon_vma(vma);
82990 allocated = NULL;
82991 if (!anon_vma) {
82992@@ -185,6 +195,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
82993 /* page_table_lock to protect against threads */
82994 spin_lock(&mm->page_table_lock);
82995 if (likely(!vma->anon_vma)) {
82996+
82997+#ifdef CONFIG_PAX_SEGMEXEC
82998+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
82999+
83000+ if (vma_m) {
83001+ BUG_ON(vma_m->anon_vma);
83002+ vma_m->anon_vma = anon_vma;
83003+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
83004+ avc_m = NULL;
83005+ }
83006+#endif
83007+
83008 vma->anon_vma = anon_vma;
83009 anon_vma_chain_link(vma, avc, anon_vma);
83010 allocated = NULL;
83011@@ -195,12 +217,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
83012
83013 if (unlikely(allocated))
83014 put_anon_vma(allocated);
83015+
83016+#ifdef CONFIG_PAX_SEGMEXEC
83017+ if (unlikely(avc_m))
83018+ anon_vma_chain_free(avc_m);
83019+#endif
83020+
83021 if (unlikely(avc))
83022 anon_vma_chain_free(avc);
83023 }
83024 return 0;
83025
83026 out_enomem_free_avc:
83027+
83028+#ifdef CONFIG_PAX_SEGMEXEC
83029+ if (avc_m)
83030+ anon_vma_chain_free(avc_m);
83031+#endif
83032+
83033 anon_vma_chain_free(avc);
83034 out_enomem:
83035 return -ENOMEM;
83036@@ -236,7 +270,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
83037 * Attach the anon_vmas from src to dst.
83038 * Returns 0 on success, -ENOMEM on failure.
83039 */
83040-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
83041+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
83042 {
83043 struct anon_vma_chain *avc, *pavc;
83044 struct anon_vma *root = NULL;
83045@@ -269,7 +303,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
83046 * the corresponding VMA in the parent process is attached to.
83047 * Returns 0 on success, non-zero on failure.
83048 */
83049-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
83050+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
83051 {
83052 struct anon_vma_chain *avc;
83053 struct anon_vma *anon_vma;
83054diff --git a/mm/shmem.c b/mm/shmem.c
83055index efd0b3a..994b702 100644
83056--- a/mm/shmem.c
83057+++ b/mm/shmem.c
83058@@ -31,7 +31,7 @@
83059 #include <linux/export.h>
83060 #include <linux/swap.h>
83061
83062-static struct vfsmount *shm_mnt;
83063+struct vfsmount *shm_mnt;
83064
83065 #ifdef CONFIG_SHMEM
83066 /*
83067@@ -75,7 +75,7 @@ static struct vfsmount *shm_mnt;
83068 #define BOGO_DIRENT_SIZE 20
83069
83070 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
83071-#define SHORT_SYMLINK_LEN 128
83072+#define SHORT_SYMLINK_LEN 64
83073
83074 /*
83075 * shmem_fallocate and shmem_writepage communicate via inode->i_private
83076@@ -2202,6 +2202,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
83077 static int shmem_xattr_validate(const char *name)
83078 {
83079 struct { const char *prefix; size_t len; } arr[] = {
83080+
83081+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
83082+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
83083+#endif
83084+
83085 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
83086 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
83087 };
83088@@ -2257,6 +2262,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
83089 if (err)
83090 return err;
83091
83092+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
83093+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
83094+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
83095+ return -EOPNOTSUPP;
83096+ if (size > 8)
83097+ return -EINVAL;
83098+ }
83099+#endif
83100+
83101 return simple_xattr_set(&info->xattrs, name, value, size, flags);
83102 }
83103
83104@@ -2562,8 +2576,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
83105 int err = -ENOMEM;
83106
83107 /* Round up to L1_CACHE_BYTES to resist false sharing */
83108- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
83109- L1_CACHE_BYTES), GFP_KERNEL);
83110+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
83111 if (!sbinfo)
83112 return -ENOMEM;
83113
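
Under CONFIG_PAX_XATTR_PAX_FLAGS the shmem hunks above open the user.* xattr namespace on tmpfs just wide enough for the PaX flags attribute and cap its value at 8 bytes. A sketch of the same filter, with the prefix and name spelled out literally on the assumption that they match the kernel's XATTR_USER_PREFIX and XATTR_NAME_PAX_FLAGS:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define XATTR_USER_PREFIX "user."
#define XATTR_NAME_PAX_FLAGS "user.pax.flags"

static int pax_xattr_check(const char *name, size_t size)
{
	if (!strncmp(name, XATTR_USER_PREFIX, strlen(XATTR_USER_PREFIX))) {
		if (strcmp(name, XATTR_NAME_PAX_FLAGS))
			return -EOPNOTSUPP;	/* only the PaX flags attr */
		if (size > 8)
			return -EINVAL;		/* flag string is at most 8 bytes */
	}
	return 0;
}

int main(void)
{
	printf("%d\n", pax_xattr_check("user.pax.flags", 4));	/* 0 */
	printf("%d\n", pax_xattr_check("user.mime_type", 4));	/* -EOPNOTSUPP */
	printf("%d\n", pax_xattr_check("user.pax.flags", 16));	/* -EINVAL */
	return 0;
}
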
83114diff --git a/mm/slab.c b/mm/slab.c
83115index e7667a3..b62c169 100644
83116--- a/mm/slab.c
83117+++ b/mm/slab.c
83118@@ -306,7 +306,7 @@ struct kmem_list3 {
83119 * Need this for bootstrapping a per node allocator.
83120 */
83121 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
83122-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
83123+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
83124 #define CACHE_CACHE 0
83125 #define SIZE_AC MAX_NUMNODES
83126 #define SIZE_L3 (2 * MAX_NUMNODES)
83127@@ -407,10 +407,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
83128 if ((x)->max_freeable < i) \
83129 (x)->max_freeable = i; \
83130 } while (0)
83131-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
83132-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
83133-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
83134-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
83135+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
83136+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
83137+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
83138+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
83139 #else
83140 #define STATS_INC_ACTIVE(x) do { } while (0)
83141 #define STATS_DEC_ACTIVE(x) do { } while (0)
83142@@ -518,7 +518,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
83143 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
83144 */
83145 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
83146- const struct slab *slab, void *obj)
83147+ const struct slab *slab, const void *obj)
83148 {
83149 u32 offset = (obj - slab->s_mem);
83150 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
83151@@ -539,12 +539,13 @@ EXPORT_SYMBOL(malloc_sizes);
83152 struct cache_names {
83153 char *name;
83154 char *name_dma;
83155+ char *name_usercopy;
83156 };
83157
83158 static struct cache_names __initdata cache_names[] = {
83159-#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
83160+#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)", .name_usercopy = "size-" #x "(USERCOPY)" },
83161 #include <linux/kmalloc_sizes.h>
83162- {NULL,}
83163+ {NULL}
83164 #undef CACHE
83165 };
83166
83167@@ -729,6 +730,12 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
83168 if (unlikely(gfpflags & GFP_DMA))
83169 return csizep->cs_dmacachep;
83170 #endif
83171+
83172+#ifdef CONFIG_PAX_USERCOPY_SLABS
83173+ if (unlikely(gfpflags & GFP_USERCOPY))
83174+ return csizep->cs_usercopycachep;
83175+#endif
83176+
83177 return csizep->cs_cachep;
83178 }
83179
83180@@ -1482,7 +1489,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
83181 return notifier_from_errno(err);
83182 }
83183
83184-static struct notifier_block __cpuinitdata cpucache_notifier = {
83185+static struct notifier_block cpucache_notifier = {
83186 &cpuup_callback, NULL, 0
83187 };
83188
83189@@ -1667,12 +1674,12 @@ void __init kmem_cache_init(void)
83190 */
83191
83192 sizes[INDEX_AC].cs_cachep = create_kmalloc_cache(names[INDEX_AC].name,
83193- sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS);
83194+ sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
83195
83196 if (INDEX_AC != INDEX_L3)
83197 sizes[INDEX_L3].cs_cachep =
83198 create_kmalloc_cache(names[INDEX_L3].name,
83199- sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS);
83200+ sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
83201
83202 slab_early_init = 0;
83203
83204@@ -1686,13 +1693,20 @@ void __init kmem_cache_init(void)
83205 */
83206 if (!sizes->cs_cachep)
83207 sizes->cs_cachep = create_kmalloc_cache(names->name,
83208- sizes->cs_size, ARCH_KMALLOC_FLAGS);
83209+ sizes->cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
83210
83211 #ifdef CONFIG_ZONE_DMA
83212 sizes->cs_dmacachep = create_kmalloc_cache(
83213 names->name_dma, sizes->cs_size,
83214 SLAB_CACHE_DMA|ARCH_KMALLOC_FLAGS);
83215 #endif
83216+
83217+#ifdef CONFIG_PAX_USERCOPY_SLABS
83218+ sizes->cs_usercopycachep = create_kmalloc_cache(
83219+ names->name_usercopy, sizes->cs_size,
83220+ ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
83221+#endif
83222+
83223 sizes++;
83224 names++;
83225 }
83226@@ -4365,10 +4379,10 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
83227 }
83228 /* cpu stats */
83229 {
83230- unsigned long allochit = atomic_read(&cachep->allochit);
83231- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
83232- unsigned long freehit = atomic_read(&cachep->freehit);
83233- unsigned long freemiss = atomic_read(&cachep->freemiss);
83234+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
83235+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
83236+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
83237+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
83238
83239 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
83240 allochit, allocmiss, freehit, freemiss);
83241@@ -4600,13 +4614,71 @@ static const struct file_operations proc_slabstats_operations = {
83242 static int __init slab_proc_init(void)
83243 {
83244 #ifdef CONFIG_DEBUG_SLAB_LEAK
83245- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
83246+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
83247 #endif
83248 return 0;
83249 }
83250 module_init(slab_proc_init);
83251 #endif
83252
83253+bool is_usercopy_object(const void *ptr)
83254+{
83255+ struct page *page;
83256+ struct kmem_cache *cachep;
83257+
83258+ if (ZERO_OR_NULL_PTR(ptr))
83259+ return false;
83260+
83261+ if (!slab_is_available())
83262+ return false;
83263+
83264+ if (!virt_addr_valid(ptr))
83265+ return false;
83266+
83267+ page = virt_to_head_page(ptr);
83268+
83269+ if (!PageSlab(page))
83270+ return false;
83271+
83272+ cachep = page->slab_cache;
83273+ return cachep->flags & SLAB_USERCOPY;
83274+}
83275+
83276+#ifdef CONFIG_PAX_USERCOPY
83277+const char *check_heap_object(const void *ptr, unsigned long n)
83278+{
83279+ struct page *page;
83280+ struct kmem_cache *cachep;
83281+ struct slab *slabp;
83282+ unsigned int objnr;
83283+ unsigned long offset;
83284+
83285+ if (ZERO_OR_NULL_PTR(ptr))
83286+ return "<null>";
83287+
83288+ if (!virt_addr_valid(ptr))
83289+ return NULL;
83290+
83291+ page = virt_to_head_page(ptr);
83292+
83293+ if (!PageSlab(page))
83294+ return NULL;
83295+
83296+ cachep = page->slab_cache;
83297+ if (!(cachep->flags & SLAB_USERCOPY))
83298+ return cachep->name;
83299+
83300+ slabp = page->slab_page;
83301+ objnr = obj_to_index(cachep, slabp, ptr);
83302+ BUG_ON(objnr >= cachep->num);
83303+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
83304+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
83305+ return NULL;
83306+
83307+ return cachep->name;
83308+}
83309+#endif
83310+
83311 /**
83312 * ksize - get the actual amount of memory allocated for a given object
83313 * @objp: Pointer to the object
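
The SLAB check_heap_object() above rejects a usercopy unless the requested range sits entirely inside one slab object. The core test, extracted into a standalone predicate (obj_start and object_size stand in for the object base address and cachep->object_size):

#include <stdbool.h>
#include <stdio.h>

static bool copy_within_object(unsigned long ptr, unsigned long n,
			       unsigned long obj_start, unsigned long object_size)
{
	unsigned long offset = ptr - obj_start;

	/* safe when the copy starts inside the object and ends
	 * at or before the object's last byte */
	return offset <= object_size && n <= object_size - offset;
}

int main(void)
{
	printf("%d\n", copy_within_object(0x1008, 8, 0x1000, 64));	/* 1: fits */
	printf("%d\n", copy_within_object(0x1008, 64, 0x1000, 64));	/* 0: overflows */
	return 0;
}
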
83314diff --git a/mm/slab.h b/mm/slab.h
83315index 34a98d6..73633d1 100644
83316--- a/mm/slab.h
83317+++ b/mm/slab.h
83318@@ -58,7 +58,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
83319
83320 /* Legal flag mask for kmem_cache_create(), for various configurations */
83321 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
83322- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
83323+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | SLAB_USERCOPY)
83324
83325 #if defined(CONFIG_DEBUG_SLAB)
83326 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
83327@@ -220,6 +220,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
83328 return s;
83329
83330 page = virt_to_head_page(x);
83331+
83332+ BUG_ON(!PageSlab(page));
83333+
83334 cachep = page->slab_cache;
83335 if (slab_equal_or_root(cachep, s))
83336 return cachep;
83337diff --git a/mm/slab_common.c b/mm/slab_common.c
83338index 3f3cd97..93b0236 100644
83339--- a/mm/slab_common.c
83340+++ b/mm/slab_common.c
83341@@ -22,7 +22,7 @@
83342
83343 #include "slab.h"
83344
83345-enum slab_state slab_state;
83346+enum slab_state slab_state __read_only;
83347 LIST_HEAD(slab_caches);
83348 DEFINE_MUTEX(slab_mutex);
83349 struct kmem_cache *kmem_cache;
83350@@ -209,7 +209,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
83351
83352 err = __kmem_cache_create(s, flags);
83353 if (!err) {
83354- s->refcount = 1;
83355+ atomic_set(&s->refcount, 1);
83356 list_add(&s->list, &slab_caches);
83357 memcg_cache_list_add(memcg, s);
83358 } else {
83359@@ -255,8 +255,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
83360
83361 get_online_cpus();
83362 mutex_lock(&slab_mutex);
83363- s->refcount--;
83364- if (!s->refcount) {
83365+ if (atomic_dec_and_test(&s->refcount)) {
83366 list_del(&s->list);
83367
83368 if (!__kmem_cache_shutdown(s)) {
83369@@ -302,7 +301,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
83370 panic("Creation of kmalloc slab %s size=%zd failed. Reason %d\n",
83371 name, size, err);
83372
83373- s->refcount = -1; /* Exempt from merging for now */
83374+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
83375 }
83376
83377 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
83378@@ -315,7 +314,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
83379
83380 create_boot_cache(s, name, size, flags);
83381 list_add(&s->list, &slab_caches);
83382- s->refcount = 1;
83383+ atomic_set(&s->refcount, 1);
83384 return s;
83385 }
83386
83387diff --git a/mm/slob.c b/mm/slob.c
83388index a99fdf7..f5b6577 100644
83389--- a/mm/slob.c
83390+++ b/mm/slob.c
83391@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
83392 /*
83393 * Return the size of a slob block.
83394 */
83395-static slobidx_t slob_units(slob_t *s)
83396+static slobidx_t slob_units(const slob_t *s)
83397 {
83398 if (s->units > 0)
83399 return s->units;
83400@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
83401 /*
83402 * Return the next free slob block pointer after this one.
83403 */
83404-static slob_t *slob_next(slob_t *s)
83405+static slob_t *slob_next(const slob_t *s)
83406 {
83407 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
83408 slobidx_t next;
83409@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
83410 /*
83411 * Returns true if s is the last free block in its page.
83412 */
83413-static int slob_last(slob_t *s)
83414+static int slob_last(const slob_t *s)
83415 {
83416 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
83417 }
83418
83419-static void *slob_new_pages(gfp_t gfp, int order, int node)
83420+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
83421 {
83422- void *page;
83423+ struct page *page;
83424
83425 #ifdef CONFIG_NUMA
83426 if (node != NUMA_NO_NODE)
83427@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
83428 if (!page)
83429 return NULL;
83430
83431- return page_address(page);
83432+ __SetPageSlab(page);
83433+ return page;
83434 }
83435
83436-static void slob_free_pages(void *b, int order)
83437+static void slob_free_pages(struct page *sp, int order)
83438 {
83439 if (current->reclaim_state)
83440 current->reclaim_state->reclaimed_slab += 1 << order;
83441- free_pages((unsigned long)b, order);
83442+ __ClearPageSlab(sp);
83443+ reset_page_mapcount(sp);
83444+ sp->private = 0;
83445+ __free_pages(sp, order);
83446 }
83447
83448 /*
83449@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
83450
83451 /* Not enough space: must allocate a new page */
83452 if (!b) {
83453- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
83454- if (!b)
83455+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
83456+ if (!sp)
83457 return NULL;
83458- sp = virt_to_page(b);
83459- __SetPageSlab(sp);
83460+ b = page_address(sp);
83461
83462 spin_lock_irqsave(&slob_lock, flags);
83463 sp->units = SLOB_UNITS(PAGE_SIZE);
83464 sp->freelist = b;
83465+ sp->private = 0;
83466 INIT_LIST_HEAD(&sp->list);
83467 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
83468 set_slob_page_free(sp, slob_list);
83469@@ -359,9 +363,7 @@ static void slob_free(void *block, int size)
83470 if (slob_page_free(sp))
83471 clear_slob_page_free(sp);
83472 spin_unlock_irqrestore(&slob_lock, flags);
83473- __ClearPageSlab(sp);
83474- reset_page_mapcount(sp);
83475- slob_free_pages(b, 0);
83476+ slob_free_pages(sp, 0);
83477 return;
83478 }
83479
83480@@ -424,11 +426,10 @@ out:
83481 */
83482
83483 static __always_inline void *
83484-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
83485+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
83486 {
83487- unsigned int *m;
83488- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
83489- void *ret;
83490+ slob_t *m;
83491+ void *ret = NULL;
83492
83493 gfp &= gfp_allowed_mask;
83494
83495@@ -442,23 +443,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
83496
83497 if (!m)
83498 return NULL;
83499- *m = size;
83500+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
83501+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
83502+ m[0].units = size;
83503+ m[1].units = align;
83504 ret = (void *)m + align;
83505
83506 trace_kmalloc_node(caller, ret,
83507 size, size + align, gfp, node);
83508 } else {
83509 unsigned int order = get_order(size);
83510+ struct page *page;
83511
83512 if (likely(order))
83513 gfp |= __GFP_COMP;
83514- ret = slob_new_pages(gfp, order, node);
83515+ page = slob_new_pages(gfp, order, node);
83516+ if (page) {
83517+ ret = page_address(page);
83518+ page->private = size;
83519+ }
83520
83521 trace_kmalloc_node(caller, ret,
83522 size, PAGE_SIZE << order, gfp, node);
83523 }
83524
83525- kmemleak_alloc(ret, size, 1, gfp);
83526+ return ret;
83527+}
83528+
83529+static __always_inline void *
83530+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
83531+{
83532+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
83533+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
83534+
83535+ if (!ZERO_OR_NULL_PTR(ret))
83536+ kmemleak_alloc(ret, size, 1, gfp);
83537 return ret;
83538 }
83539
83540@@ -494,33 +513,110 @@ void kfree(const void *block)
83541 kmemleak_free(block);
83542
83543 sp = virt_to_page(block);
83544- if (PageSlab(sp)) {
83545+ VM_BUG_ON(!PageSlab(sp));
83546+ if (!sp->private) {
83547 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
83548- unsigned int *m = (unsigned int *)(block - align);
83549- slob_free(m, *m + align);
83550- } else
83551+ slob_t *m = (slob_t *)(block - align);
83552+ slob_free(m, m[0].units + align);
83553+ } else {
83554+ __ClearPageSlab(sp);
83555+ reset_page_mapcount(sp);
83556+ sp->private = 0;
83557 __free_pages(sp, compound_order(sp));
83558+ }
83559 }
83560 EXPORT_SYMBOL(kfree);
83561
83562+bool is_usercopy_object(const void *ptr)
83563+{
83564+ if (!slab_is_available())
83565+ return false;
83566+
83567+	/* PAX: TODO */
83568+
83569+ return false;
83570+}
83571+
83572+#ifdef CONFIG_PAX_USERCOPY
83573+const char *check_heap_object(const void *ptr, unsigned long n)
83574+{
83575+ struct page *page;
83576+ const slob_t *free;
83577+ const void *base;
83578+ unsigned long flags;
83579+
83580+ if (ZERO_OR_NULL_PTR(ptr))
83581+ return "<null>";
83582+
83583+ if (!virt_addr_valid(ptr))
83584+ return NULL;
83585+
83586+ page = virt_to_head_page(ptr);
83587+ if (!PageSlab(page))
83588+ return NULL;
83589+
83590+ if (page->private) {
83591+ base = page;
83592+ if (base <= ptr && n <= page->private - (ptr - base))
83593+ return NULL;
83594+ return "<slob>";
83595+ }
83596+
83597+ /* some tricky double walking to find the chunk */
83598+ spin_lock_irqsave(&slob_lock, flags);
83599+ base = (void *)((unsigned long)ptr & PAGE_MASK);
83600+ free = page->freelist;
83601+
83602+ while (!slob_last(free) && (void *)free <= ptr) {
83603+ base = free + slob_units(free);
83604+ free = slob_next(free);
83605+ }
83606+
83607+ while (base < (void *)free) {
83608+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
83609+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
83610+ int offset;
83611+
83612+ if (ptr < base + align)
83613+ break;
83614+
83615+ offset = ptr - base - align;
83616+ if (offset >= m) {
83617+ base += size;
83618+ continue;
83619+ }
83620+
83621+ if (n > m - offset)
83622+ break;
83623+
83624+ spin_unlock_irqrestore(&slob_lock, flags);
83625+ return NULL;
83626+ }
83627+
83628+ spin_unlock_irqrestore(&slob_lock, flags);
83629+ return "<slob>";
83630+}
83631+#endif
83632+
83633 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
83634 size_t ksize(const void *block)
83635 {
83636 struct page *sp;
83637 int align;
83638- unsigned int *m;
83639+ slob_t *m;
83640
83641 BUG_ON(!block);
83642 if (unlikely(block == ZERO_SIZE_PTR))
83643 return 0;
83644
83645 sp = virt_to_page(block);
83646- if (unlikely(!PageSlab(sp)))
83647- return PAGE_SIZE << compound_order(sp);
83648+ VM_BUG_ON(!PageSlab(sp));
83649+ if (sp->private)
83650+ return sp->private;
83651
83652 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
83653- m = (unsigned int *)(block - align);
83654- return SLOB_UNITS(*m) * SLOB_UNIT;
83655+ m = (slob_t *)(block - align);
83656+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
83657 }
83658 EXPORT_SYMBOL(ksize);
83659
83660@@ -536,23 +632,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
83661
83662 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
83663 {
83664- void *b;
83665+ void *b = NULL;
83666
83667 flags &= gfp_allowed_mask;
83668
83669 lockdep_trace_alloc(flags);
83670
83671+#ifdef CONFIG_PAX_USERCOPY_SLABS
83672+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
83673+#else
83674 if (c->size < PAGE_SIZE) {
83675 b = slob_alloc(c->size, flags, c->align, node);
83676 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
83677 SLOB_UNITS(c->size) * SLOB_UNIT,
83678 flags, node);
83679 } else {
83680- b = slob_new_pages(flags, get_order(c->size), node);
83681+ struct page *sp;
83682+
83683+ sp = slob_new_pages(flags, get_order(c->size), node);
83684+ if (sp) {
83685+ b = page_address(sp);
83686+ sp->private = c->size;
83687+ }
83688 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
83689 PAGE_SIZE << get_order(c->size),
83690 flags, node);
83691 }
83692+#endif
83693
83694 if (c->ctor)
83695 c->ctor(b);
83696@@ -564,10 +670,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
83697
83698 static void __kmem_cache_free(void *b, int size)
83699 {
83700- if (size < PAGE_SIZE)
83701+ struct page *sp;
83702+
83703+ sp = virt_to_page(b);
83704+ BUG_ON(!PageSlab(sp));
83705+ if (!sp->private)
83706 slob_free(b, size);
83707 else
83708- slob_free_pages(b, get_order(size));
83709+ slob_free_pages(sp, get_order(size));
83710 }
83711
83712 static void kmem_rcu_free(struct rcu_head *head)
83713@@ -580,17 +690,31 @@ static void kmem_rcu_free(struct rcu_head *head)
83714
83715 void kmem_cache_free(struct kmem_cache *c, void *b)
83716 {
83717+ int size = c->size;
83718+
83719+#ifdef CONFIG_PAX_USERCOPY_SLABS
83720+ if (size + c->align < PAGE_SIZE) {
83721+ size += c->align;
83722+ b -= c->align;
83723+ }
83724+#endif
83725+
83726 kmemleak_free_recursive(b, c->flags);
83727 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
83728 struct slob_rcu *slob_rcu;
83729- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
83730- slob_rcu->size = c->size;
83731+ slob_rcu = b + (size - sizeof(struct slob_rcu));
83732+ slob_rcu->size = size;
83733 call_rcu(&slob_rcu->head, kmem_rcu_free);
83734 } else {
83735- __kmem_cache_free(b, c->size);
83736+ __kmem_cache_free(b, size);
83737 }
83738
83739+#ifdef CONFIG_PAX_USERCOPY_SLABS
83740+ trace_kfree(_RET_IP_, b);
83741+#else
83742 trace_kmem_cache_free(_RET_IP_, b);
83743+#endif
83744+
83745 }
83746 EXPORT_SYMBOL(kmem_cache_free);
83747
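
The reworked __do_kmalloc_node_align() above stores the request size and the alignment in the two slob_t units just before the returned pointer, which is what lets kfree() and ksize() recover them later. A userspace sketch of the same header trick, using malloc in place of the slob allocator:

#include <stdio.h>
#include <stdlib.h>

struct hdr { unsigned int size, align; };	/* stands in for m[0]/m[1].units */

static void *alloc_with_hdr(unsigned int size, unsigned int align)
{
	unsigned char *m = malloc(align + size);

	if (!m)
		return NULL;
	((struct hdr *)m)->size = size;		/* m[0].units in the patch */
	((struct hdr *)m)->align = align;	/* m[1].units in the patch */
	return m + align;	/* caller sees only the payload */
}

static unsigned int ksize_of(const void *p, unsigned int align)
{
	const struct hdr *h = (const struct hdr *)((const unsigned char *)p - align);

	return h->size;
}

int main(void)
{
	void *p = alloc_with_hdr(100, 16);	/* align must cover the header */

	printf("%u\n", ksize_of(p, 16));	/* 100 */
	free((unsigned char *)p - 16);
	return 0;
}
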
83748diff --git a/mm/slub.c b/mm/slub.c
83749index ba2ca53..00b1f4e 100644
83750--- a/mm/slub.c
83751+++ b/mm/slub.c
83752@@ -197,7 +197,7 @@ struct track {
83753
83754 enum track_item { TRACK_ALLOC, TRACK_FREE };
83755
83756-#ifdef CONFIG_SYSFS
83757+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
83758 static int sysfs_slab_add(struct kmem_cache *);
83759 static int sysfs_slab_alias(struct kmem_cache *, const char *);
83760 static void sysfs_slab_remove(struct kmem_cache *);
83761@@ -518,7 +518,7 @@ static void print_track(const char *s, struct track *t)
83762 if (!t->addr)
83763 return;
83764
83765- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
83766+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
83767 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
83768 #ifdef CONFIG_STACKTRACE
83769 {
83770@@ -2653,7 +2653,7 @@ static int slub_min_objects;
83771 * Merge control. If this is set then no merging of slab caches will occur.
83772 * (Could be removed. This was introduced to pacify the merge skeptics.)
83773 */
83774-static int slub_nomerge;
83775+static int slub_nomerge = 1;
83776
83777 /*
83778 * Calculate the order of allocation given an slab object size.
83779@@ -3181,6 +3181,10 @@ EXPORT_SYMBOL(kmalloc_caches);
83780 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
83781 #endif
83782
83783+#ifdef CONFIG_PAX_USERCOPY_SLABS
83784+static struct kmem_cache *kmalloc_usercopy_caches[SLUB_PAGE_SHIFT];
83785+#endif
83786+
83787 static int __init setup_slub_min_order(char *str)
83788 {
83789 get_option(&str, &slub_min_order);
83790@@ -3272,6 +3276,13 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
83791 return kmalloc_dma_caches[index];
83792
83793 #endif
83794+
83795+#ifdef CONFIG_PAX_USERCOPY_SLABS
83796+ if (flags & SLAB_USERCOPY)
83797+ return kmalloc_usercopy_caches[index];
83798+
83799+#endif
83800+
83801 return kmalloc_caches[index];
83802 }
83803
83804@@ -3340,6 +3351,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
83805 EXPORT_SYMBOL(__kmalloc_node);
83806 #endif
83807
83808+bool is_usercopy_object(const void *ptr)
83809+{
83810+ struct page *page;
83811+ struct kmem_cache *s;
83812+
83813+ if (ZERO_OR_NULL_PTR(ptr))
83814+ return false;
83815+
83816+ if (!slab_is_available())
83817+ return false;
83818+
83819+ if (!virt_addr_valid(ptr))
83820+ return false;
83821+
83822+ page = virt_to_head_page(ptr);
83823+
83824+ if (!PageSlab(page))
83825+ return false;
83826+
83827+ s = page->slab_cache;
83828+ return s->flags & SLAB_USERCOPY;
83829+}
83830+
83831+#ifdef CONFIG_PAX_USERCOPY
83832+const char *check_heap_object(const void *ptr, unsigned long n)
83833+{
83834+ struct page *page;
83835+ struct kmem_cache *s;
83836+ unsigned long offset;
83837+
83838+ if (ZERO_OR_NULL_PTR(ptr))
83839+ return "<null>";
83840+
83841+ if (!virt_addr_valid(ptr))
83842+ return NULL;
83843+
83844+ page = virt_to_head_page(ptr);
83845+
83846+ if (!PageSlab(page))
83847+ return NULL;
83848+
83849+ s = page->slab_cache;
83850+ if (!(s->flags & SLAB_USERCOPY))
83851+ return s->name;
83852+
83853+ offset = (ptr - page_address(page)) % s->size;
83854+ if (offset <= s->object_size && n <= s->object_size - offset)
83855+ return NULL;
83856+
83857+ return s->name;
83858+}
83859+#endif
83860+
83861 size_t ksize(const void *object)
83862 {
83863 struct page *page;
83864@@ -3712,17 +3776,17 @@ void __init kmem_cache_init(void)
83865
83866 /* Caches that are not of the two-to-the-power-of size */
83867 if (KMALLOC_MIN_SIZE <= 32) {
83868- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
83869+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
83870 caches++;
83871 }
83872
83873 if (KMALLOC_MIN_SIZE <= 64) {
83874- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
83875+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
83876 caches++;
83877 }
83878
83879 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
83880- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
83881+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
83882 caches++;
83883 }
83884
83885@@ -3764,6 +3828,22 @@ void __init kmem_cache_init(void)
83886 }
83887 }
83888 #endif
83889+
83890+#ifdef CONFIG_PAX_USERCOPY_SLABS
83891+ for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
83892+ struct kmem_cache *s = kmalloc_caches[i];
83893+
83894+ if (s && s->size) {
83895+ char *name = kasprintf(GFP_NOWAIT,
83896+ "usercopy-kmalloc-%d", s->object_size);
83897+
83898+ BUG_ON(!name);
83899+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(name,
83900+ s->object_size, SLAB_USERCOPY);
83901+ }
83902+ }
83903+#endif
83904+
83905 printk(KERN_INFO
83906 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
83907 " CPUs=%d, Nodes=%d\n",
83908@@ -3790,7 +3870,7 @@ static int slab_unmergeable(struct kmem_cache *s)
83909 /*
83910 * We may have set a slab to be unmergeable during bootstrap.
83911 */
83912- if (s->refcount < 0)
83913+ if (atomic_read(&s->refcount) < 0)
83914 return 1;
83915
83916 return 0;
83917@@ -3848,7 +3928,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
83918
83919 s = find_mergeable(memcg, size, align, flags, name, ctor);
83920 if (s) {
83921- s->refcount++;
83922+ atomic_inc(&s->refcount);
83923 /*
83924 * Adjust the object sizes so that we clear
83925 * the complete object on kzalloc.
83926@@ -3857,7 +3937,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
83927 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
83928
83929 if (sysfs_slab_alias(s, name)) {
83930- s->refcount--;
83931+ atomic_dec(&s->refcount);
83932 s = NULL;
83933 }
83934 }
83935@@ -3919,7 +3999,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
83936 return NOTIFY_OK;
83937 }
83938
83939-static struct notifier_block __cpuinitdata slab_notifier = {
83940+static struct notifier_block slab_notifier = {
83941 .notifier_call = slab_cpuup_callback
83942 };
83943
83944@@ -3977,7 +4057,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
83945 }
83946 #endif
83947
83948-#ifdef CONFIG_SYSFS
83949+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
83950 static int count_inuse(struct page *page)
83951 {
83952 return page->inuse;
83953@@ -4364,12 +4444,12 @@ static void resiliency_test(void)
83954 validate_slab_cache(kmalloc_caches[9]);
83955 }
83956 #else
83957-#ifdef CONFIG_SYSFS
83958+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
83959 static void resiliency_test(void) {};
83960 #endif
83961 #endif
83962
83963-#ifdef CONFIG_SYSFS
83964+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
83965 enum slab_stat_type {
83966 SL_ALL, /* All slabs */
83967 SL_PARTIAL, /* Only partially allocated slabs */
83968@@ -4613,7 +4693,7 @@ SLAB_ATTR_RO(ctor);
83969
83970 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
83971 {
83972- return sprintf(buf, "%d\n", s->refcount - 1);
83973+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
83974 }
83975 SLAB_ATTR_RO(aliases);
83976
83977@@ -5266,6 +5346,7 @@ static char *create_unique_id(struct kmem_cache *s)
83978 return name;
83979 }
83980
83981+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
83982 static int sysfs_slab_add(struct kmem_cache *s)
83983 {
83984 int err;
83985@@ -5323,6 +5404,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
83986 kobject_del(&s->kobj);
83987 kobject_put(&s->kobj);
83988 }
83989+#endif
83990
83991 /*
83992 * Need to buffer aliases during bootup until sysfs becomes
83993@@ -5336,6 +5418,7 @@ struct saved_alias {
83994
83995 static struct saved_alias *alias_list;
83996
83997+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
83998 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
83999 {
84000 struct saved_alias *al;
84001@@ -5358,6 +5441,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
84002 alias_list = al;
84003 return 0;
84004 }
84005+#endif
84006
84007 static int __init slab_sysfs_init(void)
84008 {
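
The SLUB variant of check_heap_object() reaches the same decision differently: objects are packed at a fixed stride (s->size), so the intra-object offset falls out of a modulo on the page-relative address. A sketch:

#include <stdio.h>

static unsigned long intra_object_offset(unsigned long ptr,
					 unsigned long page_base,
					 unsigned long stride)
{
	/* SLUB lays objects out back to back at a fixed stride, so the
	 * remainder is the offset of ptr within its object */
	return (ptr - page_base) % stride;
}

int main(void)
{
	/* third object in a 64-byte-stride slab, 8 bytes in -> offset 8 */
	printf("%lu\n", intra_object_offset(0x1000 + 2 * 64 + 8, 0x1000, 64));
	return 0;
}
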
84009diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
84010index 1b7e22a..3fcd4f3 100644
84011--- a/mm/sparse-vmemmap.c
84012+++ b/mm/sparse-vmemmap.c
84013@@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
84014 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
84015 if (!p)
84016 return NULL;
84017- pud_populate(&init_mm, pud, p);
84018+ pud_populate_kernel(&init_mm, pud, p);
84019 }
84020 return pud;
84021 }
84022@@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
84023 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
84024 if (!p)
84025 return NULL;
84026- pgd_populate(&init_mm, pgd, p);
84027+ pgd_populate_kernel(&init_mm, pgd, p);
84028 }
84029 return pgd;
84030 }
84031diff --git a/mm/sparse.c b/mm/sparse.c
84032index 6b5fb76..db0c190 100644
84033--- a/mm/sparse.c
84034+++ b/mm/sparse.c
84035@@ -782,7 +782,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
84036
84037 for (i = 0; i < PAGES_PER_SECTION; i++) {
84038 if (PageHWPoison(&memmap[i])) {
84039- atomic_long_sub(1, &mce_bad_pages);
84040+ atomic_long_sub_unchecked(1, &mce_bad_pages);
84041 ClearPageHWPoison(&memmap[i]);
84042 }
84043 }
84044diff --git a/mm/swap.c b/mm/swap.c
84045index 6310dc2..3662b3f 100644
84046--- a/mm/swap.c
84047+++ b/mm/swap.c
84048@@ -30,6 +30,7 @@
84049 #include <linux/backing-dev.h>
84050 #include <linux/memcontrol.h>
84051 #include <linux/gfp.h>
84052+#include <linux/hugetlb.h>
84053
84054 #include "internal.h"
84055
84056@@ -72,6 +73,8 @@ static void __put_compound_page(struct page *page)
84057
84058 __page_cache_release(page);
84059 dtor = get_compound_page_dtor(page);
84060+ if (!PageHuge(page))
84061+ BUG_ON(dtor != free_compound_page);
84062 (*dtor)(page);
84063 }
84064
84065diff --git a/mm/swapfile.c b/mm/swapfile.c
84066index e97a0e5..b50e796 100644
84067--- a/mm/swapfile.c
84068+++ b/mm/swapfile.c
84069@@ -64,7 +64,7 @@ static DEFINE_MUTEX(swapon_mutex);
84070
84071 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
84072 /* Activity counter to indicate that a swapon or swapoff has occurred */
84073-static atomic_t proc_poll_event = ATOMIC_INIT(0);
84074+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
84075
84076 static inline unsigned char swap_count(unsigned char ent)
84077 {
84078@@ -1608,7 +1608,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
84079 }
84080 filp_close(swap_file, NULL);
84081 err = 0;
84082- atomic_inc(&proc_poll_event);
84083+ atomic_inc_unchecked(&proc_poll_event);
84084 wake_up_interruptible(&proc_poll_wait);
84085
84086 out_dput:
84087@@ -1625,8 +1625,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
84088
84089 poll_wait(file, &proc_poll_wait, wait);
84090
84091- if (seq->poll_event != atomic_read(&proc_poll_event)) {
84092- seq->poll_event = atomic_read(&proc_poll_event);
84093+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
84094+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
84095 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
84096 }
84097
84098@@ -1724,7 +1724,7 @@ static int swaps_open(struct inode *inode, struct file *file)
84099 return ret;
84100
84101 seq = file->private_data;
84102- seq->poll_event = atomic_read(&proc_poll_event);
84103+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
84104 return 0;
84105 }
84106
84107@@ -2066,7 +2066,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
84108 (frontswap_map) ? "FS" : "");
84109
84110 mutex_unlock(&swapon_mutex);
84111- atomic_inc(&proc_poll_event);
84112+ atomic_inc_unchecked(&proc_poll_event);
84113 wake_up_interruptible(&proc_poll_wait);
84114
84115 if (S_ISREG(inode->i_mode))
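
The swapfile hunks convert proc_poll_event to the *_unchecked atomics: under PaX's overflow-checked atomic ops, a counter that may legitimately wrap would otherwise trip the refcount protection. A simplified single-threaded model of the two behaviours (the real primitives are hardware atomics, and the checked variant reports the overflow rather than silently saturating):

#include <limits.h>
#include <stdio.h>

static int inc_checked(int v)
{
	return v == INT_MAX ? INT_MAX : v + 1;	/* simplified: refcount-style */
}

static int inc_unchecked(int v)
{
	return (int)((unsigned int)v + 1);	/* wrap: event-counter-style */
}

int main(void)
{
	printf("checked:   %d\n", inc_checked(INT_MAX));	/* stays INT_MAX */
	printf("unchecked: %d\n", inc_unchecked(INT_MAX));	/* wraps negative */
	return 0;
}
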
84116diff --git a/mm/util.c b/mm/util.c
84117index c55e26b..3f913a9 100644
84118--- a/mm/util.c
84119+++ b/mm/util.c
84120@@ -292,6 +292,12 @@ done:
84121 void arch_pick_mmap_layout(struct mm_struct *mm)
84122 {
84123 mm->mmap_base = TASK_UNMAPPED_BASE;
84124+
84125+#ifdef CONFIG_PAX_RANDMMAP
84126+ if (mm->pax_flags & MF_PAX_RANDMMAP)
84127+ mm->mmap_base += mm->delta_mmap;
84128+#endif
84129+
84130 mm->get_unmapped_area = arch_get_unmapped_area;
84131 mm->unmap_area = arch_unmap_area;
84132 }
84133diff --git a/mm/vmalloc.c b/mm/vmalloc.c
84134index 5123a16..f234a48 100644
84135--- a/mm/vmalloc.c
84136+++ b/mm/vmalloc.c
84137@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
84138
84139 pte = pte_offset_kernel(pmd, addr);
84140 do {
84141- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
84142- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
84143+
84144+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
84145+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
84146+ BUG_ON(!pte_exec(*pte));
84147+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
84148+ continue;
84149+ }
84150+#endif
84151+
84152+ {
84153+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
84154+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
84155+ }
84156 } while (pte++, addr += PAGE_SIZE, addr != end);
84157 }
84158
84159@@ -100,16 +111,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
84160 pte = pte_alloc_kernel(pmd, addr);
84161 if (!pte)
84162 return -ENOMEM;
84163+
84164+ pax_open_kernel();
84165 do {
84166 struct page *page = pages[*nr];
84167
84168- if (WARN_ON(!pte_none(*pte)))
84169+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
84170+ if (pgprot_val(prot) & _PAGE_NX)
84171+#endif
84172+
84173+ if (!pte_none(*pte)) {
84174+ pax_close_kernel();
84175+ WARN_ON(1);
84176 return -EBUSY;
84177- if (WARN_ON(!page))
84178+ }
84179+ if (!page) {
84180+ pax_close_kernel();
84181+ WARN_ON(1);
84182 return -ENOMEM;
84183+ }
84184 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
84185 (*nr)++;
84186 } while (pte++, addr += PAGE_SIZE, addr != end);
84187+ pax_close_kernel();
84188 return 0;
84189 }
84190
84191@@ -119,7 +143,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
84192 pmd_t *pmd;
84193 unsigned long next;
84194
84195- pmd = pmd_alloc(&init_mm, pud, addr);
84196+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
84197 if (!pmd)
84198 return -ENOMEM;
84199 do {
84200@@ -136,7 +160,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
84201 pud_t *pud;
84202 unsigned long next;
84203
84204- pud = pud_alloc(&init_mm, pgd, addr);
84205+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
84206 if (!pud)
84207 return -ENOMEM;
84208 do {
84209@@ -191,11 +215,20 @@ int is_vmalloc_or_module_addr(const void *x)
84210 * and fall back on vmalloc() if that fails. Others
84211 * just put it in the vmalloc space.
84212 */
84213-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
84214+#ifdef CONFIG_MODULES
84215+#ifdef MODULES_VADDR
84216 unsigned long addr = (unsigned long)x;
84217 if (addr >= MODULES_VADDR && addr < MODULES_END)
84218 return 1;
84219 #endif
84220+
84221+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
84222+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
84223+ return 1;
84224+#endif
84225+
84226+#endif
84227+
84228 return is_vmalloc_addr(x);
84229 }
84230
84231@@ -216,8 +249,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
84232
84233 if (!pgd_none(*pgd)) {
84234 pud_t *pud = pud_offset(pgd, addr);
84235+#ifdef CONFIG_X86
84236+ if (!pud_large(*pud))
84237+#endif
84238 if (!pud_none(*pud)) {
84239 pmd_t *pmd = pmd_offset(pud, addr);
84240+#ifdef CONFIG_X86
84241+ if (!pmd_large(*pmd))
84242+#endif
84243 if (!pmd_none(*pmd)) {
84244 pte_t *ptep, pte;
84245
84246@@ -329,7 +368,7 @@ static void purge_vmap_area_lazy(void);
84247 * Allocate a region of KVA of the specified size and alignment, within the
84248 * vstart and vend.
84249 */
84250-static struct vmap_area *alloc_vmap_area(unsigned long size,
84251+static __size_overflow(1) struct vmap_area *alloc_vmap_area(unsigned long size,
84252 unsigned long align,
84253 unsigned long vstart, unsigned long vend,
84254 int node, gfp_t gfp_mask)
84255@@ -1328,6 +1367,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
84256 struct vm_struct *area;
84257
84258 BUG_ON(in_interrupt());
84259+
84260+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
84261+ if (flags & VM_KERNEXEC) {
84262+ if (start != VMALLOC_START || end != VMALLOC_END)
84263+ return NULL;
84264+ start = (unsigned long)MODULES_EXEC_VADDR;
84265+ end = (unsigned long)MODULES_EXEC_END;
84266+ }
84267+#endif
84268+
84269 if (flags & VM_IOREMAP) {
84270 int bit = fls(size);
84271
84272@@ -1568,6 +1617,11 @@ void *vmap(struct page **pages, unsigned int count,
84273 if (count > totalram_pages)
84274 return NULL;
84275
84276+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
84277+ if (!(pgprot_val(prot) & _PAGE_NX))
84278+ flags |= VM_KERNEXEC;
84279+#endif
84280+
84281 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
84282 __builtin_return_address(0));
84283 if (!area)
84284@@ -1669,6 +1723,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
84285 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
84286 goto fail;
84287
84288+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
84289+ if (!(pgprot_val(prot) & _PAGE_NX))
84290+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
84291+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
84292+ else
84293+#endif
84294+
84295 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
84296 start, end, node, gfp_mask, caller);
84297 if (!area)
84298@@ -1842,10 +1903,9 @@ EXPORT_SYMBOL(vzalloc_node);
84299 * For tight control over page level allocator and protection flags
84300 * use __vmalloc() instead.
84301 */
84302-
84303 void *vmalloc_exec(unsigned long size)
84304 {
84305- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
84306+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
84307 -1, __builtin_return_address(0));
84308 }
84309
84310@@ -2136,6 +2196,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
84311 unsigned long uaddr = vma->vm_start;
84312 unsigned long usize = vma->vm_end - vma->vm_start;
84313
84314+ BUG_ON(vma->vm_mirror);
84315+
84316 if ((PAGE_SIZE-1) & (unsigned long)addr)
84317 return -EINVAL;
84318
84319@@ -2575,7 +2637,11 @@ static int s_show(struct seq_file *m, void *p)
84320 v->addr, v->addr + v->size, v->size);
84321
84322 if (v->caller)
84323+#ifdef CONFIG_GRKERNSEC_HIDESYM
84324+ seq_printf(m, " %pK", v->caller);
84325+#else
84326 seq_printf(m, " %pS", v->caller);
84327+#endif
84328
84329 if (v->nr_pages)
84330 seq_printf(m, " pages=%d", v->nr_pages);
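
Under KERNEXEC, the __get_vm_area_node() hunk above redirects executable whole-range requests into the dedicated module window so W^X can be enforced on the rest of vmalloc space. A sketch of the redirection, with made-up placeholder addresses rather than the kernel's real layout:

#include <stdbool.h>
#include <stdio.h>

#define VMALLOC_START      0xc0000000UL	/* placeholder values */
#define VMALLOC_END        0xf0000000UL
#define MODULES_EXEC_VADDR 0xf0000000UL
#define MODULES_EXEC_END   0xff000000UL

static bool pick_range(bool kernexec, unsigned long *start, unsigned long *end)
{
	if (kernexec) {
		/* only whole-range requests may be redirected */
		if (*start != VMALLOC_START || *end != VMALLOC_END)
			return false;
		*start = MODULES_EXEC_VADDR;
		*end = MODULES_EXEC_END;
	}
	return true;
}

int main(void)
{
	unsigned long s = VMALLOC_START, e = VMALLOC_END;

	if (pick_range(true, &s, &e))
		printf("exec range: %#lx-%#lx\n", s, e);
	return 0;
}
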
84331diff --git a/mm/vmstat.c b/mm/vmstat.c
84332index 9800306..76b4b27 100644
84333--- a/mm/vmstat.c
84334+++ b/mm/vmstat.c
84335@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
84336 *
84337 * vm_stat contains the global counters
84338 */
84339-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
84340+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
84341 EXPORT_SYMBOL(vm_stat);
84342
84343 #ifdef CONFIG_SMP
84344@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
84345 v = p->vm_stat_diff[i];
84346 p->vm_stat_diff[i] = 0;
84347 local_irq_restore(flags);
84348- atomic_long_add(v, &zone->vm_stat[i]);
84349+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
84350 global_diff[i] += v;
84351 #ifdef CONFIG_NUMA
84352 /* 3 seconds idle till flush */
84353@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
84354
84355 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
84356 if (global_diff[i])
84357- atomic_long_add(global_diff[i], &vm_stat[i]);
84358+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
84359 }
84360
84361 void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
84362@@ -503,8 +503,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
84363 if (pset->vm_stat_diff[i]) {
84364 int v = pset->vm_stat_diff[i];
84365 pset->vm_stat_diff[i] = 0;
84366- atomic_long_add(v, &zone->vm_stat[i]);
84367- atomic_long_add(v, &vm_stat[i]);
84368+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
84369+ atomic_long_add_unchecked(v, &vm_stat[i]);
84370 }
84371 }
84372 #endif
84373@@ -1223,7 +1223,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
84374 return NOTIFY_OK;
84375 }
84376
84377-static struct notifier_block __cpuinitdata vmstat_notifier =
84378+static struct notifier_block vmstat_notifier =
84379 { &vmstat_cpuup_callback, NULL, 0 };
84380 #endif
84381
84382@@ -1238,10 +1238,20 @@ static int __init setup_vmstat(void)
84383 start_cpu_timer(cpu);
84384 #endif
84385 #ifdef CONFIG_PROC_FS
84386- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
84387- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
84388- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
84389- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
84390+ {
84391+ mode_t gr_mode = S_IRUGO;
84392+#ifdef CONFIG_GRKERNSEC_PROC_ADD
84393+ gr_mode = S_IRUSR;
84394+#endif
84395+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
84396+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
84397+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
84398+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
84399+#else
84400+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
84401+#endif
84402+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
84403+ }
84404 #endif
84405 return 0;
84406 }
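
refresh_cpu_vm_stats() above drains each CPU's local vm_stat_diff into the zone and global counters; the *_unchecked conversion only opts those adds out of the overflow checking. A sketch of the per-cpu fold pattern itself, with plain ints standing in for the atomic types:

#include <stdio.h>

#define NCPUS 4

static long global_stat;		/* stands in for vm_stat[i] */
static int per_cpu_diff[NCPUS];		/* stands in for p->vm_stat_diff[i] */

static void refresh_cpu_stats(int cpu)
{
	int v = per_cpu_diff[cpu];

	per_cpu_diff[cpu] = 0;	/* take the delta and reset it */
	global_stat += v;	/* atomic_long_add_unchecked() in the patch */
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++)
		per_cpu_diff[cpu] = cpu + 1;	/* pretend each CPU saw events */
	for (cpu = 0; cpu < NCPUS; cpu++)
		refresh_cpu_stats(cpu);
	printf("%ld\n", global_stat);	/* 10 */
	return 0;
}
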
84407diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
84408index a292e80..785ee68 100644
84409--- a/net/8021q/vlan.c
84410+++ b/net/8021q/vlan.c
84411@@ -485,7 +485,7 @@ out:
84412 return NOTIFY_DONE;
84413 }
84414
84415-static struct notifier_block vlan_notifier_block __read_mostly = {
84416+static struct notifier_block vlan_notifier_block = {
84417 .notifier_call = vlan_device_event,
84418 };
84419
84420@@ -560,8 +560,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
84421 err = -EPERM;
84422 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
84423 break;
84424- if ((args.u.name_type >= 0) &&
84425- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
84426+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
84427 struct vlan_net *vn;
84428
84429 vn = net_generic(net, vlan_net_id);
84430diff --git a/net/9p/mod.c b/net/9p/mod.c
84431index 6ab36ae..6f1841b 100644
84432--- a/net/9p/mod.c
84433+++ b/net/9p/mod.c
84434@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
84435 void v9fs_register_trans(struct p9_trans_module *m)
84436 {
84437 spin_lock(&v9fs_trans_lock);
84438- list_add_tail(&m->list, &v9fs_trans_list);
84439+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
84440 spin_unlock(&v9fs_trans_lock);
84441 }
84442 EXPORT_SYMBOL(v9fs_register_trans);
84443@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
84444 void v9fs_unregister_trans(struct p9_trans_module *m)
84445 {
84446 spin_lock(&v9fs_trans_lock);
84447- list_del_init(&m->list);
84448+ pax_list_del_init((struct list_head *)&m->list);
84449 spin_unlock(&v9fs_trans_lock);
84450 }
84451 EXPORT_SYMBOL(v9fs_unregister_trans);
84452diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
84453index 02efb25..41541a9 100644
84454--- a/net/9p/trans_fd.c
84455+++ b/net/9p/trans_fd.c
84456@@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
84457 oldfs = get_fs();
84458 set_fs(get_ds());
84459 /* The cast to a user pointer is valid due to the set_fs() */
84460- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
84461+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
84462 set_fs(oldfs);
84463
84464 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
84465diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
84466index 876fbe8..8bbea9f 100644
84467--- a/net/atm/atm_misc.c
84468+++ b/net/atm/atm_misc.c
84469@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
84470 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
84471 return 1;
84472 atm_return(vcc, truesize);
84473- atomic_inc(&vcc->stats->rx_drop);
84474+ atomic_inc_unchecked(&vcc->stats->rx_drop);
84475 return 0;
84476 }
84477 EXPORT_SYMBOL(atm_charge);
84478@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
84479 }
84480 }
84481 atm_return(vcc, guess);
84482- atomic_inc(&vcc->stats->rx_drop);
84483+ atomic_inc_unchecked(&vcc->stats->rx_drop);
84484 return NULL;
84485 }
84486 EXPORT_SYMBOL(atm_alloc_charge);
84487@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
84488
84489 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
84490 {
84491-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
84492+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
84493 __SONET_ITEMS
84494 #undef __HANDLE_ITEM
84495 }
84496@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
84497
84498 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
84499 {
84500-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
84501+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
84502 __SONET_ITEMS
84503 #undef __HANDLE_ITEM
84504 }
84505diff --git a/net/atm/lec.h b/net/atm/lec.h
84506index a86aff9..3a0d6f6 100644
84507--- a/net/atm/lec.h
84508+++ b/net/atm/lec.h
84509@@ -48,7 +48,7 @@ struct lane2_ops {
84510 const u8 *tlvs, u32 sizeoftlvs);
84511 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
84512 const u8 *tlvs, u32 sizeoftlvs);
84513-};
84514+} __no_const;
84515
84516 /*
84517 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
84518diff --git a/net/atm/proc.c b/net/atm/proc.c
84519index 0d020de..011c7bb 100644
84520--- a/net/atm/proc.c
84521+++ b/net/atm/proc.c
84522@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
84523 const struct k_atm_aal_stats *stats)
84524 {
84525 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
84526- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
84527- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
84528- atomic_read(&stats->rx_drop));
84529+		   atomic_read_unchecked(&stats->tx), atomic_read_unchecked(&stats->tx_err),
84530+		   atomic_read_unchecked(&stats->rx), atomic_read_unchecked(&stats->rx_err),
84531+ atomic_read_unchecked(&stats->rx_drop));
84532 }
84533
84534 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
84535diff --git a/net/atm/resources.c b/net/atm/resources.c
84536index 0447d5d..3cf4728 100644
84537--- a/net/atm/resources.c
84538+++ b/net/atm/resources.c
84539@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
84540 static void copy_aal_stats(struct k_atm_aal_stats *from,
84541 struct atm_aal_stats *to)
84542 {
84543-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
84544+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
84545 __AAL_STAT_ITEMS
84546 #undef __HANDLE_ITEM
84547 }
84548@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
84549 static void subtract_aal_stats(struct k_atm_aal_stats *from,
84550 struct atm_aal_stats *to)
84551 {
84552-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
84553+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
84554 __AAL_STAT_ITEMS
84555 #undef __HANDLE_ITEM
84556 }
84557diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
84558index d5744b7..506bae3 100644
84559--- a/net/ax25/sysctl_net_ax25.c
84560+++ b/net/ax25/sysctl_net_ax25.c
84561@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
84562 {
84563 char path[sizeof("net/ax25/") + IFNAMSIZ];
84564 int k;
84565- struct ctl_table *table;
84566+ ctl_table_no_const *table;
84567
84568 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
84569 if (!table)
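
The struct ctl_table * -> ctl_table_no_const change above recurs across the sysctl hunks in this patch: the template tables become const via constification, while kmemdup()'d per-namespace or per-device copies, whose procname/data fields are patched after the copy, need a writable alias type. The shape of the pattern in a self-contained sketch (types trimmed; ctl_table_no_const is assumed to be a plain non-const typedef of struct ctl_table):

    #include <stdlib.h>
    #include <string.h>

    struct ctl_table { const char *procname; void *data; };
    typedef struct ctl_table ctl_table_no_const;

    /* Read-only template shared by every instance. */
    static const struct ctl_table param_template[4];

    /* The private copy must stay writable so per-device fields can be
     * filled in after kmemdup() -- hence the _no_const pointer type. */
    static ctl_table_no_const *dup_param_table(void *dev_data)
    {
        ctl_table_no_const *t = malloc(sizeof(param_template));
        if (!t)
            return NULL;
        memcpy(t, param_template, sizeof(param_template));
        t[0].data = dev_data;
        return t;
    }
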
84570diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
84571index 7d02ebd..4d4cc01 100644
84572--- a/net/batman-adv/bat_iv_ogm.c
84573+++ b/net/batman-adv/bat_iv_ogm.c
84574@@ -63,7 +63,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
84575
84576 /* randomize initial seqno to avoid collision */
84577 get_random_bytes(&random_seqno, sizeof(random_seqno));
84578- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
84579+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
84580
84581 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
84582 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
84583@@ -615,9 +615,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
84584 batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff);
84585
84586 /* change sequence number to network order */
84587- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
84588+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
84589 batadv_ogm_packet->seqno = htonl(seqno);
84590- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
84591+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
84592
84593 batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
84594 batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
84595@@ -1022,7 +1022,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
84596 return;
84597
84598 /* could be changed by schedule_own_packet() */
84599- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
84600+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
84601
84602 if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
84603 has_directlink_flag = 1;
84604diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
84605index f1d37cd..4190879 100644
84606--- a/net/batman-adv/hard-interface.c
84607+++ b/net/batman-adv/hard-interface.c
84608@@ -370,7 +370,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
84609 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
84610 dev_add_pack(&hard_iface->batman_adv_ptype);
84611
84612- atomic_set(&hard_iface->frag_seqno, 1);
84613+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
84614 batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
84615 hard_iface->net_dev->name);
84616
84617@@ -493,7 +493,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
84618 /* This can't be called via a bat_priv callback because
84619 * we have no bat_priv yet.
84620 */
84621- atomic_set(&hard_iface->bat_iv.ogm_seqno, 1);
84622+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, 1);
84623 hard_iface->bat_iv.ogm_buff = NULL;
84624
84625 return hard_iface;
84626diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
84627index 6b548fd..fc32c8d 100644
84628--- a/net/batman-adv/soft-interface.c
84629+++ b/net/batman-adv/soft-interface.c
84630@@ -252,7 +252,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
84631 primary_if->net_dev->dev_addr, ETH_ALEN);
84632
84633 /* set broadcast sequence number */
84634- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
84635+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
84636 bcast_packet->seqno = htonl(seqno);
84637
84638 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
84639@@ -497,7 +497,7 @@ struct net_device *batadv_softif_create(const char *name)
84640 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
84641
84642 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
84643- atomic_set(&bat_priv->bcast_seqno, 1);
84644+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
84645 atomic_set(&bat_priv->tt.vn, 0);
84646 atomic_set(&bat_priv->tt.local_changes, 0);
84647 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
84648diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
84649index ae9ac9a..11e0fe7 100644
84650--- a/net/batman-adv/types.h
84651+++ b/net/batman-adv/types.h
84652@@ -48,7 +48,7 @@
84653 struct batadv_hard_iface_bat_iv {
84654 unsigned char *ogm_buff;
84655 int ogm_buff_len;
84656- atomic_t ogm_seqno;
84657+ atomic_unchecked_t ogm_seqno;
84658 };
84659
84660 struct batadv_hard_iface {
84661@@ -56,7 +56,7 @@ struct batadv_hard_iface {
84662 int16_t if_num;
84663 char if_status;
84664 struct net_device *net_dev;
84665- atomic_t frag_seqno;
84666+ atomic_unchecked_t frag_seqno;
84667 struct kobject *hardif_obj;
84668 atomic_t refcount;
84669 struct packet_type batman_adv_ptype;
84670@@ -284,7 +284,7 @@ struct batadv_priv {
84671 atomic_t orig_interval; /* uint */
84672 atomic_t hop_penalty; /* uint */
84673 atomic_t log_level; /* uint */
84674- atomic_t bcast_seqno;
84675+ atomic_unchecked_t bcast_seqno;
84676 atomic_t bcast_queue_left;
84677 atomic_t batman_queue_left;
84678 char num_ifaces;
84679diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
84680index 10aff49..ea8e021 100644
84681--- a/net/batman-adv/unicast.c
84682+++ b/net/batman-adv/unicast.c
84683@@ -272,7 +272,7 @@ int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
84684 frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
84685 frag2->flags = large_tail;
84686
84687- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
84688+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
84689 frag1->seqno = htons(seqno - 1);
84690 frag2->seqno = htons(seqno);
84691
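
The batman-adv seqno fields converted above (ogm_seqno, frag_seqno, bcast_seqno) are per-packet counters compared with serial-number arithmetic, so wrapping is part of the protocol rather than a bug; that is what makes them safe to exempt from overflow checking. The standard comparison idiom, for reference:

    #include <stdint.h>

    /* Serial-number comparison: ordering comes from the signed
     * difference, so the counters may wrap freely without breaking
     * "newer than". */
    static int seqno_after(uint32_t a, uint32_t b)
    {
        return (int32_t)(a - b) > 0;
    }
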
84692diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
84693index 07f0739..3c42e34 100644
84694--- a/net/bluetooth/hci_sock.c
84695+++ b/net/bluetooth/hci_sock.c
84696@@ -934,7 +934,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
84697 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
84698 }
84699
84700- len = min_t(unsigned int, len, sizeof(uf));
84701+ len = min((size_t)len, sizeof(uf));
84702 if (copy_from_user(&uf, optval, len)) {
84703 err = -EFAULT;
84704 break;
84705diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
84706index 22e6583..426e2f3 100644
84707--- a/net/bluetooth/l2cap_core.c
84708+++ b/net/bluetooth/l2cap_core.c
84709@@ -3400,8 +3400,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
84710 break;
84711
84712 case L2CAP_CONF_RFC:
84713- if (olen == sizeof(rfc))
84714- memcpy(&rfc, (void *)val, olen);
84715+ if (olen != sizeof(rfc))
84716+ break;
84717+
84718+ memcpy(&rfc, (void *)val, olen);
84719
84720 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
84721 rfc.mode != chan->mode)
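
The L2CAP_CONF_RFC change above is a behavioral fix, not just an annotation: previously a response carrying a wrong option length skipped the memcpy() but fell through to the checks below, which then read the stack-resident rfc that had never been filled in. Rejecting the option outright closes that uninitialized read. A compact analogue of the fixed logic:

    #include <stdint.h>
    #include <string.h>

    struct conf_rfc { uint8_t mode; uint8_t txwin_size; };

    /* After the fix: a bad length skips the whole option, so the caller
     * never consults an rfc that was never initialized. */
    static int parse_rfc(const void *val, size_t olen, struct conf_rfc *rfc)
    {
        if (olen != sizeof(*rfc))
            return -1;          /* was: continue and use stale/junk *rfc */
        memcpy(rfc, val, sizeof(*rfc));
        return 0;
    }
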
84722diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
84723index 1bcfb84..dad9f98 100644
84724--- a/net/bluetooth/l2cap_sock.c
84725+++ b/net/bluetooth/l2cap_sock.c
84726@@ -479,7 +479,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
84727 struct sock *sk = sock->sk;
84728 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
84729 struct l2cap_options opts;
84730- int len, err = 0;
84731+ int err = 0;
84732+ size_t len = optlen;
84733 u32 opt;
84734
84735 BT_DBG("sk %p", sk);
84736@@ -501,7 +502,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
84737 opts.max_tx = chan->max_tx;
84738 opts.txwin_size = chan->tx_win;
84739
84740- len = min_t(unsigned int, sizeof(opts), optlen);
84741+ len = min(sizeof(opts), len);
84742 if (copy_from_user((char *) &opts, optval, len)) {
84743 err = -EFAULT;
84744 break;
84745@@ -581,7 +582,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
84746 struct bt_security sec;
84747 struct bt_power pwr;
84748 struct l2cap_conn *conn;
84749- int len, err = 0;
84750+ int err = 0;
84751+ size_t len = optlen;
84752 u32 opt;
84753
84754 BT_DBG("sk %p", sk);
84755@@ -604,7 +606,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
84756
84757 sec.level = BT_SECURITY_LOW;
84758
84759- len = min_t(unsigned int, sizeof(sec), optlen);
84760+ len = min(sizeof(sec), len);
84761 if (copy_from_user((char *) &sec, optval, len)) {
84762 err = -EFAULT;
84763 break;
84764@@ -701,7 +703,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
84765
84766 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
84767
84768- len = min_t(unsigned int, sizeof(pwr), optlen);
84769+ len = min(sizeof(pwr), len);
84770 if (copy_from_user((char *) &pwr, optval, len)) {
84771 err = -EFAULT;
84772 break;
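
The pattern repeated across these Bluetooth setsockopt handlers (hci_sock above, rfcomm below) replaces an int length plus min_t(unsigned int, ...) with a size_t initialized from optlen and a plain min(): the comparison then stays in one unsigned type end to end, with no signed value and no narrowing cast anywhere between the user-supplied length and the copy_from_user() bound. In miniature:

    #include <stddef.h>

    #define min(a, b) ((a) < (b) ? (a) : (b))

    struct opts { int imtu, omtu; };

    /* optlen arrives unsigned and stays unsigned; min() needs no type
     * parameter because both operands are already size_t. */
    static size_t clamp_len(size_t optlen)
    {
        return min(sizeof(struct opts), optlen);
    }
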
84773diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
84774index ce3f665..2c7d08f 100644
84775--- a/net/bluetooth/rfcomm/sock.c
84776+++ b/net/bluetooth/rfcomm/sock.c
84777@@ -667,7 +667,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
84778 struct sock *sk = sock->sk;
84779 struct bt_security sec;
84780 int err = 0;
84781- size_t len;
84782+ size_t len = optlen;
84783 u32 opt;
84784
84785 BT_DBG("sk %p", sk);
84786@@ -689,7 +689,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
84787
84788 sec.level = BT_SECURITY_LOW;
84789
84790- len = min_t(unsigned int, sizeof(sec), optlen);
84791+ len = min(sizeof(sec), len);
84792 if (copy_from_user((char *) &sec, optval, len)) {
84793 err = -EFAULT;
84794 break;
84795diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
84796index bd6fd0f..6492cba 100644
84797--- a/net/bluetooth/rfcomm/tty.c
84798+++ b/net/bluetooth/rfcomm/tty.c
84799@@ -309,7 +309,7 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev)
84800 BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags));
84801
84802 spin_lock_irqsave(&dev->port.lock, flags);
84803- if (dev->port.count > 0) {
84804+ if (atomic_read(&dev->port.count) > 0) {
84805 spin_unlock_irqrestore(&dev->port.lock, flags);
84806 return;
84807 }
84808@@ -664,10 +664,10 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
84809 return -ENODEV;
84810
84811 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
84812- dev->channel, dev->port.count);
84813+ dev->channel, atomic_read(&dev->port.count));
84814
84815 spin_lock_irqsave(&dev->port.lock, flags);
84816- if (++dev->port.count > 1) {
84817+ if (atomic_inc_return(&dev->port.count) > 1) {
84818 spin_unlock_irqrestore(&dev->port.lock, flags);
84819 return 0;
84820 }
84821@@ -732,10 +732,10 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
84822 return;
84823
84824 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
84825- dev->port.count);
84826+ atomic_read(&dev->port.count));
84827
84828 spin_lock_irqsave(&dev->port.lock, flags);
84829- if (!--dev->port.count) {
84830+ if (!atomic_dec_return(&dev->port.count)) {
84831 spin_unlock_irqrestore(&dev->port.lock, flags);
84832 if (dev->tty_dev->parent)
84833 device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
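
The dev->port.count accesses above assume a companion hunk elsewhere in this patch that retypes tty_port.count as atomic_t; with that in place, atomic_inc_return()/atomic_dec_return() let open and close detect the first-open and last-close transitions from the returned value instead of from a non-atomic read-modify-write on a plain int. The shape of it in portable C11:

    #include <stdatomic.h>

    static atomic_int port_count;

    /* First opener (0 -> 1) does the setup; later openers just pile on. */
    static int port_open(void)
    {
        if (atomic_fetch_add(&port_count, 1) + 1 > 1)
            return 0;
        /* ... first-open initialization ... */
        return 0;
    }

    /* Last closer (1 -> 0) does the teardown. */
    static void port_close(void)
    {
        if (atomic_fetch_sub(&port_count, 1) - 1 != 0)
            return;
        /* ... last-close teardown ... */
    }
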
84834diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
84835index 5fe2ff3..121d696 100644
84836--- a/net/bridge/netfilter/ebtables.c
84837+++ b/net/bridge/netfilter/ebtables.c
84838@@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
84839 tmp.valid_hooks = t->table->valid_hooks;
84840 }
84841 mutex_unlock(&ebt_mutex);
84842- if (copy_to_user(user, &tmp, *len) != 0){
84843+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
84844 BUGPRINT("c2u Didn't work\n");
84845 ret = -EFAULT;
84846 break;
84847@@ -2327,7 +2327,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
84848 goto out;
84849 tmp.valid_hooks = t->valid_hooks;
84850
84851- if (copy_to_user(user, &tmp, *len) != 0) {
84852+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
84853 ret = -EFAULT;
84854 break;
84855 }
84856@@ -2338,7 +2338,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
84857 tmp.entries_size = t->table->entries_size;
84858 tmp.valid_hooks = t->table->valid_hooks;
84859
84860- if (copy_to_user(user, &tmp, *len) != 0) {
84861+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
84862 ret = -EFAULT;
84863 break;
84864 }
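
The three ebtables hunks above share one shape that recurs throughout this patch (decnet sysctl, sock_getsockopt, compat_sys_socketcall): *len is caller-controlled, tmp is a fixed-size kernel object, and an unguarded copy_to_user(user, &tmp, *len) would read past tmp and leak adjacent kernel stack. Bounding the length first turns the leak into -EFAULT. As a user-space analogue:

    #include <errno.h>
    #include <string.h>

    struct reply { char name[32]; unsigned int valid_hooks; };

    /* Refuse lengths larger than the source object instead of copying
     * whatever happens to sit next to it on the stack. */
    static int copy_reply(void *user, size_t len, const struct reply *tmp)
    {
        if (len > sizeof(*tmp))
            return -EFAULT;
        memcpy(user, tmp, len);     /* copy_to_user() in the kernel */
        return 0;
    }
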
84865diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
84866index a376ec1..1fbd6be 100644
84867--- a/net/caif/cfctrl.c
84868+++ b/net/caif/cfctrl.c
84869@@ -10,6 +10,7 @@
84870 #include <linux/spinlock.h>
84871 #include <linux/slab.h>
84872 #include <linux/pkt_sched.h>
84873+#include <linux/sched.h>
84874 #include <net/caif/caif_layer.h>
84875 #include <net/caif/cfpkt.h>
84876 #include <net/caif/cfctrl.h>
84877@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
84878 memset(&dev_info, 0, sizeof(dev_info));
84879 dev_info.id = 0xff;
84880 cfsrvl_init(&this->serv, 0, &dev_info, false);
84881- atomic_set(&this->req_seq_no, 1);
84882- atomic_set(&this->rsp_seq_no, 1);
84883+ atomic_set_unchecked(&this->req_seq_no, 1);
84884+ atomic_set_unchecked(&this->rsp_seq_no, 1);
84885 this->serv.layer.receive = cfctrl_recv;
84886 sprintf(this->serv.layer.name, "ctrl");
84887 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
84888@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
84889 struct cfctrl_request_info *req)
84890 {
84891 spin_lock_bh(&ctrl->info_list_lock);
84892- atomic_inc(&ctrl->req_seq_no);
84893- req->sequence_no = atomic_read(&ctrl->req_seq_no);
84894+ atomic_inc_unchecked(&ctrl->req_seq_no);
84895+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
84896 list_add_tail(&req->list, &ctrl->list);
84897 spin_unlock_bh(&ctrl->info_list_lock);
84898 }
84899@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
84900 if (p != first)
84901 pr_warn("Requests are not received in order\n");
84902
84903- atomic_set(&ctrl->rsp_seq_no,
84904+ atomic_set_unchecked(&ctrl->rsp_seq_no,
84905 p->sequence_no);
84906 list_del(&p->list);
84907 goto out;
84908diff --git a/net/can/af_can.c b/net/can/af_can.c
84909index ddac1ee..3ee0a78 100644
84910--- a/net/can/af_can.c
84911+++ b/net/can/af_can.c
84912@@ -872,7 +872,7 @@ static const struct net_proto_family can_family_ops = {
84913 };
84914
84915 /* notifier block for netdevice event */
84916-static struct notifier_block can_netdev_notifier __read_mostly = {
84917+static struct notifier_block can_netdev_notifier = {
84918 .notifier_call = can_notifier,
84919 };
84920
84921diff --git a/net/can/gw.c b/net/can/gw.c
84922index 574dda78e..3d2b3da 100644
84923--- a/net/can/gw.c
84924+++ b/net/can/gw.c
84925@@ -67,7 +67,6 @@ MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
84926 MODULE_ALIAS("can-gw");
84927
84928 static HLIST_HEAD(cgw_list);
84929-static struct notifier_block notifier;
84930
84931 static struct kmem_cache *cgw_cache __read_mostly;
84932
84933@@ -893,6 +892,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
84934 return err;
84935 }
84936
84937+static struct notifier_block notifier = {
84938+ .notifier_call = cgw_notifier
84939+};
84940+
84941 static __init int cgw_module_init(void)
84942 {
84943 printk(banner);
84944@@ -904,7 +907,6 @@ static __init int cgw_module_init(void)
84945 return -ENOMEM;
84946
84947 /* set notifier */
84948- notifier.notifier_call = cgw_notifier;
84949 register_netdevice_notifier(&notifier);
84950
84951 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
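
Moving the .notifier_call assignment from cgw_module_init() into a static initializer above is another constification enabler: a notifier_block that is complete at compile time has no remaining runtime write, so it can be placed in read-only data. A sketch of the difference (names are stand-ins):

    struct notifier_block_like {
        int (*notifier_call)(void *);
    };

    static int cgw_notifier_stub(void *arg) { (void)arg; return 0; }

    /* After: fully initialized at compile time, eligible for read-only
     * placement. Before, the field was assigned at module init, which
     * forces the object to live in writable data. */
    static struct notifier_block_like notifier = {
        .notifier_call = cgw_notifier_stub,
    };
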
84952diff --git a/net/compat.c b/net/compat.c
84953index 79ae884..17c5c09 100644
84954--- a/net/compat.c
84955+++ b/net/compat.c
84956@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
84957 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
84958 __get_user(kmsg->msg_flags, &umsg->msg_flags))
84959 return -EFAULT;
84960- kmsg->msg_name = compat_ptr(tmp1);
84961- kmsg->msg_iov = compat_ptr(tmp2);
84962- kmsg->msg_control = compat_ptr(tmp3);
84963+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
84964+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
84965+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
84966 return 0;
84967 }
84968
84969@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
84970
84971 if (kern_msg->msg_namelen) {
84972 if (mode == VERIFY_READ) {
84973- int err = move_addr_to_kernel(kern_msg->msg_name,
84974+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
84975 kern_msg->msg_namelen,
84976 kern_address);
84977 if (err < 0)
84978@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
84979 kern_msg->msg_name = NULL;
84980
84981 tot_len = iov_from_user_compat_to_kern(kern_iov,
84982- (struct compat_iovec __user *)kern_msg->msg_iov,
84983+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
84984 kern_msg->msg_iovlen);
84985 if (tot_len >= 0)
84986 kern_msg->msg_iov = kern_iov;
84987@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
84988
84989 #define CMSG_COMPAT_FIRSTHDR(msg) \
84990 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
84991- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
84992+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
84993 (struct compat_cmsghdr __user *)NULL)
84994
84995 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
84996 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
84997 (ucmlen) <= (unsigned long) \
84998 ((mhdr)->msg_controllen - \
84999- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
85000+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
85001
85002 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
85003 struct compat_cmsghdr __user *cmsg, int cmsg_len)
85004 {
85005 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
85006- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
85007+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
85008 msg->msg_controllen)
85009 return NULL;
85010 return (struct compat_cmsghdr __user *)ptr;
85011@@ -219,7 +219,7 @@ Efault:
85012
85013 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
85014 {
85015- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
85016+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
85017 struct compat_cmsghdr cmhdr;
85018 struct compat_timeval ctv;
85019 struct compat_timespec cts[3];
85020@@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
85021
85022 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
85023 {
85024- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
85025+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
85026 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
85027 int fdnum = scm->fp->count;
85028 struct file **fp = scm->fp->fp;
85029@@ -363,7 +363,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
85030 return -EFAULT;
85031 old_fs = get_fs();
85032 set_fs(KERNEL_DS);
85033- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
85034+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
85035 set_fs(old_fs);
85036
85037 return err;
85038@@ -424,7 +424,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
85039 len = sizeof(ktime);
85040 old_fs = get_fs();
85041 set_fs(KERNEL_DS);
85042- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
85043+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
85044 set_fs(old_fs);
85045
85046 if (!err) {
85047@@ -567,7 +567,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
85048 case MCAST_JOIN_GROUP:
85049 case MCAST_LEAVE_GROUP:
85050 {
85051- struct compat_group_req __user *gr32 = (void *)optval;
85052+ struct compat_group_req __user *gr32 = (void __user *)optval;
85053 struct group_req __user *kgr =
85054 compat_alloc_user_space(sizeof(struct group_req));
85055 u32 interface;
85056@@ -588,7 +588,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
85057 case MCAST_BLOCK_SOURCE:
85058 case MCAST_UNBLOCK_SOURCE:
85059 {
85060- struct compat_group_source_req __user *gsr32 = (void *)optval;
85061+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
85062 struct group_source_req __user *kgsr = compat_alloc_user_space(
85063 sizeof(struct group_source_req));
85064 u32 interface;
85065@@ -609,7 +609,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
85066 }
85067 case MCAST_MSFILTER:
85068 {
85069- struct compat_group_filter __user *gf32 = (void *)optval;
85070+ struct compat_group_filter __user *gf32 = (void __user *)optval;
85071 struct group_filter __user *kgf;
85072 u32 interface, fmode, numsrc;
85073
85074@@ -647,7 +647,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
85075 char __user *optval, int __user *optlen,
85076 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
85077 {
85078- struct compat_group_filter __user *gf32 = (void *)optval;
85079+ struct compat_group_filter __user *gf32 = (void __user *)optval;
85080 struct group_filter __user *kgf;
85081 int __user *koptlen;
85082 u32 interface, fmode, numsrc;
85083@@ -796,7 +796,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
85084
85085 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
85086 return -EINVAL;
85087- if (copy_from_user(a, args, nas[call]))
85088+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
85089 return -EFAULT;
85090 a0 = a[0];
85091 a1 = a[1];
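
The __force_user/__force_kernel casts peppered through net/compat.c are sparse address-space annotations, not runtime operations: the patch tightens the user/kernel pointer split (which matters once UDEREF-style separation is enforced), and these macros mark each deliberate crossing so static analysis can tell it apart from an accident. Their assumed definitions, following the patch's compiler.h convention (verify against that hunk):

    /* Under sparse (__CHECKER__) the annotations carry address-space
     * information; for the real compiler they all vanish. */
    #ifdef __CHECKER__
    # define __user          __attribute__((noderef, address_space(1)))
    # define __force         __attribute__((force))
    # define __force_user    __force __user
    # define __force_kernel  __force   /* kernel space: address_space 0 */
    #else
    # define __user
    # define __force_user
    # define __force_kernel
    #endif
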
85092diff --git a/net/core/datagram.c b/net/core/datagram.c
85093index 368f9c3..f82d4a3 100644
85094--- a/net/core/datagram.c
85095+++ b/net/core/datagram.c
85096@@ -289,7 +289,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
85097 }
85098
85099 kfree_skb(skb);
85100- atomic_inc(&sk->sk_drops);
85101+ atomic_inc_unchecked(&sk->sk_drops);
85102 sk_mem_reclaim_partial(sk);
85103
85104 return err;
85105diff --git a/net/core/dev.c b/net/core/dev.c
85106index 1339f77..6fd27dc 100644
85107--- a/net/core/dev.c
85108+++ b/net/core/dev.c
85109@@ -1250,9 +1250,13 @@ void dev_load(struct net *net, const char *name)
85110 if (no_module && capable(CAP_NET_ADMIN))
85111 no_module = request_module("netdev-%s", name);
85112 if (no_module && capable(CAP_SYS_MODULE)) {
85113+#ifdef CONFIG_GRKERNSEC_MODHARDEN
85114+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
85115+#else
85116 if (!request_module("%s", name))
85117 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
85118 name);
85119+#endif
85120 }
85121 }
85122 EXPORT_SYMBOL(dev_load);
85123@@ -1715,7 +1719,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
85124 {
85125 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
85126 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
85127- atomic_long_inc(&dev->rx_dropped);
85128+ atomic_long_inc_unchecked(&dev->rx_dropped);
85129 kfree_skb(skb);
85130 return NET_RX_DROP;
85131 }
85132@@ -1725,7 +1729,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
85133 nf_reset(skb);
85134
85135 if (unlikely(!is_skb_forwardable(dev, skb))) {
85136- atomic_long_inc(&dev->rx_dropped);
85137+ atomic_long_inc_unchecked(&dev->rx_dropped);
85138 kfree_skb(skb);
85139 return NET_RX_DROP;
85140 }
85141@@ -2180,7 +2184,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
85142
85143 struct dev_gso_cb {
85144 void (*destructor)(struct sk_buff *skb);
85145-};
85146+} __no_const;
85147
85148 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
85149
85150@@ -3053,7 +3057,7 @@ enqueue:
85151
85152 local_irq_restore(flags);
85153
85154- atomic_long_inc(&skb->dev->rx_dropped);
85155+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
85156 kfree_skb(skb);
85157 return NET_RX_DROP;
85158 }
85159@@ -3125,7 +3129,7 @@ int netif_rx_ni(struct sk_buff *skb)
85160 }
85161 EXPORT_SYMBOL(netif_rx_ni);
85162
85163-static void net_tx_action(struct softirq_action *h)
85164+static void net_tx_action(void)
85165 {
85166 struct softnet_data *sd = &__get_cpu_var(softnet_data);
85167
85168@@ -3457,7 +3461,7 @@ ncls:
85169 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
85170 } else {
85171 drop:
85172- atomic_long_inc(&skb->dev->rx_dropped);
85173+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
85174 kfree_skb(skb);
85175 /* Jamal, now you will not able to escape explaining
85176 * me how you were going to use this. :-)
85177@@ -4040,7 +4044,7 @@ void netif_napi_del(struct napi_struct *napi)
85178 }
85179 EXPORT_SYMBOL(netif_napi_del);
85180
85181-static void net_rx_action(struct softirq_action *h)
85182+static void net_rx_action(void)
85183 {
85184 struct softnet_data *sd = &__get_cpu_var(softnet_data);
85185 unsigned long time_limit = jiffies + 2;
85186@@ -4524,8 +4528,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
85187 else
85188 seq_printf(seq, "%04x", ntohs(pt->type));
85189
85190+#ifdef CONFIG_GRKERNSEC_HIDESYM
85191+ seq_printf(seq, " %-8s %p\n",
85192+ pt->dev ? pt->dev->name : "", NULL);
85193+#else
85194 seq_printf(seq, " %-8s %pF\n",
85195 pt->dev ? pt->dev->name : "", pt->func);
85196+#endif
85197 }
85198
85199 return 0;
85200@@ -6097,7 +6106,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
85201 } else {
85202 netdev_stats_to_stats64(storage, &dev->stats);
85203 }
85204- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
85205+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
85206 return storage;
85207 }
85208 EXPORT_SYMBOL(dev_get_stats);
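
The ptype_seq_show() hunk above is GRKERNSEC_HIDESYM at work: %pF would resolve pt->func into an identifiable kernel symbol in /proc/net/ptype, exactly the kind of layout information HIDESYM exists to withhold, so the pointer is swapped for NULL. The same decision in isolation:

    #include <stdio.h>

    /* Never format a live kernel pointer into world-readable output;
     * under HIDESYM the value shown carries no layout information. */
    static void show_handler(FILE *seq, const char *name, void *func)
    {
    #ifdef CONFIG_GRKERNSEC_HIDESYM
        (void)func;
        fprintf(seq, " %-8s %p\n", name, (void *)0);
    #else
        fprintf(seq, " %-8s %p\n", name, func);
    #endif
    }
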
85209diff --git a/net/core/flow.c b/net/core/flow.c
85210index b0901ee..7d3c2ca 100644
85211--- a/net/core/flow.c
85212+++ b/net/core/flow.c
85213@@ -61,7 +61,7 @@ struct flow_cache {
85214 struct timer_list rnd_timer;
85215 };
85216
85217-atomic_t flow_cache_genid = ATOMIC_INIT(0);
85218+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
85219 EXPORT_SYMBOL(flow_cache_genid);
85220 static struct flow_cache flow_cache_global;
85221 static struct kmem_cache *flow_cachep __read_mostly;
85222@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
85223
85224 static int flow_entry_valid(struct flow_cache_entry *fle)
85225 {
85226- if (atomic_read(&flow_cache_genid) != fle->genid)
85227+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
85228 return 0;
85229 if (fle->object && !fle->object->ops->check(fle->object))
85230 return 0;
85231@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
85232 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
85233 fcp->hash_count++;
85234 }
85235- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
85236+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
85237 flo = fle->object;
85238 if (!flo)
85239 goto ret_object;
85240@@ -280,7 +280,7 @@ nocache:
85241 }
85242 flo = resolver(net, key, family, dir, flo, ctx);
85243 if (fle) {
85244- fle->genid = atomic_read(&flow_cache_genid);
85245+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
85246 if (!IS_ERR(flo))
85247 fle->object = flo;
85248 else
85249diff --git a/net/core/iovec.c b/net/core/iovec.c
85250index 7e7aeb0..2a998cb 100644
85251--- a/net/core/iovec.c
85252+++ b/net/core/iovec.c
85253@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
85254 if (m->msg_namelen) {
85255 if (mode == VERIFY_READ) {
85256 void __user *namep;
85257- namep = (void __user __force *) m->msg_name;
85258+ namep = (void __force_user *) m->msg_name;
85259 err = move_addr_to_kernel(namep, m->msg_namelen,
85260 address);
85261 if (err < 0)
85262@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
85263 }
85264
85265 size = m->msg_iovlen * sizeof(struct iovec);
85266- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
85267+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
85268 return -EFAULT;
85269
85270 m->msg_iov = iov;
85271diff --git a/net/core/neighbour.c b/net/core/neighbour.c
85272index c815f28..e6403f2 100644
85273--- a/net/core/neighbour.c
85274+++ b/net/core/neighbour.c
85275@@ -2776,7 +2776,7 @@ static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer,
85276 size_t *lenp, loff_t *ppos)
85277 {
85278 int size, ret;
85279- ctl_table tmp = *ctl;
85280+ ctl_table_no_const tmp = *ctl;
85281
85282 tmp.extra1 = &zero;
85283 tmp.extra2 = &unres_qlen_max;
85284diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
85285index 28c5f5a..7edf2e2 100644
85286--- a/net/core/net-sysfs.c
85287+++ b/net/core/net-sysfs.c
85288@@ -1455,7 +1455,7 @@ void netdev_class_remove_file(struct class_attribute *class_attr)
85289 }
85290 EXPORT_SYMBOL(netdev_class_remove_file);
85291
85292-int netdev_kobject_init(void)
85293+int __init netdev_kobject_init(void)
85294 {
85295 kobj_ns_type_register(&net_ns_type_operations);
85296 return class_register(&net_class);
85297diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
85298index 8acce01..2e306bb 100644
85299--- a/net/core/net_namespace.c
85300+++ b/net/core/net_namespace.c
85301@@ -442,7 +442,7 @@ static int __register_pernet_operations(struct list_head *list,
85302 int error;
85303 LIST_HEAD(net_exit_list);
85304
85305- list_add_tail(&ops->list, list);
85306+ pax_list_add_tail((struct list_head *)&ops->list, list);
85307 if (ops->init || (ops->id && ops->size)) {
85308 for_each_net(net) {
85309 error = ops_init(ops, net);
85310@@ -455,7 +455,7 @@ static int __register_pernet_operations(struct list_head *list,
85311
85312 out_undo:
85313 /* If I have an error cleanup all namespaces I initialized */
85314- list_del(&ops->list);
85315+ pax_list_del((struct list_head *)&ops->list);
85316 ops_exit_list(ops, &net_exit_list);
85317 ops_free_list(ops, &net_exit_list);
85318 return error;
85319@@ -466,7 +466,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
85320 struct net *net;
85321 LIST_HEAD(net_exit_list);
85322
85323- list_del(&ops->list);
85324+ pax_list_del((struct list_head *)&ops->list);
85325 for_each_net(net)
85326 list_add_tail(&net->exit_list, &net_exit_list);
85327 ops_exit_list(ops, &net_exit_list);
85328@@ -600,7 +600,7 @@ int register_pernet_device(struct pernet_operations *ops)
85329 mutex_lock(&net_mutex);
85330 error = register_pernet_operations(&pernet_list, ops);
85331 if (!error && (first_device == &pernet_list))
85332- first_device = &ops->list;
85333+ first_device = (struct list_head *)&ops->list;
85334 mutex_unlock(&net_mutex);
85335 return error;
85336 }
85337diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
85338index 798f920..e2261f5 100644
85339--- a/net/core/rtnetlink.c
85340+++ b/net/core/rtnetlink.c
85341@@ -58,7 +58,7 @@ struct rtnl_link {
85342 rtnl_doit_func doit;
85343 rtnl_dumpit_func dumpit;
85344 rtnl_calcit_func calcit;
85345-};
85346+} __no_const;
85347
85348 static DEFINE_MUTEX(rtnl_mutex);
85349
85350@@ -299,10 +299,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
85351 if (rtnl_link_ops_get(ops->kind))
85352 return -EEXIST;
85353
85354- if (!ops->dellink)
85355- ops->dellink = unregister_netdevice_queue;
85356+ if (!ops->dellink) {
85357+ pax_open_kernel();
85358+ *(void **)&ops->dellink = unregister_netdevice_queue;
85359+ pax_close_kernel();
85360+ }
85361
85362- list_add_tail(&ops->list, &link_ops);
85363+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
85364 return 0;
85365 }
85366 EXPORT_SYMBOL_GPL(__rtnl_link_register);
85367@@ -349,7 +352,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
85368 for_each_net(net) {
85369 __rtnl_kill_links(net, ops);
85370 }
85371- list_del(&ops->list);
85372+ pax_list_del((struct list_head *)&ops->list);
85373 }
85374 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
85375
85376diff --git a/net/core/scm.c b/net/core/scm.c
85377index 905dcc6..14ee2d6 100644
85378--- a/net/core/scm.c
85379+++ b/net/core/scm.c
85380@@ -224,7 +224,7 @@ EXPORT_SYMBOL(__scm_send);
85381 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
85382 {
85383 struct cmsghdr __user *cm
85384- = (__force struct cmsghdr __user *)msg->msg_control;
85385+ = (struct cmsghdr __force_user *)msg->msg_control;
85386 struct cmsghdr cmhdr;
85387 int cmlen = CMSG_LEN(len);
85388 int err;
85389@@ -247,7 +247,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
85390 err = -EFAULT;
85391 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
85392 goto out;
85393- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
85394+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
85395 goto out;
85396 cmlen = CMSG_SPACE(len);
85397 if (msg->msg_controllen < cmlen)
85398@@ -263,7 +263,7 @@ EXPORT_SYMBOL(put_cmsg);
85399 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
85400 {
85401 struct cmsghdr __user *cm
85402- = (__force struct cmsghdr __user*)msg->msg_control;
85403+ = (struct cmsghdr __force_user *)msg->msg_control;
85404
85405 int fdmax = 0;
85406 int fdnum = scm->fp->count;
85407@@ -283,7 +283,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
85408 if (fdnum < fdmax)
85409 fdmax = fdnum;
85410
85411- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
85412+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
85413 i++, cmfptr++)
85414 {
85415 struct socket *sock;
85416diff --git a/net/core/sock.c b/net/core/sock.c
85417index bc131d4..029e378 100644
85418--- a/net/core/sock.c
85419+++ b/net/core/sock.c
85420@@ -388,7 +388,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
85421 struct sk_buff_head *list = &sk->sk_receive_queue;
85422
85423 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
85424- atomic_inc(&sk->sk_drops);
85425+ atomic_inc_unchecked(&sk->sk_drops);
85426 trace_sock_rcvqueue_full(sk, skb);
85427 return -ENOMEM;
85428 }
85429@@ -398,7 +398,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
85430 return err;
85431
85432 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
85433- atomic_inc(&sk->sk_drops);
85434+ atomic_inc_unchecked(&sk->sk_drops);
85435 return -ENOBUFS;
85436 }
85437
85438@@ -418,7 +418,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
85439 skb_dst_force(skb);
85440
85441 spin_lock_irqsave(&list->lock, flags);
85442- skb->dropcount = atomic_read(&sk->sk_drops);
85443+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
85444 __skb_queue_tail(list, skb);
85445 spin_unlock_irqrestore(&list->lock, flags);
85446
85447@@ -438,7 +438,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
85448 skb->dev = NULL;
85449
85450 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
85451- atomic_inc(&sk->sk_drops);
85452+ atomic_inc_unchecked(&sk->sk_drops);
85453 goto discard_and_relse;
85454 }
85455 if (nested)
85456@@ -456,7 +456,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
85457 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
85458 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
85459 bh_unlock_sock(sk);
85460- atomic_inc(&sk->sk_drops);
85461+ atomic_inc_unchecked(&sk->sk_drops);
85462 goto discard_and_relse;
85463 }
85464
85465@@ -930,12 +930,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
85466 struct timeval tm;
85467 } v;
85468
85469- int lv = sizeof(int);
85470- int len;
85471+ unsigned int lv = sizeof(int);
85472+ unsigned int len;
85473
85474 if (get_user(len, optlen))
85475 return -EFAULT;
85476- if (len < 0)
85477+ if (len > INT_MAX)
85478 return -EINVAL;
85479
85480 memset(&v, 0, sizeof(v));
85481@@ -1083,11 +1083,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
85482
85483 case SO_PEERNAME:
85484 {
85485- char address[128];
85486+ char address[_K_SS_MAXSIZE];
85487
85488 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
85489 return -ENOTCONN;
85490- if (lv < len)
85491+ if (lv < len || sizeof address < len)
85492 return -EINVAL;
85493 if (copy_to_user(optval, address, len))
85494 return -EFAULT;
85495@@ -1146,7 +1146,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
85496
85497 if (len > lv)
85498 len = lv;
85499- if (copy_to_user(optval, &v, len))
85500+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
85501 return -EFAULT;
85502 lenout:
85503 if (put_user(len, optlen))
85504@@ -2276,7 +2276,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
85505 */
85506 smp_wmb();
85507 atomic_set(&sk->sk_refcnt, 1);
85508- atomic_set(&sk->sk_drops, 0);
85509+ atomic_set_unchecked(&sk->sk_drops, 0);
85510 }
85511 EXPORT_SYMBOL(sock_init_data);
85512
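
The sock_getsockopt() changes above stack three related guards: len and lv become unsigned so a negative user length surfaces as > INT_MAX and is rejected; the SO_PEERNAME buffer is sized by _K_SS_MAXSIZE rather than a bare 128 and gains an explicit sizeof bound; and the final copy is capped at sizeof(v) so no path can copy past the kernel-side union. Condensed into one analogue:

    #include <errno.h>
    #include <limits.h>
    #include <string.h>

    /* len is unsigned on arrival; every bound below is against the real
     * kernel object, so nothing beyond it can reach user space. */
    static int getsockopt_copy(void *optval, unsigned int len,
                               const void *v, size_t vsize)
    {
        if (len > INT_MAX)
            return -EINVAL;   /* catches negative lengths from the user */
        if (len > vsize)
            return -EFAULT;
        memcpy(optval, v, len); /* copy_to_user() in the kernel */
        return 0;
    }
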
85513diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
85514index 750f44f..922399c 100644
85515--- a/net/core/sock_diag.c
85516+++ b/net/core/sock_diag.c
85517@@ -9,26 +9,33 @@
85518 #include <linux/inet_diag.h>
85519 #include <linux/sock_diag.h>
85520
85521-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
85522+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
85523 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
85524 static DEFINE_MUTEX(sock_diag_table_mutex);
85525
85526 int sock_diag_check_cookie(void *sk, __u32 *cookie)
85527 {
85528+#ifndef CONFIG_GRKERNSEC_HIDESYM
85529 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
85530 cookie[1] != INET_DIAG_NOCOOKIE) &&
85531 ((u32)(unsigned long)sk != cookie[0] ||
85532 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
85533 return -ESTALE;
85534 else
85535+#endif
85536 return 0;
85537 }
85538 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
85539
85540 void sock_diag_save_cookie(void *sk, __u32 *cookie)
85541 {
85542+#ifdef CONFIG_GRKERNSEC_HIDESYM
85543+ cookie[0] = 0;
85544+ cookie[1] = 0;
85545+#else
85546 cookie[0] = (u32)(unsigned long)sk;
85547 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
85548+#endif
85549 }
85550 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
85551
85552@@ -75,8 +82,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
85553 mutex_lock(&sock_diag_table_mutex);
85554 if (sock_diag_handlers[hndl->family])
85555 err = -EBUSY;
85556- else
85557+ else {
85558+ pax_open_kernel();
85559 sock_diag_handlers[hndl->family] = hndl;
85560+ pax_close_kernel();
85561+ }
85562 mutex_unlock(&sock_diag_table_mutex);
85563
85564 return err;
85565@@ -92,26 +102,13 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
85566
85567 mutex_lock(&sock_diag_table_mutex);
85568 BUG_ON(sock_diag_handlers[family] != hnld);
85569+ pax_open_kernel();
85570 sock_diag_handlers[family] = NULL;
85571+ pax_close_kernel();
85572 mutex_unlock(&sock_diag_table_mutex);
85573 }
85574 EXPORT_SYMBOL_GPL(sock_diag_unregister);
85575
85576-static const inline struct sock_diag_handler *sock_diag_lock_handler(int family)
85577-{
85578- if (sock_diag_handlers[family] == NULL)
85579- request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
85580- NETLINK_SOCK_DIAG, family);
85581-
85582- mutex_lock(&sock_diag_table_mutex);
85583- return sock_diag_handlers[family];
85584-}
85585-
85586-static inline void sock_diag_unlock_handler(const struct sock_diag_handler *h)
85587-{
85588- mutex_unlock(&sock_diag_table_mutex);
85589-}
85590-
85591 static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
85592 {
85593 int err;
85594@@ -124,12 +121,17 @@ static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
85595 if (req->sdiag_family >= AF_MAX)
85596 return -EINVAL;
85597
85598- hndl = sock_diag_lock_handler(req->sdiag_family);
85599+ if (sock_diag_handlers[req->sdiag_family] == NULL)
85600+ request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
85601+ NETLINK_SOCK_DIAG, req->sdiag_family);
85602+
85603+ mutex_lock(&sock_diag_table_mutex);
85604+ hndl = sock_diag_handlers[req->sdiag_family];
85605 if (hndl == NULL)
85606 err = -ENOENT;
85607 else
85608 err = hndl->dump(skb, nlh);
85609- sock_diag_unlock_handler(hndl);
85610+ mutex_unlock(&sock_diag_table_mutex);
85611
85612 return err;
85613 }
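
Two things happen in the sock_diag.c hunk: the handler array gains __read_only with pax_open_kernel()/pax_close_kernel() bracketing its two legitimate writes, removing a classic writable function-pointer-table target; and the oddly declared "static const inline" lock/unlock helper pair is folded into __sock_diag_rcv_msg(), making the mutex scope explicit at the call site. The registration half, sketched with stand-in names:

    #define AF_MAX_LIKE 46

    struct diag_handler { int family; };

    static void pax_open_kernel(void)  { /* lift write protection */ }
    static void pax_close_kernel(void) { /* restore it */ }

    /* With the table in read-only memory, these bracketed stores are
     * the only sanctioned writes; stray corruption of a slot faults. */
    static const struct diag_handler *handlers[AF_MAX_LIKE];

    static void diag_register(int family, const struct diag_handler *h)
    {
        pax_open_kernel();
        handlers[family] = h;
        pax_close_kernel();
    }
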
85614diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
85615index d1b0804..4aed0a5 100644
85616--- a/net/core/sysctl_net_core.c
85617+++ b/net/core/sysctl_net_core.c
85618@@ -26,7 +26,7 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
85619 {
85620 unsigned int orig_size, size;
85621 int ret, i;
85622- ctl_table tmp = {
85623+ ctl_table_no_const tmp = {
85624 .data = &size,
85625 .maxlen = sizeof(size),
85626 .mode = table->mode
85627@@ -205,13 +205,12 @@ static struct ctl_table netns_core_table[] = {
85628
85629 static __net_init int sysctl_core_net_init(struct net *net)
85630 {
85631- struct ctl_table *tbl;
85632+ ctl_table_no_const *tbl = NULL;
85633
85634 net->core.sysctl_somaxconn = SOMAXCONN;
85635
85636- tbl = netns_core_table;
85637 if (!net_eq(net, &init_net)) {
85638- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
85639+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
85640 if (tbl == NULL)
85641 goto err_dup;
85642
85643@@ -221,16 +220,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
85644 if (net->user_ns != &init_user_ns) {
85645 tbl[0].procname = NULL;
85646 }
85647- }
85648-
85649- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
85650+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
85651+ } else
85652+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
85653 if (net->core.sysctl_hdr == NULL)
85654 goto err_reg;
85655
85656 return 0;
85657
85658 err_reg:
85659- if (tbl != netns_core_table)
85660+ if (tbl)
85661 kfree(tbl);
85662 err_dup:
85663 return -ENOMEM;
85664@@ -246,7 +245,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
85665 kfree(tbl);
85666 }
85667
85668-static __net_initdata struct pernet_operations sysctl_core_ops = {
85669+static __net_initconst struct pernet_operations sysctl_core_ops = {
85670 .init = sysctl_core_net_init,
85671 .exit = sysctl_core_net_exit,
85672 };
85673diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
85674index 307c322..78a4c6f 100644
85675--- a/net/decnet/af_decnet.c
85676+++ b/net/decnet/af_decnet.c
85677@@ -468,6 +468,7 @@ static struct proto dn_proto = {
85678 .sysctl_rmem = sysctl_decnet_rmem,
85679 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
85680 .obj_size = sizeof(struct dn_sock),
85681+ .slab_flags = SLAB_USERCOPY,
85682 };
85683
85684 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
85685diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
85686index a55eecc..dd8428c 100644
85687--- a/net/decnet/sysctl_net_decnet.c
85688+++ b/net/decnet/sysctl_net_decnet.c
85689@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
85690
85691 if (len > *lenp) len = *lenp;
85692
85693- if (copy_to_user(buffer, addr, len))
85694+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
85695 return -EFAULT;
85696
85697 *lenp = len;
85698@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
85699
85700 if (len > *lenp) len = *lenp;
85701
85702- if (copy_to_user(buffer, devname, len))
85703+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
85704 return -EFAULT;
85705
85706 *lenp = len;
85707diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
85708index fcf104e..95552d4 100644
85709--- a/net/ipv4/af_inet.c
85710+++ b/net/ipv4/af_inet.c
85711@@ -1717,13 +1717,9 @@ static int __init inet_init(void)
85712
85713 BUILD_BUG_ON(sizeof(struct inet_skb_parm) > sizeof(dummy_skb->cb));
85714
85715- sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
85716- if (!sysctl_local_reserved_ports)
85717- goto out;
85718-
85719 rc = proto_register(&tcp_prot, 1);
85720 if (rc)
85721- goto out_free_reserved_ports;
85722+ goto out;
85723
85724 rc = proto_register(&udp_prot, 1);
85725 if (rc)
85726@@ -1832,8 +1828,6 @@ out_unregister_udp_proto:
85727 proto_unregister(&udp_prot);
85728 out_unregister_tcp_proto:
85729 proto_unregister(&tcp_prot);
85730-out_free_reserved_ports:
85731- kfree(sysctl_local_reserved_ports);
85732 goto out;
85733 }
85734
85735diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
85736index a69b4e4..dbccba5 100644
85737--- a/net/ipv4/ah4.c
85738+++ b/net/ipv4/ah4.c
85739@@ -421,7 +421,7 @@ static void ah4_err(struct sk_buff *skb, u32 info)
85740 return;
85741
85742 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
85743- atomic_inc(&flow_cache_genid);
85744+ atomic_inc_unchecked(&flow_cache_genid);
85745 rt_genid_bump(net);
85746
85747 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
85748diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
85749index a8e4f26..25e5f40 100644
85750--- a/net/ipv4/devinet.c
85751+++ b/net/ipv4/devinet.c
85752@@ -1763,7 +1763,7 @@ static int ipv4_doint_and_flush(ctl_table *ctl, int write,
85753 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
85754 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
85755
85756-static struct devinet_sysctl_table {
85757+static const struct devinet_sysctl_table {
85758 struct ctl_table_header *sysctl_header;
85759 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
85760 } devinet_sysctl = {
85761@@ -1881,7 +1881,7 @@ static __net_init int devinet_init_net(struct net *net)
85762 int err;
85763 struct ipv4_devconf *all, *dflt;
85764 #ifdef CONFIG_SYSCTL
85765- struct ctl_table *tbl = ctl_forward_entry;
85766+ ctl_table_no_const *tbl = NULL;
85767 struct ctl_table_header *forw_hdr;
85768 #endif
85769
85770@@ -1899,7 +1899,7 @@ static __net_init int devinet_init_net(struct net *net)
85771 goto err_alloc_dflt;
85772
85773 #ifdef CONFIG_SYSCTL
85774- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
85775+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
85776 if (tbl == NULL)
85777 goto err_alloc_ctl;
85778
85779@@ -1919,7 +1919,10 @@ static __net_init int devinet_init_net(struct net *net)
85780 goto err_reg_dflt;
85781
85782 err = -ENOMEM;
85783- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
85784+ if (!net_eq(net, &init_net))
85785+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
85786+ else
85787+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
85788 if (forw_hdr == NULL)
85789 goto err_reg_ctl;
85790 net->ipv4.forw_hdr = forw_hdr;
85791@@ -1935,8 +1938,7 @@ err_reg_ctl:
85792 err_reg_dflt:
85793 __devinet_sysctl_unregister(all);
85794 err_reg_all:
85795- if (tbl != ctl_forward_entry)
85796- kfree(tbl);
85797+ kfree(tbl);
85798 err_alloc_ctl:
85799 #endif
85800 if (dflt != &ipv4_devconf_dflt)
85801diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
85802index 3b4f0cd..8cb864c 100644
85803--- a/net/ipv4/esp4.c
85804+++ b/net/ipv4/esp4.c
85805@@ -503,7 +503,7 @@ static void esp4_err(struct sk_buff *skb, u32 info)
85806 return;
85807
85808 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
85809- atomic_inc(&flow_cache_genid);
85810+ atomic_inc_unchecked(&flow_cache_genid);
85811 rt_genid_bump(net);
85812
85813 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
85814diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
85815index 5cd75e2..f57ef39 100644
85816--- a/net/ipv4/fib_frontend.c
85817+++ b/net/ipv4/fib_frontend.c
85818@@ -1020,12 +1020,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
85819 #ifdef CONFIG_IP_ROUTE_MULTIPATH
85820 fib_sync_up(dev);
85821 #endif
85822- atomic_inc(&net->ipv4.dev_addr_genid);
85823+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
85824 rt_cache_flush(dev_net(dev));
85825 break;
85826 case NETDEV_DOWN:
85827 fib_del_ifaddr(ifa, NULL);
85828- atomic_inc(&net->ipv4.dev_addr_genid);
85829+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
85830 if (ifa->ifa_dev->ifa_list == NULL) {
85831 /* Last address was deleted from this interface.
85832 * Disable IP.
85833@@ -1061,7 +1061,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
85834 #ifdef CONFIG_IP_ROUTE_MULTIPATH
85835 fib_sync_up(dev);
85836 #endif
85837- atomic_inc(&net->ipv4.dev_addr_genid);
85838+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
85839 rt_cache_flush(net);
85840 break;
85841 case NETDEV_DOWN:
85842diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
85843index 4797a80..2bd54e9 100644
85844--- a/net/ipv4/fib_semantics.c
85845+++ b/net/ipv4/fib_semantics.c
85846@@ -767,7 +767,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
85847 nh->nh_saddr = inet_select_addr(nh->nh_dev,
85848 nh->nh_gw,
85849 nh->nh_parent->fib_scope);
85850- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
85851+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
85852
85853 return nh->nh_saddr;
85854 }
85855diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
85856index d0670f0..744ac80 100644
85857--- a/net/ipv4/inet_connection_sock.c
85858+++ b/net/ipv4/inet_connection_sock.c
85859@@ -37,7 +37,7 @@ struct local_ports sysctl_local_ports __read_mostly = {
85860 .range = { 32768, 61000 },
85861 };
85862
85863-unsigned long *sysctl_local_reserved_ports;
85864+unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
85865 EXPORT_SYMBOL(sysctl_local_reserved_ports);
85866
85867 void inet_get_local_port_range(int *low, int *high)
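
The af_inet.c and inet_connection_sock.c hunks above are one change: the 8 KB reserved-ports bitmap stops being a kzalloc()'d pointer and becomes a fixed static array, which deletes an allocation failure path from inet_init() and, with the pointer indirection gone, leaves one less writable kernel pointer to target. The sizing arithmetic, spelled out:

    #include <limits.h>

    /* 65536 ports -> 65536 bits -> 8192 bytes, packed into unsigned
     * longs (1024 of them on LP64). Same storage, no allocation. */
    static unsigned long reserved_ports[65536 / 8 / sizeof(unsigned long)];

    static int port_is_reserved(unsigned int port)
    {
        const unsigned int bits = sizeof(unsigned long) * CHAR_BIT;
        return (reserved_ports[port / bits] >> (port % bits)) & 1UL;
    }
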
85868diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
85869index fa3ae81..0dbe6b8 100644
85870--- a/net/ipv4/inet_hashtables.c
85871+++ b/net/ipv4/inet_hashtables.c
85872@@ -18,12 +18,15 @@
85873 #include <linux/sched.h>
85874 #include <linux/slab.h>
85875 #include <linux/wait.h>
85876+#include <linux/security.h>
85877
85878 #include <net/inet_connection_sock.h>
85879 #include <net/inet_hashtables.h>
85880 #include <net/secure_seq.h>
85881 #include <net/ip.h>
85882
85883+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
85884+
85885 /*
85886 * Allocate and initialize a new local port bind bucket.
85887 * The bindhash mutex for snum's hash chain must be held here.
85888@@ -540,6 +543,8 @@ ok:
85889 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
85890 spin_unlock(&head->lock);
85891
85892+ gr_update_task_in_ip_table(current, inet_sk(sk));
85893+
85894 if (tw) {
85895 inet_twsk_deschedule(tw, death_row);
85896 while (twrefcnt) {
85897diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
85898index 000e3d2..5472da3 100644
85899--- a/net/ipv4/inetpeer.c
85900+++ b/net/ipv4/inetpeer.c
85901@@ -503,8 +503,8 @@ relookup:
85902 if (p) {
85903 p->daddr = *daddr;
85904 atomic_set(&p->refcnt, 1);
85905- atomic_set(&p->rid, 0);
85906- atomic_set(&p->ip_id_count,
85907+ atomic_set_unchecked(&p->rid, 0);
85908+ atomic_set_unchecked(&p->ip_id_count,
85909 (daddr->family == AF_INET) ?
85910 secure_ip_id(daddr->addr.a4) :
85911 secure_ipv6_id(daddr->addr.a6));
85912diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
85913index eb9d63a..31c5372 100644
85914--- a/net/ipv4/ip_fragment.c
85915+++ b/net/ipv4/ip_fragment.c
85916@@ -322,7 +322,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
85917 return 0;
85918
85919 start = qp->rid;
85920- end = atomic_inc_return(&peer->rid);
85921+ end = atomic_inc_return_unchecked(&peer->rid);
85922 qp->rid = end;
85923
85924 rc = qp->q.fragments && (end - start) > max;
85925@@ -789,12 +789,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
85926
85927 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
85928 {
85929- struct ctl_table *table;
85930+ ctl_table_no_const *table = NULL;
85931 struct ctl_table_header *hdr;
85932
85933- table = ip4_frags_ns_ctl_table;
85934 if (!net_eq(net, &init_net)) {
85935- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
85936+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
85937 if (table == NULL)
85938 goto err_alloc;
85939
85940@@ -805,9 +804,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
85941 /* Don't export sysctls to unprivileged users */
85942 if (net->user_ns != &init_user_ns)
85943 table[0].procname = NULL;
85944- }
85945+ hdr = register_net_sysctl(net, "net/ipv4", table);
85946+ } else
85947+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
85948
85949- hdr = register_net_sysctl(net, "net/ipv4", table);
85950 if (hdr == NULL)
85951 goto err_reg;
85952
85953@@ -815,8 +815,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
85954 return 0;
85955
85956 err_reg:
85957- if (!net_eq(net, &init_net))
85958- kfree(table);
85959+ kfree(table);
85960 err_alloc:
85961 return -ENOMEM;
85962 }
85963diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
85964index e81b1ca..6f3b5b9 100644
85965--- a/net/ipv4/ip_gre.c
85966+++ b/net/ipv4/ip_gre.c
85967@@ -124,7 +124,7 @@ static bool log_ecn_error = true;
85968 module_param(log_ecn_error, bool, 0644);
85969 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
85970
85971-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
85972+static struct rtnl_link_ops ipgre_link_ops;
85973 static int ipgre_tunnel_init(struct net_device *dev);
85974 static void ipgre_tunnel_setup(struct net_device *dev);
85975 static int ipgre_tunnel_bind_dev(struct net_device *dev);
85976@@ -1756,7 +1756,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
85977 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
85978 };
85979
85980-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
85981+static struct rtnl_link_ops ipgre_link_ops = {
85982 .kind = "gre",
85983 .maxtype = IFLA_GRE_MAX,
85984 .policy = ipgre_policy,
85985@@ -1769,7 +1769,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
85986 .fill_info = ipgre_fill_info,
85987 };
85988
85989-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
85990+static struct rtnl_link_ops ipgre_tap_ops = {
85991 .kind = "gretap",
85992 .maxtype = IFLA_GRE_MAX,
85993 .policy = ipgre_policy,
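
Dropping __read_mostly from these rtnl_link_ops (and from vti_link_ops and ipip_link_ops below) looks like a de-optimization but is the opposite: __read_mostly pins an object into the writable .data..read_mostly section, which would fight the constify plugin's attempt to move a fully statically initialized ops structure into read-only memory. A minimal rendering of the conflict (section name per the kernel's convention; struct is a stand-in):

    /* The attribute forces placement in a writable section; omitting it
     * leaves the toolchain/plugin free to use read-only storage for an
     * object never written after its static initializer. */
    #define __read_mostly __attribute__((section(".data..read_mostly")))

    struct link_ops_like { const char *kind; };

    static struct link_ops_like gre_ops /* no __read_mostly */ = {
        .kind = "gre",
    };
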
85994diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
85995index d9c4f11..02b82db 100644
85996--- a/net/ipv4/ip_sockglue.c
85997+++ b/net/ipv4/ip_sockglue.c
85998@@ -1152,7 +1152,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
85999 len = min_t(unsigned int, len, opt->optlen);
86000 if (put_user(len, optlen))
86001 return -EFAULT;
86002- if (copy_to_user(optval, opt->__data, len))
86003+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
86004+ copy_to_user(optval, opt->__data, len))
86005 return -EFAULT;
86006 return 0;
86007 }
86008@@ -1283,7 +1284,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
86009 if (sk->sk_type != SOCK_STREAM)
86010 return -ENOPROTOOPT;
86011
86012- msg.msg_control = optval;
86013+ msg.msg_control = (void __force_kernel *)optval;
86014 msg.msg_controllen = len;
86015 msg.msg_flags = flags;
86016
86017diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
86018index c3a4233..1412161 100644
86019--- a/net/ipv4/ip_vti.c
86020+++ b/net/ipv4/ip_vti.c
86021@@ -47,7 +47,7 @@
86022 #define HASH_SIZE 16
86023 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&(HASH_SIZE-1))
86024
86025-static struct rtnl_link_ops vti_link_ops __read_mostly;
86026+static struct rtnl_link_ops vti_link_ops;
86027
86028 static int vti_net_id __read_mostly;
86029 struct vti_net {
86030@@ -886,7 +886,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
86031 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
86032 };
86033
86034-static struct rtnl_link_ops vti_link_ops __read_mostly = {
86035+static struct rtnl_link_ops vti_link_ops = {
86036 .kind = "vti",
86037 .maxtype = IFLA_VTI_MAX,
86038 .policy = vti_policy,
86039diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
86040index 9a46dae..5f793a0 100644
86041--- a/net/ipv4/ipcomp.c
86042+++ b/net/ipv4/ipcomp.c
86043@@ -48,7 +48,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
86044 return;
86045
86046 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
86047- atomic_inc(&flow_cache_genid);
86048+ atomic_inc_unchecked(&flow_cache_genid);
86049 rt_genid_bump(net);
86050
86051 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
86052diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
86053index a2e50ae..e152b7c 100644
86054--- a/net/ipv4/ipconfig.c
86055+++ b/net/ipv4/ipconfig.c
86056@@ -323,7 +323,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
86057
86058 mm_segment_t oldfs = get_fs();
86059 set_fs(get_ds());
86060- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
86061+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
86062 set_fs(oldfs);
86063 return res;
86064 }
86065@@ -334,7 +334,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
86066
86067 mm_segment_t oldfs = get_fs();
86068 set_fs(get_ds());
86069- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
86070+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
86071 set_fs(oldfs);
86072 return res;
86073 }
86074@@ -345,7 +345,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
86075
86076 mm_segment_t oldfs = get_fs();
86077 set_fs(get_ds());
86078- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
86079+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
86080 set_fs(oldfs);
86081 return res;
86082 }
86083diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
86084index 191fc24..1b3b804 100644
86085--- a/net/ipv4/ipip.c
86086+++ b/net/ipv4/ipip.c
86087@@ -138,7 +138,7 @@ struct ipip_net {
86088 static int ipip_tunnel_init(struct net_device *dev);
86089 static void ipip_tunnel_setup(struct net_device *dev);
86090 static void ipip_dev_free(struct net_device *dev);
86091-static struct rtnl_link_ops ipip_link_ops __read_mostly;
86092+static struct rtnl_link_ops ipip_link_ops;
86093
86094 static struct rtnl_link_stats64 *ipip_get_stats64(struct net_device *dev,
86095 struct rtnl_link_stats64 *tot)
86096@@ -972,7 +972,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
86097 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
86098 };
86099
86100-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
86101+static struct rtnl_link_ops ipip_link_ops = {
86102 .kind = "ipip",
86103 .maxtype = IFLA_IPTUN_MAX,
86104 .policy = ipip_policy,
86105diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
86106index 3ea4127..849297b 100644
86107--- a/net/ipv4/netfilter/arp_tables.c
86108+++ b/net/ipv4/netfilter/arp_tables.c
86109@@ -879,14 +879,14 @@ static int compat_table_info(const struct xt_table_info *info,
86110 #endif
86111
86112 static int get_info(struct net *net, void __user *user,
86113- const int *len, int compat)
86114+ int len, int compat)
86115 {
86116 char name[XT_TABLE_MAXNAMELEN];
86117 struct xt_table *t;
86118 int ret;
86119
86120- if (*len != sizeof(struct arpt_getinfo)) {
86121- duprintf("length %u != %Zu\n", *len,
86122+ if (len != sizeof(struct arpt_getinfo)) {
86123+ duprintf("length %u != %Zu\n", len,
86124 sizeof(struct arpt_getinfo));
86125 return -EINVAL;
86126 }
86127@@ -923,7 +923,7 @@ static int get_info(struct net *net, void __user *user,
86128 info.size = private->size;
86129 strcpy(info.name, name);
86130
86131- if (copy_to_user(user, &info, *len) != 0)
86132+ if (copy_to_user(user, &info, len) != 0)
86133 ret = -EFAULT;
86134 else
86135 ret = 0;
86136@@ -1682,7 +1682,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
86137
86138 switch (cmd) {
86139 case ARPT_SO_GET_INFO:
86140- ret = get_info(sock_net(sk), user, len, 1);
86141+ ret = get_info(sock_net(sk), user, *len, 1);
86142 break;
86143 case ARPT_SO_GET_ENTRIES:
86144 ret = compat_get_entries(sock_net(sk), user, len);
86145@@ -1727,7 +1727,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
86146
86147 switch (cmd) {
86148 case ARPT_SO_GET_INFO:
86149- ret = get_info(sock_net(sk), user, len, 0);
86150+ ret = get_info(sock_net(sk), user, *len, 0);
86151 break;
86152
86153 case ARPT_SO_GET_ENTRIES:
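
get_info() previously received a pointer to the length and dereferenced it in two places, once for validation and once for the copy-out. Passing the length by value snapshots it in the caller, so the value validated is provably the value used; it also hands grsecurity's size-overflow instrumentation a plain integer to track (that reading is inferred, the patch does not state its rationale). The identical change is made to ip_tables.c and ip6_tables.c below. Sketch of the snapshot-then-use shape:

#include <linux/netfilter_arp/arp_tables.h>

static int get_info_sketch(void __user *user, int len)
{
	struct arpt_getinfo info;

	if (len != sizeof(info))	/* same 'len' as the copy below */
		return -EINVAL;
	memset(&info, 0, sizeof(info));
	/* ... fill in info from the table ... */
	return copy_to_user(user, &info, len) ? -EFAULT : 0;
}
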
86154diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
86155index 17c5e06..1b91206 100644
86156--- a/net/ipv4/netfilter/ip_tables.c
86157+++ b/net/ipv4/netfilter/ip_tables.c
86158@@ -1068,14 +1068,14 @@ static int compat_table_info(const struct xt_table_info *info,
86159 #endif
86160
86161 static int get_info(struct net *net, void __user *user,
86162- const int *len, int compat)
86163+ int len, int compat)
86164 {
86165 char name[XT_TABLE_MAXNAMELEN];
86166 struct xt_table *t;
86167 int ret;
86168
86169- if (*len != sizeof(struct ipt_getinfo)) {
86170- duprintf("length %u != %zu\n", *len,
86171+ if (len != sizeof(struct ipt_getinfo)) {
86172+ duprintf("length %u != %zu\n", len,
86173 sizeof(struct ipt_getinfo));
86174 return -EINVAL;
86175 }
86176@@ -1112,7 +1112,7 @@ static int get_info(struct net *net, void __user *user,
86177 info.size = private->size;
86178 strcpy(info.name, name);
86179
86180- if (copy_to_user(user, &info, *len) != 0)
86181+ if (copy_to_user(user, &info, len) != 0)
86182 ret = -EFAULT;
86183 else
86184 ret = 0;
86185@@ -1966,7 +1966,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
86186
86187 switch (cmd) {
86188 case IPT_SO_GET_INFO:
86189- ret = get_info(sock_net(sk), user, len, 1);
86190+ ret = get_info(sock_net(sk), user, *len, 1);
86191 break;
86192 case IPT_SO_GET_ENTRIES:
86193 ret = compat_get_entries(sock_net(sk), user, len);
86194@@ -2013,7 +2013,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
86195
86196 switch (cmd) {
86197 case IPT_SO_GET_INFO:
86198- ret = get_info(sock_net(sk), user, len, 0);
86199+ ret = get_info(sock_net(sk), user, *len, 0);
86200 break;
86201
86202 case IPT_SO_GET_ENTRIES:
86203diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
86204index dc454cc..5bb917f 100644
86205--- a/net/ipv4/ping.c
86206+++ b/net/ipv4/ping.c
86207@@ -844,7 +844,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
86208 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
86209 0, sock_i_ino(sp),
86210 atomic_read(&sp->sk_refcnt), sp,
86211- atomic_read(&sp->sk_drops), len);
86212+ atomic_read_unchecked(&sp->sk_drops), len);
86213 }
86214
86215 static int ping_seq_show(struct seq_file *seq, void *v)
86216diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
86217index 6f08991..55867ad 100644
86218--- a/net/ipv4/raw.c
86219+++ b/net/ipv4/raw.c
86220@@ -311,7 +311,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
86221 int raw_rcv(struct sock *sk, struct sk_buff *skb)
86222 {
86223 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
86224- atomic_inc(&sk->sk_drops);
86225+ atomic_inc_unchecked(&sk->sk_drops);
86226 kfree_skb(skb);
86227 return NET_RX_DROP;
86228 }
86229@@ -747,16 +747,20 @@ static int raw_init(struct sock *sk)
86230
86231 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
86232 {
86233+ struct icmp_filter filter;
86234+
86235 if (optlen > sizeof(struct icmp_filter))
86236 optlen = sizeof(struct icmp_filter);
86237- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
86238+ if (copy_from_user(&filter, optval, optlen))
86239 return -EFAULT;
86240+ raw_sk(sk)->filter = filter;
86241 return 0;
86242 }
86243
86244 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
86245 {
86246 int len, ret = -EFAULT;
86247+ struct icmp_filter filter;
86248
86249 if (get_user(len, optlen))
86250 goto out;
86251@@ -766,8 +770,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
86252 if (len > sizeof(struct icmp_filter))
86253 len = sizeof(struct icmp_filter);
86254 ret = -EFAULT;
86255- if (put_user(len, optlen) ||
86256- copy_to_user(optval, &raw_sk(sk)->filter, len))
86257+ filter = raw_sk(sk)->filter;
86258+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
86259 goto out;
86260 ret = 0;
86261 out: return ret;
86262@@ -998,7 +1002,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
86263 0, 0L, 0,
86264 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
86265 0, sock_i_ino(sp),
86266- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
86267+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
86268 }
86269
86270 static int raw_seq_show(struct seq_file *seq, void *v)
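
Both filter accessors now stage the data in a correctly sized stack object instead of letting copy_from_user()/copy_to_user() touch the live socket structure directly, and the get path additionally refuses lengths larger than the intermediate. The rawv6 versions later in this patch are identical. The pattern, condensed:

static int set_icmp_filter(struct sock *sk, char __user *optval, int optlen)
{
	struct icmp_filter filter;	/* bounce buffer on the stack */

	if (optlen > sizeof(filter))
		optlen = sizeof(filter);
	if (copy_from_user(&filter, optval, optlen))
		return -EFAULT;		/* socket state untouched on fault */
	raw_sk(sk)->filter = filter;	/* single struct assignment */
	return 0;
}
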
86271diff --git a/net/ipv4/route.c b/net/ipv4/route.c
86272index a0fcc47..32e2c89 100644
86273--- a/net/ipv4/route.c
86274+++ b/net/ipv4/route.c
86275@@ -2552,34 +2552,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
86276 .maxlen = sizeof(int),
86277 .mode = 0200,
86278 .proc_handler = ipv4_sysctl_rtcache_flush,
86279+ .extra1 = &init_net,
86280 },
86281 { },
86282 };
86283
86284 static __net_init int sysctl_route_net_init(struct net *net)
86285 {
86286- struct ctl_table *tbl;
86287+ ctl_table_no_const *tbl = NULL;
86288
86289- tbl = ipv4_route_flush_table;
86290 if (!net_eq(net, &init_net)) {
86291- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
86292+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
86293 if (tbl == NULL)
86294 goto err_dup;
86295
86296 /* Don't export sysctls to unprivileged users */
86297 if (net->user_ns != &init_user_ns)
86298 tbl[0].procname = NULL;
86299- }
86300- tbl[0].extra1 = net;
86301+ tbl[0].extra1 = net;
86302+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
86303+ } else
86304+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
86305
86306- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
86307 if (net->ipv4.route_hdr == NULL)
86308 goto err_reg;
86309 return 0;
86310
86311 err_reg:
86312- if (tbl != ipv4_route_flush_table)
86313- kfree(tbl);
86314+ kfree(tbl);
86315 err_dup:
86316 return -ENOMEM;
86317 }
86318@@ -2602,7 +2602,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
86319
86320 static __net_init int rt_genid_init(struct net *net)
86321 {
86322- atomic_set(&net->rt_genid, 0);
86323+ atomic_set_unchecked(&net->rt_genid, 0);
86324 get_random_bytes(&net->ipv4.dev_addr_genid,
86325 sizeof(net->ipv4.dev_addr_genid));
86326 return 0;
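
The atomic_*_unchecked() conversions here (and on sk_drops, flow_cache_genid, autobind_name and acqseq elsewhere in this section) mark counters that may legitimately wrap: under PaX's REFCOUNT hardening, plain atomic_t operations trap on overflow, so wrap-tolerant statistics and generation counters are moved to unchecked variants. A plausible sketch of the fallback definitions when the feature is disabled (assumed shape, not quoted from the patch):

#ifndef CONFIG_PAX_REFCOUNT
typedef atomic_t atomic_unchecked_t;
#define atomic_read_unchecked(v)	atomic_read(v)
#define atomic_set_unchecked(v, i)	atomic_set((v), (i))
#define atomic_inc_unchecked(v)		atomic_inc(v)
#define atomic_inc_return_unchecked(v)	atomic_inc_return(v)
#endif
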
86327diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
86328index d84400b..62e066e 100644
86329--- a/net/ipv4/sysctl_net_ipv4.c
86330+++ b/net/ipv4/sysctl_net_ipv4.c
86331@@ -54,7 +54,7 @@ static int ipv4_local_port_range(ctl_table *table, int write,
86332 {
86333 int ret;
86334 int range[2];
86335- ctl_table tmp = {
86336+ ctl_table_no_const tmp = {
86337 .data = &range,
86338 .maxlen = sizeof(range),
86339 .mode = table->mode,
86340@@ -107,7 +107,7 @@ static int ipv4_ping_group_range(ctl_table *table, int write,
86341 int ret;
86342 gid_t urange[2];
86343 kgid_t low, high;
86344- ctl_table tmp = {
86345+ ctl_table_no_const tmp = {
86346 .data = &urange,
86347 .maxlen = sizeof(urange),
86348 .mode = table->mode,
86349@@ -138,7 +138,7 @@ static int proc_tcp_congestion_control(ctl_table *ctl, int write,
86350 void __user *buffer, size_t *lenp, loff_t *ppos)
86351 {
86352 char val[TCP_CA_NAME_MAX];
86353- ctl_table tbl = {
86354+ ctl_table_no_const tbl = {
86355 .data = val,
86356 .maxlen = TCP_CA_NAME_MAX,
86357 };
86358@@ -157,7 +157,7 @@ static int proc_tcp_available_congestion_control(ctl_table *ctl,
86359 void __user *buffer, size_t *lenp,
86360 loff_t *ppos)
86361 {
86362- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
86363+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
86364 int ret;
86365
86366 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
86367@@ -174,7 +174,7 @@ static int proc_allowed_congestion_control(ctl_table *ctl,
86368 void __user *buffer, size_t *lenp,
86369 loff_t *ppos)
86370 {
86371- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
86372+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
86373 int ret;
86374
86375 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
86376@@ -200,15 +200,17 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
86377 struct mem_cgroup *memcg;
86378 #endif
86379
86380- ctl_table tmp = {
86381+ ctl_table_no_const tmp = {
86382 .data = &vec,
86383 .maxlen = sizeof(vec),
86384 .mode = ctl->mode,
86385 };
86386
86387 if (!write) {
86388- ctl->data = &net->ipv4.sysctl_tcp_mem;
86389- return proc_doulongvec_minmax(ctl, write, buffer, lenp, ppos);
86390+ ctl_table_no_const tcp_mem = *ctl;
86391+
86392+ tcp_mem.data = &net->ipv4.sysctl_tcp_mem;
86393+ return proc_doulongvec_minmax(&tcp_mem, write, buffer, lenp, ppos);
86394 }
86395
86396 ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
86397@@ -235,7 +237,7 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
86398 int proc_tcp_fastopen_key(ctl_table *ctl, int write, void __user *buffer,
86399 size_t *lenp, loff_t *ppos)
86400 {
86401- ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
86402+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
86403 struct tcp_fastopen_context *ctxt;
86404 int ret;
86405 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
86406@@ -476,7 +478,7 @@ static struct ctl_table ipv4_table[] = {
86407 },
86408 {
86409 .procname = "ip_local_reserved_ports",
86410- .data = NULL, /* initialized in sysctl_ipv4_init */
86411+ .data = sysctl_local_reserved_ports,
86412 .maxlen = 65536,
86413 .mode = 0644,
86414 .proc_handler = proc_do_large_bitmap,
86415@@ -860,11 +862,10 @@ static struct ctl_table ipv4_net_table[] = {
86416
86417 static __net_init int ipv4_sysctl_init_net(struct net *net)
86418 {
86419- struct ctl_table *table;
86420+ ctl_table_no_const *table = NULL;
86421
86422- table = ipv4_net_table;
86423 if (!net_eq(net, &init_net)) {
86424- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
86425+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
86426 if (table == NULL)
86427 goto err_alloc;
86428
86429@@ -897,15 +898,17 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
86430
86431 tcp_init_mem(net);
86432
86433- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
86434+ if (!net_eq(net, &init_net))
86435+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
86436+ else
86437+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
86438 if (net->ipv4.ipv4_hdr == NULL)
86439 goto err_reg;
86440
86441 return 0;
86442
86443 err_reg:
86444- if (!net_eq(net, &init_net))
86445- kfree(table);
86446+ kfree(table);
86447 err_alloc:
86448 return -ENOMEM;
86449 }
86450@@ -927,16 +930,6 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
86451 static __init int sysctl_ipv4_init(void)
86452 {
86453 struct ctl_table_header *hdr;
86454- struct ctl_table *i;
86455-
86456- for (i = ipv4_table; i->procname; i++) {
86457- if (strcmp(i->procname, "ip_local_reserved_ports") == 0) {
86458- i->data = sysctl_local_reserved_ports;
86459- break;
86460- }
86461- }
86462- if (!i->procname)
86463- return -EINVAL;
86464
86465 hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
86466 if (hdr == NULL)
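
Beyond the registration pattern already shown, ipv4_tcp_mem() illustrates the other half of constification: a proc handler may no longer retarget ctl->data on a table that is now read-only, so the read path clones the entry into a writable stack copy first. The on-stack ctl_table temporaries throughout this file become ctl_table_no_const for the same reason. Sketch (demo_per_net_value is hypothetical):

static unsigned long demo_per_net_value[3];

static int demo_handler(struct ctl_table *ctl, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	if (!write) {
		ctl_table_no_const copy = *ctl;	/* shallow writable copy */

		copy.data = demo_per_net_value;	/* retarget the copy, not ctl */
		return proc_doulongvec_minmax(&copy, write, buffer, lenp, ppos);
	}
	/* write path elided */
	return 0;
}
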
86467diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
86468index 66702d3..31ff8f9 100644
86469--- a/net/ipv4/tcp_input.c
86470+++ b/net/ipv4/tcp_input.c
86471@@ -4733,7 +4733,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
86472 * simplifies code)
86473 */
86474 static void
86475-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
86476+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
86477 struct sk_buff *head, struct sk_buff *tail,
86478 u32 start, u32 end)
86479 {
86480@@ -5850,6 +5850,7 @@ discard:
86481 tcp_paws_reject(&tp->rx_opt, 0))
86482 goto discard_and_undo;
86483
86484+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
86485 if (th->syn) {
86486 /* We see SYN without ACK. It is attempt of
86487 * simultaneous connect with crossed SYNs.
86488@@ -5900,6 +5901,7 @@ discard:
86489 goto discard;
86490 #endif
86491 }
86492+#endif
86493 /* "fifth, if neither of the SYN or RST bits is set then
86494 * drop the segment and return."
86495 */
86496@@ -5944,7 +5946,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
86497 goto discard;
86498
86499 if (th->syn) {
86500- if (th->fin)
86501+ if (th->fin || th->urg || th->psh)
86502 goto discard;
86503 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
86504 return 1;
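
Two hardening changes here: CONFIG_GRKERNSEC_NO_SIMULT_CONNECT compiles out RFC 793 simultaneous-open handling, so a SYN without ACK arriving in SYN_SENT is simply dropped, and tcp_rcv_state_process() now discards connection-opening SYNs that also carry FIN, URG or PSH. The latter check, isolated:

#include <linux/tcp.h>

/* A legitimate connection-opening SYN carries none of these flags. */
static bool syn_is_malformed(const struct tcphdr *th)
{
	return th->fin || th->urg || th->psh;
}
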
86505diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
86506index eadb693..e8f7251 100644
86507--- a/net/ipv4/tcp_ipv4.c
86508+++ b/net/ipv4/tcp_ipv4.c
86509@@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
86510 EXPORT_SYMBOL(sysctl_tcp_low_latency);
86511
86512
86513+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86514+extern int grsec_enable_blackhole;
86515+#endif
86516+
86517 #ifdef CONFIG_TCP_MD5SIG
86518 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
86519 __be32 daddr, __be32 saddr, const struct tcphdr *th);
86520@@ -1895,6 +1899,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
86521 return 0;
86522
86523 reset:
86524+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86525+ if (!grsec_enable_blackhole)
86526+#endif
86527 tcp_v4_send_reset(rsk, skb);
86528 discard:
86529 kfree_skb(skb);
86530@@ -1994,12 +2001,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
86531 TCP_SKB_CB(skb)->sacked = 0;
86532
86533 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
86534- if (!sk)
86535+ if (!sk) {
86536+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86537+ ret = 1;
86538+#endif
86539 goto no_tcp_socket;
86540-
86541+ }
86542 process:
86543- if (sk->sk_state == TCP_TIME_WAIT)
86544+ if (sk->sk_state == TCP_TIME_WAIT) {
86545+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86546+ ret = 2;
86547+#endif
86548 goto do_time_wait;
86549+ }
86550
86551 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
86552 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
86553@@ -2050,6 +2064,10 @@ no_tcp_socket:
86554 bad_packet:
86555 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
86556 } else {
86557+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86558+ if (!grsec_enable_blackhole || (ret == 1 &&
86559+ (skb->dev->flags & IFF_LOOPBACK)))
86560+#endif
86561 tcp_v4_send_reset(NULL, skb);
86562 }
86563
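
This is the core of GRKERNSEC_BLACKHOLE for TCP: every tcp_v4_send_reset() on the unmatched-packet paths is gated on grsec_enable_blackhole, so probes of closed ports elicit silence rather than RSTs. The ret bookkeeping distinguishes how the packet failed to match (1 = no socket, 2 = TIME_WAIT) so that loopback traffic can still receive a RST. Condensed, the final gate reads:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static bool blackhole_allows_reset(int ret, const struct sk_buff *skb)
{
#ifdef CONFIG_GRKERNSEC_BLACKHOLE
	extern int grsec_enable_blackhole;

	return !grsec_enable_blackhole ||
	       (ret == 1 && (skb->dev->flags & IFF_LOOPBACK));
#else
	return true;
#endif
}

Under the same option, tcp_minisocks.c below suppresses the embryonic-reset RST, tcp_timer.c caps LAST_ACK retransmissions at grsec_lastack_retries, and tcp_ipv6.c receives the mirror-image of this change.
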
86564diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
86565index f35f2df..ccb5ca6 100644
86566--- a/net/ipv4/tcp_minisocks.c
86567+++ b/net/ipv4/tcp_minisocks.c
86568@@ -27,6 +27,10 @@
86569 #include <net/inet_common.h>
86570 #include <net/xfrm.h>
86571
86572+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86573+extern int grsec_enable_blackhole;
86574+#endif
86575+
86576 int sysctl_tcp_syncookies __read_mostly = 1;
86577 EXPORT_SYMBOL(sysctl_tcp_syncookies);
86578
86579@@ -742,7 +746,10 @@ embryonic_reset:
86580 * avoid becoming vulnerable to outside attack aiming at
86581 * resetting legit local connections.
86582 */
86583- req->rsk_ops->send_reset(sk, skb);
86584+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86585+ if (!grsec_enable_blackhole)
86586+#endif
86587+ req->rsk_ops->send_reset(sk, skb);
86588 } else if (fastopen) { /* received a valid RST pkt */
86589 reqsk_fastopen_remove(sk, req, true);
86590 tcp_reset(sk);
86591diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
86592index 4526fe6..1a34e43 100644
86593--- a/net/ipv4/tcp_probe.c
86594+++ b/net/ipv4/tcp_probe.c
86595@@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
86596 if (cnt + width >= len)
86597 break;
86598
86599- if (copy_to_user(buf + cnt, tbuf, width))
86600+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
86601 return -EFAULT;
86602 cnt += width;
86603 }
86604diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
86605index b78aac3..e18230b 100644
86606--- a/net/ipv4/tcp_timer.c
86607+++ b/net/ipv4/tcp_timer.c
86608@@ -22,6 +22,10 @@
86609 #include <linux/gfp.h>
86610 #include <net/tcp.h>
86611
86612+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86613+extern int grsec_lastack_retries;
86614+#endif
86615+
86616 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
86617 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
86618 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
86619@@ -185,6 +189,13 @@ static int tcp_write_timeout(struct sock *sk)
86620 }
86621 }
86622
86623+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86624+ if ((sk->sk_state == TCP_LAST_ACK) &&
86625+ (grsec_lastack_retries > 0) &&
86626+ (grsec_lastack_retries < retry_until))
86627+ retry_until = grsec_lastack_retries;
86628+#endif
86629+
86630 if (retransmits_timed_out(sk, retry_until,
86631 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
86632 /* Has it gone just too far? */
86633diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
86634index 1f4d405..3524677 100644
86635--- a/net/ipv4/udp.c
86636+++ b/net/ipv4/udp.c
86637@@ -87,6 +87,7 @@
86638 #include <linux/types.h>
86639 #include <linux/fcntl.h>
86640 #include <linux/module.h>
86641+#include <linux/security.h>
86642 #include <linux/socket.h>
86643 #include <linux/sockios.h>
86644 #include <linux/igmp.h>
86645@@ -111,6 +112,10 @@
86646 #include <trace/events/skb.h>
86647 #include "udp_impl.h"
86648
86649+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86650+extern int grsec_enable_blackhole;
86651+#endif
86652+
86653 struct udp_table udp_table __read_mostly;
86654 EXPORT_SYMBOL(udp_table);
86655
86656@@ -569,6 +574,9 @@ found:
86657 return s;
86658 }
86659
86660+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
86661+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
86662+
86663 /*
86664 * This routine is called by the ICMP module when it gets some
86665 * sort of error condition. If err < 0 then the socket should
86666@@ -864,9 +872,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
86667 dport = usin->sin_port;
86668 if (dport == 0)
86669 return -EINVAL;
86670+
86671+ err = gr_search_udp_sendmsg(sk, usin);
86672+ if (err)
86673+ return err;
86674 } else {
86675 if (sk->sk_state != TCP_ESTABLISHED)
86676 return -EDESTADDRREQ;
86677+
86678+ err = gr_search_udp_sendmsg(sk, NULL);
86679+ if (err)
86680+ return err;
86681+
86682 daddr = inet->inet_daddr;
86683 dport = inet->inet_dport;
86684 /* Open fast path for connected socket.
86685@@ -1108,7 +1125,7 @@ static unsigned int first_packet_length(struct sock *sk)
86686 udp_lib_checksum_complete(skb)) {
86687 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
86688 IS_UDPLITE(sk));
86689- atomic_inc(&sk->sk_drops);
86690+ atomic_inc_unchecked(&sk->sk_drops);
86691 __skb_unlink(skb, rcvq);
86692 __skb_queue_tail(&list_kill, skb);
86693 }
86694@@ -1194,6 +1211,10 @@ try_again:
86695 if (!skb)
86696 goto out;
86697
86698+ err = gr_search_udp_recvmsg(sk, skb);
86699+ if (err)
86700+ goto out_free;
86701+
86702 ulen = skb->len - sizeof(struct udphdr);
86703 copied = len;
86704 if (copied > ulen)
86705@@ -1227,7 +1248,7 @@ try_again:
86706 if (unlikely(err)) {
86707 trace_kfree_skb(skb, udp_recvmsg);
86708 if (!peeked) {
86709- atomic_inc(&sk->sk_drops);
86710+ atomic_inc_unchecked(&sk->sk_drops);
86711 UDP_INC_STATS_USER(sock_net(sk),
86712 UDP_MIB_INERRORS, is_udplite);
86713 }
86714@@ -1510,7 +1531,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
86715
86716 drop:
86717 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
86718- atomic_inc(&sk->sk_drops);
86719+ atomic_inc_unchecked(&sk->sk_drops);
86720 kfree_skb(skb);
86721 return -1;
86722 }
86723@@ -1529,7 +1550,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
86724 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
86725
86726 if (!skb1) {
86727- atomic_inc(&sk->sk_drops);
86728+ atomic_inc_unchecked(&sk->sk_drops);
86729 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
86730 IS_UDPLITE(sk));
86731 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
86732@@ -1698,6 +1719,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
86733 goto csum_error;
86734
86735 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
86736+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86737+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
86738+#endif
86739 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
86740
86741 /*
86742@@ -2120,7 +2144,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
86743 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
86744 0, sock_i_ino(sp),
86745 atomic_read(&sp->sk_refcnt), sp,
86746- atomic_read(&sp->sk_drops), len);
86747+ atomic_read_unchecked(&sp->sk_drops), len);
86748 }
86749
86750 int udp4_seq_show(struct seq_file *seq, void *v)
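
Besides the blackhole gate on ICMP port-unreachable generation, UDP gains two RBAC hooks: gr_search_udp_sendmsg() and gr_search_udp_recvmsg() are consulted on every send and receive, and a non-zero return is propagated to the caller as the error. The implementations live elsewhere in this patch; a hedged sketch of the contract, written as disabled-policy stubs:

#include <linux/in.h>
#include <linux/skbuff.h>
#include <net/sock.h>

/* 0 means "allowed"; any other value vetoes the operation. */
int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
{
	return 0;	/* no RBAC policy loaded: always allow */
}

int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
{
	return 0;
}
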
86751diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
86752index 1b5d8cb..ffb0833 100644
86753--- a/net/ipv6/addrconf.c
86754+++ b/net/ipv6/addrconf.c
86755@@ -2272,7 +2272,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
86756 p.iph.ihl = 5;
86757 p.iph.protocol = IPPROTO_IPV6;
86758 p.iph.ttl = 64;
86759- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
86760+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
86761
86762 if (ops->ndo_do_ioctl) {
86763 mm_segment_t oldfs = get_fs();
86764@@ -4388,7 +4388,7 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
86765 int *valp = ctl->data;
86766 int val = *valp;
86767 loff_t pos = *ppos;
86768- ctl_table lctl;
86769+ ctl_table_no_const lctl;
86770 int ret;
86771
86772 /*
86773@@ -4470,7 +4470,7 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
86774 int *valp = ctl->data;
86775 int val = *valp;
86776 loff_t pos = *ppos;
86777- ctl_table lctl;
86778+ ctl_table_no_const lctl;
86779 int ret;
86780
86781 /*
86782diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
86783index fff5bdd..15194fb 100644
86784--- a/net/ipv6/icmp.c
86785+++ b/net/ipv6/icmp.c
86786@@ -973,7 +973,7 @@ ctl_table ipv6_icmp_table_template[] = {
86787
86788 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
86789 {
86790- struct ctl_table *table;
86791+ ctl_table_no_const *table;
86792
86793 table = kmemdup(ipv6_icmp_table_template,
86794 sizeof(ipv6_icmp_table_template),
86795diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
86796index 131dd09..f7ed64f 100644
86797--- a/net/ipv6/ip6_gre.c
86798+++ b/net/ipv6/ip6_gre.c
86799@@ -73,7 +73,7 @@ struct ip6gre_net {
86800 struct net_device *fb_tunnel_dev;
86801 };
86802
86803-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
86804+static struct rtnl_link_ops ip6gre_link_ops;
86805 static int ip6gre_tunnel_init(struct net_device *dev);
86806 static void ip6gre_tunnel_setup(struct net_device *dev);
86807 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
86808@@ -1337,7 +1337,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
86809 }
86810
86811
86812-static struct inet6_protocol ip6gre_protocol __read_mostly = {
86813+static struct inet6_protocol ip6gre_protocol = {
86814 .handler = ip6gre_rcv,
86815 .err_handler = ip6gre_err,
86816 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
86817@@ -1671,7 +1671,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
86818 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
86819 };
86820
86821-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
86822+static struct rtnl_link_ops ip6gre_link_ops = {
86823 .kind = "ip6gre",
86824 .maxtype = IFLA_GRE_MAX,
86825 .policy = ip6gre_policy,
86826@@ -1684,7 +1684,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
86827 .fill_info = ip6gre_fill_info,
86828 };
86829
86830-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
86831+static struct rtnl_link_ops ip6gre_tap_ops = {
86832 .kind = "ip6gretap",
86833 .maxtype = IFLA_GRE_MAX,
86834 .policy = ip6gre_policy,
86835diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
86836index a14f28b..b4b8956 100644
86837--- a/net/ipv6/ip6_tunnel.c
86838+++ b/net/ipv6/ip6_tunnel.c
86839@@ -87,7 +87,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
86840
86841 static int ip6_tnl_dev_init(struct net_device *dev);
86842 static void ip6_tnl_dev_setup(struct net_device *dev);
86843-static struct rtnl_link_ops ip6_link_ops __read_mostly;
86844+static struct rtnl_link_ops ip6_link_ops;
86845
86846 static int ip6_tnl_net_id __read_mostly;
86847 struct ip6_tnl_net {
86848@@ -1686,7 +1686,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
86849 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
86850 };
86851
86852-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
86853+static struct rtnl_link_ops ip6_link_ops = {
86854 .kind = "ip6tnl",
86855 .maxtype = IFLA_IPTUN_MAX,
86856 .policy = ip6_tnl_policy,
86857diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
86858index d1e2e8e..51c19ae 100644
86859--- a/net/ipv6/ipv6_sockglue.c
86860+++ b/net/ipv6/ipv6_sockglue.c
86861@@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
86862 if (sk->sk_type != SOCK_STREAM)
86863 return -ENOPROTOOPT;
86864
86865- msg.msg_control = optval;
86866+ msg.msg_control = (void __force_kernel *)optval;
86867 msg.msg_controllen = len;
86868 msg.msg_flags = flags;
86869
86870diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
86871index 125a90d..2a11f36 100644
86872--- a/net/ipv6/netfilter/ip6_tables.c
86873+++ b/net/ipv6/netfilter/ip6_tables.c
86874@@ -1076,14 +1076,14 @@ static int compat_table_info(const struct xt_table_info *info,
86875 #endif
86876
86877 static int get_info(struct net *net, void __user *user,
86878- const int *len, int compat)
86879+ int len, int compat)
86880 {
86881 char name[XT_TABLE_MAXNAMELEN];
86882 struct xt_table *t;
86883 int ret;
86884
86885- if (*len != sizeof(struct ip6t_getinfo)) {
86886- duprintf("length %u != %zu\n", *len,
86887+ if (len != sizeof(struct ip6t_getinfo)) {
86888+ duprintf("length %u != %zu\n", len,
86889 sizeof(struct ip6t_getinfo));
86890 return -EINVAL;
86891 }
86892@@ -1120,7 +1120,7 @@ static int get_info(struct net *net, void __user *user,
86893 info.size = private->size;
86894 strcpy(info.name, name);
86895
86896- if (copy_to_user(user, &info, *len) != 0)
86897+ if (copy_to_user(user, &info, len) != 0)
86898 ret = -EFAULT;
86899 else
86900 ret = 0;
86901@@ -1974,7 +1974,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
86902
86903 switch (cmd) {
86904 case IP6T_SO_GET_INFO:
86905- ret = get_info(sock_net(sk), user, len, 1);
86906+ ret = get_info(sock_net(sk), user, *len, 1);
86907 break;
86908 case IP6T_SO_GET_ENTRIES:
86909 ret = compat_get_entries(sock_net(sk), user, len);
86910@@ -2021,7 +2021,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
86911
86912 switch (cmd) {
86913 case IP6T_SO_GET_INFO:
86914- ret = get_info(sock_net(sk), user, len, 0);
86915+ ret = get_info(sock_net(sk), user, *len, 0);
86916 break;
86917
86918 case IP6T_SO_GET_ENTRIES:
86919diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
86920index 3dacecc..2939087 100644
86921--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
86922+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
86923@@ -87,12 +87,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
86924
86925 static int nf_ct_frag6_sysctl_register(struct net *net)
86926 {
86927- struct ctl_table *table;
86928+ ctl_table_no_const *table = NULL;
86929 struct ctl_table_header *hdr;
86930
86931- table = nf_ct_frag6_sysctl_table;
86932 if (!net_eq(net, &init_net)) {
86933- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
86934+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
86935 GFP_KERNEL);
86936 if (table == NULL)
86937 goto err_alloc;
86938@@ -100,9 +99,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
86939 table[0].data = &net->ipv6.frags.high_thresh;
86940 table[1].data = &net->ipv6.frags.low_thresh;
86941 table[2].data = &net->ipv6.frags.timeout;
86942- }
86943-
86944- hdr = register_net_sysctl(net, "net/netfilter", table);
86945+ hdr = register_net_sysctl(net, "net/netfilter", table);
86946+ } else
86947+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
86948 if (hdr == NULL)
86949 goto err_reg;
86950
86951@@ -110,8 +109,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
86952 return 0;
86953
86954 err_reg:
86955- if (!net_eq(net, &init_net))
86956- kfree(table);
86957+ kfree(table);
86958 err_alloc:
86959 return -ENOMEM;
86960 }
86961diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
86962index 70fa814..d70c28c 100644
86963--- a/net/ipv6/raw.c
86964+++ b/net/ipv6/raw.c
86965@@ -379,7 +379,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
86966 {
86967 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
86968 skb_checksum_complete(skb)) {
86969- atomic_inc(&sk->sk_drops);
86970+ atomic_inc_unchecked(&sk->sk_drops);
86971 kfree_skb(skb);
86972 return NET_RX_DROP;
86973 }
86974@@ -407,7 +407,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
86975 struct raw6_sock *rp = raw6_sk(sk);
86976
86977 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
86978- atomic_inc(&sk->sk_drops);
86979+ atomic_inc_unchecked(&sk->sk_drops);
86980 kfree_skb(skb);
86981 return NET_RX_DROP;
86982 }
86983@@ -431,7 +431,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
86984
86985 if (inet->hdrincl) {
86986 if (skb_checksum_complete(skb)) {
86987- atomic_inc(&sk->sk_drops);
86988+ atomic_inc_unchecked(&sk->sk_drops);
86989 kfree_skb(skb);
86990 return NET_RX_DROP;
86991 }
86992@@ -604,7 +604,7 @@ out:
86993 return err;
86994 }
86995
86996-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
86997+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
86998 struct flowi6 *fl6, struct dst_entry **dstp,
86999 unsigned int flags)
87000 {
87001@@ -916,12 +916,15 @@ do_confirm:
87002 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
87003 char __user *optval, int optlen)
87004 {
87005+ struct icmp6_filter filter;
87006+
87007 switch (optname) {
87008 case ICMPV6_FILTER:
87009 if (optlen > sizeof(struct icmp6_filter))
87010 optlen = sizeof(struct icmp6_filter);
87011- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
87012+ if (copy_from_user(&filter, optval, optlen))
87013 return -EFAULT;
87014+ raw6_sk(sk)->filter = filter;
87015 return 0;
87016 default:
87017 return -ENOPROTOOPT;
87018@@ -934,6 +937,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
87019 char __user *optval, int __user *optlen)
87020 {
87021 int len;
87022+ struct icmp6_filter filter;
87023
87024 switch (optname) {
87025 case ICMPV6_FILTER:
87026@@ -945,7 +949,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
87027 len = sizeof(struct icmp6_filter);
87028 if (put_user(len, optlen))
87029 return -EFAULT;
87030- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
87031+ filter = raw6_sk(sk)->filter;
87032+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
87033 return -EFAULT;
87034 return 0;
87035 default:
87036@@ -1253,7 +1258,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
87037 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
87038 0,
87039 sock_i_ino(sp),
87040- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
87041+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
87042 }
87043
87044 static int raw6_seq_show(struct seq_file *seq, void *v)
87045diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
87046index e5253ec..0410257 100644
87047--- a/net/ipv6/reassembly.c
87048+++ b/net/ipv6/reassembly.c
87049@@ -604,12 +604,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
87050
87051 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
87052 {
87053- struct ctl_table *table;
87054+ ctl_table_no_const *table = NULL;
87055 struct ctl_table_header *hdr;
87056
87057- table = ip6_frags_ns_ctl_table;
87058 if (!net_eq(net, &init_net)) {
87059- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
87060+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
87061 if (table == NULL)
87062 goto err_alloc;
87063
87064@@ -620,9 +619,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
87065 /* Don't export sysctls to unprivileged users */
87066 if (net->user_ns != &init_user_ns)
87067 table[0].procname = NULL;
87068- }
87069+ hdr = register_net_sysctl(net, "net/ipv6", table);
87070+ } else
87071+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
87072
87073- hdr = register_net_sysctl(net, "net/ipv6", table);
87074 if (hdr == NULL)
87075 goto err_reg;
87076
87077@@ -630,8 +630,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
87078 return 0;
87079
87080 err_reg:
87081- if (!net_eq(net, &init_net))
87082- kfree(table);
87083+ kfree(table);
87084 err_alloc:
87085 return -ENOMEM;
87086 }
87087diff --git a/net/ipv6/route.c b/net/ipv6/route.c
87088index 5845613..3af8fc7 100644
87089--- a/net/ipv6/route.c
87090+++ b/net/ipv6/route.c
87091@@ -2966,7 +2966,7 @@ ctl_table ipv6_route_table_template[] = {
87092
87093 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
87094 {
87095- struct ctl_table *table;
87096+ ctl_table_no_const *table;
87097
87098 table = kmemdup(ipv6_route_table_template,
87099 sizeof(ipv6_route_table_template),
87100diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
87101index cfba99b..20ca511 100644
87102--- a/net/ipv6/sit.c
87103+++ b/net/ipv6/sit.c
87104@@ -72,7 +72,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
87105 static int ipip6_tunnel_init(struct net_device *dev);
87106 static void ipip6_tunnel_setup(struct net_device *dev);
87107 static void ipip6_dev_free(struct net_device *dev);
87108-static struct rtnl_link_ops sit_link_ops __read_mostly;
87109+static struct rtnl_link_ops sit_link_ops;
87110
87111 static int sit_net_id __read_mostly;
87112 struct sit_net {
87113@@ -1463,7 +1463,7 @@ static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = {
87114 #endif
87115 };
87116
87117-static struct rtnl_link_ops sit_link_ops __read_mostly = {
87118+static struct rtnl_link_ops sit_link_ops = {
87119 .kind = "sit",
87120 .maxtype = IFLA_IPTUN_MAX,
87121 .policy = ipip6_policy,
87122diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
87123index e85c48b..b8268d3 100644
87124--- a/net/ipv6/sysctl_net_ipv6.c
87125+++ b/net/ipv6/sysctl_net_ipv6.c
87126@@ -40,7 +40,7 @@ static ctl_table ipv6_rotable[] = {
87127
87128 static int __net_init ipv6_sysctl_net_init(struct net *net)
87129 {
87130- struct ctl_table *ipv6_table;
87131+ ctl_table_no_const *ipv6_table;
87132 struct ctl_table *ipv6_route_table;
87133 struct ctl_table *ipv6_icmp_table;
87134 int err;
87135diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
87136index 4f435371..5de9da7 100644
87137--- a/net/ipv6/tcp_ipv6.c
87138+++ b/net/ipv6/tcp_ipv6.c
87139@@ -103,6 +103,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
87140 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
87141 }
87142
87143+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87144+extern int grsec_enable_blackhole;
87145+#endif
87146+
87147 static void tcp_v6_hash(struct sock *sk)
87148 {
87149 if (sk->sk_state != TCP_CLOSE) {
87150@@ -1433,6 +1437,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
87151 return 0;
87152
87153 reset:
87154+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87155+ if (!grsec_enable_blackhole)
87156+#endif
87157 tcp_v6_send_reset(sk, skb);
87158 discard:
87159 if (opt_skb)
87160@@ -1514,12 +1521,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
87161 TCP_SKB_CB(skb)->sacked = 0;
87162
87163 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
87164- if (!sk)
87165+ if (!sk) {
87166+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87167+ ret = 1;
87168+#endif
87169 goto no_tcp_socket;
87170+ }
87171
87172 process:
87173- if (sk->sk_state == TCP_TIME_WAIT)
87174+ if (sk->sk_state == TCP_TIME_WAIT) {
87175+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87176+ ret = 2;
87177+#endif
87178 goto do_time_wait;
87179+ }
87180
87181 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
87182 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
87183@@ -1568,6 +1583,10 @@ no_tcp_socket:
87184 bad_packet:
87185 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
87186 } else {
87187+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87188+ if (!grsec_enable_blackhole || (ret == 1 &&
87189+ (skb->dev->flags & IFF_LOOPBACK)))
87190+#endif
87191 tcp_v6_send_reset(NULL, skb);
87192 }
87193
87194diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
87195index fb08329..2d6919e 100644
87196--- a/net/ipv6/udp.c
87197+++ b/net/ipv6/udp.c
87198@@ -51,6 +51,10 @@
87199 #include <trace/events/skb.h>
87200 #include "udp_impl.h"
87201
87202+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87203+extern int grsec_enable_blackhole;
87204+#endif
87205+
87206 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
87207 {
87208 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
87209@@ -395,7 +399,7 @@ try_again:
87210 if (unlikely(err)) {
87211 trace_kfree_skb(skb, udpv6_recvmsg);
87212 if (!peeked) {
87213- atomic_inc(&sk->sk_drops);
87214+ atomic_inc_unchecked(&sk->sk_drops);
87215 if (is_udp4)
87216 UDP_INC_STATS_USER(sock_net(sk),
87217 UDP_MIB_INERRORS,
87218@@ -633,7 +637,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
87219 return rc;
87220 drop:
87221 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
87222- atomic_inc(&sk->sk_drops);
87223+ atomic_inc_unchecked(&sk->sk_drops);
87224 kfree_skb(skb);
87225 return -1;
87226 }
87227@@ -691,7 +695,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
87228 if (likely(skb1 == NULL))
87229 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
87230 if (!skb1) {
87231- atomic_inc(&sk->sk_drops);
87232+ atomic_inc_unchecked(&sk->sk_drops);
87233 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
87234 IS_UDPLITE(sk));
87235 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
87236@@ -862,6 +866,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
87237 goto discard;
87238
87239 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
87240+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87241+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
87242+#endif
87243 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
87244
87245 kfree_skb(skb);
87246@@ -1379,7 +1386,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
87247 0,
87248 sock_i_ino(sp),
87249 atomic_read(&sp->sk_refcnt), sp,
87250- atomic_read(&sp->sk_drops));
87251+ atomic_read_unchecked(&sp->sk_drops));
87252 }
87253
87254 int udp6_seq_show(struct seq_file *seq, void *v)
87255diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
87256index b833677..4d04105 100644
87257--- a/net/irda/af_irda.c
87258+++ b/net/irda/af_irda.c
87259@@ -2584,8 +2584,10 @@ bed:
87260 NULL, NULL, NULL);
87261
85262 /* Check if we got some results */
87263- if (!self->cachedaddr)
87264- return -EAGAIN; /* Didn't find any devices */
87265+ if (!self->cachedaddr) {
87266+ err = -EAGAIN; /* Didn't find any devices */
87267+ goto out;
87268+ }
87269 daddr = self->cachedaddr;
87270 /* Cleanup */
87271 self->cachedaddr = 0;
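
The original returned -EAGAIN from the middle of the function, skipping whatever common teardown the function's out: label performs (the patch does not show it); converting the early return to err/goto ensures the cleanup path always runs. The single-exit idiom, sketched with a hypothetical resource_found() condition:

static bool resource_found(void)	/* hypothetical condition */
{
	return false;
}

static int demo(void)
{
	int err = 0;

	if (!resource_found()) {
		err = -EAGAIN;
		goto out;	/* still runs the cleanup below */
	}
	/* ... use the resource ... */
out:
	/* common cleanup/unlock here */
	return err;
}
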
87272diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
87273index a68c88c..d55b0c5 100644
87274--- a/net/irda/ircomm/ircomm_tty.c
87275+++ b/net/irda/ircomm/ircomm_tty.c
87276@@ -312,12 +312,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
87277 add_wait_queue(&port->open_wait, &wait);
87278
87279 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
87280- __FILE__, __LINE__, tty->driver->name, port->count);
87281+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
87282
87283 spin_lock_irqsave(&port->lock, flags);
87284 if (!tty_hung_up_p(filp)) {
87285 extra_count = 1;
87286- port->count--;
87287+ atomic_dec(&port->count);
87288 }
87289 spin_unlock_irqrestore(&port->lock, flags);
87290 port->blocked_open++;
87291@@ -353,7 +353,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
87292 }
87293
87294 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
87295- __FILE__, __LINE__, tty->driver->name, port->count);
87296+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
87297
87298 schedule();
87299 }
87300@@ -364,13 +364,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
87301 if (extra_count) {
87302 /* ++ is not atomic, so this should be protected - Jean II */
87303 spin_lock_irqsave(&port->lock, flags);
87304- port->count++;
87305+ atomic_inc(&port->count);
87306 spin_unlock_irqrestore(&port->lock, flags);
87307 }
87308 port->blocked_open--;
87309
87310 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
87311- __FILE__, __LINE__, tty->driver->name, port->count);
87312+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
87313
87314 if (!retval)
87315 port->flags |= ASYNC_NORMAL_ACTIVE;
87316@@ -444,12 +444,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
87317
87318 /* ++ is not atomic, so this should be protected - Jean II */
87319 spin_lock_irqsave(&self->port.lock, flags);
87320- self->port.count++;
87321+ atomic_inc(&self->port.count);
87322 spin_unlock_irqrestore(&self->port.lock, flags);
87323 tty_port_tty_set(&self->port, tty);
87324
87325 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
87326- self->line, self->port.count);
87327+ self->line, atomic_read(&self->port.count));
87328
87329 /* Not really used by us, but lets do it anyway */
87330 tty->low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
87331@@ -986,7 +986,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
87332 tty_kref_put(port->tty);
87333 }
87334 port->tty = NULL;
87335- port->count = 0;
87336+ atomic_set(&port->count, 0);
87337 spin_unlock_irqrestore(&port->lock, flags);
87338
87339 wake_up_interruptible(&port->open_wait);
87340@@ -1343,7 +1343,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
87341 seq_putc(m, '\n');
87342
87343 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
87344- seq_printf(m, "Open count: %d\n", self->port.count);
87345+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
87346 seq_printf(m, "Max data size: %d\n", self->max_data_size);
87347 seq_printf(m, "Max header size: %d\n", self->max_header_size);
87348
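
tty_port.count was a plain int updated under port->lock but also read (in the debug prints and /proc output) with no lock held; converting it to atomic_t makes the lockless readers well-defined. This presumes the patch also changes the type of struct tty_port.count itself, earlier in the diff. The shape of the conversion:

#include <linux/atomic.h>
#include <linux/printk.h>
#include <linux/spinlock.h>

struct demo_port {
	spinlock_t lock;
	atomic_t count;		/* was: int count */
};

static void demo_open(struct demo_port *port)
{
	atomic_inc(&port->count);
	/* safe to read with no lock held */
	pr_debug("open_count=%d\n", atomic_read(&port->count));
}
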
87349diff --git a/net/irda/iriap.c b/net/irda/iriap.c
87350index e71e85b..29340a9 100644
87351--- a/net/irda/iriap.c
87352+++ b/net/irda/iriap.c
87353@@ -495,8 +495,11 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
87354 /* case CS_ISO_8859_9: */
87355 /* case CS_UNICODE: */
87356 default:
87357- IRDA_DEBUG(0, "%s(), charset %s, not supported\n",
87358- __func__, ias_charset_types[charset]);
87359+ IRDA_DEBUG(0, "%s(), charset [%d] %s, not supported\n",
87360+ __func__, charset,
87361+ charset < ARRAY_SIZE(ias_charset_types) ?
87362+ ias_charset_types[charset] :
87363+ "(unknown)");
87364
87365 /* Aborting, close connection! */
87366 iriap_disconnect_request(self);
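
Here charset arrives off the wire and was used to index ias_charset_types[] unchecked, an out-of-bounds read for unexpected values; the fix logs the raw index and only dereferences the table when it is in range. The guarded-lookup idiom, isolated:

#include <linux/kernel.h>	/* ARRAY_SIZE() */

static const char *charset_name(unsigned int charset)
{
	return charset < ARRAY_SIZE(ias_charset_types) ?
	       ias_charset_types[charset] : "(unknown)";
}
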
87367diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
87368index cd6f7a9..e63fe89 100644
87369--- a/net/iucv/af_iucv.c
87370+++ b/net/iucv/af_iucv.c
87371@@ -782,10 +782,10 @@ static int iucv_sock_autobind(struct sock *sk)
87372
87373 write_lock_bh(&iucv_sk_list.lock);
87374
87375- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
87376+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
87377 while (__iucv_get_sock_by_name(name)) {
87378 sprintf(name, "%08x",
87379- atomic_inc_return(&iucv_sk_list.autobind_name));
87380+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
87381 }
87382
87383 write_unlock_bh(&iucv_sk_list.lock);
87384diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
87385index df08250..02021fe 100644
87386--- a/net/iucv/iucv.c
87387+++ b/net/iucv/iucv.c
87388@@ -690,7 +690,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
87389 return NOTIFY_OK;
87390 }
87391
87392-static struct notifier_block __refdata iucv_cpu_notifier = {
87393+static struct notifier_block iucv_cpu_notifier = {
87394 .notifier_call = iucv_cpu_notify,
87395 };
87396
87397diff --git a/net/key/af_key.c b/net/key/af_key.c
87398index 5b426a6..970032b 100644
87399--- a/net/key/af_key.c
87400+++ b/net/key/af_key.c
87401@@ -3019,10 +3019,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
87402 static u32 get_acqseq(void)
87403 {
87404 u32 res;
87405- static atomic_t acqseq;
87406+ static atomic_unchecked_t acqseq;
87407
87408 do {
87409- res = atomic_inc_return(&acqseq);
87410+ res = atomic_inc_return_unchecked(&acqseq);
87411 } while (!res);
87412 return res;
87413 }
87414diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
87415index 0479c64..d031db6 100644
87416--- a/net/mac80211/cfg.c
87417+++ b/net/mac80211/cfg.c
87418@@ -790,7 +790,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
87419 ret = ieee80211_vif_use_channel(sdata, chandef,
87420 IEEE80211_CHANCTX_EXCLUSIVE);
87421 }
87422- } else if (local->open_count == local->monitors) {
87423+ } else if (local_read(&local->open_count) == local->monitors) {
87424 local->_oper_channel = chandef->chan;
87425 local->_oper_channel_type = cfg80211_get_chandef_type(chandef);
87426 ieee80211_hw_config(local, 0);
87427@@ -2716,7 +2716,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
87428 else
87429 local->probe_req_reg--;
87430
87431- if (!local->open_count)
87432+ if (!local_read(&local->open_count))
87433 break;
87434
87435 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
87436diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
87437index 2ed065c..948177f 100644
87438--- a/net/mac80211/ieee80211_i.h
87439+++ b/net/mac80211/ieee80211_i.h
87440@@ -28,6 +28,7 @@
87441 #include <net/ieee80211_radiotap.h>
87442 #include <net/cfg80211.h>
87443 #include <net/mac80211.h>
87444+#include <asm/local.h>
87445 #include "key.h"
87446 #include "sta_info.h"
87447 #include "debug.h"
87448@@ -909,7 +910,7 @@ struct ieee80211_local {
87449 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
87450 spinlock_t queue_stop_reason_lock;
87451
87452- int open_count;
87453+ local_t open_count;
87454 int monitors, cooked_mntrs;
87455 /* number of interfaces with corresponding FIF_ flags */
87456 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
87457diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
87458index 8be854e..ad72a69 100644
87459--- a/net/mac80211/iface.c
87460+++ b/net/mac80211/iface.c
87461@@ -546,7 +546,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
87462 break;
87463 }
87464
87465- if (local->open_count == 0) {
87466+ if (local_read(&local->open_count) == 0) {
87467 res = drv_start(local);
87468 if (res)
87469 goto err_del_bss;
87470@@ -591,7 +591,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
87471 break;
87472 }
87473
87474- if (local->monitors == 0 && local->open_count == 0) {
87475+ if (local->monitors == 0 && local_read(&local->open_count) == 0) {
87476 res = ieee80211_add_virtual_monitor(local);
87477 if (res)
87478 goto err_stop;
87479@@ -699,7 +699,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
87480 mutex_unlock(&local->mtx);
87481
87482 if (coming_up)
87483- local->open_count++;
87484+ local_inc(&local->open_count);
87485
87486 if (hw_reconf_flags)
87487 ieee80211_hw_config(local, hw_reconf_flags);
87488@@ -713,7 +713,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
87489 err_del_interface:
87490 drv_remove_interface(local, sdata);
87491 err_stop:
87492- if (!local->open_count)
87493+ if (!local_read(&local->open_count))
87494 drv_stop(local);
87495 err_del_bss:
87496 sdata->bss = NULL;
87497@@ -827,7 +827,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
87498 }
87499
87500 if (going_down)
87501- local->open_count--;
87502+ local_dec(&local->open_count);
87503
87504 switch (sdata->vif.type) {
87505 case NL80211_IFTYPE_AP_VLAN:
87506@@ -884,7 +884,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
87507
87508 ieee80211_recalc_ps(local, -1);
87509
87510- if (local->open_count == 0) {
87511+ if (local_read(&local->open_count) == 0) {
87512 if (local->ops->napi_poll)
87513 napi_disable(&local->napi);
87514 ieee80211_clear_tx_pending(local);
87515@@ -910,7 +910,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
87516 }
87517 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
87518
87519- if (local->monitors == local->open_count && local->monitors > 0)
87520+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
87521 ieee80211_add_virtual_monitor(local);
87522 }
87523
87524diff --git a/net/mac80211/main.c b/net/mac80211/main.c
87525index 1b087ff..bf600e9 100644
87526--- a/net/mac80211/main.c
87527+++ b/net/mac80211/main.c
87528@@ -181,7 +181,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
87529 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
87530 IEEE80211_CONF_CHANGE_POWER);
87531
87532- if (changed && local->open_count) {
87533+ if (changed && local_read(&local->open_count)) {
87534 ret = drv_config(local, changed);
87535 /*
87536 * Goal:
87537diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
87538index 79a48f3..5e185c9 100644
87539--- a/net/mac80211/pm.c
87540+++ b/net/mac80211/pm.c
87541@@ -35,7 +35,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
87542 struct sta_info *sta;
87543 struct ieee80211_chanctx *ctx;
87544
87545- if (!local->open_count)
87546+ if (!local_read(&local->open_count))
87547 goto suspend;
87548
87549 ieee80211_scan_cancel(local);
87550@@ -73,7 +73,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
87551 cancel_work_sync(&local->dynamic_ps_enable_work);
87552 del_timer_sync(&local->dynamic_ps_timer);
87553
87554- local->wowlan = wowlan && local->open_count;
87555+ local->wowlan = wowlan && local_read(&local->open_count);
87556 if (local->wowlan) {
87557 int err = drv_suspend(local, wowlan);
87558 if (err < 0) {
87559@@ -187,7 +187,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
87560 mutex_unlock(&local->chanctx_mtx);
87561
87562 /* stop hardware - this must stop RX */
87563- if (local->open_count)
87564+ if (local_read(&local->open_count))
87565 ieee80211_stop_device(local);
87566
87567 suspend:
87568diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
87569index dd88381..eef4dd6 100644
87570--- a/net/mac80211/rate.c
87571+++ b/net/mac80211/rate.c
87572@@ -493,7 +493,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
87573
87574 ASSERT_RTNL();
87575
87576- if (local->open_count)
87577+ if (local_read(&local->open_count))
87578 return -EBUSY;
87579
87580 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
87581diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
87582index c97a065..ff61928 100644
87583--- a/net/mac80211/rc80211_pid_debugfs.c
87584+++ b/net/mac80211/rc80211_pid_debugfs.c
87585@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
87586
87587 spin_unlock_irqrestore(&events->lock, status);
87588
87589- if (copy_to_user(buf, pb, p))
87590+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
87591 return -EFAULT;
87592
87593 return p;
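
The rc80211_pid_debugfs change above is a bounded-copy fix: if p could ever exceed the size of the on-stack pb buffer (and sizeof(pb) working in the patched condition shows pb is an in-scope array), copy_to_user() would hand adjacent kernel stack to userspace, so the length is validated first and both failure modes return -EFAULT. The pattern in isolation, as a kernel-style sketch (pb_size replaces the sizeof, since pb decays to a pointer across a function boundary):

	#include <linux/types.h>
	#include <linux/uaccess.h>

	/* Copy at most pb_size bytes out; reject oversized requests
	 * before copy_to_user() can read past the source buffer. */
	static ssize_t bounded_copy_out(char __user *buf, const char *pb,
					size_t pb_size, size_t p)
	{
		if (p > pb_size || copy_to_user(buf, pb, p))
			return -EFAULT;
		return p;
	}
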
87594diff --git a/net/mac80211/util.c b/net/mac80211/util.c
87595index f11e8c5..08d0013 100644
87596--- a/net/mac80211/util.c
87597+++ b/net/mac80211/util.c
87598@@ -1380,7 +1380,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
87599 }
87600 #endif
87601 /* everything else happens only if HW was up & running */
87602- if (!local->open_count)
87603+ if (!local_read(&local->open_count))
87604 goto wake_up;
87605
87606 /*
87607diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
87608index 49e96df..63a51c3 100644
87609--- a/net/netfilter/Kconfig
87610+++ b/net/netfilter/Kconfig
87611@@ -936,6 +936,16 @@ config NETFILTER_XT_MATCH_ESP
87612
87613 To compile it as a module, choose M here. If unsure, say N.
87614
87615+config NETFILTER_XT_MATCH_GRADM
87616+ tristate '"gradm" match support'
87617+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
87618+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
87619+ ---help---
87620+	  The gradm match allows matching on whether grsecurity RBAC is enabled.
87621+	  It is useful when iptables rules are applied early during boot to
87622+ prevent connections to the machine (except from a trusted host)
87623+ while the RBAC system is disabled.
87624+
87625 config NETFILTER_XT_MATCH_HASHLIMIT
87626 tristate '"hashlimit" match support'
87627 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
87628diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
87629index 3259697..54d5393 100644
87630--- a/net/netfilter/Makefile
87631+++ b/net/netfilter/Makefile
87632@@ -109,6 +109,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
87633 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
87634 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
87635 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
87636+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
87637 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
87638 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
87639 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
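
The Kconfig and Makefile entries above wire in a new Xtables match registered under the name "gradm" (its source, net/netfilter/xt_gradm.c, appears further below in this patch); once built, rules select it with `iptables -m gradm ...`, while the option syntax comes from a companion userspace extension shipped with grsecurity's tooling and is not part of this patch. For orientation, the generic shape of a minimal Xtables match module on a 3.8-era kernel looks like this sketch (all names invented; this is not the xt_gradm source):

	#include <linux/module.h>
	#include <linux/netfilter/x_tables.h>

	static bool example_mt(const struct sk_buff *skb,
			       struct xt_action_param *par)
	{
		/* return true when the packet matches */
		return false;
	}

	static struct xt_match example_mt_reg __read_mostly = {
		.name      = "example",
		.revision  = 0,
		.family    = NFPROTO_UNSPEC,
		.match     = example_mt,
		.matchsize = 0,
		.me        = THIS_MODULE,
	};

	static int __init example_mt_init(void)
	{
		return xt_register_match(&example_mt_reg);
	}

	static void __exit example_mt_exit(void)
	{
		xt_unregister_match(&example_mt_reg);
	}

	module_init(example_mt_init);
	module_exit(example_mt_exit);
	MODULE_LICENSE("GPL");
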
87640diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
87641index 6d6d8f2..a676749 100644
87642--- a/net/netfilter/ipset/ip_set_core.c
87643+++ b/net/netfilter/ipset/ip_set_core.c
87644@@ -1800,7 +1800,7 @@ done:
87645 return ret;
87646 }
87647
87648-static struct nf_sockopt_ops so_set __read_mostly = {
87649+static struct nf_sockopt_ops so_set = {
87650 .pf = PF_INET,
87651 .get_optmin = SO_IP_SET,
87652 .get_optmax = SO_IP_SET + 1,
87653diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
87654index 30e764a..c3b6a9d 100644
87655--- a/net/netfilter/ipvs/ip_vs_conn.c
87656+++ b/net/netfilter/ipvs/ip_vs_conn.c
87657@@ -554,7 +554,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
87658 /* Increase the refcnt counter of the dest */
87659 atomic_inc(&dest->refcnt);
87660
87661- conn_flags = atomic_read(&dest->conn_flags);
87662+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
87663 if (cp->protocol != IPPROTO_UDP)
87664 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
87665 flags = cp->flags;
87666@@ -899,7 +899,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
87667 atomic_set(&cp->refcnt, 1);
87668
87669 atomic_set(&cp->n_control, 0);
87670- atomic_set(&cp->in_pkts, 0);
87671+ atomic_set_unchecked(&cp->in_pkts, 0);
87672
87673 atomic_inc(&ipvs->conn_count);
87674 if (flags & IP_VS_CONN_F_NO_CPORT)
87675@@ -1180,7 +1180,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
87676
87677 /* Don't drop the entry if its number of incoming packets is not
87678 located in [0, 8] */
87679- i = atomic_read(&cp->in_pkts);
87680+ i = atomic_read_unchecked(&cp->in_pkts);
87681 if (i > 8 || i < 0) return 0;
87682
87683 if (!todrop_rate[i]) return 0;
87684diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
87685index 47edf5a..235b07d 100644
87686--- a/net/netfilter/ipvs/ip_vs_core.c
87687+++ b/net/netfilter/ipvs/ip_vs_core.c
87688@@ -559,7 +559,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
87689 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
87690 /* do not touch skb anymore */
87691
87692- atomic_inc(&cp->in_pkts);
87693+ atomic_inc_unchecked(&cp->in_pkts);
87694 ip_vs_conn_put(cp);
87695 return ret;
87696 }
87697@@ -1691,7 +1691,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
87698 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
87699 pkts = sysctl_sync_threshold(ipvs);
87700 else
87701- pkts = atomic_add_return(1, &cp->in_pkts);
87702+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
87703
87704 if (ipvs->sync_state & IP_VS_STATE_MASTER)
87705 ip_vs_sync_conn(net, cp, pkts);
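
cp->in_pkts is a pure packet counter: it may legitimately wrap, and a wrap is harmless. Under PaX's REFCOUNT hardening, ordinary atomic_t arithmetic traps on overflow to catch reference-count bugs, so throughout IPVS the statistics counters and flag words that are not reference counts are moved to the atomic_unchecked_t variants to opt out of the check. A userspace model of the distinction (the *_inc helpers are invented; C11 atomic arithmetic on signed integers wraps silently, standing in for the unchecked kernel behaviour):

	#include <limits.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Checked: an increment that would wrap past INT_MAX aborts,
	 * the way a REFCOUNT-protected atomic_inc() traps. */
	static int checked_inc(_Atomic int *v)
	{
		int old = atomic_fetch_add(v, 1);
		if (old == INT_MAX) {
			fprintf(stderr, "refcount overflow\n");
			abort();
		}
		return old + 1;
	}

	/* Unchecked: statistics counters are allowed to wrap. */
	static int unchecked_inc(_Atomic int *v)
	{
		atomic_fetch_add(v, 1);  /* wraps; defined for _Atomic */
		return atomic_load(v);
	}

	int main(void)
	{
		_Atomic int refcnt  = 0;
		_Atomic int in_pkts = INT_MAX;   /* about to wrap */

		checked_inc(&refcnt);            /* fine */
		printf("in_pkts wraps to %d\n", unchecked_inc(&in_pkts));
		return 0;
	}
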
87706diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
87707index ec664cb..7f34a77 100644
87708--- a/net/netfilter/ipvs/ip_vs_ctl.c
87709+++ b/net/netfilter/ipvs/ip_vs_ctl.c
87710@@ -787,7 +787,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
87711 ip_vs_rs_hash(ipvs, dest);
87712 write_unlock_bh(&ipvs->rs_lock);
87713 }
87714- atomic_set(&dest->conn_flags, conn_flags);
87715+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
87716
87717 /* bind the service */
87718 if (!dest->svc) {
87719@@ -1688,7 +1688,7 @@ proc_do_sync_ports(ctl_table *table, int write,
87720 * align with netns init in ip_vs_control_net_init()
87721 */
87722
87723-static struct ctl_table vs_vars[] = {
87724+static ctl_table_no_const vs_vars[] __read_only = {
87725 {
87726 .procname = "amemthresh",
87727 .maxlen = sizeof(int),
87728@@ -2081,7 +2081,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
87729 " %-7s %-6d %-10d %-10d\n",
87730 &dest->addr.in6,
87731 ntohs(dest->port),
87732- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
87733+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
87734 atomic_read(&dest->weight),
87735 atomic_read(&dest->activeconns),
87736 atomic_read(&dest->inactconns));
87737@@ -2092,7 +2092,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
87738 "%-7s %-6d %-10d %-10d\n",
87739 ntohl(dest->addr.ip),
87740 ntohs(dest->port),
87741- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
87742+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
87743 atomic_read(&dest->weight),
87744 atomic_read(&dest->activeconns),
87745 atomic_read(&dest->inactconns));
87746@@ -2562,7 +2562,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
87747
87748 entry.addr = dest->addr.ip;
87749 entry.port = dest->port;
87750- entry.conn_flags = atomic_read(&dest->conn_flags);
87751+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
87752 entry.weight = atomic_read(&dest->weight);
87753 entry.u_threshold = dest->u_threshold;
87754 entry.l_threshold = dest->l_threshold;
87755@@ -3098,7 +3098,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
87756 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
87757 nla_put_u16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
87758 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
87759- (atomic_read(&dest->conn_flags) &
87760+ (atomic_read_unchecked(&dest->conn_flags) &
87761 IP_VS_CONN_F_FWD_MASK)) ||
87762 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
87763 atomic_read(&dest->weight)) ||
87764@@ -3688,7 +3688,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
87765 {
87766 int idx;
87767 struct netns_ipvs *ipvs = net_ipvs(net);
87768- struct ctl_table *tbl;
87769+ ctl_table_no_const *tbl;
87770
87771 atomic_set(&ipvs->dropentry, 0);
87772 spin_lock_init(&ipvs->dropentry_lock);
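
The struct ctl_table * to ctl_table_no_const * substitutions in this and the following hunks serve PaX constification: sysctl tables marked __read_only end up in rodata after init, so any instance that must still be written at runtime (typically a kmemdup()ed per-netns copy whose .data pointers get fixed up) is declared through a non-const typedef instead. A sketch of the pattern; the typedef's exact definition is assumed from the PaX headers rather than shown in this excerpt, and the fixed-up field is purely illustrative:

	/* Assumed opt-out typedef (see grsecurity's linux/sysctl.h): */
	typedef struct ctl_table __no_const ctl_table_no_const;

	static int example_net_init_sysctl(struct net *net)
	{
		ctl_table_no_const *table;   /* must stay writable */

		table = kmemdup(template_table, sizeof(template_table),
				GFP_KERNEL);
		if (!table)
			return -ENOMEM;
		/* illustrative runtime fixup of a per-netns pointer */
		table[0].data = &net->ct.sysctl_events;
		/* ... register_net_sysctl(net, "net/example", table) ... */
		return 0;
	}
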
87773diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
87774index fdd89b9..bd96aa9 100644
87775--- a/net/netfilter/ipvs/ip_vs_lblc.c
87776+++ b/net/netfilter/ipvs/ip_vs_lblc.c
87777@@ -115,7 +115,7 @@ struct ip_vs_lblc_table {
87778 * IPVS LBLC sysctl table
87779 */
87780 #ifdef CONFIG_SYSCTL
87781-static ctl_table vs_vars_table[] = {
87782+static ctl_table_no_const vs_vars_table[] __read_only = {
87783 {
87784 .procname = "lblc_expiration",
87785 .data = NULL,
87786diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
87787index c03b6a3..8ce3681 100644
87788--- a/net/netfilter/ipvs/ip_vs_lblcr.c
87789+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
87790@@ -288,7 +288,7 @@ struct ip_vs_lblcr_table {
87791 * IPVS LBLCR sysctl table
87792 */
87793
87794-static ctl_table vs_vars_table[] = {
87795+static ctl_table_no_const vs_vars_table[] __read_only = {
87796 {
87797 .procname = "lblcr_expiration",
87798 .data = NULL,
87799diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
87800index 44fd10c..2a163b3 100644
87801--- a/net/netfilter/ipvs/ip_vs_sync.c
87802+++ b/net/netfilter/ipvs/ip_vs_sync.c
87803@@ -596,7 +596,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
87804 cp = cp->control;
87805 if (cp) {
87806 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
87807- pkts = atomic_add_return(1, &cp->in_pkts);
87808+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
87809 else
87810 pkts = sysctl_sync_threshold(ipvs);
87811 ip_vs_sync_conn(net, cp->control, pkts);
87812@@ -758,7 +758,7 @@ control:
87813 if (!cp)
87814 return;
87815 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
87816- pkts = atomic_add_return(1, &cp->in_pkts);
87817+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
87818 else
87819 pkts = sysctl_sync_threshold(ipvs);
87820 goto sloop;
87821@@ -885,7 +885,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
87822
87823 if (opt)
87824 memcpy(&cp->in_seq, opt, sizeof(*opt));
87825- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
87826+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
87827 cp->state = state;
87828 cp->old_state = cp->state;
87829 /*
87830diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
87831index ee6b7a9..f9a89f6 100644
87832--- a/net/netfilter/ipvs/ip_vs_xmit.c
87833+++ b/net/netfilter/ipvs/ip_vs_xmit.c
87834@@ -1210,7 +1210,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
87835 else
87836 rc = NF_ACCEPT;
87837 /* do not touch skb anymore */
87838- atomic_inc(&cp->in_pkts);
87839+ atomic_inc_unchecked(&cp->in_pkts);
87840 goto out;
87841 }
87842
87843@@ -1332,7 +1332,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
87844 else
87845 rc = NF_ACCEPT;
87846 /* do not touch skb anymore */
87847- atomic_inc(&cp->in_pkts);
87848+ atomic_inc_unchecked(&cp->in_pkts);
87849 goto out;
87850 }
87851
87852diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
87853index 7df424e..a527b02 100644
87854--- a/net/netfilter/nf_conntrack_acct.c
87855+++ b/net/netfilter/nf_conntrack_acct.c
87856@@ -60,7 +60,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
87857 #ifdef CONFIG_SYSCTL
87858 static int nf_conntrack_acct_init_sysctl(struct net *net)
87859 {
87860- struct ctl_table *table;
87861+ ctl_table_no_const *table;
87862
87863 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
87864 GFP_KERNEL);
87865diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
87866index e4a0c4f..c263f28 100644
87867--- a/net/netfilter/nf_conntrack_core.c
87868+++ b/net/netfilter/nf_conntrack_core.c
87869@@ -1529,6 +1529,10 @@ err_extend:
87870 #define DYING_NULLS_VAL ((1<<30)+1)
87871 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
87872
87873+#ifdef CONFIG_GRKERNSEC_HIDESYM
87874+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
87875+#endif
87876+
87877 static int nf_conntrack_init_net(struct net *net)
87878 {
87879 int ret;
87880@@ -1543,7 +1547,11 @@ static int nf_conntrack_init_net(struct net *net)
87881 goto err_stat;
87882 }
87883
87884+#ifdef CONFIG_GRKERNSEC_HIDESYM
87885+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
87886+#else
87887 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
87888+#endif
87889 if (!net->ct.slabname) {
87890 ret = -ENOMEM;
87891 goto err_slabname;
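
The HIDESYM hunk above closes a small kernel-address leak: naming the per-netns conntrack slab cache "nf_conntrack_%p" embeds the struct net pointer in a cache name visible through /proc/slabinfo, so the hardened kernel derives the name from a monotonically increasing counter instead. A runnable userspace sketch of the difference in the resulting names (addresses and counter values are illustrative):

	#include <stdio.h>

	static unsigned long conntrack_cache_id;  /* models the counter */

	int main(void)
	{
		int netns_obj;   /* stand-in for the per-netns object */
		char leaky[64], hidden[64];

		snprintf(leaky,  sizeof(leaky),
			 "nf_conntrack_%p", (void *)&netns_obj);
		snprintf(hidden, sizeof(hidden),
			 "nf_conntrack_%08lx", ++conntrack_cache_id);

		printf("leaks an address:       %s\n", leaky);
		printf("reveals only a counter: %s\n", hidden);
		return 0;
	}
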
87892diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
87893index faa978f..1afb18f 100644
87894--- a/net/netfilter/nf_conntrack_ecache.c
87895+++ b/net/netfilter/nf_conntrack_ecache.c
87896@@ -186,7 +186,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
87897 #ifdef CONFIG_SYSCTL
87898 static int nf_conntrack_event_init_sysctl(struct net *net)
87899 {
87900- struct ctl_table *table;
87901+ ctl_table_no_const *table;
87902
87903 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
87904 GFP_KERNEL);
87905diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
87906index 884f2b3..d53b33a 100644
87907--- a/net/netfilter/nf_conntrack_helper.c
87908+++ b/net/netfilter/nf_conntrack_helper.c
87909@@ -55,7 +55,7 @@ static struct ctl_table helper_sysctl_table[] = {
87910
87911 static int nf_conntrack_helper_init_sysctl(struct net *net)
87912 {
87913- struct ctl_table *table;
87914+ ctl_table_no_const *table;
87915
87916 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
87917 GFP_KERNEL);
87918diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
87919index 51e928d..72a413a 100644
87920--- a/net/netfilter/nf_conntrack_proto.c
87921+++ b/net/netfilter/nf_conntrack_proto.c
87922@@ -51,7 +51,7 @@ nf_ct_register_sysctl(struct net *net,
87923
87924 static void
87925 nf_ct_unregister_sysctl(struct ctl_table_header **header,
87926- struct ctl_table **table,
87927+ ctl_table_no_const **table,
87928 unsigned int users)
87929 {
87930 if (users > 0)
87931diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
87932index e7185c6..4ad6c9c 100644
87933--- a/net/netfilter/nf_conntrack_standalone.c
87934+++ b/net/netfilter/nf_conntrack_standalone.c
87935@@ -470,7 +470,7 @@ static ctl_table nf_ct_netfilter_table[] = {
87936
87937 static int nf_conntrack_standalone_init_sysctl(struct net *net)
87938 {
87939- struct ctl_table *table;
87940+ ctl_table_no_const *table;
87941
87942 if (net_eq(net, &init_net)) {
87943 nf_ct_netfilter_header =
87944diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
87945index 7ea8026..bc9512d 100644
87946--- a/net/netfilter/nf_conntrack_timestamp.c
87947+++ b/net/netfilter/nf_conntrack_timestamp.c
87948@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
87949 #ifdef CONFIG_SYSCTL
87950 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
87951 {
87952- struct ctl_table *table;
87953+ ctl_table_no_const *table;
87954
87955 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
87956 GFP_KERNEL);
87957diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
87958index 9e31269..bc4c1b7 100644
87959--- a/net/netfilter/nf_log.c
87960+++ b/net/netfilter/nf_log.c
87961@@ -215,7 +215,7 @@ static const struct file_operations nflog_file_ops = {
87962
87963 #ifdef CONFIG_SYSCTL
87964 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
87965-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
87966+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
87967 static struct ctl_table_header *nf_log_dir_header;
87968
87969 static int nf_log_proc_dostring(ctl_table *table, int write,
87970@@ -246,14 +246,16 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
87971 rcu_assign_pointer(nf_loggers[tindex], logger);
87972 mutex_unlock(&nf_log_mutex);
87973 } else {
87974+ ctl_table_no_const nf_log_table = *table;
87975+
87976 mutex_lock(&nf_log_mutex);
87977 logger = rcu_dereference_protected(nf_loggers[tindex],
87978 lockdep_is_held(&nf_log_mutex));
87979 if (!logger)
87980- table->data = "NONE";
87981+ nf_log_table.data = "NONE";
87982 else
87983- table->data = logger->name;
87984- r = proc_dostring(table, write, buffer, lenp, ppos);
87985+ nf_log_table.data = logger->name;
87986+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
87987 mutex_unlock(&nf_log_mutex);
87988 }
87989
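
The nf_log.c hunk shows the companion trick to constification: rather than writing the logger name into the now read-only sysctl table, the handler takes a stack copy, points the copy's .data at the runtime string, and hands the copy to proc_dostring(). Reduced to its essentials (a sketch; current_logger_name is an invented placeholder for the "NONE"/logger->name choice made under the mutex above):

	static int show_current_logger(struct ctl_table *table, int write,
				       void __user *buffer, size_t *lenp,
				       loff_t *ppos)
	{
		struct ctl_table tmp = *table;  /* mutable stack copy */

		tmp.data = current_logger_name; /* runtime-chosen string */
		return proc_dostring(&tmp, write, buffer, lenp, ppos);
	}
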
87990diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
87991index f042ae5..30ea486 100644
87992--- a/net/netfilter/nf_sockopt.c
87993+++ b/net/netfilter/nf_sockopt.c
87994@@ -45,7 +45,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
87995 }
87996 }
87997
87998- list_add(&reg->list, &nf_sockopts);
87999+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
88000 out:
88001 mutex_unlock(&nf_sockopt_mutex);
88002 return ret;
88003@@ -55,7 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
88004 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
88005 {
88006 mutex_lock(&nf_sockopt_mutex);
88007- list_del(&reg->list);
88008+ pax_list_del((struct list_head *)&reg->list);
88009 mutex_unlock(&nf_sockopt_mutex);
88010 }
88011 EXPORT_SYMBOL(nf_unregister_sockopt);
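
With nf_sockopt_ops instances constified, their embedded list_head sits in read-only memory, so plain list_add()/list_del() would fault; pax_list_add()/pax_list_del() perform the same splice inside a transient write window, and the casts strip the const qualifier the compiler now sees on &reg->list. The conceptual shape, assuming the usual pax_open_kernel()/pax_close_kernel() pair from the PaX core (the real helpers also add list-corruption checks not shown here):

	void pax_list_add(struct list_head *new, struct list_head *head)
	{
		pax_open_kernel();    /* lift write protection */
		list_add(new, head);
		pax_close_kernel();   /* restore it */
	}

	void pax_list_del(struct list_head *entry)
	{
		pax_open_kernel();
		list_del(entry);
		pax_close_kernel();
	}
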
88012diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
88013index 92fd8ec..3f6ea4b 100644
88014--- a/net/netfilter/nfnetlink_log.c
88015+++ b/net/netfilter/nfnetlink_log.c
88016@@ -72,7 +72,7 @@ struct nfulnl_instance {
88017 };
88018
88019 static DEFINE_SPINLOCK(instances_lock);
88020-static atomic_t global_seq;
88021+static atomic_unchecked_t global_seq;
88022
88023 #define INSTANCE_BUCKETS 16
88024 static struct hlist_head instance_table[INSTANCE_BUCKETS];
88025@@ -537,7 +537,7 @@ __build_packet_message(struct nfulnl_instance *inst,
88026 /* global sequence number */
88027 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
88028 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
88029- htonl(atomic_inc_return(&global_seq))))
88030+ htonl(atomic_inc_return_unchecked(&global_seq))))
88031 goto nla_put_failure;
88032
88033 if (data_len) {
88034diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
88035new file mode 100644
88036index 0000000..c566332
88037--- /dev/null
88038+++ b/net/netfilter/xt_gradm.c
88039@@ -0,0 +1,51 @@
88040+/*
88041+ * gradm match for netfilter
88042